Browse Source

Merge remote-tracking branch 'origin/candidate-3.8.x'

Signed-off-by: Richard Chapman <rchapman@hpccsystems.com>
Richard Chapman 13 years ago
parent
commit
6b798f3971
75 changed files with 1896 additions and 1597 deletions
  1. 1 1
      cmake_modules/dependencies/el5.cmake
  2. 1 0
      cmake_modules/dependencies/el6.cmake
  3. 1 1
      cmake_modules/dependencies/lenny.cmake
  4. 1 1
      cmake_modules/dependencies/lucid.cmake
  5. 1 1
      cmake_modules/dependencies/natty.cmake
  6. 1 1
      cmake_modules/dependencies/oneiric.cmake
  7. 1 1
      cmake_modules/dependencies/precise.cmake
  8. 1 1
      cmake_modules/dependencies/squeeze.cmake
  9. 1 1
      cmake_modules/dependencies/suse11.3.cmake
  10. 1 1
      cmake_modules/dependencies/suse11.4.cmake
  11. 8 1
      ecl/hqlcpp/hqlcppc.hpp
  12. 19 4
      ecl/hqlcpp/hqlhtcpp.cpp
  13. 2 2
      ecl/hqlcpp/hqlhtcpp.ipp
  14. 1 1
      ecl/hqlcpp/hqllib.ipp
  15. 49 16
      ecl/hqlcpp/hqlresource.cpp
  16. 1 0
      ecl/hqlcpp/hqlresource.ipp
  17. 44 0
      ecl/regress/loophoist.ecl
  18. 45 0
      ecl/regress/loophoist2.ecl
  19. 45 0
      ecl/regress/loophoist3.ecl
  20. 164 0
      esp/files/dojox/html/ellipsis.js
  21. 147 0
      esp/files/dojox/html/metrics.js
  22. 4 0
      esp/files/dojox/main.js
  23. 14 4
      esp/files/scripts/configmgr/navtree.js
  24. 41 4
      esp/services/WsDeploy/WsDeployService.cpp
  25. 200 3
      esp/services/WsDeploy/WsDeployService.hpp
  26. 1 8
      plugins/dbconnectors/ecljdbc/src/main/java/com/hpccsystems/ecljdbc/CMakeLists.txt
  27. 0 2
      plugins/dbconnectors/ecljdbc/src/main/java/com/hpccsystems/ecljdbc/DFUFile.java
  28. 38 35
      plugins/dbconnectors/ecljdbc/src/main/java/com/hpccsystems/ecljdbc/EclConnection.java
  29. 16 21
      plugins/dbconnectors/ecljdbc/src/main/java/com/hpccsystems/ecljdbc/EclDatabaseMetaData.java
  30. 55 57
      plugins/dbconnectors/ecljdbc/src/main/java/com/hpccsystems/ecljdbc/EclDriver.java
  31. 145 82
      plugins/dbconnectors/ecljdbc/src/main/java/com/hpccsystems/ecljdbc/EclEngine.java
  32. 46 0
      plugins/dbconnectors/ecljdbc/src/main/java/com/hpccsystems/ecljdbc/EclFunction.java
  33. 44 0
      plugins/dbconnectors/ecljdbc/src/main/java/com/hpccsystems/ecljdbc/EclFunctions.java
  34. 1 2
      plugins/dbconnectors/ecljdbc/src/main/java/com/hpccsystems/ecljdbc/EclPreparedStatement.java
  35. 0 429
      plugins/dbconnectors/ecljdbc/src/main/java/com/hpccsystems/ecljdbc/EclResultSet.java
  36. 12 303
      plugins/dbconnectors/ecljdbc/src/main/java/com/hpccsystems/ecljdbc/SqlParser.java
  37. 1 0
      system/mp/mptag.hpp
  38. 5 1
      thorlcr/activities/fetch/thfetch.cpp
  39. 3 2
      thorlcr/activities/fetch/thfetchslave.cpp
  40. 12 11
      thorlcr/activities/indexread/thindexread.cpp
  41. 1 0
      thorlcr/activities/indexread/thindexreadslave.cpp
  42. 11 12
      thorlcr/activities/indexwrite/thindexwrite.cpp
  43. 2 3
      thorlcr/activities/indexwrite/thindexwriteslave.cpp
  44. 6 5
      thorlcr/activities/keyedjoin/thkeyedjoin.cpp
  45. 5 0
      thorlcr/activities/keyedjoin/thkeyedjoinslave.cpp
  46. 1 1
      thorlcr/activities/lookupjoin/thlookupjoinslave.cpp
  47. 1 1
      thorlcr/activities/loop/thloop.cpp
  48. 1 1
      thorlcr/activities/loop/thloopslave.cpp
  49. 1 0
      thorlcr/activities/thdiskbase.cpp
  50. 3 0
      thorlcr/activities/thdiskbaseslave.cpp
  51. 15 8
      thorlcr/graph/thgraph.cpp
  52. 8 5
      thorlcr/graph/thgraph.hpp
  53. 53 67
      thorlcr/graph/thgraphmaster.cpp
  54. 2 3
      thorlcr/graph/thgraphmaster.ipp
  55. 77 49
      thorlcr/graph/thgraphslave.cpp
  56. 5 4
      thorlcr/graph/thgraphslave.hpp
  57. 160 78
      thorlcr/master/mawatchdog.cpp
  58. 18 11
      thorlcr/master/mawatchdog.hpp
  59. 24 23
      thorlcr/master/thdemonserver.cpp
  60. 1 1
      thorlcr/master/thdemonserver.hpp
  61. 2 2
      thorlcr/master/thmastermain.cpp
  62. 8 8
      thorlcr/msort/tsortl.cpp
  63. 36 39
      thorlcr/msort/tsortm.cpp
  64. 33 58
      thorlcr/msort/tsortmp.cpp
  65. 16 18
      thorlcr/msort/tsortmp.hpp
  66. 34 40
      thorlcr/msort/tsorts.cpp
  67. 4 6
      thorlcr/msort/tsorts.hpp
  68. 25 20
      thorlcr/msort/tsorts1.cpp
  69. 2 6
      thorlcr/shared/thor.hpp
  70. 7 12
      thorlcr/shared/thwatchdog.hpp
  71. 1 1
      thorlcr/slave/slavmain.cpp
  72. 87 41
      thorlcr/slave/slwatchdog.cpp
  73. 2 2
      thorlcr/slave/slwatchdog.hpp
  74. 43 47
      thorlcr/thorutil/thmem.cpp
  75. 28 28
      thorlcr/thorutil/thmem.hpp

+ 1 - 1
cmake_modules/dependencies/el5.cmake

@@ -1 +1 @@
-set ( CPACK_RPM_PACKAGE_REQUIRES "boost141-regex, openldap, libicu, m4, libtool, xalan-c, xerces-c, gcc-c++, openssh-server, openssh-clients, expect")
+set ( CPACK_RPM_PACKAGE_REQUIRES "boost141-regex, openldap, libicu, m4, libtool, xalan-c, xerces-c, gcc-c++, openssh-server, openssh-clients, expect, libarchive")

+ 1 - 0
cmake_modules/dependencies/el6.cmake

@@ -0,0 +1 @@
+set ( CPACK_RPM_PACKAGE_REQUIRES "boost141-regex, openldap, libicu, m4, libtool, libxslt, libxml2, gcc-c++, openssh-server, openssh-clients, expect, libarchive")

+ 1 - 1
cmake_modules/dependencies/lenny.cmake

@@ -1 +1 @@
-set ( CPACK_DEBIAN_PACKAGE_DEPENDS "libboost-regex1.34.1, libicu38, libxalan110, libxerces-c28, binutils, libldap-2.4-2, openssl, zlib1g, g++, sudo, openssh-client, openssh-server, expect")
+set ( CPACK_DEBIAN_PACKAGE_DEPENDS "libboost-regex1.34.1, libicu38, libxalan110, libxerces-c28, binutils, libldap-2.4-2, openssl, zlib1g, g++, sudo, openssh-client, openssh-server, expect, libarchive")

+ 1 - 1
cmake_modules/dependencies/lucid.cmake

@@ -1 +1 @@
-set ( CPACK_DEBIAN_PACKAGE_DEPENDS "libboost-regex1.40.0, libicu42, libxalan110, libxerces-c28, binutils, libldap-2.4-2, openssl, zlib1g, g++, openssh-client, openssh-server, expect")
+set ( CPACK_DEBIAN_PACKAGE_DEPENDS "libboost-regex1.40.0, libicu42, libxalan110, libxerces-c28, binutils, libldap-2.4-2, openssl, zlib1g, g++, openssh-client, openssh-server, expect, libarchive")

+ 1 - 1
cmake_modules/dependencies/natty.cmake

@@ -1 +1 @@
-set ( CPACK_DEBIAN_PACKAGE_DEPENDS "libboost-regex1.42.0, libicu44, libxalan110, libxerces-c28, binutils, libldap-2.4-2, openssl, zlib1g, g++, openssh-client, openssh-server, expect")
+set ( CPACK_DEBIAN_PACKAGE_DEPENDS "libboost-regex1.42.0, libicu44, libxalan110, libxerces-c28, binutils, libldap-2.4-2, openssl, zlib1g, g++, openssh-client, openssh-server, expect, libarchive")

+ 1 - 1
cmake_modules/dependencies/oneiric.cmake

@@ -1 +1 @@
-set ( CPACK_DEBIAN_PACKAGE_DEPENDS "libboost-regex1.46.1, libicu44, libxalan110, libxerces-c28, binutils, libldap-2.4-2, openssl, zlib1g, g++, openssh-client, openssh-server, expect")
+set ( CPACK_DEBIAN_PACKAGE_DEPENDS "libboost-regex1.46.1, libicu44, libxalan110, libxerces-c28, binutils, libldap-2.4-2, openssl, zlib1g, g++, openssh-client, openssh-server, expect, libarchive1")

+ 1 - 1
cmake_modules/dependencies/precise.cmake

@@ -1 +1 @@
-set ( CPACK_DEBIAN_PACKAGE_DEPENDS "libboost-regex1.46.1, libicu48, libxalan110, libxerces-c28, binutils, libldap-2.4-2, openssl, zlib1g, g++, openssh-client, openssh-server, expect")
+set ( CPACK_DEBIAN_PACKAGE_DEPENDS "libboost-regex1.46.1, libicu48, libxalan110, libxerces-c28, binutils, libldap-2.4-2, openssl, zlib1g, g++, openssh-client, openssh-server, expect, libarchive12")

+ 1 - 1
cmake_modules/dependencies/squeeze.cmake

@@ -1 +1 @@
-set ( CPACK_DEBIAN_PACKAGE_DEPENDS "libboost-regex1.42.0, libicu44, libxalan110, libxerces-c28, binutils, libldap-2.4-2, openssl, zlib1g, g++, openssh-client, openssh-server, expect")
+set ( CPACK_DEBIAN_PACKAGE_DEPENDS "libboost-regex1.42.0, libicu44, libxalan110, libxerces-c28, binutils, libldap-2.4-2, openssl, zlib1g, g++, openssh-client, openssh-server, expect, libarchive")

+ 1 - 1
cmake_modules/dependencies/suse11.3.cmake

@@ -1 +1 @@
-set ( CPACK_RPM_PACKAGE_REQUIRES  "binutils, gcc-c++, openssh, libldap-2_4-2, libicu, libboost_regex1_42_0, libxerces-c-3_0, libxalan-c110, expect" )
+set ( CPACK_RPM_PACKAGE_REQUIRES  "binutils, gcc-c++, openssh, libldap-2_4-2, libicu, libboost_regex1_42_0, libxerces-c-3_0, libxalan-c110, expect, libarchive2" )

+ 1 - 1
cmake_modules/dependencies/suse11.4.cmake

@@ -1 +1 @@
-set ( CPACK_RPM_PACKAGE_REQUIRES  "binutils, gcc-c++, openssh, libldap-2_4-2, libicu, libboost_regex1_44_0, libxerces-c-3_0, libxalan-c110, expect" )
+set ( CPACK_RPM_PACKAGE_REQUIRES  "binutils, gcc-c++, openssh, libldap-2_4-2, libicu, libboost_regex1_44_0, libxerces-c-3_0, libxalan-c110, expect, libarchive2" )

+ 8 - 1
ecl/hqlcpp/hqlcppc.hpp

@@ -61,13 +61,19 @@ public:
 class HQLCPP_API ABoundActivity : public CInterface, public IInterface
 {
 public:
-    ABoundActivity(IHqlExpression * _dataset, IHqlExpression * _bound, unsigned _activityid, unsigned _graphId, ThorActivityKind _kind) : represents(_dataset), bound(_bound) { activityId = _activityid; graphId = _graphId; outputCount = 0; kind = _kind; }
+    ABoundActivity(IHqlExpression * _dataset, IHqlExpression * _bound, unsigned _activityid, unsigned _containerid, unsigned _graphId, ThorActivityKind _kind)
+        : represents(_dataset), bound(_bound), activityId(_activityid), containerId(_containerid), graphId(_graphId)
+    {
+        outputCount = 0;
+        kind = _kind;
+    }
     IMPLEMENT_IINTERFACE
 
     inline  IHqlExpression * queryBound() const { return bound; }
     inline  IHqlExpression * queryDataset() const { return represents; }
     inline unsigned queryActivityId() const { return activityId; }
     inline ThorActivityKind queryActivityKind() const { return kind; }
+    inline unsigned queryContainerId() const { return containerId; }
     inline unsigned queryGraphId() const { return graphId; }
     inline unsigned nextOutputCount() { return outputCount++; }
     IHqlDelayedCodeGenerator * createOutputCountCallback();
@@ -78,6 +84,7 @@ private:
     HqlExprAttr             represents;
     HqlExprAttr             bound;
     unsigned                activityId;
+    unsigned                containerId;
     unsigned                graphId;
     unsigned                outputCount;
     ThorActivityKind        kind;

+ 19 - 4
ecl/hqlcpp/hqlhtcpp.cpp

@@ -1832,7 +1832,6 @@ ActivityInstance::ActivityInstance(HqlCppTranslator & _translator, BuildCtx & ct
     argsName.set(s.clear().append("oAc").append(activityId).str());
 
     OwnedHqlExpr boundName = createVariable(instanceName, dataset->getType());
-    table = new ThorBoundActivity(dataset, boundName, activityId, translator.curSubGraphId(ctx), kind);
     isMember = false;
     instanceIsLocal = false;
     classStmt = NULL;
@@ -1892,8 +1891,15 @@ ActivityInstance::ActivityInstance(HqlCppTranslator & _translator, BuildCtx & ct
     if (!parentExtract && (translator.getTargetClusterType() == RoxieCluster))
         executedRemotely = isNonLocal(dataset);
 
+
+    unsigned containerId = 0;
     if (containerActivity)
+    {
         containerActivity->hasChildActivity = true;
+        containerId = containerActivity->activityId;
+    }
+
+    table = new ThorBoundActivity(dataset, boundName, activityId, containerId, translator.curSubGraphId(ctx), kind);
 }
 
 ActivityInstance::~ActivityInstance()
@@ -6803,10 +6809,19 @@ void HqlCppTranslator::addDependency(BuildCtx & ctx, ABoundActivity * element, A
             addGraphAttributeInt(edge, "_sourceIndex", outputIndex);
     }
 
-    if (kind == dependencyAtom)
-        addGraphAttributeBool(edge, "_dependsOn", true);
-    else if (kind == childAtom)
+    if (kind == childAtom)
+    {
         addGraphAttributeBool(edge, "_childGraph", true);
+    }
+    else if (kind == dependencyAtom)
+    {
+        addGraphAttributeBool(edge, "_dependsOn", true);
+    }
+    else if (sourceActivity->queryContainerId() != sinkActivity->queryContainerId())
+    {
+        //mark as a dependendency if the source and target aren't at the same depth
+        addGraphAttributeBool(edge, "_dependsOn", true);
+    }
 
     if (whenId)
         addGraphAttributeInt(edge, "_when", whenId);

+ 2 - 2
ecl/hqlcpp/hqlhtcpp.ipp

@@ -30,8 +30,8 @@
 class HQLCPP_API ThorBoundActivity : public ABoundActivity
 {
 public:
-    ThorBoundActivity(IHqlExpression * _dataset, IHqlExpression * _bound, unsigned _activityid, unsigned _graphId, ThorActivityKind _kind) 
-    : ABoundActivity(_dataset->queryBody(), _bound, _activityid, _graphId, _kind) {}
+    ThorBoundActivity(IHqlExpression * _dataset, IHqlExpression * _bound, unsigned _activityid, unsigned _containerid, unsigned _graphId, ThorActivityKind _kind)
+    : ABoundActivity(_dataset->queryBody(), _bound, _activityid, _containerid, _graphId, _kind) {}
 };
 
 //===========================================================================

+ 1 - 1
ecl/hqlcpp/hqllib.ipp

@@ -86,7 +86,7 @@ class ThorBoundLibraryActivity : public ThorBoundActivity
 {
 public:
     ThorBoundLibraryActivity(ABoundActivity * activity, IPropertyTree * _graphNode, HqlCppLibraryInstance * _libraryInstance)
-    : ThorBoundActivity(activity->queryDataset(), activity->queryBound(), activity->queryActivityId(), activity->queryGraphId(), activity->queryActivityKind()) 
+    : ThorBoundActivity(activity->queryDataset(), activity->queryBound(), activity->queryActivityId(), activity->queryContainerId(), activity->queryGraphId(), activity->queryActivityKind())
     {
         graphNode.set(_graphNode);
         libraryInstance.set(_libraryInstance);

+ 49 - 16
ecl/hqlcpp/hqlresource.cpp

@@ -1953,9 +1953,16 @@ static HqlTransformerInfo eclHoistLocatorInfo("EclHoistLocator");
 class EclHoistLocator : public NewHqlTransformer
 {
 public:
-    EclHoistLocator(HqlExprCopyArray & _originalMatches, HqlExprArray & _matches, BoolArray & _alwaysHoistMatches) 
-        : NewHqlTransformer(eclHoistLocatorInfo), originalMatched(_originalMatches), matched(_matches), alwaysHoistMatches(_alwaysHoistMatches)
-    { 
+    EclHoistLocator(HqlExprCopyArray & _originalMatches, HqlExprArray & _matches, BoolArray & _singleNode, BoolArray & _alwaysHoistMatches)
+        : NewHqlTransformer(eclHoistLocatorInfo), originalMatched(_originalMatches), matched(_matches), singleNode(_singleNode), alwaysHoistMatches(_alwaysHoistMatches)
+    {
+        alwaysSingle = true;
+    }
+
+    void analyseChild(IHqlExpression * expr, bool _alwaysSingle)
+    {
+        alwaysSingle = _alwaysSingle;
+        analyse(expr, 0);
     }
 
     void noteDataset(IHqlExpression * expr, IHqlExpression * hoisted, bool alwaysHoist)
@@ -1969,9 +1976,15 @@ public:
             originalMatched.append(*expr);
             matched.append(*LINK(hoisted));
             alwaysHoistMatches.append(alwaysHoist);
+            singleNode.append(alwaysSingle);
+        }
+        else
+        {
+            if (alwaysHoist && !alwaysHoistMatches.item(match))
+                alwaysHoistMatches.replace(true, match);
+            if (alwaysSingle && !singleNode.item(match))
+                singleNode.replace(true, match);
         }
-        else if (alwaysHoist && !alwaysHoistMatches.item(match))
-            alwaysHoistMatches.replace(true, match);
     }
 
     void noteScalar(IHqlExpression * expr, IHqlExpression * value)
@@ -2036,13 +2049,16 @@ public:
             originalMatched.append(*expr);
             matched.append(*hoisted.getClear());
             alwaysHoistMatches.append(true);
+            singleNode.append(true);
         }
     }
 
 protected:
     HqlExprCopyArray & originalMatched;
     HqlExprArray & matched;
+    BoolArray & singleNode;
     BoolArray & alwaysHoistMatches;
+    bool alwaysSingle;
 };
 
 
@@ -2050,8 +2066,8 @@ protected:
 class EclChildSplitPointLocator : public EclHoistLocator
 {
 public:
-    EclChildSplitPointLocator(IHqlExpression * _original, HqlExprCopyArray & _selectors, HqlExprCopyArray & _originalMatches, HqlExprArray & _matches, BoolArray & _alwaysHoistMatches, bool _groupedChildIterators, bool _supportsChildQueries) 
-    : EclHoistLocator(_originalMatches, _matches, _alwaysHoistMatches), selectors(_selectors), groupedChildIterators(_groupedChildIterators), supportsChildQueries(_supportsChildQueries)
+    EclChildSplitPointLocator(IHqlExpression * _original, HqlExprCopyArray & _selectors, HqlExprCopyArray & _originalMatches, HqlExprArray & _matches, BoolArray & _singleNode, BoolArray & _alwaysHoistMatches, bool _groupedChildIterators, bool _supportsChildQueries)
+    : EclHoistLocator(_originalMatches, _matches, _singleNode, _alwaysHoistMatches), selectors(_selectors), groupedChildIterators(_groupedChildIterators), supportsChildQueries(_supportsChildQueries)
     { 
         original = _original;
         okToSelect = false; 
@@ -2068,14 +2084,16 @@ public:
         }
     }
 
-    void findSplitPoints(IHqlExpression * expr, unsigned from, unsigned to, bool _executedOnce)
+    void findSplitPoints(IHqlExpression * expr, unsigned from, unsigned to, bool _alwaysSingle, bool _executedOnce)
     {
+        alwaysSingle = _alwaysSingle;
         for (unsigned i=from; i < to; i++)
         {
             IHqlExpression * cur = expr->queryChild(i);
             executedOnce = _executedOnce || cur->isAttribute();     // assume attributes are only executed once.
             findSplitPoints(cur);
         }
+        alwaysSingle = false;
     }
 
 protected:
@@ -2434,7 +2452,7 @@ protected:
 void EclResourcer::gatherChildSplitPoints(IHqlExpression * expr, BoolArray & alwaysHoistChild, ResourcerInfo * info, unsigned first, unsigned last)
 {
     //NB: Don't call member functions to ensure correct nesting of transform mutexes.
-    EclChildSplitPointLocator locator(expr, activeSelectors, info->originalChildDependents, info->childDependents, alwaysHoistChild, options.groupedChildIterators, options.supportsChildQueries);
+    EclChildSplitPointLocator locator(expr, activeSelectors, info->originalChildDependents, info->childDependents, info->childSingleNode, alwaysHoistChild, options.groupedChildIterators, options.supportsChildQueries);
     unsigned max = expr->numChildren();
 
     //If child queries are supported then don't hoist the expressions if they might only be evaluated once
@@ -2444,18 +2462,32 @@ void EclResourcer::gatherChildSplitPoints(IHqlExpression * expr, BoolArray & alw
     case no_setresult:
     case no_selectnth:
         //set results, only done once=>don't hoist conditionals
-        locator.findSplitPoints(expr, last, max, true);
+        locator.findSplitPoints(expr, last, max, true, true);
         return;
+    case no_loop:
+        if ((options.targetClusterType == ThorLCRCluster) && !options.isChildQuery)
+        {
+            //This is ugly!  The body is executed in parallel, so don't force that as a work unit result
+            //It means some child query expressions within loops don't get forced into work unit writes
+            //but that just means that the generated code will be not as good as it could be.
+            const unsigned bodyArg = 4;
+            locator.findSplitPoints(expr, 1, bodyArg, true, false);
+            locator.findSplitPoints(expr, bodyArg, bodyArg+1, false, false);
+            locator.findSplitPoints(expr, bodyArg+1, max, true, false);
+            return;
+        }
+        break;
     }
-    locator.findSplitPoints(expr, 0, first, true);          // IF() conditions only evaluated once... => don't force
-    locator.findSplitPoints(expr, last, max, false);
+    locator.findSplitPoints(expr, 0, first, true, true);          // IF() conditions only evaluated once... => don't force
+    locator.findSplitPoints(expr, last, max, true, false);
 }
 
 
 class EclThisNodeLocator : public EclHoistLocator
 {
 public:
-    EclThisNodeLocator(HqlExprCopyArray & _originalMatches, HqlExprArray & _matches, BoolArray & _alwaysHoistMatches) : EclHoistLocator(_originalMatches, _matches, _alwaysHoistMatches)
+    EclThisNodeLocator(HqlExprCopyArray & _originalMatches, HqlExprArray & _matches, BoolArray & _singleNode, BoolArray & _alwaysHoistMatches)
+        : EclHoistLocator(_originalMatches, _matches, _singleNode, _alwaysHoistMatches)
     { 
         allNodesDepth = 0;
     }
@@ -2646,8 +2678,8 @@ bool EclResourcer::findSplitPoints(IHqlExpression * expr)
         case no_allnodes:
             {
                 //MORE: This needs to recursively walk and lift any contained no_selfnode, but don't go past another nested no_allnodes;
-                EclThisNodeLocator locator(info->originalChildDependents, info->childDependents, alwaysHoistChild);
-                locator.analyse(expr->queryChild(0), 0);
+                EclThisNodeLocator locator(info->originalChildDependents, info->childDependents, info->childSingleNode, alwaysHoistChild);
+                locator.analyseChild(expr->queryChild(0), true);
                 break;
             }
         default:
@@ -3472,7 +3504,8 @@ void EclResourcer::addDependencies(IHqlExpression * expr, ResourceGraphInfo * gr
         {
             addDependencies(&cur, NULL, NULL);
             ResourcerInfo * sourceInfo = queryResourceInfo(&cur);
-            sourceInfo->noteUsedFromChild();
+            if (info->childSingleNode.item(i))
+                sourceInfo->noteUsedFromChild();
             ResourceGraphLink * link = new ResourceGraphDependencyLink(sourceInfo->graph, &cur, graph, expr);
             graph->dependsOn.append(*link);
             links.append(*link);

+ 1 - 0
ecl/hqlcpp/hqlresource.ipp

@@ -277,6 +277,7 @@ public:
     HqlExprArray conditions;
     HqlExprArray childDependents;
     HqlExprCopyArray originalChildDependents;
+    BoolArray childSingleNode;
     HqlExprAttr spilledDataset;
     HqlExprAttr splitterOutput;
 

+ 44 - 0
ecl/regress/loophoist.ecl

@@ -0,0 +1,44 @@
+/*##############################################################################
+
+    Copyright (C) 2011 HPCC Systems.
+
+    All rights reserved. This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU Affero General Public License as
+    published by the Free Software Foundation, either version 3 of the
+    License, or (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU Affero General Public License for more details.
+
+    You should have received a copy of the GNU Affero General Public License
+    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+############################################################################## */
+
+
+namesRecord :=
+            RECORD
+string20        surname;
+string10        forename;
+integer2        age := 25;
+            END;
+
+namesTable := dataset('x',namesRecord,FLAT);
+
+otherTable := dataset('other',namesRecord,FLAT);
+
+
+processLoop(dataset(namesRecord) in) := FUNCTION
+
+    x := otherTable(LENGTH(TRIM(surname)) > 1);
+    x2 := dedup(x, surname, all);
+
+    y := JOIN(in, x2, LEFT.surname = RIGHT.surname);
+
+    RETURN y;
+END;
+
+
+ds1 := LOOP(namesTable, 100, processLoop(ROWS(LEFT)));
+output(ds1);

+ 45 - 0
ecl/regress/loophoist2.ecl

@@ -0,0 +1,45 @@
+/*##############################################################################
+
+    Copyright (C) 2011 HPCC Systems.
+
+    All rights reserved. This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU Affero General Public License as
+    published by the Free Software Foundation, either version 3 of the
+    License, or (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU Affero General Public License for more details.
+
+    You should have received a copy of the GNU Affero General Public License
+    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+############################################################################## */
+
+
+namesRecord :=
+            RECORD
+string20        surname;
+string10        forename;
+integer2        age := 25;
+            END;
+
+namesTable := dataset('x',namesRecord,FLAT);
+
+otherTable := dataset('other',namesRecord,FLAT);
+
+
+processLoop(dataset(namesRecord) in, unsigned c) := FUNCTION
+
+    x := otherTable(LENGTH(TRIM(surname)) > 1);
+    x2 := dedup(x, surname, all);
+
+    //Use x2 from a child query - so it IS force to a single node
+    y := JOIN(in, x2, LEFT.surname = RIGHT.surname and LEFT.surname != x2[c].surname);
+
+    RETURN y;
+END;
+
+
+ds1 := LOOP(namesTable, 100, processLoop(ROWS(LEFT), COUNTER));
+output(ds1);

+ 45 - 0
ecl/regress/loophoist3.ecl

@@ -0,0 +1,45 @@
+/*##############################################################################
+
+    Copyright (C) 2011 HPCC Systems.
+
+    All rights reserved. This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU Affero General Public License as
+    published by the Free Software Foundation, either version 3 of the
+    License, or (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU Affero General Public License for more details.
+
+    You should have received a copy of the GNU Affero General Public License
+    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+############################################################################## */
+
+
+namesRecord :=
+            RECORD
+string20        surname;
+string10        forename;
+integer2        age := 25;
+            END;
+
+namesTable := dataset('x',namesRecord,FLAT);
+
+otherTable := dataset('other',namesRecord,FLAT);
+
+    x := otherTable(LENGTH(TRIM(surname)) > 1);
+    x2 := dedup(x, surname, all);
+
+processLoop(dataset(namesRecord) in, unsigned c) := FUNCTION
+
+
+    //Use x2 from a child query - so it IS force to a single node
+    y := JOIN(in, x2, LEFT.surname = RIGHT.surname);
+
+    RETURN y;
+END;
+
+
+ds1 := LOOP(namesTable, 100, LEFT.surname != x2[COUNTER].surname, processLoop(ROWS(LEFT), COUNTER));
+output(ds1);

+ 164 - 0
esp/files/dojox/html/ellipsis.js

@@ -0,0 +1,164 @@
+//>>built
+define("dojox/html/ellipsis",["dojo/_base/kernel","dojo/_base/lang","dojo/_base/array","dojo/_base/Color","dojo/colors"],function(d){
+if(d.isFF<7){
+var _1=1;
+if("dojoxFFEllipsisDelay" in d.config){
+_1=Number(d.config.dojoxFFEllipsisDelay);
+if(isNaN(_1)){
+_1=1;
+}
+}
+try{
+var _2=(function(){
+var _3="http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul";
+var _4=document.createElementNS(_3,"window");
+var _5=document.createElementNS(_3,"description");
+_5.setAttribute("crop","end");
+_4.appendChild(_5);
+return function(n){
+var x=_4.cloneNode(true);
+x.firstChild.setAttribute("value",n.textContent);
+n.innerHTML="";
+n.appendChild(x);
+};
+})();
+}
+catch(e){
+}
+var _6=d.create;
+var dd=d.doc;
+var dp=d.place;
+var _7=_6("iframe",{className:"dojoxEllipsisIFrame",src:"javascript:'<html><head><script>if(\"loadFirebugConsole\" in window){window.loadFirebugConsole();}</script></head><body></body></html>'"});
+var _8=function(r,_9){
+if(r.collapsed){
+return;
+}
+if(_9>0){
+do{
+_8(r);
+_9--;
+}while(_9);
+return;
+}
+if(r.endContainer.nodeType==3&&r.endOffset>0){
+r.setEnd(r.endContainer,r.endOffset-1);
+}else{
+if(r.endContainer.nodeType==3){
+r.setEndBefore(r.endContainer);
+_8(r);
+return;
+}else{
+if(r.endOffset&&r.endContainer.childNodes.length>=r.endOffset){
+var _a=r.endContainer.childNodes[r.endOffset-1];
+if(_a.nodeType==3){
+r.setEnd(_a,_a.length-1);
+}else{
+if(_a.childNodes.length){
+r.setEnd(_a,_a.childNodes.length);
+_8(r);
+return;
+}else{
+r.setEndBefore(_a);
+_8(r);
+return;
+}
+}
+}else{
+r.setEndBefore(r.endContainer);
+_8(r);
+return;
+}
+}
+}
+};
+var _b=function(n){
+var c=_6("div",{className:"dojoxEllipsisContainer"});
+var e=_6("div",{className:"dojoxEllipsisShown",style:{display:"none"}});
+n.parentNode.replaceChild(c,n);
+c.appendChild(n);
+c.appendChild(e);
+var i=_7.cloneNode(true);
+var ns=n.style;
+var es=e.style;
+var _c;
+var _d=function(){
+ns.display="";
+es.display="none";
+if(n.scrollWidth<=n.offsetWidth){
+return;
+}
+var r=dd.createRange();
+r.selectNodeContents(n);
+ns.display="none";
+es.display="";
+var _e=false;
+do{
+var _f=1;
+dp(r.cloneContents(),e,"only");
+var sw=e.scrollWidth,ow=e.offsetWidth;
+_e=(sw<=ow);
+var pct=(1-((ow*1)/sw));
+if(pct>0){
+_f=Math.max(Math.round(e.textContent.length*pct)-1,1);
+}
+_8(r,_f);
+}while(!r.collapsed&&!_e);
+};
+i.onload=function(){
+i.contentWindow.onresize=_d;
+_d();
+};
+c.appendChild(i);
+};
+var hc=d.hasClass;
+var doc=d.doc;
+var s,fn,opt;
+if(doc.querySelectorAll){
+s=doc;
+fn="querySelectorAll";
+opt=".dojoxEllipsis";
+}else{
+if(doc.getElementsByClassName){
+s=doc;
+fn="getElementsByClassName";
+opt="dojoxEllipsis";
+}else{
+s=d;
+fn="query";
+opt=".dojoxEllipsis";
+}
+}
+fx=function(){
+d.forEach(s[fn].apply(s,[opt]),function(n){
+if(!n||n._djx_ellipsis_done){
+return;
+}
+n._djx_ellipsis_done=true;
+if(_2&&n.textContent==n.innerHTML&&!hc(n,"dojoxEllipsisSelectable")){
+_2(n);
+}else{
+_b(n);
+}
+});
+};
+d.addOnLoad(function(){
+var t=null;
+var c=null;
+var _10=function(){
+if(c){
+d.disconnect(c);
+c=null;
+}
+if(t){
+clearTimeout(t);
+}
+t=setTimeout(function(){
+t=null;
+fx();
+c=d.connect(d.body(),"DOMSubtreeModified",_10);
+},_1);
+};
+_10();
+});
+}
+});

+ 147 - 0
esp/files/dojox/html/metrics.js

@@ -0,0 +1,147 @@
+//>>built
+define("dojox/html/metrics",["dojo/_base/kernel","dojo/_base/lang","dojo/_base/sniff","dojo/ready","dojo/_base/unload","dojo/_base/window","dojo/dom-geometry"],function(_1,_2,_3,_4,_5,_6,_7){
+var _8=_2.getObject("dojox.html.metrics",true);
+var _9=_2.getObject("dojox");
+_8.getFontMeasurements=function(){
+var _a={"1em":0,"1ex":0,"100%":0,"12pt":0,"16px":0,"xx-small":0,"x-small":0,"small":0,"medium":0,"large":0,"x-large":0,"xx-large":0};
+if(_3("ie")){
+_6.doc.documentElement.style.fontSize="100%";
+}
+var _b=_6.doc.createElement("div");
+var ds=_b.style;
+ds.position="absolute";
+ds.left="-100px";
+ds.top="0";
+ds.width="30px";
+ds.height="1000em";
+ds.borderWidth="0";
+ds.margin="0";
+ds.padding="0";
+ds.outline="0";
+ds.lineHeight="1";
+ds.overflow="hidden";
+_6.body().appendChild(_b);
+for(var p in _a){
+ds.fontSize=p;
+_a[p]=Math.round(_b.offsetHeight*12/16)*16/12/1000;
+}
+_6.body().removeChild(_b);
+_b=null;
+return _a;
+};
+var _c=null;
+_8.getCachedFontMeasurements=function(_d){
+if(_d||!_c){
+_c=_8.getFontMeasurements();
+}
+return _c;
+};
+var _e=null,_f={};
+_8.getTextBox=function(_10,_11,_12){
+var m,s;
+if(!_e){
+m=_e=_6.doc.createElement("div");
+var c=_6.doc.createElement("div");
+c.appendChild(m);
+s=c.style;
+s.overflow="scroll";
+s.position="absolute";
+s.left="0px";
+s.top="-10000px";
+s.width="1px";
+s.height="1px";
+s.visibility="hidden";
+s.borderWidth="0";
+s.margin="0";
+s.padding="0";
+s.outline="0";
+_6.body().appendChild(c);
+}else{
+m=_e;
+}
+m.className="";
+s=m.style;
+s.borderWidth="0";
+s.margin="0";
+s.padding="0";
+s.outline="0";
+if(arguments.length>1&&_11){
+for(var i in _11){
+if(i in _f){
+continue;
+}
+s[i]=_11[i];
+}
+}
+if(arguments.length>2&&_12){
+m.className=_12;
+}
+m.innerHTML=_10;
+var box=_7.position(m);
+box.w=m.parentNode.scrollWidth;
+return box;
+};
+var _13={w:16,h:16};
+_8.getScrollbar=function(){
+return {w:_13.w,h:_13.h};
+};
+_8._fontResizeNode=null;
+_8.initOnFontResize=function(_14){
+var f=_8._fontResizeNode=_6.doc.createElement("iframe");
+var fs=f.style;
+fs.position="absolute";
+fs.width="5em";
+fs.height="10em";
+fs.top="-10000px";
+if(_3("ie")){
+f.onreadystatechange=function(){
+if(f.contentWindow.document.readyState=="complete"){
+f.onresize=f.contentWindow.parent[_9._scopeName].html.metrics._fontresize;
+}
+};
+}else{
+f.onload=function(){
+f.contentWindow.onresize=f.contentWindow.parent[_9._scopeName].html.metrics._fontresize;
+};
+}
+f.setAttribute("src","javascript:'<html><head><script>if(\"loadFirebugConsole\" in window){window.loadFirebugConsole();}</script></head><body></body></html>'");
+_6.body().appendChild(f);
+_8.initOnFontResize=function(){
+};
+};
+_8.onFontResize=function(){
+};
+_8._fontresize=function(){
+_8.onFontResize();
+};
+_5.addOnUnload(function(){
+var f=_8._fontResizeNode;
+if(f){
+if(_3("ie")&&f.onresize){
+f.onresize=null;
+}else{
+if(f.contentWindow&&f.contentWindow.onresize){
+f.contentWindow.onresize=null;
+}
+}
+_8._fontResizeNode=null;
+}
+});
+_4(function(){
+try{
+var n=_6.doc.createElement("div");
+n.style.cssText="top:0;left:0;width:100px;height:100px;overflow:scroll;position:absolute;visibility:hidden;";
+_6.body().appendChild(n);
+_13.w=n.offsetWidth-n.clientWidth;
+_13.h=n.offsetHeight-n.clientHeight;
+_6.body().removeChild(n);
+delete n;
+}
+catch(e){
+}
+if("fontSizeWatch" in _1.config&&!!_1.config.fontSizeWatch){
+_8.initOnFontResize();
+}
+});
+return _8;
+});

+ 4 - 0
esp/files/dojox/main.js

@@ -0,0 +1,4 @@
+//>>built
+define("dojox/main",["dojo/_base/kernel"],function(_1){
+return _1.dojox;
+});

+ 14 - 4
esp/files/scripts/configmgr/navtree.js

@@ -317,14 +317,18 @@ function keepAlive() {
         resetHiddenVars();
         document.forms['treeForm'].mode.value = prevmode;
       }
-      else if (form.saveInProgress.value !== "true" && (RefreshClient1[0] === 'true' || (LastSaved1[0].charAt(0) != '<' && form.lastSaved.value.length && (form.lastSaved.value != LastSaved1[0])))) {
+      else if ( document.getElementById('ReadWrite').checked == false &&
+                form.saveInProgress.value !== "true" &&
+                (RefreshClient1[0] === 'true' || (LastSaved1[0].charAt(0) != '<' && form.lastSaved.value.length && (form.lastSaved.value != LastSaved1[0]))))
+      {
         if (document.forms['treeForm'].mode.value === '2')
           msg = "Environment has been updated by another user. Press Ok to reload the Environment.";
         form.lastSaved.value = LastSaved1[0];
       }
-
-      if (msg.length > 0)
+      if (msg.length > 0  && form.isChanged.value === "false")
+      {
         refresh(msg);
+      }
     },
     failure: function(o) {
     },
@@ -1597,9 +1601,13 @@ function unlockEnvironment(navtable, saveEnv) {
             var form = document.forms['treeForm'];
             var LastSaved = o.responseText.split(/<LastSaved>/g);
             var LastSaved1 = LastSaved[1].split(/<\/LastSaved>/g);
+            var Refresh = "false";
 
             if (LastSaved1[0].charAt(0) != '<' && form.lastSaved.value !== LastSaved1[0])
+            {
+              Refresh = "true";
               form.lastSaved.value = LastSaved1[0];
+            }
 
             Dom.removeClass(navtable.getTrEl(0), 'envlocked');
             form.isLocked.value = "false";
@@ -1607,8 +1615,10 @@ function unlockEnvironment(navtable, saveEnv) {
             form.isChanged.value = "false";
             updateEnvCtrls(false);
 
-            if (saveEnv || changed === "true")
+            if (saveEnv || changed === "true" || Refresh === "true")
+            {
               refresh();
+            }
           }
         }
         else if (o.responseText.indexOf("<html") === 0) {

+ 41 - 4
esp/services/WsDeploy/WsDeployService.cpp

@@ -24,7 +24,6 @@
 #include "daclient.hpp"
 #include "dadfs.hpp"
 #include "jencrypt.hpp"
-#include "build-config.h"
 
 #ifdef _WINDOWS
 #include <winsock2.h>
@@ -274,7 +273,7 @@ IPropertyTree* CWsDeployFileInfo::getEnvTree(IEspContext &context, IConstWsDeplo
   
   context.getPeer(sbUserIp);
 
-  if (m_userWithLock.length() && !strcmp(sbName.str(), m_userWithLock.str()) && !strcmp(sbUserIp.str(), m_userIp.str()))
+  if (m_userWithLock.length() && !strcmp(sbName.str(), m_userWithLock.str()) && !strcmp(sbUserIp.str(), m_userIp.str()) &&  m_Environment != NULL)
     return &m_Environment->getPTree();
   else
     return &m_constEnvRdOnly->getPTree();
@@ -2824,6 +2823,12 @@ bool CWsDeployFileInfo::clientAlive(IEspContext &context, IEspClientAliveRequest
   StringBuffer sb(sbName);
   sb.append(sbUserIp);
 
+  if (getConfigChanged() == true)
+  {
+    updateConfigFromFile();
+    setConfigChanged(false);
+  }
+
   if (!strcmp(sbName.str(), m_userWithLock.str()) && !strcmp(sbUserIp.str(), m_userIp.str()))
   {
     CClientAliveThread* pClientAliveThread = m_keepAliveHTable.getValue(sb.str());
@@ -5244,6 +5249,33 @@ const char* CWsDeployFileInfo::GetDisplayProcessName(const char* processName, ch
   }
 }
 
+void CWsDeployFileInfo::updateConfigFromFile()
+{
+  StringBuffer sbxml;
+
+  synchronized block(m_mutex);
+
+  if (m_pFileIO.get() != NULL)
+  {
+    m_pFileIO.clear();
+  }
+  if (m_lastSaved.isNull())
+  {
+    m_lastSaved.setNow();
+  }
+
+  m_pFileIO.setown(m_pFile->open(IFOread));
+  Owned <IPropertyTree> pTree = createPTree(*m_pFileIO);
+  toXML(pTree, sbxml.clear());
+
+  Owned<IEnvironmentFactory> factory = getEnvironmentFactory();
+
+  m_constEnvRdOnly.clear();
+  m_constEnvRdOnly.setown(factory->loadLocalEnvironment(sbxml.str()));
+  m_lastSaved.clear();
+  m_lastSaved.setNow();
+}
+
 bool CWsDeployFileInfo::deploy(IEspContext &context, IEspDeployRequest& req, IEspDeployResponse& resp)
 {
   synchronized block(m_mutex);
@@ -5465,6 +5497,7 @@ void CWsDeployFileInfo::saveEnvironment(IEspContext* pContext, IConstWsDeployReq
           else
           {
             Owned<IFile> pFile(createIFile(sb.str()));
+            setSkipNotification(true);
             copyFile(pFile, m_pFile, 0x100000);
             break;
           }
@@ -5557,6 +5590,8 @@ void CWsDeployFileInfo::saveEnvironment(IEspContext* pContext, IConstWsDeployReq
       throw MakeStringException(0, "%s", sMsg.str());
     }
   }
+
+   CConfigFileMonitorThread::getInstance()->addObserver(*this);
 }
 
 void CWsDeployFileInfo::unlockEnvironment(IEspContext* context, IConstWsDeployReqInfo *reqInfo, const char* xmlArg, StringBuffer& sbErrMsg, bool saveEnv)
@@ -6046,9 +6081,9 @@ void CWsDeployFileInfo::initFileInfo(bool createOrOverwrite)
 
   if (!fileExists)
     toXML(pEnvRoot, sbxml.clear());
-  
+
   m_Environment.clear();
-  
+
   if (m_constEnvRdOnly.get() == NULL)
   {
     if (fileExists)
@@ -6084,6 +6119,7 @@ CWsDeployFileInfo::~CWsDeployFileInfo()
     unlockCloud.start(sbMsg);
   }
 
+  CWsDeployFileInfo::CConfigFileMonitorThread::getInstance()->removeObserver(*this);
   m_pNavTree.clear();
   m_pGraphXml.clear();
   m_Environment.clear();
@@ -6547,6 +6583,7 @@ CWsDeployFileInfo* CWsDeployExCE::getFileInfo(const char* fileName, bool addIfNo
       try
       {
         fi->initFileInfo(createFile);
+        CWsDeployFileInfo::CConfigFileMonitorThread::getInstance()->addObserver(*fi);
       }
       catch (IException* e)
       {

+ 200 - 3
esp/services/WsDeploy/WsDeployService.hpp

@@ -28,6 +28,8 @@
 #include "jsocket.hpp"
 #include "XMLTags.h"
 #include "httpclient.hpp"
+#include "jqueue.tpp"
+#include "build-config.h"
 
 typedef enum EnvAction_
 {
@@ -41,13 +43,21 @@ typedef enum EnvAction_
 } EnvAction;
 
 #define CLOUD_SOAPCALL_TIMEOUT 10000
+#define CONFIG_MONITOR_CHECK_INTERVAL  1000
+#define CONFIG_MONITOR_TIMEOUT_PERIOD  6000
 
 class CCloudTask;
 class CCloudActionHandler;
 class CWsDeployEx;
 class CWsDeployExCE;
 
-class CWsDeployFileInfo : public CInterface, implements IInterface
+interface IConfigFileObserver : extends IObserver
+{
+public:
+  virtual const char* getConfigFilePath() = 0;
+};
+
+class CWsDeployFileInfo : public CInterface, implements IConfigFileObserver
 {
 private:
     //==========================================================================================
@@ -125,11 +135,166 @@ private:
         {
             m_constEnv.set(pConstEnv);
         }
+
     private:
         CThreaded* m_pWorkerThread;
         Linked<CWsDeployExCE> m_pService;
         Linked<IConstEnvironment> m_constEnv;
     };
+  public:
+
+    class CConfigChangeNotification : implements INotification
+    {
+    public:
+
+      CConfigChangeNotification(IObservable *pSource) : m_pSource(pSource)
+      {
+      };
+
+      virtual ~CConfigChangeNotification()
+      {
+      };
+
+      virtual NotifyAction getAction(void)
+      {
+        return NotifyNone;
+      }
+
+      virtual IObservable* querySource(void)
+      {
+        return m_pSource;
+      }
+
+    private:
+      IObservable* m_pSource;
+      CConfigChangeNotification() {};
+    };
+
+    class CConfigFileMonitorThread
+      : public CInterface, implements IThreaded, implements IObservable
+    {
+    public:
+      CConfigFileMonitorThread(unsigned int uCheckInterval, unsigned int uTimeout)
+        : m_pWorkerThread(NULL), m_quitThread(false), m_uCheckInterval(uCheckInterval), m_uTimeout(uTimeout), m_configChangeNotification(this)
+      {
+      };
+
+      virtual ~CConfigFileMonitorThread()
+      {
+        m_quitThread = true;
+        m_pWorkerThread->join();
+        delete m_pWorkerThread;
+      };
+
+      IMPLEMENT_IINTERFACE;
+
+      virtual void notify(IDirectoryDifferenceIterator *diffIter)
+      {
+        if ( diffIter == NULL )
+        {
+          return;
+        }
+        else
+        {
+          CriticalBlock block(m_critsecObserverQueue);
+
+          for (unsigned int idxObservers = 0; idxObservers < m_qObservers.ordinality(); idxObservers++)
+          {
+            IConfigFileObserver *pConfigFileObserver = m_qObservers.query(idxObservers);
+
+            for (diffIter->first(); diffIter->isValid() == true; diffIter->next())
+            {
+              bool bDoNotify = true;
+
+              if ( (diffIter->getFlags() == IDDIunchanged) || (pConfigFileObserver != NULL && (strcmp( pConfigFileObserver->getConfigFilePath(), diffIter->query().queryFilename() ) != 0)) )
+              {
+                bDoNotify = false;
+              }
+
+              if (bDoNotify == true)
+              {
+                 m_qObservers.query(idxObservers)->onNotify(m_configChangeNotification);
+              }
+            }
+          }
+        }
+      }
+
+      virtual void addObserver( IConfigFileObserver &observer )
+      {
+         CriticalBlock block(m_critsecObserverQueue);
+
+        //allow observers to register only once
+        if (m_qObservers.find(&observer) == (unsigned)-1)
+        {
+          m_qObservers.enqueue(&observer);
+        }
+      }
+
+      virtual void removeObserver( IConfigFileObserver &observer )
+      {
+        CriticalBlock block(m_critsecObserverQueue);
+
+        m_qObservers.dequeue(&observer);
+      }
+
+      virtual void main()
+      {
+        Owned<IFile> configFiles = createIFile(CONFIG_SOURCE_DIR);
+
+        while ( m_quitThread == false )
+        {
+          Owned<IDirectoryDifferenceIterator> diffIter = configFiles->monitorDirectory(NULL, NULL, false, false, m_uCheckInterval, m_uTimeout);
+
+          if (diffIter.get() != NULL)
+          {
+            notify(diffIter.get());
+          }
+        }
+      };
+
+      void init()
+      {
+        if ( m_pWorkerThread == NULL)
+        {
+          m_pWorkerThread = new CThreaded("CConfigFileMonitorThread");
+          IThreaded* pIThreaded = this;
+          m_pWorkerThread->init(pIThreaded);
+        }
+      };
+
+      static CConfigFileMonitorThread* getInstance()
+      {
+        static Owned<CConfigFileMonitorThread> s_configFileMonitorSingleton;
+        static CSingletonLock slock;
+
+        if (slock.lock() == true)
+        {
+          if (s_configFileMonitorSingleton.get() == NULL)
+          {
+            s_configFileMonitorSingleton.setown(new CWsDeployFileInfo::CConfigFileMonitorThread(CONFIG_MONITOR_CHECK_INTERVAL, CONFIG_MONITOR_TIMEOUT_PERIOD));
+            s_configFileMonitorSingleton->init();
+          }
+        }
+
+        return s_configFileMonitorSingleton.get();
+      };
+
+    protected:
+      CThreaded* m_pWorkerThread;
+
+      bool m_quitThread;
+      unsigned int m_uTimeout;
+      unsigned int m_uCheckInterval;
+      QueueOf<IConfigFileObserver,false> m_qObservers;
+      CriticalSection m_critsecObserverQueue;
+      CConfigChangeNotification m_configChangeNotification;
+
+    private:
+      CConfigFileMonitorThread() : m_configChangeNotification(NULL) {};
+      CConfigFileMonitorThread(const CConfigFileMonitorThread& configFileThread) : m_configChangeNotification(NULL) {};
+      CConfigFileMonitorThread& operator=(CConfigFileMonitorThread const&) {};
+    };
 
     class CClientAliveThread : public CInterface, implements IThreaded, implements IInterface
     {
@@ -248,12 +413,41 @@ private:
 
 public:
     IMPLEMENT_IINTERFACE;
-    CWsDeployFileInfo(CWsDeployExCE* pService, const char* pEnvFile, bool bCloud):m_pService(pService),m_bCloud(bCloud)
+    CWsDeployFileInfo(CWsDeployExCE* pService, const char* pEnvFile, bool bCloud) : m_configChanged(false), m_pService(pService),m_bCloud(bCloud)
     {
         m_envFile.clear().append(pEnvFile);
     }
     ~CWsDeployFileInfo();
-     void initFileInfo(bool createFile);
+    void initFileInfo(bool createFile);
+    void setConfigChanged(bool b)
+    {
+      m_configChanged = b;
+    };
+    bool getConfigChanged() const
+    {
+      return m_configChanged;
+    };
+    bool getSkipNotification() const
+    {
+      return m_bSkipNotification;
+    }
+    void setSkipNotification(bool b)
+    {
+      m_bSkipNotification = b;
+    }
+    virtual bool onNotify(INotification &notify)
+    {
+      if (notify.getAction() == NotifyNone && getSkipNotification() == false)
+      {
+        setConfigChanged(true);
+      }
+      setSkipNotification(false);
+    };
+    virtual const char* getConfigFilePath()
+    {
+       return m_pFile->queryFilename();
+    };
+    virtual void updateConfigFromFile();
     virtual bool deploy(IEspContext &context, IEspDeployRequest &req, IEspDeployResponse &resp);
     virtual bool graph(IEspContext &context, IEspEmptyRequest& req, IEspGraphResponse& resp);
     virtual bool navMenuEvent(IEspContext &context, IEspNavMenuEventRequest &req, 
@@ -347,8 +541,11 @@ private:
     StringBuffer              m_cloudEnvId;
     short                     m_daliServerPort;
     Owned<CGenerateJSFactoryThread> m_pGenJSFactoryThread;
+private:
     bool                      m_skipEnvUpdateFromNotification;
     bool                      m_activeUserNotResp;
+    bool                      m_configChanged;
+    bool                      m_bSkipNotification;
     bool                      m_bCloud;
     Owned<IFile>              m_pFile;
     Owned<IFileIO>            m_pFileIO;

+ 1 - 8
plugins/dbconnectors/ecljdbc/src/main/java/com/hpccsystems/ecljdbc/CMakeLists.txt

@@ -1,9 +1,5 @@
 cmake_minimum_required(VERSION 2.8)
 
-#set ( HPCC_ECLJDBC_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR})
-#set ( HPCC_SOURCE_DIR ${HPCC_ECLJDBC_SOURCE_DIR}/../../../../../../../../..)
-#include(${HPCC_SOURCE_DIR}/version.cmake)
-
 configure_file("EclVersionTracker.java.in" "${HPCC_ECLJDBC_SOURCE_DIR}/EclVersionTracker.java")
 
 set ( CMAKE_MODULE_PATH "${HPCC_SOURCE_DIR}/cmake_modules")
@@ -11,13 +7,10 @@ set ( EXECUTABLE_OUTPUT_PATH "${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}/bin" )
 set ( PRODUCT_PREFIX "hpccsystems" )
 
 SET (CLASS_DIR "class")
-#SET (JAR_DIR "jar")
 FILE(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/${CLASS_DIR})
-#FILE(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/${JAR_DIR})
 SET (JAR_FILE "${PRODUCT_PREFIX}-ecljdbc_${HPCC_PROJECT}-${HPCC_MAJOR}.${HPCC_MINOR}.${HPCC_POINT}.jar")
 SET (JAVA_FILES
-
-EclDatabaseMetaData.java EclQuery.java SqlExpression.java DFUFile.java EclDriver.java EclResultSet.java SqlOperator.java EclColumn.java EclEngine.java EclResultSetMetadata.java SqlParser.java EclColumnMetaData.java EclPreparedStatement.java EclStatement.java SqlWhereClause.java EclConnection.java EclQueries.java SqlColumn.java Utils.java EclVersionTracker.java
+EclDatabaseMetaData.java EclQuery.java SqlExpression.java DFUFile.java EclDriver.java EclResultSet.java SqlOperator.java EclColumn.java EclEngine.java EclResultSetMetadata.java SqlParser.java EclColumnMetaData.java EclPreparedStatement.java EclStatement.java SqlWhereClause.java EclConnection.java EclQueries.java SqlColumn.java Utils.java EclFunction.java EclFunctions.java EclVersionTracker.java 
 
 )
 

+ 0 - 2
plugins/dbconnectors/ecljdbc/src/main/java/com/hpccsystems/ecljdbc/DFUFile.java

@@ -386,7 +386,6 @@ public class DFUFile
 
 	public void setFileFields(String eclString)
 	{
-		//Ecl = "filerecstruct := RECORD ";
 		Ecl = "";
 		if (eclString != null && eclString.length()>0)
 		{
@@ -424,7 +423,6 @@ public class DFUFile
 							columnmeta.setTableName(this.FullyQualifiedName);
 
 							Ecl += type + " " + name + "; ";
-							//Fields.add(columnmeta);
 							Fields.put(name.toUpperCase(), columnmeta);
 						}
 						index++;

+ 38 - 35
plugins/dbconnectors/ecljdbc/src/main/java/com/hpccsystems/ecljdbc/EclConnection.java

@@ -28,53 +28,65 @@ public class EclConnection implements Connection {
     private EclDatabaseMetaData metadata;
     private Properties props;
     private String serverAddress;
-    private String cluster;
+    //private String cluster;
     private Properties clientInfo;
 
     public EclConnection(Properties props)
     {
-       closed = false;
+		closed = false;
 
-       this.serverAddress = props.getProperty("ServerAddress","localhost");
-       this.cluster = props.getProperty("Cluster","myroxie");
-       this.props = props;
+		this.serverAddress = "localhost";
 
-       if (!this.props.containsKey("WsECLWatchPort"))
-    	   this.props.setProperty("WsECLWatchPort", "8010");
+		if (props.containsKey("ServerAddress"))
+			this.serverAddress = props.getProperty("ServerAddress");
+		else
+			props.setProperty("ServerAddress", this.serverAddress);
 
-       if (!this.props.containsKey("WsECLPort"))
-    	   this.props.setProperty("WsECLPort", "8002");
+		this.props = props;
 
-       //TODO soon wsecldirect will be part of wseclwatch
-       //I will continue to support this property but, by
-       //default we should use the wseclwatch port
-       if (!this.props.containsKey("WsECLDirectPort"))
-    	   this.props.setProperty("WsECLDirectPort", "8008");
+		if (!this.props.containsKey("Cluster"))
+			this.props.setProperty("Cluster", "hthor");
 
-       if (!this.props.containsKey("username"))
-    	   this.props.setProperty("username", "");
+		if (!this.props.containsKey("WsECLWatchAddress"))
+			this.props.setProperty("WsECLWatchAddress", serverAddress);
 
-       if (!this.props.containsKey("password"))
-    	   this.props.setProperty("password", "");
+		if (!this.props.containsKey("WsECLWatchPort"))
+			this.props.setProperty("WsECLWatchPort", "8010");
 
-       if (!this.props.containsKey("EclLimit"))
-    	   this.props.setProperty("EclLimit", "100");
+		if (!this.props.containsKey("WsECLAddress"))
+			this.props.setProperty("WsECLAddress", serverAddress);
 
-       //basicAuth
-       String userPassword = this.props.getProperty("username") + ":" + props.getProperty("password");
-       //String basicAuth = "Basic " + new String(new Base64().encode(userPassword.getBytes()));
+		if (!this.props.containsKey("WsECLPort"))
+			this.props.setProperty("WsECLPort", "8002");
+
+		if (!this.props.containsKey("WsECLDirectAddress"))
+			this.props.setProperty("WsECLDirectAddress", serverAddress);
+
+		if (!this.props.containsKey("WsECLDirectPort"))
+			this.props.setProperty("WsECLDirectPort", "8008");
+
+		if (!this.props.containsKey("username"))
+			this.props.setProperty("username", "");
+
+		if (!this.props.containsKey("password"))
+			this.props.setProperty("password", "");
+
+		if (!this.props.containsKey("EclLimit"))
+			this.props.setProperty("EclLimit", "100");
+
+		// basicAuth
+		String userPassword = this.props.getProperty("username") + ":"
+				+ props.getProperty("password");
 
        String basicAuth = "Basic " + Utils.Base64Encode(userPassword.getBytes(), false);
 
-       //System.out.println(Utils.Base64Decode(Utils.Base64Encode(userPassword.getBytes(), false).getBytes()));
        this.props.put("BasicAuth", basicAuth);
        metadata = new EclDatabaseMetaData(props);
 
        //TODO not doing anything w/ this yet, just exposing it to comply w/ API definition...
        clientInfo = new Properties();
 
-       System.out.println("EclConnection initialized - server: " + this.serverAddress + " cluster: " + this.cluster);
-
+       System.out.println("EclConnection initialized - server: " + this.serverAddress);
     }
 
     public Properties getProperties()
@@ -86,13 +98,6 @@ public class EclConnection implements Connection {
     {
     	return props.getProperty(propname, "");
     }
-    public String getCluster() {
-        return cluster;
-    }
-
-    public void setCluster(String cluster) {
-        this.cluster = cluster;
-    }
 
     public String getServerAddress() {
         return serverAddress;
@@ -112,13 +117,11 @@ public class EclConnection implements Connection {
 
 
     public Statement createStatement() throws SQLException {
-    	System.out.println("##Statement EclConnection::createStatement()##");
         return new EclPreparedStatement(this, null);
     }
 
 
     public PreparedStatement prepareStatement(String query) throws SQLException {
-    	System.out.println("##PreparedStatement EclConnection::createStatement("+ query +")##");
         return new EclPreparedStatement(this, query);
     }
 

+ 16 - 21
plugins/dbconnectors/ecljdbc/src/main/java/com/hpccsystems/ecljdbc/EclDatabaseMetaData.java

@@ -41,13 +41,12 @@ import org.xml.sax.SAXException;
 
 public class EclDatabaseMetaData implements DatabaseMetaData {
 
-	//private Properties eclQueryAliases;
 	private EclQueries eclqueries;
 	private Properties dfufiles;
 	private static Map<Integer, String> SQLFieldMapping;
 
-	public static final short JDBCVerMajor = 0;
-	public static final short JDBCVerMinor = 1;
+	public static final short JDBCVerMajor = 3;
+	public static final short JDBCVerMinor = 0;
 
 	private static String HPCCBuildVersionFull = "";
 	@SuppressWarnings("unused")
@@ -58,8 +57,10 @@ public class EclDatabaseMetaData implements DatabaseMetaData {
 	private boolean isMetaDataCached;
 	private String serverAddress;
 	private String cluster;
+	private String wseclwatchaddress;
+	private String wsecladdress;
 	private String wseclwatchport;
-	private String wseclport;
+	//private String wseclport;
 	private String basicAuth;
 	private String UserName;
 
@@ -68,16 +69,14 @@ public class EclDatabaseMetaData implements DatabaseMetaData {
 		super();
 		this.serverAddress = props.getProperty("ServerAddress","localhost");
 		this.cluster = props.getProperty("Cluster","myroxie");
-		this.wseclwatchport  = props.getProperty("WsECLWatchPort"); ;
-		this.wseclport = props.getProperty("WsECLPort");
+		this.wseclwatchport  = props.getProperty("WsECLWatchPort");
+		this.wseclwatchaddress  = props.getProperty("WsECLWatchAddress");
 		this.UserName = props.getProperty("username");
-		//this.defaultDB =  props.getProperty("DefaultDB");
 		this.basicAuth = props.getProperty("BasicAuth");
 
-		System.out.println("EclDatabaseMetaData ServerAddress: " + serverAddress + " Cluster: " + cluster + " eclwatch: " + wseclwatchport + " ecl: " + wseclport);
+		System.out.println("EclDatabaseMetaData ServerAddress: " + serverAddress + " Cluster: " + cluster + " eclwatch: " + wseclwatchaddress +":"+  wseclwatchport);
 
 		dfufiles = new Properties();
-		//eclQueryAliases = new Properties();
 		eclqueries = new EclQueries(this.cluster);
 		SQLFieldMapping = new HashMap<Integer, String>();
 
@@ -164,7 +163,7 @@ public class EclDatabaseMetaData implements DatabaseMetaData {
 
 		try
 		{
-			String urlString = "http://" + serverAddress + ":" + wseclwatchport + "/WsDfu/DFUQuery?rawxml_";
+			String urlString = "http://" + wseclwatchaddress + ":" + wseclwatchport + "/WsDfu/DFUQuery?rawxml_";
 
 			URL dfuLogicalFilesURL;
 			dfuLogicalFilesURL = new URL(urlString);
@@ -295,7 +294,7 @@ public class EclDatabaseMetaData implements DatabaseMetaData {
 
 						if (file.getFullyQualifiedName().length() > 0 && file.getClusterName()!=null)
 						{
-							String filedetailUrl = "http://" + serverAddress + ":" + wseclwatchport +
+							String filedetailUrl = "http://" + wseclwatchaddress + ":" + wseclwatchport +
 									"/WsDfu/DFUInfo?Name=" +
 									URLEncoder.encode(file.getFullyQualifiedName(), "UTF-8") +
 									"&Cluster=" +
@@ -319,11 +318,10 @@ public class EclDatabaseMetaData implements DatabaseMetaData {
 							// Get all pertinent detail info regarding this file (files are being treated as DB tables)
 							registerFileDetails(docElement, file);
 
-
 							//we might need more info if this file is actually an index:
 							if(file.isKeyFile())
 							{
-								String openfiledetailUrl = "http://" + serverAddress + ":" + wseclwatchport +
+								String openfiledetailUrl = "http://" + wseclwatchaddress + ":" + wseclwatchport +
 										"/WsDfu/DFUSearchData?OpenLogicalName=" +
 										URLEncoder.encode(file.getFullyQualifiedName(), "UTF-8") +
 										"&Cluster=" +
@@ -345,11 +343,9 @@ public class EclDatabaseMetaData implements DatabaseMetaData {
 
 								Element docElement2 = dom3.getDocumentElement();
 
-								//NodeList keyfiledetail = docElement2.getElementsByTagName("DFUSearchDataResponse");
 								NodeList keyfiledetail = docElement2.getChildNodes();
 								if (keyfiledetail.getLength()>0)
 								{
-									//NodeList resultslist = keyfiledetail.item(0).getChildNodes(); //ECLResult nodes
 									for (int k = 0; k< keyfiledetail.getLength(); k++)
 									{
 										Node currentnode = keyfiledetail.item(k);
@@ -365,7 +361,6 @@ public class EclDatabaseMetaData implements DatabaseMetaData {
 													Node keyedcolumnfield = KeyedColumnFields.item(q);
 													if (keyedcolumnfield.getNodeName().equals("ColumnLabel"))
 													{
-														//file.addKeyedColumn(fieldindex+1,keyedcolumnfield.getTextContent());
 														file.addKeyedColumnInOrder(keyedcolumnfield.getTextContent());
 														break;
 													}
@@ -380,7 +375,6 @@ public class EclDatabaseMetaData implements DatabaseMetaData {
 										{
 											NodeList nonKeyedColumns = currentnode.getChildNodes();
 
-											//Properties nonkeyedcolumns = new Properties();
 											for (int fieldindex = 0 ; fieldindex < nonKeyedColumns.getLength(); fieldindex++)
 											{
 												Node nonKeyedColumn = nonKeyedColumns.item(fieldindex);
@@ -390,7 +384,6 @@ public class EclDatabaseMetaData implements DatabaseMetaData {
 													Node nonkeyedcolumnfield = nonKeyedColumnFields.item(q);
 													if (nonkeyedcolumnfield.getNodeName().equals("ColumnLabel"))
 													{
-														//file.addNonKeyedColumn(fieldindex+1, nonkeyedcolumnfield.getTextContent());
 														file.addNonKeyedColumnInOrder(nonkeyedcolumnfield.getTextContent());
 														break;
 													}
@@ -442,7 +435,8 @@ public class EclDatabaseMetaData implements DatabaseMetaData {
 
 		try
 		{
-			String urlString = "http://" + serverAddress + ":" + wseclwatchport + "/WsWorkunits/WUQuerysetDetails?QuerySetName=" + cluster + "&rawxml_";
+
+			String urlString = "http://" + wseclwatchaddress + ":" + wseclwatchport + "/WsWorkunits/WUQuerysetDetails?QuerySetName=" + cluster + "&rawxml_";
 
 			URL querysetURL;
 			querysetURL = new URL(urlString);
@@ -501,7 +495,7 @@ public class EclDatabaseMetaData implements DatabaseMetaData {
 
 						}
 						//for each QuerySetQuery found above, get all schema related info:
-						String queryinfourl = "http://" + serverAddress + ":" + wseclwatchport +
+						String queryinfourl = "http://" + wseclwatchaddress + ":" + wseclwatchport +
 								"/WsWorkunits/WUInfo/WUInfoRequest?Wuid=" +
 								query.getWUID() +
 								"&IncludeExceptions=0" +
@@ -2224,7 +2218,8 @@ public class EclDatabaseMetaData implements DatabaseMetaData {
 		if (serverAddress == null || cluster == null)
 			return false;
 
-		String urlString = "http://" + serverAddress + ":"+wseclwatchport+"/WsSMC/Activity?rawxml_";
+		//String urlString = "http://" + serverAddress + ":"+wseclwatchport+"/WsSMC/Activity?rawxml_";
+		String urlString = "http://" + wseclwatchaddress + ":"+wseclwatchport+"/WsSMC/Activity?rawxml_";
 
 		URL querysetURL;
 

+ 55 - 57
plugins/dbconnectors/ecljdbc/src/main/java/com/hpccsystems/ecljdbc/EclDriver.java

@@ -33,9 +33,6 @@ public class EclDriver implements Driver
 
 	public Connection connect(String url, Properties info) throws SQLException
 	{
-		String serverAddress = info.getProperty("ServerAddress");
-		String cluster = info.getProperty("Cluster");
-
 		try
 		{
 			StringTokenizer urltokens = new StringTokenizer(url,";");
@@ -51,6 +48,8 @@ public class EclDriver implements Driver
 						String value = keyvalues.nextToken();
 						if (!info.containsKey(key))
 							info.put(key, value);
+						else
+							System.out.println("Connection property: " + key + " found in info properties and URL, ignoring URL value");
 					}
 				}
 			}
@@ -60,8 +59,9 @@ public class EclDriver implements Driver
 			System.out.println("Issue parsing URL! \"" + url +"\"" );
 		}
 
-
-		System.out.println("EclDriver::connect" + serverAddress + ":" + cluster);
+		String serverAddress = info.getProperty("ServerAddress");
+		System.out.println("EclDriver::connect" + serverAddress);
+		
 		return new EclConnection(info);
 	}
 
@@ -105,65 +105,63 @@ public class EclDriver implements Driver
 			//info.put("ServerAddress", "10.239.219.10"); //fishbeck
 			//info.put("ServerAddress", "172.25.237.145"); //arjuna
 
-			info.put("Cluster", "thor"); //analagous to DB server instance
-			info.put("DefaultQuery", "fetchpeoplebyzipservice"); //analagous to default DB name
+			info.put("Cluster", "thor");
 			info.put("WsECLWatchPort", "8010");
-			//info.put("EclLimit", "100");
+			info.put("EclLimit", "10");
 			info.put("WsECLPort", "8002");
 			info.put("WsECLDirectPort", "8008");
 			info.put("username", "_rpastrana");
-			info.put("password", "ch@ng3m3");
+			info.put("password", "a");
 
 			//conn = (EclConnection) d.connect("url:jdbc:ecl;ServerAddress=192.168.124.128;Cluster=myroxie",info);
 			//conn = (EclConnection) d.connect("url:jdbc:ecl;ServerAddress=10.239.20.80;Cluster=thor;EclLimit=8",info);
-			conn = (EclConnection) d.connect("url:jdbc:ecl;ServerAddress=10.239.219.10;Cluster=thor;EclLimit=8",info);
-
-
-			//PreparedStatement p = conn.prepareStatement("SELECT 2");
-			//PreparedStatement p = conn.prepareStatement("select  * from thor::full_test_distributed_index where lname = BRYANT , fname = whoknows group by zips order by birth_month DESC");
-			//PreparedStatement p = conn.prepareStatement("select  * from thor::full_test_distributed_index where lname = BRYANT AND fname = SILVA ");
-
-			//PreparedStatement p = conn.prepareStatement("select  x, 'firstname', 1  from tutorial::rp::tutorialperson where  firstname >= ? and firstname >= ? limit 1000");
-			//PreparedStatement p = conn.prepareStatement("select  count(*) from tutorial::rp::tutorialperson where  firstname = 'A' or firstname = 'Z' limit 1000");
-			//PreparedStatement p = conn.prepareStatement("select  firstname from tutorial::rp::tutorialperson where  middlename =' ' ");
-			//PreparedStatement p = conn.prepareStatement("select  * from tutorial::rp::tutorialperson where zip = ? and  middlename>='W'    city='DELRAY BEACH' limit 1000" );
-			//PreparedStatement p = conn.prepareStatement("select * from tutorial::rp::tutorialperson where firstname = 'MARICHELLE'  order by lastname ASC, firstname DESC limit 1000");
-			//PreparedStatement p = conn.prepareStatement("select count(*) from tutorial::rp::tutorialperson");
-			//PreparedStatement p = conn.prepareStatement("select tutorial::rp::peoplebyzipindex.zip from \n tutorial::rp::peoplebyzipindex order by zip");
-			//PreparedStatement p = conn.prepareStatement("select zip, count( * ) from tutorial::rp::tutorialperson  group by zip");
-			//PreparedStatement p = conn.prepareStatement("select city, zip, count(*) from tutorial::rp::tutorialperson where zip ='33445' limit 1000");
-			//PreparedStatement p = conn.prepareStatement("select city from tutorial::rp::tutorialperson USE INDEX(tutorial::rp::peoplebyzipindex2) where zip = ? ");
-			//PreparedStatement p = conn.prepareStatement("select count(*) from tutorial::rp::tutorialperson USE INDEX(0) where zip > ?");
-			//PreparedStatement p = conn.prepareStatement("select count(city)  from tutorial::rp::tutorialperson where zip = '33445'");//where zip = '33445'");
-			//PreparedStatement p = conn.prepareStatement("select * from enron::final where tos = 'randy.young@enron.com' limit 1000");
-			PreparedStatement p = conn.prepareStatement("select count(*), zip from tutorial::rp::tutorialperson ");
-
-
-
-			//PreparedStatement p = conn.prepareStatement("select tbl.* from progguide::exampledata::peopleaccts tbl");
-			//PreparedStatement p = conn.prepareStatement("select firstname, lastname, middlename, city, street, state, zip from tutorial::rp::tutorialperson where firstname = VIMA LIMIT 1000");
-		//PreparedStatement p = conn.prepareStatement("select tbl.* from progguide::exampledata::people tbl");
-
-			//PreparedStatement p = conn.prepareStatement("select * from certification::full_test_distributed limit 100");
-			//PreparedStatement p = conn.prepareStatement("select * from certification::full_test_distributed where birth_state = FL LIMIT 1000");
-			//PreparedStatement p = conn.prepareStatement("select * from customer::customer");
-			//PreparedStatement p = conn.prepareStatement("select count(*) from tutorial::rp::tutorialperson");
-			//PreparedStatement p = conn.prepareStatement("select * from tutorial::rp::tutorialperson");
-
-
-			//PreparedStatement p = conn.prepareStatement("select tbl.* from .::xdbcsample tbl");
-
-			//PreparedStatement p = conn.prepareStatement("select tbl.* from fetchpeoplebyzipservice tbl where zipvalue=33445  order by fname DESC group by zip limit 10");
-			//PreparedStatement p = conn.prepareStatement("select tbl.* from fetchpeoplebyzipservice tbl where zipvalue=33445 group by zip, fname order by fname DESC, lname, zip ASC limit 10");
-			//PreparedStatement p = conn.prepareStatement("call fetchpeoplebyzipservice(?)");
-			//PreparedStatement p = conn.prepareStatement("select tbl.* from bestdemo tbl ");
-			//PreparedStatement p = conn.prepareStatement("select * fname from fetchpeoplebyzipservice");
-
-
-			//PreparedStatement p = conn.prepareStatement("select * from 'Result 1' where zipvalue=33445");
-
-			//PreparedStatement p = conn.prepareStatement("select * from Timeline_Total_Property_Sales");
-			//PreparedStatement p = conn.prepareStatement("select * from motiondemo");
+			//conn = (EclConnection) d.connect("url:jdbc:ecl;ServerAddress=10.239.219.10;Cluster=thor;EclLimit=8",info);
+			conn = (EclConnection) d.connect("url:jdbc:ecl;",info);
+
+			PreparedStatement p = conn.prepareStatement(
+			//"select  * from thor::full_test_distributed_index where lname = BRYANT , fname = whoknows group by zips order by birth_month DESC"
+			//"select  * from thor::full_test_distributed_index where lname = BRYANT AND fname = SILVA "
+
+			//"select  x, 'firstname', 1  from tutorial::rp::tutorialperson where  firstname >= ? and firstname >= ? limit 1000"
+			//"select  count(*) from tutorial::rp::tutorialperson where  firstname = 'A' or firstname = 'Z' limit 1000"
+			//"select  firstname from tutorial::rp::tutorialperson where  middlename =' ' "
+			//"select  * from tutorial::rp::tutorialperson where zip = ? and  middlename>='W'    city='DELRAY BEACH' limit 1000" );
+			//"select * from tutorial::rp::tutorialperson where firstname = 'MARICHELLE'  order by lastname ASC, firstname DESC limit 1000"
+			//"select count(*) from tutorial::rp::tutorialperson"
+			//"select tutorial::rp::peoplebyzipindex.zip from \n tutorial::rp::peoplebyzipindex order by zip"
+			//"select zip, count( * ) from tutorial::rp::tutorialperson  group by zip"
+			//"select city, zip, count(*) from tutorial::rp::tutorialperson where zip ='33445' limit 1000"
+			//"select city from tutorial::rp::tutorialperson USE INDEX(tutorial::rp::peoplebyzipindex2) where zip = ? "
+			//"select count(*) from tutorial::rp::tutorialperson USE INDEX(0) where zip > ?"
+			//"select count(city)  from tutorial::rp::tutorialperson where zip = '33445'"//where zip = '33445'"
+			//"select * from enron::final where tos = 'randy.young@enron.com' limit 1000"
+			//"select count(*), zip from tutorial::rp::tutorialperson where zip = '33445' "
+			"select * from tutorial::rp::tutorialperson where zip > '32605'"
+			//"select MAX(firstname), lastname from tutorial::rp::tutorialperson  limit 1000"
+			//"select 1"
+			//"select zip, city from tutorial::rp::tutorialperson where city = 'ABBEVILLE' "
+			//"select 1 "
+
+			//"select MIN(zip), city from tutorial::rp::tutorialperson where zip  > '33445'"
+
+			//"select tbl.* from progguide::exampledata::peopleaccts tbl"
+			//"select firstname, lastname, middlename, city, street, state, zip from tutorial::rp::tutorialperson where firstname = VIMA LIMIT 1000"
+			//"select tbl.* from progguide::exampledata::people tbl"
+
+			//"select * from certification::full_test_distributed limit 100"
+			//"select * from certification::full_test_distributed where birth_state = FL LIMIT 1000"
+			//"select * from customer::customer"
+			//"select count(*) from tutorial::rp::tutorialperson"
+			//"select * from tutorial::rp::tutorialperson"
+
+			//"select tbl.* from .::xdbcsample tbl"
+			//"select tbl.* from fetchpeoplebyzipservice tbl where zipvalue=33445  order by fname DESC group by zip limit 10"
+			//"select tbl.* from fetchpeoplebyzipservice tbl where zipvalue=33445 group by zip, fname order by fname DESC, lname, zip ASC limit 10"
+			//"call fetchpeoplebyzipservice(?)"
+			//"select tbl.* from bestdemo tbl "
+			//"select * fname from fetchpeoplebyzipservice"
+
+			);
 
 			p.clearParameters();
 			p.setObject(1, "'33445'");

+ 145 - 82
plugins/dbconnectors/ecljdbc/src/main/java/com/hpccsystems/ecljdbc/EclEngine.java

@@ -70,7 +70,17 @@ public class EclEngine
 	{
 		try
 		{
-			urlString = "http://" + props.getProperty("ServerAddress") + ":" + props.getProperty("WsECLDirectPort") + "/EclDirect/RunEcl?Submit&eclText=";
+			urlString = "http://" + props.getProperty("WsECLDirectAddress") + ":" + props.getProperty("WsECLDirectPort") + "/EclDirect/RunEcl?Submit";
+
+			if (props.containsKey("Cluster"))
+			{
+				urlString += "&cluster=";
+				urlString += props.getProperty("Cluster");
+			}
+			else
+				System.out.println("No cluster property found, executing query on EclDirect default cluster");
+
+			urlString += "&eclText=";
 			urlString +=  URLEncoder.encode(eclstring, "UTF-8");
 
 			System.out.println("WSECL:executeSelect: " + urlString);
@@ -122,19 +132,29 @@ public class EclEngine
 				parser.verifySelectColumns(dfufile);
 
 				expectedretcolumns = parser.getSelectColumns();
+
 				totalparamcount = parser.getWhereClauseExpressionsCount();
 
 				if (indexToUseName != null)
 				{
+					System.out.print("USING INDEX FILE: " + indexToUseName);
+
 					indexfiletouse = dbMetadata.getDFUFile(indexToUseName);
 					indexposfield = indexfiletouse.getIdxFilePosField();
 
 					isPayloadIndex = processIndex(indexfiletouse, keyedandwild);
 
+					if (isPayloadIndex)
+						System.out.println(" as PAYLOAD");
+					else
+						System.out.println(" NOT as PAYLOAD");
+
 					eclEnteties.put("KEYEDWILD", keyedandwild.toString());
 					if (isPayloadIndex)
 						eclEnteties.put("PAYLOADINDEX", "true");
 				}
+				else
+					System.out.println("NOT USING INDEX!");
 
 				eclEnteties.put("PARAMCOUNT", Integer.toString(totalparamcount));
 
@@ -144,6 +164,7 @@ public class EclEngine
 						eclcode.append(dfufile.getFileRecDefwithIndexpos(indexfiletouse.getFieldMetaData(indexposfield), "filerecstruct"));
 					else
 						eclcode.append(dfufile.getFileRecDef("filerecstruct"));
+					eclcode.append("\n");
 				}
 				else
 					throw new Exception("Target HPCC file ("+hpccfilename+") does not contain ECL record definition");
@@ -162,9 +183,8 @@ public class EclEngine
 				}
 
 				StringBuilder selectstruct = new StringBuilder(" selectstruct:=RECORD ");
-				String datasource = indexToUseName == null || isPayloadIndex ? "fileds" : "fetchedds";
+				String datasource = indexToUseName == null ? "fileds" : "idxds";
 
-				//boolean usescalar = expectedretcolumns.size() == 1;
 				for (int i = 0; i < expectedretcolumns.size(); i++)
 				{
 					EclColumnMetaData col = expectedretcolumns.get(i);
@@ -241,47 +261,28 @@ public class EclEngine
 								}
 							}
 							selectstruct.append(" );");
+						}
+						else if (col.getColumnName().equalsIgnoreCase("MIN"))
+						{
+							eclEnteties.put("MINFN", "TRUE");
+							selectstruct.append("minout :=  ");
 
-							/*
 							if (parser.hasGroupByColumns())
 							{
-								selectstruct.append(col.getColumnName().toUpperCase()).append("( GROUP");
-								List<EclColumnMetaData> funccols = col.getFunccols();
-
-								if (funccols.size() > 0)
-								{
-									String paramname = funccols.get(0).getColumnName();
-									eclEnteties.put("FNCOLS", paramname);
-									if (!paramname.equals("*") && funccols.get(0).getColumnType() != EclColumnMetaData.COLUMN_TYPE_CONSTANT)
-									{
-										selectstruct.append(", ");
-										selectstruct.append(datasource);
-										selectstruct.append(".");
-										selectstruct.append(paramname);
-									}
-								}
-								selectstruct.append(" );");
+								selectstruct.append("MIN( GROUP ");
 							}
 							else
 							{
-								selectstruct.append(" totalmax;");
-								//if (expectedretcolumns.size() == 1)
-								//	eclEnteties.put("SCALAROUTNAME", col.getColumnName());
+								selectstruct.append("MIN( ").append(datasource);
+								if (eclEnteties.size() > 0)
+									addFilterClause(selectstruct, eclEnteties);
 							}
-							*/
-							/*
-
-							selectstruct.append(col.getColumnName().toUpperCase()).append(" ( ");
-							if (parser.hasGroupByColumns())
-								selectstruct.append("GROUP");
-							else
-								selectstruct.append(datasource);
 
 							List<EclColumnMetaData> funccols = col.getFunccols();
-
 							if (funccols.size() > 0)
 							{
 								String paramname = funccols.get(0).getColumnName();
+								eclEnteties.put("FNCOLS", paramname);
 								if (!paramname.equals("*") && funccols.get(0).getColumnType() != EclColumnMetaData.COLUMN_TYPE_CONSTANT)
 								{
 									selectstruct.append(", ");
@@ -290,8 +291,9 @@ public class EclEngine
 									selectstruct.append(paramname);
 								}
 							}
-							selectstruct.append(" );");*/
+							selectstruct.append(" );");
 						}
+
 					}
 					else
 						selectstruct.append(col.getEclType()).append(" ").append(col.getColumnName()).append(" := ").append(datasource).append(".").append(col.getColumnName()).append("; ");
@@ -315,10 +317,8 @@ public class EclEngine
 			case SqlParser.SQL_TYPE_SELECTCONST:
 			{
 				System.out.println("Processing test_query...");
-				//ArrayList<EclColumnMetaData> columns = new ArrayList();
 				eclcode.append("selectstruct:=RECORD ");
 				expectedretcolumns = parser.getSelectColumns();
-				//defaultEclQueryReturnDatasetName = "ConstECLQueryResult";
 				StringBuilder ecloutput = new StringBuilder(" OUTPUT(DATASET([{ ");
 				for (int i = 1;  i <= expectedretcolumns.size(); i++)
 				{
@@ -329,7 +329,6 @@ public class EclEngine
 						ecloutput.append(", ");
 				}
 				ecloutput.append("}],selectstruct), NAMED(\'");
-				//ecloutput.append(defaultEclQueryReturnDatasetName);
 				ecloutput.append("ConstECLQueryResult");
 				ecloutput.append("\'));");
 
@@ -428,10 +427,18 @@ public class EclEngine
 		int responseCode = -1;
 		try
 		{
-			urlString = "http://" + props.getProperty("ServerAddress") + ":" + props.getProperty("WsECLDirectPort") + "/EclDirect/RunEcl?Submit"; 			//&eclText=
+			urlString = "http://" + props.getProperty("WsECLDirectAddress") + ":" + props.getProperty("WsECLDirectPort") + "/EclDirect/RunEcl?Submit";
+
+			StringBuilder sb = new StringBuilder();
 
-			StringBuilder sb = new StringBuilder("&eclText=");
+			if (props.containsKey("Cluster"))
+				sb.append("&cluster=").append(props.getProperty("Cluster"));
+			else
+				System.out.println("No cluster property found, executing query on EclDirect default cluster");
+
+			sb.append("&eclText=");
 			sb.append(eclcode);
+			sb.append("\n");
 
 			if (indexfile == null) //no indexfile read...
 			{
@@ -455,6 +462,16 @@ public class EclEngine
 						sb.append(parameters.get("FNCOLS"));
 						sb.append(");");
 					}
+					else if (parameters.containsKey("MINFN"))
+					{
+						sb.append("scalarout := MIN(fileds");
+						if (parameters.size() > 0)
+							addFilterClause(sb, parameters);
+
+						sb.append(" , fileds.");
+						sb.append(parameters.get("FNCOLS"));
+						sb.append(");");
+					}
 				}
 
 				if (parameters.containsKey("SCALAROUTNAME"))
@@ -495,46 +512,98 @@ public class EclEngine
 			}
 			else // use index
 			{
-				sb.append("IDX := INDEX(fileds, {")
+				sb.append("idx := INDEX(fileds, {")
 				.append(indexfile.getKeyedFieldsAsDelmitedString(',', null))
 				.append("}");
 
 				if(indexfile.getNonKeyedColumnsCount()>0)
 					sb.append(",{ ").append(indexfile.getNonKeyedFieldsAsDelmitedString(',', null)).append(" }");
 
-				sb.append(",\'~").append(indexfile.getFullyQualifiedName()).append("\');");
+				sb.append(",\'~").append(indexfile.getFullyQualifiedName()).append("\');\n");
 
 				if( parameters.containsKey("PAYLOADINDEX"))
 				{
-					sb.append(" OUTPUT(CHOOSEN(");
-					if (parameters.containsKey("ORDERBY"))
-						sb.append("SORT( ");
-
-					sb.append("IDX(")
+					sb.append("idxds := idx(")
 					.append(parameters.get("KEYEDWILD"))
-					.append(")");
-					if (parameters.containsKey("ORDERBY"))
+					.append(");\n");
+
+					if (!parameters.containsKey("GROUPBY"))
 					{
-						sb.append(",");
-						sb.append(parameters.get("ORDERBY"));
-						sb.append(")");
+						if (parameters.containsKey("COUNTFN"))
+							sb.append("scalarout := COUNT(idxds");
+						if (parameters.containsKey("MAXFN"))
+						{
+							sb.append("scalarout := MAX(idxds, fileds.");
+							sb.append(parameters.get("FNCOLS"));
+						}
+						if (parameters.containsKey("MINFN"))
+						{
+							sb.append("scalarout := MIN(idxds, fileds.");
+							sb.append(parameters.get("FNCOLS"));
+						}
+						sb.append(", KEYED);\n");
+					}
+
+					if (parameters.containsKey("SCALAROUTNAME"))
+					{
+						sb.append("OUTPUT(scalarout ,NAMED(\'");
+						sb.append(parameters.get("SCALAROUTNAME"));
+						sb.append("\'));");
+					}
+					else
+					{
+						sb.append(parameters.get("SELECTSTRUCT"));
+
+						sb.append(" idxdstable := TABLE(idxds, selectstruct ");
+
+						if (parameters.containsKey("GROUPBY"))
+						{
+							sb.append(", ");
+							sb.append(parameters.get("GROUPBY"));
+						}
+						sb.append(");\n");
+
+
+						if (parameters.containsKey("ORDERBY"))
+						{
+							sb.append("sortedidxtable := SORT( idxdstable, ");
+							sb.append(parameters.get("ORDERBY"));
+							sb.append(");\n");
+							sb.append("resultset := sortedidxtable;\n");
+						}
+						else
+							sb.append("resultset := idxdstable;\n");
+
+
+						sb.append(" OUTPUT(CHOOSEN(");
+						sb.append(" resultset ");
 					}
 				}
 				else
 				{
-					sb.append("fetchedds := FETCH(fileds, IDX( ");
+					sb.append("idxds := FETCH(fileds, idx( ");
 
 					sb.append(parameters.get("KEYEDWILD"));
 					sb.append("), RIGHT.");
-					sb.append(indexfile.getIdxFilePosField()).append(");");
-
+					sb.append(indexfile.getIdxFilePosField()).append(");\n");
 
 					if (!parameters.containsKey("GROUPBY"))
 					{
 						if (parameters.containsKey("COUNTFN"))
-							sb.append("scalarout := COUNT(fetchedds);");
+							sb.append("scalarout := COUNT(idxds);");
 						if (parameters.containsKey("MAXFN"))
-							sb.append("scalarout := MAX(fetchedds, fileds.zip);");
+						{
+							sb.append("scalarout := MAX(idxds, fileds.");
+							sb.append(parameters.get("FNCOLS"));
+							sb.append(");");
+						}
+						if (parameters.containsKey("MINFN"))
+						{
+							sb.append("scalarout := MIN(idxds, fileds.");
+							sb.append(parameters.get("FNCOLS"));
+							sb.append(");");
+						}
+						sb.append("\n");
 					}
 
 					if (parameters.containsKey("SCALAROUTNAME"))
@@ -545,39 +614,32 @@ public class EclEngine
 					}
 					else
 					{
+						sb.append(parameters.get("SELECTSTRUCT"));
 
-					sb.append(parameters.get("SELECTSTRUCT"));
-
-					sb.append(" IDXTABLE := TABLE(fetchedds , selectstruct ");
-
-					if (parameters.containsKey("GROUPBY"))
-					{
-						sb.append(", ");
-						sb.append(parameters.get("GROUPBY"));
-					}
-
-					sb.append("); ");
-
-					sb.append(" OUTPUT(CHOOSEN(");
-					sb.append(" IDXTABLE ");
-					}
+						sb.append(" idxtable := TABLE(idxds , selectstruct ");
 
-					/*{
-						sb.append(" OUTPUT(CHOOSEN(");
-						if (parameters.containsKey("ORDERBY"))
-							sb.append("SORT( ");
+						if (parameters.containsKey("GROUPBY"))
+						{
+							sb.append(", ");
+							sb.append(parameters.get("GROUPBY"));
+						}
 
-						sb.append("fetchedds");
-						if (parameters.size() > 0)
-							addFilterClause(sb, parameters);
+						sb.append(");\n ");
 
 						if (parameters.containsKey("ORDERBY"))
 						{
-							sb.append(",");
+							sb.append("sortedidxtable := SORT( idxtable, ");
 							sb.append(parameters.get("ORDERBY"));
-							sb.append(")");
+							sb.append(");\n");
+
+							sb.append("resultset := sortedidxtable;\n");
 						}
-					}*/
+						else
+							sb.append("resultset := idxtable;");
+
+						sb.append(" OUTPUT(CHOOSEN(");
+						sb.append(" resultset ");
+					}
 				}
 			}
 
@@ -638,7 +700,7 @@ public class EclEngine
 	{
 		try
 		{
-			urlString = "http://" + props.getProperty("ServerAddress") + ":" + props.getProperty("WsECLPort") + "/WsEcl/submit/query/" + props.getProperty("Cluster") + "/" + eclqueryname + "/expanded";
+			urlString = "http://" + props.getProperty("WsECLAddress") + ":" + props.getProperty("WsECLPort") + "/WsEcl/submit/query/" + props.getProperty("Cluster") + "/" + eclqueryname + "/expanded";
 			System.out.println("WSECL:executeCall: " + urlString);
 
 			// Construct data
@@ -903,7 +965,8 @@ public class EclEngine
 		info.put("WsECLWatchPort", "8010");
 		info.put("WsECLPort", "8002");
 		info.put("username", "_rpastrana");
-		info.put("password", "ch@ng3m3");
+		info.put("password", "a");
+
 
 		EclEngine wsEcl = new EclEngine("fetchpeoplebyzipservice.2","" ,info);
 		// http://192.168.56.102:8002/

+ 46 - 0
plugins/dbconnectors/ecljdbc/src/main/java/com/hpccsystems/ecljdbc/EclFunction.java

@@ -0,0 +1,46 @@
+package com.hpccsystems.ecljdbc;
+
+
+public class EclFunction 
+{
+	private String name;
+	private boolean acceptsWilCard;
+	private boolean acceptsMultipleInputs;
+	private EclColumnMetaData returnType;
+	
+
+	public EclFunction (String thename, boolean acceptswild, EclColumnMetaData returntype, boolean multipleInput)
+	{
+		name = thename;
+		acceptsWilCard = acceptswild;
+		returnType = returntype;
+		acceptsMultipleInputs = multipleInput;
+	}
+	
+	public EclFunction (String thename, EclColumnMetaData returntype)
+	{
+		name = thename;
+		acceptsWilCard = false;
+		returnType = returntype;
+	}
+	
+	public String getName() 
+	{
+		return name;
+	}
+	
+	public boolean acceptsWilCard() 
+	{
+		return acceptsWilCard;
+	}
+		
+	public EclColumnMetaData getReturnType() 
+	{
+		return returnType;
+	}
+
+	public boolean acceptsMultipleInputs() 
+	{
+		return acceptsMultipleInputs;
+	}
+}

+ 44 - 0
plugins/dbconnectors/ecljdbc/src/main/java/com/hpccsystems/ecljdbc/EclFunctions.java

@@ -0,0 +1,44 @@
+package com.hpccsystems.ecljdbc;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+public class EclFunctions
+{
+	static Map<String, EclFunction> functions;
+	static
+	{
+		functions = new HashMap<String, EclFunction>();
+		
+		functions.put("COUNT", new EclFunction("COUNT", true, new EclColumnMetaData("countreturn", 0, java.sql.Types.NUMERIC), true));
+		functions.put("MAX", new EclFunction("MAX", true, new EclColumnMetaData("maxreturn", 0, java.sql.Types.NUMERIC), false));
+		functions.put("MIN", new EclFunction("MIN", true, new EclColumnMetaData("minreturn", 0, java.sql.Types.NUMERIC), false));	
+	}
+	
+	static EclFunction getEclFunction( String funcname)
+	{
+		return functions.get(funcname);
+	}
+	
+	static public boolean verifyEclFunction(String name, List<EclColumnMetaData> funccols)
+	{
+		String upperCaseName = name.toUpperCase();
+		if (functions.containsKey(upperCaseName))
+		{
+			EclFunction function = functions.get(upperCaseName);
+			
+			if (funccols.size() > 1 && !function.acceptsMultipleInputs())
+				return false;
+			
+			for (EclColumnMetaData tmp: funccols)
+			{
+				if (!function.acceptsWilCard() && tmp.getColumnName().contains("*"))
+					return false;
+			}
+			return true;
+		}
+		else 
+			return false;
+	}
+}

+ 1 - 2
plugins/dbconnectors/ecljdbc/src/main/java/com/hpccsystems/ecljdbc/EclPreparedStatement.java

@@ -41,7 +41,7 @@ public class EclPreparedStatement implements PreparedStatement {
 
     public EclPreparedStatement(Connection connection, String query)
     {
-    	System.out.println("#####ECLPreparedStatement::ECLPreparedStatement: " + query + "###");
+    	System.out.println("ECLPreparedStatement::ECLPreparedStatement: " + query);
         this.query = query;
         this.connection = connection;
         warnings = new ArrayList<SQLWarning>();
@@ -66,7 +66,6 @@ public class EclPreparedStatement implements PreparedStatement {
 
     public ResultSet executeQuery() throws SQLException
     {
-    	System.out.println("#####ECLPreparedStatement::executeQuery: " + query + "###");
     	return new EclResultSet(this, query, parameters);
     }
 

+ 0 - 429
plugins/dbconnectors/ecljdbc/src/main/java/com/hpccsystems/ecljdbc/EclResultSet.java

@@ -181,435 +181,6 @@ public class EclResultSet implements ResultSet
 				warnings.add(new SQLWarning(ex.getMessage()));
 			throw new SQLException(ex);
 		}
-
-//		try
-//		{
-//
-//			lastResult = new Object();
-//
-//			this.statement = statement;
-//			EclConnection connection = (EclConnection) statement.getConnection();
-//			dbMetadata = connection.getDatabaseMetaData();
-//			HashMap parameters = new HashMap();
-//			rows = new ArrayList();
-//
-//			SqlParser parser = new SqlParser();
-//			parser.parse(query);
-//
-//			int sqlreqtype = parser.getSqlType();
-//
-//			//ArrayList<EclColumnMetaData> expectedretcolumns = new ArrayList();
-//			List<EclColumnMetaData> expectedretcolumns = new ArrayList();
-//			ArrayList<EclColumnMetaData> storeProcInParams = new ArrayList();
-//			String hpccfilename = "";
-//			String indextouse = null;
-//			String indexposfield = null;
-//			DFUFile indexfiletouse = null;
-//			StringBuilder eclcode = new StringBuilder("");
-//			int totalparamcount = 0;
-//			if(sqlreqtype == SqlParser.SQL_TYPE_SELECT)
-//			{}
-//			else if(sqlreqtype == 9999)
-//			{
-//				hpccfilename = Utils.handleQuotedString(parser.getTableName());
-//				if(!dbMetadata.tableExists("", hpccfilename))
-//					throw new Exception("Invalid table found: " + hpccfilename);
-//
-//				DFUFile dfufile = dbMetadata.getDFUFile(hpccfilename);
-//
-//				//in future this might need to be a container of dfufile(s)
-//				parser.verifySelectColumns(dfufile);
-//
-//				expectedretcolumns = parser.getSelectColumns();
-//
-//				totalparamcount = parser.getWhereClauseExpressionsCount();
-//				if (totalparamcount > 0)
-//				{
-//					String [] paramnames = parser.getWhereClauseNames();
-//					//String [] uniqueparameters = parser.getUniqueWhereClauseNames();
-//					parser.populateParametrizedExpressions(inParameters);
-//
-//					parameters.put("WHERECLAUSE", parser.getWhereClauseString());
-//
-//					//Choose an index to use
-//					if(!dfufile.isKeyFile() && dfufile.hasRelatedIndexes())
-//					{
-//						if(((ECLPreparedStatement)statement).isIndexSet())
-//						{
-//							indextouse = ((ECLPreparedStatement)statement).getIndexToUse();
-//						}
-//						else
-//						{
-//							List<String> relindexes = dfufile.getRelatedIndexesList();
-//							int indexscore [][] = new int 	[dfufile.getRelatedIndexesCount()]
-//															[INDEXSCORECRITERIA/*[FieldsInIndexCount][LeftMostKeyIndex][ColsKeyedcount]*/];
-//							int highscore = Integer.MIN_VALUE;
-//							boolean payloadIdxWithAtLeast1KeyedFieldFound = false;
-//
-//							for (int indexcounter = 0; indexcounter < relindexes.size(); indexcounter++)
-//							{
-//								String indexname = relindexes.get(indexcounter);
-//								DFUFile indexfile = dbMetadata.getDFUFile(indexname);
-//
-//								if (indexfile!= null && indexfile.isKeyFile() && indexfile.hasValidIdxFilePosField())
-//								{
-//									for(int j = 0; j < expectedretcolumns.size(); j++)
-//									{
-//										if(indexfile.containsField(expectedretcolumns.get(j), true))
-//											++indexscore[indexcounter][NumberOfCommonParamInThisIndex];
-//									}
-//
-//									if (payloadIdxWithAtLeast1KeyedFieldFound && indexscore[indexcounter][NumberOfCommonParamInThisIndex] == 0)
-//										break; //Don't bother with this index
-//
-//									int localleftmostindex = Integer.MAX_VALUE;
-//									Properties KeyColumns = indexfile.getKeyedColumns();
-//
-//									if(KeyColumns!=null)
-//									{
-//										for (int i = 0; i < paramnames.length; i++)
-//										{
-//											String currentparam = paramnames[i];
-//											if(KeyColumns.contains(currentparam))
-//											{
-//												++indexscore[indexcounter][NumberofColsKeyedInThisIndex];
-//												int paramindex = indexfile.getKeyColumnIndex(currentparam);
-//												if (localleftmostindex>paramindex)
-//													localleftmostindex = paramindex;
-//											}
-//										}
-//										indexscore[indexcounter][LeftMostKeyIndexPosition] = localleftmostindex;
-//									}
-//
-//									if (indexscore[indexcounter][NumberOfCommonParamInThisIndex]==expectedretcolumns.size()
-//											&& indexscore[indexcounter][NumberofColsKeyedInThisIndex] > 0
-//											&& (!parser.whereClauseContainsOrOperator()))
-//										payloadIdxWithAtLeast1KeyedFieldFound = true; // during scoring, give this priority
-//								}
-//							}
-//
-//							for (int i = 0; i<dfufile.getRelatedIndexesCount(); i++)
-//							{
-//								if (indexscore[i][NumberOfCommonParamInThisIndex] == 0 || indexscore[i][NumberofColsKeyedInThisIndex] == 0) //does one imply the other?
-//									continue; //not good enough
-//
-//								if (payloadIdxWithAtLeast1KeyedFieldFound && indexscore[i][NumberOfCommonParamInThisIndex]<expectedretcolumns.size())
-//									continue; //not good enough
-//
-//								if (indexscore[i][NumberofColsKeyedInThisIndex] < parser.getWhereClauseExpressionsCount() && parser.whereClauseContainsOrOperator())
-//									continue; //not so sure about this rule.
-//
-//								//TODO if current is not payload index, check for fpos field if not found... discard???
-//								//int localscore = ((indexscore[i][NumberOfCommonParamInThisIndex]/selectColumns.size()) * 5) -
-//								int localscore = ((indexscore[i][NumberOfCommonParamInThisIndex]/expectedretcolumns.size()) * 5) -
-//											 	 (((indexscore[i][LeftMostKeyIndexPosition]/totalparamcount)-1) * 3) +
-//											 	 ((indexscore[i][NumberofColsKeyedInThisIndex]) * 2);
-//
-//								if (highscore < localscore )
-//								{
-//									highscore = localscore;
-//									indextouse = relindexes.get(i);
-//
-//									if (payloadIdxWithAtLeast1KeyedFieldFound && indexscore[i][NumberOfCommonParamInThisIndex]==expectedretcolumns.size())
-//										parameters.put("PAYLOADINDEX", "true");
-//									else
-//										parameters.remove("PAYLOADINDEX");
-//								}
-//							}
-//						}
-//
-//						if (indextouse != null)
-//						{
-//							indexfiletouse = dbMetadata.getDFUFile(indextouse);
-//							indexposfield = indexfiletouse.getIdxFilePosField();
-//
-//							Vector<String> keyed = new Vector<String>();
-//							Vector<String> wild = new Vector<String>();
-//
-//							//Create keyed and wild string
-//							Properties keyedcols = indexfiletouse.getKeyedColumns();
-//							for (int i = 1; i <= keyedcols.size(); i++)
-//							{
-//								String keyedcolname = (String)keyedcols.get(i);
-//								if(parser.whereClauseContainsKey(keyedcolname))
-//									keyed.add(" " + parser.getExpressionFromName(keyedcolname).toString() + " ");
-//								else if (keyed.isEmpty())
-//									wild.add(" " + keyedcolname + " ");
-//							}
-//
-//							StringBuilder keyedandwild = new StringBuilder();
-//							if (parameters.containsKey("PAYLOADINDEX"))
-//							{
-//								if (keyed.size()>0)
-//								{
-//									keyedandwild.append("KEYED( ");
-//									for (int i = 0 ; i < keyed.size(); i++)
-//									{
-//										keyedandwild.append(keyed.get(i));
-//										if (i < keyed.size()-1)
-//											keyedandwild.append(" AND ");
-//									}
-//									keyedandwild.append(" )");
-//								}
-//								if (wild.size()>0)
-//								{
-//									//TODO should I bother making sure there's a KEYED entry ?
-//									for (int i = 0 ; i < wild.size(); i++)
-//									{
-//										keyedandwild.append(" and WILD( ");
-//										keyedandwild.append(wild.get(i));
-//										keyedandwild.append(" )");
-//									}
-//								}
-//
-//								/*Properties nonkeyedcols = indexfiletouse.getNonKeyedColumns();
-//								for (int i = 1; i <= nonkeyedcols.size(); i++)
-//								{
-//									String nonkeyedcolname = (String)nonkeyedcols.get(i);
-//									//if(parameters.containsKey(nonkeyedcolname))
-//									if(parser.whereClauseContainsKey(nonkeyedcolname))
-//										//keyedandwild.append(" and ").append(nonkeyedcolname).append(" = \'").append(parameters.get(nonkeyedcolname)).append('\'');
-//										keyedandwild.append(" and ").append(parser.getExpressionFromName(nonkeyedcolname).toString());
-//								}*/
-//
-//								keyedandwild.append(" and (").append(parser.getWhereClauseString()).append(" )");
-//							}
-//							else //non-payload just AND the keyed expressions
-//							{
-//								keyedandwild.append("( ");
-//								keyedandwild.append(parser.getWhereClauseString());
-//								keyedandwild.append(" )");
-//
-//								/*if (keyed.size()>0)
-//								{
-//									keyedandwild.append("( ");
-//									for (int i = 0 ; i < keyed.size(); i++)
-//									{
-//										keyedandwild.append(keyed.get(i));
-//										if (i < keyed.size()-1)
-//											keyedandwild.append(" AND ");
-//									}
-//									keyedandwild.append(" )");
-//								}*/
-//							}
-//
-//							parameters.put("KEYEDWILD", keyedandwild.toString());
-//							((ECLPreparedStatement)statement).setIndexToUse(indexfiletouse.getFullyQualifiedName());
-//							System.out.println("Found index to use: " + indextouse + " field: " + indexposfield);
-//						}
-//						else
-//							System.out.println("No appropriate index found");
-//					}
-//				}
-//				parameters.put("PARAMCOUNT", totalparamcount);
-//
-//				if(dfufile.hasFileRecDef())
-//				{
-//					if (indextouse != null && indexposfield != null)
-//						eclcode.append(dfufile.getFileRecDefwithIndexpos(dbMetadata.getDFUFile(indextouse).getFieldMetaData(indexposfield), "filerecstruct"));
-//					else
-//						eclcode.append(dfufile.getFileRecDef("filerecstruct"));
-//				}
-//				else
-//					throw new Exception("Target HPCC file ("+hpccfilename+") does not contain ECL record definition");
-//
-//				if(!dfufile.isKeyFile())
-//					eclcode.append("fileds := DATASET(\'~").append(dfufile.getFullyQualifiedName()).append("\', filerecstruct,").append(dfufile.getFormat()).append("); ");
-//				else
-//				{
-//					//eclcode.append("fileds := INDEX( filerecstruct, \'~").append(dfufile.getFullyQualifiedName()).append("\');");
-//					eclcode.append("fileds := INDEX( ");
-//					eclcode.append('{');
-//					eclcode.append(dfufile.getKeyedFieldsAsDelmitedString(',', "filerecstruct"));
-//					eclcode.append("},{");
-//					eclcode.append(dfufile.getNonKeyedFieldsAsDelmitedString(',', "filerecstruct"));
-//					eclcode.append("},");
-//					eclcode.append("\'~").append(dfufile.getFullyQualifiedName()).append("\');");
-//				}
-//
-//				StringBuilder selectstruct = new StringBuilder(" selectstruct:=RECORD ");
-//				String datasource = indextouse == null || parameters.containsKey("PAYLOADINDEX") ? "fileds" : "fetchedds";
-//
-//				String scalarOutput = null;
-//
-//				for (int i = 0; i < expectedretcolumns.size(); i++)
-//				{
-//					EclColumnMetaData col = expectedretcolumns.get(i);
-//
-//					if (col.getColumnType() == EclColumnMetaData.COLUMN_TYPE_CONSTANT)
-//						selectstruct.append(col.getEclType()).append(" ").append(col.getColumnName()).append(" := ").append(col.getConstantValue()).append("; ");
-//
-//					else if (col.getColumnType() == EclColumnMetaData.COLUMN_TYPE_FNCTION)
-//					{
-//						if (col.getColumnName().equalsIgnoreCase("COUNT"))
-//						{
-//							parameters.put("COUNTFN", "TRUE");
-//							selectstruct.append("countout := ");
-//							if (parser.hasGroupByColumns())
-//							{
-//								selectstruct.append(col.getColumnName().toUpperCase()).append("( GROUP");
-//								List<EclColumnMetaData> funccols = col.getFunccols();
-//								//for count() only one param allowed
-//								if (funccols.size() > 0)
-//								{
-//									String paramname = funccols.get(0).getColumnName();
-//									if (!paramname.equals("*") && funccols.get(0).getColumnType() != EclColumnMetaData.COLUMN_TYPE_CONSTANT)
-//									{
-//										selectstruct.append(", ");
-//										selectstruct.append(datasource);
-//										selectstruct.append(".");
-//										selectstruct.append(paramname);
-//										selectstruct.append("<> \'\'");
-//									}
-//								}
-//								selectstruct.append(" );");
-//							}
-//							else
-//								selectstruct.append(" totalcount;");
-//
-//							col.setSQLType(java.sql.Types.NUMERIC);
-//						}
-//					}
-//					else
-//						selectstruct.append(col.getEclType()).append(" ").append(col.getColumnName()).append(" := ").append(datasource).append(".").append(col.getColumnName()).append("; ");
-//
-//					if (i == 0 && expectedretcolumns.size() == 1 )
-//						parameters.put("SCALAROUTNAME", col.getColumnName());
-//				}
-//				selectstruct.append("END; ");
-//
-//				parameters.put("SELECTSTRUCT", selectstruct.toString());
-//
-//				//columns are base 1 indexed
-//				resultMetadata = new EclResultsetMetadata(expectedretcolumns, hpccfilename);
-//
-//			}
-//			else if(sqlreqtype == SqlParser.SQL_TYPE_CALL)
-//			{
-//				hpccfilename = Utils.handleQuotedString(parser.getStoredProcName());
-//				if(!dbMetadata.eclQueryExists("", hpccfilename))
-//					throw new Exception("Invalid store procedure found");
-//
-//				defaultEclQueryReturnDatasetName = dbMetadata.getdefaultECLQueryResultDatasetName("", hpccfilename);
-//
-//				expectedretcolumns = dbMetadata.getStoredProcOutColumns("", hpccfilename);
-//				storeProcInParams = dbMetadata.getStoredProcInColumns("",hpccfilename);
-//				//columns are base 1 indexed
-//				resultMetadata = new EclResultsetMetadata(expectedretcolumns, hpccfilename);
-//
-//				String[] procInParamValues = parser.getStoredProcInParamVals();
-//
-//				int paramcount = 0;
-//				for (int i = 0; i < storeProcInParams.size(); i++)
-//				{
-//					if (procInParamValues[i].contains("${")	|| procInParamValues[i].equals("?"))
-//					{
-//						Object value = inParameters.get(new Integer(++paramcount));
-//						parameters.put(storeProcInParams.get(i).getColumnName(), value);
-//						System.out.println("Found Parameter - "	+ storeProcInParams.get(i).getColumnName() + " = " + value);
-//					}
-//					else
-//					{
-//						parameters.put(storeProcInParams.get(i).getColumnName(), procInParamValues[i]);
-//					}
-//				}
-//			}
-//			else if(sqlreqtype == SqlParser.SQL_TYPE_SELECTCONST)
-//			{
-//				System.out.println("Processing test_query...");
-//				//ArrayList<EclColumnMetaData> columns = new ArrayList();
-//				eclcode.append("selectstruct:=RECORD ");
-//				expectedretcolumns = parser.getSelectColumns();
-//				defaultEclQueryReturnDatasetName = "ConstECLQueryResult";
-//				StringBuilder ecloutput = new StringBuilder(" OUTPUT(DATASET([{ ");
-//				for (int i = 1;  i <= expectedretcolumns.size(); i++)
-//				{
-//					EclColumnMetaData col = expectedretcolumns.get(i-1);
-//					eclcode.append(col.getEclType()).append(" ").append(col.getColumnName()).append("; ");
-//					ecloutput.append(col.getConstantValue());
-//					if (i < expectedretcolumns.size())
-//						ecloutput.append(", ");
-//				}
-//				ecloutput.append("}],selectstruct), NAMED(\'");
-//				ecloutput.append(defaultEclQueryReturnDatasetName);
-//				ecloutput.append("\'));");
-//
-//				eclcode.append(" END; ");
-//				eclcode.append(ecloutput.toString());
-//
-//
-//				resultMetadata = new EclResultsetMetadata(expectedretcolumns, "Constants");
-//
-//			}
-//			else
-//			{
-//				throw new SQLException("SQL request type not determined");
-//			}
-//
-//			if(parser.hasOrderByColumns())
-//			{
-//				parameters.put("ORDERBY",parser.getOrderByString());
-//			}
-//			if (parser.hasGroupByColumns())
-//				parameters.put("GROUPBY",parser.getGroupByString());
-//			if (parser.hasLimitBy())
-//				parameters.put("LIMIT",parser.getLimit());
-//
-//			EclEngine eclengine = new EclEngine(hpccfilename, defaultEclQueryReturnDatasetName, connection.getProperties());
-//			ArrayList dsList = null;
-//
-//			if(sqlreqtype == SqlParser.SQL_TYPE_SELECT)
-//			{
-//				dsList = eclengine.executeSelect(eclcode.toString(), parameters, indexfiletouse);
-//			}
-//			else if(sqlreqtype == SqlParser.SQL_TYPE_CALL)
-//			{
-//				dsList = eclengine.executeCall(parameters);
-//			}
-//			else if(sqlreqtype == SqlParser.SQL_TYPE_SELECTCONST)
-//			{
-//				dsList = eclengine.executeSelectConstant(eclcode.toString());
-//			}
-//
-//			/*if (wsEcl.hasResultSchema())
-//			{
-//				Properties res = new Properties();f
-//				//EclDatabaseMetaData.registerSchemaElements(wsEcl.getResultschema().item(0), res, "");
-//
-//				NodeList resSchema = wsEcl.getResultschema();
-//				for (int i = 0; i < resSchema.getLength(); i++)
-//				{
-//					org.w3c.dom.Node node = resSchema.item(i);
-//					String nodename = node.getAttributes().getNamedItem("name").getNodeValue();
-//					if (nodename.equalsIgnoreCase(defaultEclQueryReturnDatasetName))
-//					{
-//						System.out.print("we found result schema");
-//						NodeList schemas = node.getChildNodes();
-//						for (int j = 0; j < schemas.getLength(); j++)
-//						{
-//							org.w3c.dom.Node schema = schemas.item(j);
-//							if(schema.getNodeType() != org.w3c.dom.Node.TEXT_NODE)
-//							{
-//								resultMetadata.parseResultSchema(schema);
-//								//dbMetadata.registerSchemaElements(schema, res, "");
-//								break;
-//							}
-//						}
-//						break;
-//					}
-//				}
-//			}*/
-//
-//			// Get the data
-//			fetchData(dsList,expectedretcolumns);
-//
-//		}
-//		catch (Exception ex)
-//		{
-//			if (ex.getMessage() != null)
-//				warnings.add(new SQLWarning(ex.getMessage()));
-//			throw new SQLException(ex);
-//		}
 	}
 
 	private void fetchData(ArrayList dsList, List<EclColumnMetaData> expectedretcolumns)

+ 12 - 303
plugins/dbconnectors/ecljdbc/src/main/java/com/hpccsystems/ecljdbc/SqlParser.java

@@ -26,13 +26,9 @@ public class SqlParser {
 
 	private String tableName;
 	private String tableAlias;
-	//private String whereStatement;
 	private int sqlType;
-	//private String[] selectColumns;
 	private List<EclColumnMetaData> selectColumns;
 	private String[] columnValues;
-	//private String[] columnWhereNames;
-	//private String[] columnWhereValues;
 	private SqlWhereClause whereclause;
 	private String[] columnGroupByNames;
 	private String[] columnOrderByNames;
@@ -58,14 +54,10 @@ public class SqlParser {
 		limit = -1;
 		tableName = null;
 		tableAlias = null;
-		//selectColumns = new String[0];
 		selectColumns = new ArrayList<EclColumnMetaData>();
 		columnValues = new String[0];
-		//columnWhereNames = new String[0];
-		//columnWhereValues = new String[0];
 		whereclause = new SqlWhereClause();
 		procInParamValues = new String[0];
-		//whereStatement = null;
 		storedProcName = null;
 		sqlType = SQL_TYPE_UNKNOWN;
 		sql = sql.trim();
@@ -218,21 +210,6 @@ public class SqlParser {
 				{
 					groupByToken = upperSql.substring(groupPos + 10, orderPos);
 					orderByToken = upperSql.substring(orderPos + 10);
-
-					/*int dirPos = orderByColumn.lastIndexOf("ASC");
-					if (dirPos == -1)
-						dirPos = orderByColumn.lastIndexOf("DESC");
-
-					//not else if from above if!!
-					if (dirPos != -1)
-					{
-						orderByAscending = Boolean.toString(orderByColumn.contains("ASC"));
-						orderByColumn = orderByColumn.substring(0,dirPos).trim();
-					}
-					else
-						orderByAscending = "true";
-						*/
-
 				}
 
 				upperSql = upperSql.substring(0, groupPos);
@@ -250,20 +227,6 @@ public class SqlParser {
 					groupByToken = upperSql.substring(groupPos + 10);
 
 				}
-
-				/*int dirPos = orderByColumn.lastIndexOf("ASC");
-				if (dirPos == -1)
-					dirPos = orderByColumn.lastIndexOf("DESC");
-
-				//not else if from above if!!
-				if (dirPos != -1)
-				{
-					orderByAscending = Boolean.toString(orderByColumn.contains("ASC"));
-					orderByColumn = orderByColumn.substring(0, dirPos).trim();
-				}
-				else
-					orderByAscending = "true";
-				*/
 				upperSql = upperSql.substring(0, orderPos);
 			}
 
@@ -338,8 +301,12 @@ public class SqlParser {
 					List<EclColumnMetaData> funccols = new ArrayList<EclColumnMetaData>();
 
 					String funcname = col.substring(0,col.indexOf('('));
-					//if (funcname.equalsIgnoreCase("count"))
-					//	hasCount = true;
+					EclFunction func= EclFunctions.getEclFunction(funcname.toUpperCase());
+
+					if (func == null)
+						throw new Exception("ECL Function " + funcname + "is not currently supported");
+
+					boolean a = func.acceptsWilCard();
 
 					col = col.substring(col.indexOf('(')+1).trim();
 
@@ -347,7 +314,9 @@ public class SqlParser {
 					{
 						col = col.substring(0, col.indexOf(")")).trim();
 						if (col.length()>0)
+						{
 							funccols.add(new EclColumnMetaData(col,funcparampos++,java.sql.Types.OTHER));
+						}
 					}
 					else
 					{
@@ -364,8 +333,11 @@ public class SqlParser {
 							funccols.add(new EclColumnMetaData(col,funcparampos++,java.sql.Types.OTHER));
 						}
 					}
-					colmetadata = new EclColumnMetaData(funcname, sqlcolpos++, funccols);
 
+					if (EclFunctions.verifyEclFunction(funcname, funccols))
+						colmetadata = new EclColumnMetaData(funcname, sqlcolpos++, funccols);
+					else
+						throw new Exception("Funtion " + funcname + " does not map to ECL as written");
 				}
 
 				if(col.contains("."))
@@ -453,271 +425,8 @@ public class SqlParser {
 				}
 
 				System.out.println(whereclause);
-
-//				while (tokenizerWhere.hasMoreTokens())
-//				{
-//					String strToken = tokenizerWhere.nextToken();
-//					if (strToken.toLowerCase().indexOf(" and ") != -1) {
-//						String temp = strToken;
-//						int andPos = 0;
-//						out: do {
-//							andPos = temp.toLowerCase().indexOf(" and ");
-//							String strTokenAdd;
-//							if (andPos != -1) {
-//								strTokenAdd = temp.substring(0, andPos).trim();
-//							} else {
-//								strTokenAdd = temp.trim();
-//							}
-//							int delimiter2 = strTokenAdd.indexOf("=");
-//							if (delimiter2 != -1) {
-//								String valueAdd = strTokenAdd.substring(
-//										delimiter2 + 1).trim();
-//								valueAdd = Utils.handleQuotedString(valueAdd);
-//								whereCols.add(strTokenAdd.substring(0,
-//										delimiter2).trim());
-//								valueAdd = Utils.replaceAll(valueAdd,
-//										COMMA_ESCAPE, ",");
-//								valueAdd = Utils.replaceAll(valueAdd,
-//										QUOTE_ESCAPE, "'");
-//								whereValues.add(valueAdd);
-//							} else {
-//								int delimiter3 = strTokenAdd.toLowerCase()
-//										.indexOf(" is ");
-//								whereCols.add(strTokenAdd.substring(0,
-//										delimiter3).trim());
-//								whereValues.add(null);
-//							}
-//							temp = temp.substring(andPos + 5);
-//							if (temp.toLowerCase().indexOf(" and ") == -1) {
-//								strTokenAdd = temp.trim();
-//								int delimiter4 = strTokenAdd.indexOf("=");
-//								if (delimiter4 != -1) {
-//									String valueAdd = strTokenAdd.substring(
-//											delimiter4 + 1).trim();
-//									valueAdd = Utils
-//											.handleQuotedString(valueAdd);
-//									whereCols.add(strTokenAdd.substring(0,
-//											delimiter4).trim());
-//									valueAdd = Utils.replaceAll(valueAdd,
-//											COMMA_ESCAPE, ",");
-//									valueAdd = Utils.replaceAll(valueAdd,
-//											QUOTE_ESCAPE, "'");
-//									whereValues.add(valueAdd);
-//								} else {
-//									int delimiter3 = strTokenAdd.toLowerCase()
-//											.indexOf(" is ");
-//									whereCols.add(strTokenAdd.substring(0,
-//											delimiter3).trim());
-//									whereValues.add(null);
-//								}
-//								break out;
-//							}
-//
-//						} while (true);
-//
-//					}
-//					else
-//					{
-//						int delimiter = strToken.indexOf("=");
-//						if (delimiter != -1) {
-//							String value = strToken.substring(delimiter + 1)
-//									.trim();
-//							value = Utils.handleQuotedString(value);
-//							whereCols.add(strToken.substring(0, delimiter)
-//									.trim());
-//							value = Utils.replaceAll(value, COMMA_ESCAPE, ",");
-//							value = Utils.replaceAll(value, QUOTE_ESCAPE, "'");
-//							whereValues.add(value);
-//						} else {
-//							int delimiter1 = strToken.toLowerCase().indexOf(
-//									" is ");
-//							whereCols.add(strToken.substring(0, delimiter1)
-//									.trim());
-//							whereValues.add(null);
-//						}
-//					}
-//				}
-//
-//				columnWhereNames = new String[whereCols.size()];
-//				columnWhereValues = new String[whereValues.size()];
-//				whereCols.copyInto(columnWhereNames);
-//				whereValues.copyInto(columnWhereValues);
-//
 			}
 		}
-		// INSERT
-		/*if (upperSql.startsWith("INSERT ")) {
-			if (upperSql.lastIndexOf(" VALUES") == -1) {
-				throw new Exception("Malformed SQL. Missing VALUES statement.");
-			}
-			sqlType = INSERT;
-			int intoPos = 0;
-			if (upperSql.indexOf(" INTO ") != -1) {
-				intoPos = upperSql.indexOf(" INTO ") + 6;
-			} else {
-				intoPos = upperSql.indexOf("INSERT ") + 7;
-			}
-			int bracketPos = upperSql.indexOf("(");
-			int lastBracketPos = upperSql.indexOf(")");
-			tableName = sql.substring(intoPos, bracketPos).trim();
-
-			Vector<String> cols = new Vector<String>();
-			StringTokenizer tokenizer = new StringTokenizer(upperSql.substring(
-					bracketPos + 1, lastBracketPos), ",");
-			while (tokenizer.hasMoreTokens()) {
-				cols.add(tokenizer.nextToken().trim());
-			}
-			selectColumns = new String[cols.size()];
-			cols.copyInto(selectColumns);
-
-			int valuesPos = upperSql.indexOf("VALUES");
-			String endStatement = sql.substring(valuesPos + 6).trim();
-			bracketPos = endStatement.indexOf("(");
-			lastBracketPos = endStatement.lastIndexOf(")");
-			Vector<String> values = new Vector<String>();
-			StringTokenizer tokenizer2 = new StringTokenizer(
-					endStatement.substring(bracketPos + 1, lastBracketPos), ",");
-			while (tokenizer2.hasMoreTokens()) {
-				String value = tokenizer2.nextToken().trim();
-				value = Utils.handleQuotedString(value);
-				value = Utils.replaceAll(value, COMMA_ESCAPE, ",");
-				value = Utils.replaceAll(value, QUOTE_ESCAPE, "'");
-				values.add(value);
-			}
-			columnValues = new String[values.size()];
-			values.copyInto(columnValues);
-		}
-
-		// UPDATE
-		if (upperSql.startsWith("UPDATE ")) {
-			if (upperSql.lastIndexOf(" SET ") == -1) {
-				throw new Exception("Malformed SQL. Missing SET statement.");
-			}
-			sqlType = UPDATE;
-			int updatePos = upperSql.indexOf("UPDATE");
-			int setPos = upperSql.indexOf(" SET ");
-			//int equalPos = upperSql.indexOf("=");
-			int wherePos = upperSql.indexOf(" WHERE ");
-			tableName = sql.substring(updatePos + 6, setPos).trim();
-
-			String setString = "";
-			if (wherePos != -1) {
-				setString = sql.substring(setPos + 5, wherePos);
-			} else {
-				setString = sql.substring(setPos + 5, sql.length());
-			}
-			StringTokenizer tokenizerSet = new StringTokenizer(setString, ",");
-			Vector<String> setNames = new Vector<String>();
-			Vector<String> setValues = new Vector<String>();
-
-			while (tokenizerSet.hasMoreTokens()) {
-				String strToken = tokenizerSet.nextToken();
-				int delimiter = strToken.indexOf("=");
-				setNames.add(strToken.substring(0, delimiter).trim());
-				String value = strToken.substring(delimiter + 1).trim();
-				value = Utils.handleQuotedString(value);
-				value = Utils.replaceAll(value, COMMA_ESCAPE, ",");
-				value = Utils.replaceAll(value, QUOTE_ESCAPE, "'");
-				setValues.add(value);
-			}
-
-			selectColumns = new String[setNames.size()];
-			columnValues = new String[setValues.size()];
-			setNames.copyInto(selectColumns);
-			setValues.copyInto(columnValues);
-			if (wherePos != -1) {
-				String strWhere = sql.substring(wherePos + 6).trim();
-				Vector<String> whereCols = new Vector<String>();
-				Vector<String> whereValues = new Vector<String>();
-				StringTokenizer tokenizerWhere = new StringTokenizer(strWhere,
-						",");
-
-				while (tokenizerWhere.hasMoreTokens()) {
-					String strToken = tokenizerWhere.nextToken();
-					if (strToken.toLowerCase().indexOf(" and ") != -1) {
-						String temp = strToken;
-						int andPos = 0;
-						out: do {
-							andPos = temp.toLowerCase().indexOf(" and ");
-							String strTokenAdd;
-							if (andPos != -1) {
-								strTokenAdd = temp.substring(0, andPos).trim();
-							} else {
-								strTokenAdd = temp.trim();
-							}
-							int delimiter2 = strTokenAdd.indexOf("=");
-							if (delimiter2 != -1) {
-								String valueAdd = strTokenAdd.substring(
-										delimiter2 + 1).trim();
-								valueAdd = Utils.handleQuotedString(valueAdd);
-								whereCols.add(strTokenAdd.substring(0,
-										delimiter2).trim());
-								valueAdd = Utils.replaceAll(valueAdd,
-										COMMA_ESCAPE, ",");
-								valueAdd = Utils.replaceAll(valueAdd,
-										QUOTE_ESCAPE, "'");
-								whereValues.add(valueAdd);
-							} else {
-								int delimiter3 = strTokenAdd.toLowerCase()
-										.indexOf(" is ");
-								whereCols.add(strTokenAdd.substring(0,
-										delimiter3).trim());
-								whereValues.add(null);
-							}
-							temp = temp.substring(andPos + 5);
-							if (temp.toLowerCase().indexOf(" and ") == -1) {
-								strTokenAdd = temp.trim();
-								int delimiter4 = strTokenAdd.indexOf("=");
-								if (delimiter4 != -1) {
-									String valueAdd = strTokenAdd.substring(
-											delimiter4 + 1).trim();
-									valueAdd = Utils
-											.handleQuotedString(valueAdd);
-									whereCols.add(strTokenAdd.substring(0,
-											delimiter4).trim());
-									valueAdd = Utils.replaceAll(valueAdd,
-											COMMA_ESCAPE, ",");
-									valueAdd = Utils.replaceAll(valueAdd,
-											QUOTE_ESCAPE, "'");
-									whereValues.add(valueAdd);
-								} else {
-									int delimiter3 = strTokenAdd.toLowerCase()
-											.indexOf(" is ");
-									whereCols.add(strTokenAdd.substring(0,
-											delimiter3).trim());
-									whereValues.add(null);
-								}
-								break out;
-							}
-
-						} while (true);
-
-					} else {
-						int delimiter = strToken.indexOf("=");
-						if (delimiter != -1) {
-							String value = strToken.substring(delimiter + 1)
-									.trim();
-							value = Utils.handleQuotedString(value);
-							whereCols.add(strToken.substring(0, delimiter)
-									.trim());
-							value = Utils.replaceAll(value, COMMA_ESCAPE, ",");
-							value = Utils.replaceAll(value, QUOTE_ESCAPE, "'");
-							whereValues.add(value);
-						} else {
-							int delimiter1 = strToken.toLowerCase().indexOf(
-									" is ");
-							whereCols.add(strToken.substring(0, delimiter1)
-									.trim());
-							whereValues.add(null);
-						}
-					}
-				}
-				columnWhereNames = new String[whereCols.size()];
-				columnWhereValues = new String[whereValues.size()];
-				whereCols.copyInto(columnWhereNames);
-				whereValues.copyInto(columnWhereValues);
-			}
-		}*/
 	}
 
 	private boolean parseConstantSelect(String sql) throws Exception

+ 1 - 0
system/mp/mptag.hpp

@@ -55,6 +55,7 @@ TAGENUM
     DEFTAG ( MPTAG_THOR )
     DEFTAG ( MPTAG_THORRESOURCELOCK )
     DEFTAG ( MPTAG_MPTX )
+    DEFTAG ( MPTAG_THORWATCHDOG )
 
     // new static tags go above here
 

+ 5 - 1
thorlcr/activities/fetch/thfetch.cpp

@@ -33,6 +33,8 @@ class CFetchActivityMaster : public CMasterActivity
     SocketEndpoint *endpoints;
 
 protected:
+    IHThorFetchArg *helper;
+    IHThorFetchContext *fetchContext;
     Owned<IDistributedFile> fetchFile;
 public:
     CFetchActivityMaster(CMasterGraphElement *info) : CMasterActivity(info)
@@ -40,6 +42,9 @@ public:
         endpoints = NULL;
         if (!container.queryLocalOrGrouped())
             mpTag = container.queryJob().allocateMPTag();
+        helper = (IHThorFetchArg *)queryHelper();
+        fetchContext = static_cast<IHThorFetchContext *>(helper->selectInterface(TAIfetchcontext_1));
+        reInit = 0 != (fetchContext->getFetchFlags() & (FFvarfilename|FFdynamicfilename));
     }
     ~CFetchActivityMaster()
     {
@@ -47,7 +52,6 @@ public:
     }
     virtual void init()
     {
-        IHThorFetchArg *helper = (IHThorFetchArg *)queryHelper();
         fetchFile.setown(queryThorFileManager().lookup(container.queryJob(), helper->getFileName(), false, 0 != (helper->getFetchFlags() & FFdatafileoptional), true));
         if (fetchFile)
         {

+ 3 - 2
thorlcr/activities/fetch/thfetchslave.cpp

@@ -291,6 +291,9 @@ public:
         fetchStream = NULL;
         keyIn = NULL;
         fetchStreamOut = NULL;
+        fetchBaseHelper = (IHThorFetchBaseArg *)queryHelper();
+        fetchContext = static_cast<IHThorFetchContext *>(fetchBaseHelper->selectInterface(TAIfetchcontext_1));
+        reInit = 0 != (fetchContext->getFetchFlags() & (FFvarfilename|FFdynamicfilename));
     }
     ~CFetchSlaveBase()
     {
@@ -300,8 +303,6 @@ public:
 
     virtual void init(MemoryBuffer &data, MemoryBuffer &slaveData)
     {
-        fetchBaseHelper = (IHThorFetchBaseArg *)queryHelper();
-        fetchContext = static_cast<IHThorFetchContext *>(queryHelper()->selectInterface(TAIfetchcontext_1));
         unsigned numParts;
         data.read(numParts);
         offsetCount = 0;

+ 12 - 11
thorlcr/activities/indexread/thindexread.cpp

@@ -32,7 +32,7 @@ protected:
     Linked<IDistributedFile> index;
     Owned<IFileDescriptor> fileDesc;
     rowcount_t limit;
-    IHThorIndexReadBaseArg *helper;
+    IHThorIndexReadBaseArg *indexBaseHelper;
     Owned<CSlavePartMapping> mapping;
     bool nofilter;
     ProgressInfoArray progressInfoArr;
@@ -69,7 +69,7 @@ protected:
         unsigned nparts = f->numParts(); // includes tlks if any, but unused in array
         performPartLookup.ensure(nparts);
 
-        bool checkTLKConsistency = NULL != super && 0 != (TIRsorted & helper->getFlags());
+        bool checkTLKConsistency = NULL != super && 0 != (TIRsorted & indexBaseHelper->getFlags());
         if (nofilter)
         {
             while (nparts--) performPartLookup.append(true);
@@ -154,11 +154,11 @@ protected:
                     }
                 }
                 if (!keyIndex)
-                    throw MakeThorException(TE_FileNotFound, "Top level key part does not exist, for key: %s", helper->getFileName());
+                    throw MakeThorException(TE_FileNotFound, "Top level key part does not exist, for key: %s", indexBaseHelper->getFileName());
             
-                unsigned maxSize = helper->queryDiskRecordSize()->querySerializedMeta()->getRecordSize(NULL); // used only if fixed
+                unsigned maxSize = indexBaseHelper->queryDiskRecordSize()->querySerializedMeta()->getRecordSize(NULL); // used only if fixed
                 Owned <IKeyManager> tlk = createKeyManager(keyIndex, maxSize, NULL);
-                helper->createSegmentMonitors(tlk);
+                indexBaseHelper->createSegmentMonitors(tlk);
                 tlk->finishSegmentMonitors();
                 tlk->reset();
                 while (tlk->lookup(false))
@@ -179,6 +179,7 @@ protected:
 public:
     CIndexReadBase(CMasterGraphElement *info) : CMasterActivity(info)
     {
+        indexBaseHelper = (IHThorIndexReadBaseArg *)queryHelper();
         limit = RCMAX;
         if (!container.queryLocalOrGrouped())
             mpTag = container.queryJob().allocateMPTag();
@@ -187,16 +188,16 @@ public:
         ForEachItemIn(l, progressLabels)
             progressInfoArr.append(*new ProgressInfo);
         inputProgress.setown(new ProgressInfo);
+        reInit = 0 != (indexBaseHelper->getFlags() & (TIRvarfilename|TIRdynamicfilename));
     }
     void init()
     {
-        helper = (IHThorIndexReadArg *)queryHelper();
         nofilter = false;
 
-        index.setown(queryThorFileManager().lookup(container.queryJob(), helper->getFileName(), false, 0 != (TIRoptional & helper->getFlags()), true));
+        index.setown(queryThorFileManager().lookup(container.queryJob(), indexBaseHelper->getFileName(), false, 0 != (TIRoptional & indexBaseHelper->getFlags()), true));
         if (index)
         {
-            nofilter = 0 != (TIRnofilter & helper->getFlags());
+            nofilter = 0 != (TIRnofilter & indexBaseHelper->getFlags());
             if (index->queryAttributes().getPropBool("@local"))
                 nofilter = true;
             else
@@ -206,8 +207,8 @@ public:
                 if (sub && 1 == sub->numParts())
                     nofilter = true;
             }   
-            checkFormatCrc(this, index, helper->getFormatCrc(), true);
-            if ((container.queryLocalOrGrouped() || helper->canMatchAny()) && index->numParts())
+            checkFormatCrc(this, index, indexBaseHelper->getFormatCrc(), true);
+            if ((container.queryLocalOrGrouped() || indexBaseHelper->canMatchAny()) && index->numParts())
             {
                 fileDesc.setown(getConfiguredFileDescriptor(*index));
                 if (container.queryLocalOrGrouped())
@@ -220,7 +221,7 @@ public:
     }
     void serializeSlaveData(MemoryBuffer &dst, unsigned slave)
     {
-        dst.append(helper->getFileName());
+        dst.append(indexBaseHelper->getFileName());
         if (!container.queryLocalOrGrouped())
             dst.append(mpTag);
         IArrayOf<IPartDescriptor> parts;

+ 1 - 0
thorlcr/activities/indexread/thindexreadslave.cpp

@@ -170,6 +170,7 @@ public:
         localKey = false;
         fixedDiskRecordSize = helper->queryDiskRecordSize()->querySerializedMeta()->getFixedSize(); // 0 if variable and unused
         progress = 0;
+        reInit = 0 != (helper->getFlags() & (TIRvarfilename|TIRdynamicfilename));
     }
     rowcount_t sendGetCount(rowcount_t count)
     {

+ 11 - 12
thorlcr/activities/indexwrite/thindexwrite.cpp

@@ -38,24 +38,26 @@ class IndexWriteActivityMaster : public CMasterActivity
     Owned<ProgressInfo> replicateProgress;
     bool publishReplicatedDone;
     CDfsLogicalFileName dlfn;
+    IHThorIndexWriteArg *helper;
 
 public:
     IndexWriteActivityMaster(CMasterGraphElement *info) : CMasterActivity(info)
     {
+        helper = (IHThorIndexWriteArg *)queryHelper();
         replicateProgress.setown(new ProgressInfo);
         publishReplicatedDone = !globals->getPropBool("@replicateAsync", true);
         recordsProcessed = 0;
         refactor = singlePartKey = isLocal = false;
         mpTag2 = TAG_NULL;
+        reInit = (0 != (TIWvarfilename & helper->getFlags()));
     }
     ~IndexWriteActivityMaster()
     {
         if (TAG_NULL != mpTag2)
             container.queryJob().freeMPTag(mpTag2);
     }
-    void init()
+    virtual void init()
     {
-        IHThorIndexWriteArg *helper = (IHThorIndexWriteArg *)queryHelper();
         dlfn.set(helper->getFileName());
         isLocal = 0 != (TIWlocal & helper->getFlags());
         unsigned maxSize = helper->queryDiskRecordSize()->getMinRecordSize();
@@ -63,6 +65,7 @@ public:
             throw MakeActivityException(this, 0, "Index minimum record length (%d) exceeds 32767 internal limit", maxSize);
 
         singlePartKey = 0 != (helper->getFlags() & TIWsmall) || dlfn.isExternal();
+        clusters.kill();
         unsigned idx=0;
         while (helper->queryCluster(idx))
             clusters.append(helper->queryCluster(idx++));
@@ -170,7 +173,7 @@ public:
         mpTag = container.queryJob().allocateMPTag();
         mpTag2 = container.queryJob().allocateMPTag();
     }
-    void serializeSlaveData(MemoryBuffer &dst, unsigned slave)
+    virtual void serializeSlaveData(MemoryBuffer &dst, unsigned slave)
     {
         IHThorIndexWriteArg *helper = (IHThorIndexWriteArg *)queryHelper(); 
         dst.append(mpTag);  // used to build TLK on node1
@@ -220,7 +223,7 @@ public:
             }
         }
     }
-    void done()
+    virtual void done()
     {
         IHThorIndexWriteArg *helper = (IHThorIndexWriteArg *)queryHelper();
         StringBuffer scopedName;
@@ -287,16 +290,12 @@ public:
             }
         }
     }
-    void process()
-    {
-        CMasterActivity::process();
-    }
-    void abort()
+    virtual void abort()
     {
         CMasterActivity::abort();
         cancelReceiveMsg(RANK_ALL, mpTag2);
     }
-    void preStart(size32_t parentExtractSz, const byte *parentExtract)
+    virtual void preStart(size32_t parentExtractSz, const byte *parentExtract)
     {
         CMasterActivity::preStart(parentExtractSz, parentExtract);
         IHThorIndexWriteArg *helper = (IHThorIndexWriteArg *) queryHelper();
@@ -311,14 +310,14 @@ public:
             }
         }
     }
-    void deserializeStats(unsigned node, MemoryBuffer &mb)
+    virtual void deserializeStats(unsigned node, MemoryBuffer &mb)
     {
         CMasterActivity::deserializeStats(node, mb);
         unsigned repPerc;
         mb.read(repPerc);
         replicateProgress->set(node, repPerc);
     }
-    void getXGMML(IWUGraphProgress *progress, IPropertyTree *node)
+    virtual void getXGMML(IWUGraphProgress *progress, IPropertyTree *node)
     {
         CMasterActivity::getXGMML(progress, node);
         if (publishReplicatedDone)

+ 2 - 3
thorlcr/activities/indexwrite/thindexwriteslave.cpp

@@ -102,7 +102,7 @@ public:
 
     IndexWriteSlaveActivity(CGraphElementBase *_container) : ProcessSlaveActivity(_container)
     {
-        helper = NULL;
+        helper = static_cast <IHThorIndexWriteArg *> (queryHelper());
         sizeSignalled = false;
         initTotalCount = totalCount = 0;
         maxDiskRecordSize = lastRowSize = firstRowSize = 0;
@@ -117,12 +117,11 @@ public:
         needFirstRow = true;
         receivingTag2 = false;
         enableTlkPart0 = (0 != container.queryJob().getWorkUnitValueInt("enableTlkPart0", globals->getPropBool("@enableTlkPart0", true)));
+        reInit = (0 != (TIWvarfilename & helper->getFlags()));
     }
 
     void init(MemoryBuffer &data, MemoryBuffer &slaveData)
     {
-        helper = static_cast <IHThorIndexWriteArg *> (queryHelper());
-
         isLocal = 0 != (TIWlocal & helper->getFlags());
 
         mpTag = container.queryJob().deserializeMPTag(data);

+ 6 - 5
thorlcr/activities/keyedjoin/thkeyedjoin.cpp

@@ -26,6 +26,7 @@
 
 class CKeyedJoinMaster : public CMasterActivity
 {
+    IHThorKeyedJoinArg *helper;
     Owned<CSlavePartMapping> dataFileMapping;
     Owned<IDistributedFile> indexFile, dataFile;
     MemoryBuffer offsetMapMb, initMb;
@@ -39,13 +40,13 @@ class CKeyedJoinMaster : public CMasterActivity
 public:
     CKeyedJoinMaster(CMasterGraphElement *info) : CMasterActivity(info)
     {
+        helper = (IHThorKeyedJoinArg *) queryHelper();
         progressLabels.append("seeks");
         progressLabels.append("scans");
         progressLabels.append("accepted");
         progressLabels.append("postfiltered");
         progressLabels.append("prefiltered");
 
-        IHThorKeyedJoinArg *helper = (IHThorKeyedJoinArg *) queryHelper();
         if (helper->diskAccessRequired())
         {
             progressLabels.append("diskSeeks");
@@ -57,6 +58,7 @@ public:
         localKey = false;
         numTags = 0;
         tags[0] = tags[1] = tags[2] = tags[3] = TAG_NULL;
+        reInit = 0 != (helper->getFetchFlags() & (FFvarfilename|FFdynamicfilename));
     }
     ~CKeyedJoinMaster()
     {
@@ -67,14 +69,14 @@ public:
     }
     void init()
     {
-        IHThorKeyedJoinArg *helper = (IHThorKeyedJoinArg *)queryHelper();
-        
         indexFile.setown(queryThorFileManager().lookup(container.queryJob(), helper->getIndexFileName(), false, 0 != (helper->getJoinFlags() & JFindexoptional), true));
 
         unsigned keyReadWidth = (unsigned)container.queryJob().getWorkUnitValueInt("KJKRR", 0);
         if (!keyReadWidth || keyReadWidth>container.queryJob().querySlaves())
             keyReadWidth = container.queryJob().querySlaves();
         
+
+        initMb.clear();
         initMb.append(helper->getIndexFileName());
         if (helper->diskAccessRequired())
             numTags += 2;
@@ -222,7 +224,7 @@ public:
                                 dataReadWidth = container.queryJob().querySlaves();
                             Owned<IGroup> grp = container.queryJob().querySlaveGroup().subset((unsigned)0, dataReadWidth);
                             dataFileMapping.setown(getFileSlaveMaps(dataFile->queryLogicalName(), *dataFileDesc, container.queryJob().queryUserDescriptor(), *grp, false, false, NULL));
-                            dataFileMapping->serializeFileOffsetMap(offsetMapMb);
+                            dataFileMapping->serializeFileOffsetMap(offsetMapMb.clear());
                             queryThorFileManager().noteFileRead(container.queryJob(), dataFile);
                         }
                         else
@@ -240,7 +242,6 @@ public:
     }
     void serializeSlaveData(MemoryBuffer &dst, unsigned slave)
     {
-        IHThorKeyedJoinArg *helper = (IHThorKeyedJoinArg *)queryHelper();
         dst.append(initMb);
         if (indexFile && helper->diskAccessRequired())
         {

+ 5 - 0
thorlcr/activities/keyedjoin/thkeyedjoinslave.cpp

@@ -1587,6 +1587,7 @@ public:
         lastTick = 0;
 #endif
         helper = (IHThorKeyedJoinArg *)queryHelper();
+        reInit = 0 != (helper->getFetchFlags() & (FFvarfilename|FFdynamicfilename));
     }
     ~CKeyedJoinSlave()
     {
@@ -1832,6 +1833,10 @@ public:
             tags.append(tag);
             container.queryJob().queryJobComm().flush(tag);
         }
+        indexParts.kill();
+        dataParts.kill();
+        tlkKeySet.setown(createKeyIndexSet());
+        partKeySet.setown(createKeyIndexSet());
         unsigned numIndexParts;
         data.read(numIndexParts);
         if (numIndexParts)

+ 1 - 1
thorlcr/activities/lookupjoin/thlookupjoinslave.cpp

@@ -1082,7 +1082,7 @@ public:
         if (exception.get())
         {
             StringBuffer errStr(joinStr);
-            errStr.append("(").append(container.queryId()).appendf(") right-hand side is too large (%"I64F"u bytes in %"RCPF"d rows) for %s : (",(unsigned __int64) rhs.serializedSize(),rhs.ordinality(),joinStr.get());
+            errStr.append("(").append(container.queryId()).appendf(") right-hand side is too large (%"I64F"u bytes in %"RIPF"d rows) for %s : (",(unsigned __int64) rhs.serializedSize(),rhs.ordinality(),joinStr.get());
             errStr.append(exception->errorCode()).append(", ");
             exception->errorMessage(errStr);
             errStr.append(")");

+ 1 - 1
thorlcr/activities/loop/thloop.cpp

@@ -175,7 +175,7 @@ class CLoopActivityMaster : public CLoopActivityMasterBase
             bool overLimit = slaveEmptyIterations > maxEmptyLoopIterations;
             emptyIterations->set(sender-1, overLimit);
             if (emptyIterations->scan(0, 0) >= nodes) // all empty
-                throw MakeActivityException(this, 0, "Executed LOOP with empty input and output > %maxEmptyLoopIterations times on all nodes", maxEmptyLoopIterations);
+                throw MakeActivityException(this, 0, "Executed LOOP with empty input and output > %d maxEmptyLoopIterations times on all nodes", maxEmptyLoopIterations);
         }
     }
 public:

+ 1 - 1
thorlcr/activities/loop/thloopslave.cpp

@@ -219,7 +219,7 @@ class CLoopSlaveActivity : public CLoopSlaveActivityBase
     Owned<IRowStream> curInput;
     Owned<CNextRowFeeder> nextRowFeeder;
     Owned<IRowWriterMultiReader> loopPending;
-    unsigned loopPendingCount;
+    rowcount_t loopPendingCount;
     unsigned flags, lastMs;
     IHThorLoopArg *helper;
     bool eof, finishedLooping;

+ 1 - 0
thorlcr/activities/thdiskbase.cpp

@@ -47,6 +47,7 @@ void CDiskReadMasterBase::init()
             fileDesc.setown(getConfiguredFileDescriptor(*file));
         else
             fileDesc.setown(file->getFileDescriptor());
+        reInit = 0 != (helper->getFlags() & (TDXvarfilename|TDXdynamicfilename));
         if (container.queryLocal() || helper->canMatchAny()) // if local, assume may match
         {
             bool local;

+ 3 - 0
thorlcr/activities/thdiskbaseslave.cpp

@@ -201,6 +201,7 @@ const char * CDiskPartHandlerBase::queryLogicalFilename(const void * row)
 CDiskReadSlaveActivityBase::CDiskReadSlaveActivityBase(CGraphElementBase *_container) : CSlaveActivity(_container)
 {
     helper = (IHThorDiskReadBaseArg *)queryHelper();
+    reInit = 0 != (helper->getFlags() & (TDXvarfilename|TDXdynamicfilename));
     crcCheckCompressed = 0 != container.queryJob().getWorkUnitValueInt("crcCheckCompressed", 0);
     markStart = gotMeta = false;
     checkFileCrc = !globals->getPropBool("Debug/@fileCrcDisabled");
@@ -209,6 +210,8 @@ CDiskReadSlaveActivityBase::CDiskReadSlaveActivityBase(CGraphElementBase *_conta
 // IThorSlaveActivity
 void CDiskReadSlaveActivityBase::init(MemoryBuffer &data, MemoryBuffer &slaveData)
 {
+    subfileLogicalFilenames.kill();
+    partDescs.kill();
     data.read(logicalFilename);
     unsigned subfiles;
     data.read(subfiles);

+ 15 - 8
thorlcr/graph/thgraph.cpp

@@ -77,7 +77,7 @@ class CThorGraphResult : public CInterface, implements IThorResult, implements I
         virtual void flush() { }
         virtual IRowStream *getReader()
         {
-            return rows.createRowStream(0, (rowcount_t)-1, false);
+            return rows.createRowStream(0, (rowidx_t)-1, false);
         }
     };
 public:
@@ -171,9 +171,10 @@ public:
     }
     virtual void getLinkedResult(unsigned &countResult, byte * * & result)
     {
+        assertex(rowStreamCount==((unsigned)rowStreamCount)); // catch, just in case
         Owned<IRowStream> stream = getRowStream();
         countResult = 0;
-        OwnedConstThorRow _rowset = allocator->createRowset(rowStreamCount);
+        OwnedConstThorRow _rowset = allocator->createRowset((unsigned)rowStreamCount);
         const void **rowset = (const void **)_rowset.get();
         loop
         {
@@ -243,7 +244,7 @@ public:
         IThorResult *loopResult = results->createResult(activity, 0, resultRowIf, !activity.queryGraph().isLocalChild()); // loop output
         IThorResult *inputResult = results->createResult(activity, 1, resultRowIf, !activity.queryGraph().isLocalChild());
     }
-    virtual IRowStream *execute(CActivityBase &activity, unsigned counter, IRowWriterMultiReader *inputStream, unsigned rowStreamCount, size32_t parentExtractSz, const byte *parentExtract)
+    virtual IRowStream *execute(CActivityBase &activity, unsigned counter, IRowWriterMultiReader *inputStream, rowcount_t rowStreamCount, size32_t parentExtractSz, const byte *parentExtract)
     {
         Owned<IThorGraphResults> results = graph->createThorGraphResults(3);
         prepareLoopResults(activity, results);
@@ -522,8 +523,7 @@ void CGraphElementBase::serializeCreateContext(MemoryBuffer &mb)
 
 void CGraphElementBase::serializeStartContext(MemoryBuffer &mb)
 {
-    if (!onStartCalled) return;
-    mb.append(queryId());
+    assertex(onStartCalled);
     unsigned pos = mb.length();
     mb.append((size32_t)0);
     queryHelper()->serializeStartContext(mb);
@@ -545,6 +545,7 @@ void CGraphElementBase::deserializeStartContext(MemoryBuffer &mb)
     mb.read(startCtxLen);
     startCtxMb.append(startCtxLen, mb.readDirect(startCtxLen));
     haveStartCtx = true;
+    onStartCalled = false; // allow to be called again
 }
 
 void CGraphElementBase::onCreate()
@@ -1073,6 +1074,7 @@ void CGraphBase::serializeStartContexts(MemoryBuffer &mb)
     ForEach (*iter)
     {
         CGraphElementBase &element = iter->query();
+        mb.append(element.queryId());
         element.serializeStartContext(mb);
     }
     mb.append((activity_id)0);
@@ -1673,7 +1675,8 @@ void CGraphBase::createFromXGMML(IPropertyTree *_node, CGraphBase *_owner, CGrap
     {
         localResults.setown(createThorGraphResults(numResults));
         resultsGraph = this;
-        tmpHandler.setown(queryJob().createTempHandler());
+        // JCSMORE - it might make more sense if this temp handler was owned by parent act., which may finish (get stopped) earlier than the owning graph
+        tmpHandler.setown(queryJob().createTempHandler(false));
     }
 
     localChild = false;
@@ -1955,7 +1958,11 @@ void CGraphTempHandler::deregisterFile(const char *name, bool kept)
     CriticalBlock b(crit);
     CFileUsageEntry *fileUsage = tmpFiles.find(name);
     if (!fileUsage)
-        throw MakeThorException(TE_FileNotFound, "File not found (%s) deregistering tmp file", name);
+    {
+        if (errorOnMissing)
+            throw MakeThorException(TE_FileNotFound, "File not found (%s) deregistering tmp file", name);
+        return;
+    }
     if (0 == fileUsage->queryUsage()) // marked 'not to be deleted' until workunit complete.
         return;
     else if (1 == fileUsage->queryUsage())
@@ -2625,7 +2632,7 @@ IThorResource &queryThor()
 CActivityBase::CActivityBase(CGraphElementBase *_container) : container(*_container), timeActivities(_container->queryJob().queryTimeActivities())
 {
     mpTag = TAG_NULL;
-    abortSoon = cancelledReceive = false;
+    abortSoon = cancelledReceive = reInit = false;
     baseHelper.set(container.queryHelper());
     parentExtractSz = 0;
     parentExtract = NULL;

+ 8 - 5
thorlcr/graph/thgraph.hpp

@@ -142,7 +142,7 @@ interface IThorBoundLoopGraph : extends IInterface
 {
     virtual void prepareLoopResults(CActivityBase &activity, IThorGraphResults *results) = 0;
     virtual void prepareCounterResult(CActivityBase &activity, IThorGraphResults *results, unsigned loopCounter, unsigned pos) = 0;
-    virtual IRowStream *execute(CActivityBase &activity, unsigned counter, IRowWriterMultiReader *rowStream, unsigned rowStreamCount, size32_t parentExtractSz, const byte * parentExtract) = 0;
+    virtual IRowStream *execute(CActivityBase &activity, unsigned counter, IRowWriterMultiReader *rowStream, rowcount_t rowStreamCount, size32_t parentExtractSz, const byte * parentExtract) = 0;
     virtual void execute(CActivityBase &activity, unsigned counter, IThorGraphResults * graphLoopResults, size32_t parentExtractSz, const byte * parentExtract) = 0;
     virtual CGraphBase *queryGraph() = 0;
 };
@@ -391,10 +391,12 @@ protected:
     CFileUsageTable tmpFiles;
     CJobBase &job;
     mutable CriticalSection crit;
+    bool errorOnMissing;
+
 public:
     IMPLEMENT_IINTERFACE;
 
-    CGraphTempHandler(CJobBase &_job) : job(_job) { }
+    CGraphTempHandler(CJobBase &_job, bool _errorOnMissing) : job(_job), errorOnMissing(_errorOnMissing) { }
     ~CGraphTempHandler()
     {
     }
@@ -729,7 +731,7 @@ public:
     void setResults(IThorGraphResults *results);
     virtual void executeChild(size32_t parentExtractSz, const byte *parentExtract, IThorGraphResults *results, IThorGraphResults *graphLoopResults);
     virtual void executeChild(size32_t parentExtractSz, const byte *parentExtract);
-    virtual void serializeStats(MemoryBuffer &mb) { }
+    virtual bool serializeStats(MemoryBuffer &mb) { return false; }
     virtual bool prepare(size32_t parentExtractSz, const byte *parentExtract, bool checkDependencies, bool shortCircuit, bool async);
     virtual void create(size32_t parentExtractSz, const byte *parentExtract);
     virtual bool preStart(size32_t parentExtractSz, const byte *parentExtract);
@@ -849,7 +851,7 @@ public:
     const char *queryGraphName() const { return graphName; }
     bool queryForceLogging(graph_id graphId, bool def) const;
     ITimeReporter &queryTimeReporter() { return *timeReporter; }
-    virtual IGraphTempHandler *createTempHandler() = 0;
+    virtual IGraphTempHandler *createTempHandler(bool errorOnMissing) = 0;
     virtual CGraphBase *createGraph() = 0;
     void joinGraph(CGraphBase &graph);
     void startGraph(CGraphBase &graph, IGraphCallback &callback, bool checkDependencies, size32_t parentExtractSize, const byte *parentExtract);
@@ -950,7 +952,7 @@ protected:
     const bool &timeActivities; // purely for access efficiency
     size32_t parentExtractSz;
     const byte *parentExtract;
-    bool receiving, cancelledReceive;
+    bool receiving, cancelledReceive, reInit;
     unsigned maxCores; // NB: only used by acts that sort at the moment
 
 public:
@@ -963,6 +965,7 @@ public:
     inline const mptag_t queryMpTag() const { return mpTag; }
     inline const bool &queryAbortSoon() const { return abortSoon; }
     inline IHThorArg *queryHelper() const { return baseHelper; }
+    inline bool needReInit() const { return reInit; }
     inline const bool &queryTimeActivities() const { return timeActivities; } 
     void onStart(size32_t _parentExtractSz, const byte *_parentExtract) { parentExtractSz = _parentExtractSz; parentExtract = _parentExtract; }
     bool receiveMsg(CMessageBuffer &mb, const rank_t rank, const mptag_t mpTag, rank_t *sender=NULL, unsigned timeout=MP_WAIT_FOREVER);

+ 53 - 67
thorlcr/graph/thgraphmaster.cpp

@@ -183,17 +183,6 @@ void CSlaveMessageHandler::main()
                     msg.read(gid);
                     Owned<CMasterGraph> graph = (CMasterGraph *)job.getGraph(gid);
                     assertex(graph);
-                    size32_t parentExtractSz;
-                    msg.read(parentExtractSz);
-                    const byte *parentExtract = NULL;
-                    if (parentExtractSz)
-                    {
-                        parentExtract = msg.readDirect(parentExtractSz);
-                        StringBuffer msg("Graph(");
-                        msg.append(graph->queryGraphId()).append(") - initializing master graph with parentExtract ").append(parentExtractSz).append(" bytes");
-                        DBGLOG("%s", msg.str());
-                        parentExtract = graph->setParentCtx(parentExtractSz, parentExtract);
-                    }
                     {
                         CriticalBlock b(graph->queryCreateLock());
                         Owned<IThorActivityIterator> iter = graph->getIterator();
@@ -202,8 +191,6 @@ void CSlaveMessageHandler::main()
                         {
                             CMasterGraphElement &element = (CMasterGraphElement &)iter->query();
                             element.onCreate();
-                            if (isDiskInput(element.getKind()))
-                                element.onStart(parentExtractSz, parentExtract);
                         }
                     }
                     msg.clear();
@@ -218,19 +205,32 @@ void CSlaveMessageHandler::main()
                     Owned<CMasterGraph> graph = (CMasterGraph *)job.getGraph(gid);
                     assertex(graph);
                     CGraphElementArray toSerialize;
+                    CriticalBlock b(graph->queryCreateLock());
+                    size32_t parentExtractSz;
+                    msg.read(parentExtractSz);
+                    const byte *parentExtract = NULL;
+                    if (parentExtractSz)
                     {
-                        CriticalBlock b(graph->queryCreateLock());
-                        loop
-                        {
-                            activity_id id;
-                            msg.read(id);
-                            if (!id)
-                                break;
-                            CMasterGraphElement *element = (CMasterGraphElement *)graph->queryElement(id);
-                            assertex(element);
-                            element->doCreateActivity();
-                            toSerialize.append(*LINK(element));
-                        }
+                        parentExtract = msg.readDirect(parentExtractSz);
+                        StringBuffer msg("Graph(");
+                        msg.append(graph->queryGraphId()).append(") - initializing master graph with parentExtract ").append(parentExtractSz).append(" bytes");
+                        DBGLOG("%s", msg.str());
+                        parentExtract = graph->setParentCtx(parentExtractSz, parentExtract);
+                    }
+                    loop
+                    {
+                        activity_id id;
+                        msg.read(id);
+                        if (!id)
+                            break;
+                        CMasterGraphElement *element = (CMasterGraphElement *)graph->queryElement(id);
+                        assertex(element);
+                        element->deserializeStartContext(msg);
+                        element->doCreateActivity(parentExtractSz, parentExtract);
+                        CActivityBase *activity = element->queryActivity();
+                        if (activity && activity->needReInit())
+                            element->sentActInitData->set(slave, 0); // clear to permit serializeActivityInitData to resend
+                        toSerialize.append(*LINK(element));
                     }
                     msg.clear();
                     CMessageBuffer replyMsg;
@@ -529,13 +529,14 @@ bool CMasterGraphElement::checkUpdate()
 
 void CMasterGraphElement::initActivity()
 {
-    if (activity)
-        return;
+    CriticalBlock b(crit);
+    bool first = (NULL == activity);
     CGraphElementBase::initActivity();
-    ((CMasterActivity *)activity.get())->init();
+    if (first || activity->needReInit())
+        ((CMasterActivity *)activity.get())->init();
 }
 
-void CMasterGraphElement::doCreateActivity()
+void CMasterGraphElement::doCreateActivity(size32_t parentExtractSz, const byte *parentExtract)
 {
     bool ok=false;
     switch (getKind())
@@ -560,6 +561,8 @@ void CMasterGraphElement::doCreateActivity()
     if (!ok)
         return;
     onCreate();
+    if (isDiskInput(getKind()))
+       onStart(parentExtractSz, parentExtract);
     initActivity();
 }
 
@@ -652,7 +655,7 @@ public:
 class CMasterGraphTempHandler : public CGraphTempHandler
 {
 public:
-    CMasterGraphTempHandler(CJobBase &job) : CGraphTempHandler(job) { }
+    CMasterGraphTempHandler(CJobBase &job, bool errorOnMissing) : CGraphTempHandler(job, errorOnMissing) { }
 
     virtual bool removeTemp(const char *name)
     {
@@ -1239,7 +1242,7 @@ CJobMaster::CJobMaster(IConstWorkUnit &_workunit, const char *graphName, const c
     mpJobTag = allocateMPTag();
     slavemptag = allocateMPTag();
     slaveMsgHandler = new CSlaveMessageHandler(*this, slavemptag);
-    tmpHandler.setown(new CMasterGraphTempHandler(*this));
+    tmpHandler.setown(createTempHandler(true));
 }
 
 CJobMaster::~CJobMaster()
@@ -1707,9 +1710,9 @@ IBarrier *CJobMaster::createBarrier(mptag_t tag)
     return new CBarrierMaster(*jobComm, tag);
 }
 
-IGraphTempHandler *CJobMaster::createTempHandler()
+IGraphTempHandler *CJobMaster::createTempHandler(bool errorOnMissing)
 {
-    return new CMasterGraphTempHandler(*this);
+    return new CMasterGraphTempHandler(*this, errorOnMissing);
 }
 
 bool CJobMaster::fireException(IException *e)
@@ -2017,22 +2020,6 @@ void CMasterGraph::serializeCreateContexts(MemoryBuffer &mb)
     }
 }
 
-void CMasterGraph::serializeStartCtxs(MemoryBuffer &mb)
-{
-    Owned<IThorActivityIterator> iter = getTraverseIterator();
-    ForEach (*iter)
-    {
-        CMasterGraphElement &element = (CMasterGraphElement &)iter->query();
-        mb.append(element.queryId());
-        unsigned pos = mb.length();
-        mb.append((size32_t)0);
-        element.queryHelper()->serializeStartContext(mb);
-        size32_t sz = (mb.length()-pos)-sizeof(size32_t);
-        mb.writeDirect(pos, sizeof(sz), &sz);
-    }
-    mb.append((activity_id)0);
-}
-
 bool CMasterGraph::serializeActivityInitData(unsigned slave, MemoryBuffer &mb, IThorActivityIterator &iter)
 {
     CriticalBlock b(createdCrit);
@@ -2162,6 +2149,9 @@ void CMasterGraph::sendActivityInitData()
         ForEach(*iter)
         {
             CGraphElementBase &element = iter->query();
+            CActivityBase *activity = element.queryActivity();
+            if (activity && activity->needReInit())
+                element.sentActInitData->set(w, false); // force act init to be resent
             if (!element.sentActInitData->test(w)) // has it been sent
                 ++needActInit;
         }
@@ -2291,7 +2281,7 @@ void CMasterGraph::sendGraph()
     CMessageBuffer msg;
     msg.append(GraphInit);
     msg.append(job.queryKey());
-    node->serialize(msg); // everthing
+    node->serialize(msg); // everything
     if (TAG_NULL == executeReplyTag)
         executeReplyTag = queryJob().allocateMPTag();
     serializeMPtag(msg, executeReplyTag);
@@ -2320,7 +2310,7 @@ bool CMasterGraph::preStart(size32_t parentExtractSz, const byte *parentExtract)
     {
         sentStartCtx = true;
         CMessageBuffer msg;
-        serializeStartCtxs(msg);
+        serializeStartContexts(msg);
         try
         {
             jobM.broadcastToSlaves(msg, mpTag, LONGTIMEOUT, "startCtx", &bcastTag, true);
@@ -2414,23 +2404,19 @@ void CMasterGraph::getFinalProgress()
         {
             if (globals->getPropBool("@watchdogProgressEnabled"))
             {
-                HeartBeatPacket &hb = *(HeartBeatPacket *) msg.readDirect(sizeof(hb.packetsize));
-                if (hb.packetsize)
+                try
                 {
-                    size32_t sz = hb.packetsize-sizeof(hb.packetsize);
-                    if (sz)
-                    {
-                        msg.readDirect(sz);
-                        try
-                        {
-                            queryJobManager().queryDeMonServer()->takeHeartBeat(hb);
-                        }
-                        catch (IException *e)
-                        {
-                            GraphPrintLog(e, "Failure whilst deserializing stats/progress");
-                            e->Release();
-                        }
-                    }
+                    size32_t progressLen;
+                    msg.read(progressLen);
+                    MemoryBuffer progressData;
+                    progressData.setBuffer(progressLen, (void *)msg.readDirect(progressLen));
+                    const SocketEndpoint &ep = queryClusterGroup().queryNode(sender).endpoint();
+                    queryJobManager().queryDeMonServer()->takeHeartBeat(ep, progressData);
+                }
+                catch (IException *e)
+                {
+                    GraphPrintLog(e, "Failure whilst deserializing stats/progress");
+                    e->Release();
                 }
             }
         }

+ 2 - 3
thorlcr/graph/thgraphmaster.ipp

@@ -66,7 +66,6 @@ public:
     CriticalSection &queryCreateLock() { return createdCrit; }
     void handleSlaveDone(unsigned node, MemoryBuffer &mb);
     void serializeCreateContexts(MemoryBuffer &mb);
-    void serializeStartCtxs(MemoryBuffer &mb);
     bool serializeActivityInitData(unsigned slave, MemoryBuffer &mb, IThorActivityIterator &iter);
     void readActivityInitData(MemoryBuffer &mb, unsigned slave);
     bool deserializeStats(unsigned node, MemoryBuffer &mb);
@@ -147,7 +146,7 @@ public:
     }
     
 // CJobBase impls.
-    virtual IGraphTempHandler *createTempHandler();
+    virtual IGraphTempHandler *createTempHandler(bool errorOnMissing);
     virtual CGraphBase *createGraph();
 
     CMasterGraphElement *locateActivity(activity_id id)
@@ -277,7 +276,7 @@ public:
     bool sentCreateCtx;
 
     CMasterGraphElement(CGraphBase &owner, IPropertyTree &xgmml);
-    void doCreateActivity();
+    void doCreateActivity(size32_t parentExtractSz=0, const byte *parentExtract=NULL);
     virtual bool checkUpdate();
 
     virtual void initActivity();

+ 77 - 49
thorlcr/graph/thgraphslave.cpp

@@ -247,7 +247,11 @@ unsigned __int64 CSlaveActivity::queryLocalCycles() const
         if (TAKchildif == container.getKind())
         {
             if (inputs.ordinality() && (((unsigned)-1) != container.whichBranch))
-                inputCycles += inputs.item(container.whichBranch)->queryTotalCycles();
+            {
+                IThorDataLink *input = inputs.item(container.whichBranch);
+                if (input)
+                    inputCycles += input->queryTotalCycles();
+            }
         }
         else
         {
@@ -295,6 +299,8 @@ void CSlaveGraph::init(MemoryBuffer &mb)
     waitBarrier = job.createBarrier(waitBarrierTag);
     if (doneBarrierTag != TAG_NULL)
         doneBarrier = job.createBarrier(doneBarrierTag);
+    initialized = false;
+    progressActive = progressToCollect = false;
     unsigned subCount;
     mb.read(subCount);
     while (subCount--)
@@ -308,6 +314,8 @@ void CSlaveGraph::init(MemoryBuffer &mb)
 
 void CSlaveGraph::initWithActData(MemoryBuffer &in, MemoryBuffer &out)
 {
+    CriticalBlock b(progressCrit);
+    initialized = true;
     activity_id id;
     loop
     {
@@ -353,26 +361,21 @@ void CSlaveGraph::recvStartCtx()
         CMessageBuffer msg;
         if (!job.queryJobComm().recv(msg, 0, mpTag, NULL, LONGTIMEOUT))
             throw MakeStringException(0, "Error receiving startCtx data for graph: %"GIDPF"d", graphId);
-        activity_id id;
-        loop
-        {
-            msg.read(id);
-            if (0 == id) break;
-            CSlaveGraphElement *element = (CSlaveGraphElement *)queryElement(id);
-            assertex(element);
-            element->deserializeStartContext(msg);
-        }
+        deserializeStartContexts(msg);
     }
 }
 
-bool CSlaveGraph::recvActivityInitData()
+bool CSlaveGraph::recvActivityInitData(size32_t parentExtractSz, const byte *parentExtract)
 {
     bool ret = true;
     unsigned needActInit = 0;
     Owned<IThorActivityIterator> iter = getTraverseIterator();
     ForEach(*iter)
     {
-        CSlaveGraphElement &element = (CSlaveGraphElement &)iter->query();
+        CGraphElementBase &element = (CGraphElementBase &)iter->query();
+        CActivityBase *activity = element.queryActivity();
+        if (activity && activity->needReInit())
+            element.sentActInitData->set(0, false); // force act init to be resent
         if (!element.sentActInitData->test(0))
             ++needActInit;
     }
@@ -396,12 +399,18 @@ bool CSlaveGraph::recvActivityInitData()
             // initialize any for which no data was sent
             msg.append(smt_initActDataReq); // may cause graph to be created at master
             msg.append(queryGraphId());
+            assertex(!parentExtractSz || NULL!=parentExtract);
+            msg.append(parentExtractSz);
+            msg.append(parentExtractSz, parentExtract);
             Owned<IThorActivityIterator> iter = getTraverseIterator();
             ForEach(*iter)
             {
                 CSlaveGraphElement &element = (CSlaveGraphElement &)iter->query();
                 if (!element.sentActInitData->test(0))
+                {
                     msg.append(element.queryId());
+                    element.serializeStartContext(msg);
+                }
             }
             msg.append((activity_id)0);
             if (!queryJob().queryJobComm().sendRecv(msg, 0, queryJob().querySlaveMpTag(), LONGTIMEOUT))
@@ -458,7 +467,7 @@ bool CSlaveGraph::preStart(size32_t parentExtractSz, const byte *parentExtract)
     recvStartCtx();
     CGraphBase::preStart(parentExtractSz, parentExtract);
 
-    if (!recvActivityInitData())
+    if (!recvActivityInitData(parentExtractSz, parentExtract))
         return false;
     connect(); // only now do slave acts. have all their outputs prepared.
     if (isGlobal())
@@ -471,6 +480,11 @@ bool CSlaveGraph::preStart(size32_t parentExtractSz, const byte *parentExtract)
 
 void CSlaveGraph::start()
 {
+    {
+        SpinBlock b(progressActiveLock);
+        progressActive = true;
+        progressToCollect = true;
+    }
     bool forceAsync = !queryOwner() || isGlobal();
     Owned<IThorActivityIterator> iter = getSinkIterator();
     unsigned sinks = 0;
@@ -579,9 +593,6 @@ void CSlaveGraph::create(size32_t parentExtractSz, const byte *parentExtract)
                 CMessageBuffer msg;
                 msg.append(smt_initGraphReq);
                 msg.append(graphId);
-                assertex(!parentExtractSz || NULL!=parentExtract);
-                msg.append(parentExtractSz);
-                msg.append(parentExtractSz, parentExtract);
                 if (!queryJob().queryJobComm().sendRecv(msg, 0, queryJob().querySlaveMpTag(), LONGTIMEOUT))
                     throwUnexpected();
                 unsigned len;
@@ -608,6 +619,11 @@ void CSlaveGraph::abort(IException *e)
 void CSlaveGraph::done()
 {
     GraphPrintLog("End of sub-graph");
+    {
+        SpinBlock b(progressActiveLock);
+        progressActive = false;
+        progressToCollect = true; // NB: ensure collected after end of graph
+    }
     if (!aborted && (!queryOwner() || isGlobal()))
         getDoneSem.wait(); // must wait on master
     if (!queryOwner())
@@ -646,46 +662,62 @@ void CSlaveGraph::end()
     }
 }
 
-void CSlaveGraph::serializeStats(MemoryBuffer &mb)
+bool CSlaveGraph::serializeStats(MemoryBuffer &mb)
 {
+    unsigned beginPos = mb.length();
     mb.append(queryGraphId());
     unsigned cPos = mb.length();
     unsigned count = 0;
     mb.append(count);
     CriticalBlock b(progressCrit);
-    if (started || 0 == activityCount())
+    // until started and activities initialized, activities are not ready to serialize stats.
+    if ((started&&initialized) || 0 == activityCount())
     {
-        unsigned sPos = mb.length();
-        Owned<IThorActivityIterator> iter = getTraverseIterator();
-        ForEach (*iter)
+        bool collect=false;
         {
-            if (mb.length() > (DATA_MAX-30))
+            SpinBlock b(progressActiveLock);
+            if (progressActive || progressToCollect)
             {
-                WARNLOG("Act: Progress packet too big!");
-                break;
+                progressToCollect = false;
+                collect = true;
             }
-            
-            CGraphElementBase &element = iter->query();
-            CSlaveActivity &activity = (CSlaveActivity &)*element.queryActivity();
-            unsigned pos = mb.length();
-            mb.append(activity.queryContainer().queryId());
-            activity.serializeStats(mb);
-            if (pos == mb.length()-sizeof(activity_id))
-                mb.rewrite(pos);
-            else
-                ++count;
         }
-        mb.writeDirect(cPos, sizeof(count), &count);
-        mb.append(queryChildGraphCount());
+        if (collect)
+        {
+            unsigned sPos = mb.length();
+            Owned<IThorActivityIterator> iter = getTraverseIterator();
+            ForEach (*iter)
+            {
+                CGraphElementBase &element = iter->query();
+                CSlaveActivity &activity = (CSlaveActivity &)*element.queryActivity();
+                unsigned pos = mb.length();
+                mb.append(activity.queryContainer().queryId());
+                activity.serializeStats(mb);
+                if (pos == mb.length()-sizeof(activity_id))
+                    mb.rewrite(pos);
+                else
+                    ++count;
+            }
+            mb.writeDirect(cPos, sizeof(count), &count);
+        }
+        unsigned cqCountPos = mb.length();
+        unsigned cq=0;
+        mb.append(cq);
         Owned<IThorGraphIterator> childIter = getChildGraphs();
         ForEach(*childIter)
         {
             CSlaveGraph &graph = (CSlaveGraph &)childIter->query();
-            graph.serializeStats(mb);
+            if (graph.serializeStats(mb))
+                ++cq;
+        }
+        if (count || cq)
+        {
+            mb.writeDirect(cqCountPos, sizeof(cq), &cq);
+            return true;
         }
     }
-    else
-        mb.append((unsigned)0); // sub graph count
+    mb.rewrite(beginPos);
+    return false;
 }
 
 void CSlaveGraph::serializeDone(MemoryBuffer &mb)
@@ -732,11 +764,7 @@ void CSlaveGraph::getDone(MemoryBuffer &doneInfoMb)
             if (!queryOwner())
             {
                 if (globals->getPropBool("@watchdogProgressEnabled"))
-                {
-                    HeartBeatPacket hb;
-                    jobS.queryProgressHandler()->stopGraph(*this, &hb);
-                    doneInfoMb.append(hb.packetsize, &hb);
-                }
+                    jobS.queryProgressHandler()->stopGraph(*this, &doneInfoMb);
             }
             doneInfoMb.append(job.queryMaxDiskUsage());
             queryJob().queryTimeReporter().serialize(doneInfoMb);
@@ -959,7 +987,7 @@ public:
 class CSlaveGraphTempHandler : public CGraphTempHandler
 {
 public:
-    CSlaveGraphTempHandler(CJobBase &job) : CGraphTempHandler(job)
+    CSlaveGraphTempHandler(CJobBase &job, bool errorOnMissing) : CGraphTempHandler(job, errorOnMissing)
     {
     }
     virtual bool removeTemp(const char *name)
@@ -1027,7 +1055,7 @@ CJobSlave::CJobSlave(ISlaveWatchdog *_watchdog, IPropertyTree *_workUnitInfo, co
 #endif
     querySo.setown(createDllEntry(_querySo, false, NULL));
     codeCtx = new CThorCodeContextSlave(*this, *querySo, *userDesc, slavemptag);
-    tmpHandler.setown(new CSlaveGraphTempHandler(*this));
+    tmpHandler.setown(createTempHandler(true));
     startJob();
 }
 
@@ -1053,7 +1081,7 @@ void CJobSlave::startJob()
     unsigned __int64 freeSpaceRep = getFreeSpace(queryBaseDirectory(true));
     PROGLOG("Disk space: %s = %"I64F"d, %s = %"I64F"d", queryBaseDirectory(), freeSpace/0x100000, queryBaseDirectory(true), freeSpaceRep/0x100000);
 
-    unsigned minFreeSpace = getWorkUnitValueInt("MINIMUM_DISK_SPACE", 0);
+    unsigned minFreeSpace = (unsigned)getWorkUnitValueInt("MINIMUM_DISK_SPACE", 0);
     if (minFreeSpace)
     {
         if (freeSpace < ((unsigned __int64)minFreeSpace)*0x100000)
@@ -1102,9 +1130,9 @@ IBarrier *CJobSlave::createBarrier(mptag_t tag)
     return new CBarrierSlave(*jobComm, tag);
 }
 
-IGraphTempHandler *CJobSlave::createTempHandler()
+IGraphTempHandler *CJobSlave::createTempHandler(bool errorOnMissing)
 {
-    return new CSlaveGraphTempHandler(*this);
+    return new CSlaveGraphTempHandler(*this, errorOnMissing);
 }
 
 // IGraphCallback

+ 5 - 4
thorlcr/graph/thgraphslave.hpp

@@ -90,8 +90,9 @@ class graphslave_decl CSlaveGraph : public CGraphBase
     CJobSlave &jobS;
     Owned<IInterface> progressHandler;
     Semaphore getDoneSem;
-    bool needsFinalInfo;
+    bool initialized, progressActive, progressToCollect;
     CriticalSection progressCrit;
+    SpinLock progressActiveLock;
 
 public:
 
@@ -101,7 +102,7 @@ public:
     void connect();
     void init(MemoryBuffer &mb);
     void recvStartCtx();
-    bool recvActivityInitData();
+    bool recvActivityInitData(size32_t parentExtractSz, const byte *parentExtract);
     void setExecuteReplyTag(mptag_t _executeReplyTag) { executeReplyTag = _executeReplyTag; }
     void initWithActData(MemoryBuffer &in, MemoryBuffer &out);
     void getDone(MemoryBuffer &doneInfoMb);
@@ -109,7 +110,7 @@ public:
     IThorResult *getGlobalResult(CActivityBase &activity, IRowInterfaces *rowIf, unsigned id);
 
     virtual void executeSubGraph(size32_t parentExtractSz, const byte *parentExtract);
-    virtual void serializeStats(MemoryBuffer &mb);
+    virtual bool serializeStats(MemoryBuffer &mb);
     virtual bool preStart(size32_t parentExtractSz, const byte *parentExtract);
     virtual void start();
     virtual void create(size32_t parentExtractSz, const byte *parentExtract);
@@ -158,7 +159,7 @@ public:
 
     virtual __int64 getWorkUnitValueInt(const char *prop, __int64 defVal) const;
     virtual StringBuffer &getWorkUnitValue(const char *prop, StringBuffer &str) const;
-    virtual IGraphTempHandler *createTempHandler();
+    virtual IGraphTempHandler *createTempHandler(bool errorOnMissing);
     virtual CGraphBase *createGraph()
     {
         return new CSlaveGraph(*this);

+ 160 - 78
thorlcr/master/mawatchdog.cpp

@@ -42,63 +42,63 @@ public:
     SocketEndpoint ep;
     bool alive;
     bool markdead;
-    HeartBeatPacket lastpacket;
     CMachineStatus(const SocketEndpoint &_ep)
         : ep(_ep)
     {
         alive = true;
         markdead = false;
-        memset(&lastpacket,0,sizeof(lastpacket));
     }
-    void update(HeartBeatPacket &packet)
+    void update(HeartBeatPacketHeader &packet)
     {
         alive = true;
-        if (markdead) {
+        if (markdead)
+        {
             markdead = false;
             StringBuffer epstr;
             ep.getUrlStr(epstr);
             LOG(MCdebugProgress, unknownJob, "Watchdog : Marking Machine as Up! [%s]", epstr.str());
         }
-        if(packet.progressSize > 0)
-            lastpacket = packet;
     }   
 };
 
 
-CMasterWatchdog::CMasterWatchdog() : threaded("CMasterWatchdog")
+CMasterWatchdogBase::CMasterWatchdogBase() : threaded("CMasterWatchdogBase")
 {
-    stopped = false;
-    sock = NULL;
-    if (globals->getPropBool("@watchdogEnabled"))
-    {
-        if (!sock)
-            sock = ISocket::udp_create(getFixedPort(TPORT_watchdog));
-        LOG(MCdebugProgress, unknownJob, "Starting watchdog");
+    stopped = true;
+    watchdogMachineTimeout = globals->getPropInt("@slaveDownTimeout", DEFAULT_SLAVEDOWNTIMEOUT);
+    if (watchdogMachineTimeout <= HEARTBEAT_INTERVAL*10)
+        watchdogMachineTimeout = HEARTBEAT_INTERVAL*10;
+    watchdogMachineTimeout *= 1000;
 #ifdef _WIN32
-        threaded.adjustPriority(+1); // it is critical that watchdog packets get through.
+    threaded.adjustPriority(+1); // it is critical that watchdog packets get through.
 #endif
-        threaded.init(this);
-    }
 }
 
-CMasterWatchdog::~CMasterWatchdog()
+CMasterWatchdogBase::~CMasterWatchdogBase()
 {
     stop();
-    ::Release(sock);
-    ForEachItemInRev(i, state) {
+    ForEachItemInRev(i, state)
+    {
         CMachineStatus *mstate=(CMachineStatus *)state.item(i);
         delete mstate;
     }
 }
 
-void CMasterWatchdog::addSlave(const SocketEndpoint &slave)
+void CMasterWatchdogBase::start()
+{
+    PROGLOG("Starting watchdog");
+    stopped = false;
+    threaded.init(this);
+}
+
+void CMasterWatchdogBase::addSlave(const SocketEndpoint &slave)
 {
     synchronized block(mutex);
     CMachineStatus *mstate=new CMachineStatus(slave);
     state.append(mstate);
 }
 
-void CMasterWatchdog::removeSlave(const SocketEndpoint &slave)
+void CMasterWatchdogBase::removeSlave(const SocketEndpoint &slave)
 {
     synchronized block(mutex);
     CMachineStatus *ms = findSlave(slave);
@@ -108,9 +108,10 @@ void CMasterWatchdog::removeSlave(const SocketEndpoint &slave)
     }
 }
 
-CMachineStatus *CMasterWatchdog::findSlave(const SocketEndpoint &ep)
+CMachineStatus *CMasterWatchdogBase::findSlave(const SocketEndpoint &ep)
 {
-    ForEachItemInRev(i, state) {
+    ForEachItemInRev(i, state)
+    {
         CMachineStatus *mstate=(CMachineStatus *)state.item(i);
         if (mstate->ep.equals(ep))
             return mstate;
@@ -119,7 +120,7 @@ CMachineStatus *CMasterWatchdog::findSlave(const SocketEndpoint &ep)
 }
 
 
-void CMasterWatchdog::stop()
+void CMasterWatchdogBase::stop()
 {
     threaded.adjustPriority(0); // restore to normal before stopping
     { synchronized block(mutex);
@@ -128,27 +129,16 @@ void CMasterWatchdog::stop()
         LOG(MCdebugProgress, unknownJob, "Stopping watchdog");
         stopped = true;
     }
-    if (sock)
-    {
-        SocketEndpoint masterEp(getMasterPortBase());
-        StringBuffer ipStr;
-        masterEp.getIpText(ipStr);
-        Owned<ISocket> sock = ISocket::udp_connect(getFixedPort(masterEp.port, TPORT_watchdog), ipStr.str());
-        HeartBeatPacket hbpacket;
-        memset(&hbpacket, 0, sizeof(hbpacket));
-        MemoryBuffer mb;
-        size32_t sz = ThorCompress(&hbpacket, hbpacket.packetSize(), mb);
-        sock->write(mb.toByteArray(), sz);
-        sock->close();
-    }
+    stopReading();
     threaded.join();
     LOG(MCdebugProgress, unknownJob, "Stopped watchdog");
 }
 
-void CMasterWatchdog::checkMachineStatus()
+void CMasterWatchdogBase::checkMachineStatus()
 {
     synchronized block(mutex);
-    ForEachItemInRev(i, state) {
+    ForEachItemInRev(i, state)
+    {
         CMachineStatus *mstate=(CMachineStatus *)state.item(i);
         if (!mstate->alive)
         {
@@ -167,67 +157,78 @@ void CMasterWatchdog::checkMachineStatus()
             mstate->alive = false;
         }
     }
+}
 
+unsigned CMasterWatchdogBase::readPacket(HeartBeatPacketHeader &hb, MemoryBuffer &mb)
+{
+    mb.clear();
+    unsigned read = readData(mb);
+    if (read)
+    {
+        if (read < sizeof(HeartBeatPacketHeader))
+        {
+            WARNLOG("Receive Monitor Packet: wrong size, got %d, less than HeartBeatPacketHeader size", read);
+            return 0;
+        }
+        memcpy(&hb, mb.readDirect(sizeof(HeartBeatPacketHeader)), sizeof(HeartBeatPacketHeader));
+        if (read != hb.packetSize)  // check for corrupt packets
+        {
+            WARNLOG("Receive Monitor Packet: wrong size, expected %d, got %d", hb.packetSize, read);
+            return 0;
+        }
+        mb.setLength(hb.packetSize);
+        return hb.packetSize;
+    }
+    else
+        mb.clear();
+    return 0;
 }
 
-void CMasterWatchdog::main()
+void CMasterWatchdogBase::main()
 {
     LOG(MCdebugProgress, unknownJob, "Started watchdog");
     unsigned lastbeat=msTick();
     unsigned lastcheck=lastbeat;
 
-    unsigned watchdogMachineTimeout = globals->getPropInt("@slaveDownTimeout", DEFAULT_SLAVEDOWNTIMEOUT);
-    if (watchdogMachineTimeout <= HEARTBEAT_INTERVAL*10)
-        watchdogMachineTimeout = HEARTBEAT_INTERVAL*10;
-    watchdogMachineTimeout *= 1000;
     retrycount = 0;
-    try {
-        while (!stopped) {
-            HeartBeatPacket hbpacket;
-            try {
-                size32_t read;
-                MemoryBuffer packetCompressedMb;
-                sock->readtms(packetCompressedMb.reserveTruncate(hbpacket.maxPacketSize()), hbpacket.minPacketSize(), hbpacket.maxPacketSize(), read, watchdogMachineTimeout);
-                MemoryBuffer packetMb;
-                read = ThorExpand(packetCompressedMb.toByteArray(), read, &hbpacket, hbpacket.maxPacketSize());
-                if (0==hbpacket.packetsize)
-                    break; // signal to stop
-                if(read > hbpacket.minPacketSize() && read == hbpacket.packetsize)  // check for corrupt packets
+    try
+    {
+        while (!stopped)
+        {
+            HeartBeatPacketHeader hb;
+            MemoryBuffer progressData;
+            unsigned sz = readPacket(hb, progressData);
+            if (stopped)
+                break;
+            else if (sz)
+            {
+                synchronized block(mutex);
+                CMachineStatus *ms = findSlave(hb.sender);
+                if (ms)
                 {
-                    synchronized block(mutex);
-                    CMachineStatus *ms = findSlave(hbpacket.sender);
-                    if (ms) 
+                    ms->update(hb);
+                    if (progressData.remaining())
                     {
-                        ms->update(hbpacket);
                         Owned<IJobManager> jobManager = getJobManager();
                         if (jobManager)
-                            jobManager->queryDeMonServer()->takeHeartBeat(hbpacket);
-                    }
-                    else {
-                        StringBuffer epstr;
-                        hbpacket.sender.getUrlStr(epstr);
-                        LOG(MCdebugProgress, unknownJob, "Watchdog : Unknown Machine! [%s]", epstr.str()); //TBD
+                            jobManager->queryDeMonServer()->takeHeartBeat(hb.sender, progressData);
                     }
                 }
                 else
                 {
-                    LOG(MCdebugProgress, unknownJob, "Receive Monitor Packet: wrong size, expected %d, got %d", hbpacket.packetsize, read);
+                    StringBuffer epstr;
+                    hb.sender.getUrlStr(epstr);
+                    LOG(MCdebugProgress, unknownJob, "Watchdog : Unknown Machine! [%s]", epstr.str()); //TBD
                 }
             }
-            catch (IJSOCK_Exception *e)
-            {
-                if ((e->errorCode()!=JSOCKERR_timeout_expired)&&(e->errorCode()!=JSOCKERR_broken_pipe)&&(e->errorCode()!=JSOCKERR_graceful_close)) 
-                    throw;
-                e->Release();
-            }
-            if (stopped)
-                break;
             unsigned now=msTick();
-            if (now-lastcheck>watchdogMachineTimeout) {
+            if (now-lastcheck>watchdogMachineTimeout)
+            {
                 checkMachineStatus();
                 lastcheck = msTick();
             }
-            if (now-lastbeat>THORBEAT_INTERVAL) {
+            if (now-lastbeat>THORBEAT_INTERVAL)
+            {
                 if (retrycount<=0) retrycount=THORBEAT_RETRY_INTERVAL; else retrycount -= THORBEAT_INTERVAL;
                 lastbeat = msTick();
             }
@@ -240,3 +241,84 @@ void CMasterWatchdog::main()
     }
 }
 
+
+class CMasterWatchdogUDP : public CMasterWatchdogBase
+{
+    ISocket *sock;
+public:
+    CMasterWatchdogUDP()
+    {
+        sock = ISocket::udp_create(getFixedPort(TPORT_watchdog));
+        start();
+    }
+    ~CMasterWatchdogUDP()
+    {
+        ::Release(sock);
+    }
+    virtual unsigned readData(MemoryBuffer &mb)
+    {
+        size32_t read;
+        try
+        {
+            sock->readtms(mb.reserveTruncate(UDP_DATA_MAX), sizeof(HeartBeatPacketHeader), UDP_DATA_MAX, read, watchdogMachineTimeout);
+        }
+        catch (IJSOCK_Exception *e)
+        {
+            if ((e->errorCode()!=JSOCKERR_timeout_expired)&&(e->errorCode()!=JSOCKERR_broken_pipe)&&(e->errorCode()!=JSOCKERR_graceful_close))
+                throw;
+            e->Release();
+            return 0; // will retry
+        }
+        return read;
+    }
+    virtual void stopReading()
+    {
+        if (sock)
+        {
+            SocketEndpoint masterEp(getMasterPortBase());
+            StringBuffer ipStr;
+            masterEp.getIpText(ipStr);
+            Owned<ISocket> sock = ISocket::udp_connect(getFixedPort(masterEp.port, TPORT_watchdog), ipStr.str());
+            // send empty packet, stopped set, will cease reading
+            HeartBeatPacketHeader hb;
+            memset(&hb, 0, sizeof(hb));
+            hb.packetSize = sizeof(HeartBeatPacketHeader);
+            sock->write(&hb, sizeof(HeartBeatPacketHeader));
+            sock->close();
+        }
+    }
+};
+
+/////////////////////
+
+class CMasterWatchdogMP : public CMasterWatchdogBase
+{
+public:
+    CMasterWatchdogMP()
+    {
+        start();
+    }
+    virtual unsigned readData(MemoryBuffer &mb)
+    {
+        CMessageBuffer msg;
+        rank_t sender;
+        if (!queryClusterComm().recv(msg, RANK_ALL, MPTAG_THORWATCHDOG, &sender, watchdogMachineTimeout))
+            return 0;
+        mb.swapWith(msg);
+        return mb.length();
+    }
+    virtual void stopReading()
+    {
+        queryClusterComm().cancel(0, MPTAG_THORWATCHDOG);
+    }
+};
+
+/////////////////////
+
+CMasterWatchdogBase *createMasterWatchdog(bool udp)
+{
+    if (udp)
+        return new CMasterWatchdogUDP();
+    else
+        return new CMasterWatchdogMP();
+}

+ 18 - 11
thorlcr/master/mawatchdog.hpp

@@ -25,28 +25,35 @@
 #include "jmutex.hpp"
 
 class CMachineStatus;
-struct HeartBeatPacket;
+struct HeartBeatPacketHeader;
 
-class CMasterWatchdog : public CSimpleInterface, implements IThreaded
+class CMasterWatchdogBase : public CSimpleInterface, implements IThreaded
 {
+    PointerArray state;
+    SocketEndpoint master;
+    Mutex mutex;
+    int retrycount;
     CThreaded threaded;
+protected:
+    bool stopped;
+    unsigned watchdogMachineTimeout;
 public:
-    CMasterWatchdog();
-    ~CMasterWatchdog();
+    CMasterWatchdogBase();
+    ~CMasterWatchdogBase();
     void addSlave(const SocketEndpoint &slave);
     void removeSlave(const SocketEndpoint &slave);
     CMachineStatus *findSlave(const SocketEndpoint &ep);
     void checkMachineStatus();
+    unsigned readPacket(HeartBeatPacketHeader &hb, MemoryBuffer &mb);
+    void start();
     void stop();
     void main();
-private:
-    PointerArray state;
-    SocketEndpoint master;
-    ISocket *sock;
-    Mutex mutex;
-    bool stopped;
-    int retrycount;
+
+    virtual unsigned readData(MemoryBuffer &mb) = 0;
+    virtual void stopReading() = 0;
 };
 
+CMasterWatchdogBase *createMasterWatchdog(bool udp=false);
+
 #endif
 

+ 24 - 23
thorlcr/master/thdemonserver.cpp

@@ -25,6 +25,7 @@
 
 #include "thormisc.hpp"
 #include "thorport.hpp"
+#include "thcompressutil.hpp"
 #include "thgraphmaster.ipp"
 #include "thgraphmanager.hpp"
 #include "thwatchdog.hpp"
@@ -159,28 +160,27 @@ public:
         reportRate = globals->getPropInt("@watchdogProgressInterval", 30);
     }
 
-    virtual void takeHeartBeat(HeartBeatPacket & hbpacket)
+    virtual void takeHeartBeat(const SocketEndpoint &sender, MemoryBuffer &progressMb)
     {
         synchronized block(mutex);
-        
-        if((hbpacket.packetsize>sizeof(hbpacket.packetsize))&&(hbpacket.progressSize > 0))
+        if (0 == activeGraphs.ordinality())
         {
-            if (0 == activeGraphs.ordinality())
-            {
-                StringBuffer urlStr;
-                LOG(MCdebugProgress, unknownJob, "heartbeat packet received with no active graphs, from=%s", hbpacket.sender.getUrlStr(urlStr).str());
-                return;
-            }
-            rank_t node = querySlaveGroup().rank(hbpacket.sender);
-            assertex(node != RANK_NULL);
-
-            MemoryBuffer statsMb;
-            statsMb.setBuffer(hbpacket.progressSize, hbpacket.perfdata);
+            StringBuffer urlStr;
+            LOG(MCdebugProgress, unknownJob, "heartbeat packet received with no active graphs, from=%s", sender.getUrlStr(urlStr).str());
+            return;
+        }
+        rank_t node = querySlaveGroup().rank(sender);
+        assertex(node != RANK_NULL);
 
-            while (statsMb.remaining())
+        size32_t compressedProgressSz = progressMb.remaining();
+        if (compressedProgressSz)
+        {
+            MemoryBuffer uncompressedMb;
+            ThorExpand(progressMb.readDirect(compressedProgressSz), compressedProgressSz, uncompressedMb);
+            do
             {
                 graph_id graphId;
-                statsMb.read(graphId);
+                uncompressedMb.read(graphId);
                 CMasterGraph *graph = NULL;
                 ForEachItemIn(g, activeGraphs) if (activeGraphs.item(g).queryGraphId() == graphId) graph = (CMasterGraph *)&activeGraphs.item(g);
                 if (!graph)
@@ -188,18 +188,19 @@ public:
                     LOG(MCdebugProgress, unknownJob, "heartbeat received from unknown graph %"GIDPF"d", graphId);
                     break;
                 }
-                if (!graph->deserializeStats(node, statsMb))
+                if (!graph->deserializeStats(node, uncompressedMb))
                 {
                     LOG(MCdebugProgress, unknownJob, "heartbeat error in graph %"GIDPF"d", graphId);
                     break;
                 }
             }
-            unsigned now=msTick();
-            if (now-lastReport > 1000*reportRate) 
-            {
-                reportGraph(false);
-                lastReport = msTick();
-            }
+            while (uncompressedMb.remaining());
+        }
+        unsigned now=msTick();
+        if (now-lastReport > 1000*reportRate)
+        {
+            reportGraph(false);
+            lastReport = msTick();
         }
     }
     void startGraph(CGraphBase *graph)

+ 1 - 1
thorlcr/master/thdemonserver.hpp

@@ -27,7 +27,7 @@ interface IWUGraphProgress;
 class CGraphBase;
 interface IDeMonServer : extends IInterface
 {
-    virtual void takeHeartBeat(HeartBeatPacket & hbpacket) = 0;
+    virtual void takeHeartBeat(const SocketEndpoint &sender, MemoryBuffer &progressMbb) = 0;
     virtual void startGraph(CGraphBase *graph) = 0;
     virtual void reportGraph(IWUGraphProgress *progress, CGraphBase *graph, bool finished) = 0;
     virtual void endGraph(CGraphBase *graph, bool success) = 0;

+ 2 - 2
thorlcr/master/thmastermain.cpp

@@ -133,7 +133,7 @@ class CRegistryServer : public CSimpleInterface
         }
     } deregistrationWatch;
 public:
-    Linked<CMasterWatchdog> watchdog;
+    Linked<CMasterWatchdogBase> watchdog;
     IBitSet *status;
 
     CRegistryServer()  : deregistrationWatch(*this), stopped(false)
@@ -142,7 +142,7 @@ public:
         msgDelay = SLAVEREG_VERIFY_DELAY;
         slavesRegistered = 0;
         if (globals->getPropBool("@watchdogEnabled"))
-            watchdog.setown(new CMasterWatchdog);
+            watchdog.setown(createMasterWatchdog(globals->getPropBool("@useUDPWatchdog")));
         else
             globals->setPropBool("@watchdogProgressEnabled", false);
         CriticalBlock b(regCrit);

+ 8 - 8
thorlcr/msort/tsortl.cpp

@@ -63,11 +63,11 @@ public:
 
 struct TransferStreamHeader
 {
-    rowmap_t numrecs;
+    rowcount_t numrecs;
     rowcount_t pos;
-    size32_t   recsize;
+    size32_t recsize;
     unsigned id;
-    TransferStreamHeader(rowcount_t _pos, rowmap_t _numrecs, unsigned _recsize, unsigned _id) 
+    TransferStreamHeader(rowcount_t _pos, rowcount_t _numrecs, unsigned _recsize, unsigned _id)
         : pos(_pos), numrecs(_numrecs), recsize(_recsize), id(_id)
     {
     }
@@ -281,22 +281,22 @@ public:
 };
 
 
-IRowStream *ConnectMergeRead(unsigned id,IRowInterfaces *rowif,SocketEndpoint &nodeaddr,rowcount_t startrec,rowmap_t numrecs)
+IRowStream *ConnectMergeRead(unsigned id,IRowInterfaces *rowif,SocketEndpoint &nodeaddr,rowcount_t startrec,rowcount_t numrecs)
 {
     Owned<ISocket> socket = DoConnect(nodeaddr);
     TransferStreamHeader hdr(startrec,numrecs,0,id);
 #ifdef _FULL_TRACE
     StringBuffer s;
     nodeaddr.getUrlStr(s);
-    PROGLOG("ConnectMergeRead(%d,%s,%x,%"RCPF"d,%"RMF"u)",id,s.str(),(unsigned)(memsize_t)socket.get(),startrec,numrecs);
+    PROGLOG("ConnectMergeRead(%d,%s,%x,%"RCPF"d,%"RCPF"u)",id,s.str(),(unsigned)(memsize_t)socket.get(),startrec,numrecs);
 #endif
     hdr.winrev();
     socket->write(&hdr,sizeof(hdr));
-    return  new CSocketRowStream(id,rowif->queryRowAllocator(),rowif->queryRowDeserializer(),socket);
+    return new CSocketRowStream(id,rowif->queryRowAllocator(),rowif->queryRowDeserializer(),socket);
 }
 
 
-ISocketRowWriter *ConnectMergeWrite(IRowInterfaces *rowif,ISocket *socket,size32_t bufsize,rowcount_t &startrec,rowmap_t &numrecs)
+ISocketRowWriter *ConnectMergeWrite(IRowInterfaces *rowif,ISocket *socket,size32_t bufsize,rowcount_t &startrec,rowcount_t &numrecs)
 {
     TransferStreamHeader hdr;
     socket->read(&hdr,sizeof(hdr));
@@ -306,7 +306,7 @@ ISocketRowWriter *ConnectMergeWrite(IRowInterfaces *rowif,ISocket *socket,size32
 #ifdef _FULL_TRACE
     char name[100];
     int port = socket->peer_name(name,sizeof(name));
-    PROGLOG("ConnectMergeWrite(%d,%s:%d,%x,%"RCPF"d,%"RMF"u)",hdr.id,name,port,(unsigned)(memsize_t)socket,startrec,numrecs);
+    PROGLOG("ConnectMergeWrite(%d,%s:%d,%x,%"RCPF"d,%"RCPF"u)",hdr.id,name,port,(unsigned)(memsize_t)socket,startrec,numrecs);
 #endif
     return new CSocketRowWriter(hdr.id,rowif,socket,bufsize);
 }

+ 36 - 39
thorlcr/msort/tsortm.cpp

@@ -85,7 +85,7 @@ public:
     unsigned short  mpport;
     mptag_t         mpTagRPC;
     unsigned        beat;
-    rowmap_t        numrecs;
+    rowcount_t      numrecs;
     offset_t        slavesize;
     bool            overflow;
     unsigned        scale;     // num times overflowed
@@ -157,8 +157,7 @@ public:
         scale = 1;
     }
 
-    void AdjustNumRecs(rowmap_t n);
-
+    void AdjustNumRecs(rowcount_t n);
 };
 
 class CTimer
@@ -414,7 +413,7 @@ public:
         total = 0;
         stotal = 0;
         totalmem = 0;
-        minrecsonnode = UINT_MAX;
+        minrecsonnode = RCMAX;
         maxrecsonnode = 0;
         numnodes = slaves.ordinality();
         estrecsize = 100;
@@ -534,11 +533,12 @@ public:
     unsigned __int64 CalcMinMax(OwnedConstThorRow &min, OwnedConstThorRow &max)
     {
         // initialize min/max keys
-        unsigned __int64 tot=0;
+        rowcount_t tot=0;
         unsigned i;
         size32_t ers = 0;
         unsigned ersn=0;
-        for (i=0;i<numnodes;i++) {
+        for (i=0;i<numnodes;i++)
+        {
             CSortNode &slave = slaves.item(i);
             if (slave.numrecs==0)
                 continue;
@@ -546,13 +546,14 @@ public:
             void *p = NULL;
             size32_t retlen = 0;
             size32_t avrecsize=0;
-            rowmap_t num=slave.GetMinMax(retlen,p,avrecsize);
+            rowcount_t num=slave.GetMinMax(retlen,p,avrecsize);
             if (avrecsize) {
                 ers += avrecsize;       // should probably do mode but this is OK
                 ersn++;
             }
             tot += num;
-            if (num>0) {
+            if (num>0)
+            {
                 minmax.deserialize(retlen, p);
                 free(p);
                 const void *p = minmax.query(0);
@@ -595,12 +596,10 @@ public:
     }
 
 
-    rowmap_t *CalcPartitionUsingSampling()
+    rowcount_t *CalcPartitionUsingSampling()
     {   // doesn't support between
 #define OVERSAMPLE 16
-        OwnedMalloc<rowmap_t> splitMap(numnodes*numnodes, true);
-        if (sizeof(rowmap_t)<=4) 
-            assertex(total/numnodes<INT_MAX); // keep record numbers on individual nodes in 31 bits
+        OwnedMalloc<rowcount_t> splitMap(numnodes*numnodes, true);
         unsigned numsplits=numnodes-1;
         if (total==0) {
             // no partition info!
@@ -608,7 +607,7 @@ public:
             return splitMap.getClear();
         }
         unsigned averagesamples = OVERSAMPLE*numnodes;  
-        rowmap_t averagerecspernode = (rowmap_t)(total/numnodes);
+        rowcount_t averagerecspernode = (rowcount_t)(total/numnodes);
         CriticalSection asect;
         CThorExpandingRowArray sample(*activity, rowif, true);
 #ifdef ASYNC_PARTIONING
@@ -618,9 +617,9 @@ public:
             CThorExpandingRowArray &sample;
             CriticalSection &asect;
             unsigned averagesamples;
-            rowmap_t averagerecspernode;
+            rowcount_t averagerecspernode;
         public:
-            casyncfor1(NodeArray &_slaves, CThorExpandingRowArray &_sample, unsigned _averagesamples, rowmap_t _averagerecspernode, CriticalSection &_asect)
+            casyncfor1(NodeArray &_slaves, CThorExpandingRowArray &_sample, unsigned _averagesamples, rowcount_t _averagerecspernode, CriticalSection &_asect)
                 : slaves(_slaves), sample(_sample), asect(_asect)
             { 
                 averagesamples = _averagesamples;
@@ -629,7 +628,7 @@ public:
             void Do(unsigned i)
             {
                 CSortNode &slave = slaves.item(i);
-                unsigned slavesamples = averagerecspernode?((unsigned)((averagerecspernode/2+averagesamples*(count_t)slave.numrecs)/averagerecspernode)):1;  
+                unsigned slavesamples = averagerecspernode?((unsigned)((averagerecspernode/2+averagesamples*slave.numrecs)/averagerecspernode)):1;
                 //PrintLog("%d samples for %d",slavesamples,i);
                 if (slavesamples) {
                     size32_t samplebufsize;
@@ -646,7 +645,7 @@ public:
         unsigned i;
         for (i=0;i<numnodes;i++) {
             CSortNode &slave = slaves.item(i);
-            unsigned slavesamples = (unsigned)((count_t)averagesamples*(count_t)slave.numrecs/(count_t)averagerecspernode);
+            unsigned slavesamples = (unsigned)((count_t)averagesamples*slave.numrecs/(count_t)averagerecspernode);
             PrintLog("%d samples for %d",slavesamples,i);
             if (!slavesamples)
                 continue;
@@ -731,11 +730,11 @@ public:
         class casyncfor3: public CAsyncFor
         {
             NodeArray &slaves;
-            rowmap_t *splitmap;
+            rowcount_t *splitmap;
             unsigned numnodes;
             unsigned numsplits;
         public:
-            casyncfor3(NodeArray &_slaves,rowmap_t *_splitmap,unsigned _numnodes,unsigned _numsplits)
+            casyncfor3(NodeArray &_slaves,rowcount_t *_splitmap,unsigned _numnodes,unsigned _numsplits)
                 : slaves(_slaves)
             { 
                 splitmap = _splitmap;
@@ -746,7 +745,7 @@ public:
             {
                 CSortNode &slave = slaves.item(i);
                 if (slave.numrecs!=0) {
-                    rowmap_t *res=splitmap+(i*numnodes);
+                    rowcount_t *res=splitmap+(i*numnodes);
                     slave.MultiBinChopStop(numsplits,res);
                     res[numnodes-1] = slave.numrecs;
                 }
@@ -757,7 +756,7 @@ public:
         for (i=0;i<numnodes;i++) {
             CSortNode &slave = slaves.item(i);
             if (slave.numrecs!=0) {
-                rowmap_t *res=splitMap+(i*numnodes);
+                rowcount_t *res=splitMap+(i*numnodes);
                 slave.MultiBinChopStop(numsplits,res);
                 res[numnodes-1] = slave.numrecs;
             }
@@ -779,17 +778,15 @@ public:
     }
 
 
-    rowmap_t *CalcPartition(bool logging)
+    rowcount_t *CalcPartition(bool logging)
     {
         CriticalBlock block(ECFcrit);       
         // this is a bit long winded
-        if (sizeof(rowmap_t)<=4) 
-            assertex(stotal/numnodes<INT_MAX); // keep record numbers on individual nodes in 31 bits
 
         OwnedConstThorRow mink;
         OwnedConstThorRow maxk;
         // so as won't overflow
-        OwnedMalloc<rowmap_t> splitmap(numnodes*numnodes, true);
+        OwnedMalloc<rowcount_t> splitmap(numnodes*numnodes, true);
         if (CalcMinMax(mink,maxk)==0) {
             // no partition info!
             partitioninfo->kill();
@@ -956,7 +953,7 @@ public:
                 for (i=0;i<numnodes;i++) {
                     CSortNode &slave = slaves.item(i);
                     if (slave.numrecs!=0) {
-                        rowmap_t *res=splitmap+(i*numnodes);
+                        rowcount_t *res=splitmap+(i*numnodes);
                         slave.MultiBinChopStop(numsplits,res);
                         res[numnodes-1] = slave.numrecs;
                     }
@@ -1041,7 +1038,7 @@ public:
     }
 
 
-    rowmap_t *UsePartitionInfo(PartitionInfo &pi, bool uppercmp)
+    rowcount_t *UsePartitionInfo(PartitionInfo &pi, bool uppercmp)
     {
         unsigned i;
 #ifdef _TRACE
@@ -1057,10 +1054,10 @@ public:
         // first find split points
         unsigned numnodes = pi.numnodes;
         unsigned numsplits = numnodes-1;
-        OwnedMalloc<rowmap_t> splitMap(numnodes*numnodes, true);
-        OwnedMalloc<rowmap_t> res(numsplits);
+        OwnedMalloc<rowcount_t> splitMap(numnodes*numnodes, true);
+        OwnedMalloc<rowcount_t> res(numsplits);
         unsigned j;
-        rowmap_t *mapp=splitMap;
+        rowcount_t *mapp=splitMap;
         for (i=0;i<numnodes;i++) {
             CSortNode &slave = slaves.item(i);
             if (numsplits>0) {
@@ -1068,13 +1065,13 @@ public:
                 pi.splitkeys.serialize(mb);
                 assertex(pi.splitkeys.ordinality()==numsplits);
                 slave.MultiBinChop(mb.length(),(const byte *)mb.bufferBase(),numsplits,res,uppercmp?CMPFN_UPPER:CMPFN_COLLATE,true);
-                rowmap_t *resp = res;
-                rowmap_t p=*resp;
+                rowcount_t *resp = res;
+                rowcount_t p=*resp;
                 *mapp = p;
                 resp++;
                 mapp++;
                 for (j=1;j<numsplits;j++) {
-                    rowmap_t n = *resp;
+                    rowcount_t n = *resp;
                     *mapp = n;
                     if (p>n) {
                         ActPrintLog(activity, "ERROR: Split positions out of order!");
@@ -1091,7 +1088,7 @@ public:
 #ifdef _TRACE
 #ifdef TRACE_PARTITION
         ActPrintLog(activity, "UsePartitionInfo result");
-        rowmap_t *p = splitMap;
+        rowcount_t *p = splitMap;
         for (i=0;i<numnodes;i++) {
             StringBuffer s;
             s.appendf("%d: ",i);
@@ -1165,7 +1162,7 @@ public:
         partitioninfo->numnodes = numnodes;
     }
 
-    IThorException *CheckSkewed(unsigned __int64 threshold, double skewWarning, double skewError, rowmap_t n, unsigned __int64 total, rowcount_t max)
+    IThorException *CheckSkewed(unsigned __int64 threshold, double skewWarning, double skewError, unsigned n, rowcount_t total, rowcount_t max)
     {
         if (n<=0)
             return NULL;
@@ -1238,7 +1235,7 @@ public:
 #endif
         bool useAux = false; // JCSMORE using existing partioning and auxillary rowIf (only used if overflow)
         loop {
-            OwnedMalloc<rowmap_t> splitMap, splitMapUpper;
+            OwnedMalloc<rowcount_t> splitMap, splitMapUpper;
             CTimer timer;
             if (numnodes>1) {
                 timer.start();
@@ -1356,7 +1353,7 @@ public:
                     }
                 }
 
-                OwnedMalloc<rowmap_t> tot(numnodes, true);
+                OwnedMalloc<rowcount_t> tot(numnodes, true);
                 rowcount_t max=0;
                 unsigned imax=numnodes;
                 for (i=0;i<imax;i++) {
@@ -1461,9 +1458,9 @@ IThorSorterMaster *CreateThorSorterMaster(CActivityBase *activity)
 
 
 
-void CSortNode::AdjustNumRecs(rowmap_t num)
+void CSortNode::AdjustNumRecs(rowcount_t num)
 {
-    rowmap_t old = numrecs;
+    rowcount_t old = numrecs;
     numrecs = num;
     sorter.total += num-old;
     if (num>sorter.maxrecsonnode)

+ 33 - 58
thorlcr/msort/tsortmp.cpp

@@ -19,7 +19,6 @@ enum MPSlaveFunctions
     FN_GetMultiMidPoint,
     FN_GetMultiMidPointStart,
     FN_GetMultiMidPointStop,
-    FN_SingleBinChop,
     FN_MultiBinChop,
     FN_MultiBinChopStart,
     FN_MultiBinChopStop,
@@ -106,7 +105,7 @@ void SortSlaveMP::StartGather()
     sendRecv(mb);
 }
 
-void SortSlaveMP::GetGatherInfo(rowmap_t &numlocal, offset_t &totalsize, unsigned &overflowscale, bool hasserializer)
+void SortSlaveMP::GetGatherInfo(rowcount_t &numlocal, offset_t &totalsize, unsigned &overflowscale, bool hasserializer)
 {
     CMessageBuffer mb;
     mb.append((byte)FN_GetGatherInfo);
@@ -115,14 +114,14 @@ void SortSlaveMP::GetGatherInfo(rowmap_t &numlocal, offset_t &totalsize, unsigne
     mb.read(numlocal).read(totalsize).read(overflowscale);
 }
 
-rowmap_t SortSlaveMP::GetMinMax(size32_t &keybuffsize,void *&keybuff, size32_t &avrecsizesize)
+rowcount_t SortSlaveMP::GetMinMax(size32_t &keybuffsize,void *&keybuff, size32_t &avrecsizesize)
 {
     CMessageBuffer mb;
     mb.append((byte)FN_GetMinMax);
     sendRecv(mb);
     deserializeblk(mb,keybuffsize,keybuff);
     mb.read(avrecsizesize);
-    rowmap_t ret;
+    rowcount_t ret;
     mb.read(ret);
     return ret;
 }
@@ -167,28 +166,15 @@ void SortSlaveMP::GetMultiMidPointStop(size32_t &mkeybuffsize, void * &mkeybuf)
     deserializeblk(mb,mkeybuffsize,mkeybuf);
 }
 
-rowmap_t SortSlaveMP::SingleBinChop(size32_t keysize,const byte *key,byte cmpfn)
-{
-    CMessageBuffer mb;
-    mb.append((byte)FN_SingleBinChop);
-    serializeblk(mb,keysize,key).append(cmpfn);
-    sendRecv(mb);
-    rowmap_t ret;
-    mb.read(ret);
-    return ret;
-}
-
-void SortSlaveMP::MultiBinChop(size32_t keybuffsize,const byte *keybuff, unsigned num,rowmap_t *pos,byte cmpfn,bool useaux)
+void SortSlaveMP::MultiBinChop(size32_t keybuffsize,const byte *keybuff, unsigned num,rowcount_t *pos,byte cmpfn,bool useaux)
 {
     CMessageBuffer mb;
     mb.append((byte)FN_MultiBinChop);
     serializeblk(mb,keybuffsize,keybuff).append(num).append(cmpfn).append(useaux);
     sendRecv(mb);
-    mb.read(num*sizeof(rowmap_t),pos);
+    mb.read(num*sizeof(rowcount_t),pos);
 }
 
-
-
 void SortSlaveMP::MultiBinChopStart(size32_t keybuffsize,const byte *keybuff, byte cmpfn) /* async */
 {
     CMessageBuffer mb;
@@ -197,41 +183,41 @@ void SortSlaveMP::MultiBinChopStart(size32_t keybuffsize,const byte *keybuff, by
     sendRecv(mb);
 }
 
-void SortSlaveMP::MultiBinChopStop(unsigned num,rowmap_t *pos)
+void SortSlaveMP::MultiBinChopStop(unsigned num,rowcount_t *pos)
 {
     CMessageBuffer mb;
     mb.append((byte)FN_MultiBinChopStop);
     mb.append(num);
     sendRecv(mb);
-    mb.read(num*sizeof(rowmap_t),pos);
+    mb.read(num*sizeof(rowcount_t),pos);
 }
 
-void SortSlaveMP::OverflowAdjustMapStart( unsigned mapsize,rowmap_t *map,size32_t keybuffsize,const byte *keybuff, byte cmpfn,bool useaux) /* async */
+void SortSlaveMP::OverflowAdjustMapStart(unsigned mapsize,rowcount_t *map,size32_t keybuffsize,const byte *keybuff, byte cmpfn,bool useaux) /* async */
 {
     CMessageBuffer mb;
     mb.append((byte)FN_OverflowAdjustMapStart);
-    mb.append(mapsize).append(mapsize*sizeof(rowmap_t),map);
+    mb.append(mapsize).append(mapsize*sizeof(rowcount_t),map);
     serializeblk(mb,keybuffsize,keybuff).append(cmpfn).append(useaux);
     sendRecv(mb);
 
 }
 
-rowmap_t SortSlaveMP::OverflowAdjustMapStop( unsigned mapsize, rowmap_t *map)
+rowcount_t SortSlaveMP::OverflowAdjustMapStop( unsigned mapsize, rowcount_t *map)
 {
     CMessageBuffer mb;
     mb.append((byte)FN_OverflowAdjustMapStop);
     mb.append(mapsize);
     sendRecv(mb);
-    rowmap_t ret;
-    mb.read(ret).read(mapsize*sizeof(rowmap_t),map);
+    rowcount_t ret;
+    mb.read(ret).read(mapsize*sizeof(rowcount_t),map);
     return ret;
 }
 
-void SortSlaveMP::MultiMerge(unsigned mapsize,rowmap_t *map,unsigned num,SocketEndpoint* endpoints) /* async */
+void SortSlaveMP::MultiMerge(unsigned mapsize,rowcount_t *map,unsigned num,SocketEndpoint* endpoints) /* async */
 {
     CMessageBuffer mb;
     mb.append((byte)FN_MultiMerge);
-    mb.append(mapsize).append(mapsize*sizeof(rowmap_t),map);
+    mb.append(mapsize).append(mapsize*sizeof(rowcount_t),map);
     mb.append(num);
     while (num--) {
         endpoints->serialize(mb);
@@ -241,12 +227,12 @@ void SortSlaveMP::MultiMerge(unsigned mapsize,rowmap_t *map,unsigned num,SocketE
 }
 
 
-void SortSlaveMP::MultiMergeBetween(unsigned mapsize,rowmap_t *map,rowmap_t *mapupper,unsigned num,SocketEndpoint* endpoints) /* async */
+void SortSlaveMP::MultiMergeBetween(unsigned mapsize,rowcount_t *map,rowcount_t *mapupper,unsigned num,SocketEndpoint* endpoints) /* async */
 {
     CMessageBuffer mb;
     mb.append((byte)FN_MultiMergeBetween);
-    mb.append(mapsize).append(mapsize*sizeof(rowmap_t),map);
-    mb.append(mapsize*sizeof(rowmap_t),mapupper);
+    mb.append(mapsize).append(mapsize*sizeof(rowcount_t),map);
+    mb.append(mapsize*sizeof(rowcount_t),mapupper);
     mb.append(num);
     while (num--) {
         endpoints->serialize(mb);
@@ -352,7 +338,7 @@ bool SortSlaveMP::marshall(ISortSlaveMP &slave, ICommunicator* comm, mptag_t tag
             case FN_GetGatherInfo: {
                 bool hasserializer;
                 mb.read(hasserializer);
-                rowmap_t numlocal;
+                rowcount_t numlocal;
                 unsigned overflowscale;
                 offset_t totalsize;
                 slave.GetGatherInfo(numlocal,totalsize,overflowscale,hasserializer);
@@ -363,7 +349,7 @@ bool SortSlaveMP::marshall(ISortSlaveMP &slave, ICommunicator* comm, mptag_t tag
                 size32_t keybuffsize;
                 void *keybuff;
                 size32_t avrecsize;
-                rowmap_t ret = slave.GetMinMax(keybuffsize,keybuff,avrecsize);
+                rowcount_t ret = slave.GetMinMax(keybuffsize,keybuff,avrecsize);
                 serializeblk(mbout,keybuffsize,keybuff).append(avrecsize).append(ret);
                 free(keybuff);
             }
@@ -417,8 +403,8 @@ bool SortSlaveMP::marshall(ISortSlaveMP &slave, ICommunicator* comm, mptag_t tag
             case FN_MultiBinChopStop: {
                 unsigned num;
                 mb.read(num);
-                void *out = mbout.reserveTruncate(num*sizeof(rowmap_t));
-                slave.MultiBinChopStop(num,(rowmap_t *)out);
+                void *out = mbout.reserveTruncate(num*sizeof(rowcount_t));
+                slave.MultiBinChopStop(num,(rowcount_t *)out);
             }
             break;
             case FN_GetMultiMidPointStop: {
@@ -429,17 +415,6 @@ bool SortSlaveMP::marshall(ISortSlaveMP &slave, ICommunicator* comm, mptag_t tag
                 free(mkeybuff);
             }
             break;
-            case FN_SingleBinChop: {
-                size32_t keysize;
-                byte * key;
-                deserializeblk(mb,keysize,key);
-                byte cmpfn;
-                mb.read(cmpfn);
-                rowmap_t ret = slave.SingleBinChop(keysize,key,cmpfn);
-                mbout.append(ret);
-                free(key);
-            }
-            break;
             case FN_MultiBinChopStart: {
                 replydone = true;
                 comm->reply(mbout);
@@ -460,8 +435,8 @@ bool SortSlaveMP::marshall(ISortSlaveMP &slave, ICommunicator* comm, mptag_t tag
                 byte cmpfn;
                 bool useaux;
                 mb.read(num).read(cmpfn).read(useaux);
-                void *out = mbout.reserveTruncate(num*sizeof(rowmap_t));
-                slave.MultiBinChop(keybuffsize,(const byte *)keybuff,num,(rowmap_t *)out,cmpfn,useaux);
+                void *out = mbout.reserveTruncate(num*sizeof(rowcount_t));
+                slave.MultiBinChop(keybuffsize,(const byte *)keybuff,num,(rowcount_t *)out,cmpfn,useaux);
                 free(keybuff);
             }
             break;
@@ -470,7 +445,7 @@ bool SortSlaveMP::marshall(ISortSlaveMP &slave, ICommunicator* comm, mptag_t tag
                 comm->reply(mbout);
                 unsigned mapsize;
                 mb.read(mapsize);
-                const void * map = mb.readDirect(mapsize*sizeof(rowmap_t));
+                const void * map = mb.readDirect(mapsize*sizeof(rowcount_t));
                 size32_t keybuffsize;
                 void * keybuff;
                 deserializeblk(mb,keybuffsize,keybuff);
@@ -478,18 +453,18 @@ bool SortSlaveMP::marshall(ISortSlaveMP &slave, ICommunicator* comm, mptag_t tag
                 mb.read(cmpfn);
                 bool useaux;
                 mb.read(useaux);
-                slave.OverflowAdjustMapStart(mapsize,(rowmap_t *)map,keybuffsize,(const byte *)keybuff,cmpfn,useaux);
+                slave.OverflowAdjustMapStart(mapsize,(rowcount_t *)map,keybuffsize,(const byte *)keybuff,cmpfn,useaux);
                 free(keybuff);
             }
             break;
             case FN_OverflowAdjustMapStop: {
                 unsigned mapsize;
                 mb.read(mapsize);
-                rowmap_t ret=0;
+                rowcount_t ret=0;
                 size32_t retofs = mbout.length();
                 mbout.append(ret);
-                void *map=mbout.reserveTruncate(mapsize*sizeof(rowmap_t));
-                ret = slave.OverflowAdjustMapStop(mapsize,(rowmap_t *)map);     // could avoid copy here if passed mb
+                void *map=mbout.reserveTruncate(mapsize*sizeof(rowcount_t));
+                ret = slave.OverflowAdjustMapStop(mapsize,(rowcount_t *)map);     // could avoid copy here if passed mb
                 mbout.writeDirect(retofs,sizeof(ret),&ret);
             }
             break;
@@ -498,7 +473,7 @@ bool SortSlaveMP::marshall(ISortSlaveMP &slave, ICommunicator* comm, mptag_t tag
                 comm->reply(mbout);
                 unsigned mapsize;
                 mb.read(mapsize);
-                const void *map = mb.readDirect(mapsize*sizeof(rowmap_t));
+                const void *map = mb.readDirect(mapsize*sizeof(rowcount_t));
                 unsigned num;
                 mb.read(num);
                 SocketEndpointArray epa;
@@ -507,7 +482,7 @@ bool SortSlaveMP::marshall(ISortSlaveMP &slave, ICommunicator* comm, mptag_t tag
                     ep.deserialize(mb);
                     epa.append(ep);
                 }
-                slave.MultiMerge(mapsize,(rowmap_t *)map,num,epa.getArray());
+                slave.MultiMerge(mapsize,(rowcount_t *)map,num,epa.getArray());
             }
             break;
             case FN_MultiMergeBetween: {
@@ -515,8 +490,8 @@ bool SortSlaveMP::marshall(ISortSlaveMP &slave, ICommunicator* comm, mptag_t tag
                 comm->reply(mbout);
                 unsigned mapsize;
                 mb.read(mapsize);
-                const void *map = mb.readDirect(mapsize*sizeof(rowmap_t));
-                const void *mapupper = mb.readDirect(mapsize*sizeof(rowmap_t));
+                const void *map = mb.readDirect(mapsize*sizeof(rowcount_t));
+                const void *mapupper = mb.readDirect(mapsize*sizeof(rowcount_t));
                 unsigned num;
                 mb.read(num);
                 SocketEndpointArray epa;
@@ -525,7 +500,7 @@ bool SortSlaveMP::marshall(ISortSlaveMP &slave, ICommunicator* comm, mptag_t tag
                     ep.deserialize(mb);
                     epa.append(ep);
                 }
-                slave.MultiMergeBetween(mapsize,(rowmap_t *)map,(rowmap_t *)mapupper,num,epa.getArray());
+                slave.MultiMergeBetween(mapsize,(rowcount_t *)map,(rowcount_t *)mapupper,num,epa.getArray());
             }
             break;
             case FN_SingleMerge: {

+ 16 - 18
thorlcr/msort/tsortmp.hpp

@@ -15,20 +15,19 @@ interface ISortSlaveMP
 {
     virtual bool Connect(unsigned _part, unsigned _numnodes)=0;
     virtual void StartGather()=0;
-    virtual void GetGatherInfo(rowmap_t &numlocal, offset_t &totalsize, unsigned &overflowscale, bool hasserializer)=0;
-    virtual rowmap_t GetMinMax(size32_t &keybuffsize,void *&keybuff, size32_t &avrecsizesize)=0;
+    virtual void GetGatherInfo(rowcount_t &numlocal, offset_t &totalsize, unsigned &overflowscale, bool hasserializer)=0;
+    virtual rowcount_t GetMinMax(size32_t &keybuffsize,void *&keybuff, size32_t &avrecsizesize)=0;
     virtual bool GetMidPoint     (size32_t lkeysize, const byte * lkey, size32_t hkeysize, const byte * hkey, size32_t &mkeysize, byte * &mkey)=0;
     virtual void GetMultiMidPoint(size32_t lkeybuffsize, const void * lkeybuff, size32_t hkeybuffsize, const void * hkeybuff, size32_t &mkeybuffsize, void * &mkeybuf)=0;
     virtual void GetMultiMidPointStart(size32_t lkeybuffsize, const void * lkeybuff, size32_t hkeybuffsize, const void * hkeybuff)=0; /* async */
     virtual void GetMultiMidPointStop(size32_t &mkeybuffsize, void * &mkeybuf)=0;
-    virtual rowmap_t SingleBinChop(size32_t keysize,const byte *key,byte cmpfn)=0;
-    virtual void MultiBinChop(size32_t keybuffsize,const byte *keybuff, unsigned num,rowmap_t *pos,byte cmpfn,bool useaux)=0;
+    virtual void MultiBinChop(size32_t keybuffsize,const byte *keybuff, unsigned num,rowcount_t *pos,byte cmpfn,bool useaux)=0;
     virtual void MultiBinChopStart(size32_t keybuffsize,const byte *keybuff, byte cmpfn)=0; /* async */
-    virtual void MultiBinChopStop(unsigned num,rowmap_t *pos)=0;
-    virtual void OverflowAdjustMapStart( unsigned mapsize,rowmap_t *map,size32_t keybuffsize,const byte *keybuff, byte cmpfn, bool useaux)=0; /* async */
-    virtual rowmap_t OverflowAdjustMapStop( unsigned mapsize, rowmap_t *map)=0;
-    virtual void MultiMerge(unsigned mapsize,rowmap_t *map,unsigned num,SocketEndpoint* endpoints)=0; /* async */
-    virtual void MultiMergeBetween(unsigned mapsize,rowmap_t *map,rowmap_t *mapupper,unsigned num,SocketEndpoint* endpoints)=0; /* async */
+    virtual void MultiBinChopStop(unsigned num,rowcount_t *pos)=0;
+    virtual void OverflowAdjustMapStart(unsigned mapsize,rowcount_t *map,size32_t keybuffsize,const byte *keybuff, byte cmpfn, bool useaux)=0; /* async */
+    virtual rowcount_t OverflowAdjustMapStop(unsigned mapsize, rowcount_t *map)=0;
+    virtual void MultiMerge(unsigned mapsize,rowcount_t *map,unsigned num,SocketEndpoint* endpoints)=0; /* async */
+    virtual void MultiMergeBetween(unsigned mapsize,rowcount_t *map,rowcount_t *mapupper,unsigned num,SocketEndpoint* endpoints)=0; /* async */
     virtual void SingleMerge()=0; /* async */
     virtual bool FirstRowOfFile(const char *filename,size32_t &rowbuffsize, byte * &rowbuf)=0;
     virtual void GetMultiNthRow(unsigned numsplits,size32_t &mkeybuffsize, void * &mkeybuf)=0;              
@@ -52,20 +51,19 @@ public:
     void init(ICommunicator *_comm, rank_t _rank,mptag_t _tag);
     bool Connect(unsigned _part, unsigned _numnodes);
     void StartGather();
-    void GetGatherInfo(rowmap_t &numlocal, offset_t &totalsize, unsigned &overflowscale, bool hasserializer);
-    rowmap_t GetMinMax(size32_t &keybuffsize,void *&keybuff, size32_t &avrecsizesize);
+    void GetGatherInfo(rowcount_t &numlocal, offset_t &totalsize, unsigned &overflowscale, bool hasserializer);
+    rowcount_t GetMinMax(size32_t &keybuffsize,void *&keybuff, size32_t &avrecsizesize);
     bool GetMidPoint     (size32_t lkeysize, const byte * lkey, size32_t hkeysize, const byte * hkey, size32_t &mkeysize, byte * &mkey);
     void GetMultiMidPoint(size32_t lkeybuffsize, const void * lkeybuff, size32_t hkeybuffsize, const void * hkeybuff, size32_t &mkeybuffsize, void * &mkeybuf);
     void GetMultiMidPointStart(size32_t lkeybuffsize, const void * lkeybuff, size32_t hkeybuffsize, const void * hkeybuff); /* async */
     void GetMultiMidPointStop(size32_t &mkeybuffsize, void * &mkeybuf);
-    rowmap_t SingleBinChop(size32_t keysize,const byte *key,byte cmpfn);
-    void MultiBinChop(size32_t keybuffsize,const byte *keybuff, unsigned num,rowmap_t *pos,byte cmpfn,bool useaux);
+    void MultiBinChop(size32_t keybuffsize,const byte *keybuff, unsigned num,rowcount_t *pos,byte cmpfn,bool useaux);
     void MultiBinChopStart(size32_t keybuffsize,const byte *keybuff, byte cmpfn); /* async */
-    void MultiBinChopStop(unsigned num,rowmap_t *pos);
-    void OverflowAdjustMapStart( unsigned mapsize,rowmap_t *map,size32_t keybuffsize,const byte *keybuff, byte cmpfn,bool useaux); /* async */
-    rowmap_t OverflowAdjustMapStop( unsigned mapsize, rowmap_t *map);
-    void MultiMerge(unsigned mapsize,rowmap_t *map,unsigned num,SocketEndpoint* endpoints); /* async */
-    void MultiMergeBetween(unsigned mapsize,rowmap_t *map,rowmap_t *mapupper,unsigned num,SocketEndpoint* endpoints); /* async */
+    void MultiBinChopStop(unsigned num,rowcount_t *pos);
+    void OverflowAdjustMapStart(unsigned mapsize,rowcount_t *map,size32_t keybuffsize,const byte *keybuff, byte cmpfn,bool useaux); /* async */
+    rowcount_t OverflowAdjustMapStop(unsigned mapsize, rowcount_t *map);
+    void MultiMerge(unsigned mapsize,rowcount_t *map,unsigned num,SocketEndpoint* endpoints); /* async */
+    void MultiMergeBetween(unsigned mapsize,rowcount_t *map,rowcount_t *mapupper,unsigned num,SocketEndpoint* endpoints); /* async */
     void SingleMerge(); /* async */
     bool FirstRowOfFile(const char *filename,size32_t &rowbuffsize, byte * &rowbuf);
     void GetMultiNthRow(unsigned numsplits,size32_t &mkeybuffsize, void * &mkeybuf);                

+ 34 - 40
thorlcr/msort/tsorts.cpp

@@ -124,7 +124,7 @@ class CWriteIntercept : public CSimpleInterface
             fixedsize = (size32_t)(o-lastofs);
         lastofs = o;
     }
-    size32_t _readOverflowPos(rowmap_t pos, unsigned n, offset_t *ofs, bool closeIO)
+    size32_t _readOverflowPos(rowcount_t pos, unsigned n, offset_t *ofs, bool closeIO)
     {
         if (fixedsize)
         {
@@ -247,12 +247,12 @@ public:
         dataFileDeserializerSource.setStream(NULL);
         idxFileIO.clear();
     }
-    size32_t readOverflowPos(rowmap_t pos, unsigned n, offset_t *ofs, bool closeIO)
+    size32_t readOverflowPos(rowcount_t pos, unsigned n, offset_t *ofs, bool closeIO)
     {
         CriticalBlock block(crit);
         return _readOverflowPos(pos, n, ofs, closeIO);
     }
-    const void *getRow(rowmap_t pos)
+    const void *getRow(rowcount_t pos)
     {
         CriticalBlock block(crit);
         offset_t ofs[2]; // JCSMORE doesn't really need 2, only to verify read right amount below
@@ -574,7 +574,7 @@ class CThorSorter : public CSimpleInterface, implements IThorSorter, implements
     unsigned partno, numnodes; // JCSMORE - shouldn't be necessary
     rowcount_t totalrows, grandtotal;
     offset_t grandtotalsize;
-    rowmap_t *overflowmap, *multibinchoppos;
+    rowcount_t *overflowmap, *multibinchoppos;
     bool stopping, gatherdone, nosort, isstable;
     ICompare *icompare;
     ICompare *icollate; // used for co-sort
@@ -643,14 +643,14 @@ class CThorSorter : public CSimpleInterface, implements IThorSorter, implements
         }
         return NULL;
     }
-    unsigned BinChop(const void *row, bool lesseq, bool firstdup, byte cmpfn)
+    rowidx_t BinChop(const void *row, bool lesseq, bool firstdup, byte cmpfn)
     {
-        unsigned n = rowArray.ordinality();
-        unsigned l=0;
-        unsigned r=n;
+        rowidx_t n = rowArray.ordinality();
+        rowidx_t l=0;
+        rowidx_t r=n;
         ICompare* icmp=queryCmpFn(cmpfn);
         while (l<r) {
-            unsigned m = (l+r)/2;
+            rowidx_t m = (l+r)/2;
             const void *p = rowArray.query(m);
             int cmp = icmp->docompare(row, p);
             if (cmp < 0)
@@ -673,7 +673,7 @@ class CThorSorter : public CSimpleInterface, implements IThorSorter, implements
             return l-1;
         return l;
     }
-    void doBinChop(CThorExpandingRowArray &keys, rowmap_t * pos, unsigned num, byte cmpfn)
+    void doBinChop(CThorExpandingRowArray &keys, rowcount_t * pos, unsigned num, byte cmpfn)
     {
         MemoryBuffer tmp;
         for (unsigned n=0;n<num;n++)
@@ -695,15 +695,15 @@ class CThorSorter : public CSimpleInterface, implements IThorSorter, implements
             }
         }
     }
-    void AdjustOverflow(rowmap_t &apos, const void *key, byte cmpfn)
+    void AdjustOverflow(rowcount_t &apos, const void *key, byte cmpfn)
     {
 #ifdef TRACE_PARTITION_OVERFLOW
         ActPrintLog(activity, "AdjustOverflow: in (%"RCPF"d)",apos);
         TraceKey(" ",(byte *)key);
 #endif
-        rowmap_t pos = (rowmap_t)(apos+1)*(rowmap_t)overflowinterval;
+        rowcount_t pos = (apos+1)*(rowcount_t)overflowinterval;
         if (pos>grandtotal)
-            pos = (rowmap_t)grandtotal;
+            pos = grandtotal;
         assertex(intercept);
         MemoryBuffer bufma;
         while (pos>0)
@@ -769,7 +769,7 @@ public:
         gatherdone = false;
         startgathersem.signal();
     }
-    virtual void GetGatherInfo(rowmap_t &numlocal, offset_t &totalsize, unsigned &_overflowscale, bool haskeyserializer)
+    virtual void GetGatherInfo(rowcount_t &numlocal, offset_t &totalsize, unsigned &_overflowscale, bool haskeyserializer)
     {
         if (!gatherdone)
             ERRLOG("GetGatherInfo:***Error called before gather complete");
@@ -784,7 +784,7 @@ public:
         _overflowscale = overflowinterval;
         totalsize = grandtotalsize; // used by master, if nothing overflowed to see if can MiniSort
     }
-    virtual rowmap_t GetMinMax(size32_t &keybufsize,void *&keybuf,size32_t &avrecsize)
+    virtual rowcount_t GetMinMax(size32_t &keybufsize,void *&keybuf,size32_t &avrecsize)
     {
         CThorExpandingRowArray ret(*activity, rowif, true);
         avrecsize = 0;
@@ -902,13 +902,7 @@ public:
         mbufsize = midkeybufsize;
         midkeybuf = NULL;
     }
-    virtual rowmap_t SingleBinChop(size32_t keysize, const byte * key,byte cmpfn)
-    {
-        OwnedConstThorRow row;
-        row.deserialize(rowif,keysize,key);
-        return BinChop(row.get(),false,true,cmpfn);
-    }
-    virtual void MultiBinChop(size32_t keybufsize, const byte * keybuf, unsigned num, rowmap_t * pos, byte cmpfn, bool useaux)
+    virtual void MultiBinChop(size32_t keybufsize, const byte * keybuf, unsigned num, rowcount_t * pos, byte cmpfn, bool useaux)
     {
         CThorExpandingRowArray keys(*activity, useaux?auxrowif:rowif, true);
         keys.deserialize(keybufsize, keybuf);
@@ -920,23 +914,23 @@ public:
         keys.deserializeExpand(keybufsize, keybuf);
         assertex(multibinchoppos==NULL); // check for reentrancy
         multibinchopnum = keys.ordinality();
-        multibinchoppos = (rowmap_t *)malloc(sizeof(rowmap_t)*multibinchopnum);
+        multibinchoppos = (rowcount_t *)malloc(sizeof(rowcount_t)*multibinchopnum);
         doBinChop(keys, multibinchoppos, multibinchopnum, cmpfn);
     }
-    virtual void MultiBinChopStop(unsigned num, rowmap_t * pos)
+    virtual void MultiBinChopStop(unsigned num, rowcount_t * pos)
     {
         assertex(multibinchoppos);
         assertex(multibinchopnum==num);
-        memcpy(pos,multibinchoppos,num*sizeof(rowmap_t));
+        memcpy(pos,multibinchoppos,num*sizeof(rowcount_t));
         free(multibinchoppos);
         multibinchoppos = NULL;
     }
-    virtual void OverflowAdjustMapStart(unsigned mapsize, rowmap_t * map,
+    virtual void OverflowAdjustMapStart(unsigned mapsize, rowcount_t * map,
                                size32_t keybufsize, const byte * keybuf, byte cmpfn, bool useaux)
     {
         assertex(intercept);
-        overflowmap = (rowmap_t *)malloc(mapsize*sizeof(rowmap_t));
-        memcpy(overflowmap,map,mapsize*sizeof(rowmap_t));
+        overflowmap = (rowcount_t *)malloc(mapsize*sizeof(rowcount_t));
+        memcpy(overflowmap,map,mapsize*sizeof(rowcount_t));
         unsigned i;
 #ifdef TRACE_PARTITION_OVERFLOW
 
@@ -949,27 +943,25 @@ public:
         keys.deserialize(keybufsize, keybuf);
         for (i=0;i<mapsize-1;i++)
             AdjustOverflow(overflowmap[i], keys.query(i), cmpfn);
-        assertex(grandtotal==(unsigned)grandtotal);
-        overflowmap[mapsize-1] = (unsigned)grandtotal;
+        overflowmap[mapsize-1] = grandtotal;
 #ifdef TRACE_PARTITION_OVERFLOW
         ActPrintLog(activity, "Out: ");
         for (i=0;i<mapsize;i++)
             ActPrintLog(activity, "%"RCPF"u ",overflowmap[i]);
 #endif
     }
-    virtual rowmap_t OverflowAdjustMapStop(unsigned mapsize, rowmap_t * map)
+    virtual rowcount_t OverflowAdjustMapStop(unsigned mapsize, rowcount_t * map)
     {
-        memcpy(map,overflowmap,mapsize*sizeof(rowmap_t));
+        memcpy(map,overflowmap,mapsize*sizeof(rowcount_t));
         free(overflowmap);
-        assertex(grandtotal==(rowmap_t)grandtotal);
-        return (rowmap_t)grandtotal;
+        return grandtotal;
     }
-    virtual void MultiMerge(unsigned mapsize,rowmap_t *map,
+    virtual void MultiMerge(unsigned mapsize,rowcount_t *map,
                     unsigned num,SocketEndpoint* endpoints)
     {
         MultiMergeBetween(mapsize,map,NULL,num,endpoints);
     }
-    virtual void MultiMergeBetween(unsigned mapsize, rowmap_t * map, rowmap_t * mapupper, unsigned num, SocketEndpoint * endpoints)
+    virtual void MultiMergeBetween(unsigned mapsize, rowcount_t * map, rowcount_t * mapupper, unsigned num, SocketEndpoint * endpoints)
     {
         assertex(transferserver.get()!=NULL);
         if (intercept)
@@ -1090,10 +1082,8 @@ public:
     }
 
 // ISortSlaveBase
-    virtual IRowStream *createMergeInputStream(rowmap_t sstart, rowcount_t snum)
+    virtual IRowStream *createMergeInputStream(rowcount_t sstart, rowcount_t snum)
     {
-        unsigned _snum = (unsigned)snum;    // only support 2^32 rows locally
-        assertex(snum==_snum);
         if (intercept)
         {
             offset_t startofs;  
@@ -1102,7 +1092,11 @@ public:
             return intercept->getStream(startofs, snum);
         }
         else
-            return rowArray.createRowStream((unsigned)sstart, _snum, false); // must be false as rows may overlap (between join)
+        {
+            unsigned _snum = (rowidx_t)snum; // only support 2^32 rows in memory
+            assertex(snum==_snum);
+            return rowArray.createRowStream((rowidx_t)sstart, _snum, false); // must be false as rows may overlap (between join)
+        }
     }
     virtual size32_t getTransferBlockSize()
     {

+ 4 - 6
thorlcr/msort/tsorts.hpp

@@ -28,8 +28,6 @@
 #include "mptag.hpp"
 #include "mpbase.hpp"
 
-typedef rowcount_t rowmap_t;
-
 interface ISortKeySerializer;
 interface IRowInterfaces;
 interface IThorDataLink;
@@ -67,8 +65,8 @@ interface ISocketRowWriter: extends IRowWriter
 
 class CActivityBase;
 IThorSorter *CreateThorSorter(CActivityBase *activity, SocketEndpoint &ep,IDiskUsage *iDiskUsage,ICommunicator *clusterComm, mptag_t _mpTagRPC);
-IRowStream *ConnectMergeRead(unsigned id,IRowInterfaces *rowif,SocketEndpoint &nodeaddr,rowcount_t startrec,rowmap_t numrecs);
-ISocketRowWriter *ConnectMergeWrite(IRowInterfaces *rowif,ISocket *socket,size32_t bufsize,rowcount_t &startrec,rowmap_t &numrecs);
+IRowStream *ConnectMergeRead(unsigned id,IRowInterfaces *rowif,SocketEndpoint &nodeaddr,rowcount_t startrec,rowcount_t numrecs);
+ISocketRowWriter *ConnectMergeWrite(IRowInterfaces *rowif,ISocket *socket,size32_t bufsize,rowcount_t &startrec,rowcount_t &numrecs);
 #define SOCKETSERVERINC                    1
 #define NUMSLAVESOCKETS                    2
 
@@ -84,7 +82,7 @@ interface ISortedInput: extends IInterface // reads rows from sorted local data
 
 interface ISortSlaveBase  // for global merging 
 {
-    virtual IRowStream *createMergeInputStream(rowmap_t sstart, rowcount_t _snum) = 0;
+    virtual IRowStream *createMergeInputStream(rowcount_t sstart, rowcount_t _snum) = 0;
     virtual size32_t getTransferBlockSize() = 0;
     virtual unsigned getTransferPort() = 0;
     virtual void startMerging(IArrayOf<IRowStream> &readers, rowcount_t _totalrows) = 0;
@@ -95,7 +93,7 @@ interface ISortSlaveBase  // for global merging
 interface IMergeTransferServer: extends IInterface
 {
     virtual void start() = 0;
-    virtual rowmap_t merge(unsigned mapsize,rowmap_t *map,rowmap_t *mapupper,
+    virtual rowcount_t merge(unsigned mapsize,rowcount_t *map,rowcount_t *mapupper,
                             unsigned num,SocketEndpoint* endpoints,
                             unsigned partno
                            ) = 0;

+ 25 - 20
thorlcr/msort/tsorts1.cpp

@@ -58,7 +58,7 @@ protected:
     }
 public:
     IMPLEMENT_IINTERFACE_USING(CSimpleInterface);
-    CMergeReadStream(IRowInterfaces *rowif, unsigned streamno,SocketEndpoint &targetep, rowcount_t startrec, rowmap_t numrecs)
+    CMergeReadStream(IRowInterfaces *rowif, unsigned streamno,SocketEndpoint &targetep, rowcount_t startrec, rowcount_t numrecs)
     {
         endpoint = targetep;
 #ifdef _TRACE
@@ -127,7 +127,7 @@ class CSortMerge: public CSimpleInterface, implements ISocketSelectNotify
     ISortSlaveBase &src;
     Owned<ISocketRowWriter> out;
     rowcount_t poscount;
-    rowmap_t numrecs;
+    rowcount_t numrecs;
 //  unsigned pos;
 //  unsigned endpos;
     unsigned ndone;
@@ -144,7 +144,7 @@ protected:
 public:
     IMPLEMENT_IINTERFACE_USING(CSimpleInterface);
 
-    CSortMerge(CSortTransferServerThread *_parent,ISocket* _socket,ISocketRowWriter *_out,rowcount_t _poscount,rowmap_t _numrecs,ISocketSelectHandler *_selecthandler);
+    CSortMerge(CSortTransferServerThread *_parent,ISocket* _socket,ISocketRowWriter *_out,rowcount_t _poscount,rowcount_t _numrecs,ISocketSelectHandler *_selecthandler);
     ~CSortMerge()
     {
 #ifdef _FULL_TRACE
@@ -166,12 +166,13 @@ public:
         url.append(name).append(':').append(port);
         PrintLog("SORT Merge WRITE: start %s, pos=%"RCPF"d, len=%"RCPF"d",url.str(),poscount,numrecs);
 #endif
-        rowmap_t pos=(rowmap_t)poscount;
-        assertex(pos==poscount);
-        try {
+        rowcount_t pos=poscount;
+        try
+        {
             iseq.setown(src.createMergeInputStream(pos,numrecs));
         }
-        catch (IException *e) {
+        catch (IException *e)
+        {
             PrintExceptionLog(e,"**Exception(4a)");
             throw;
         }
@@ -343,11 +344,11 @@ public:
                 }
 
                 rowcount_t poscount=0;
-                rowmap_t numrecs=0;
+                rowcount_t numrecs=0;
                 ISocketRowWriter *strm=NULL;
                 try {
                     waitRowIF();
-                    strm = ConnectMergeWrite(rowif,socket,0x100000,poscount,numrecs);   
+                    strm = ConnectMergeWrite(rowif,socket,0x100000,poscount,numrecs);
                 }
                 catch (IJSOCK_Exception *e) { // retry if failed
                     PrintExceptionLog(e,"WARNING: Exception(ConnectMergeWrite)");
@@ -407,7 +408,7 @@ public:
 
     }
 
-    void add(ISocketRowWriter *strm,ISocket *socket,rowcount_t poscount,rowmap_t numrecs) // takes ownership of sock
+    void add(ISocketRowWriter *strm,ISocket *socket,rowcount_t poscount,rowcount_t numrecs) // takes ownership of sock
     {
         CriticalBlock proc(childsect);
         if (!selecthandler) {
@@ -429,7 +430,7 @@ public:
 
 
 
-    rowmap_t merge(unsigned mapsize,rowmap_t *map,rowmap_t *mapupper,
+    rowcount_t merge(unsigned mapsize,rowcount_t *map,rowcount_t *mapupper,
                    unsigned numnodes,SocketEndpoint* endpoints,
                    unsigned partno)
     {
@@ -458,7 +459,7 @@ public:
             }
         }
 #endif  
-        rowmap_t resnum=0;
+        rowcount_t resnum=0;
         for (i=0;i<numnodes;i++) 
             resnum += vMAPU(i,partno)-vMAPL(i,partno-1);
         // calculate start position
@@ -467,18 +468,22 @@ public:
             for (j=0;j<numnodes;j++) 
                 respos += vMAPL(j,i)-vMAPL(j,i-1);      // note we are adding up all of the lower as we want start
 
-        rowmap_t totalrows = resnum;
+        rowcount_t totalrows = resnum;
         PrintLog("Output start = %"RCPF"d, num = %"RCPF"u",respos,resnum);
 
         IArrayOf<IRowStream> readers;
         IException *exc = NULL;
-        try {
-            for (j=0;j<numnodes;j++) {
+        try
+        {
+            for (j=0;j<numnodes;j++)
+            {
                 unsigned i=j;
-                rowmap_t sstart=vMAPL(i,partno-1);
-                rowmap_t snum=vMAPU(i,partno)-sstart; 
-                if (snum>0) {
-                    if (i==partno) {
+                rowcount_t sstart=vMAPL(i,partno-1);
+                rowcount_t snum=vMAPU(i,partno)-sstart;
+                if (snum>0)
+                {
+                    if (i==partno)
+                    {
                         PrintLog("SORT Merge READ: Stream(%u) local, pos=%"RCPF"u len=%"RCPF"u",i,sstart,snum);
                         readers.append(*slave.createMergeInputStream(sstart,snum));
                     }
@@ -508,7 +513,7 @@ public:
     }
 };
 
-CSortMerge::CSortMerge(CSortTransferServerThread *_parent,ISocket* _socket,ISocketRowWriter *_out,rowcount_t _poscount,rowmap_t _numrecs,ISocketSelectHandler *_selecthandler)
+CSortMerge::CSortMerge(CSortTransferServerThread *_parent,ISocket* _socket,ISocketRowWriter *_out,rowcount_t _poscount,rowcount_t _numrecs,ISocketSelectHandler *_selecthandler)
     : src(_parent->slave),socket(_socket),out(_out)
 {
     parent = _parent;

+ 2 - 6
thorlcr/shared/thor.hpp

@@ -27,17 +27,13 @@ typedef unsigned graph_id;
 #define ACTPF
 #define GIDPF
 
-#ifdef __64BIT__
 typedef unsigned __int64 rowcount_t;
 #define RCPF I64F
 #define RCMAX ((rowcount_t)(__int64)-1)
-#else
-typedef unsigned rowcount_t;
-#define RCPF ""
-#define RCMAX UINT_MAX
-#endif
 #define RCUNBOUND RCMAX
 #define RCUNSET RCMAX
+typedef size32_t rowidx_t;
+#define RIPF ""
 
 template <class T>
 inline rowcount_t validRC(T X)

+ 7 - 12
thorlcr/shared/thwatchdog.hpp

@@ -23,22 +23,17 @@
 #include "thor.hpp"
 
 #define HEARTBEAT_INTERVAL      15          // seconds
-#define DATA_MAX            1024 * 8    // 8k
+#define UDP_DATA_MAX            1024 * 8    // 8k
 #define THORBEAT_INTERVAL       10*1000     // 10 sec!
 #define THORBEAT_RETRY_INTERVAL 4*60*1000   // 4 minutes
 
-struct HeartBeatPacket
-{
-    unsigned short  packetsize;                 // used as validity check must be first
-    SocketEndpoint  sender;
-    unsigned        tick;                       // sequence check
-    unsigned short  progressSize;               // size of progress data (following performamce data)
-
-    byte            perfdata[DATA_MAX]; // performance/progress data from here on
 
-    inline size32_t packetSize() { return progressSize + (sizeof(HeartBeatPacket) - sizeof(perfdata)); }
-    inline size32_t minPacketSize() { return sizeof(progressSize) + sizeof(tick) + sizeof(sender) + sizeof(packetsize); }
-    inline size32_t maxPacketSize() { return DATA_MAX + minPacketSize(); }
+struct HeartBeatPacketHeader
+{
+    size32_t packetSize;   // used as validity check must be first
+    SocketEndpoint sender;
+    unsigned tick;         // sequence check
+    size32_t progressSize; // size of progress data (following performance data)
 };
 
 #endif

+ 1 - 1
thorlcr/slave/slavmain.cpp

@@ -137,7 +137,7 @@ public:
             querySoCache.init(soPath.str(), DEFAULT_QUERYSO_LIMIT, soPattern);
         Owned<ISlaveWatchdog> watchdog;
         if (globals->getPropBool("@watchdogEnabled"))
-            watchdog.setown(createProgressHandler());
+            watchdog.setown(createProgressHandler(globals->getPropBool("@useUDPWatchdog")));
 
         CMessageBuffer msg;
         stopped = false;

+ 87 - 41
thorlcr/slave/slwatchdog.cpp

@@ -29,46 +29,47 @@
 #include "slwatchdog.hpp"
 #include "thgraphslave.hpp"
 
-class CGraphProgressHandler : public CSimpleInterface, implements ISlaveWatchdog, implements IThreaded
+class CGraphProgressHandlerBase : public CSimpleInterface, implements ISlaveWatchdog, implements IThreaded
 {
     CriticalSection crit;
     CGraphArray activeGraphs;
     bool stopped, progressEnabled;
-    Owned<ISocket> sock;
     CThreaded threaded;
     SocketEndpoint self;
 
-    void sendData()
+    void gatherAndSend()
     {
-        HeartBeatPacket hbpacket;
-        gatherData(hbpacket);
-        if(hbpacket.packetsize > 0)
-        {
-            MemoryBuffer mb;
-            size32_t sz = ThorCompress(&hbpacket,hbpacket.packetsize, mb, 0x200);
-            sock->write(mb.toByteArray(), sz);
-        }
+        MemoryBuffer sendMb, progressMb;
+        HeartBeatPacketHeader hb;
+        hb.sender = self;
+        hb.tick++;
+        size32_t progressSizePos = (byte *)&hb.progressSize - (byte *)&hb;
+        sendMb.append(sizeof(HeartBeatPacketHeader), &hb);
+
+        hb.progressSize = gatherData(progressMb);
+        sendMb.writeDirect(progressSizePos, sizeof(hb.progressSize), &hb.progressSize);
+        sendMb.append(progressMb);
+        size32_t packetSize = sendMb.length();
+        sendMb.writeDirect(0, sizeof(hb.packetSize), &packetSize);
+        sendData(sendMb);
     }
+    virtual void sendData(MemoryBuffer &mb) = 0;
 
 public:
     IMPLEMENT_IINTERFACE_USING(CSimpleInterface);
-    CGraphProgressHandler() : threaded("CGraphProgressHandler")
+    CGraphProgressHandlerBase() : threaded("CGraphProgressHandler")
     {
         self = queryMyNode()->endpoint();
         stopped = true;
 
-        StringBuffer ipStr;
-        queryClusterGroup().queryNode(0).endpoint().getIpText(ipStr);
-        sock.setown(ISocket::udp_connect(getFixedPort(getMasterPortBase(), TPORT_watchdog),ipStr.str()));
         progressEnabled = globals->getPropBool("@watchdogProgressEnabled");
-        sendData();                         // send initial data
         stopped = false;
 #ifdef _WIN32
         threaded.adjustPriority(+1); // it is critical that watchdog packets get through.
 #endif
         threaded.init(this);
     }
-    ~CGraphProgressHandler()
+    ~CGraphProgressHandlerBase()
     {
         stop();
     }
@@ -82,31 +83,27 @@ public:
         LOG(MCdebugProgress, thorJob, "Stopped watchdog");
     }
 
-    void gatherData(HeartBeatPacket &hb)
+    size32_t gatherData(MemoryBuffer &mb)
     {
         CriticalBlock b(crit);
-        hb.sender = self;
-        hb.progressSize = 0;
         if (progressEnabled)
         {
-            CriticalBlock b(crit);
-            MemoryBuffer mb;
-            mb.setBuffer(DATA_MAX, hb.perfdata);
-            mb.rewrite();
-            ForEachItemIn(g, activeGraphs)
-            {
-                CGraphBase &graph = activeGraphs.item(g);
-                graph.serializeStats(mb);
-                if (mb.length() > (DATA_MAX-30))
+            MemoryBuffer progressData;
+            { CriticalBlock b(crit);
+                ForEachItemIn(g, activeGraphs)
                 {
-                    WARNLOG("Progress packet too big!");
-                    break;
+                    CGraphBase &graph = activeGraphs.item(g);
+                    graph.serializeStats(progressData);
                 }
             }
-            hb.progressSize = mb.length();
+            size32_t sz = progressData.length();
+            if (sz)
+            {
+                ThorCompress(progressData, mb, 0x200);
+                return sz;
+            }
         }
-        hb.tick++;
-        hb.packetsize = hb.packetSize();
+        return 0;
     }
 
 // ISlaveWatchdog impl.
@@ -117,15 +114,21 @@ public:
         StringBuffer str("Watchdog: Start Job ");
         LOG(MCdebugProgress, thorJob, "%s", str.append(graph.queryGraphId()).str());
     }
-    void stopGraph(CGraphBase &graph, HeartBeatPacket *hb)
+    void stopGraph(CGraphBase &graph, MemoryBuffer *mb)
     {
         CriticalBlock b(crit);
         if (NotFound != activeGraphs.find(graph))
         {
             StringBuffer str("Watchdog: Stop Job ");
             LOG(MCdebugProgress, thorJob, "%s", str.append(graph.queryGraphId()).str());
-            if (hb)
-                gatherData(*hb);
+            if (mb)
+            {
+                unsigned pos=mb->length();
+                mb->append((size32_t)0); // placeholder
+                gatherData(*mb);
+                size32_t len=(mb->length()-pos)-sizeof(size32_t);
+                mb->writeDirect(pos, sizeof(len), &len);
+            }
             activeGraphs.zap(graph);
         }
     }
@@ -134,6 +137,7 @@ public:
     void main()
     {
         LOG(MCdebugProgress, thorJob, "Watchdog: thread running");
+        gatherAndSend(); // send initial data
         assertex(HEARTBEAT_INTERVAL>=8);
         unsigned count = HEARTBEAT_INTERVAL+getRandom()%8-4;
         while (!stopped)
@@ -141,15 +145,57 @@ public:
             Sleep(1000);
             if (count--==0)
             {
-                sendData();
+                gatherAndSend();
                 count = HEARTBEAT_INTERVAL+getRandom()%8-4;         
             }
         }
     }
 };
 
-ISlaveWatchdog *createProgressHandler()
+
+class CGraphProgressUDPHandler : public CGraphProgressHandlerBase
 {
-    return new CGraphProgressHandler();
-}
+    Owned<ISocket> sock;
+public:
+    CGraphProgressUDPHandler()
+    {
+        StringBuffer ipStr;
+        queryClusterGroup().queryNode(0).endpoint().getIpText(ipStr);
+        sock.setown(ISocket::udp_connect(getFixedPort(getMasterPortBase(), TPORT_watchdog),ipStr.str()));
+    }
+    virtual void sendData(MemoryBuffer &mb)
+    {
+        HeartBeatPacketHeader hb;
+        memcpy(&hb, mb.toByteArray(), sizeof(HeartBeatPacketHeader));
+        if (hb.packetSize > UDP_DATA_MAX)
+        {
+            WARNLOG("Progress packet too big! progress lost");
+            hb.progressSize = 0;
+            hb.packetSize = sizeof(HeartBeatPacketHeader);
+        }
+        sock->write(mb.toByteArray(), mb.length());
+    }
+};
+
 
+class CGraphProgressMPHandler : public CGraphProgressHandlerBase
+{
+public:
+    CGraphProgressMPHandler()
+    {
+    }
+    virtual void sendData(MemoryBuffer &mb)
+    {
+        CMessageBuffer msg;
+        msg.swapWith(mb);
+        queryClusterComm().send(msg, 0, MPTAG_THORWATCHDOG);
+    }
+};
+
+ISlaveWatchdog *createProgressHandler(bool udp)
+{
+    if (udp)
+        return new CGraphProgressUDPHandler();
+    else
+        return new CGraphProgressMPHandler();
+}

+ 2 - 2
thorlcr/slave/slwatchdog.hpp

@@ -26,11 +26,11 @@ class CGraphBase;
 interface ISlaveWatchdog : extends IInterface
 {
     virtual void startGraph(CGraphBase &graph) = 0;
-    virtual void stopGraph(CGraphBase &graph, HeartBeatPacket *hb=NULL) = 0;
+    virtual void stopGraph(CGraphBase &graph, MemoryBuffer *mb=NULL) = 0;
     virtual void stop() = 0;
 };
-ISlaveWatchdog *createProgressHandler();
 
+ISlaveWatchdog *createProgressHandler(bool udp=false);
 
 #endif
 

+ 43 - 47
thorlcr/thorutil/thmem.cpp

@@ -205,7 +205,6 @@ class CSpillableStreamBase : public CSimpleInterface, implements roxiemem::IBuff
 {
 protected:
     CActivityBase &activity;
-    rowcount_t pos;
     IRowInterfaces *rowIf;
     bool preserveNulls, ownsRows;
     CThorSpillableRowArray rows;
@@ -215,7 +214,7 @@ protected:
     bool spillRows()
     {
         // NB: Should always be called whilst 'rows' is locked (with CThorSpillableRowArrayLock)
-        rowcount_t numRows = rows.numCommitted();
+        rowidx_t numRows = rows.numCommitted();
         if (0 == numRows)
             return false;
 
@@ -235,8 +234,6 @@ public:
         : activity(_activity), rowIf(_rowIf), rows(_activity, _rowIf, _preserveNulls), preserveNulls(_preserveNulls)
     {
         rows.swap(inRows);
-        pos = 0;
-
         activity.queryJob().queryRowManager()->addRowBuffer(this);
     }
     ~CSpillableStreamBase()
@@ -265,7 +262,7 @@ class CSharedSpillableRowSet : public CSpillableStreamBase, implements IInterfac
 {
     class CStream : public CSimpleInterface, implements IRowStream, implements IWritePosCallback
     {
-        rowcount_t pos;
+        rowidx_t pos;
         offset_t outputOffset;
         Owned<IRowStream> spillStream;
         Linked<CSharedSpillableRowSet> owner;
@@ -302,7 +299,7 @@ class CSharedSpillableRowSet : public CSpillableStreamBase, implements IInterfac
         }
         virtual void stop() { }
     // IWritePosCallback
-        virtual rowcount_t queryRecordNumber()
+        virtual rowidx_t queryRecordNumber()
         {
             return pos;
         }
@@ -335,7 +332,7 @@ public:
 // NB: A single unshared spillable stream
 class CSpillableStream : public CSpillableStreamBase, implements IRowStream
 {
-    rowcount_t numReadRows, granularity;
+    rowidx_t pos, numReadRows, granularity;
     const void **readRows;
 
 public:
@@ -344,7 +341,7 @@ public:
     CSpillableStream(CActivityBase &_activity, CThorSpillableRowArray &inRows, IRowInterfaces *_rowIf, bool _preserveNulls)
         : CSpillableStreamBase(_activity, inRows, _rowIf, _preserveNulls)
     {
-        numReadRows = 0;
+        pos = numReadRows = 0;
         granularity = 500; // JCSMORE - rows
 
         // a small amount of rows to read from swappable rows
@@ -373,7 +370,7 @@ public:
                 spillStream.setown(createRowStream(spillFile, rowIf, 0, (offset_t)-1, (unsigned __int64)-1, false, preserveNulls));
                 return spillStream->nextRow();
             }
-            rowcount_t fetch = rows.numCommitted();
+            rowidx_t fetch = rows.numCommitted();
             if (0 == fetch)
                 return NULL;
             if (fetch >= granularity)
@@ -395,7 +392,7 @@ public:
 
 //====
 
-void CThorExpandingRowArray::init(rowcount_t initialSize, bool _stableSort)
+void CThorExpandingRowArray::init(rowidx_t initialSize, bool _stableSort)
 {
     rowManager = activity.queryJob().queryRowManager();
     stableSort = _stableSort;
@@ -417,7 +414,7 @@ void CThorExpandingRowArray::init(rowcount_t initialSize, bool _stableSort)
     numRows = 0;
 }
 
-const void *CThorExpandingRowArray::allocateNewRows(rowcount_t requiredRows, OwnedConstThorRow &newStableSortTmp)
+const void *CThorExpandingRowArray::allocateNewRows(rowidx_t requiredRows, OwnedConstThorRow &newStableSortTmp)
 {
     unsigned newSize = maxRows;
     //This condition must be <= at least 1/scaling factor below otherwise you'll get an infinite loop.
@@ -477,7 +474,7 @@ void CThorExpandingRowArray::doSort(unsigned n, void **const rows, ICompare &com
         parqsortvec((void **const)rows, n, compare, maxCores);
 }
 
-CThorExpandingRowArray::CThorExpandingRowArray(CActivityBase &_activity, IRowInterfaces *_rowIf, bool _allowNulls, bool _stableSort, bool _throwOnOom, rowcount_t initialSize) : activity(_activity)
+CThorExpandingRowArray::CThorExpandingRowArray(CActivityBase &_activity, IRowInterfaces *_rowIf, bool _allowNulls, bool _stableSort, bool _throwOnOom, rowidx_t initialSize) : activity(_activity)
 {
     init(initialSize, _stableSort);
     setup(_rowIf, _allowNulls, _stableSort, _throwOnOom);
@@ -512,7 +509,7 @@ void CThorExpandingRowArray::setup(IRowInterfaces *_rowIf, bool _allowNulls, boo
 
 void CThorExpandingRowArray::clearRows()
 {
-    for (rowcount_t i = 0; i < numRows; i++)
+    for (rowidx_t i = 0; i < numRows; i++)
         ReleaseThorRow(rows[i]);
     numRows = 0;
 }
@@ -536,8 +533,8 @@ void CThorExpandingRowArray::swap(CThorExpandingRowArray &other)
     bool otherAllowNulls = other.allowNulls;
     bool otherStableSort = other.stableSort;
     bool otherThrowOnOom = other.throwOnOom;
-    rowcount_t otherMaxRows = other.maxRows;
-    rowcount_t otherNumRows = other.numRows;
+    rowidx_t otherMaxRows = other.maxRows;
+    rowidx_t otherNumRows = other.numRows;
 
     other.rowManager = rowManager;
     other.setup(rowIf, allowNulls, stableSort, throwOnOom);
@@ -554,7 +551,7 @@ void CThorExpandingRowArray::swap(CThorExpandingRowArray &other)
     numRows = otherNumRows;
 }
 
-void CThorExpandingRowArray::transferRows(rowcount_t & outNumRows, const void * * & outRows)
+void CThorExpandingRowArray::transferRows(rowidx_t & outNumRows, const void * * & outRows)
 {
     outNumRows = numRows;
     outRows = rows;
@@ -579,13 +576,13 @@ void CThorExpandingRowArray::transferFrom(CThorSpillableRowArray &donor)
 	transferFrom((CThorExpandingRowArray &)donor);
 }
 
-void CThorExpandingRowArray::removeRows(rowcount_t start, rowcount_t n)
+void CThorExpandingRowArray::removeRows(rowidx_t start, rowidx_t n)
 {
     assertex(numRows-start >= n);
     assertex(!n || rows);
     if (rows)
     {
-        for (rowcount_t i = start; i < start+n; i++)
+        for (rowidx_t i = start; i < start+n; i++)
             ReleaseThorRow(rows[i]);
         //firstRow = 0;
         numRows -= n;
@@ -600,12 +597,12 @@ void CThorExpandingRowArray::clearUnused()
         memset(rows+numRows, 0, (maxRows-numRows) * sizeof(void *));
 }
 
-bool CThorExpandingRowArray::ensure(rowcount_t requiredRows)
+bool CThorExpandingRowArray::ensure(rowidx_t requiredRows)
 {
     OwnedConstThorRow newStableSortTmp;
     OwnedConstThorRow newRows = allocateNewRows(requiredRows, newStableSortTmp);
     if (!newRows)
-        throw MakeActivityException(&activity, 0, "Out of memory, allocating row array, had %"RCPF"d, trying to allocate %"RCPF"d elements", ordinality(), requiredRows);
+        throw MakeActivityException(&activity, 0, "Out of memory, allocating row array, had %"RIPF"d, trying to allocate %"RIPF"d elements", ordinality(), requiredRows);
 
     const void **oldRows = rows;
     void **oldStableSortTmp = stableSortTmp;
@@ -627,7 +624,7 @@ void CThorExpandingRowArray::sort(ICompare &compare, unsigned maxCores)
         doSort(numRows, (void **const)rows, compare, maxCores);
 }
 
-void CThorExpandingRowArray::reorder(rowcount_t start, rowcount_t num, unsigned *neworder)
+void CThorExpandingRowArray::reorder(rowidx_t start, rowidx_t num, unsigned *neworder)
 {
     if (start>=numRows)
         return;
@@ -671,19 +668,18 @@ bool CThorExpandingRowArray::checkSorted(ICompare *icmp)
     return true;
 }
 
-IRowStream *CThorExpandingRowArray::createRowStream(rowcount_t start, rowcount_t num, bool streamOwns)
+IRowStream *CThorExpandingRowArray::createRowStream(rowidx_t start, rowidx_t num, bool streamOwns)
 {
     class CStream : public CSimpleInterface, implements IRowStream
     {
-        rowcount_t pos;
         CThorExpandingRowArray &parent;
+        rowidx_t pos, lastRow;
         bool owns;
-        rowcount_t lastRow;
 
     public:
         IMPLEMENT_IINTERFACE_USING(CSimpleInterface);
 
-        CStream(CThorExpandingRowArray &_parent, rowcount_t firstRow, rowcount_t _lastRow, bool _owns)
+        CStream(CThorExpandingRowArray &_parent, rowidx_t firstRow, rowidx_t _lastRow, bool _owns)
             : parent(_parent), pos(firstRow), lastRow(_lastRow), owns(_owns)
         {
         }
@@ -703,8 +699,8 @@ IRowStream *CThorExpandingRowArray::createRowStream(rowcount_t start, rowcount_t
 
     if (start>ordinality())
         start = ordinality();
-    rowcount_t lastRow;
-    if ((num==(rowcount_t)-1)||(start+num>ordinality()))
+    rowidx_t lastRow;
+    if ((num==(rowidx_t)-1)||(start+num>ordinality()))
         lastRow = ordinality();
     else
         lastRow = start+num;
@@ -744,7 +740,7 @@ void CThorExpandingRowArray::partition(ICompare &compare, unsigned num, Unsigned
 
 offset_t CThorExpandingRowArray::serializedSize()
 {
-    rowcount_t c = ordinality();
+    rowidx_t c = ordinality();
     assertex(serializer);
     offset_t total = 0;
     for (unsigned i=0; i<c; i++)
@@ -760,10 +756,10 @@ void CThorExpandingRowArray::serialize(IRowSerializerTarget &out)
 {
     bool warnnull = true;
     assertex(serializer);
-    rowcount_t n = ordinality();
+    rowidx_t n = ordinality();
     if (n)
     {
-        for (rowcount_t i = 0; i < n; i++)
+        for (rowidx_t i = 0; i < n; i++)
         {
             const void *row = query(i);
             if (row)
@@ -787,10 +783,10 @@ void CThorExpandingRowArray::serialize(MemoryBuffer &mb)
     {
         unsigned short guard = 0x7631;
         mb.append(guard);
-        rowcount_t n = ordinality();
+        rowidx_t n = ordinality();
         if (n)
         {
-            for (rowcount_t i = 0; i < n; i++)
+            for (rowidx_t i = 0; i < n; i++)
             {
                 const void *row = query(i);
                 bool isnull = (row==NULL);
@@ -895,7 +891,7 @@ void CThorSpillableRowArray::unregisterWriteCallback(IWritePosCallback &cb)
     writeCallbacks.zap(cb);
 }
 
-CThorSpillableRowArray::CThorSpillableRowArray(CActivityBase &activity, IRowInterfaces *rowIf, bool allowNulls, bool stable, rowcount_t initialSize, size32_t _commitDelta)
+CThorSpillableRowArray::CThorSpillableRowArray(CActivityBase &activity, IRowInterfaces *rowIf, bool allowNulls, bool stable, rowidx_t initialSize, size32_t _commitDelta)
     : CThorExpandingRowArray(activity, rowIf, false, stable, false, initialSize), commitDelta(_commitDelta)
 {
     commitRows = 0;
@@ -909,7 +905,7 @@ CThorSpillableRowArray::~CThorSpillableRowArray()
 
 void CThorSpillableRowArray::clearRows()
 {
-    for (rowcount_t i = firstRow; i < numRows; i++)
+    for (rowidx_t i = firstRow; i < numRows; i++)
         ReleaseThorRow(rows[i]);
     numRows = 0;
     firstRow = 0;
@@ -922,7 +918,7 @@ void CThorSpillableRowArray::kill()
     CThorExpandingRowArray::kill();
 }
 
-bool CThorSpillableRowArray::ensure(rowcount_t requiredRows)
+bool CThorSpillableRowArray::ensure(rowidx_t requiredRows)
 {
     //Only the writer is allowed to reallocate rows (otherwise append can't be optimized), so rows is valid outside the lock
 
@@ -962,16 +958,16 @@ void CThorSpillableRowArray::sort(ICompare &compare, unsigned maxCores)
     }
 }
 
-unsigned CThorSpillableRowArray::save(IFile &iFile, rowcount_t watchRecNum, offset_t *watchFilePosResult)
+unsigned CThorSpillableRowArray::save(IFile &iFile, rowidx_t watchRecNum, offset_t *watchFilePosResult)
 {
-    rowcount_t n = numCommitted();
+    rowidx_t n = numCommitted();
     if (0 == n)
         return 0;
     const void **rows = getBlock(n);
     Owned<IExtRowWriter> writer = createRowWriter(&iFile, rowIf->queryRowSerializer(), rowIf->queryRowAllocator(), allowNulls, false, true);
-    ActPrintLog(&activity, "CThorSpillableRowArray::save %"RCPF"d rows", numRows);
+    ActPrintLog(&activity, "CThorSpillableRowArray::save %"RIPF"d rows", numRows);
     offset_t startPos = writer->getPosition();
-    for (rowcount_t i=0; i < n; i++)
+    for (rowidx_t i=0; i < n; i++)
     {
         const void *row = rows[i];
         assertex(row || allowNulls);
@@ -996,7 +992,7 @@ unsigned CThorSpillableRowArray::save(IFile &iFile, rowcount_t watchRecNum, offs
 
 
 // JCSMORE - these methods are essentially borrowed from RoxieOutputRowArray, would be good to unify
-const void **CThorSpillableRowArray::getBlock(rowcount_t readRows)
+const void **CThorSpillableRowArray::getBlock(rowidx_t readRows)
 {
     dbgassertex(firstRow+readRows <= commitRows);
     return rows + firstRow;
@@ -1029,8 +1025,8 @@ void CThorSpillableRowArray::swap(CThorSpillableRowArray &other)
 {
     CThorSpillableRowArrayLock block(*this);
     CThorExpandingRowArray::swap(other);
-    rowcount_t otherFirstRow = other.firstRow;
-    rowcount_t otherCommitRows = other.commitRows;
+    rowidx_t otherFirstRow = other.firstRow;
+    rowidx_t otherCommitRows = other.commitRows;
 
     other.firstRow = firstRow;
     other.commitRows = commitRows;
@@ -1068,8 +1064,8 @@ protected:
     PointerIArrayOf<CFileOwner> spillFiles;
     Owned<IOutputRowSerializer> serializer;
     RowCollectorFlags diskMemMix;
+    rowcount_t totalRows;
     unsigned spillPriority;
-    unsigned totalRows;
     unsigned overflowCount;
     unsigned maxCores;
     unsigned outStreams;
@@ -1084,7 +1080,7 @@ protected:
     {
         if (rc_allMem == diskMemMix)
             return false;
-        rowcount_t numRows = spillableRows.numCommitted();
+        rowidx_t numRows = spillableRows.numCommitted();
         if (numRows == 0)
             return false;
 
@@ -1240,7 +1236,8 @@ protected:
     {
         spillableRows.kill();
         spillFiles.kill();
-        totalRows = overflowCount = outStreams = 0;
+        totalRows = 0;
+        overflowCount = outStreams = 0;
     }
 public:
     CThorRowCollectorBase(CActivityBase &_activity, IRowInterfaces *_rowIf, ICompare *_iCompare, bool _isStable, RowCollectorFlags _diskMemMix, unsigned _spillPriority)
@@ -1250,8 +1247,7 @@ public:
     {
         preserveGrouping = false;
         totalRows = 0;
-        overflowCount = 0;
-        outStreams = 0;
+        overflowCount = outStreams = 0;
         mmRegistered = false;
         if (rc_allMem == diskMemMix)
             spillPriority = SPILL_PRIORITY_DISABLE; // all mem, implies no spilling

+ 28 - 28
thorlcr/thorutil/thmem.hpp

@@ -241,16 +241,16 @@ protected:
     const void **rows;
     void **stableSortTmp;
     bool stableSort, throwOnOom, allowNulls;
-    rowcount_t maxRows;  // Number of rows that can fit in the allocated memory.
-    rowcount_t numRows;  // rows that have been added can only be updated by writing thread.
+    rowidx_t maxRows;  // Number of rows that can fit in the allocated memory.
+    rowidx_t numRows;  // rows that have been added can only be updated by writing thread.
 
-    void init(rowcount_t initialSize, bool stable);
-    const void *allocateNewRows(rowcount_t requiredRows, OwnedConstThorRow &newStableSortTmp);
+    void init(rowidx_t initialSize, bool stable);
+    const void *allocateNewRows(rowidx_t requiredRows, OwnedConstThorRow &newStableSortTmp);
     void serialize(IRowSerializerTarget &out);
     void doSort(unsigned n, void **const rows, ICompare &compare, unsigned maxCores);
 
 public:
-    CThorExpandingRowArray(CActivityBase &activity, IRowInterfaces *rowIf, bool allowNulls=false, bool stableSort=false, bool throwOnOom=true, rowcount_t initialSize=InitialSortElements);
+    CThorExpandingRowArray(CActivityBase &activity, IRowInterfaces *rowIf, bool allowNulls=false, bool stableSort=false, bool throwOnOom=true, rowidx_t initialSize=InitialSortElements);
     ~CThorExpandingRowArray();
     CActivityBase &queryActivity() { return activity; }
     // NB: throws error on OOM by default
@@ -260,7 +260,7 @@ public:
     void clearRows();
     void kill();
 
-    void setRow(rowcount_t idx, const void *row) // NB: takes ownership
+    void setRow(rowidx_t idx, const void *row) // NB: takes ownership
     {
         OwnedConstThorRow _row = row;
         assertex(idx < maxRows);
@@ -282,13 +282,13 @@ public:
         rows[numRows++] = row;
         return true;
     }
-    inline const void *query(rowcount_t i) const
+    inline const void *query(rowidx_t i) const
     {
         if (i>=numRows)
             return NULL;
         return rows[i];
     }
-    inline const void *get(rowcount_t i) const
+    inline const void *get(rowidx_t i) const
     {
         if (i>=numRows)
             return NULL;
@@ -297,7 +297,7 @@ public:
             LinkThorRow(row);
         return row;
     }
-    inline const void *getClear(rowcount_t i)
+    inline const void *getClear(rowidx_t i)
     {
         if (i>=numRows)
             return NULL;
@@ -305,7 +305,7 @@ public:
         rows[i] = NULL;
         return row;
     }
-    inline rowcount_t ordinality() const { return numRows; }
+    inline rowidx_t ordinality() const { return numRows; }
 
     inline const void **getRowArray() { return rows; }
     void swap(CThorExpandingRowArray &src);
@@ -314,18 +314,18 @@ public:
         kill();
         swap(from);
     }
-    void transferRows(rowcount_t & outNumRows, const void * * & outRows);
+    void transferRows(rowidx_t & outNumRows, const void * * & outRows);
     void transferFrom(CThorExpandingRowArray &src); 
     void transferFrom(CThorSpillableRowArray &src);
-    void removeRows(rowcount_t start, rowcount_t n);
+    void removeRows(rowidx_t start, rowidx_t n);
     void clearUnused();
     void sort(ICompare &compare, unsigned maxCores);
-    void reorder(rowcount_t start, rowcount_t num, unsigned *neworder);
+    void reorder(rowidx_t start, rowidx_t num, unsigned *neworder);
 
     bool equal(ICompare *icmp, CThorExpandingRowArray &other);
     bool checkSorted(ICompare *icmp);
 
-    IRowStream *createRowStream(rowcount_t start=0, rowcount_t num=(rowcount_t)-1, bool streamOwns=true);
+    IRowStream *createRowStream(rowidx_t start=0, rowidx_t num=(rowidx_t)-1, bool streamOwns=true);
 
     void partition(ICompare &compare, unsigned num, UnsignedArray &out); // returns num+1 points
 
@@ -337,25 +337,25 @@ public:
     void deserialize(size32_t sz, const void *buf);
     void deserializeExpand(size32_t sz, const void *data);
 
-    virtual bool ensure(rowcount_t requiredRows);
+    virtual bool ensure(rowidx_t requiredRows);
 };
 
 interface IWritePosCallback : extends IInterface
 {
-    virtual rowcount_t queryRecordNumber() = 0;
+    virtual rowidx_t queryRecordNumber() = 0;
     virtual void filePosition(offset_t pos) = 0;
 };
 
 class graph_decl CThorSpillableRowArray : private CThorExpandingRowArray
 {
     const size32_t commitDelta;  // How many rows need to be written before they are added to the committed region?
-    rowcount_t firstRow; // Only rows firstRow..numRows are considered initialized.  Only read/write within cs.
-    rowcount_t commitRows;  // can only be updated by writing thread within a critical section
+    rowidx_t firstRow; // Only rows firstRow..numRows are considered initialized.  Only read/write within cs.
+    rowidx_t commitRows;  // can only be updated by writing thread within a critical section
     mutable CriticalSection cs;
     ICopyArrayOf<IWritePosCallback> writeCallbacks;
 
 protected:
-    virtual bool ensure(rowcount_t requiredRows);
+    virtual bool ensure(rowidx_t requiredRows);
 
 public:
 
@@ -368,7 +368,7 @@ public:
         inline ~CThorSpillableRowArrayLock() { rows.unlock(); }
     };
 
-    CThorSpillableRowArray(CActivityBase &activity, IRowInterfaces *rowIf, bool allowNulls=false, bool stableSort=false, rowcount_t initialSize=InitialSortElements, size32_t commitDelta=CommitStep);
+    CThorSpillableRowArray(CActivityBase &activity, IRowInterfaces *rowIf, bool allowNulls=false, bool stableSort=false, rowidx_t initialSize=InitialSortElements, size32_t commitDelta=CommitStep);
     ~CThorSpillableRowArray();
     // NB: throwOnOom false
     void setup(IRowInterfaces *rowIf, bool allowNulls=false, bool stableSort=false, bool throwOnOom=false)
@@ -400,17 +400,17 @@ public:
     }
 
     //The following can be accessed from the reader without any need to lock
-    inline const void *query(rowcount_t i) const
+    inline const void *query(rowidx_t i) const
     {
         CThorSpillableRowArrayLock block(*this);
         return CThorExpandingRowArray::query(i);
     }
-    inline const void *get(rowcount_t i) const
+    inline const void *get(rowidx_t i) const
     {
         CThorSpillableRowArrayLock block(*this);
         return CThorExpandingRowArray::get(i);
     }
-    inline const void *getClear(rowcount_t i)
+    inline const void *getClear(rowidx_t i)
     {
         CThorSpillableRowArrayLock block(*this);
         return CThorExpandingRowArray::getClear(i);
@@ -418,17 +418,17 @@ public:
 
     //A thread calling the following functions must own the lock, or guarantee no other thread will access
     void sort(ICompare & compare, unsigned maxcores);
-    unsigned save(IFile &file, rowcount_t watchRecNum=(rowcount_t)-1, offset_t *watchFilePosResult=NULL);
-    const void **getBlock(rowcount_t readRows);
-    inline void noteSpilled(rowcount_t spilledRows)
+    unsigned save(IFile &file, rowidx_t watchRecNum=(rowidx_t)-1, offset_t *watchFilePosResult=NULL);
+    const void **getBlock(rowidx_t readRows);
+    inline void noteSpilled(rowidx_t spilledRows)
     {
         firstRow += spilledRows;
     }
 
     //The block returned is only valid until the critical section is released
 
-    inline rowcount_t firstCommitted() const { return firstRow; }
-    inline rowcount_t numCommitted() const { return commitRows - firstRow; }
+    inline rowidx_t firstCommitted() const { return firstRow; }
+    inline rowidx_t numCommitted() const { return commitRows - firstRow; }
 
     //Locking functions - use CThorSpillableRowArrayLock above
     inline void lock() const { cs.enter(); }