
Merge branch 'candidate-8.6.x'

Signed-off-by: Richard Chapman <rchapman@hpccsystems.com>
Richard Chapman 3 years ago
commit a65971c0db
91 changed files with 1412 additions and 912 deletions
  1. .github/workflows/build-vcpkg.yml (+8 -8)
  2. dali/base/dautils.cpp (+0 -275)
  3. dali/base/dautils.hpp (+0 -2)
  4. dali/dfu/dfurun.cpp (+3 -3)
  5. dali/dfu/dfuwu.cpp (+3 -1)
  6. dali/dfu/dfuwu.hpp (+1 -1)
  7. dali/ft/daft.hpp (+1 -1)
  8. dali/ft/daftprogress.cpp (+2 -2)
  9. dali/ft/daftprogress.hpp (+2 -2)
  10. dali/ft/filecopy.cpp (+12 -2)
  11. dali/ft/filecopy.ipp (+2 -0)
  12. dali/ft/ftbase.cpp (+10 -2)
  13. dali/ft/ftbase.ipp (+2 -0)
  14. dali/ft/fttransform.cpp (+12 -1)
  15. dali/ft/fttransform.hpp (+1 -0)
  16. dali/ft/fttransform.ipp (+3 -0)
  17. docs/EN_US/ContainerizedHPCC/ContainerizedMods/ConfigureValues.xml (+132 -25)
  18. docs/EN_US/HPCCClientTools/CT_Mods/CT_ECL_CLI.xml (+138 -1)
  19. ecl/eclagent/eclagent.cpp (+5 -3)
  20. ecl/eclcc/CMakeLists.txt (+22 -20)
  21. ecl/eclcc/eclcc.cpp (+3 -1)
  22. ecl/hql/hqlatoms.cpp (+2 -0)
  23. ecl/hql/hqlatoms.hpp (+1 -0)
  24. ecl/hqlcpp/hqlcpp.cpp (+6 -0)
  25. ecl/hqlcpp/hqlcpp.ipp (+3 -2)
  26. ecl/hqlcpp/hqlecl.cpp (+2 -2)
  27. ecl/hqlcpp/hqlhtcpp.cpp (+56 -26)
  28. ecl/hqlcpp/hqlstmt.cpp (+6 -0)
  29. ecl/hqlcpp/hqlstmt.hpp (+1 -0)
  30. ecl/hthor/CMakeLists.txt (+26 -24)
  31. ecl/hthor/hthor.cpp (+4 -1)
  32. esp/clients/ws_dfsclient/ws_dfsclient.cpp (+308 -2)
  33. esp/clients/ws_dfsclient/ws_dfsclient.hpp (+7 -0)
  34. esp/scm/ldapenvironment.ecm (+7 -2)
  35. esp/services/ldapenvironment/ldapenvironmentService.cpp (+53 -59)
  36. esp/services/ldapenvironment/ldapenvironmentService.hpp (+8 -1)
  37. esp/services/ws_workunits/ws_workunitsHelpers.cpp (+4 -4)
  38. esp/services/ws_workunits/ws_workunitsHelpers.hpp (+4 -4)
  39. esp/services/ws_workunits/ws_workunitsService.cpp (+2 -2)
  40. esp/src/eclwatch/HPCCPlatformWidget.js (+3 -1)
  41. esp/src/eclwatch/LockDialogWidget.js (+8 -3)
  42. esp/src/package-lock.json (+6 -6)
  43. esp/src/package.json (+1 -1)
  44. esp/src/src-react/components/ECLPlayground.tsx (+23 -11)
  45. esp/src/src-react/components/Workunits.tsx (+7 -2)
  46. esp/src/src-react/components/forms/Fields.tsx (+8 -3)
  47. esp/src/src-react/hooks/activity.ts (+1 -1)
  48. esp/src/src-react/hooks/grid.tsx (+13 -10)
  49. esp/src/src-react/hooks/metrics.ts (+2 -2)
  50. esp/src/src/ECLArchiveWidget.ts (+24 -24)
  51. esp/src/src/ESPSearch.ts (+1 -1)
  52. esp/src/src/ws_access.ts (+2 -2)
  53. helm/hpcc/values.schema.json (+10 -0)
  54. initfiles/bin/check_executes (+3 -3)
  55. roxie/ccd/CMakeLists.txt (+29 -27)
  56. roxie/ccd/ccd.hpp (+2 -0)
  57. roxie/ccd/ccdcontext.cpp (+4 -3)
  58. roxie/ccd/ccddali.cpp (+2 -1)
  59. roxie/ccd/ccdfile.cpp (+23 -26)
  60. roxie/ccd/ccdmain.cpp (+4 -0)
  61. roxie/ccd/ccdstate.cpp (+3 -0)
  62. thorlcr/activities/diskread/thdiskread.cpp (+6 -4)
  63. thorlcr/activities/fetch/thfetch.cpp (+17 -7)
  64. thorlcr/activities/hashdistrib/thhashdistrib.cpp (+1 -5)
  65. thorlcr/activities/hashdistrib/thhashdistribslave.cpp (+4 -4)
  66. thorlcr/activities/indexread/thindexread.cpp (+35 -29)
  67. thorlcr/activities/keydiff/thkeydiff.cpp (+2 -4)
  68. thorlcr/activities/keyedjoin/thkeyedjoin-legacy.cpp (+12 -14)
  69. thorlcr/activities/keyedjoin/thkeyedjoin.cpp (+14 -10)
  70. thorlcr/activities/keyedjoin/thkeyedjoinslave.cpp (+1 -1)
  71. thorlcr/activities/keypatch/thkeypatch.cpp (+2 -5)
  72. thorlcr/activities/lookupjoin/thlookupjoinslave.cpp (+2 -2)
  73. thorlcr/activities/merge/thmergeslave.cpp (+1 -1)
  74. thorlcr/activities/msort/thmsort.cpp (+1 -2)
  75. thorlcr/activities/nsplitter/thnsplitterslave.cpp (+1 -1)
  76. thorlcr/activities/thactivityutil.cpp (+7 -4)
  77. thorlcr/activities/thdiskbase.cpp (+51 -39)
  78. thorlcr/activities/thdiskbase.ipp (+2 -0)
  79. thorlcr/activities/thdiskbaseslave.cpp (+2 -2)
  80. thorlcr/graph/thgraph.cpp (+1 -1)
  81. thorlcr/graph/thgraphmaster.cpp (+108 -39)
  82. thorlcr/graph/thgraphmaster.ipp (+5 -2)
  83. thorlcr/graph/thgraphslave.cpp (+6 -0)
  84. thorlcr/master/thmastermain.cpp (+0 -11)
  85. thorlcr/mfilemanager/thmfilemanager.cpp (+7 -14)
  86. thorlcr/msort/tsorts.cpp (+2 -2)
  87. thorlcr/slave/slavmain.cpp (+1 -1)
  88. thorlcr/slave/thslavemain.cpp (+5 -13)
  89. thorlcr/thorutil/thmem.cpp (+2 -2)
  90. thorlcr/thorutil/thormisc.cpp (+68 -82)
  91. thorlcr/thorutil/thormisc.hpp (+5 -5)

+ 8 - 8
.github/workflows/build-vcpkg.yml

@@ -72,7 +72,7 @@ jobs:
           echo "Checkout to $Env:GITHUB_WORKSPACE"
 
       - name: Ubuntu Dependencies
-        if: ${{ needs.pre_job.outputs.should_skip != 'true' && matrix.os == 'ubuntu-20.04' }}
+        if: ${{ needs.check-skip.outputs.should_skip != 'true' && matrix.os == 'ubuntu-20.04' }}
         shell: "bash"
         run: |
           sudo apt-get update -y
@@ -88,33 +88,33 @@ jobs:
           sudo apt-get install -y nodejs
 
       - name: OSX Dependencies
-        if: ${{ needs.pre_job.outputs.should_skip != 'true' && matrix.os == 'macos-11' }}
+        if: ${{ needs.check-skip.outputs.should_skip != 'true' && matrix.os == 'macos-11' }}
         shell: "bash"
         run: |
           brew install bison flex pkg-config automake libtool cmake
 
       - name: "Remove builtin vcpkg"
-        if: ${{ needs.pre_job.outputs.should_skip != 'true' }}
+        if: ${{ needs.check-skip.outputs.should_skip != 'true' }}
         working-directory: .
         shell: "bash"
         run: |
           ${{ matrix.sudo }} rm -rf "$VCPKG_INSTALLATION_ROOT"
 
       - name: Check out source code
-        if: ${{ needs.pre_job.outputs.should_skip != 'true' }}
+        if: ${{ needs.check-skip.outputs.should_skip != 'true' }}
         uses: actions/checkout@v2
         with:
           submodules: recursive
 
       - name: "vcpkg Bootstrap"
-        if: ${{ needs.pre_job.outputs.should_skip != 'true' }}
+        if: ${{ needs.check-skip.outputs.should_skip != 'true' }}
         working-directory: .
         shell: "bash"
         run: |
           ./vcpkg/bootstrap-vcpkg.sh
 
       - name: "Setup NuGet credentials"
-        if: ${{ needs.pre_job.outputs.should_skip != 'true' }}
+        if: ${{ needs.check-skip.outputs.should_skip != 'true' }}
         working-directory: ./vcpkg
         shell: "bash"
         run: |
@@ -130,7 +130,7 @@ jobs:
             -source "https://nuget.pkg.github.com/hpcc-systems/index.json"
 
       - name: "Generate HPCC-Platform Build Files"
-        if: ${{ needs.pre_job.outputs.should_skip != 'true' }}
+        if: ${{ needs.check-skip.outputs.should_skip != 'true' }}
         working-directory: .
         shell: "bash"
         run: |
@@ -139,7 +139,7 @@ jobs:
           cmake .. -DCMAKE_TOOLCHAIN_FILE=../vcpkg/scripts/buildsystems/vcpkg.cmake ${{ matrix.cmake_config_options }}
 
       - name: "Build HPCC-Platform"
-        if: ${{ needs.pre_job.outputs.should_skip != 'true' }}
+        if: ${{ needs.check-skip.outputs.should_skip != 'true' }}
         working-directory: ./build
         shell: "bash"
         run: |

+ 0 - 275
dali/base/dautils.cpp

@@ -3231,281 +3231,6 @@ void safeChangeModeWrite(IRemoteConnection *conn,const char *name,bool &reload,
     }
 }
 
-
-class CLocalOrDistributedFile: implements ILocalOrDistributedFile, public CInterface
-{
-    bool fileExists;
-    Owned<IDistributedFile> dfile;
-    CDfsLogicalFileName lfn;    // set if localpath but prob not useful
-    StringAttr localpath;
-    StringAttr fileDescPath;
-public:
-    IMPLEMENT_IINTERFACE;
-    CLocalOrDistributedFile()
-    {
-        fileExists = false;
-    }
-
-    virtual const char *queryLogicalName() override
-    {
-        return lfn.get();
-    }
-
-    virtual IDistributedFile * queryDistributedFile() override
-    { 
-        return dfile.get(); 
-    }
-
-    bool init(const char *fname,IUserDescriptor *user,bool onlylocal,bool onlydfs, bool write, bool isPrivilegedUser, const StringArray *clusters)
-    {
-        fileExists = false;
-        if (!onlydfs)
-            lfn.allowOsPath(true);
-        if (!lfn.setValidate(fname))
-            return false;
-        if (!onlydfs)
-        {
-            bool gotlocal = true;
-            if (isAbsolutePath(fname)||(stdIoHandle(fname)>=0)) 
-                localpath.set(fname);
-            else if (!strstr(fname,"::"))
-            {
-                // treat it as a relative file
-                StringBuffer fn;
-                localpath.set(makeAbsolutePath(fname,fn).str());
-            }
-            else if (!lfn.isExternal())
-                gotlocal = false;
-            if (gotlocal)
-            {
-                if (!write && !onlylocal) // MORE - this means the dali access checks not happening... maybe that's ok?
-                    dfile.setown(queryDistributedFileDirectory().lookup(lfn,user,write,false,false,nullptr,isPrivilegedUser)); // MORE - if dFile is not null then arguably exists should be true
-                Owned<IFile> file = getPartFile(0,0);
-                if (file.get())
-                {
-                    fileExists = file->exists();
-                    return fileExists || write;
-                }
-            }
-        }
-        if (!onlylocal)
-        {
-            if (lfn.isExternal())
-            {
-                Owned<IFileDescriptor> fDesc = createExternalFileDescriptor(lfn.get());
-                dfile.setown(queryDistributedFileDirectory().createExternal(fDesc, lfn.get()));
-                Owned<IFile> file = getPartFile(0,0);
-                if (file.get())
-                    fileExists = file->exists();
-                if (write && lfn.isExternal()&&(dfile->numParts()==1))   // if it is writing to an external file then don't return distributed
-                    dfile.clear();
-                return true;
-            }
-            else
-            {
-                dfile.setown(queryDistributedFileDirectory().lookup(lfn,user,write,false,false,nullptr,isPrivilegedUser));
-                if (dfile.get())
-                    return true;
-            }
-
-            StringBuffer dir;
-            unsigned stripeNum = 0;
-#ifdef _CONTAINERIZED
-            StringBuffer cluster;
-            if (clusters)
-            {
-                if (clusters->ordinality()>1)
-                    throw makeStringExceptionV(0, "Container mode does not yet support output to multiple clusters while writing file %s)", fname);
-                cluster.append(clusters->item(0));
-            }
-            else
-                getDefaultStoragePlane(cluster);
-            Owned<IStoragePlane> plane = getDataStoragePlane(cluster, true);
-            dir.append(plane->queryPrefix());
-            unsigned numStripedDevices = plane->numDevices();
-            stripeNum = calcStripeNumber(0, lfn.get(), numStripedDevices);
-#endif
-            StringBuffer descPath;
-            makePhysicalDirectory(descPath, lfn.get(), 0, DFD_OSdefault, dir);
-            fileDescPath.set(descPath);
-
-            // MORE - should we create the IDistributedFile here ready for publishing (and/or to make sure it's locked while we write)?
-            StringBuffer physicalPath;
-            makePhysicalPartName(lfn.get(), 1, 1, physicalPath, 0, DFD_OSdefault, dir, false, stripeNum); // more - may need to override path for roxie
-            localpath.set(physicalPath);
-            fileExists = (dfile != NULL);
-            return write;
-        }
-        return false;
-    }
-
-    virtual IFileDescriptor *getFileDescriptor() override
-    {
-        if (dfile.get())
-            return dfile->getFileDescriptor();
-        Owned<IFileDescriptor> fileDesc = createFileDescriptor();
-        fileDesc->setTraceName(lfn.get());
-        StringBuffer dir;
-        if (localpath.isEmpty()) { // e.g. external file
-            StringBuffer tail;
-            IException *e=NULL;
-            bool iswin=
-#ifdef _WIN32
-                true;
-#else
-                false;
-#endif
-            if (!lfn.getExternalPath(dir,tail,iswin,&e)) {
-                if (e)
-                    throw e;
-                return NULL;
-            }
-        }
-        else 
-            splitDirTail(fileDescPath,dir);
-        fileDesc->setDefaultDir(dir.str());
-        RemoteFilename rfn;
-        getPartFilename(rfn,0,0);
-        fileDesc->setPart(0,rfn);
-        fileDesc->queryPartDiskMapping(0).defaultCopies = DFD_DefaultCopies;
-        return fileDesc.getClear();
-    }
-
-    virtual bool getModificationTime(CDateTime &dt) override
-    {
-        if (dfile.get())
-            return dfile->getModificationTime(dt);
-        Owned<IFile> file = getPartFile(0,0);
-        if (file.get()) {
-            CDateTime dt;
-            return file->getTime(NULL,&dt,NULL);
-        }
-        return false;
-    }
-
-    virtual unsigned numParts() override 
-    {
-        if (dfile.get()) 
-            return dfile->numParts();
-        return 1;
-    }
-
-    virtual unsigned numPartCopies(unsigned partnum) override
-    {
-        if (dfile.get()) 
-            return dfile->queryPart(partnum).numCopies();
-        return 1;
-    }
-    
-    virtual IFile *getPartFile(unsigned partnum,unsigned copy) override
-    {
-        RemoteFilename rfn;
-        if ((partnum==0)&&(copy==0))
-            return createIFile(getPartFilename(rfn,partnum,copy));
-        return NULL;
-    }
-    
-    virtual void getDirAndFilename(StringBuffer &dir, StringBuffer &filename) override
-    {
-        if (dfile.get())
-        {
-            dir.append(dfile->queryDefaultDir());
-            splitFilename(localpath, nullptr, nullptr, &filename, &filename);
-        }
-        else if (localpath.isEmpty())
-        {
-            RemoteFilename rfn;
-            lfn.getExternalFilename(rfn);
-            StringBuffer fullPath;
-            rfn.getLocalPath(fullPath);
-            splitFilename(localpath, nullptr, &dir, &filename, &filename);
-        }
-        else
-        {
-            dir.append(fileDescPath);
-            splitFilename(localpath, nullptr, nullptr, &filename, &filename);
-        }
-    }
-
-    virtual RemoteFilename &getPartFilename(RemoteFilename &rfn, unsigned partnum,unsigned copy) override
-    {
-        if (dfile.get()) 
-            dfile->queryPart(partnum).getFilename(rfn,copy);
-        else if (localpath.isEmpty())
-            lfn.getExternalFilename(rfn);
-        else
-            rfn.setRemotePath(localpath);
-        return rfn;
-    }
-
-    StringBuffer &getPartFilename(StringBuffer &path, unsigned partnum,unsigned copy)
-    {
-        RemoteFilename rfn;
-        if (dfile.get()) 
-            dfile->queryPart(partnum).getFilename(rfn,copy);
-        else if (localpath.isEmpty())
-            lfn.getExternalFilename(rfn);
-        else 
-            path.append(localpath);
-        if (rfn.isLocal())
-            rfn.getLocalPath(path);
-        else
-            rfn.getRemotePath(path);
-        return path;
-    }
-
-    virtual bool getPartCrc(unsigned partnum, unsigned &crc) override
-    {
-        if (dfile.get())  
-            return dfile->queryPart(partnum).getCrc(crc);
-        Owned<IFile> file = getPartFile(0,0);
-        if (file.get()) {
-            crc = file->getCRC();
-            return true;
-        }
-        return false;
-    }
-
-    virtual offset_t getPartFileSize(unsigned partnum) override
-    {
-        if (dfile.get()) 
-            return dfile->queryPart(partnum).getFileSize(true,false);
-        Owned<IFile> file = getPartFile(0,0);
-        if (file.get())
-            return file->size();
-        return (offset_t)-1;
-    }
-
-    virtual offset_t getFileSize() override
-    {
-        if (dfile.get())
-            dfile->getFileSize(true,false);
-        offset_t ret = 0;
-        unsigned np = numParts();
-        for (unsigned i = 0;i<np;i++)
-            ret += getPartFileSize(i);
-        return ret;
-    }
-
-    virtual bool exists() const override
-    {
-        return fileExists;
-    }
-
-    virtual bool isExternal() const override
-    {
-        return lfn.isExternal();
-    }
-};
-
-ILocalOrDistributedFile* createLocalOrDistributedFile(const char *fname,IUserDescriptor *user,bool onlylocal,bool onlydfs, bool iswrite, bool isPrivilegedUser, const StringArray *clusters)
-{
-    Owned<CLocalOrDistributedFile> ret = new CLocalOrDistributedFile();
-    if (ret->init(fname,user,onlylocal,onlydfs,iswrite,isPrivilegedUser,clusters))
-        return ret.getClear();
-    return NULL;
-}
-
 static bool transactionLoggingOn=false;
 static cycle_t slowTransactionThreshold=0;
 const bool &queryTransactionLogging() { return transactionLoggingOn; }

+ 0 - 2
dali/base/dautils.hpp

@@ -449,8 +449,6 @@ interface ILocalOrDistributedFile: extends IInterface
     virtual bool isExternal() const = 0;
 };
 
-extern da_decl ILocalOrDistributedFile* createLocalOrDistributedFile(const char *fname,IUserDescriptor *user,bool onlylocal,bool onlydfs,bool iswrite, bool isPrivilegedUser, const StringArray *clusters);
-
 typedef __int64 ConnectionId;
 
 struct LockData

+ 3 - 3
dali/dfu/dfurun.cpp

@@ -117,7 +117,7 @@ class CDFUengine: public CInterface, implements IDFUengine
         void displayProgress(unsigned percentDone, unsigned secsLeft, const char * timeLeft,
                                 unsigned __int64 scaledDone, unsigned __int64 scaledTotal, const char * scale,
                                 unsigned kbPerSecondAve, unsigned kbPerSecondRate,
-                                unsigned slavesDone)
+                                unsigned slavesDone, unsigned __int64 numReads, unsigned __int64 numWrites)
         {
             if (repmode==REPbefore)
                 percentDone /= 2;
@@ -125,7 +125,7 @@ class CDFUengine: public CInterface, implements IDFUengine
                 if (repmode==REPduring)
                     percentDone = percentDone/2+50;
             progress->setProgress(percentDone, secsLeft, timeLeft, scaledDone, scaledTotal, scale,
-                                 kbPerSecondAve, kbPerSecondRate, slavesDone, repmode==REPduring);
+                                 kbPerSecondAve, kbPerSecondRate, slavesDone, repmode==REPduring, numReads, numWrites);
         }
         void displaySummary(const char * timeTaken, unsigned kbPerSecond)
         {
@@ -997,7 +997,7 @@ public:
                 numdone++;
                 subfiles.append(dlfnres.get());
                 if ((ctx.level==1)&&ctx.feedback)
-                    ctx.feedback->displayProgress(numtodo?(numdone*100/numtodo):0,0,"unknown",0,0,"",0,0,0);
+                    ctx.feedback->displayProgress(numtodo?(numdone*100/numtodo):0,0,"unknown",0,0,"",0,0,0,0,0);
             }
             // now construct the superfile
             Owned<IDistributedSuperFile> sfile = queryDistributedFileDirectory().createSuperFile(dlfn.get(),ctx.user,true,false);

+ 3 - 1
dali/dfu/dfuwu.cpp

@@ -466,7 +466,7 @@ public:
     void setProgress(   unsigned percentDone, unsigned secsLeft, const char * timeLeft,
                         unsigned __int64 scaledDone, unsigned __int64 scaledTotal, const char * scale,
                         unsigned kbPerSecAve, unsigned kbPerSecRate,
-                        unsigned slavesDone, bool replicating)
+                        unsigned slavesDone, bool replicating, unsigned __int64 numReads, unsigned __int64 numWrites)
     {
         CriticalBlock block(parent->crit);
         queryRoot()->setPropInt("@percentdone",(int)percentDone);
@@ -479,6 +479,8 @@ public:
         queryRoot()->setPropInt("@kbpersec",(int)kbPerSecRate);
         queryRoot()->setPropInt("@slavesdone",(int)slavesDone);
         queryRoot()->setPropInt("@replicating",replicating?1:0);
+        queryRoot()->setPropInt("@numreads",numReads);
+        queryRoot()->setPropInt("@numwrites",numWrites);
         parent->commit();
     }
     void setPercentDone(unsigned percentDone)
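
For reference, a minimal sketch of reading the two new counters back out of the same DFU progress subtree. The attribute names and the IPropertyTree/PROGLOG calls come from hunks in this commit; the wrapper function itself is illustrative.

#include "jptree.hpp"   // IPropertyTree (jlib)
#include "jlog.hpp"     // PROGLOG

// Illustrative reader for the counters persisted by setProgress() above.
void reportDfuIoCounts(IPropertyTree &progressRoot)
{
    unsigned __int64 numReads = progressRoot.getPropInt64("@numreads");
    unsigned __int64 numWrites = progressRoot.getPropInt64("@numwrites");
    PROGLOG("DFU progress: %" I64F "u reads, %" I64F "u writes", numReads, numWrites);
}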

+ 1 - 1
dali/dfu/dfuwu.hpp

@@ -317,7 +317,7 @@ interface IDFUprogress: extends IConstDFUprogress
     virtual void setProgress(unsigned percentDone, unsigned secsLeft, const char * timeLeft,
                              unsigned __int64 scaledDone, unsigned __int64 scaledTotal, const char * scale,
                              unsigned kbPerSecondAve, unsigned kbPerSecondRate,
-                             unsigned slavesDone, bool replicating)=0;
+                             unsigned slavesDone, bool replicating, unsigned __int64 numReads, unsigned __int64 numWrites)=0;
     virtual void setDone(const char * timeTaken, unsigned kbPerSecond, bool set100pc) = 0;
     virtual void setState(DFUstate state) = 0;
     virtual void setTotalNodes(unsigned val) = 0;

+ 1 - 1
dali/ft/daft.hpp

@@ -31,7 +31,7 @@ interface IMultiException;
 
 interface IDaftProgress
 {
-    virtual void onProgress(unsigned __int64 sizeDone, unsigned __int64 totalSize, unsigned numNodes) = 0;          // how much has been done
+    virtual void onProgress(unsigned __int64 sizeDone, unsigned __int64 totalSize, unsigned numNodes, unsigned __int64 numReads, unsigned __int64 numWrites) = 0;          // how much has been done
     virtual void setRange(unsigned __int64 sizeReadBefore, unsigned __int64 totalSize, unsigned totalNodes) = 0;          // how much has been done
 };
 

+ 2 - 2
dali/ft/daftprogress.cpp

@@ -64,7 +64,7 @@ void DaftProgress::formatTime(char * buffer, unsigned secs)
         sprintf(buffer, "%d secs", secs);
 }
 
-void DaftProgress::onProgress(unsigned __int64 sizeDone, unsigned __int64 totalSize, unsigned numNodes)
+void DaftProgress::onProgress(unsigned __int64 sizeDone, unsigned __int64 totalSize, unsigned numNodes, unsigned __int64 numReads, unsigned __int64 numWrites)
 {
     cycle_t nowTime = get_cycles_now();
     savedTime[nextSample] = nowTime;
@@ -91,7 +91,7 @@ void DaftProgress::onProgress(unsigned __int64 sizeDone, unsigned __int64 totalS
         displayProgress((unsigned)(sizeDone*100/totalSize), secsLeft, temp, 
                         sizeDone/scale,totalSize/scale,scaleUnit, 
                         (unsigned)(msGone ? (sizeDone-startSize)/msGone : 0),
-                        (unsigned)(recentTimeDelta ? recentSizeDelta / recentTimeDelta : 0), numNodes);
+                        (unsigned)(recentTimeDelta ? recentSizeDelta / recentTimeDelta : 0), numNodes, numReads, numWrites);
 
         if (sizeDone == totalSize)
         {

+ 2 - 2
dali/ft/daftprogress.hpp

@@ -26,11 +26,11 @@ class DALIFT_API DaftProgress : public IDaftProgress
 public:
     DaftProgress();
 
-    virtual void onProgress(unsigned __int64 sizeDone, unsigned __int64 totalSize, unsigned numNodes);
+    virtual void onProgress(unsigned __int64 sizeDone, unsigned __int64 totalSize, unsigned numNodes, unsigned __int64 numReads, unsigned __int64 numWrites);
     virtual void displayProgress(unsigned percentDone, unsigned secsLeft, const char * timeLeft,
                             unsigned __int64 scaledDone, unsigned __int64 scaledTotal, const char * scale,
                             unsigned kbPerSecondAve, unsigned kbPerSecondRate,
-                            unsigned numNodes) = 0;
+                            unsigned numNodes, unsigned __int64 numReads, unsigned __int64 numWrites) = 0;
     virtual void displaySummary(const char * timeTaken, unsigned kbPerSecond) = 0;
     virtual void setRange(unsigned __int64 sizeReadBefore, unsigned __int64 totalSize, unsigned _totalNodes);
 
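A minimal sketch of a progress sink built against the widened interface; only the signatures shown in the daft.hpp and daftprogress.hpp hunks above are assumed, and the class name and output format are illustrative.

#include <cstdio>
#include "daftprogress.hpp"   // DaftProgress, as declared in the hunk above

// Illustrative progress sink: DaftProgress computes rates/ETA and calls
// displayProgress(), which now also receives the cumulative disk read/write
// operation counts collected by the spray engine.
class ConsoleDaftProgress : public DaftProgress
{
public:
    virtual void displayProgress(unsigned percentDone, unsigned secsLeft, const char * timeLeft,
                                 unsigned __int64 scaledDone, unsigned __int64 scaledTotal, const char * scale,
                                 unsigned kbPerSecondAve, unsigned kbPerSecondRate,
                                 unsigned numNodes, unsigned __int64 numReads, unsigned __int64 numWrites) override
    {
        printf("%u%% (%llu/%llu %s), %s left, %u nodes done, %llu reads, %llu writes\n",
               percentDone, (unsigned long long)scaledDone, (unsigned long long)scaledTotal, scale,
               timeLeft, numNodes, (unsigned long long)numReads, (unsigned long long)numWrites);
    }
    virtual void displaySummary(const char * timeTaken, unsigned kbPerSecond) override
    {
        printf("completed in %s (%u KB/s average)\n", timeTaken, kbPerSecond);
    }
};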

+ 12 - 2
dali/ft/filecopy.cpp

@@ -110,7 +110,6 @@ inline void setCanAccessDirectly(RemoteFilename & file)
 #define FAsize              "@size"
 #define FAcompressedSize    "@compressedSize"
 
-
 const unsigned operatorUpdateFrequency = 5000;      // time between updates in ms
 const unsigned abortCheckFrequency = 20000;         // time between updates in ms
 const unsigned sdsUpdateFrequency = 20000;          // time between updates in ms
@@ -615,6 +614,8 @@ FileSprayer::FileSprayer(IPropertyTree * _options, IPropertyTree * _progress, IR
     calcedInputCRC = false;
     aborting = false;
     totalLengthRead = 0;
+    totalNumReads = 0;
+    totalNumWrites = 0;
     throttleNicSpeed = 0;
     compressedInput = false;
     compressOutput = options->getPropBool(ANcompress);
@@ -2828,7 +2829,10 @@ void FileSprayer::updateProgress(const OutputProgress & newProgress)
     OutputProgress & curProgress = progress.item(newProgress.whichPartition);
 
     totalLengthRead += (newProgress.inputLength - curProgress.inputLength);
+    totalNumReads += (newProgress.numReads - curProgress.numReads);
+    totalNumWrites += (newProgress.numWrites - curProgress.numWrites);
     curProgress.set(newProgress);
+
     if (curProgress.tree)
         curProgress.save(curProgress.tree);
 
@@ -2853,7 +2857,7 @@ void FileSprayer::updateSizeRead()
         unsigned numCompleted = (sizeReadSoFar == sizeToBeRead) ? transferSlaves.ordinality() : numSlavesCompleted;
         if (done || (nowTick - lastOperatorTick >= operatorUpdateFrequency))
         {
-            progressReport->onProgress(sizeReadSoFar, sizeToBeRead, numCompleted);
+            progressReport->onProgress(sizeReadSoFar, sizeToBeRead, numCompleted, totalNumReads, totalNumWrites);
             lastOperatorTick = nowTick;
             progressDone = done;
         }
@@ -3288,6 +3292,7 @@ void FileSprayer::updateTargetProperties()
 
         DistributedFilePropertyLock lock(distributedTarget);
         IPropertyTree &curProps = lock.queryAttributes();
+        curProps.setPropInt64("@numDiskWrites", totalNumWrites);
         if (calcCRC())
             curProps.setPropInt(FAcrc, totalCRC.get());
         curProps.setPropInt64(FAsize, totalLength);
@@ -3463,6 +3468,11 @@ void FileSprayer::updateTargetProperties()
         if (expireDays != -1)
             curProps.setPropInt("@expireDays", expireDays);
     }
+    if (distributedSource)
+    {
+        if (distributedSource->querySuperFile()==nullptr)
+            distributedSource->addAttrValue("@numDiskReads", totalNumReads);
+    }
     if (error)
         throw error.getClear();
 }

+ 2 - 0
dali/ft/filecopy.ipp

@@ -336,6 +336,8 @@ protected:
     bool                    compressOutput;
     bool                    copyCompressed;
     unsigned __int64        totalLengthRead;
+    unsigned __int64        totalNumReads;
+    unsigned __int64        totalNumWrites;
     unsigned                throttleNicSpeed;
     unsigned                lastProgressTick;
     StringAttr              wuid; // used for logging
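
The new totals are maintained with the delta pattern visible in FileSprayer::updateProgress() above; the same pattern in isolation (the struct and member names below are illustrative, not platform API).

#include <cstdint>

// Each partition reports monotonically increasing counters; the sprayer-wide
// totals advance by the difference between the newly reported values and the
// values last recorded for that partition.
struct PartitionProgress
{
    uint64_t inputLength = 0;
    uint64_t numReads = 0;
    uint64_t numWrites = 0;
};

struct SprayTotals
{
    uint64_t totalLengthRead = 0;
    uint64_t totalNumReads = 0;
    uint64_t totalNumWrites = 0;

    void update(PartitionProgress &recorded, const PartitionProgress &reported)
    {
        totalLengthRead += reported.inputLength - recorded.inputLength;
        totalNumReads   += reported.numReads    - recorded.numReads;
        totalNumWrites  += reported.numWrites   - recorded.numWrites;
        recorded = reported;   // corresponds to curProgress.set(newProgress)
    }
};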

+ 10 - 2
dali/ft/ftbase.cpp

@@ -540,13 +540,15 @@ void OutputProgress::reset()
     outputLength = 0;
     hasCompressed = false;
     compressedPartSize = 0;
+    numWrites = 0;
+    numReads = 0;
 }
 
 MemoryBuffer & OutputProgress::deserializeCore(MemoryBuffer & in)
 {
     unsigned _inputCRC, _outputCRC;
     bool hasTime;
-    in.read(status).read(whichPartition).read(hasInputCRC).read(_inputCRC).read(inputLength).read(_outputCRC).read(outputLength).read(hasTime);
+    in.read(status).read(whichPartition).read(hasInputCRC).read(_inputCRC).read(inputLength).read(_outputCRC).read(outputLength).read(hasTime).read(numWrites).read(numReads);
     inputCRC = _inputCRC;
     outputCRC = _outputCRC;
     if (hasTime)
@@ -584,7 +586,7 @@ MemoryBuffer & OutputProgress::serializeCore(MemoryBuffer & out)
     bool hasTime = !resultTime.isNull();
     unsigned _inputCRC = inputCRC;
     unsigned _outputCRC = outputCRC;
-    out.append(status).append(whichPartition).append(hasInputCRC).append(_inputCRC).append(inputLength).append(_outputCRC).append(outputLength).append(hasTime);
+    out.append(status).append(whichPartition).append(hasInputCRC).append(_inputCRC).append(inputLength).append(_outputCRC).append(outputLength).append(hasTime).append(numWrites).append(numReads);
     if (hasTime)
         resultTime.serialize(out);
     return out;
@@ -615,6 +617,8 @@ void OutputProgress::set(const OutputProgress & other)
     resultTime = other.resultTime;
     hasCompressed = other.hasCompressed;
     compressedPartSize = other.compressedPartSize;
+    numWrites = other.numWrites;
+    numReads = other.numReads;
 }
 
 void OutputProgress::restore(IPropertyTree * tree)
@@ -629,6 +633,8 @@ void OutputProgress::restore(IPropertyTree * tree)
     resultTime.setString(tree->queryProp("@modified"));
     hasCompressed = tree->getPropBool("@compressed");
     compressedPartSize = tree->getPropInt64("@compressedPartSize");
+    numWrites = tree->getPropInt64("@numWrites");
+    numReads = tree->getPropInt64("@numReads");
 }
 
 void OutputProgress::save(IPropertyTree * tree)
@@ -647,6 +653,8 @@ void OutputProgress::save(IPropertyTree * tree)
     }
     tree->setPropInt("@compressed", hasCompressed);
     tree->setPropInt64("@compressedPartSize", compressedPartSize);
+    tree->setPropInt64("@numWrites", numWrites);
+    tree->setPropInt64("@numReads", numReads);
 }
 
 
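The change keeps serializeCore() and deserializeCore() symmetric: the two counters are appended at the tail of both, in the same order. The same invariant in an isolated sketch; the struct is illustrative, while MemoryBuffer is the jlib class used in the hunk above.

#include "jbuff.hpp"   // MemoryBuffer (jlib)

// Illustrative mirrored serialize/deserialize pair: new fields must be added
// at the same position on both sides, here at the tail, in the same order.
struct IoCounters
{
    unsigned __int64 numWrites = 0;
    unsigned __int64 numReads = 0;

    MemoryBuffer & serialize(MemoryBuffer &out) const
    {
        return out.append(numWrites).append(numReads);   // writes, then reads
    }
    MemoryBuffer & deserialize(MemoryBuffer &in)
    {
        return in.read(numWrites).read(numReads);        // must mirror serialize()
    }
};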

+ 2 - 0
dali/ft/ftbase.ipp

@@ -110,6 +110,8 @@ public:
     bool            hasInputCRC;
     bool            hasCompressed;
     offset_t        compressedPartSize;
+    stat_type       numWrites;
+    stat_type       numReads;
 
 //Not saved/serialized - should probably be in a Sprayer-only class that contains an outputProgress.
     Owned<IPropertyTree> tree;

+ 12 - 1
dali/ft/fttransform.cpp

@@ -521,7 +521,8 @@ void TransferServer::appendTransformed(unsigned chunkIndex, ITransformer * input
 
     const offset_t startInputOffset = curPartition.inputOffset;
     const offset_t startOutputOffset = curPartition.outputOffset;
-
+    stat_type prevNumWrites =  out->getStatistic(StNumDiskWrites);
+    stat_type prevNumReads = input->getStatistic(StNumDiskReads);
     for (;;)
     {
         unsigned gotLength = input->getBlock(out);
@@ -541,6 +542,12 @@ void TransferServer::appendTransformed(unsigned chunkIndex, ITransformer * input
             curProgress.status = (gotLength == 0) ? OutputProgress::StatusCopied : OutputProgress::StatusActive;
             curProgress.inputLength = input->tell()-startInputOffset;
             curProgress.outputLength = out->tell()-startOutputOffset;
+            stat_type curNumWrites = out->getStatistic(StNumDiskWrites);
+            stat_type curNumReads = input->getStatistic(StNumDiskReads);
+            curProgress.numWrites += (curNumWrites - prevNumWrites);
+            curProgress.numReads += (curNumReads - prevNumReads);
+            prevNumWrites = curNumWrites;
+            prevNumReads = curNumReads;
             if (crcOut)
                 curProgress.outputCRC = crcOut->getCRC();
             if (calcInputCRC)
@@ -688,10 +695,12 @@ void TransferServer::transferChunk(unsigned chunkIndex)
     size32_t fixedTextLength = (size32_t)curPartition.fixedText.length();
     if (fixedTextLength || curPartition.inputName.isNull())
     {
+        stat_type prevWrites = out->getStatistic(StNumDiskWrites);
         out->write(fixedTextLength, curPartition.fixedText.get());
         curProgress.status = OutputProgress::StatusCopied;
         curProgress.inputLength = fixedTextLength;
         curProgress.outputLength = fixedTextLength;
+        curProgress.numWrites += (out->getStatistic(StNumDiskWrites)-prevWrites);
         if (crcOut)
             curProgress.outputCRC = crcOut->getCRC();
         sendProgress(curProgress);
@@ -864,7 +873,9 @@ processedProgress:
                 {
                     char null = 0;
                     offset_t lastOffset = lastChunk.outputOffset+lastChunk.outputLength;
+                    stat_type prevWrites = outio->getStatistic(StNumDiskWrites);
                     outio->write(lastOffset-sizeof(null),sizeof(null),&null);
+                    curProgress.numWrites += (outio->getStatistic(StNumDiskWrites)-prevWrites);
                     LOG(MCdebugProgress, unknownJob, "Extend length of target file to %" I64F "d", lastOffset);
                 }
             }

+ 1 - 0
dali/ft/fttransform.hpp

@@ -31,6 +31,7 @@ public:
     virtual void setInputCRC(crc32_t value) = 0;
     virtual bool setPartition(RemoteFilename & remoteInputName, offset_t _startOffset, offset_t _length, bool compressedInput, const char *decryptKey) = 0;
     virtual offset_t tell() = 0;
+    virtual stat_type getStatistic(StatisticKind kind) = 0;
 };
 
 

+ 3 - 0
dali/ft/fttransform.ipp

@@ -38,6 +38,7 @@ public:
     virtual bool getInputCRC(crc32_t & value) { return false; }
     virtual bool setPartition(RemoteFilename & remoteInputName, offset_t _startOffset, offset_t _length, bool compressedInput, const char *decryptKey) = 0;
     virtual void setInputCRC(crc32_t value);
+    virtual stat_type getStatistic(StatisticKind kind) = 0;
 
 protected:
     bool setPartition(RemoteFilename & remoteInputName, offset_t _startOffset, offset_t _length);
@@ -60,6 +61,7 @@ public:
     virtual bool setPartition(RemoteFilename & remoteInputName, offset_t _startOffset, offset_t _length, bool compressedInput, const char *decryptKey);
     virtual size32_t getBlock(IFileIOStream * out);
     virtual offset_t tell();
+    virtual stat_type getStatistic(StatisticKind kind) override { return input->getStatistic(kind); }
 
 protected:
     size32_t read(size32_t maxLength, void * buffer);
@@ -192,6 +194,7 @@ public:
     virtual bool setPartition(RemoteFilename & remoteInputName, offset_t _startOffset, offset_t _length, bool compressedInput, const char *decryptKey);
     virtual void setInputCRC(crc32_t value);
     virtual offset_t tell();
+    virtual stat_type getStatistic(StatisticKind kind) override { UNIMPLEMENTED; }
 
 protected:
     Owned<IFormatProcessor> processor;

+ 132 - 25
docs/EN_US/ContainerizedHPCC/ContainerizedMods/ConfigureValues.xml

@@ -158,6 +158,28 @@
       functional system, however you should define the component resources in
       a manner that corresponds best to your operational strategy.</para>
 
+      <sect3 id="YML_HPCCSystemsServices">
+        <title>The Systems services</title>
+
+        <para>Most of the HPCC Systems components have a service definition
+        entry, similar to the resources entry. All the components that have
+        service definitions follow this same pattern.</para>
+
+        <para>Any service-related information needs to be under a service
+        object, for example:</para>
+
+        <para><programlisting>  service:
+    servicePort: 7200
+    visibility: local
+</programlisting></para>
+
+        <para>This applies to most of the HPCC Systems components: ESP,
+        Dali, dafilesrv, and Sasha. Roxie's specification is slightly
+        different, in that its services are defined under "roxieservice",
+        and each Roxie can have multiple "roxieservice" definitions (see the
+        schema).</para>
+      </sect3>
+
       <sect3 id="DALI_ValueYAML">
         <title>Dali</title>
 
@@ -167,7 +189,7 @@
         assign all the pods to the same virtual machine and components
         fighting for memory will crush them. Therefore more memory assigned
         the better. If you define these wrong and a process uses more memory
-        than configured, Kubernetes will kill the pod. </para>
+        than configured, Kubernetes will kill the pod.</para>
       </sect3>
 
       <sect3 id="DAFLESRV_DFURVR_YMLSECT">
@@ -361,6 +383,84 @@
         in bare metal we can have jobs that could effect other jobs because
         they are running in the same process space.</para>
       </sect3>
+
+      <sect3 id="YAML_Thor_and_hThor_Memory">
+        <title>Thor and hThor Memory</title>
+
+        <para>The Thor and hThor <emphasis>memory</emphasis> sections allow
+        the resource memory of the component to be refined into different
+        areas.</para>
+
+        <para>For example, the "workerMemory" for a Thor defined as:</para>
+
+        <programlisting>thor:
+- name: thor
+  prefix: thor
+  numWorkers: 2
+  maxJobs: 4
+  maxGraphs: 2
+  managerResources:
+    cpu: "1"
+    memory: "2G"
+  workerResources:
+    cpu: "4"
+    memory: "4G"
+  workerMemory:
+    query: "3G"
+    thirdParty: "500M"
+  eclAgentResources:
+    cpu: "1"
+    memory: "2G"</programlisting>
+
+        <para>The "<emphasis>workerResources</emphasis>" section will tell
+        Kubernetes to resource 4G per worker pod. By default Thor will reserve
+        90% of this memory to use for HPCC query memory (roxiemem). The
+        remaining 10% is left for all other non-row-based (non-roxiemem)
+        usage, such as general heap, OS overheads, etc. This default makes no
+        allowance for any 3rd-party library, plugin, or embedded language
+        usage. In other words, if embedded Python allocates 4G, the process
+        will soon fail with an out-of-memory error once that memory is
+        actually used, because the platform expected 90% of the 4G to be
+        freely available for its own use.</para>
+
+        <para>These defaults can be overridden by the memory sections. In
+        this example, <emphasis>workerMemory.query</emphasis> defines that 3G
+        of the available resourced memory should be assigned to query memory,
+        and 500M to "thirdParty" uses.</para>
+
+        <para>This limits the HPCC Systems
+        <emphasis>roxiemem</emphasis> memory usage to exactly 3G, leaving 1G
+        free for other purposes. The "thirdParty" amount is not actually
+        allocated; it is used solely as part of the running total, to ensure
+        that the configuration doesn't specify a total in this section larger
+        than the resources section. For example, if "thirdParty" were set to
+        "2G" in the section above, Thor would complain at runtime that the
+        definition exceeded the resource limit.</para>
+
+        <para>It is also possible to override the default recommended
+        percentage (90% by default), by setting
+        <emphasis>maxMemPercentage</emphasis>. If "query" is not defined, then
+        it is calculated to be the recommended max memory minus the defined
+        memory (e.g., "thirdParty").</para>
+
+        <para>In Thor there are 3 resource areas:
+        <emphasis>eclAgent</emphasis>, <emphasis>ThorManager</emphasis>, and
+        <emphasis>ThorWorker</emphasis>(s). Each has a *Resources section that
+        defines its Kubernetes resource needs, and a
+        corresponding *Memory section that can be used to override default
+        memory allocation requirements.</para>
+
+        <para>These settings can also be overridden on a per-query basis, via
+        workunit options following the pattern:
+        &lt;memory-section-name&gt;.&lt;property&gt;. For example:
+        #option('workerMemory.thirdParty', "1G");</para>
+
+        <para><emphasis role="bold">Note:</emphasis> Currently there are only
+        two categories: "query" (HPCC roxiemem usage) and "thirdParty" for
+        any 3rd-party usage. Further categories that specifically define
+        memory use for particular targets, such as "python" or "java", may be
+        added in future.</para>
+      </sect3>
     </sect2>
   </sect1>
 
@@ -380,35 +480,35 @@
     <para>The delivered HPCC Systems Values file primarily consists of the
     following areas:</para>
 
-    <itemizedlist>
-      <listitem>
-        <para>global</para>
-      </listitem>
+    <para><informaltable>
+        <tgroup cols="3">
+          <tbody>
+            <row>
+              <entry>global</entry>
+
+              <entry>storage</entry>
 
-      <listitem>
-        <para>storage</para>
-      </listitem>
+              <entry>visibilities</entry>
+            </row>
 
-      <listitem>
-        <para>data planes</para>
-      </listitem>
+            <row>
+              <entry>data planes</entry>
 
-      <listitem>
-        <para>certificates</para>
-      </listitem>
+              <entry>certificates</entry>
 
-      <listitem>
-        <para>security</para>
-      </listitem>
+              <entry>security</entry>
+            </row>
 
-      <listitem>
-        <para>secrets</para>
-      </listitem>
+            <row>
+              <entry>secrets</entry>
 
-      <listitem>
-        <para>components</para>
-      </listitem>
-    </itemizedlist>
+              <entry>components</entry>
+
+              <entry></entry>
+            </row>
+          </tbody>
+        </tgroup>
+      </informaltable></para>
 
     <para>The subsequent sections will examine some of these more closely and
     why each of them is there.</para>
@@ -475,7 +575,7 @@
   # For persistent storage:
   #   pvc: &lt;name&gt;                           # The name of the persistant volume claim
   #   forcePermissions: false
-  #   hosts: [ &lt;host-list ]                 # Inline list of hosts
+  #   hosts: [ &lt;host-list&gt; ]                 # Inline list of hosts
   #   hostGroup: &lt;name&gt;                     # Name of the host group for bare metal 
   #                                         # must match the name of the storage plane..
   #
@@ -801,6 +901,13 @@
     </sect2>
 
     <sect2>
+      <title>Visibilities</title>
+
+      <para>The visibilities section can be used to set labels, annotations,
+      and service types for any service with the specified visibility.</para>
+    </sect2>
+
+    <sect2>
       <title>Replicas and Resources</title>
 
       <para>Other noteworthy values in the charts that have bearing on HPCC

+ 138 - 1
docs/EN_US/HPCCClientTools/CT_Mods/CT_ECL_CLI.xml

@@ -1683,7 +1683,7 @@ ecl queries list roxie --target=roxie --show=A </programlisting>
         <para>The queries files command displays a list of the files currently
         in use by the given query. If the <emphasis>query</emphasis> option is
         omitted, it returns a list of files for all queries on the specified
-        target. </para>
+        target.</para>
 
         <para>Examples:</para>
 
@@ -4726,6 +4726,143 @@ ecl packagemap copy //192.168.0.100:8010/roxie/MyPkg roxie2
           </informaltable><parameter></parameter></para>
       </sect2>
 
+      <sect2 id="CT_CLI_SyntaxECLRoxiexref" role="brk">
+        <title>ecl roxie xref</title>
+
+        <para><emphasis role="bold">ecl roxie xref
+        &lt;cluster&gt;</emphasis></para>
+
+        <para>The <emphasis role="bold">roxie xref</emphasis> command returns
+        file information for the specified queries on the specified cluster.
+        If the <emphasis>queryids</emphasis> option is omitted, file
+        information about all queries is returned. The result is in XML
+        format.</para>
+
+        <para>Examples:</para>
+
+        <programlisting>ecl roxie xref myroxie
+ecl roxie xref myroxie --queryids=myquery.1,myotherquery.1</programlisting>
+
+        <para>Example result:</para>
+
+        <programlisting>&lt;QueryXrefInfo&gt;
+ &lt;Endpoint ep="192.168.56.1:9876"&gt;
+  &lt;Queries reporting="1"&gt;
+   &lt;Query id="myquery.1"/&gt;
+     &lt;SuperFile name="jd::mysuperfile"&gt;
+       &lt;File name="jd::subfile1"/&gt;
+       &lt;File name="jd::subfile2"/&gt;
+   &lt;Query id="myotherquery.1"/&gt;
+     &lt;SuperFile name="jd::myothersuperfile"&gt;
+       &lt;File name="jd::subfile1"/&gt;
+       &lt;File name="jd::subfile2"/&gt;
+       &lt;File name="jd::subfile3"/&gt;
+  &lt;/Queries&gt;
+  &lt;Status&gt;ok&lt;/Status&gt;
+ &lt;/Endpoint&gt;
+&lt;/QueryXrefInfo&gt;
+</programlisting>
+
+        <para><informaltable colsep="1" frame="all" rowsep="1">
+            <tgroup cols="2">
+              <colspec align="left" colwidth="125.55pt" />
+
+              <colspec />
+
+              <tbody>
+                <row>
+                  <entry>ecl roxie xref</entry>
+
+                  <entry>Returns file information for the selected queries in
+                  XML format.</entry>
+                </row>
+
+                <row>
+                  <entry><emphasis role="bold">Options</emphasis></entry>
+                </row>
+
+                <row>
+                  <entry>--check-all-nodes</entry>
+
+                  <entry>Gets query file information from all nodes. This can
+                  be slow.</entry>
+                </row>
+
+                <row>
+                  <entry>--queryids=&lt;csv list&gt;</entry>
+
+                  <entry>The queries for which to get file information
+                  (default is all queries)</entry>
+                </row>
+
+                <row>
+                  <entry>--wait=&lt;ms&gt;</entry>
+
+                  <entry>Max time to wait in milliseconds</entry>
+                </row>
+
+                <row>
+                  <entry>-v, --verbose</entry>
+
+                  <entry>Output additional tracing information</entry>
+                </row>
+
+                <row>
+                  <entry>-s, --server</entry>
+
+                  <entry>The IP Address or hostname of ESP server running ECL
+                  Watch services</entry>
+                </row>
+
+                <row>
+                  <entry>--port</entry>
+
+                  <entry>The ECL Watch services port (Default is 8010)</entry>
+                </row>
+
+                <row>
+                  <entry>--ssl</entry>
+
+                  <entry>Use SSL to secure the connection to the
+                  server.</entry>
+                </row>
+
+                <row>
+                  <entry>--wait=<emphasis>&lt;ms&gt;</emphasis></entry>
+
+                  <entry>Max time to wait in milliseconds</entry>
+                </row>
+
+                <row>
+                  <entry>-u, --username</entry>
+
+                  <entry>The username (if necessary)</entry>
+                </row>
+
+                <row>
+                  <entry>-pw, --password</entry>
+
+                  <entry>The password (if necessary)</entry>
+                </row>
+
+                <row>
+                  <entry>--wait-connect=&lt;Ms&gt;</entry>
+
+                  <entry>Timeout while connecting to server (in
+                  milliseconds)</entry>
+                </row>
+
+                <row>
+                  <entry>--wait-read=&lt;Secs&gt;</entry>
+
+                  <entry>Timeout while reading from socket (in
+                  seconds)</entry>
+                </row>
+              </tbody>
+            </tgroup>
+          </informaltable><parameter></parameter></para>
+      </sect2>
+
       <sect2 id="CT_CLI_SyntaxECLBundleDepends" role="brk">
         <title>ecl bundle depends</title>
 

+ 5 - 3
ecl/eclagent/eclagent.cpp

@@ -55,6 +55,8 @@
 #include "anawu.hpp"
 #include "hpccconfig.hpp"
 
+#include "ws_dfsclient.hpp"
+
 using roxiemem::OwnedRoxieString;
 
 #include <memory>
@@ -1438,7 +1440,7 @@ bool EclAgent::fileExists(const char *name)
     StringBuffer lfn;
     expandLogicalName(lfn, name);
 
-    Owned<IDistributedFile> f = queryDistributedFileDirectory().lookup(lfn.str(),queryUserDescriptor(), false, false, false, nullptr, defaultPrivilegedUser);
+    Owned<IDistributedFile> f = wsdfs::lookup(lfn.str(), queryUserDescriptor(), false, false, false, nullptr, defaultPrivilegedUser, INFINITE);
     if (f)
         return true;
     return false;
@@ -2758,7 +2760,7 @@ unsigned __int64 EclAgent::getDatasetHash(const char * logicalName, unsigned __i
         return crc;
     }
 
-    Owned<IDistributedFile> file = queryDistributedFileDirectory().lookup(fullname.str(),queryUserDescriptor(), false, false, false, nullptr, defaultPrivilegedUser);
+    Owned<IDistributedFile> file = wsdfs::lookup(fullname.str(),queryUserDescriptor(), false, false, false, nullptr, defaultPrivilegedUser, INFINITE);
     if (file)
     {
         WorkunitUpdate wu = updateWorkUnit();
@@ -3068,7 +3070,7 @@ restart:     // If things change beneath us as we are deleting, repeat the proce
                 MilliSleep(PERSIST_LOCK_SLEEP + (getRandom()%PERSIST_LOCK_SLEEP));
                 persistLock.setown(getPersistReadLock(goer));
             }
-            Owned<IDistributedFile> f = queryDistributedFileDirectory().lookup(goer, queryUserDescriptor(), true, false, false, nullptr, defaultPrivilegedUser);
+            Owned<IDistributedFile> f = wsdfs::lookup(goer, queryUserDescriptor(), true, false, false, nullptr, defaultPrivilegedUser, INFINITE);
             if (!f)
                 goto restart; // Persist has been deleted since last checked - repeat the whole process
             const char *newAccessTime = f->queryAttributes().queryProp("@accessed");
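
All three replacements above share the same call shape: wsdfs::lookup() (declared in ws_dfsclient.hpp, included at the top of the hunk) takes the same leading arguments as the old IDistributedFileDirectory::lookup() plus a trailing timeout, INFINITE here. A hedged sketch of that shape; the wrapper function and its name are illustrative.

#include "ws_dfsclient.hpp"   // wsdfs::lookup, as included in the hunk above

// Illustrative wrapper: resolve a logical file via the ws_dfsclient lookup.
// The boolean flags mirror the lookup() call sites shown above; the trailing
// argument is a timeout.
IDistributedFile * resolveLogicalFile(const char * logicalName, IUserDescriptor * user)
{
    Owned<IDistributedFile> f = wsdfs::lookup(logicalName, user,
                                              false,             // no write intent
                                              false, false,      // remaining flags as in the calls above
                                              nullptr,           // no transaction
                                              defaultPrivilegedUser,
                                              INFINITE);         // wait indefinitely
    return f.getClear();     // caller owns the result; nullptr if the file does not exist
}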

+ 22 - 20
ecl/eclcc/CMakeLists.txt

@@ -33,22 +33,23 @@ set (    SRCS
 include_directories ( 
          ${CMAKE_BINARY_DIR}
          ${CMAKE_BINARY_DIR}/oss
-         ./../../ecl/hqlcpp 
-         ./../../common/environment
-         ./../../common/workunit 
-         ./../../common/dllserver 
-         ./../../common/deftype 
-         ./../../system/include 
-         ./../../ecl/hql
-         ./../../dali/base
-         ./../../system/mp
-         ./../../rtl/include 
-         ./../../rtl/eclrtl 
-         ./../../system/jlib 
-         ./../../fs/dafsclient
-         ./../../system/security/shared
-         ./../../system/security/zcrypt
+         ${HPCC_SOURCE_DIR}/ecl/hqlcpp 
+         ${HPCC_SOURCE_DIR}/common/environment
+         ${HPCC_SOURCE_DIR}/common/workunit 
+         ${HPCC_SOURCE_DIR}/common/dllserver 
+         ${HPCC_SOURCE_DIR}/common/deftype 
+         ${HPCC_SOURCE_DIR}/dali/base
+         ${HPCC_SOURCE_DIR}/ecl/hql
+         ${HPCC_SOURCE_DIR}/esp/clients/ws_dfsclient
+         ${HPCC_SOURCE_DIR}/fs/dafsclient
+         ${HPCC_SOURCE_DIR}/rtl/include 
+         ${HPCC_SOURCE_DIR}/rtl/eclrtl 
          ${HPCC_SOURCE_DIR}/system/codesigner
+         ${HPCC_SOURCE_DIR}/system/include 
+         ${HPCC_SOURCE_DIR}/system/jlib 
+         ${HPCC_SOURCE_DIR}/system/mp
+         ${HPCC_SOURCE_DIR}/system/security/shared
+         ${HPCC_SOURCE_DIR}/system/security/zcrypt
     )
 
 ADD_DEFINITIONS( -D_CONSOLE )
@@ -61,17 +62,18 @@ endif()
 HPCC_ADD_EXECUTABLE ( eclcc ${SRCS} )
 install ( TARGETS eclcc RUNTIME DESTINATION ${EXEC_DIR} )
 target_link_libraries ( eclcc 
-         jlib
-         nbcd 
          eclrtl 
          deftype 
          dafsclient 
          dalibase 
-         workunit 
-         thorhelper 
+         dllserver
          hql
          hqlcpp
-         dllserver
+         jlib
+         nbcd 
+         thorhelper 
+         workunit 
+         ws_dfsclient
     )
 
 if (NOT CONTAINERIZED)

+ 3 - 1
ecl/eclcc/eclcc.cpp

@@ -71,6 +71,8 @@
 #include "zcrypt.hpp"
 #endif
 
+#include "ws_dfsclient.hpp"
+
 //#define TEST_LEGACY_DEPENDENCY_CODE
 
 #define INIFILE "eclcc.ini"
@@ -2500,7 +2502,7 @@ IHqlExpression *EclCC::lookupDFSlayout(const char *filename, IErrorReceiver &err
         // Look up the file in Dali
         try
         {
-            Owned<IDistributedFile> dfsFile = queryDistributedFileDirectory().lookup(filename, udesc, false, false, false, nullptr, defaultPrivilegedUser);
+            Owned<IDistributedFile> dfsFile = wsdfs::lookup(filename, udesc, false, false, false, nullptr, defaultPrivilegedUser, INFINITE);
             if (dfsFile)
             {
                 const char *recordECL = dfsFile->queryAttributes().queryProp("ECL");

+ 2 - 0
ecl/hql/hqlatoms.cpp

@@ -285,6 +285,7 @@ IAtom * maxSizeAtom;
 IAtom * mergeAtom;
 IAtom * mergeTransformAtom;
 IAtom * _metadata_Atom;
+IAtom * metaAtom;
 IAtom * methodAtom;
 IAtom * minAtom;
 IAtom * minimalAtom;
@@ -763,6 +764,7 @@ MODULE_INIT(INIT_PRIORITY_HQLATOM)
     MAKEATOM(memory);
     MAKEATOM(merge);
     MAKEATOM(mergeTransform);
+    MAKEATOM(meta);
     MAKESYSATOM(metadata);
     MAKEATOM(method);
     MAKEATOM(min);

+ 1 - 0
ecl/hql/hqlatoms.hpp

@@ -289,6 +289,7 @@ extern HQL_API IAtom * maxSizeAtom;
 extern HQL_API IAtom * memoryAtom;
 extern HQL_API IAtom * mergeAtom;
 extern HQL_API IAtom * mergeTransformAtom;
+extern HQL_API IAtom * metaAtom;
 extern HQL_API IAtom * _metadata_Atom;
 extern HQL_API IAtom * methodAtom;
 extern HQL_API IAtom * minAtom;

+ 6 - 0
ecl/hqlcpp/hqlcpp.cpp

@@ -1631,6 +1631,7 @@ void HqlCppTranslator::cacheOptions()
         DebugOption(options.evaluateCoLocalRowInvariantInExtract,"evaluateCoLocalRowInvariantInExtract", false),
         DebugOption(options.spanMultipleCpp,"spanMultipleCpp", true),
         DebugOption(options.activitiesPerCpp, "<exception>", 0x7fffffff),
+        DebugOption(options.metaMultipleCpp, "metaMultipleCpp", false),
         DebugOption(options.allowInlineSpill,"allowInlineSpill", true),
         DebugOption(options.optimizeGlobalProjects,"optimizeGlobalProjects", false),
         DebugOption(options.optimizeResourcedProjects,"optimizeResourcedProjects", false),
@@ -1914,6 +1915,11 @@ void HqlCppTranslator::cacheOptions()
         options.activitiesPerCpp = wu()->getDebugValueInt("activitiesPerCpp", DEFAULT_ACTIVITIES_PER_CPP);
         curCppFile = 1;
     }
+    else
+    {
+        options.metaMultipleCpp = false;
+        options.activitiesPerCpp = 0x7fffffff;
+    }
 
     code->cppInfo.append(* new CppFileInfo(0));
     options.targetCompiler = DEFAULT_COMPILER;

+ 3 - 2
ecl/hqlcpp/hqlcpp.ipp

@@ -645,6 +645,7 @@ struct HqlCppOptions
     bool                evaluateCoLocalRowInvariantInExtract;
     bool                allowInlineSpill;
     bool                spanMultipleCpp;
+    bool                metaMultipleCpp;
     bool                optimizeGlobalProjects;
     bool                optimizeResourcedProjects;
     byte                notifyOptimizedProjects;
@@ -1117,7 +1118,7 @@ public:
     unsigned expandRtlRecordFields(StringBuffer & fieldListText, IHqlExpression * record, IHqlExpression * rowRecord, const char * rowTypeName);
     unsigned buildRtlIfBlockField(StringBuffer & instanceName, IHqlExpression * ifblock, IHqlExpression * rowRecord, const char * rowTypeName, bool isPayload);
 
-    void buildMetaInfo(MetaInstance & instance);
+    IHqlExpression * buildMetaInfo(MetaInstance & instance);
     IHqlExpression * buildMetaParameter(IHqlExpression * arg);
     void buildMetaForRecord(StringBuffer & name, IHqlExpression * record);
     void buildMetaForSerializedRecord(StringBuffer & name, IHqlExpression * record, bool isGrouped);
@@ -2077,7 +2078,6 @@ protected:
     unsigned            startCursorSet;
     bool                requireTable;
     BuildCtx *          activeGraphCtx;
-    HqlExprArray        metas;
     Owned<GeneratedGraphInfo> activeGraph;
     unsigned            graphSeqNumber;
     StringAttr          graphLabel;
@@ -2098,6 +2098,7 @@ protected:
     HqlExprArray        internalFunctions;
     HqlExprArray        internalFunctionExternals;
     UniqueSequenceCounter spillSequence;
+    std::vector<IHqlStmt *> metaPassStmts;
     
 #ifdef SPOT_POTENTIAL_COMMON_ACTIVITIES
     LocationArray       savedActivityLocations;
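
metaPassStmts backs the pass selection added in the hqlhtcpp.cpp hunk further down: one group statement per generated c++ file, created lazily so that a grouped meta class is emitted into the same file as its ungrouped base. The bookkeeping, condensed into an illustrative helper (only the helper itself is new; the types and calls appear elsewhere in this commit).

#include <vector>
// Assumes the hqlcpp translator headers (BuildCtx, IHqlStmt, OwnedHqlExpr,
// getSizetConstant) are in scope, as in hqlhtcpp.cpp.

// Illustrative equivalent of the inline logic in buildMetaInfo(): return the
// group statement for a given pass, creating and recording it on first use.
// Slots for passes not yet seen are left null.
static IHqlStmt * ensureMetaPassStmt(std::vector<IHqlStmt *> & metaPassStmts, BuildCtx & ctx, unsigned pass)
{
    if (pass >= metaPassStmts.size())
        metaPassStmts.resize(pass + 1, nullptr);
    if (!metaPassStmts[pass])
    {
        OwnedHqlExpr passExpr = getSizetConstant(pass);
        metaPassStmts[pass] = ctx.addGroupPass(passExpr);
    }
    return metaPassStmts[pass];
}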

+ 2 - 2
ecl/hqlcpp/hqlecl.cpp

@@ -93,7 +93,7 @@ $?$/* Template for generating thor/hthor/roxie output */
 $?multiFile$#include "$headerName$"
 $?$@literal@
 @declare@
-@helper@
+@meta@@helper@
 
 @go@
 
@@ -134,7 +134,7 @@ $?$/* Template for generating a child module for query */
 
 #include "$headerName$"
 
-@helper@
+@meta@@helper@
 @userFunction@
 )!!";
 

+ 56 - 26
ecl/hqlcpp/hqlhtcpp.cpp

@@ -2975,7 +2975,7 @@ void GlobalClassBuilder::buildClass(unsigned priority)
     if (baseName)
         s.append(" : public ").append(baseName);
 
-    classctx.set(declareAtom);
+    //classctx was initialised in the constructor - all other ctxs are re-initialised
     if (priority)
         classctx.setNextPriority(priority);
     classStmt = classctx.addQuotedCompound(s, ";");
@@ -3027,8 +3027,8 @@ void GlobalClassBuilder::completeClass(unsigned priority)
         StringBuffer s, prototype;
         prototype.append("extern ECL_API ").append(accessorInterface).append(" * ").append(accessorName).append("(ICodeContext * ctx, unsigned activityId)");
 
-        BuildCtx accessctx(classctx);
-        accessctx.set(declareAtom);
+        BuildCtx accessctx(classctx, classStmt);
+        accessctx.selectContainer();
         if (priority)
             accessctx.setNextPriority(priority);
         accessctx.addQuotedFunction(prototype, true);
@@ -4165,7 +4165,7 @@ unsigned HqlCppTranslator::buildRtlType(StringBuffer & instanceName, ITypeInfo *
 }
 
 
-void HqlCppTranslator::buildMetaInfo(MetaInstance & instance)
+IHqlExpression * HqlCppTranslator::buildMetaInfo(MetaInstance & instance)
 {
     if (options.spanMultipleCpp)
     {
@@ -4174,29 +4174,56 @@ void HqlCppTranslator::buildMetaInfo(MetaInstance & instance)
         instance.instanceObject.set(queryFunctionName);
     }
 
-    BuildCtx declarectx(*code, declareAtom);
+    //Currently there are some classes which directly access the meta instances; if not spanning multiple c++ files, accessor functions are not generated.
+    //In this case for backward compatibility generate in the declare section rather than the meta section.
+    IAtom * section = options.metaMultipleCpp ? metaAtom : declareAtom;
+    BuildCtx declarectx(*code, section);
 
     OwnedHqlExpr search = instance.getMetaUniqueKey();
-
     // stop duplicate classes being generated.
     // MORE: If this ever includes sorting/grouping, the dependence on a record will need to be revised
     HqlExprAssociation * match = declarectx.queryMatchExpr(search);
     if (match)
-        return;
+        return match->queryExpr(); // Return the pass (i.e. which c++ file) the meta was generated in, so derived classes can be generated in the same file
+
+    //This is the location where the meta will actually be defined (the c++ file could differ)
+    BuildCtx definectx(declarectx);
 
     bool savedContextAvailable = contextAvailable;
     contextAvailable = false;
-    metas.append(*search.getLink());
     StringBuffer s;
-    StringBuffer serializerName, deserializerName, prefetcherName, internalSerializerName, internalDeserializerName;
+    StringBuffer baseMetaName;
 
-    StringBuffer endText;
+    IHqlExpression * record = instance.queryRecord();
+    unsigned pass = 0;
+    if (instance.isGrouped())
+    {
+        MetaInstance ungroupedMeta(*this, record, false);
+        IHqlExpression * passExpr = buildMetaInfo(ungroupedMeta);
+        baseMetaName.set(ungroupedMeta.metaName);
+        pass = getIntValue(passExpr);
+    }
+    else if (options.metaMultipleCpp)
+        pass = beginFunctionGetCppIndex(0, false);
 
-    endText.append(" ").append(instance.instanceName).append(";");
-    BuildCtx metactx(declarectx);
+    if (options.metaMultipleCpp)
+    {
+        if (metaPassStmts.size() > pass)
+        {
+            assertex(metaPassStmts[pass]);
+            definectx.selectPass(metaPassStmts[pass]);
+        }
+        else
+        {
+            while (metaPassStmts.size() < pass)
+                metaPassStmts.emplace_back(nullptr);
 
-    IHqlExpression * record = instance.queryRecord();
+            OwnedHqlExpr passExpr = getSizetConstant(pass);
+            metaPassStmts.emplace_back(definectx.addGroupPass(passExpr));
+        }
+    }
 
+    BuildCtx metactx(definectx);
     unsigned flags = MDFhasserialize;       // we always generate a serialize since 
     bool useTypeForXML = false;
     if (instance.isGrouped())
@@ -4221,18 +4248,19 @@ void HqlCppTranslator::buildMetaInfo(MetaInstance & instance)
         useTypeForXML = true;
     }
 
+    StringBuffer endText;
+    endText.append(" ").append(instance.instanceName).append(";");
     if (instance.isGrouped())
     {
-        MetaInstance ungroupedMeta(*this, record, false);
-        buildMetaInfo(ungroupedMeta);
-
-        s.append("struct ").append(instance.metaName).append(" : public ").append(ungroupedMeta.metaName);
+        s.append("struct ").append(instance.metaName).append(" : public ").append(baseMetaName);
         metactx.setNextPriority(RowMetaPrio);
         metactx.addQuotedCompound(s, endText.str());
         doBuildUnsignedFunction(metactx, "getMetaFlags", flags);
     }
     else
     {
+        StringBuffer serializerName, deserializerName, prefetcherName, internalSerializerName, internalDeserializerName;
+
         //Serialization classes need to be generated for all meta information - because they may be called by parent row classes
         //however, the CFixedOutputMetaData base class contains a default implementation - reducing the required code.
         if (record && (isVariableSizeRecord(record) || (flags & MDFneedserializemask)))
@@ -4241,33 +4269,33 @@ void HqlCppTranslator::buildMetaInfo(MetaInstance & instance)
             if (flags & MDFneedserializedisk)
             {
                 serializerName.append("s").append(instance.metaName);
-                buildMetaSerializerClass(declarectx, record, serializerName.str(), diskAtom);
+                buildMetaSerializerClass(definectx, record, serializerName.str(), diskAtom);
             }
             bool needInternalSerializer = ((flags & MDFneedserializeinternal) && recordSerializationDiffers(record, diskAtom, internalAtom));
 
             if (needInternalSerializer)
             {
                 internalSerializerName.append("si").append(instance.metaName);
-                buildMetaSerializerClass(declarectx, record, internalSerializerName.str(), internalAtom);
+                buildMetaSerializerClass(definectx, record, internalSerializerName.str(), internalAtom);
             }
 
             //MORE:
             //still generate a deserialize for the variable width case because it offers protection
             //against accessing out of bounds data
             deserializerName.append("d").append(instance.metaName);
-            buildMetaDeserializerClass(declarectx, record, deserializerName.str(), diskAtom);
+            buildMetaDeserializerClass(definectx, record, deserializerName.str(), diskAtom);
 
             if (needInternalSerializer)
             {
                 internalDeserializerName.append("di").append(instance.metaName);
-                buildMetaDeserializerClass(declarectx, record, internalDeserializerName.str(), internalAtom);
+                buildMetaDeserializerClass(definectx, record, internalDeserializerName.str(), internalAtom);
             }
 
             //The base class implements prefetch using the serialized meta so no need to generate...
             if (!(flags & MDFneedserializemask))
             {
                 prefetcherName.append("p").append(instance.metaName);
-                if (!buildMetaPrefetcherClass(declarectx, record, prefetcherName))
+                if (!buildMetaPrefetcherClass(definectx, record, prefetcherName))
                     prefetcherName.clear();
             }
         }
@@ -4356,17 +4384,19 @@ void HqlCppTranslator::buildMetaInfo(MetaInstance & instance)
     s.append(instance.instanceName).append(".Link(); ");
     s.append("return &").append(instance.instanceName).append("; ");
     s.append("}");
-    declarectx.setNextPriority(RowMetaPrio);
-    declarectx.addQuoted(s);
+    definectx.setNextPriority(RowMetaPrio);
+    definectx.addQuoted(s);
     if (options.spanMultipleCpp)
     {
         StringBuffer temp;
-        createAccessFunctions(temp, declarectx, RowMetaPrio, "IOutputMetaData", instance.instanceName);
+        createAccessFunctions(temp, definectx, RowMetaPrio, "IOutputMetaData", instance.instanceName);
     }
 
+    OwnedHqlExpr passExpr = getSizetConstant(pass);
     OwnedHqlExpr temp = createVariable(instance.metaName, makeVoidType());
-    declarectx.associateExpr(search, temp);
+    declarectx.associateExpr(search, passExpr);
     contextAvailable = savedContextAvailable;
+    return passExpr;
 }
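
For readers following the buildMetaInfo changes above: with metaMultipleCpp enabled, the translator now lazily creates one group-pass statement per target c++ file (metaPassStmts), reuses it on later calls, and returns the pass so grouped metas land in the same file as their ungrouped base. A minimal standalone sketch of that caching pattern, using plain std types as stand-ins for IHqlStmt/BuildCtx (the names here are illustrative, not the translator's API):

    #include <cassert>
    #include <cstdio>
    #include <memory>
    #include <vector>

    struct PassStmt { unsigned pass; };                           // stand-in for IHqlStmt

    static std::vector<std::unique_ptr<PassStmt>> metaPassStmts;  // one slot per target c++ file

    // Return the cached pass statement for 'pass', creating it (and padding any gaps) on demand.
    static PassStmt * selectOrCreatePass(unsigned pass)
    {
        if (metaPassStmts.size() > pass)
        {
            assert(metaPassStmts[pass]);                          // already created for this file
            return metaPassStmts[pass].get();
        }
        while (metaPassStmts.size() < pass)                       // pad passes we have not emitted into yet
            metaPassStmts.emplace_back(nullptr);
        metaPassStmts.push_back(std::unique_ptr<PassStmt>(new PassStmt{pass}));
        return metaPassStmts[pass].get();
    }

    int main()
    {
        PassStmt * a = selectOrCreatePass(2);
        PassStmt * b = selectOrCreatePass(2);                     // second request reuses the cached entry
        printf("%s\n", (a == b) ? "reused" : "recreated");        // prints "reused"
        return 0;
    }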
 
 

+ 6 - 0
ecl/hqlcpp/hqlstmt.cpp

@@ -892,6 +892,12 @@ void BuildCtx::selectElse(IHqlStmt * stmt)
     }
 }
 
+void BuildCtx::selectPass(IHqlStmt * pass)
+{
+    assertex(pass->getStmt() == pass_stmt);
+    selectCompound(pass);
+}
+
 
 unsigned BuildCtx::setPriority(unsigned newPrio)
 {

+ 1 - 0
ecl/hqlcpp/hqlstmt.hpp

@@ -150,6 +150,7 @@ public:
     IHqlStmt *                  selectBestContext(IHqlExpression * expr);
     void                        selectContainer();
     void                        selectElse(IHqlStmt * filter);
+    void                        selectPass(IHqlStmt * pass);
     void                        setNextConstructor()    { setNextPriority(ConPrio); }
     void                        setNextDestructor()     { setNextPriority(DesPrio); }
     void                        setNextNormal()         { setNextPriority(NormalPrio); }

+ 26 - 24
ecl/hthor/CMakeLists.txt

@@ -46,31 +46,32 @@ set (    INCLUDES
 include_directories (
          .
          ${HPCC_SOURCE_DIR}/common/remote
-         ${HPCC_SOURCE_DIR}/system/jhtree
-         ${HPCC_SOURCE_DIR}/system/hrpc
-         ${HPCC_SOURCE_DIR}/system/mp
-         ${HPCC_SOURCE_DIR}/common/workunit
+         ${HPCC_SOURCE_DIR}/esp/clients/ws_dfsclient
          ${HPCC_SOURCE_DIR}/common/deftype
-         ${HPCC_SOURCE_DIR}/system/include
-         ${HPCC_SOURCE_DIR}/dali/base
-         ${HPCC_SOURCE_DIR}/rtl/include
-         ${HPCC_SOURCE_DIR}/ecl/eclagent
-         ${HPCC_SOURCE_DIR}/system/jlib
-         ${HPCC_SOURCE_DIR}/common/thorhelper
-         ${HPCC_SOURCE_DIR}/rtl/eclrtl
-         ${HPCC_SOURCE_DIR}/roxie/roxiemem
-         ${HPCC_SOURCE_DIR}/roxie/roxie
-         ${HPCC_SOURCE_DIR}/roxie/ccd
          ${HPCC_SOURCE_DIR}/common/dllserver
          ${HPCC_SOURCE_DIR}/common/environment
-         ${HPCC_SOURCE_DIR}/ecl/schedulectrl
+         ${HPCC_SOURCE_DIR}/common/thorhelper
+         ${HPCC_SOURCE_DIR}/common/workunit
+         ${HPCC_SOURCE_DIR}/common/wuanalysis
+         ${HPCC_SOURCE_DIR}/dali/base
+         ${HPCC_SOURCE_DIR}/dali/ft
+         ${HPCC_SOURCE_DIR}/ecl/eclagent
          ${HPCC_SOURCE_DIR}/ecl/hql
+         ${HPCC_SOURCE_DIR}/ecl/schedulectrl
          ${HPCC_SOURCE_DIR}/fs/dafsclient
+         ${HPCC_SOURCE_DIR}/roxie/ccd
+         ${HPCC_SOURCE_DIR}/roxie/roxie
+         ${HPCC_SOURCE_DIR}/roxie/roxiemem
+         ${HPCC_SOURCE_DIR}/rtl/eclrtl
+         ${HPCC_SOURCE_DIR}/rtl/include
+         ${HPCC_SOURCE_DIR}/system/hrpc
+         ${HPCC_SOURCE_DIR}/system/include
+         ${HPCC_SOURCE_DIR}/system/jlib
+         ${HPCC_SOURCE_DIR}/system/jhtree
+         ${HPCC_SOURCE_DIR}/system/mp
+         ${HPCC_SOURCE_DIR}/system/security/shared
          ${CMAKE_BINARY_DIR}
          ${CMAKE_BINARY_DIR}/oss
-         ${HPCC_SOURCE_DIR}/dali/ft
-         ${HPCC_SOURCE_DIR}/system/security/shared
-         ${HPCC_SOURCE_DIR}/common/wuanalysis
     )
 
 if (CMAKE_COMPILER_IS_GNUCC OR CMAKE_COMPILER_IS_CLANG)
@@ -82,20 +83,21 @@ ADD_DEFINITIONS( -D_USRDLL -DHTHOR_EXPORTS -DSTARTQUERY_EXPORTS )
 HPCC_ADD_LIBRARY( hthorlib SHARED ${SRCS} ${INCLUDES} )
 install ( TARGETS hthorlib RUNTIME DESTINATION ${EXEC_DIR} LIBRARY DESTINATION ${LIB_DIR} ARCHIVE DESTINATION componentfiles/cl/lib )
 target_link_libraries ( hthorlib
-         jlib
-         mp
-         hrpc
          dafsclient
          dalibase
          dllserver
-         nbcd
          eclrtl
          deftype
-         workunit
+         hrpc
          jhtree
-         thorhelper
+         jlib
+         mp
+         nbcd
          roxiemem
          schedulectrl
+         thorhelper
+         workunit
+         ws_dfsclient
          wuanalysis
     )
 

+ 4 - 1
ecl/hthor/hthor.cpp

@@ -58,6 +58,9 @@
 #include "thormeta.hpp"
 #include "thorread.hpp"
 
+#include "ws_dfsclient.hpp"
+
+
 #define EMPTY_LOOP_LIMIT 1000
 
 static unsigned const hthorReadBufferSize = 0x10000;
@@ -1096,7 +1099,7 @@ CHThorIndexWriteActivity::CHThorIndexWriteActivity(IAgentContext &_agent, unsign
     expandLogicalFilename(lfn, fname, agent.queryWorkUnit(), agent.queryResolveFilesLocally(), false);
     if (!agent.queryResolveFilesLocally())
     {
-        Owned<IDistributedFile> f = queryDistributedFileDirectory().lookup(lfn, agent.queryCodeContext()->queryUserDescriptor(), true, false, false, nullptr, defaultNonPrivilegedUser);
+        Owned<IDistributedFile> f = wsdfs::lookup(lfn, agent.queryCodeContext()->queryUserDescriptor(), true, false, false, nullptr, defaultNonPrivilegedUser, INFINITE);
 
         if (f)
         {

+ 308 - 2
esp/clients/ws_dfsclient/ws_dfsclient.cpp

@@ -716,8 +716,8 @@ IDFSFile *lookupDFSFile(const char *logicalName, unsigned timeoutSecs, unsigned
                     throw makeStringException(-1, "Could not find any DFS services in the target HPCC configuration.");
             }
         }
-        serviceUrl.append(dfsServiceUrls[currentDfsServiceUrl++].c_str());
-        logicalName = remoteLogicalFileName;
+        serviceUrl.append(dfsServiceUrls[currentDfsServiceUrl].c_str());
+        currentDfsServiceUrl = (currentDfsServiceUrl+1 == dfsServiceUrls.size()) ? 0 : currentDfsServiceUrl+1;
         remoteName.clear(); // local
 #endif
     }
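
The hunk above fixes the service URL selection: the old post-increment never wrapped, so repeated lookups would eventually index past the end of dfsServiceUrls, whereas the new code cycles round-robin through the configured URLs. A tiny standalone illustration of the wrap logic (a plain std::vector stands in for the real member):

    #include <cstdio>
    #include <string>
    #include <vector>

    int main()
    {
        std::vector<std::string> dfsServiceUrls = { "http://dfs1:8010", "http://dfs2:8010", "http://dfs3:8010" };
        unsigned currentDfsServiceUrl = 0;

        // Six lookups cycle through the three configured URLs and wrap back to the first.
        for (int i = 0; i < 6; i++)
        {
            printf("%s\n", dfsServiceUrls[currentDfsServiceUrl].c_str());
            currentDfsServiceUrl = (currentDfsServiceUrl + 1 == dfsServiceUrls.size()) ? 0 : currentDfsServiceUrl + 1;
        }
        return 0;
    }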
@@ -797,6 +797,312 @@ IDistributedFile *lookupLegacyDFSFile(const char *logicalName, unsigned timeoutS
     return createLegacyDFSFile(dfsFile);
 }
 
+IDistributedFile *lookup(CDfsLogicalFileName &lfn, IUserDescriptor *user, bool write, bool hold, bool lockSuperOwner, IDistributedFileTransaction *transaction, bool priviledged, unsigned timeout)
+{
+    bool viaDali = false;
+
+    // DFS service currently only supports remote files 
+    if (write)
+        viaDali = true;
+    else
+    {
+        // switch to Dali if non-remote file, unless "dfsesp-localfiles" enabled (and non-external) 
+        if (!lfn.isRemote())
+        {
+            if (lfn.isExternal() || (!getComponentConfigSP()->getPropBool("dfsesp-localfiles")))
+                viaDali = true;
+        }
+    }
+    if (viaDali)
+        return queryDistributedFileDirectory().lookup(lfn, user, write, hold, lockSuperOwner, transaction, priviledged, timeout);
+
+    return wsdfs::lookupLegacyDFSFile(lfn.get(), timeout, wsdfs::keepAliveExpiryFrequency, user);
+}
+
+IDistributedFile *lookup(const char *logicalFilename, IUserDescriptor *user, bool write, bool hold, bool lockSuperOwner, IDistributedFileTransaction *transaction, bool priviledged, unsigned timeout)
+{
+    CDfsLogicalFileName lfn;
+    lfn.set(logicalFilename);
+    return lookup(lfn, user, write, hold, lockSuperOwner, transaction, priviledged, timeout);
+}
+
 
 } // namespace wsdfs
 
+
+class CLocalOrDistributedFile: implements ILocalOrDistributedFile, public CInterface
+{
+    bool fileExists;
+    Owned<IDistributedFile> dfile;
+    CDfsLogicalFileName lfn;    // set if localpath but prob not useful
+    StringAttr localpath;
+    StringAttr fileDescPath;
+public:
+    IMPLEMENT_IINTERFACE;
+    CLocalOrDistributedFile()
+    {
+        fileExists = false;
+    }
+
+    virtual const char *queryLogicalName() override
+    {
+        return lfn.get();
+    }
+
+    virtual IDistributedFile * queryDistributedFile() override
+    { 
+        return dfile.get(); 
+    }
+
+    bool init(const char *fname,IUserDescriptor *user,bool onlylocal,bool onlydfs, bool write, bool isPrivilegedUser, const StringArray *clusters)
+    {
+        fileExists = false;
+        if (!onlydfs)
+            lfn.allowOsPath(true);
+        if (!lfn.setValidate(fname))
+            return false;
+        if (!onlydfs)
+        {
+            bool gotlocal = true;
+            if (isAbsolutePath(fname)||(stdIoHandle(fname)>=0)) 
+                localpath.set(fname);
+            else if (!strstr(fname,"::"))
+            {
+                // treat it as a relative file
+                StringBuffer fn;
+                localpath.set(makeAbsolutePath(fname,fn).str());
+            }
+            else if (!lfn.isExternal())
+                gotlocal = false;
+            if (gotlocal)
+            {
+                if (!write && !onlylocal) // MORE - this means the dali access checks are not happening... maybe that's ok?
+                    dfile.setown(wsdfs::lookup(lfn, user, write, false, false, nullptr, isPrivilegedUser, INFINITE));
+                Owned<IFile> file = getPartFile(0,0);
+                if (file.get())
+                {
+                    fileExists = file->exists();
+                    return fileExists || write;
+                }
+            }
+        }
+        if (!onlylocal)
+        {
+            if (lfn.isExternal() && !lfn.isRemote())
+            {
+                Owned<IFileDescriptor> fDesc = createExternalFileDescriptor(lfn.get());
+                dfile.setown(queryDistributedFileDirectory().createExternal(fDesc, lfn.get()));
+                Owned<IFile> file = getPartFile(0,0);
+                if (file.get())
+                    fileExists = file->exists();
+                if (write && lfn.isExternal()&&(dfile->numParts()==1))   // if it is writing to an external file then don't return distributed
+                    dfile.clear();
+                return true;
+            }
+            else
+            {
+                dfile.setown(wsdfs::lookup(lfn, user, write, false, false, nullptr, isPrivilegedUser, INFINITE));
+                if (dfile.get())
+                    return true;
+            }
+
+            StringBuffer dir;
+            unsigned stripeNum = 0;
+#ifdef _CONTAINERIZED
+            StringBuffer cluster;
+            if (clusters)
+            {
+                if (clusters->ordinality()>1)
+                    throw makeStringExceptionV(0, "Container mode does not yet support output to multiple clusters while writing file %s", fname);
+                cluster.append(clusters->item(0));
+            }
+            else
+                getDefaultStoragePlane(cluster);
+            Owned<IStoragePlane> plane = getDataStoragePlane(cluster, true);
+            dir.append(plane->queryPrefix());
+            unsigned numStripedDevices = plane->numDevices();
+            stripeNum = calcStripeNumber(0, lfn.get(), numStripedDevices);
+#endif
+            StringBuffer descPath;
+            makePhysicalDirectory(descPath, lfn.get(), 0, DFD_OSdefault, dir);
+            fileDescPath.set(descPath);
+
+            // MORE - should we create the IDistributedFile here ready for publishing (and/or to make sure it's locked while we write)?
+            StringBuffer physicalPath;
+            makePhysicalPartName(lfn.get(), 1, 1, physicalPath, 0, DFD_OSdefault, dir, false, stripeNum); // more - may need to override path for roxie
+            localpath.set(physicalPath);
+            fileExists = (dfile != NULL);
+            return write;
+        }
+        return false;
+    }
+
+    virtual IFileDescriptor *getFileDescriptor() override
+    {
+        if (dfile.get())
+            return dfile->getFileDescriptor();
+        Owned<IFileDescriptor> fileDesc = createFileDescriptor();
+        fileDesc->setTraceName(lfn.get());
+        StringBuffer dir;
+        if (localpath.isEmpty()) { // e.g. external file
+            StringBuffer tail;
+            IException *e=NULL;
+            bool iswin=
+#ifdef _WIN32
+                true;
+#else
+                false;
+#endif
+            if (!lfn.getExternalPath(dir,tail,iswin,&e)) {
+                if (e)
+                    throw e;
+                return NULL;
+            }
+        }
+        else 
+            splitDirTail(fileDescPath,dir);
+        fileDesc->setDefaultDir(dir.str());
+        RemoteFilename rfn;
+        getPartFilename(rfn,0,0);
+        fileDesc->setPart(0,rfn);
+        fileDesc->queryPartDiskMapping(0).defaultCopies = DFD_DefaultCopies;
+        return fileDesc.getClear();
+    }
+
+    virtual bool getModificationTime(CDateTime &dt) override
+    {
+        if (dfile.get())
+            return dfile->getModificationTime(dt);
+        Owned<IFile> file = getPartFile(0,0);
+        if (file.get()) {
+            return file->getTime(NULL,&dt,NULL);
+        }
+        return false;
+    }
+
+    virtual unsigned numParts() override 
+    {
+        if (dfile.get()) 
+            return dfile->numParts();
+        return 1;
+    }
+
+    virtual unsigned numPartCopies(unsigned partnum) override
+    {
+        if (dfile.get()) 
+            return dfile->queryPart(partnum).numCopies();
+        return 1;
+    }
+    
+    virtual IFile *getPartFile(unsigned partnum,unsigned copy) override
+    {
+        RemoteFilename rfn;
+        if ((partnum==0)&&(copy==0))
+            return createIFile(getPartFilename(rfn,partnum,copy));
+        return NULL;
+    }
+    
+    virtual void getDirAndFilename(StringBuffer &dir, StringBuffer &filename) override
+    {
+        if (dfile.get())
+        {
+            dir.append(dfile->queryDefaultDir());
+            splitFilename(localpath, nullptr, nullptr, &filename, &filename);
+        }
+        else if (localpath.isEmpty())
+        {
+            RemoteFilename rfn;
+            lfn.getExternalFilename(rfn);
+            StringBuffer fullPath;
+            rfn.getLocalPath(fullPath);
+            splitFilename(fullPath, nullptr, &dir, &filename, &filename);
+        }
+        else
+        {
+            dir.append(fileDescPath);
+            splitFilename(localpath, nullptr, nullptr, &filename, &filename);
+        }
+    }
+
+    virtual RemoteFilename &getPartFilename(RemoteFilename &rfn, unsigned partnum,unsigned copy) override
+    {
+        if (dfile.get()) 
+            dfile->queryPart(partnum).getFilename(rfn,copy);
+        else if (localpath.isEmpty())
+            lfn.getExternalFilename(rfn);
+        else
+            rfn.setRemotePath(localpath);
+        return rfn;
+    }
+
+    StringBuffer &getPartFilename(StringBuffer &path, unsigned partnum,unsigned copy)
+    {
+        RemoteFilename rfn;
+        if (dfile.get()) 
+            dfile->queryPart(partnum).getFilename(rfn,copy);
+        else if (localpath.isEmpty())
+            lfn.getExternalFilename(rfn);
+        else 
+            path.append(localpath);
+        if (rfn.isLocal())
+            rfn.getLocalPath(path);
+        else
+            rfn.getRemotePath(path);
+        return path;
+    }
+
+    virtual bool getPartCrc(unsigned partnum, unsigned &crc) override
+    {
+        if (dfile.get())  
+            return dfile->queryPart(partnum).getCrc(crc);
+        Owned<IFile> file = getPartFile(0,0);
+        if (file.get()) {
+            crc = file->getCRC();
+            return true;
+        }
+        return false;
+    }
+
+    virtual offset_t getPartFileSize(unsigned partnum) override
+    {
+        if (dfile.get()) 
+            return dfile->queryPart(partnum).getFileSize(true,false);
+        Owned<IFile> file = getPartFile(0,0);
+        if (file.get())
+            return file->size();
+        return (offset_t)-1;
+    }
+
+    virtual offset_t getFileSize() override
+    {
+        if (dfile.get())
+            return dfile->getFileSize(true,false);
+        offset_t ret = 0;
+        unsigned np = numParts();
+        for (unsigned i = 0;i<np;i++)
+            ret += getPartFileSize(i);
+        return ret;
+    }
+
+    virtual bool exists() const override
+    {
+        return fileExists;
+    }
+
+    virtual bool isExternal() const override
+    {
+        return lfn.isExternal();
+    }
+};
+
+
+ILocalOrDistributedFile* createLocalOrDistributedFile(const char *fname,IUserDescriptor *user,bool onlylocal,bool onlydfs, bool iswrite, bool isPrivilegedUser, const StringArray *clusters)
+{
+    Owned<CLocalOrDistributedFile> ret = new CLocalOrDistributedFile();
+    if (ret->init(fname,user,onlylocal,onlydfs,iswrite,isPrivilegedUser,clusters))
+        return ret.getClear();
+    return NULL;
+}
+
+
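
To summarise the routing rule encoded in the new wsdfs::lookup overloads above: writes always go via Dali, and reads use the DFS ESP service only for remote files, or for local non-external files when "dfsesp-localfiles" is enabled. A condensed, standalone sketch of just that decision (the booleans stand in for CDfsLogicalFileName::isRemote/isExternal and the component config lookup):

    #include <cstdio>

    // Mirrors the viaDali selection in wsdfs::lookup; the inputs are stand-ins for the real queries.
    static bool useDali(bool write, bool isRemote, bool isExternal, bool dfsespLocalFiles)
    {
        if (write)
            return true;                       // writes always go via Dali
        if (!isRemote)
        {
            if (isExternal || !dfsespLocalFiles)
                return true;                   // local files use Dali unless dfsesp-localfiles is enabled
        }
        return false;                          // otherwise use the DFS service (lookupLegacyDFSFile)
    }

    int main()
    {
        printf("write, remote         -> %s\n", useDali(true,  true,  false, false) ? "dali" : "dfs");
        printf("read,  remote         -> %s\n", useDali(false, true,  false, false) ? "dali" : "dfs");
        printf("read,  local          -> %s\n", useDali(false, false, false, false) ? "dali" : "dfs");
        printf("read,  local, opt on  -> %s\n", useDali(false, false, false, true)  ? "dali" : "dfs");
        return 0;
    }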

+ 7 - 0
esp/clients/ws_dfsclient/ws_dfsclient.hpp

@@ -51,6 +51,13 @@ WS_DFSCLIENT_API IDFSFile *lookupDFSFile(const char *logicalName, unsigned timeo
 WS_DFSCLIENT_API IDistributedFile *createLegacyDFSFile(IDFSFile *dfsFile);
 WS_DFSCLIENT_API IDistributedFile *lookupLegacyDFSFile(const char *logicalName, unsigned timeoutSecs, unsigned keepAliveExpiryFrequency, IUserDescriptor *userDesc);
 
+WS_DFSCLIENT_API IDistributedFile *lookup(CDfsLogicalFileName &lfn, IUserDescriptor *user, bool write, bool hold, bool lockSuperOwner, IDistributedFileTransaction *transaction, bool priviledged, unsigned timeout);
+WS_DFSCLIENT_API IDistributedFile *lookup(const char *logicalFilename, IUserDescriptor *user, bool write, bool hold, bool lockSuperOwner, IDistributedFileTransaction *transaction, bool priviledged, unsigned timeout);
+
+
 } // end of namespace wsdfs
 
+interface ILocalOrDistributedFile;
+WS_DFSCLIENT_API ILocalOrDistributedFile* createLocalOrDistributedFile(const char *fname,IUserDescriptor *user,bool onlylocal,bool onlydfs,bool iswrite, bool isPrivilegedUser, const StringArray *clusters);
+
 #endif // _WS_DFSCLIENT_HPP

+ 7 - 2
esp/scm/ldapenvironment.ecm

@@ -43,8 +43,6 @@ ESPenum OUMode : int
 
 ESPrequest LDAPCreateEnvironmentRequest
 {
-    bool                                              CreateLDAPEnvironment(false);
-    bool                                              CreateK8sSecrets(false);
     [label("Environment Name"), cols(20)] string      EnvName;
     [label("Environment Owner"), cols(20)] string     EnvOwnerName;
     [label("Description"), cols(60)] string           EnvDescription;
@@ -63,6 +61,13 @@ ESPrequest LDAPCreateEnvironmentRequest
 
     ESPEnum OUMode                                    WorkunitsMode;
     [label("Custom Workunits Base DN"), cols(60)] string CustomWorkunitsBaseDN;
+
+    bool                                              CreateLDAPEnvironment(false);
+    bool                                              CreateK8sSecrets(false);
+
+    bool                                              CreateVaultSecrets(false);
+    [label("Vault Name"), cols(20)] string            VaultName;
+
 };
 
 ESPresponse [exceptions_inline] LDAPCreateEnvironmentResponse

+ 53 - 59
esp/services/ldapenvironment/ldapenvironmentService.cpp

@@ -142,9 +142,19 @@ bool CldapenvironmentEx::changePermissions(const char * ou, const char * userFQD
     return ok;
 }
 
-bool CldapenvironmentEx::createSecret(const char * secretName, const char * username, const char * pwd, StringBuffer & notes)
+bool CldapenvironmentEx::createSecret(SecretType type, const char * secretName, const char * username, const char * pwd, StringBuffer & notes)
 {
-    VStringBuffer cmdLineSafe("kubectl create secret generic %s --from-literal=username=%s --from-literal=password=", secretName, username);
+    StringBuffer cmdLineSafe;
+    switch (type)
+    {
+        case ST_K8S:
+            cmdLineSafe.appendf("kubectl create secret generic %s --from-literal=username=%s --from-literal=password=", secretName, username);
+            break;
+        case ST_VAULT:
+            cmdLineSafe.appendf("vault kv put secret/authn/%s username=%s password=", secretName, username);
+            break;
+    }
+
     VStringBuffer cmdLine("%s%s", cmdLineSafe.str(), pwd);
     try
     {
@@ -161,6 +171,17 @@ bool CldapenvironmentEx::createSecret(const char * secretName, const char * user
     return true;
 }
 
+void CldapenvironmentEx::createLDAPBaseDN(const char * baseDN, const char * description, StringBuffer & notes)
+{
+    try
+    {
+        secmgr->createLdapBasedn(nullptr, baseDN, PT_ADMINISTRATORS_ONLY, description);
+    }
+    catch(...)
+    {
+        notes.appendf("\nNon Fatal Error creating '%s'", baseDN);
+    }
+}
 
 bool CldapenvironmentEx::onLDAPCreateEnvironment(IEspContext &context, IEspLDAPCreateEnvironmentRequest &req, IEspLDAPCreateEnvironmentResponse &resp)
 {
@@ -195,6 +216,9 @@ bool CldapenvironmentEx::onLDAPCreateEnvironment(IEspContext &context, IEspLDAPC
         if (!req.getWorkunitsMode() == COUMode_CreateCustom && isEmptyString(req.getCustomWorkunitsBaseDN()))
             throw MakeStringException(-1, "CustomWorkunitsBaseDN must be specified (ex. 'ou=workunits,ou=hpcc,dc=myldap,dc=com')");
 
+        if (req.getCreateVaultSecrets() && isEmptyString(req.getVaultName()))
+            throw MakeStringException(-1, "Vault Name must be specified to create vault secrets");
+
         // Create OU string names
 
         StringBuffer respFilesBaseDN, respGroupsBaseDN, respUsersBaseDN, respResourcesBaseDN, respWorkunitsBaseDN;
@@ -216,46 +240,11 @@ bool CldapenvironmentEx::onLDAPCreateEnvironment(IEspContext &context, IEspLDAPC
         if (req.getCreateLDAPEnvironment())
         {
             //Note that ESP will also try to create these OUs on startup
-            try
-            {
-                 secmgr->createLdapBasedn(nullptr, respFilesBaseDN.str(), PT_ADMINISTRATORS_ONLY, description.str());
-            }
-            catch(...)
-            {
-                  notes.appendf("\nNon Fatal Error creating '%s'", respFilesBaseDN.str());
-            }
-            try
-            {
-                 secmgr->createLdapBasedn(nullptr, respGroupsBaseDN.str(), PT_ADMINISTRATORS_ONLY, description.str());
-            }
-            catch(...)
-            {
-                notes.appendf("\nNon Fatal Error creating '%s'", respGroupsBaseDN.str());
-            }
-            try
-            {
-                secmgr->createLdapBasedn(nullptr, respUsersBaseDN.str(), PT_ADMINISTRATORS_ONLY, description.str());
-            }
-            catch(...)
-            {
-                notes.appendf("\nNon Fatal Error creating '%s'", respUsersBaseDN.str());
-            }
-            try
-            {
-                secmgr->createLdapBasedn(nullptr, respResourcesBaseDN.str(), PT_ADMINISTRATORS_ONLY, description.str());
-            }
-            catch(...)
-            {
-                notes.appendf("\nNon Fatal Error creating '%s'", respResourcesBaseDN.str());
-            }
-            try
-            {
-                secmgr->createLdapBasedn(nullptr, respWorkunitsBaseDN.str(), PT_ADMINISTRATORS_ONLY, description.str());
-            }
-            catch(...)
-            {
-                 notes.appendf("\nNon Fatal Error creating '%s'", respWorkunitsBaseDN.str());
-            }
+            createLDAPBaseDN(respFilesBaseDN.str(), description.str(), notes);
+            createLDAPBaseDN(respGroupsBaseDN.str(), description.str(), notes);
+            createLDAPBaseDN(respUsersBaseDN.str(), description.str(), notes);
+            createLDAPBaseDN(respResourcesBaseDN.str(), description.str(), notes);
+            createLDAPBaseDN(respWorkunitsBaseDN.str(), description.str(), notes);
 
             //Create HPCCAdmins Group
             try
@@ -312,14 +301,7 @@ bool CldapenvironmentEx::onLDAPCreateEnvironment(IEspContext &context, IEspLDAPC
             //Grant SmcAccess to HPCCAdmins group
             {
                 VStringBuffer smcAccess("%sSmcAccess,%s", resPrefix, respResourcesBaseDN.str());
-                try
-                {
-                    secmgr->createLdapBasedn(nullptr, smcAccess.str(), PT_ADMINISTRATORS_ONLY, description.str());
-                }
-                catch(...)
-                {
-                    notes.appendf("\nNon Fatal Error creating '%s'", smcAccess.str());
-                }
+                createLDAPBaseDN(smcAccess.str(), description.str(), notes);
 
                 CPermissionAction action;
                 action.m_action = "update";
@@ -342,10 +324,17 @@ bool CldapenvironmentEx::onLDAPCreateEnvironment(IEspContext &context, IEspLDAPC
         }
 
         //Create the secret
-        VStringBuffer  respHPCCAdminK8sSecretName("hpcc-admin-%s", req.getEnvName());
-        respHPCCAdminK8sSecretName.toLowerCase();
+        VStringBuffer  respHPCCAdminSecretName("hpcc-admin-%s", req.getEnvName());
+        respHPCCAdminSecretName.toLowerCase();
         if (req.getCreateK8sSecrets())
-            createSecret(respHPCCAdminK8sSecretName.str(), respHPCCAdminUser.str(), respHPCCAdminPwd.str(), notes);
+            createSecret(ST_K8S, respHPCCAdminSecretName.str(), respHPCCAdminUser.str(), respHPCCAdminPwd.str(), notes);
+
+        StringBuffer respVaultID;
+        if (req.getCreateVaultSecrets())
+        {
+            respVaultID.set(req.getVaultName());
+            createSecret(ST_VAULT, respHPCCAdminSecretName.str(), respHPCCAdminUser.str(), respHPCCAdminPwd.str(), notes);
+        }
 
         //----------------------------------
         // Create LDAP Admin Username/password.
@@ -371,10 +360,12 @@ bool CldapenvironmentEx::onLDAPCreateEnvironment(IEspContext &context, IEspLDAPC
         }
 
         //Create the secret
-        VStringBuffer respLDAPAdminK8sSecretName("ldap-admin-%s", req.getEnvName());
-        respLDAPAdminK8sSecretName.toLowerCase();
+        VStringBuffer respLDAPAdminSecretName("ldap-admin-%s", req.getEnvName());
+        respLDAPAdminSecretName.toLowerCase();
         if (req.getCreateK8sSecrets())
-            createSecret(respLDAPAdminK8sSecretName.str(), respLDAPAdminUser.str(), respLDAPAdminPwd.str(), notes);
+            createSecret(ST_K8S, respLDAPAdminSecretName.str(), respLDAPAdminUser.str(), respLDAPAdminPwd.str(), notes);
+        if (req.getCreateVaultSecrets())
+            createSecret(ST_VAULT, respLDAPAdminSecretName.str(), respLDAPAdminUser.str(), respLDAPAdminPwd.str(), notes);
 
         //----------------------------------
         // Set response
@@ -399,17 +390,20 @@ bool CldapenvironmentEx::onLDAPCreateEnvironment(IEspContext &context, IEspLDAPC
                                "  ldap:\n"
                                "    adminGroupName: %s\n"
                                "    ldapAdminSecretKey: %s\n"
+                               "    ldapAdminVaultId: %s\n"
                                "    hpccAdminSecretKey: %s\n"
+                               "    hpccAdminVaultId: %s\n"
                                "    filesBasedn: %s\n"
                                "    groupsBasedn: %s\n"
                                "    usersBasedn: %s\n"
                                "    resourcesBasedn: %s\n"
                                "    workunitsBasedn: %s\n"
                                "    systemBasedn: %s\n\n",
-                               ldapcredskey.str(), respLDAPAdminK8sSecretName.str(),
-                               hpcccredskey.str(), respHPCCAdminK8sSecretName.str(),
+                               ldapcredskey.str(), respLDAPAdminSecretName.str(),
+                               hpcccredskey.str(), respHPCCAdminSecretName.str(),
                                adminGroupName.str(),
-                               ldapcredskey.str(), hpcccredskey.str(),
+                               ldapcredskey.str(), respVaultID.str(),
+                               hpcccredskey.str(), respVaultID.str(),
                                respFilesBaseDN.str(), respGroupsBaseDN.str(), respUsersBaseDN.str(), respResourcesBaseDN.str(), respWorkunitsBaseDN.str(), respUsersBaseDN.str());
         resp.setLDAPHelm(ldapHelm.str());
         resp.setNotes(notes.str());
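
A note on the refactored createSecret above: the SecretType switch builds either a kubectl or a vault command, and the password is appended only to the cmdLine that is executed, while cmdLineSafe is kept without it - presumably so the command can be reported in the notes or logs without the credential (the rest of the function body is not shown in this hunk, so that use is an assumption). A standalone sketch of the command construction:

    #include <cstdio>
    #include <string>

    enum SecretType { ST_K8S = 0, ST_VAULT = 1 };

    // Builds the credential-free prefix; mirrors the switch in createSecret above.
    static std::string buildSafeCmd(SecretType type, const std::string & secretName, const std::string & username)
    {
        switch (type)
        {
            case ST_K8S:
                return "kubectl create secret generic " + secretName +
                       " --from-literal=username=" + username + " --from-literal=password=";
            case ST_VAULT:
                return "vault kv put secret/authn/" + secretName +
                       " username=" + username + " password=";
        }
        return "";
    }

    int main()
    {
        std::string safe = buildSafeCmd(ST_VAULT, "hpcc-admin-myenv", "hpccadmin");
        std::string full = safe + "s3cret";      // only the full command would actually be executed
        printf("%s<redacted>\n", safe.c_str());  // the safe form never contains the password
        (void)full;
        return 0;
    }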

+ 8 - 1
esp/services/ldapenvironment/ldapenvironmentService.hpp

@@ -32,9 +32,16 @@ private:
     StringBuffer    adminGroupName;
 
     const char * formatOUname(StringBuffer &ou, const char * envName, int mode, const char * sharedOU, const char * reqBaseDN, const char * privateOU);
-    bool createSecret(const char * secretName, const char * username, const char * pwd, StringBuffer & notes);
+    void createLDAPBaseDN(const char * baseDN, const char * description, StringBuffer & notes);
     bool changePermissions(const char * ou, const char * userFQDN, SecAccessFlags allows, SecAccessFlags denies);
 
+    enum SecretType : int
+    {
+        ST_K8S = 0,
+        ST_VAULT = 1
+    };
+    bool createSecret(SecretType type, const char * secretName, const char * username, const char * pwd, StringBuffer & notes);
+
 public:
     IMPLEMENT_IINTERFACE;
     virtual void init(IPropertyTree *_cfg, const char *_process, const char *_service);

+ 4 - 4
esp/services/ws_workunits/ws_workunitsHelpers.cpp

@@ -3434,7 +3434,7 @@ StringBuffer & WsWuHelpers::resolveQueryWuid(StringBuffer &wuid, const char *que
 }
 
 void WsWuHelpers::runWsWuQuery(IEspContext &context, IConstWorkUnit *cw, const char *queryset, const char *query,
-    const char *cluster, const char *paramXml, IArrayOf<IConstApplicationValue> *applications)
+    const char *cluster, const char *paramXml, IArrayOf<IConstNamedValue> *variables, IArrayOf<IConstApplicationValue> *applications)
 {
     StringBuffer srcWuid;
 
@@ -3443,11 +3443,11 @@ void WsWuHelpers::runWsWuQuery(IEspContext &context, IConstWorkUnit *cw, const c
     copyWsWorkunit(context, *wu, srcWuid);
     wu.clear();
 
-    submitWsWorkunit(context, cw, cluster, NULL, 0,  0, false, true, true, paramXml, NULL, NULL, applications);
+    submitWsWorkunit(context, cw, cluster, NULL, 0,  0, false, true, true, paramXml, variables, NULL, applications);
 }
 
 void WsWuHelpers::runWsWuQuery(IEspContext &context, StringBuffer &wuid, const char *queryset, const char *query,
-    const char *cluster, const char *paramXml, IArrayOf<IConstApplicationValue> *applications)
+    const char *cluster, const char *paramXml, IArrayOf<IConstNamedValue> *variables, IArrayOf<IConstApplicationValue> *applications)
 {
     StringBuffer srcWuid;
 
@@ -3457,7 +3457,7 @@ void WsWuHelpers::runWsWuQuery(IEspContext &context, StringBuffer &wuid, const c
     copyWsWorkunit(context, *wu, srcWuid);
     wu.clear();
 
-    submitWsWorkunit(context, wuid.str(), cluster, NULL, 0,  0, false, true, true, paramXml, NULL, NULL, applications);
+    submitWsWorkunit(context, wuid.str(), cluster, NULL, 0,  0, false, true, true, paramXml, variables, NULL, applications);
 }
 
 void WsWuHelpers::checkAndTrimWorkunit(const char* methodName, StringBuffer& input)

+ 4 - 4
esp/services/ws_workunits/ws_workunitsHelpers.hpp

@@ -539,10 +539,10 @@ namespace WsWuHelpers
             IArrayOf<IConstNamedValue> *variables=NULL, IArrayOf<IConstNamedValue> *debugs=NULL, IArrayOf<IConstApplicationValue> *applications=NULL);
     IException * noteException(IWorkUnit *wu, IException *e, ErrorSeverity level=SeverityError);
     StringBuffer & resolveQueryWuid(StringBuffer &wuid, const char *queryset, const char *query, bool notSuspended=true, IWorkUnit *wu=NULL);
-    void runWsWuQuery(IEspContext &context, IConstWorkUnit *cw, const char *queryset, const char *query, const char *cluster, const char *paramXml=NULL,
-            IArrayOf<IConstApplicationValue> *applications=NULL);
-    void runWsWuQuery(IEspContext &context, StringBuffer &wuid, const char *queryset, const char *query, const char *cluster, const char *paramXml=NULL,
-            IArrayOf<IConstApplicationValue> *applications=NULL);
+    void runWsWuQuery(IEspContext &context, IConstWorkUnit *cw, const char *queryset, const char *query, const char *cluster, const char *paramXml,
+            IArrayOf<IConstNamedValue> *variables, IArrayOf<IConstApplicationValue> *applications);
+    void runWsWuQuery(IEspContext &context, StringBuffer &wuid, const char *queryset, const char *query, const char *cluster, const char *paramXml,
+            IArrayOf<IConstNamedValue> *variables, IArrayOf<IConstApplicationValue> *applications);
     void checkAndTrimWorkunit(const char* methodName, StringBuffer& input);
 };
 

+ 2 - 2
esp/services/ws_workunits/ws_workunitsService.cpp

@@ -1007,7 +1007,7 @@ bool CWsWorkunitsEx::onWUSubmit(IEspContext &context, IEspWUSubmitRequest &req,
                 throw WsWuHelpers::noteException(wu, MakeStringException(ECLWATCH_INVALID_INPUT,"Queryset and/or query not specified"));
             }
 
-            WsWuHelpers::runWsWuQuery(context, cw, info.queryset.str(), info.query.str(), cluster, NULL);
+            WsWuHelpers::runWsWuQuery(context, cw, info.queryset.str(), info.query.str(), cluster, nullptr, nullptr, nullptr);
         }
         else
             WsWuHelpers::submitWsWorkunit(context, cw, cluster, req.getSnapshot(), req.getMaxRunTime(), req.getMaxCost(), true, false, false, nullptr, nullptr, nullptr, nullptr);
@@ -1076,7 +1076,7 @@ bool CWsWorkunitsEx::onWURun(IEspContext &context, IEspWURunRequest &req, IEspWU
         else if (notEmpty(req.getQuerySet()) && notEmpty(req.getQuery()))
         {
             PROGLOG("WURun: QuerySet %s, Query %s", req.getQuerySet(), req.getQuery());
-            WsWuHelpers::runWsWuQuery(context, wuid, req.getQuerySet(), req.getQuery(), cluster, req.getInput(),
+            WsWuHelpers::runWsWuQuery(context, wuid, req.getQuerySet(), req.getQuery(), cluster, req.getInput(), &req.getVariables(),
                 &req.getApplicationValues());
         }
         else

+ 3 - 1
esp/src/eclwatch/HPCCPlatformWidget.js

@@ -197,7 +197,9 @@ define([
                 if (lang.exists("MyAccountResponse.username", response)) {
                     context.userName = response.MyAccountResponse.username;
                     dojoConfig.username = response.MyAccountResponse.username;
-                    cookie("User", response.MyAccountResponse.username);
+                    if (response.MyAccountResponse.username) {
+                        cookie("User", response.MyAccountResponse.username);
+                    }
                     context.checkIfAdmin(context.userName);
                     context.refreshUserName();
                     if (!cookie("PasswordExpiredCheck")) {

+ 8 - 3
esp/src/eclwatch/LockDialogWidget.js

@@ -57,6 +57,14 @@ define([
 
         show: function (event) {
             var context = this;
+            if (!cookie("User")) {
+                cookie("Status", "Unlocked");
+                context.storage.setItem("Status", "Unlocked");
+                topic.publish("hpcc/session_management_status", {
+                    status: "Unlocked"
+                });
+                return;
+            }
             on(this.unlockPassword, "keypress", function (event) {
                 if (event.key === "Enter") {
                     context._onUnlock();
@@ -106,7 +114,6 @@ define([
                             status: "Unlocked"
                         });
                         cookie("Status", "Unlocked");
-                        context.storage.removeItem("Status");
                         context.storage.setItem("Status", "Unlocked");
                         if (context.idleFired) {
                             dojo.publish("hpcc/brToaster", {
@@ -144,7 +151,6 @@ define([
                     status: "Locked"
                 });
                 cookie("Status", "Locked");
-                context.storage.removeItem("Status");
                 context.storage.setItem("Status", "Locked");
             } else if (cookie("Status") === "Unlocked") {
                 xhr("esp/lock", {
@@ -158,7 +164,6 @@ define([
                             status: "Locked"
                         });
                         cookie("Status", "Locked");
-                        context.storage.removeItem("Status");
                         context.storage.setItem("Status", "Locked");
                     }
                 });

+ 6 - 6
esp/src/package-lock.json

@@ -5376,9 +5376,9 @@
       }
     },
     "node_modules/minimist": {
-      "version": "1.2.5",
-      "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz",
-      "integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw==",
+      "version": "1.2.6",
+      "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.6.tgz",
+      "integrity": "sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q==",
       "dev": true
     },
     "node_modules/mkdirp": {
@@ -12118,9 +12118,9 @@
       }
     },
     "minimist": {
-      "version": "1.2.5",
-      "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz",
-      "integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw==",
+      "version": "1.2.6",
+      "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.6.tgz",
+      "integrity": "sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q==",
       "dev": true
     },
     "mkdirp": {

+ 1 - 1
esp/src/package.json

@@ -4,7 +4,7 @@
   "description": "'ECL Watch' Web interface for HPCC Platform.",
   "scripts": {
     "clean": "rimraf ./build ./lib ./src/nlsHPCCType.ts",
-    "lint": "eslint eclwatch/**/*.js src/**/*.ts? src-react/**/*.ts?",
+    "lint": "eslint ./eclwatch ./src ./src-react",
     "lint-fix": "eslint --fix eclwatch/**/*.js src/**/*.ts src-react/**/*.ts?",
     "copy-res-es6-promise": "copyfiles -u 3 \"./node_modules/es6-promise/dist/es6-promise.auto.min.js\" ./build/node_modules/es6-promise/dist/",
     "copy-res-eclwatch-img": "copyfiles -u 2 \"./eclwatch/img/**/*.{png,jpg,gif}\" ./build/eclwatch/img/",

+ 23 - 11
esp/src/src-react/components/ECLPlayground.tsx

@@ -236,8 +236,6 @@ const ECLEditorToolbar: React.FunctionComponent<ECLEditorToolbarProps> = ({
                 label={nlsHPCC.Target}
                 className={playgroundStyles.inlineDropdown}
                 onChange={React.useCallback((evt, option) => setCluster(option.key.toString()), [setCluster])}
-                required={true}
-                selectedKey={cluster ? cluster : undefined}
             />
             <div className={playgroundStyles.outputButtons}>
                 <IconButton
@@ -274,6 +272,8 @@ export const ECLPlayground: React.FunctionComponent<ECLPlaygroundProps> = (props
     const [workunit, setWorkunit] = React.useState<Workunit>();
     const [editor, setEditor] = React.useState<ECLEditor>();
     const [query, setQuery] = React.useState("");
+    const [selectedEclSample, setSelectedEclSample] = React.useState("");
+    const [eclContent, setEclContent] = React.useState("");
     const [eclSamples, setEclSamples] = React.useState<IDropdownOption[]>([]);
 
     React.useEffect(() => {
@@ -295,6 +295,7 @@ export const ECLPlayground: React.FunctionComponent<ECLPlaygroundProps> = (props
             .then(response => response.json())
             .then(json => setEclSamples(
                 json.items.map(item => {
+                    if (item.selected) setSelectedEclSample(item.filename);
                     return { key: item.filename, text: item.name };
                 })
             ));
@@ -306,6 +307,24 @@ export const ECLPlayground: React.FunctionComponent<ECLPlaygroundProps> = (props
         }
     }, [wuid, editor, theme]);
 
+    React.useEffect(() => {
+        fetch(`/esp/files/eclwatch/ecl/${selectedEclSample}`)
+            .then(response => {
+                if (response.status && response.status === 200 && response.body) {
+                    response.text().then(sample => {
+                        if (sample.toLowerCase().indexOf("<!doctype") < 0) {
+                            setEclContent(sample);
+                        }
+                    });
+                }
+            });
+    }, [selectedEclSample]);
+
+    React.useEffect(() => {
+        if (!editor) return;
+        editor.ecl(eclContent);
+    }, [editor, eclContent]);
+
     const handleThemeToggle = React.useCallback((evt) => {
         if (!editor) return;
         if (evt.detail && evt.detail.dark === true) {
@@ -323,16 +342,9 @@ export const ECLPlayground: React.FunctionComponent<ECLPlaygroundProps> = (props
                 label="Sample"
                 className={`${playgroundStyles.inlineDropdown} ${playgroundStyles.samplesDropdown}`}
                 options={eclSamples}
+                selectedKey={selectedEclSample}
                 placeholder="Select sample ECL..."
-                onChange={(evt, item) => {
-                    fetch(`/esp/files/eclwatch/ecl/${item.key}`)
-                        .then(async response => {
-                            if (response.status && response.status === 200 && response.body) {
-                                const eclSample = await response.text();
-                                editor.ecl(eclSample);
-                            }
-                        });
-                }}
+                onChange={(evt, item) => { setSelectedEclSample(item.key.toString()); }}
             />
         </div>
         <ReflexContainer orientation="horizontal">

+ 7 - 2
esp/src/src-react/components/Workunits.tsx

@@ -157,8 +157,13 @@ export const Workunits: React.FunctionComponent<WorkunitsProps> = ({
         title: nlsHPCC.Delete,
         message: nlsHPCC.DeleteSelectedWorkunits,
         items: selection.map(s => s.Wuid),
-        onSubmit: React.useCallback(() => {
-            WsWorkunits.WUAction(selection, "Delete").then(() => refreshTable(true));
+        onSubmit: React.useCallback(async () => {
+            const unknownWUs = selection.filter(wu => wu.State === "unknown");
+            if (unknownWUs.length) {
+                await WsWorkunits.WUAction(unknownWUs, "SetToFailed");
+            }
+            await WsWorkunits.WUAction(selection, "Delete");
+            refreshTable(true);
         }, [refreshTable, selection])
     });
 

+ 8 - 3
esp/src/src-react/components/forms/Fields.tsx

@@ -303,6 +303,7 @@ export const TargetClusterTextField: React.FunctionComponent<TargetClusterTextFi
     const [targetClusters, defaultCluster] = useLogicalClusters();
     const [options, setOptions] = React.useState<IDropdownOption[]>();
     const [defaultRow, setDefaultRow] = React.useState<IDropdownOption>();
+    const { onChange, required, selectedKey } = { ...props };
 
     React.useEffect(() => {
         const options = targetClusters?.map(row => {
@@ -314,10 +315,14 @@ export const TargetClusterTextField: React.FunctionComponent<TargetClusterTextFi
         }) || [];
         setOptions(options);
 
-        if (autoSelectDropdown(props.selectedKey, props.required)) {
-            setDefaultRow(options.filter(row => row.key === defaultCluster?.Name)[0]);
+        if (autoSelectDropdown(selectedKey, required)) {
+            const selectedItem = options.filter(row => row.key === defaultCluster?.Name)[0];
+            if (selectedItem) {
+                setDefaultRow(selectedItem);
+                onChange(undefined, selectedItem);
+            }
         }
-    }, [targetClusters, defaultCluster, props.selectedKey, props.required]);
+    }, [targetClusters, defaultCluster, onChange, required, selectedKey]);
 
     return <AsyncDropdown {...props} selectedKey={props.selectedKey || defaultRow?.key as string} options={options} />;
 };

+ 1 - 1
esp/src/src-react/hooks/activity.ts

@@ -23,7 +23,7 @@ export function useActivity(): [Activity, number, () => void] {
         return () => {
             active = false;
             handle.release();
-        }
+        };
     }, [count]);
 
     return [activity, lastUpdate, increment];

+ 13 - 10
esp/src/src-react/hooks/grid.tsx

@@ -125,7 +125,7 @@ interface useFluentStoreGridResponse {
     selection: any[],
     copyButtons: ICommandBarItemProps[],
     total: number,
-    refreshTable: () => void
+    refreshTable: (clearSelection?: boolean) => void
 }
 
 function useFluentStoreGrid({
@@ -144,8 +144,17 @@ function useFluentStoreGrid({
     const [items, setItems] = React.useState<any[]>([]);
     const [total, setTotal] = React.useState<number>(0);
 
-    const refreshTable = React.useCallback(() => {
+    const selectionHandler = useConst(new Selection({
+        onSelectionChanged: () => {
+            setSelection(selectionHandler.getSelection());
+        }
+    }));
+
+    const refreshTable = React.useCallback((clearSelection = false) => {
         if (isNaN(start) || (isNaN(count) || count === 0)) return;
+        if (clearSelection) {
+            selectionHandler.setItems([], true);
+        }
         const storeQuery = store.query(query ?? {}, { start, count, sort: sorted ? [sorted] : undefined });
         storeQuery.total.then(total => {
             setTotal(total);
@@ -153,7 +162,7 @@ function useFluentStoreGrid({
         storeQuery.then(items => {
             setItems(items);
         });
-    }, [count, query, sorted, start, store]);
+    }, [count, query, selectionHandler, sorted, start, store]);
 
     React.useEffect(() => {
         refreshTable();
@@ -183,12 +192,6 @@ function useFluentStoreGrid({
         });
     }, [constColumns]);
 
-    const selectionHandler = useConst(new Selection({
-        onSelectionChanged: () => {
-            setSelection(selectionHandler.getSelection());
-        }
-    }));
-
     const renderDetailsHeader = React.useCallback((props: IDetailsHeaderProps, defaultRender?: any) => {
         return defaultRender({
             ...props,
@@ -262,7 +265,7 @@ interface useFluentPagedGridResponse {
     Grid: React.FunctionComponent<{ height?: string }>,
     GridPagination: React.FunctionComponent<Partial<IStackProps>>,
     selection: any[],
-    refreshTable: (full?: boolean) => void,
+    refreshTable: (clearSelection?: boolean) => void,
     copyButtons: ICommandBarItemProps[]
 }
 

+ 2 - 2
esp/src/src-react/hooks/metrics.ts

@@ -36,11 +36,11 @@ export function useMetricsOptions(): [MetricsOptions, (opts: MetricsOptions) =>
             options[key] = opts[key];
         }
         refresh();
-    }, [options, refresh]);
+    }, [refresh]);
 
     const save = React.useCallback(() => {
         store?.set("MetricOptions", JSON.stringify(options), true);
-    }, [options, store]);
+    }, [store]);
 
     const reset = React.useCallback((toDefaults: boolean = false) => {
         if (toDefaults) {

+ 24 - 24
esp/src/src/ECLArchiveWidget.ts

@@ -412,10 +412,10 @@ export class ECLArchiveWidget {
         function updateSummary(markers) {
             const propCounts = {};
             const propFormats = {};
-            const propSums = markers.reduce((ret, n)=>{
-                n.properties.forEach(prop=>{
-                    if(prop.Measure !== undefined){
-                        if(!propCounts[prop.Name]){
+            const propSums = markers.reduce((ret, n) => {
+                n.properties.forEach(prop => {
+                    if (prop.Measure !== undefined) {
+                        if (!propCounts[prop.Name]) {
                             propCounts[prop.Name] = 0;
                             propFormats[prop.Name] = prop.Measure;
                             ret[prop.Name] = 0;
@@ -426,21 +426,21 @@ export class ECLArchiveWidget {
                 });
                 return ret;
             }, {});
-            const propAvgs = Object.keys(propSums).reduce((ret, k)=>{
+            const propAvgs = Object.keys(propSums).reduce((ret, k) => {
                 ret[k] = propSums[k] / propCounts[k];
                 return ret;
             }, {});
             context.summaryTable
                 .columns(["Name", "Cnt", "Avg", "Sum"])
                 .data([
-                    ...Object.keys(propSums).map(k=>{
+                    ...Object.keys(propSums).map(k => {
                         let avg = propAvgs[k];
                         let sum = propSums[k];
 
                         const isTime = propFormats[k] === "ns";
                         const isSize = propFormats[k] === "sz";
 
-                        if(isTime) {
+                        if (isTime) {
                             avg = _formatTime(avg);
                             sum = _formatTime(sum);
                         } else if (isSize) {
@@ -460,17 +460,17 @@ export class ECLArchiveWidget {
                 ])
                 .lazyRender()
                 ;
-            function _formatTime(v){
-                if(v > 1000000000) {
+            function _formatTime(v) {
+                if (v > 1000000000) {
                     return (v / 1000000000).toFixed(3) + "s";
                 }
                 return (v / 1000000).toFixed(3) + "ms";
             }
-            function _formatSize(v){
-                if(v > 1000000000) {
+            function _formatSize(v) {
+                if (v > 1000000000) {
                     return (v * 0.000000000931).toFixed(3) + "Gb";
                 }
-                else if(v > 1000000) {
+                else if (v > 1000000) {
                     return (v * 0.0000009537).toFixed(3) + "Mb";
                 }
                 return (v * 0.000977).toFixed(3) + "Kb";
@@ -579,10 +579,10 @@ export class ECLArchiveWidget {
                     marker.color,
                     "Verdana",
                     "12px",
-                    () => {},
-                    () => {},
+                    () => { },
+                    () => { },
                     () => {
-                        if(context.selectedMarker === marker.lineNum) {
+                        if (context.selectedMarker === marker.lineNum) {
                             updateSummary(markers);
                             context.selectedMarker = -1;
                             const columnArr = context.summaryTable.columns();
@@ -594,9 +594,9 @@ export class ECLArchiveWidget {
                         } else {
 
                             const _data = markerTableData(marker);
-                            
+
                             context.summaryTable
-                                .columns(["Line: "+marker.lineNum, ...Array(_data[0].length).fill("")])
+                                .columns(["Line: " + marker.lineNum, ...Array(_data[0].length).fill("")])
                                 .data(_data)
                                 .lazyRender()
                                 ;
@@ -675,16 +675,16 @@ export class ECLArchiveWidget {
             return ret;
 
             function nsToTime(nanoseconds) {
-                let subSecond:string|number = Math.floor(nanoseconds % 100000000);
-                let seconds:string|number = Math.floor((nanoseconds / 1000000000) % 60);
-                let minutes:string|number = Math.floor((nanoseconds / (1000000000 * 60)) % 60);
-                let hours:string|number = Math.floor((nanoseconds / (1000000000 * 60 * 60)) % 24);
-                
+                const subSecond: string | number = Math.floor(nanoseconds % 100000000);
+                let seconds: string | number = Math.floor((nanoseconds / 1000000000) % 60);
+                let minutes: string | number = Math.floor((nanoseconds / (1000000000 * 60)) % 60);
+                let hours: string | number = Math.floor((nanoseconds / (1000000000 * 60 * 60)) % 24);
+
                 hours = (hours < 10) ? "0" + hours : hours;
                 minutes = (minutes < 10) ? "0" + minutes : minutes;
                 seconds = (seconds < 10) ? "0" + seconds : seconds;
-                
-                return String(hours).padStart(2,"0") + ":" + String(minutes) + ":" + String(seconds) + "." + String(subSecond).padStart(9,"0");
+
+                return String(hours).padStart(2, "0") + ":" + String(minutes) + ":" + String(seconds) + "." + String(subSecond).padStart(9, "0");
             }
         }
     }

+ 1 - 1
esp/src/src/ESPSearch.ts

@@ -343,7 +343,7 @@ export function searchAll(searchText: string,
         searchParams.text = searchParams.text.trim();
 
         return searchParams;
-    }
+    };
 
     const searchArray = [];
     const searchParams = generateSearchParams(searchText);

+ 2 - 2
esp/src/src/ws_access.ts

@@ -21,7 +21,7 @@ class UsersStore extends ESPRequest.Store {
     startProperty = "PageStartFrom";
     countProperty = "PageSize";
 
-    SortbyProperty = "SortBy"
+    SortbyProperty = "SortBy";
 
     groupname: string;
 
@@ -398,7 +398,7 @@ class PermissionsStore extends Memory {
         const tmp = id.split(CONCAT_SYMBOL);
         if (tmp.length > 0) {
             const parentID = tmp[0];
-            const parent = super.get(parentID);  
+            const parent = super.get(parentID);
             if (tmp.length === 1) {
                 return parent;
             }

+ 10 - 0
helm/hpcc/values.schema.json

@@ -1415,6 +1415,16 @@
           "default": true,
           "description": "Treat out-of-date local files as if they were not present."
         },
+        "ignoreFileDateMismatches": { 
+          "type": "boolean",
+          "default": false,
+          "description": "Ignore mismatched file dates on local files"
+        },
+        "fileTimeFuzzySeconds": { 
+          "type": "integer",
+          "default": 0,
+          "description": "Ignore mismatched file dates of up to this amount"
+        },
         "lazyOpen": {
           "type": "boolean",
           "default": false,

+ 3 - 3
initfiles/bin/check_executes

@@ -1,7 +1,7 @@
 #!/bin/bash
 
 usage() {
-  echo "Usage: check-executes [options] -- cmd args"
+  echo "Usage: check_executes [options] -- cmd args"
   echo "    -d <directory>     Mounted directory to store post-mortem info in"
   echo "    -f <file>          Specifies a file to preserve on post-mortem"
 }
@@ -49,7 +49,7 @@ for (( arg=1; arg <= "$#"; arg++ )); do
   optval=${!arg#*=}
   if [[ ${optname} == '--config' ]]; then
     PMD_COPYFILES+=(${optval})
-  elif [[ ${optname} == '--daliServer' ]]; then
+  elif [[ ${optname} == '--daliServers' ]]; then
     PMD_DALISERVER=${optval}
   elif [[ ${optname} == '--workunit' ]]; then
     PMD_WORKUNIT=${optval}
@@ -85,7 +85,7 @@ if [ $retVal -ne 0 ]; then
     rm core
   fi
   if [[ -n "${PMD_DALISERVER}" ]] && [[ -n "${PMD_WORKUNIT}" ]]; then
-    wutool postmortem ${PMD_WORKUNIT} DALISERVERS=${PMD_DALISERVER} PMD=${POST_MORTEM_DIR}
+    wutool postmortem ${PMD_WORKUNIT} DALISERVER=${PMD_DALISERVER} PMD=${POST_MORTEM_DIR}
     echo Updated workunit ${PMD_WORKUNIT}
   fi
 fi

+ 29 - 27
roxie/ccd/CMakeLists.txt

@@ -64,34 +64,35 @@ set (   SRCS
 
 include_directories ( 
          .
-         ${HPCC_SOURCE_DIR}/fs/dafsclient
-         ${HPCC_SOURCE_DIR}/system/jhtree
-         ${HPCC_SOURCE_DIR}/system/mp
-         ${HPCC_SOURCE_DIR}/common/workunit
-         ${HPCC_SOURCE_DIR}/roxie/udplib
-         ${HPCC_SOURCE_DIR}/roxie/ccdcache
-         ${HPCC_SOURCE_DIR}/roxie/roxie
+         ${HPCC_SOURCE_DIR}/common/deftype
+         ${HPCC_SOURCE_DIR}/common/dllserver
          ${HPCC_SOURCE_DIR}/common/environment
+         ${HPCC_SOURCE_DIR}/common/thorhelper
+         ${HPCC_SOURCE_DIR}/common/workunit
+         ${HPCC_SOURCE_DIR}/dali/base
+         ${HPCC_SOURCE_DIR}/dali/dfu
+         ${HPCC_SOURCE_DIR}/dali/ft
          ${HPCC_SOURCE_DIR}/ecl/hthor
          ${HPCC_SOURCE_DIR}/ecl/schedulectrl
+         ${HPCC_SOURCE_DIR}/esp/clients/ws_dfsclient
+         ${HPCC_SOURCE_DIR}/fs/dafsclient
+         ${HPCC_SOURCE_DIR}/roxie/ccdcache
+         ${HPCC_SOURCE_DIR}/rtl/eclrtl
+         ${HPCC_SOURCE_DIR}/rtl/include
          ${HPCC_SOURCE_DIR}/rtl/nbcd
-         ${HPCC_SOURCE_DIR}/common/deftype
-         ${HPCC_SOURCE_DIR}/system/include
-         ${HPCC_SOURCE_DIR}/dali/base
-         ${HPCC_SOURCE_DIR}/dali/dfu
+         ${HPCC_SOURCE_DIR}/roxie/roxie
          ${HPCC_SOURCE_DIR}/roxie/roxiemem
-         ${HPCC_SOURCE_DIR}/common/dllserver
+         ${HPCC_SOURCE_DIR}/roxie/udplib
+         ${HPCC_SOURCE_DIR}/system/include
+         ${HPCC_SOURCE_DIR}/system/jhtree
          ${HPCC_SOURCE_DIR}/system/jlib
-         ${HPCC_SOURCE_DIR}/common/thorhelper
-         ${HPCC_SOURCE_DIR}/rtl/eclrtl
-         ${HPCC_SOURCE_DIR}/rtl/include
+         ${HPCC_SOURCE_DIR}/system/libbase58
+         ${HPCC_SOURCE_DIR}/system/mp
+         ${HPCC_SOURCE_DIR}/system/security/shared
+         ${HPCC_SOURCE_DIR}/system/security/securesocket
          ${HPCC_SOURCE_DIR}/testing/unittests
          ${CMAKE_BINARY_DIR}
          ${CMAKE_BINARY_DIR}/oss
-         ${HPCC_SOURCE_DIR}/dali/ft
-         ${HPCC_SOURCE_DIR}/system/security/shared
-         ${HPCC_SOURCE_DIR}/system/security/securesocket
-         ${HPCC_SOURCE_DIR}/system/libbase58
     )
 
 if (CMAKE_COMPILER_IS_GNUCC OR CMAKE_COMPILER_IS_CLANG)
@@ -112,20 +113,21 @@ HPCC_ADD_LIBRARY( ccd SHARED ${SRCS} )
 install ( TARGETS ccd RUNTIME DESTINATION ${EXEC_DIR} LIBRARY DESTINATION ${LIB_DIR} ARCHIVE DESTINATION componentfiles/cl/lib )
 
 target_link_libraries ( ccd 
-         jlib
-         nbcd
-         roxiemem 
-         udplib 
          dafsclient 
-         eclrtl 
          dalibase 
          deftype 
-         thorhelper 
+         dllserver 
+         eclrtl 
          jhtree 
+         jlib
+         libbase58
+         nbcd
+         roxiemem 
          schedulectrl
-         dllserver 
+         thorhelper 
+         udplib 
          workunit 
-         libbase58
+         ws_dfsclient
     )
 
 if (NOT CONTAINERIZED)

+ 2 - 0
roxie/ccd/ccd.hpp

@@ -375,6 +375,8 @@ extern bool defaultNoSeekBuildIndex;
 extern unsigned parallelQueryLoadThreads;
 extern bool adhocRoxie;
 extern bool alwaysFailOnLeaks;
+extern bool ignoreFileDateMismatches;
+extern int fileTimeFuzzySeconds;
 extern SinkMode defaultSinkMode;
 
 #ifdef _CONTAINERIZED

+ 4 - 3
roxie/ccd/ccdcontext.cpp

@@ -39,6 +39,7 @@
 #include "ccdstate.hpp"
 #include "roxiehelper.hpp"
 #include "enginecontext.hpp"
+#include "ws_dfsclient.hpp"
 
 #include <list>
 #include <string>
@@ -450,7 +451,7 @@ private:
 
     inline bool fileExists(const char *lfn)
     {
-        Owned<IDistributedFile> f = queryDistributedFileDirectory().lookup(lfn, queryUserDescriptor(), false, false, false, nullptr, defaultPrivilegedUser);
+        Owned<IDistributedFile> f = wsdfs::lookup(lfn, queryUserDescriptor(), false, false, false, nullptr, defaultPrivilegedUser, INFINITE);
         if (f)
             return true;
         return false;
@@ -769,7 +770,7 @@ private:
                     MilliSleep(PERSIST_LOCK_SLEEP + (getRandom()%PERSIST_LOCK_SLEEP));
                     persistLock.setown(getPersistReadLock(goer));
                 }
-                Owned<IDistributedFile> f = queryDistributedFileDirectory().lookup(goer, queryUserDescriptor(), true, false, false, nullptr, defaultPrivilegedUser);
+                Owned<IDistributedFile> f = wsdfs::lookup(goer, queryUserDescriptor(), true, false, false, nullptr, defaultPrivilegedUser, INFINITE);
                 if (!f)
                     goto restart; // Persist has been deleted since last checked - repeat the whole process
                 const char *newAccessTime = f->queryAttributes().queryProp("@accessed");
@@ -3817,7 +3818,7 @@ public:
     {
         StringBuffer fullname;
         expandLogicalFilename(fullname, logicalName, workUnit, false, false);
-        Owned<IDistributedFile> file = queryDistributedFileDirectory().lookup(fullname.str(),queryUserDescriptor(),false,false,false,nullptr,defaultPrivilegedUser);
+        Owned<IDistributedFile> file = wsdfs::lookup(fullname.str(),queryUserDescriptor(),false,false,false,nullptr,defaultPrivilegedUser,INFINITE);
         if (file)
         {
             WorkunitUpdate wu = updateWorkUnit();

+ 2 - 1
roxie/ccd/ccddali.cpp

@@ -32,6 +32,7 @@
 #include "thorplugin.hpp"
 #include "workflow.hpp"
 #include "mpcomm.hpp"
+#include "ws_dfsclient.hpp"
 
 #ifndef _CONTAINERIZED
 #define ROXIE_DALI_CACHE
@@ -626,7 +627,7 @@ public:
             unsigned start = msTick();
             CDfsLogicalFileName lfn;
             lfn.set(logicalName);
-            Owned<IDistributedFile> dfsFile = queryDistributedFileDirectory().lookup(lfn, userdesc.get(), writeAccess, cacheIt,false,nullptr,isPrivilegedUser);
+            Owned<IDistributedFile> dfsFile = wsdfs::lookup(lfn, userdesc.get(), writeAccess, cacheIt,false,nullptr,isPrivilegedUser,INFINITE);
             if (dfsFile)
             {
                 IDistributedSuperFile *super = dfsFile->querySuperFile();

+ 23 - 26
roxie/ccd/ccdfile.cpp

@@ -910,8 +910,27 @@ class CRoxieFileCache : implements IRoxieFileCache, implements ICopyFileProgress
             // A temporary fix - files stored on azure don't have an accurate time stamp, so treat them as up to date.
             if (isUrl(f->queryFilename()))
                 return FileIsValid;
+            if (modified.isNull()) 
+                return FileIsValid;
             CDateTime mt;
-            return (modified.isNull() || (f->getTime(NULL, &mt, NULL) &&  mt.equals(modified, false))) ? FileIsValid : FileDateMismatch;
+            if (f->getTime(NULL, &mt, NULL))
+            {
+                if (fileTimeFuzzySeconds)
+                {
+                    time_t mtt = mt.getSimple();
+                    time_t modt = modified.getSimple();
+                    __int64 diff = mtt-modt;
+                    if (std::abs(diff) <= (__int64) fileTimeFuzzySeconds)
+                        return FileIsValid;
+                }
+                else if (mt.equals(modified, false))
+                    return FileIsValid;
+            }
+            StringBuffer s1, s2;
+            DBGLOG("File date mismatch: local %s, DFS %s", mt.getString(s1).str(), modified.getString(s2).str());
+            if (ignoreFileDateMismatches)
+                return FileIsValid;
+            return FileDateMismatch;
         }
         else
             return FileNotFound;
@@ -1669,31 +1688,9 @@ public:
         }
         else
         {
-            // MORE - not at all sure about this. Foreign files should stay foreign ?
-            CDfsLogicalFileName dlfn;
-            dlfn.set(lfn);
-            if (dlfn.isForeign())
-                dlfn.clearForeign();
-
-            bool defaultDirPerPart = false;
-            StringBuffer defaultDir;
-            unsigned stripeNum = 0;
-#ifdef _CONTAINERIZED
-            if (!dlfn.isExternal())
-            {
-                IFileDescriptor &fileDesc = pdesc->queryOwner();
-                StringBuffer planeName;
-                fileDesc.getClusterGroupName(0, planeName);
-                Owned<IStoragePlane> plane = getDataStoragePlane(planeName, true);
-                defaultDir.append(plane->queryPrefix());
-                unsigned numStripedDevices = plane->numDevices();
-                stripeNum = calcStripeNumber(partNo-1, dlfn.get(), numStripedDevices);
-                FileDescriptorFlags fileFlags = static_cast<FileDescriptorFlags>(fileDesc.queryProperties().getPropInt("@flags"));
-                if (FileDescriptorFlags::none != (fileFlags & FileDescriptorFlags::dirperpart))
-                    defaultDirPerPart = true;
-            }
-#endif
-            makePhysicalPartName(dlfn.get(), partNo, numParts, localLocation, replicationLevel, DFD_OSdefault, defaultDir.str(), defaultDirPerPart, stripeNum);
+            RemoteFilename rfn;
+            pdesc->getFilename(replicationLevel, rfn);
+            rfn.getLocalPath(localLocation);
         }
         Owned<ILazyFileIO> ret;
         try

+ 4 - 0
roxie/ccd/ccdmain.cpp

@@ -139,6 +139,8 @@ bool defaultCollectFactoryStatistics = true;
 bool defaultNoSeekBuildIndex = false;
 unsigned parallelQueryLoadThreads = 0;               // Number of threads to use for parallel loading of queries. 0 means don't (may cause CPU starvation on other vms)
 bool alwaysFailOnLeaks = false;
+bool ignoreFileDateMismatches = false;
+int fileTimeFuzzySeconds = 0;
 SinkMode defaultSinkMode = SinkMode::Parallel;
 unsigned continuationCompressThreshold = 1024;
 
@@ -1115,6 +1117,8 @@ int CCD_API roxie_main(int argc, const char *argv[], const char * defaultYaml)
         if (!parallelQueryLoadThreads)
             parallelQueryLoadThreads = 1;
         alwaysFailOnLeaks = topology->getPropBool("@alwaysFailOnLeaks", false);
+        ignoreFileDateMismatches = topology->getPropBool("@ignoreFileDateMismatches", false);
+        fileTimeFuzzySeconds = topology->getPropInt("@fileTimeFuzzySeconds", 0);
         const char *sinkModeText = topology->queryProp("@sinkMode");
         if (sinkModeText)
             defaultSinkMode = getSinkMode(sinkModeText);
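
Taken together, the roxie changes above add two tuning options: ignoreFileDateMismatches (accept a local copy even when its date differs from the DFS record) and fileTimeFuzzySeconds (tolerate a bounded difference). The following is a minimal standalone sketch of the tolerance check applied in the ccdfile.cpp hunk, using plain time_t values rather than the platform's CDateTime/CRoxieFileCache classes; all names here are illustrative only.

    #include <cstdlib>
    #include <ctime>

    enum class FileCheckResult { Valid, DateMismatch };

    // Accept the local file when the timestamps match exactly, when they differ by
    // no more than fuzzySeconds, or when mismatches are being ignored outright.
    FileCheckResult checkFileDate(time_t localTime, time_t dfsTime,
                                  int fuzzySeconds, bool ignoreMismatches)
    {
        long long diff = (long long)localTime - (long long)dfsTime;
        bool matches = fuzzySeconds ? (std::llabs(diff) <= (long long)fuzzySeconds)
                                    : (diff == 0);
        if (matches || ignoreMismatches)
            return FileCheckResult::Valid;
        return FileCheckResult::DateMismatch;
    }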

+ 3 - 0
roxie/ccd/ccdstate.cpp

@@ -43,6 +43,9 @@
 #include "pkgimpl.hpp"
 #include "roxiehelper.hpp"
 
+#include "ws_dfsclient.hpp"
+
+
 //-------------------------------------------------------------------------------------------
 // class CRoxiePluginCtx - provide the environments for plugins loaded by roxie. 
 // Base class handles making sure memory allocation comes from the right heap. 

+ 6 - 4
thorlcr/activities/diskread/thdiskread.cpp

@@ -106,9 +106,12 @@ public:
     virtual void done() override
     {
         CDiskReadMasterVF::done();
-        IHThorDiskReadBaseArg *helper = (IHThorDiskReadBaseArg *)queryHelper();
-        if (0 != (helper->getFlags() & TDXtemporary) && !container.queryJob().queryUseCheckpoints())
-            container.queryTempHandler()->deregisterFile(fileName, fileDesc->queryProperties().getPropBool("@pausefile"));
+        if (file)
+        {
+            IHThorDiskReadBaseArg *helper = (IHThorDiskReadBaseArg *)queryHelper();
+            if (0 != (helper->getFlags() & TDXtemporary) && !container.queryJob().queryUseCheckpoints())
+                container.queryTempHandler()->deregisterFile(fileName, fileDesc->queryProperties().getPropBool("@pausefile"));
+        }
     }
     virtual void init() override
     {
@@ -192,7 +195,6 @@ public:
         {
             if (!helper->hasSegmentMonitors() && !helper->hasFilter() && !(helper->getFlags() & TDXtemporary))
             {
-                IDistributedFile *file = queryReadFile(0);
                 if (file && canMatch)
                 {
                     if (0 != (TDRunfilteredcount & helper->getFlags()) && file->queryAttributes().hasProp("@recordCount"))

+ 17 - 7
thorlcr/activities/fetch/thfetch.cpp

@@ -34,6 +34,7 @@ class CFetchActivityMaster : public CMasterActivity
     std::vector<OwnedPtr<CThorStatsCollection>> subFileStats;
 
 protected:
+    Owned<IDistributedFile> fetchFile;
     IHThorFetchArg *helper;
 
 public:
@@ -49,11 +50,11 @@ public:
     {
         if (endpoints) free(endpoints);
     }
-    virtual void init()
+    virtual void init() override
     {
         CMasterActivity::init();
         OwnedRoxieString fname(helper->getFileName());
-        Owned<IDistributedFile> fetchFile = queryThorFileManager().lookup(container.queryJob(), fname, false, 0 != (helper->getFetchFlags() & FFdatafileoptional), false, container.activityIsCodeSigned());
+        fetchFile.setown(lookupReadFile(fname, false, false, 0 != (helper->getFetchFlags() & FFdatafileoptional)));
         if (fetchFile)
         {
             if (isFileKey(fetchFile))
@@ -77,15 +78,25 @@ public:
                 throw MakeActivityException(this, 0, "File '%s' was published as encrypted but no encryption key provided", fetchFile->queryLogicalName());
             IDistributedSuperFile *super = fetchFile->querySuperFile();
             unsigned numsubs = super?super->numSubFiles(true):0;
-            for (unsigned i=0; i<numsubs; i++)
-                subFileStats.push_back(new CThorStatsCollection(diskReadPartStatistics));
+
+            /* JCS->SHAMSER - kludge for now, don't add more than max
+                * But it means updateFileReadCostStats will not be querying the correct files,
+                * if the file varies per CQ execution (see other notes in updateFileReadCostStats)
+                */
+            for (unsigned i=subFileStats.size(); i<numsubs; i++)
+                subFileStats.push_back(new CThorStatsCollection(diskReadRemoteStatistics));
+
 
             mapping.setown(getFileSlaveMaps(fetchFile->queryLogicalName(), *fileDesc, container.queryJob().queryUserDescriptor(), container.queryJob().querySlaveGroup(), container.queryLocalOrGrouped(), false, NULL, super));
             mapping->serializeFileOffsetMap(offsetMapMb);
-            addReadFile(fetchFile);
         }
     }
-    virtual void serializeSlaveData(MemoryBuffer &dst, unsigned slave)
+    virtual void kill() override
+    {
+        CMasterActivity::kill();
+        fetchFile.clear();
+    }
+    virtual void serializeSlaveData(MemoryBuffer &dst, unsigned slave) override
     {
         if (mapping)
         {
@@ -120,7 +131,6 @@ public:
     virtual void serializeSlaveData(MemoryBuffer &dst, unsigned slave)
     {
         CFetchActivityMaster::serializeSlaveData(dst, slave);
-        IDistributedFile *fetchFile = queryReadFile(0);
         if (fetchFile)
             fetchFile->queryAttributes().serialize(dst);
     }
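
The loop that populates subFileStats above now starts from the vector's current size, because init() can run more than once for an activity inside a child query. A tiny self-contained sketch of that "grow, never duplicate" idiom (the names below are illustrative stand-ins, not the Thor classes):

    #include <memory>
    #include <vector>

    struct StatsCollection { /* per-subfile statistics would live here */ };

    // Safe to call on every (re)initialisation: only appends the entries that are
    // still missing, so repeated calls never duplicate existing collections.
    void ensureSubFileStats(std::vector<std::unique_ptr<StatsCollection>> &stats,
                            unsigned numSubFiles)
    {
        for (unsigned i = stats.size(); i < numSubFiles; i++)
            stats.push_back(std::make_unique<StatsCollection>());
    }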

+ 1 - 5
thorlcr/activities/hashdistrib/thhashdistrib.cpp

@@ -100,9 +100,7 @@ public:
         StringBuffer scoped;
         OwnedRoxieString indexFileName(helper->getIndexFileName());
         queryThorFileManager().addScope(container.queryJob(), indexFileName, scoped);
-        Owned<IDistributedFile> file = queryThorFileManager().lookup(container.queryJob(), indexFileName, false, false, false, container.activityIsCodeSigned());
-        if (!file)
-            throw MakeActivityException(this, 0, "KeyedDistribute: Failed to find key: %s", scoped.str());
+        Owned<IDistributedFile> file = lookupReadFile(indexFileName, false, false, false);
         if (0 == file->numParts())
             throw MakeActivityException(this, 0, "KeyedDistribute: Can't distribute based on an empty key: %s", scoped.str());
         if (!isFileKey(file))
@@ -123,8 +121,6 @@ public:
 
         tlkMb.append(iFileIO->size());
         ::read(iFileIO, 0, (size32_t)iFileIO->size(), tlkMb);
-
-        addReadFile(file);
     }
     virtual void serializeSlaveData(MemoryBuffer &dst, unsigned slave)
     {

+ 4 - 4
thorlcr/activities/hashdistrib/thhashdistribslave.cpp

@@ -1150,7 +1150,7 @@ public:
         if (allowSpill)
         {
             StringBuffer temp;
-            GetTempName(temp,"hddrecvbuff", true);
+            GetTempFilePath(temp,"hddrecvbuff");
             piperd.setown(createSmartBuffer(activity, temp.str(), pullBufferSize, rowIf));
         }
         else
@@ -1814,7 +1814,7 @@ public:
                 if (!cachefileio.get())
                 {
                     StringBuffer tempname;
-                    GetTempName(tempname,"hashdistspill",true);
+                    GetTempFilePath(tempname,"hashdistspill");
                     cachefile.setown(createIFile(tempname.str()));
                     cachefileio.setown(cachefile->open(IFOcreaterw));
                     if (!cachefileio)
@@ -2257,7 +2257,7 @@ public:
             unsigned rwFlags = DEFAULT_RWFLAGS;
             sz = 0;
             StringBuffer tempname;
-            GetTempName(tempname,"hdprop",true); // use alt temp dir
+            GetTempFilePath(tempname,"hdprop");
             tempfile.setown(createIFile(tempname.str()));
             {
                 ActPrintLogEx(&activity->queryContainer(), thorlog_null, MCwarning, "REDISTRIBUTE size unknown, spilling to disk");
@@ -2665,7 +2665,7 @@ public:
         count = 0;
         StringBuffer tempname, prefix("hashdedup_bucket");
         prefix.append(bucketN).append('_').append(desc);
-        GetTempName(tempname, prefix.str(), true);
+        GetTempFilePath(tempname, prefix.str());
         OwnedIFile iFile = createIFile(tempname.str());
         spillFile.setown(new CFileOwner(iFile.getLink()));
         if (owner.getOptBool(THOROPT_COMPRESS_SPILLS, true))

+ 35 - 29
thorlcr/activities/indexread/thindexread.cpp

@@ -216,46 +216,52 @@ public:
     virtual void init() override
     {
         CMasterActivity::init();
-        OwnedRoxieString helperFileName = indexBaseHelper->getFileName();
-        StringBuffer expandedFileName;
-        queryThorFileManager().addScope(container.queryJob(), helperFileName, expandedFileName);
-        fileName.set(expandedFileName);
-        Owned<IDistributedFile> index = queryThorFileManager().lookup(container.queryJob(), helperFileName, false, 0 != (TIRoptional & indexBaseHelper->getFlags()), true, container.activityIsCodeSigned());
-        if (index)
+        if ((container.queryLocalOrGrouped() || indexBaseHelper->canMatchAny()))
         {
-            checkFileType(this, index, "key", true);
+            OwnedRoxieString helperFileName = indexBaseHelper->getFileName();
+            StringBuffer expandedFileName;
+            queryThorFileManager().addScope(container.queryJob(), helperFileName, expandedFileName);
+            fileName.set(expandedFileName);
+            Owned<IDistributedFile> index = lookupReadFile(helperFileName, false, false, 0 != (TIRoptional & indexBaseHelper->getFlags()));
+            if (index && (0 == index->numParts())) // possible if superfile
+                index.clear();
+            if (index)
+            {
+                checkFileType(this, index, "key", true);
 
-            partitionKey = index->queryAttributes().hasProp("@partitionFieldMask");
-            localKey = index->queryAttributes().getPropBool("@local") && !partitionKey;
+                partitionKey = index->queryAttributes().hasProp("@partitionFieldMask");
+                localKey = index->queryAttributes().getPropBool("@local") && !partitionKey;
 
-            if (container.queryLocalData() && !localKey)
-                throw MakeActivityException(this, 0, "Index Read cannot be LOCAL unless supplied index is local");
+                if (container.queryLocalData() && !localKey)
+                    throw MakeActivityException(this, 0, "Index Read cannot be LOCAL unless supplied index is local");
 
-            nofilter = 0 != (TIRnofilter & indexBaseHelper->getFlags());
-            if (localKey)
-                nofilter = true;
-            else
-            {
-                IDistributedSuperFile *super = index->querySuperFile();
-                IDistributedFile *sub = super ? &super->querySubFile(0,true) : index.get();
-                if (sub && 1 == sub->numParts())
+                nofilter = 0 != (TIRnofilter & indexBaseHelper->getFlags());
+                if (localKey)
                     nofilter = true;
-                if (super)
+                else
                 {
-                    unsigned numSubFiles = super->numSubFiles();
-                    for (unsigned i=0; i<numSubFiles; i++)
-                        subIndexFileStats.push_back(new CThorStatsCollection(indexReadActivityStatistics));
+                    IDistributedSuperFile *super = index->querySuperFile();
+                    IDistributedFile *sub = super ? &super->querySubFile(0,true) : index.get();
+                    if (sub && 1 == sub->numParts())
+                        nofilter = true;
+                    if (super)
+                    {
+                        unsigned numSubFiles = super->numSubFiles(true);
+
+                        /* JCS->SHAMSER - kludge for now, don't add more than max
+                        * But it means updateFileReadCostStats will not be querying the correct files,
+                        * if the file varies per CQ execution (see other notes in updateFileReadCostStats)
+                        */
+                        for (unsigned i=subIndexFileStats.size(); i<numSubFiles; i++)
+                            subIndexFileStats.push_back(new CThorStatsCollection(indexReadActivityStatistics));
+                    }
                 }
-            }
-            //MORE: Change index getFormatCrc once we support projected rows for indexes.
-            checkFormatCrc(this, index, indexBaseHelper->getDiskFormatCrc(), indexBaseHelper->queryDiskRecordSize(), indexBaseHelper->getProjectedFormatCrc(), indexBaseHelper->queryProjectedDiskRecordSize(), true);
-            if ((container.queryLocalOrGrouped() || indexBaseHelper->canMatchAny()) && index->numParts())
-            {
+                //MORE: Change index getFormatCrc once we support projected rows for indexes.
+                checkFormatCrc(this, index, indexBaseHelper->getDiskFormatCrc(), indexBaseHelper->queryDiskRecordSize(), indexBaseHelper->getProjectedFormatCrc(), indexBaseHelper->queryProjectedDiskRecordSize(), true);
                 fileDesc.setown(getConfiguredFileDescriptor(*index));
                 if (container.queryLocalOrGrouped())
                     nofilter = true;
                 prepareKey(index);
-                addReadFile(index);
                 mapping.setown(getFileSlaveMaps(index->queryLogicalName(), *fileDesc, container.queryJob().queryUserDescriptor(), container.queryJob().querySlaveGroup(), container.queryLocalOrGrouped(), true, NULL, index->querySuperFile()));
             }
         }

+ 2 - 4
thorlcr/activities/keydiff/thkeydiff.cpp

@@ -56,10 +56,10 @@ public:
         queryThorFileManager().addScope(container.queryJob(), outputHelperName, expandedFileName.clear(), false);
         outputName.set(expandedFileName);
 
-        originalIndexFile.setown(queryThorFileManager().lookup(container.queryJob(), originalHelperName,false, false, false, container.activityIsCodeSigned()));
+        originalIndexFile.setown(lookupReadFile(originalHelperName, false, false, false));
+        newIndexFile.setown(lookupReadFile(updatedHelperName, false, false, false));
         if (!isFileKey(originalIndexFile))
             throw MakeActivityException(this, TE_FileTypeMismatch, "Attempting to read flat file as an index: %s", originalHelperName.get());
-        newIndexFile.setown(queryThorFileManager().lookup(container.queryJob(), updatedHelperName, false, false, false, container.activityIsCodeSigned()));
         if (!isFileKey(newIndexFile))
             throw MakeActivityException(this, TE_FileTypeMismatch, "Attempting to read flat file as an index: %s", updatedHelperName.get());
         if (originalIndexFile->numParts() != newIndexFile->numParts())
@@ -87,8 +87,6 @@ public:
         fillClusterArray(container.queryJob(), outputName, clusters, groups);
         patchDesc.setown(queryThorFileManager().create(container.queryJob(), outputName, clusters, groups, 0 != (KDPoverwrite & helper->getFlags()), 0, !local, width));
         patchDesc->queryProperties().setProp("@kind", "keydiff");
-        addReadFile(originalIndexFile);
-        addReadFile(newIndexFile);
     }
     virtual void serializeSlaveData(MemoryBuffer &dst, unsigned slave)
     {

+ 12 - 14
thorlcr/activities/keyedjoin/thkeyedjoin-legacy.cpp

@@ -30,6 +30,7 @@ namespace LegacyKJ
 class CKeyedJoinMaster : public CMasterActivity
 {
     IHThorKeyedJoinArg *helper;
+    Owned<IDistributedFile> dataFile, indexFile;
     Owned<IFileDescriptor> dataFileDesc;
     Owned<CSlavePartMapping> dataFileMapping;
     MemoryBuffer offsetMapMb, initMb;
@@ -54,12 +55,11 @@ public:
             if (TAG_NULL != tags[i])
                 container.queryJob().freeMPTag(tags[i]);
     }
-    virtual void init()
+    virtual void init() override
     {
         CMasterActivity::init();
         OwnedRoxieString indexFileName(helper->getIndexFileName());
-        Owned<IDistributedFile> dataFile;
-        Owned<IDistributedFile> indexFile = queryThorFileManager().lookup(container.queryJob(), indexFileName, false, 0 != (helper->getJoinFlags() & JFindexoptional), true, container.activityIsCodeSigned());
+        indexFile.setown(lookupReadFile(indexFileName, false, false, 0 != (helper->getJoinFlags() & JFindexoptional)));
 
         unsigned keyReadWidth = (unsigned)container.queryJob().getWorkUnitValueInt("KJKRR", 0);
         if (!keyReadWidth || keyReadWidth>container.queryJob().querySlaves())
@@ -207,7 +207,7 @@ public:
                     OwnedRoxieString fetchFilename(helper->getFileName());
                     if (fetchFilename)
                     {
-                        dataFile.setown(queryThorFileManager().lookup(container.queryJob(), fetchFilename, false, 0 != (helper->getFetchFlags() & FFdatafileoptional), true, container.activityIsCodeSigned()));
+                        dataFile.setown(lookupReadFile(fetchFilename, false, false, 0 != (helper->getFetchFlags() & FFdatafileoptional)));
                         if (dataFile)
                         {
                             if (isFileKey(dataFile))
@@ -260,22 +260,20 @@ public:
             else
                 indexFile.clear();
         }
-        if (indexFile)
-        {
-            addReadFile(indexFile);
-            if (dataFile)
-                addReadFile(dataFile);
-        }
-        else
+        if (!indexFile)
             initMb.append((unsigned)0);
     }
-    virtual void serializeSlaveData(MemoryBuffer &dst, unsigned slave)
+    virtual void kill() override
+    {
+        CMasterActivity::kill();
+        indexFile.clear();
+        dataFile.clear();
+    }
+    virtual void serializeSlaveData(MemoryBuffer &dst, unsigned slave) override
     {
         dst.append(initMb);
-        IDistributedFile *indexFile = queryReadFile(0); // 0 == indexFile, 1 == dataFile
         if (indexFile && helper->diskAccessRequired())
         {
-            IDistributedFile *dataFile = queryReadFile(1);
             if (dataFile)
             {
                 dst.append(remoteDataFiles);

+ 14 - 10
thorlcr/activities/keyedjoin/thkeyedjoin.cpp

@@ -303,7 +303,7 @@ public:
         totalIndexParts = 0;
 
         Owned<IDistributedFile> dataFile;
-        Owned<IDistributedFile> indexFile = queryThorFileManager().lookup(container.queryJob(), indexFileName, false, 0 != (helper->getJoinFlags() & JFindexoptional), true, container.activityIsCodeSigned());
+        Owned<IDistributedFile> indexFile = lookupReadFile(indexFileName, false, false, 0 != (helper->getJoinFlags() & JFindexoptional));
         if (indexFile)
         {
             if (!isFileKey(indexFile))
@@ -311,7 +311,12 @@ public:
             IDistributedSuperFile *superIndex = indexFile->querySuperFile();
             // One entry for each subfile (unless it is not a superfile => then add one entry for index data file stats)
             unsigned numSuperIndexSubs = superIndex?superIndex->numSubFiles(true):1;
-            for (unsigned i=0; i<numSuperIndexSubs; i++)
+
+            /* JCS->SHAMSER - kludge for now, don't add more than max
+            * But it means updateFileReadCostStats will not be querying the correct files,
+            * if the file varies per CQ execution (see other notes in updateFileReadCostStats)
+            */
+            for (unsigned i=fileStats.size(); i<numSuperIndexSubs; i++)
                 fileStats.push_back(new CThorStatsCollection(indexReadStatistics));
 
             if (helper->diskAccessRequired())
@@ -319,7 +324,7 @@ public:
                 OwnedRoxieString fetchFilename(helper->getFileName());
                 if (fetchFilename)
                 {
-                    dataFile.setown(queryThorFileManager().lookup(container.queryJob(), fetchFilename, false, 0 != (helper->getFetchFlags() & FFdatafileoptional), true, container.activityIsCodeSigned()));
+                    dataFile.setown(lookupReadFile(fetchFilename, false, false, 0 != (helper->getFetchFlags() & FFdatafileoptional)));
                     if (dataFile)
                     {
                         if (isFileKey(dataFile))
@@ -360,7 +365,12 @@ public:
                         IDistributedSuperFile *super = dataFile->querySuperFile();
                         // One entry for each subfile (unless it is not a superfile => then have 1 entry for data file stats)
                         unsigned numsubs = super?super->numSubFiles(true):1;
-                        for (unsigned i=0; i<numsubs; i++)
+
+                        /* JCS->SHAMSER - kludge for now, don't add more than max
+                        * But it means updateFileReadCostStats will not be querying the correct files,
+                        * if the file varies per CQ execution (see other notes in updateFileReadCostStats)
+                        */
+                        for (unsigned i=fileStats.size(); i<numsubs; i++)
                             fileStats.push_back(new CThorStatsCollection(diskReadRemoteStatistics));
                     }
                 }
@@ -511,12 +521,6 @@ public:
         }
         else
             initMb.append(totalIndexParts); // 0
-        if (indexFile)
-        {
-            addReadFile(indexFile);
-            if (dataFile)
-                addReadFile(dataFile);
-        }
     }
     virtual void serializeSlaveData(MemoryBuffer &dst, unsigned slave)
     {

+ 1 - 1
thorlcr/activities/keyedjoin/thkeyedjoinslave.cpp

@@ -3634,7 +3634,7 @@ public:
         };
 
         StringBuffer tmpFileName;
-        GetTempName(tmpFileName, "kjgroup");
+        GetTempFilePath(tmpFileName, "kjgroup");
         Owned<IFile> iFile = createIFile(tmpFileName);
         Owned<IRowStreamWithFpos> rowStream = new CRowStreamWithFpos(iFile, keyLookupReplyOutputMetaRowIf, preserveOrder ? totalIndexParts : 0);
 

+ 2 - 5
thorlcr/activities/keypatch/thkeypatch.cpp

@@ -55,10 +55,10 @@ public:
         queryThorFileManager().addScope(container.queryJob(), outputHelperName, expandedFileName.clear(), false);
         outputName.set(expandedFileName);
 
-        originalIndexFile.setown(queryThorFileManager().lookup(container.queryJob(), originalHelperName, false, false, false, container.activityIsCodeSigned()));
+        originalIndexFile.setown(lookupReadFile(originalHelperName, false, false, false));
         if (!isFileKey(originalIndexFile))
             throw MakeActivityException(this, TE_FileTypeMismatch, "Attempting to read flat file as an index: %s", originalHelperName.get());
-        patchFile.setown(queryThorFileManager().lookup(container.queryJob(), patchHelperName, false, false, false, container.activityIsCodeSigned()));
+        patchFile.setown(lookupReadFile(patchHelperName, false, false, false));
         if (isFileKey(patchFile))
             throw MakeActivityException(this, TE_FileTypeMismatch, "Attempting to read index as a patch file: %s", patchHelperName.get());
         
@@ -67,9 +67,6 @@ public:
         if (originalIndexFile->querySuperFile() || patchFile->querySuperFile())
             throw MakeActivityException(this, 0, "Patching super files not supported");
         
-        addReadFile(originalIndexFile);
-        addReadFile(patchFile);
-
         width = originalIndexFile->numParts();
 
         originalDesc.setown(originalIndexFile->getFileDescriptor());

+ 2 - 2
thorlcr/activities/lookupjoin/thlookupjoinslave.cpp

@@ -1877,7 +1877,7 @@ protected:
 
                         VStringBuffer tempPrefix("spill_%d", container.queryId());
                         StringBuffer tempName;
-                        GetTempName(tempName, tempPrefix.str(), true);
+                        GetTempFilePath(tempName, tempPrefix.str());
                         file.setown(new CFileOwner(createIFile(tempName.str())));
                         VStringBuffer spillPrefixStr("clearAllNonLocalRows(%d)", SPILL_PRIORITY_SPILLABLE_STREAM);
                         // 3rd param. is skipNulls = true, the row arrays may have had the non-local rows delete already.
@@ -2941,7 +2941,7 @@ public:
             rwFlags |= spillCompInfo;
         }
         StringBuffer tempFilename;
-        GetTempName(tempFilename, "lookup_local", true);
+        GetTempFilePath(tempFilename, "lookup_local");
         ActPrintLog("Overflowing RHS broadcast rows to spill file: %s", tempFilename.str());
         OwnedIFile iFile = createIFile(tempFilename.str());
         overflowWriteFile.setown(new CFileOwner(iFile.getLink()));

+ 1 - 1
thorlcr/activities/merge/thmergeslave.cpp

@@ -320,7 +320,7 @@ public:
         chunkmaxsize = MERGE_TRANSFER_BUFFER_SIZE;
         Owned<IRowStream> merged = createRowStreamMerger(streams.ordinality(), streams.getArray(), helper->queryCompare(),helper->dedup(), linkcounter);
         StringBuffer tmpname;
-        GetTempName(tmpname,"merge",true); // use alt temp dir
+        GetTempFilePath(tmpname,"merge");
         tmpfile.setown(createIFile(tmpname.str()));
         Owned<IRowWriter> writer =  createRowWriter(tmpfile, this);
         CThorKeyArray sample(*this, this, helper->querySerialize(), helper->queryCompare(), helper->queryCompareKey(), helper->queryCompareRowKey());

+ 1 - 2
thorlcr/activities/msort/thmsort.cpp

@@ -91,10 +91,9 @@ protected:
         OwnedRoxieString cosortlogname(helper->getSortedFilename());
         if (cosortlogname&&*cosortlogname)
         {
-            Owned<IDistributedFile> coSortFile = queryThorFileManager().lookup(container.queryJob(), cosortlogname, false, false, false, container.activityIsCodeSigned());
+            Owned<IDistributedFile> coSortFile = lookupReadFile(cosortlogname, false, false, false);
             if (isFileKey(coSortFile))
                 throw MakeActivityException(this, TE_FileTypeMismatch, "Attempting to read index as a flat file: %s", cosortlogname.get());
-            addReadFile(coSortFile);
             Owned<IFileDescriptor> fileDesc = coSortFile->getFileDescriptor();
             unsigned o;
             for (o=0; o<fileDesc->numParts(); o++)

+ 1 - 1
thorlcr/activities/nsplitter/thnsplitterslave.cpp

@@ -226,7 +226,7 @@ public:
                     if (spill)
                     {
                         StringBuffer tempname;
-                        GetTempName(tempname, "nsplit", true); // use alt temp dir
+                        GetTempFilePath(tempname, "nsplit");
                         smartBuf.setown(createSharedSmartDiskBuffer(this, tempname.str(), numOutputs, queryRowInterfaces(input)));
                         ActPrintLog("Using temp spill file: %s", tempname.str());
                     }

+ 7 - 4
thorlcr/activities/thactivityutil.cpp

@@ -96,7 +96,7 @@ public:
         {
             StringBuffer temp;
             if (allowspill)
-                GetTempName(temp,"lookahd",true);
+                GetTempFilePath(temp,"lookahd");
             assertex(bufsize);
             if (allowspill)
                 smartbuf.setown(createSmartBuffer(&activity, temp.str(), bufsize, rowIf));
@@ -747,15 +747,18 @@ IFileIO *createMultipleWrite(CActivityBase *activity, IPartDescriptor &partDesc,
     else
     {
         // use temp name
-        GetTempName(outLocationName, "partial");
         if (rfn.isLocal() || (twFlags & TW_External))
-        { // ensure local tmp in same directory as target
+        {
+            // ensure local tmp in same directory (and plane) as target
             StringBuffer dir;
             splitDirTail(primaryName, dir);
             addPathSepChar(dir);
-            dir.append(pathTail(outLocationName));
+            GetTempFileName(dir, "partial");
             outLocationName.swapWith(dir);
         }
+        else
+            GetTempFilePath(outLocationName, "partial");
+
         assertex(outLocationName.length());
         ensureDirectoryForFile(outLocationName.str());
     }

+ 51 - 39
thorlcr/activities/thdiskbase.cpp

@@ -47,61 +47,73 @@ void CDiskReadMasterBase::init()
     fileName.set(expandedFileName);
     reInit = 0 != (helper->getFlags() & (TDXvarfilename|TDXdynamicfilename));
 
-    Owned<IDistributedFile> file = queryThorFileManager().lookup(container.queryJob(), helperFileName, 0 != ((TDXtemporary|TDXjobtemp) & helper->getFlags()), 0 != (TDRoptional & helper->getFlags()), true, container.activityIsCodeSigned());
-    if (file)
+    if (container.queryLocal() || helper->canMatchAny()) // if local, assume may match
     {
-        if (file->isExternal() && (helper->getFlags() & TDXcompress))
-            file->queryAttributes().setPropBool("@blockCompressed", true);
-        if (file->numParts() > 1)
-            fileDesc.setown(getConfiguredFileDescriptor(*file));
-        else
-            fileDesc.setown(file->getFileDescriptor());
-        validateFile(file);
-        if (container.queryLocal() || helper->canMatchAny()) // if local, assume may match
+        bool temp = 0 != (TDXtemporary & helper->getFlags());
+        bool jobTemp = 0 != (TDXjobtemp & helper->getFlags());
+        bool opt = 0 != (TDRoptional & helper->getFlags());
+        file.setown(lookupReadFile(helperFileName, jobTemp, temp, opt));
+        if (file)
         {
-            bool temp = 0 != (TDXtemporary & helper->getFlags());
+            if (file->isExternal() && (helper->getFlags() & TDXcompress))
+                file->queryAttributes().setPropBool("@blockCompressed", true);
+            if (file->numParts() > 1)
+                fileDesc.setown(getConfiguredFileDescriptor(*file));
+            else
+                fileDesc.setown(file->getFileDescriptor());
+            validateFile(file);
             bool local;
             if (temp)
                 local = false;
             else
                 local = container.queryLocal();
             mapping.setown(getFileSlaveMaps(file->queryLogicalName(), *fileDesc, container.queryJob().queryUserDescriptor(), container.queryJob().querySlaveGroup(), local, false, hash, file->querySuperFile()));
-            addReadFile(file, temp);
-        }
-        IDistributedSuperFile *super = file->querySuperFile();
-        unsigned numsubs = super?super->numSubFiles(true):0;
-        if (0 != (helper->getFlags() & TDRfilenamecallback)) // only get/serialize if using virtual file name fields
-        {
-            for (unsigned s=0; s<numsubs; s++)
+            IDistributedSuperFile *super = file->querySuperFile();
+            unsigned numsubs = super?super->numSubFiles(true):0;
+            if (0 != (helper->getFlags() & TDRfilenamecallback)) // only get/serialize if using virtual file name fields
             {
-                IDistributedFile &subfile = super->querySubFile(s, true);
-                subfileLogicalFilenames.append(subfile.queryLogicalName());
+                subfileLogicalFilenames.kill();
+                for (unsigned s=0; s<numsubs; s++)
+                {
+                    IDistributedFile &subfile = super->querySubFile(s, true);
+                    subfileLogicalFilenames.append(subfile.queryLogicalName());
+                }
             }
-        }
-        if (0==(helper->getFlags() & TDXtemporary))
-        {
-            for (unsigned i=0; i<numsubs; i++)
-                subFileStats.push_back(new CThorStatsCollection(diskReadRemoteStatistics));
-        }
-        void *ekey;
-        size32_t ekeylen;
-        helper->getEncryptKey(ekeylen,ekey);
-        bool encrypted = fileDesc->queryProperties().getPropBool("@encrypted");
-        if (0 != ekeylen)
-        {
-            memset(ekey,0,ekeylen);
-            free(ekey);
-            if (!encrypted)
+            if (0==(helper->getFlags() & TDXtemporary))
+            {
+                /* JCS->SHAMSER - kludge for now, don't add more than max
+                 * But it means updateFileReadCostStats will not be querying the correct files,
+                 * if the file varies per CQ execution (see other notes in updateFileReadCostStats)
+                 */
+                for (unsigned i=subFileStats.size(); i<numsubs; i++)
+                    subFileStats.push_back(new CThorStatsCollection(diskReadRemoteStatistics));
+            }
+            void *ekey;
+            size32_t ekeylen;
+            helper->getEncryptKey(ekeylen,ekey);
+            bool encrypted = fileDesc->queryProperties().getPropBool("@encrypted");
+            if (0 != ekeylen)
             {
-                Owned<IException> e = MakeActivityWarning(&container, TE_EncryptionMismatch, "Ignoring encryption key provided as file '%s' was not published as encrypted", fileName.get());
-                queryJobChannel().fireException(e);
+                memset(ekey,0,ekeylen);
+                free(ekey);
+                if (!encrypted)
+                {
+                    Owned<IException> e = MakeActivityWarning(&container, TE_EncryptionMismatch, "Ignoring encryption key provided as file '%s' was not published as encrypted", fileName.get());
+                    queryJobChannel().fireException(e);
+                }
             }
+            else if (encrypted)
+                throw MakeActivityException(this, 0, "File '%s' was published as encrypted but no encryption key provided", fileName.get());
         }
-        else if (encrypted)
-            throw MakeActivityException(this, 0, "File '%s' was published as encrypted but no encryption key provided", fileName.get());
     }
 }
 
+void CDiskReadMasterBase::kill()
+{
+    CMasterActivity::kill();
+    file.clear();
+}
+
 void CDiskReadMasterBase::serializeSlaveData(MemoryBuffer &dst, unsigned slave)
 {
     dst.append(fileName);

+ 2 - 0
thorlcr/activities/thdiskbase.ipp

@@ -29,6 +29,7 @@ class CDiskReadMasterBase : public CMasterActivity
 protected:
     StringArray subfileLogicalFilenames;
     Owned<IFileDescriptor> fileDesc;
+    Owned<IDistributedFile> file;
     Owned<CSlavePartMapping> mapping;
     IHash *hash;
     StringAttr fileName;
@@ -37,6 +38,7 @@ protected:
 public:
     CDiskReadMasterBase(CMasterGraphElement *info);
     virtual void init() override;
+    virtual void kill() override;
     virtual void serializeSlaveData(MemoryBuffer &dst, unsigned slave) override;
     virtual void done() override;
     virtual void validateFile(IDistributedFile *file) { }

+ 2 - 2
thorlcr/activities/thdiskbaseslave.cpp

@@ -236,7 +236,7 @@ void CDiskReadSlaveActivityBase::init(MemoryBuffer &data, MemoryBuffer &slaveDat
 
         if (helper->getFlags() & TDXtemporary)
         {
-            // put temp files in individual slave temp dirs (incl port)
+            // put temp files in temp dir
             if (!container.queryJob().queryUseCheckpoints())
                 partDescs.item(0).queryOwner().setDefaultDir(queryTempDir());
         }
@@ -520,7 +520,7 @@ void CDiskWriteSlaveActivityBase::init(MemoryBuffer &data, MemoryBuffer &slaveDa
     }
     partDesc.setown(deserializePartFileDescriptor(data));
 
-    // put temp files in individual slave temp dirs (incl port)
+    // put temp files in temp dir
     if ((diskHelperBase->getFlags() & TDXtemporary) && (!container.queryJob().queryUseCheckpoints()))
         partDesc->queryOwner().setDefaultDir(queryTempDir());
 

+ 1 - 1
thorlcr/graph/thgraph.cpp

@@ -2842,7 +2842,6 @@ CActivityBase &CJobBase::queryChannelActivity(unsigned c, graph_id gid, activity
 void CJobBase::startJob()
 {
     LOG(MCdebugProgress, thorJob, "New Graph started : %s", graphName.get());
-    ClearTempDirs();
     perfmonhook.setown(createThorMemStatsPerfMonHook(*this, getOptInt(THOROPT_MAX_KERNLOG, 3)));
     setPerformanceMonitorHook(perfmonhook);
     PrintMemoryStatusLog();
@@ -2922,6 +2921,7 @@ void CJobBase::endJob()
             exceptions.setown(makeMultiException());
         exceptions->append(*LINK(e));
     }
+    ClearTempDir();
     if (exceptions && exceptions->ordinality())
         throw exceptions.getClear();
 }

+ 108 - 39
thorlcr/graph/thgraphmaster.cpp

@@ -388,13 +388,6 @@ CMasterActivity::~CMasterActivity()
     delete [] data;
 }
 
-void CMasterActivity::addReadFile(IDistributedFile *file, bool temp)
-{
-    readFiles.append(*LINK(file));
-    if (!temp) // NB: Temps not listed in workunit
-        queryThorFileManager().noteFileRead(container.queryJob(), file);
-}
-
 IDistributedFile *CMasterActivity::queryReadFile(unsigned f)
 {
     if (f>=readFiles.ordinality())
@@ -402,20 +395,31 @@ IDistributedFile *CMasterActivity::queryReadFile(unsigned f)
     return &readFiles.item(f);
 }
 
-void CMasterActivity::preStart(size32_t parentExtractSz, const byte *parentExtract)
+IDistributedFile *CMasterActivity::findReadFile(const char *lfnName)
 {
-    CActivityBase::preStart(parentExtractSz, parentExtract);
-    IArrayOf<IDistributedFile> tmpFiles;
-    tmpFiles.swapWith(readFiles);
-    ForEachItemIn(f, tmpFiles)
+    auto it = readFilesMap.find(lfnName);
+    if (it != readFilesMap.end())
+        return LINK(it->second);
+    return nullptr;
+}
+
+IDistributedFile *CMasterActivity::lookupReadFile(const char *lfnName, bool jobTemp, bool temp, bool opt)
+{
+    StringBuffer normalizedFileName;
+    queryThorFileManager().addScope(container.queryJob(), lfnName, normalizedFileName, jobTemp|temp);
+    Owned<IDistributedFile> file = findReadFile(normalizedFileName);
+    if (!file)
     {
-        IDistributedFile &file = tmpFiles.item(f);
-        IDistributedSuperFile *super = file.querySuperFile();
-        if (super)
-            getSuperFileSubs(super, readFiles, true);
-        else
-            readFiles.append(*LINK(&file));
+        file.setown(queryThorFileManager().lookup(container.queryJob(), lfnName, jobTemp|temp, opt, true, container.activityIsCodeSigned()));
+        if (file)
+        {
+            readFiles.append(*LINK(file));
+            readFilesMap[normalizedFileName.str()] = file;
+            if (!temp) // NB: Temps not listed in workunit
+                queryThorFileManager().noteFileRead(container.queryJob(), file);
+        }
     }
+    return file.getClear();
 }
 
 MemoryBuffer &CMasterActivity::queryInitializationData(unsigned slave) const
@@ -455,7 +459,16 @@ void CMasterActivity::threadmain()
 
 void CMasterActivity::init()
 {
-    readFiles.kill();
+    // Files are added to readFiles during initialization,
+    // If this is a CQ query act., then it will be repeatedly re-initialized if it depends on master context,
+    // e.g. due to a variable filename.
+    // Therefore, do not clear readFiles on reinitialization, to avoid repeatedly [expensively] looking them up.
+    bool inCQ = container.queryOwner().queryOwner() && !container.queryOwner().isGlobal();
+    if (!inCQ)
+    {
+        readFiles.kill();
+        readFilesMap.clear();
+    }
 }
 
 void CMasterActivity::startProcess(bool async)
@@ -480,6 +493,7 @@ void CMasterActivity::kill()
 {
     CActivityBase::kill();
     readFiles.kill();
+    readFilesMap.clear();
 }
 
 bool CMasterActivity::fireException(IException *_e)
@@ -542,35 +556,84 @@ void CMasterActivity::getEdgeStats(IStatisticGatherer & stats, unsigned idx)
 void CMasterActivity::done()
 {
     CActivityBase::done();
-    ForEachItemIn(s, readFiles)
+    if (readFiles.ordinality())
     {
-        IDistributedFile &file = readFiles.item(s);
-        file.setAccessed();
+        IArrayOf<IDistributedFile> tmpFiles;
+        ForEachItemIn(f, readFiles)
+        {
+            IDistributedFile &file = readFiles.item(f);
+            IDistributedSuperFile *super = file.querySuperFile();
+            if (super)
+            {
+                getSuperFileSubs(super, tmpFiles, true);
+                tmpFiles.append(*LINK(&file));
+            }
+            else
+                tmpFiles.append(*LINK(&file));
+        }
+        ForEachItemIn(s, tmpFiles)
+        {
+            IDistributedFile &file = tmpFiles.item(s);
+            file.setAccessed();
+        }
     }
 }
 
 void CMasterActivity::updateFileReadCostStats(std::vector<OwnedPtr<CThorStatsCollection>> & subFileStats)
 {
-    if (!subFileStats.empty())
-    {
-        unsigned numSubFiles = subFileStats.size();
-        for (unsigned i=0; i<numSubFiles; i++)
-        {
-            IDistributedFile *file = queryReadFile(i);
-            if (file)
+    /* JCSMORE->SHAMSER: (separate JIRA needed)
+     * there can be >1 read file if this act. is in a child query/loop, it could be processing a different file per iteration,
+     * meaning there could be an arbitrary number of readfiles, in that case activity::init would be called multiple times.
+     * 
+     * I hit an assert during testing reading a super in a CQ:
+        libjlib.so!raiseAssertException(const char * assertion, const char * file, unsigned int line) (\home\jsmith\git\HPCC-Platform\system\jlib\jexcept.cpp:660)
+        libjlib.so!MemoryBuffer::read(MemoryBuffer * const this, unsigned char & value) (\home\jsmith\git\HPCC-Platform\system\jlib\jbuff.cpp:693)
+        libjlib.so!MemoryBuffer::readPacked(MemoryBuffer * const this) (\home\jsmith\git\HPCC-Platform\system\jlib\jbuff.cpp:813)
+        libjlib.so!MemoryBuffer::readPacked(MemoryBuffer * const this, unsigned int & value) (\home\jsmith\git\HPCC-Platform\system\jlib\jbuff.cpp:824)
+        libjlib.so!CRuntimeStatisticCollection::deserialize(CRuntimeStatisticCollection * const this, MemoryBuffer & in) (\home\jsmith\git\HPCC-Platform\system\jlib\jstats.cpp:2524)
+        libactivitymasters_lcr.so!CThorStatsCollection::deserialize(CThorStatsCollection * const this, unsigned int node, MemoryBuffer & mb) (\home\jsmith\git\HPCC-Platform\thorlcr\graph\thgraphmaster.ipp:53)
+        libactivitymasters_lcr.so!CDiskReadMasterBase::deserializeStats(CDiskReadMasterBase * const this, unsigned int node, MemoryBuffer & mb) (\home\jsmith\git\HPCC-Platform\thorlcr\activities\thdiskbase.cpp:139)
+        libgraphmaster_lcr.so!CMasterGraph::deserializeStats(CMasterGraph * const this, unsigned int node, MemoryBuffer & mb) (\home\jsmith\git\HPCC-Platform\thorlcr\graph\thgraphmaster.cpp:2781)
+     *
+     * (would be a crash in a Release build)
+     * it's because the diskread init is adding new CThorStatsCollection per init (per execution of the CQ),
+     * which means when deserializing there are too many. The code assumes there is only 1 file being read.
+     * 
+     * I've temporarily changed the code where subFileStats's are added, to prevent more being added per iteration,
+     * but it needs re-thinking to handle the workers potentially dealing with different logical files
+     * (+ index read to handle super files, and case where act. is reading >1 file, i.e. KJ)
+     * I've changed this code to rely on the 1st readFiles for now (not the # of subFileStats, which may be more)
+     * NB: also changed when the expansion of readFiles (from supers to subfiles) happens, a new super was being added
+     * each CQ iteration and re-expanded, meaning readFiles kept growing.
+     * 
+     * Also, superkey1.ecl hits a dbgasserex whilst deserializing stats (before and after these PR changes),
+     * but is caught/ignored. I haven't investigated further.
+     */
+
+    IDistributedFile *file = queryReadFile(0);
+    if (file)
+    {
+        IDistributedSuperFile *super = file->querySuperFile();
+        if (super)
+        {       
+            unsigned numSubFiles = super->numSubFiles(true); //subFileStats.size();
+            if (subFileStats.size())
             {
-                stat_type numDiskReads = subFileStats[i]->getStatisticSum(StNumDiskReads);
-                StringBuffer clusterName;
-                file->getClusterName(0, clusterName);
-                diskAccessCost += money2cost_type(calcFileAccessCost(clusterName, 0, numDiskReads));
-                file->addAttrValue("@numDiskReads", numDiskReads);
+                assertex(numSubFiles <= subFileStats.size());
+                for (unsigned i=0; i<subFileStats.size(); i++)
+                {
+                    IDistributedFile &subFile = super->querySubFile(i, true);
+                    const char *subName = subFile.queryLogicalName();
+                    PROGLOG("subName = %s", subName);
+                    stat_type numDiskReads = subFileStats[i]->getStatisticSum(StNumDiskReads);
+                    StringBuffer clusterName;
+                    subFile.getClusterName(0, clusterName);
+                    diskAccessCost += money2cost_type(calcFileAccessCost(clusterName, 0, numDiskReads));
+                    subFile.addAttrValue("@numDiskReads", numDiskReads);
+                }
             }
         }
-    }
-    else
-    {
-        IDistributedFile *file = queryReadFile(0);
-        if (file)
+        else
         {
             stat_type numDiskReads = statsCollection.getStatisticSum(StNumDiskReads);
             StringBuffer clusterName;
@@ -1420,6 +1483,12 @@ CJobMaster::CJobMaster(IConstWorkUnit &_workunit, const char *graphName, ILoaded
     slaveMsgHandler.setown(new CSlaveMessageHandler(*this, slavemptag));
     tmpHandler.setown(createTempHandler(true));
     xgmml.set(graphXGMML);
+
+    StringBuffer tempDir(globals->queryProp("@thorTempDirectory"));
+    // multiple Thor jobs can be running on the same node, sharing the same local disk for temp storage.
+    // make the path unique by adding wuid+graphName+worker-num
+    VStringBuffer uniqueSubDir("%s_%s_0", workunit->queryWuid(), graphName); // 0 denotes master (workers = 1..N)
+    SetTempDir(tempDir, uniqueSubDir, "thtmp");
 }
 
 void CJobMaster::endJob()
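
For context on the fix described in the developer comment above, here is a minimal, self-contained sketch (not HPCC code; the class and deserialize format are illustrative stand-ins) of the intended pattern: allocate the per-subfile stats collections once, before any child-query iteration, so the count seen when deserializing always matches what was allocated.

    // Illustrative sketch only - names are hypothetical, not the HPCC classes.
    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct SubFileStats { std::uint64_t numDiskReads = 0; };

    class DiskReadStats
    {
        std::vector<SubFileStats> subFileStats; // sized once, not grown per CQ iteration
    public:
        // Called once, when the read file is first resolved (not on every CQ execution).
        void ensureSized(unsigned numSubFiles)
        {
            if (subFileStats.empty())
                subFileStats.resize(numSubFiles);
        }
        // Receiver side: the incoming count must not exceed what was allocated,
        // otherwise deserialization walks off the end (the assert seen in the Debug build).
        void deserialize(const std::vector<SubFileStats> &incoming)
        {
            assert(incoming.size() <= subFileStats.size());
            for (size_t i = 0; i < incoming.size(); i++)
                subFileStats[i].numDiskReads += incoming[i].numDiskReads;
        }
    };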

+ 5 - 2
thorlcr/graph/thgraphmaster.ipp

@@ -18,6 +18,8 @@
 #ifndef _THGRAPHMASTER_IPP
 #define _THGRAPHMASTER_IPP
 
+#include <unordered_map>
+
 #include "jmisc.hpp"
 #include "jsuperhash.hpp"
 #include "workunit.hpp"
@@ -238,6 +240,7 @@ class graphmaster_decl CMasterActivity : public CActivityBase, implements IThrea
     MemoryBuffer *data;
     CriticalSection progressCrit;
     IArrayOf<IDistributedFile> readFiles;
+    std::unordered_map<std::string, IDistributedFile *> readFilesMap; // NB: IDistributedFile pointers are owned by readFiles
 
 protected:
     std::vector<OwnedPtr<CThorEdgeCollection>> edgeStatsVector;
@@ -245,8 +248,9 @@ protected:
     IBitSet *notedWarnings;
     cost_type diskAccessCost = 0;
 
-    void addReadFile(IDistributedFile *file, bool temp=false);
     IDistributedFile *queryReadFile(unsigned f);
+    IDistributedFile *findReadFile(const char *lfnName);
+    IDistributedFile *lookupReadFile(const char *lfnName, bool jobTemp, bool temp, bool opt);
     void updateFileReadCostStats(std::vector<OwnedPtr<CThorStatsCollection>> & subFileStats);
     void updateFileWriteCostStats(IFileDescriptor & fileDesc, IPropertyTree &props, stat_type numDiskWrites);
     virtual void process() { }
@@ -269,7 +273,6 @@ public:
     virtual void serializeSlaveData(MemoryBuffer &dst, unsigned slave) { }
     virtual void slaveDone(size32_t slaveIdx, MemoryBuffer &mb) { }
 
-    virtual void preStart(size32_t parentExtractSz, const byte *parentExtract);
     virtual void startProcess(bool async=true);
     virtual bool wait(unsigned timeout);
     virtual void done();
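
The new readFilesMap member pairs an owning container with a non-owning lookup index. A minimal sketch of that ownership split, using standard containers rather than IArrayOf/IDistributedFile (all names here are illustrative):

    // Illustrative sketch - std::unique_ptr stands in for the linked IDistributedFile owned by readFiles.
    #include <memory>
    #include <string>
    #include <unordered_map>
    #include <vector>

    struct File { std::string logicalName; };

    class ReadFileCache
    {
        std::vector<std::unique_ptr<File>> readFiles;         // owns the files
        std::unordered_map<std::string, File *> readFilesMap; // non-owning index by logical name
    public:
        File *find(const std::string &lfn) const
        {
            auto it = readFilesMap.find(lfn);
            return (it == readFilesMap.end()) ? nullptr : it->second;
        }
        File *lookup(const std::string &lfn)
        {
            if (File *existing = find(lfn))
                return existing;                              // avoid re-resolving per CQ iteration
            readFiles.push_back(std::make_unique<File>(File{lfn}));
            File *added = readFiles.back().get();
            readFilesMap.emplace(lfn, added);
            return added;
        }
    };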

+ 6 - 0
thorlcr/graph/thgraphslave.cpp

@@ -1709,6 +1709,12 @@ CJobSlave::CJobSlave(ISlaveWatchdog *_watchdog, IPropertyTree *_workUnitInfo, co
      */
     if (queryMaxLfnBlockTimeMins() >= actInitWaitTimeMins)
         actInitWaitTimeMins = queryMaxLfnBlockTimeMins()+1;
+
+    StringBuffer tempDir(globals->queryProp("@thorTempDirectory"));
+    // multiple Thor jobs can be running on the same node, sharing the same local disk for temp storage.
+    // make the path unique by adding wuid+graphName+worker-num
+    VStringBuffer uniqueSubDir("%s_%s_%u", wuid.str(), graphName, globals->getPropInt("@slavenum"));
+    SetTempDir(tempDir, uniqueSubDir, "thtmp");
 }
 
 CJobChannel *CJobSlave::addChannel(IMPServer *mpServer)
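
Both the master (worker 0) and the workers (1..N) now derive a job-unique temp sub-directory under @thorTempDirectory. A small sketch of the naming scheme (buildTempSubDir is a hypothetical helper, not part of the platform):

    // Illustrative sketch of the wuid + graphName + worker-number naming convention.
    #include <string>

    // workerNum 0 denotes the Thor master; workers are numbered 1..N.
    std::string buildTempSubDir(const std::string &wuid, const std::string &graphName, unsigned workerNum)
    {
        return wuid + "_" + graphName + "_" + std::to_string(workerNum);
    }

    // e.g. buildTempSubDir("W20220101-123456", "graph1", 3) -> "W20220101-123456_graph1_3"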

+ 0 - 11
thorlcr/master/thmastermain.cpp

@@ -901,19 +901,8 @@ int main( int argc, const char *argv[]  )
             }
         }
 
-#ifdef _CONTAINERIZED
-        // multiple thor jobs can be running on same node, sharing same local disk for temp storage.
-        // make unique by adding wuid+graphName
-        addPathSepChar(tempDirStr).append(workunit);
-        addPathSepChar(tempDirStr).append(graphName);
-#endif
         // NB: set into globals, serialized and used by worker processes.
         globals->setProp("@thorTempDirectory", tempDirStr);
-        logDiskSpace(); // Log before temp space is cleared
-        StringBuffer tempPrefix("thtmp");
-        tempPrefix.append(getMasterPortBase()).append("_");
-        SetTempDir(0, tempDirStr.str(), tempPrefix.str(), true);
-        DBGLOG("Temp directory: %s", queryTempDir());
 
         startLogMsgParentReceiver();    
         connectLogMsgManagerToDali();

+ 7 - 14
thorlcr/mfilemanager/thmfilemanager.cpp

@@ -313,22 +313,15 @@ public:
 
     IDistributedFile *timedLookup(CJobBase &job, CDfsLogicalFileName &lfn, bool write, bool privilegedUser=false, unsigned timeout=INFINITE)
     {
-        VStringBuffer blockedMsg("lock file '%s' for %s access", lfn.get(), write ? "WRITE" : "READ");
-        if (!write)
+        auto func = [&job, &lfn, write, privilegedUser](unsigned timeout)
         {
-            if (lfn.isRemote() || (!lfn.isExternal() && job.getOptBool("dfsesp-localfiles")))
-            {
-                auto func = [&job, &lfn](unsigned timeout)
-                {
-                    return wsdfs::lookupLegacyDFSFile(lfn.get(), timeout, wsdfs::keepAliveExpiryFrequency, job.queryUserDescriptor());
-                };
-                return blockReportFunc<IDistributedFile *>(job, func, timeout, blockedMsg);
-            }
-        }
-        // NB: if we're here, we're not using DFSESP
-        auto func = [&job, &lfn, write, privilegedUser](unsigned timeout) { return queryDistributedFileDirectory().lookup(lfn, job.queryUserDescriptor(), write, false, false, nullptr, privilegedUser, timeout); };
+            return wsdfs::lookup(lfn, job.queryUserDescriptor(), write, false, false, nullptr, privilegedUser, timeout);
+        };
+
+        VStringBuffer blockedMsg("lock file '%s' for %s access", lfn.get(), write ? "WRITE" : "READ");
         return blockReportFunc<IDistributedFile *>(job, func, timeout, blockedMsg);
     }
+    
     IDistributedFile *timedLookup(CJobBase &job, const char *logicalName, bool write, bool privilegedUser=false, unsigned timeout=INFINITE)
     {
         CDfsLogicalFileName lfn;
@@ -507,7 +500,7 @@ public:
             StringBuffer dir;
             bool dirPerPart = false;
             if (temporary && !job.queryUseCheckpoints()) 
-                dir.append(queryTempDir(false));
+                dir.append(queryTempDir());
             else
             {
                 StringBuffer planeDir;
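
timedLookup now funnels both read and write lookups through a single lambda handed to blockReportFunc. As a rough, generic illustration of that shape (runWithBlockReport and its logging are assumptions, not the real blockReportFunc):

    // Generic illustration: one lambda captures the lookup parameters, and a single
    // wrapper adds the "report while blocked" behaviour for every code path.
    #include <functional>
    #include <iostream>
    #include <string>

    // Hypothetical stand-in for blockReportFunc: run func, logging blockedMsg so an operator
    // can see what the job is waiting on (the real helper reports periodically while blocked).
    template <class T>
    T runWithBlockReport(const std::function<T(unsigned)> &func, unsigned timeoutMs, const std::string &blockedMsg)
    {
        std::cerr << "waiting to " << blockedMsg << std::endl;
        return func(timeoutMs);
    }

    std::string timedLookup(const std::string &lfn, bool write, bool privilegedUser, unsigned timeoutMs)
    {
        // Single lookup path captured once; previously read and write access took different branches.
        auto func = [&lfn, write, privilegedUser](unsigned timeout) -> std::string
        {
            (void)timeout; (void)privilegedUser;
            return lfn + (write ? " [locked for WRITE]" : " [locked for READ]");
        };
        std::string blockedMsg = "lock file '" + lfn + "' for " + (write ? "WRITE" : "READ") + " access";
        return runWithBlockReport<std::string>(func, timeoutMs, blockedMsg);
    }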

+ 2 - 2
thorlcr/msort/tsorts.cpp

@@ -97,7 +97,7 @@ class CWriteIntercept : public CSimpleInterface
             {
                 // right create idx
                 StringBuffer tempname;
-                GetTempName(tempname.clear(),"srtidx",false);
+                GetTempFilePath(tempname.clear(),"srtidx");
                 idxFile.setown(createIFile(tempname.str()));
                 idxFileIO.setown(idxFile->open(IFOcreaterw));
                 if (!idxFileIO.get())
@@ -185,7 +185,7 @@ public:
     offset_t write(IRowStream *input)
     {
         StringBuffer tempname;
-        GetTempName(tempname,"srtmrg",false);
+        GetTempFilePath(tempname,"srtmrg");
         dataFile.setown(createIFile(tempname.str()));
         Owned<IExtRowWriter> output = createRowWriter(dataFile, rowIf);
 

+ 1 - 1
thorlcr/slave/slavmain.cpp

@@ -1866,7 +1866,7 @@ public:
                         if (!rfn.isLocal())
                         {
                             IWARNLOG("Cannot load shared object directly from remote path, creating temporary local copy: %s", soPath.str());
-                            GetTempName(tempSo,"so",true);
+                            GetTempFilePath(tempSo,"so");
                             copyFile(tempSo.str(), soPath.str());
                             soPath.clear().append(tempSo.str());
                         }

+ 5 - 13
thorlcr/slave/thslavemain.cpp

@@ -116,11 +116,13 @@ static bool RegisterSelf(SocketEndpoint &masterEp)
         mySlaveNum = (unsigned)processGroup->rank(queryMyNode());
         assertex(NotFound != mySlaveNum);
         mySlaveNum++; // 1 based;
-        unsigned configSlaveNum = globals->getPropInt("@slavenum", NotFound);
-        if (NotFound != configSlaveNum)
-            assertex(mySlaveNum == configSlaveNum);
 
+        unsigned configSlaveNum = globals->getPropInt("@slavenum", NotFound);
         globals.setown(createPTree(msg));
+        if (NotFound == configSlaveNum)
+            globals->setPropInt("@slavenum", mySlaveNum);
+        else
+            assertex(mySlaveNum == configSlaveNum);
 
         /* NB: preserve command line option overrides
          * Not sure if any cmdline options are actually needed by this stage..
@@ -487,18 +489,9 @@ int main( int argc, const char *argv[]  )
             }
 #endif
 
-            // NB: master has set, and serialized in globals
-            StringBuffer tempDirStr(globals->queryProp("@thorTempDirectory"));
-            addPathSepChar(tempDirStr).append(mySlaveNum);
-
-            logDiskSpace(); // Log before temp space is cleared
-            SetTempDir(mySlaveNum, tempDirStr.str(), "thtmp", true);
-
             useMemoryMappedRead(globals->getPropBool("@useMemoryMappedRead"));
 
             LOG(MCdebugProgress, thorJob, "ThorSlave Version LCR - %d.%d started",THOR_VERSION_MAJOR,THOR_VERSION_MINOR);
-            StringBuffer url;
-            LOG(MCdebugProgress, thorJob, "Slave %s - temporary dir set to : %s", slfEp.getUrlStr(url).str(), queryTempDir());
 #ifdef _WIN32
             ULARGE_INTEGER userfree;
             ULARGE_INTEGER total;
@@ -583,7 +576,6 @@ int main( int argc, const char *argv[]  )
 #ifndef _CONTAINERIZED
     stopPerformanceMonitor();
 #endif
-    ClearTempDirs();
 
     if (multiThorMemoryThreshold)
         setMultiThorMemoryNotify(0,NULL);
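
RegisterSelf now treats a configured @slavenum as optional: when absent, the rank-derived number is adopted; when present, the two must agree. A compact, standalone sketch of that reconciliation (plain types stand in for the property tree):

    // Illustrative sketch of the @slavenum reconciliation logic.
    #include <cassert>
    #include <optional>

    // rankBasedNum is 1-based (rank within the process group, plus one).
    unsigned resolveSlaveNum(unsigned rankBasedNum, std::optional<unsigned> configuredNum)
    {
        if (!configuredNum)
            return rankBasedNum;                 // no @slavenum configured: adopt the rank-derived number
        assert(*configuredNum == rankBasedNum);  // both present: they must match
        return *configuredNum;
    }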

+ 2 - 2
thorlcr/thorutil/thmem.cpp

@@ -246,7 +246,7 @@ protected:
 
         StringBuffer tempName;
         VStringBuffer tempPrefix("streamspill_%d", activity.queryId());
-        GetTempName(tempName, tempPrefix.str(), true);
+        GetTempFilePath(tempName, tempPrefix.str());
         spillFile.setown(createIFile(tempName.str()));
 
         VStringBuffer spillPrefixStr("SpillableStream(%u)", spillPriority);
@@ -1653,7 +1653,7 @@ protected:
             tempPrefix.append("srt");
         }
         tempPrefix.appendf("spill_%d", activity.queryId());
-        GetTempName(tempName, tempPrefix.str(), true);
+        GetTempFilePath(tempName, tempPrefix.str());
         Owned<IFile> iFile = createIFile(tempName.str());
         VStringBuffer spillPrefixStr("%sRowCollector(%d)", tracingPrefix.str(), spillPriority);
         spillableRows.save(*iFile, spillCompInfo, false, spillPrefixStr.str()); // saves committed rows

+ 68 - 82
thorlcr/thorutil/thormisc.cpp

@@ -625,99 +625,77 @@ class CTempNameHandler
 {
 public:
     unsigned num;
-    StringAttr tempdir, tempPrefix;
-    StringAttr alttempdir; // only set if needed
+    StringBuffer rootDir, subDirName, prefix, subDirPath;
     CriticalSection crit;
-    bool altallowed;
-    bool cleardir;
-    unsigned slaveNum = 0;
 
     CTempNameHandler()
     {
         num = 0;
-        altallowed = false;
-        cleardir = false;
     }
-    ~CTempNameHandler()
-    {
-        if (cleardir) 
-            clearDirs(false);       // don't log as jlog may have closed
-    }
-    const char *queryTempDir(bool alt) 
+    const char *queryTempDir() 
     { 
-        if (alt&&altallowed) 
-            return alttempdir;
-        return tempdir; 
+        return subDirPath; 
     }
-    void setTempDir(unsigned _slaveNum, const char *name, const char *_tempPrefix, bool clear)
+    void setTempDir(const char *_rootDir, const char *_subDirName, const char *_prefix)
     {
-        assertex(name && *name);
+        assertex(!isEmptyString(_rootDir) && !isEmptyString(_prefix) && !isEmptyString(_subDirName));
         CriticalBlock block(crit);
-        slaveNum = _slaveNum;
-        assertex(tempdir.isEmpty()); // should only be called once
-        tempPrefix.set(_tempPrefix);
-        StringBuffer base(name);
-        addPathSepChar(base);
-        tempdir.set(base.str());
-        recursiveCreateDirectory(tempdir);
-#ifdef _WIN32
-        altallowed = false;
-#else
-        altallowed = globals->getPropBool("@thor_dual_drive",true);
-#endif
-        if (altallowed)
-        {
-            unsigned d = getPathDrive(tempdir);
-            if (d>1)
-                altallowed = false;
-            else
-            {
-                StringBuffer p(tempdir);
-                alttempdir.set(setPathDrive(p,d?0:1).str());
-                recursiveCreateDirectory(alttempdir);
-            }
-        }
-        cleardir = clear;
-        if (clear)
-            clearDirs(true);
+        assertex(subDirPath.isEmpty());
+        rootDir.set(_rootDir);
+        addPathSepChar(rootDir);
+        subDirName.set(_subDirName);
+        prefix.set(_prefix);
+        subDirPath.setf("%s%s", rootDir.str(), subDirName.str());
+        recursiveCreateDirectory(subDirPath);
     }
-    static void clearDir(const char *dir, bool log)
+    void clear(bool log)
     {
-        if (dir&&*dir)
+        assertex(subDirPath.length());
+        Owned<IDirectoryIterator> iter = createDirectoryIterator(subDirPath);
+        ForEach (*iter)
         {
-            Owned<IDirectoryIterator> iter = createDirectoryIterator(dir);
-            ForEach (*iter)
+            IFile &file = iter->query();
+            if (file.isFile()==fileBool::foundYes)
             {
-                IFile &file = iter->query();
-                if (file.isFile()==fileBool::foundYes)
+                if (log)
+                    LOG(MCdebugInfo, thorJob, "Deleting %s", file.queryFilename());
+                try { file.remove(); }
+                catch (IException *e)
                 {
                     if (log)
-                        LOG(MCdebugInfo, thorJob, "Deleting %s", file.queryFilename());
-                    try { file.remove(); }
-                    catch (IException *e)
-                    {
-                        if (log)
-                            FLLOG(MCwarning, thorJob, e);
-                        e->Release();
-                    }
+                        FLLOG(MCwarning, thorJob, e);
+                    e->Release();
                 }
             }
         }
+        try
+        {
+            Owned<IFile> dirIFile = createIFile(subDirPath);
+            bool success = dirIFile->remove();
+            if (log)
+                PROGLOG("%s to delete temp directory: %s", success ? "Succeeded" : "Failed", subDirPath.str());
+        }
+        catch (IException *e)
+        {
+            if (log)
+                FLLOG(MCwarning, thorJob, e);
+            e->Release();
+        }
+        subDirPath.clear();
     }
-    void clearDirs(bool log)
-    {
-        clearDir(tempdir,log);
-        clearDir(alttempdir,log);
-    }
-    void getTempName(StringBuffer &name, const char *suffix,bool alt)
+    void getTempName(StringBuffer &name, const char *suffix, bool inTempDir)
     {
         CriticalBlock block(crit);
-        assertex(!tempdir.isEmpty()); // should only be called once
-        if (alt && altallowed)
-            name.append(alttempdir);
+        assertex(!subDirPath.isEmpty());
+        if (inTempDir)
+        {
+            name.append(rootDir);
+            name.append(subDirName);
+            addPathSepChar(name);
+        }
         else
-            name.append(tempdir);
-        name.append(tempPrefix).append((unsigned)GetCurrentProcessId()).append('_').append(slaveNum).append('_').append(++num);
+            name.append(subDirName).append('_');
+        name.append(prefix).append('_').append(++num);
         if (suffix)
             name.append("__").append(suffix);
         name.append(".tmp");
@@ -726,31 +704,39 @@ public:
 
 
 
-void GetTempName(StringBuffer &name, const char *prefix,bool altdisk)
+void GetTempFileName(StringBuffer &name, const char *suffix)
 {
-    TempNameHandler.getTempName(name, prefix, altdisk);
+    TempNameHandler.getTempName(name, suffix, false);
 }
 
-void SetTempDir(unsigned slaveNum, const char *name, const char *tempPrefix, bool clear)
+void GetTempFilePath(StringBuffer &name, const char *suffix)
 {
-    TempNameHandler.setTempDir(slaveNum, name, tempPrefix, clear);
+    TempNameHandler.getTempName(name, suffix, true);
 }
 
-void ClearDir(const char *dir)
+void SetTempDir(const char *rootTempDir, const char *uniqueSubDir, const char *tempPrefix)
 {
-    CTempNameHandler::clearDir(dir,true);
+    TempNameHandler.setTempDir(rootTempDir, uniqueSubDir, tempPrefix);
+    LOG(MCdebugProgress, thorJob, "temporary rootTempDir: %s, uniqueSubDir: %s, prefix: %s", rootTempDir, uniqueSubDir, tempPrefix);
 }
 
-void ClearTempDirs()
+void ClearTempDir()
 {
-    TempNameHandler.clearDirs(true);
-    LOG(MCthorDetailedDebugInfo, thorJob, "temp directory cleared");
+    try
+    {
+        TempNameHandler.clear(true);
+        LOG(MCthorDetailedDebugInfo, thorJob, "temp directory cleared");
+    }
+    catch (IException *e)
+    {
+        EXCLOG(e, "ClearTempDir");
+        e->Release();
+    }
 }
 
-
-const char *queryTempDir(bool altdisk)
+const char *queryTempDir()
 {
-    return TempNameHandler.queryTempDir(altdisk);
+    return TempNameHandler.queryTempDir();
 }
 
 class DECL_EXCEPTION CBarrierAbortException: public CSimpleInterface, public IBarrierException
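
The reworked CTempNameHandler composes temp names from rootDir, subDirName, prefix, a per-handler counter and an optional suffix. A simplified, standalone sketch of that composition (without the critical section or directory creation; the directory values in the comments are examples only):

    // Simplified illustration of the new naming scheme (not the actual CTempNameHandler).
    #include <string>

    struct TempNamer
    {
        std::string rootDir;     // e.g. "/var/lib/HPCCSystems/temp/"
        std::string subDirName;  // e.g. "W20220101-123456_graph1_0"
        std::string prefix;      // e.g. "thtmp"
        unsigned num = 0;

        // inTempDir=true yields a full path inside the job's temp sub-directory
        // (GetTempFilePath); false yields just a unique file name (GetTempFileName).
        std::string getTempName(const std::string &suffix, bool inTempDir)
        {
            std::string name;
            if (inTempDir)
                name = rootDir + subDirName + "/";
            else
                name = subDirName + "_";
            name += prefix + "_" + std::to_string(++num);
            if (!suffix.empty())
                name += "__" + suffix;
            name += ".tmp";
            return name;
        }
    };

    // e.g. getTempName("srtmrg", true)
    //   -> "/var/lib/HPCCSystems/temp/W20220101-123456_graph1_0/thtmp_1__srtmrg.tmp"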

+ 5 - 5
thorlcr/thorutil/thormisc.hpp

@@ -505,11 +505,11 @@ extern graph_decl IThorException *MakeThorFatal(IException *e, int code, const c
 extern graph_decl IThorException *ThorWrapException(IException *e, const char *msg, ...) __attribute__((format(printf, 2, 3)));
 extern graph_decl void setExceptionActivityInfo(CGraphElementBase &container, IThorException *e);
 
-extern graph_decl void GetTempName(StringBuffer &name, const char *prefix=NULL,bool altdisk=false);
-extern graph_decl void SetTempDir(unsigned slaveNum, const char *name, const char *tempPrefix, bool clear);
-extern graph_decl void ClearDir(const char *dir);
-extern graph_decl void ClearTempDirs();
-extern graph_decl const char *queryTempDir(bool altdisk=false);  
+extern graph_decl void GetTempFilePath(StringBuffer &name, const char *suffix);
+extern graph_decl void GetTempFileName(StringBuffer &name, const char *suffix);
+extern graph_decl void SetTempDir(const char *rootTempDir, const char *uniqueSubDir, const char *tempPrefix);
+extern graph_decl void ClearTempDir();
+extern graph_decl const char *queryTempDir();
 extern graph_decl void loadCmdProp(IPropertyTree *tree, const char *cmdProp);
 
 extern graph_decl void ensureDirectoryForFile(const char *fName);
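
Putting the renamed helpers in thormisc.hpp together, a typical lifecycle might look like the sketch below (directory, sub-directory and prefix values are illustrative; assumes the usual jlib/thormisc includes):

    // Illustrative usage of the revised temp-file API (values are examples only).
    StringBuffer spillPath;
    SetTempDir("/var/lib/HPCCSystems/temp", "W20220101-123456_graph1_2", "thtmp"); // once per job/worker
    GetTempFilePath(spillPath, "srtmrg");    // full path inside the job's unique temp sub-directory
    // ... create and use the temp file at spillPath ...
    PROGLOG("temp dir in use: %s", queryTempDir());
    ClearTempDir();                          // deletes remaining temp files and the sub-directory itself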