
Merge remote-tracking branch 'origin/candidate-7.12.x'

Signed-off-by: Gavin Halliday <gavin.halliday@lexisnexis.com>
Gavin Halliday authored 4 years ago · commit 279a6bdf43

+ 1 - 2
CMakeLists.txt

@@ -487,8 +487,7 @@ if(TOP_LEVEL_PROJECT)
 
             add_custom_target(SIGN
                 COMMAND signtool sign /f "${SIGN_DIRECTORY}/hpcc_code_signing.pfx"
-/p "${PFX_PASSWORD}" /t "http://timestamp.globalsign.com/scripts/timstamp.dll"
-"${CMAKE_BINARY_DIR}/${PACKAGE_FILE_NAME_PREFIX}*.exe"
+/p "${PFX_PASSWORD}" /tr "http://timestamp.digicert.com" "${CMAKE_BINARY_DIR}/${PACKAGE_FILE_NAME_PREFIX}*.exe"
                 COMMENT "Digital Signature"
             )
             add_dependencies(SIGN PACKAGE)

+ 5 - 5
docs/EN_US/ECLLanguageReference/ECLR_mods/BltInFunc-TOJSON.xml

@@ -25,17 +25,17 @@
           <row>
             <entry>Return:</entry>
 
-            <entry>TOJSON returns a STRING.</entry>
+            <entry>TOJSON returns a UTF8.</entry>
           </row>
         </tbody>
       </tgroup>
     </informaltable></para>
 
   <para>The <emphasis role="bold">TOJSON </emphasis>function returns a single
-  string with the data in the <emphasis>record</emphasis> re-formatted as
-  JSON. If the RECORD structure of the <emphasis>record</emphasis> has XPATHs
-  defined, then they will be used, otherwise the lower-cased field names are
-  used as the JSON tag names.</para>
+  UTF-8 string with the data in the <emphasis>record</emphasis> re-formatted
+  as JSON. If the RECORD structure of the <emphasis>record</emphasis> has
+  XPATHs defined, then they will be used; otherwise, the lower-cased field
+  names are used as the JSON tag names.</para>
 
   <para>Example:</para>
 

+ 7 - 5
docs/EN_US/ECLLanguageReference/ECLR_mods/BltInFunc-TOXML.xml

@@ -26,17 +26,19 @@
           <row>
             <entry>Return:</entry>
 
-            <entry>TOXML returns a STRING.</entry>
+            <entry>TOXML returns a UTF8.</entry>
           </row>
         </tbody>
       </tgroup>
     </informaltable></para>
 
+  <para></para>
+
   <para>The <emphasis role="bold">TOXML </emphasis>function returns a single
-  string with the data in the <emphasis>record</emphasis> re-formatted as XML.
-  If the RECORD structure of the <emphasis>record</emphasis> has XPATHs
-  defined, then they will be used, otherwise the lower-cased field names are
-  used as the XML tag names.</para>
+  UTF-8 string with the data in the <emphasis>record</emphasis> re-formatted
+  as XML. If the RECORD structure of the <emphasis>record</emphasis> has
+  XPATHs defined, then they will be used; otherwise, the lower-cased field
+  names are used as the XML tag names.</para>
 
   <para>Example:</para>
 

+ 2 - 2
docs/EN_US/HPCCSpark/SparkHPCC.xml

@@ -37,7 +37,7 @@
       similarity to actual persons, living or dead, is purely
       coincidental.</para>
 
-      <para />
+      <para></para>
     </legalnotice>
 
     <xi:include href="common/Version.xml" xpointer="FooterInfo"
@@ -541,7 +541,7 @@
       dataset. The dataset can be obtained from a variety of sources,
       including the HPCC-Systems/ecl-ml repository. IrisDs.ecl (can be found
       under the ML/Tests/Explanatory folder: <ulink
-      url="https://github.com/hpcc-systems/ecl-ml/blob/master/ML/Tests/Explanatory/IrisDS.ecl">https://github.com/hpcc-systems/ecl-ml/blob/master/ML/Tests/Explanatory/IrisDS.ecl</ulink>)
+      url="https://github.com/hpcc-systems/Spark-HPCC/blob/master/Examples/src/main/ecl/IrisDS.ecl">https://github.com/hpcc-systems/Spark-HPCC/blob/master/Examples/src/main/ecl/IrisDS.ecl</ulink>)
       can be executed to generate the Iris dataset in HPCC. A walk-through of
       the examples is provided in the Examples section.</para>
 

+ 20 - 12
ecl/hthor/hthor.cpp

@@ -6302,6 +6302,19 @@ CHThorWorkUnitWriteActivity::CHThorWorkUnitWriteActivity(IAgentContext &_agent,
 {
 }
 
+static void throwWuResultTooLarge(size32_t outputLimit, IHThorWorkUnitWriteArg &helper)
+{
+    StringBuffer errMsg("Dataset too large to output to workunit (limit "); 
+    errMsg.append(outputLimit/0x100000).append(" megabytes), in result (");
+    const char *name = helper.queryName();
+    if (name)
+        errMsg.append("name=").append(name);
+    else
+        errMsg.append("sequence=").append(helper.getSequence());
+    errMsg.append(")");
+    throw MakeStringExceptionDirect(0, errMsg.str());
+}
+
 void CHThorWorkUnitWriteActivity::execute()
 {
     unsigned flags = helper.getFlags();
@@ -6359,18 +6372,8 @@ void CHThorWorkUnitWriteActivity::execute()
                 break;
         }
         size32_t thisSize = inputMeta->getRecordSize(nextrec);
-        if(outputLimit && ((rowdata.length() + thisSize) > outputLimit))
-        {
-            StringBuffer errMsg("Dataset too large to output to workunit (limit "); 
-            errMsg.append(outputLimit/0x100000).append(" megabytes), in result (");
-            const char *name = helper.queryName();
-            if (name)
-                errMsg.append("name=").append(name);
-            else
-                errMsg.append("sequence=").append(helper.getSequence());
-            errMsg.append(")");
-            throw MakeStringExceptionDirect(0, errMsg.str());
-         }
+        if (outputLimit && ((rowdata.length() + thisSize) > outputLimit))
+            throwWuResultTooLarge(outputLimit, helper);
         if (rowSerializer)
         {
             CThorDemoRowSerializer serializerTarget(rowdata);
@@ -6401,7 +6404,12 @@ void CHThorWorkUnitWriteActivity::execute()
     WorkunitUpdate w = agent.updateWorkUnit();
     Owned<IWUResult> result = updateWorkUnitResult(w, helper.queryName(), helper.getSequence());
     if (0 != (POFextend & helper.getFlags()))
+    {
+        __int64 existingSz = result->getResultRawSize(nullptr, nullptr);
+        if (outputLimit && ((rowdata.length() + existingSz) > outputLimit))
+            throwWuResultTooLarge(outputLimit, helper);
         result->addResultRaw(rowdata.length(), rowdata.toByteArray(), ResultFormatRaw);
+    }
     else
         result->setResultRaw(rowdata.length(), rowdata.toByteArray(), ResultFormatRaw);
     result->setResultStatus(ResultStatusCalculated);

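The hthor change above does two things: it factors the duplicated error construction into a throwWuResultTooLarge helper, and it extends the output limit check to the append (POFextend) path, where the result's existing raw size must count toward the limit. A minimal standalone sketch of that guard-before-append pattern (ResultInfo and these function names are illustrative stand-ins, not the actual IHThorWorkUnitWriteArg/IWUResult interfaces):

    // ResultInfo and these helpers are illustrative stand-ins, not the HPCC API.
    #include <cstdint>
    #include <stdexcept>
    #include <string>

    struct ResultInfo
    {
        std::string name;     // empty if the result is identified by sequence only
        int sequence = 0;
    };

    // Build the error once, in one place, so the streaming path and the
    // append path report the limit identically.
    [[noreturn]] static void throwResultTooLarge(uint64_t limitBytes, const ResultInfo &info)
    {
        std::string msg = "Dataset too large to output to workunit (limit "
                        + std::to_string(limitBytes / 0x100000) + " megabytes), in result (";
        if (!info.name.empty())
            msg += "name=" + info.name;
        else
            msg += "sequence=" + std::to_string(info.sequence);
        msg += ")";
        throw std::runtime_error(msg);
    }

    // When appending to an existing result, the bytes already stored must
    // count toward the limit; checking only the new rows would let the
    // total grow without bound across repeated appends.
    void checkAppend(uint64_t existingBytes, uint64_t newBytes,
                     uint64_t limitBytes, const ResultInfo &info)
    {
        if (limitBytes && existingBytes + newBytes > limitBytes)
            throwResultTooLarge(limitBytes, info);
    }
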
+ 1 - 0
esp/src/eclwatch/ActivityWidget.js

@@ -285,6 +285,7 @@ define([
                 }
             });
             this.createStackControllerTooltip(this.id + "AutoRefresh", this.i18n.AutoRefresh + ": " + this.autoRefreshButton.get("checked"));
+            this.resize();
         },
 
         createGrid: function (domID) {

+ 3 - 0
esp/src/eclwatch/DFUQueryWidget.js

@@ -211,6 +211,9 @@ define([
             if (confirm(this.i18n.DeleteSelectedFiles + "\n" + list)) {
                 var context = this;
                 WsDfu.DFUArrayAction(selection, "Delete").then(function (response) {
+                    selection.forEach(item => {
+                        context.listStore.remove(ESPLogicalFile.createID(item.Cluster, item.Name));
+                    });
                     context.refreshGrid(true);
                 });
             }

+ 5 - 2
esp/src/src/ESPLogicalFile.ts

@@ -14,7 +14,7 @@ import * as WsDfu from "./WsDfu";
 
 const _logicalFiles = {};
 
-const createID = function (Cluster, Name) {
+export const createID = function (Cluster, Name) {
     return (Cluster ? Cluster : "") + "--" + Name;
 };
 
@@ -74,7 +74,10 @@ class Store extends ESPRequest.Store {
             });
         }
     }
-
+    remove(id) {
+        this.inherited(arguments);
+        delete _logicalFiles[id];
+    }
     preProcessRow(item, request, query, options) {
         lang.mixin(item, {
             __hpcc_id: createID(item.NodeGroup, item.Name),

+ 1 - 1
esp/src/src/WsTopology.ts

@@ -81,8 +81,8 @@ class TpLogFileStore extends Memory {
                                 }
 
                                 tempObj[cleanName] = value;
-                                data.push(tempObj);
                             }
+                            data.push(tempObj);
                         }
                     }, this);
                 }

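The one-line WsTopology.ts fix moves data.push(tempObj) out of the per-field loop, so each log line produces exactly one row once all of its columns have been collected, instead of one partially filled row per field. The shape of the fix, sketched in C++ with hypothetical types:

    // Hypothetical types; the real code builds dojo store rows from log fields.
    #include <map>
    #include <string>
    #include <utility>
    #include <vector>

    using Field = std::pair<std::string, std::string>;  // (cleanName, value)
    using Row = std::map<std::string, std::string>;

    std::vector<Row> assembleRows(const std::vector<std::vector<Field>> &lines)
    {
        std::vector<Row> data;
        for (const auto &line : lines)
        {
            Row tempObj;
            for (const auto &field : line)
                tempObj[field.first] = field.second;  // collect every column first
            data.push_back(tempObj);                  // then emit the completed row once
        }
        return data;
    }
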
+ 9 - 3
thorlcr/activities/indexwrite/thindexwriteslave.cpp

@@ -41,6 +41,7 @@ class IndexWriteSlaveActivity : public ProcessSlaveActivity, public ILookAheadSt
     Owned<IPartDescriptor> partDesc, tlkDesc;
     IHThorIndexWriteArg *helper;
     Owned <IKeyBuilder> builder;
+    OwnedIFileIO builderIFileIO;
     Owned<IRowStream> myInputStream;
     Owned<IPropertyTree> metadata;
     Linked<IEngineRowAllocator> outRowAllocator;
@@ -182,8 +183,8 @@ public:
         if (metadata->getPropBool("_useTrailingHeader", true))
             flags |= USE_TRAILING_HEADER;
         unsigned twFlags = isUrl(partFname) ? TW_Direct : TW_RenameToPrimary;
-        OwnedIFileIO iFileIO = createMultipleWrite(this, partDesc, 0, twFlags, compress, NULL, this, &abortSoon);
-        Owned<IFileIOStream> out = createBufferedIOStream(iFileIO, 0x100000);
+        builderIFileIO.setown(createMultipleWrite(this, partDesc, 0, twFlags, compress, NULL, this, &abortSoon));
+        Owned<IFileIOStream> out = createBufferedIOStream(builderIFileIO, 0x100000);
         if (!needsSeek)
             out.setown(createNoSeekIOStream(out));
 
@@ -239,7 +240,12 @@ public:
         try 
         { 
             metadata.clear();
-            builder.clear(); 
+            builder.clear();
+            if (builderIFileIO)
+            {
+                builderIFileIO->close();
+                builderIFileIO.clear();
+            }
         }
         catch (IException *_e)
         {

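Here the file IO is promoted from a local in the write path to the builderIFileIO member so that done() can close it explicitly; a failure at close time (for example on the final flush or rename) then surfaces inside the existing try/catch instead of being swallowed by a destructor. A simplified sketch of the promote-to-member idea (FileIO is a stand-in, not the HPCC IFileIO interface):

    // FileIO is an illustrative stand-in for the HPCC IFileIO interface.
    #include <memory>

    struct FileIO
    {
        bool open = true;
        void close()            // may throw, e.g. if the final flush fails
        {
            if (!open)
                return;
            open = false;
            // flush buffers, rename the temp file to the primary, ...
        }
        ~FileIO() { /* must not throw; any error here would be lost */ }
    };

    class IndexWriter
    {
        std::shared_ptr<FileIO> builderIO;   // member, so done() can reach it

    public:
        void start()
        {
            // Previously a local: it only closed when it went out of scope,
            // hiding any close-time failure inside a destructor.
            builderIO = std::make_shared<FileIO>();
        }

        void done()
        {
            if (builderIO)
            {
                builderIO->close();   // errors propagate to a catchable point
                builderIO.reset();
            }
        }
    };
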
+ 39 - 18
thorlcr/activities/thactivityutil.cpp

@@ -609,25 +609,15 @@ class CWriteHandler : implements IFileIO, public CInterface
     bool remote;
     CFIPScope fipScope;
     unsigned twFlags;
+    bool closed = false;
 
-public:
-    IMPLEMENT_IINTERFACE_USING(CInterface);
-
-    CWriteHandler(CActivityBase &_activity, IPartDescriptor &_partDesc, IFile *_primary, IFileIO *_primaryio, ICopyFileProgress *_iProgress, unsigned _twFlags, bool *_aborted)
-        : activity(_activity), partDesc(_partDesc), primary(_primary), primaryio(_primaryio), iProgress(_iProgress), twFlags(_twFlags), aborted(_aborted), fipScope(primary->queryFilename())
-    {
-        RemoteFilename rfn;
-        partDesc.getFilename(0, rfn);
-        remote = !rfn.isLocal();
-        rfn.getPath(primaryName);
-        if (globals->getPropBool("@replicateAsync", true))
-            cancelReplicates(&activity, partDesc);
-    }
-    virtual void beforeDispose() override
+    void checkAndHandleClose()
     {
-        // Can't throw in destructor...
-        // Note that if we do throw the CWriteHandler object is liable to be leaked...
-        primaryio.clear(); // should close
+        if (closed)
+            return;
+        closed = true;
+        primaryio->close();
+        primaryio.clear();
         if (aborted && *aborted)
         {
             primary->remove(); // i.e. never completed, so remove partial (temp) primary
@@ -692,6 +682,34 @@ public:
         if (partDesc.numCopies()>1)
             _doReplicate(&activity, partDesc, iProgress);
     }
+
+public:
+    IMPLEMENT_IINTERFACE_USING(CInterface);
+
+    CWriteHandler(CActivityBase &_activity, IPartDescriptor &_partDesc, IFile *_primary, IFileIO *_primaryio, ICopyFileProgress *_iProgress, unsigned _twFlags, bool *_aborted)
+        : activity(_activity), partDesc(_partDesc), primary(_primary), primaryio(_primaryio), iProgress(_iProgress), twFlags(_twFlags), aborted(_aborted), fipScope(primary->queryFilename())
+    {
+        RemoteFilename rfn;
+        partDesc.getFilename(0, rfn);
+        remote = !rfn.isLocal();
+        rfn.getPath(primaryName);
+        if (globals->getPropBool("@replicateAsync", true))
+            cancelReplicates(&activity, partDesc);
+    }
+    virtual void beforeDispose() override
+    {
+        // Can't throw in destructor...
+        // Note that if we do throw the CWriteHandler object is liable to be leaked...
+        try
+        {
+            checkAndHandleClose();
+        }
+        catch (IException *e)
+        {
+            EXCLOG(e, "CWriteHandler::beforeDispose");
+            e->Release();
+        }
+    }
 // IFileIO impl.
     virtual size32_t read(offset_t pos, size32_t len, void * data) { return primaryio->read(pos, len, data); }
     virtual offset_t size() { return primaryio->size(); }
@@ -700,7 +718,10 @@ public:
     virtual unsigned __int64 getStatistic(StatisticKind kind) { return primaryio->getStatistic(kind); }
     virtual void setSize(offset_t size) { primaryio->setSize(size); }
     virtual void flush() { primaryio->flush(); }
-    virtual void close() { primaryio->close(); }
+    virtual void close() 
+    {
+        checkAndHandleClose();
+    }
 };
 
 IFileIO *createMultipleWrite(CActivityBase *activity, IPartDescriptor &partDesc, unsigned recordSize, unsigned twFlags, bool &compress, ICompressor *ecomp, ICopyFileProgress *iProgress, bool *aborted, StringBuffer *_outLocationName)

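The CWriteHandler rework is the heart of this branch: close() and beforeDispose() both funnel into an idempotent checkAndHandleClose(), so the file is finalized exactly once, errors from an explicit close() propagate to the caller, and the destructor-time fallback logs rather than throws (destructors must not throw). The thdiskbaseslave.cpp hunk below takes advantage of this by closing explicitly instead of relying on the dtor. The pattern in miniature, with std stand-ins for IException/EXCLOG:

    // Simplified stand-in for CWriteHandler; std::exception replaces IException.
    #include <cstdio>
    #include <stdexcept>

    class WriteHandler
    {
        bool closed = false;

        void checkAndHandleClose()      // safe to call any number of times
        {
            if (closed)
                return;
            closed = true;
            // close the underlying IO, publish/rename the file, replicate...
            // any of which may throw
        }

    public:
        void close()                    // explicit path: let errors propagate
        {
            checkAndHandleClose();
        }

        ~WriteHandler()                 // fallback path: log, never throw
        {
            try
            {
                checkAndHandleClose();
            }
            catch (const std::exception &e)
            {
                std::fprintf(stderr, "WriteHandler::~WriteHandler: %s\n", e.what());
            }
        }
    };
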
+ 1 - 0
thorlcr/activities/thdiskbaseslave.cpp

@@ -440,6 +440,7 @@ void CDiskWriteSlaveActivityBase::close()
             tmpFileIO.setown(outputIO.getClear());
         }
         mergeStats(stats, tmpFileIO, diskWriteRemoteStatistics);
+        tmpFileIO->close(); // NB: close now, do not rely on close in dtor
 
         if (!rfsQueryParallel && dlfn.isExternal() && !lastNode())
         {

+ 18 - 11
thorlcr/activities/wuidwrite/thwuidwrite.cpp

@@ -38,12 +38,28 @@ protected:
     StringAttr resultName;
     unsigned resultSeq;
 
+    void throwWuResultTooLarge()
+    {
+        StringBuffer errMsg("Dataset too large to output to workunit (limit is set to ");
+        errMsg.append(workunitWriteLimit/0x100000).append(" megabytes), in result (");
+        if (resultName.length())
+            errMsg.append("name=").append(resultName);
+        else
+            errMsg.append("sequence=").append(resultSeq);
+        errMsg.append(")");
+        throw MakeThorException(TE_WorkUnitWriteLimitExceeded, "%s", errMsg.str());
+    }
     void addResult(rowcount_t resultCount, MemoryBuffer &resultData, bool complete)
     {
         Owned<IWorkUnit> wu = &container.queryJob().queryWorkUnit().lock();
         Owned<IWUResult> result = updateWorkUnitResult(wu, resultName, resultSeq);
         if (appendOutput)
+        {
+            __int64 existingSz = result->getResultRawSize(nullptr, nullptr);
+            if (workunitWriteLimit && (existingSz+resultData.length() > workunitWriteLimit))
+                throwWuResultTooLarge();
             result->addResultRaw(resultData.length(), resultData.toByteArray(), ResultFormatRaw);
+        }
         else
             result->setResultRaw(resultData.length(), resultData.toByteArray(), ResultFormatRaw);
         result->setResultRowCount(resultCount);
@@ -121,17 +137,8 @@ public:
                 unsigned numGot;
                 mb.read(numGot);
                 unsigned l=mb.remaining();
-                if (workunitWriteLimit && totalSize+resultData.length()+l > workunitWriteLimit)
-                {
-                    StringBuffer errMsg("Dataset too large to output to workunit (limit is set to ");
-                    errMsg.append(workunitWriteLimit/0x100000).append(") megabytes, in result (");
-                    if (resultName.length())
-                        errMsg.append("name=").append(resultName);
-                    else
-                        errMsg.append("sequence=").append(resultSeq);
-                    errMsg.append(")");
-                    throw MakeThorException(TE_WorkUnitWriteLimitExceeded, "%s", errMsg.str());
-                }
+                if (workunitWriteLimit && (totalSize+resultData.length()+l > workunitWriteLimit))
+                    throwWuResultTooLarge();
                 resultData.append(l, mb.readDirect(l));
                 mb.clear();
                 numResults += numGot;