HPCC-12677 Pick up option in Roxie too

Also
1) Move a couple of common options to thorcommon.hpp
2) Change outputLimit to outputLimitMb,
but continue to accept the former too.

Signed-off-by: Jake Smith <jake.smith@lexisnexis.com>
Jake Smith 10 years ago
parent
commit
c3b72f4a81
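
For context, the renamed option is read with a nested fallback everywhere in this commit: the inner lookup of the pre-5.2 legacy name supplies the default for the outer lookup of the new name, so a legacy setting only wins when the new one is absent. A minimal standalone sketch of that shape (not HPCC code; MockWorkUnit is a hypothetical stand-in for IConstWorkUnit):

#include <iostream>
#include <map>
#include <string>

static unsigned const defaultDaliResultLimit = 10; // MB, as in thorcommon.hpp below

// Hypothetical stand-in for IConstWorkUnit::getDebugValueInt(name, default)
struct MockWorkUnit
{
    std::map<std::string, int> debugValues;
    int getDebugValueInt(const std::string &name, int dft) const
    {
        auto it = debugValues.find(name);
        return it == debugValues.end() ? dft : it->second;
    }
};

int main()
{
    MockWorkUnit wu;
    wu.debugValues["outputLimit"] = 50; // only the legacy name is set

    // Same shape as the diff: the legacy lookup provides the default for the new one
    int outputLimit = wu.getDebugValueInt("outputLimitMb",
                          wu.getDebugValueInt("outputLimit", defaultDaliResultLimit));
    std::cout << outputLimit << " MB\n"; // prints 50: the legacy value is honoured
}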

+ 9 - 2
common/thorhelper/thorcommon.hpp

@@ -26,8 +26,15 @@
 #include "thorhelper.hpp"
 #include "thorxmlwrite.hpp"
 
-#define DALI_RESULT_OUTPUTMAX 2000 // MB
-#define DALI_RESULT_LIMIT_DEFAULT 10 // MB
+static unsigned const defaultDaliResultOutputMax = 2000; // MB
+static unsigned const defaultDaliResultLimit = 10; // MB
+static unsigned const defaultMaxCsvRowSize = 10; // MB
+
+
+#define OPT_OUTPUTLIMIT_LEGACY    "outputLimit"             // OUTPUT MB limit (legacy property name, renamed to outputLimitMb in 5.2)
+#define OPT_OUTPUTLIMIT           "outputLimitMb"           // OUTPUT MB limit                                                               (default = 10 [MB])
+#define OPT_MAXCSVROWSIZE         "maxCsvRowSizeMb"         // Upper limit on CSV read line size                                             (default = 10 [MB])
+
 
 class THORHELPER_API CSizingSerializer : implements IRowSerializerTarget
 {

+ 7 - 6
ecl/hthor/hthor.cpp

@@ -52,7 +52,6 @@ static unsigned const hthorReadBufferSize = 0x10000;
 static offset_t const defaultHThorDiskWriteSizeLimit = I64C(10*1024*1024*1024); //10 GB, per Nigel
 static size32_t const spillStreamBufferSize = 0x10000;
 static unsigned const hthorPipeWaitTimeout = 100; //100ms - fairly arbitrary choice
-static unsigned const defaultMaxCsvRowSize = 10; // MB
 
 using roxiemem::IRowManager;
 using roxiemem::OwnedRoxieRow;
@@ -5955,11 +5954,12 @@ void CHThorWorkUnitWriteActivity::execute()
 {
     unsigned flags = helper.getFlags();
     grouped = (POFgrouped & flags) != 0;
-    size32_t outputLimit = agent.queryWorkUnit()->getDebugValueInt("outputLimit", DALI_RESULT_LIMIT_DEFAULT);
+    // In the absence of OPT_OUTPUTLIMIT, check the pre-5.2 legacy name OPT_OUTPUTLIMIT_LEGACY
+    size32_t outputLimit = agent.queryWorkUnit()->getDebugValueInt(OPT_OUTPUTLIMIT, agent.queryWorkUnit()->getDebugValueInt(OPT_OUTPUTLIMIT_LEGACY, defaultDaliResultLimit));
     if (flags & POFmaxsize)
         outputLimit = helper.getMaxSize();
-    if (outputLimit>DALI_RESULT_OUTPUTMAX)
-        throw MakeStringException(0, "Dali result outputs are restricted to a maximum of %d MB, the current limit is %d MB. A huge dali result usually indicates the ECL needs altering.", DALI_RESULT_OUTPUTMAX, DALI_RESULT_LIMIT_DEFAULT);
+    if (outputLimit>defaultDaliResultOutputMax)
+        throw MakeStringException(0, "Dali result outputs are restricted to a maximum of %d MB, the current limit is %d MB. A huge dali result usually indicates the ECL needs altering.", defaultDaliResultOutputMax, outputLimit);
     assertex(outputLimit<=0x1000); // 32bit limit because MemoryBuffer/CMessageBuffers involved etc.
     outputLimit *= 0x100000;
     MemoryBuffer rowdata;
@@ -6100,7 +6100,8 @@ void CHThorDictionaryWorkUnitWriteActivity::execute()
     }
     size32_t usedCount = rtlDictionaryCount(builder.getcount(), builder.queryrows());
 
-    size32_t outputLimit = agent.queryWorkUnit()->getDebugValueInt("outputLimit", DALI_RESULT_LIMIT_DEFAULT) * 0x100000;
+    // In the absence of OPT_OUTPUTLIMIT, check the pre-5.2 legacy name OPT_OUTPUTLIMIT_LEGACY
+    size32_t outputLimit = agent.queryWorkUnit()->getDebugValueInt(OPT_OUTPUTLIMIT, agent.queryWorkUnit()->getDebugValueInt(OPT_OUTPUTLIMIT_LEGACY, defaultDaliResultLimit)) * 0x100000;
     MemoryBuffer rowdata;
     CThorDemoRowSerializer out(rowdata);
     Owned<IOutputRowSerializer> serializer = input->queryOutputMeta()->createDiskSerializer(agent.queryCodeContext(), activityId);
@@ -8728,7 +8729,7 @@ const void *CHThorDiskGroupAggregateActivity::nextInGroup()
 
 CHThorCsvReadActivity::CHThorCsvReadActivity(IAgentContext &_agent, unsigned _activityId, unsigned _subgraphId, IHThorCsvReadArg &_arg, ThorActivityKind _kind) : CHThorDiskReadBaseActivity(_agent, _activityId, _subgraphId, _arg, _kind), helper(_arg)
 {
-    maxRowSize = agent.queryWorkUnit()->getDebugValueInt("maxCsvRowSize", defaultMaxCsvRowSize);
+    maxRowSize = agent.queryWorkUnit()->getDebugValueInt(OPT_MAXCSVROWSIZE, defaultMaxCsvRowSize) * 1024 * 1024;
 }
 
 CHThorCsvReadActivity::~CHThorCsvReadActivity()
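
A note on the assertex(outputLimit<=0x1000) guard above: the limit is tracked in MB and converted to bytes in 32-bit arithmetic, and 0x1000 MB * 0x100000 is exactly 2^32, which wraps a 32-bit value; the earlier 2000 MB cap keeps real values well clear of that. A standalone sketch of the arithmetic (plain uint32_t stands in for size32_t):

#include <cassert>
#include <cstdint>
#include <iostream>

int main()
{
    uint32_t outputLimitMb = 2000;   // defaultDaliResultOutputMax, well under the wrap point
    assert(outputLimitMb <= 0x1000); // mirrors assertex(); 0x1000 MB * 0x100000 == 2^32
    uint32_t outputLimitBytes = outputLimitMb * 0x100000; // MB -> bytes, as in the code above
    std::cout << outputLimitBytes << " bytes\n";          // 2097152000, fits in 32 bits
}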

+ 30 - 18
roxie/ccd/ccdactivities.cpp

@@ -1022,7 +1022,7 @@ class CRoxieCsvReadActivity;
 class CRoxieXmlReadActivity;
 IInMemoryFileProcessor *createKeyedRecordProcessor(IInMemoryIndexCursor *cursor, CRoxieDiskReadActivity &owner, bool resent);
 IInMemoryFileProcessor *createUnkeyedRecordProcessor(IInMemoryIndexCursor *cursor, CRoxieDiskReadActivity &owner, bool variableDisk, IDirectReader *reader);
-IInMemoryFileProcessor *createCsvRecordProcessor(CRoxieCsvReadActivity &owner, IDirectReader *reader, bool _skipHeader, const IResolvedFile *datafile);
+IInMemoryFileProcessor *createCsvRecordProcessor(CRoxieCsvReadActivity &owner, IDirectReader *reader, bool _skipHeader, const IResolvedFile *datafile, size32_t maxRowSize);
 IInMemoryFileProcessor *createXmlRecordProcessor(CRoxieXmlReadActivity &owner, IDirectReader *reader);
 
 class CRoxieDiskReadActivity : public CRoxieDiskReadBaseActivity
@@ -1075,11 +1075,12 @@ public:
 protected:
     IHThorCsvReadArg *helper;
     const IResolvedFile *datafile;
+    size32_t maxRowSize;
 
 public:
-    CRoxieCsvReadActivity(SlaveContextLogger &_logctx, IRoxieQueryPacket *_packet, HelperFactory *_hFactory,
-                          const CSlaveActivityFactory *_aFactory, IInMemoryIndexManager *_manager, const IResolvedFile *_datafile)
-        : CRoxieDiskReadBaseActivity(_logctx, _packet, _hFactory, _aFactory, _manager, 0, 1, true), datafile(_datafile)
+    CRoxieCsvReadActivity(SlaveContextLogger &_logctx, IRoxieQueryPacket *_packet, HelperFactory *_hFactory, const CSlaveActivityFactory *_aFactory,
+                          IInMemoryIndexManager *_manager, const IResolvedFile *_datafile, size32_t _maxRowSize)
+        : CRoxieDiskReadBaseActivity(_logctx, _packet, _hFactory, _aFactory, _manager, 0, 1, true), datafile(_datafile), maxRowSize(_maxRowSize)
     {
         onCreate();
         helper = (IHThorCsvReadArg *) basehelper;
@@ -1098,7 +1099,7 @@ public:
                     createCsvRecordProcessor(*this,
                                              manager->createReader(readPos, parallelPartNo, numParallel),
                                              packet->queryHeader().channel==1 && !resent,
-                                             varFileInfo ? varFileInfo.get() : datafile));
+                                             varFileInfo ? varFileInfo.get() : datafile, maxRowSize));
         }
         unsigned __int64 rowLimit = helper->getRowLimit();
         unsigned __int64 stopAfter = helper->getChooseNLimit();
@@ -1178,17 +1179,23 @@ public:
 
 class CRoxieCsvReadActivityFactory : public CRoxieDiskBaseActivityFactory
 {
+    size32_t maxRowSize;
+
 public:
     IMPLEMENT_IINTERFACE;
 
     CRoxieCsvReadActivityFactory(IPropertyTree &_graphNode, unsigned _subgraphId, IQueryFactory &_queryFactory, HelperFactory *_helperFactory)
         : CRoxieDiskBaseActivityFactory(_graphNode, _subgraphId, _queryFactory, _helperFactory)
     {
+        maxRowSize = defaultMaxCsvRowSize * 1024 * 1024;
+        IConstWorkUnit *workunit = _queryFactory.queryWorkUnit();
+        if (workunit)
+            maxRowSize = workunit->getDebugValueInt(OPT_MAXCSVROWSIZE, defaultMaxCsvRowSize) * 1024 * 1024;
     }
 
     virtual IRoxieSlaveActivity *createActivity(SlaveContextLogger &logctx, IRoxieQueryPacket *packet) const
     {
-        return new CRoxieCsvReadActivity(logctx, packet, helperFactory, this, manager, datafile);
+        return new CRoxieCsvReadActivity(logctx, packet, helperFactory, this, manager, datafile, maxRowSize);
     }
 
     virtual StringBuffer &toString(StringBuffer &s) const
@@ -1496,12 +1503,13 @@ protected:
     Owned<IDirectReader> reader;
     bool skipHeader;
     const IResolvedFile *datafile;
+    size32_t maxRowSize;
 
 public:
     IMPLEMENT_IINTERFACE;
 
-    CsvRecordProcessor(CRoxieCsvReadActivity &_owner, IDirectReader *_reader, bool _skipHeader, const IResolvedFile *_datafile)
-      : RecordProcessor(NULL), owner(_owner), reader(_reader), datafile(_datafile)
+    CsvRecordProcessor(CRoxieCsvReadActivity &_owner, IDirectReader *_reader, bool _skipHeader, const IResolvedFile *_datafile, size32_t _maxRowSize)
+      : RecordProcessor(NULL), owner(_owner), reader(_reader), datafile(_datafile), maxRowSize(_maxRowSize)
     {
         helper = _owner.helper;
         skipHeader = _skipHeader;
@@ -1538,7 +1546,6 @@ public:
                 break;
             }
             size32_t rowSize = 4096; // MORE - make configurable
-            size32_t maxRowSize = 10*1024*1024; // MORE - make configurable
             size32_t thisLineLength;
             loop
             {
@@ -1548,7 +1555,7 @@ public:
                 if (thisLineLength < rowSize || avail < rowSize)
                     break;
                 if (rowSize == maxRowSize)
-                    throw MakeStringException(0, "Row too big");
+                    throw MakeStringException(0, "File contained a line of length greater than %d bytes.", maxRowSize);
                 if (rowSize >= maxRowSize/2)
                     rowSize = maxRowSize;
                 else
@@ -1677,9 +1684,9 @@ protected:
     Owned<IDirectReader> reader;
 };
 
-IInMemoryFileProcessor *createCsvRecordProcessor(CRoxieCsvReadActivity &owner, IDirectReader *_reader, bool _skipHeader, const IResolvedFile *datafile)
+IInMemoryFileProcessor *createCsvRecordProcessor(CRoxieCsvReadActivity &owner, IDirectReader *_reader, bool _skipHeader, const IResolvedFile *datafile, size32_t maxRowSize)
 {
-    return new CsvRecordProcessor(owner, _reader, _skipHeader, datafile);
+    return new CsvRecordProcessor(owner, _reader, _skipHeader, datafile, maxRowSize);
 }
 
 IInMemoryFileProcessor *createXmlRecordProcessor(CRoxieXmlReadActivity &owner, IDirectReader *_reader)
@@ -4428,11 +4435,12 @@ IRoxieSlaveActivity *CRoxieFetchActivityFactory::createActivity(SlaveContextLogg
 
 class CRoxieCSVFetchActivity : public CRoxieFetchActivityBase
 {
-    CSVSplitter csvSplitter;    
+    CSVSplitter csvSplitter;
+    size32_t maxRowSize;
 
 public:
-    CRoxieCSVFetchActivity(SlaveContextLogger &_logctx, IRoxieQueryPacket *_packet, HelperFactory *_hFactory, const CRoxieFetchActivityFactory *_aFactory, unsigned _maxColumns)
-        : CRoxieFetchActivityBase(_logctx, _packet, _hFactory, _aFactory)
+    CRoxieCSVFetchActivity(SlaveContextLogger &_logctx, IRoxieQueryPacket *_packet, HelperFactory *_hFactory, const CRoxieFetchActivityFactory *_aFactory, unsigned _maxColumns, size32_t _maxRowSize)
+        : CRoxieFetchActivityBase(_logctx, _packet, _hFactory, _aFactory), maxRowSize(_maxRowSize)
     {
         const char * quotes = NULL;
         const char * separators = NULL;
@@ -4462,7 +4470,6 @@ public:
         IHThorCsvFetchArg *h = (IHThorCsvFetchArg *) helper;
         rawStream->reset(pos);
         size32_t rowSize = 4096; // MORE - make configurable
-        size32_t maxRowSize = 10*1024*1024; // MORE - make configurable
         loop
         {
             size32_t avail;
@@ -4470,7 +4477,7 @@ public:
             if (csvSplitter.splitLine(avail, (const byte *)peek) < rowSize || avail < rowSize)
                 break;
             if (rowSize == maxRowSize)
-                throw MakeStringException(0, "Row too big");
+                throw MakeStringException(0, "File contained a line of length greater than %d bytes.", maxRowSize);
             if (rowSize >= maxRowSize/2)
                 rowSize = maxRowSize;
             else
@@ -4541,6 +4548,7 @@ void CRoxieFetchActivityBase::setPartNo(bool filechanged)
 class CRoxieCSVFetchActivityFactory : public CRoxieFetchActivityFactory
 {
     unsigned maxColumns;
+    size32_t maxRowSize;
 
 public:
     CRoxieCSVFetchActivityFactory(IPropertyTree &_graphNode, unsigned _subgraphId, IQueryFactory &_queryFactory, HelperFactory *_helperFactory)
@@ -4550,11 +4558,15 @@ public:
         maxColumns = helper->getMaxColumns();
         ICsvParameters *csvInfo = helper->queryCsvParameters();
         assertex(!csvInfo->queryEBCDIC());
+        maxRowSize = defaultMaxCsvRowSize * 1024 * 1024;
+        IConstWorkUnit *workunit = _queryFactory.queryWorkUnit();
+        if (workunit)
+            maxRowSize = workunit->getDebugValueInt(OPT_MAXCSVROWSIZE, defaultMaxCsvRowSize) * 1024 * 1024;
     }
 
     virtual IRoxieSlaveActivity *createActivity(SlaveContextLogger &logctx, IRoxieQueryPacket *packet) const
     {
-        return new CRoxieCSVFetchActivity(logctx, packet, helperFactory, this, maxColumns);
+        return new CRoxieCSVFetchActivity(logctx, packet, helperFactory, this, maxColumns, maxRowSize);
     }
 };
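
The splitLine loops in this file share one buffer growth policy: start at 4 KB, double until the line fits, clamp the final step to the configurable maxRowSize, and fail once that ceiling is hit. A standalone sketch of just that policy (not HPCC code; the splitLine call is elided):

#include <cstdint>
#include <stdexcept>
#include <string>

uint32_t growRowBuffer(uint32_t rowSize, uint32_t maxRowSize)
{
    if (rowSize == maxRowSize)
        throw std::runtime_error("File contained a line of length greater than "
                                 + std::to_string(maxRowSize) + " bytes.");
    if (rowSize >= maxRowSize / 2)
        return maxRowSize; // clamp the last doubling so we land exactly on the ceiling
    return rowSize * 2;
}

int main()
{
    uint32_t maxRowSize = 10 * 1024 * 1024; // defaultMaxCsvRowSize in bytes
    for (uint32_t rowSize = 4096; ; rowSize = growRowBuffer(rowSize, maxRowSize))
    {
        // ...the real code calls csvSplitter.splitLine() here and breaks once
        // the current line fits within rowSize...
        if (rowSize == maxRowSize)
            break; // sketch only: stop at the ceiling instead of throwing
    }
}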
 

+ 25 - 8
roxie/ccd/ccdserver.cpp

@@ -19925,9 +19925,12 @@ public:
             if (helper.getFlags() & POFmaxsize)
                 outputLimit = helper.getMaxSize();
             else
-                outputLimit = workunit->getDebugValueInt("outputLimit", DALI_RESULT_LIMIT_DEFAULT);
-            if (outputLimit>DALI_RESULT_OUTPUTMAX)
-                throw MakeStringException(0, "Dali result outputs are restricted to a maximum of %d MB, the current limit is %d MB. A huge dali result usually indicates the ECL needs altering.", DALI_RESULT_OUTPUTMAX, DALI_RESULT_LIMIT_DEFAULT);
+            {
+                // In the absence of OPT_OUTPUTLIMIT, check the pre-5.2 legacy name OPT_OUTPUTLIMIT_LEGACY
+                outputLimit = workunit->getDebugValueInt(OPT_OUTPUTLIMIT, workunit->getDebugValueInt(OPT_OUTPUTLIMIT_LEGACY, defaultDaliResultLimit));
+            }
+            if (outputLimit>defaultDaliResultOutputMax)
+                throw MakeStringException(0, "Dali result outputs are restricted to a maximum of %d MB, the current limit is %d MB. A huge dali result usually indicates the ECL needs altering.", defaultDaliResultOutputMax, outputLimit);
             assertex(outputLimit<=0x1000); // 32bit limit because MemoryBuffer/CMessageBuffers involved etc.
             outputLimitBytes = outputLimit * 0x100000;
         }
@@ -20803,12 +20806,13 @@ class CRoxieServerCsvReadActivity : public CRoxieServerDiskReadBaseActivity
     const char *separators;
     const char *terminators;
     const char *escapes;
+    size32_t maxRowSize;
 public:
     CRoxieServerCsvReadActivity(const IRoxieServerActivityFactory *_factory, IProbeManager *_probeManager, const RemoteActivityId &_remoteId,
                                 unsigned _numParts, bool _isLocal, bool _sorted, bool _maySkip, IInMemoryIndexManager *_manager,
-                                const char *_quotes, const char *_separators, const char *_terminators, const char *_escapes)
+                                const char *_quotes, const char *_separators, const char *_terminators, const char *_escapes, size32_t _maxRowSize)
         : CRoxieServerDiskReadBaseActivity(_factory, _probeManager, _remoteId, _numParts, _isLocal, _sorted, _maySkip, _manager),
-          quotes(_quotes), separators(_separators), terminators(_terminators), escapes(_escapes)
+          quotes(_quotes), separators(_separators), terminators(_terminators), escapes(_escapes), maxRowSize(_maxRowSize)
     {
         compoundHelper = NULL;
         readHelper = (IHThorCsvReadArg *)&helper;
@@ -20865,7 +20869,6 @@ public:
                 }
                 // MORE - there are rumours of a  csvSplitter that operates on a stream... if/when it exists, this should use it
                 size32_t rowSize = 4096; // MORE - make configurable
-                size32_t maxRowSize = 10*1024*1024; // MORE - make configurable
                 size32_t thisLineLength;
                 loop
                 {
@@ -20875,7 +20878,7 @@ public:
                     if (thisLineLength < rowSize || avail < rowSize)
                         break;
                     if (rowSize == maxRowSize)
-                        throw MakeStringException(0, "Row too big");
+                        throw MakeStringException(0, "File contained a line of length greater than %d bytes.", maxRowSize);
                     if (rowSize >= maxRowSize/2)
                         rowSize = maxRowSize;
                     else
@@ -21372,6 +21375,7 @@ public:
     const char *separators;
     const char *terminators;
     const char *escapes;
+    size32_t maxCsvRowSize;
 
     CRoxieServerDiskReadActivityFactory(unsigned _id, unsigned _subgraphId, IQueryFactory &_queryFactory, HelperFactory *_helperFactory, ThorActivityKind _kind, const RemoteActivityId &_remoteId, IPropertyTree &_graphNode)
         : CRoxieServerActivityFactory(_id, _subgraphId, _queryFactory, _helperFactory, _kind), remoteId(_remoteId)
@@ -21408,6 +21412,19 @@ public:
                     manager.setown(getEmptyIndexManager());
             }
         }
+        switch (kind)
+        {
+            case TAKcsvread:
+            {
+                maxCsvRowSize = defaultMaxCsvRowSize * 1024 * 1024;
+                IConstWorkUnit *workunit = _queryFactory.queryWorkUnit();
+                if (workunit)
+                    maxCsvRowSize = workunit->getDebugValueInt(OPT_MAXCSVROWSIZE, defaultMaxCsvRowSize) * 1024 * 1024;
+                break;
+            }
+            default:
+                maxCsvRowSize = 0; // unused
+        }
     }
 
     virtual IRoxieServerActivity *createActivity(IProbeManager *_probeManager) const
@@ -21417,7 +21434,7 @@ public:
         {
         case TAKcsvread:
             return new CRoxieServerCsvReadActivity(this, _probeManager, remoteId, numParts, isLocal, sorted, maySkip, manager,
-                                                   quotes, separators, terminators, escapes);
+                                                   quotes, separators, terminators, escapes, maxCsvRowSize);
         case TAKxmlread:
         case TAKjsonread:
             return new CRoxieServerXmlReadActivity(this, _probeManager, remoteId, numParts, isLocal, sorted, maySkip, manager);
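
A design note on the factory changes above: the workunit option is resolved once, at factory construction, and the cached byte value is handed to every activity instance, instead of each activity re-reading the workunit. A standalone sketch of the pattern (WorkUnit and Activity are hypothetical stand-ins):

#include <cstdint>
#include <memory>

struct WorkUnit { int getDebugValueInt(const char *, int dft) const { return dft; } };

struct Activity
{
    uint32_t maxRowSize;
    explicit Activity(uint32_t _maxRowSize) : maxRowSize(_maxRowSize) {}
};

class ActivityFactory
{
    uint32_t maxRowSize;
public:
    explicit ActivityFactory(const WorkUnit *workunit)
    {
        uint32_t const defaultMaxCsvRowSize = 10; // MB
        maxRowSize = defaultMaxCsvRowSize * 1024 * 1024; // fallback when no workunit
        if (workunit)
            maxRowSize = workunit->getDebugValueInt("maxCsvRowSizeMb", defaultMaxCsvRowSize) * 1024 * 1024;
    }
    std::unique_ptr<Activity> createActivity() const
    {
        return std::make_unique<Activity>(maxRowSize); // every instance shares the cached limit
    }
};

int main()
{
    WorkUnit wu;
    ActivityFactory factory(&wu);
    return factory.createActivity()->maxRowSize ? 0 : 1;
}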

+ 1 - 3
thorlcr/activities/csvread/thcsvrslave.cpp

@@ -33,8 +33,6 @@
 #include "csvsplitter.hpp"
 #include "thdiskbaseslave.ipp"
 
-static unsigned const defaultMaxCsvRowSize = 10; // MB
-
 class CCsvReadSlaveActivity : public CDiskReadSlaveActivityBase, public CThorDataLink
 {
     IHThorCsvReadArg *helper;
@@ -88,7 +86,7 @@ class CCsvReadSlaveActivity : public CDiskReadSlaveActivityBase, public CThorDat
             //Initialise information...
             ICsvParameters * csvInfo = activity.helper->queryCsvParameters();
             csvSplitter.init(activity.helper->getMaxColumns(), csvInfo, activity.csvQuote, activity.csvSeparate, activity.csvTerminate, activity.csvEscape);
-            maxRowSize = activity.getOptInt(THOROPT_MAXCSVROWSIZE, defaultMaxCsvRowSize) * 1024 * 1024;
+            maxRowSize = activity.getOptInt(OPT_MAXCSVROWSIZE, defaultMaxCsvRowSize) * 1024 * 1024;
         }
         virtual void setPart(IPartDescriptor *partDesc, unsigned partNoSerialized)
         {

+ 4 - 3
thorlcr/activities/wuidwrite/thwuidwrite.cpp

@@ -53,9 +53,10 @@ public:
     virtual void init()
     {
         CMasterActivity::init();
-        workunitWriteLimit = activityMaxSize ? activityMaxSize : getOptInt(THOROPT_OUTPUTLIMIT, DEFAULT_WUIDWRITE_LIMIT);
-        if (workunitWriteLimit>DALI_RESULT_OUTPUTMAX)
-            throw MakeActivityException(this, 0, "Configured max result size, %d MB, exceeds absolute max limit of %d MB. A huge Dali result usually indicates the ECL needs altering.", workunitWriteLimit, DALI_RESULT_OUTPUTMAX);
+        // In the absence of OPT_OUTPUTLIMIT, check the pre-5.2 legacy name OPT_OUTPUTLIMIT_LEGACY
+        workunitWriteLimit = activityMaxSize ? activityMaxSize : getOptInt(OPT_OUTPUTLIMIT, getOptInt(OPT_OUTPUTLIMIT_LEGACY, defaultDaliResultLimit));
+        if (workunitWriteLimit>defaultDaliResultOutputMax)
+            throw MakeActivityException(this, 0, "Configured max result size, %d MB, exceeds absolute max limit of %d MB. A huge Dali result usually indicates the ECL needs altering.", workunitWriteLimit, defaultDaliResultOutputMax);
         assertex(workunitWriteLimit<=0x1000); // 32bit limit because MemoryBuffer/CMessageBuffers involved etc.
         workunitWriteLimit *= 0x100000;
     }

+ 0 - 2
thorlcr/thorutil/thormisc.hpp

@@ -62,14 +62,12 @@
 #define THOROPT_PARALLEL_FUNNEL       "parallelFunnel"          // Use parallel funnel impl. if !ordered                                         (default = true)
 #define THOROPT_SORT_MAX_DEVIANCE     "sort_max_deviance"       // Max (byte) variance allowed during sort partitioning                          (default = 10Mb)
 #define THOROPT_OUTPUT_FLUSH_THRESHOLD "output_flush_threshold" // When above limit, workunit result is flushed (committed to Dali)              (default = -1 [off])
-#define THOROPT_OUTPUTLIMIT           "outputLimit"             // OUTPUT Mb limit                                                               (default = 10)
 #define THOROPT_PARALLEL_MATCH        "parallel_match"          // Use multi-threaded join helper (retains sort order without unsorted_output)   (default = false)
 #define THOROPT_UNSORTED_OUTPUT       "unsorted_output"         // Allow Join results to be reordered, implies parallel match                     (default = false)
 #define THOROPT_JOINHELPER_THREADS    "joinHelperThreads"       // Number of threads to use in threaded variety of join helper
 #define THOROPT_LKJOIN_LOCALFAILOVER  "lkjoin_localfailover"    // Force SMART to failover to distributed local lookup join (for testing only)   (default = false)
 #define THOROPT_LKJOIN_HASHJOINFAILOVER "lkjoin_hashjoinfailover" // Force SMART to failover to hash join (for testing only)                     (default = false)
 #define THOROPT_MAX_KERNLOG           "max_kern_level"          // Max kernel logging level, to push to workunit, -1 to disable                  (default = 3)
-#define THOROPT_MAXCSVROWSIZE         "maxCsvRowSize"           // Upper limit on csv read line size                                             (default = 10 [MB])
 
 #define INITIAL_SELFJOIN_MATCH_WARNING_LEVEL 20000  // max of row matches before selfjoin emits warning