roxierow.cpp

/*##############################################################################

    HPCC SYSTEMS software Copyright (C) 2012 HPCC Systems.

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
############################################################################## */

#include "jexcept.hpp"
#include "jcrc.hpp"
#include "thorcommon.ipp" // for CachedOutputMetaData
#include "roxierow.hpp"
//Helper classes are used to configure the allocator, and can add extra data to the end of each row.
//Checking is enabled by setting a bit in the allocatorId.
class NoCheckingHelper
{
public:
    enum {
        extraSize = 0,
        allocatorCheckFlag = 0x00000000
    };
    static inline void setCheck(size32_t size, void * ptr) {}
    static inline bool isValid(const void * ptr) { return true; }
};
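
//A minimal sketch (illustration only, not part of the build) of what another checking
//helper could look like - the allocator templates below only require the members shown
//here.  The flag value 0x00300000 is hypothetical, and a real helper would also need a
//matching case added to isRowCheckValid() below.
//
//  class ByteCheckingHelper
//  {
//  public:
//      enum {
//          extraSize = sizeof(byte),       // trailing bytes reserved in each row
//          allocatorCheckFlag = 0x00300000|ACTIVITY_FLAG_NEEDSDESTRUCTOR
//      };
//      static inline void setCheck(size32_t size, void * ptr) { /* stamp the trailing byte */ }
//      static inline bool isValid(const void * ptr) { return true; /* compare the trailing byte */ }
//  };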
//NOTE: If a row requires checking then it will also have the bit set to indicate it requires a destructor,
//so that rows are checked on destruction.
//Therefore isValid() tests whether the destructor bit is set before validating, which protects us from uninitialised crcs.
class Crc16CheckingHelper
{
public:
    enum {
        extraSize = sizeof(unsigned short),
        allocatorCheckFlag = 0x00100000|ACTIVITY_FLAG_NEEDSDESTRUCTOR
    };
    static inline void setCheck(size32_t size, void * _ptr)
    {
        byte * ptr = static_cast<byte *>(_ptr);
        memsize_t capacity = RoxieRowCapacity(ptr);
        if (capacity < size + extraSize)
            throw MakeStringException(0, "Data was written past the end of the row - allocated %d, written %d", (size32_t)(capacity - extraSize), size);
        memset(ptr+size, 0, capacity - size - extraSize);
        unsigned short * check = reinterpret_cast<unsigned short *>(ptr + capacity - extraSize);
        *check = crc16(ptr, capacity-extraSize, 0);
    }
    static inline bool isValid(const void * _ptr)
    {
        if (RoxieRowHasDestructor(_ptr))
        {
            const byte * ptr = static_cast<const byte *>(_ptr);
            memsize_t capacity = RoxieRowCapacity(ptr);
            const unsigned short * check = reinterpret_cast<const unsigned short *>(ptr + capacity - extraSize);
            return *check == crc16(ptr, capacity-extraSize, 0);
        }
        return true;
    }
};
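
//For reference, the row layout produced by setCheck() above, where capacity is the
//value reported by RoxieRowCapacity():
//
//  |<------------------------ capacity ------------------------>|
//  | user data (size bytes) | zero padding | check (extraSize=2) |
//
//The padding is zeroed so the stored crc is deterministic however much of the
//allocated row was actually written.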
//This is here as a demonstration of an alternative implementation... crc16 is possibly a bit expensive.
class Sum16CheckingHelper
{
public:
    enum {
        extraSize = sizeof(unsigned short),
        allocatorCheckFlag = 0x00200000|ACTIVITY_FLAG_NEEDSDESTRUCTOR
    };
    static inline void setCheck(size32_t size, void * _ptr)
    {
        byte * ptr = static_cast<byte *>(_ptr);
        memsize_t capacity = RoxieRowCapacity(ptr);
        if (capacity < size + extraSize)
            throw MakeStringException(0, "Data was written past the end of the row - allocated %d, written %d", (size32_t)(capacity - extraSize), size);
        memset(ptr+size, 0, capacity - size - extraSize);
        unsigned short * check = reinterpret_cast<unsigned short *>(ptr + capacity - extraSize);
        *check = chksum16(ptr, capacity-extraSize);
    }
    static inline bool isValid(const void * _ptr)
    {
        if (RoxieRowHasDestructor(_ptr))
        {
            const byte * ptr = static_cast<const byte *>(_ptr);
            memsize_t capacity = RoxieRowCapacity(ptr);
            const unsigned short * check = reinterpret_cast<const unsigned short *>(ptr + capacity - extraSize);
            return chksum16(ptr, capacity-extraSize) == *check;
        }
        return true;
    }
};
bool isRowCheckValid(unsigned allocatorId, const void * row)
{
    switch (allocatorId & ALLOCATORID_CHECK_MASK)
    {
    case NoCheckingHelper::allocatorCheckFlag & ALLOCATORID_CHECK_MASK:
        return true;
    case Crc16CheckingHelper::allocatorCheckFlag & ALLOCATORID_CHECK_MASK:
        return Crc16CheckingHelper::isValid(row);
    case Sum16CheckingHelper::allocatorCheckFlag & ALLOCATORID_CHECK_MASK:
        return Sum16CheckingHelper::isValid(row);
    default:
        UNIMPLEMENTED;
    }
}
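
//Example (a sketch): a caller holding a row and its allocatorId can validate the row
//directly.  verifyRow() is hypothetical - the engines normally go through
//RoxieRowCheckValid()/checkValid() instead.
//
//  void verifyRow(unsigned allocatorId, const void * row)
//  {
//      if (!isRowCheckValid(allocatorId, row))
//          throw MakeStringException(0, "Row failed its integrity check");
//  }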
//--------------------------------------------------------------------------------------------------------------------

//MORE: Function to calculate the total size of a row - requires access to a rowallocator.

//--------------------------------------------------------------------------------------------------------------------

class RoxieEngineRowAllocatorBase : public CInterface, implements IEngineRowAllocator
{
public:
    RoxieEngineRowAllocatorBase(roxiemem::IRowManager & _rowManager, IOutputMetaData * _meta, unsigned _activityId, unsigned _allocatorId)
        : rowManager(_rowManager), meta(_meta)
    {
        activityId = _activityId;
        allocatorId = _allocatorId;
    }

    IMPLEMENT_IINTERFACE

//interface IEngineRowsetAllocator
    virtual byte * * createRowset(unsigned count)
    {
        if (count == 0)
            return NULL;
        return (byte **) rowManager.allocate(count * sizeof(void *), allocatorId | ACTIVITY_FLAG_ISREGISTERED);
    }

    virtual void releaseRowset(unsigned count, byte * * rowset)
    {
        rtlReleaseRowset(count, rowset);
    }

    virtual byte * * linkRowset(byte * * rowset)
    {
        return rtlLinkRowset(rowset);
    }

    virtual byte * * appendRowOwn(byte * * rowset, unsigned newRowCount, void * row)
    {
        byte * * expanded = doReallocRows(rowset, newRowCount-1, newRowCount);
        expanded[newRowCount-1] = (byte *)row;
        return expanded;
    }

    virtual byte * * reallocRows(byte * * rowset, unsigned oldRowCount, unsigned newRowCount)
    {
        //New rows (if any) aren't cleared...
        return doReallocRows(rowset, oldRowCount, newRowCount);
    }

    virtual void releaseRow(const void * row)
    {
        ReleaseRoxieRow(row);
    }

    virtual void * linkRow(const void * row)
    {
        LinkRoxieRow(row);
        return const_cast<void *>(row);
    }

    virtual IOutputMetaData * queryOutputMeta()
    {
        return meta.queryOriginal();
    }
    virtual unsigned queryActivityId()
    {
        return activityId;
    }
    virtual StringBuffer &getId(StringBuffer &idStr)
    {
        return idStr.append(activityId); // MORE - may want more context info in here
    }
    virtual IOutputRowSerializer *createDiskSerializer(ICodeContext *ctx)
    {
        return meta.createDiskSerializer(ctx, activityId);
    }
    virtual IOutputRowDeserializer *createDiskDeserializer(ICodeContext *ctx)
    {
        return meta.createDiskDeserializer(ctx, activityId);
    }
    virtual IOutputRowSerializer *createInternalSerializer(ICodeContext *ctx)
    {
        return meta.createInternalSerializer(ctx, activityId);
    }
    virtual IOutputRowDeserializer *createInternalDeserializer(ICodeContext *ctx)
    {
        return meta.createInternalDeserializer(ctx, activityId);
    }

protected:
    inline byte * * doReallocRows(byte * * rowset, unsigned oldRowCount, unsigned newRowCount)
    {
        if (!rowset)
            return createRowset(newRowCount);

        //Occasionally (in aggregates) we may try to append to a shared rowset. In this case we need to clone the
        //target rowset. The rowset may become unshared immediately afterwards, but that is merely inefficient, not incorrect.
        if (RoxieRowIsShared(rowset))
        {
            byte * * newset = createRowset(newRowCount);
            for (unsigned i=0; i < oldRowCount; i++)
            {
                byte * cur = rowset[i];
                LinkRoxieRow(cur);
                newset[i] = cur;
            }
            ReleaseRoxieRow(rowset);
            return newset;
        }

        //This would be more efficient if the previous capacity was stored by the caller - or if capacity() was cheaper.
        if (newRowCount * sizeof(void *) <= RoxieRowCapacity(rowset))
            return rowset;

        memsize_t capacity;
        void * ptr = (void *)rowset;
        rowManager.resizeRow(capacity, ptr, oldRowCount * sizeof(void *), newRowCount * sizeof(void *), allocatorId | ACTIVITY_FLAG_ISREGISTERED);
        return (byte * *)ptr;
    }

protected:
    roxiemem::IRowManager & rowManager;
    const CachedOutputMetaData meta;
    unsigned activityId;
    unsigned allocatorId;
};
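
//Usage sketch for the rowset helpers above: appendRowOwn() gives copy-on-write
//semantics, so it is safe even if the rowset is still shared elsewhere.  makeRow()
//is a hypothetical stand-in for however the rows are actually produced.
//
//  byte * * rows = NULL;                                       // an empty rowset
//  for (unsigned i = 0; i < count; i++)
//      rows = allocator->appendRowOwn(rows, i+1, makeRow(i));  // takes ownership of the row
//  ...
//  allocator->releaseRowset(count, rows);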
template <class CHECKER>
class RoxieEngineFixedRowAllocator : public RoxieEngineRowAllocatorBase
{
public:
    RoxieEngineFixedRowAllocator(roxiemem::IRowManager & _rowManager, IOutputMetaData * _meta, unsigned _activityId, unsigned _allocatorId, roxiemem::RoxieHeapFlags _flags)
        : RoxieEngineRowAllocatorBase(_rowManager, _meta, _activityId, _allocatorId)
    {
        unsigned flags = _flags;
        if (meta.needsDestruct() || CHECKER::allocatorCheckFlag)
            flags |= roxiemem::RHFhasdestructor;
        heap.setown(rowManager.createFixedRowHeap(meta.getFixedSize()+CHECKER::extraSize, allocatorId | ACTIVITY_FLAG_ISREGISTERED | CHECKER::allocatorCheckFlag, (roxiemem::RoxieHeapFlags)flags));
    }

    virtual void * createRow()
    {
        return heap->allocate();
    }

    virtual void * createRow(size32_t & allocatedSize)
    {
        allocatedSize = meta.getFixedSize();
        return heap->allocate();
    }

    virtual void * resizeRow(size32_t newSize, void * row, size32_t & size)
    {
        throwUnexpected();
        return NULL;
    }

    virtual void * finalizeRow(size32_t finalSize, void * row, size32_t oldSize)
    {
        if (!meta.needsDestruct() && !CHECKER::allocatorCheckFlag)
            return row;
        CHECKER::setCheck(finalSize, row);
        return heap->finalizeRow(row);
    }

protected:
    Owned<roxiemem::IFixedRowHeap> heap;
};
template <class CHECKER>
class RoxieEngineVariableRowAllocator : public RoxieEngineRowAllocatorBase
{
public:
    RoxieEngineVariableRowAllocator(roxiemem::IRowManager & _rowManager, IOutputMetaData * _meta, unsigned _activityId, unsigned _allocatorId, roxiemem::RoxieHeapFlags _flags)
        : RoxieEngineRowAllocatorBase(_rowManager, _meta, _activityId, _allocatorId)
    {
        unsigned flags = _flags;
        if (meta.needsDestruct() || CHECKER::allocatorCheckFlag)
            flags |= roxiemem::RHFhasdestructor;
        heap.setown(rowManager.createVariableRowHeap(allocatorId | ACTIVITY_FLAG_ISREGISTERED | CHECKER::allocatorCheckFlag, (roxiemem::RoxieHeapFlags)flags));
    }

    virtual void * createRow()
    {
        memsize_t allocSize = meta.getInitialSize();
        memsize_t capacity;
        return heap->allocate(allocSize+CHECKER::extraSize, capacity);
    }

    virtual void * createRow(size32_t & allocatedSize)
    {
        const memsize_t allocSize = meta.getInitialSize();
        memsize_t newCapacity; // always initialised by allocate
        void * row = heap->allocate(allocSize+CHECKER::extraSize, newCapacity);
        //This test should get constant folded to avoid the decrement when not checked.
        if (CHECKER::extraSize)
            newCapacity -= CHECKER::extraSize;
        allocatedSize = newCapacity;
        return row;
    }

    virtual void * resizeRow(size32_t newSize, void * row, size32_t & size)
    {
        const size32_t oldsize = size; // don't need to include the extra checking bytes
        memsize_t newCapacity; // always initialised by resizeRow
        void * newrow = heap->resizeRow(row, oldsize, newSize+CHECKER::extraSize, newCapacity);
        if (CHECKER::extraSize)
            newCapacity -= CHECKER::extraSize;
        size = newCapacity;
        return newrow;
    }

    virtual void * finalizeRow(size32_t finalSize, void * row, size32_t oldSize)
    {
        if (!meta.needsDestruct() && !CHECKER::allocatorCheckFlag)
            return row;
        void * newrow = heap->finalizeRow(row, oldSize, finalSize+CHECKER::extraSize);
        CHECKER::setCheck(finalSize, newrow);
        return newrow;
    }

protected:
    Owned<roxiemem::IVariableRowHeap> heap;
};
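
//Typical lifecycle of a variable-size row (a sketch - the "write into row" steps
//stand in for whatever actually fills the row):
//
//  size32_t capacity;
//  void * row = allocator->createRow(capacity);          // getInitialSize() bytes available
//  size32_t used = 0;
//  // ... write into row; whenever more than 'capacity' bytes are needed:
//  row = allocator->resizeRow(needed, row, capacity);    // capacity is updated on return
//  // ... when the row is complete:
//  row = allocator->finalizeRow(used, row, capacity);    // may shrink; stamps the check word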
IEngineRowAllocator * createRoxieRowAllocator(roxiemem::IRowManager & rowManager, IOutputMetaData * meta, unsigned activityId, unsigned allocatorId, roxiemem::RoxieHeapFlags flags)
{
    if (meta->getFixedSize() != 0)
        return new RoxieEngineFixedRowAllocator<NoCheckingHelper>(rowManager, meta, activityId, allocatorId, flags);
    else
        return new RoxieEngineVariableRowAllocator<NoCheckingHelper>(rowManager, meta, activityId, allocatorId, flags);
}

IEngineRowAllocator * createCrcRoxieRowAllocator(roxiemem::IRowManager & rowManager, IOutputMetaData * meta, unsigned activityId, unsigned allocatorId, roxiemem::RoxieHeapFlags flags)
{
    if (meta->getFixedSize() != 0)
        return new RoxieEngineFixedRowAllocator<Crc16CheckingHelper>(rowManager, meta, activityId, allocatorId, flags);
    else
        return new RoxieEngineVariableRowAllocator<Crc16CheckingHelper>(rowManager, meta, activityId, allocatorId, flags);
}
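
//Example (a sketch, assuming an IRowManager and meta are already to hand): the crc
//variant costs a crc16 on finalize and on destroy, so it is typically reserved for
//checking/debug configurations.
//
//  Owned<IEngineRowAllocator> plain =
//      createRoxieRowAllocator(*rowManager, meta, activityId, allocatorId, roxiemem::RHFnone);
//  Owned<IEngineRowAllocator> checked =
//      createCrcRoxieRowAllocator(*rowManager, meta, activityId, allocatorId, roxiemem::RHFnone);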
#pragma pack(push,1) // hashing on members, so ensure contiguous
struct AllocatorKey
{
    IOutputMetaData *meta;
    unsigned activityId;
    roxiemem::RoxieHeapFlags flags;
    AllocatorKey(IOutputMetaData *_meta, unsigned _activityId, roxiemem::RoxieHeapFlags _flags)
        : meta(_meta), activityId(_activityId), flags(_flags)
    {
    }
    bool operator==(AllocatorKey const &other) const
    {
        return (meta == other.meta) && (activityId == other.activityId) && (flags == other.flags);
    }
};
#pragma pack(pop)

class CAllocatorCacheItem : public OwningHTMapping<IEngineRowAllocator, AllocatorKey>
{
    Linked<IOutputMetaData> meta;
    unsigned allocatorId;
public:
    CAllocatorCacheItem(IEngineRowAllocator *allocator, unsigned _allocatorId, AllocatorKey &key)
        : OwningHTMapping<IEngineRowAllocator, AllocatorKey>(*allocator, key), allocatorId(_allocatorId)
    {
        meta.set(key.meta);
    }
    unsigned queryAllocatorId() const { return allocatorId; }
};

class CAllocatorCache : public CSimpleInterface, implements IRowAllocatorMetaActIdCache
{
    OwningSimpleHashTableOf<CAllocatorCacheItem, AllocatorKey> cache;
    IArrayOf<IEngineRowAllocator> allAllocators;
    mutable SpinLock allAllocatorsLock;
    Owned<roxiemem::IRowManager> rowManager;
    IRowAllocatorMetaActIdCacheCallback *callback;

    inline CAllocatorCacheItem *_lookup(IOutputMetaData *meta, unsigned activityId, roxiemem::RoxieHeapFlags flags) const
    {
        AllocatorKey key(meta, activityId, flags);
        return cache.find(key);
    }
public:
    IMPLEMENT_IINTERFACE_USING(CSimpleInterface);

    CAllocatorCache(IRowAllocatorMetaActIdCacheCallback *_callback) : callback(_callback)
    {
    }
// IRowAllocatorMetaActIdCache
    inline IEngineRowAllocator *lookup(IOutputMetaData *meta, unsigned activityId, roxiemem::RoxieHeapFlags flags) const
    {
        SpinBlock b(allAllocatorsLock);
        CAllocatorCacheItem *container = _lookup(meta, activityId, flags);
        if (!container)
            return NULL;
        return &container->queryElement();
    }
    virtual IEngineRowAllocator *ensure(IOutputMetaData * meta, unsigned activityId, roxiemem::RoxieHeapFlags flags)
    {
        SpinBlock b(allAllocatorsLock);
        loop
        {
            CAllocatorCacheItem *container = _lookup(meta, activityId, flags);
            if (container)
            {
                if (0 == (roxiemem::RHFunique & flags))
                    return LINK(&container->queryElement());
                // if in the cache but unique, reuse the cached allocatorId
                SpinUnblock b(allAllocatorsLock);
                return callback->createAllocator(meta, activityId, container->queryAllocatorId(), flags);
            }
            // NB: the 1st RHFunique allocator of a given type is added to 'allAllocators';
            // subsequent requests for the same type of unique allocator share its allocatorId,
            // so the 1st allocator is reused by all instances for onDestroy() etc.
            assertex(allAllocators.ordinality() < ALLOCATORID_MASK);
            unsigned allocatorId = allAllocators.ordinality();
            IEngineRowAllocator *ret;
            {
                SpinUnblock b(allAllocatorsLock);
                ret = callback->createAllocator(meta, activityId, allocatorId, flags);
                assertex(ret);
            }
            if (allocatorId == allAllocators.ordinality())
            {
                AllocatorKey key(meta, activityId, flags);
                container = new CAllocatorCacheItem(LINK(ret), allocatorId, key);
                cache.replace(*container);
                allAllocators.append(*LINK(ret));
                return ret;
            }
            else
            {
                // someone else has used the allocatorId we were going to use... release and try again (hopefully this happens very seldom)
                ret->Release();
            }
        }
    }
    virtual unsigned items() const
    {
        return allAllocators.ordinality();
    }
// roxiemem::IRowAllocatorCache
    virtual unsigned getActivityId(unsigned cacheId) const
    {
        unsigned allocatorIndex = (cacheId & ALLOCATORID_MASK);
        SpinBlock b(allAllocatorsLock);
        if (allAllocators.isItem(allocatorIndex))
            return allAllocators.item(allocatorIndex).queryActivityId();
        else
        {
            //assert(false);
            return 12345678; // Used for tracing, better than a crash...
        }
    }
    virtual StringBuffer &getActivityDescriptor(unsigned cacheId, StringBuffer &out) const
    {
        unsigned allocatorIndex = (cacheId & ALLOCATORID_MASK);
        SpinBlock b(allAllocatorsLock);
        if (allAllocators.isItem(allocatorIndex))
            return allAllocators.item(allocatorIndex).getId(out);
        else
        {
            assert(false);
            return out.append("unknown"); // Used for tracing, better than a crash...
        }
    }
    virtual void onDestroy(unsigned cacheId, void *row) const
    {
        IEngineRowAllocator *allocator;
        unsigned allocatorIndex = (cacheId & ALLOCATORID_MASK);
        {
            SpinBlock b(allAllocatorsLock); // just protect the access to the array - don't keep it locked for the call of destruct, or it may deadlock
            if (allAllocators.isItem(allocatorIndex))
                allocator = &allAllocators.item(allocatorIndex);
            else
            {
                assert(false);
                return;
            }
        }
        if (!RoxieRowCheckValid(cacheId, row))
        {
            throw MakeStringException(0, "ERROR: crc check failure destroying row!");
        }
        allocator->queryOutputMeta()->destruct((byte *) row);
    }
    virtual void onClone(unsigned cacheId, void *row) const
    {
        IEngineRowAllocator *allocator;
        unsigned allocatorIndex = (cacheId & ALLOCATORID_MASK);
        {
            SpinBlock b(allAllocatorsLock); // just protect the access to the array - don't keep it locked for the call of destruct, or it may deadlock
            if (allAllocators.isItem(allocatorIndex))
                allocator = &allAllocators.item(allocatorIndex);
            else
            {
                assert(false);
                return;
            }
        }
        if (!RoxieRowCheckValid(cacheId, row))
        {
            throw MakeStringException(0, "ERROR: crc check failure cloning row!");
        }
        //This should only be called if the destructor needs to be called - so don't bother checking
        ChildRowLinkerWalker walker;
        allocator->queryOutputMeta()->walkIndirectMembers((const byte *)row, walker);
    }
    virtual void checkValid(unsigned cacheId, const void *row) const
    {
        if (!RoxieRowCheckValid(cacheId, row))
        {
            throw MakeStringException(0, "ERROR: crc check failure checking row!");
        }
    }
};
IRowAllocatorMetaActIdCache *createRowAllocatorCache(IRowAllocatorMetaActIdCacheCallback *callback)
{
    return new CAllocatorCache(callback);
}
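
//Usage sketch (testAllocatorCache() below does the same thing in full): the callback
//owns construction, while the cache deduplicates on (meta, activityId, flags) and
//hands out allocatorIds.  'callback' here is a hypothetical instance of
//IRowAllocatorMetaActIdCacheCallback.
//
//  Owned<IRowAllocatorMetaActIdCache> cache = createRowAllocatorCache(&callback);
//  Owned<IEngineRowAllocator> alloc = cache->ensure(meta, activityId, roxiemem::RHFnone);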
#ifdef _USE_CPPUNIT
#include "unittests.hpp"

namespace roxierowtests {
using namespace roxiemem;

class RoxieRowAllocatorTests : public CppUnit::TestFixture
{
    CPPUNIT_TEST_SUITE( RoxieRowAllocatorTests );
        CPPUNIT_TEST(testSetup);
        CPPUNIT_TEST(testChecking);
        CPPUNIT_TEST(testCleanup);
        CPPUNIT_TEST(testAllocatorCache);
    CPPUNIT_TEST_SUITE_END();
    const IContextLogger &logctx;

public:
    RoxieRowAllocatorTests() : logctx(queryDummyContextLogger())
    {
    }

    ~RoxieRowAllocatorTests()
    {
    }

protected:
    class CheckingRowAllocatorCache : public CSimpleInterface, public IRowAllocatorCache
    {
    public:
        IMPLEMENT_IINTERFACE_USING(CSimpleInterface);

        CheckingRowAllocatorCache() { numFailures = 0; }
        virtual unsigned getActivityId(unsigned cacheId) const { return 0; }
        virtual StringBuffer &getActivityDescriptor(unsigned cacheId, StringBuffer &out) const { return out.append(cacheId); }
        virtual void onDestroy(unsigned cacheId, void *row) const
        {
            if (!RoxieRowCheckValid(cacheId, row))
                ++numFailures;
        }
        virtual void onClone(unsigned cacheId, void *row) const
        {
        }
        virtual void checkValid(unsigned cacheId, const void *row) const
        {
            if (!RoxieRowCheckValid(cacheId, row))
                ++numFailures;
        }

        mutable unsigned numFailures;
    };

    class DummyOutputMeta : public IOutputMetaData, public CInterface
    {
    public:
        DummyOutputMeta(size32_t _minSize, size32_t _fixedSize) : minSize(_minSize), fixedSize(_fixedSize) {}
        IMPLEMENT_IINTERFACE

        virtual size32_t getRecordSize(const void *rec) { return minSize; }
        virtual size32_t getFixedSize() const { return fixedSize; }
        virtual size32_t getMinRecordSize() const { return minSize; }
        virtual void toXML(const byte * self, IXmlWriter & out) {}
        virtual unsigned getVersion() const { return 0; }
        virtual unsigned getMetaFlags() { return 0; }
        virtual IOutputMetaData * querySerializedDiskMeta() { return this; }

        virtual void destruct(byte * self) {}
        virtual IOutputRowSerializer * createDiskSerializer(ICodeContext * ctx, unsigned activityId) { return NULL; }
        virtual IOutputRowDeserializer * createDiskDeserializer(ICodeContext * ctx, unsigned activityId) { return NULL; }
        virtual ISourceRowPrefetcher * createDiskPrefetcher(ICodeContext * ctx, unsigned activityId) { return NULL; }
        virtual IOutputRowSerializer * createInternalSerializer(ICodeContext * ctx, unsigned activityId) { return NULL; }
        virtual IOutputRowDeserializer * createInternalDeserializer(ICodeContext * ctx, unsigned activityId) { return NULL; }
        virtual void walkIndirectMembers(const byte * self, IIndirectMemberVisitor & visitor) {}

        size32_t minSize;
        size32_t fixedSize;
    };

    void testAllocator(IOutputMetaData * meta, roxiemem::RoxieHeapFlags flags, unsigned low, unsigned high, int modify, bool checking)
    {
        CheckingRowAllocatorCache cache;
        Owned<IRowManager> rm = createRowManager(0, NULL, logctx, &cache);
        Owned<IEngineRowAllocator> alloc = checking ? createCrcRoxieRowAllocator(*rm, meta, 0, 0, flags) : createRoxieRowAllocator(*rm, meta, 0, 0, flags);
        for (unsigned size=low; size <= high; size++)
        {
            unsigned capacity;
            unsigned prevFailures = cache.numFailures;
            void * row = alloc->createRow(capacity);
            if (low != high)
                row = alloc->resizeRow(size, row, capacity);
            for (unsigned i1=0; i1 < capacity; i1++)
                ((byte *)row)[i1] = i1;
            const void * final = alloc->finalizeRow(capacity, row, capacity);
            for (unsigned i2=0; i2 < capacity; i2++)
            {
                ASSERT(((byte *)row)[i2] == i2);
            }

            if (modify != 0)
            {
                if (modify < 0)
                    ((byte *)row)[0]++;
                else
                    ((byte *)row)[size-1]++;
            }
            ReleaseRoxieRow(row);
            if (modify == 0)
            {
                ASSERT(prevFailures == cache.numFailures);
            }
            else
            {
                ASSERT(prevFailures+1 == cache.numFailures);
            }
        }
    }

    void testAllocator(IOutputMetaData * meta, roxiemem::RoxieHeapFlags flags, unsigned low, unsigned high)
    {
        testAllocator(meta, flags, low, high, 0, false);
        testAllocator(meta, flags, low, high, 0, true);
        testAllocator(meta, flags, low, high, -1, true);
        testAllocator(meta, flags, low, high, +1, true);
    }

    void testSetup()
    {
        setTotalMemoryLimit(false, 40*HEAP_ALIGNMENT_SIZE, 0, NULL);
    }

    void testCleanup()
    {
        releaseRoxieHeap();
    }

    void testChecking()
    {
        Owned<IRowManager> rm = createRowManager(0, NULL, logctx, NULL);

        for (unsigned fixedSize=1; fixedSize<64; fixedSize++)
        {
            DummyOutputMeta meta(fixedSize, fixedSize);
            testAllocator(&meta, RHFnone, fixedSize, fixedSize);
            testAllocator(&meta, RHFpacked, fixedSize, fixedSize);
        }

        for (unsigned varSize=1; varSize<64; varSize++)
        {
            DummyOutputMeta meta(varSize, 0);
            testAllocator(&meta, RHFnone, varSize, varSize);
            testAllocator(&meta, RHFnone, 1, varSize);
        }
    }

    void testAllocatorCache()
    {
        IArrayOf<IOutputMetaData> metas;
        Owned<IRowManager> rm = createRowManager(0, NULL, logctx, NULL);
        class CAllocatorCallback : implements IRowAllocatorMetaActIdCacheCallback
        {
            IRowManager *rm;
        public:
            CAllocatorCallback(IRowManager *_rm) : rm(_rm)
            {
            }
            virtual IEngineRowAllocator *createAllocator(IOutputMetaData *meta, unsigned activityId, unsigned cacheId, roxiemem::RoxieHeapFlags flags) const
            {
                return createRoxieRowAllocator(*rm, meta, activityId, cacheId, flags);
            }
        } callback(rm);
        Owned<IRowAllocatorMetaActIdCache> allocatorCache = createRowAllocatorCache(&callback);
        // create 64 allocators, with 32 distinct activityIds
        for (unsigned fixedSize=1; fixedSize<=64; fixedSize++)
        {
            DummyOutputMeta *meta = new DummyOutputMeta(fixedSize, fixedSize);
            metas.append(*meta);

            unsigned activityId = 1 + ((fixedSize-1) % 32); // i.e. make an id, so that half are duplicates
            Owned<IEngineRowAllocator> allocator = allocatorCache->ensure(meta, activityId, roxiemem::RHFnone);
        }
        // test that all 64 are in the cache
        ASSERT(allocatorCache->items() == 64);

        // test ensure again
        for (unsigned fixedSize=1; fixedSize<=64; fixedSize++)
        {
            unsigned activityId = 1 + ((fixedSize-1) % 32); // i.e. make an id, so that half are duplicates
            IOutputMetaData *meta = &metas.item(fixedSize-1); // from 1st round
            Owned<IEngineRowAllocator> allocator = allocatorCache->ensure(meta, activityId, roxiemem::RHFnone);
        }
        ASSERT(allocatorCache->items() == 64);

        metas.kill();
        allocatorCache.clear();
    }
};

CPPUNIT_TEST_SUITE_REGISTRATION( RoxieRowAllocatorTests );
CPPUNIT_TEST_SUITE_NAMED_REGISTRATION( RoxieRowAllocatorTests, "RoxieRowAllocatorTests" );

} // namespace roxierowtests
#endif