jmutex.hpp 28 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007
  1. /*##############################################################################
  2. HPCC SYSTEMS software Copyright (C) 2012 HPCC Systems®.
  3. Licensed under the Apache License, Version 2.0 (the "License");
  4. you may not use this file except in compliance with the License.
  5. You may obtain a copy of the License at
  6. http://www.apache.org/licenses/LICENSE-2.0
  7. Unless required by applicable law or agreed to in writing, software
  8. distributed under the License is distributed on an "AS IS" BASIS,
  9. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. See the License for the specific language governing permissions and
  11. limitations under the License.
  12. ############################################################################## */
  13. #ifndef __JMUTEX__
  14. #define __JMUTEX__
  15. #include <assert.h>
  16. #include <atomic>
  17. #include "jiface.hpp"
  18. #include "jsem.hpp"
  19. extern jlib_decl void ThreadYield();
  20. extern jlib_decl void spinUntilReady(atomic_t &value);
  21. extern jlib_decl void spinUntilReady(std::atomic_uint &value);
  22. #ifdef _DEBUG
  23. //#define SPINLOCK_USE_MUTEX // for testing
  24. //#define SPINLOCK_RR_CHECK // checks for realtime threads
  25. #define _ASSERT_LOCK_SUPPORT
  26. #endif
  27. #ifdef SPINLOCK_USE_MUTEX
  28. #define NRESPINLOCK_USE_SPINLOCK
  29. #endif
  30. #ifdef _WIN32
  31. #define NRESPINLOCK_USE_SPINLOCK
  32. #endif
  33. #ifdef _WIN32
// Recursive inter-thread mutex built on a Win32 mutex handle.
// Tracks the owning thread and a recursion count itself so that
// lockAll()/unlockAll() can save and restore the full recursion depth
// (used by Monitor and the checked critical sections).
class jlib_decl Mutex
{
protected:
    // Named (cross-process capable) variant - only reachable via NamedMutex below.
    Mutex(const char *name)
    {
        mutex = CreateMutex(NULL, FALSE, name);
        assertex(mutex);
        lockcount = 0;
        owner = 0;
    }
public:
    Mutex()
    {
        mutex = CreateMutex(NULL, FALSE, NULL);
        lockcount = 0;
        owner = 0;
    }
    ~Mutex()
    {
        if (owner != 0)
            printf("Warning - Owned mutex destroyed"); // can't use DBGLOG here!
        CloseHandle(mutex);
    }
    // Blocks until the mutex is obtained; re-entrant on the owning thread.
    void lock()
    {
        WaitForSingleObject(mutex, INFINITE);
        if (lockcount) {
            if(owner!=GetCurrentThreadId())     // I think only way this can happen is with unhandled thread exception
                lockcount = 0;                  // (don't assert as unhandled error may get lost)
        }
        lockcount++;
        owner=GetCurrentThreadId();
    }
    // Attempts to obtain the mutex within timeout (ms); returns false on timeout.
    bool lockWait(unsigned timeout)
    {
        if (WaitForSingleObject(mutex, (long)timeout)!=WAIT_OBJECT_0)
            return false;
        if (lockcount) {
            if(owner!=GetCurrentThreadId())     // I think only way this can happen is with unhandled thread exception
                lockcount = 0;                  // (don't assert as unhandled error may get lost)
        }
        lockcount++;
        owner=GetCurrentThreadId();
        return true;
    }
    void unlock()
    {
        assertex(owner==GetCurrentThreadId());
        --lockcount;
        if (lockcount==0)
            owner = 0;
        ReleaseMutex(mutex);
    }
protected:
    MutexId mutex;
    ThreadId owner;
    // Fully releases a recursively-held mutex and returns the depth that was
    // held, so lockAll() can later restore it.  Must be called by the owner.
    int unlockAll()
    {
        assertex(owner==GetCurrentThreadId());
        assertex(lockcount);
        int ret = lockcount;
        int lc = ret;
        while (lc--)
            unlock();
        return ret;
    }
    // Re-acquires the mutex 'count' times (the value returned by unlockAll).
    void lockAll(int count)
    {
        while (count--)
            lock();
    }
private:
    int lockcount;  // recursion depth held by 'owner' (0 when free)
};

// Mutex identified by a global name: NamedMutex objects created with the same
// name share the same underlying OS mutex (Win32 named-mutex semantics).
class jlib_decl NamedMutex: public Mutex
{
public:
    NamedMutex(const char *name)
        : Mutex(name)
    {
    }
};
  116. #else // posix
// Recursive inter-thread mutex (posix build).  Implemented out of line in
// jmutex.cpp using a pthread mutex plus a condition variable (lock_free),
// which allows lockWait() to support a timeout.
class jlib_decl Mutex
{
public:
    Mutex();
//  Mutex(const char *name); //not supported
    ~Mutex();
    void lock();                        // blocks until obtained; re-entrant on the owning thread
    bool lockWait(unsigned timeout);    // returns false if not obtained within timeout ms
    void unlock();
protected:
    MutexId mutex;
    ThreadId owner;
    int unlockAll();        // releases all recursion levels, returns the depth held
    void lockAll(int);      // re-acquires the depth returned by unlockAll
private:
    int lockcount;          // recursion depth held by 'owner'
    pthread_cond_t lock_free;   // signalled when the mutex becomes available
};

// Named mutex for exclusion across processes.  Implemented in jmutex.cpp;
// presumably uses a file derived from the name (mutexfname) as the
// cross-process lock, guarded by an in-process Mutex -- see jmutex.cpp.
class jlib_decl NamedMutex
{
public:
    NamedMutex(const char *name);
    ~NamedMutex();
    void lock();
    bool lockWait(unsigned timeout);
    void unlock();
private:
    Mutex threadmutex;      // serialises threads within this process
    char *mutexfname;       // name-derived path used for the cross-process lock
};
  147. #endif
  148. class jlib_decl synchronized
  149. {
  150. private:
  151. Mutex &mutex;
  152. void throwLockException(unsigned timeout);
  153. public:
  154. synchronized(Mutex &m) : mutex(m) { mutex.lock(); };
  155. synchronized(Mutex &m,unsigned timeout) : mutex(m) { if(!mutex.lockWait(timeout)) throwLockException(timeout); }
  156. inline ~synchronized() { mutex.unlock(); };
  157. };
  158. #ifdef _WIN32
extern "C" {
WINBASEAPI
BOOL
WINAPI
TryEnterCriticalSection(
    IN OUT LPCRITICAL_SECTION lpCriticalSection
    );
};

// Lightweight recursive mutex for use within a single process, built on the
// Win32 CRITICAL_SECTION primitive.  When _ASSERT_LOCK_SUPPORT is defined
// (debug builds) it additionally tracks the owning thread and recursion depth
// so that mismatched enter/leave or destruction while held asserts.
class jlib_decl CriticalSection
{
    // lightweight mutex within a single process
private:
    CRITICAL_SECTION flags;
#ifdef _ASSERT_LOCK_SUPPORT
    ThreadId owner;     // thread currently inside the section (0 if none)
    unsigned depth;     // extra recursion levels held by owner
#endif
    inline CriticalSection(CriticalSection & value __attribute__((unused))) { assert(false); } // dummy to prevent inadvertant use as block
public:
    inline CriticalSection()
    {
        InitializeCriticalSection(&flags);
#ifdef _ASSERT_LOCK_SUPPORT
        owner = 0;
        depth = 0;
#endif
    };
    inline ~CriticalSection()
    {
#ifdef _ASSERT_LOCK_SUPPORT
        assertex(owner==0 && depth==0);     // must not be destroyed while held
#endif
        DeleteCriticalSection(&flags);
    };
    inline void enter()
    {
        EnterCriticalSection(&flags);
#ifdef _ASSERT_LOCK_SUPPORT
        // Safe to update owner/depth here - we hold the section.
        if (owner)
        {
            assertex(owner==GetCurrentThreadId());
            depth++;
        }
        else
            owner = GetCurrentThreadId();
#endif
    };
    inline void leave()
    {
#ifdef _ASSERT_LOCK_SUPPORT
        assertex(owner==GetCurrentThreadId());  // catch leave() without matching enter()
        if (depth)
            depth--;
        else
            owner = 0;
#endif
        LeaveCriticalSection(&flags);
    };
    // Debug aid: asserts the calling thread currently holds the section.
    inline void assertLocked()
    {
#ifdef _ASSERT_LOCK_SUPPORT
        assertex(owner == GetCurrentThreadId());
#endif
    }
#ifdef ENABLE_CHECKEDCRITICALSECTIONS
    bool wouldBlock()  { if (TryEnterCriticalSection(&flags)) { leave(); return false; } return true; } // debug only
#endif
};
  227. #else
/**
 * Mutex locking wrapper. Use enter/leave to lock/unlock.
 * Recursive (PTHREAD_MUTEX_RECURSIVE).  When _ASSERT_LOCK_SUPPORT is
 * defined it also tracks the owning thread and recursion depth so that
 * mismatched enter/leave or destruction while held asserts.
 */
class CriticalSection
{
private:
    MutexId mutex;
#ifdef _ASSERT_LOCK_SUPPORT
    ThreadId owner;     // thread currently inside the section (0 if none)
    unsigned depth;     // extra recursion levels held by owner
#endif
    CriticalSection (const CriticalSection &);  // not copyable - deliberately left unimplemented
public:
    inline CriticalSection()
    {
        pthread_mutexattr_t attr;
        pthread_mutexattr_init(&attr);
#ifdef _DEBUG
        verifyex(pthread_mutexattr_settype(&attr,PTHREAD_MUTEX_RECURSIVE)==0); // verify supports attr
#else
        pthread_mutexattr_settype(&attr,PTHREAD_MUTEX_RECURSIVE);
#endif
        pthread_mutex_init(&mutex, &attr);
        pthread_mutexattr_destroy(&attr);
#ifdef _ASSERT_LOCK_SUPPORT
        owner = 0;
        depth = 0;
#endif
    }
    inline ~CriticalSection()
    {
#ifdef _ASSERT_LOCK_SUPPORT
        assert(owner==0 && depth==0);   // must not be destroyed while held
#endif
        pthread_mutex_destroy(&mutex);
    }
    inline void enter()
    {
        pthread_mutex_lock(&mutex);
#ifdef _ASSERT_LOCK_SUPPORT
        // Safe to update owner/depth here - we hold the mutex.
        if (owner)
        {
            assertex(owner==GetCurrentThreadId());
            depth++;
        }
        else
            owner = GetCurrentThreadId();
#endif
    }
    inline void leave()
    {
#ifdef _ASSERT_LOCK_SUPPORT
        assertex(owner==GetCurrentThreadId());  // catch leave() without matching enter()
        if (depth)
            depth--;
        else
            owner = 0;
#endif
        pthread_mutex_unlock(&mutex);
    }
    // Debug aid: asserts the calling thread currently holds the section.
    inline void assertLocked()
    {
#ifdef _ASSERT_LOCK_SUPPORT
        assertex(owner == GetCurrentThreadId());
#endif
    }
};
  295. #endif
  296. /**
  297. * Critical section delimiter, using scope to define lifetime of
  298. * the lock on a critical section (parameter).
  299. * Blocks on construction, unblocks on destruction.
  300. */
  301. class CriticalBlock
  302. {
  303. CriticalSection &crit;
  304. public:
  305. inline CriticalBlock(CriticalSection &c) : crit(c) { crit.enter(); }
  306. inline ~CriticalBlock() { crit.leave(); }
  307. };
  308. /**
  309. * Critical section delimiter, using scope to define lifetime of
  310. * the lock on a critical section (parameter).
  311. * Unblocks on construction, blocks on destruction.
  312. */
  313. class CriticalUnblock
  314. {
  315. CriticalSection &crit;
  316. public:
  317. inline CriticalUnblock(CriticalSection &c) : crit(c) { crit.leave(); }
  318. inline ~CriticalUnblock() { crit.enter(); }
  319. };
  320. class CLeavableCriticalBlock
  321. {
  322. CriticalSection &crit;
  323. bool locked = false;
  324. public:
  325. inline CLeavableCriticalBlock(CriticalSection &_crit) : crit(_crit)
  326. {
  327. enter();
  328. }
  329. inline ~CLeavableCriticalBlock()
  330. {
  331. if (locked)
  332. crit.leave();
  333. }
  334. inline void enter()
  335. {
  336. if (locked)
  337. return;
  338. crit.enter();
  339. locked = true;
  340. }
  341. inline void leave()
  342. {
  343. if (locked)
  344. {
  345. locked = false;
  346. crit.leave();
  347. }
  348. }
  349. };
  350. #ifdef SPINLOCK_USE_MUTEX // for testing
  351. class SpinLock
  352. {
  353. CriticalSection sect;
  354. public:
  355. inline void enter()
  356. {
  357. sect.enter();
  358. }
  359. inline void leave()
  360. {
  361. sect.leave();
  362. }
  363. };
  364. #else
// Re-entrant spin lock.  The owning thread is recorded so nested enter()
// calls just bump 'nesting' instead of spinning on themselves.
class jlib_decl SpinLock
{
    std::atomic_uint value{false};  // Use an atomic_uint rather than a bool because it is more efficient on power8
    unsigned nesting = 0;           // This is not atomic since it is only accessed by one thread at a time
    std::atomic<ThreadId> owner{0}; // thread currently holding the lock (0 = unowned)
    inline SpinLock(SpinLock & value __attribute__((unused))) = delete; // to prevent inadvertent use as block
public:
    inline SpinLock()
    {
    }
#ifdef _DEBUG
    ~SpinLock()
    {
        if (value)
            printf("Warning - Owned Spinlock destroyed"); // can't use DBGLOG here!
    }
#endif
    inline void enter()
    {
        ThreadId self = GetCurrentThreadId();
#ifdef SPINLOCK_RR_CHECK        // as requested by RKC
        // Spinning on a realtime (SCHED_RR) thread can starve the lock holder;
        // demote to SCHED_OTHER and assert so the misuse gets noticed.
        int policy;
        sched_param param;
        if ((pthread_getschedparam(self, &policy, &param)==0)&&(policy==SCHED_RR)) {
            param.sched_priority = 0;
            pthread_setschedparam(self, SCHED_OTHER, &param);   // otherwise will likely re-enter
            assertex(!"SpinLock enter on SCHED_RR thread");
        }
#endif
        //owner can only match if it was set on this thread. Therefore the load can be relaxed since single threaded
        //code is always sequentially consistent.
        if (self==owner.load(std::memory_order_relaxed))
        {
            dbgassertex(value);
            nesting++;
            return;
        }
        // Spin until the exchange observes the lock free.  The acquire
        // ordering pairs with the release store in leave().
        while (unlikely(value.exchange(true, std::memory_order_acquire)))
            spinUntilReady(value);
        owner.store(self, std::memory_order_relaxed);
    }
    inline void leave()
    {
        //It is safe to access nesting - since this thread is the only one that can access
        //it, so no need for a synchronized access
        if (nesting == 0)
        {
            // Clear owner before publishing the lock as free, so the next
            // acquirer can never see a stale owner id after taking the lock.
            owner.store(0, std::memory_order_relaxed);
            value.store(false, std::memory_order_release);
        }
        else
            nesting--;
    }
};
  419. #endif
  420. class SpinBlock
  421. {
  422. SpinLock &lock;
  423. public:
  424. inline SpinBlock(SpinLock & _lock) : lock(_lock) { lock.enter(); }
  425. inline ~SpinBlock() { lock.leave(); }
  426. };
  427. class SpinUnblock
  428. {
  429. SpinLock &lock;
  430. public:
  431. inline SpinUnblock(SpinLock & _lock) : lock(_lock) { lock.leave(); }
  432. inline ~SpinUnblock() { lock.enter(); }
  433. };
  434. // Non re-entrant Spin locks where *absolutely* certain enters are not nested on same thread
  435. // (debug version checks and asserts if are, release version will deadlock
  436. #ifdef NRESPINLOCK_USE_SPINLOCK
// When NRESPINLOCK_USE_SPINLOCK is defined (Win32 or SPINLOCK_USE_MUTEX) the
// non-reentrant lock simply reuses the full SpinLock implementation.
class jlib_decl NonReentrantSpinLock: public SpinLock
{
};
  440. #else
  441. #ifdef _DEBUG
// Debug build: records the owning thread purely so that re-entry (which would
// deadlock in a release build) and spurious leave() assert immediately.
class jlib_decl NonReentrantSpinLock
{
    std::atomic_uint value;         // lock flag: non-zero while held
    std::atomic<ThreadId> owner;    // holder, used only for the assertions
    inline NonReentrantSpinLock(NonReentrantSpinLock & value __attribute__((unused))) = delete; // to prevent inadvertent use as block
public:
    inline NonReentrantSpinLock() : value(false), owner(0)
    {
    }
    inline void enter()
    {
        ThreadId self = GetCurrentThreadId();
        assertex(self!=owner.load(std::memory_order_relaxed)); // check for reentrancy
        // Acquire ordering pairs with the release store in leave().
        while (unlikely(value.exchange(true, std::memory_order_acquire)))
            spinUntilReady(value);
        owner.store(self, std::memory_order_relaxed);
    }
    inline void leave()
    {
        assertex(GetCurrentThreadId()==owner.load(std::memory_order_relaxed)); // check for spurious leave
        // Clear owner before publishing the lock as free.
        owner.store(0, std::memory_order_relaxed);
        value.store(false, std::memory_order_release);
    }
};
  466. #else
// Release build: minimal non-reentrant spin lock.  Nested enter() on the same
// thread will deadlock (see the comment above) - the debug build asserts instead.
class jlib_decl NonReentrantSpinLock
{
    std::atomic_uint value;     // lock flag: non-zero while held
    inline NonReentrantSpinLock(NonReentrantSpinLock & value __attribute__((unused))) = delete; // to prevent inadvertent use as block
public:
    inline NonReentrantSpinLock() : value(false)
    {
    }
    inline void enter()
    {
        // Acquire ordering pairs with the release store in leave().
        while (unlikely(value.exchange(true, std::memory_order_acquire)))
            spinUntilReady(value);
    }
    inline void leave()
    {
        value.store(false, std::memory_order_release);
    }
};
  485. #endif
  486. #endif
  487. class NonReentrantSpinBlock
  488. {
  489. NonReentrantSpinLock &lock;
  490. public:
  491. inline NonReentrantSpinBlock(NonReentrantSpinLock & _lock) : lock(_lock) { lock.enter(); }
  492. inline ~NonReentrantSpinBlock() { lock.leave(); }
  493. };
  494. class NonReentrantSpinUnblock
  495. {
  496. NonReentrantSpinLock &lock;
  497. public:
  498. inline NonReentrantSpinUnblock(NonReentrantSpinLock & _lock) : lock(_lock) { lock.leave(); }
  499. inline ~NonReentrantSpinUnblock() { lock.enter(); }
  500. };
class jlib_decl Monitor: public Mutex
{
    // Like a java object - you can synchronize on it for a block, wait for a notify on it, or notify on it
    Semaphore *sem;     // signalled by notify/notifyAll to release waiters
    int waiting;        // number of threads currently blocked in wait()
    void *last;         // internal wait/notify bookkeeping - see jmutex.cpp
public:
    Monitor() : Mutex() { sem = new Semaphore(); waiting = 0; last = NULL; }
//  Monitor(const char *name) : Mutex(name) { sem = new Semaphore(name); waiting = 0; last = NULL; } // not supported
    ~Monitor() {delete sem;};

    void wait();        // only called when locked
    void notify();      // only called when locked
    void notifyAll();   // only called when locked -- notifys for all waiting threads
};
  515. //--------------------------------------------------------------------------------------------------------------------
  516. //Currently disabled since performance profile of own implementation is preferable, and queryWriteLocked() cannot be implemented
  517. //#define USE_PTHREAD_RWLOCK
  518. #ifndef USE_PTHREAD_RWLOCK
// Many-reader/single-writer lock.  All counters are guarded by 'cs'; blocked
// threads park on readSem/writeSem, and the releasing thread transfers
// ownership by adjusting the counts *before* signalling the semaphore.
class jlib_decl ReadWriteLock
{
    // Common read-lock path.  If timed is false the timeout is ignored and the
    // call blocks indefinitely; returns false only on a timed failure.
    bool lockRead(bool timed, unsigned timeout) {
        cs.enter();
        if (writeLocks == 0)
        {
            readLocks++;
            cs.leave();
        }
        else
        {
            readWaiting++;
            cs.leave();
            if (timed)
            {
                if (!readSem.wait(timeout)) {
                    // Timed out - but a signal may have been posted after the
                    // timeout fired, so re-check under cs before withdrawing.
                    cs.enter();
                    if (!readSem.wait(0)) {
                        readWaiting--;
                        cs.leave();
                        return false;
                    }
                    cs.leave();
                }
            }
            else
                readSem.wait();
            //NB: waiting and locks adjusted before the signal occurs.
        }
        return true;
    }
    // Common write-lock path; same structure as lockRead but exclusive.
    bool lockWrite(bool timed, unsigned timeout) {
        cs.enter();
        if ((readLocks == 0) && (writeLocks == 0))
        {
            writeLocks++;
            cs.leave();
        }
        else
        {
            writeWaiting++;
            cs.leave();
            if (timed)
            {
                if (!writeSem.wait(timeout)) {
                    // See the matching comment in lockRead above.
                    cs.enter();
                    if (!writeSem.wait(0)) {
                        writeWaiting--;
                        cs.leave();
                        return false;
                    }
                    cs.leave();
                }
            }
            else
                writeSem.wait();
            //NB: waiting and locks adjusted before the signal occurs.
        }
#ifdef _DEBUG
        exclWriteOwner = GetCurrentThreadId();
#endif
        return true;
    }
public:
    ReadWriteLock()
    {
        readLocks = 0; writeLocks = 0; readWaiting = 0; writeWaiting = 0;
#ifdef _DEBUG
        exclWriteOwner = 0;
#endif
    }
    ~ReadWriteLock()                { assertex(readLocks == 0 && writeLocks == 0); }
    void lockRead()                 { lockRead(false, 0); }
    void lockWrite()                { lockWrite(false, 0); }
    bool lockRead(unsigned timeout) { return lockRead(true, timeout); }     // false on timeout
    bool lockWrite(unsigned timeout){ return lockWrite(true, timeout); }    // false on timeout
    // Releases one read lock, or the write lock if no read locks are held,
    // then wakes waiters: all pending readers first, otherwise one writer.
    void unlock() {
        cs.enter();
        if (readLocks) readLocks--;
        else
        {
            writeLocks--;
#ifdef _DEBUG
            exclWriteOwner = 0;
#endif
        }
        assertex(writeLocks == 0);  // only ever one writer, so it must now be free
        if (readLocks == 0)
        {
            if (readWaiting)
            {
                // Grant every waiting reader its lock before signalling.
                unsigned numWaiting = readWaiting;
                readWaiting = 0;
                readLocks += numWaiting;
                readSem.signal(numWaiting);
            }
            else if (writeWaiting)
            {
                // Hand the lock to exactly one waiting writer.
                writeWaiting--;
                writeLocks++;
                writeSem.signal();
            }
        }
        cs.leave();
    }
    bool queryWriteLocked() { return (writeLocks != 0); }
    void unlockRead()       { unlock(); }
    void unlockWrite()      { unlock(); }

    //MORE: May want to use the pthread implementations under linux.
protected:
    CriticalSection cs;     // protects all of the state below
    Semaphore readSem;
    Semaphore writeSem;
    unsigned readLocks;     // read locks currently held
    unsigned writeLocks;    // 1 while a writer holds the lock
    unsigned readWaiting;   // threads blocked in lockRead
    unsigned writeWaiting;  // threads blocked in lockWrite
#ifdef _DEBUG
    ThreadId exclWriteOwner;
#endif
};
  640. #else
// Alternative implementation on top of pthread_rwlock (enabled by
// USE_PTHREAD_RWLOCK).  Note queryWriteLocked() cannot be provided here,
// which is one reason the hand-rolled version above is the default.
class jlib_decl ReadWriteLock
{
public:
    ReadWriteLock()         { pthread_rwlock_init(&rwlock, nullptr); }
    ~ReadWriteLock()        { pthread_rwlock_destroy(&rwlock); }
    void lockRead()         { pthread_rwlock_rdlock(&rwlock); }
    void lockWrite()        { pthread_rwlock_wrlock(&rwlock); }
    bool lockRead(unsigned timeout);    // timed variants implemented in jmutex.cpp
    bool lockWrite(unsigned timeout);
    void unlock()           { pthread_rwlock_unlock(&rwlock); }
    void unlockRead()       { pthread_rwlock_unlock(&rwlock); }
    void unlockWrite()      { pthread_rwlock_unlock(&rwlock); }
//  bool queryWriteLocked(); // I don't think this can be implemented on top of the pthread interface
protected:
    pthread_rwlock_t rwlock;
};
  657. #endif
  658. class ReadLockBlock
  659. {
  660. ReadWriteLock *lock;
  661. public:
  662. ReadLockBlock(ReadWriteLock &l) : lock(&l) { lock->lockRead(); }
  663. ~ReadLockBlock() { if (lock) lock->unlockRead(); }
  664. void clear()
  665. {
  666. if (lock)
  667. {
  668. lock->unlockRead();
  669. lock = NULL;
  670. }
  671. }
  672. };
  673. class WriteLockBlock
  674. {
  675. ReadWriteLock *lock;
  676. public:
  677. WriteLockBlock(ReadWriteLock &l) : lock(&l) { lock->lockWrite(); }
  678. ~WriteLockBlock() { if (lock) lock->unlockWrite(); }
  679. void clear()
  680. {
  681. if (lock)
  682. {
  683. lock->unlockWrite();
  684. lock = NULL;
  685. }
  686. }
  687. };
  688. //--------------------------------------------------------------------------------------------------------------------
// Reusable rendezvous barrier: wait() blocks until 'limit' arrivals have
// occurred, then releases everyone and resets for the next round.
class Barrier
{
    CriticalSection crit;   // protects all counters
    int limit, remaining, waiting;  // remaining = arrivals still needed this round;
                                    // waiting = threads parked on sem
    Semaphore sem;
public:
    Barrier(int _limit) { init(_limit); }
    Barrier() { init(0); }
    // (Re)configures the barrier width; not safe while threads are waiting.
    void init(int _limit)
    {
        waiting = 0;
        limit = _limit;
        remaining = limit;
    }
    void wait() // blocks until 'limit' barrier points are entered.
    {
        CriticalBlock block(crit);
        // remaining==0 means the previous round has completed but its waiters
        // have not all drained yet - yield until they have, then reset.
        while (remaining==0) {
            if (waiting) {
                crit.leave();
                ThreadYield();
                crit.enter();
            }
            else
                remaining = limit;
        }
        remaining--;
        if (remaining==0)       // last arrival releases everybody
            sem.signal(waiting);
        else if (remaining>0) { // note: remaining<0 after abort() - fall straight through
            waiting++;
            crit.leave();
            sem.wait();
            crit.enter();
            waiting--;
        }
    }
    // Releases all current waiters and leaves remaining negative so that
    // subsequent wait() calls return immediately.
    void abort()
    {
        CriticalBlock block(crit);
        remaining = -1;
        sem.signal(waiting);
    }
    void cancel(int n, bool remove) // cancel n barrier points from this instance, if remove=true reduces barrier width
    {
        CriticalBlock block(crit);
        // Same previous-round drain as wait().
        while (remaining==0) {
            if (waiting) {
                crit.leave();
                ThreadYield();
                crit.enter();
            }
            else
                remaining = limit;
        }
        assertex(remaining>=n);
        remaining-=n;
        if (remaining==0)
            sem.signal(waiting);
        if (remove)
            limit-=n;
    }
};
// checked versions of critical block and readwrite blocks - report deadlocks
// When enabled, checked critical sections are implemented with the (slower)
// Mutex so that enters can time out; the checkedCrit* helpers (jmutex.cpp)
// take the caller's file/line so a blocked enter can be reported.
#define USECHECKEDCRITICALSECTIONS
#ifdef USECHECKEDCRITICALSECTIONS
typedef Mutex CheckedCriticalSection;
void jlib_decl checkedCritEnter(CheckedCriticalSection &crit, unsigned timeout, const char *fname, unsigned lnum);
void jlib_decl checkedCritLeave(CheckedCriticalSection &crit);

// Scoped checked lock; constructor (jmutex.cpp) acquires with a timeout and
// the call site's file/line, destructor releases.
class jlib_decl CheckedCriticalBlock
{
    CheckedCriticalSection &crit;
public:
    CheckedCriticalBlock(CheckedCriticalSection &c, unsigned timeout, const char *fname,unsigned lnum);
    ~CheckedCriticalBlock()
    {
        crit.unlock();
    }
};

// Scoped checked unlock: releases on construction; the out-of-line destructor
// presumably re-acquires using the stored timeout/file/line - see jmutex.cpp.
class jlib_decl CheckedCriticalUnblock
{
    CheckedCriticalSection &crit;
    const char *fname;      // call site, for reporting a blocked re-entry
    unsigned lnum;
    unsigned timeout;
public:
    CheckedCriticalUnblock(CheckedCriticalSection &c,unsigned _timeout,const char *_fname,unsigned _lnum)
        : crit(c)
    {
        timeout = _timeout;
        fname = _fname;
        lnum = _lnum;
        crit.unlock();
    }
    ~CheckedCriticalUnblock();
};
#define CHECKEDCRITICALBLOCK(sect,timeout)   CheckedCriticalBlock glue(block,__LINE__)(sect,timeout,__FILE__,__LINE__)
#define CHECKEDCRITICALUNBLOCK(sect,timeout) CheckedCriticalUnblock glue(unblock,__LINE__)(sect,timeout,__FILE__,__LINE__)
#define CHECKEDCRITENTER(sect,timeout)       checkedCritEnter(sect,timeout,__FILE__,__LINE__)
#define CHECKEDCRITLEAVE(sect)               checkedCritLeave(sect)

// Checked read/write lock blocks: constructors (jmutex.cpp) acquire with a
// timeout and call-site information, destructors release.
class jlib_decl CheckedReadLockBlock
{
    ReadWriteLock &lock;
public:
    CheckedReadLockBlock(ReadWriteLock &l, unsigned timeout, const char *fname,unsigned lnum);
    ~CheckedReadLockBlock() { lock.unlockRead(); }
};

class jlib_decl CheckedWriteLockBlock
{
    ReadWriteLock &lock;
public:
    CheckedWriteLockBlock(ReadWriteLock &l, unsigned timeout, const char *fname, unsigned lnum);
    ~CheckedWriteLockBlock() { lock.unlockWrite(); }
};
void jlib_decl checkedReadLockEnter(ReadWriteLock &l, unsigned timeout, const char *fname, unsigned lnum);
void jlib_decl checkedWriteLockEnter(ReadWriteLock &l, unsigned timeout, const char *fname, unsigned lnum);
#define CHECKEDREADLOCKBLOCK(l,timeout)  CheckedReadLockBlock glue(block,__LINE__)(l,timeout,__FILE__,__LINE__)
#define CHECKEDWRITELOCKBLOCK(l,timeout) CheckedWriteLockBlock glue(block,__LINE__)(l,timeout,__FILE__,__LINE__)
#define CHECKEDREADLOCKENTER(l,timeout)  checkedReadLockEnter(l,timeout,__FILE__,__LINE__)
#define CHECKEDWRITELOCKENTER(l,timeout) checkedWriteLockEnter(l,timeout,__FILE__,__LINE__)
#else
// Checking disabled: map everything straight onto the unchecked primitives.
#define CheckedCriticalSection CriticalSection
#define CheckedCriticalBlock CriticalBlock
#define CheckedCriticalUnblock CriticalUnblock
#define CHECKEDCRITENTER(sect,timeout) (sect).enter()
#define CHECKEDCRITLEAVE(sect) (sect).leave()
#define CHECKEDCRITICALBLOCK(sect,timeout)   CheckedCriticalBlock glue(block,__LINE__)(sect)
#define CHECKEDCRITICALUNBLOCK(sect,timeout) CheckedCriticalUnblock glue(unblock,__LINE__)(sect)
#define CHECKEDREADLOCKBLOCK(l,timeout)  ReadLockBlock glue(block,__LINE__)(l)
#define CHECKEDWRITELOCKBLOCK(l,timeout) WriteLockBlock glue(block,__LINE__)(l)
#define CHECKEDREADLOCKENTER(l,timeout) (l).lockRead()
#define CHECKEDWRITELOCKENTER(l,timeout) (l).lockWrite()
#endif
class CSingletonLock     // a lock that will generally only be locked once (for locking singleton objects - see below for examples
{
    volatile bool needlock; // cleared once the singleton has been initialised
    CriticalSection sect;
public:
    inline CSingletonLock()
    {
        needlock = true;
    }
    // Returns true if the caller acquired the lock (and must call unlock()
    // after initialising the singleton); false once initialisation is done.
    // NOTE(review): relies on compiler_memory_barrier() plus volatile only -
    // assumes hardware ordering is adequate on the supported targets; on a
    // weakly-ordered CPU this pattern may need real fences - confirm.  The
    // querySingleton() template below is the std::atomic-based alternative.
    inline bool lock()
    {
        if (needlock) {
            sect.enter();
            //prevent compiler from moving any code before the critical section (unlikely)
            compiler_memory_barrier();
            return true;
        }
        //Prevent the value of the protected object from being evaluated before the condition
        compiler_memory_barrier();
        return false;
    }
    inline void unlock()
    {
        //Ensure that no code that precedes unlock() gets moved to after needlock being cleared.
        compiler_memory_barrier();
        needlock = false;
        sect.leave();
    }
};

/* Usage example

static void *sobj = NULL;
static CSingletonLock slock;
void *get()
{
    if (slock.lock()) {
        if (!sobj)          // required
            sobj = createSObj();
        slock.unlock();
    }
    return sobj;
}
*/
/*
 * A template function for implementing a singleton object. Using the same example as above would require:

static std::atomic<void *> sobj;
static CriticalSection slock;
void *get()
{
    return querySingleton(sobj, slock, []{ return createSObj; });
}
*/

// Double-checked locking: the fast path is a single acquire load; the factory
// only runs once, under the critical section, and its result is published
// with a release store so readers see a fully-constructed object.
template <typename X, typename FUNC>
inline X * querySingleton(std::atomic<X *> & singleton, CriticalSection & cs, FUNC factory)
{
    X * value = singleton.load(std::memory_order_acquire);
    if (value)
        return value;   // avoid crit

    CriticalBlock block(cs);
    value = singleton.load(std::memory_order_acquire);  // reload in case another thread got here first
    if (!value)
    {
        value = factory();
        singleton.store(value, std::memory_order_release);
    }
    return value;
}
/*
 * A template class for implementing a singleton object. Using the same example as above would require:

static Singleton<void> sobj;
void *get()
{
    return sobj.query([]{ return createSObj; });
}
*/

// Wraps querySingleton() together with its atomic pointer and lock.  Note the
// owned object is never deleted by this class (intentional for singletons).
template <typename X>
class Singleton
{
public:
    // Returns the instance, invoking factory exactly once to create it.
    template <typename FUNC> X * query(FUNC factory) { return querySingleton(singleton, cs, factory); }
    // Returns the instance if already created, otherwise nullptr (never constructs).
    X * queryExisting() const { return singleton.load(std::memory_order_acquire); }
private:
    std::atomic<X *> singleton = {nullptr};
    CriticalSection cs;
};
  906. #endif