jmutex.hpp

/*##############################################################################
    HPCC SYSTEMS software Copyright (C) 2012 HPCC Systems.

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
############################################################################## */

#ifndef __JMUTEX__
#define __JMUTEX__

#include <assert.h>
#include "jiface.hpp"
#include "jsem.hpp"

extern jlib_decl void ThreadYield();

#ifdef _DEBUG
//#define SPINLOCK_USE_MUTEX    // for testing
//#define SPINLOCK_RR_CHECK     // checks for realtime threads
#define _ASSERT_LOCK_SUPPORT
#endif

#ifdef SPINLOCK_USE_MUTEX
#define NRESPINLOCK_USE_SPINLOCK
#endif

#if (_WIN32 || ((__GNUC__ < 4 || (__GNUC_MINOR__ < 1 && __GNUC__ == 4)) || (!defined(__x86_64__) && !defined(__i386__))))
#define NRESPINLOCK_USE_SPINLOCK
#endif

#ifdef _WIN32

class jlib_decl Mutex
{
protected:
    Mutex(const char *name)
    {
        mutex = CreateMutex(NULL, FALSE, name);
        assertex(mutex);
        lockcount = 0;
        owner = 0;
    }
public:
    Mutex()
    {
        mutex = CreateMutex(NULL, FALSE, NULL);
        lockcount = 0;
        owner = 0;
    }
    ~Mutex()
    {
        if (owner != 0)
            printf("Warning - Owned mutex destroyed"); // can't use PrintLog here!
        CloseHandle(mutex);
    }
    void lock()
    {
        WaitForSingleObject(mutex, INFINITE);
        if (lockcount) {
            if (owner != GetCurrentThreadId())  // I think the only way this can happen is an unhandled thread exception
                lockcount = 0;                  // (don't assert, as the unhandled error may get lost)
        }
        lockcount++;
        owner = GetCurrentThreadId();
    }
    bool lockWait(unsigned timeout)
    {
        if (WaitForSingleObject(mutex, (long)timeout) != WAIT_OBJECT_0)
            return false;
        if (lockcount) {
            if (owner != GetCurrentThreadId())  // I think the only way this can happen is an unhandled thread exception
                lockcount = 0;                  // (don't assert, as the unhandled error may get lost)
        }
        lockcount++;
        owner = GetCurrentThreadId();
        return true;
    }
    void unlock()
    {
        assertex(owner == GetCurrentThreadId());
        --lockcount;
        if (lockcount == 0)
            owner = 0;
        ReleaseMutex(mutex);
    }
protected:
    MutexId mutex;
    ThreadId owner;
    int unlockAll()
    {
        assertex(owner == GetCurrentThreadId());
        assertex(lockcount);
        int ret = lockcount;
        int lc = ret;
        while (lc--)
            unlock();
        return ret;
    }
    void lockAll(int count)
    {
        while (count--)
            lock();
    }
private:
    int lockcount;
};

class jlib_decl NamedMutex: public Mutex
{
public:
    NamedMutex(const char *name)
        : Mutex(name)
    {
    }
};

#else // posix

class jlib_decl Mutex
{
public:
    Mutex();
//  Mutex(const char *name);    // not supported
    ~Mutex();
    void lock();
    bool lockWait(unsigned timeout);
    void unlock();
protected:
    MutexId mutex;
    ThreadId owner;
    int unlockAll();
    void lockAll(int);
private:
    int lockcount;
    pthread_cond_t lock_free;
};

class jlib_decl NamedMutex
{
public:
    NamedMutex(const char *name);
    ~NamedMutex();
    void lock();
    bool lockWait(unsigned timeout);
    void unlock();
private:
    Mutex threadmutex;
    char *mutexfname;
};

#endif

class jlib_decl synchronized
{
private:
    Mutex &mutex;
    void throwLockException(unsigned timeout);
public:
    synchronized(Mutex &m) : mutex(m) { mutex.lock(); }
    synchronized(Mutex &m, unsigned timeout) : mutex(m) { if (!mutex.lockWait(timeout)) throwLockException(timeout); }
    inline ~synchronized() { mutex.unlock(); }
};
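
/* Usage sketch (illustrative, not part of the original header): scoped locking of a Mutex
   via the synchronized wrapper above. The names 'stateMutex', 'updateState' and
   'updateStateWithTimeout' are hypothetical. The second form shows the timed constructor,
   which throws via throwLockException() if lockWait() times out.

static Mutex stateMutex;

void updateState()
{
    synchronized block(stateMutex);         // locks here, unlocks when block goes out of scope
    // ... modify shared state ...
}

void updateStateWithTimeout()
{
    synchronized block(stateMutex, 1000);   // waits up to 1000ms, throws on timeout
    // ... modify shared state ...
}
*/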
#ifdef _WIN32

extern "C" {
WINBASEAPI
BOOL
WINAPI
TryEnterCriticalSection(
    IN OUT LPCRITICAL_SECTION lpCriticalSection
    );
};

class jlib_decl CriticalSection
{
    // lightweight mutex within a single process
private:
    CRITICAL_SECTION flags;
#ifdef _ASSERT_LOCK_SUPPORT
    ThreadId owner;
    unsigned depth;
#endif
    inline CriticalSection(CriticalSection &value) { assert(false); } // dummy to prevent inadvertent use as block
public:
    inline CriticalSection()
    {
        InitializeCriticalSection(&flags);
#ifdef _ASSERT_LOCK_SUPPORT
        owner = 0;
        depth = 0;
#endif
    }
    inline ~CriticalSection()
    {
#ifdef _ASSERT_LOCK_SUPPORT
        assertex(owner == 0 && depth == 0);
#endif
        DeleteCriticalSection(&flags);
    }
    inline void enter()
    {
        EnterCriticalSection(&flags);
#ifdef _ASSERT_LOCK_SUPPORT
        if (owner)
        {
            assertex(owner == GetCurrentThreadId());
            depth++;
        }
        else
            owner = GetCurrentThreadId();
#endif
    }
    inline void leave()
    {
#ifdef _ASSERT_LOCK_SUPPORT
        assertex(owner == GetCurrentThreadId());
        if (depth)
            depth--;
        else
            owner = 0;
#endif
        LeaveCriticalSection(&flags);
    }
    inline void assertLocked()
    {
#ifdef _ASSERT_LOCK_SUPPORT
        assertex(owner == GetCurrentThreadId());
#endif
    }
#ifdef ENABLE_CHECKEDCRITICALSECTIONS
    bool wouldBlock() { if (TryEnterCriticalSection(&flags)) { leave(); return false; } return true; } // debug only
#endif
};

#else

/**
 * Mutex locking wrapper. Use enter/leave to lock/unlock.
 */
class CriticalSection
{
private:
    MutexId mutex;
#ifdef _ASSERT_LOCK_SUPPORT
    ThreadId owner;
#endif
    CriticalSection(const CriticalSection &);
public:
    inline CriticalSection()
    {
        pthread_mutexattr_t attr;
        pthread_mutexattr_init(&attr);
#ifdef _DEBUG
        verifyex(pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE) == 0); // verify the attribute is supported
#else
        pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
#endif
        pthread_mutex_init(&mutex, &attr);
        pthread_mutexattr_destroy(&attr);
#ifdef _ASSERT_LOCK_SUPPORT
        owner = 0;
#endif
    }
    inline ~CriticalSection()
    {
#ifdef _ASSERT_LOCK_SUPPORT
        assertex(owner == 0);
#endif
        pthread_mutex_destroy(&mutex);
    }
    inline void enter()
    {
        pthread_mutex_lock(&mutex);
#ifdef _ASSERT_LOCK_SUPPORT
        owner = GetCurrentThreadId();
#endif
    }
    inline void leave()
    {
#ifdef _ASSERT_LOCK_SUPPORT
        owner = 0;
#endif
        pthread_mutex_unlock(&mutex);
    }
    inline void assertLocked()
    {
#ifdef _ASSERT_LOCK_SUPPORT
        assertex(owner == GetCurrentThreadId());
#endif
    }
};
#endif
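
/* Usage sketch (illustrative, not part of the original header): direct enter()/leave() on a
   CriticalSection. The names 'cacheCrit', 'counter' and 'bumpCounter' are hypothetical. The
   section is recursive on both platforms (CRITICAL_SECTION / PTHREAD_MUTEX_RECURSIVE), so
   nested enter() calls from the owning thread are permitted; prefer the scoped CriticalBlock
   below to guarantee leave() on every exit path.

static CriticalSection cacheCrit;
static unsigned counter;

void bumpCounter()
{
    cacheCrit.enter();
    counter++;
    cacheCrit.leave();
}
*/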
/**
 * Critical section delimiter, using scope to define lifetime of
 * the lock on a critical section (parameter).
 * Blocks on construction, unblocks on destruction.
 */
class CriticalBlock
{
    CriticalSection &crit;
public:
    inline CriticalBlock(CriticalSection &c) : crit(c)      { crit.enter(); }
    inline ~CriticalBlock()                                  { crit.leave(); }
};

/**
 * Critical section delimiter, using scope to define lifetime of
 * the lock on a critical section (parameter).
 * Unblocks on construction, blocks on destruction.
 */
class CriticalUnblock
{
    CriticalSection &crit;
public:
    inline CriticalUnblock(CriticalSection &c) : crit(c)    { crit.leave(); }
    inline ~CriticalUnblock()                               { crit.enter(); }
};
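
/* Usage sketch (illustrative, not part of the original header): CriticalBlock holds the
   section for the enclosing scope; CriticalUnblock temporarily releases it, e.g. around a
   potentially blocking call. The names 'cacheCrit', 'lookup' and 'slowFetch' are hypothetical.

static CriticalSection cacheCrit;
void slowFetch();                           // hypothetical blocking call

void lookup()
{
    CriticalBlock block(cacheCrit);         // enter now, leave on scope exit
    // ... inspect the shared cache ...
    {
        CriticalUnblock unblock(cacheCrit); // leave while blocking, re-enter afterwards
        slowFetch();
    }
    // ... the cache is locked again here ...
}
*/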
#ifdef SPINLOCK_USE_MUTEX // for testing

class SpinLock
{
    CriticalSection sect;
public:
    inline void enter()
    {
        sect.enter();
    }
    inline void leave()
    {
        sect.leave();
    }
};

#else

class jlib_decl SpinLock
{
    atomic_t value;
    unsigned nesting;       // not volatile since it is only accessed by one thread at a time
    struct { volatile ThreadId tid; } owner;
    inline SpinLock(SpinLock &value) { assert(false); } // dummy to prevent inadvertent use as block
public:
    inline SpinLock()
    {
        owner.tid = 0;
        nesting = 0;
        atomic_set(&value, 0);
    }
#ifdef _DEBUG
    ~SpinLock()
    {
        if (atomic_read(&value))
            printf("Warning - Owned Spinlock destroyed"); // can't use PrintLog here!
    }
#endif
    inline void enter()
    {
        ThreadId self = GetCurrentThreadId();
#ifdef SPINLOCK_RR_CHECK    // as requested by RKC
        int policy;
        sched_param param;
        if ((pthread_getschedparam(self, &policy, &param) == 0) && (policy == SCHED_RR)) {
            param.sched_priority = 0;
            pthread_setschedparam(self, SCHED_OTHER, &param);   // otherwise will likely re-enter
            assertex(!"SpinLock enter on SCHED_RR thread");
        }
#endif
        if (self == owner.tid) {    // this is atomic
#ifdef _DEBUG
            assertex(atomic_read(&value));
#endif
            nesting++;
            return;
        }
        while (!atomic_cas(&value, 1, 0))
            ThreadYield();
        owner.tid = self;
    }
    inline void leave()
    {
        // It is safe to access nesting - since this thread is the only one that can access it,
        // there is no need for synchronized access
        if (nesting == 0)
        {
            owner.tid = 0;
            // Ensure that no code that precedes the setting of value gets moved after it
            // (unlikely since the code is conditional and owner.tid is also volatile)
            compiler_memory_barrier();
            atomic_set(&value, 0);
        }
        else
            nesting--;
    }
};

#endif

class SpinBlock
{
    SpinLock &lock;
public:
    inline SpinBlock(SpinLock &_lock) : lock(_lock)      { lock.enter(); }
    inline ~SpinBlock()                                   { lock.leave(); }
};

class SpinUnblock
{
    SpinLock &lock;
public:
    inline SpinUnblock(SpinLock &_lock) : lock(_lock)    { lock.leave(); }
    inline ~SpinUnblock()                                 { lock.enter(); }
};
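
/* Usage sketch (illustrative, not part of the original header): SpinBlock scopes a SpinLock
   around a very short critical region. The spin lock is re-entrant on the owning thread (see
   'nesting' above) but spins with ThreadYield() when contended, so it is only appropriate for
   briefly-held locks. The names 'Node', 'listLock', 'head' and 'push' are hypothetical.

struct Node { Node *next; };
static SpinLock listLock;
static Node *head;

void push(Node *n)
{
    SpinBlock block(listLock);  // enter now, leave on scope exit
    n->next = head;
    head = n;
}
*/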
// Non-reentrant spin locks, for use where it is *absolutely* certain that enters are never nested on the same thread
// (the debug version checks and asserts if they are; the release version will deadlock)
#ifdef NRESPINLOCK_USE_SPINLOCK
class jlib_decl NonReentrantSpinLock: public SpinLock
{
};
#else
#ifdef _DEBUG
class jlib_decl NonReentrantSpinLock
{
    atomic_t value;
    struct { volatile ThreadId tid; } owner; // atomic
    inline NonReentrantSpinLock(NonReentrantSpinLock &value) { assert(false); } // dummy to prevent inadvertent use as block
public:
    inline NonReentrantSpinLock()
    {
        owner.tid = 0;
        atomic_set(&value, 0);
    }
    inline void enter()
    {
        ThreadId self = GetCurrentThreadId();
        assertex(self != owner.tid); // check for reentrancy
        while (!atomic_cas(&value, 1, 0))
            ThreadYield();
        owner.tid = self;
    }
    inline void leave()
    {
        assertex(GetCurrentThreadId() == owner.tid); // check for spurious leave
        owner.tid = 0;
        // Ensure that no code that precedes the leave() gets moved after value is cleared
        compiler_memory_barrier();
        atomic_set(&value, 0);
    }
};
#else
class jlib_decl NonReentrantSpinLock
{
    atomic_t value;
    inline NonReentrantSpinLock(NonReentrantSpinLock &value) { assert(false); } // dummy to prevent inadvertent use as block
public:
    inline NonReentrantSpinLock()
    {
        atomic_set(&value, 0);
    }
    inline void enter()
    {
        while (!atomic_cas(&value, 1, 0))
            ThreadYield();
    }
    inline void leave()
    {
        // Ensure that no code that precedes the leave() gets moved after value is cleared
        compiler_memory_barrier();
        atomic_set(&value, 0);
    }
};
#endif
#endif

class NonReentrantSpinBlock
{
    NonReentrantSpinLock &lock;
public:
    inline NonReentrantSpinBlock(NonReentrantSpinLock &_lock) : lock(_lock)      { lock.enter(); }
    inline ~NonReentrantSpinBlock()                                               { lock.leave(); }
};

class NonReentrantSpinUnblock
{
    NonReentrantSpinLock &lock;
public:
    inline NonReentrantSpinUnblock(NonReentrantSpinLock &_lock) : lock(_lock)    { lock.leave(); }
    inline ~NonReentrantSpinUnblock()                                             { lock.enter(); }
};
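
/* Usage sketch (illustrative, not part of the original header): NonReentrantSpinBlock for a
   leaf-level region that never re-enters the lock on the same thread. As noted above, the
   _DEBUG build asserts on nested enters while the release build deadlocks, so keep the guarded
   code free of calls that might take the same lock. The names 'statsLock', 'stats' and
   'recordEvent' are hypothetical.

static NonReentrantSpinLock statsLock;
static unsigned stats[4];

void recordEvent(unsigned kind)
{
    NonReentrantSpinBlock block(statsLock);
    stats[kind & 3]++;
}
*/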
class jlib_decl Monitor: public Mutex
{
    // Like a Java object - you can synchronize on it for a block, wait for a notify on it, or notify on it
    Semaphore *sem;
    int waiting;
    void *last;
public:
    Monitor() : Mutex() { sem = new Semaphore(); waiting = 0; last = NULL; }
//  Monitor(const char *name) : Mutex(name) { sem = new Semaphore(name); waiting = 0; last = NULL; } // not supported
    ~Monitor() { delete sem; }
    void wait();        // only called when locked
    void notify();      // only called when locked
    void notifyAll();   // only called when locked -- notifies all waiting threads
};
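
/* Usage sketch (illustrative, not part of the original header): a producer/consumer pair built
   on Monitor. As the comment above says, wait(), notify() and notifyAll() must only be called
   while the monitor is locked; the Java-style behaviour of wait() releasing the lock while
   blocked is assumed here, since the implementation lives outside this header. All of the
   names below are hypothetical.

struct Item { Item *next; };
static Monitor queueMon;
static Item *pending;

void produce(Item *item)
{
    synchronized block(queueMon);
    item->next = pending;
    pending = item;
    queueMon.notify();          // wake one waiting consumer
}

Item *consume()
{
    synchronized block(queueMon);
    while (!pending)
        queueMon.wait();        // assumed to release the monitor while blocked
    Item *item = pending;
    pending = item->next;
    return item;
}
*/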
class jlib_decl ReadWriteLock
{
    bool lockRead(bool timed, unsigned timeout)
    {
        cs.enter();
        if (writeLocks == 0)
        {
            readLocks++;
            cs.leave();
        }
        else
        {
            readWaiting++;
            cs.leave();
            if (timed)
            {
                if (!readSem.wait(timeout)) {
                    cs.enter();
                    if (!readSem.wait(0)) {
                        readWaiting--;
                        cs.leave();
                        return false;
                    }
                    cs.leave();
                }
            }
            else
                readSem.wait();
            //NB: waiting and locks adjusted before the signal occurs.
        }
        return true;
    }
    bool lockWrite(bool timed, unsigned timeout)
    {
        cs.enter();
        if ((readLocks == 0) && (writeLocks == 0))
        {
            writeLocks++;
            cs.leave();
        }
        else
        {
            writeWaiting++;
            cs.leave();
            if (timed)
            {
                if (!writeSem.wait(timeout)) {
                    cs.enter();
                    if (!writeSem.wait(0)) {
                        writeWaiting--;
                        cs.leave();
                        return false;
                    }
                    cs.leave();
                }
            }
            else
                writeSem.wait();
            //NB: waiting and locks adjusted before the signal occurs.
        }
#ifdef _DEBUG
        exclWriteOwner = GetCurrentThreadId();
#endif
        return true;
    }
public:
    ReadWriteLock()
    {
        readLocks = 0; writeLocks = 0; readWaiting = 0; writeWaiting = 0;
#ifdef _DEBUG
        exclWriteOwner = 0;
#endif
    }
    ~ReadWriteLock()                 { assertex(readLocks == 0 && writeLocks == 0); }
    void lockRead()                  { lockRead(false, 0); }
    void lockWrite()                 { lockWrite(false, 0); }
    bool lockRead(unsigned timeout)  { return lockRead(true, timeout); }
    bool lockWrite(unsigned timeout) { return lockWrite(true, timeout); }
    void unlock()
    {
        cs.enter();
        if (readLocks)
            readLocks--;
        else
        {
            writeLocks--;
#ifdef _DEBUG
            exclWriteOwner = 0;
#endif
        }
        assertex(writeLocks == 0);
        if (readLocks == 0)
        {
            if (readWaiting)
            {
                unsigned numWaiting = readWaiting;
                readWaiting = 0;
                readLocks += numWaiting;
                readSem.signal(numWaiting);
            }
            else if (writeWaiting)
            {
                writeWaiting--;
                writeLocks++;
                writeSem.signal();
            }
        }
        cs.leave();
    }
    bool queryWriteLocked() { return (writeLocks != 0); }
    void unlockRead()       { unlock(); }
    void unlockWrite()      { unlock(); }
    //MORE: May want to use the pthread implementations under linux.
protected:
    CriticalSection cs;
    Semaphore readSem;
    Semaphore writeSem;
    unsigned readLocks;
    unsigned writeLocks;
    unsigned readWaiting;
    unsigned writeWaiting;
#ifdef _DEBUG
    ThreadId exclWriteOwner;
#endif
};

class ReadLockBlock
{
    ReadWriteLock *lock;
public:
    ReadLockBlock(ReadWriteLock &l) : lock(&l)      { lock->lockRead(); }
    ~ReadLockBlock()                                { if (lock) lock->unlockRead(); }
    void clear()
    {
        if (lock)
        {
            lock->unlockRead();
            lock = NULL;
        }
    }
};

class WriteLockBlock
{
    ReadWriteLock *lock;
public:
    WriteLockBlock(ReadWriteLock &l) : lock(&l)     { lock->lockWrite(); }
    ~WriteLockBlock()                               { if (lock) lock->unlockWrite(); }
    void clear()
    {
        if (lock)
        {
            lock->unlockWrite();
            lock = NULL;
        }
    }
};
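
/* Usage sketch (illustrative, not part of the original header): many concurrent readers or a
   single exclusive writer. ReadLockBlock/WriteLockBlock scope the lock for the enclosing
   block; clear() releases it early. All of the names below are hypothetical.

struct Entry;
Entry *lookupEntry(const char *key);    // hypothetical helpers
void addEntry(Entry *e);

static ReadWriteLock tableLock;

Entry *find(const char *key)
{
    ReadLockBlock block(tableLock);     // shared with other readers
    return lookupEntry(key);
}

void insert(Entry *e)
{
    WriteLockBlock block(tableLock);    // exclusive
    addEntry(e);
}
*/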
class Barrier
{
    CriticalSection crit;
    int limit, remaining, waiting;
    Semaphore sem;
public:
    Barrier(int _limit) { init(_limit); }
    Barrier()           { init(0); }
    void init(int _limit)
    {
        waiting = 0;
        limit = _limit;
        remaining = limit;
    }
    void wait() // blocks until 'limit' barrier points are entered.
    {
        CriticalBlock block(crit);
        while (remaining == 0) {
            if (waiting) {
                crit.leave();
                ThreadYield();
                crit.enter();
            }
            else
                remaining = limit;
        }
        remaining--;
        if (remaining == 0)
            sem.signal(waiting);
        else if (remaining > 0) {
            waiting++;
            crit.leave();
            sem.wait();
            crit.enter();
            waiting--;
        }
    }
    void abort()
    {
        CriticalBlock block(crit);
        remaining = -1;
        sem.signal(waiting);
    }
    void cancel(int n, bool remove) // cancel n barrier points from this instance; if remove=true, reduces the barrier width
    {
        CriticalBlock block(crit);
        while (remaining == 0) {
            if (waiting) {
                crit.leave();
                ThreadYield();
                crit.enter();
            }
            else
                remaining = limit;
        }
        assertex(remaining >= n);
        remaining -= n;
        if (remaining == 0)
            sem.signal(waiting);
        if (remove)
            limit -= n;
    }
};
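
/* Usage sketch (illustrative, not part of the original header): worker threads meet at the
   barrier between phases; each call to wait() blocks until 'limit' barrier points have been
   entered, then all waiters are released together. All of the names below are hypothetical.

static Barrier phaseBarrier(4);     // four participating threads

void workerLoop()
{
    doPhaseOne();                   // hypothetical per-thread work
    phaseBarrier.wait();            // wait for all four workers to finish phase one
    doPhaseTwo();
}
*/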
// checked versions of critical block and readwrite blocks - report deadlocks
#define USECHECKEDCRITICALSECTIONS
#ifdef USECHECKEDCRITICALSECTIONS

typedef Mutex CheckedCriticalSection;
void jlib_decl checkedCritEnter(CheckedCriticalSection &crit, unsigned timeout, const char *fname, unsigned lnum);
void jlib_decl checkedCritLeave(CheckedCriticalSection &crit);

class jlib_decl CheckedCriticalBlock
{
    CheckedCriticalSection &crit;
public:
    CheckedCriticalBlock(CheckedCriticalSection &c, unsigned timeout, const char *fname, unsigned lnum);
    ~CheckedCriticalBlock()
    {
        crit.unlock();
    }
};

class jlib_decl CheckedCriticalUnblock
{
    CheckedCriticalSection &crit;
    const char *fname;
    unsigned lnum;
    unsigned timeout;
public:
    CheckedCriticalUnblock(CheckedCriticalSection &c, unsigned _timeout, const char *_fname, unsigned _lnum)
        : crit(c)
    {
        timeout = _timeout;
        fname = _fname;
        lnum = _lnum;
        crit.unlock();
    }
    ~CheckedCriticalUnblock();
};

#define CHECKEDCRITICALBLOCK(sect,timeout)   CheckedCriticalBlock glue(block,__LINE__)(sect,timeout,__FILE__,__LINE__)
#define CHECKEDCRITICALUNBLOCK(sect,timeout) CheckedCriticalUnblock glue(unblock,__LINE__)(sect,timeout,__FILE__,__LINE__)
#define CHECKEDCRITENTER(sect,timeout)       checkedCritEnter(sect,timeout,__FILE__,__LINE__)
#define CHECKEDCRITLEAVE(sect)               checkedCritLeave(sect)

class jlib_decl CheckedReadLockBlock
{
    ReadWriteLock &lock;
public:
    CheckedReadLockBlock(ReadWriteLock &l, unsigned timeout, const char *fname, unsigned lnum);
    ~CheckedReadLockBlock() { lock.unlockRead(); }
};

class jlib_decl CheckedWriteLockBlock
{
    ReadWriteLock &lock;
public:
    CheckedWriteLockBlock(ReadWriteLock &l, unsigned timeout, const char *fname, unsigned lnum);
    ~CheckedWriteLockBlock() { lock.unlockWrite(); }
};

void jlib_decl checkedReadLockEnter(ReadWriteLock &l, unsigned timeout, const char *fname, unsigned lnum);
void jlib_decl checkedWriteLockEnter(ReadWriteLock &l, unsigned timeout, const char *fname, unsigned lnum);

#define CHECKEDREADLOCKBLOCK(l,timeout)  CheckedReadLockBlock glue(block,__LINE__)(l,timeout,__FILE__,__LINE__)
#define CHECKEDWRITELOCKBLOCK(l,timeout) CheckedWriteLockBlock glue(block,__LINE__)(l,timeout,__FILE__,__LINE__)
#define CHECKEDREADLOCKENTER(l,timeout)  checkedReadLockEnter(l,timeout,__FILE__,__LINE__)
#define CHECKEDWRITELOCKENTER(l,timeout) checkedWriteLockEnter(l,timeout,__FILE__,__LINE__)

#else

#define CheckedCriticalSection  CriticalSection
#define CheckedCriticalBlock    CriticalBlock
#define CheckedCriticalUnblock  CriticalUnblock
#define CHECKEDCRITENTER(sect,timeout)  (sect).enter()
#define CHECKEDCRITLEAVE(sect)          (sect).leave()
#define CHECKEDCRITICALBLOCK(sect,timeout)   CheckedCriticalBlock glue(block,__LINE__)(sect)
#define CHECKEDCRITICALUNBLOCK(sect,timeout) CheckedCriticalUnblock glue(unblock,__LINE__)(sect)
#define CHECKEDREADLOCKBLOCK(l,timeout)  ReadLockBlock glue(block,__LINE__)(l)
#define CHECKEDWRITELOCKBLOCK(l,timeout) WriteLockBlock glue(block,__LINE__)(l)
#define CHECKEDREADLOCKENTER(l,timeout)  (l).lockRead()
#define CHECKEDWRITELOCKENTER(l,timeout) (l).lockWrite()

#endif
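
/* Usage sketch (illustrative, not part of the original header): the CHECKED* macros take a
   timeout and record __FILE__/__LINE__, so that a failure to obtain the lock within the
   timeout can be reported (per the comment above, these checked variants exist to report
   deadlocks). With USECHECKEDCRITICALSECTIONS undefined they fall back to the plain, untimed
   blocks. The names 'jobCrit' and 'scheduleJob' are hypothetical.

static CheckedCriticalSection jobCrit;

void scheduleJob()
{
    CHECKEDCRITICALBLOCK(jobCrit, 10000);   // complain if the lock is not obtained within 10s
    // ... manipulate the shared job queue ...
}
*/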
class CSingletonLock // a lock that will generally only be locked once (for locking singleton objects - see the usage example below)
{
    volatile bool needlock;
    CriticalSection sect;
public:
    inline CSingletonLock()
    {
        needlock = true;
    }
    inline bool lock()
    {
        if (needlock) {
            sect.enter();
            //prevent the compiler from moving any code before the critical section (unlikely)
            compiler_memory_barrier();
            return true;
        }
        //Prevent the value of the protected object from being evaluated before the condition
        compiler_memory_barrier();
        return false;
    }
    inline void unlock()
    {
        //Ensure that no code that precedes unlock() gets moved to after needlock is cleared
        compiler_memory_barrier();
        needlock = false;
        sect.leave();
    }
};

/* Usage example

void *get()
{
    static void *sobj = NULL;
    static CSingletonLock slock;
    if (slock.lock()) {
        if (!sobj)              // required
            sobj = createSObj();
        slock.unlock();
    }
    return sobj;
}
*/

#endif