jmutex.hpp

/*##############################################################################
HPCC SYSTEMS software Copyright (C) 2012 HPCC Systems.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
############################################################################## */

#ifndef __JMUTEX__
#define __JMUTEX__

#include <assert.h>
#include "jexpdef.hpp"
#include "jiface.hpp"
#include "jsem.hpp"

extern jlib_decl void ThreadYield();

#ifdef _DEBUG
//#define SPINLOCK_USE_MUTEX    // for testing
//#define SPINLOCK_RR_CHECK     // checks for realtime threads
#endif

#ifdef SPINLOCK_USE_MUTEX
#define NRESPINLOCK_USE_SPINLOCK
#endif

#if (_WIN32 || ((__GNUC__ < 4 || (__GNUC_MINOR__ < 1 && __GNUC__ == 4)) || (!defined(__x86_64__) && !defined(__i386__))))
#define NRESPINLOCK_USE_SPINLOCK
#endif

#ifdef _WIN32

class jlib_decl Mutex
{
protected:
    Mutex(const char *name)
    {
        mutex = CreateMutex(NULL, FALSE, name);
        assertex(mutex);
        lockcount = 0;
        owner = 0;
    }
public:
    Mutex()
    {
        mutex = CreateMutex(NULL, FALSE, NULL);
        lockcount = 0;
        owner = 0;
    }
    ~Mutex()
    {
        if (owner != 0)
            printf("Warning - Owned mutex destroyed");  // can't use PrintLog here!
        CloseHandle(mutex);
    }
    void lock()
    {
        WaitForSingleObject(mutex, INFINITE);
        if (lockcount) {
            if (owner != GetCurrentThreadId())  // probably only possible after an unhandled thread exception
                lockcount = 0;                  // (don't assert, as the unhandled error may get lost)
        }
        lockcount++;
        owner = GetCurrentThreadId();
    }
    bool lockWait(unsigned timeout)
    {
        if (WaitForSingleObject(mutex, (long)timeout) != WAIT_OBJECT_0)
            return false;
        if (lockcount) {
            if (owner != GetCurrentThreadId())  // probably only possible after an unhandled thread exception
                lockcount = 0;                  // (don't assert, as the unhandled error may get lost)
        }
        lockcount++;
        owner = GetCurrentThreadId();
        return true;
    }
    void unlock()
    {
        assertex(owner == GetCurrentThreadId());
        --lockcount;
        if (lockcount == 0)
            owner = 0;
        ReleaseMutex(mutex);
    }
protected:
    MutexId mutex;
    ThreadId owner;
    int unlockAll()
    {
        assertex(owner == GetCurrentThreadId());
        assertex(lockcount);
        int ret = lockcount;
        int lc = ret;
        while (lc--)
            unlock();
        return ret;
    }
    void lockAll(int count)
    {
        while (count--)
            lock();
    }
private:
    int lockcount;
};

class jlib_decl NamedMutex: public Mutex
{
public:
    NamedMutex(const char *name)
        : Mutex(name)
    {
    }
};

#else // posix

class jlib_decl Mutex
{
public:
    Mutex();
//  Mutex(const char *name);    // not supported
    ~Mutex();
    void lock();
    bool lockWait(unsigned timeout);
    void unlock();
protected:
    MutexId mutex;
    ThreadId owner;
    int unlockAll();
    void lockAll(int);
private:
    int lockcount;
    pthread_cond_t lock_free;
};

class jlib_decl NamedMutex
{
public:
    NamedMutex(const char *name);
    ~NamedMutex();
    void lock();
    bool lockWait(unsigned timeout);
    void unlock();
private:
    Mutex threadmutex;
    char *mutexfname;
};

#endif

class jlib_decl synchronized
{
private:
    Mutex &mutex;
    void throwLockException(unsigned timeout);
public:
    synchronized(Mutex &m) : mutex(m) { mutex.lock(); };
    synchronized(Mutex &m, unsigned timeout) : mutex(m) { if (!mutex.lockWait(timeout)) throwLockException(timeout); }
    inline ~synchronized() { mutex.unlock(); };
};
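
/* Illustrative usage sketch (not part of the original header): scope-based locking with
   `synchronized`. The Registry class, its Mutex member and doUpdate() body are hypothetical.

class Registry
{
    Mutex mutex;
public:
    void doUpdate()
    {
        synchronized block(mutex);      // locks here; timed form: synchronized block(mutex, 5000)
        // ... update shared state ...
    }                                   // unlocks automatically when block goes out of scope
};
*/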
#ifdef _WIN32

extern "C" {
WINBASEAPI
BOOL
WINAPI
TryEnterCriticalSection(
    IN OUT LPCRITICAL_SECTION lpCriticalSection
    );
};

class jlib_decl CriticalSection
{
    // lightweight mutex within a single process
private:
    CRITICAL_SECTION flags;
    inline CriticalSection(CriticalSection &value) { assert(false); }   // dummy to prevent inadvertent use as block
public:
    inline CriticalSection() { InitializeCriticalSection(&flags); };
    inline ~CriticalSection() { DeleteCriticalSection(&flags); };
    inline void enter() { EnterCriticalSection(&flags); };
    inline void leave() { LeaveCriticalSection(&flags); };
#ifdef ENABLE_CHECKEDCRITICALSECTIONS
    bool wouldBlock() { if (TryEnterCriticalSection(&flags)) { leave(); return false; } return true; }  // debug only
#endif
};

#else

/**
 * Mutex locking wrapper. Use enter/leave to lock/unlock.
 */
class CriticalSection
{
private:
    MutexId mutex;
    CriticalSection (const CriticalSection &);
public:
    inline CriticalSection()
    {
        pthread_mutexattr_t attr;
        pthread_mutexattr_init(&attr);
#ifdef _DEBUG
        verifyex(pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE) == 0);   // verify the attribute is supported
#else
        pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
#endif
        pthread_mutex_init(&mutex, &attr);
        pthread_mutexattr_destroy(&attr);
    }
    inline ~CriticalSection()
    {
        pthread_mutex_destroy(&mutex);
    }
    inline void enter()
    {
        pthread_mutex_lock(&mutex);
    }
    inline void leave()
    {
        pthread_mutex_unlock(&mutex);
    }
};

#endif

/**
 * Critical section delimiter, using scope to define the lifetime of
 * the lock on a critical section (parameter).
 * Blocks on construction, unblocks on destruction.
 */
class CriticalBlock
{
    CriticalSection &crit;
public:
    inline CriticalBlock(CriticalSection &c) : crit(c)      { crit.enter(); }
    inline ~CriticalBlock()                                 { crit.leave(); }
};

/**
 * Critical section delimiter, using scope to define the lifetime of
 * the lock on a critical section (parameter).
 * Unblocks on construction, blocks on destruction.
 */
class CriticalUnblock
{
    CriticalSection &crit;
public:
    inline CriticalUnblock(CriticalSection &c) : crit(c)    { crit.leave(); }
    inline ~CriticalUnblock()                               { crit.enter(); }
};
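
/* Illustrative sketch (not part of the original header): pairing CriticalBlock with
   CriticalUnblock to temporarily release a held section around slow work.
   The Cache type, crit section and slowLookup() call are hypothetical.

void fetch(Cache &cache, CriticalSection &crit, const char *key)
{
    CriticalBlock block(crit);          // section held for the body of fetch()
    if (!cache.has(key))
    {
        CriticalUnblock unblock(crit);  // released while the slow work runs
        // ... slowLookup(key) without holding the section ...
    }                                   // re-acquired here, before touching the cache again
    // ... use cache under the section ...
}
*/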
#ifdef SPINLOCK_USE_MUTEX // for testing

class SpinLock
{
    CriticalSection sect;
public:
    inline void enter()
    {
        sect.enter();
    }
    inline void leave()
    {
        sect.leave();
    }
};

#else

class jlib_decl SpinLock
{
    atomic_t value;
    unsigned nesting;       // not volatile since it is only accessed by one thread at a time
    struct { volatile ThreadId tid; } owner;
    inline SpinLock(SpinLock &value) { assert(false); }     // dummy to prevent inadvertent use as block
public:
    inline SpinLock()
    {
        owner.tid = 0;
        nesting = 0;
        atomic_set(&value, 0);
    }
#ifdef _DEBUG
    ~SpinLock()
    {
        if (atomic_read(&value))
            printf("Warning - Owned Spinlock destroyed");   // can't use PrintLog here!
    }
#endif
    inline void enter()
    {
        ThreadId self = GetCurrentThreadId();
#ifdef SPINLOCK_RR_CHECK    // as requested by RKC
        int policy;
        sched_param param;
        if ((pthread_getschedparam(self, &policy, &param) == 0) && (policy == SCHED_RR)) {
            param.sched_priority = 0;
            pthread_setschedparam(self, SCHED_OTHER, &param);   // otherwise will likely re-enter
            assertex(!"SpinLock enter on SCHED_RR thread");
        }
#endif
        if (self == owner.tid) {        // this is atomic
#ifdef _DEBUG
            assertex(atomic_read(&value));
#endif
            nesting++;
            return;
        }
        while (!atomic_cas(&value, 1, 0))
            ThreadYield();
        owner.tid = self;
    }
    inline void leave()
    {
        // It is safe to access nesting without synchronization, since this thread
        // is the only one that can access it
        if (nesting == 0)
        {
            owner.tid = 0;
            // Ensure that no code that precedes the setting of value gets moved after it
            // (unlikely since the code is conditional and owner.tid is also volatile)
            compiler_memory_barrier();
            atomic_set(&value, 0);
        }
        else
            nesting--;
    }
};

#endif

class SpinBlock
{
    SpinLock &lock;
public:
    inline SpinBlock(SpinLock &_lock) : lock(_lock)     { lock.enter(); }
    inline ~SpinBlock()                                 { lock.leave(); }
};

class SpinUnblock
{
    SpinLock &lock;
public:
    inline SpinUnblock(SpinLock &_lock) : lock(_lock)   { lock.leave(); }
    inline ~SpinUnblock()                               { lock.enter(); }
};
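
/* Illustrative sketch (not part of the original header): SpinBlock guarding a very short
   critical region. counterLock, counter and nextId() are hypothetical.

static SpinLock counterLock;
static unsigned counter;

inline unsigned nextId()
{
    SpinBlock block(counterLock);   // spins (yielding) until acquired; re-entrant on the same thread
    return ++counter;               // keep the held region this short - a spin lock is not for long waits
}
*/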
// Non-reentrant spin locks, for use where it is *absolutely* certain that enters are not nested on the same thread
// (the debug version checks and asserts if they are; the release version will deadlock)

#ifdef NRESPINLOCK_USE_SPINLOCK
class jlib_decl NonReentrantSpinLock: public SpinLock
{
};
#else
#ifdef _DEBUG
class jlib_decl NonReentrantSpinLock
{
    atomic_t value;
    struct { volatile ThreadId tid; } owner;    // atomic
    inline NonReentrantSpinLock(NonReentrantSpinLock &value) { assert(false); }     // dummy to prevent inadvertent use as block
public:
    inline NonReentrantSpinLock()
    {
        owner.tid = 0;
        atomic_set(&value, 0);
    }
    inline void enter()
    {
        ThreadId self = GetCurrentThreadId();
        assertex(self != owner.tid);    // check for reentrancy
        while (!atomic_cas(&value, 1, 0))
            ThreadYield();
        owner.tid = self;
    }
    inline void leave()
    {
        assertex(GetCurrentThreadId() == owner.tid);    // check for spurious leave
        owner.tid = 0;
        // Ensure that no code that precedes the leave() gets moved after value is cleared
        compiler_memory_barrier();
        atomic_set(&value, 0);
    }
};
#else
class jlib_decl NonReentrantSpinLock
{
    atomic_t value;
    inline NonReentrantSpinLock(NonReentrantSpinLock &value) { assert(false); }     // dummy to prevent inadvertent use as block
public:
    inline NonReentrantSpinLock()
    {
        atomic_set(&value, 0);
    }
    inline void enter()
    {
        while (!atomic_cas(&value, 1, 0))
            ThreadYield();
    }
    inline void leave()
    {
        // Ensure that no code that precedes the leave() gets moved after value is cleared
        compiler_memory_barrier();
        atomic_set(&value, 0);
    }
};
#endif
#endif

class NonReentrantSpinBlock
{
    NonReentrantSpinLock &lock;
public:
    inline NonReentrantSpinBlock(NonReentrantSpinLock &_lock) : lock(_lock)     { lock.enter(); }
    inline ~NonReentrantSpinBlock()                                             { lock.leave(); }
};

class NonReentrantSpinUnblock
{
    NonReentrantSpinLock &lock;
public:
    inline NonReentrantSpinUnblock(NonReentrantSpinLock &_lock) : lock(_lock)   { lock.leave(); }
    inline ~NonReentrantSpinUnblock()                                           { lock.enter(); }
};
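
/* Illustrative sketch (not part of the original header): NonReentrantSpinLock is cheaper than
   SpinLock because it tracks no nesting count, so the protected region must never call back into
   code that takes the same lock. Item, freeList, freeListLock and allocItem() are hypothetical.

static NonReentrantSpinLock freeListLock;
static Item *freeList;

inline Item *allocItem()
{
    NonReentrantSpinBlock block(freeListLock);  // debug builds assert if this thread already holds it
    Item *head = freeList;
    if (head)
        freeList = head->next;
    return head;
}
*/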
class jlib_decl Monitor: public Mutex
{
    // Like a Java object - you can synchronize on it for a block, wait for a notify on it, or notify on it
    Semaphore *sem;
    int waiting;
    void *last;
public:
    Monitor() : Mutex() { sem = new Semaphore(); waiting = 0; last = NULL; }
//  Monitor(const char *name) : Mutex(name) { sem = new Semaphore(name); waiting = 0; last = NULL; }    // not supported
    ~Monitor() { delete sem; };
    void wait();        // only called when locked
    void notify();      // only called when locked
    void notifyAll();   // only called when locked -- notifies all waiting threads
};
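
/* Illustrative sketch (not part of the original header): a producer/consumer pairing on a
   Monitor - wait() and notify() must only be called while the monitor is locked.
   The Queue and Item types are hypothetical.

void consume(Monitor &monitor, Queue &queue)
{
    synchronized block(monitor);    // Monitor is a Mutex, so synchronized works on it
    while (queue.empty())
        monitor.wait();             // releases the lock while waiting, reacquires before returning
    queue.dequeue();
}

void produce(Monitor &monitor, Queue &queue, Item *item)
{
    synchronized block(monitor);
    queue.enqueue(item);
    monitor.notify();               // wakes one waiter; notifyAll() wakes them all
}
*/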
class jlib_decl ReadWriteLock
{
    bool lockRead(bool timed, unsigned timeout) {
        cs.enter();
        if (writeLocks == 0)
        {
            readLocks++;
            cs.leave();
        }
        else
        {
            readWaiting++;
            cs.leave();
            if (timed)
            {
                if (!readSem.wait(timeout)) {
                    cs.enter();
                    if (!readSem.wait(0)) {
                        readWaiting--;
                        cs.leave();
                        return false;
                    }
                    cs.leave();
                }
            }
            else
                readSem.wait();
            //NB: waiting and locks adjusted before the signal occurs.
        }
        return true;
    }
    bool lockWrite(bool timed, unsigned timeout) {
        cs.enter();
        if ((readLocks == 0) && (writeLocks == 0))
        {
            writeLocks++;
            cs.leave();
        }
        else
        {
            writeWaiting++;
            cs.leave();
            if (timed)
            {
                if (!writeSem.wait(timeout)) {
                    cs.enter();
                    if (!writeSem.wait(0)) {
                        writeWaiting--;
                        cs.leave();
                        return false;
                    }
                    cs.leave();
                }
            }
            else
                writeSem.wait();
            //NB: waiting and locks adjusted before the signal occurs.
        }
#ifdef _DEBUG
        exclWriteOwner = GetCurrentThreadId();
#endif
        return true;
    }
public:
    ReadWriteLock()
    {
        readLocks = 0; writeLocks = 0; readWaiting = 0; writeWaiting = 0;
#ifdef _DEBUG
        exclWriteOwner = 0;
#endif
    }
    ~ReadWriteLock() { assertex(readLocks == 0 && writeLocks == 0); }
    void lockRead() { lockRead(false, 0); }
    void lockWrite() { lockWrite(false, 0); }
    bool lockRead(unsigned timeout) { return lockRead(true, timeout); }
    bool lockWrite(unsigned timeout) { return lockWrite(true, timeout); }
    void unlock() {
        cs.enter();
        if (readLocks) readLocks--;
        else
        {
            writeLocks--;
#ifdef _DEBUG
            exclWriteOwner = 0;
#endif
        }
        assertex(writeLocks == 0);
        if (readLocks == 0)
        {
            if (readWaiting)
            {
                unsigned numWaiting = readWaiting;
                readWaiting = 0;
                readLocks += numWaiting;
                readSem.signal(numWaiting);
            }
            else if (writeWaiting)
            {
                writeWaiting--;
                writeLocks++;
                writeSem.signal();
            }
        }
        cs.leave();
    }
    bool queryWriteLocked() { return (writeLocks != 0); }
    void unlockRead() { unlock(); }
    void unlockWrite() { unlock(); }
    //MORE: May want to use the pthread implementations under linux.
protected:
    CriticalSection cs;
    Semaphore readSem;
    Semaphore writeSem;
    unsigned readLocks;
    unsigned writeLocks;
    unsigned readWaiting;
    unsigned writeWaiting;
#ifdef _DEBUG
    ThreadId exclWriteOwner;
#endif
};

class ReadLockBlock
{
    ReadWriteLock *lock;
public:
    ReadLockBlock(ReadWriteLock &l) : lock(&l)      { lock->lockRead(); }
    ~ReadLockBlock()                                { if (lock) lock->unlockRead(); }
    void clear()
    {
        if (lock)
        {
            lock->unlockRead();
            lock = NULL;
        }
    }
};

class WriteLockBlock
{
    ReadWriteLock *lock;
public:
    WriteLockBlock(ReadWriteLock &l) : lock(&l)     { lock->lockWrite(); }
    ~WriteLockBlock()                               { if (lock) lock->unlockWrite(); }
    void clear()
    {
        if (lock)
        {
            lock->unlockWrite();
            lock = NULL;
        }
    }
};
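
/* Illustrative sketch (not part of the original header): many concurrent readers, one writer.
   The Table/Entry types and their methods are hypothetical.

static ReadWriteLock tableLock;

const Entry *find(const Table &table, const char *key)
{
    ReadLockBlock block(tableLock);     // shared - other readers may hold the lock at the same time
    return table.find(key);
}

void insert(Table &table, Entry *entry)
{
    WriteLockBlock block(tableLock);    // exclusive - waits for all readers to drain
    table.add(entry);
}                                       // clear() can be used to release either block before scope exit
*/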
class Barrier
{
    CriticalSection crit;
    int limit, remaining, waiting;
    Semaphore sem;
public:
    Barrier(int _limit) { init(_limit); }
    Barrier() { init(0); }
    void init(int _limit)
    {
        waiting = 0;
        limit = _limit;
        remaining = limit;
    }
    void wait()     // blocks until 'limit' barrier points are entered.
    {
        CriticalBlock block(crit);
        while (remaining == 0) {
            if (waiting) {
                crit.leave();
                ThreadYield();
                crit.enter();
            }
            else
                remaining = limit;
        }
        remaining--;
        if (remaining == 0)
            sem.signal(waiting);
        else if (remaining > 0) {
            waiting++;
            crit.leave();
            sem.wait();
            crit.enter();
            waiting--;
        }
    }
    void abort()
    {
        CriticalBlock block(crit);
        remaining = -1;
        sem.signal(waiting);
    }
    void cancel(int n, bool remove)     // cancel n barrier points from this instance; if remove=true, reduces the barrier width
    {
        CriticalBlock block(crit);
        while (remaining == 0) {
            if (waiting) {
                crit.leave();
                ThreadYield();
                crit.enter();
            }
            else
                remaining = limit;
        }
        assertex(remaining >= n);
        remaining -= n;
        if (remaining == 0)
            sem.signal(waiting);
        if (remove)
            limit -= n;
    }
};
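
/* Illustrative sketch (not part of the original header): worker threads meeting at a Barrier
   between phases. The worker loop and doPhase1()/doPhase2() calls are hypothetical.

void workerThreadMain(Barrier &barrier)     // barrier constructed elsewhere with the worker count
{
    doPhase1();
    barrier.wait();     // blocks until all 'limit' threads have reached this point
    doPhase2();
    barrier.wait();     // the barrier resets itself, so it can be reused for each phase
}
*/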
// Checked versions of critical blocks and read/write blocks - report deadlocks
#define USECHECKEDCRITICALSECTIONS
#ifdef USECHECKEDCRITICALSECTIONS

typedef Mutex CheckedCriticalSection;
void jlib_decl checkedCritEnter(CheckedCriticalSection &crit, unsigned timeout, const char *fname, unsigned lnum);
void jlib_decl checkedCritLeave(CheckedCriticalSection &crit);

class jlib_decl CheckedCriticalBlock
{
    CheckedCriticalSection &crit;
public:
    CheckedCriticalBlock(CheckedCriticalSection &c, unsigned timeout, const char *fname, unsigned lnum);
    ~CheckedCriticalBlock()
    {
        crit.unlock();
    }
};

class jlib_decl CheckedCriticalUnblock
{
    CheckedCriticalSection &crit;
    const char *fname;
    unsigned lnum;
    unsigned timeout;
public:
    CheckedCriticalUnblock(CheckedCriticalSection &c, unsigned _timeout, const char *_fname, unsigned _lnum)
        : crit(c)
    {
        timeout = _timeout;
        fname = _fname;
        lnum = _lnum;
        crit.unlock();
    }
    ~CheckedCriticalUnblock();
};

#define CHECKEDCRITICALBLOCK(sect,timeout)      CheckedCriticalBlock glue(block,__LINE__)(sect,timeout,__FILE__,__LINE__)
#define CHECKEDCRITICALUNBLOCK(sect,timeout)    CheckedCriticalUnblock glue(unblock,__LINE__)(sect,timeout,__FILE__,__LINE__)
#define CHECKEDCRITENTER(sect,timeout)          checkedCritEnter(sect,timeout,__FILE__,__LINE__)
#define CHECKEDCRITLEAVE(sect)                  checkedCritLeave(sect)

class jlib_decl CheckedReadLockBlock
{
    ReadWriteLock &lock;
public:
    CheckedReadLockBlock(ReadWriteLock &l, unsigned timeout, const char *fname, unsigned lnum);
    ~CheckedReadLockBlock() { lock.unlockRead(); }
};

class jlib_decl CheckedWriteLockBlock
{
    ReadWriteLock &lock;
public:
    CheckedWriteLockBlock(ReadWriteLock &l, unsigned timeout, const char *fname, unsigned lnum);
    ~CheckedWriteLockBlock() { lock.unlockWrite(); }
};

void jlib_decl checkedReadLockEnter(ReadWriteLock &l, unsigned timeout, const char *fname, unsigned lnum);
void jlib_decl checkedWriteLockEnter(ReadWriteLock &l, unsigned timeout, const char *fname, unsigned lnum);

#define CHECKEDREADLOCKBLOCK(l,timeout)     CheckedReadLockBlock glue(block,__LINE__)(l,timeout,__FILE__,__LINE__)
#define CHECKEDWRITELOCKBLOCK(l,timeout)    CheckedWriteLockBlock glue(block,__LINE__)(l,timeout,__FILE__,__LINE__)
#define CHECKEDREADLOCKENTER(l,timeout)     checkedReadLockEnter(l,timeout,__FILE__,__LINE__)
#define CHECKEDWRITELOCKENTER(l,timeout)    checkedWriteLockEnter(l,timeout,__FILE__,__LINE__)

#else

#define CheckedCriticalSection  CriticalSection
#define CheckedCriticalBlock    CriticalBlock
#define CheckedCriticalUnblock  CriticalUnblock
#define CHECKEDCRITENTER(sect,timeout)      (sect).enter()
#define CHECKEDCRITLEAVE(sect)              (sect).leave()
#define CHECKEDCRITICALBLOCK(sect,timeout)  CheckedCriticalBlock glue(block,__LINE__)(sect)
#define CHECKEDCRITICALUNBLOCK(sect,timeout) CheckedCriticalUnblock glue(unblock,__LINE__)(sect)
#define CHECKEDREADLOCKBLOCK(l,timeout)     ReadLockBlock glue(block,__LINE__)(l)
#define CHECKEDWRITELOCKBLOCK(l,timeout)    WriteLockBlock glue(block,__LINE__)(l)
#define CHECKEDREADLOCKENTER(l,timeout)     (l).lockRead()
#define CHECKEDWRITELOCKENTER(l,timeout)    (l).lockWrite()

#endif
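
/* Illustrative sketch (not part of the original header): the CHECKED* macros capture
   __FILE__/__LINE__ so a lock that cannot be acquired within 'timeout' can be reported
   with its source location instead of deadlocking silently. stateCrit and updateState()
   are hypothetical.

static CheckedCriticalSection stateCrit;

void updateState()
{
    CHECKEDCRITICALBLOCK(stateCrit, 10000);         // report if not acquired within 10s
    // ... mutate shared state ...
    {
        CHECKEDCRITICALUNBLOCK(stateCrit, 10000);   // temporarily release around slow work
        // ... slow work without the section held ...
    }
}
*/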
class CSingletonLock    // a lock that will generally only be locked once (for locking singleton objects - see the usage example below)
{
    volatile bool needlock;
    CriticalSection sect;
public:
    inline CSingletonLock()
    {
        needlock = true;
    }
    inline bool lock()
    {
        if (needlock) {
            sect.enter();
            // prevent the compiler from moving any code before the critical section (unlikely)
            compiler_memory_barrier();
            return true;
        }
        // prevent the value of the protected object from being evaluated before the condition
        compiler_memory_barrier();
        return false;
    }
    inline void unlock()
    {
        // Ensure that no code that precedes unlock() gets moved to after needlock being cleared.
        compiler_memory_barrier();
        needlock = false;
        sect.leave();
    }
};

/* Usage example

void *get()
{
    static void *sobj = NULL;
    static CSingletonLock slock;
    if (slock.lock()) {
        if (!sobj)          // required
            sobj = createSObj();
        slock.unlock();
    }
    return sobj;
}

*/

#endif