Fix bug with spin locks on old versions of gcc

Signed-off-by: Gavin Halliday <gavin.halliday@lexisnexis.com>
Gavin Halliday, 13 years ago
Parent commit: 45b9e19c71
3 changed files with 25 additions and 2 deletions
  1. system/jlib/jatomic.hpp (+9 -1)
  2. system/jlib/jiface.cpp (+4 -0)
  3. system/jlib/jmutex.hpp (+12 -1)
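
The bug in question: jlib spin locks are released with a plain store to a volatile lock word (atomic_set), and older gcc versions may reorder ordinary, non-volatile stores made inside the critical section past that release, letting another thread acquire the lock and read stale values. The new compiler_memory_barrier() macro pins those stores in place before each release. A minimal sketch of the failure mode and the fix, assuming gcc's inline asm extension; lockWord, sharedData and update_and_unlock are illustrative names, not jlib identifiers:

    // Minimal sketch of the reordering this commit guards against (illustrative
    // names, not jlib code). compiler_memory_barrier() is a compiler-only fence:
    // on gcc an empty asm with a "memory" clobber forbids moving memory accesses
    // across it, but emits no CPU instruction.
    #define compiler_memory_barrier() asm volatile("" : : : "memory")

    static volatile long lockWord = 1;   // spin-lock word, held by this thread
    static int sharedData = 0;           // data the lock protects (non-volatile)

    void update_and_unlock(int v)
    {
        sharedData = v;              // ordinary store: volatile semantics do not
                                     // order it against the volatile store below,
                                     // so an old gcc may sink it past the unlock
        compiler_memory_barrier();   // the fix: keep the store before the release
        lockWord = 0;                // release the spin lock
    }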

system/jlib/jatomic.hpp (+9 -1)

@@ -23,6 +23,8 @@
 
 #ifdef _WIN32
 
+#include <intrin.h>
+
 extern "C"
 {
    LONG  __cdecl _InterlockedIncrement(LONG volatile *Addend);
@@ -55,8 +57,10 @@ typedef volatile long atomic_t;
 #else
 #define atomic_cas(v,newvalue,expectedvalue)    (InterlockedCompareExchange(v,newvalue,expectedvalue)==expectedvalue)
 #define atomic_cas_ptr(v, newvalue,expectedvalue)       (InterlockedCompareExchangePointer(v,newvalue,expectedvalue)==expectedvalue)
-#endif      
+#endif
 
+//Used to prevent a compiler reordering volatile and non-volatile loads/stores
+#define compiler_memory_barrier()           { _ReadWriteBarrier(); }
 
 #elif defined(__GNUC__)
 
@@ -128,6 +132,8 @@ static __inline__ bool atomic_cas_ptr(void **v,void *newvalue, void *expectedval
     return __sync_bool_compare_and_swap((memsize_t *)v, (memsize_t)expectedvalue, (memsize_t)newvalue);
 }
 
+#define compiler_memory_barrier() asm volatile("": : :"memory")
+
 #else // other unix
 
 //Truely awful implementations of atomic operations...
@@ -140,6 +146,7 @@ int jlib_decl poor_atomic_add_exchange(atomic_t * v, int i);
 bool jlib_decl poor_atomic_cas(atomic_t * v, int newvalue, int expectedvalue);
 void jlib_decl *poor_atomic_xchg_ptr(void *p, void **v);
 bool   jlib_decl poor_atomic_cas_ptr(void ** v, void *newvalue, void *expectedvalue);
+void jlib_decl poor_compiler_memory_barrier();
 
 #define ATOMIC_INIT(i)                  (i)
 #define atomic_inc(v)                   (void)poor_atomic_inc_and_test(v)
@@ -154,6 +161,7 @@ bool   jlib_decl poor_atomic_cas_ptr(void ** v, void *newvalue, void *expectedva
 #define atomic_cas(v,newvalue,expectedvalue)    poor_atomic_cas(v,newvalue,expectedvalue)
 #define atomic_xchg_ptr(p, v)               poor_atomic_xchg_ptr(p, v)
 #define atomic_cas_ptr(v,newvalue,expectedvalue)    poor_atomic_cas_ptr(v,newvalue,expectedvalue)
+#define compiler_memory_barrier()       poor_compiler_memory_barrier()
 
 #endif
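
Taken together, the jatomic.hpp hunks select one definition of the new macro per toolchain; a condensed view of the shape (sketch only, omitting the surrounding atomic helpers and the jlib_decl decoration):

    // Condensed shape of the new macro selection (sketch, not the full header)
    #if defined(_WIN32)
      #include <intrin.h>
      // MSVC intrinsic: blocks compiler reordering only, emits no fence instruction
      #define compiler_memory_barrier()   { _ReadWriteBarrier(); }
    #elif defined(__GNUC__)
      // gcc/clang: empty asm with a "memory" clobber, likewise compile-time only
      #define compiler_memory_barrier()   asm volatile("" : : : "memory")
    #else
      // other compilers: an out-of-line call the optimizer cannot see through
      void poor_compiler_memory_barrier();
      #define compiler_memory_barrier()   poor_compiler_memory_barrier()
    #endif

All three variants constrain the compiler only and emit no hardware fence, so the spin-lock release still depends on the processor's own store ordering (which is sufficient on x86).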
 

system/jlib/jiface.cpp (+4 -0)

@@ -120,6 +120,10 @@ bool poor_atomic_cas_ptr(void ** v, void * newvalue, void * expectedvalue)
     return ret;
 }
 
+//Hopefully the function call will be enough to stop the compiler reordering any operations
+void poor_compiler_memory_barrier()
+{
+}
 
 
 #endif
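
The fallback relies on the call being opaque: because the definition lives in another translation unit, the optimizer cannot prove the callee leaves memory untouched. A caller-side sketch of the effect (sharedState and example are illustrative names; whole-program or link-time optimization that inlines the empty body would weaken this guarantee):

    // Why an opaque, out-of-line call acts as a compiler barrier (illustrative):
    // the optimizer must assume the callee can read or write any object whose
    // address may have escaped, so it cannot cache such values in registers or
    // move the surrounding stores across the call.
    extern void poor_compiler_memory_barrier();   // defined in jiface.cpp

    int sharedState = 0;                          // illustrative global

    void example()
    {
        sharedState = 1;                  // cannot be sunk below the call
        poor_compiler_memory_barrier();   // opaque to this translation unit
        sharedState = 2;                  // cannot be merged with the store above
    }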

system/jlib/jmutex.hpp (+12 -1)

@@ -338,10 +338,13 @@ public:
         owner.tid = self;
     }
     inline void leave()
-    { 
+    {
+        //It is safe to access nesting here - this thread is the only one that can
+        //access it, so no synchronized access is needed
         if (nesting == 0)
         {
             owner.tid = 0;
+            compiler_memory_barrier();
             atomic_set(&value, 0);
         }
         else
@@ -399,11 +402,13 @@ public:
     { 
         assertex(GetCurrentThreadId()==owner.tid); // check for spurious leave
         owner.tid = 0;
+        compiler_memory_barrier();
         atomic_set(&value, 0); 
     }
 };
 
 #else
+
 class jlib_decl  NonReentrantSpinLock
 {
     atomic_t value;
@@ -420,6 +425,7 @@ public:
     }
     inline void leave()
     { 
+        compiler_memory_barrier();
         atomic_set(&value, 0); 
     }
 };
@@ -763,12 +769,17 @@ public:
     {
         if (needlock) {
             sect.enter();
+            //prevent the compiler from moving any code ahead of the critical section enter
+            compiler_memory_barrier();
             return true;
         }
+        //Prevent the value of the protected object from being evaluated before the condition
+        compiler_memory_barrier();
         return false;
     }
     inline void unlock()
     {
+        compiler_memory_barrier();
         needlock = false;
         sect.leave();
     }
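
For comparison only, not part of this commit: on toolchains with C++11 atomics the same release ordering can be written without a hand-rolled barrier, because a release store both stops the compiler from sinking earlier writes past it and orders them for other processors. A rough sketch of an equivalent lock (not the jlib implementation):

    #include <atomic>

    // Rough modern equivalent of the patched spin-lock release (sketch only).
    class SpinLockSketch
    {
        std::atomic<int> value{0};
    public:
        void enter()
        {
            // acquire: protected reads cannot be hoisted above the lock
            while (value.exchange(1, std::memory_order_acquire))
                ;   // spin; a production lock would pause or yield here
        }
        void leave()
        {
            // release: subsumes compiler_memory_barrier() + atomic_set(&value, 0)
            value.store(0, std::memory_order_release);
        }
    };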