Browse Source

roxiemem add new atomic operation

Signed-off-by: Gavin Halliday <gavin.halliday@lexisnexis.com>
Gavin Halliday · 13 years ago
commit d25833a3cd
3 changed files with 47 additions and 13 deletions
  1. roxie/roxiemem/roxiemem.cpp (+35, -8)
  2. system/jlib/jatomic.hpp (+10, -3)
  3. system/jlib/jiface.cpp (+2, -2)

roxie/roxiemem/roxiemem.cpp (+35, -8)

@@ -1204,7 +1204,7 @@ protected:
 
 class CFixedChunkingHeap : public CNormalChunkingHeap
 {
-    enum { interestingFlags = RHFunique };
+    enum { interestingFlags = RHFunique|RHFpacked };
 public:
     CFixedChunkingHeap(CChunkingRowManager * _rowManager, const IContextLogger &_logctx, const IRowAllocatorCache *_allocatorCache, size32_t _fixedSize, unsigned _flags)
         : CNormalChunkingHeap(_rowManager, _logctx, _allocatorCache), fixedSize(_fixedSize), flags(_flags & interestingFlags)
@@ -1227,6 +1227,32 @@ protected:
     unsigned flags;
 };
 
+class CPackedChunkingHeap : public CNormalChunkingHeap
+{
+    enum { interestingFlags = RHFunique|RHFpacked };
+public:
+    CPackedChunkingHeap(CChunkingRowManager * _rowManager, const IContextLogger &_logctx, const IRowAllocatorCache *_allocatorCache, size32_t _fixedSize, unsigned _flags)
+        : CNormalChunkingHeap(_rowManager, _logctx, _allocatorCache), fixedSize(_fixedSize), flags(_flags & interestingFlags)
+    {
+    }
+
+    virtual void beforeDispose();
+
+    void * allocate(unsigned activityId);
+
+    inline bool matches(size32_t searchSize, unsigned searchFlags) const
+    {
+        //Check the size matches, and any flags we are interested in.
+        return (searchSize == fixedSize) &&
+               (searchFlags & interestingFlags) == flags;
+    }
+
+protected:
+    size32_t fixedSize;
+    unsigned flags;
+    unsigned activityId;
+};
+
 //================================================================================
 //
 #define NUM_NORMAL_HEAPS 10
@@ -1253,7 +1279,7 @@ class CChunkingRowManager : public CInterface, implements IRowManager
     bool ignoreLeaks;
     bool trackMemoryByActivity;
     Owned<IActivityMemoryUsageMap> usageMap;
-    PointerArrayOf<CFixedChunkingHeap> fixedHeaps;
+    PointerArrayOf<CChunkingHeap> fixedHeaps;
     const IRowAllocatorCache *allocatorCache;
     unsigned __int64 cyclesChecked;       // When we last checked timelimit
     unsigned __int64 cyclesCheckInterval; // How often we need to check timelimit
@@ -1339,7 +1365,7 @@ public:
         SpinBlock block(fixedCrit); //Spinblock needed if we can add/remove fixed heaps while allocations are occuring
         ForEachItemIn(i, fixedHeaps)
         {
-            CFixedChunkingHeap * fixedHeap = fixedHeaps.item(i);
+            CChunkingHeap * fixedHeap = fixedHeaps.item(i);
             fixedHeap->checkHeap();
         }
     }
@@ -1359,7 +1385,7 @@ public:
         SpinBlock block(fixedCrit); //Spinblock needed if we can add/remove fixed heaps while allocations are occuring
         ForEachItemIn(i, fixedHeaps)
         {
-            CFixedChunkingHeap * fixedHeap = fixedHeaps.item(i);
+            CChunkingHeap * fixedHeap = fixedHeaps.item(i);
             total += fixedHeap->allocated();
         }
 
@@ -1376,7 +1402,7 @@ public:
         SpinBlock block(fixedCrit); //Spinblock needed if we can add/remove fixed heaps while allocations are occuring
         ForEachItemIn(i, fixedHeaps)
         {
-            CFixedChunkingHeap * fixedHeap = fixedHeaps.item(i);
+            CChunkingHeap * fixedHeap = fixedHeaps.item(i);
             total += fixedHeap->pages();
         }
 
@@ -1393,7 +1419,7 @@ public:
         SpinBlock block(fixedCrit); //Spinblock needed if we can add/remove fixed heaps while allocations are occuring
         ForEachItemIn(i, fixedHeaps)
         {
-            CFixedChunkingHeap * fixedHeap = fixedHeaps.item(i);
+            CChunkingHeap * fixedHeap = fixedHeaps.item(i);
             fixedHeap->getPeakActivityUsage(map);
         }
 
@@ -1629,7 +1655,8 @@ protected:
         {
             ForEachItemIn(i, fixedHeaps)
             {
-                CFixedChunkingHeap * heap = fixedHeaps.item(i);
+                //MORE: Needs to be a virtual function
+                CFixedChunkingHeap * heap = (CFixedChunkingHeap *)fixedHeaps.item(i);
                 if (heap->matches(chunkSize, flags))
                 {
                     heap->Link();
@@ -1668,7 +1695,7 @@ protected:
         SpinBlock block(fixedCrit); //Spinblock needed if we can add/remove fixed heaps while allocations are occuring
         ForEachItemIn(i, fixedHeaps)
         {
-            CFixedChunkingHeap * fixedHeap = fixedHeaps.item(i);
+            CChunkingHeap * fixedHeap = fixedHeaps.item(i);
             if (leaked == 0)
                 break;
             fixedHeap->reportLeaks(leaked);

system/jlib/jatomic.hpp (+10, -3)

@@ -45,6 +45,7 @@ typedef volatile long atomic_t;
 #define atomic_inc_and_test(v)          (InterlockedIncrement(v) == 0)
 #define atomic_dec(v)                   InterlockedDecrement(v)
 #define atomic_dec_and_test(v)          (InterlockedDecrement(v) == 0)
+#define atomic_dec_and_read(v)           InterlockedDecrement(v)
 #define atomic_read(v)                  (*v)
 #define atomic_set(v,i)                 ((*v) = (i))
 #define atomic_xchg(i, v)               InterlockedExchange(v, i)
@@ -93,6 +94,11 @@ static __inline__ void atomic_dec(atomic_t *v)
     __sync_add_and_fetch(&v->counter,-1);
 }
 
+static __inline__ int atomic_dec_and_read(atomic_t *v)
+{
+    // (*v)--, return *v;
+    return __sync_add_and_fetch(&v->counter,-1);
+}
 
 static __inline__ int atomic_xchg(int i, atomic_t *v)
 {
@@ -138,7 +144,7 @@ static __inline__ bool atomic_cas_ptr(void **v,void *newvalue, void *expectedval
 
 //Truely awful implementations of atomic operations...
 typedef volatile int atomic_t;
-bool jlib_decl poor_atomic_dec_and_test(atomic_t * v);
+int jlib_decl poor_atomic_dec_and_read(atomic_t * v);
 bool jlib_decl poor_atomic_inc_and_test(atomic_t * v);
 int jlib_decl poor_atomic_xchg(int i, atomic_t * v);
 void jlib_decl poor_atomic_add(atomic_t * v, int i);
@@ -151,8 +157,9 @@ void jlib_decl poor_compiler_memory_barrier();
 #define ATOMIC_INIT(i)                  (i)
 #define atomic_inc(v)                   (void)poor_atomic_inc_and_test(v)
 #define atomic_inc_and_test(v)          poor_atomic_inc_and_test(v)
-#define atomic_dec(v)                   (void)poor_atomic_dec_and_test(v)
-#define atomic_dec_and_test(v)          poor_atomic_dec_and_test(v)
+#define atomic_dec(v)                   (void)poor_atomic_dec_and_read(v)
+#define atomic_dec_and_read(v)          poor_atomic_dec_and_read(v)
+#define atomic_dec_and_test(v)          (poor_atomic_dec_and_read(v)==0)
 #define atomic_read(v)                  (*v)
 #define atomic_set(v,i)                 ((*v) = (i))
 #define atomic_xchg(i, v)               poor_atomic_xchg(i, v)
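
The new atomic_dec_and_read is strictly more general than atomic_dec_and_test: it returns the post-decrement value, so the test variant reduces to a comparison with zero, exactly as the rewritten poor_* macros above show. A minimal sketch of the kind of caller that needs the actual count rather than a zero test follows; the names here are illustrative, not jlib API.

#include <cstdio>

// Stand-in for jlib's GCC-path atomic_t (illustrative).
typedef struct { volatile int counter; } atomic_t;

static inline int atomic_dec_and_read(atomic_t *v)
{
    // Same builtin the diff above uses: decrement, return the new value.
    return __sync_add_and_fetch(&v->counter, -1);
}

struct SharedBuffer
{
    atomic_t refs;

    void release()
    {
        int remaining = atomic_dec_and_read(&refs);
        if (remaining == 0)
            delete this;                     // last reference released
        else
            printf("%d references remain\n", remaining);
    }
};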

system/jlib/jiface.cpp (+2, -2)

@@ -44,10 +44,10 @@ MODULE_EXIT()
 //  delete ICrit;  - need to make sure this is deleted after anything that uses it
 }
 
-bool poor_atomic_dec_and_test(atomic_t * v)
+int poor_atomic_dec_and_read(atomic_t * v)
 {
     ICrit->enter();
-    bool ret = (--(*v) == 0);
+    int ret = --(*v);
     ICrit->leave();
     return ret;
 }
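
On platforms with neither Windows interlocked operations nor GCC builtins, jiface.cpp serialises every "atomic" operation through the shared ICrit critical section, so correctness comes from the lock rather than the hardware. A rough standalone equivalent using std::mutex as an assumed stand-in for ICrit (which is jlib's own mutex wrapper):

#include <mutex>

typedef volatile int atomic_t;

static std::mutex crit;    // plays the role of jlib's global ICrit

int poor_atomic_dec_and_read(atomic_t *v)
{
    // All poor_* operations take the same lock, so this
    // read-modify-write cannot interleave with another one.
    std::lock_guard<std::mutex> lock(crit);
    return --(*v);
}

Returning the decremented value lets this single function back atomic_dec, atomic_dec_and_test, and atomic_dec_and_read, which is why the old poor_atomic_dec_and_test could be removed outright.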