Diffstat (limited to 'cpp')
-rw-r--r--  cpp/include/IceUtil/Shared.h | 280
1 file changed, 238 insertions(+), 42 deletions(-)
diff --git a/cpp/include/IceUtil/Shared.h b/cpp/include/IceUtil/Shared.h
index 6dfd5aaa2c9..e0d06cd4767 100644
--- a/cpp/include/IceUtil/Shared.h
+++ b/cpp/include/IceUtil/Shared.h
@@ -12,8 +12,89 @@
#define ICE_UTIL_SHARED_H
#include <IceUtil/Config.h>
+//
+// This is only needed for the vanilla version of Shared.
+//
#include <IceUtil/Mutex.h>
+#ifndef WIN32
+//
+// Linux only. Unfortunately, asm/atomic.h builds non-SMP safe code
+// with non-SMP kernels. This means that executables compiled with a
+// non-SMP kernel would fail randomly due to concurrency errors with
+// reference counting on SMP hosts. Therefore the relevant pieces of
+// atomic.h are more-or-less duplicated.
+//
+
+/*
+ * Make sure gcc doesn't try to be clever and move things around
+ * on us. We need to use _exactly_ the address the user gave us,
+ * not some alias that contains the same information.
+ */
+typedef struct { volatile int counter; } atomic_t;
+
+/**
+ * atomic_set - set atomic variable
+ * @v: pointer of type atomic_t
+ * @i: required value
+ *
+ * Atomically sets the value of @v to @i. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
+#define atomic_set(v,i) (((v)->counter) = (i))
+
+/**
+ * atomic_inc - increment atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
+static __inline__ void atomic_inc(atomic_t *v)
+{
+ __asm__ __volatile__(
+ "lock ; incl %0"
+ :"=m" (v->counter)
+ :"m" (v->counter));
+}
+
+/**
+ * atomic_dec_and_test - decrement and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically decrements @v by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
+static __inline__ int atomic_dec_and_test(atomic_t *v)
+{
+ unsigned char c;
+
+ __asm__ __volatile__(
+ "lock ; decl %0; sete %1"
+ :"=m" (v->counter), "=qm" (c)
+ :"m" (v->counter) : "memory");
+ return c != 0;
+}
+
+/**
+ * atomic_exchange_add - like InterlockedExchangeAdd, except that it
+ *                       returns the new value (the old value plus i)
+ *                       rather than the old value. This didn't come
+ *                       from atomic.h (the code was taken from
+ *                       /usr/include/asm/rwsem.h).
+ **/
+static __inline__ int atomic_exchange_add(int i, atomic_t* v)
+{
+ int tmp = i;
+ __asm__ __volatile__(
+ "lock ; xadd %0,(%2)"
+ :"+r"(tmp), "=m"(v->counter)
+ :"r"(v), "m"(v->counter)
+ : "memory");
+ return tmp + i;
+}
+#endif
+
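As a rough sanity check of what these x86 primitives provide (a minimal
sketch, not part of the diff; it assumes the definitions above are in
scope and a GCC/x86 build):

    #include <cassert>

    int main()
    {
        atomic_t ref;
        atomic_set(&ref, 0);

        atomic_inc(&ref);                           // counter: 0 -> 1
        assert(atomic_exchange_add(0, &ref) == 1);  // adding 0 just reads the counter
        assert(atomic_exchange_add(2, &ref) == 3);  // returns the updated value: 1 + 2

        atomic_set(&ref, 1);
        assert(atomic_dec_and_test(&ref));          // 1 -> 0, so the test fires
        return 0;
    }

Passing 0 as the addend is exactly how the code below reads the current
reference count without a separate atomic load.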
//
// Base classes for reference counted types. The IceUtil::Handle
// template can be used for smart pointers to types derived from these
@@ -84,61 +165,176 @@ class Shared : public noncopyable
{
public:
- Shared() :
- _ref(0),
- _noDelete(false)
- {
- }
-
- virtual ~Shared()
- {
- }
+ Shared();
+ virtual ~Shared();
+ void __incRef();
+ void __decRef();
+ int __getRef();
+ void __setNoDelete(bool);
- void __incRef()
+private:
+
+#ifdef WIN32
+ LONG _ref;
+#else
+ atomic_t _ref;
+#endif
+ bool _noDelete;
+
+ //
+ // For the plain vanilla version.
+ //
+ // bool _noDelete;
+ // int _ref;
+ // Mutex _mutex;
+ //
+};
+
+#ifdef WIN32
+
+inline
+Shared::Shared() :
+ _ref(0),
+ _noDelete(false)
+{
+}
+
+inline
+Shared::~Shared()
+{
+}
+
+inline void
+Shared::__incRef()
+{
+ assert(InterlockedExchangeAdd(&_ref, 0) >= 0);
+ InterlockedIncrement(&_ref);
+}
+
+inline void
+Shared::__decRef()
+{
+ assert(InterlockedExchangeAdd(&_ref, 0) > 0);
+ if (InterlockedDecrement(&_ref) == 0 && !_noDelete)
{
- _mutex.lock();
- assert(_ref >= 0);
- ++_ref;
- _mutex.unlock();
+ delete this;
}
+}
- void __decRef()
+inline int
+Shared::__getRef()
+{
+ return InterlockedExchangeAdd(&_ref, 0);
+}
+
+inline void
+Shared::__setNoDelete(bool b)
+{
+ _noDelete = b;
+}
+
+#else
+
+inline
+Shared::Shared() :
+ _noDelete(false)
+{
+ atomic_set(&_ref, 0);
+}
+
+inline
+Shared::~Shared()
+{
+}
+
+inline void
+Shared::__incRef()
+{
+ assert(atomic_exchange_add(0, &_ref) >= 0);
+ atomic_inc(&_ref);
+}
+
+inline void
+Shared::__decRef()
+{
+ assert(atomic_exchange_add(0, &_ref) > 0);
+ if (atomic_dec_and_test(&_ref) && !_noDelete)
{
- _mutex.lock();
- bool doDelete = false;
- assert(_ref > 0);
- if (--_ref == 0)
- {
- doDelete = !_noDelete;
- }
- _mutex.unlock();
- if (doDelete)
- {
- delete this;
- }
+ delete this;
}
+}
- int __getRef()
+inline int
+Shared::__getRef()
+{
+ return atomic_exchange_add(0, &_ref);
+}
+
+inline void
+Shared::__setNoDelete(bool b)
+{
+ _noDelete = b;
+}
+#endif
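Driving the counting API by hand shows what the _noDelete flag changes
(a minimal sketch; Resource is an illustrative class, and in real code
the IceUtil::Handle template issues these calls):

    #include <IceUtil/Shared.h>

    class Resource : public IceUtil::Shared { };

    int main()
    {
        Resource* r = new Resource; // reference count starts at 0
        r->__incRef();              // 0 -> 1
        r->__setNoDelete(true);
        r->__decRef();              // 1 -> 0, but deletion is suppressed
        r->__setNoDelete(false);
        r->__incRef();              // 0 -> 1
        r->__decRef();              // 1 -> 0, the object deletes itself
        return 0;
    }

One plausible use for the flag: while a constructor hands `this` to
another object, a temporary smart pointer can take the count from 0 to
1 and back to 0, which would otherwise delete the half-constructed
object.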
+
+/*
+ * This is the mutex-protected version
+ *
+inline
+Shared::Shared() :
+ _ref(0),
+ _noDelete(false)
+{
+}
+
+inline
+Shared::~Shared()
+{
+}
+
+inline void
+Shared::__incRef()
+{
+ _mutex.lock();
+ assert(_ref >= 0);
+ ++_ref;
+ _mutex.unlock();
+}
+
+inline void
+Shared::__decRef()
+{
+ _mutex.lock();
+ bool doDelete = false;
+ assert(_ref > 0);
+ if (--_ref == 0)
{
- _mutex.lock();
- int ref = _ref;
- _mutex.unlock();
- return ref;
+ doDelete = !_noDelete;
}
-
- void __setNoDelete(bool b)
+ _mutex.unlock();
+ if (doDelete)
{
- _mutex.lock();
- _noDelete = b;
- _mutex.unlock();
+ delete this;
}
+}
-private:
+inline int
+Shared::__getRef()
+{
+ _mutex.lock();
+ int ref = _ref;
+ _mutex.unlock();
+ return ref;
+}
- int _ref;
- bool _noDelete;
- Mutex _mutex;
-};
+inline void
+Shared::__setNoDelete(bool b)
+{
+ _mutex.lock();
+ _noDelete = b;
+ _mutex.unlock();
+}
+*/
}
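For reference, the intended usage pattern via the IceUtil::Handle
template mentioned in the header comment above (a minimal sketch;
Widget is an illustrative class, not from this diff):

    #include <IceUtil/Shared.h>
    #include <IceUtil/Handle.h>

    class Widget : public IceUtil::Shared
    {
    public:
        void draw() { /* ... */ }
    };

    typedef IceUtil::Handle<Widget> WidgetPtr;

    int main()
    {
        WidgetPtr w = new Widget; // Handle calls __incRef(): 0 -> 1
        WidgetPtr w2 = w;         // copy calls __incRef(): 1 -> 2
        w2 = 0;                   // __decRef(): 2 -> 1
        w->draw();
        return 0;                 // w's destructor calls __decRef(): 1 -> 0, Widget is deleted
    }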