summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--cpp/include/Ice/GCShared.h8
-rw-r--r--cpp/include/IceUtil/Shared.h98
-rw-r--r--cpp/src/Ice/GC.cpp28
-rw-r--r--cpp/src/IceUtil/Shared.cpp94
4 files changed, 80 insertions, 148 deletions
diff --git a/cpp/include/Ice/GCShared.h b/cpp/include/Ice/GCShared.h
index b793c9dcc4b..059c245056a 100644
--- a/cpp/include/Ice/GCShared.h
+++ b/cpp/include/Ice/GCShared.h
@@ -43,20 +43,12 @@ public:
int __getRefUnsafe() const
{
-#if defined(ICE_HAS_ATOMIC_FUNCTIONS)
- return _ref.counter;
-#else
return _ref;
-#endif
}
void __decRefUnsafe()
{
-#if defined(ICE_HAS_ATOMIC_FUNCTIONS)
- _ref.atomicDecAndTest();
-#else
--_ref;
-#endif
}
protected:
diff --git a/cpp/include/IceUtil/Shared.h b/cpp/include/IceUtil/Shared.h
index f929ab70841..050654e9c7d 100644
--- a/cpp/include/IceUtil/Shared.h
+++ b/cpp/include/IceUtil/Shared.h
@@ -15,103 +15,9 @@
#if defined(ICE_USE_MUTEX_SHARED)
# include <IceUtil/Mutex.h>
-#elif (defined(__linux) || defined(__FreeBSD__)) && (defined(__i386) || defined(__x86_64)) && !defined(__ICC)
+#elif (defined(__APPLE__) || defined(__linux) || defined(__FreeBSD__)) && (defined(__i386) || defined(__x86_64)) && !defined(__ICC)
# define ICE_HAS_ATOMIC_FUNCTIONS
-
-namespace IceUtilInternal
-{
-
-// __ICC: The inline assembler causes problems with shared libraries.
-//
-// Linux only. Unfortunately, asm/atomic.h builds non-SMP safe code
-// with non-SMP kernels. This means that executables compiled with a
-// non-SMP kernel would fail randomly due to concurrency errors with
-// reference counting on SMP hosts. Therefore the relevent pieces of
-// atomic.h are more-or-less duplicated.
-//
-
-//
-// Make sure gcc doesn't try to be clever and move things around
-// on us. We need to use _exactly_ the address the user gave us,
-// not some alias that contains the same information.
-//
-struct AtomicCounter
-{
- volatile int counter;
-
- /*
- * atomicSet - set ice_atomic variable
- * @v: pointer of type AtomicCounter
- * @i: required value
- *
- * Atomically sets the value of @v to @i. Note that the guaranteed
- * useful range of an AtomicCounter is only 24 bits.
- */
- void atomicSet(int i)
- {
- counter = i;
- }
-
- /*
- * atomicInc - increment ice_atomic variable
- * @v: pointer of type AtomicCounter
- *
- * Atomically increments @v by 1. Note that the guaranteed useful
- * range of an AtomicCounter is only 24 bits.
- *
- * Inlined because this operation is performance critical.
- */
- void atomicInc()
- {
- __asm__ __volatile__(
- "lock ; incl %0"
- :"=m" (counter)
- :"m" (counter));
- }
-
- /**
- * atomicDecAndTest - decrement and test
- * @v: pointer of type AtomicCounter
- *
- * Atomically decrements @v by 1 and returns true if the result is 0,
- * or false for all other cases. Note that the guaranteed useful
- * range of an AtomicCounter is only 24 bits.
- *
- * Inlined because this operation is performance critical.
- */
- int atomicDecAndTest()
- {
- unsigned char c;
- __asm__ __volatile__(
- "lock ; decl %0; sete %1"
- :"=m" (counter), "=qm" (c)
- :"m" (counter) : "memory");
- return c != 0;
- }
-
- /**
- * atomicExchangeAdd - same as InterlockedExchangeAdd. This
- * didn't come from atomic.h (the code was derived from similar code
- * in /usr/include/asm/rwsem.h)
- *
- * Inlined because this operation is performance critical.
- */
- int atomicExchangeAdd(int i)
- {
- int tmp = i;
- __asm__ __volatile__(
- "lock ; xadd %0,(%2)"
- :"+r"(tmp), "=m"(counter)
- :"r"(this), "m"(counter)
- : "memory");
- return tmp + i;
- }
-
-};
-
-}
-
#elif defined(_WIN32)
// Nothing to include
#else
@@ -214,7 +120,7 @@ protected:
#if defined(_WIN32)
LONG _ref;
#elif defined(ICE_HAS_ATOMIC_FUNCTIONS)
- IceUtilInternal::AtomicCounter _ref;
+ volatile int _ref;
#else
int _ref;
Mutex _mutex;
diff --git a/cpp/src/Ice/GC.cpp b/cpp/src/Ice/GC.cpp
index 9639bb4ffab..d87ed745494 100644
--- a/cpp/src/Ice/GC.cpp
+++ b/cpp/src/Ice/GC.cpp
@@ -58,13 +58,8 @@ void
IceInternal::GCShared::__incRef()
{
RecMutex::Lock lock(gcRecMutex);
-#if defined(ICE_HAS_ATOMIC_FUNCTIONS)
- assert(_ref.counter >= 0);
- _ref.atomicInc();
-#else
assert(_ref >= 0);
++_ref;
-#endif
}
void
@@ -72,13 +67,8 @@ IceInternal::GCShared::__decRef()
{
RecMutex::Lock lock(gcRecMutex);
bool doDelete = false;
-#if defined(ICE_HAS_ATOMIC_FUNCTIONS)
- assert(_ref.counter > 0);
- if(_ref.atomicDecAndTest())
-#else
assert(_ref > 0);
if(--_ref == 0)
-#endif
{
doDelete = !_noDelete;
_noDelete = true;
@@ -94,11 +84,7 @@ int
IceInternal::GCShared::__getRef() const
{
RecMutex::Lock lock(gcRecMutex);
-#if defined(ICE_HAS_ATOMIC_FUNCTIONS)
- return _ref.counter;
-#else
return _ref;
-#endif
}
void
@@ -112,13 +98,8 @@ void
IceInternal::GCShared::__gcIncRef()
{
RecMutex::Lock lock(gcRecMutex);
-#if defined(ICE_HAS_ATOMIC_FUNCTIONS)
- assert(_ref.counter >= 0);
- if(_ref.counter == 0)
-#else
assert(_ref >= 0);
if(_ref == 0)
-#endif
{
#ifdef NDEBUG // To avoid annoying warnings about variables that are not used...
gcObjects.insert(this);
@@ -127,11 +108,7 @@ IceInternal::GCShared::__gcIncRef()
assert(rc.second);
#endif
}
-#if defined(ICE_HAS_ATOMIC_FUNCTIONS)
- _ref.atomicInc();
-#else
++_ref;
-#endif
}
void
@@ -139,13 +116,8 @@ IceInternal::GCShared::__gcDecRef()
{
RecMutex::Lock lock(gcRecMutex);
bool doDelete = false;
-#if defined(ICE_HAS_ATOMIC_FUNCTIONS)
- assert(_ref.counter > 0);
- if(_ref.atomicDecAndTest())
-#else
assert(_ref > 0);
if(--_ref == 0)
-#endif
{
doDelete = !_noDelete;
_noDelete = true;
diff --git a/cpp/src/IceUtil/Shared.cpp b/cpp/src/IceUtil/Shared.cpp
index f18b9385798..1121ab3c823 100644
--- a/cpp/src/IceUtil/Shared.cpp
+++ b/cpp/src/IceUtil/Shared.cpp
@@ -10,7 +10,79 @@
#include <IceUtil/Shared.h>
using namespace IceUtil;
-using namespace IceUtilInternal;
+
+#ifdef ICE_HAS_ATOMIC_FUNCTIONS
+
+namespace IceUtilInternal
+{
+
+//
+// x86/x86_64 on Linux, FreeBSD, and macOS. Unfortunately, asm/atomic.h
+// builds non-SMP safe code with non-SMP kernels. This means that
+// executables compiled with a non-SMP kernel would fail randomly due to
+// concurrency errors with reference counting on SMP hosts. Therefore the
+// relevant pieces of atomic.h are more-or-less duplicated.
+//
+
+/*
+ * atomicInc - atomically increment a counter
+ * @counter: pointer to the volatile int counter
+ *
+ * Atomically increments @v by 1. Note that the guaranteed useful
+ * range of an AtomicCounter is only 24 bits.
+ *
+ * Inlined because this operation is performance critical.
+ */
+static inline void atomicInc(volatile int* counter)
+{
+ __asm__ __volatile__(
+ "lock ; incl %0"
+ :"=m" (*counter)
+ :"m" (*counter));
+}
+
+/**
+ * atomicDecAndTest - atomically decrement a counter and test for zero
+ * @counter: pointer to the volatile int counter
+ *
+ * Atomically decrements @v by 1 and returns true if the result is 0,
+ * or false for all other cases. Note that the guaranteed useful
+ * range of an AtomicCounter is only 24 bits.
+ *
+ * Inlined because this operation is performance critical.
+ */
+static inline int atomicDecAndTest(volatile int* counter)
+{
+ unsigned char c;
+ __asm__ __volatile__(
+ "lock ; decl %0; sete %1"
+ :"=m" (*counter), "=qm" (c)
+ :"m" (*counter) : "memory");
+ return c != 0;
+}
+
+/**
+ * atomicExchangeAdd - same as InterlockedExchangeAdd. This
+ * didn't come from atomic.h (the code was derived from similar code
+ * in /usr/include/asm/rwsem.h)
+ *
+ * Inlined because this operation is performance critical.
+ */
+static inline int atomicExchangeAdd(volatile int* counter, int i)
+{
+ int tmp = i;
+ __asm__ __volatile__(
+ "lock ; xadd %0,(%2)"
+ :"+r"(tmp), "=m"(*counter)
+ :"r"(counter), "m"(*counter)
+ : "memory");
+ return tmp + i;
+}
+
+}
+
+#endif
+
IceUtil::SimpleShared::SimpleShared() :
_ref(0),
@@ -25,25 +97,15 @@ IceUtil::SimpleShared::SimpleShared(const SimpleShared&) :
}
IceUtil::Shared::Shared() :
-#ifndef ICE_HAS_ATOMIC_FUNCTIONS
_ref(0),
-#endif
_noDelete(false)
{
-#ifdef ICE_HAS_ATOMIC_FUNCTIONS
- _ref.atomicSet(0);
-#endif
}
IceUtil::Shared::Shared(const Shared&) :
-#ifndef ICE_HAS_ATOMIC_FUNCTIONS
_ref(0),
-#endif
_noDelete(false)
{
-#ifdef ICE_HAS_ATOMIC_FUNCTIONS
- _ref.atomicSet(0);
-#endif
}
void
@@ -53,8 +115,8 @@ IceUtil::Shared::__incRef()
assert(InterlockedExchangeAdd(&_ref, 0) >= 0);
InterlockedIncrement(&_ref);
#elif defined(ICE_HAS_ATOMIC_FUNCTIONS)
- assert(_ref.atomicExchangeAdd(&_ref) >= 0);
- _ref.atomicInc();
+ assert(IceUtilInternal::atomicExchangeAdd(&_ref, 0) >= 0);
+ IceUtilInternal::atomicInc(&_ref);
#else
_mutex.lock();
assert(_ref >= 0);
@@ -74,8 +136,8 @@ IceUtil::Shared::__decRef()
delete this;
}
#elif defined(ICE_HAS_ATOMIC_FUNCTIONS)
- assert(_ref.atomicExchangeAdd(0) > 0);
- if(_ref.atomicDecAndTest() && !_noDelete)
+ assert(IceUtilInternal::atomicExchangeAdd(&_ref, 0) > 0);
+ if(IceUtilInternal::atomicDecAndTest(&_ref) && !_noDelete)
{
_noDelete = true;
delete this;
@@ -103,7 +165,7 @@ IceUtil::Shared::__getRef() const
#if defined(_WIN32)
return InterlockedExchangeAdd(const_cast<LONG*>(&_ref), 0);
#elif defined(ICE_HAS_ATOMIC_FUNCTIONS)
- return const_cast<IceUtilInternal::AtomicCounter*>(&_ref)->atomicExchangeAdd(0);
+ return IceUtilInternal::atomicExchangeAdd(const_cast<volatile int*>(&_ref), 0);
#else
_mutex.lock();
int ref = _ref;