Diffstat (limited to 'cpp/src'):
 cpp/src/Ice/BasicStream.cpp   |   1
 cpp/src/Ice/Buffer.cpp        | 100
 cpp/src/Ice/Instance.cpp      |  25
 cpp/src/Ice/Instance.h        |  10
 cpp/src/Ice/Makefile          |   1
 cpp/src/Ice/Makefile.mak      |   1
 cpp/src/Ice/MemoryPool.cpp    | 520 (new)
 cpp/src/Ice/MemoryPool.h      |  60 (new)
 cpp/src/Ice/PropertyNames.cpp |   4
 cpp/src/Ice/PropertyNames.h   |   2
 10 files changed, 661 insertions(+), 63 deletions(-)
diff --git a/cpp/src/Ice/BasicStream.cpp b/cpp/src/Ice/BasicStream.cpp
index 1489dd2f52f..01eb559d8cd 100644
--- a/cpp/src/Ice/BasicStream.cpp
+++ b/cpp/src/Ice/BasicStream.cpp
@@ -31,6 +31,7 @@ using namespace Ice;
using namespace IceInternal;
IceInternal::BasicStream::BasicStream(Instance* instance) :
+ IceInternal::Buffer(instance->memoryPool()),
_instance(instance),
_currentReadEncaps(0),
_currentWriteEncaps(0),
diff --git a/cpp/src/Ice/Buffer.cpp b/cpp/src/Ice/Buffer.cpp
index a841d822422..dc8a6595827 100644
--- a/cpp/src/Ice/Buffer.cpp
+++ b/cpp/src/Ice/Buffer.cpp
@@ -9,6 +9,7 @@
#include <Ice/Buffer.h>
#include <Ice/LocalException.h>
+#include <Ice/MemoryPool.h>
using namespace std;
using namespace Ice;
@@ -17,54 +18,34 @@ using namespace IceInternal;
void
IceInternal::Buffer::swap(Buffer& other)
{
-#ifdef ICE_SMALL_MESSAGE_BUFFER_OPTIMIZATION
- Container::difference_type pos = i - b.begin();
- Container::difference_type otherPos = other.i - other.b.begin();
- b.swap(other.b);
- i = b.begin() + otherPos;
- other.i = other.b.begin() + pos;
-#else
b.swap(other.b);
std::swap(i, other.i);
-#endif
}
-void
-IceInternal::Buffer::Container::swap(Container& other)
+IceInternal::Buffer::Container::Container(IceInternal::MemoryPool* pool) :
+ _buf(0),
+ _size(0),
+ _capacity(0),
+ _pool(pool)
+{
+}
+
+IceInternal::Buffer::Container::~Container()
{
-#ifdef ICE_SMALL_MESSAGE_BUFFER_OPTIMIZATION
- if(_buf == _fixed)
+ if(_pool)
{
- if(other._buf == other._fixed)
- {
- value_type tmp[ICE_BUFFER_FIXED_SIZE];
- memcpy(tmp, _fixed, _size);
- memcpy(_fixed, other._fixed, other._size);
- memcpy(other._fixed, tmp, _size);
- }
- else
- {
- _buf = other._buf;
- memcpy(other._fixed, _fixed, _size);
- other._buf = other._fixed;
- }
+ _pool->free(_buf);
}
else
{
- if(other._buf == other._fixed)
- {
- other._buf = _buf;
- memcpy(_fixed, other._fixed, other._size);
- _buf = _fixed;
- }
- else
- {
- std::swap(_buf, other._buf);
- }
+ ::free(_buf);
}
-#else
+}
+
+void
+IceInternal::Buffer::Container::swap(Container& other)
+{
std::swap(_buf, other._buf);
-#endif
std::swap(_size, other._size);
std::swap(_capacity, other._capacity);
@@ -74,20 +55,17 @@ IceInternal::Buffer::Container::swap(Container& other)
void
IceInternal::Buffer::Container::clear()
{
-#ifdef ICE_SMALL_MESSAGE_BUFFER_OPTIMIZATION
- if(_buf != _fixed)
+ if(_pool)
{
- free(_buf);
- _buf = _fixed;
+ _pool->free(_buf);
+ }
+ else
+ {
+ ::free(_buf);
}
- _size = 0;
- _capacity = ICE_BUFFER_FIXED_SIZE;
-#else
- free(_buf);
_buf = 0;
_size = 0;
_capacity = 0;
-#endif
}
void
@@ -107,26 +85,28 @@ IceInternal::Buffer::Container::reserve(size_type n)
return;
}
-#ifdef ICE_SMALL_MESSAGE_BUFFER_OPTIMIZATION
- if(_buf != _fixed)
- {
- _buf = reinterpret_cast<pointer>(realloc(_buf, _capacity));
- }
- else if(_capacity > ICE_BUFFER_FIXED_SIZE)
- {
- _buf = reinterpret_cast<pointer>(malloc(_capacity));
- memcpy(_buf, _fixed, _size);
- }
-#else
if(_buf)
{
- _buf = reinterpret_cast<pointer>(realloc(_buf, _capacity));
+ if(_pool)
+ {
+ _buf = _pool->realloc(_buf, _capacity);
+ }
+ else
+ {
+ _buf = reinterpret_cast<pointer>(::realloc(_buf, _capacity));
+ }
}
else
{
- _buf = reinterpret_cast<pointer>(malloc(_capacity));
+ if(_pool)
+ {
+ _buf = _pool->alloc(_capacity);
+ }
+ else
+ {
+ _buf = reinterpret_cast<pointer>(::malloc(_capacity));
+ }
}
-#endif
if(!_buf)
{
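
The Buffer.cpp changes above (together with the BasicStream constructor change) compile against a Container that now carries a MemoryPool pointer. Buffer.h is not part of this change set, so the declaration below is only a sketch of the shape those changes imply; the typedefs are assumptions, and Ice::Byte comes from Ice/Config.h.

    // Sketch only: the real IceInternal::Buffer::Container declaration lives
    // in Ice/Buffer.h, which is not included in this diff.
    class Container
    {
    public:
        typedef Ice::Byte  value_type;
        typedef Ice::Byte* pointer;
        typedef size_t     size_type;

        Container(IceInternal::MemoryPool* pool); // pool may be 0
        ~Container();                             // releases _buf via the pool or ::free

        void swap(Container& other);
        void clear();
        void reserve(size_type n);

    private:
        pointer   _buf;       // current allocation, 0 when empty
        size_type _size;      // bytes in use
        size_type _capacity;  // bytes allocated
        IceInternal::MemoryPool* _pool; // 0 => plain ::malloc/::realloc/::free
    };
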
diff --git a/cpp/src/Ice/Instance.cpp b/cpp/src/Ice/Instance.cpp
index d72faa0b850..fb1807d1d4f 100644
--- a/cpp/src/Ice/Instance.cpp
+++ b/cpp/src/Ice/Instance.cpp
@@ -34,6 +34,7 @@
#include <Ice/PluginManagerI.h>
#include <Ice/Initialize.h>
#include <IceUtil/StringUtil.h>
+#include <Ice/MemoryPool.h>
#include <stdio.h>
@@ -451,6 +452,12 @@ IceInternal::Instance::identityToString(const Identity& ident) const
}
}
+IceInternal::MemoryPool*
+IceInternal::Instance::memoryPool() const
+{
+ return _memoryPool;
+}
+
IceInternal::Instance::Instance(const CommunicatorPtr& communicator, const InitializationData& initData) :
_state(StateActive),
_initData(initData),
@@ -459,7 +466,8 @@ IceInternal::Instance::Instance(const CommunicatorPtr& communicator, const Initi
_serverACM(0),
_threadPerConnection(0),
_threadPerConnectionStackSize(0),
- _defaultContext(new SharedContext(initData.defaultContext))
+ _defaultContext(new SharedContext(initData.defaultContext)),
+ _memoryPool(0)
{
try
{
@@ -669,6 +677,19 @@ IceInternal::Instance::Instance(const CommunicatorPtr& communicator, const Initi
_pluginManager = new PluginManagerI(communicator, _dynamicLibraryList);
+ if(_initData.properties->getPropertyAsIntWithDefault("Ice.MemoryPool", 1) == 1)
+ {
+
+ //
+ // High watermark defaults to 4 * max page size.
+ //
+ size_t highWaterMark = _initData.properties->getPropertyAsIntWithDefault("Ice.MemoryPool.HighWaterMark", 128);
+ const size_t megaByte = 1024 * 1024;
+
+ highWaterMark *= megaByte;
+ _memoryPool = new MemoryPool(highWaterMark);
+ }
+
_outgoingConnectionFactory = new OutgoingConnectionFactory(this);
_servantFactoryManager = new ObjectFactoryManager();
@@ -700,6 +721,8 @@ IceInternal::Instance::~Instance()
assert(!_referenceFactory);
assert(!_proxyFactory);
assert(!_outgoingConnectionFactory);
+ delete _memoryPool;
+
assert(!_connectionMonitor);
assert(!_servantFactoryManager);
assert(!_objectAdapterFactory);
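
The Instance changes above make the pool opt-out: Ice.MemoryPool defaults to 1 (enabled) and Ice.MemoryPool.HighWaterMark is read in megabytes, defaulting to 128. A usage sketch (the property values and program skeleton are illustrative only) showing how an application would tune the pool before creating its communicator:

    // Illustration only: configuring the new pool properties at startup.
    #include <Ice/Ice.h>

    int
    main(int argc, char* argv[])
    {
        Ice::InitializationData initData;
        initData.properties = Ice::createProperties();

        // Set to "0" to disable the pool; the default is enabled.
        initData.properties->setProperty("Ice.MemoryPool", "1");

        // Value is in megabytes; the default of 128 corresponds to
        // 4 * the 32 MB maximum page size.
        initData.properties->setProperty("Ice.MemoryPool.HighWaterMark", "64");

        Ice::CommunicatorPtr communicator = Ice::initialize(argc, argv, initData);
        // ... application code ...
        communicator->destroy();
        return 0;
    }
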
diff --git a/cpp/src/Ice/Instance.h b/cpp/src/Ice/Instance.h
index 86d8b074eb4..2f96d8b6a68 100644
--- a/cpp/src/Ice/Instance.h
+++ b/cpp/src/Ice/Instance.h
@@ -43,6 +43,8 @@ class CommunicatorI;
namespace IceInternal
{
+class MemoryPool;
+
class Instance : public IceUtil::Shared, public IceUtil::RecMutex
{
@@ -75,6 +77,13 @@ public:
void setDefaultContext(const ::Ice::Context&);
SharedContextPtr getDefaultContext() const;
Ice::Identity stringToIdentity(const std::string&) const;
+
+ //
+    // The memory pool is only accessed by BasicStreams. BasicStreams are
+    // always stack-allocated, so there is no danger of a memory pool instance
+    // being destroyed prematurely while it is in use.
+ //
+ MemoryPool* memoryPool() const;
std::string identityToString(const Ice::Identity&) const;
private:
@@ -114,6 +123,7 @@ private:
DynamicLibraryListPtr _dynamicLibraryList;
Ice::PluginManagerPtr _pluginManager;
SharedContextPtr _defaultContext;
+ MemoryPool* _memoryPool;
};
class UTF8BufferI : public Ice::UTF8Buffer
diff --git a/cpp/src/Ice/Makefile b/cpp/src/Ice/Makefile
index 2ab7a568cdb..006300e674c 100644
--- a/cpp/src/Ice/Makefile
+++ b/cpp/src/Ice/Makefile
@@ -19,6 +19,7 @@ OBJS = Acceptor.o \
Application.o \
Buffer.o \
BasicStream.o \
+ MemoryPool.o \
BuiltinSequences.o \
CommunicatorF.o \
CommunicatorI.o \
diff --git a/cpp/src/Ice/Makefile.mak b/cpp/src/Ice/Makefile.mak
index 4e5fa5ec2df..c0e0b635f7e 100644
--- a/cpp/src/Ice/Makefile.mak
+++ b/cpp/src/Ice/Makefile.mak
@@ -18,6 +18,7 @@ OBJS = Acceptor.obj \
Application.obj \
Buffer.obj \
BasicStream.obj \
+ MemoryPool.obj \
BuiltinSequences.obj \
CommunicatorI.obj \
Communicator.obj \
diff --git a/cpp/src/Ice/MemoryPool.cpp b/cpp/src/Ice/MemoryPool.cpp
new file mode 100644
index 00000000000..28118249b2f
--- /dev/null
+++ b/cpp/src/Ice/MemoryPool.cpp
@@ -0,0 +1,520 @@
+// **********************************************************************
+//
+// Copyright (c) 2003-2006 ZeroC, Inc. All rights reserved.
+//
+// This copy of Ice is licensed to you under the terms described in the
+// ICE_LICENSE file included in this distribution.
+//
+// **********************************************************************
+
+#include <Ice/MemoryPool.h>
+
+#include <Ice/Instance.h>
+#include <Ice/Initialize.h>
+#include <Ice/Properties.h>
+
+// #define MEMPOOL_DEBUG
+
+//
+// TODO: Will a rover to a free block help speed things up here? It's
+// probably not a very significant optimization as long as the pool is
+// primarily for BasicStream usage. If it becomes more general purpose, it
+// will need more consideration.
+//
+
+namespace IceInternal
+{
+
+struct BlockInfo;
+
+struct PageInfo
+{
+ size_t nBlocks;
+ size_t pageSize;
+
+ BlockInfo* blocks;
+ PageInfo* nextPage;
+};
+
+struct BlockInfo
+{
+ //
+    // We could get rid of the next link since the next BlockInfo should be
+    // at b + size. However, it is pretty useful as a convenience and it
+    // allows for some block validation.
+ //
+ BlockInfo* next;
+ BlockInfo* prev;
+
+ //
+ // We keep track of the parent page so we can quickly update
+ // information on the parent page.
+ //
+ PageInfo* page;
+
+ //
+ // The amount of user memory for this block. The actual size of the
+    // block in the pool is size + sizeof(BlockInfo).
+ //
+ size_t size;
+
+ //
+ // The amount of memory in the block that is actually used.
+ size_t used;
+
+ //
+ // Flag indicating whether this block is allocated or not.
+ //
+ bool free;
+
+ Ice::Byte* user;
+};
+
+const size_t blocksPerOversize = 4;
+
+} // End of namespace IceInternal
+
+IceInternal::MemoryPool::MemoryPool(size_t highWaterMark):
+ _pageSize(4 * 1024 * 1024),
+ _maxPageSize(8 * _pageSize),
+ _highWaterMark(highWaterMark),
+ _currentSize(0),
+ _pages(0)
+{
+}
+
+IceInternal::MemoryPool::~MemoryPool()
+{
+ while(_pages != 0)
+ {
+ PageInfo* current = _pages;
+ _pages = _pages->nextPage;
+ ::free(current);
+ }
+}
+
+//
+// The Memory pool's public interface. There should be no reason to
+// call directly on the memory pool's instance.
+//
+Ice::Byte*
+IceInternal::MemoryPool::alloc(size_t n)
+{
+ IceUtil::Mutex::Lock lock(_mutex);
+ return allocBlock(n);
+}
+
+void
+IceInternal::MemoryPool::free(Ice::Byte* b)
+{
+ if(b == 0)
+ {
+ return;
+ }
+ IceUtil::Mutex::Lock lock(_mutex);
+ freeBlock(b);
+}
+
+Ice::Byte*
+IceInternal::MemoryPool::realloc(Ice::Byte* b, size_t n)
+{
+ //
+    // TODO: Is this safe? Can we assume that nobody else is going to try to
+    // delete this block? If so, this is super speedy! In one throughput
+    // test with 1000 iterations, we call realloc 4200 times!
+ //
+ BlockInfo* block = reinterpret_cast<BlockInfo*>(b - sizeof(BlockInfo));
+ if(block->size >= n)
+ {
+ block->used = n;
+ return b;
+ }
+
+ IceUtil::Mutex::Lock lock(_mutex);
+ return reallocBlock(b, n);
+}
+
+IceInternal::BlockInfo*
+IceInternal::MemoryPool::initBlock(Ice::Byte* p, PageInfo* page, size_t n, bool allocated)
+{
+ BlockInfo* block = reinterpret_cast<BlockInfo*>(p);
+ block->size = n;
+ block->prev = 0;
+ block->next = 0;
+ block->page = page;
+ block->free = !allocated;
+ block->user = p + sizeof(BlockInfo);
+
+#ifdef MEMPOOL_DEBUG
+ memset(block->user, 'I', block->size);
+#endif
+ return block;
+}
+
+//
+// Page layout:
+//
+// +----------+-----------+-------------+-----------+--------------......-+
+// | PageInfo | BlockInfo | user memory | BlockInfo | user memory |
+// +----------+-----------+-------------+-----------+--------------......-+
+//
+IceInternal::PageInfo*
+IceInternal::MemoryPool::initPage(size_t n)
+{
+ Ice::Byte* rawData = reinterpret_cast<Ice::Byte*>(malloc(n));
+#ifdef MEMPOOL_DEBUG
+ memset(rawData, 'P', n);
+#endif
+ if(rawData == 0)
+ {
+ return 0;
+ }
+
+ PageInfo* p = reinterpret_cast<PageInfo*>(rawData);
+
+ p->nBlocks = 0;
+
+ //
+ // We keep track of the page size because it'll help make decisions
+ // about culling free pages.
+ //
+ p->pageSize = n;
+
+ //
+ // Initialize the first free block.
+ //
+ p->blocks = initBlock(rawData + sizeof(PageInfo), p, n - (sizeof(PageInfo) + sizeof(BlockInfo)), false);
+ p->nextPage = 0;
+ return p;
+}
+
+IceInternal::PageInfo*
+IceInternal::MemoryPool::createNewPage(size_t n)
+{
+ const size_t overhead = sizeof(PageInfo) + sizeof(BlockInfo);
+ const size_t defaultMaxBlockSize = _pageSize - overhead;
+
+ size_t newPageSize = 0;
+ if(n > defaultMaxBlockSize)
+ {
+ if((n + overhead) < _maxPageSize)
+ {
+ newPageSize = _maxPageSize;
+ }
+ else
+ {
+ newPageSize = n + overhead;
+ }
+ }
+ else
+ {
+ newPageSize = _pageSize;
+ }
+ _currentSize += newPageSize;
+ return initPage(newPageSize);
+}
+
+//
+// Remove unused pages (pages with no allocated blocks). If the force
+// arg is false, only remove pages if the total memory allocated for the
+// pool is over the high watermark. If force is true, remove unused
+// pages unconditionally. Generally speaking, this is useful for
+// shrinking memory to within a certain constraint when possible, but
+// that doesn't mean that it's always possible. There may not be any
+// free pages. However, since this pool is primarily intended for the
+// Ice::BasicStream class, usage of pool memory is probably fairly
+// transient so opportunities for cleanup will occur fairly often.
+//
+void
+IceInternal::MemoryPool::purgePages(bool force)
+{
+ if((force || _currentSize > _highWaterMark))
+ {
+ PageInfo* newList = 0;
+ PageInfo* p = _pages;
+ while(p != 0)
+ {
+ PageInfo* next = p->nextPage;
+ if(p->nBlocks == 0)
+ {
+ _currentSize -= p->pageSize;
+ ::free(p);
+ }
+ else
+ {
+ p->nextPage = newList;
+ newList = p;
+ }
+ p = next;
+ }
+ _pages = newList;
+ }
+}
+
+//
+// Get some memory from the pool.
+//
+Ice::Byte*
+IceInternal::MemoryPool::allocBlock(size_t n)
+{
+ if(n < 16)
+ {
+ n = 16;
+ }
+ //
+    // Round n up to an exact multiple of 16. This makes address math
+    // a little easier and it ensures that blocks aren't insanely
+    // small. This should not be an issue when servicing
+    // Ice::BasicStream.
+    //
+    n = (n + 15) & ~static_cast<size_t>(15);
+
+ //
+ // Try each page until we get a successful allocation.
+ //
+ for(PageInfo* p = _pages; p != 0; p = p->nextPage)
+ {
+ Ice::Byte* block = getBlock(p, n);
+ if(block)
+ {
+ return block;
+ }
+ }
+
+ //
+ // None of the currently allocated pages has sufficient free space
+ // to allocate a block of the required size, so we'll need to
+ // create a new page and allocate from there.
+ //
+ PageInfo* newPage = createNewPage(n);
+ if(newPage == 0)
+ {
+ //
+ // Trouble! Our attempt to create a page has failed, so we need
+ // to look at purging pages and try again.
+ //
+ purgePages(true);
+
+ newPage = createNewPage(n);
+ assert(newPage != 0);
+
+ //
+ // If newPage is 0, there will be trouble. Since we are
+ // malloc() based, returning 0 is the most reasonable thing to
+ // do and matches earlier behavior.
+ //
+ if(newPage == 0)
+ {
+ return 0;
+ }
+ }
+ newPage->nextPage = _pages;
+ _pages = newPage;
+ return getBlock(newPage, n);
+}
+
+#ifdef MEMPOOL_DEBUG
+void
+validateBlock(BlockInfo* p)
+{
+ assert(!p->prev || p->prev->next == p);
+ assert(!p->next || p->next->prev == p);
+ if(p->next)
+ {
+ assert(reinterpret_cast<size_t>(p) + sizeof(BlockInfo) + p->size == reinterpret_cast<size_t>(p->next));
+ }
+ if(p->prev)
+ {
+ assert(reinterpret_cast<size_t>(p->prev) + sizeof(BlockInfo) + p->prev->size ==
+ reinterpret_cast<size_t>(p));
+ }
+}
+#else
+# define validateBlock(x) (void)x
+#endif
+
+//
+// Iterate through this page's blocks, trying to find one that is big
+// enough for 'n'. Return an address to a block's user memory on a
+// successful find, otherwise return 0.
+//
+Ice::Byte*
+IceInternal::MemoryPool::getBlock(PageInfo* page, size_t n)
+{
+ BlockInfo* p = page->blocks;
+
+ const size_t requiredMem = n + sizeof(BlockInfo);
+
+ while(p != 0)
+ {
+ if((n <= p->size) && p->free)
+ {
+ validateBlock(p);
+ Ice::Byte* base = reinterpret_cast<Ice::Byte*>(p);
+ BlockInfo* newBlock = 0;
+
+ //
+ // TODO: It might be nice to leave some extra space for
+ // reallocations. How big of a space to reserve? Since
+ // Ice::BasicStream already does a 'predictive' reserve and
+ // we coalesce adjacent free blocks, it might be overkill
+ // at this point.
+ //
+ size_t offset = 0;
+            if((requiredMem + 16) <= p->size)
+ {
+ //
+ // p will be the block for the allocated memory.
+ // newBlock will be the remaining free memory and will
+ // be to the 'right' of p.
+ //
+ offset = requiredMem;
+ }
+
+ if(offset != 0)
+ {
+ newBlock = initBlock(base + offset, p->page, p->size - requiredMem, false);
+ newBlock->next = p->next;
+ newBlock->prev = p;
+ if(newBlock->next)
+ {
+ newBlock->next->prev = newBlock;
+ }
+
+ //
+ // Adjust p's members.
+ //
+ p->next = newBlock;
+ p->size = n;
+ }
+
+ if(newBlock)
+ {
+ validateBlock(newBlock);
+ }
+
+ p->free = false;
+ p->page->nBlocks++;
+#ifdef MEMPOOL_DEBUG
+ memset(p->user, 'G', p->size);
+#endif
+ validateBlock(p);
+ p->used = n;
+ return p->user;
+ }
+ p = p->next;
+ }
+
+ return 0;
+}
+
+void
+IceInternal::MemoryPool::freeBlock(Ice::Byte* p)
+{
+ BlockInfo* block = reinterpret_cast<BlockInfo*>(p - sizeof(BlockInfo));
+
+
+ validateBlock(block);
+ block->free = true;
+ block->used = 0;
+ block->page->nBlocks--;
+
+ //
+ // Combine with next block if it is free. This means that the next
+ // block is obliterated.
+ //
+ BlockInfo* nextBlock = block->next;
+ if(nextBlock && nextBlock->free)
+ {
+ block->size += nextBlock->size + sizeof(BlockInfo);
+ block->next = nextBlock->next;
+ if(nextBlock->next)
+ {
+ nextBlock->next->prev = block;
+ }
+ }
+
+ //
+ // Combine with the previous block if it is free. This means that
+ // this block is obliterated.
+ //
+ BlockInfo* previousBlock = block->prev;
+ if(previousBlock && previousBlock->free)
+ {
+ previousBlock->size += block->size + sizeof(BlockInfo);
+ previousBlock->next = block->next;
+ if(block->next)
+ {
+ block->next->prev = previousBlock;
+ }
+ block = previousBlock;
+ }
+
+#ifdef MEMPOOL_DEBUG
+ memset(block->user, 'E', block->size);
+ if(block->prev)
+ {
+ validateBlock(block->prev);
+ }
+ if(block->next)
+ {
+ validateBlock(block->next);
+ }
+ validateBlock(block);
+#endif
+
+ if(block->page->nBlocks == 0)
+ {
+ purgePages(false);
+ }
+}
+
+Ice::Byte*
+IceInternal::MemoryPool::reallocBlock(Ice::Byte* p, size_t n)
+{
+ //
+ // Note: The way we allocate and free blocks *could* mean that a
+ // free block is available immediately after the current block.
+ //
+ BlockInfo* block = reinterpret_cast<BlockInfo*>(p - sizeof(BlockInfo));
+ assert(!block->free);
+ validateBlock(block);
+
+ //
+ // The way we allocate blocks, its very possible that the
+    // The way we allocate blocks, it's very possible that the
+    // current block is already big enough!
+ if(n > block->size)
+ {
+ //
+ // If the next block is free, try combining it with the
+ // current block to satisfy the allocation requirement.
+ //
+ if(block->next && block->next->free && (block->size + block->next->size) >= n)
+ {
+ block->size += block->next->size;
+ block->next = block->next->next;
+ if(block->next)
+ {
+ block->next->prev = block;
+ }
+ block->used = n;
+ validateBlock(block);
+ }
+ else
+ {
+ //
+ // Realloc with current block has failed. Allocate a new
+ // block that is big enough and copy the contents of the
+ // old block into the new.
+ //
+ Ice::Byte* t = allocBlock(n);
+ memcpy(t, p, block->used);
+ freeBlock(p);
+ return t;
+ }
+ }
+
+ assert(n <= block->size);
+ return p;
+}
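
To make the sizing rules above concrete, here is a small standalone illustration (not part of the patch; the helper names are invented for the example) of how allocBlock rounds requests and how getBlock decides whether to split a free block:

    // Standalone illustration; roundedRequest() and wouldSplit() do not exist
    // in the patch, they just restate its arithmetic.
    #include <cassert>
    #include <cstddef>

    // allocBlock(): requests are rounded up to a multiple of 16 bytes,
    // with a 16-byte minimum.
    std::size_t roundedRequest(std::size_t n)
    {
        if(n < 16)
        {
            n = 16;
        }
        return (n + 15) & ~static_cast<std::size_t>(15);
    }

    // getBlock(): a free block is split only if, after carving out the
    // request plus a BlockInfo header, at least 16 bytes would remain.
    bool wouldSplit(std::size_t blockSize, std::size_t n, std::size_t headerSize)
    {
        return (n + headerSize + 16) <= blockSize;
    }

    int main()
    {
        assert(roundedRequest(1) == 16);
        assert(roundedRequest(17) == 32);
        assert(roundedRequest(64) == 64);

        // Assuming a hypothetical 48-byte BlockInfo: a 256-byte free block
        // is split for a 64-byte request (64 + 48 + 16 <= 256) but handed
        // out whole for a 208-byte request.
        assert(wouldSplit(256, 64, 48));
        assert(!wouldSplit(256, 208, 48));
        return 0;
    }
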
diff --git a/cpp/src/Ice/MemoryPool.h b/cpp/src/Ice/MemoryPool.h
new file mode 100644
index 00000000000..c7df2041d5b
--- /dev/null
+++ b/cpp/src/Ice/MemoryPool.h
@@ -0,0 +1,60 @@
+// **********************************************************************
+//
+// Copyright (c) 2003-2006 ZeroC, Inc. All rights reserved.
+//
+// This copy of Ice is licensed to you under the terms described in the
+// ICE_LICENSE file included in this distribution.
+//
+// **********************************************************************
+
+
+#ifndef ICE_MEMORY_POOL_H
+#define ICE_MEMORY_POOL_H
+
+#include <IceUtil/Mutex.h>
+#include <Ice/InstanceF.h>
+
+namespace IceInternal
+{
+
+//
+// Forward declarations.
+//
+struct PageInfo;
+struct BlockInfo;
+
+//
+// Public interface to the memory pool
+//
+class MemoryPool : private IceUtil::noncopyable
+{
+public:
+ MemoryPool(size_t);
+ ~MemoryPool();
+
+ Ice::Byte* alloc(size_t);
+ Ice::Byte* realloc(Ice::Byte*, size_t);
+ void free(Ice::Byte*);
+
+private:
+ IceUtil::Mutex _mutex;
+ size_t _pageSize;
+ size_t _maxPageSize;
+ size_t _highWaterMark;
+ size_t _currentSize;
+
+ PageInfo* _pages;
+
+ BlockInfo* initBlock(Ice::Byte*, PageInfo*, size_t, bool);
+ PageInfo* initPage(size_t);
+ PageInfo* createNewPage(size_t);
+ void purgePages(bool);
+ Ice::Byte* allocBlock(size_t);
+ Ice::Byte* getBlock(PageInfo*, size_t);
+ void freeBlock(Ice::Byte*);
+ Ice::Byte* reallocBlock(Ice::Byte*, size_t);
+};
+
+}
+
+#endif // End of ICE_MEMORY_POOL_H
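
MemoryPool is an internal class that application code never touches directly; BasicStream reaches it through Instance::memoryPool() and Buffer. For completeness, a minimal direct-usage sketch of the interface declared above (the sizes are illustrative):

    // Internal-API sketch only; real code goes through BasicStream/Buffer.
    #include <Ice/Config.h>      // Ice::Byte
    #include <Ice/MemoryPool.h>
    #include <cstring>

    void
    example()
    {
        // 128 MB high watermark, matching the property default.
        IceInternal::MemoryPool pool(128 * 1024 * 1024);

        Ice::Byte* p = pool.alloc(100);   // request is rounded up internally
        std::memset(p, 0, 100);

        p = pool.realloc(p, 4096);        // grows in place when possible
        pool.free(p);                     // empty pages are culled once the
                                          // pool exceeds its high watermark
    }
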
diff --git a/cpp/src/Ice/PropertyNames.cpp b/cpp/src/Ice/PropertyNames.cpp
index 92f26563d70..0c3e4e603e7 100644
--- a/cpp/src/Ice/PropertyNames.cpp
+++ b/cpp/src/Ice/PropertyNames.cpp
@@ -7,7 +7,7 @@
//
// **********************************************************************
-// Generated by makeprops.py from file `../../config/PropertyNames.def', Mon Sep 11 16:14:10 2006
+// Generated by makeprops.py from file `../config/PropertyNames.def', Wed Sep 13 08:23:09 2006
// IMPORTANT: Do not edit this file -- any edits made here will be lost!
@@ -30,6 +30,8 @@ const char* IceInternal::PropertyNames::IceProps[] =
"Ice.GC.Interval",
"Ice.InitPlugins",
"Ice.Logger.Timestamp",
+ "Ice.MemoryPool",
+ "Ice.MemoryPool.HighWaterMark",
"Ice.MessageSizeMax",
"Ice.MonitorConnections",
"Ice.Nohup",
diff --git a/cpp/src/Ice/PropertyNames.h b/cpp/src/Ice/PropertyNames.h
index bfef02bc2f4..24bbaa197df 100644
--- a/cpp/src/Ice/PropertyNames.h
+++ b/cpp/src/Ice/PropertyNames.h
@@ -7,7 +7,7 @@
//
// **********************************************************************
-// Generated by makeprops.py from file `../../config/PropertyNames.def', Mon Sep 11 16:14:10 2006
+// Generated by makeprops.py from file `../config/PropertyNames.def', Wed Sep 13 08:23:09 2006
// IMPORTANT: Do not edit this file -- any edits made here will be lost!