author    Benoit Foucher <benoit@zeroc.com>    2014-11-05 15:33:01 +0100
committer Benoit Foucher <benoit@zeroc.com>    2014-11-05 15:33:01 +0100
commit    cb4d5772e9a7a9228577df83027e45ec7de022ea (patch)
tree      bd6489fe77ed5fba43adff613293d580fda8e0f3 /cpp/src/Ice/CollocatedRequestHandler.cpp
parent    Fixed src tree build of IceJS (diff)
Fixed ICE-5607: relaxed Ice.MessageSizeMax
Diffstat (limited to 'cpp/src/Ice/CollocatedRequestHandler.cpp')
-rw-r--r--    cpp/src/Ice/CollocatedRequestHandler.cpp    24
1 file changed, 7 insertions(+), 17 deletions(-)
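
The change replaces the boolean Ice.BatchAutoFlush flag with a size threshold obtained from batchAutoFlushSize(): the batch is now flushed once the accumulated batch stream grows past that threshold, instead of being compared against Ice.MessageSizeMax. The following is a minimal, self-contained sketch of that decision logic; BatchQueue, finishRequest and the byte bookkeeping are simplified stand-ins for illustration, not the actual Ice classes.

#include <cstddef>
#include <iostream>
#include <vector>

// Simplified stand-in for the batch bookkeeping in CollocatedRequestHandler.
struct BatchQueue
{
    std::size_t autoFlushSize;          // 0 disables auto-flush, mirroring the _batchAutoFlushSize > 0 guard
    std::vector<unsigned char> stream;  // accumulated batch payload (_batchStream.b in the real code)
    int requestNum;                     // number of queued requests (_batchRequestNum)

    explicit BatchQueue(std::size_t flushSize) : autoFlushSize(flushSize), requestNum(0) {}

    // Called once a request has been marshaled into 'stream'; returns true
    // when the previously queued requests were flushed.
    bool finishRequest(std::size_t lastRequestSize)
    {
        ++requestNum;
        if(autoFlushSize > 0 && stream.size() > autoFlushSize)
        {
            // Flush everything queued before the last request, then start a
            // fresh batch containing only that last request, as in the diff's
            // "temporarily save the last request / reset the batch" sequence.
            std::cout << "auto-flush: " << (requestNum - 1) << " request(s), "
                      << (stream.size() - lastRequestSize) << " bytes" << std::endl;
            std::vector<unsigned char> last(stream.end() - lastRequestSize, stream.end());
            stream.swap(last);
            requestNum = 1;
            return true;
        }
        return false;
    }
};

int main()
{
    BatchQueue q(64);                          // hypothetical 64-byte flush threshold
    for(int i = 0; i < 5; ++i)
    {
        q.stream.resize(q.stream.size() + 20); // pretend each request marshals 20 bytes
        q.finishRequest(20);
    }
    return 0;
}

With a 64-byte threshold and 20-byte requests, the fourth request triggers a flush of the first three and then starts the next batch by itself, which is the behavior the diff below implements against _batchAutoFlushSize.
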
diff --git a/cpp/src/Ice/CollocatedRequestHandler.cpp b/cpp/src/Ice/CollocatedRequestHandler.cpp
index 543a1b50153..99555c76998 100644
--- a/cpp/src/Ice/CollocatedRequestHandler.cpp
+++ b/cpp/src/Ice/CollocatedRequestHandler.cpp
@@ -139,12 +139,11 @@ CollocatedRequestHandler::CollocatedRequestHandler(const ReferencePtr& ref, cons
_dispatcher(_reference->getInstance()->initializationData().dispatcher),
_logger(_reference->getInstance()->initializationData().logger), // Cached for better performance.
_traceLevels(_reference->getInstance()->traceLevels()), // Cached for better performance.
- _batchAutoFlush(
- ref->getInstance()->initializationData().properties->getPropertyAsIntWithDefault("Ice.BatchAutoFlush", 1) > 0),
+ _batchAutoFlushSize(ref->getInstance()->batchAutoFlushSize()),
_requestId(0),
_batchStreamInUse(false),
_batchRequestNum(0),
- _batchStream(ref->getInstance().get(), currentProtocolEncoding, _batchAutoFlush)
+ _batchStream(ref->getInstance().get(), currentProtocolEncoding)
{
}
@@ -198,7 +197,7 @@ CollocatedRequestHandler::finishBatchRequest(BasicStream* os)
Lock sync(*this);
_batchStream.swap(*os);
- if(_batchAutoFlush && (_batchStream.b.size() > _reference->getInstance()->messageSizeMax()))
+ if(_batchAutoFlushSize > 0 && (_batchStream.b.size() > _batchAutoFlushSize))
{
//
// Temporarily save the last request.
@@ -211,21 +210,12 @@ CollocatedRequestHandler::finishBatchRequest(BasicStream* os)
//
// Reset the batch.
//
- BasicStream dummy(_reference->getInstance().get(), currentProtocolEncoding, _batchAutoFlush);
+ BasicStream dummy(_reference->getInstance().get(), currentProtocolEncoding);
_batchStream.swap(dummy);
_batchRequestNum = 0;
_batchMarker = 0;
//
- // Check again if the last request doesn't exceed what we can send with the auto flush
- //
- if(sizeof(requestBatchHdr) + lastRequest.size() > _reference->getInstance()->messageSizeMax())
- {
- Ex::throwMemoryLimitException(__FILE__, __LINE__, sizeof(requestBatchHdr) + lastRequest.size(),
- _reference->getInstance()->messageSizeMax());
- }
-
- //
// Start a new batch with the last message that caused us to go over the limit.
//
_batchStream.writeBlob(requestBatchHdr, sizeof(requestBatchHdr));
@@ -252,7 +242,7 @@ CollocatedRequestHandler::abortBatchRequest()
{
Lock sync(*this);
- BasicStream dummy(_reference->getInstance().get(), currentProtocolEncoding, _batchAutoFlush);
+ BasicStream dummy(_reference->getInstance().get(), currentProtocolEncoding);
_batchStream.swap(dummy);
_batchRequestNum = 0;
_batchMarker = 0;
@@ -436,7 +426,7 @@ CollocatedRequestHandler::invokeBatchRequests(OutgoingBase* out)
//
// Reset the batch stream.
//
- BasicStream dummy(_reference->getInstance().get(), currentProtocolEncoding, _batchAutoFlush);
+ BasicStream dummy(_reference->getInstance().get(), currentProtocolEncoding);
_batchStream.swap(dummy);
_batchRequestNum = 0;
_batchMarker = 0;
@@ -494,7 +484,7 @@ CollocatedRequestHandler::invokeAsyncBatchRequests(OutgoingAsyncBase* outAsync)
//
// Reset the batch stream.
//
- BasicStream dummy(_reference->getInstance().get(), currentProtocolEncoding, _batchAutoFlush);
+ BasicStream dummy(_reference->getInstance().get(), currentProtocolEncoding);
_batchStream.swap(dummy);
_batchRequestNum = 0;
_batchMarker = 0;
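
With the MemoryLimitException re-check removed, a single request larger than the flush threshold no longer fails at this point; it simply becomes the first request of the next batch. Assuming the new threshold is surfaced as an Ice.BatchAutoFlushSize property (the diff only shows the instance-level batchAutoFlushSize() accessor, so the exact property spelling and units are an assumption here), a client could tune it roughly like this:

#include <Ice/Ice.h>

int main(int argc, char* argv[])
{
    Ice::InitializationData initData;
    initData.properties = Ice::createProperties();
    // Assumed property name and value; not shown in this diff.
    initData.properties->setProperty("Ice.BatchAutoFlushSize", "1024");

    Ice::CommunicatorPtr communicator = Ice::initialize(argc, argv, initData);
    // ... obtain a collocated proxy and queue batch requests as usual ...
    communicator->destroy();
    return 0;
}
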