#if __cplusplus > 199711L || _MSC_VER >= 1700 // C++11 or VS2012
#include <chrono>
#endif

#ifndef MOODYCAMEL_CACHE_LINE_SIZE
#define MOODYCAMEL_CACHE_LINE_SIZE 64
#endif

#ifndef MOODYCAMEL_EXCEPTIONS_ENABLED
#if (defined(_MSC_VER) && defined(_CPPUNWIND)) || (defined(__GNUC__) && defined(__EXCEPTIONS)) || (!defined(_MSC_VER) && !defined(__GNUC__))
#define MOODYCAMEL_EXCEPTIONS_ENABLED
#endif
#endif

#ifndef MOODYCAMEL_HAS_EMPLACE
#if !defined(_MSC_VER) || _MSC_VER >= 1800 // variadic templates: either a non-MS compiler or VS >= 2013
#define MOODYCAMEL_HAS_EMPLACE 1
#endif
#endif

#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable: 4324) // structure was padded due to __declspec(align())
#pragma warning(disable: 4820) // padding was added
#pragma warning(disable: 4127) // conditional expression is constant
#endif

// A lock-free queue for a single consumer thread and a single producer
// thread; at most one thread may enqueue and one (other) thread may
// dequeue at any given time.
template<typename T, size_t MAX_BLOCK_SIZE = 512>
class ReaderWriterQueue
{
public:
    // Constructs a queue that can hold maxSize elements without further
    // allocations.
    explicit ReaderWriterQueue(size_t maxSize = 15)
#ifndef NDEBUG
        : enqueuing(false)
        , dequeuing(false)
#endif
    {
        assert(MAX_BLOCK_SIZE == ceilToPow2(MAX_BLOCK_SIZE) && "MAX_BLOCK_SIZE must be a power of 2");
        assert(MAX_BLOCK_SIZE >= 2 && "MAX_BLOCK_SIZE must be at least 2");

        Block* firstBlock = nullptr;

        largestBlockSize = ceilToPow2(maxSize + 1); // one spare slot is needed to distinguish "empty" from "full"
        if (largestBlockSize > MAX_BLOCK_SIZE * 2) {
            // Use several blocks of MAX_BLOCK_SIZE each instead of one huge
            // block. One whole block and one slot per block are kept spare,
            // so the guaranteed usable capacity is
            // (MAX_BLOCK_SIZE - 1) * (blockCount - 1); solving for the block
            // count and applying a ceiling to the division gives:
            size_t initialBlockCount = (maxSize + MAX_BLOCK_SIZE * 2 - 3) / (MAX_BLOCK_SIZE - 1);
            largestBlockSize = MAX_BLOCK_SIZE;
            Block* lastBlock = nullptr;
            for (size_t i = 0; i != initialBlockCount; ++i) {
                auto block = make_block(largestBlockSize);
                if (block == nullptr) {
#ifdef MOODYCAMEL_EXCEPTIONS_ENABLED
                    throw std::bad_alloc();
#else
                    abort();
#endif
                }
                if (firstBlock == nullptr) {
                    firstBlock = block;
                }
                else {
                    lastBlock->next = block;
                }
                lastBlock = block;
                block->next = firstBlock;
            }
        }
        else {
            firstBlock = make_block(largestBlockSize);
            if (firstBlock == nullptr) {
#ifdef MOODYCAMEL_EXCEPTIONS_ENABLED
                throw std::bad_alloc();
#else
                abort();
#endif
            }
            firstBlock->next = firstBlock;
        }
        frontBlock = firstBlock;
        tailBlock = firstBlock;

        // Make sure the reader/writer threads see the fully-initialized state:
        fence(memory_order_sync);
    }
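
    // Worked example of the sizing above (illustrative, assuming
    // MAX_BLOCK_SIZE == 512): for maxSize == 2000, ceilToPow2(2001) == 2048,
    // which exceeds 1024, so multiple blocks are used:
    // initialBlockCount == (2000 + 1024 - 3) / 511 == 5. Five 512-slot
    // blocks give (512 - 1) * (5 - 1) == 2044 usable slots, covering the
    // requested 2000 while keeping one slot per block and one whole block
    // spare.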
    // Note: The queue should not be accessed concurrently while it's
    // being moved. It's up to the user to synchronize this.
    ReaderWriterQueue(ReaderWriterQueue&& other)
        : frontBlock(other.frontBlock.load()),
        tailBlock(other.tailBlock.load()),
        largestBlockSize(other.largestBlockSize)
#ifndef NDEBUG
        , enqueuing(false)
        , dequeuing(false)
#endif
    {
        // Give the moved-from queue a fresh (small) block so it stays usable
        other.largestBlockSize = 32;
        Block* b = other.make_block(other.largestBlockSize);
        if (b == nullptr) {
#ifdef MOODYCAMEL_EXCEPTIONS_ENABLED
            throw std::bad_alloc();
#else
            abort();
#endif
        }
        b->next = b;
        other.frontBlock = b;
        other.tailBlock = b;
    }
    // Note: The queue should not be accessed concurrently while it's
    // being moved. It's up to the user to synchronize this.
    ReaderWriterQueue& operator=(ReaderWriterQueue&& other)
    {
        Block* b = frontBlock.load();
        frontBlock = other.frontBlock.load();
        other.frontBlock = b;
        b = tailBlock.load();
        tailBlock = other.tailBlock.load();
        other.tailBlock = b;
        std::swap(largestBlockSize, other.largestBlockSize);
        return *this;
    }
    // Note: The queue should not be accessed concurrently while it's
    // being deleted. It's up to the user to synchronize this.
    ~ReaderWriterQueue()
    {
        // Make sure we get the latest version of all variables from other CPUs:
        fence(memory_order_sync);

        // Destroy any remaining objects in the queue and free the memory
        Block* frontBlock_ = frontBlock;
        Block* block = frontBlock_;
        do {
            Block* nextBlock = block->next;
            size_t blockFront = block->front;
            size_t blockTail = block->tail;

            for (size_t i = blockFront; i != blockTail; i = (i + 1) & block->sizeMask) {
                auto element = reinterpret_cast<T*>(block->data + i * sizeof(T));
                element->~T();
                (void)element;
            }

            auto rawBlock = block->rawThis;
            block->~Block();
            std::free(rawBlock);
            block = nextBlock;
        } while (block != frontBlock_);
    }
    // Attempts to enqueue a copy of element if there is room in the queue.
    // Returns true if the element was enqueued, false otherwise.
    // Does not allocate memory.
    AE_FORCEINLINE bool try_enqueue(T const& element)
    {
        return inner_enqueue<CannotAlloc>(element);
    }

    // Attempts to enqueue a moved copy of element if there is room in the
    // queue. Returns true if the element was enqueued, false otherwise.
    // Does not allocate memory.
    AE_FORCEINLINE bool try_enqueue(T&& element)
    {
        return inner_enqueue<CannotAlloc>(std::forward<T>(element));
    }

#if MOODYCAMEL_HAS_EMPLACE
    // Like try_enqueue() but with emplace semantics (i.e. construct-in-place).
    template<typename... Args>
    AE_FORCEINLINE bool try_emplace(Args&&... args)
    {
        return inner_enqueue<CannotAlloc>(std::forward<Args>(args)...);
    }
#endif

    // Enqueues a copy of element on the queue.
    // Allocates an additional block of memory if needed.
    // Only fails (returns false) if memory allocation fails.
    AE_FORCEINLINE bool enqueue(T const& element)
    {
        return inner_enqueue<CanAlloc>(element);
    }

    // Enqueues a moved copy of element on the queue.
    // Allocates an additional block of memory if needed.
    // Only fails (returns false) if memory allocation fails.
    AE_FORCEINLINE bool enqueue(T&& element)
    {
        return inner_enqueue<CanAlloc>(std::forward<T>(element));
    }

#if MOODYCAMEL_HAS_EMPLACE
    // Like enqueue() but with emplace semantics (i.e. construct-in-place).
    template<typename... Args>
    AE_FORCEINLINE bool emplace(Args&&... args)
    {
        return inner_enqueue<CanAlloc>(std::forward<Args>(args)...);
    }
#endif
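
    // Producer-side usage sketch (illustrative, not part of the original
    // header): enqueue() grows the queue on demand, try_enqueue() fails
    // instead of allocating.
    //
    //     ReaderWriterQueue<int> q(100);   // room for 100 elements up front
    //     q.enqueue(17);                   // may allocate if the queue is full
    //     if (!q.try_enqueue(18)) {
    //         // queue was full; element was not added, nothing was allocated
    //     }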
    // Attempts to dequeue an element; if the queue is empty, returns false
    // instead. If the queue has at least one element, moves the front element
    // into result using operator=, then returns true.
    template<typename U>
    bool try_dequeue(U& result)
    {
#ifndef NDEBUG
        ReentrantGuard guard(this->dequeuing);
#endif
        Block* frontBlock_ = frontBlock.load();
        size_t blockTail = frontBlock_->localTail;
        size_t blockFront = frontBlock_->front.load();

        if (blockFront != blockTail || blockFront != (frontBlock_->localTail = frontBlock_->tail.load())) {
            fence(memory_order_acquire);

        non_empty_front_block:
            // The front block is not empty: dequeue from here
            auto element = reinterpret_cast<T*>(frontBlock_->data + blockFront * sizeof(T));
            result = std::move(*element);
            element->~T();

            blockFront = (blockFront + 1) & frontBlock_->sizeMask;

            fence(memory_order_release);
            frontBlock_->front = blockFront;
        }
        else if (frontBlock_ != tailBlock.load()) {
            fence(memory_order_acquire);

            frontBlock_ = frontBlock.load();
            blockTail = frontBlock_->localTail = frontBlock_->tail.load();
            blockFront = frontBlock_->front.load();
            fence(memory_order_acquire);

            if (blockFront != blockTail) {
                // Oh look, the front block isn't empty after all
                goto non_empty_front_block;
            }

            // The front block is empty but there's another block ahead, advance to it
            Block* nextBlock = frontBlock_->next;

            size_t nextBlockFront = nextBlock->front.load();
            size_t nextBlockTail = nextBlock->localTail = nextBlock->tail.load();
            fence(memory_order_acquire);

            // Since the tail block is only ever advanced after being written
            // to, we know there's for sure an element to dequeue on it
            assert(nextBlockFront != nextBlockTail);
            AE_UNUSED(nextBlockTail);

            // We're done with this block; let the producer reuse it
            fence(memory_order_release); // expose pending changes to frontBlock_->front
            frontBlock = frontBlock_ = nextBlock;

            compiler_fence(memory_order_release); // not strictly needed

            auto element = reinterpret_cast<T*>(frontBlock_->data + nextBlockFront * sizeof(T));
            result = std::move(*element);
            element->~T();

            nextBlockFront = (nextBlockFront + 1) & frontBlock_->sizeMask;

            fence(memory_order_release);
            frontBlock_->front = nextBlockFront;
        }
        else {
            // No elements in the current block and no other block to advance to
            return false;
        }

        return true;
    }
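
    // Consumer-side usage sketch (illustrative, not part of the original
    // header): drain the queue without blocking. `process` stands in for a
    // hypothetical user handler.
    //
    //     int item;
    //     while (q.try_dequeue(item)) {
    //         process(item);
    //     }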
    // Returns a pointer to the front element in the queue (the one that would
    // be removed next by a call to try_dequeue() or pop()). If the queue
    // appears empty at the time the method is called, returns nullptr.
    // Must be called only from the consumer thread.
    T* peek()
    {
#ifndef NDEBUG
        ReentrantGuard guard(this->dequeuing);
#endif
        // See try_dequeue() for reasoning
        Block* frontBlock_ = frontBlock.load();
        size_t blockTail = frontBlock_->localTail;
        size_t blockFront = frontBlock_->front.load();

        if (blockFront != blockTail || blockFront != (frontBlock_->localTail = frontBlock_->tail.load())) {
            fence(memory_order_acquire);
        non_empty_front_block:
            return reinterpret_cast<T*>(frontBlock_->data + blockFront * sizeof(T));
        }
        else if (frontBlock_ != tailBlock.load()) {
            fence(memory_order_acquire);
            frontBlock_ = frontBlock.load();
            blockTail = frontBlock_->localTail = frontBlock_->tail.load();
            blockFront = frontBlock_->front.load();
            fence(memory_order_acquire);

            if (blockFront != blockTail) {
                goto non_empty_front_block;
            }

            Block* nextBlock = frontBlock_->next;

            size_t nextBlockFront = nextBlock->front.load();
            fence(memory_order_acquire);

            assert(nextBlockFront != nextBlock->tail.load());
            return reinterpret_cast<T*>(nextBlock->data + nextBlockFront * sizeof(T));
        }

        return nullptr;
    }
    // Removes the front element from the queue, if any, without returning it.
    // Returns true on success, or false if the queue appeared empty at the
    // time pop() was called.
    bool pop()
    {
#ifndef NDEBUG
        ReentrantGuard guard(this->dequeuing);
#endif
        // See try_dequeue() for reasoning
        Block* frontBlock_ = frontBlock.load();
        size_t blockTail = frontBlock_->localTail;
        size_t blockFront = frontBlock_->front.load();

        if (blockFront != blockTail || blockFront != (frontBlock_->localTail = frontBlock_->tail.load())) {
            fence(memory_order_acquire);

        non_empty_front_block:
            auto element = reinterpret_cast<T*>(frontBlock_->data + blockFront * sizeof(T));
            element->~T();

            blockFront = (blockFront + 1) & frontBlock_->sizeMask;

            fence(memory_order_release);
            frontBlock_->front = blockFront;
        }
        else if (frontBlock_ != tailBlock.load()) {
            fence(memory_order_acquire);
            frontBlock_ = frontBlock.load();
            blockTail = frontBlock_->localTail = frontBlock_->tail.load();
            blockFront = frontBlock_->front.load();
            fence(memory_order_acquire);

            if (blockFront != blockTail) {
                goto non_empty_front_block;
            }

            // The front block is empty but there's another block ahead, advance to it
            Block* nextBlock = frontBlock_->next;

            size_t nextBlockFront = nextBlock->front.load();
            size_t nextBlockTail = nextBlock->localTail = nextBlock->tail.load();
            fence(memory_order_acquire);

            assert(nextBlockFront != nextBlockTail);
            AE_UNUSED(nextBlockTail);

            fence(memory_order_release);
            frontBlock = frontBlock_ = nextBlock;

            compiler_fence(memory_order_release);

            auto element = reinterpret_cast<T*>(frontBlock_->data + nextBlockFront * sizeof(T));
            element->~T();

            nextBlockFront = (nextBlockFront + 1) & frontBlock_->sizeMask;

            fence(memory_order_release);
            frontBlock_->front = nextBlockFront;
        }
        else {
            // No elements in the current block and no other block to advance to
            return false;
        }

        return true;
    }
    // Returns the approximate number of items currently in the queue.
    // Safe to call from both the producer and consumer threads.
    inline size_t size_approx() const
    {
        size_t result = 0;
        Block* frontBlock_ = frontBlock.load();
        Block* block = frontBlock_;
        do {
            fence(memory_order_acquire);
            size_t blockFront = block->front.load();
            size_t blockTail = block->tail.load();
            result += (blockTail - blockFront) & block->sizeMask;
            block = block->next.load();
        } while (block != frontBlock_);

        return result;
    }
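
    // Note on the arithmetic above (illustrative): (blockTail - blockFront)
    // & sizeMask is correct even when tail has wrapped around past the end
    // of the block. For example, with sizeMask == 15, front == 14 and
    // tail == 2, the subtraction underflows, but (2 - 14) & 15 == 4, which
    // matches the four occupied slots at indices 14, 15, 0 and 1.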
private:
    enum AllocationMode { CanAlloc, CannotAlloc };

#if MOODYCAMEL_HAS_EMPLACE
    template<AllocationMode canAlloc, typename... Args>
    bool inner_enqueue(Args&&... args)
#else
    template<AllocationMode canAlloc, typename U>
    bool inner_enqueue(U&& element)
#endif
    {
#ifndef NDEBUG
        ReentrantGuard guard(this->enqueuing);
#endif
        Block* tailBlock_ = tailBlock.load();
        size_t blockFront = tailBlock_->localFront;
        size_t blockTail = tailBlock_->tail.load();

        size_t nextBlockTail = (blockTail + 1) & tailBlock_->sizeMask;
        if (nextBlockTail != blockFront || nextBlockTail != (tailBlock_->localFront = tailBlock_->front.load())) {
            fence(memory_order_acquire);
            // This block has room for at least one more element
            char* location = tailBlock_->data + blockTail * sizeof(T);
#if MOODYCAMEL_HAS_EMPLACE
            new (location) T(std::forward<Args>(args)...);
#else
            new (location) T(std::forward<U>(element));
#endif

            fence(memory_order_release);
            tailBlock_->tail = nextBlockTail;
        }
        else {
            fence(memory_order_acquire);
            if (tailBlock_->next.load() != frontBlock) {
                // The tail block is full, but there's a free block ahead; use
                // it. (We can't reuse the front block: the consumer would then
                // keep reading new values from it instead of advancing to the
                // full blocks whose values were enqueued first.)
                fence(memory_order_acquire); // ensure we see the latest writes

                Block* tailBlockNext = tailBlock_->next.load();
                size_t nextBlockFront = tailBlockNext->localFront = tailBlockNext->front.load();
                nextBlockTail = tailBlockNext->tail.load();
                fence(memory_order_acquire);

                // This block must be empty since it's not the front block and
                // we go through the blocks in a circle
                assert(nextBlockFront == nextBlockTail);
                tailBlockNext->localFront = nextBlockFront;

                char* location = tailBlockNext->data + nextBlockTail * sizeof(T);
#if MOODYCAMEL_HAS_EMPLACE
                new (location) T(std::forward<Args>(args)...);
#else
                new (location) T(std::forward<U>(element));
#endif

                tailBlockNext->tail = (nextBlockTail + 1) & tailBlockNext->sizeMask;

                fence(memory_order_release);
                tailBlock = tailBlockNext;
            }
            else if (canAlloc == CanAlloc) {
                // The tail block is full and there's no free block ahead; create a new block
                auto newBlockSize = largestBlockSize >= MAX_BLOCK_SIZE ? largestBlockSize : largestBlockSize * 2;
                auto newBlock = make_block(newBlockSize);
                if (newBlock == nullptr) {
                    // Could not allocate a block!
                    return false;
                }
                largestBlockSize = newBlockSize;

#if MOODYCAMEL_HAS_EMPLACE
                new (newBlock->data) T(std::forward<Args>(args)...);
#else
                new (newBlock->data) T(std::forward<U>(element));
#endif
                assert(newBlock->front == 0);
                newBlock->tail = newBlock->localTail = 1;

                newBlock->next = tailBlock_->next.load();
                tailBlock_->next = newBlock;

                fence(memory_order_release);
                tailBlock = newBlock;
            }
            else if (canAlloc == CannotAlloc) {
                // Would have had to allocate a new block to enqueue, but not allowed
                return false;
            }
            else {
                assert(false && "Should be unreachable code");
                return false;
            }
        }

        return true;
    }
    AE_FORCEINLINE static size_t ceilToPow2(size_t x)
    {
        // Round up to the next power of two by smearing the high bit down
        --x;
        x |= x >> 1;
        x |= x >> 2;
        x |= x >> 4;
        for (size_t i = 1; i < sizeof(size_t); i <<= 1) {
            x |= x >> (i << 3);
        }
        ++x;
        return x;
    }
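
    // E.g. ceilToPow2(512) == 512 and ceilToPow2(600) == 1024: the
    // decrement/smear/increment sequence leaves powers of two untouched and
    // rounds everything else up to the next one.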
    template<typename U>
    static AE_FORCEINLINE char* align_for(char* ptr)
    {
        const std::size_t alignment = std::alignment_of<U>::value;
        return ptr + (alignment - (reinterpret_cast<std::uintptr_t>(ptr) % alignment)) % alignment;
    }
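
    // E.g. with alignment == 8, a pointer ending in 0x...05 is advanced by
    // 3 bytes, while an already-aligned pointer is advanced by 0 thanks to
    // the outer "% alignment".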
#ifndef NDEBUG
    struct ReentrantGuard
    {
        ReentrantGuard(bool& _inSection)
            : inSection(_inSection)
        {
            assert(!inSection && "ReaderWriterQueue does not support enqueuing or dequeuing elements from other elements' ctors and dtors");
            inSection = true;
        }

        ~ReentrantGuard() { inSection = false; }

    private:
        ReentrantGuard& operator=(ReentrantGuard const&);

    private:
        bool& inSection;
    };
#endif
    struct Block
    {
        // Avoid false sharing by putting highly contended variables on their own cache lines
        weak_atomic<size_t> front;  // (Atomic) Elements are read from here
        size_t localTail;           // An uncontended shadow copy of tail, owned by the consumer

        char cachelineFiller0[MOODYCAMEL_CACHE_LINE_SIZE - sizeof(weak_atomic<size_t>) - sizeof(size_t)];
        weak_atomic<size_t> tail;   // (Atomic) Elements are enqueued here
        size_t localFront;          // An uncontended shadow copy of front, owned by the producer

        // next isn't very contended, but we don't want it on the same cache line as tail (which is)
        char cachelineFiller1[MOODYCAMEL_CACHE_LINE_SIZE - sizeof(weak_atomic<size_t>) - sizeof(size_t)];
        weak_atomic<Block*> next;   // (Atomic)

        char* data; // Contents (on heap) are aligned to T's alignment

        const size_t sizeMask;

        // size must be a power of two (and greater than 0)
        Block(size_t const& _size, char* _rawThis, char* _data)
            : front(0), localTail(0), tail(0), localFront(0), next(nullptr), data(_data), sizeMask(_size - 1), rawThis(_rawThis)
        {
        }

    private:
        // C4512 - Assignment operator could not be generated
        Block& operator=(Block const&);

    public:
        char* rawThis;
    };
    static Block* make_block(size_t capacity)
    {
        // Allocate enough memory for the block itself, as well as all the
        // elements it will contain
        auto size = sizeof(Block) + std::alignment_of<Block>::value - 1;
        size += sizeof(T) * capacity + std::alignment_of<T>::value - 1;
        auto newBlockRaw = static_cast<char*>(std::malloc(size));
        if (newBlockRaw == nullptr) {
            return nullptr;
        }

        auto newBlockAligned = align_for<Block>(newBlockRaw);
        auto newBlockData = align_for<T>(newBlockAligned + sizeof(Block));
        return new (newBlockAligned) Block(capacity, newBlockRaw, newBlockData);
    }
private:
    weak_atomic<Block*> frontBlock; // (Atomic) Elements are dequeued from this block

    char cachelineFiller[MOODYCAMEL_CACHE_LINE_SIZE - sizeof(weak_atomic<Block*>)];
    weak_atomic<Block*> tailBlock;  // (Atomic) Elements are enqueued to this block

    size_t largestBlockSize;

#ifndef NDEBUG
    bool enqueuing;
    bool dequeuing;
#endif
};
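
// Example usage (an illustrative sketch, not part of the original header):
//
//     ReaderWriterQueue<int> q(100);
//
//     // Producer thread:
//     q.enqueue(17);
//
//     // Consumer thread:
//     int value;
//     if (q.try_dequeue(value)) {
//         // value is now 17
//     }
//
// At most one thread may enqueue and one (other) thread may dequeue at a
// time; any other usage requires external synchronization.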
// Like ReaderWriterQueue, but also provides blocking operations
template<typename T, size_t MAX_BLOCK_SIZE = 512>
class BlockingReaderWriterQueue
{
public:
    // Attempts to enqueue a copy of element if there is room in the queue.
    // Does not allocate memory.
    AE_FORCEINLINE bool try_enqueue(T const& element)
    {
        if (inner.try_enqueue(element)) {
            sema.signal();
            return true;
        }
        return false;
    }

    // Attempts to enqueue a moved copy of element if there is room in the
    // queue. Does not allocate memory.
    AE_FORCEINLINE bool try_enqueue(T&& element)
    {
        if (inner.try_enqueue(std::forward<T>(element))) {
            sema.signal();
            return true;
        }
        return false;
    }

    // Enqueues a copy of element on the queue.
    // Allocates an additional block of memory if needed.
    AE_FORCEINLINE bool enqueue(T const& element)
    {
        if (inner.enqueue(element)) {
            sema.signal();
            return true;
        }
        return false;
    }

    // Enqueues a moved copy of element on the queue.
    // Allocates an additional block of memory if needed.
    AE_FORCEINLINE bool enqueue(T&& element)
    {
        if (inner.enqueue(std::forward<T>(element))) {
            sema.signal();
            return true;
        }
        return false;
    }
    // Attempts to dequeue an element; if the queue is empty, returns false
    // instead. If the queue has at least one element, moves the front element
    // into result using operator=, then returns true.
    template<typename U>
    bool try_dequeue(U& result)
    {
        if (sema.tryWait()) {
            bool success = inner.try_dequeue(result);
            assert(success);
            AE_UNUSED(success);
            return true;
        }
        return false;
    }

    // Dequeues an element, blocking until one is available.
    template<typename U>
    void wait_dequeue(U& result)
    {
        sema.wait();
        bool success = inner.try_dequeue(result);
        assert(success);
        AE_UNUSED(success);
    }

    // Dequeues an element, blocking until either one is available or the
    // timeout (in microseconds) expires. Returns false without setting result
    // if the timeout expired, true otherwise.
    template<typename U>
    bool wait_dequeue_timed(U& result, std::int64_t timeout_usecs)
    {
        if (!sema.wait(timeout_usecs)) {
            return false;
        }
        bool success = inner.try_dequeue(result);
        assert(success);
        AE_UNUSED(success);
        return true;
    }
#if __cplusplus > 199711L || _MSC_VER >= 1700
    // Dequeues an element, blocking until either one is available or the
    // timeout expires. Returns false without setting result if the timeout
    // expired, true otherwise.
    template<typename U, typename Rep, typename Period>
    inline bool wait_dequeue_timed(U& result, std::chrono::duration<Rep, Period> const& timeout)
    {
        return wait_dequeue_timed(result, std::chrono::duration_cast<std::chrono::microseconds>(timeout).count());
    }
#endif
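
    // E.g. (illustrative): wait at most five milliseconds for an element.
    //
    //     int item;
    //     if (q.wait_dequeue_timed(item, std::chrono::milliseconds(5))) {
    //         // got an element within the timeout
    //     }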
    // Returns a pointer to the front element in the queue (the one that would
    // be removed next by a call to try_dequeue() or pop()), or nullptr if the
    // queue appears empty. Must be called only from the consumer thread.
    AE_FORCEINLINE T* peek()
    {
        return inner.peek();
    }

    // Removes the front element from the queue, if any, without returning it.
    // Returns true on success, false if the queue appeared empty.
    AE_FORCEINLINE bool pop()
    {
        if (sema.tryWait()) {
            bool result = inner.pop();
            assert(result);
            AE_UNUSED(result);
            return true;
        }
        return false;
    }

    // Returns the approximate number of items currently in the queue.
    // Safe to call from both the producer and consumer threads.
    AE_FORCEINLINE size_t size_approx() const
    {
        return sema.availableApprox();
    }

private:
    ReaderWriterQueue<T, MAX_BLOCK_SIZE> inner;
    spsc_sema::LightweightSemaphore sema;
};
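
// Example usage (an illustrative sketch, not part of the original header):
//
//     BlockingReaderWriterQueue<int> q;
//
//     // Producer thread:
//     q.enqueue(42);           // also signals the semaphore
//
//     // Consumer thread:
//     int item;
//     q.wait_dequeue(item);    // blocks until an element is available
//
// The semaphore count tracks the number of enqueued-but-not-yet-dequeued
// elements, which is why size_approx() simply reads it.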