// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#ifndef RAPIDJSON_ALLOCATORS_H_
#define RAPIDJSON_ALLOCATORS_H_

#include "rapidjson.h"
#include "internal/meta.h"

#include <memory>
#include <limits>

#if RAPIDJSON_HAS_CXX11
#include <type_traits>
#endif

RAPIDJSON_NAMESPACE_BEGIN

///////////////////////////////////////////////////////////////////////////////
// Allocator

/*! \class rapidjson::Allocator
    \brief Concept for allocating, resizing and freeing memory blocks.

    Note that Malloc() and Realloc() are non-static but Free() is static.

    So if an allocator needs to support Free(), it needs to put its pointer in
    the header of the memory block.

\code
concept Allocator {
    static const bool kNeedFree;    //!< Whether this allocator needs to call Free().

    // Allocate a memory block.
    // \param size of the memory block in bytes.
    // \returns pointer to the memory block.
    void* Malloc(size_t size);

    // Resize a memory block.
    // \param originalPtr The pointer to current memory block. Null pointer is permitted.
    // \param originalSize The current size in bytes. (Design issue: since some allocators may not book-keep this, passing it explicitly can save memory.)
    // \param newSize the new size in bytes.
    void* Realloc(void* originalPtr, size_t originalSize, size_t newSize);

    // Free a memory block.
    // \param ptr Pointer to the memory block. Null pointer is permitted.
    static void Free(void *ptr);
};
\endcode
*/

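/* Illustrative sketch only (not part of RapidJSON): a minimal allocator that
   models the concept above by forwarding to the C runtime (<cstdlib>).
   CrtAllocator below is the library's real, supported equivalent of this sketch.

\code
class MyMallocAllocator {
public:
    static const bool kNeedFree = true;   // blocks must be released with Free()

    void* Malloc(size_t size) {
        return size ? std::malloc(size) : NULL;   // normalize malloc(0) to NULL
    }

    void* Realloc(void* originalPtr, size_t originalSize, size_t newSize) {
        (void)originalSize;                       // this allocator does not track block sizes
        if (newSize == 0) {
            std::free(originalPtr);
            return NULL;
        }
        return std::realloc(originalPtr, newSize);
    }

    static void Free(void* ptr) { std::free(ptr); }
};
\endcode
*/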

/*! \def RAPIDJSON_ALLOCATOR_DEFAULT_CHUNK_CAPACITY
    \ingroup RAPIDJSON_CONFIG
    \brief User-defined kDefaultChunkCapacity definition.

    The user can define this as any \c size that is a power of 2.
*/

#ifndef RAPIDJSON_ALLOCATOR_DEFAULT_CHUNK_CAPACITY
#define RAPIDJSON_ALLOCATOR_DEFAULT_CHUNK_CAPACITY (64 * 1024)
#endif
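
// Because of the #ifndef guard above, the default 64 KiB chunk capacity can be
// overridden per build by defining the macro before any RapidJSON header is
// included. For example (illustrative value only):
//
//     #define RAPIDJSON_ALLOCATOR_DEFAULT_CHUNK_CAPACITY (256 * 1024)
//     #include "rapidjson/allocators.h"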


///////////////////////////////////////////////////////////////////////////////
// CrtAllocator

//! C-runtime library allocator.
/*! This class is just a wrapper for the standard C library memory routines.
    \note implements Allocator concept
*/
class CrtAllocator {
public:
    static const bool kNeedFree = true;
    void* Malloc(size_t size) {
        if (size) // behavior of malloc(0) is implementation defined.
            return RAPIDJSON_MALLOC(size);
        else
            return NULL; // standardize to returning NULL.
    }
    void* Realloc(void* originalPtr, size_t originalSize, size_t newSize) {
        (void)originalSize;
        if (newSize == 0) {
            RAPIDJSON_FREE(originalPtr);
            return NULL;
        }
        return RAPIDJSON_REALLOC(originalPtr, newSize);
    }
    static void Free(void *ptr) RAPIDJSON_NOEXCEPT { RAPIDJSON_FREE(ptr); }

    bool operator==(const CrtAllocator&) const RAPIDJSON_NOEXCEPT {
        return true;
    }
    bool operator!=(const CrtAllocator&) const RAPIDJSON_NOEXCEPT {
        return false;
    }
};
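
/* Illustrative usage of CrtAllocator (sizes are arbitrary):

\code
CrtAllocator a;
void* p = a.Malloc(100);        // like malloc(100)
p = a.Realloc(p, 100, 200);     // like realloc(p, 200); the original size is ignored
CrtAllocator::Free(p);          // like free(p); required because kNeedFree is true
\endcode
*/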

///////////////////////////////////////////////////////////////////////////////
// MemoryPoolAllocator

//! Default memory allocator used by the parser and DOM.
/*! This allocator allocates memory blocks from pre-allocated memory chunks.

    It does not free memory blocks, and Realloc() only allocates new memory.

    The memory chunks are allocated by BaseAllocator, which is CrtAllocator by default.

    The user may also supply a buffer as the first chunk.

    If the user buffer is full, additional chunks are allocated by BaseAllocator.

    The user buffer is not deallocated by this allocator.

    \tparam BaseAllocator the allocator type for allocating memory chunks. Default is CrtAllocator.
    \note implements Allocator concept
*/
template <typename BaseAllocator = CrtAllocator>
class MemoryPoolAllocator {
    //! Chunk header prepended to each chunk.
    /*! Chunks are stored as a singly linked list.
    */
    struct ChunkHeader {
        size_t capacity;    //!< Capacity of the chunk in bytes (excluding the header itself).
        size_t size;        //!< Current size of allocated memory in bytes.
        ChunkHeader *next;  //!< Next chunk in the linked list.
    };

    struct SharedData {
        ChunkHeader *chunkHead;  //!< Head of the chunk linked-list. Only the head chunk serves allocation.
        BaseAllocator* ownBaseAllocator; //!< base allocator created by this object.
        size_t refcount;
        bool ownBuffer;
    };

    static const size_t SIZEOF_SHARED_DATA = RAPIDJSON_ALIGN(sizeof(SharedData));
    static const size_t SIZEOF_CHUNK_HEADER = RAPIDJSON_ALIGN(sizeof(ChunkHeader));

    static inline ChunkHeader *GetChunkHead(SharedData *shared)
    {
        return reinterpret_cast<ChunkHeader*>(reinterpret_cast<uint8_t*>(shared) + SIZEOF_SHARED_DATA);
    }
    static inline uint8_t *GetChunkBuffer(SharedData *shared)
    {
        return reinterpret_cast<uint8_t*>(shared->chunkHead) + SIZEOF_CHUNK_HEADER;
    }

    static const size_t kDefaultChunkCapacity = RAPIDJSON_ALLOCATOR_DEFAULT_CHUNK_CAPACITY; //!< Default chunk capacity.

public:
    static const bool kNeedFree = false;    //!< Tells users that there is no need to call Free() with this allocator. (concept Allocator)
    static const bool kRefCounted = true;   //!< Tells users that this allocator is reference counted on copy.

    //! Constructor with chunkSize.
    /*! \param chunkSize The size of memory chunk. The default is kDefaultChunkCapacity.
        \param baseAllocator The allocator for allocating memory chunks.
    */
    explicit
    MemoryPoolAllocator(size_t chunkSize = kDefaultChunkCapacity, BaseAllocator* baseAllocator = 0) :
        chunk_capacity_(chunkSize),
        baseAllocator_(baseAllocator ? baseAllocator : RAPIDJSON_NEW(BaseAllocator)()),
        shared_(static_cast<SharedData*>(baseAllocator_ ? baseAllocator_->Malloc(SIZEOF_SHARED_DATA + SIZEOF_CHUNK_HEADER) : 0))
    {
        RAPIDJSON_ASSERT(baseAllocator_ != 0);
        RAPIDJSON_ASSERT(shared_ != 0);
        if (baseAllocator) {
            shared_->ownBaseAllocator = 0;
        }
        else {
            shared_->ownBaseAllocator = baseAllocator_;
        }
        shared_->chunkHead = GetChunkHead(shared_);
        shared_->chunkHead->capacity = 0;
        shared_->chunkHead->size = 0;
        shared_->chunkHead->next = 0;
        shared_->ownBuffer = true;
        shared_->refcount = 1;
    }

    //! Constructor with user-supplied buffer.
    /*! The user buffer will be used first. When it is full, the memory pool allocates additional chunks of chunkSize bytes (or larger, if a single allocation requires more).

        The user buffer will not be deallocated when this allocator is destructed.

        \param buffer User supplied buffer.
        \param size Size of the buffer in bytes. It must be at least SIZEOF_SHARED_DATA + SIZEOF_CHUNK_HEADER bytes.
        \param chunkSize The size of memory chunk. The default is kDefaultChunkCapacity.
        \param baseAllocator The allocator for allocating memory chunks.
    */
    MemoryPoolAllocator(void *buffer, size_t size, size_t chunkSize = kDefaultChunkCapacity, BaseAllocator* baseAllocator = 0) :
        chunk_capacity_(chunkSize),
        baseAllocator_(baseAllocator),
        shared_(static_cast<SharedData*>(AlignBuffer(buffer, size)))
    {
        RAPIDJSON_ASSERT(size >= SIZEOF_SHARED_DATA + SIZEOF_CHUNK_HEADER);
        shared_->chunkHead = GetChunkHead(shared_);
        shared_->chunkHead->capacity = size - SIZEOF_SHARED_DATA - SIZEOF_CHUNK_HEADER;
        shared_->chunkHead->size = 0;
        shared_->chunkHead->next = 0;
        shared_->ownBaseAllocator = 0;
        shared_->ownBuffer = false;
        shared_->refcount = 1;
    }

    //! Copy constructor. The copy shares the pool and increments its reference count.
    MemoryPoolAllocator(const MemoryPoolAllocator& rhs) RAPIDJSON_NOEXCEPT :
        chunk_capacity_(rhs.chunk_capacity_),
        baseAllocator_(rhs.baseAllocator_),
        shared_(rhs.shared_)
    {
        RAPIDJSON_NOEXCEPT_ASSERT(shared_->refcount > 0);
        ++shared_->refcount;
    }
    //! Copy assignment. Adopts rhs's pool and releases the one currently held.
    MemoryPoolAllocator& operator=(const MemoryPoolAllocator& rhs) RAPIDJSON_NOEXCEPT
    {
        RAPIDJSON_NOEXCEPT_ASSERT(rhs.shared_->refcount > 0);
        ++rhs.shared_->refcount;
        this->~MemoryPoolAllocator();
        baseAllocator_ = rhs.baseAllocator_;
        chunk_capacity_ = rhs.chunk_capacity_;
        shared_ = rhs.shared_;
        return *this;
    }

#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
    //! Move constructor. Takes over rhs's pool; rhs is left empty.
    MemoryPoolAllocator(MemoryPoolAllocator&& rhs) RAPIDJSON_NOEXCEPT :
        chunk_capacity_(rhs.chunk_capacity_),
        baseAllocator_(rhs.baseAllocator_),
        shared_(rhs.shared_)
    {
        RAPIDJSON_NOEXCEPT_ASSERT(rhs.shared_->refcount > 0);
        rhs.shared_ = 0;
    }
    //! Move assignment. Releases the current pool and takes over rhs's; rhs is left empty.
    MemoryPoolAllocator& operator=(MemoryPoolAllocator&& rhs) RAPIDJSON_NOEXCEPT
    {
        RAPIDJSON_NOEXCEPT_ASSERT(rhs.shared_->refcount > 0);
        this->~MemoryPoolAllocator();
        baseAllocator_ = rhs.baseAllocator_;
        chunk_capacity_ = rhs.chunk_capacity_;
        shared_ = rhs.shared_;
        rhs.shared_ = 0;
        return *this;
    }
#endif

    //! Destructor.
    /*! This deallocates all memory chunks, excluding the user-supplied buffer.
    */
    ~MemoryPoolAllocator() RAPIDJSON_NOEXCEPT {
        if (!shared_) {
            // do nothing if moved
            return;
        }
        if (shared_->refcount > 1) {
            --shared_->refcount;
            return;
        }
        Clear();
        BaseAllocator *a = shared_->ownBaseAllocator;
        if (shared_->ownBuffer) {
            baseAllocator_->Free(shared_);
        }
        RAPIDJSON_DELETE(a);
    }

    //! Deallocates all memory chunks, excluding the first/user one.
    void Clear() RAPIDJSON_NOEXCEPT {
        RAPIDJSON_NOEXCEPT_ASSERT(shared_->refcount > 0);
        for (;;) {
            ChunkHeader* c = shared_->chunkHead;
            if (!c->next) {
                break;
            }
            shared_->chunkHead = c->next;
            baseAllocator_->Free(c);
        }
        shared_->chunkHead->size = 0;
    }

    //! Computes the total capacity of allocated memory chunks.
    /*! \return total capacity in bytes.
    */
    size_t Capacity() const RAPIDJSON_NOEXCEPT {
        RAPIDJSON_NOEXCEPT_ASSERT(shared_->refcount > 0);
        size_t capacity = 0;
        for (ChunkHeader* c = shared_->chunkHead; c != 0; c = c->next)
            capacity += c->capacity;
        return capacity;
    }

    //! Computes the total size of the memory blocks allocated so far.
    /*! \return total used bytes.
    */
    size_t Size() const RAPIDJSON_NOEXCEPT {
        RAPIDJSON_NOEXCEPT_ASSERT(shared_->refcount > 0);
        size_t size = 0;
        for (ChunkHeader* c = shared_->chunkHead; c != 0; c = c->next)
            size += c->size;
        return size;
    }

    //! Whether the allocator is shared.
    /*! \return true or false.
    */
    bool Shared() const RAPIDJSON_NOEXCEPT {
        RAPIDJSON_NOEXCEPT_ASSERT(shared_->refcount > 0);
        return shared_->refcount > 1;
    }

    //! Allocates a memory block. (concept Allocator)
    void* Malloc(size_t size) {
        RAPIDJSON_NOEXCEPT_ASSERT(shared_->refcount > 0);
        if (!size)
            return NULL;

        size = RAPIDJSON_ALIGN(size);
        if (RAPIDJSON_UNLIKELY(shared_->chunkHead->size + size > shared_->chunkHead->capacity))
            if (!AddChunk(chunk_capacity_ > size ? chunk_capacity_ : size))
                return NULL;

        void *buffer = GetChunkBuffer(shared_) + shared_->chunkHead->size;
        shared_->chunkHead->size += size;
        return buffer;
    }

    //! Resizes a memory block (concept Allocator)
    void* Realloc(void* originalPtr, size_t originalSize, size_t newSize) {
        if (originalPtr == 0)
            return Malloc(newSize);

        RAPIDJSON_NOEXCEPT_ASSERT(shared_->refcount > 0);
        if (newSize == 0)
            return NULL;

        originalSize = RAPIDJSON_ALIGN(originalSize);
        newSize = RAPIDJSON_ALIGN(newSize);

        // Do not shrink if new size is smaller than original
        if (originalSize >= newSize)
            return originalPtr;

        // Simply expand it if it is the last allocation and there is sufficient space
        if (originalPtr == GetChunkBuffer(shared_) + shared_->chunkHead->size - originalSize) {
            size_t increment = static_cast<size_t>(newSize - originalSize);
            if (shared_->chunkHead->size + increment <= shared_->chunkHead->capacity) {
                shared_->chunkHead->size += increment;
                return originalPtr;
            }
        }

        // Realloc process: allocate and copy memory, do not free original buffer.
        if (void* newBuffer = Malloc(newSize)) {
            if (originalSize)
                std::memcpy(newBuffer, originalPtr, originalSize);
            return newBuffer;
        }
        else
            return NULL;
    }

    //! Frees a memory block (concept Allocator)
    static void Free(void *ptr) RAPIDJSON_NOEXCEPT { (void)ptr; } // Do nothing

    //! Compare (equality) with another MemoryPoolAllocator
    bool operator==(const MemoryPoolAllocator& rhs) const RAPIDJSON_NOEXCEPT {
        RAPIDJSON_NOEXCEPT_ASSERT(shared_->refcount > 0);
        RAPIDJSON_NOEXCEPT_ASSERT(rhs.shared_->refcount > 0);
        return shared_ == rhs.shared_;
    }
    //! Compare (inequality) with another MemoryPoolAllocator
    bool operator!=(const MemoryPoolAllocator& rhs) const RAPIDJSON_NOEXCEPT {
        return !operator==(rhs);
    }

private:
    //! Creates a new chunk.
    /*! \param capacity Capacity of the chunk in bytes.
        \return true if success.
    */
    bool AddChunk(size_t capacity) {
        if (!baseAllocator_)
            shared_->ownBaseAllocator = baseAllocator_ = RAPIDJSON_NEW(BaseAllocator)();
        if (ChunkHeader* chunk = static_cast<ChunkHeader*>(baseAllocator_->Malloc(SIZEOF_CHUNK_HEADER + capacity))) {
            chunk->capacity = capacity;
            chunk->size = 0;
            chunk->next = shared_->chunkHead;
            shared_->chunkHead = chunk;
            return true;
        }
        else
            return false;
    }

    static inline void* AlignBuffer(void* buf, size_t &size)
    {
        RAPIDJSON_NOEXCEPT_ASSERT(buf != 0);
        const uintptr_t mask = sizeof(void*) - 1;
        const uintptr_t ubuf = reinterpret_cast<uintptr_t>(buf);
        if (RAPIDJSON_UNLIKELY(ubuf & mask)) {
            const uintptr_t abuf = (ubuf + mask) & ~mask;
            RAPIDJSON_ASSERT(size >= abuf - ubuf);
            buf = reinterpret_cast<void*>(abuf);
            size -= abuf - ubuf;
        }
        return buf;
    }

    size_t chunk_capacity_;     //!< The minimum capacity of chunks when they are allocated.
    BaseAllocator* baseAllocator_;  //!< base allocator for allocating memory chunks.
    SharedData *shared_;        //!< The shared data of the allocator.
};
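
/* Illustrative usage of MemoryPoolAllocator (buffer and allocation sizes are
   arbitrary). Copies share one pool through reference counting, Free() is a
   no-op, and all pool-allocated chunks are reclaimed when the last copy is
   destroyed (the user buffer itself is never freed by the allocator).

\code
char buffer[4096];                               // first chunk; never freed by the allocator
MemoryPoolAllocator<> a(buffer, sizeof(buffer));

void* p = a.Malloc(24);                          // carved out of the user buffer
p = a.Realloc(p, 24, 48);                        // grows in place if p was the last allocation
MemoryPoolAllocator<>::Free(p);                  // no-op; the memory stays in the pool

MemoryPoolAllocator<> b = a;                     // shares the same chunks, so a == b
size_t used = a.Size();                          // total bytes handed out (aligned)
size_t cap  = a.Capacity();                      // total capacity of all chunks
a.Clear();                                       // frees every chunk except the first/user one
\endcode
*/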

namespace internal {
    // Trait that detects whether an allocator advertises reference counting via kRefCounted.
    template<typename, typename = void>
    struct IsRefCounted :
        public FalseType
    { };
    template<typename T>
    struct IsRefCounted<T, typename internal::EnableIfCond<T::kRefCounted>::Type> :
        public TrueType
    { };
}

// Typed helpers that scale element counts by sizeof(T) before delegating to the
// allocator's byte-oriented Realloc().
template<typename T, typename A>
inline T* Realloc(A& a, T* old_p, size_t old_n, size_t new_n)
{
    RAPIDJSON_NOEXCEPT_ASSERT(old_n <= (std::numeric_limits<size_t>::max)() / sizeof(T) && new_n <= (std::numeric_limits<size_t>::max)() / sizeof(T));
    return static_cast<T*>(a.Realloc(old_p, old_n * sizeof(T), new_n * sizeof(T)));
}

template<typename T, typename A>
inline T *Malloc(A& a, size_t n = 1)
{
    return Realloc<T, A>(a, NULL, 0, n);
}

template<typename T, typename A>
inline void Free(A& a, T *p, size_t n = 1)
{
    static_cast<void>(Realloc<T, A>(a, p, n, 0));
}
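
/* Illustrative usage of the typed helpers above (element counts are arbitrary):

\code
CrtAllocator a;
int* p = Malloc<int>(a, 4);        // allocates space for 4 ints
p = Realloc<int>(a, p, 4, 8);      // grows the array to 8 ints
Free<int>(a, p, 8);                // forwards to a.Realloc(p, 8 * sizeof(int), 0)
\endcode
*/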

#ifdef __GNUC__
RAPIDJSON_DIAG_PUSH
RAPIDJSON_DIAG_OFF(effc++) // std::allocator can safely be inherited
#endif

//! Standard-library compatible allocator that routes allocations through a RapidJSON allocator.
/*! \tparam T element type of the allocations.
    \tparam BaseAllocator the RapidJSON allocator that actually provides the memory. Default is CrtAllocator.
    \note implements Allocator concept
*/
template <typename T, typename BaseAllocator = CrtAllocator>
class StdAllocator :
    public std::allocator<T>
{
    typedef std::allocator<T> allocator_type;
#if RAPIDJSON_HAS_CXX11
    typedef std::allocator_traits<allocator_type> traits_type;
#else
    typedef allocator_type traits_type;
#endif

public:
    typedef BaseAllocator BaseAllocatorType;

    StdAllocator() RAPIDJSON_NOEXCEPT :
        allocator_type(),
        baseAllocator_()
    { }

    StdAllocator(const StdAllocator& rhs) RAPIDJSON_NOEXCEPT :
        allocator_type(rhs),
        baseAllocator_(rhs.baseAllocator_)
    { }

    template<typename U>
    StdAllocator(const StdAllocator<U, BaseAllocator>& rhs) RAPIDJSON_NOEXCEPT :
        allocator_type(rhs),
        baseAllocator_(rhs.baseAllocator_)
    { }

#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
    StdAllocator(StdAllocator&& rhs) RAPIDJSON_NOEXCEPT :
        allocator_type(std::move(rhs)),
        baseAllocator_(std::move(rhs.baseAllocator_))
    { }
#endif
#if RAPIDJSON_HAS_CXX11
    using propagate_on_container_move_assignment = std::true_type;
    using propagate_on_container_swap = std::true_type;
#endif

    /* implicit */
    StdAllocator(const BaseAllocator& allocator) RAPIDJSON_NOEXCEPT :
        allocator_type(),
        baseAllocator_(allocator)
    { }

    ~StdAllocator() RAPIDJSON_NOEXCEPT
    { }

    template<typename U>
    struct rebind {
        typedef StdAllocator<U, BaseAllocator> other;
    };

    typedef typename traits_type::size_type         size_type;
    typedef typename traits_type::difference_type   difference_type;

    typedef typename traits_type::value_type        value_type;
    typedef typename traits_type::pointer           pointer;
    typedef typename traits_type::const_pointer     const_pointer;

#if RAPIDJSON_HAS_CXX11

    typedef typename std::add_lvalue_reference<value_type>::type &reference;
    typedef typename std::add_lvalue_reference<typename std::add_const<value_type>::type>::type &const_reference;

    pointer address(reference r) const RAPIDJSON_NOEXCEPT
    {
        return std::addressof(r);
    }
    const_pointer address(const_reference r) const RAPIDJSON_NOEXCEPT
    {
        return std::addressof(r);
    }

    size_type max_size() const RAPIDJSON_NOEXCEPT
    {
        return traits_type::max_size(*this);
    }

    template <typename ...Args>
    void construct(pointer p, Args&&... args)
    {
        traits_type::construct(*this, p, std::forward<Args>(args)...);
    }
    void destroy(pointer p)
    {
        traits_type::destroy(*this, p);
    }

#else // !RAPIDJSON_HAS_CXX11

    typedef typename allocator_type::reference       reference;
    typedef typename allocator_type::const_reference const_reference;

    pointer address(reference r) const RAPIDJSON_NOEXCEPT
    {
        return allocator_type::address(r);
    }
    const_pointer address(const_reference r) const RAPIDJSON_NOEXCEPT
    {
        return allocator_type::address(r);
    }

    size_type max_size() const RAPIDJSON_NOEXCEPT
    {
        return allocator_type::max_size();
    }

    void construct(pointer p, const_reference r)
    {
        allocator_type::construct(p, r);
    }
    void destroy(pointer p)
    {
        allocator_type::destroy(p);
    }

#endif // !RAPIDJSON_HAS_CXX11

    template <typename U>
    U* allocate(size_type n = 1, const void* = 0)
    {
        return RAPIDJSON_NAMESPACE::Malloc<U>(baseAllocator_, n);
    }
    template <typename U>
    void deallocate(U* p, size_type n = 1)
    {
        RAPIDJSON_NAMESPACE::Free<U>(baseAllocator_, p, n);
    }

    pointer allocate(size_type n = 1, const void* = 0)
    {
        return allocate<value_type>(n);
    }
    void deallocate(pointer p, size_type n = 1)
    {
        deallocate<value_type>(p, n);
    }

#if RAPIDJSON_HAS_CXX11
    using is_always_equal = std::is_empty<BaseAllocator>;
#endif

    template<typename U>
    bool operator==(const StdAllocator<U, BaseAllocator>& rhs) const RAPIDJSON_NOEXCEPT
    {
        return baseAllocator_ == rhs.baseAllocator_;
    }
    template<typename U>
    bool operator!=(const StdAllocator<U, BaseAllocator>& rhs) const RAPIDJSON_NOEXCEPT
    {
        return !operator==(rhs);
    }

    //! rapidjson Allocator concept
    static const bool kNeedFree = BaseAllocator::kNeedFree;
    static const bool kRefCounted = internal::IsRefCounted<BaseAllocator>::Value;
    void* Malloc(size_t size)
    {
        return baseAllocator_.Malloc(size);
    }
    void* Realloc(void* originalPtr, size_t originalSize, size_t newSize)
    {
        return baseAllocator_.Realloc(originalPtr, originalSize, newSize);
    }
    static void Free(void *ptr) RAPIDJSON_NOEXCEPT
    {
        BaseAllocator::Free(ptr);
    }

private:
    template <typename, typename>
    friend class StdAllocator; // access to StdAllocator<!T>.*

    BaseAllocator baseAllocator_;
};
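
/* Illustrative usage of StdAllocator with a standard container (requires <vector>;
   the element type and values are arbitrary):

\code
typedef StdAllocator<int, CrtAllocator> IntAllocator;
IntAllocator alloc;                              // default-constructs its CrtAllocator
std::vector<int, IntAllocator> v(alloc);
v.push_back(42);                                 // storage comes from CrtAllocator
\endcode
*/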

#if !RAPIDJSON_HAS_CXX17 // std::allocator<void> deprecated in C++17
//! Specialization for \c void: carries only the base allocator and supports rebinding to other types.
template <typename BaseAllocator>
class StdAllocator<void, BaseAllocator> :
    public std::allocator<void>
{
    typedef std::allocator<void> allocator_type;

public:
    typedef BaseAllocator BaseAllocatorType;

    StdAllocator() RAPIDJSON_NOEXCEPT :
        allocator_type(),
        baseAllocator_()
    { }

    StdAllocator(const StdAllocator& rhs) RAPIDJSON_NOEXCEPT :
        allocator_type(rhs),
        baseAllocator_(rhs.baseAllocator_)
    { }

    template<typename U>
    StdAllocator(const StdAllocator<U, BaseAllocator>& rhs) RAPIDJSON_NOEXCEPT :
        allocator_type(rhs),
        baseAllocator_(rhs.baseAllocator_)
    { }

    /* implicit */
    StdAllocator(const BaseAllocator& baseAllocator) RAPIDJSON_NOEXCEPT :
        allocator_type(),
        baseAllocator_(baseAllocator)
    { }

    ~StdAllocator() RAPIDJSON_NOEXCEPT
    { }

    template<typename U>
    struct rebind {
        typedef StdAllocator<U, BaseAllocator> other;
    };

    typedef typename allocator_type::value_type value_type;

private:
    template <typename, typename>
    friend class StdAllocator; // access to StdAllocator<!T>.*

    BaseAllocator baseAllocator_;
};
#endif

#ifdef __GNUC__
RAPIDJSON_DIAG_POP
#endif

RAPIDJSON_NAMESPACE_END

#endif // RAPIDJSON_ALLOCATORS_H_