// membuf.h — Hybrid ICN (hICN) plugin, v21.06-rc0-4-g18fa668
/*
 * Copyright 2013-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 
/*
 * The code in this file is adapted from the IOBuf of folly:
 * https://github.com/facebook/folly/blob/master/folly/io/IOBuf.h
 */
21 
22 #pragma once
23 
24 #include <hicn/transport/portability/portability.h>
25 #include <hicn/transport/utils/branch_prediction.h>
26 #include <stdlib.h>
27 
28 #include <atomic>
29 #include <cassert>
30 #include <cinttypes>
31 #include <cstddef>
32 #include <cstring>
33 #include <iterator>
34 #include <limits>
35 #include <memory>
36 #include <type_traits>
37 #include <vector>
38 
39 #ifndef _WIN32
40 TRANSPORT_GNU_DISABLE_WARNING("-Wshadow")
41 #endif
42 
43 namespace utils {
44 
45 class MemBuf {
46  public:
47  enum CreateOp { CREATE };
48  enum WrapBufferOp { WRAP_BUFFER };
49  enum TakeOwnershipOp { TAKE_OWNERSHIP };
50  enum CopyBufferOp { COPY_BUFFER };
51 
52  using Ptr = std::shared_ptr<MemBuf>;
53 
54  typedef void (*FreeFunction)(void* buf, void* userData);
55 
56  static std::unique_ptr<MemBuf> create(std::size_t capacity);
57  MemBuf(CreateOp, std::size_t capacity);
58 
69  static std::unique_ptr<MemBuf> createCombined(std::size_t capacity);
70 
78  static std::unique_ptr<MemBuf> createSeparate(std::size_t capacity);
79 
84  static std::unique_ptr<MemBuf> createChain(size_t totalCapacity,
85  std::size_t maxBufCapacity);
86 
87  static std::unique_ptr<MemBuf> takeOwnership(void* buf, std::size_t capacity,
88  FreeFunction freeFn = nullptr,
89  void* userData = nullptr,
90  bool freeOnError = true) {
91  return takeOwnership(buf, capacity, capacity, freeFn, userData,
92  freeOnError);
93  }
94 
95  MemBuf(TakeOwnershipOp op, void* buf, std::size_t capacity,
96  FreeFunction freeFn = nullptr, void* userData = nullptr,
97  bool freeOnError = true)
98  : MemBuf(op, buf, capacity, capacity, freeFn, userData, freeOnError) {}
99 
100  static std::unique_ptr<MemBuf> takeOwnership(void* buf, std::size_t capacity,
101  std::size_t length,
102  FreeFunction freeFn = nullptr,
103  void* userData = nullptr,
104  bool freeOnError = true);
105 
106  MemBuf(TakeOwnershipOp, void* buf, std::size_t capacity, std::size_t length,
107  FreeFunction freeFn = nullptr, void* userData = nullptr,
108  bool freeOnError = true);
109 
110  static std::unique_ptr<MemBuf> wrapBuffer(const void* buf, std::size_t length,
111  std::size_t capacity);
112 
113  static MemBuf wrapBufferAsValue(const void* buf, std::size_t length,
114  std::size_t capacity) noexcept;
115 
116  MemBuf(WrapBufferOp op, const void* buf, std::size_t length,
117  std::size_t capacity) noexcept;
118 
124  static std::unique_ptr<MemBuf> copyBuffer(const void* buf, std::size_t size,
125  std::size_t headroom = 0,
126  std::size_t minTailroom = 0);
127 
128  MemBuf(CopyBufferOp op, const void* buf, std::size_t size,
129  std::size_t headroom = 0, std::size_t minTailroom = 0);
130 
134  static void destroy(std::unique_ptr<MemBuf>&& data) {
135  auto destroyer = std::move(data);
136  }
137 
138  ~MemBuf();
139 
140  bool empty() const;
141 
142  const uint8_t* data() const { return data_; }
143 
144  uint8_t* writableData() { return data_; }
145 
146  const uint8_t* tail() const { return data_ + length_; }
147 
148  uint8_t* writableTail() { return data_ + length_; }
149 
150  std::size_t length() const { return length_; }
151 
152  void setLength(std::size_t length) { length_ = length; }
153 
154  std::size_t headroom() const { return std::size_t(data_ - buffer()); }
155 
156  std::size_t tailroom() const { return std::size_t(bufferEnd() - tail()); }
157 
158  const uint8_t* buffer() const { return buf_; }
159 
160  uint8_t* writableBuffer() { return buf_; }
161 
162  const uint8_t* bufferEnd() const { return buf_ + capacity_; }
163 
164  std::size_t capacity() const { return capacity_; }
165 
166  MemBuf* next() { return next_; }
167 
168  const MemBuf* next() const { return next_; }
169 
170  MemBuf* prev() { return prev_; }
171 
172  const MemBuf* prev() const { return prev_; }
173 
189  void advance(std::size_t amount) {
190  // In debug builds, assert if there is a problem.
191  assert(amount <= tailroom());
192 
193  if (length_ > 0) {
194  memmove(data_ + amount, data_, length_);
195  }
196  data_ += amount;
197  }
198 
210  void retreat(std::size_t amount) {
211  // In debug builds, assert if there is a problem.
212  assert(amount <= headroom());
213 
214  if (length_ > 0) {
215  memmove(data_ - amount, data_, length_);
216  }
217  data_ -= amount;
218  }
219 
220  void prepend(std::size_t amount) {
221  data_ -= amount;
222  length_ += amount;
223  }
224 
225  void append(std::size_t amount) { length_ += amount; }
226 
227  void trimStart(std::size_t amount) {
228  data_ += amount;
229  length_ -= amount;
230  }
231 
232  void trimEnd(std::size_t amount) { length_ -= amount; }
233 
234  // Never call clear on cloned membuf sharing different
235  // portions of the same underlying buffer.
236  // Use the trim functions instead.
237  void clear() {
238  data_ = writableBuffer();
239  length_ = 0;
240  }
241 
242  void reserve(std::size_t minHeadroom, std::size_t minTailroom) {
243  // Maybe we don't need to do anything.
244  if (headroom() >= minHeadroom && tailroom() >= minTailroom) {
245  return;
246  }
247  // If the buffer is empty but we have enough total room (head + tail),
248  // move the data_ pointer around.
249  if (length() == 0 && headroom() + tailroom() >= minHeadroom + minTailroom) {
250  data_ = writableBuffer() + minHeadroom;
251  return;
252  }
253  // Bah, we have to do actual work.
254  reserveSlow(minHeadroom, minTailroom);
255  }
256 
257  bool isChained() const {
258  assert((next_ == this) == (prev_ == this));
259  return next_ != this;
260  }
261 
262  size_t countChainElements() const;
263 
264  std::size_t computeChainDataLength() const;
265 
266  void prependChain(std::unique_ptr<MemBuf>&& iobuf);
267 
268  void appendChain(std::unique_ptr<MemBuf>&& iobuf) {
269  // Just use prependChain() on the next element in our chain
270  next_->prependChain(std::move(iobuf));
271  }
272 
273  std::unique_ptr<MemBuf> unlink() {
274  next_->prev_ = prev_;
275  prev_->next_ = next_;
276  prev_ = this;
277  next_ = this;
278  return std::unique_ptr<MemBuf>(this);
279  }
280 
285  std::unique_ptr<MemBuf> pop() {
286  MemBuf* next = next_;
287  next_->prev_ = prev_;
288  prev_->next_ = next_;
289  prev_ = this;
290  next_ = this;
291  return std::unique_ptr<MemBuf>((next == this) ? nullptr : next);
292  }
293 
309  std::unique_ptr<MemBuf> separateChain(MemBuf* head, MemBuf* tail) {
310  assert(head != this);
311  assert(tail != this);
312 
313  head->prev_->next_ = tail->next_;
314  tail->next_->prev_ = head->prev_;
315 
316  head->prev_ = tail;
317  tail->next_ = head;
318 
319  return std::unique_ptr<MemBuf>(head);
320  }
321 
328  bool isShared() const {
329  const MemBuf* current = this;
330  while (true) {
331  if (current->isSharedOne()) {
332  return true;
333  }
334  current = current->next_;
335  if (current == this) {
336  return false;
337  }
338  }
339  }
340 
346  bool isManaged() const {
347  const MemBuf* current = this;
348  while (true) {
349  if (!current->isManagedOne()) {
350  return false;
351  }
352  current = current->next_;
353  if (current == this) {
354  return true;
355  }
356  }
357  }
358 
364  bool isManagedOne() const { return sharedInfo(); }
365 
376  bool isSharedOne() const {
377  // If this is a user-owned buffer, it is always considered shared
378  if ((TRANSPORT_EXPECT_FALSE(!sharedInfo()))) {
379  return true;
380  }
381 
382  if ((TRANSPORT_EXPECT_FALSE(sharedInfo()->externallyShared))) {
383  return true;
384  }
385 
386  if ((TRANSPORT_EXPECT_TRUE(!(flags() & flag_maybe_shared)))) {
387  return false;
388  }
389 
390  // flag_maybe_shared is set, so we need to check the reference count.
391  // (Checking the reference count requires an atomic operation, which is why
392  // we prefer to only check flag_maybe_shared if possible.)
393  bool shared = sharedInfo()->refcount.load(std::memory_order_acquire) > 1;
394  if (!shared) {
395  // we're the last one left
396  clearFlags(flag_maybe_shared);
397  }
398  return shared;
399  }
400 
421  void unshare() {
422  if (isChained()) {
423  unshareChained();
424  } else {
425  unshareOne();
426  }
427  }
428 
439  void unshareOne() {
440  if (isSharedOne()) {
441  unshareOneSlow();
442  }
443  }
444 
452  void markExternallyShared();
453 
463  SharedInfo* info = sharedInfo();
464  if (info) {
465  info->externallyShared = true;
466  }
467  }
468 
478  void makeManaged() {
479  if (isChained()) {
480  makeManagedChained();
481  } else {
482  makeManagedOne();
483  }
484  }
485 
495  void makeManagedOne() {
496  if (!isManagedOne()) {
497  // We can call the internal function directly; unmanaged implies shared.
498  unshareOneSlow();
499  }
500  }
501 
502  // /**
503  // * Coalesce this MemBuf chain into a single buffer.
504  // *
505  // * This method moves all of the data in this MemBuf chain into a single
506  // * contiguous buffer, if it is not already in one buffer. After coalesce()
507  // * returns, this MemBuf will be a chain of length one. Other MemBufs in
508  // the
509  // * chain will be automatically deleted.
510  // *
511  // * After coalescing, the MemBuf will have at least as much headroom as the
512  // * first MemBuf in the chain, and at least as much tailroom as the last
513  // MemBuf
514  // * in the chain.
515  // *
516  // * Throws std::bad_alloc on error. On error the MemBuf chain will be
517  // * unmodified.
518  // *
519  // * Returns ByteRange that points to the data MemBuf stores.
520  // */
521  // ByteRange coalesce() {
522  // const std::size_t newHeadroom = headroom();
523  // const std::size_t newTailroom = prev()->tailroom();
524  // return coalesceWithHeadroomTailroom(newHeadroom, newTailroom);
525  // }
526 
527  // /**
528  // * This is similar to the coalesce() method, except this allows to set a
529  // * headroom and tailroom after coalescing.
530  // *
531  // * Returns ByteRange that points to the data MemBuf stores.
532  // */
533  // ByteRange coalesceWithHeadroomTailroom(std::size_t newHeadroom,
534  // std::size_t newTailroom) {
535  // if (isChained()) {
536  // coalesceAndReallocate(newHeadroom, computeChainDataLength(), this,
537  // newTailroom);
538  // }
539  // return ByteRange(data_, length_);
540  // }
541 
561  void gather(std::size_t maxLength) {
562  if (!isChained() || length_ >= maxLength) {
563  return;
564  }
565  coalesceSlow(maxLength);
566  }
567 
576  std::unique_ptr<MemBuf> clone() const;
577 
582  MemBuf cloneAsValue() const;
583 
590  std::unique_ptr<MemBuf> cloneOne() const;
591 
596  MemBuf cloneOneAsValue() const;
597 
610  std::unique_ptr<MemBuf> cloneCoalesced() const;
611 
616  std::unique_ptr<MemBuf> cloneCoalescedWithHeadroomTailroom(
617  std::size_t newHeadroom, std::size_t newTailroom) const;
618 
624 
630  std::size_t newHeadroom, std::size_t newTailroom) const;
631 
636  void cloneInto(MemBuf& other) const { other = cloneAsValue(); }
637 
642  void cloneOneInto(MemBuf& other) const { other = cloneOneAsValue(); }
643 
653  std::vector<struct iovec> getIov() const;
654 
664  void appendToIov(std::vector<struct iovec>* iov) const;
665 
676  size_t fillIov(struct iovec* iov, size_t len) const;
677 
683  static std::unique_ptr<MemBuf> wrapIov(const iovec* vec, size_t count);
684 
690  static std::unique_ptr<MemBuf> takeOwnershipIov(const iovec* vec,
691  size_t count,
692  FreeFunction freeFn = nullptr,
693  void* userData = nullptr,
694  bool freeOnError = true);
695 
700  bool ensureCapacity(std::size_t capacity);
701 
706  bool ensureCapacityAndFillUnused(std::size_t capacity, uint8_t placeholder);
707 
708  /*
709  * Overridden operator new and delete.
710  * These perform specialized memory management to help support
711  * createCombined(), which allocates MemBuf objects together with the buffer
712  * data.
713  */
714  void* operator new(size_t size);
715  void* operator new(size_t size, void* ptr);
716  void operator delete(void* ptr);
717  void operator delete(void* ptr, void* placement);
718 
722  bool operator==(const MemBuf& other);
723  bool operator!=(const MemBuf& other);
724 
725  // /**
726  // * Iteration support: a chain of MemBufs may be iterated through using
727  // * STL-style iterators over const ByteRanges. Iterators are only
728  // invalidated
729  // * if the MemBuf that they currently point to is removed.
730  // */
731  // Iterator cbegin() const;
732  // Iterator cend() const;
733  // Iterator begin() const;
734  // Iterator end() const;
735 
743  MemBuf() noexcept;
744 
762  MemBuf(MemBuf&& other) noexcept;
763  MemBuf& operator=(MemBuf&& other) noexcept;
764 
765  MemBuf(const MemBuf& other);
766  MemBuf& operator=(const MemBuf& other);
767 
768  private:
769  enum FlagsEnum : uintptr_t {
770  // Adding any more flags would not work on 32-bit architectures,
771  // as these flags are stashed in the least significant 2 bits of a
772  // max-align-aligned pointer.
773  flag_free_shared_info = 0x1,
774  flag_maybe_shared = 0x2,
775  flag_mask = flag_free_shared_info | flag_maybe_shared
776  };
777 
778  struct SharedInfo {
779  SharedInfo();
780  SharedInfo(FreeFunction fn, void* arg);
781 
782  // A pointer to a function to call to free the buffer when the refcount
783  // hits 0. If this is null, free() will be used instead.
784  FreeFunction freeFn;
785  void* userData;
786  std::atomic<uint32_t> refcount;
787  bool externallyShared{false};
788  };
789  // Helper structs for use by operator new and delete
790  struct HeapPrefix;
791  struct HeapStorage;
792  struct HeapFullStorage;
793 
801  struct InternalConstructor {}; // avoid conflicts
802  MemBuf(InternalConstructor, uintptr_t flagsAndSharedInfo, uint8_t* buf,
803  std::size_t capacity, uint8_t* data, std::size_t length) noexcept;
804 
805  void unshareOneSlow();
806  void unshareChained();
807  void makeManagedChained();
808  void coalesceSlow();
809  void coalesceSlow(size_t maxLength);
810  // newLength must be the entire length of the buffers between this and
811  // end (no truncation)
812  void coalesceAndReallocate(size_t newHeadroom, size_t newLength, MemBuf* end,
813  size_t newTailroom);
814  void coalesceAndReallocate(size_t newLength, MemBuf* end) {
815  coalesceAndReallocate(headroom(), newLength, end, end->prev_->tailroom());
816  }
817  void decrementRefcount();
818  void reserveSlow(std::size_t minHeadroom, std::size_t minTailroom);
819  void freeExtBuffer();
820 
821  static size_t goodExtBufferSize(std::size_t minCapacity);
822  static void initExtBuffer(uint8_t* buf, size_t mallocSize,
823  SharedInfo** infoReturn,
824  std::size_t* capacityReturn);
825  static void allocExtBuffer(std::size_t minCapacity, uint8_t** bufReturn,
826  SharedInfo** infoReturn,
827  std::size_t* capacityReturn);
828  static void releaseStorage(HeapStorage* storage, uint16_t freeFlags);
829  static void freeInternalBuf(void* buf, void* userData);
830 
831  /*
832  * Member variables
833  */
834 
835  /*
836  * Links to the next and the previous MemBuf in this chain.
837  *
838  * The chain is circularly linked (the last element in the chain points back
839  * at the head), and next_ and prev_ can never be null. If this MemBuf is the
840  * only element in the chain, next_ and prev_ will both point to this.
841  */
842  MemBuf* next_{this};
843  MemBuf* prev_{this};
844 
845  /*
846  * A pointer to the start of the data referenced by this MemBuf, and the
847  * length of the data.
848  *
849  * This may refer to any subsection of the actual buffer capacity.
850  */
851  uint8_t* data_{nullptr};
852  uint8_t* buf_{nullptr};
853  std::size_t length_{0};
854  std::size_t capacity_{0};
855 
856  // Pack flags in least significant 2 bits, sharedInfo in the rest
857  mutable uintptr_t flags_and_shared_info_{0};
858 
859  static inline uintptr_t packFlagsAndSharedInfo(uintptr_t flags,
860  SharedInfo* info) {
861  uintptr_t uinfo = reinterpret_cast<uintptr_t>(info);
862  return flags | uinfo;
863  }
864 
865  inline SharedInfo* sharedInfo() const {
866  return reinterpret_cast<SharedInfo*>(flags_and_shared_info_ & ~flag_mask);
867  }
868 
869  inline void setSharedInfo(SharedInfo* info) {
870  uintptr_t uinfo = reinterpret_cast<uintptr_t>(info);
871  flags_and_shared_info_ = (flags_and_shared_info_ & flag_mask) | uinfo;
872  }
873 
874  inline uintptr_t flags() const { return flags_and_shared_info_ & flag_mask; }
875 
876  // flags_ are changed from const methods
877  inline void setFlags(uintptr_t flags) const {
878  flags_and_shared_info_ |= flags;
879  }
880 
881  inline void clearFlags(uintptr_t flags) const {
882  flags_and_shared_info_ &= ~flags;
883  }
884 
885  inline void setFlagsAndSharedInfo(uintptr_t flags, SharedInfo* info) {
886  flags_and_shared_info_ = packFlagsAndSharedInfo(flags, info);
887  }
888 
889  struct DeleterBase {
890  virtual ~DeleterBase() {}
891  virtual void dispose(void* p) = 0;
892  };
893 
894  template <class UniquePtr>
895  struct UniquePtrDeleter : public DeleterBase {
896  typedef typename UniquePtr::pointer Pointer;
897  typedef typename UniquePtr::deleter_type Deleter;
898 
899  explicit UniquePtrDeleter(Deleter deleter) : deleter_(std::move(deleter)) {}
900  void dispose(void* p) override {
901  try {
902  deleter_(static_cast<Pointer>(p));
903  delete this;
904  } catch (...) {
905  abort();
906  }
907  }
908 
909  private:
910  Deleter deleter_;
911  };
912 
913  static void freeUniquePtrBuffer(void* ptr, void* userData) {
914  static_cast<DeleterBase*>(userData)->dispose(ptr);
915  }
916 };
917 
// template <class UniquePtr>
// typename std::enable_if<
//     detail::IsUniquePtrToSL<UniquePtr>::value,
//     std::unique_ptr<MemBuf>>::type
// MemBuf::takeOwnership(UniquePtr&& buf, size_t count) {
//   size_t size = count * sizeof(typename UniquePtr::element_type);
//   auto deleter = new UniquePtrDeleter<UniquePtr>(buf.get_deleter());
//   return takeOwnership(
//       buf.release(), size, &MemBuf::freeUniquePtrBuffer, deleter);
// }
928 
929 inline std::unique_ptr<MemBuf> MemBuf::copyBuffer(const void* data,
930  std::size_t size,
931  std::size_t headroom,
932  std::size_t minTailroom) {
933  std::size_t capacity = headroom + size + minTailroom;
934  std::unique_ptr<MemBuf> buf = MemBuf::create(capacity);
935  buf->advance(headroom);
936  if (size != 0) {
937  memcpy(buf->writableData(), data, size);
938  }
939  buf->append(size);
940  return buf;
941 }
942 
943 } // namespace utils
utils::MemBuf::copyBuffer
static std::unique_ptr< MemBuf > copyBuffer(const void *buf, std::size_t size, std::size_t headroom=0, std::size_t minTailroom=0)
Definition: membuf.h:929
utils::MemBuf::isSharedOne
bool isSharedOne() const
Definition: membuf.h:376
utils::MemBuf::cloneCoalescedWithHeadroomTailroom
std::unique_ptr< MemBuf > cloneCoalescedWithHeadroomTailroom(std::size_t newHeadroom, std::size_t newTailroom) const
utils::MemBuf::operator==
bool operator==(const MemBuf &other)
utils::MemBuf::wrapIov
static std::unique_ptr< MemBuf > wrapIov(const iovec *vec, size_t count)
utils::MemBuf::pop
std::unique_ptr< MemBuf > pop()
Definition: membuf.h:285
utils::MemBuf::fillIov
size_t fillIov(struct iovec *iov, size_t len) const
utils::MemBuf::gather
void gather(std::size_t maxLength)
Definition: membuf.h:561
utils::MemBuf::cloneInto
void cloneInto(MemBuf &other) const
Definition: membuf.h:636
utils::MemBuf::cloneCoalesced
std::unique_ptr< MemBuf > cloneCoalesced() const
utils::MemBuf::cloneOne
std::unique_ptr< MemBuf > cloneOne() const
utils::MemBuf::cloneAsValue
MemBuf cloneAsValue() const
utils::MemBuf::cloneCoalescedAsValue
MemBuf cloneCoalescedAsValue() const
utils::MemBuf::appendToIov
void appendToIov(std::vector< struct iovec > *iov) const
utils::MemBuf::makeManaged
void makeManaged()
Definition: membuf.h:478
utils::MemBuf::cloneOneAsValue
MemBuf cloneOneAsValue() const
utils::MemBuf::makeManagedOne
void makeManagedOne()
Definition: membuf.h:495
utils::MemBuf::createChain
static std::unique_ptr< MemBuf > createChain(size_t totalCapacity, std::size_t maxBufCapacity)
utils::MemBuf::unshare
void unshare()
Definition: membuf.h:421
utils::MemBuf::markExternallyShared
void markExternallyShared()
utils::MemBuf::isShared
bool isShared() const
Definition: membuf.h:328
utils::MemBuf::isManagedOne
bool isManagedOne() const
Definition: membuf.h:364
utils::MemBuf::createCombined
static std::unique_ptr< MemBuf > createCombined(std::size_t capacity)
iovec
Definition: windows_utils.h:33
utils::MemBuf::isManaged
bool isManaged() const
Definition: membuf.h:346
utils::MemBuf::cloneCoalescedAsValueWithHeadroomTailroom
MemBuf cloneCoalescedAsValueWithHeadroomTailroom(std::size_t newHeadroom, std::size_t newTailroom) const
utils::MemBuf::separateChain
std::unique_ptr< MemBuf > separateChain(MemBuf *head, MemBuf *tail)
Definition: membuf.h:309
utils::MemBuf::getIov
std::vector< struct iovec > getIov() const
utils::MemBuf::cloneOneInto
void cloneOneInto(MemBuf &other) const
Definition: membuf.h:642
utils::MemBuf::ensureCapacity
bool ensureCapacity(std::size_t capacity)
utils::MemBuf::advance
void advance(std::size_t amount)
Definition: membuf.h:189
utils::MemBuf::takeOwnershipIov
static std::unique_ptr< MemBuf > takeOwnershipIov(const iovec *vec, size_t count, FreeFunction freeFn=nullptr, void *userData=nullptr, bool freeOnError=true)
utils::MemBuf::ensureCapacityAndFillUnused
bool ensureCapacityAndFillUnused(std::size_t capacity, uint8_t placeholder)
utils::MemBuf::destroy
static void destroy(std::unique_ptr< MemBuf > &&data)
Definition: membuf.h:134
utils::MemBuf::markExternallySharedOne
void markExternallySharedOne()
Definition: membuf.h:462
utils::MemBuf::createSeparate
static std::unique_ptr< MemBuf > createSeparate(std::size_t capacity)
utils::MemBuf::clone
std::unique_ptr< MemBuf > clone() const
utils::MemBuf::retreat
void retreat(std::size_t amount)
Definition: membuf.h:210
utils::MemBuf::unshareOne
void unshareOne()
Definition: membuf.h:439
utils::MemBuf
Definition: membuf.h:45
utils::MemBuf::MemBuf
MemBuf() noexcept