/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * For high-level documentation and usage examples see
 * folly/docs/small_vector.md
 *
 * @author Jordan DeLong <delong.j@fb.com>
 */

#pragma once

#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <cstring>
#include <iterator>
#include <limits>
#include <new>
#include <stdexcept>
#include <type_traits>
#include <utility>

#include <boost/mpl/count.hpp>
#include <boost/mpl/empty.hpp>
#include <boost/mpl/eval_if.hpp>
#include <boost/mpl/filter_view.hpp>
#include <boost/mpl/front.hpp>
#include <boost/mpl/identity.hpp>
#include <boost/mpl/placeholders.hpp>
#include <boost/mpl/size.hpp>
#include <boost/mpl/vector.hpp>
#include <boost/operators.hpp>

#include <folly/ConstexprMath.h>
#include <folly/FormatTraits.h>
#include <folly/Likely.h>
#include <folly/Portability.h>
#include <folly/ScopeGuard.h>
#include <folly/Traits.h>
#include <folly/functional/Invoke.h>
#include <folly/lang/Align.h>
#include <folly/lang/Assume.h>
#include <folly/lang/Exception.h>
#include <folly/memory/Malloc.h>
#include <folly/portability/Malloc.h>

#if (FOLLY_X64 || FOLLY_PPC64)
#define FOLLY_SV_PACK_ATTR FOLLY_PACK_ATTR
#define FOLLY_SV_PACK_PUSH FOLLY_PACK_PUSH
#define FOLLY_SV_PACK_POP FOLLY_PACK_POP
#else
#define FOLLY_SV_PACK_ATTR
#define FOLLY_SV_PACK_PUSH
#define FOLLY_SV_PACK_POP
#endif

// Ignore shadowing warnings within this file, so includers can use -Wshadow.
FOLLY_PUSH_WARNING
FOLLY_GNU_DISABLE_WARNING("-Wshadow")

namespace folly {

//////////////////////////////////////////////////////////////////////

namespace small_vector_policy {

//////////////////////////////////////////////////////////////////////

/*
 * A flag which makes us refuse to use the heap at all. If we
 * overflow the in situ capacity we throw an exception.
 */
struct NoHeap;

//////////////////////////////////////////////////////////////////////

} // namespace small_vector_policy

//////////////////////////////////////////////////////////////////////

template <
    class Value,
    std::size_t RequestedMaxInline,
    class PolicyA,
    class PolicyB,
    class PolicyC>
class small_vector;

//////////////////////////////////////////////////////////////////////
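
/*
 * Illustrative usage sketch (not part of this header; see
 * folly/docs/small_vector.md for the real documentation). The element
 * type and inline capacities below are arbitrary examples:
 *
 *   folly::small_vector<int, 2> vec; // space for 2 ints in situ
 *   vec.push_back(0);                // no heap allocation
 *   vec.push_back(1);                // still in situ
 *   vec.push_back(2);                // spills to the heap
 *
 *   // With the NoHeap policy, overflowing the in situ capacity throws
 *   // std::length_error instead of allocating:
 *   folly::small_vector<int, 8, folly::small_vector_policy::NoHeap> fixed;
 */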

namespace detail {

/*
 * Move objects in memory to the right into some uninitialized memory, where
 * the region overlaps. Then call create() for each hole in reverse order.
 *
 * This doesn't just use std::move_backward because move_backward only works
 * if all the memory is initialized to type T already.
 *
 * The create function should return a reference type, to avoid
 * extra copies and moves for non-trivial types.
 */
template <class T, class Create>
typename std::enable_if<!is_trivially_copyable_v<T>>::type
moveObjectsRightAndCreate(
    T* const first,
    T* const lastConstructed,
    T* const realLast,
    Create&& create) {
  if (lastConstructed == realLast) {
    return;
  }

  T* out = realLast;
  T* in = lastConstructed;
  {
    auto rollback = makeGuard([&] {
      // We want to make sure the same stuff is uninitialized memory
      // if we exit via an exception (this is to make sure we provide
      // the basic exception safety guarantee for insert functions).
      if (out < lastConstructed) {
        out = lastConstructed - 1;
      }
      for (auto it = out + 1; it != realLast; ++it) {
        it->~T();
      }
    });
    // Decrement the pointers only when it is known that the resulting pointer
    // is within the boundaries of the object. Decrementing past the beginning
    // of the object is UB. Note that this is asymmetric wrt forward iteration,
    // as past-the-end pointers are explicitly allowed.
    for (; in != first && out > lastConstructed;) {
      // Out must be decremented before an exception can be thrown so that
      // the rollback guard knows where to start.
      --out;
      new (out) T(std::move(*(--in)));
    }
    for (; in != first;) {
      --out;
      *out = std::move(*(--in));
    }
    for (; out > lastConstructed;) {
      --out;
      new (out) T(create());
    }
    for (; out != first;) {
      --out;
      *out = create();
    }
    rollback.dismiss();
  }
}

// Specialization for trivially copyable types. The call to
// std::move_backward here will just turn into a memmove.
// This must only be used with trivially copyable types because some of the
// memory may be uninitialized, and std::move_backward() won't work when it
// can't memmove().
template <class T, class Create>
typename std::enable_if<is_trivially_copyable_v<T>>::type
moveObjectsRightAndCreate(
    T* const first,
    T* const lastConstructed,
    T* const realLast,
    Create&& create) {
  std::move_backward(first, lastConstructed, realLast);
  T* const end = first - 1;
  T* out = first + (realLast - lastConstructed) - 1;
  for (; out != end; --out) {
    *out = create();
  }
}
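
/*
 * Worked example of moveObjectsRightAndCreate() (illustrative only): with
 * constructed elements [A, B, C] followed by two uninitialized slots, a
 * call with first = &A, lastConstructed = &C + 1, and realLast = &C + 3
 * shifts everything right by two and fills the holes with create(), which
 * is invoked once per hole, rightmost hole first:
 *
 *   before: [A, B, C, ?, ?]   (? = uninitialized)
 *   after:  [c2, c1, A, B, C] (c1 = first create() result, c2 = second)
 *
 * insert() relies on this reverse order by handing in a create() that
 * walks its input range backwards.
 */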

/*
 * Populate a region of memory using `op' to construct elements. If
 * anything throws, undo what we did.
 */
template <class T, class Function>
void populateMemForward(T* mem, std::size_t n, Function const& op) {
  std::size_t idx = 0;
  {
    auto rollback = makeGuard([&] {
      for (std::size_t i = 0; i < idx; ++i) {
        mem[i].~T();
      }
    });
    for (size_t i = 0; i < n; ++i) {
      op(&mem[idx]);
      ++idx;
    }
    rollback.dismiss();
  }
}

/*
 * Copies `fromSize' elements from `from' to `to', where `to' is only
 * initialized up to `toSize', but has enough storage for `fromSize'. If
 * `toSize' > `fromSize', the extra elements are destructed.
 */
template <class Iterator1, class Iterator2>
void partiallyUninitializedCopy(
    Iterator1 from, size_t fromSize, Iterator2 to, size_t toSize) {
  const size_t minSize = std::min(fromSize, toSize);
  std::copy(from, from + minSize, to);
  if (fromSize > toSize) {
    std::uninitialized_copy(from + minSize, from + fromSize, to + minSize);
  } else {
    for (auto it = to + minSize; it != to + toSize; ++it) {
      using Value = typename std::decay<decltype(*it)>::type;
      it->~Value();
    }
  }
}
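
/*
 * Worked example of partiallyUninitializedCopy() (illustrative only):
 *
 *   int src[4] = {1, 2, 3, 4};
 *   // given int* dst pointing at storage for 4 ints, of which only the
 *   // first 2 are constructed:
 *   detail::partiallyUninitializedCopy(src, 4, dst, 2);
 *   // dst[0..1] were copy-assigned, dst[2..3] were copy-constructed.
 *
 * In the opposite direction (fromSize = 1, toSize = 3) it assigns one
 * element and destroys the trailing two.
 */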

template <class SizeType, bool ShouldUseHeap, bool AlwaysUseHeap>
struct IntegralSizePolicyBase {
  typedef SizeType InternalSizeType;

  IntegralSizePolicyBase() : size_(0) {}

 protected:
  static constexpr std::size_t policyMaxSize() {
    return SizeType(~kClearMask);
  }

  std::size_t doSize() const {
    return AlwaysUseHeap ? size_ : size_ & ~kClearMask;
  }

  std::size_t isExtern() const { return AlwaysUseHeap || kExternMask & size_; }

  void setExtern(bool b) {
    if (AlwaysUseHeap) {
      return;
    }
    if (b) {
      size_ |= kExternMask;
    } else {
      size_ &= ~kExternMask;
    }
  }

  std::size_t isHeapifiedCapacity() const {
    return AlwaysUseHeap || kCapacityMask & size_;
  }

  void setHeapifiedCapacity(bool b) {
    if (AlwaysUseHeap) {
      return;
    }
    if (b) {
      size_ |= kCapacityMask;
    } else {
      size_ &= ~kCapacityMask;
    }
  }

  void setSize(std::size_t sz) {
    assert(sz <= policyMaxSize());
    size_ = AlwaysUseHeap ? sz : (kClearMask & size_) | SizeType(sz);
  }

  void incrementSize(std::size_t n) {
    // We can safely increment the size without overflowing into the mask bits
    // because we always check that the new size is less than policyMaxSize()
    // (see makeSizeInternal). The assertion verifies this.
    assert(doSize() + n <= policyMaxSize());
    size_ += SizeType(n);
  }

  std::size_t getInternalSize() { return size_; }

  void swapSizePolicy(IntegralSizePolicyBase& o) { std::swap(size_, o.size_); }

  void resetSizePolicy() { size_ = 0; }

 protected:
  static bool constexpr kShouldUseHeap = ShouldUseHeap || AlwaysUseHeap;
  static bool constexpr kAlwaysUseHeap = AlwaysUseHeap;

 private:
  // We reserve the two most significant bits of size_.
  static SizeType constexpr kExternMask =
      kShouldUseHeap ? SizeType(1) << (sizeof(SizeType) * 8 - 1) : 0;

  static SizeType constexpr kCapacityMask =
      kShouldUseHeap ? SizeType(1) << (sizeof(SizeType) * 8 - 2) : 0;

  static SizeType constexpr kClearMask =
      kShouldUseHeap ? SizeType(3) << (sizeof(SizeType) * 8 - 2) : 0;

  SizeType size_;
};
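
/*
 * Worked example of the size_ encoding above (illustrative only): with
 * SizeType = uint32_t and kShouldUseHeap, the two most significant bits
 * are flags:
 *
 *   kExternMask   = 0x80000000 // set while the data lives on the heap
 *   kCapacityMask = 0x40000000 // set while the capacity is heapified
 *   kClearMask    = 0xc0000000 // both flag bits
 *
 * so policyMaxSize() is 0x3fffffff (2^30 - 1) elements. With NoHeap all
 * three masks are zero and the full 32 bits hold the size.
 */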

template <class SizeType, bool ShouldUseHeap, bool AlwaysUseHeap>
struct IntegralSizePolicy;

template <class SizeType, bool AlwaysUseHeap>
struct IntegralSizePolicy<SizeType, true, AlwaysUseHeap>
    : public IntegralSizePolicyBase<SizeType, true, AlwaysUseHeap> {
 public:
  /*
   * Move a range to a range of uninitialized memory. Assumes the
   * ranges don't overlap.
   */
  template <class T>
  typename std::enable_if<!is_trivially_copyable_v<T>>::type
  moveToUninitialized(T* first, T* last, T* out) {
    std::size_t idx = 0;
    {
      auto rollback = makeGuard([&] {
        // Even for callers trying to give the strong guarantee
        // (e.g. push_back) it's ok to assume here that we don't have to
        // move things back and that it was a copy constructor that
        // threw: if someone throws from a move constructor the effects
        // are unspecified.
        for (std::size_t i = 0; i < idx; ++i) {
          out[i].~T();
        }
      });
      for (; first != last; ++first, ++idx) {
        new (&out[idx]) T(std::move(*first));
      }
      rollback.dismiss();
    }
  }

  // Specialization for trivially copyable types.
  template <class T>
  typename std::enable_if<is_trivially_copyable_v<T>>::type
  moveToUninitialized(T* first, T* last, T* out) {
    std::memmove(
        static_cast<void*>(out),
        static_cast<void const*>(first),
        (last - first) * sizeof *first);
  }

  /*
   * Move a range to a range of uninitialized memory. Assumes the
   * ranges don't overlap. Inserts an element at out + pos using
   * emplaceFunc(). out will contain (end - begin) + 1 elements on success and
   * none on failure. If emplaceFunc() throws [begin, end) is unmodified.
   */
  template <class T, class EmplaceFunc>
  void moveToUninitializedEmplace(
      T* begin, T* end, T* out, SizeType pos, EmplaceFunc&& emplaceFunc) {
    // Must be called first so that if it throws [begin, end) is unmodified.
    // We have to support the strong exception guarantee for emplace_back().
    emplaceFunc(out + pos);
    // move old elements to the left of the new one
    {
      auto rollback = makeGuard([&] { //
        out[pos].~T();
      });
      if (begin) {
        this->moveToUninitialized(begin, begin + pos, out);
      }
      rollback.dismiss();
    }
    // move old elements to the right of the new one
    {
      auto rollback = makeGuard([&] {
        for (SizeType i = 0; i <= pos; ++i) {
          out[i].~T();
        }
      });
      if (begin + pos < end) {
        this->moveToUninitialized(begin + pos, end, out + pos + 1);
      }
      rollback.dismiss();
    }
  }
};

template <class SizeType, bool AlwaysUseHeap>
struct IntegralSizePolicy<SizeType, false, AlwaysUseHeap>
    : public IntegralSizePolicyBase<SizeType, false, AlwaysUseHeap> {
 public:
  template <class T>
  void moveToUninitialized(T* /*first*/, T* /*last*/, T* /*out*/) {
    assume_unreachable();
  }

  template <class T, class EmplaceFunc>
  void moveToUninitializedEmplace(
      T* /* begin */,
      T* /* end */,
      T* /* out */,
      SizeType /* pos */,
      EmplaceFunc&& /* emplaceFunc */) {
    assume_unreachable();
  }
};

/*
 * If you're just trying to use this class, ignore everything about
 * this next small_vector_base class thing.
 *
 * The purpose of this junk is to minimize sizeof(small_vector<>)
 * and allow specifying the template parameters in whatever order is
 * convenient for the user. There are a few extra steps here to try
 * to keep the error messages at least semi-reasonable.
 *
 * Apologies for all the black magic.
 */
namespace mpl = boost::mpl;

template <
    class Value,
    std::size_t RequestedMaxInline,
    class InPolicyA,
    class InPolicyB,
    class InPolicyC>
struct small_vector_base {
  typedef mpl::vector<InPolicyA, InPolicyB, InPolicyC> PolicyList;

  /*
   * Determine the size type
   */
  typedef typename mpl::filter_view<
      PolicyList,
      std::is_integral<mpl::placeholders::_1>>::type Integrals;
  typedef typename mpl::eval_if<
      mpl::empty<Integrals>,
      mpl::identity<std::size_t>,
      mpl::front<Integrals>>::type SizeType;

  static_assert(
      std::is_unsigned<SizeType>::value,
      "Size type should be an unsigned integral type");
  static_assert(
      mpl::size<Integrals>::value == 0 || mpl::size<Integrals>::value == 1,
      "Multiple size types specified in small_vector<>");

  /*
   * Determine whether we should allow spilling to the heap or not.
   */
  typedef typename mpl::count<PolicyList, small_vector_policy::NoHeap>::type
      HasNoHeap;

  static_assert(
      HasNoHeap::value == 0 || HasNoHeap::value == 1,
      "Multiple copies of small_vector_policy::NoHeap "
      "supplied; this is probably a mistake");

  /*
   * Make the real policy base classes.
   */
  typedef IntegralSizePolicy<
      SizeType,
      !HasNoHeap::value,
      RequestedMaxInline == 0>
      ActualSizePolicy;

  /*
   * Now inherit from them all. This is done in such a convoluted
   * way to make sure we get the empty base optimization on all these
   * types to keep sizeof(small_vector<>) minimal.
   */
  typedef boost::totally_ordered1<
      small_vector<Value, RequestedMaxInline, InPolicyA, InPolicyB, InPolicyC>,
      ActualSizePolicy>
      type;
};

inline void* unshiftPointer(void* p, size_t sizeBytes) {
  return static_cast<char*>(p) - sizeBytes;
}

inline void* shiftPointer(void* p, size_t sizeBytes) {
  return static_cast<char*>(p) + sizeBytes;
}

} // namespace detail
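
/*
 * Worked example of shiftPointer()/unshiftPointer() (illustrative only):
 * when a capacity is "heapified" it is stored at the front of the heap
 * allocation and heap_ points just past it:
 *
 *   void* p = checkedMalloc(bytes); // layout: [capacity][elements...]
 *   heap_ = shiftPointer(p, kHeapifyCapacitySize);
 *
 * unshiftPointer(heap_, kHeapifyCapacitySize) recovers p, both to read
 * the stored capacity and to free the allocation.
 */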

//////////////////////////////////////////////////////////////////////
FOLLY_SV_PACK_PUSH
template <
    class Value,
    std::size_t RequestedMaxInline = 1,
    class PolicyA = void,
    class PolicyB = void,
    class PolicyC = void>
class small_vector : public detail::small_vector_base<
                         Value,
                         RequestedMaxInline,
                         PolicyA,
                         PolicyB,
                         PolicyC>::type {
  typedef typename detail::
      small_vector_base<Value, RequestedMaxInline, PolicyA, PolicyB, PolicyC>::
          type BaseType;
  typedef typename BaseType::InternalSizeType InternalSizeType;

  /*
   * Figure out the max number of elements we should inline. (If
   * the user asks for fewer inlined elements than we can fit unioned
   * into our value_type*, we will inline more than they asked.)
   */
  static constexpr auto kSizeOfValuePtr = sizeof(Value*);
  static constexpr auto kSizeOfValue = sizeof(Value);
  static constexpr std::size_t MaxInline{
      RequestedMaxInline == 0
          ? 0
          : constexpr_max(kSizeOfValuePtr / kSizeOfValue, RequestedMaxInline)};
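
  /*
   * Worked example of the MaxInline computation above (illustrative only,
   * assuming a 64-bit platform where sizeof(Value*) == 8):
   *
   *   small_vector<char, 1>    // MaxInline = max(8 / 1, 1) = 8
   *   small_vector<int32_t, 1> // MaxInline = max(8 / 4, 1) = 2
   *   small_vector<int64_t, 4> // MaxInline = max(8 / 8, 4) = 4
   *
   * The inline storage shares a union with a Value*, so inlining fewer
   * elements than fit in a pointer would save nothing.
   */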

 public:
  typedef std::size_t size_type;
  typedef Value value_type;
  typedef std::allocator<Value> allocator_type;
  typedef value_type& reference;
  typedef value_type const& const_reference;
  typedef value_type* iterator;
  typedef value_type* pointer;
  typedef value_type const* const_iterator;
  typedef value_type const* const_pointer;
  typedef std::ptrdiff_t difference_type;

  typedef std::reverse_iterator<iterator> reverse_iterator;
  typedef std::reverse_iterator<const_iterator> const_reverse_iterator;

  small_vector() = default;
  // Allocator is unused here. It is taken in for compatibility with the
  // std::vector interface, but it will be ignored.
  small_vector(const std::allocator<Value>&) {}

  small_vector(small_vector const& o) {
    if (kShouldCopyInlineTrivial && !o.isExtern()) {
      copyInlineTrivial(o);
      return;
    }

    auto n = o.size();
    makeSize(n);
    {
      auto rollback = makeGuard([&] { freeHeap(); });
      std::uninitialized_copy(o.begin(), o.begin() + n, begin());
      rollback.dismiss();
    }
    this->setSize(n);
  }

  small_vector(small_vector&& o) noexcept(
      std::is_nothrow_move_constructible<Value>::value) {
    if (o.isExtern()) {
      this->u.pdata_.heap_ = o.u.pdata_.heap_;
      o.u.pdata_.heap_ = nullptr;
      this->swapSizePolicy(o);
      if (kHasInlineCapacity) {
        this->u.setCapacity(o.u.getCapacity());
      }
    } else {
      if (kShouldCopyInlineTrivial) {
        copyInlineTrivial(o);
        o.resetSizePolicy();
      } else {
        auto n = o.size();
        std::uninitialized_copy(
            std::make_move_iterator(o.begin()),
            std::make_move_iterator(o.end()),
            begin());
        this->setSize(n);
        o.clear();
      }
    }
  }

  small_vector(std::initializer_list<value_type> il) {
    constructImpl(il.begin(), il.end(), std::false_type());
  }

  explicit small_vector(size_type n) {
    doConstruct(n, [&](void* p) { new (p) value_type(); });
  }

  small_vector(size_type n, value_type const& t) {
    doConstruct(n, [&](void* p) { new (p) value_type(t); });
  }

  template <class Arg>
  explicit small_vector(Arg arg1, Arg arg2) {
    // Forward using std::is_arithmetic to get to the proper
    // implementation; this disambiguates between the iterators and
    // (size_t, value_type) meaning for this constructor.
    constructImpl(arg1, arg2, std::is_arithmetic<Arg>());
  }

  ~small_vector() {
    for (auto& t : *this) {
      (&t)->~value_type();
    }
    freeHeap();
  }

  small_vector& operator=(small_vector const& o) {
    if (FOLLY_LIKELY(this != &o)) {
      if (kShouldCopyInlineTrivial && !this->isExtern() && !o.isExtern()) {
        copyInlineTrivial(o);
      } else if (o.size() < capacity()) {
        const size_t oSize = o.size();
        detail::partiallyUninitializedCopy(o.begin(), oSize, begin(), size());
        this->setSize(oSize);
      } else {
        assign(o.begin(), o.end());
      }
    }
    return *this;
  }

  small_vector& operator=(small_vector&& o) noexcept(
      std::is_nothrow_move_constructible<Value>::value) {
    if (FOLLY_LIKELY(this != &o)) {
      // If either is external, reduce to the default-constructed case for
      // this, since there is nothing that we can move in-place.
      if (this->isExtern() || o.isExtern()) {
        reset();
      }

      if (!o.isExtern()) {
        if (kShouldCopyInlineTrivial) {
          copyInlineTrivial(o);
          o.resetSizePolicy();
        } else {
          const size_t oSize = o.size();
          detail::partiallyUninitializedCopy(
              std::make_move_iterator(o.u.buffer()),
              oSize,
              this->u.buffer(),
              size());
          this->setSize(oSize);
          o.clear();
        }
      } else {
        this->u.pdata_.heap_ = o.u.pdata_.heap_;
        o.u.pdata_.heap_ = nullptr;
        // this was already reset above, so it's empty and internal.
        this->swapSizePolicy(o);
        if (kHasInlineCapacity) {
          this->u.setCapacity(o.u.getCapacity());
        }
      }
    }
    return *this;
  }
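
  /*
   * Illustrative sketch of the move semantics above (not a further API
   * guarantee): moving a heap-backed small_vector steals the heap pointer
   * in O(1) and leaves the source empty, while moving an in situ one must
   * move the elements themselves:
   *
   *   folly::small_vector<std::string, 2> a{"x", "y", "z"}; // on the heap
   *   auto b = std::move(a); // pointer steal, no element moves
   */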

  bool operator==(small_vector const& o) const {
    return size() == o.size() && std::equal(begin(), end(), o.begin());
  }

  bool operator<(small_vector const& o) const {
    return std::lexicographical_compare(begin(), end(), o.begin(), o.end());
  }

  static constexpr size_type max_size() {
    return !BaseType::kShouldUseHeap ? static_cast<size_type>(MaxInline)
                                     : BaseType::policyMaxSize();
  }

  allocator_type get_allocator() const { return {}; }

  size_type size() const { return this->doSize(); }
  bool empty() const { return !size(); }

  iterator begin() { return data(); }
  iterator end() { return data() + size(); }
  const_iterator begin() const { return data(); }
  const_iterator end() const { return data() + size(); }
  const_iterator cbegin() const { return begin(); }
  const_iterator cend() const { return end(); }

  reverse_iterator rbegin() { return reverse_iterator(end()); }
  reverse_iterator rend() { return reverse_iterator(begin()); }

  const_reverse_iterator rbegin() const {
    return const_reverse_iterator(end());
  }

  const_reverse_iterator rend() const {
    return const_reverse_iterator(begin());
  }

  const_reverse_iterator crbegin() const { return rbegin(); }
  const_reverse_iterator crend() const { return rend(); }

  /*
   * Usually one of the simplest functions in a Container-like class
   * but a bit more complex here. We have to handle all combinations
   * of in-place vs. heap between this and o.
   */
  void swap(small_vector& o) noexcept(
      std::is_nothrow_move_constructible<Value>::value &&
      IsNothrowSwappable<Value>::value) {
    using std::swap; // Allow ADL on swap for our value_type.

    if (this->isExtern() && o.isExtern()) {
      this->swapSizePolicy(o);

      auto* tmp = u.pdata_.heap_;
      u.pdata_.heap_ = o.u.pdata_.heap_;
      o.u.pdata_.heap_ = tmp;

      if (kHasInlineCapacity) {
        const auto capacity_ = this->u.getCapacity();
        this->setCapacity(o.u.getCapacity());
        o.u.setCapacity(capacity_);
      }

      return;
    }

    if (!this->isExtern() && !o.isExtern()) {
      auto& oldSmall = size() < o.size() ? *this : o;
      auto& oldLarge = size() < o.size() ? o : *this;

      for (size_type i = 0; i < oldSmall.size(); ++i) {
        swap(oldSmall[i], oldLarge[i]);
      }

      size_type i = oldSmall.size();
      const size_type ci = i;
      {
        auto rollback = makeGuard([&] {
          oldSmall.setSize(i);
          for (; i < oldLarge.size(); ++i) {
            oldLarge[i].~value_type();
          }
          oldLarge.setSize(ci);
        });
        for (; i < oldLarge.size(); ++i) {
          auto addr = oldSmall.begin() + i;
          new (addr) value_type(std::move(oldLarge[i]));
          oldLarge[i].~value_type();
        }
        rollback.dismiss();
      }
      oldSmall.setSize(i);
      oldLarge.setSize(ci);
      return;
    }

    // isExtern != o.isExtern()
    auto& oldExtern = o.isExtern() ? o : *this;
    auto& oldIntern = o.isExtern() ? *this : o;

    auto oldExternCapacity = oldExtern.capacity();
    auto oldExternHeap = oldExtern.u.pdata_.heap_;

    auto buff = oldExtern.u.buffer();
    size_type i = 0;
    {
      auto rollback = makeGuard([&] {
        for (size_type kill = 0; kill < i; ++kill) {
          buff[kill].~value_type();
        }
        for (; i < oldIntern.size(); ++i) {
          oldIntern[i].~value_type();
        }
        oldIntern.resetSizePolicy();
        oldExtern.u.pdata_.heap_ = oldExternHeap;
        oldExtern.setCapacity(oldExternCapacity);
      });
      for (; i < oldIntern.size(); ++i) {
        new (&buff[i]) value_type(std::move(oldIntern[i]));
        oldIntern[i].~value_type();
      }
      rollback.dismiss();
    }
    oldIntern.u.pdata_.heap_ = oldExternHeap;
    this->swapSizePolicy(o);
    oldIntern.setCapacity(oldExternCapacity);
  }

  void resize(size_type sz) {
    if (sz <= size()) {
      downsize(sz);
      return;
    }
    auto extra = sz - size();
    makeSize(sz);
    detail::populateMemForward(
        begin() + size(), extra, [&](void* p) { new (p) value_type(); });
    this->incrementSize(extra);
  }

  void resize(size_type sz, value_type const& v) {
    if (sz < size()) {
      erase(begin() + sz, end());
      return;
    }
    auto extra = sz - size();
    makeSize(sz);
    detail::populateMemForward(
        begin() + size(), extra, [&](void* p) { new (p) value_type(v); });
    this->incrementSize(extra);
  }

  value_type* data() noexcept {
    return this->isExtern() ? u.heap() : u.buffer();
  }

  value_type const* data() const noexcept {
    return this->isExtern() ? u.heap() : u.buffer();
  }

  template <class... Args>
  iterator emplace(const_iterator p, Args&&... args) {
    if (p == cend()) {
      emplace_back(std::forward<Args>(args)...);
      return end() - 1;
    }

    /*
     * We implement emplace at places other than at the back with a
     * temporary for exception safety reasons. It is possible to
     * avoid having to do this, but it becomes hard to maintain the
     * basic exception safety guarantee (unless you respond to a copy
     * constructor throwing by clearing the whole vector).
     *
     * The reason for this is that otherwise you have to destruct an
     * element before constructing this one in its place---if the
     * constructor throws, you either need a nothrow default
     * constructor or a nothrow copy/move to get something back in the
     * "gap", and the vector requirements don't guarantee we have any
     * of these. Clearing the whole vector is a legal response in
     * this situation, but it seems like this implementation is easy
     * enough and probably better.
     */
    return insert(p, value_type(std::forward<Args>(args)...));
  }

  void reserve(size_type sz) { makeSize(sz); }

  size_type capacity() const {
    struct Unreachable {
      size_t operator()(void*) const { assume_unreachable(); }
    };
    using AllocationSizeOrUnreachable =
        conditional_t<kMustTrackHeapifiedCapacity, Unreachable, AllocationSize>;
    if (this->isExtern()) {
      if (hasCapacity()) {
        return u.getCapacity();
      }
      return AllocationSizeOrUnreachable{}(u.pdata_.heap_) /
          sizeof(value_type);
    }
    return MaxInline;
  }

  void shrink_to_fit() {
    if (!this->isExtern()) {
      return;
    }

    small_vector tmp(begin(), end());
    tmp.swap(*this);
  }
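
  /*
   * Illustrative capacity() behavior (values assume a 64-bit platform and
   * the default size policy): while in situ, capacity() is exactly
   * MaxInline; after spilling it is read from the stored capacity field or
   * derived from malloc_usable_size(), so it may exceed what was requested:
   *
   *   folly::small_vector<int, 4> v;
   *   assert(v.capacity() == 4); // in situ
   *   v.reserve(5);              // spills; capacity() >= 5
   */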

  template <class... Args>
  reference emplace_back(Args&&... args) {
    auto isize_ = this->getInternalSize();
    if (isize_ < MaxInline) {
      new (u.buffer() + isize_) value_type(std::forward<Args>(args)...);
      this->incrementSize(1);
      return *(u.buffer() + isize_);
    }
    if (!BaseType::kShouldUseHeap) {
      throw_exception<std::length_error>("max_size exceeded in small_vector");
    }
    auto size_ = size();
    auto capacity_ = capacity();
    if (capacity_ == size_) {
      // Any of args may be references into the vector.
      // When we are reallocating, we have to be careful to construct the new
      // element before modifying the data in the old buffer.
      makeSize(
          size_ + 1,
          [&](void* p) { new (p) value_type(std::forward<Args>(args)...); },
          size_);
    } else {
      // We know the vector is stored in the heap.
      new (u.heap() + size_) value_type(std::forward<Args>(args)...);
    }
    this->incrementSize(1);
    return *(u.heap() + size_);
  }

  void push_back(value_type&& t) { emplace_back(std::move(t)); }

  void push_back(value_type const& t) { emplace_back(t); }

  void pop_back() {
    // Ideally this would be implemented in terms of erase(end() - 1) to reuse
    // the higher-level abstraction, but neither Clang nor GCC is able to
    // optimize it away. If you change this, please verify (with disassembly)
    // that the generated code on -O3 (and ideally -O2) stays short.
    downsize(size() - 1);
  }

  iterator insert(const_iterator constp, value_type&& t) {
    iterator p = unconst(constp);
    if (p == end()) {
      push_back(std::move(t));
      return end() - 1;
    }

    auto offset = p - begin();
    auto size_ = size();
    if (capacity() == size_) {
      makeSize(
          size_ + 1,
          [&t](void* ptr) { new (ptr) value_type(std::move(t)); },
          offset);
      this->incrementSize(1);
    } else {
      detail::moveObjectsRightAndCreate(
          data() + offset,
          data() + size_,
          data() + size_ + 1,
          [&]() mutable -> value_type&& { return std::move(t); });
      this->incrementSize(1);
    }
    return begin() + offset;
  }

  iterator insert(const_iterator p, value_type const& t) {
    // Make a copy and forward to the rvalue value_type&& overload
    // above.
    return insert(p, value_type(t));
  }

  iterator insert(const_iterator pos, size_type n, value_type const& val) {
    auto offset = pos - begin();
    auto size_ = size();
    makeSize(size_ + n);
    detail::moveObjectsRightAndCreate(
        data() + offset,
        data() + size_,
        data() + size_ + n,
        [&]() mutable -> value_type const& { return val; });
    this->incrementSize(n);
    return begin() + offset;
  }

  template <class Arg>
  iterator insert(const_iterator p, Arg arg1, Arg arg2) {
    // Forward using std::is_arithmetic to get to the proper
    // implementation; this disambiguates between the iterators and
    // (size_t, value_type) meaning for this function.
    return insertImpl(unconst(p), arg1, arg2, std::is_arithmetic<Arg>());
  }

  iterator insert(const_iterator p, std::initializer_list<value_type> il) {
    return insert(p, il.begin(), il.end());
  }

  iterator erase(const_iterator q) {
    // Ideally this would be implemented in terms of erase(q, q + 1) to reuse
    // the higher-level abstraction, but neither Clang nor GCC is able to
    // optimize it away. If you change this, please verify (with disassembly)
    // that the generated code on -O3 (and ideally -O2) stays short.
    std::move(unconst(q) + 1, end(), unconst(q));
    downsize(size() - 1);
    return unconst(q);
  }

  iterator erase(const_iterator q1, const_iterator q2) {
    if (q1 == q2) {
      return unconst(q1);
    }
    std::move(unconst(q2), end(), unconst(q1));
    downsize(size() - std::distance(q1, q2));
    return unconst(q1);
  }

  void clear() {
    // Ideally this would be implemented in terms of erase(begin(), end()) to
    // reuse the higher-level abstraction, but neither Clang nor GCC is able
    // to optimize it away. If you change this, please verify (with
    // disassembly) that the generated code on -O3 (and ideally -O2) stays
    // short.
    downsize(0);
  }

  template <class Arg>
  void assign(Arg first, Arg last) {
    clear();
    insert(end(), first, last);
  }

  void assign(std::initializer_list<value_type> il) {
    assign(il.begin(), il.end());
  }

  void assign(size_type n, const value_type& t) {
    clear();
    insert(end(), n, t);
  }

  reference front() {
    assert(!empty());
    return *begin();
  }
  reference back() {
    assert(!empty());
    return *(end() - 1);
  }
  const_reference front() const {
    assert(!empty());
    return *begin();
  }
  const_reference back() const {
    assert(!empty());
    return *(end() - 1);
  }

  reference operator[](size_type i) {
    assert(i < size());
    return *(begin() + i);
  }

  const_reference operator[](size_type i) const {
    assert(i < size());
    return *(begin() + i);
  }

  reference at(size_type i) {
    if (i >= size()) {
      throw_exception<std::out_of_range>("index out of range");
    }
    return (*this)[i];
  }

  const_reference at(size_type i) const {
    if (i >= size()) {
      throw_exception<std::out_of_range>("index out of range");
    }
    return (*this)[i];
  }

 private:
  static iterator unconst(const_iterator it) {
    return const_cast<iterator>(it);
  }

  void downsize(size_type sz) {
    assert(sz <= size());
    for (auto it = (begin() + sz); it != end(); ++it) {
      it->~value_type();
    }
    this->setSize(sz);
  }

  template <typename T = value_type>
  typename std::enable_if<is_trivially_copyable_v<T>>::type copyInlineTrivial(
      small_vector const& o) {
    // Copy the entire inline storage, instead of just size() values, to make
    // the loop fixed-size and unrollable.
    std::copy(o.u.buffer(), o.u.buffer() + MaxInline, u.buffer());
    this->setSize(o.size());
  }

  template <typename T = value_type>
  typename std::enable_if<!is_trivially_copyable_v<T>>::type copyInlineTrivial(
      small_vector const&) {
    assume_unreachable();
  }

  void reset() {
    clear();
    freeHeap();
    this->resetSizePolicy();
  }

  // The std::false_type argument is part of disambiguating the
  // iterator insert functions from integral types (see insert().)
  template <class It>
  iterator insertImpl(iterator pos, It first, It last, std::false_type) {
    if (first == last) {
      return pos;
    }
    using categ = typename std::iterator_traits<It>::iterator_category;
    using it_ref = typename std::iterator_traits<It>::reference;
    if (std::is_same<categ, std::input_iterator_tag>::value) {
      auto offset = pos - begin();
      while (first != last) {
        pos = insert(pos, *first++);
        ++pos;
      }
      return begin() + offset;
    }

    auto const distance = std::distance(first, last);
    auto const offset = pos - begin();
    auto size_ = size();
    assert(distance >= 0);
    assert(offset >= 0);
    makeSize(size_ + distance);
    detail::moveObjectsRightAndCreate(
        data() + offset,
        data() + size_,
        data() + size_ + distance,
        [&, in = last]() mutable -> it_ref { return *--in; });
    this->incrementSize(distance);
    return begin() + offset;
  }

  iterator insertImpl(
      iterator pos, size_type n, const value_type& val, std::true_type) {
    // The true_type means this should call the size_t,value_type
    // overload. (See insert().)
    return insert(pos, n, val);
  }

  // The std::false_type argument came from std::is_arithmetic as part
  // of disambiguating an overload (see the comment in the
  // constructor).
  template <class It>
  void constructImpl(It first, It last, std::false_type) {
    typedef typename std::iterator_traits<It>::iterator_category categ;
    if (std::is_same<categ, std::input_iterator_tag>::value) {
      // With iterators that only allow a single pass, we can't really
      // do anything sane here.
      while (first != last) {
        emplace_back(*first++);
      }
      return;
    }
    size_type distance = std::distance(first, last);
    if (distance <= MaxInline) {
      this->incrementSize(distance);
      detail::populateMemForward(u.buffer(), distance, [&](void* p) {
        new (p) value_type(*first++);
      });
      return;
    }
    makeSize(distance);
    this->incrementSize(distance);
    {
      auto rollback = makeGuard([&] { freeHeap(); });
      detail::populateMemForward(u.heap(), distance, [&](void* p) {
        new (p) value_type(*first++);
      });
      rollback.dismiss();
    }
  }

  template <typename InitFunc>
  void doConstruct(size_type n, InitFunc&& func) {
    makeSize(n);
    assert(size() == 0);
    this->incrementSize(n);
    {
      auto rollback = makeGuard([&] { freeHeap(); });
      detail::populateMemForward(data(), n, std::forward<InitFunc>(func));
      rollback.dismiss();
    }
  }

  // The true_type means we should forward to the size_t,value_type
  // overload.
  void constructImpl(size_type n, value_type const& val, std::true_type) {
    doConstruct(n, [&](void* p) { new (p) value_type(val); });
  }

  /*
   * Compute the size after growth.
   */
  size_type computeNewSize() const {
    return std::min((3 * capacity()) / 2 + 1, max_size());
  }

  void makeSize(size_type newSize) {
    if (newSize <= capacity()) {
      return;
    }
    makeSizeInternal(
        newSize, false, [](void*) { assume_unreachable(); }, 0);
  }

  template <typename EmplaceFunc>
  void makeSize(size_type newSize, EmplaceFunc&& emplaceFunc, size_type pos) {
    assert(size() == capacity());
    makeSizeInternal(
        newSize, true, std::forward<EmplaceFunc>(emplaceFunc), pos);
  }
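
  /*
   * Worked example of computeNewSize() above (illustrative only): growth
   * is roughly 1.5x, so successive reallocations request capacities
   *
   *   8 -> (3*8)/2+1 = 13 -> 20 -> 31 -> 47 -> ...
   *
   * capped at max_size(); the resulting capacity may come out larger still
   * after goodMallocSize() rounds the allocation up.
   */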

  /*
   * Ensure we have a large enough memory region to be size `newSize'.
   * Will move/copy elements if we are spilling to heap_ or needed to
   * allocate a new region, but if resized in place doesn't initialize
   * anything in the new region. In any case doesn't change size().
   * Supports insertion of a new element during reallocation, given an
   * emplace function for the new element and its position.
   * NOTE: If reallocation is not needed, insert must be false,
   * because we only know how to emplace elements into new memory.
   */
  template <typename EmplaceFunc>
  void makeSizeInternal(
      size_type newSize,
      bool insert,
      EmplaceFunc&& emplaceFunc,
      size_type pos) {
    if (newSize > max_size()) {
      throw_exception<std::length_error>("max_size exceeded in small_vector");
    }
    assert(this->kShouldUseHeap);
    // This branch isn't needed for correctness, but allows the optimizer to
    // skip generating code for the rest of this function in NoHeap
    // small_vectors.
    if (!this->kShouldUseHeap) {
      return;
    }
    newSize = std::max(newSize, computeNewSize());

    const auto needBytes = newSize * sizeof(value_type);
    // If the capacity isn't explicitly stored inline, but the heap
    // allocation is grown to over some threshold, we should store
    // a capacity at the front of the heap allocation.
    const bool heapifyCapacity =
        !kHasInlineCapacity && needBytes >= kHeapifyCapacityThreshold;
    const size_t allocationExtraBytes =
        heapifyCapacity ? kHeapifyCapacitySize : 0;
    const size_t goodAllocationSizeBytes =
        goodMallocSize(needBytes + allocationExtraBytes);
    const size_t newCapacity =
        (goodAllocationSizeBytes - allocationExtraBytes) / sizeof(value_type);
    // Make sure that the allocation request has a size computable from the
    // capacity, instead of using goodAllocationSizeBytes, so that we can do
    // sized deallocation. If goodMallocSize() gives us extra bytes that are
    // not a multiple of the value size we cannot use them anyway.
    const size_t sizeBytes =
        newCapacity * sizeof(value_type) + allocationExtraBytes;
    void* newh = checkedMalloc(sizeBytes);

    value_type* newp = static_cast<value_type*>(
        heapifyCapacity ? detail::shiftPointer(newh, kHeapifyCapacitySize)
                        : newh);

    {
      auto rollback = makeGuard([&] { //
        sizedFree(newh, sizeBytes);
      });
      if (insert) {
        // move and insert the new element
        this->moveToUninitializedEmplace(
            begin(), end(), newp, pos, std::forward<EmplaceFunc>(emplaceFunc));
      } else {
        // move without inserting new element
        if (data()) {
          this->moveToUninitialized(begin(), end(), newp);
        }
      }
      rollback.dismiss();
    }
    for (auto& val : *this) {
      val.~value_type();
    }
    freeHeap();

    // Store shifted pointer if capacity is heapified
    u.pdata_.heap_ = newp;
    this->setHeapifiedCapacity(heapifyCapacity);
    this->setExtern(true);
    this->setCapacity(newCapacity);
  }

  /*
   * This will set the capacity field, stored inline in the storage_ field
   * if there is sufficient room to store it.
   */
  void setCapacity(size_type newCapacity) {
    assert(this->isExtern());
    if (hasCapacity()) {
      assert(newCapacity < std::numeric_limits<InternalSizeType>::max());
      u.setCapacity(newCapacity);
    }
  }

 private:
  struct HeapPtrWithCapacity {
    value_type* heap_;
    InternalSizeType capacity_;

    InternalSizeType getCapacity() const { return capacity_; }
    void setCapacity(InternalSizeType c) { capacity_ = c; }
    size_t allocationExtraBytes() const { return 0; }
  } FOLLY_SV_PACK_ATTR;

  struct HeapPtr {
    // heap_[-kHeapifyCapacitySize] contains the capacity.
    value_type* heap_;

    InternalSizeType getCapacity() const {
      return heap_ ? *static_cast<InternalSizeType*>(
                         detail::unshiftPointer(heap_, kHeapifyCapacitySize))
                   : 0;
    }
    void setCapacity(InternalSizeType c) {
      *static_cast<InternalSizeType*>(
          detail::unshiftPointer(heap_, kHeapifyCapacitySize)) = c;
    }
    size_t allocationExtraBytes() const { return kHeapifyCapacitySize; }
  } FOLLY_SV_PACK_ATTR;

  typedef aligned_storage_for_t<value_type[MaxInline]> InlineStorageDataType;

  typedef typename std::conditional<
      sizeof(value_type) * MaxInline != 0,
      InlineStorageDataType,
      value_type*>::type InlineStorageType;
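
  /*
   * Illustrative layout consequences (sizes assume a typical 64-bit
   * platform and the default size policy): for small_vector<int, 8> the
   * inline storage is 32 bytes, larger than HeapPtrWithCapacity, so the
   * capacity field fits in the union and kHasInlineCapacity is true. For
   * small_vector<int, 2> the inline storage is only 8 bytes, so once the
   * vector spills, the capacity is instead heapified in front of
   * sufficiently large allocations (see kHeapifyCapacityThreshold below).
   */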

  // If the values are trivially copyable and the storage is small enough,
  // copy it entirely. The limit is half of a cache line, to minimize the
  // probability of introducing a cache miss.
  static constexpr bool kShouldCopyInlineTrivial =
      is_trivially_copyable_v<Value> &&
      sizeof(InlineStorageType) <= hardware_constructive_interference_size / 2;

  static bool constexpr kHasInlineCapacity = !BaseType::kAlwaysUseHeap &&
      sizeof(HeapPtrWithCapacity) < sizeof(InlineStorageType);

  // This value should be a multiple of the word size.
  static size_t constexpr kHeapifyCapacitySize = sizeof(
      typename std::
          aligned_storage<sizeof(InternalSizeType), alignof(value_type)>::type);

  struct AllocationSize {
    auto operator()(void* ptr) const {
      (void)ptr;
#if defined(FOLLY_HAVE_MALLOC_USABLE_SIZE)
      return malloc_usable_size(ptr);
#endif
      // It is important that this method not return a size_t if we can't call
      // malloc_usable_size! kMustTrackHeapifiedCapacity uses the deduced
      // return type of this function in order to decide whether small_vector
      // must track its own capacity or not.
    }
  };

  static bool constexpr kMustTrackHeapifiedCapacity =
      BaseType::kAlwaysUseHeap ||
      !is_invocable_r_v<size_t, AllocationSize, void*>;

  // Threshold to control capacity heapifying.
  static size_t constexpr kHeapifyCapacityThreshold =
      (kMustTrackHeapifiedCapacity ? 0 : 100) * kHeapifyCapacitySize;

  static bool constexpr kAlwaysHasCapacity =
      kHasInlineCapacity || kMustTrackHeapifiedCapacity;

  typedef typename std::
      conditional<kHasInlineCapacity, HeapPtrWithCapacity, HeapPtr>::type
          PointerType;

  bool hasCapacity() const {
    return kAlwaysHasCapacity || !kHeapifyCapacityThreshold ||
        this->isHeapifiedCapacity();
  }

  void freeHeap() {
    if (!u.pdata_.heap_) {
      return;
    }
    if (this->isExtern()) {
      if (hasCapacity()) {
        auto extraBytes = u.pdata_.allocationExtraBytes();
        auto vp = detail::unshiftPointer(u.pdata_.heap_, extraBytes);
        sizedFree(vp, u.getCapacity() * sizeof(value_type) + extraBytes);
      } else {
        free(u.pdata_.heap_);
      }
    }
  }

  union Data {
    explicit Data() { pdata_.heap_ = nullptr; }

    PointerType pdata_;
    InlineStorageType storage_;

    value_type* buffer() noexcept {
      void* vp = &storage_;
      return static_cast<value_type*>(vp);
    }
    value_type const* buffer() const noexcept {
      return const_cast<Data*>(this)->buffer();
    }
    value_type* heap() noexcept { return pdata_.heap_; }
    value_type const* heap() const noexcept { return pdata_.heap_; }

    InternalSizeType getCapacity() const { return pdata_.getCapacity(); }
    void setCapacity(InternalSizeType c) { pdata_.setCapacity(c); }
  } u;
};
FOLLY_SV_PACK_POP

//////////////////////////////////////////////////////////////////////

// Basic guarantee only, or provides the nothrow guarantee iff T has a
// nothrow move or copy constructor.
template <class T, std::size_t MaxInline, class A, class B, class C>
void swap(
    small_vector<T, MaxInline, A, B, C>& a,
    small_vector<T, MaxInline, A, B, C>& b) {
  a.swap(b);
}

template <class T, std::size_t MaxInline, class A, class B, class C, class U>
void erase(small_vector<T, MaxInline, A, B, C>& v, U value) {
  v.erase(std::remove(v.begin(), v.end(), value), v.end());
}

template <
    class T,
    std::size_t MaxInline,
    class A,
    class B,
    class C,
    class Predicate>
void erase_if(small_vector<T, MaxInline, A, B, C>& v, Predicate predicate) {
  v.erase(std::remove_if(v.begin(), v.end(), predicate), v.end());
}

//////////////////////////////////////////////////////////////////////

namespace detail {

// Format support.
template <class T, std::size_t M, class A, class B, class C>
struct IndexableTraits<small_vector<T, M, A, B, C>>
    : public IndexableTraitsSeq<small_vector<T, M, A, B, C>> {};

} // namespace detail

} // namespace folly

FOLLY_POP_WARNING

#undef FOLLY_SV_PACK_ATTR
#undef FOLLY_SV_PACK_PUSH
#undef FOLLY_SV_PACK_POP