/* * Copyright (C) 2005-2020 Apple Inc. All rights reserved. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Library General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Library General Public License for more details. * * You should have received a copy of the GNU Library General Public License * along with this library; see the file COPYING.LIB. If not, write to * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, * Boston, MA 02110-1301, USA. * */ #pragma once #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if ASAN_ENABLED extern "C" void __sanitizer_annotate_contiguous_container(const void* begin, const void* end, const void* old_mid, const void* new_mid); #endif namespace JSC { class LLIntOffsetsExtractor; } namespace WTF { DECLARE_ALLOCATOR_WITH_HEAP_IDENTIFIER(Vector); template struct VectorDestructor; template struct VectorDestructor { static void destruct(T*, T*) {} }; template struct VectorDestructor { static void destruct(T* begin, T* end) { for (T* cur = begin; cur != end; ++cur) cur->~T(); } }; template struct VectorInitializer; template struct VectorInitializer { static void initializeIfNonPOD(T*, T*) { } static void initialize(T* begin, T* end) { VectorInitializer::initialize(begin, end); } }; template struct VectorInitializer { static void initializeIfNonPOD(T* begin, T* end) { for (T* cur = begin; cur != end; ++cur) new (NotNull, cur) T(); } static void initialize(T* begin, T* end) { initializeIfNonPOD(begin, end); } }; template struct VectorInitializer { static void 
initializeIfNonPOD(T* begin, T* end) { memset(static_cast(begin), 0, reinterpret_cast(end) - reinterpret_cast(begin)); } static void initialize(T* begin, T* end) { initializeIfNonPOD(begin, end); } }; template struct VectorMover; template struct VectorMover { static void move(T* src, T* srcEnd, T* dst) { while (src != srcEnd) { new (NotNull, dst) T(WTFMove(*src)); src->~T(); ++dst; ++src; } } static void moveOverlapping(T* src, T* srcEnd, T* dst) { if (src > dst) move(src, srcEnd, dst); else { T* dstEnd = dst + (srcEnd - src); while (src != srcEnd) { --srcEnd; --dstEnd; new (NotNull, dstEnd) T(WTFMove(*srcEnd)); srcEnd->~T(); } } } }; template struct VectorMover { static void move(const T* src, const T* srcEnd, T* dst) { memcpy(static_cast(dst), static_cast(const_cast(src)), reinterpret_cast(srcEnd) - reinterpret_cast(src)); } static void moveOverlapping(const T* src, const T* srcEnd, T* dst) { memmove(static_cast(dst), static_cast(const_cast(src)), reinterpret_cast(srcEnd) - reinterpret_cast(src)); } }; template struct VectorCopier; template struct VectorCopier { template static void uninitializedCopy(const T* src, const T* srcEnd, U* dst) { while (src != srcEnd) { new (NotNull, dst) U(*src); ++dst; ++src; } } }; template struct VectorCopier { static void uninitializedCopy(const T* src, const T* srcEnd, T* dst) { memcpy(static_cast(dst), static_cast(const_cast(src)), reinterpret_cast(srcEnd) - reinterpret_cast(src)); } template static void uninitializedCopy(const T* src, const T* srcEnd, U* dst) { VectorCopier::uninitializedCopy(src, srcEnd, dst); } }; template struct VectorFiller; template struct VectorFiller { static void uninitializedFill(T* dst, T* dstEnd, const T& val) { while (dst != dstEnd) { new (NotNull, dst) T(val); ++dst; } } }; template struct VectorFiller { static void uninitializedFill(T* dst, T* dstEnd, const T& val) { static_assert(sizeof(T) == 1, "Size of type T should be equal to one!"); memset(dst, val, dstEnd - dst); } }; template struct 
VectorComparer; template struct VectorComparer { static bool compare(const T* a, const T* b, size_t size) { for (size_t i = 0; i < size; ++i) if (!(a[i] == b[i])) return false; return true; } }; template struct VectorComparer { static bool compare(const T* a, const T* b, size_t size) { return memcmp(a, b, sizeof(T) * size) == 0; } }; template struct VectorTypeOperations { static void destruct(T* begin, T* end) { VectorDestructor::value, T>::destruct(begin, end); } static void initializeIfNonPOD(T* begin, T* end) { VectorInitializer::needsInitialization, VectorTraits::canInitializeWithMemset, T>::initializeIfNonPOD(begin, end); } static void initialize(T* begin, T* end) { VectorInitializer::needsInitialization, VectorTraits::canInitializeWithMemset, T>::initialize(begin, end); } static void move(T* src, T* srcEnd, T* dst) { VectorMover::canMoveWithMemcpy, T>::move(src, srcEnd, dst); } static void moveOverlapping(T* src, T* srcEnd, T* dst) { VectorMover::canMoveWithMemcpy, T>::moveOverlapping(src, srcEnd, dst); } static void uninitializedCopy(const T* src, const T* srcEnd, T* dst) { VectorCopier::canCopyWithMemcpy, T>::uninitializedCopy(src, srcEnd, dst); } static void uninitializedFill(T* dst, T* dstEnd, const T& val) { VectorFiller::canFillWithMemset, T>::uninitializedFill(dst, dstEnd, val); } static bool compare(const T* a, const T* b, size_t size) { return VectorComparer::canCompareWithMemcmp, T>::compare(a, b, size); } }; template class VectorBufferBase { WTF_MAKE_NONCOPYABLE(VectorBufferBase); public: template bool allocateBuffer(size_t newCapacity) { static_assert(action == FailureAction::Crash || action == FailureAction::Report); ASSERT(newCapacity); if (newCapacity > std::numeric_limits::max() / sizeof(T)) { if constexpr (action == FailureAction::Crash) CRASH(); else return false; } size_t sizeToAllocate = newCapacity * sizeof(T); T* newBuffer = nullptr; if constexpr (action == FailureAction::Crash) newBuffer = static_cast(Malloc::malloc(sizeToAllocate)); 
else { newBuffer = static_cast(Malloc::tryMalloc(sizeToAllocate)); if (UNLIKELY(!newBuffer)) return false; } m_capacity = sizeToAllocate / sizeof(T); m_buffer = newBuffer; return true; } ALWAYS_INLINE void allocateBuffer(size_t newCapacity) { allocateBuffer(newCapacity); } ALWAYS_INLINE bool tryAllocateBuffer(size_t newCapacity) { return allocateBuffer(newCapacity); } bool shouldReallocateBuffer(size_t newCapacity) const { return VectorTraits::canMoveWithMemcpy && m_capacity && newCapacity; } void reallocateBuffer(size_t newCapacity) { ASSERT(shouldReallocateBuffer(newCapacity)); if (newCapacity > std::numeric_limits::max() / sizeof(T)) CRASH(); size_t sizeToAllocate = newCapacity * sizeof(T); m_capacity = sizeToAllocate / sizeof(T); m_buffer = static_cast(Malloc::realloc(m_buffer, sizeToAllocate)); } void deallocateBuffer(T* bufferToDeallocate) { if (!bufferToDeallocate) return; if (m_buffer == bufferToDeallocate) { m_buffer = nullptr; m_capacity = 0; } Malloc::free(bufferToDeallocate); } T* buffer() { return m_buffer; } const T* buffer() const { return m_buffer; } static ptrdiff_t bufferMemoryOffset() { return OBJECT_OFFSETOF(VectorBufferBase, m_buffer); } size_t capacity() const { return m_capacity; } MallocPtr releaseBuffer() { T* buffer = m_buffer; m_buffer = nullptr; m_capacity = 0; return adoptMallocPtr(buffer); } protected: VectorBufferBase() : m_buffer(nullptr) , m_capacity(0) , m_size(0) { } VectorBufferBase(T* buffer, size_t capacity, size_t size) : m_buffer(buffer) , m_capacity(capacity) , m_size(size) { } ~VectorBufferBase() { // FIXME: It would be nice to find a way to ASSERT that m_buffer hasn't leaked here. } T* m_buffer; unsigned m_capacity; unsigned m_size; // Only used by the Vector subclass, but placed here to avoid padding the struct. 
}; template class VectorBuffer; template class VectorBuffer : private VectorBufferBase { private: typedef VectorBufferBase Base; public: VectorBuffer() { } VectorBuffer(size_t capacity, size_t size = 0) { m_size = size; // Calling malloc(0) might take a lock and may actually do an // allocation on some systems. if (capacity) allocateBuffer(capacity); } ~VectorBuffer() { deallocateBuffer(buffer()); } void swap(VectorBuffer& other, size_t, size_t) { std::swap(m_buffer, other.m_buffer); std::swap(m_capacity, other.m_capacity); } void restoreInlineBufferIfNeeded() { } #if ASAN_ENABLED void* endOfBuffer() { return buffer() + capacity(); } #endif using Base::allocateBuffer; using Base::tryAllocateBuffer; using Base::shouldReallocateBuffer; using Base::reallocateBuffer; using Base::deallocateBuffer; using Base::buffer; using Base::capacity; using Base::bufferMemoryOffset; using Base::releaseBuffer; protected: using Base::m_size; private: friend class JSC::LLIntOffsetsExtractor; using Base::m_buffer; using Base::m_capacity; }; template class VectorBuffer : private VectorBufferBase { WTF_MAKE_NONCOPYABLE(VectorBuffer); private: typedef VectorBufferBase Base; public: VectorBuffer() : Base(inlineBuffer(), inlineCapacity, 0) { } VectorBuffer(size_t capacity, size_t size = 0) : Base(inlineBuffer(), inlineCapacity, size) { if (capacity > inlineCapacity) Base::allocateBuffer(capacity); } ~VectorBuffer() { deallocateBuffer(buffer()); } template bool allocateBuffer(size_t newCapacity) { // FIXME: This should ASSERT(!m_buffer) to catch misuse/leaks. 
if (newCapacity > inlineCapacity) return Base::template allocateBuffer(newCapacity); m_buffer = inlineBuffer(); m_capacity = inlineCapacity; return true; } ALWAYS_INLINE void allocateBuffer(size_t newCapacity) { allocateBuffer(newCapacity); } ALWAYS_INLINE bool tryAllocateBuffer(size_t newCapacity) { return allocateBuffer(newCapacity); } void deallocateBuffer(T* bufferToDeallocate) { if (bufferToDeallocate == inlineBuffer()) return; Base::deallocateBuffer(bufferToDeallocate); } bool shouldReallocateBuffer(size_t newCapacity) const { // We cannot reallocate the inline buffer. return Base::shouldReallocateBuffer(newCapacity) && std::min(static_cast(m_capacity), newCapacity) > inlineCapacity; } void reallocateBuffer(size_t newCapacity) { ASSERT(shouldReallocateBuffer(newCapacity)); Base::reallocateBuffer(newCapacity); } void swap(VectorBuffer& other, size_t mySize, size_t otherSize) { if (buffer() == inlineBuffer() && other.buffer() == other.inlineBuffer()) { swapInlineBuffer(other, mySize, otherSize); std::swap(m_capacity, other.m_capacity); } else if (buffer() == inlineBuffer()) { m_buffer = other.m_buffer; other.m_buffer = other.inlineBuffer(); swapInlineBuffer(other, mySize, 0); std::swap(m_capacity, other.m_capacity); } else if (other.buffer() == other.inlineBuffer()) { other.m_buffer = m_buffer; m_buffer = inlineBuffer(); swapInlineBuffer(other, 0, otherSize); std::swap(m_capacity, other.m_capacity); } else { std::swap(m_buffer, other.m_buffer); std::swap(m_capacity, other.m_capacity); } } void restoreInlineBufferIfNeeded() { if (m_buffer) return; m_buffer = inlineBuffer(); m_capacity = inlineCapacity; } #if ASAN_ENABLED void* endOfBuffer() { ASSERT(buffer()); IGNORE_GCC_WARNINGS_BEGIN("invalid-offsetof") static_assert((offsetof(VectorBuffer, m_inlineBuffer) + sizeof(m_inlineBuffer)) % 8 == 0, "Inline buffer end needs to be on 8 byte boundary for ASan annotations to work."); IGNORE_GCC_WARNINGS_END if (buffer() == inlineBuffer()) return 
reinterpret_cast(m_inlineBuffer) + sizeof(m_inlineBuffer); return buffer() + capacity(); } #endif using Base::buffer; using Base::capacity; using Base::bufferMemoryOffset; MallocPtr releaseBuffer() { if (buffer() == inlineBuffer()) return { }; return Base::releaseBuffer(); } protected: using Base::m_size; private: using Base::m_buffer; using Base::m_capacity; void swapInlineBuffer(VectorBuffer& other, size_t mySize, size_t otherSize) { // FIXME: We could make swap part of VectorTypeOperations // https://bugs.webkit.org/show_bug.cgi?id=128863 swapInlineBuffers(inlineBuffer(), other.inlineBuffer(), mySize, otherSize); } static void swapInlineBuffers(T* left, T* right, size_t leftSize, size_t rightSize) { if (left == right) return; ASSERT(leftSize <= inlineCapacity); ASSERT(rightSize <= inlineCapacity); size_t swapBound = std::min(leftSize, rightSize); for (unsigned i = 0; i < swapBound; ++i) std::swap(left[i], right[i]); VectorTypeOperations::move(left + swapBound, left + leftSize, right + swapBound); VectorTypeOperations::move(right + swapBound, right + rightSize, left + swapBound); } T* inlineBuffer() { return reinterpret_cast_ptr(m_inlineBuffer); } const T* inlineBuffer() const { return reinterpret_cast_ptr(m_inlineBuffer); } #if ASAN_ENABLED // ASan needs the buffer to begin and end on 8-byte boundaries for annotations to work. // FIXME: Add a redzone before the buffer to catch off by one accesses. We don't need a guard after, because the buffer is the last member variable. static constexpr size_t asanInlineBufferAlignment = std::alignment_of::value >= 8 ? 
std::alignment_of::value : 8; static constexpr size_t asanAdjustedInlineCapacity = ((sizeof(T) * inlineCapacity + 7) & ~7) / sizeof(T); typename std::aligned_storage::type m_inlineBuffer[asanAdjustedInlineCapacity]; #else typename std::aligned_storage::value>::type m_inlineBuffer[inlineCapacity]; #endif }; struct UnsafeVectorOverflow { static NO_RETURN_DUE_TO_ASSERT void overflowed() { ASSERT_NOT_REACHED(); } }; // Template default values are in Forward.h. template class Vector : private VectorBuffer { WTF_MAKE_FAST_ALLOCATED; private: typedef VectorBuffer Base; typedef VectorTypeOperations TypeOperations; friend class JSC::LLIntOffsetsExtractor; public: // FIXME: Remove uses of ValueType and standardize on value_type, which is required for std::span. typedef T ValueType; typedef T value_type; typedef T* iterator; typedef const T* const_iterator; typedef std::reverse_iterator reverse_iterator; typedef std::reverse_iterator const_reverse_iterator; Vector() { } // Unlike in std::vector, this constructor does not initialize POD types. explicit Vector(size_t size) : Base(size, size) { asanSetInitialBufferSizeTo(size); if (begin()) TypeOperations::initializeIfNonPOD(begin(), end()); } Vector(size_t size, const T& val) : Base(size, size) { asanSetInitialBufferSizeTo(size); if (begin()) TypeOperations::uninitializedFill(begin(), end(), val); } Vector(const T* data, size_t dataSize) : Base(dataSize, dataSize) { asanSetInitialBufferSizeTo(dataSize); if (begin()) TypeOperations::uninitializedCopy(data, data + dataSize, begin()); } Vector(std::initializer_list initializerList) { reserveInitialCapacity(initializerList.size()); asanSetInitialBufferSizeTo(initializerList.size()); for (const auto& element : initializerList) uncheckedAppend(element); } template static Vector from(Items&&... 
items) { Vector result; auto size = sizeof...(items); result.reserveInitialCapacity(size); result.asanSetInitialBufferSizeTo(size); result.m_size = size; result.uncheckedInitialize<0>(std::forward(items)...); return result; } Vector(WTF::HashTableDeletedValueType) : Base(0, std::numeric_limits::max()) { } ~Vector() { if (m_size) TypeOperations::destruct(begin(), end()); asanSetBufferSizeToFullCapacity(0); } Vector(const Vector&); template explicit Vector(const Vector&); Vector& operator=(const Vector&); template Vector& operator=(const Vector&); Vector(Vector&&); Vector& operator=(Vector&&); size_t size() const { return m_size; } size_t sizeInBytes() const { return static_cast(m_size) * sizeof(T); } static ptrdiff_t sizeMemoryOffset() { return OBJECT_OFFSETOF(Vector, m_size); } size_t capacity() const { return Base::capacity(); } bool isEmpty() const { return !size(); } T& at(size_t i) { if (UNLIKELY(i >= size())) OverflowHandler::overflowed(); return Base::buffer()[i]; } const T& at(size_t i) const { if (UNLIKELY(i >= size())) OverflowHandler::overflowed(); return Base::buffer()[i]; } T& operator[](size_t i) { return at(i); } const T& operator[](size_t i) const { return at(i); } T* data() { return Base::buffer(); } const T* data() const { return Base::buffer(); } static ptrdiff_t dataMemoryOffset() { return Base::bufferMemoryOffset(); } iterator begin() { return data(); } iterator end() { return begin() + m_size; } const_iterator begin() const { return data(); } const_iterator end() const { return begin() + m_size; } reverse_iterator rbegin() { return reverse_iterator(end()); } reverse_iterator rend() { return reverse_iterator(begin()); } const_reverse_iterator rbegin() const { return const_reverse_iterator(end()); } const_reverse_iterator rend() const { return const_reverse_iterator(begin()); } T& first() { return at(0); } const T& first() const { return at(0); } T& last() { return at(size() - 1); } const T& last() const { return at(size() - 1); } T takeLast() { 
T result = WTFMove(last()); removeLast(); return result; } template bool contains(const U&) const; template size_t find(const U&) const; template size_t findMatching(const MatchFunction&) const; template size_t reverseFind(const U&) const; template bool appendIfNotContains(const U&); void shrink(size_t size); void grow(size_t size); void resize(size_t size); void resizeToFit(size_t size); ALWAYS_INLINE void reserveCapacity(size_t newCapacity) { reserveCapacity(newCapacity); } ALWAYS_INLINE bool tryReserveCapacity(size_t newCapacity) { return reserveCapacity(newCapacity); } ALWAYS_INLINE void reserveInitialCapacity(size_t initialCapacity) { reserveInitialCapacity(initialCapacity); } ALWAYS_INLINE bool tryReserveInitialCapacity(size_t initialCapacity) { return reserveInitialCapacity(initialCapacity); } void shrinkCapacity(size_t newCapacity); void shrinkToFit() { shrinkCapacity(size()); } void clear() { shrinkCapacity(0); } template Vector isolatedCopy() const &; template Vector isolatedCopy() &&; ALWAYS_INLINE void append(ValueType&& value) { append(std::forward(value)); } template ALWAYS_INLINE void append(U&& u) { append(std::forward(u)); } template ALWAYS_INLINE bool tryAppend(U&& u) { return append(std::forward(u)); } template ALWAYS_INLINE void constructAndAppend(Args&&... args) { constructAndAppend(std::forward(args)...); } template ALWAYS_INLINE bool tryConstructAndAppend(Args&&... 
args) { return constructAndAppend(std::forward(args)...); } void uncheckedAppend(ValueType&& value) { uncheckedAppend(std::forward(value)); } template void uncheckedAppend(U&&); template void uncheckedConstructAndAppend(Args&&...); template ALWAYS_INLINE void append(const U* u, size_t size) { append(u, size); } template ALWAYS_INLINE bool tryAppend(const U* u, size_t size) { return append(u, size); } template ALWAYS_INLINE void append(Span span) { append(span.data(), span.size()); } template ALWAYS_INLINE void uncheckedAppend(Span span) { uncheckedAppend(span.data(), span.size()); } template void appendVector(const Vector&); template void appendVector(Vector&&); void insert(size_t position, ValueType&& value) { insert(position, std::forward(value)); } template void insert(size_t position, const U*, size_t); template void insert(size_t position, U&&); template void insertVector(size_t position, const Vector&); void remove(size_t position); void remove(size_t position, size_t length); template bool removeFirst(const U&); template bool removeFirstMatching(const MatchFunction&, size_t startIndex = 0); template unsigned removeAll(const U&); template unsigned removeAllMatching(const MatchFunction&, size_t startIndex = 0); void removeLast() { if (UNLIKELY(isEmpty())) OverflowHandler::overflowed(); shrink(size() - 1); } void fill(const T&, size_t); void fill(const T& val) { fill(val, size()); } template void appendRange(Iterator start, Iterator end); MallocPtr releaseBuffer(); void swap(Vector& other) { #if ASAN_ENABLED if (this == std::addressof(other)) // ASan will crash if we try to restrict access to the same buffer twice. return; #endif // Make it possible to copy inline buffers. 
asanSetBufferSizeToFullCapacity(); other.asanSetBufferSizeToFullCapacity(); Base::swap(other, m_size, other.m_size); std::swap(m_size, other.m_size); asanSetInitialBufferSizeTo(m_size); other.asanSetInitialBufferSizeTo(other.m_size); } void reverse(); void checkConsistency(); template::type> Vector map(MapFunction) const; bool isHashTableDeletedValue() const { return m_size == std::numeric_limits::max(); } private: template bool reserveCapacity(size_t newCapacity); template bool reserveInitialCapacity(size_t initialCapacity); template bool expandCapacity(size_t newMinCapacity); template T* expandCapacity(size_t newMinCapacity, T*); template U* expandCapacity(size_t newMinCapacity, U*); template bool appendSlowCase(U&&); template bool constructAndAppend(Args&&...); template bool constructAndAppendSlowCase(Args&&...); template bool append(U&&); template bool append(const U*, size_t); template bool uncheckedAppend(const U*, size_t); template void uncheckedInitialize(U&& item, Items&&... items) { uncheckedInitialize(std::forward(item)); uncheckedInitialize(std::forward(items)...); } template void uncheckedInitialize(U&& value) { ASSERT(position < size()); ASSERT(position < capacity()); new (NotNull, begin() + position) T(std::forward(value)); } void asanSetInitialBufferSizeTo(size_t); void asanSetBufferSizeToFullCapacity(size_t); void asanSetBufferSizeToFullCapacity() { asanSetBufferSizeToFullCapacity(size()); } void asanBufferSizeWillChangeTo(size_t); using Base::m_size; using Base::buffer; using Base::capacity; using Base::swap; using Base::allocateBuffer; using Base::deallocateBuffer; using Base::tryAllocateBuffer; using Base::shouldReallocateBuffer; using Base::reallocateBuffer; using Base::restoreInlineBufferIfNeeded; using Base::releaseBuffer; #if ASAN_ENABLED using Base::endOfBuffer; #endif }; template Vector::Vector(const Vector& other) : Base(other.size(), other.size()) { asanSetInitialBufferSizeTo(other.size()); if (begin()) 
TypeOperations::uninitializedCopy(other.begin(), other.end(), begin()); } template template Vector::Vector(const Vector& other) : Base(other.size(), other.size()) { asanSetInitialBufferSizeTo(other.size()); if (begin()) TypeOperations::uninitializedCopy(other.begin(), other.end(), begin()); } template Vector& Vector::operator=(const Vector& other) { if (&other == this) return *this; if (size() > other.size()) shrink(other.size()); else if (other.size() > capacity()) { clear(); reserveCapacity(other.size()); ASSERT(begin()); } asanBufferSizeWillChangeTo(other.size()); std::copy(other.begin(), other.begin() + size(), begin()); TypeOperations::uninitializedCopy(other.begin() + size(), other.end(), end()); m_size = other.size(); return *this; } inline bool typelessPointersAreEqual(const void* a, const void* b) { return a == b; } template template Vector& Vector::operator=(const Vector& other) { // If the inline capacities match, we should call the more specific // template. If the inline capacities don't match, the two objects // shouldn't be allocated the same address. 
ASSERT(!typelessPointersAreEqual(&other, this)); if (size() > other.size()) shrink(other.size()); else if (other.size() > capacity()) { clear(); reserveCapacity(other.size()); ASSERT(begin()); } asanBufferSizeWillChangeTo(other.size()); std::copy(other.begin(), other.begin() + size(), begin()); TypeOperations::uninitializedCopy(other.begin() + size(), other.end(), end()); m_size = other.size(); return *this; } template inline Vector::Vector(Vector&& other) { swap(other); } template inline Vector& Vector::operator=(Vector&& other) { swap(other); return *this; } template template bool Vector::contains(const U& value) const { return find(value) != notFound; } template template size_t Vector::findMatching(const MatchFunction& matches) const { for (size_t i = 0; i < size(); ++i) { if (matches(at(i))) return i; } return notFound; } template template size_t Vector::find(const U& value) const { return findMatching([&](auto& item) { return item == value; }); } template template size_t Vector::reverseFind(const U& value) const { for (size_t i = 1; i <= size(); ++i) { const size_t index = size() - i; if (at(index) == value) return index; } return notFound; } template template bool Vector::appendIfNotContains(const U& value) { if (contains(value)) return false; append(value); return true; } template void Vector::fill(const T& val, size_t newSize) { if (size() > newSize) shrink(newSize); else if (newSize > capacity()) { clear(); reserveCapacity(newSize); ASSERT(begin()); } asanBufferSizeWillChangeTo(newSize); std::fill(begin(), end(), val); TypeOperations::uninitializedFill(end(), begin() + newSize, val); m_size = newSize; } template template void Vector::appendRange(Iterator start, Iterator end) { for (Iterator it = start; it != end; ++it) append(*it); } template template bool Vector::expandCapacity(size_t newMinCapacity) { return reserveCapacity(std::max(newMinCapacity, std::max(static_cast(minCapacity), capacity() + capacity() / 4 + 1))); } template template NEVER_INLINE T* 
Vector::expandCapacity(size_t newMinCapacity, T* ptr) { static_assert(action == FailureAction::Crash || action == FailureAction::Report); if (ptr < begin() || ptr >= end()) { bool success = expandCapacity(newMinCapacity); if constexpr (action == FailureAction::Report) { if (UNLIKELY(!success)) return nullptr; } UNUSED_PARAM(success); return ptr; } size_t index = ptr - begin(); bool success = expandCapacity(newMinCapacity); if constexpr (action == FailureAction::Report) { if (UNLIKELY(!success)) return nullptr; } UNUSED_PARAM(success); return begin() + index; } template template inline U* Vector::expandCapacity(size_t newMinCapacity, U* ptr) { static_assert(action == FailureAction::Crash || action == FailureAction::Report); bool success = expandCapacity(newMinCapacity); if constexpr (action == FailureAction::Report) { if (UNLIKELY(!success)) return nullptr; } UNUSED_PARAM(success); return ptr; } template inline void Vector::resize(size_t size) { if (size <= m_size) { TypeOperations::destruct(begin() + size, end()); asanBufferSizeWillChangeTo(size); } else { if (size > capacity()) expandCapacity(size); asanBufferSizeWillChangeTo(size); if (begin()) TypeOperations::initializeIfNonPOD(end(), begin() + size); } m_size = size; } template void Vector::resizeToFit(size_t size) { reserveCapacity(size); resize(size); } template void Vector::shrink(size_t size) { ASSERT(size <= m_size); TypeOperations::destruct(begin() + size, end()); asanBufferSizeWillChangeTo(size); m_size = size; } template void Vector::grow(size_t size) { ASSERT(size >= m_size); if (size > capacity()) expandCapacity(size); asanBufferSizeWillChangeTo(size); if (begin()) TypeOperations::initializeIfNonPOD(end(), begin() + size); m_size = size; } template inline void Vector::asanSetInitialBufferSizeTo(size_t size) { #if ASAN_ENABLED if (!buffer()) return; // This function resticts buffer access to only elements in [begin(), end()) range, making ASan detect an error // when accessing elements in [end(), 
endOfBuffer()) range. // A newly allocated buffer can be accessed without restrictions, so "old_mid" argument equals "end" argument. __sanitizer_annotate_contiguous_container(buffer(), endOfBuffer(), endOfBuffer(), buffer() + size); #else UNUSED_PARAM(size); #endif } template inline void Vector::asanSetBufferSizeToFullCapacity(size_t size) { #if ASAN_ENABLED if (!buffer()) return; // ASan requires that the annotation is returned to its initial state before deallocation. __sanitizer_annotate_contiguous_container(buffer(), endOfBuffer(), buffer() + size, endOfBuffer()); #else UNUSED_PARAM(size); #endif } template inline void Vector::asanBufferSizeWillChangeTo(size_t newSize) { #if ASAN_ENABLED if (!buffer()) return; // Change allowed range. __sanitizer_annotate_contiguous_container(buffer(), endOfBuffer(), buffer() + size(), buffer() + newSize); #else UNUSED_PARAM(newSize); #endif } template template bool Vector::reserveCapacity(size_t newCapacity) { static_assert(action == FailureAction::Crash || action == FailureAction::Report); if (newCapacity <= capacity()) return true; T* oldBuffer = begin(); T* oldEnd = end(); asanSetBufferSizeToFullCapacity(); bool success = Base::template allocateBuffer(newCapacity); if constexpr (action == FailureAction::Report) { if (UNLIKELY(!success)) { asanSetInitialBufferSizeTo(size()); return false; } } UNUSED_PARAM(success); ASSERT(begin()); asanSetInitialBufferSizeTo(size()); TypeOperations::move(oldBuffer, oldEnd, begin()); Base::deallocateBuffer(oldBuffer); return true; } template template inline bool Vector::reserveInitialCapacity(size_t initialCapacity) { static_assert(action == FailureAction::Crash || action == FailureAction::Report); ASSERT(!m_size); ASSERT(capacity() == inlineCapacity); if (initialCapacity <= inlineCapacity) return true; return Base::template allocateBuffer(initialCapacity); } template void Vector::shrinkCapacity(size_t newCapacity) { if (newCapacity >= capacity()) return; if (newCapacity < size()) 
// NOTE(review): Extraction has stripped every template parameter list in this chunk
// ("template" with no <...>, and spellings such as "VectorCopier::value, U>" which
// presumably were "VectorCopier<std::is_trivial<T>::value, U>"). The code tokens below
// are preserved exactly as found; only comments and line breaks have been added.
// TODO: restore the angle-bracket lists from the upstream WTF Vector.h before compiling.

// --- Continuation: tail of a capacity-shrinking member (header is above this chunk). ---
// Shrinks size first, widens the ASan annotation to full capacity so the buffer can be
// touched, then either reallocates in place (shouldReallocateBuffer) or allocates a new
// buffer and moves the elements across, finally releasing the old buffer and restoring
// the ASan annotation to the live size.
shrink(newCapacity); asanSetBufferSizeToFullCapacity(); T* oldBuffer = begin(); if (newCapacity > 0) { if (Base::shouldReallocateBuffer(newCapacity)) { Base::reallocateBuffer(newCapacity); asanSetInitialBufferSizeTo(size()); return; } T* oldEnd = end(); Base::allocateBuffer(newCapacity); if (begin() != oldBuffer) TypeOperations::move(oldBuffer, oldEnd, begin()); } Base::deallocateBuffer(oldBuffer); Base::restoreInlineBufferIfNeeded(); asanSetInitialBufferSizeTo(size()); }

// Appends dataSize elements copied from data. Grows via expandCapacity when needed;
// expandCapacity also re-seats `data` in case it aliased the old buffer. Under
// FailureAction::Report a failed expansion returns false; the `newSize < m_size`
// check catches size_t overflow of m_size + dataSize (CRASH or report per policy).
template template ALWAYS_INLINE bool Vector::append(const U* data, size_t dataSize) { static_assert(action == FailureAction::Crash || action == FailureAction::Report); if (!dataSize) return true; size_t newSize = m_size + dataSize; if (newSize > capacity()) { data = expandCapacity(newSize, data); if constexpr (action == FailureAction::Report) { if (UNLIKELY(!data)) return false; } ASSERT(begin()); } if (newSize < m_size) { if constexpr (action == FailureAction::Crash) CRASH(); else return false; } asanBufferSizeWillChangeTo(newSize); T* dest = end(); VectorCopier::value, U>::uninitializedCopy(data, std::addressof(data[dataSize]), dest); m_size = newSize; return true; }

// Range append without the capacity check: caller guarantees room (ASSERT only).
// Note the assert compares size() < capacity(), not size() + dataSize <= capacity().
template template ALWAYS_INLINE bool Vector::uncheckedAppend(const U* data, size_t dataSize) { static_assert(action == FailureAction::Crash || action == FailureAction::Report); if (!dataSize) return true; ASSERT(size() < capacity()); size_t newSize = m_size + dataSize; asanBufferSizeWillChangeTo(newSize); T* dest = end(); VectorCopier::value, U>::uninitializedCopy(data, std::addressof(data[dataSize]), dest); m_size = newSize; return true; }

// Single-element append: fast inline path when capacity remains, otherwise delegates
// to appendSlowCase which handles growth (and the value possibly aliasing the buffer).
template template ALWAYS_INLINE bool Vector::append(U&& value) { if (size() != capacity()) { asanBufferSizeWillChangeTo(m_size + 1); new (NotNull, end()) T(std::forward(value)); ++m_size; return true; } return appendSlowCase(std::forward(value)); }

// In-place construction of a new last element from constructor arguments; slow path
// taken only when the vector is full.
template template ALWAYS_INLINE bool Vector::constructAndAppend(Args&&... args) { if (size() != capacity()) { asanBufferSizeWillChangeTo(m_size + 1); new (NotNull, end()) T(std::forward(args)...); ++m_size; return true; } return constructAndAppendSlowCase(std::forward(args)...); }

// Growth path for append(U&&). Takes the value's address before expanding so that
// expandCapacity can return a re-seated pointer if `value` lived inside this vector's
// own buffer (self-append safety); constructs from *ptr afterwards.
template template bool Vector::appendSlowCase(U&& value) { static_assert(action == FailureAction::Crash || action == FailureAction::Report); ASSERT(size() == capacity()); auto ptr = const_cast::type>::type*>(std::addressof(value)); ptr = expandCapacity(size() + 1, ptr); if constexpr (action == FailureAction::Report) { if (UNLIKELY(!ptr)) return false; } ASSERT(begin()); asanBufferSizeWillChangeTo(m_size + 1); new (NotNull, end()) T(std::forward(*ptr)); ++m_size; return true; }

// Growth path for constructAndAppend. No alias re-seating is possible here (arguments
// are arbitrary), so it simply expands then placement-news at end().
// NOTE(review): unlike appendSlowCase, args that alias the buffer would dangle after
// expandCapacity — presumably callers never do that; verify against upstream.
template template bool Vector::constructAndAppendSlowCase(Args&&... args) { static_assert(action == FailureAction::Crash || action == FailureAction::Report); ASSERT(size() == capacity()); bool success = expandCapacity(size() + 1); if constexpr (action == FailureAction::Report) { if (UNLIKELY(!success)) return false; } UNUSED_PARAM(success); ASSERT(begin()); asanBufferSizeWillChangeTo(m_size + 1); new (NotNull, end()) T(std::forward(args)...); ++m_size; return true; }

// This version of append saves a branch in the case where you know that the
// vector's capacity is large enough for the append to succeed.
template template ALWAYS_INLINE void Vector::uncheckedAppend(U&& value) { ASSERT(size() < capacity()); asanBufferSizeWillChangeTo(m_size + 1); new (NotNull, end()) T(std::forward(value)); ++m_size; }

// Unchecked in-place construction at end(); caller guarantees spare capacity.
template template ALWAYS_INLINE void Vector::uncheckedConstructAndAppend(Args&&... args) { ASSERT(size() < capacity()); asanBufferSizeWillChangeTo(m_size + 1); new (NotNull, end()) T(std::forward(args)...); ++m_size; }

// Copy-appends all elements of another vector via the range append above.
template template inline void Vector::appendVector(const Vector& val) { append(val.begin(), val.size()); }

// Move-appends all elements of another vector: reserves once, then moves each item
// with the unchecked single-element path. `val` is left with moved-from elements.
template template inline void Vector::appendVector(Vector&& val) { size_t newSize = m_size + val.size(); if (newSize > capacity()) expandCapacity(newSize); for (auto& item : val) uncheckedAppend(WTFMove(item)); }

// Inserts dataSize copied elements at `position`: grows if needed (re-seating `data`
// against aliasing), shifts the tail up with moveOverlapping, then copy-constructs
// into the gap. `newSize < m_size` guards against size_t overflow.
template template void Vector::insert(size_t position, const U* data, size_t dataSize) { ASSERT_WITH_SECURITY_IMPLICATION(position <= size()); size_t newSize = m_size + dataSize; if (newSize > capacity()) { data = expandCapacity(newSize, data); ASSERT(begin()); } if (newSize < m_size) CRASH(); asanBufferSizeWillChangeTo(newSize); T* spot = begin() + position; TypeOperations::moveOverlapping(spot, end(), spot + dataSize); VectorCopier::value, U>::uninitializedCopy(data, std::addressof(data[dataSize]), spot); m_size = newSize; }

// Single-element insert; like appendSlowCase it goes through a pointer so that
// expandCapacity can fix it up if `value` aliases this vector's storage.
template template inline void Vector::insert(size_t position, U&& value) { ASSERT_WITH_SECURITY_IMPLICATION(position <= size()); auto ptr = const_cast::type>::type*>(std::addressof(value)); if (size() == capacity()) { ptr = expandCapacity(size() + 1, ptr); ASSERT(begin()); } asanBufferSizeWillChangeTo(m_size + 1); T* spot = begin() + position; TypeOperations::moveOverlapping(spot, end(), spot + 1); new (NotNull, spot) T(std::forward(*ptr)); ++m_size; }

// Inserts a whole vector's contents at `position` (copying).
template template inline void Vector::insertVector(size_t position, const Vector& val) { insert(position, val.begin(), val.size()); }

// Removes one element: destroy it, slide the tail down, shrink the ASan annotation.
template inline void Vector::remove(size_t position) { ASSERT_WITH_SECURITY_IMPLICATION(position < size()); T* spot = begin() + position; spot->~T(); TypeOperations::moveOverlapping(spot + 1, end(), spot); asanBufferSizeWillChangeTo(m_size - 1); --m_size; }

// Removes `length` elements starting at `position`: bulk-destruct the range, then
// slide the tail down over the hole.
template inline void Vector::remove(size_t position, size_t length) { ASSERT_WITH_SECURITY_IMPLICATION(position <= size()); ASSERT_WITH_SECURITY_IMPLICATION(position + length <= size()); T* beginSpot = begin() + position; T* endSpot = beginSpot + length; TypeOperations::destruct(beginSpot, endSpot); TypeOperations::moveOverlapping(endSpot, end(), beginSpot); asanBufferSizeWillChangeTo(m_size - length); m_size -= length; }

// Removes the first element equal to `value`; returns whether one was removed.
template template inline bool Vector::removeFirst(const U& value) { return removeFirstMatching([&value] (const T& current) { return current == value; }); }

// Removes the first element (at or after startIndex) satisfying the predicate.
template template inline bool Vector::removeFirstMatching(const MatchFunction& matches, size_t startIndex) { for (size_t i = startIndex; i < size(); ++i) { if (matches(at(i))) { remove(i); return true; } } return false; }

// Removes every element equal to `value`; returns the count removed.
template template inline unsigned Vector::removeAll(const U& value) { return removeAllMatching([&value] (const T& current) { return current == value; }); }

// Single-pass compaction: destroys each matching element in place and tracks the
// growing "hole" [holeBegin, holeEnd); runs of survivors between matches are slid
// down with moveOverlapping so each survivor moves at most once. Returns the number
// of elements removed.
template template inline unsigned Vector::removeAllMatching(const MatchFunction& matches, size_t startIndex) { iterator holeBegin = end(); iterator holeEnd = end(); unsigned matchCount = 0; for (auto it = begin() + startIndex, itEnd = end(); it < itEnd; ++it) { if (matches(*it)) { if (holeBegin == end()) holeBegin = it; else if (holeEnd != it) { TypeOperations::moveOverlapping(holeEnd, it, holeBegin); holeBegin += it - holeEnd; } holeEnd = it + 1; it->~T(); ++matchCount; } } if (holeEnd != end()) TypeOperations::moveOverlapping(holeEnd, end(), holeBegin); asanBufferSizeWillChangeTo(m_size - matchCount); m_size -= matchCount; return matchCount; }

// In-place reversal by swapping symmetric pairs.
template inline void Vector::reverse() { for (size_t i = 0; i < m_size / 2; ++i) std::swap(at(i), at(m_size - 1 - i)); }

// Returns a new vector holding mapFunction(element) for each element, sized exactly.
template template inline Vector Vector::map(MapFunction mapFunction) const { Vector result; result.reserveInitialCapacity(size()); for (size_t i = 0; i < size(); ++i) result.uncheckedAppend(mapFunction(at(i))); return result; }

// Transfers ownership of the heap buffer to the caller as a MallocPtr and empties
// the vector. If the data lived in the inline buffer (no heap buffer to release),
// a fresh malloc'd copy is handed out instead so callers always get a buffer.
// ASan annotations are widened first because MallocPtr cannot clear them on free.
template inline MallocPtr Vector::releaseBuffer() { // FIXME: Find a way to preserve annotations on the returned buffer.
// ASan requires that all annotations are removed before deallocation,
// and MallocPtr doesn't implement that.
asanSetBufferSizeToFullCapacity(); auto buffer = Base::releaseBuffer(); if (inlineCapacity && !buffer && m_size) { // If the vector had some data, but no buffer to release,
// that means it was using the inline buffer. In that case,
// we create a brand new buffer so the caller always gets one.
size_t bytes = m_size * sizeof(T); buffer = adoptMallocPtr(static_cast(Malloc::malloc(bytes))); memcpy(buffer.get(), data(), bytes); } m_size = 0; // FIXME: Should we call Base::restoreInlineBufferIfNeeded() here?
return buffer; }

// Debug-only per-element consistency check (no-op unless ASSERT_ENABLED).
template inline void Vector::checkConsistency() {
#if ASSERT_ENABLED
for (size_t i = 0; i < size(); ++i) ValueCheck::checkConsistency(at(i));
#endif
}

// Free-function swap forwarding to the member swap.
template inline void swap(Vector& a, Vector& b) { a.swap(b); }

// Element-wise equality: sizes first, then a bulk compare over the raw storage.
template bool operator==(const Vector& a, const Vector& b) { if (a.size() != b.size()) return false; return VectorTypeOperations::compare(a.data(), b.data(), a.size()); }

// Inequality in terms of operator==.
template inline bool operator!=(const Vector& a, const Vector& b) { return !(a == b); }

#if ASSERT_ENABLED
// ValueCheck specialization so nested vectors get recursively consistency-checked.
template struct ValueCheck> { typedef Vector TraitType; static void checkConsistency(const Vector& v) { v.checkConsistency(); } };
#endif // ASSERT_ENABLED

// Deep copy for cross-thread use: each element contributes its own isolatedCopy().
template template inline Vector Vector::isolatedCopy() const & { Vector copy; copy.reserveInitialCapacity(size()); for (const auto& element : *this) copy.uncheckedAppend(element.isolatedCopy()); return copy; }

// Rvalue overload: isolates elements in place and moves the whole vector out,
// avoiding a second allocation.
template template inline Vector Vector::isolatedCopy() && { for (auto iterator = begin(), iteratorEnd = end(); iterator < iteratorEnd; ++iterator) *iterator = WTFMove(*iterator).isolatedCopy(); return WTFMove(*this); }

// Collapses adjacent duplicates (per `func`) with std::unique, shrinks, and returns
// the new size. Like std::unique, only adjacent repeats are removed.
template size_t removeRepeatedElements(VectorType& vector, const Func& func) { auto end = std::unique(vector.begin(), vector.end(), func); size_t newSize = end - vector.begin(); vector.shrink(newSize); return newSize; }

// Convenience overload using operator== as the equivalence predicate.
template size_t removeRepeatedElements(Vector& vector) { return removeRepeatedElements(vector, [] (T& a, T& b) { return a == b; }); }

// Trait bundle describing an arbitrary iterable: its decayed type, iterator, and
// the iterator's value_type.
template struct CollectionInspector { using RealSourceType = typename std::remove_reference::type; using IteratorType = decltype(std::begin(std::declval())); using SourceItemType = typename std::iterator_traits::value_type; };

// Mapper (lvalue-source case): builds a Vector of mapFunction(item) over the source,
// reserving source.size() up front.
template struct Mapper { using SourceItemType = typename CollectionInspector::SourceItemType; using DestinationItemType = typename std::result_of::type; static Vector map(SourceType source, const MapFunction& mapFunction) { Vector result; // FIXME: Use std::size when available on all compilers.
result.reserveInitialCapacity(source.size()); for (auto& item : source) result.uncheckedAppend(mapFunction(item)); return result; } };

// Mapper specialization for rvalue sources: items are moved into the map function.
template struct Mapper::value>::type> { using SourceItemType = typename CollectionInspector::SourceItemType; using DestinationItemType = typename std::result_of::type; static Vector map(SourceType&& source, const MapFunction& mapFunction) { Vector result; // FIXME: Use std::size when available on all compilers.
result.reserveInitialCapacity(source.size()); for (auto& item : source) result.uncheckedAppend(mapFunction(WTFMove(item))); return result; } };

// Public entry point: WTF::map(source, f) -> Vector of mapped results, dispatching
// on the source's value category via the Mapper specializations above.
template Vector::DestinationItemType> map(SourceType&& source, MapFunction&& mapFunction) { return Mapper::map(std::forward(source), std::forward(mapFunction)); }

// Traits for compactMap: how to test a map-function result for "has a value" and
// how to extract that value. Primary template is declaration-only.
template struct CompactMapTraits { static bool hasValue(const MapFunctionReturnType&); template static ItemType extractValue(MapFunctionReturnType&&); };

// std::optional results: engaged optionals are kept, value moved out.
template struct CompactMapTraits> { using ItemType = T; static bool hasValue(const std::optional& returnValue) { return !!returnValue; } static ItemType extractValue(std::optional&& returnValue) { return WTFMove(*returnValue); } };

// RefPtr results: non-null pointers are kept, converted to Ref via releaseNonNull().
template struct CompactMapTraits> { using ItemType = Ref; static bool hasValue(const RefPtr& returnValue) { return !!returnValue; } static ItemType extractValue(RefPtr&& returnValue) { return returnValue.releaseNonNull(); } };

// CompactMapper (lvalue-source case): maps each item, keeps only results with a
// value, and shrinkToFit()s since the final count isn't known in advance.
template struct CompactMapper { using SourceItemType = typename CollectionInspector::SourceItemType; using ResultItemType = typename std::result_of::type; using DestinationItemType = typename CompactMapTraits::ItemType; static Vector compactMap(SourceType source, const MapFunction& mapFunction) { Vector result; for (auto& item : source) { auto itemResult = mapFunction(item); if (CompactMapTraits::hasValue(itemResult)) result.append(CompactMapTraits::extractValue(WTFMove(itemResult))); } result.shrinkToFit(); return result; } };

// CompactMapper specialization for rvalue sources: items are moved into the mapper.
template struct CompactMapper::value>::type> { using SourceItemType = typename CollectionInspector::SourceItemType; using ResultItemType = typename std::result_of::type; using DestinationItemType = typename CompactMapTraits::ItemType; static Vector compactMap(SourceType source, const MapFunction& mapFunction) { Vector result; for (auto& item : source) { auto itemResult = mapFunction(WTFMove(item)); if (CompactMapTraits::hasValue(itemResult)) result.append(CompactMapTraits::extractValue(WTFMove(itemResult))); } result.shrinkToFit(); return result; } };

// Public entry point: map + filter-out-empty in one pass.
template Vector::DestinationItemType> compactMap(SourceType&& source, MapFunction&& mapFunction) { return CompactMapper::compactMap(std::forward(source), std::forward(mapFunction)); }

// Copies a collection into a caller-chosen vector specialization (e.g. with a
// specific inline capacity), reserving collection.size() first.
template inline auto copyToVectorSpecialization(const Collection& collection) -> DestinationVector { DestinationVector result; // FIXME: Use std::size when available on all compilers.
result.reserveInitialCapacity(collection.size()); for (auto& item : collection) result.uncheckedAppend(item); return result; }

// Copies a collection into a Vector of an explicitly chosen element type, using
// WTF::map with an identity-conversion lambda.
template inline auto copyToVectorOf(const Collection& collection) -> Vector { return WTF::map(collection, [] (auto&& v) -> DestinationItemType { return v; }); }

// Computes the natural element type for copyToVector: the collection's item type
// stripped of const/volatile.
template struct CopyToVectorResult { using Type = typename std::remove_cv::SourceItemType>::type; };

// Copies any iterable into a Vector of its (cv-stripped) element type.
template inline auto copyToVector(const Collection& collection) -> Vector::Type> { return copyToVectorOf::Type>(collection); }

} // namespace WTF

using WTF::UnsafeVectorOverflow; using WTF::Vector; using WTF::copyToVector; using WTF::copyToVectorOf; using WTF::copyToVectorSpecialization; using WTF::compactMap; using WTF::removeRepeatedElements;