summaryrefslogtreecommitdiffhomepage
path: root/misc/benchmarks/external
diff options
context:
space:
mode:
authorTyge Lovset <[email protected]>2022-12-20 23:31:51 +0100
committerTyge Lovset <[email protected]>2022-12-20 23:31:51 +0100
commit5f57d597cd27aef55adbcb3b452973b0c6e33667 (patch)
treedfd59c2fd0e36a6ef37912a9d0cc5a65970f1524 /misc/benchmarks/external
parent1763be8c8cbbc0896477fcf924edd4180d1345a9 (diff)
downloadSTC-modified-5f57d597cd27aef55adbcb3b452973b0c6e33667.tar.gz
STC-modified-5f57d597cd27aef55adbcb3b452973b0c6e33667.zip
Restructured folders: examples, benchmarks, tests into misc folder.
Diffstat (limited to 'misc/benchmarks/external')
-rw-r--r--misc/benchmarks/external/ankerl/robin_hood.h2544
-rw-r--r--misc/benchmarks/external/ankerl/unordered_dense.h1503
-rw-r--r--misc/benchmarks/external/emhash/hash_table7.hpp1876
-rw-r--r--misc/benchmarks/external/khash.h595
-rw-r--r--misc/benchmarks/external/skarupke/flat_hash_map.hpp1496
-rw-r--r--misc/benchmarks/external/tsl/robin_growth_policy.h406
-rw-r--r--misc/benchmarks/external/tsl/robin_hash.h1639
-rw-r--r--misc/benchmarks/external/tsl/robin_map.h807
-rw-r--r--misc/benchmarks/external/update.sh38
9 files changed, 10904 insertions, 0 deletions
diff --git a/misc/benchmarks/external/ankerl/robin_hood.h b/misc/benchmarks/external/ankerl/robin_hood.h
new file mode 100644
index 00000000..0af031f5
--- /dev/null
+++ b/misc/benchmarks/external/ankerl/robin_hood.h
@@ -0,0 +1,2544 @@
+// ______ _____ ______ _________
+// ______________ ___ /_ ___(_)_______ ___ /_ ______ ______ ______ /
+// __ ___/_ __ \__ __ \__ / __ __ \ __ __ \_ __ \_ __ \_ __ /
+// _ / / /_/ /_ /_/ /_ / _ / / / _ / / // /_/ // /_/ // /_/ /
+// /_/ \____/ /_.___/ /_/ /_/ /_/ ________/_/ /_/ \____/ \____/ \__,_/
+// _/_____/
+//
+// Fast & memory efficient hashtable based on robin hood hashing for C++11/14/17/20
+// https://github.com/martinus/robin-hood-hashing
+//
+// Licensed under the MIT License <http://opensource.org/licenses/MIT>.
+// SPDX-License-Identifier: MIT
+// Copyright (c) 2018-2021 Martin Ankerl <http://martin.ankerl.com>
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+#ifndef ROBIN_HOOD_H_INCLUDED
+#define ROBIN_HOOD_H_INCLUDED
+
+// see https://semver.org/
+#define ROBIN_HOOD_VERSION_MAJOR 3 // for incompatible API changes
+#define ROBIN_HOOD_VERSION_MINOR 11 // for adding functionality in a backwards-compatible manner
+#define ROBIN_HOOD_VERSION_PATCH 5 // for backwards-compatible bug fixes
+
+#include <algorithm>
+#include <cstdlib>
+#include <cstring>
+#include <functional>
+#include <limits>
+#include <memory> // only to support hash of smart pointers
+#include <stdexcept>
+#include <string>
+#include <type_traits>
+#include <utility>
+#if __cplusplus >= 201703L
+# include <string_view>
+#endif
+
+// #define ROBIN_HOOD_LOG_ENABLED
+#ifdef ROBIN_HOOD_LOG_ENABLED
+# include <iostream>
+# define ROBIN_HOOD_LOG(...) \
+ std::cout << __FUNCTION__ << "@" << __LINE__ << ": " << __VA_ARGS__ << std::endl;
+#else
+# define ROBIN_HOOD_LOG(x)
+#endif
+
+// #define ROBIN_HOOD_TRACE_ENABLED
+#ifdef ROBIN_HOOD_TRACE_ENABLED
+# include <iostream>
+# define ROBIN_HOOD_TRACE(...) \
+ std::cout << __FUNCTION__ << "@" << __LINE__ << ": " << __VA_ARGS__ << std::endl;
+#else
+# define ROBIN_HOOD_TRACE(x)
+#endif
+
+// #define ROBIN_HOOD_COUNT_ENABLED
+#ifdef ROBIN_HOOD_COUNT_ENABLED
+# include <iostream>
+# define ROBIN_HOOD_COUNT(x) ++counts().x;
+namespace robin_hood {
+struct Counts {
+ uint64_t shiftUp{};
+ uint64_t shiftDown{};
+};
+inline std::ostream& operator<<(std::ostream& os, Counts const& c) {
+ return os << c.shiftUp << " shiftUp" << std::endl << c.shiftDown << " shiftDown" << std::endl;
+}
+
+static Counts& counts() {
+ static Counts counts{};
+ return counts;
+}
+} // namespace robin_hood
+#else
+# define ROBIN_HOOD_COUNT(x)
+#endif
+
+// all non-argument macros should use this facility. See
+// https://www.fluentcpp.com/2019/05/28/better-macros-better-flags/
+#define ROBIN_HOOD(x) ROBIN_HOOD_PRIVATE_DEFINITION_##x()
+
+// mark unused members with this macro
+#define ROBIN_HOOD_UNUSED(identifier)
+
+// bitness
+#if SIZE_MAX == UINT32_MAX
+# define ROBIN_HOOD_PRIVATE_DEFINITION_BITNESS() 32
+#elif SIZE_MAX == UINT64_MAX
+# define ROBIN_HOOD_PRIVATE_DEFINITION_BITNESS() 64
+#else
+# error Unsupported bitness
+#endif
+
+// endianness
+#ifdef _MSC_VER
+# define ROBIN_HOOD_PRIVATE_DEFINITION_LITTLE_ENDIAN() 1
+# define ROBIN_HOOD_PRIVATE_DEFINITION_BIG_ENDIAN() 0
+#else
+# define ROBIN_HOOD_PRIVATE_DEFINITION_LITTLE_ENDIAN() \
+ (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
+# define ROBIN_HOOD_PRIVATE_DEFINITION_BIG_ENDIAN() (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
+#endif
+
+// inline
+#ifdef _MSC_VER
+# define ROBIN_HOOD_PRIVATE_DEFINITION_NOINLINE() __declspec(noinline)
+#else
+# define ROBIN_HOOD_PRIVATE_DEFINITION_NOINLINE() __attribute__((noinline))
+#endif
+
+// exceptions
+#if !defined(__cpp_exceptions) && !defined(__EXCEPTIONS) && !defined(_CPPUNWIND)
+# define ROBIN_HOOD_PRIVATE_DEFINITION_HAS_EXCEPTIONS() 0
+#else
+# define ROBIN_HOOD_PRIVATE_DEFINITION_HAS_EXCEPTIONS() 1
+#endif
+
+// count leading/trailing bits
+#if !defined(ROBIN_HOOD_DISABLE_INTRINSICS)
+# ifdef _MSC_VER
+# if ROBIN_HOOD(BITNESS) == 32
+# define ROBIN_HOOD_PRIVATE_DEFINITION_BITSCANFORWARD() _BitScanForward
+# else
+# define ROBIN_HOOD_PRIVATE_DEFINITION_BITSCANFORWARD() _BitScanForward64
+# endif
+# include <intrin.h>
+# pragma intrinsic(ROBIN_HOOD(BITSCANFORWARD))
+# define ROBIN_HOOD_COUNT_TRAILING_ZEROES(x) \
+ [](size_t mask) noexcept -> int { \
+ unsigned long index; \
+ return ROBIN_HOOD(BITSCANFORWARD)(&index, mask) ? static_cast<int>(index) \
+ : ROBIN_HOOD(BITNESS); \
+ }(x)
+# else
+# if ROBIN_HOOD(BITNESS) == 32
+# define ROBIN_HOOD_PRIVATE_DEFINITION_CTZ() __builtin_ctzl
+# define ROBIN_HOOD_PRIVATE_DEFINITION_CLZ() __builtin_clzl
+# else
+# define ROBIN_HOOD_PRIVATE_DEFINITION_CTZ() __builtin_ctzll
+# define ROBIN_HOOD_PRIVATE_DEFINITION_CLZ() __builtin_clzll
+# endif
+# define ROBIN_HOOD_COUNT_LEADING_ZEROES(x) ((x) ? ROBIN_HOOD(CLZ)(x) : ROBIN_HOOD(BITNESS))
+# define ROBIN_HOOD_COUNT_TRAILING_ZEROES(x) ((x) ? ROBIN_HOOD(CTZ)(x) : ROBIN_HOOD(BITNESS))
+# endif
+#endif
+
+// fallthrough
+#ifndef __has_cpp_attribute // For backwards compatibility
+# define __has_cpp_attribute(x) 0
+#endif
+#if __has_cpp_attribute(clang::fallthrough)
+# define ROBIN_HOOD_PRIVATE_DEFINITION_FALLTHROUGH() [[clang::fallthrough]]
+#elif __has_cpp_attribute(gnu::fallthrough)
+# define ROBIN_HOOD_PRIVATE_DEFINITION_FALLTHROUGH() [[gnu::fallthrough]]
+#else
+# define ROBIN_HOOD_PRIVATE_DEFINITION_FALLTHROUGH()
+#endif
+
+// likely/unlikely
+#ifdef _MSC_VER
+# define ROBIN_HOOD_LIKELY(condition) condition
+# define ROBIN_HOOD_UNLIKELY(condition) condition
+#else
+# define ROBIN_HOOD_LIKELY(condition) __builtin_expect(condition, 1)
+# define ROBIN_HOOD_UNLIKELY(condition) __builtin_expect(condition, 0)
+#endif
+
+// detect if native wchar_t type is available in MSVC
+#ifdef _MSC_VER
+# ifdef _NATIVE_WCHAR_T_DEFINED
+# define ROBIN_HOOD_PRIVATE_DEFINITION_HAS_NATIVE_WCHART() 1
+# else
+# define ROBIN_HOOD_PRIVATE_DEFINITION_HAS_NATIVE_WCHART() 0
+# endif
+#else
+# define ROBIN_HOOD_PRIVATE_DEFINITION_HAS_NATIVE_WCHART() 1
+#endif
+
+// detect if MSVC supports the pair(std::piecewise_construct_t,...) constructor being constexpr
+#ifdef _MSC_VER
+# if _MSC_VER <= 1900
+# define ROBIN_HOOD_PRIVATE_DEFINITION_BROKEN_CONSTEXPR() 1
+# else
+# define ROBIN_HOOD_PRIVATE_DEFINITION_BROKEN_CONSTEXPR() 0
+# endif
+#else
+# define ROBIN_HOOD_PRIVATE_DEFINITION_BROKEN_CONSTEXPR() 0
+#endif
+
+// workaround missing "is_trivially_copyable" in g++ < 5.0
+// See https://stackoverflow.com/a/31798726/48181
+#if defined(__GNUC__) && __GNUC__ < 5
+# define ROBIN_HOOD_IS_TRIVIALLY_COPYABLE(...) __has_trivial_copy(__VA_ARGS__)
+#else
+# define ROBIN_HOOD_IS_TRIVIALLY_COPYABLE(...) std::is_trivially_copyable<__VA_ARGS__>::value
+#endif
+
+// helpers for C++ versions, see https://gcc.gnu.org/onlinedocs/cpp/Standard-Predefined-Macros.html
+#define ROBIN_HOOD_PRIVATE_DEFINITION_CXX() __cplusplus
+#define ROBIN_HOOD_PRIVATE_DEFINITION_CXX98() 199711L
+#define ROBIN_HOOD_PRIVATE_DEFINITION_CXX11() 201103L
+#define ROBIN_HOOD_PRIVATE_DEFINITION_CXX14() 201402L
+#define ROBIN_HOOD_PRIVATE_DEFINITION_CXX17() 201703L
+
+#if ROBIN_HOOD(CXX) >= ROBIN_HOOD(CXX17)
+# define ROBIN_HOOD_PRIVATE_DEFINITION_NODISCARD() [[nodiscard]]
+#else
+# define ROBIN_HOOD_PRIVATE_DEFINITION_NODISCARD()
+#endif
+
+namespace robin_hood {
+
+#if ROBIN_HOOD(CXX) >= ROBIN_HOOD(CXX14)
+# define ROBIN_HOOD_STD std
+#else
+
+// c++11 compatibility layer
+namespace ROBIN_HOOD_STD {
+template <class T>
+struct alignment_of
+ : std::integral_constant<std::size_t, alignof(typename std::remove_all_extents<T>::type)> {};
+
+template <class T, T... Ints>
+class integer_sequence {
+public:
+ using value_type = T;
+ static_assert(std::is_integral<value_type>::value, "not integral type");
+ static constexpr std::size_t size() noexcept {
+ return sizeof...(Ints);
+ }
+};
+template <std::size_t... Inds>
+using index_sequence = integer_sequence<std::size_t, Inds...>;
+
+namespace detail_ {
+template <class T, T Begin, T End, bool>
+struct IntSeqImpl {
+ using TValue = T;
+ static_assert(std::is_integral<TValue>::value, "not integral type");
+ static_assert(Begin >= 0 && Begin < End, "unexpected argument (Begin<0 || Begin<=End)");
+
+ template <class, class>
+ struct IntSeqCombiner;
+
+ template <TValue... Inds0, TValue... Inds1>
+ struct IntSeqCombiner<integer_sequence<TValue, Inds0...>, integer_sequence<TValue, Inds1...>> {
+ using TResult = integer_sequence<TValue, Inds0..., Inds1...>;
+ };
+
+ using TResult =
+ typename IntSeqCombiner<typename IntSeqImpl<TValue, Begin, Begin + (End - Begin) / 2,
+ (End - Begin) / 2 == 1>::TResult,
+ typename IntSeqImpl<TValue, Begin + (End - Begin) / 2, End,
+ (End - Begin + 1) / 2 == 1>::TResult>::TResult;
+};
+
+template <class T, T Begin>
+struct IntSeqImpl<T, Begin, Begin, false> {
+ using TValue = T;
+ static_assert(std::is_integral<TValue>::value, "not integral type");
+ static_assert(Begin >= 0, "unexpected argument (Begin<0)");
+ using TResult = integer_sequence<TValue>;
+};
+
+template <class T, T Begin, T End>
+struct IntSeqImpl<T, Begin, End, true> {
+ using TValue = T;
+ static_assert(std::is_integral<TValue>::value, "not integral type");
+ static_assert(Begin >= 0, "unexpected argument (Begin<0)");
+ using TResult = integer_sequence<TValue, Begin>;
+};
+} // namespace detail_
+
+template <class T, T N>
+using make_integer_sequence = typename detail_::IntSeqImpl<T, 0, N, (N - 0) == 1>::TResult;
+
+template <std::size_t N>
+using make_index_sequence = make_integer_sequence<std::size_t, N>;
+
+template <class... T>
+using index_sequence_for = make_index_sequence<sizeof...(T)>;
+
+} // namespace ROBIN_HOOD_STD
+
+#endif
+
+namespace detail {
+
+// make sure we static_cast to the correct type for hash_int
+#if ROBIN_HOOD(BITNESS) == 64
+using SizeT = uint64_t;
+#else
+using SizeT = uint32_t;
+#endif
+
+template <typename T>
+T rotr(T x, unsigned k) {
+ return (x >> k) | (x << (8U * sizeof(T) - k));
+}
+
+// This cast gets rid of warnings like "cast from 'uint8_t*' {aka 'unsigned char*'} to
+// 'uint64_t*' {aka 'long unsigned int*'} increases required alignment of target type". Use with
+// care!
+template <typename T>
+inline T reinterpret_cast_no_cast_align_warning(void* ptr) noexcept {
+ return reinterpret_cast<T>(ptr);
+}
+
+template <typename T>
+inline T reinterpret_cast_no_cast_align_warning(void const* ptr) noexcept {
+ return reinterpret_cast<T>(ptr);
+}
+
+// make sure this is not inlined as it is slow and dramatically enlarges code, thus making other
+// inlinings more difficult. Throws are also generally the slow path.
+template <typename E, typename... Args>
+[[noreturn]] ROBIN_HOOD(NOINLINE)
+#if ROBIN_HOOD(HAS_EXCEPTIONS)
+ void doThrow(Args&&... args) {
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-array-to-pointer-decay)
+ throw E(std::forward<Args>(args)...);
+}
+#else
+ void doThrow(Args&&... ROBIN_HOOD_UNUSED(args) /*unused*/) {
+ abort();
+}
+#endif
+
+template <typename E, typename T, typename... Args>
+T* assertNotNull(T* t, Args&&... args) {
+ if (ROBIN_HOOD_UNLIKELY(nullptr == t)) {
+ doThrow<E>(std::forward<Args>(args)...);
+ }
+ return t;
+}
+
+template <typename T>
+inline T unaligned_load(void const* ptr) noexcept {
+ // using memcpy so we don't get into unaligned load problems.
+ // compiler should optimize this very well anyways.
+ T t;
+ std::memcpy(&t, ptr, sizeof(T));
+ return t;
+}
+
+// Allocates bulks of memory for objects of type T. This deallocates the memory in the destructor,
+// and keeps a linked list of the allocated memory around. Overhead per allocation is the size of a
+// pointer.
+template <typename T, size_t MinNumAllocs = 4, size_t MaxNumAllocs = 256>
+class BulkPoolAllocator {
+public:
+ BulkPoolAllocator() noexcept = default;
+
+ // does not copy anything, just creates a new allocator.
+ BulkPoolAllocator(const BulkPoolAllocator& ROBIN_HOOD_UNUSED(o) /*unused*/) noexcept
+ : mHead(nullptr)
+ , mListForFree(nullptr) {}
+
+ BulkPoolAllocator(BulkPoolAllocator&& o) noexcept
+ : mHead(o.mHead)
+ , mListForFree(o.mListForFree) {
+ o.mListForFree = nullptr;
+ o.mHead = nullptr;
+ }
+
+ BulkPoolAllocator& operator=(BulkPoolAllocator&& o) noexcept {
+ reset();
+ mHead = o.mHead;
+ mListForFree = o.mListForFree;
+ o.mListForFree = nullptr;
+ o.mHead = nullptr;
+ return *this;
+ }
+
+ BulkPoolAllocator&
+ // NOLINTNEXTLINE(bugprone-unhandled-self-assignment,cert-oop54-cpp)
+ operator=(const BulkPoolAllocator& ROBIN_HOOD_UNUSED(o) /*unused*/) noexcept {
+ // does not do anything
+ return *this;
+ }
+
+ ~BulkPoolAllocator() noexcept {
+ reset();
+ }
+
+ // Deallocates all allocated memory.
+ void reset() noexcept {
+ while (mListForFree) {
+ T* tmp = *mListForFree;
+ ROBIN_HOOD_LOG("std::free")
+ std::free(mListForFree);
+ mListForFree = reinterpret_cast_no_cast_align_warning<T**>(tmp);
+ }
+ mHead = nullptr;
+ }
+
+ // allocates, but does NOT initialize. Use in-place new constructor, e.g.
+ // T* obj = pool.allocate();
+ // ::new (static_cast<void*>(obj)) T();
+ T* allocate() {
+ T* tmp = mHead;
+ if (!tmp) {
+ tmp = performAllocation();
+ }
+
+ mHead = *reinterpret_cast_no_cast_align_warning<T**>(tmp);
+ return tmp;
+ }
+
+ // does not actually deallocate but puts it in store.
+ // make sure you have already called the destructor! e.g. with
+ // obj->~T();
+ // pool.deallocate(obj);
+ void deallocate(T* obj) noexcept {
+ *reinterpret_cast_no_cast_align_warning<T**>(obj) = mHead;
+ mHead = obj;
+ }
+
+ // Adds an already allocated block of memory to the allocator. This allocator is from now on
+ // responsible for freeing the data (with free()). If the provided data is not large enough to
+ // make use of, it is immediately freed. Otherwise it is reused and freed in the destructor.
+ void addOrFree(void* ptr, const size_t numBytes) noexcept {
+ // calculate number of available elements in ptr
+ if (numBytes < ALIGNMENT + ALIGNED_SIZE) {
+ // not enough data for at least one element. Free and return.
+ ROBIN_HOOD_LOG("std::free")
+ std::free(ptr);
+ } else {
+ ROBIN_HOOD_LOG("add to buffer")
+ add(ptr, numBytes);
+ }
+ }
+
+ void swap(BulkPoolAllocator<T, MinNumAllocs, MaxNumAllocs>& other) noexcept {
+ using std::swap;
+ swap(mHead, other.mHead);
+ swap(mListForFree, other.mListForFree);
+ }
+
+private:
+ // iterates the list of allocated memory to calculate how many to alloc next.
+ // Recalculating this each time saves us a size_t member.
+ // This ignores the fact that memory blocks might have been added manually with addOrFree. In
+ // practice, this should not matter much.
+ ROBIN_HOOD(NODISCARD) size_t calcNumElementsToAlloc() const noexcept {
+ auto tmp = mListForFree;
+ size_t numAllocs = MinNumAllocs;
+
+ while (numAllocs * 2 <= MaxNumAllocs && tmp) {
+ auto x = reinterpret_cast<T***>(tmp);
+ tmp = *x;
+ numAllocs *= 2;
+ }
+
+ return numAllocs;
+ }
+
+ // WARNING: Underflow if numBytes < ALIGNMENT! This is guarded in addOrFree().
+ void add(void* ptr, const size_t numBytes) noexcept {
+ const size_t numElements = (numBytes - ALIGNMENT) / ALIGNED_SIZE;
+
+ auto data = reinterpret_cast<T**>(ptr);
+
+ // link free list
+ auto x = reinterpret_cast<T***>(data);
+ *x = mListForFree;
+ mListForFree = data;
+
+ // create linked list for newly allocated data
+ auto* const headT =
+ reinterpret_cast_no_cast_align_warning<T*>(reinterpret_cast<char*>(ptr) + ALIGNMENT);
+
+ auto* const head = reinterpret_cast<char*>(headT);
+
+ // Visual Studio compiler automatically unrolls this loop, which is pretty cool
+ for (size_t i = 0; i < numElements; ++i) {
+ *reinterpret_cast_no_cast_align_warning<char**>(head + i * ALIGNED_SIZE) =
+ head + (i + 1) * ALIGNED_SIZE;
+ }
+
+ // last one points to 0
+ *reinterpret_cast_no_cast_align_warning<T**>(head + (numElements - 1) * ALIGNED_SIZE) =
+ mHead;
+ mHead = headT;
+ }
+
+ // Called when no memory is available (mHead == 0).
+ // Don't inline this slow path.
+ ROBIN_HOOD(NOINLINE) T* performAllocation() {
+ size_t const numElementsToAlloc = calcNumElementsToAlloc();
+
+ // alloc new memory: [prev |T, T, ... T]
+ size_t const bytes = ALIGNMENT + ALIGNED_SIZE * numElementsToAlloc;
+ ROBIN_HOOD_LOG("std::malloc " << bytes << " = " << ALIGNMENT << " + " << ALIGNED_SIZE
+ << " * " << numElementsToAlloc)
+ add(assertNotNull<std::bad_alloc>(std::malloc(bytes)), bytes);
+ return mHead;
+ }
+
+ // enforce byte alignment of the T's
+#if ROBIN_HOOD(CXX) >= ROBIN_HOOD(CXX14)
+ static constexpr size_t ALIGNMENT =
+ (std::max)(std::alignment_of<T>::value, std::alignment_of<T*>::value);
+#else
+ static const size_t ALIGNMENT =
+ (ROBIN_HOOD_STD::alignment_of<T>::value > ROBIN_HOOD_STD::alignment_of<T*>::value)
+ ? ROBIN_HOOD_STD::alignment_of<T>::value
+ : +ROBIN_HOOD_STD::alignment_of<T*>::value; // the + is for walkarround
+#endif
+
+ static constexpr size_t ALIGNED_SIZE = ((sizeof(T) - 1) / ALIGNMENT + 1) * ALIGNMENT;
+
+ static_assert(MinNumAllocs >= 1, "MinNumAllocs");
+ static_assert(MaxNumAllocs >= MinNumAllocs, "MaxNumAllocs");
+ static_assert(ALIGNED_SIZE >= sizeof(T*), "ALIGNED_SIZE");
+ static_assert(0 == (ALIGNED_SIZE % sizeof(T*)), "ALIGNED_SIZE mod");
+ static_assert(ALIGNMENT >= sizeof(T*), "ALIGNMENT");
+
+ T* mHead{nullptr};
+ T** mListForFree{nullptr};
+};
+
+template <typename T, size_t MinSize, size_t MaxSize, bool IsFlat>
+struct NodeAllocator;
+
+// dummy allocator that does nothing
+template <typename T, size_t MinSize, size_t MaxSize>
+struct NodeAllocator<T, MinSize, MaxSize, true> {
+
+ // we are not using the data, so just free it.
+ void addOrFree(void* ptr, size_t ROBIN_HOOD_UNUSED(numBytes) /*unused*/) noexcept {
+ ROBIN_HOOD_LOG("std::free")
+ std::free(ptr);
+ }
+};
+
+template <typename T, size_t MinSize, size_t MaxSize>
+struct NodeAllocator<T, MinSize, MaxSize, false> : public BulkPoolAllocator<T, MinSize, MaxSize> {};
+
+// c++14 doesn't have is_nothrow_swappable, and clang++ 6.0.1 doesn't like it either, so I'm making
+// my own here.
+namespace swappable {
+#if ROBIN_HOOD(CXX) < ROBIN_HOOD(CXX17)
+using std::swap;
+template <typename T>
+struct nothrow {
+ static const bool value = noexcept(swap(std::declval<T&>(), std::declval<T&>()));
+};
+#else
+template <typename T>
+struct nothrow {
+ static const bool value = std::is_nothrow_swappable<T>::value;
+};
+#endif
+} // namespace swappable
+
+} // namespace detail
+
+struct is_transparent_tag {};
+
+// A custom pair implementation is used in the map because std::pair is not is_trivially_copyable,
+// which means it would not be allowed to be used in std::memcpy. This struct is copyable, which is
+// also tested.
+template <typename T1, typename T2>
+struct pair {
+ using first_type = T1;
+ using second_type = T2;
+
+ template <typename U1 = T1, typename U2 = T2,
+ typename = typename std::enable_if<std::is_default_constructible<U1>::value &&
+ std::is_default_constructible<U2>::value>::type>
+ constexpr pair() noexcept(noexcept(U1()) && noexcept(U2()))
+ : first()
+ , second() {}
+
+ // pair constructors are explicit so we don't accidentally call this ctor when we don't have to.
+ explicit constexpr pair(std::pair<T1, T2> const& o) noexcept(
+ noexcept(T1(std::declval<T1 const&>())) && noexcept(T2(std::declval<T2 const&>())))
+ : first(o.first)
+ , second(o.second) {}
+
+ // pair constructors are explicit so we don't accidentally call this ctor when we don't have to.
+ explicit constexpr pair(std::pair<T1, T2>&& o) noexcept(noexcept(
+ T1(std::move(std::declval<T1&&>()))) && noexcept(T2(std::move(std::declval<T2&&>()))))
+ : first(std::move(o.first))
+ , second(std::move(o.second)) {}
+
+ constexpr pair(T1&& a, T2&& b) noexcept(noexcept(
+ T1(std::move(std::declval<T1&&>()))) && noexcept(T2(std::move(std::declval<T2&&>()))))
+ : first(std::move(a))
+ , second(std::move(b)) {}
+
+ template <typename U1, typename U2>
+ constexpr pair(U1&& a, U2&& b) noexcept(noexcept(T1(std::forward<U1>(
+ std::declval<U1&&>()))) && noexcept(T2(std::forward<U2>(std::declval<U2&&>()))))
+ : first(std::forward<U1>(a))
+ , second(std::forward<U2>(b)) {}
+
+ template <typename... U1, typename... U2>
+ // MSVC 2015 produces error "C2476: ‘constexpr’ constructor does not initialize all members"
+ // if this constructor is constexpr
+#if !ROBIN_HOOD(BROKEN_CONSTEXPR)
+ constexpr
+#endif
+ pair(std::piecewise_construct_t /*unused*/, std::tuple<U1...> a,
+ std::tuple<U2...>
+ b) noexcept(noexcept(pair(std::declval<std::tuple<U1...>&>(),
+ std::declval<std::tuple<U2...>&>(),
+ ROBIN_HOOD_STD::index_sequence_for<U1...>(),
+ ROBIN_HOOD_STD::index_sequence_for<U2...>())))
+ : pair(a, b, ROBIN_HOOD_STD::index_sequence_for<U1...>(),
+ ROBIN_HOOD_STD::index_sequence_for<U2...>()) {
+ }
+
+ // constructor called from the std::piecewise_construct_t ctor
+ template <typename... U1, size_t... I1, typename... U2, size_t... I2>
+ pair(std::tuple<U1...>& a, std::tuple<U2...>& b, ROBIN_HOOD_STD::index_sequence<I1...> /*unused*/, ROBIN_HOOD_STD::index_sequence<I2...> /*unused*/) noexcept(
+ noexcept(T1(std::forward<U1>(std::get<I1>(
+ std::declval<std::tuple<
+ U1...>&>()))...)) && noexcept(T2(std::
+ forward<U2>(std::get<I2>(
+ std::declval<std::tuple<U2...>&>()))...)))
+ : first(std::forward<U1>(std::get<I1>(a))...)
+ , second(std::forward<U2>(std::get<I2>(b))...) {
+ // make visual studio compiler happy about warning about unused a & b.
+ // Visual studio's pair implementation disables warning 4100.
+ (void)a;
+ (void)b;
+ }
+
+ void swap(pair<T1, T2>& o) noexcept((detail::swappable::nothrow<T1>::value) &&
+ (detail::swappable::nothrow<T2>::value)) {
+ using std::swap;
+ swap(first, o.first);
+ swap(second, o.second);
+ }
+
+ T1 first; // NOLINT(misc-non-private-member-variables-in-classes)
+ T2 second; // NOLINT(misc-non-private-member-variables-in-classes)
+};
+
+template <typename A, typename B>
+inline void swap(pair<A, B>& a, pair<A, B>& b) noexcept(
+ noexcept(std::declval<pair<A, B>&>().swap(std::declval<pair<A, B>&>()))) {
+ a.swap(b);
+}
+
+template <typename A, typename B>
+inline constexpr bool operator==(pair<A, B> const& x, pair<A, B> const& y) {
+ return (x.first == y.first) && (x.second == y.second);
+}
+template <typename A, typename B>
+inline constexpr bool operator!=(pair<A, B> const& x, pair<A, B> const& y) {
+ return !(x == y);
+}
+template <typename A, typename B>
+inline constexpr bool operator<(pair<A, B> const& x, pair<A, B> const& y) noexcept(noexcept(
+ std::declval<A const&>() < std::declval<A const&>()) && noexcept(std::declval<B const&>() <
+ std::declval<B const&>())) {
+ return x.first < y.first || (!(y.first < x.first) && x.second < y.second);
+}
+template <typename A, typename B>
+inline constexpr bool operator>(pair<A, B> const& x, pair<A, B> const& y) {
+ return y < x;
+}
+template <typename A, typename B>
+inline constexpr bool operator<=(pair<A, B> const& x, pair<A, B> const& y) {
+ return !(x > y);
+}
+template <typename A, typename B>
+inline constexpr bool operator>=(pair<A, B> const& x, pair<A, B> const& y) {
+ return !(x < y);
+}
+
+inline size_t hash_bytes(void const* ptr, size_t len) noexcept {
+ static constexpr uint64_t m = UINT64_C(0xc6a4a7935bd1e995);
+ static constexpr uint64_t seed = UINT64_C(0xe17a1465);
+ static constexpr unsigned int r = 47;
+
+ auto const* const data64 = static_cast<uint64_t const*>(ptr);
+ uint64_t h = seed ^ (len * m);
+
+ size_t const n_blocks = len / 8;
+ for (size_t i = 0; i < n_blocks; ++i) {
+ auto k = detail::unaligned_load<uint64_t>(data64 + i);
+
+ k *= m;
+ k ^= k >> r;
+ k *= m;
+
+ h ^= k;
+ h *= m;
+ }
+
+ auto const* const data8 = reinterpret_cast<uint8_t const*>(data64 + n_blocks);
+ switch (len & 7U) {
+ case 7:
+ h ^= static_cast<uint64_t>(data8[6]) << 48U;
+ ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
+ case 6:
+ h ^= static_cast<uint64_t>(data8[5]) << 40U;
+ ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
+ case 5:
+ h ^= static_cast<uint64_t>(data8[4]) << 32U;
+ ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
+ case 4:
+ h ^= static_cast<uint64_t>(data8[3]) << 24U;
+ ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
+ case 3:
+ h ^= static_cast<uint64_t>(data8[2]) << 16U;
+ ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
+ case 2:
+ h ^= static_cast<uint64_t>(data8[1]) << 8U;
+ ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
+ case 1:
+ h ^= static_cast<uint64_t>(data8[0]);
+ h *= m;
+ ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
+ default:
+ break;
+ }
+
+ h ^= h >> r;
+
+ // not doing the final step here, because this will be done by keyToIdx anyways
+ // h *= m;
+ // h ^= h >> r;
+ return static_cast<size_t>(h);
+}
+
+inline size_t hash_int(uint64_t x) noexcept {
+ // tried lots of different hashes, let's stick with murmurhash3. It's simple, fast, well tested,
+ // and doesn't need any special 128bit operations.
+ x ^= x >> 33U;
+ x *= UINT64_C(0xff51afd7ed558ccd);
+ x ^= x >> 33U;
+
+ // not doing the final step here, because this will be done by keyToIdx anyways
+ // x *= UINT64_C(0xc4ceb9fe1a85ec53);
+ // x ^= x >> 33U;
+ return static_cast<size_t>(x);
+}
+
+// A thin wrapper around std::hash, performing an additional simple mixing step of the result.
+template <typename T, typename Enable = void>
+struct hash : public std::hash<T> {
+ size_t operator()(T const& obj) const
+ noexcept(noexcept(std::declval<std::hash<T>>().operator()(std::declval<T const&>()))) {
+ // call base hash
+ auto result = std::hash<T>::operator()(obj);
+ // return mixed of that, to be save against identity has
+ return hash_int(static_cast<detail::SizeT>(result));
+ }
+};
+
+template <typename CharT>
+struct hash<std::basic_string<CharT>> {
+ size_t operator()(std::basic_string<CharT> const& str) const noexcept {
+ return hash_bytes(str.data(), sizeof(CharT) * str.size());
+ }
+};
+
+#if ROBIN_HOOD(CXX) >= ROBIN_HOOD(CXX17)
+template <typename CharT>
+struct hash<std::basic_string_view<CharT>> {
+ size_t operator()(std::basic_string_view<CharT> const& sv) const noexcept {
+ return hash_bytes(sv.data(), sizeof(CharT) * sv.size());
+ }
+};
+#endif
+
+template <class T>
+struct hash<T*> {
+ size_t operator()(T* ptr) const noexcept {
+ return hash_int(reinterpret_cast<detail::SizeT>(ptr));
+ }
+};
+
+template <class T>
+struct hash<std::unique_ptr<T>> {
+ size_t operator()(std::unique_ptr<T> const& ptr) const noexcept {
+ return hash_int(reinterpret_cast<detail::SizeT>(ptr.get()));
+ }
+};
+
+template <class T>
+struct hash<std::shared_ptr<T>> {
+ size_t operator()(std::shared_ptr<T> const& ptr) const noexcept {
+ return hash_int(reinterpret_cast<detail::SizeT>(ptr.get()));
+ }
+};
+
+template <typename Enum>
+struct hash<Enum, typename std::enable_if<std::is_enum<Enum>::value>::type> {
+ size_t operator()(Enum e) const noexcept {
+ using Underlying = typename std::underlying_type<Enum>::type;
+ return hash<Underlying>{}(static_cast<Underlying>(e));
+ }
+};
+
+#define ROBIN_HOOD_HASH_INT(T) \
+ template <> \
+ struct hash<T> { \
+ size_t operator()(T const& obj) const noexcept { \
+ return hash_int(static_cast<uint64_t>(obj)); \
+ } \
+ }
+
+#if defined(__GNUC__) && !defined(__clang__)
+# pragma GCC diagnostic push
+# pragma GCC diagnostic ignored "-Wuseless-cast"
+#endif
+// see https://en.cppreference.com/w/cpp/utility/hash
+ROBIN_HOOD_HASH_INT(bool);
+ROBIN_HOOD_HASH_INT(char);
+ROBIN_HOOD_HASH_INT(signed char);
+ROBIN_HOOD_HASH_INT(unsigned char);
+ROBIN_HOOD_HASH_INT(char16_t);
+ROBIN_HOOD_HASH_INT(char32_t);
+#if ROBIN_HOOD(HAS_NATIVE_WCHART)
+ROBIN_HOOD_HASH_INT(wchar_t);
+#endif
+ROBIN_HOOD_HASH_INT(short);
+ROBIN_HOOD_HASH_INT(unsigned short);
+ROBIN_HOOD_HASH_INT(int);
+ROBIN_HOOD_HASH_INT(unsigned int);
+ROBIN_HOOD_HASH_INT(long);
+ROBIN_HOOD_HASH_INT(long long);
+ROBIN_HOOD_HASH_INT(unsigned long);
+ROBIN_HOOD_HASH_INT(unsigned long long);
+#if defined(__GNUC__) && !defined(__clang__)
+# pragma GCC diagnostic pop
+#endif
+namespace detail {
+
+template <typename T>
+struct void_type {
+ using type = void;
+};
+
+template <typename T, typename = void>
+struct has_is_transparent : public std::false_type {};
+
+template <typename T>
+struct has_is_transparent<T, typename void_type<typename T::is_transparent>::type>
+ : public std::true_type {};
+
+// using wrapper classes for hash and key_equal prevents the diamond problem when the same type
+// is used. see https://stackoverflow.com/a/28771920/48181
+template <typename T>
+struct WrapHash : public T {
+ WrapHash() = default;
+ explicit WrapHash(T const& o) noexcept(noexcept(T(std::declval<T const&>())))
+ : T(o) {}
+};
+
+template <typename T>
+struct WrapKeyEqual : public T {
+ WrapKeyEqual() = default;
+ explicit WrapKeyEqual(T const& o) noexcept(noexcept(T(std::declval<T const&>())))
+ : T(o) {}
+};
+
+// A highly optimized hashmap implementation, using the Robin Hood algorithm.
+//
+// In most cases, this map should be usable as a drop-in replacement for std::unordered_map, but
+// be about 2x faster in most cases and require much less allocations.
+//
+// This implementation uses the following memory layout:
+//
+// [Node, Node, ... Node | info, info, ... infoSentinel ]
+//
+// * Node: either a DataNode that directly has the std::pair<key, val> as member,
+// or a DataNode with a pointer to std::pair<key,val>. Which DataNode representation to use
+//   depends on how fast the swap() operation is. Heuristically, this is automatically chosen
+//   based on sizeof(). There are always 2^n Nodes.
+//
+// * info: Each Node in the map has a corresponding info byte, so there are 2^n info bytes.
+// Each byte is initialized to 0, meaning the corresponding Node is empty. Set to 1 means the
+// corresponding node contains data. Set to 2 means the corresponding Node is filled, but it
+// actually belongs to the previous position and was pushed out because that place is already
+// taken.
+//
+// * infoSentinel: Sentinel byte set to 1, so that iterator's ++ can stop at end() without the
+// need for an idx variable.
+//
+// According to STL, order of templates has effect on throughput. That's why I've moved the
+// boolean to the front.
+// https://www.reddit.com/r/cpp/comments/ahp6iu/compile_time_binary_size_reductions_and_cs_future/eeguck4/
+template <bool IsFlat, size_t MaxLoadFactor100, typename Key, typename T, typename Hash,
+ typename KeyEqual>
+class Table
+ : public WrapHash<Hash>,
+ public WrapKeyEqual<KeyEqual>,
+ detail::NodeAllocator<
+ typename std::conditional<
+ std::is_void<T>::value, Key,
+ robin_hood::pair<typename std::conditional<IsFlat, Key, Key const>::type, T>>::type,
+ 4, 16384, IsFlat> {
+public:
+ static constexpr bool is_flat = IsFlat;
+ static constexpr bool is_map = !std::is_void<T>::value;
+ static constexpr bool is_set = !is_map;
+ static constexpr bool is_transparent =
+ has_is_transparent<Hash>::value && has_is_transparent<KeyEqual>::value;
+
+ using key_type = Key;
+ using mapped_type = T;
+ using value_type = typename std::conditional<
+ is_set, Key,
+ robin_hood::pair<typename std::conditional<is_flat, Key, Key const>::type, T>>::type;
+ using size_type = size_t;
+ using hasher = Hash;
+ using key_equal = KeyEqual;
+ using Self = Table<IsFlat, MaxLoadFactor100, key_type, mapped_type, hasher, key_equal>;
+
+private:
+ static_assert(MaxLoadFactor100 > 10 && MaxLoadFactor100 < 100,
+ "MaxLoadFactor100 needs to be >10 && < 100");
+
+ using WHash = WrapHash<Hash>;
+ using WKeyEqual = WrapKeyEqual<KeyEqual>;
+
+ // configuration defaults
+
+ // make sure we have 8 elements, needed to quickly rehash mInfo
+ static constexpr size_t InitialNumElements = sizeof(uint64_t);
+ static constexpr uint32_t InitialInfoNumBits = 5;
+ static constexpr uint8_t InitialInfoInc = 1U << InitialInfoNumBits;
+ static constexpr size_t InfoMask = InitialInfoInc - 1U;
+ static constexpr uint8_t InitialInfoHashShift = 0;
+ using DataPool = detail::NodeAllocator<value_type, 4, 16384, IsFlat>;
+
+ // type needs to be wider than uint8_t.
+ using InfoType = uint32_t;
+
+ // DataNode ////////////////////////////////////////////////////////
+
+ // Primary template for the data node. We have special implementations for small and big
+ // objects. For large objects it is assumed that swap() is fairly slow, so we allocate these
+ // on the heap so swap merely swaps a pointer.
+ template <typename M, bool>
+ class DataNode {};
+
+ // Small: just allocate on the stack.
+ template <typename M>
+ class DataNode<M, true> final {
+ public:
+ template <typename... Args>
+ explicit DataNode(M& ROBIN_HOOD_UNUSED(map) /*unused*/, Args&&... args) noexcept(
+ noexcept(value_type(std::forward<Args>(args)...)))
+ : mData(std::forward<Args>(args)...) {}
+
+ DataNode(M& ROBIN_HOOD_UNUSED(map) /*unused*/, DataNode<M, true>&& n) noexcept(
+ std::is_nothrow_move_constructible<value_type>::value)
+ : mData(std::move(n.mData)) {}
+
+ // doesn't do anything
+ void destroy(M& ROBIN_HOOD_UNUSED(map) /*unused*/) noexcept {}
+ void destroyDoNotDeallocate() noexcept {}
+
+ value_type const* operator->() const noexcept {
+ return &mData;
+ }
+ value_type* operator->() noexcept {
+ return &mData;
+ }
+
+ const value_type& operator*() const noexcept {
+ return mData;
+ }
+
+ value_type& operator*() noexcept {
+ return mData;
+ }
+
+ template <typename VT = value_type>
+ ROBIN_HOOD(NODISCARD)
+ typename std::enable_if<is_map, typename VT::first_type&>::type getFirst() noexcept {
+ return mData.first;
+ }
+ template <typename VT = value_type>
+ ROBIN_HOOD(NODISCARD)
+ typename std::enable_if<is_set, VT&>::type getFirst() noexcept {
+ return mData;
+ }
+
+ template <typename VT = value_type>
+ ROBIN_HOOD(NODISCARD)
+ typename std::enable_if<is_map, typename VT::first_type const&>::type
+ getFirst() const noexcept {
+ return mData.first;
+ }
+ template <typename VT = value_type>
+ ROBIN_HOOD(NODISCARD)
+ typename std::enable_if<is_set, VT const&>::type getFirst() const noexcept {
+ return mData;
+ }
+
+ template <typename MT = mapped_type>
+ ROBIN_HOOD(NODISCARD)
+ typename std::enable_if<is_map, MT&>::type getSecond() noexcept {
+ return mData.second;
+ }
+
+ template <typename MT = mapped_type>
+ ROBIN_HOOD(NODISCARD)
+ typename std::enable_if<is_set, MT const&>::type getSecond() const noexcept {
+ return mData.second;
+ }
+
+ void swap(DataNode<M, true>& o) noexcept(
+ noexcept(std::declval<value_type>().swap(std::declval<value_type>()))) {
+ mData.swap(o.mData);
+ }
+
+ private:
+ value_type mData;
+ };
+
+ // big object: allocate on heap.
+ template <typename M>
+ class DataNode<M, false> {
+ public:
+ template <typename... Args>
+ explicit DataNode(M& map, Args&&... args)
+ : mData(map.allocate()) {
+ ::new (static_cast<void*>(mData)) value_type(std::forward<Args>(args)...);
+ }
+
+ DataNode(M& ROBIN_HOOD_UNUSED(map) /*unused*/, DataNode<M, false>&& n) noexcept
+ : mData(std::move(n.mData)) {}
+
+ void destroy(M& map) noexcept {
+ // don't deallocate, just put it into list of datapool.
+ mData->~value_type();
+ map.deallocate(mData);
+ }
+
+ void destroyDoNotDeallocate() noexcept {
+ mData->~value_type();
+ }
+
+ value_type const* operator->() const noexcept {
+ return mData;
+ }
+
+ value_type* operator->() noexcept {
+ return mData;
+ }
+
+ const value_type& operator*() const {
+ return *mData;
+ }
+
+ value_type& operator*() {
+ return *mData;
+ }
+
+ template <typename VT = value_type>
+ ROBIN_HOOD(NODISCARD)
+ typename std::enable_if<is_map, typename VT::first_type&>::type getFirst() noexcept {
+ return mData->first;
+ }
+ template <typename VT = value_type>
+ ROBIN_HOOD(NODISCARD)
+ typename std::enable_if<is_set, VT&>::type getFirst() noexcept {
+ return *mData;
+ }
+
+ template <typename VT = value_type>
+ ROBIN_HOOD(NODISCARD)
+ typename std::enable_if<is_map, typename VT::first_type const&>::type
+ getFirst() const noexcept {
+ return mData->first;
+ }
+ template <typename VT = value_type>
+ ROBIN_HOOD(NODISCARD)
+ typename std::enable_if<is_set, VT const&>::type getFirst() const noexcept {
+ return *mData;
+ }
+
+ template <typename MT = mapped_type>
+ ROBIN_HOOD(NODISCARD)
+ typename std::enable_if<is_map, MT&>::type getSecond() noexcept {
+ return mData->second;
+ }
+
+ template <typename MT = mapped_type>
+ ROBIN_HOOD(NODISCARD)
+ typename std::enable_if<is_map, MT const&>::type getSecond() const noexcept {
+ return mData->second;
+ }
+
+ void swap(DataNode<M, false>& o) noexcept {
+ using std::swap;
+ swap(mData, o.mData);
+ }
+
+ private:
+ value_type* mData;
+ };
+
+ using Node = DataNode<Self, IsFlat>;
+
+ // helpers for insertKeyPrepareEmptySpot: extract first entry (only const required)
+ ROBIN_HOOD(NODISCARD) key_type const& getFirstConst(Node const& n) const noexcept {
+ return n.getFirst();
+ }
+
+ // in case we have void mapped_type, we are not using a pair, thus we just route k through.
+ // No need to disable this because it's just not used if not applicable.
+ ROBIN_HOOD(NODISCARD) key_type const& getFirstConst(key_type const& k) const noexcept {
+ return k;
+ }
+
+ // in case we have non-void mapped_type, we have a standard robin_hood::pair
+ template <typename Q = mapped_type>
+ ROBIN_HOOD(NODISCARD)
+ typename std::enable_if<!std::is_void<Q>::value, key_type const&>::type
+ getFirstConst(value_type const& vt) const noexcept {
+ return vt.first;
+ }
+
+ // Cloner //////////////////////////////////////////////////////////
+
+ template <typename M, bool UseMemcpy>
+ struct Cloner;
+
+ // fast path: Just copy data, without allocating anything.
+ template <typename M>
+ struct Cloner<M, true> {
+ void operator()(M const& source, M& target) const {
+ auto const* const src = reinterpret_cast<char const*>(source.mKeyVals);
+ auto* tgt = reinterpret_cast<char*>(target.mKeyVals);
+ auto const numElementsWithBuffer = target.calcNumElementsWithBuffer(target.mMask + 1);
+ std::copy(src, src + target.calcNumBytesTotal(numElementsWithBuffer), tgt);
+ }
+ };
+
+ template <typename M>
+ struct Cloner<M, false> {
+ void operator()(M const& s, M& t) const {
+ auto const numElementsWithBuffer = t.calcNumElementsWithBuffer(t.mMask + 1);
+ std::copy(s.mInfo, s.mInfo + t.calcNumBytesInfo(numElementsWithBuffer), t.mInfo);
+
+ for (size_t i = 0; i < numElementsWithBuffer; ++i) {
+ if (t.mInfo[i]) {
+ ::new (static_cast<void*>(t.mKeyVals + i)) Node(t, *s.mKeyVals[i]);
+ }
+ }
+ }
+ };
+
+ // Destroyer ///////////////////////////////////////////////////////
+
+ template <typename M, bool IsFlatAndTrivial>
+ struct Destroyer {};
+
+ template <typename M>
+ struct Destroyer<M, true> {
+ void nodes(M& m) const noexcept {
+ m.mNumElements = 0;
+ }
+
+ void nodesDoNotDeallocate(M& m) const noexcept {
+ m.mNumElements = 0;
+ }
+ };
+
+ template <typename M>
+ struct Destroyer<M, false> {
+ void nodes(M& m) const noexcept {
+ m.mNumElements = 0;
+ // clear also resets mInfo to 0, that's sometimes not necessary.
+ auto const numElementsWithBuffer = m.calcNumElementsWithBuffer(m.mMask + 1);
+
+ for (size_t idx = 0; idx < numElementsWithBuffer; ++idx) {
+ if (0 != m.mInfo[idx]) {
+ Node& n = m.mKeyVals[idx];
+ n.destroy(m);
+ n.~Node();
+ }
+ }
+ }
+
+ void nodesDoNotDeallocate(M& m) const noexcept {
+ m.mNumElements = 0;
+ // clear also resets mInfo to 0, that's sometimes not necessary.
+ auto const numElementsWithBuffer = m.calcNumElementsWithBuffer(m.mMask + 1);
+ for (size_t idx = 0; idx < numElementsWithBuffer; ++idx) {
+ if (0 != m.mInfo[idx]) {
+ Node& n = m.mKeyVals[idx];
+ n.destroyDoNotDeallocate();
+ n.~Node();
+ }
+ }
+ }
+ };
+
+ // Iter ////////////////////////////////////////////////////////////
+
+ struct fast_forward_tag {};
+
+ // generic iterator for both const_iterator and iterator.
+ template <bool IsConst>
+ // NOLINTNEXTLINE(hicpp-special-member-functions,cppcoreguidelines-special-member-functions)
+ class Iter {
+ private:
+ using NodePtr = typename std::conditional<IsConst, Node const*, Node*>::type;
+
+ public:
+ using difference_type = std::ptrdiff_t;
+ using value_type = typename Self::value_type;
+ using reference = typename std::conditional<IsConst, value_type const&, value_type&>::type;
+ using pointer = typename std::conditional<IsConst, value_type const*, value_type*>::type;
+ using iterator_category = std::forward_iterator_tag;
+
+ // default constructed iterator can be compared to itself, but WON'T return true when
+ // compared to end().
+ Iter() = default;
+
+ // Rule of zero: nothing specified. The conversion constructor is only enabled for
+ // iterator to const_iterator, so it doesn't accidentally work as a copy ctor.
+
+ // Conversion constructor from iterator to const_iterator.
+ template <bool OtherIsConst,
+ typename = typename std::enable_if<IsConst && !OtherIsConst>::type>
+ // NOLINTNEXTLINE(hicpp-explicit-conversions)
+ Iter(Iter<OtherIsConst> const& other) noexcept
+ : mKeyVals(other.mKeyVals)
+ , mInfo(other.mInfo) {}
+
+ Iter(NodePtr valPtr, uint8_t const* infoPtr) noexcept
+ : mKeyVals(valPtr)
+ , mInfo(infoPtr) {}
+
+ Iter(NodePtr valPtr, uint8_t const* infoPtr,
+ fast_forward_tag ROBIN_HOOD_UNUSED(tag) /*unused*/) noexcept
+ : mKeyVals(valPtr)
+ , mInfo(infoPtr) {
+ fastForward();
+ }
+
+ template <bool OtherIsConst,
+ typename = typename std::enable_if<IsConst && !OtherIsConst>::type>
+ Iter& operator=(Iter<OtherIsConst> const& other) noexcept {
+ mKeyVals = other.mKeyVals;
+ mInfo = other.mInfo;
+ return *this;
+ }
+
+ // prefix increment. Undefined behavior if we are at end()!
+ Iter& operator++() noexcept {
+ mInfo++;
+ mKeyVals++;
+ fastForward();
+ return *this;
+ }
+
+ Iter operator++(int) noexcept {
+ Iter tmp = *this;
+ ++(*this);
+ return tmp;
+ }
+
+ reference operator*() const {
+ return **mKeyVals;
+ }
+
+ pointer operator->() const {
+ return &**mKeyVals;
+ }
+
+ template <bool O>
+ bool operator==(Iter<O> const& o) const noexcept {
+ return mKeyVals == o.mKeyVals;
+ }
+
+ template <bool O>
+ bool operator!=(Iter<O> const& o) const noexcept {
+ return mKeyVals != o.mKeyVals;
+ }
+
+ private:
+ // fast forward to the next non-free info byte
+ // I've tried a few variants that don't depend on intrinsics, but unfortunately they are
+ // quite a bit slower than this one. So I've reverted that change again. See map_benchmark.
+ void fastForward() noexcept {
+ size_t n = 0;
+ while (0U == (n = detail::unaligned_load<size_t>(mInfo))) {
+ mInfo += sizeof(size_t);
+ mKeyVals += sizeof(size_t);
+ }
+#if defined(ROBIN_HOOD_DISABLE_INTRINSICS)
+ // we know for certain that within the next 8 bytes we'll find a non-zero one.
+ if (ROBIN_HOOD_UNLIKELY(0U == detail::unaligned_load<uint32_t>(mInfo))) {
+ mInfo += 4;
+ mKeyVals += 4;
+ }
+ if (ROBIN_HOOD_UNLIKELY(0U == detail::unaligned_load<uint16_t>(mInfo))) {
+ mInfo += 2;
+ mKeyVals += 2;
+ }
+ if (ROBIN_HOOD_UNLIKELY(0U == *mInfo)) {
+ mInfo += 1;
+ mKeyVals += 1;
+ }
+#else
+# if ROBIN_HOOD(LITTLE_ENDIAN)
+ auto inc = ROBIN_HOOD_COUNT_TRAILING_ZEROES(n) / 8;
+# else
+ auto inc = ROBIN_HOOD_COUNT_LEADING_ZEROES(n) / 8;
+# endif
+ mInfo += inc;
+ mKeyVals += inc;
+#endif
+ }
+
+ friend class Table<IsFlat, MaxLoadFactor100, key_type, mapped_type, hasher, key_equal>;
+ NodePtr mKeyVals{nullptr};
+ uint8_t const* mInfo{nullptr};
+ };
+
+ ////////////////////////////////////////////////////////////////////
+
+ // highly performance relevant code.
+ // Lower bits are used for indexing into the array (2^n size)
+ // The upper 1-5 bits need to be a reasonable good hash, to save comparisons.
+ template <typename HashKey>
+ void keyToIdx(HashKey&& key, size_t* idx, InfoType* info) const {
+ // In addition to whatever hash is used, add another mul & shift so we get better hashing.
+ // This serves as a bad hash prevention, if the given data is
+ // badly mixed.
+ auto h = static_cast<uint64_t>(WHash::operator()(key));
+
+ h *= mHashMultiplier;
+ h ^= h >> 33U;
+
+ // the lower InitialInfoNumBits are reserved for info.
+ *info = mInfoInc + static_cast<InfoType>((h & InfoMask) >> mInfoHashShift);
+ *idx = (static_cast<size_t>(h) >> InitialInfoNumBits) & mMask;
+ }
+
+ // forwards the index by one, wrapping around at the end
+ void next(InfoType* info, size_t* idx) const noexcept {
+ *idx = *idx + 1;
+ *info += mInfoInc;
+ }
+
+ void nextWhileLess(InfoType* info, size_t* idx) const noexcept {
+ // unrolling this by hand did not bring any speedups.
+ while (*info < mInfo[*idx]) {
+ next(info, idx);
+ }
+ }
+
+ // Shift everything up by one element. Tries to move stuff around.
+ void
+ shiftUp(size_t startIdx,
+ size_t const insertion_idx) noexcept(std::is_nothrow_move_assignable<Node>::value) {
+ auto idx = startIdx;
+ ::new (static_cast<void*>(mKeyVals + idx)) Node(std::move(mKeyVals[idx - 1]));
+ while (--idx != insertion_idx) {
+ mKeyVals[idx] = std::move(mKeyVals[idx - 1]);
+ }
+
+ idx = startIdx;
+ while (idx != insertion_idx) {
+ ROBIN_HOOD_COUNT(shiftUp)
+ mInfo[idx] = static_cast<uint8_t>(mInfo[idx - 1] + mInfoInc);
+ if (ROBIN_HOOD_UNLIKELY(mInfo[idx] + mInfoInc > 0xFF)) {
+ mMaxNumElementsAllowed = 0;
+ }
+ --idx;
+ }
+ }
+
+ void shiftDown(size_t idx) noexcept(std::is_nothrow_move_assignable<Node>::value) {
+ // until we find one that is either empty or has zero offset.
+ // TODO(martinus) we don't need to move everything, just the last one for the same
+ // bucket.
+ mKeyVals[idx].destroy(*this);
+
+ // until we find one that is either empty or has zero offset.
+ while (mInfo[idx + 1] >= 2 * mInfoInc) {
+ ROBIN_HOOD_COUNT(shiftDown)
+ mInfo[idx] = static_cast<uint8_t>(mInfo[idx + 1] - mInfoInc);
+ mKeyVals[idx] = std::move(mKeyVals[idx + 1]);
+ ++idx;
+ }
+
+ mInfo[idx] = 0;
+ // don't destroy, we've moved it
+ // mKeyVals[idx].destroy(*this);
+ mKeyVals[idx].~Node();
+ }
+
+ // copy of find(), except that it returns iterator instead of const_iterator.
+ template <typename Other>
+ ROBIN_HOOD(NODISCARD)
+ size_t findIdx(Other const& key) const {
+ size_t idx{};
+ InfoType info{};
+ keyToIdx(key, &idx, &info);
+
+ do {
+ // unrolling this twice gives a bit of a speedup. More unrolling did not help.
+ if (info == mInfo[idx] &&
+ ROBIN_HOOD_LIKELY(WKeyEqual::operator()(key, mKeyVals[idx].getFirst()))) {
+ return idx;
+ }
+ next(&info, &idx);
+ if (info == mInfo[idx] &&
+ ROBIN_HOOD_LIKELY(WKeyEqual::operator()(key, mKeyVals[idx].getFirst()))) {
+ return idx;
+ }
+ next(&info, &idx);
+ } while (info <= mInfo[idx]);
+
+ // nothing found!
+ return mMask == 0 ? 0
+ : static_cast<size_t>(std::distance(
+ mKeyVals, reinterpret_cast_no_cast_align_warning<Node*>(mInfo)));
+ }
+
+ void cloneData(const Table& o) {
+ Cloner<Table, IsFlat && ROBIN_HOOD_IS_TRIVIALLY_COPYABLE(Node)>()(o, *this);
+ }
+
+ // inserts a keyval that is guaranteed to be new, e.g. when the hashmap is resized.
+ // @return True on success, false if something went wrong
+ void insert_move(Node&& keyval) {
+ // we don't retry, fail if overflowing
+ // don't need to check max num elements
+ if (0 == mMaxNumElementsAllowed && !try_increase_info()) {
+ throwOverflowError();
+ }
+
+ size_t idx{};
+ InfoType info{};
+ keyToIdx(keyval.getFirst(), &idx, &info);
+
+ // skip forward. Use <= because we are certain that the element is not there.
+ while (info <= mInfo[idx]) {
+ idx = idx + 1;
+ info += mInfoInc;
+ }
+
+ // key not found, so we are now exactly where we want to insert it.
+ auto const insertion_idx = idx;
+ auto const insertion_info = static_cast<uint8_t>(info);
+ if (ROBIN_HOOD_UNLIKELY(insertion_info + mInfoInc > 0xFF)) {
+ mMaxNumElementsAllowed = 0;
+ }
+
+ // find an empty spot
+ while (0 != mInfo[idx]) {
+ next(&info, &idx);
+ }
+
+ auto& l = mKeyVals[insertion_idx];
+ if (idx == insertion_idx) {
+ ::new (static_cast<void*>(&l)) Node(std::move(keyval));
+ } else {
+ shiftUp(idx, insertion_idx);
+ l = std::move(keyval);
+ }
+
+ // put at empty spot
+ mInfo[insertion_idx] = insertion_info;
+
+ ++mNumElements;
+ }
+
+public:
+ using iterator = Iter<false>;
+ using const_iterator = Iter<true>;
+
+ Table() noexcept(noexcept(Hash()) && noexcept(KeyEqual()))
+ : WHash()
+ , WKeyEqual() {
+ ROBIN_HOOD_TRACE(this)
+ }
+
+ // Creates an empty hash map. Nothing is allocated yet, this happens at the first insert.
+ // This tremendously speeds up ctor & dtor of a map that never receives an element. The
+ // penalty is payed at the first insert, and not before. Lookup of this empty map works
+ // because everybody points to DummyInfoByte::b. parameter bucket_count is dictated by the
+ // standard, but we can ignore it.
+ explicit Table(
+ size_t ROBIN_HOOD_UNUSED(bucket_count) /*unused*/, const Hash& h = Hash{},
+ const KeyEqual& equal = KeyEqual{}) noexcept(noexcept(Hash(h)) && noexcept(KeyEqual(equal)))
+ : WHash(h)
+ , WKeyEqual(equal) {
+ ROBIN_HOOD_TRACE(this)
+ }
+
+ template <typename Iter>
+ Table(Iter first, Iter last, size_t ROBIN_HOOD_UNUSED(bucket_count) /*unused*/ = 0,
+ const Hash& h = Hash{}, const KeyEqual& equal = KeyEqual{})
+ : WHash(h)
+ , WKeyEqual(equal) {
+ ROBIN_HOOD_TRACE(this)
+ insert(first, last);
+ }
+
+ Table(std::initializer_list<value_type> initlist,
+ size_t ROBIN_HOOD_UNUSED(bucket_count) /*unused*/ = 0, const Hash& h = Hash{},
+ const KeyEqual& equal = KeyEqual{})
+ : WHash(h)
+ , WKeyEqual(equal) {
+ ROBIN_HOOD_TRACE(this)
+ insert(initlist.begin(), initlist.end());
+ }
+
+ Table(Table&& o) noexcept
+ : WHash(std::move(static_cast<WHash&>(o)))
+ , WKeyEqual(std::move(static_cast<WKeyEqual&>(o)))
+ , DataPool(std::move(static_cast<DataPool&>(o))) {
+ ROBIN_HOOD_TRACE(this)
+ if (o.mMask) {
+ mHashMultiplier = std::move(o.mHashMultiplier);
+ mKeyVals = std::move(o.mKeyVals);
+ mInfo = std::move(o.mInfo);
+ mNumElements = std::move(o.mNumElements);
+ mMask = std::move(o.mMask);
+ mMaxNumElementsAllowed = std::move(o.mMaxNumElementsAllowed);
+ mInfoInc = std::move(o.mInfoInc);
+ mInfoHashShift = std::move(o.mInfoHashShift);
+ // set other's mask to 0 so its destructor won't do anything
+ o.init();
+ }
+ }
+
+ Table& operator=(Table&& o) noexcept {
+ ROBIN_HOOD_TRACE(this)
+ if (&o != this) {
+ if (o.mMask) {
+ // only move stuff if the other map actually has some data
+ destroy();
+ mHashMultiplier = std::move(o.mHashMultiplier);
+ mKeyVals = std::move(o.mKeyVals);
+ mInfo = std::move(o.mInfo);
+ mNumElements = std::move(o.mNumElements);
+ mMask = std::move(o.mMask);
+ mMaxNumElementsAllowed = std::move(o.mMaxNumElementsAllowed);
+ mInfoInc = std::move(o.mInfoInc);
+ mInfoHashShift = std::move(o.mInfoHashShift);
+ WHash::operator=(std::move(static_cast<WHash&>(o)));
+ WKeyEqual::operator=(std::move(static_cast<WKeyEqual&>(o)));
+ DataPool::operator=(std::move(static_cast<DataPool&>(o)));
+
+ o.init();
+
+ } else {
+ // nothing in the other map => just clear us.
+ clear();
+ }
+ }
+ return *this;
+ }
+
+ Table(const Table& o)
+ : WHash(static_cast<const WHash&>(o))
+ , WKeyEqual(static_cast<const WKeyEqual&>(o))
+ , DataPool(static_cast<const DataPool&>(o)) {
+ ROBIN_HOOD_TRACE(this)
+ if (!o.empty()) {
+ // not empty: create an exact copy. it is also possible to just iterate through all
+ // elements and insert them, but copying is probably faster.
+
+ auto const numElementsWithBuffer = calcNumElementsWithBuffer(o.mMask + 1);
+ auto const numBytesTotal = calcNumBytesTotal(numElementsWithBuffer);
+
+ ROBIN_HOOD_LOG("std::malloc " << numBytesTotal << " = calcNumBytesTotal("
+ << numElementsWithBuffer << ")")
+ mHashMultiplier = o.mHashMultiplier;
+ mKeyVals = static_cast<Node*>(
+ detail::assertNotNull<std::bad_alloc>(std::malloc(numBytesTotal)));
+ // no need for calloc because clonData does memcpy
+ mInfo = reinterpret_cast<uint8_t*>(mKeyVals + numElementsWithBuffer);
+ mNumElements = o.mNumElements;
+ mMask = o.mMask;
+ mMaxNumElementsAllowed = o.mMaxNumElementsAllowed;
+ mInfoInc = o.mInfoInc;
+ mInfoHashShift = o.mInfoHashShift;
+ cloneData(o);
+ }
+ }
+
+ // Creates a copy of the given map. Copy constructor of each entry is used.
+ // Not sure why clang-tidy thinks this doesn't handle self assignment, it does
+ // NOLINTNEXTLINE(bugprone-unhandled-self-assignment,cert-oop54-cpp)
+ Table& operator=(Table const& o) {
+ ROBIN_HOOD_TRACE(this)
+ if (&o == this) {
+ // prevent assigning of itself
+ return *this;
+ }
+
+ // we keep using the old allocator and not assign the new one, because we want to keep
+ // the memory available. when it is the same size.
+ if (o.empty()) {
+ if (0 == mMask) {
+ // nothing to do, we are empty too
+ return *this;
+ }
+
+ // not empty: destroy what we have there
+ // clear also resets mInfo to 0, that's sometimes not necessary.
+ destroy();
+ init();
+ WHash::operator=(static_cast<const WHash&>(o));
+ WKeyEqual::operator=(static_cast<const WKeyEqual&>(o));
+ DataPool::operator=(static_cast<DataPool const&>(o));
+
+ return *this;
+ }
+
+ // clean up old stuff
+ Destroyer<Self, IsFlat && std::is_trivially_destructible<Node>::value>{}.nodes(*this);
+
+ if (mMask != o.mMask) {
+ // no luck: we don't have the same array size allocated, so we need to realloc.
+ if (0 != mMask) {
+ // only deallocate if we actually have data!
+ ROBIN_HOOD_LOG("std::free")
+ std::free(mKeyVals);
+ }
+
+ auto const numElementsWithBuffer = calcNumElementsWithBuffer(o.mMask + 1);
+ auto const numBytesTotal = calcNumBytesTotal(numElementsWithBuffer);
+ ROBIN_HOOD_LOG("std::malloc " << numBytesTotal << " = calcNumBytesTotal("
+ << numElementsWithBuffer << ")")
+ mKeyVals = static_cast<Node*>(
+ detail::assertNotNull<std::bad_alloc>(std::malloc(numBytesTotal)));
+
+ // no need for calloc here because cloneData performs a memcpy.
+ mInfo = reinterpret_cast<uint8_t*>(mKeyVals + numElementsWithBuffer);
+ // sentinel is set in cloneData
+ }
+ WHash::operator=(static_cast<const WHash&>(o));
+ WKeyEqual::operator=(static_cast<const WKeyEqual&>(o));
+ DataPool::operator=(static_cast<DataPool const&>(o));
+ mHashMultiplier = o.mHashMultiplier;
+ mNumElements = o.mNumElements;
+ mMask = o.mMask;
+ mMaxNumElementsAllowed = o.mMaxNumElementsAllowed;
+ mInfoInc = o.mInfoInc;
+ mInfoHashShift = o.mInfoHashShift;
+ cloneData(o);
+
+ return *this;
+ }
+
+ // Swaps everything between the two maps.
+ void swap(Table& o) {
+ ROBIN_HOOD_TRACE(this)
+ using std::swap;
+ swap(o, *this);
+ }
+
+ // Clears all data, without resizing.
+ void clear() {
+ ROBIN_HOOD_TRACE(this)
+ if (empty()) {
+ // don't do anything! also important because we don't want to write to
+ // DummyInfoByte::b, even though we would just write 0 to it.
+ return;
+ }
+
+ Destroyer<Self, IsFlat && std::is_trivially_destructible<Node>::value>{}.nodes(*this);
+
+ auto const numElementsWithBuffer = calcNumElementsWithBuffer(mMask + 1);
+ // clear everything, then set the sentinel again
+ uint8_t const z = 0;
+ std::fill(mInfo, mInfo + calcNumBytesInfo(numElementsWithBuffer), z);
+ mInfo[numElementsWithBuffer] = 1;
+
+ mInfoInc = InitialInfoInc;
+ mInfoHashShift = InitialInfoHashShift;
+ }
+
+ // Destroys the map and all it's contents.
+ ~Table() {
+ ROBIN_HOOD_TRACE(this)
+ destroy();
+ }
+
+ // Checks if both tables contain the same entries. Order is irrelevant.
+ bool operator==(const Table& other) const {
+ ROBIN_HOOD_TRACE(this)
+ if (other.size() != size()) {
+ return false;
+ }
+ for (auto const& otherEntry : other) {
+ if (!has(otherEntry)) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ bool operator!=(const Table& other) const {
+ ROBIN_HOOD_TRACE(this)
+ return !operator==(other);
+ }
+
+ template <typename Q = mapped_type>
+ typename std::enable_if<!std::is_void<Q>::value, Q&>::type operator[](const key_type& key) {
+ ROBIN_HOOD_TRACE(this)
+ auto idxAndState = insertKeyPrepareEmptySpot(key);
+ switch (idxAndState.second) {
+ case InsertionState::key_found:
+ break;
+
+ case InsertionState::new_node:
+ ::new (static_cast<void*>(&mKeyVals[idxAndState.first]))
+ Node(*this, std::piecewise_construct, std::forward_as_tuple(key),
+ std::forward_as_tuple());
+ break;
+
+ case InsertionState::overwrite_node:
+ mKeyVals[idxAndState.first] = Node(*this, std::piecewise_construct,
+ std::forward_as_tuple(key), std::forward_as_tuple());
+ break;
+
+ case InsertionState::overflow_error:
+ throwOverflowError();
+ }
+
+ return mKeyVals[idxAndState.first].getSecond();
+ }
+
+ template <typename Q = mapped_type>
+ typename std::enable_if<!std::is_void<Q>::value, Q&>::type operator[](key_type&& key) {
+ ROBIN_HOOD_TRACE(this)
+ auto idxAndState = insertKeyPrepareEmptySpot(key);
+ switch (idxAndState.second) {
+ case InsertionState::key_found:
+ break;
+
+ case InsertionState::new_node:
+ ::new (static_cast<void*>(&mKeyVals[idxAndState.first]))
+ Node(*this, std::piecewise_construct, std::forward_as_tuple(std::move(key)),
+ std::forward_as_tuple());
+ break;
+
+ case InsertionState::overwrite_node:
+ mKeyVals[idxAndState.first] =
+ Node(*this, std::piecewise_construct, std::forward_as_tuple(std::move(key)),
+ std::forward_as_tuple());
+ break;
+
+ case InsertionState::overflow_error:
+ throwOverflowError();
+ }
+
+ return mKeyVals[idxAndState.first].getSecond();
+ }
+
+ template <typename Iter>
+ void insert(Iter first, Iter last) {
+ for (; first != last; ++first) {
+ // value_type ctor needed because this might be called with std::pair's
+ insert(value_type(*first));
+ }
+ }
+
+ void insert(std::initializer_list<value_type> ilist) {
+ for (auto&& vt : ilist) {
+ insert(std::move(vt));
+ }
+ }
+
+ template <typename... Args>
+ std::pair<iterator, bool> emplace(Args&&... args) {
+ ROBIN_HOOD_TRACE(this)
+ Node n{*this, std::forward<Args>(args)...};
+ auto idxAndState = insertKeyPrepareEmptySpot(getFirstConst(n));
+ switch (idxAndState.second) {
+ case InsertionState::key_found:
+ n.destroy(*this);
+ break;
+
+ case InsertionState::new_node:
+ ::new (static_cast<void*>(&mKeyVals[idxAndState.first])) Node(*this, std::move(n));
+ break;
+
+ case InsertionState::overwrite_node:
+ mKeyVals[idxAndState.first] = std::move(n);
+ break;
+
+ case InsertionState::overflow_error:
+ n.destroy(*this);
+ throwOverflowError();
+ break;
+ }
+
+ return std::make_pair(iterator(mKeyVals + idxAndState.first, mInfo + idxAndState.first),
+ InsertionState::key_found != idxAndState.second);
+ }
+
+ template <typename... Args>
+ iterator emplace_hint(const_iterator position, Args&&... args) {
+ (void)position;
+ return emplace(std::forward<Args>(args)...).first;
+ }
+
+ template <typename... Args>
+ std::pair<iterator, bool> try_emplace(const key_type& key, Args&&... args) {
+ return try_emplace_impl(key, std::forward<Args>(args)...);
+ }
+
+ template <typename... Args>
+ std::pair<iterator, bool> try_emplace(key_type&& key, Args&&... args) {
+ return try_emplace_impl(std::move(key), std::forward<Args>(args)...);
+ }
+
+ template <typename... Args>
+ iterator try_emplace(const_iterator hint, const key_type& key, Args&&... args) {
+ (void)hint;
+ return try_emplace_impl(key, std::forward<Args>(args)...).first;
+ }
+
+ template <typename... Args>
+ iterator try_emplace(const_iterator hint, key_type&& key, Args&&... args) {
+ (void)hint;
+ return try_emplace_impl(std::move(key), std::forward<Args>(args)...).first;
+ }
+
+ template <typename Mapped>
+ std::pair<iterator, bool> insert_or_assign(const key_type& key, Mapped&& obj) {
+ return insertOrAssignImpl(key, std::forward<Mapped>(obj));
+ }
+
+ template <typename Mapped>
+ std::pair<iterator, bool> insert_or_assign(key_type&& key, Mapped&& obj) {
+ return insertOrAssignImpl(std::move(key), std::forward<Mapped>(obj));
+ }
+
+ template <typename Mapped>
+ iterator insert_or_assign(const_iterator hint, const key_type& key, Mapped&& obj) {
+ (void)hint;
+ return insertOrAssignImpl(key, std::forward<Mapped>(obj)).first;
+ }
+
+ template <typename Mapped>
+ iterator insert_or_assign(const_iterator hint, key_type&& key, Mapped&& obj) {
+ (void)hint;
+ return insertOrAssignImpl(std::move(key), std::forward<Mapped>(obj)).first;
+ }
+
+ std::pair<iterator, bool> insert(const value_type& keyval) {
+ ROBIN_HOOD_TRACE(this)
+ return emplace(keyval);
+ }
+
+ iterator insert(const_iterator hint, const value_type& keyval) {
+ (void)hint;
+ return emplace(keyval).first;
+ }
+
+ std::pair<iterator, bool> insert(value_type&& keyval) {
+ return emplace(std::move(keyval));
+ }
+
+ iterator insert(const_iterator hint, value_type&& keyval) {
+ (void)hint;
+ return emplace(std::move(keyval)).first;
+ }
+
+ // Returns 1 if key is found, 0 otherwise.
+ size_t count(const key_type& key) const { // NOLINT(modernize-use-nodiscard)
+ ROBIN_HOOD_TRACE(this)
+ auto kv = mKeyVals + findIdx(key);
+ if (kv != reinterpret_cast_no_cast_align_warning<Node*>(mInfo)) {
+ return 1;
+ }
+ return 0;
+ }
+
+ template <typename OtherKey, typename Self_ = Self>
+ // NOLINTNEXTLINE(modernize-use-nodiscard)
+ typename std::enable_if<Self_::is_transparent, size_t>::type count(const OtherKey& key) const {
+ ROBIN_HOOD_TRACE(this)
+ auto kv = mKeyVals + findIdx(key);
+ if (kv != reinterpret_cast_no_cast_align_warning<Node*>(mInfo)) {
+ return 1;
+ }
+ return 0;
+ }
+
+ bool contains(const key_type& key) const { // NOLINT(modernize-use-nodiscard)
+ return 1U == count(key);
+ }
+
+ template <typename OtherKey, typename Self_ = Self>
+ // NOLINTNEXTLINE(modernize-use-nodiscard)
+ typename std::enable_if<Self_::is_transparent, bool>::type contains(const OtherKey& key) const {
+ return 1U == count(key);
+ }
+
+ // Returns a reference to the value found for key.
+ // Throws std::out_of_range if element cannot be found
+ template <typename Q = mapped_type>
+ // NOLINTNEXTLINE(modernize-use-nodiscard)
+ typename std::enable_if<!std::is_void<Q>::value, Q&>::type at(key_type const& key) {
+ ROBIN_HOOD_TRACE(this)
+ auto kv = mKeyVals + findIdx(key);
+ if (kv == reinterpret_cast_no_cast_align_warning<Node*>(mInfo)) {
+ doThrow<std::out_of_range>("key not found");
+ }
+ return kv->getSecond();
+ }
+
+ // Returns a reference to the value found for key.
+ // Throws std::out_of_range if element cannot be found
+ template <typename Q = mapped_type>
+ // NOLINTNEXTLINE(modernize-use-nodiscard)
+ typename std::enable_if<!std::is_void<Q>::value, Q const&>::type at(key_type const& key) const {
+ ROBIN_HOOD_TRACE(this)
+ auto kv = mKeyVals + findIdx(key);
+ if (kv == reinterpret_cast_no_cast_align_warning<Node*>(mInfo)) {
+ doThrow<std::out_of_range>("key not found");
+ }
+ return kv->getSecond();
+ }
+
+ const_iterator find(const key_type& key) const { // NOLINT(modernize-use-nodiscard)
+ ROBIN_HOOD_TRACE(this)
+ const size_t idx = findIdx(key);
+ return const_iterator{mKeyVals + idx, mInfo + idx};
+ }
+
+ template <typename OtherKey>
+ const_iterator find(const OtherKey& key, is_transparent_tag /*unused*/) const {
+ ROBIN_HOOD_TRACE(this)
+ const size_t idx = findIdx(key);
+ return const_iterator{mKeyVals + idx, mInfo + idx};
+ }
+
+ template <typename OtherKey, typename Self_ = Self>
+ typename std::enable_if<Self_::is_transparent, // NOLINT(modernize-use-nodiscard)
+ const_iterator>::type // NOLINT(modernize-use-nodiscard)
+ find(const OtherKey& key) const { // NOLINT(modernize-use-nodiscard)
+ ROBIN_HOOD_TRACE(this)
+ const size_t idx = findIdx(key);
+ return const_iterator{mKeyVals + idx, mInfo + idx};
+ }
+
+ iterator find(const key_type& key) {
+ ROBIN_HOOD_TRACE(this)
+ const size_t idx = findIdx(key);
+ return iterator{mKeyVals + idx, mInfo + idx};
+ }
+
+ template <typename OtherKey>
+ iterator find(const OtherKey& key, is_transparent_tag /*unused*/) {
+ ROBIN_HOOD_TRACE(this)
+ const size_t idx = findIdx(key);
+ return iterator{mKeyVals + idx, mInfo + idx};
+ }
+
+ template <typename OtherKey, typename Self_ = Self>
+ typename std::enable_if<Self_::is_transparent, iterator>::type find(const OtherKey& key) {
+ ROBIN_HOOD_TRACE(this)
+ const size_t idx = findIdx(key);
+ return iterator{mKeyVals + idx, mInfo + idx};
+ }
+
+ iterator begin() {
+ ROBIN_HOOD_TRACE(this)
+ if (empty()) {
+ return end();
+ }
+ return iterator(mKeyVals, mInfo, fast_forward_tag{});
+ }
+ const_iterator begin() const { // NOLINT(modernize-use-nodiscard)
+ ROBIN_HOOD_TRACE(this)
+ return cbegin();
+ }
+ const_iterator cbegin() const { // NOLINT(modernize-use-nodiscard)
+ ROBIN_HOOD_TRACE(this)
+ if (empty()) {
+ return cend();
+ }
+ return const_iterator(mKeyVals, mInfo, fast_forward_tag{});
+ }
+
+ iterator end() {
+ ROBIN_HOOD_TRACE(this)
+ // no need to supply valid info pointer: end() must not be dereferenced, and only node
+ // pointer is compared.
+ return iterator{reinterpret_cast_no_cast_align_warning<Node*>(mInfo), nullptr};
+ }
+ const_iterator end() const { // NOLINT(modernize-use-nodiscard)
+ ROBIN_HOOD_TRACE(this)
+ return cend();
+ }
+ const_iterator cend() const { // NOLINT(modernize-use-nodiscard)
+ ROBIN_HOOD_TRACE(this)
+ return const_iterator{reinterpret_cast_no_cast_align_warning<Node*>(mInfo), nullptr};
+ }
+
+ iterator erase(const_iterator pos) {
+ ROBIN_HOOD_TRACE(this)
+        // it's safe to perform a const cast here
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
+ return erase(iterator{const_cast<Node*>(pos.mKeyVals), const_cast<uint8_t*>(pos.mInfo)});
+ }
+
+ // Erases element at pos, returns iterator to the next element.
+ iterator erase(iterator pos) {
+ ROBIN_HOOD_TRACE(this)
+ // we assume that pos always points to a valid entry, and not end().
+ auto const idx = static_cast<size_t>(pos.mKeyVals - mKeyVals);
+
+ shiftDown(idx);
+ --mNumElements;
+
+ if (*pos.mInfo) {
+ // we've backward shifted, return this again
+ return pos;
+ }
+
+ // no backward shift, return next element
+ return ++pos;
+ }
+
+ size_t erase(const key_type& key) {
+ ROBIN_HOOD_TRACE(this)
+ size_t idx{};
+ InfoType info{};
+ keyToIdx(key, &idx, &info);
+
+ // check while info matches with the source idx
+ do {
+ if (info == mInfo[idx] && WKeyEqual::operator()(key, mKeyVals[idx].getFirst())) {
+ shiftDown(idx);
+ --mNumElements;
+ return 1;
+ }
+ next(&info, &idx);
+ } while (info <= mInfo[idx]);
+
+ // nothing found to delete
+ return 0;
+ }
+
+    // Reserves space for the specified number of elements. Makes sure the old data fits.
+    // Unlike reserve(c), this always forces a rehash (so rehash(0) shrinks to fit).
+ void rehash(size_t c) {
+ // forces a reserve
+ reserve(c, true);
+ }
+
+    // Reserves space for the specified number of elements. Makes sure the old data fits.
+    // Unlike rehash(c), this never forces a rehash and never shrinks; use rehash(0) to shrink to fit.
+ void reserve(size_t c) {
+ // reserve, but don't force rehash
+ reserve(c, false);
+ }
+
+ // If possible reallocates the map to a smaller one. This frees the underlying table.
+ // Does not do anything if load_factor is too large for decreasing the table's size.
+ void compact() {
+ ROBIN_HOOD_TRACE(this)
+ auto newSize = InitialNumElements;
+ while (calcMaxNumElementsAllowed(newSize) < mNumElements && newSize != 0) {
+ newSize *= 2;
+ }
+ if (ROBIN_HOOD_UNLIKELY(newSize == 0)) {
+ throwOverflowError();
+ }
+
+ ROBIN_HOOD_LOG("newSize > mMask + 1: " << newSize << " > " << mMask << " + 1")
+
+        // only actually do anything when the new size is smaller than the old one. This
+        // prevents pointless work when compact() is called but the table cannot shrink.
+ if (newSize < mMask + 1) {
+ rehashPowerOfTwo(newSize, true);
+ }
+ }
+
+ size_type size() const noexcept { // NOLINT(modernize-use-nodiscard)
+ ROBIN_HOOD_TRACE(this)
+ return mNumElements;
+ }
+
+ size_type max_size() const noexcept { // NOLINT(modernize-use-nodiscard)
+ ROBIN_HOOD_TRACE(this)
+ return static_cast<size_type>(-1);
+ }
+
+ ROBIN_HOOD(NODISCARD) bool empty() const noexcept {
+ ROBIN_HOOD_TRACE(this)
+ return 0 == mNumElements;
+ }
+
+ float max_load_factor() const noexcept { // NOLINT(modernize-use-nodiscard)
+ ROBIN_HOOD_TRACE(this)
+ return MaxLoadFactor100 / 100.0F;
+ }
+
+    // Average number of elements per bucket. Since we allow only 1 per bucket, this equals the fraction of occupied buckets.
+ float load_factor() const noexcept { // NOLINT(modernize-use-nodiscard)
+ ROBIN_HOOD_TRACE(this)
+ return static_cast<float>(size()) / static_cast<float>(mMask + 1);
+ }
+
+ ROBIN_HOOD(NODISCARD) size_t mask() const noexcept {
+ ROBIN_HOOD_TRACE(this)
+ return mMask;
+ }
+
+ ROBIN_HOOD(NODISCARD) size_t calcMaxNumElementsAllowed(size_t maxElements) const noexcept {
+ if (ROBIN_HOOD_LIKELY(maxElements <= (std::numeric_limits<size_t>::max)() / 100)) {
+ return maxElements * MaxLoadFactor100 / 100;
+ }
+
+        // we might be a bit imprecise, but since maxElements is quite large that doesn't matter
+ return (maxElements / 100) * MaxLoadFactor100;
+ }
+
+ ROBIN_HOOD(NODISCARD) size_t calcNumBytesInfo(size_t numElements) const noexcept {
+ // we add a uint64_t, which houses the sentinel (first byte) and padding so we can load
+ // 64bit types.
+ return numElements + sizeof(uint64_t);
+ }
+
+ ROBIN_HOOD(NODISCARD)
+ size_t calcNumElementsWithBuffer(size_t numElements) const noexcept {
+ auto maxNumElementsAllowed = calcMaxNumElementsAllowed(numElements);
+ return numElements + (std::min)(maxNumElementsAllowed, (static_cast<size_t>(0xFF)));
+ }
+
+ // calculation only allowed for 2^n values
+ ROBIN_HOOD(NODISCARD) size_t calcNumBytesTotal(size_t numElements) const {
+#if ROBIN_HOOD(BITNESS) == 64
+ return numElements * sizeof(Node) + calcNumBytesInfo(numElements);
+#else
+ // make sure we're doing 64bit operations, so we are at least safe against 32bit overflows.
+ auto const ne = static_cast<uint64_t>(numElements);
+ auto const s = static_cast<uint64_t>(sizeof(Node));
+ auto const infos = static_cast<uint64_t>(calcNumBytesInfo(numElements));
+
+ auto const total64 = ne * s + infos;
+ auto const total = static_cast<size_t>(total64);
+
+ if (ROBIN_HOOD_UNLIKELY(static_cast<uint64_t>(total) != total64)) {
+ throwOverflowError();
+ }
+ return total;
+#endif
+ }
+
+private:
+ template <typename Q = mapped_type>
+ ROBIN_HOOD(NODISCARD)
+ typename std::enable_if<!std::is_void<Q>::value, bool>::type has(const value_type& e) const {
+ ROBIN_HOOD_TRACE(this)
+ auto it = find(e.first);
+ return it != end() && it->second == e.second;
+ }
+
+ template <typename Q = mapped_type>
+ ROBIN_HOOD(NODISCARD)
+ typename std::enable_if<std::is_void<Q>::value, bool>::type has(const value_type& e) const {
+ ROBIN_HOOD_TRACE(this)
+ return find(e) != end();
+ }
+
+ void reserve(size_t c, bool forceRehash) {
+ ROBIN_HOOD_TRACE(this)
+ auto const minElementsAllowed = (std::max)(c, mNumElements);
+ auto newSize = InitialNumElements;
+ while (calcMaxNumElementsAllowed(newSize) < minElementsAllowed && newSize != 0) {
+ newSize *= 2;
+ }
+ if (ROBIN_HOOD_UNLIKELY(newSize == 0)) {
+ throwOverflowError();
+ }
+
+ ROBIN_HOOD_LOG("newSize > mMask + 1: " << newSize << " > " << mMask << " + 1")
+
+        // only actually do anything when the new size is bigger than the old one. This
+        // prevents continuously allocating for each reserve() call.
+ if (forceRehash || newSize > mMask + 1) {
+ rehashPowerOfTwo(newSize, false);
+ }
+ }
+
+    // Reserves space for at least the specified number of elements.
+    // Only works if numBuckets is a power of two.
+    // Failure is reported via throwOverflowError(), not a return value.
+ void rehashPowerOfTwo(size_t numBuckets, bool forceFree) {
+ ROBIN_HOOD_TRACE(this)
+
+ Node* const oldKeyVals = mKeyVals;
+ uint8_t const* const oldInfo = mInfo;
+
+ const size_t oldMaxElementsWithBuffer = calcNumElementsWithBuffer(mMask + 1);
+
+ // resize operation: move stuff
+ initData(numBuckets);
+ if (oldMaxElementsWithBuffer > 1) {
+ for (size_t i = 0; i < oldMaxElementsWithBuffer; ++i) {
+ if (oldInfo[i] != 0) {
+ // might throw an exception, which is really bad since we are in the middle of
+ // moving stuff.
+ insert_move(std::move(oldKeyVals[i]));
+ // destroy the node but DON'T destroy the data.
+ oldKeyVals[i].~Node();
+ }
+ }
+
+ // this check is not necessary as it's guarded by the previous if, but it helps
+ // silence g++'s overeager "attempt to free a non-heap object 'map'
+ // [-Werror=free-nonheap-object]" warning.
+ if (oldKeyVals != reinterpret_cast_no_cast_align_warning<Node*>(&mMask)) {
+ // don't destroy old data: put it into the pool instead
+ if (forceFree) {
+ std::free(oldKeyVals);
+ } else {
+ DataPool::addOrFree(oldKeyVals, calcNumBytesTotal(oldMaxElementsWithBuffer));
+ }
+ }
+ }
+ }
+
+ ROBIN_HOOD(NOINLINE) void throwOverflowError() const {
+#if ROBIN_HOOD(HAS_EXCEPTIONS)
+ throw std::overflow_error("robin_hood::map overflow");
+#else
+ abort();
+#endif
+ }
+
+ template <typename OtherKey, typename... Args>
+ std::pair<iterator, bool> try_emplace_impl(OtherKey&& key, Args&&... args) {
+ ROBIN_HOOD_TRACE(this)
+ auto idxAndState = insertKeyPrepareEmptySpot(key);
+ switch (idxAndState.second) {
+ case InsertionState::key_found:
+ break;
+
+ case InsertionState::new_node:
+ ::new (static_cast<void*>(&mKeyVals[idxAndState.first])) Node(
+ *this, std::piecewise_construct, std::forward_as_tuple(std::forward<OtherKey>(key)),
+ std::forward_as_tuple(std::forward<Args>(args)...));
+ break;
+
+ case InsertionState::overwrite_node:
+ mKeyVals[idxAndState.first] = Node(*this, std::piecewise_construct,
+ std::forward_as_tuple(std::forward<OtherKey>(key)),
+ std::forward_as_tuple(std::forward<Args>(args)...));
+ break;
+
+ case InsertionState::overflow_error:
+ throwOverflowError();
+ break;
+ }
+
+ return std::make_pair(iterator(mKeyVals + idxAndState.first, mInfo + idxAndState.first),
+ InsertionState::key_found != idxAndState.second);
+ }
+
+ template <typename OtherKey, typename Mapped>
+ std::pair<iterator, bool> insertOrAssignImpl(OtherKey&& key, Mapped&& obj) {
+ ROBIN_HOOD_TRACE(this)
+ auto idxAndState = insertKeyPrepareEmptySpot(key);
+ switch (idxAndState.second) {
+ case InsertionState::key_found:
+ mKeyVals[idxAndState.first].getSecond() = std::forward<Mapped>(obj);
+ break;
+
+ case InsertionState::new_node:
+ ::new (static_cast<void*>(&mKeyVals[idxAndState.first])) Node(
+ *this, std::piecewise_construct, std::forward_as_tuple(std::forward<OtherKey>(key)),
+ std::forward_as_tuple(std::forward<Mapped>(obj)));
+ break;
+
+ case InsertionState::overwrite_node:
+ mKeyVals[idxAndState.first] = Node(*this, std::piecewise_construct,
+ std::forward_as_tuple(std::forward<OtherKey>(key)),
+ std::forward_as_tuple(std::forward<Mapped>(obj)));
+ break;
+
+ case InsertionState::overflow_error:
+ throwOverflowError();
+ break;
+ }
+
+ return std::make_pair(iterator(mKeyVals + idxAndState.first, mInfo + idxAndState.first),
+ InsertionState::key_found != idxAndState.second);
+ }
+
+ void initData(size_t max_elements) {
+ mNumElements = 0;
+ mMask = max_elements - 1;
+ mMaxNumElementsAllowed = calcMaxNumElementsAllowed(max_elements);
+
+ auto const numElementsWithBuffer = calcNumElementsWithBuffer(max_elements);
+
+ // malloc & zero mInfo. Faster than calloc everything.
+ auto const numBytesTotal = calcNumBytesTotal(numElementsWithBuffer);
+ ROBIN_HOOD_LOG("std::calloc " << numBytesTotal << " = calcNumBytesTotal("
+ << numElementsWithBuffer << ")")
+ mKeyVals = reinterpret_cast<Node*>(
+ detail::assertNotNull<std::bad_alloc>(std::malloc(numBytesTotal)));
+ mInfo = reinterpret_cast<uint8_t*>(mKeyVals + numElementsWithBuffer);
+ std::memset(mInfo, 0, numBytesTotal - numElementsWithBuffer * sizeof(Node));
+
+ // set sentinel
+ mInfo[numElementsWithBuffer] = 1;
+
+ mInfoInc = InitialInfoInc;
+ mInfoHashShift = InitialInfoHashShift;
+ }
+
+ enum class InsertionState { overflow_error, key_found, new_node, overwrite_node };
+
+    // Finds key, and if not already present prepares a spot where to put the key & value.
+ // This potentially shifts nodes out of the way, updates mInfo and number of inserted
+ // elements, so the only operation left to do is create/assign a new node at that spot.
+ template <typename OtherKey>
+ std::pair<size_t, InsertionState> insertKeyPrepareEmptySpot(OtherKey&& key) {
+ for (int i = 0; i < 256; ++i) {
+ size_t idx{};
+ InfoType info{};
+ keyToIdx(key, &idx, &info);
+ nextWhileLess(&info, &idx);
+
+ // while we potentially have a match
+ while (info == mInfo[idx]) {
+ if (WKeyEqual::operator()(key, mKeyVals[idx].getFirst())) {
+ // key already exists, do NOT insert.
+ // see http://en.cppreference.com/w/cpp/container/unordered_map/insert
+ return std::make_pair(idx, InsertionState::key_found);
+ }
+ next(&info, &idx);
+ }
+
+ // unlikely that this evaluates to true
+ if (ROBIN_HOOD_UNLIKELY(mNumElements >= mMaxNumElementsAllowed)) {
+ if (!increase_size()) {
+ return std::make_pair(size_t(0), InsertionState::overflow_error);
+ }
+ continue;
+ }
+
+ // key not found, so we are now exactly where we want to insert it.
+ auto const insertion_idx = idx;
+ auto const insertion_info = info;
+ if (ROBIN_HOOD_UNLIKELY(insertion_info + mInfoInc > 0xFF)) {
+ mMaxNumElementsAllowed = 0;
+ }
+
+ // find an empty spot
+ while (0 != mInfo[idx]) {
+ next(&info, &idx);
+ }
+
+ if (idx != insertion_idx) {
+ shiftUp(idx, insertion_idx);
+ }
+ // put at empty spot
+ mInfo[insertion_idx] = static_cast<uint8_t>(insertion_info);
+ ++mNumElements;
+ return std::make_pair(insertion_idx, idx == insertion_idx
+ ? InsertionState::new_node
+ : InsertionState::overwrite_node);
+ }
+
+ // enough attempts failed, so finally give up.
+ return std::make_pair(size_t(0), InsertionState::overflow_error);
+ }
+
+ bool try_increase_info() {
+ ROBIN_HOOD_LOG("mInfoInc=" << mInfoInc << ", numElements=" << mNumElements
+ << ", maxNumElementsAllowed="
+ << calcMaxNumElementsAllowed(mMask + 1))
+ if (mInfoInc <= 2) {
+ // need to be > 2 so that shift works (otherwise undefined behavior!)
+ return false;
+ }
+ // we got space left, try to make info smaller
+ mInfoInc = static_cast<uint8_t>(mInfoInc >> 1U);
+
+ // remove one bit of the hash, leaving more space for the distance info.
+ // This is extremely fast because we can operate on 8 bytes at once.
+ ++mInfoHashShift;
+ auto const numElementsWithBuffer = calcNumElementsWithBuffer(mMask + 1);
+
+ for (size_t i = 0; i < numElementsWithBuffer; i += 8) {
+ auto val = unaligned_load<uint64_t>(mInfo + i);
+ val = (val >> 1U) & UINT64_C(0x7f7f7f7f7f7f7f7f);
+ std::memcpy(mInfo + i, &val, sizeof(val));
+ }
+ // update sentinel, which might have been cleared out!
+ mInfo[numElementsWithBuffer] = 1;
+
+ mMaxNumElementsAllowed = calcMaxNumElementsAllowed(mMask + 1);
+ return true;
+ }
+
+ // True if resize was possible, false otherwise
+ bool increase_size() {
+ // nothing allocated yet? just allocate InitialNumElements
+ if (0 == mMask) {
+ initData(InitialNumElements);
+ return true;
+ }
+
+ auto const maxNumElementsAllowed = calcMaxNumElementsAllowed(mMask + 1);
+ if (mNumElements < maxNumElementsAllowed && try_increase_info()) {
+ return true;
+ }
+
+ ROBIN_HOOD_LOG("mNumElements=" << mNumElements << ", maxNumElementsAllowed="
+ << maxNumElementsAllowed << ", load="
+ << (static_cast<double>(mNumElements) * 100.0 /
+ (static_cast<double>(mMask) + 1)))
+
+ if (mNumElements * 2 < calcMaxNumElementsAllowed(mMask + 1)) {
+ // we have to resize, even though there would still be plenty of space left!
+            // Try to rehash instead. Delete freed memory so we don't steadily increase mem in case
+ // we have to rehash a few times
+ nextHashMultiplier();
+ rehashPowerOfTwo(mMask + 1, true);
+ } else {
+ // we've reached the capacity of the map, so the hash seems to work nice. Keep using it.
+ rehashPowerOfTwo((mMask + 1) * 2, false);
+ }
+ return true;
+ }
+
+ void nextHashMultiplier() {
+ // adding an *even* number, so that the multiplier will always stay odd. This is necessary
+ // so that the hash stays a mixing function (and thus doesn't have any information loss).
+ mHashMultiplier += UINT64_C(0xc4ceb9fe1a85ec54);
+ }
+
+ void destroy() {
+ if (0 == mMask) {
+ // don't deallocate!
+ return;
+ }
+
+ Destroyer<Self, IsFlat && std::is_trivially_destructible<Node>::value>{}
+ .nodesDoNotDeallocate(*this);
+
+ // This protection against not deleting mMask shouldn't be needed as it's sufficiently
+ // protected with the 0==mMask check, but I have this anyways because g++ 7 otherwise
+ // reports a compile error: attempt to free a non-heap object 'fm'
+ // [-Werror=free-nonheap-object]
+ if (mKeyVals != reinterpret_cast_no_cast_align_warning<Node*>(&mMask)) {
+ ROBIN_HOOD_LOG("std::free")
+ std::free(mKeyVals);
+ }
+ }
+
+ void init() noexcept {
+ mKeyVals = reinterpret_cast_no_cast_align_warning<Node*>(&mMask);
+ mInfo = reinterpret_cast<uint8_t*>(&mMask);
+ mNumElements = 0;
+ mMask = 0;
+ mMaxNumElementsAllowed = 0;
+ mInfoInc = InitialInfoInc;
+ mInfoHashShift = InitialInfoHashShift;
+ }
+
+ // members are sorted so no padding occurs
+ uint64_t mHashMultiplier = UINT64_C(0xc4ceb9fe1a85ec53); // 8 byte 8
+ Node* mKeyVals = reinterpret_cast_no_cast_align_warning<Node*>(&mMask); // 8 byte 16
+ uint8_t* mInfo = reinterpret_cast<uint8_t*>(&mMask); // 8 byte 24
+ size_t mNumElements = 0; // 8 byte 32
+ size_t mMask = 0; // 8 byte 40
+ size_t mMaxNumElementsAllowed = 0; // 8 byte 48
+ InfoType mInfoInc = InitialInfoInc; // 4 byte 52
+ InfoType mInfoHashShift = InitialInfoHashShift; // 4 byte 56
+ // 16 byte 56 if NodeAllocator
+};
+
+} // namespace detail
+
+// map
+
+template <typename Key, typename T, typename Hash = hash<Key>,
+ typename KeyEqual = std::equal_to<Key>, size_t MaxLoadFactor100 = 80>
+using unordered_flat_map = detail::Table<true, MaxLoadFactor100, Key, T, Hash, KeyEqual>;
+
+template <typename Key, typename T, typename Hash = hash<Key>,
+ typename KeyEqual = std::equal_to<Key>, size_t MaxLoadFactor100 = 80>
+using unordered_node_map = detail::Table<false, MaxLoadFactor100, Key, T, Hash, KeyEqual>;
+
+template <typename Key, typename T, typename Hash = hash<Key>,
+ typename KeyEqual = std::equal_to<Key>, size_t MaxLoadFactor100 = 80>
+using unordered_map =
+ detail::Table<sizeof(robin_hood::pair<Key, T>) <= sizeof(size_t) * 6 &&
+ std::is_nothrow_move_constructible<robin_hood::pair<Key, T>>::value &&
+ std::is_nothrow_move_assignable<robin_hood::pair<Key, T>>::value,
+ MaxLoadFactor100, Key, T, Hash, KeyEqual>;
+
+// set
+
+template <typename Key, typename Hash = hash<Key>, typename KeyEqual = std::equal_to<Key>,
+ size_t MaxLoadFactor100 = 80>
+using unordered_flat_set = detail::Table<true, MaxLoadFactor100, Key, void, Hash, KeyEqual>;
+
+template <typename Key, typename Hash = hash<Key>, typename KeyEqual = std::equal_to<Key>,
+ size_t MaxLoadFactor100 = 80>
+using unordered_node_set = detail::Table<false, MaxLoadFactor100, Key, void, Hash, KeyEqual>;
+
+template <typename Key, typename Hash = hash<Key>, typename KeyEqual = std::equal_to<Key>,
+ size_t MaxLoadFactor100 = 80>
+using unordered_set = detail::Table<sizeof(Key) <= sizeof(size_t) * 6 &&
+ std::is_nothrow_move_constructible<Key>::value &&
+ std::is_nothrow_move_assignable<Key>::value,
+ MaxLoadFactor100, Key, void, Hash, KeyEqual>;
+
+} // namespace robin_hood
+
+#endif
diff --git a/misc/benchmarks/external/ankerl/unordered_dense.h b/misc/benchmarks/external/ankerl/unordered_dense.h
new file mode 100644
index 00000000..ff902ad4
--- /dev/null
+++ b/misc/benchmarks/external/ankerl/unordered_dense.h
@@ -0,0 +1,1503 @@
+///////////////////////// ankerl::unordered_dense::{map, set} /////////////////////////
+
+// A fast & densely stored hashmap and hashset based on robin-hood backward shift deletion.
+// Version 2.0.0
+// https://github.com/martinus/unordered_dense
+//
+// Licensed under the MIT License <http://opensource.org/licenses/MIT>.
+// SPDX-License-Identifier: MIT
+// Copyright (c) 2022 Martin Leitner-Ankerl <[email protected]>
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+#ifndef ANKERL_UNORDERED_DENSE_H
+#define ANKERL_UNORDERED_DENSE_H
+
+// see https://semver.org/spec/v2.0.0.html
+#define ANKERL_UNORDERED_DENSE_VERSION_MAJOR 2 // NOLINT(cppcoreguidelines-macro-usage) incompatible API changes
+#define ANKERL_UNORDERED_DENSE_VERSION_MINOR 0 // NOLINT(cppcoreguidelines-macro-usage) backwards compatible functionality
+#define ANKERL_UNORDERED_DENSE_VERSION_PATCH 0 // NOLINT(cppcoreguidelines-macro-usage) backwards compatible bug fixes
+
+// API versioning with inline namespace, see https://www.foonathan.net/2018/11/inline-namespaces/
+#define ANKERL_UNORDERED_DENSE_VERSION_CONCAT1(major, minor, patch) v##major##_##minor##_##patch
+#define ANKERL_UNORDERED_DENSE_VERSION_CONCAT(major, minor, patch) ANKERL_UNORDERED_DENSE_VERSION_CONCAT1(major, minor, patch)
+#define ANKERL_UNORDERED_DENSE_NAMESPACE \
+ ANKERL_UNORDERED_DENSE_VERSION_CONCAT( \
+ ANKERL_UNORDERED_DENSE_VERSION_MAJOR, ANKERL_UNORDERED_DENSE_VERSION_MINOR, ANKERL_UNORDERED_DENSE_VERSION_PATCH)
+
+#if defined(_MSVC_LANG)
+# define ANKERL_UNORDERED_DENSE_CPP_VERSION _MSVC_LANG
+#else
+# define ANKERL_UNORDERED_DENSE_CPP_VERSION __cplusplus
+#endif
+
+#if defined(__GNUC__)
+// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
+# define ANKERL_UNORDERED_DENSE_PACK(decl) decl __attribute__((__packed__))
+#elif defined(_MSC_VER)
+// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
+# define ANKERL_UNORDERED_DENSE_PACK(decl) __pragma(pack(push, 1)) decl __pragma(pack(pop))
+#endif
+
+#if ANKERL_UNORDERED_DENSE_CPP_VERSION < 201703L
+# error ankerl::unordered_dense requires C++17 or higher
+#else
+# include <array> // for array
+# include <cstdint> // for uint64_t, uint32_t, uint8_t, UINT64_C
+# include <cstring> // for size_t, memcpy, memset
+# include <functional> // for equal_to, hash
+# include <initializer_list> // for initializer_list
+# include <iterator> // for pair, distance
+# include <limits> // for numeric_limits
+# include <memory> // for allocator, allocator_traits, shared_ptr
+# include <stdexcept> // for out_of_range
+# include <string> // for basic_string
+# include <string_view> // for basic_string_view, hash
+# include <tuple> // for forward_as_tuple
+# include <type_traits> // for enable_if_t, declval, conditional_t, ena...
+# include <utility> // for forward, exchange, pair, as_const, piece...
+# include <vector> // for vector
+
+# define ANKERL_UNORDERED_DENSE_PMR 0 // NOLINT(cppcoreguidelines-macro-usage)
+# if defined(__has_include)
+# if __has_include(<memory_resource>)
+# undef ANKERL_UNORDERED_DENSE_PMR
+# define ANKERL_UNORDERED_DENSE_PMR 1 // NOLINT(cppcoreguidelines-macro-usage)
+# include <memory_resource> // for polymorphic_allocator
+# endif
+# endif
+
+# if defined(_MSC_VER) && defined(_M_X64)
+# include <intrin.h>
+# pragma intrinsic(_umul128)
+# endif
+
+# if defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__clang__)
+# define ANKERL_UNORDERED_DENSE_LIKELY(x) __builtin_expect(x, 1) // NOLINT(cppcoreguidelines-macro-usage)
+# define ANKERL_UNORDERED_DENSE_UNLIKELY(x) __builtin_expect(x, 0) // NOLINT(cppcoreguidelines-macro-usage)
+# else
+# define ANKERL_UNORDERED_DENSE_LIKELY(x) (x) // NOLINT(cppcoreguidelines-macro-usage)
+# define ANKERL_UNORDERED_DENSE_UNLIKELY(x) (x) // NOLINT(cppcoreguidelines-macro-usage)
+# endif
+
+namespace ankerl::unordered_dense {
+inline namespace ANKERL_UNORDERED_DENSE_NAMESPACE {
+
+// hash ///////////////////////////////////////////////////////////////////////
+
+// This is a stripped-down implementation of wyhash: https://github.com/wangyi-fudan/wyhash
+// No big-endian support (because different values on different machines don't matter),
+// hardcodes the seed and the secret, reformats the code, and applies clang-tidy fixes.
+namespace detail::wyhash {
+
+static inline void mum(uint64_t* a, uint64_t* b) {
+# if defined(__SIZEOF_INT128__)
+ __uint128_t r = *a;
+ r *= *b;
+ *a = static_cast<uint64_t>(r);
+ *b = static_cast<uint64_t>(r >> 64U);
+# elif defined(_MSC_VER) && defined(_M_X64)
+ *a = _umul128(*a, *b, b);
+# else
+ uint64_t ha = *a >> 32U;
+ uint64_t hb = *b >> 32U;
+ uint64_t la = static_cast<uint32_t>(*a);
+ uint64_t lb = static_cast<uint32_t>(*b);
+ uint64_t hi{};
+ uint64_t lo{};
+ uint64_t rh = ha * hb;
+ uint64_t rm0 = ha * lb;
+ uint64_t rm1 = hb * la;
+ uint64_t rl = la * lb;
+ uint64_t t = rl + (rm0 << 32U);
+ auto c = static_cast<uint64_t>(t < rl);
+ lo = t + (rm1 << 32U);
+ c += static_cast<uint64_t>(lo < t);
+ hi = rh + (rm0 >> 32U) + (rm1 >> 32U) + c;
+ *a = lo;
+ *b = hi;
+# endif
+}
+
+// multiply and xor mix function, aka MUM
+[[nodiscard]] static inline auto mix(uint64_t a, uint64_t b) -> uint64_t {
+ mum(&a, &b);
+ return a ^ b;
+}
+
+// read functions. WARNING: we don't care about endianness, so results are different on big endian!
+[[nodiscard]] static inline auto r8(const uint8_t* p) -> uint64_t {
+ uint64_t v{};
+ std::memcpy(&v, p, 8U);
+ return v;
+}
+
+[[nodiscard]] static inline auto r4(const uint8_t* p) -> uint64_t {
+ uint32_t v{};
+ std::memcpy(&v, p, 4);
+ return v;
+}
+
+// reads 1, 2, or 3 bytes
+[[nodiscard]] static inline auto r3(const uint8_t* p, size_t k) -> uint64_t {
+ return (static_cast<uint64_t>(p[0]) << 16U) | (static_cast<uint64_t>(p[k >> 1U]) << 8U) | p[k - 1];
+}
+
+[[maybe_unused]] [[nodiscard]] static inline auto hash(void const* key, size_t len) -> uint64_t {
+ static constexpr auto secret = std::array{UINT64_C(0xa0761d6478bd642f),
+ UINT64_C(0xe7037ed1a0b428db),
+ UINT64_C(0x8ebc6af09c88c6e3),
+ UINT64_C(0x589965cc75374cc3)};
+
+ auto const* p = static_cast<uint8_t const*>(key);
+ uint64_t seed = secret[0];
+ uint64_t a{};
+ uint64_t b{};
+ if (ANKERL_UNORDERED_DENSE_LIKELY(len <= 16)) {
+ if (ANKERL_UNORDERED_DENSE_LIKELY(len >= 4)) {
+ a = (r4(p) << 32U) | r4(p + ((len >> 3U) << 2U));
+ b = (r4(p + len - 4) << 32U) | r4(p + len - 4 - ((len >> 3U) << 2U));
+ } else if (ANKERL_UNORDERED_DENSE_LIKELY(len > 0)) {
+ a = r3(p, len);
+ b = 0;
+ } else {
+ a = 0;
+ b = 0;
+ }
+ } else {
+ size_t i = len;
+ if (ANKERL_UNORDERED_DENSE_UNLIKELY(i > 48)) {
+ uint64_t see1 = seed;
+ uint64_t see2 = seed;
+ do {
+ seed = mix(r8(p) ^ secret[1], r8(p + 8) ^ seed);
+ see1 = mix(r8(p + 16) ^ secret[2], r8(p + 24) ^ see1);
+ see2 = mix(r8(p + 32) ^ secret[3], r8(p + 40) ^ see2);
+ p += 48;
+ i -= 48;
+ } while (ANKERL_UNORDERED_DENSE_LIKELY(i > 48));
+ seed ^= see1 ^ see2;
+ }
+ while (ANKERL_UNORDERED_DENSE_UNLIKELY(i > 16)) {
+ seed = mix(r8(p) ^ secret[1], r8(p + 8) ^ seed);
+ i -= 16;
+ p += 16;
+ }
+ a = r8(p + i - 16);
+ b = r8(p + i - 8);
+ }
+
+ return mix(secret[1] ^ len, mix(a ^ secret[1], b ^ seed));
+}
+
+[[nodiscard]] static inline auto hash(uint64_t x) -> uint64_t {
+ return detail::wyhash::mix(x, UINT64_C(0x9E3779B97F4A7C15));
+}
+
+} // namespace detail::wyhash
+
+template <typename T, typename Enable = void>
+struct hash {
+ auto operator()(T const& obj) const noexcept(noexcept(std::declval<std::hash<T>>().operator()(std::declval<T const&>())))
+ -> uint64_t {
+ return std::hash<T>{}(obj);
+ }
+};
+
+template <typename CharT>
+struct hash<std::basic_string<CharT>> {
+ using is_avalanching = void;
+ auto operator()(std::basic_string<CharT> const& str) const noexcept -> uint64_t {
+ return detail::wyhash::hash(str.data(), sizeof(CharT) * str.size());
+ }
+};
+
+template <typename CharT>
+struct hash<std::basic_string_view<CharT>> {
+ using is_avalanching = void;
+ auto operator()(std::basic_string_view<CharT> const& sv) const noexcept -> uint64_t {
+ return detail::wyhash::hash(sv.data(), sizeof(CharT) * sv.size());
+ }
+};
+
+template <class T>
+struct hash<T*> {
+ using is_avalanching = void;
+ auto operator()(T* ptr) const noexcept -> uint64_t {
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-reinterpret-cast)
+ return detail::wyhash::hash(reinterpret_cast<uintptr_t>(ptr));
+ }
+};
+
+template <class T>
+struct hash<std::unique_ptr<T>> {
+ using is_avalanching = void;
+ auto operator()(std::unique_ptr<T> const& ptr) const noexcept -> uint64_t {
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-reinterpret-cast)
+ return detail::wyhash::hash(reinterpret_cast<uintptr_t>(ptr.get()));
+ }
+};
+
+template <class T>
+struct hash<std::shared_ptr<T>> {
+ using is_avalanching = void;
+ auto operator()(std::shared_ptr<T> const& ptr) const noexcept -> uint64_t {
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-reinterpret-cast)
+ return detail::wyhash::hash(reinterpret_cast<uintptr_t>(ptr.get()));
+ }
+};
+
+template <typename Enum>
+struct hash<Enum, typename std::enable_if<std::is_enum<Enum>::value>::type> {
+ using is_avalanching = void;
+ auto operator()(Enum e) const noexcept -> uint64_t {
+ using underlying = typename std::underlying_type_t<Enum>;
+ return detail::wyhash::hash(static_cast<underlying>(e));
+ }
+};
+
+// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
+# define ANKERL_UNORDERED_DENSE_HASH_STATICCAST(T) \
+ template <> \
+ struct hash<T> { \
+ using is_avalanching = void; \
+ auto operator()(T const& obj) const noexcept -> uint64_t { \
+ return detail::wyhash::hash(static_cast<uint64_t>(obj)); \
+ } \
+ }
+
// GCC warns (-Wuseless-cast) when the static_cast in the macro is a no-op,
// e.g. for unsigned long long -> uint64_t; suppress it around the expansions.
#    if defined(__GNUC__) && !defined(__clang__)
#        pragma GCC diagnostic push
#        pragma GCC diagnostic ignored "-Wuseless-cast"
#    endif
// Specializations for all standard integral/character types,
// see https://en.cppreference.com/w/cpp/utility/hash
ANKERL_UNORDERED_DENSE_HASH_STATICCAST(bool);
ANKERL_UNORDERED_DENSE_HASH_STATICCAST(char);
ANKERL_UNORDERED_DENSE_HASH_STATICCAST(signed char);
ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned char);
#    if ANKERL_UNORDERED_DENSE_CPP_VERSION >= 202002L
ANKERL_UNORDERED_DENSE_HASH_STATICCAST(char8_t);
#    endif
ANKERL_UNORDERED_DENSE_HASH_STATICCAST(char16_t);
ANKERL_UNORDERED_DENSE_HASH_STATICCAST(char32_t);
ANKERL_UNORDERED_DENSE_HASH_STATICCAST(wchar_t);
ANKERL_UNORDERED_DENSE_HASH_STATICCAST(short);
ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned short);
ANKERL_UNORDERED_DENSE_HASH_STATICCAST(int);
ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned int);
ANKERL_UNORDERED_DENSE_HASH_STATICCAST(long);
ANKERL_UNORDERED_DENSE_HASH_STATICCAST(long long);
ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned long);
ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned long long);

#    if defined(__GNUC__) && !defined(__clang__)
#        pragma GCC diagnostic pop
#    endif
+
+// bucket_type //////////////////////////////////////////////////////////
+
namespace bucket_type {

// Default bucket: 32bit value index, so the table can hold up to 2^32 elements.
struct standard {
    static constexpr uint32_t dist_inc = 1U << 8U;             // skip 1 byte fingerprint
    static constexpr uint32_t fingerprint_mask = dist_inc - 1; // mask for 1 byte of fingerprint

    uint32_t m_dist_and_fingerprint; // upper 3 byte: distance to original bucket. lower byte: fingerprint from hash
    uint32_t m_value_idx;            // index into the m_values vector.
};

// Packed variant with a full size_t value index for tables beyond 2^32 entries;
// packing keeps the bucket small despite the mixed-width members.
ANKERL_UNORDERED_DENSE_PACK(struct big {
    static constexpr uint32_t dist_inc = 1U << 8U;             // skip 1 byte fingerprint
    static constexpr uint32_t fingerprint_mask = dist_inc - 1; // mask for 1 byte of fingerprint

    uint32_t m_dist_and_fingerprint; // upper 3 byte: distance to original bucket. lower byte: fingerprint from hash
    size_t m_value_idx;              // index into the m_values vector.
});

} // namespace bucket_type
+
+namespace detail {
+
// Detection idiom (std::experimental::is_detected): used below to probe types
// for optional nested typedefs without hard errors.
struct nonesuch {};

// Primary template: Op<Args...> is ill-formed -> false_type / Default.
template <class Default, class AlwaysVoid, template <class...> class Op, class... Args>
struct detector {
    using value_t = std::false_type;
    using type = Default;
};

// Specialization chosen via SFINAE when Op<Args...> is well-formed.
template <class Default, template <class...> class Op, class... Args>
struct detector<Default, std::void_t<Op<Args...>>, Op, Args...> {
    using value_t = std::true_type;
    using type = Op<Args...>;
};

template <template <class...> class Op, class... Args>
using is_detected = typename detail::detector<detail::nonesuch, void, Op, Args...>::value_t;

template <template <class...> class Op, class... Args>
constexpr bool is_detected_v = is_detected<Op, Args...>::value;

// Probes used with is_detected_v:
template <typename T>
using detect_avalanching = typename T::is_avalanching;

template <typename T>
using detect_is_transparent = typename T::is_transparent;

template <typename T>
using detect_iterator = typename T::iterator;

// enable_if helpers

// Map vs set: a set is a table with T == void.
template <typename Mapped>
constexpr bool is_map_v = !std::is_void_v<Mapped>;

// Heterogeneous lookup requires BOTH hash and equality to be transparent.
template <typename Hash, typename KeyEqual>
constexpr bool is_transparent_v = is_detected_v<detect_is_transparent, Hash>&& is_detected_v<detect_is_transparent, KeyEqual>;

template <typename From, typename To1, typename To2>
constexpr bool is_neither_convertible_v = !std::is_convertible_v<From, To1> && !std::is_convertible_v<From, To2>;
+
// This is it, the table. Doubles as map and set, and uses `void` for T when its used as a set.
template <class Key,
          class T, // when void, treat it as a set.
          class Hash,
          class KeyEqual,
          class AllocatorOrContainer,
          class Bucket>
class table {
public:
    // AllocatorOrContainer is used directly as the value container when it has
    // a nested `iterator` typedef; otherwise it is treated as an allocator for
    // a std::vector of Key (set) or std::pair<Key, T> (map).
    using value_container_type = std::conditional_t<
        is_detected_v<detect_iterator, AllocatorOrContainer>,
        AllocatorOrContainer,
        typename std::vector<typename std::conditional_t<std::is_void_v<T>, Key, std::pair<Key, T>>, AllocatorOrContainer>>;

private:
    // Rebind the value allocator so buckets share the same allocation source.
    using bucket_alloc =
        typename std::allocator_traits<typename value_container_type::allocator_type>::template rebind_alloc<Bucket>;
    using bucket_alloc_traits = std::allocator_traits<bucket_alloc>;

    static constexpr uint8_t initial_shifts = 64 - 3; // 2^(64-m_shift) number of buckets
    static constexpr float default_max_load_factor = 0.8F;

public:
    using key_type = Key;
    using mapped_type = T;
    using value_type = typename value_container_type::value_type;
    using size_type = typename value_container_type::size_type;
    using difference_type = typename value_container_type::difference_type;
    using hasher = Hash;
    using key_equal = KeyEqual;
    using allocator_type = typename value_container_type::allocator_type;
    using reference = typename value_container_type::reference;
    using const_reference = typename value_container_type::const_reference;
    using pointer = typename value_container_type::pointer;
    using const_pointer = typename value_container_type::const_pointer;
    using iterator = typename value_container_type::iterator;
    using const_iterator = typename value_container_type::const_iterator;
    using bucket_type = Bucket;

private:
    // Widths are taken from the Bucket type (uint32_t for standard, size_t for big).
    using value_idx_type = decltype(Bucket::m_value_idx);
    using dist_and_fingerprint_type = decltype(Bucket::m_dist_and_fingerprint);

    static_assert(std::is_trivially_destructible_v<Bucket>, "assert there's no need to call destructor / std::destroy");
    static_assert(std::is_trivially_copyable_v<Bucket>, "assert we can just memset / memcpy");

    value_container_type m_values{}; // Contains all the key-value pairs in one densely stored container. No holes.
    typename std::allocator_traits<bucket_alloc>::pointer m_buckets{};
    size_t m_num_buckets = 0;
    size_t m_max_bucket_capacity = 0; // size() above this triggers increase_size()
    float m_max_load_factor = default_max_load_factor;
    Hash m_hash{};
    KeyEqual m_equal{};
    uint8_t m_shifts = initial_shifts; // bucket index = hash >> m_shifts
+
    // Advances a bucket index by one, wrapping to 0 at the end of the bucket
    // array (open addressing treats the bucket array as circular).
    [[nodiscard]] auto next(value_idx_type bucket_idx) const -> value_idx_type {
        return ANKERL_UNORDERED_DENSE_UNLIKELY(bucket_idx + 1U == m_num_buckets)
                   ? 0
                   : static_cast<value_idx_type>(bucket_idx + 1U);
    }

    // Helper to access bucket through pointer types
    [[nodiscard]] static constexpr auto at(typename std::allocator_traits<bucket_alloc>::pointer bucket_ptr, size_t offset)
        -> Bucket& {
        return *(bucket_ptr + static_cast<typename std::allocator_traits<bucket_alloc>::difference_type>(offset));
    }
+
    // use the dist_inc and dist_dec functions so that uint16_t types work without warning
    [[nodiscard]] static constexpr auto dist_inc(dist_and_fingerprint_type x) -> dist_and_fingerprint_type {
        return static_cast<dist_and_fingerprint_type>(x + Bucket::dist_inc);
    }

    [[nodiscard]] static constexpr auto dist_dec(dist_and_fingerprint_type x) -> dist_and_fingerprint_type {
        return static_cast<dist_and_fingerprint_type>(x - Bucket::dist_inc);
    }

    // The goal of mixed_hash is to always produce a high quality 64bit hash.
    template <typename K>
    [[nodiscard]] constexpr auto mixed_hash(K const& key) const -> uint64_t {
        if constexpr (is_detected_v<detect_avalanching, Hash>) {
            // we know that the hash is good because is_avalanching.
            if constexpr (sizeof(decltype(m_hash(key))) < sizeof(uint64_t)) {
                // 32bit hash and is_avalanching => multiply with a constant to avalanche bits upwards
                return m_hash(key) * UINT64_C(0x9ddfea08eb382d69);
            } else {
                // 64bit and is_avalanching => only use the hash itself.
                return m_hash(key);
            }
        } else {
            // not is_avalanching => apply wyhash
            return wyhash::hash(m_hash(key));
        }
    }

    // Initial probe state: distance 1 plus the low fingerprint byte of the hash.
    // A stored value of 0 therefore always means "empty bucket".
    [[nodiscard]] constexpr auto dist_and_fingerprint_from_hash(uint64_t hash) const -> dist_and_fingerprint_type {
        return Bucket::dist_inc | (static_cast<dist_and_fingerprint_type>(hash) & Bucket::fingerprint_mask);
    }

    // Home bucket: top (64 - m_shifts) bits of the hash.
    [[nodiscard]] constexpr auto bucket_idx_from_hash(uint64_t hash) const -> value_idx_type {
        return static_cast<value_idx_type>(hash >> m_shifts);
    }
+
    // Extracts the key from a stored value: the value itself for a set,
    // pair.first for a map.
    [[nodiscard]] static constexpr auto get_key(value_type const& vt) -> key_type const& {
        if constexpr (std::is_void_v<T>) {
            return vt;
        } else {
            return vt.first;
        }
    }

    // Probes from key's home bucket until reaching the first bucket whose
    // stored dist_and_fingerprint is <= ours; that is where `key` would live
    // (robin-hood invariant: buckets are ordered by distance).
    template <typename K>
    [[nodiscard]] auto next_while_less(K const& key) const -> Bucket {
        auto hash = mixed_hash(key);
        auto dist_and_fingerprint = dist_and_fingerprint_from_hash(hash);
        auto bucket_idx = bucket_idx_from_hash(hash);

        while (dist_and_fingerprint < at(m_buckets, bucket_idx).m_dist_and_fingerprint) {
            dist_and_fingerprint = dist_inc(dist_and_fingerprint);
            bucket_idx = next(bucket_idx);
        }
        return {dist_and_fingerprint, bucket_idx};
    }

    // Inserts `bucket` at `place`, displacing occupants one slot forward
    // (each displaced bucket's distance grows by one) until an empty slot.
    void place_and_shift_up(Bucket bucket, value_idx_type place) {
        while (0 != at(m_buckets, place).m_dist_and_fingerprint) {
            bucket = std::exchange(at(m_buckets, place), bucket);
            bucket.m_dist_and_fingerprint = dist_inc(bucket.m_dist_and_fingerprint);
            place = next(place);
        }
        at(m_buckets, place) = bucket;
    }

    // 2^(64-shifts) buckets, clamped to the maximum addressable count.
    [[nodiscard]] static constexpr auto calc_num_buckets(uint8_t shifts) -> size_t {
        return std::min(max_bucket_count(), size_t{1} << (64U - shifts));
    }

    // Smallest shift (i.e. largest table) whose capacity at the current
    // max_load_factor can hold `s` elements.
    [[nodiscard]] constexpr auto calc_shifts_for_size(size_t s) const -> uint8_t {
        auto shifts = initial_shifts;
        while (shifts > 0 && static_cast<size_t>(static_cast<float>(calc_num_buckets(shifts)) * max_load_factor()) < s) {
            --shifts;
        }
        return shifts;
    }
+
    // assumes m_values has data, m_buckets=m_buckets_end=nullptr, m_shifts is INITIAL_SHIFTS
    void copy_buckets(table const& other) {
        if (!empty()) {
            m_shifts = other.m_shifts;
            allocate_buckets_from_shift();
            // Bucket is trivially copyable (static_assert above), so memcpy is fine.
            std::memcpy(m_buckets, other.m_buckets, sizeof(Bucket) * bucket_count());
        }
    }

    /**
     * True when no element can be added any more without increasing the size
     */
    [[nodiscard]] auto is_full() const -> bool {
        return size() >= m_max_bucket_capacity;
    }

    // Frees the bucket array (if any) and resets bucket bookkeeping to empty.
    void deallocate_buckets() {
        auto ba = bucket_alloc(m_values.get_allocator());
        if (nullptr != m_buckets) {
            bucket_alloc_traits::deallocate(ba, m_buckets, bucket_count());
        }
        m_buckets = nullptr;
        m_num_buckets = 0;
        m_max_bucket_capacity = 0;
    }

    // Allocates a bucket array sized from m_shifts and recomputes the capacity
    // threshold. Buckets are NOT zeroed here; callers clear or fill them.
    void allocate_buckets_from_shift() {
        auto ba = bucket_alloc(m_values.get_allocator());
        m_num_buckets = calc_num_buckets(m_shifts);
        m_buckets = bucket_alloc_traits::allocate(ba, m_num_buckets);
        if (m_num_buckets == max_bucket_count()) {
            // reached the maximum, make sure we can use each bucket
            m_max_bucket_capacity = max_bucket_count();
        } else {
            m_max_bucket_capacity = static_cast<value_idx_type>(static_cast<float>(m_num_buckets) * max_load_factor());
        }
    }

    // Zeroes all buckets; dist_and_fingerprint == 0 marks a bucket as empty.
    void clear_buckets() {
        if (m_buckets != nullptr) {
            std::memset(&*m_buckets, 0, sizeof(Bucket) * bucket_count());
        }
    }
+
    // Rebuilds the entire bucket array from m_values (used after rehashing).
    // Requires all keys in m_values to be unique.
    void clear_and_fill_buckets_from_values() {
        clear_buckets();
        for (value_idx_type value_idx = 0, end_idx = static_cast<value_idx_type>(m_values.size()); value_idx < end_idx;
             ++value_idx) {
            auto const& key = get_key(m_values[value_idx]);
            auto [dist_and_fingerprint, bucket] = next_while_less(key);

            // we know for certain that key has not yet been inserted, so no need to check it.
            place_and_shift_up({dist_and_fingerprint, value_idx}, bucket);
        }
    }

    // Doubles the bucket count (one less shift) and rehashes all elements.
    void increase_size() {
        if (ANKERL_UNORDERED_DENSE_UNLIKELY(m_max_bucket_capacity == max_bucket_count())) {
            throw std::overflow_error("ankerl::unordered_dense: reached max bucket size, cannot increase size");
        }
        --m_shifts;
        deallocate_buckets();
        allocate_buckets_from_shift();
        clear_and_fill_buckets_from_values();
    }

    // Removes the element referenced by bucket_idx using backward-shift
    // deletion (no tombstones), then keeps m_values dense by moving the last
    // value into the freed slot.
    void do_erase(value_idx_type bucket_idx) {
        auto const value_idx_to_remove = at(m_buckets, bucket_idx).m_value_idx;

        // shift down until either empty or an element with correct spot is found
        auto next_bucket_idx = next(bucket_idx);
        while (at(m_buckets, next_bucket_idx).m_dist_and_fingerprint >= Bucket::dist_inc * 2) {
            at(m_buckets, bucket_idx) = {dist_dec(at(m_buckets, next_bucket_idx).m_dist_and_fingerprint),
                                         at(m_buckets, next_bucket_idx).m_value_idx};
            bucket_idx = std::exchange(next_bucket_idx, next(next_bucket_idx));
        }
        at(m_buckets, bucket_idx) = {};

        // update m_values
        if (value_idx_to_remove != m_values.size() - 1) {
            // no luck, we'll have to replace the value with the last one and update the index accordingly
            auto& val = m_values[value_idx_to_remove];
            val = std::move(m_values.back());

            // update the values_idx of the moved entry. No need to play the info game, just look until we find the values_idx
            auto mh = mixed_hash(get_key(val));
            bucket_idx = bucket_idx_from_hash(mh);

            auto const values_idx_back = static_cast<value_idx_type>(m_values.size() - 1);
            while (values_idx_back != at(m_buckets, bucket_idx).m_value_idx) {
                bucket_idx = next(bucket_idx);
            }
            at(m_buckets, bucket_idx).m_value_idx = value_idx_to_remove;
        }
        m_values.pop_back();
    }
+
    // Erases the element with the given key (heterogeneous-friendly).
    // Returns the number of elements removed (0 or 1).
    template <typename K>
    auto do_erase_key(K&& key) -> size_t {
        if (empty()) {
            return 0;
        }

        auto [dist_and_fingerprint, bucket_idx] = next_while_less(key);

        // Walk the run of equal dist_and_fingerprint entries looking for a key match.
        while (dist_and_fingerprint == at(m_buckets, bucket_idx).m_dist_and_fingerprint &&
               !m_equal(key, get_key(m_values[at(m_buckets, bucket_idx).m_value_idx]))) {
            dist_and_fingerprint = dist_inc(dist_and_fingerprint);
            bucket_idx = next(bucket_idx);
        }

        if (dist_and_fingerprint != at(m_buckets, bucket_idx).m_dist_and_fingerprint) {
            return 0;
        }
        do_erase(bucket_idx);
        return 1;
    }

    // insert_or_assign backend: try_emplace, then overwrite the mapped value
    // when the key already existed.
    template <class K, class M>
    auto do_insert_or_assign(K&& key, M&& mapped) -> std::pair<iterator, bool> {
        auto it_isinserted = try_emplace(std::forward<K>(key), std::forward<M>(mapped));
        if (!it_isinserted.second) {
            it_isinserted.first->second = std::forward<M>(mapped);
        }
        return it_isinserted;
    }
+
    // Constructs a new map entry at the probe position found by do_try_emplace.
    template <typename K, typename... Args>
    auto do_place_element(dist_and_fingerprint_type dist_and_fingerprint, value_idx_type bucket_idx, K&& key, Args&&... args)
        -> std::pair<iterator, bool> {

        // emplace the new value. If that throws an exception, no harm done; index is still in a valid state
        m_values.emplace_back(std::piecewise_construct,
                              std::forward_as_tuple(std::forward<K>(key)),
                              std::forward_as_tuple(std::forward<Args>(args)...));

        // place element and shift up until we find an empty spot
        auto value_idx = static_cast<value_idx_type>(m_values.size() - 1);
        place_and_shift_up({dist_and_fingerprint, value_idx}, bucket_idx);
        return {begin() + static_cast<difference_type>(value_idx), true};
    }

    // try_emplace backend (map-only; note the `.first` access below). Does not
    // construct the mapped value when the key is already present.
    template <typename K, typename... Args>
    auto do_try_emplace(K&& key, Args&&... args) -> std::pair<iterator, bool> {
        if (ANKERL_UNORDERED_DENSE_UNLIKELY(is_full())) {
            increase_size();
        }

        auto hash = mixed_hash(key);
        auto dist_and_fingerprint = dist_and_fingerprint_from_hash(hash);
        auto bucket_idx = bucket_idx_from_hash(hash);

        while (true) {
            auto* bucket = &at(m_buckets, bucket_idx);
            if (dist_and_fingerprint == bucket->m_dist_and_fingerprint) {
                if (m_equal(key, m_values[bucket->m_value_idx].first)) {
                    return {begin() + static_cast<difference_type>(bucket->m_value_idx), false};
                }
            } else if (dist_and_fingerprint > bucket->m_dist_and_fingerprint) {
                // Passed the run where the key could be: it's new, insert here.
                return do_place_element(dist_and_fingerprint, bucket_idx, std::forward<K>(key), std::forward<Args>(args)...);
            }
            dist_and_fingerprint = dist_inc(dist_and_fingerprint);
            bucket_idx = next(bucket_idx);
        }
    }
+
    // Core lookup. Probes from the home bucket; stops as soon as the stored
    // dist_and_fingerprint drops below ours (robin-hood ordering guarantees
    // the key cannot appear later).
    template <typename K>
    auto do_find(K const& key) -> iterator {
        if (ANKERL_UNORDERED_DENSE_UNLIKELY(empty())) {
            return end();
        }

        auto mh = mixed_hash(key);
        auto dist_and_fingerprint = dist_and_fingerprint_from_hash(mh);
        auto bucket_idx = bucket_idx_from_hash(mh);
        auto* bucket = &at(m_buckets, bucket_idx);

        // unrolled loop. *Always* check a few directly, then enter the loop. This is faster.
        if (dist_and_fingerprint == bucket->m_dist_and_fingerprint && m_equal(key, get_key(m_values[bucket->m_value_idx]))) {
            return begin() + static_cast<difference_type>(bucket->m_value_idx);
        }
        dist_and_fingerprint = dist_inc(dist_and_fingerprint);
        bucket_idx = next(bucket_idx);
        bucket = &at(m_buckets, bucket_idx);

        if (dist_and_fingerprint == bucket->m_dist_and_fingerprint && m_equal(key, get_key(m_values[bucket->m_value_idx]))) {
            return begin() + static_cast<difference_type>(bucket->m_value_idx);
        }
        dist_and_fingerprint = dist_inc(dist_and_fingerprint);
        bucket_idx = next(bucket_idx);
        bucket = &at(m_buckets, bucket_idx);

        while (true) {
            if (dist_and_fingerprint == bucket->m_dist_and_fingerprint) {
                if (m_equal(key, get_key(m_values[bucket->m_value_idx]))) {
                    return begin() + static_cast<difference_type>(bucket->m_value_idx);
                }
            } else if (dist_and_fingerprint > bucket->m_dist_and_fingerprint) {
                return end();
            }
            dist_and_fingerprint = dist_inc(dist_and_fingerprint);
            bucket_idx = next(bucket_idx);
            bucket = &at(m_buckets, bucket_idx);
        }
    }

    // const overload forwards to the non-const implementation; no mutation occurs.
    template <typename K>
    auto do_find(K const& key) const -> const_iterator {
        return const_cast<table*>(this)->do_find(key); // NOLINT(cppcoreguidelines-pro-type-const-cast)
    }

    // at() backend (map-only): throws std::out_of_range when the key is absent.
    template <typename K, typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
    auto do_at(K const& key) -> Q& {
        if (auto it = find(key); end() != it) {
            return it->second;
        }
        throw std::out_of_range("ankerl::unordered_dense::map::at(): key not found");
    }

    template <typename K, typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
    auto do_at(K const& key) const -> Q const& {
        return const_cast<table*>(this)->at(key); // NOLINT(cppcoreguidelines-pro-type-const-cast)
    }
+
public:
    table()
        : table(0) {}

    // bucket_count is ignored: buckets are allocated lazily on first insert.
    explicit table(size_t /*bucket_count*/,
                   Hash const& hash = Hash(),
                   KeyEqual const& equal = KeyEqual(),
                   allocator_type const& alloc_or_container = allocator_type())
        : m_values(alloc_or_container)
        , m_hash(hash)
        , m_equal(equal) {}

    table(size_t bucket_count, allocator_type const& alloc)
        : table(bucket_count, Hash(), KeyEqual(), alloc) {}

    table(size_t bucket_count, Hash const& hash, allocator_type const& alloc)
        : table(bucket_count, hash, KeyEqual(), alloc) {}

    explicit table(allocator_type const& alloc)
        : table(0, Hash(), KeyEqual(), alloc) {}

    // Range constructors: delegate, then bulk-insert [first, last).
    template <class InputIt>
    table(InputIt first,
          InputIt last,
          size_type bucket_count = 0,
          Hash const& hash = Hash(),
          KeyEqual const& equal = KeyEqual(),
          allocator_type const& alloc = allocator_type())
        : table(bucket_count, hash, equal, alloc) {
        insert(first, last);
    }

    template <class InputIt>
    table(InputIt first, InputIt last, size_type bucket_count, allocator_type const& alloc)
        : table(first, last, bucket_count, Hash(), KeyEqual(), alloc) {}

    template <class InputIt>
    table(InputIt first, InputIt last, size_type bucket_count, Hash const& hash, allocator_type const& alloc)
        : table(first, last, bucket_count, hash, KeyEqual(), alloc) {}

    table(table const& other)
        : table(other, other.m_values.get_allocator()) {}

    table(table const& other, allocator_type const& alloc)
        : m_values(other.m_values, alloc)
        , m_max_load_factor(other.m_max_load_factor)
        , m_hash(other.m_hash)
        , m_equal(other.m_equal) {
        copy_buckets(other);
    }

    // NOTE: std::move(other) is only a cast here; reading other's allocator in
    // the delegated call happens before anything is actually moved from.
    table(table&& other) noexcept
        : table(std::move(other), other.m_values.get_allocator()) {}

    // Steals all of other's state, leaving other empty and in its
    // default-constructed configuration.
    table(table&& other, allocator_type const& alloc) noexcept
        : m_values(std::move(other.m_values), alloc)
        , m_buckets(std::exchange(other.m_buckets, nullptr))
        , m_num_buckets(std::exchange(other.m_num_buckets, 0))
        , m_max_bucket_capacity(std::exchange(other.m_max_bucket_capacity, 0))
        , m_max_load_factor(std::exchange(other.m_max_load_factor, default_max_load_factor))
        , m_hash(std::exchange(other.m_hash, {}))
        , m_equal(std::exchange(other.m_equal, {}))
        , m_shifts(std::exchange(other.m_shifts, initial_shifts)) {
        other.m_values.clear();
    }

    table(std::initializer_list<value_type> ilist,
          size_t bucket_count = 0,
          Hash const& hash = Hash(),
          KeyEqual const& equal = KeyEqual(),
          allocator_type const& alloc = allocator_type())
        : table(bucket_count, hash, equal, alloc) {
        insert(ilist);
    }

    table(std::initializer_list<value_type> ilist, size_type bucket_count, allocator_type const& alloc)
        : table(ilist, bucket_count, Hash(), KeyEqual(), alloc) {}

    table(std::initializer_list<value_type> init, size_type bucket_count, Hash const& hash, allocator_type const& alloc)
        : table(init, bucket_count, hash, KeyEqual(), alloc) {}
+
+ ~table() {
+ auto ba = bucket_alloc(m_values.get_allocator());
+ bucket_alloc_traits::deallocate(ba, m_buckets, bucket_count());
+ }
+
    // Copy assignment: rebuilds the bucket index from the copied values.
    auto operator=(table const& other) -> table& {
        if (&other != this) {
            deallocate_buckets(); // deallocate before m_values is set (might have another allocator)
            m_values = other.m_values;
            m_max_load_factor = other.m_max_load_factor;
            m_hash = other.m_hash;
            m_equal = other.m_equal;
            m_shifts = initial_shifts;
            copy_buckets(other);
        }
        return *this;
    }

    // Move assignment: steals other's state and resets other to its
    // default-constructed configuration.
    auto operator=(table&& other) noexcept(
        noexcept(std::is_nothrow_move_assignable_v<value_container_type>&& std::is_nothrow_move_assignable_v<Hash>&&
                     std::is_nothrow_move_assignable_v<KeyEqual>)) -> table& {
        if (&other != this) {
            deallocate_buckets(); // deallocate before m_values is set (might have another allocator)
            m_values = std::move(other.m_values);
            m_buckets = std::exchange(other.m_buckets, nullptr);
            m_num_buckets = std::exchange(other.m_num_buckets, 0);
            m_max_bucket_capacity = std::exchange(other.m_max_bucket_capacity, 0);
            m_max_load_factor = std::exchange(other.m_max_load_factor, default_max_load_factor);
            m_hash = std::exchange(other.m_hash, {});
            m_equal = std::exchange(other.m_equal, {});
            m_shifts = std::exchange(other.m_shifts, initial_shifts);
            other.m_values.clear();
        }
        return *this;
    }

    // Replaces the contents with the initializer list (duplicates keep the first occurrence).
    auto operator=(std::initializer_list<value_type> ilist) -> table& {
        clear();
        insert(ilist);
        return *this;
    }
+
    auto get_allocator() const noexcept -> allocator_type {
        return m_values.get_allocator();
    }

    // iterators //////////////////////////////////////////////////////////////
    // Iteration goes over the dense value container, so it is as fast as
    // iterating a vector; iteration order is insertion order until an erase.

    auto begin() noexcept -> iterator {
        return m_values.begin();
    }

    auto begin() const noexcept -> const_iterator {
        return m_values.begin();
    }

    auto cbegin() const noexcept -> const_iterator {
        return m_values.cbegin();
    }

    auto end() noexcept -> iterator {
        return m_values.end();
    }

    auto cend() const noexcept -> const_iterator {
        return m_values.cend();
    }

    auto end() const noexcept -> const_iterator {
        return m_values.end();
    }

    // capacity ///////////////////////////////////////////////////////////////

    [[nodiscard]] auto empty() const noexcept -> bool {
        return m_values.empty();
    }

    [[nodiscard]] auto size() const noexcept -> size_t {
        return m_values.size();
    }

    // Largest element count representable by the bucket's value index type
    // (halved when value_idx_type is as wide as size_t, to avoid overflow).
    [[nodiscard]] static constexpr auto max_size() noexcept -> size_t {
        if constexpr (std::numeric_limits<value_idx_type>::max() == std::numeric_limits<size_t>::max()) {
            return size_t{1} << (sizeof(value_idx_type) * 8 - 1);
        } else {
            return size_t{1} << (sizeof(value_idx_type) * 8);
        }
    }
+
    // modifiers //////////////////////////////////////////////////////////////

    // Removes all elements; keeps the bucket array allocated (zeroed).
    void clear() {
        m_values.clear();
        clear_buckets();
    }

    auto insert(value_type const& value) -> std::pair<iterator, bool> {
        return emplace(value);
    }

    auto insert(value_type&& value) -> std::pair<iterator, bool> {
        return emplace(std::move(value));
    }

    template <class P, std::enable_if_t<std::is_constructible_v<value_type, P&&>, bool> = true>
    auto insert(P&& value) -> std::pair<iterator, bool> {
        return emplace(std::forward<P>(value));
    }

    // Hint overloads: the hint is ignored (dense storage has no useful hint position).
    auto insert(const_iterator /*hint*/, value_type const& value) -> iterator {
        return insert(value).first;
    }

    auto insert(const_iterator /*hint*/, value_type&& value) -> iterator {
        return insert(std::move(value)).first;
    }

    template <class P, std::enable_if_t<std::is_constructible_v<value_type, P&&>, bool> = true>
    auto insert(const_iterator /*hint*/, P&& value) -> iterator {
        return insert(std::forward<P>(value)).first;
    }

    template <class InputIt>
    void insert(InputIt first, InputIt last) {
        while (first != last) {
            insert(*first);
            ++first;
        }
    }

    void insert(std::initializer_list<value_type> ilist) {
        insert(ilist.begin(), ilist.end());
    }
+
    // nonstandard API: *this is emptied.
    // Also see "A Standard flat_map" https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2022/p0429r9.pdf
    // Only valid on rvalues (&&-qualified); the bucket index is left stale
    // but harmless since the container is moved out.
    auto extract() && -> value_container_type {
        return std::move(m_values);
    }
+
+ // nonstandard API:
+ // Discards the internally held container and replaces it with the one passed. Erases non-unique elements.
+ auto replace(value_container_type&& container) {
+ if (container.size() > max_size()) {
+ throw std::out_of_range("ankerl::unordered_dense::map::replace(): too many elements");
+ }
+
+ auto shifts = calc_shifts_for_size(container.size());
+ if (0 == m_num_buckets || shifts < m_shifts || container.get_allocator() != m_values.get_allocator()) {
+ m_shifts = shifts;
+ deallocate_buckets();
+ allocate_buckets_from_shift();
+ }
+ clear_buckets();
+
+ m_values = std::move(container);
+
+ // can't use clear_and_fill_buckets_from_values() because container elements might not be unique
+ auto value_idx = value_idx_type{};
+
+ // loop until we reach the end of the container. duplicated entries will be replaced with back().
+ while (value_idx != static_cast<value_idx_type>(m_values.size())) {
+ auto const& key = get_key(m_values[value_idx]);
+
+ auto hash = mixed_hash(key);
+ auto dist_and_fingerprint = dist_and_fingerprint_from_hash(hash);
+ auto bucket_idx = bucket_idx_from_hash(hash);
+
+ bool key_found = false;
+ while (true) {
+ auto const& bucket = at(m_buckets, bucket_idx);
+ if (dist_and_fingerprint > bucket.m_dist_and_fingerprint) {
+ break;
+ }
+ if (dist_and_fingerprint == bucket.m_dist_and_fingerprint &&
+ m_equal(key, m_values[bucket.m_value_idx].first)) {
+ key_found = true;
+ break;
+ }
+ dist_and_fingerprint = dist_inc(dist_and_fingerprint);
+ bucket_idx = next(bucket_idx);
+ }
+
+ if (key_found) {
+ if (value_idx != static_cast<value_idx_type>(m_values.size() - 1)) {
+ m_values[value_idx] = std::move(m_values.back());
+ }
+ m_values.pop_back();
+ } else {
+ place_and_shift_up({dist_and_fingerprint, value_idx}, bucket_idx);
+ ++value_idx;
+ }
+ }
+ }
+
    // insert_or_assign: insert when absent, otherwise overwrite the mapped
    // value. Map-only (enabled via is_map_v).
    template <class M, typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
    auto insert_or_assign(Key const& key, M&& mapped) -> std::pair<iterator, bool> {
        return do_insert_or_assign(key, std::forward<M>(mapped));
    }

    template <class M, typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
    auto insert_or_assign(Key&& key, M&& mapped) -> std::pair<iterator, bool> {
        return do_insert_or_assign(std::move(key), std::forward<M>(mapped));
    }

    // Heterogeneous-key overload, enabled only for transparent hash + equality.
    template <typename K,
              typename M,
              typename Q = T,
              typename H = Hash,
              typename KE = KeyEqual,
              std::enable_if_t<is_map_v<Q> && is_transparent_v<H, KE>, bool> = true>
    auto insert_or_assign(K&& key, M&& mapped) -> std::pair<iterator, bool> {
        return do_insert_or_assign(std::forward<K>(key), std::forward<M>(mapped));
    }

    // Hint overloads: the hint is ignored.
    template <class M, typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
    auto insert_or_assign(const_iterator /*hint*/, Key const& key, M&& mapped) -> iterator {
        return do_insert_or_assign(key, std::forward<M>(mapped)).first;
    }

    template <class M, typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
    auto insert_or_assign(const_iterator /*hint*/, Key&& key, M&& mapped) -> iterator {
        return do_insert_or_assign(std::move(key), std::forward<M>(mapped)).first;
    }

    template <typename K,
              typename M,
              typename Q = T,
              typename H = Hash,
              typename KE = KeyEqual,
              std::enable_if_t<is_map_v<Q> && is_transparent_v<H, KE>, bool> = true>
    auto insert_or_assign(const_iterator /*hint*/, K&& key, M&& mapped) -> iterator {
        return do_insert_or_assign(std::forward<K>(key), std::forward<M>(mapped)).first;
    }
+
    // Single arguments for unordered_set can be used without having to construct the value_type
    // (set-only, transparent hash/equality required): checks for the key first
    // and only constructs the stored value when it is actually new.
    template <class K,
              typename Q = T,
              typename H = Hash,
              typename KE = KeyEqual,
              std::enable_if_t<!is_map_v<Q> && is_transparent_v<H, KE>, bool> = true>
    auto emplace(K&& key) -> std::pair<iterator, bool> {
        if (is_full()) {
            increase_size();
        }

        auto hash = mixed_hash(key);
        auto dist_and_fingerprint = dist_and_fingerprint_from_hash(hash);
        auto bucket_idx = bucket_idx_from_hash(hash);

        while (dist_and_fingerprint <= at(m_buckets, bucket_idx).m_dist_and_fingerprint) {
            if (dist_and_fingerprint == at(m_buckets, bucket_idx).m_dist_and_fingerprint &&
                m_equal(key, m_values[at(m_buckets, bucket_idx).m_value_idx])) {
                // found it, return without ever actually creating anything
                return {begin() + static_cast<difference_type>(at(m_buckets, bucket_idx).m_value_idx), false};
            }
            dist_and_fingerprint = dist_inc(dist_and_fingerprint);
            bucket_idx = next(bucket_idx);
        }

        // value is new, insert element first, so when exception happens we are in a valid state
        m_values.emplace_back(std::forward<K>(key));
        // now place the bucket and shift up until we find an empty spot
        auto value_idx = static_cast<value_idx_type>(m_values.size() - 1);
        place_and_shift_up({dist_and_fingerprint, value_idx}, bucket_idx);
        return {begin() + static_cast<difference_type>(value_idx), true};
    }
+
    // General emplace: must construct the value up front to learn its key;
    // if the key turns out to already exist, the new value is popped again.
    template <class... Args>
    auto emplace(Args&&... args) -> std::pair<iterator, bool> {
        if (is_full()) {
            increase_size();
        }

        // we have to instantiate the value_type to be able to access the key.
        // 1. emplace_back the object so it is constructed. 2. If the key is already there, pop it later in the loop.
        auto& key = get_key(m_values.emplace_back(std::forward<Args>(args)...));
        auto hash = mixed_hash(key);
        auto dist_and_fingerprint = dist_and_fingerprint_from_hash(hash);
        auto bucket_idx = bucket_idx_from_hash(hash);

        while (dist_and_fingerprint <= at(m_buckets, bucket_idx).m_dist_and_fingerprint) {
            if (dist_and_fingerprint == at(m_buckets, bucket_idx).m_dist_and_fingerprint &&
                m_equal(key, get_key(m_values[at(m_buckets, bucket_idx).m_value_idx]))) {
                m_values.pop_back(); // value was already there, so get rid of it
                return {begin() + static_cast<difference_type>(at(m_buckets, bucket_idx).m_value_idx), false};
            }
            dist_and_fingerprint = dist_inc(dist_and_fingerprint);
            bucket_idx = next(bucket_idx);
        }

        // value is new, place the bucket and shift up until we find an empty spot
        auto value_idx = static_cast<value_idx_type>(m_values.size() - 1);
        place_and_shift_up({dist_and_fingerprint, value_idx}, bucket_idx);

        return {begin() + static_cast<difference_type>(value_idx), true};
    }

    // The hint is ignored (see insert).
    template <class... Args>
    auto emplace_hint(const_iterator /*hint*/, Args&&... args) -> iterator {
        return emplace(std::forward<Args>(args)...).first;
    }
+
    // try_emplace (map-only): does not construct the mapped value when the key
    // already exists; args are only consumed on actual insertion.
    template <class... Args, typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
    auto try_emplace(Key const& key, Args&&... args) -> std::pair<iterator, bool> {
        return do_try_emplace(key, std::forward<Args>(args)...);
    }

    template <class... Args, typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
    auto try_emplace(Key&& key, Args&&... args) -> std::pair<iterator, bool> {
        return do_try_emplace(std::move(key), std::forward<Args>(args)...);
    }

    // Hint overloads: the hint is ignored.
    template <class... Args, typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
    auto try_emplace(const_iterator /*hint*/, Key const& key, Args&&... args) -> iterator {
        return do_try_emplace(key, std::forward<Args>(args)...).first;
    }

    template <class... Args, typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
    auto try_emplace(const_iterator /*hint*/, Key&& key, Args&&... args) -> iterator {
        return do_try_emplace(std::move(key), std::forward<Args>(args)...).first;
    }

    // Heterogeneous-key overloads; is_neither_convertible_v keeps them from
    // shadowing the hint overloads when K is an iterator type.
    template <
        typename K,
        typename... Args,
        typename Q = T,
        typename H = Hash,
        typename KE = KeyEqual,
        std::enable_if_t<is_map_v<Q> && is_transparent_v<H, KE> && is_neither_convertible_v<K&&, iterator, const_iterator>,
                         bool> = true>
    auto try_emplace(K&& key, Args&&... args) -> std::pair<iterator, bool> {
        return do_try_emplace(std::forward<K>(key), std::forward<Args>(args)...);
    }

    template <
        typename K,
        typename... Args,
        typename Q = T,
        typename H = Hash,
        typename KE = KeyEqual,
        std::enable_if_t<is_map_v<Q> && is_transparent_v<H, KE> && is_neither_convertible_v<K&&, iterator, const_iterator>,
                         bool> = true>
    auto try_emplace(const_iterator /*hint*/, K&& key, Args&&... args) -> iterator {
        return do_try_emplace(std::forward<K>(key), std::forward<Args>(args)...).first;
    }
+
    // Erase the element at `it`. Finds the bucket referring to this element by
    // probing from the key's home bucket until the stored value index matches,
    // then delegates the actual removal to do_erase().
    auto erase(iterator it) -> iterator {
        auto hash = mixed_hash(get_key(*it));
        auto bucket_idx = bucket_idx_from_hash(hash);

        // Element position in the dense value array == iterator offset.
        auto const value_idx_to_remove = static_cast<value_idx_type>(it - cbegin());
        while (at(m_buckets, bucket_idx).m_value_idx != value_idx_to_remove) {
            bucket_idx = next(bucket_idx);
        }

        do_erase(bucket_idx);
        // The same dense index now holds the element that follows the erased
        // one (presumably do_erase back-fills the hole — standard for this
        // container style; confirm against do_erase if modifying).
        return begin() + static_cast<difference_type>(value_idx_to_remove);
    }
+
+ auto erase(const_iterator it) -> iterator {
+ return erase(begin() + (it - cbegin()));
+ }
+
    // Erase [first, last). Single-element erase() back-fills the hole from the
    // end of the dense array, so a naive left-to-right sweep could move
    // not-yet-erased elements into already-visited slots. The two phases below
    // avoid re-examining moved elements.
    auto erase(const_iterator first, const_iterator last) -> iterator {
        auto const idx_first = first - cbegin();
        auto const idx_last = last - cbegin();
        auto const first_to_last = std::distance(first, last);
        auto const last_to_end = std::distance(last, cend());

        // remove elements from left to right which moves elements from the end back
        // (only while the elements moved in from the end come from beyond `last`)
        auto const mid = idx_first + std::min(first_to_last, last_to_end);
        auto idx = idx_first;
        while (idx != mid) {
            erase(begin() + idx);
            ++idx;
        }

        // all elements from the right are moved, now remove the last element until all done
        idx = idx_last;
        while (idx != mid) {
            --idx;
            erase(begin() + idx);
        }

        return begin() + idx_first;
    }
+
    // Erase by key; returns the number of elements removed (0 or 1).
    auto erase(Key const& key) -> size_t {
        return do_erase_key(key);
    }

    // Heterogeneous erase, available only with transparent Hash/KeyEqual.
    template <class K, class H = Hash, class KE = KeyEqual, std::enable_if_t<is_transparent_v<H, KE>, bool> = true>
    auto erase(K&& key) -> size_t {
        return do_erase_key(std::forward<K>(key));
    }

    // NOTE(review): noexcept(noexcept(<constant expression>)) always collapses
    // to noexcept(true) — evaluating the bool constants cannot throw. The
    // intent was probably noexcept(is_nothrow_swappable_v<...> && ...);
    // kept as-is to match upstream. The unqualified swap(other, *this) resolves
    // via `using std::swap` (ADL swap if one exists, std::swap otherwise).
    void swap(table& other) noexcept(noexcept(std::is_nothrow_swappable_v<value_container_type>&&
                                                  std::is_nothrow_swappable_v<Hash>&& std::is_nothrow_swappable_v<KeyEqual>)) {
        using std::swap;
        swap(other, *this);
    }
+
+ // lookup /////////////////////////////////////////////////////////////////
+
    // map-only: checked element access (do_at presumably throws on a missing
    // key, mirroring std::unordered_map::at — confirm in do_at).
    template <typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
    auto at(key_type const& key) -> Q& {
        return do_at(key);
    }

    // Heterogeneous-key variant (transparent Hash/KeyEqual required).
    template <typename K,
              typename Q = T,
              typename H = Hash,
              typename KE = KeyEqual,
              std::enable_if_t<is_map_v<Q> && is_transparent_v<H, KE>, bool> = true>
    auto at(K const& key) -> Q& {
        return do_at(key);
    }

    // Const overloads of the two accessors above.
    template <typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
    auto at(key_type const& key) const -> Q const& {
        return do_at(key);
    }

    template <typename K,
              typename Q = T,
              typename H = Hash,
              typename KE = KeyEqual,
              std::enable_if_t<is_map_v<Q> && is_transparent_v<H, KE>, bool> = true>
    auto at(K const& key) const -> Q const& {
        return do_at(key);
    }
+
+ template <typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
+ auto operator[](Key const& key) -> Q& {
+ return try_emplace(key).first->second;
+ }
+
+ template <typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
+ auto operator[](Key&& key) -> Q& {
+ return try_emplace(std::move(key)).first->second;
+ }
+
+ template <typename K,
+ typename Q = T,
+ typename H = Hash,
+ typename KE = KeyEqual,
+ std::enable_if_t<is_map_v<Q> && is_transparent_v<H, KE>, bool> = true>
+ auto operator[](K&& key) -> Q& {
+ return try_emplace(std::forward<K>(key)).first->second;
+ }
+
+ auto count(Key const& key) const -> size_t {
+ return find(key) == end() ? 0 : 1;
+ }
+
+ template <class K, class H = Hash, class KE = KeyEqual, std::enable_if_t<is_transparent_v<H, KE>, bool> = true>
+ auto count(K const& key) const -> size_t {
+ return find(key) == end() ? 0 : 1;
+ }
+
    // Find an element by key; returns end() when absent. All four overloads
    // forward to do_find.
    auto find(Key const& key) -> iterator {
        return do_find(key);
    }

    auto find(Key const& key) const -> const_iterator {
        return do_find(key);
    }

    // Heterogeneous-key variants (transparent Hash/KeyEqual required).
    template <class K, class H = Hash, class KE = KeyEqual, std::enable_if_t<is_transparent_v<H, KE>, bool> = true>
    auto find(K const& key) -> iterator {
        return do_find(key);
    }

    template <class K, class H = Hash, class KE = KeyEqual, std::enable_if_t<is_transparent_v<H, KE>, bool> = true>
    auto find(K const& key) const -> const_iterator {
        return do_find(key);
    }
+
+ auto contains(Key const& key) const -> bool {
+ return find(key) != end();
+ }
+
+ template <class K, class H = Hash, class KE = KeyEqual, std::enable_if_t<is_transparent_v<H, KE>, bool> = true>
+ auto contains(K const& key) const -> bool {
+ return find(key) != end();
+ }
+
+ auto equal_range(Key const& key) -> std::pair<iterator, iterator> {
+ auto it = do_find(key);
+ return {it, it == end() ? end() : it + 1};
+ }
+
+ auto equal_range(const Key& key) const -> std::pair<const_iterator, const_iterator> {
+ auto it = do_find(key);
+ return {it, it == end() ? end() : it + 1};
+ }
+
+ template <class K, class H = Hash, class KE = KeyEqual, std::enable_if_t<is_transparent_v<H, KE>, bool> = true>
+ auto equal_range(K const& key) -> std::pair<iterator, iterator> {
+ auto it = do_find(key);
+ return {it, it == end() ? end() : it + 1};
+ }
+
+ template <class K, class H = Hash, class KE = KeyEqual, std::enable_if_t<is_transparent_v<H, KE>, bool> = true>
+ auto equal_range(K const& key) const -> std::pair<const_iterator, const_iterator> {
+ auto it = do_find(key);
+ return {it, it == end() ? end() : it + 1};
+ }
+
+ // bucket interface ///////////////////////////////////////////////////////
+
    // Current number of buckets in the index array.
    auto bucket_count() const noexcept -> size_t { // NOLINT(modernize-use-nodiscard)
        return m_num_buckets;
    }

    // Upper bound on bucket count; identical to max_size().
    static constexpr auto max_bucket_count() noexcept -> size_t { // NOLINT(modernize-use-nodiscard)
        return max_size();
    }
+
+ // hash policy ////////////////////////////////////////////////////////////
+
    // Elements per bucket; 0.0 for an unallocated (bucket-less) table.
    [[nodiscard]] auto load_factor() const -> float {
        return bucket_count() ? static_cast<float>(size()) / static_cast<float>(bucket_count()) : 0.0F;
    }

    [[nodiscard]] auto max_load_factor() const -> float {
        return m_max_load_factor;
    }

    // Set the max load factor and recompute the grow threshold. Skipped when
    // the table is already at its maximum bucket count (no further growth).
    void max_load_factor(float ml) {
        m_max_load_factor = ml;
        if (m_num_buckets != max_bucket_count()) {
            m_max_bucket_capacity = static_cast<value_idx_type>(static_cast<float>(bucket_count()) * max_load_factor());
        }
    }
+
    // Rebuild the bucket index for at least `count` elements (never below the
    // current size). The dense value array is untouched except for
    // shrink_to_fit; buckets are reallocated and refilled from the values.
    void rehash(size_t count) {
        count = std::min(count, max_size());
        auto shifts = calc_shifts_for_size(std::max(count, size()));
        if (shifts != m_shifts) {
            m_shifts = shifts;
            deallocate_buckets();
            m_values.shrink_to_fit();
            allocate_buckets_from_shift();
            clear_and_fill_buckets_from_values();
        }
    }

    // Reserve storage for `capa` elements. Unlike rehash(), this only ever
    // grows the bucket array (shifts < m_shifts means more buckets) and also
    // pre-reserves the dense value array.
    void reserve(size_t capa) {
        capa = std::min(capa, max_size());
        m_values.reserve(capa);
        auto shifts = calc_shifts_for_size(std::max(capa, size()));
        if (0 == m_num_buckets || shifts < m_shifts) {
            m_shifts = shifts;
            deallocate_buckets();
            allocate_buckets_from_shift();
            clear_and_fill_buckets_from_values();
        }
    }
+
+ // observers //////////////////////////////////////////////////////////////
+
    // Copies of the hash / equality function objects (standard observers).
    auto hash_function() const -> hasher {
        return m_hash;
    }

    auto key_eq() const -> key_equal {
        return m_equal;
    }

    // nonstandard API: expose the underlying values container
    [[nodiscard]] auto values() const noexcept -> value_container_type const& {
        return m_values;
    }
+
+ // non-member functions ///////////////////////////////////////////////////
+
    // Order-independent equality: same size and every element of b is found in
    // a (for maps, with an equal mapped value). O(n) expected lookups.
    friend auto operator==(table const& a, table const& b) -> bool {
        if (&a == &b) {
            return true;
        }
        if (a.size() != b.size()) {
            return false;
        }
        for (auto const& b_entry : b) {
            auto it = a.find(get_key(b_entry));
            if constexpr (std::is_void_v<T>) {
                // set: only check that the key is here
                if (a.end() == it) {
                    return false;
                }
            } else {
                // map: check that key is here, then also check that value is the same
                // (uses the mapped type's operator== only, negated as !(x == y))
                if (a.end() == it || !(b_entry.second == it->second)) {
                    return false;
                }
            }
        }
        return true;
    }

    friend auto operator!=(table const& a, table const& b) -> bool {
        return !(a == b);
    }
+};
+
+} // namespace detail
+
// Public alias: unordered_dense::map — detail::table with a mapped type T.
// AllocatorOrContainer may be an allocator or a complete container type used
// as the dense value store.
template <class Key,
          class T,
          class Hash = hash<Key>,
          class KeyEqual = std::equal_to<Key>,
          class AllocatorOrContainer = std::allocator<std::pair<Key, T>>,
          class Bucket = bucket_type::standard>
using map = detail::table<Key, T, Hash, KeyEqual, AllocatorOrContainer, Bucket>;

// Public alias: unordered_dense::set — same table with T = void.
template <class Key,
          class Hash = hash<Key>,
          class KeyEqual = std::equal_to<Key>,
          class AllocatorOrContainer = std::allocator<Key>,
          class Bucket = bucket_type::standard>
using set = detail::table<Key, void, Hash, KeyEqual, AllocatorOrContainer, Bucket>;

# if ANKERL_UNORDERED_DENSE_PMR

// Polymorphic-allocator flavors (only when <memory_resource> is available).
namespace pmr {

template <class Key,
          class T,
          class Hash = hash<Key>,
          class KeyEqual = std::equal_to<Key>,
          class Bucket = bucket_type::standard>
using map = detail::table<Key, T, Hash, KeyEqual, std::pmr::polymorphic_allocator<std::pair<Key, T>>, Bucket>;

template <class Key, class Hash = hash<Key>, class KeyEqual = std::equal_to<Key>, class Bucket = bucket_type::standard>
using set = detail::table<Key, void, Hash, KeyEqual, std::pmr::polymorphic_allocator<Key>, Bucket>;

} // namespace pmr

# endif
+
+// deduction guides ///////////////////////////////////////////////////////////
+
+// deduction guides for alias templates are only possible since C++20
+// see https://en.cppreference.com/w/cpp/language/class_template_argument_deduction
+
+} // namespace ANKERL_UNORDERED_DENSE_NAMESPACE
+} // namespace ankerl::unordered_dense
+
+// std extensions /////////////////////////////////////////////////////////////
+
+namespace std { // NOLINT(cert-dcl58-cpp)
+
+template <class Key, class T, class Hash, class KeyEqual, class AllocatorOrContainer, class Bucket, class Pred>
+auto erase_if(ankerl::unordered_dense::detail::table<Key, T, Hash, KeyEqual, AllocatorOrContainer, Bucket>& map, Pred pred)
+ -> size_t {
+ using map_t = ankerl::unordered_dense::detail::table<Key, T, Hash, KeyEqual, AllocatorOrContainer, Bucket>;
+
+ // going back to front because erase() invalidates the end iterator
+ auto const old_size = map.size();
+ auto idx = old_size;
+ while (idx) {
+ --idx;
+ auto it = map.begin() + static_cast<typename map_t::difference_type>(idx);
+ if (pred(*it)) {
+ map.erase(it);
+ }
+ }
+
+ return map.size() - old_size;
+}
+
+} // namespace std
+
+#endif
+#endif
diff --git a/misc/benchmarks/external/emhash/hash_table7.hpp b/misc/benchmarks/external/emhash/hash_table7.hpp
new file mode 100644
index 00000000..fdc33fe1
--- /dev/null
+++ b/misc/benchmarks/external/emhash/hash_table7.hpp
@@ -0,0 +1,1876 @@
+// emhash7::HashMap for C++11/14/17
+// version 2.2.3
+// https://github.com/ktprime/ktprime/blob/master/hash_table7.hpp
+//
+// Licensed under the MIT License <http://opensource.org/licenses/MIT>.
+// SPDX-License-Identifier: MIT
+// Copyright (c) 2019-2022 Huang Yuanbing & bailuzhou AT 163.com
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE
+
+// From
+// NUMBER OF PROBES / LOOKUP Successful Unsuccessful
+// Quadratic collision resolution 1 - ln(1-L) - L/2 1/(1-L) - L - ln(1-L)
+// Linear collision resolution [1+1/(1-L)]/2 [1+1/(1-L)2]/2
+// separator chain resolution 1 + L / 2 exp(-L) + L
+
+// -- enlarge_factor -- 0.10 0.50 0.60 0.75 0.80 0.90 0.99
+// QUADRATIC COLLISION RES.
+// probes/successful lookup 1.05 1.44 1.62 2.01 2.21 2.85 5.11
+// probes/unsuccessful lookup 1.11 2.19 2.82 4.64 5.81 11.4 103.6
+// LINEAR COLLISION RES.
+// probes/successful lookup 1.06 1.5 1.75 2.5 3.0 5.5 50.5
+// probes/unsuccessful lookup 1.12 2.5 3.6 8.5 13.0 50.0
+// SEPARATE CHAN RES.
+// probes/successful lookup 1.05 1.25 1.3 1.25 1.4 1.45 1.50
+// probes/unsuccessful lookup 1.00 1.11 1.15 1.22 1.25 1.31 1.37
+// clacul/unsuccessful lookup 1.01 1.25 1.36, 1.56, 1.64, 1.81, 1.97
+
+/****************
+ under random hashCodes, the frequency of nodes in bins follows a Poisson
+distribution(http://en.wikipedia.org/wiki/Poisson_distribution) with a parameter of about 0.5
+on average for the default resizing threshold of 0.75, although with a large variance because
+of resizing granularity. Ignoring variance, the expected occurrences of list size k are
+(exp(-0.5) * pow(0.5, k)/factorial(k)). The first values are:
+0: 0.60653066
+1: 0.30326533
+2: 0.07581633
+3: 0.01263606
+4: 0.00157952
+5: 0.00015795
+6: 0.00001316
+7: 0.00000094
+8: 0.00000006
+
+ ============== buckets size ration ========
+ 1 1543981 0.36884964|0.36787944 36.885
+ 2 768655 0.36725597|0.36787944 73.611
+ 3 256236 0.18364065|0.18393972 91.975
+ 4 64126 0.06127757|0.06131324 98.102
+ 5 12907 0.01541710|0.01532831 99.644
+ 6 2050 0.00293841|0.00306566 99.938
+ 7 310 0.00051840|0.00051094 99.990
+ 8 49 0.00009365|0.00007299 99.999
+ 9 4 0.00000860|0.00000913 100.000
+========== collision miss ration ===========
+ _num_filled aver_size k.v size_kv = 4185936, 1.58, x.x 24
+ collision,possion,cache_miss hit_find|hit_miss, load_factor = 36.73%,36.74%,31.31% 1.50|2.00, 1.00
+============== buckets size ration ========
+*******************************************************/
+
+#pragma once
+
+#include <cstring>
+#include <string>
+#include <cmath>
+#include <cstdlib>
+#include <type_traits>
+#include <cassert>
+#include <utility>
+#include <cstdint>
+#include <functional>
+#include <iterator>
+#include <algorithm>
+
+#if EMH_WY_HASH
+ #include "wyhash.h"
+#endif
+
+#ifdef EMH_KEY
+ #undef EMH_KEY
+ #undef EMH_VAL
+ #undef EMH_PKV
+ #undef EMH_NEW
+ #undef EMH_SET
+ #undef EMH_BUCKET
+ #undef EMH_EMPTY
+#endif
+
+// likely/unlikely
+#if (__GNUC__ >= 4 || __clang__)
+# define EMH_LIKELY(condition) __builtin_expect(condition, 1)
+# define EMH_UNLIKELY(condition) __builtin_expect(condition, 0)
+#else
+# define EMH_LIKELY(condition) condition
+# define EMH_UNLIKELY(condition) condition
+#endif
+
+#ifndef EMH_BUCKET_INDEX
+ #define EMH_BUCKET_INDEX 1
+#endif
+
+#if EMH_BUCKET_INDEX == 0
+ #define EMH_KEY(p,n) p[n].second.first
+ #define EMH_VAL(p,n) p[n].second.second
+ #define EMH_BUCKET(p,n) p[n].first
+ #define EMH_PKV(p,n) p[n].second
+ #define EMH_NEW(key, val, bucket)\
+ new(_pairs + bucket) PairT(bucket, value_type(key, val));\
+ _num_filled ++; EMH_SET(bucket)
+#elif EMH_BUCKET_INDEX == 2
+ #define EMH_KEY(p,n) p[n].first.first
+ #define EMH_VAL(p,n) p[n].first.second
+ #define EMH_BUCKET(p,n) p[n].second
+ #define EMH_PKV(p,n) p[n].first
+ #define EMH_NEW(key, val, bucket)\
+ new(_pairs + bucket) PairT(value_type(key, val), bucket);\
+ _num_filled ++; EMH_SET(bucket)
+#else
+ #define EMH_KEY(p,n) p[n].first
+ #define EMH_VAL(p,n) p[n].second
+ #define EMH_BUCKET(p,n) p[n].bucket
+ #define EMH_PKV(p,n) p[n]
+ #define EMH_NEW(key, val, bucket)\
+ new(_pairs + bucket) PairT(key, val, bucket);\
+ _num_filled ++; EMH_SET(bucket)
+#endif
+
// Occupancy bitmask helpers: a 1 bit marks an EMPTY bucket, 0 marks an
// occupied one (EMH_SET clears the bit on insert, EMH_CLS sets it on erase).
// Fixed: fully parenthesized the macro bodies and the `bucket` argument so
// the macros expand safely inside larger expressions; every current call
// site passes a plain identifier, so expansion is unchanged.
// NOTE(review): EMH_EMPTY ignores its first parameter and always reads the
// member `_bitmask` — kept for call-site compatibility.
#define EMH_MASK(bucket) (1 << ((bucket) % MASK_BIT))
#define EMH_SET(bucket) _bitmask[(bucket) / MASK_BIT] &= ~(EMH_MASK(bucket))
#define EMH_CLS(bucket) _bitmask[(bucket) / MASK_BIT] |= EMH_MASK(bucket)
#define EMH_EMPTY(bitmask, bucket) ((_bitmask[(bucket) / MASK_BIT] & (EMH_MASK(bucket))) != 0)
+
+#if _WIN32
+ #include <intrin.h>
+#if _WIN64
+ #pragma intrinsic(_umul128)
+#endif
+#endif
+
+namespace emhash7 {
+
// Index type for buckets; INACTIVE (all bits set) is the "no bucket" sentinel.
#ifdef EMH_SIZE_TYPE_16BIT
    typedef uint16_t size_type;
    static constexpr size_type INACTIVE = 0xFFFF;
#elif EMH_SIZE_TYPE_64BIT
    typedef uint64_t size_type;
    static constexpr size_type INACTIVE = 0 - 0x1ull;
#else
    typedef uint32_t size_type;
    static constexpr size_type INACTIVE = 0 - 0x1u;
#endif

// Sanity check: the sentinel must read as -1 when converted to int
// (uint16_t 0xFFFF converts to +65535, hence the 16-bit exclusion).
#ifndef EMH_SIZE_TYPE_16BIT
static_assert((int)INACTIVE < 0, "INACTIVE must negative (to int)");
#endif
+
// Count trailing zero bits of n (index of the lowest set bit).
// Precondition: n != 0 — the intrinsics below are undefined for 0.
// (The original comment said "leading zero"; the code uses BitScanForward /
// __builtin_ctz*, which scan from the least-significant bit.)
inline static int CTZ(size_t n)
{
// On big-endian targets the bitmask words are byte-swapped first so that the
// bit order matches the little-endian layout the scan assumes.
#if defined(__x86_64__) || defined(_WIN32) || (__BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)

#elif __BIG_ENDIAN__ || (__BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
    n = __builtin_bswap64(n);
#else
    // Endianness unknown at preprocessing time: detect at run time.
    static uint32_t endianness = 0x12345678;
    const auto is_big = *(const char *)&endianness == 0x12;
    if (is_big)
        n = __builtin_bswap64(n);
#endif

#if _WIN32
    unsigned long index;
    #if defined(_WIN64)
    _BitScanForward64(&index, n);
    #else
    _BitScanForward(&index, n);
    #endif
#elif defined (__LP64__) || (SIZE_MAX == UINT64_MAX) || defined (__x86_64__)
    auto index = __builtin_ctzll(n);
#elif 1
    auto index = __builtin_ctzl(n);
#else
    // Dead fallback branch (kept from upstream): raw bsf/bsfq assembly.
    #if defined (__LP64__) || (SIZE_MAX == UINT64_MAX) || defined (__x86_64__)
    size_type index;
    __asm__("bsfq %1, %0\n" : "=r" (index) : "rm" (n) : "cc");
    #else
    size_type index;
    __asm__("bsf %1, %0\n" : "=r" (index) : "rm" (n) : "cc");
    #endif
#endif

    return (int)index;
}
+
+template <typename First, typename Second>
+struct entry {
+ using first_type = First;
+ using second_type = Second;
+ entry(const First& key, const Second& val, size_type ibucket)
+ :second(val), first(key)
+ {
+ bucket = ibucket;
+ }
+
+ entry(First&& key, Second&& val, size_type ibucket)
+ :second(std::move(val)), first(std::move(key))
+ {
+ bucket = ibucket;
+ }
+
+ template<typename K, typename V>
+ entry(K&& key, V&& val, size_type ibucket)
+ :second(std::forward<V>(val)), first(std::forward<K>(key))
+ {
+ bucket = ibucket;
+ }
+
+ entry(const std::pair<First, Second>& pair)
+ :second(pair.second), first(pair.first)
+ {
+ bucket = INACTIVE;
+ }
+
+ entry(std::pair<First, Second>&& pair)
+ :second(std::move(pair.second)), first(std::move(pair.first))
+ {
+ bucket = INACTIVE;
+ }
+
+ entry(std::tuple<First, Second>&& tup)
+ :second(std::move(std::get<2>(tup))), first(std::move(std::get<1>(tup)))
+ {
+ bucket = INACTIVE;
+ }
+
+ entry(const entry& rhs)
+ :second(rhs.second), first(rhs.first)
+ {
+ bucket = rhs.bucket;
+ }
+
+ entry(entry&& rhs) noexcept
+ :second(std::move(rhs.second)), first(std::move(rhs.first))
+ {
+ bucket = rhs.bucket;
+ }
+
+ entry& operator = (entry&& rhs) noexcept
+ {
+ second = std::move(rhs.second);
+ bucket = rhs.bucket;
+ first = std::move(rhs.first);
+ return *this;
+ }
+
+ entry& operator = (const entry& rhs)
+ {
+ second = rhs.second;
+ bucket = rhs.bucket;
+ first = rhs.first;
+ return *this;
+ }
+
+ bool operator == (const entry<First, Second>& p) const
+ {
+ return first == p.first && second == p.second;
+ }
+
+ bool operator == (const std::pair<First, Second>& p) const
+ {
+ return first == p.first && second == p.second;
+ }
+
+ void swap(entry<First, Second>& o)
+ {
+ std::swap(second, o.second);
+ std::swap(first, o.first);
+ }
+
+#if EMH_ORDER_KV || EMH_SIZE_TYPE_64BIT
+ First first;
+ size_type bucket;
+ Second second;
+#else
+ Second second;
+ size_type bucket;
+ First first;
+#endif
+};
+
/// A cache-friendly hash table with open addressing, linear/qua probing and power-of-two capacity
template <typename KeyT, typename ValueT, typename HashT = std::hash<KeyT>, typename EqT = std::equal_to<KeyT>>
class HashMap
{
// Default / minimum load factors (overridable via -DEMH_DEFAULT_LOAD_FACTOR).
#ifndef EMH_DEFAULT_LOAD_FACTOR
    constexpr static float EMH_DEFAULT_LOAD_FACTOR = 0.80f;
    constexpr static float EMH_MIN_LOAD_FACTOR = 0.25f; //< 0.5
#endif

public:
    typedef HashMap<KeyT, ValueT, HashT, EqT> htype;
    typedef std::pair<KeyT, ValueT> value_type;

// Slot layout depends on EMH_BUCKET_INDEX: 0/2 store a (link, pair) or
// (pair, link) std::pair; the default (1) uses the packed entry<> struct.
#if EMH_BUCKET_INDEX == 0
    typedef value_type value_pair;
    typedef std::pair<size_type, value_type> PairT;
#elif EMH_BUCKET_INDEX == 2
    typedef value_type value_pair;
    typedef std::pair<value_type, size_type> PairT;
#else
    typedef entry<KeyT, ValueT> value_pair;
    typedef entry<KeyT, ValueT> PairT;
#endif

    typedef KeyT key_type;
    typedef ValueT val_type;
    typedef ValueT mapped_type;
    typedef HashT hasher;
    typedef EqT key_equal;
    typedef PairT& reference;
    typedef const PairT& const_reference;

    class const_iterator;
    // Forward iterator driven by the occupancy bitmask: _bmask caches the
    // (inverted) bitmask word at word-start _from, so a 1 bit in _bmask means
    // "occupied". Advancing clears the lowest bit and CTZ locates the next one.
    class iterator
    {
    public:
        typedef std::forward_iterator_tag iterator_category;
        typedef std::ptrdiff_t difference_type;
        typedef value_pair value_type;

        typedef value_pair* pointer;
        typedef value_pair& reference;

        iterator() = default;
        iterator(const const_iterator& it) : _map(it._map), _bucket(it._bucket), _from(it._from), _bmask(it._bmask) { }
        // The bool-tagged ctor always primes the cached word; the untagged one
        // only does so under EMH_ITER_SAFE (otherwise callers must not
        // dereference/advance before the mask is primed, e.g. end()).
        iterator(const htype* hash_map, size_type bucket, bool) : _map(hash_map), _bucket(bucket) { init(); }
#if EMH_ITER_SAFE
        iterator(const htype* hash_map, size_type bucket) : _map(hash_map), _bucket(bucket) { init(); }
#else
        iterator(const htype* hash_map, size_type bucket) : _map(hash_map), _bucket(bucket) { _bmask = _from = 0; }
#endif

        // Load the bitmask word containing _bucket, mask off all bits below
        // _bucket, and invert so set bits mark occupied slots at/after it.
        void init()
        {
            _from = (_bucket / SIZE_BIT) * SIZE_BIT;
            if (_bucket < _map->bucket_count()) {
                _bmask = *(size_t*)((size_t*)_map->_bitmask + _from / SIZE_BIT);
                _bmask |= (1ull << _bucket % SIZE_BIT) - 1;
                _bmask = ~_bmask;
            } else {
                _bmask = 0;
            }
        }

        size_type bucket() const
        {
            return _bucket;
        }

        // Drop `bucket` from the cached word (used when an element in the
        // current word is erased during iteration).
        void clear(size_type bucket)
        {
            if (_bucket / SIZE_BIT == bucket / SIZE_BIT)
                _bmask &= ~(1ull << (bucket % SIZE_BIT));
        }

        // Advance WITHOUT consuming the current bit — used by begin() to land
        // on the first occupied slot.
        iterator& next()
        {
            goto_next_element();
            return *this;
        }

        iterator& operator++()
        {
            _bmask &= _bmask - 1;  // consume the current element's bit
            goto_next_element();
            return *this;
        }

        iterator operator++(int)
        {
            iterator old = *this;
            _bmask &= _bmask - 1;
            goto_next_element();
            return old;
        }

        reference operator*() const
        {
            return _map->EMH_PKV(_pairs, _bucket);
        }

        pointer operator->() const
        {
            return &(_map->EMH_PKV(_pairs, _bucket));
        }

        // Comparison uses the bucket index only (sufficient within one map).
        bool operator==(const iterator& rhs) const { return _bucket == rhs._bucket; }
        bool operator!=(const iterator& rhs) const { return _bucket != rhs._bucket; }
        bool operator==(const const_iterator& rhs) const { return _bucket == rhs._bucket; }
        bool operator!=(const const_iterator& rhs) const { return _bucket != rhs._bucket; }

    private:
        // Jump to the lowest set bit; when the word is exhausted, scan whole
        // words (inverted, so nonzero == has occupants). Relies on the padded
        // bitmask tail to terminate at the end of the table.
        void goto_next_element()
        {
            if (EMH_LIKELY(_bmask != 0)) {
                _bucket = _from + CTZ(_bmask);
                return;
            }

            do {
                _bmask = ~*(size_t*)((size_t*)_map->_bitmask + (_from += SIZE_BIT) / SIZE_BIT);
            } while (_bmask == 0);

            _bucket = _from + CTZ(_bmask);
        }

    public:
        const htype* _map;
        size_type _bucket;
        size_type _from;
        size_t _bmask;
    };
+
    // Const counterpart of iterator (same bitmask-word scheme). Note the one
    // behavioral difference: here the current bit is consumed inside
    // goto_next_element() instead of in operator++.
    class const_iterator
    {
    public:
        typedef std::forward_iterator_tag iterator_category;
        typedef std::ptrdiff_t difference_type;
        typedef value_pair value_type;

        typedef const value_pair* pointer;
        typedef const value_pair& reference;

        const_iterator(const iterator& it) : _map(it._map), _bucket(it._bucket), _from(it._from), _bmask(it._bmask) { }
        const_iterator(const htype* hash_map, size_type bucket, bool) : _map(hash_map), _bucket(bucket) { init(); }
#if EMH_ITER_SAFE
        const_iterator(const htype* hash_map, size_type bucket) : _map(hash_map), _bucket(bucket) { init(); }
#else
        const_iterator(const htype* hash_map, size_type bucket) : _map(hash_map), _bucket(bucket) { _bmask = _from = 0; }
#endif

        // Prime the cached, inverted bitmask word for _bucket (see iterator::init).
        void init()
        {
            _from = (_bucket / SIZE_BIT) * SIZE_BIT;
            if (_bucket < _map->bucket_count()) {
                _bmask = *(size_t*)((size_t*)_map->_bitmask + _from / SIZE_BIT);
                _bmask |= (1ull << _bucket % SIZE_BIT) - 1;
                _bmask = ~_bmask;
            } else {
                _bmask = 0;
            }
        }

        size_type bucket() const
        {
            return _bucket;
        }

        const_iterator& operator++()
        {
            goto_next_element();
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator old(*this);
            goto_next_element();
            return old;
        }

        reference operator*() const
        {
            return _map->EMH_PKV(_pairs, _bucket);
        }

        pointer operator->() const
        {
            return &(_map->EMH_PKV(_pairs, _bucket));
        }

        bool operator==(const const_iterator& rhs) const { return _bucket == rhs._bucket; }
        bool operator!=(const const_iterator& rhs) const { return _bucket != rhs._bucket; }

    private:
        void goto_next_element()
        {
            _bmask &= _bmask - 1;  // consume current bit, then scan forward
            if (EMH_LIKELY(_bmask != 0)) {
                _bucket = _from + CTZ(_bmask);
                return;
            }

            do {
                _bmask = ~*(size_t*)((size_t*)_map->_bitmask + (_from += SIZE_BIT) / SIZE_BIT);
            } while (_bmask == 0);

            _bucket = _from + CTZ(_bmask);
        }

    public:
        const htype* _map;
        size_type _bucket;
        size_type _from;
        size_t _bmask;
    };
+
    // Shared constructor body: zero all state, record the load factor, then
    // let rehash() allocate storage for at least `bucket` elements.
    void init(size_type bucket, float mlf = EMH_DEFAULT_LOAD_FACTOR)
    {
        _pairs = nullptr;
        _bitmask = nullptr;
        _num_buckets = _num_filled = 0;
        max_load_factor(mlf);
        rehash(bucket);
    }

    HashMap(size_type bucket = 2, float mlf = EMH_DEFAULT_LOAD_FACTOR) noexcept
    {
        init(bucket, mlf);
    }

    // Bytes for one combined allocation: slot array (+EPACK_SIZE padding
    // slots), the occupancy bitmask (1 bit per bucket), and BIT_PACK tail
    // padding so iterators can over-read whole words safely.
    size_t AllocSize(uint64_t num_buckets) const
    {
        return (num_buckets + EPACK_SIZE) * sizeof(PairT) + (num_buckets + 7) / 8 + BIT_PACK;
    }
+
    // Copy ctor: bit-clone when rhs is reasonably full; otherwise rebuild at
    // the default load factor to shed wasted capacity.
    HashMap(const HashMap& rhs) noexcept
    {
        if (rhs.load_factor() > EMH_MIN_LOAD_FACTOR) {
            _pairs = (PairT*)malloc(AllocSize(rhs._num_buckets));
            clone(rhs);
        } else {
            init(rhs._num_filled + 2, EMH_DEFAULT_LOAD_FACTOR);
            for (auto it = rhs.begin(); it != rhs.end(); ++it)
                insert_unique(it->first, it->second);
        }
    }

    // Move ctor: steal rhs's storage via swap.
    // NOTE(review): with EMH_ZERO_MOVE defined, _bitmask/_mlf/_hasher are left
    // uninitialized before the swap, so the moved-from rhs holds indeterminate
    // values in those fields — valid only for destruction/assignment.
    HashMap(HashMap&& rhs) noexcept
    {
#ifndef EMH_ZERO_MOVE
        init(4);
#else
        _num_buckets = _num_filled = _mask = 0;
        _pairs = nullptr;
#endif
        swap(rhs);
    }

    HashMap(std::initializer_list<value_type> ilist)
    {
        init((size_type)ilist.size());
        for (auto it = ilist.begin(); it != ilist.end(); ++it)
            do_insert(*it);
    }

    template<class InputIt>
    HashMap(InputIt first, InputIt last, size_type bucket_count=4)
    {
        init(std::distance(first, last) + bucket_count);
        for (; first != last; ++first)
            emplace(*first);
    }
+
    // Copy assignment: for a sparse rhs, rebuild compactly element by element;
    // otherwise destroy current contents, match the allocation size, and clone.
    HashMap& operator= (const HashMap& rhs) noexcept
    {
        if (this == &rhs)
            return *this;

        if (rhs.load_factor() < EMH_MIN_LOAD_FACTOR) {
            clear(); free(_pairs); _pairs = nullptr;
            rehash(rhs._num_filled + 2);
            for (auto it = rhs.begin(); it != rhs.end(); ++it)
                insert_unique(it->first, it->second);
            return *this;
        }

        if (_num_filled)
            clearkv();

        if (_num_buckets != rhs._num_buckets) {
            free(_pairs);
            _pairs = (PairT*)malloc(AllocSize(rhs._num_buckets));
        }

        clone(rhs);
        return *this;
    }

    // Move assignment: swap states, then clear the donor (donor keeps our old
    // storage but empty; freed when it is destroyed).
    HashMap& operator= (HashMap&& rhs) noexcept
    {
        if (this != &rhs) {
            swap(rhs);
            rhs.clear();
        }
        return *this;
    }
+
    // Order-independent equality against any map-like container supporting
    // size()/find()/end() with comparable mapped values.
    template<typename Con>
    bool operator == (const Con& rhs) const
    {
        if (size() != rhs.size())
            return false;

        for (auto it = begin(), last = end(); it != last; ++it) {
            auto oi = rhs.find(it->first);
            if (oi == rhs.end() || it->second != oi->second)
                return false;
        }
        return true;
    }

    template<typename Con>
    bool operator != (const Con& rhs) const { return !(*this == rhs); }

    // Destructor: destroy occupied slots only (skipping empties via the
    // bitmask iterator), using _num_filled as the countdown terminator, then
    // release the single combined allocation.
    ~HashMap() noexcept
    {
        if (is_triviall_destructable() && _num_filled) {
            for (auto it = cbegin(); _num_filled; ++it) {
                _num_filled --;
                it->~value_pair();
            }
        }
        free(_pairs);
    }
+
    // Copy rhs into our pre-allocated _pairs (caller guarantees the buffer is
    // AllocSize(rhs._num_buckets) bytes). Trivially-copyable payloads are
    // memcpy'd wholesale; otherwise the tail metadata (pad slots + bitmask) is
    // memcpy'd and each occupied slot is copy-constructed in place.
    void clone(const HashMap& rhs) noexcept
    {
        _hasher = rhs._hasher;
        //_eq = rhs._eq;

        _num_filled = rhs._num_filled;
        _mask = rhs._mask;
        _mlf = rhs._mlf;
        _num_buckets = rhs._num_buckets;

        // Bitmask lives immediately after the slot array (+ padding slots).
        _bitmask = decltype(_bitmask)(_pairs + EPACK_SIZE + _num_buckets);
        auto* opairs = rhs._pairs;

        if (is_copy_trivially())
            memcpy(_pairs, opairs, AllocSize(_num_buckets));
        else {
            memcpy(_pairs + _num_buckets, opairs + _num_buckets, EPACK_SIZE * sizeof(PairT) + (_num_buckets + 7) / 8 + BIT_PACK);
            for (auto it = rhs.cbegin(); it.bucket() <= _mask; ++it) {
                const auto bucket = it.bucket();
                EMH_BUCKET(_pairs, bucket) = EMH_BUCKET(opairs, bucket);
                new(_pairs + bucket) PairT(opairs[bucket]);
            }
        }
    }

    // Member-wise swap of all state (storage, counters, policy, bitmask).
    void swap(HashMap& rhs)
    {
        std::swap(_hasher, rhs._hasher);
        //std::swap(_eq, rhs._eq);
        std::swap(_pairs, rhs._pairs);
        std::swap(_num_buckets, rhs._num_buckets);
        std::swap(_num_filled, rhs._num_filled);
        std::swap(_mask, rhs._mask);
        std::swap(_mlf, rhs._mlf);
        std::swap(_bitmask, rhs._bitmask);
    }
+
    // -------------------------------------------------------------
    // begin(): fast path checks the first bitmask word (inverted: set bit ==
    // occupied); otherwise start an iterator at the end of word 0 and let
    // next() scan subsequent words.
    iterator begin() noexcept
    {
#ifdef EMH_ZERO_MOVE
        if (0 == _num_filled)
            return {this, _num_buckets};
#endif

        const auto bmask = ~(*(size_t*)_bitmask);
        if (bmask != 0)
            return {this, (size_type)CTZ(bmask), true};

        iterator it(this, sizeof(bmask) * 8 - 1);
        return it.next();
    }

    const_iterator cbegin() const noexcept
    {
#ifdef EMH_ZERO_MOVE
        if (0 == _num_filled)
            return {this, _num_buckets};
#endif

        const auto bmask = ~(*(size_t*)_bitmask);
        if (bmask != 0)
            return {this, (size_type)CTZ(bmask), true};

        // Built as a mutable iterator, converted by const_iterator's ctor.
        iterator it(this, sizeof(bmask) * 8 - 1);
        return it.next();
    }

    // Last occupied slot (scans the bitmask downward from _mask).
    // Precondition for the loop: at least one element exists.
    iterator last() const
    {
        if (_num_filled == 0)
            return end();

        auto bucket = _mask;
        while (EMH_EMPTY(_pairs, bucket)) bucket--;
        return {this, bucket, true};
    }

    const_iterator begin() const noexcept { return cbegin(); }

    // end() is bucket == _num_buckets; comparisons use the bucket index only.
    iterator end() noexcept { return {this, _num_buckets}; }
    const_iterator cend() const { return {this, _num_buckets}; }
    const_iterator end() const { return cend(); }
+
    size_type size() const { return _num_filled; }
    bool empty() const { return _num_filled == 0; }

    size_type bucket_count() const { return _num_buckets; }
    // Uses _mask + 1 (power-of-two capacity) rather than _num_buckets.
    float load_factor() const { return static_cast<float>(_num_filled) / (_mask + 1); }

    HashT& hash_function() const { return _hasher; }
    EqT& key_eq() const { return _eq; }

    // Stores the load factor as fixed point: _mlf = 2^27 / mlf. Out-of-range
    // requests (<= EMH_MIN_LOAD_FACTOR or >= 0.999) are silently ignored.
    void max_load_factor(float mlf)
    {
        if (mlf < 0.999f && mlf > EMH_MIN_LOAD_FACTOR)
            _mlf = (uint32_t)((1 << 27) / mlf);
    }

    constexpr float max_load_factor() const { return (1 << 27) / (float)_mlf; }
    constexpr size_type max_size() const { return 1ull << (sizeof(size_type) * 8 - 1); }
    constexpr size_type max_bucket_count() const { return max_size(); }
+
+ size_type bucket_main() const
+ {
+ auto main_size = 0;
+ for (size_type bucket = 0; bucket < _num_buckets; ++bucket) {
+ if (EMH_BUCKET(_pairs, bucket) == bucket)
+ main_size ++;
+ }
+ return main_size;
+ }
+
#if EMH_STATIS
    //Returns the bucket number where the element with key k is located.
    // (1-based; 0 means the home bucket is empty. Diagnostic-only API.)
    size_type bucket(const KeyT& key) const
    {
        const auto bucket = hash_key(key) & _mask;
        const auto next_bucket = EMH_BUCKET(_pairs, bucket);
        if (EMH_EMPTY(_pairs, bucket))
            return 0;
        else if (bucket == next_bucket)
            return bucket + 1;

        // Slot is occupied by a colliding element: report its owner's
        // main bucket instead.
        const auto& bucket_key = EMH_KEY(_pairs, bucket);
        return (hash_key(bucket_key) & _mask) + 1;
    }

    //Returns the number of elements in bucket n.
    size_type bucket_size(const size_type bucket) const
    {
        if (EMH_EMPTY(_pairs, bucket))
            return 0;

        // NOTE(review): the initializer below is a dead store — next_bucket is
        // immediately overwritten with the element's main bucket.
        auto next_bucket = EMH_BUCKET(_pairs, bucket);
        next_bucket = hash_key(EMH_KEY(_pairs, bucket)) & _mask;
        size_type bucket_size = 1;

        //iterator each item in current main bucket
        while (true) {
            const auto nbucket = EMH_BUCKET(_pairs, next_bucket);
            if (nbucket == next_bucket) {
                break;  // self-link terminates the chain
            }
            bucket_size++;
            next_bucket = nbucket;
        }
        return bucket_size;
    }
+
+ size_type get_main_bucket(const size_type bucket) const
+ {
+ if (EMH_EMPTY(_pairs, bucket))
+ return INACTIVE;
+
+ auto next_bucket = EMH_BUCKET(_pairs, bucket);
+ const auto& bucket_key = EMH_KEY(_pairs, bucket);
+ const auto main_bucket = hash_key(bucket_key) & _mask;
+ return main_bucket;
+ }
+
+ size_type get_diss(size_type bucket, size_type next_bucket, const size_type slots) const
+ {
+ const int cahe_line_size = 64;
+ auto pbucket = reinterpret_cast<uint64_t>(&_pairs[bucket]);
+ auto pnext = reinterpret_cast<uint64_t>(&_pairs[next_bucket]);
+ if (pbucket / cahe_line_size == pnext / cahe_line_size)
+ return 0;
+ size_type diff = pbucket > pnext ? (pbucket - pnext) : (pnext - pbucket);
+ if (diff / cahe_line_size + 1 < slots)
+ return (diff / cahe_line_size + 1);
+ return slots - 1;
+ }
+
    // Diagnostic: chain length of the collision chain rooted at `bucket`,
    // accumulating cache-line-distance counts into steps[]. Returns -1 for an
    // empty slot and 0 when the slot holds a displaced (non-main) element.
    int get_bucket_info(const size_type bucket, size_type steps[], const size_type slots) const
    {
        if (EMH_EMPTY(_pairs, bucket))
            return -1;

        auto next_bucket = EMH_BUCKET(_pairs, bucket);
        if ((hash_key(EMH_KEY(_pairs, bucket)) & _mask) != bucket)
            return 0;
        else if (next_bucket == bucket)
            return 1;  // self-link: chain of exactly one element

        steps[get_diss(bucket, next_bucket, slots)] ++;
        size_type bucket_size = 2;
        while (true) {
            const auto nbucket = EMH_BUCKET(_pairs, next_bucket);
            if (nbucket == next_bucket)
                break;

            steps[get_diss(nbucket, next_bucket, slots)] ++;
            bucket_size ++;
            next_bucket = nbucket;
        }

        return bucket_size;
    }
+
+    // Print bucket-chain statistics (size histogram, collision/cache-miss
+    // estimates vs. the Poisson expectation) to EMH_LOG or stdout.
+    void dump_statics(bool show_cache) const
+    {
+        const int slots = 128;
+        size_type buckets[slots + 1] = {0};
+        size_type steps[slots + 1] = {0};
+        char buff[1024 * 8];
+        for (size_type bucket = 0; bucket < _num_buckets; ++bucket) {
+            auto bsize = get_bucket_info(bucket, steps, slots);
+            if (bsize >= 0)
+                buckets[bsize] ++;
+        }
+
+        size_type sumb = 0, sums = 0, sumn = 0;
+        size_type miss = 0, finds = 0, bucket_coll = 0;
+        double lf = load_factor(), fk = 1.0 / exp(lf), sum_poisson = 0;
+        int bsize = sprintf (buff, "============== buckets size ratio ========\n");
+
+        miss += _num_buckets - _num_filled;
+        // fix: factorial kept as double -- an int factorial overflows (UB) from 13! on
+        double factorial = 1;
+        for (int i = 1; i < (int)(sizeof(buckets) / sizeof(buckets[0])); i++) {
+            double poisson = fk / factorial; factorial *= i; fk *= lf;
+            if (poisson > 1e-13 && i < 20)
+                sum_poisson += poisson * 100.0 * (i - 1) / i;
+
+            const int64_t bucketsi = buckets[i];
+            if (bucketsi == 0)
+                continue;
+
+            sumb += bucketsi;
+            sumn += bucketsi * i;
+            bucket_coll += bucketsi * (i - 1);
+            finds += bucketsi * i * (i + 1) / 2;
+            miss += bucketsi * i * i;
+            auto errs = (bucketsi * 1.0 * i / _num_filled - poisson) * 100 / poisson;
+            // fix: int64_t printed with %lld (the old %ld is wrong on LLP64/32-bit targets)
+            bsize += sprintf(buff + bsize, " %2d %8lld %0.8lf|%0.2lf%% %2.3lf\n",
+                    i, (long long)bucketsi, bucketsi * 1.0 * i / _num_filled, errs, sumn * 100.0 / _num_filled);
+            if (sumn >= _num_filled)
+                break;
+        }
+
+        bsize += sprintf(buff + bsize, "========== collision miss ratio ===========\n");
+        for (size_type i = 0; show_cache && i < sizeof(steps) / sizeof(steps[0]); i++) {
+            sums += steps[i];
+            if (steps[i] == 0)
+                continue;
+            if (steps[i] > 10)
+                bsize += sprintf(buff + bsize, " %2d %8u %0.2lf %.2lf\n", (int)i, steps[i], steps[i] * 100.0 / bucket_coll, sums * 100.0 / bucket_coll);
+        }
+
+        if (sumb == 0) return;
+
+        bsize += sprintf(buff + bsize, " _num_filled aver_size k.v size_kv = %u, %.2lf, %s.%s %zd\n",
+                _num_filled, _num_filled * 1.0 / sumb, typeid(KeyT).name(), typeid(ValueT).name(), sizeof(PairT));
+
+        bsize += sprintf(buff + bsize, " collision, poisson, cache_miss hit_find|hit_miss, load_factor = %.2lf%%,%.2lf%%,%.2lf%% %.2lf|%.2lf, %.2lf\n",
+                (bucket_coll * 100.0 / _num_filled), sum_poisson, (bucket_coll - steps[0]) * 100.0 / _num_filled,
+                finds * 1.0 / _num_filled, miss * 1.0 / _num_buckets, _num_filled * 1.0 / _num_buckets);
+
+        bsize += sprintf(buff + bsize, "============== buckets size end =============\n");
+        buff[bsize] = 0; // fix: was buff[bsize + 1], which skipped the terminator slot
+
+#ifdef EMH_LOG
+        EMH_LOG << __FUNCTION__ << "|" << buff << endl;
+#else
+        puts(buff);
+#endif
+        assert(sumn == _num_filled);
+        assert(sums == bucket_coll || !show_cache);
+        assert(bucket_coll == buckets[0]);
+    }
+#endif
+
+ // ------------------------------------------------------------
+    // Find by key with a pre-computed hash (avoids re-hashing the key).
+    template<typename Key = KeyT>
+    iterator find(const Key& key, size_t key_hash) noexcept
+    {
+        const auto bucket = find_filled_hash(key, key_hash);
+        return {this, bucket};
+    }
+
+    // Const find with a pre-computed hash.
+    template<typename Key = KeyT>
+    const_iterator find(const Key& key, size_t key_hash) const noexcept
+    {
+        const auto bucket = find_filled_hash(key, key_hash);
+        return {this, bucket};
+    }
+
+    // Find by key; returns end() (bucket == _num_buckets) when absent.
+    template<typename Key=KeyT>
+    iterator find(const Key& key) noexcept
+    {
+        const auto bucket = find_filled_bucket(key);
+        return {this, bucket};
+    }
+
+    // Const find by key; returns end() when absent.
+    template<typename Key = KeyT>
+    const_iterator find(const Key& key) const noexcept
+    {
+        const auto bucket = find_filled_bucket(key);
+        return {this, bucket};
+    }
+
+    template<typename Key = KeyT>
+    ValueT& at(const KeyT& key)
+    {
+        const auto bucket = find_filled_bucket(key);
+        // NOTE(review): unlike std::map::at, a missing key is not detected here --
+        // bucket would equal _num_buckets and the sentinel slot is read. The old
+        // "//throw" marker suggests an out_of_range throw was intended; confirm.
+        return EMH_VAL(_pairs, bucket);
+    }
+
+    template<typename Key = KeyT>
+    const ValueT& at(const KeyT& key) const
+    {
+        const auto bucket = find_filled_bucket(key);
+        // NOTE(review): missing key is not detected (reads the sentinel slot);
+        // the "//throw" marker hints an out_of_range throw was intended -- confirm.
+        return EMH_VAL(_pairs, bucket);
+    }
+
+    // True when the key is present in the map.
+    template<typename Key = KeyT>
+    bool contains(const Key& key) const noexcept
+    {
+        const auto bucket = find_filled_bucket(key);
+        return bucket != _num_buckets;
+    }
+
+    // 1 when the key is present, 0 otherwise (keys are unique).
+    template<typename Key = KeyT>
+    size_type count(const Key& key) const noexcept
+    {
+        if (find_filled_bucket(key) == _num_buckets)
+            return 0;
+        return 1;
+    }
+
+    // Range of elements matching key: empty, or exactly one element (unique keys).
+    // fix: `const auto found = {this, ...}` deduced std::initializer_list, which is
+    // ill-formed on instantiation and has no .bucket(); build a real iterator instead.
+    // Dropped the bogus const qualifier: a mutable iterator cannot be built from a
+    // const map (the const_iterator overload below serves const maps).
+    template<typename Key = KeyT>
+    std::pair<iterator, iterator> equal_range(const Key& key) noexcept
+    {
+        const auto bucket = find_filled_bucket(key);
+        iterator found = {this, bucket};
+        if (bucket == _num_buckets)
+            return { found, found };
+        else
+            return { found, std::next(found) };
+    }
+
+    // Const range of elements matching key: empty, or exactly one element.
+    // fix: `const auto found = {this, ...}` deduced std::initializer_list, which is
+    // ill-formed on instantiation and has no .bucket(); build a const_iterator instead.
+    template<typename K=KeyT>
+    std::pair<const_iterator, const_iterator> equal_range(const K& key) const
+    {
+        const auto bucket = find_filled_bucket(key);
+        const_iterator found = {this, bucket};
+        if (bucket == _num_buckets)
+            return { found, found };
+        else
+            return { found, std::next(found) };
+    }
+
+    // Move elements of rhs into *this: keys absent here are moved over and
+    // erased from rhs; duplicate keys are left behind in rhs (same contract
+    // as std::unordered_map::merge).
+    void merge(HashMap& rhs)
+    {
+        // fast path: steal rhs wholesale when this map is empty
+        if (empty()) {
+            *this = std::move(rhs);
+            return;
+        }
+
+        for (auto rit = rhs.begin(); rit != rhs.end(); ) {
+            auto fit = find(rit->first);
+            if (fit.bucket() == _num_buckets) {
+                insert_unique(rit->first, std::move(rit->second));
+                rit = rhs.erase(rit);   // erase returns the next valid iterator
+            } else {
+                ++rit;
+            }
+        }
+    }
+
+#ifdef EMH_EXT
+    // Copy the mapped value into `val` when the key exists; returns presence.
+    bool try_get(const KeyT& key, ValueT& val) const noexcept
+    {
+        const auto bucket = find_filled_bucket(key);
+        if (bucket == _num_buckets)
+            return false;
+        val = EMH_VAL(_pairs, bucket);
+        return true;
+    }
+
+    /// Returns a pointer to the matching ValueT, or nullptr if the key isn't found.
+    ValueT* try_get(const KeyT& key) noexcept
+    {
+        const auto bucket = find_filled_bucket(key);
+        if (bucket == _num_buckets)
+            return nullptr;
+        return &EMH_VAL(_pairs, bucket);
+    }
+
+    /// Const version of the above
+    // NOTE(review): returns a mutable ValueT* from a const member -- a
+    // const ValueT* return would be more const-correct; confirm no caller
+    // relies on the mutable pointer before changing it.
+    ValueT* try_get(const KeyT& key) const noexcept
+    {
+        const auto bucket = find_filled_bucket(key);
+        return bucket == _num_buckets ? nullptr : &EMH_VAL(_pairs, bucket);
+    }
+
+    /// Convenience function: returns a copy of the mapped value,
+    /// or a default-constructed ValueT when the key is absent.
+    ValueT get_or_return_default(const KeyT& key) const noexcept
+    {
+        const auto bucket = find_filled_bucket(key);
+        if (bucket == _num_buckets)
+            return ValueT();
+        return EMH_VAL(_pairs, bucket);
+    }
+#endif
+
+ // -----------------------------------------------------
+    // Insert-or-overwrite helper backing insert_or_assign().
+    // second of the returned pair is true when a fresh insertion happened.
+    template<typename K = KeyT, typename V = ValueT>
+    std::pair<iterator, bool> do_assign(K&& key, V&& val)
+    {
+        reserve(_num_filled);   // same growth check as check_expand_need()
+
+        bool isempty;
+        const auto bucket = find_or_allocate(key, isempty);
+        if (isempty) {
+            EMH_NEW(std::forward<K>(key), std::forward<V>(val), bucket);
+        } else {
+            // key already present: overwrite the mapped value only
+            EMH_VAL(_pairs, bucket) = std::move(val);
+        }
+        return { {this, bucket}, isempty };
+    }
+
+    // Insert helper (copy form): existing keys are left untouched.
+    std::pair<iterator, bool> do_insert(const value_type& value)
+    {
+        bool vacant;
+        const auto bucket = find_or_allocate(value.first, vacant);
+        if (vacant) {
+            EMH_NEW(value.first, value.second, bucket);
+        }
+        return { {this, bucket}, vacant };
+    }
+
+    // Insert helper (move form): existing keys are left untouched.
+    std::pair<iterator, bool> do_insert(value_type&& value)
+    {
+        bool vacant;
+        const auto bucket = find_or_allocate(value.first, vacant);
+        if (vacant) {
+            EMH_NEW(std::move(value.first), std::move(value.second), bucket);
+        }
+        return { {this, bucket}, vacant };
+    }
+
+    // Perfect-forwarding insert of a separate (key, value) pair;
+    // no overwrite when the key already exists.
+    template<typename K = KeyT, typename V = ValueT>
+    std::pair<iterator, bool> do_insert(K&& key, V&& val)
+    {
+        bool isempty;
+        const auto bucket = find_or_allocate(key, isempty);
+        if (isempty) {
+            EMH_NEW(std::forward<K>(key), std::forward<V>(val), bucket);
+        }
+        return { {this, bucket}, isempty };
+    }
+
+    // Insert a copy of `value`; no-op (second == false) when the key exists.
+    std::pair<iterator, bool> insert(const value_type& value)
+    {
+        check_expand_need();    // grow before probing, if needed
+        return do_insert(value);
+    }
+
+    // Move-insert `value`; no-op (second == false) when the key exists.
+    std::pair<iterator, bool> insert(value_type&& value)
+    {
+        check_expand_need();    // grow before probing, if needed
+        return do_insert(std::move(value));
+    }
+
+    // Insert every element of the initializer list (duplicates are skipped).
+    void insert(std::initializer_list<value_type> ilist)
+    {
+        reserve(ilist.size() + _num_filled);
+        for (const auto& value : ilist)
+            do_insert(value);
+    }
+
+    // Insert the range [first, last); requires forward iterators
+    // (std::distance would consume a single-pass range).
+    template <typename Iter>
+    void insert(Iter first, Iter last)
+    {
+        reserve(std::distance(first, last) + _num_filled);
+        for (; first != last; ++first)
+            do_insert(first->first, first->second);
+    }
+
+#if 0
+ template <typename Iter>
+ void insert_unique(Iter begin, Iter end)
+ {
+ reserve(std::distance(begin, end) + _num_filled);
+ for (; begin != end; ++begin)
+ do_insert_unqiue(*begin);
+ }
+#endif
+
+    // Insert a key known NOT to be present (no duplicate check); returns the bucket.
+    template<typename K, typename V>
+    size_type insert_unique(K&& key, V&& val)
+    {
+        return do_insert_unqiue(std::forward<K>(key), std::forward<V>(val));
+    }
+
+    // Move-insert a pair known NOT to be present; returns the bucket.
+    size_type insert_unique(value_type&& value)
+    {
+        return do_insert_unqiue(std::move(value.first), std::move(value.second));
+    }
+
+    // Copy-insert a pair known NOT to be present; returns the bucket.
+    size_type insert_unique(const value_type& value)
+    {
+        return do_insert_unqiue(value.first, value.second);
+    }
+
+    // Unchecked insert: assumes the key is absent, so only an empty slot is
+    // sought (no equality probing). Returns the bucket used.
+    // NOTE: the misspelled name ("unqiue") is kept -- it is called from the
+    // insert_unique() overloads and renaming would break them.
+    template<typename K, typename V>
+    inline size_type do_insert_unqiue(K&& key, V&& val)
+    {
+        check_expand_need();
+        auto bucket = find_unique_bucket(key);
+        EMH_NEW(std::forward<K>(key), std::forward<V>(val), bucket);
+        return bucket;
+    }
+
+    // Insert the pair, or overwrite the mapped value when the key exists
+    // (second of the result is true for a fresh insertion).
+    std::pair<iterator, bool> insert_or_assign(const KeyT& key, ValueT&& val) { return do_assign(key, std::forward<ValueT>(val)); }
+    std::pair<iterator, bool> insert_or_assign(KeyT&& key, ValueT&& val) { return do_assign(std::move(key), std::forward<ValueT>(val)); }
+
+    // Forwarding emplace. NOTE: arguments are forwarded to do_insert, so they
+    // must form either a value_type or a (key, value) pair -- there is no
+    // true in-place piecewise construction.
+    template <typename... Args>
+    inline std::pair<iterator, bool> emplace(Args&&... args) noexcept
+    {
+        check_expand_need();
+        return do_insert(std::forward<Args>(args)...);
+    }
+
+    // Hinted emplace; the hint is ignored by this implementation.
+    template <class... Args>
+    iterator emplace_hint(const_iterator hint, Args&&... args)
+    {
+        (void)hint;
+        check_expand_need();
+        auto res = do_insert(std::forward<Args>(args)...);
+        return res.first;
+    }
+
+    // Insert only if the key is absent; args construct the mapped value.
+    template<class... Args>
+    std::pair<iterator, bool> try_emplace(const KeyT& key, Args&&... args)
+    {
+        check_expand_need();
+        return do_insert(key, std::forward<Args>(args)...);
+    }
+
+    // Move-key variant: insert only if the key is absent.
+    template<class... Args>
+    std::pair<iterator, bool> try_emplace(KeyT&& key, Args&&... args)
+    {
+        check_expand_need();
+        return do_insert(std::forward<KeyT>(key), std::forward<Args>(args)...);
+    }
+
+    // Unchecked emplace: assumes the key is absent (see insert_unique).
+    template <class... Args>
+    inline size_type emplace_unique(Args&&... args) noexcept
+    {
+        return insert_unique(std::forward<Args>(args)...);
+    }
+
+    /* Returns a reference to the mapped value, default-constructing it
+       (and inserting the key) when the key is absent. */
+    ValueT& operator[](const KeyT& key) noexcept
+    {
+        check_expand_need();
+
+        bool isempty;
+        const auto bucket = find_or_allocate(key, isempty);
+        if (isempty) {
+            // fresh key: default-construct the mapped value in place
+            EMH_NEW(key, std::move(ValueT()), bucket);
+        }
+
+        return EMH_VAL(_pairs, bucket);
+    }
+
+    // Move-key variant of operator[]: the key is moved in only when absent.
+    ValueT& operator[](KeyT&& key) noexcept
+    {
+        check_expand_need();
+
+        bool isempty;
+        const auto bucket = find_or_allocate(key, isempty);
+        if (isempty) {
+            EMH_NEW(std::move(key), std::move(ValueT()), bucket);
+        }
+
+        return EMH_VAL(_pairs, bucket);
+    }
+
+ // -------------------------------------------------------
+    /// Erase an element from the hash table.
+    /// return 0 if element was not found, 1 if it was erased.
+    template<typename Key = KeyT>
+    size_type erase(const Key& key)
+    {
+        // erase_key unlinks the node from its chain and returns the slot
+        // to destroy (INACTIVE when the key is absent)
+        const auto bucket = erase_key(key);
+        if (bucket == INACTIVE)
+            return 0;
+
+        clear_bucket(bucket);
+        return 1;
+    }
+
+    // Erase through a const_iterator; returns an iterator to the next element.
+    iterator erase(const_iterator cit)
+    {
+        return erase(iterator(cit));
+    }
+
+    /// Erase the element at `it`.
+    /// Returns an iterator to the next element (or end()).
+    iterator erase(iterator it)
+    {
+        // erase_bucket may fill the erased slot with the chain's next node,
+        // in which case the freed slot differs from it._bucket
+        const auto bucket = erase_bucket(it._bucket);
+        clear_bucket(bucket);
+        if (bucket == it._bucket) {
+            return ++it;
+        } else {
+            // the main bucket now holds the (moved) next element: revisit it
+            it.clear(bucket);
+            return it;
+        }
+    }
+
+    /// Erase the element at `it` without computing the next iterator
+    /// (cheaper than erase(iterator) when the return value is unused).
+    void _erase(const_iterator it)
+    {
+        const auto bucket = erase_bucket(it._bucket);
+        clear_bucket(bucket);
+    }
+
+    // Erase every element for which pred(*it) is true; returns the erase count.
+    template<typename Pred>
+    size_type erase_if(Pred pred)
+    {
+        const auto old_size = size();
+        auto it = begin();
+        while (it != end()) {
+            if (pred(*it))
+                it = erase(it);   // erase yields the next valid iterator
+            else
+                ++it;
+        }
+        return old_size - size();
+    }
+
+    // True when the pair's destructor MUST be run (i.e. the payload is NOT
+    // trivially destructible) -- note the inverted sense vs. the name.
+    static constexpr bool is_triviall_destructable()
+    {
+#if __cplusplus >= 201402L || _MSC_VER > 1600
+        return !(std::is_trivially_destructible<KeyT>::value && std::is_trivially_destructible<ValueT>::value);
+#else
+        // pre-C++14 / old MSVC fallback: approximate with POD-ness
+        return !(std::is_pod<KeyT>::value && std::is_pod<ValueT>::value);
+#endif
+    }
+
+    // True when key and value can be copied with memcpy semantics
+    // (used to choose assignment over swap when relocating pairs).
+    static constexpr bool is_copy_trivially()
+    {
+#if __cplusplus >= 201402L || _MSC_VER > 1600
+        return (std::is_trivially_copyable<KeyT>::value && std::is_trivially_copyable<ValueT>::value);
+#else
+        // pre-C++14 / old MSVC fallback: approximate with POD-ness
+        return (std::is_pod<KeyT>::value && std::is_pod<ValueT>::value);
+#endif
+    }
+
+    // Destroy every occupied slot one by one (clear_bucket decrements
+    // _num_filled, which terminates the loop).
+    void clearkv()
+    {
+        if (is_triviall_destructable()) {
+            for (auto it = cbegin(); _num_filled; ++it)
+                clear_bucket(it.bucket());
+        }
+    }
+
+    /// Remove all elements, keeping full capacity.
+    void clear()
+    {
+        // trivially destructible payload: just mark every bucket empty in bulk
+        // (memset's 0xFFFFFFFF value is truncated to the 0xFF fill byte)
+        if (!is_triviall_destructable() && _num_filled) {
+            memset(_bitmask, 0xFFFFFFFF, (_num_buckets + 7) / 8);
+            if (_num_buckets < 8) _bitmask[0] = (1 << _num_buckets) - 1;
+        }
+        else if (_num_filled)
+            clearkv();  // non-trivial payload: run each destructor
+
+        //EMH_BUCKET(_pairs, _num_buckets) = 0; //_last
+        _num_filled = 0;
+    }
+
+    // Rehash down to the smallest capacity that holds the current elements.
+    void shrink_to_fit()
+    {
+        rehash(_num_filled + 1);
+    }
+
+    /// Make room for this many elements.
+    /// Returns true when a rehash actually happened.
+    bool reserve(uint64_t num_elems)
+    {
+        // _mlf is a fixed-point max-load-factor; >> 27 converts to a bucket count
+        const auto required_buckets = (num_elems * _mlf >> 27);
+        if (EMH_LIKELY(required_buckets < _num_buckets))
+            return false;
+
+#if EMH_HIGH_LOAD
+        // allow tiny tables to fill completely before growing
+        if (required_buckets < 64 && _num_filled < _num_buckets)
+            return false;
+#endif
+
+#if EMH_STATIS
+        if (_num_filled > EMH_STATIS) dump_statics(true);
+#endif
+        rehash(required_buckets + 2);
+        return true;
+    }
+
+    // Reallocate to the next power-of-two capacity >= required_buckets and
+    // reinsert every element. Invalidates all iterators and pointers.
+    void rehash(uint64_t required_buckets)
+    {
+        if (required_buckets < _num_filled)
+            return;
+
+        uint64_t buckets = _num_filled > (1u << 16) ? (1u << 16) : 2u;
+        while (buckets < required_buckets) { buckets *= 2; }
+
+        // no need alloc large bucket for small key sizeof(KeyT) < sizeof(int).
+        // set small a max_load_factor, insert/reserve() will fail and introduce rehash issue TODO: dothing ?
+        if (sizeof(KeyT) < sizeof(size_type) && buckets >= (1ul << (2 * 8)))
+            buckets = 2ul << (sizeof(KeyT) * 8);
+
+        assert(buckets < max_size() && buckets > _num_filled);
+        //TODO: throwOverflowError
+
+        auto num_buckets = (size_type)buckets;
+        auto old_num_filled = _num_filled;
+        auto old_mask = _num_buckets - 1;
+        auto old_pairs = _pairs;
+        auto* obmask = _bitmask;
+
+        _num_filled = 0;
+        _num_buckets = num_buckets;
+        _mask = num_buckets - 1;
+
+        // NOTE(review): malloc result is not checked for nullptr
+        _pairs = (PairT*)malloc(AllocSize(_num_buckets));
+        // zero the EPACK_SIZE sentinel pairs that sit past the last bucket
+        memset((char*)(_pairs + _num_buckets), 0, sizeof(PairT) * EPACK_SIZE);
+
+        // the empty-bitmask lives in the same allocation, after the sentinels
+        _bitmask = decltype(_bitmask)(_pairs + EPACK_SIZE + num_buckets);
+
+        const auto mask_byte = (num_buckets + 7) / 8;
+        // bit set == bucket empty; zero the guard bytes past the mask
+        memset(_bitmask, 0xFFFFFFFF, mask_byte);
+        memset(((char*)_bitmask) + mask_byte, 0, BIT_PACK);
+        if (num_buckets < 8)
+            _bitmask[0] = (1 << num_buckets) - 1;
+
+        //for (size_type src_bucket = 0; _num_filled < old_num_filled; src_bucket++) {
+        // walk the old table top-down, moving every occupied slot into the new one
+        for (size_type src_bucket = old_mask; _num_filled < old_num_filled; src_bucket --) {
+            if (obmask[src_bucket / MASK_BIT] & (EMH_MASK(src_bucket)))
+                continue;   // bit set -> old slot was empty
+
+            auto& key = EMH_KEY(old_pairs, src_bucket);
+            const auto bucket = find_unique_bucket(key);
+            EMH_NEW(std::move(key), std::move(EMH_VAL(old_pairs, src_bucket)), bucket);
+            if (is_triviall_destructable())
+                old_pairs[src_bucket].~PairT();
+        }
+
+#if EMH_REHASH_LOG
+        if (_num_filled > EMH_REHASH_LOG) {
+            auto mbucket = bucket_main();
+            char buff[255] = {0};
+            sprintf(buff, "    _num_filled/collision/main/K.V/pack/ = %u/%.2lf%%(%.2lf%%)/%s.%s/%zd",
+                    _num_filled, 200.0f * (_num_filled - mbucket) / _mask, 100.0f * mbucket / _mask,
+                    typeid(KeyT).name(), typeid(ValueT).name(), sizeof(_pairs[0]));
+#ifdef EMH_LOG
+            static size_t ihashs = 0;
+            EMH_LOG << "rhash_nums = " << ihashs ++ << "|" <<__FUNCTION__ << "|" << buff << endl;
+#else
+            puts(buff);
+#endif
+        }
+#endif
+
+        free(old_pairs);
+        assert(old_num_filled == _num_filled);
+    }
+
+private:
+    // Can we fit another element? Grows (rehashes) when the load factor
+    // would be exceeded; returns true when a rehash happened.
+    inline bool check_expand_need()
+    {
+        return reserve(_num_filled);
+    }
+
+    // Mark `bucket` empty in the bitmask, shrink the count, and destroy
+    // the pair when its destructor is non-trivial.
+    void clear_bucket(size_type bucket)
+    {
+        EMH_CLS(bucket);
+        _num_filled--;
+        if (is_triviall_destructable())
+            _pairs[bucket].~PairT();
+    }
+
+#if 1
+    //template<typename UType, typename std::enable_if<std::is_integral<UType>::value, size_type>::type = 0>
+    // Unlink the node holding `key` from its collision chain and return the
+    // slot that should be destroyed by the caller (INACTIVE when absent).
+    // When the key sits in its main bucket, the chain's next node is moved
+    // into the main bucket and that node's (now free) slot is returned.
+    template<typename UType>
+    size_type erase_key(const UType& key)
+    {
+        const auto bucket = hash_key(key) & _mask;
+        if (EMH_EMPTY(_pairs, bucket))
+            return INACTIVE;
+
+        auto next_bucket = EMH_BUCKET(_pairs, bucket);
+        const auto eqkey = _eq(key, EMH_KEY(_pairs, bucket));
+        if (eqkey) {
+            // key found in its main bucket
+            if (next_bucket == bucket)
+                return bucket;      // singleton chain: free the main bucket itself
+
+            // pull the next chain node into the main bucket, free its old slot
+            const auto nbucket = EMH_BUCKET(_pairs, next_bucket);
+            if (is_copy_trivially())
+                EMH_PKV(_pairs, bucket) = EMH_PKV(_pairs, next_bucket);
+            else
+                EMH_PKV(_pairs, bucket).swap(EMH_PKV(_pairs, next_bucket));
+
+            EMH_BUCKET(_pairs, bucket) = (nbucket == next_bucket) ? bucket : nbucket;
+            return next_bucket;
+        } else if (next_bucket == bucket)
+            return INACTIVE;        // singleton chain holding a different key
+        /* else if (EMH_UNLIKELY(bucket != hash_key(EMH_KEY(_pairs, bucket)) & _mask))
+            return INACTIVE;
+        */
+
+        // walk the chain looking for the key; unlink the node when found
+        auto prev_bucket = bucket;
+        while (true) {
+            const auto nbucket = EMH_BUCKET(_pairs, next_bucket);
+            if (_eq(key, EMH_KEY(_pairs, next_bucket))) {
+                EMH_BUCKET(_pairs, prev_bucket) = (nbucket == next_bucket) ? prev_bucket : nbucket;
+                return next_bucket;
+            }
+
+            if (nbucket == next_bucket)
+                break;              // reached the chain tail without a match
+            prev_bucket = next_bucket;
+            next_bucket = nbucket;
+        }
+
+        return INACTIVE;
+    }
+#else
+ template<typename UType, typename std::enable_if<!std::is_integral<UType>::value, size_type>::type = 0>
+ size_type erase_key(const UType& key)
+ {
+ const auto bucket = hash_key(key) & _mask;
+ if (EMH_EMPTY(_pairs, bucket))
+ return INACTIVE;
+
+ auto next_bucket = EMH_BUCKET(_pairs, bucket);
+ if (next_bucket == bucket)
+ return _eq(key, EMH_KEY(_pairs, bucket)) ? bucket : INACTIVE;
+// else if (bucket != hash_key(EMH_KEY(_pairs, bucket)))
+// return INACTIVE;
+
+ //find erase key and swap to last bucket
+ size_type prev_bucket = bucket, find_bucket = INACTIVE;
+ next_bucket = bucket;
+ while (true) {
+ const auto nbucket = EMH_BUCKET(_pairs, next_bucket);
+ if (_eq(key, EMH_KEY(_pairs, next_bucket))) {
+ find_bucket = next_bucket;
+ if (nbucket == next_bucket) {
+ EMH_BUCKET(_pairs, prev_bucket) = prev_bucket;
+ break;
+ }
+ }
+ if (nbucket == next_bucket) {
+ if (find_bucket != INACTIVE) {
+ EMH_PKV(_pairs, find_bucket).swap(EMH_PKV(_pairs, nbucket));
+// EMH_PKV(_pairs, find_bucket) = EMH_PKV(_pairs, nbucket);
+ EMH_BUCKET(_pairs, prev_bucket) = prev_bucket;
+ find_bucket = nbucket;
+ }
+ break;
+ }
+ prev_bucket = next_bucket;
+ next_bucket = nbucket;
+ }
+
+ return find_bucket;
+ }
+#endif
+
+    // Unlink slot `bucket` from its chain; returns the slot the caller must
+    // clear. When `bucket` is a main bucket with successors, the successor is
+    // moved into it and the successor's old slot is returned instead.
+    size_type erase_bucket(const size_type bucket)
+    {
+        const auto next_bucket = EMH_BUCKET(_pairs, bucket);
+        const auto main_bucket = hash_key(EMH_KEY(_pairs, bucket)) & _mask;
+        if (bucket == main_bucket) {
+            if (bucket != next_bucket) {
+                // move the next chain node into the main bucket
+                const auto nbucket = EMH_BUCKET(_pairs, next_bucket);
+                if (is_copy_trivially())
+                    EMH_PKV(_pairs, bucket) = EMH_PKV(_pairs, next_bucket);
+                else
+                    EMH_PKV(_pairs, bucket).swap(EMH_PKV(_pairs, next_bucket));
+                EMH_BUCKET(_pairs, bucket) = (nbucket == next_bucket) ? bucket : nbucket;
+            }
+            return next_bucket;
+        }
+
+        // non-main slot: splice it out of the chain
+        const auto prev_bucket = find_prev_bucket(main_bucket, bucket);
+        EMH_BUCKET(_pairs, prev_bucket) = (bucket == next_bucket) ? prev_bucket : next_bucket;
+        return bucket;
+    }
+
+    // Find the bucket with this key using a pre-computed hash;
+    // returns _num_buckets (== end()) when absent.
+    template<typename K = KeyT>
+    size_type find_filled_hash(const K& key, const size_t key_hash) const
+    {
+        const auto bucket = key_hash & _mask;
+        if (EMH_EMPTY(_pairs, bucket))
+            return _num_buckets;
+
+        // walk the collision chain rooted at the main bucket
+        auto next_bucket = bucket;
+        while (true) {
+            if (_eq(key, EMH_KEY(_pairs, next_bucket)))
+                return next_bucket;
+
+            const auto nbucket = EMH_BUCKET(_pairs, next_bucket);
+            if (nbucket == next_bucket)
+                break;              // chain tail: not found
+            next_bucket = nbucket;
+        }
+
+        return _num_buckets;
+    }
+
+    // Find the bucket with this key; returns _num_buckets (== end()) when absent.
+    template<typename K = KeyT>
+    size_type find_filled_bucket(const K& key) const
+    {
+        const auto bucket = hash_key(key) & _mask;
+        if (EMH_EMPTY(_pairs, bucket))
+            return _num_buckets;
+
+        // walk the collision chain rooted at the main bucket
+        auto next_bucket = bucket;
+        while (true) {
+            if (_eq(key, EMH_KEY(_pairs, next_bucket)))
+                return next_bucket;
+
+            const auto nbucket = EMH_BUCKET(_pairs, next_bucket);
+            if (nbucket == next_bucket)
+                return _num_buckets;    // chain tail: not found
+            next_bucket = nbucket;
+        }
+        // fix: removed the unreachable trailing `return 0;` -- it was dead code
+        // and inconsistent with the not-found convention (_num_buckets).
+    }
+
+    //kick out the occupant of `kbucket` (whose main bucket is `kmain`) and
+    //relocate it to an empty slot so a new key can take its main position;
+    //it breaks the original link and relinks the chain again.
+    //before: main_bucket-->prev_bucket --> bucket --> next_bucket
+    //after : main_bucket-->prev_bucket --> (removed)--> new_bucket--> next_bucket
+    size_type kickout_bucket(const size_type kmain, const size_type kbucket)
+    {
+        const auto next_bucket = EMH_BUCKET(_pairs, kbucket);
+        const auto new_bucket  = find_empty_bucket(next_bucket, kbucket);
+        const auto prev_bucket = find_prev_bucket(kmain, kbucket);
+        new(_pairs + new_bucket) PairT(std::move(_pairs[kbucket]));
+        if (is_triviall_destructable())
+            _pairs[kbucket].~PairT();
+
+        // if the evicted node was the chain tail, the relocated node is now the tail
+        if (next_bucket == kbucket)
+            EMH_BUCKET(_pairs, new_bucket) = new_bucket;
+        EMH_BUCKET(_pairs, prev_bucket) = new_bucket;
+
+        EMH_SET(new_bucket);    // mark the new slot occupied
+        return kbucket;
+    }
+
+/*
+** inserts a new key into a hash table; first check whether key's main
+** bucket/position is free. If not, check whether colliding node/bucket is in its main
+** position or not: if it is not, move colliding bucket to an empty place and
+** put new key in its main position; otherwise (colliding bucket is in its main
+** position), new key goes to an empty position. ***/
+
+    // Find the slot for `key`, allocating one when absent (Lua-style chaining:
+    // a colliding node not in its main position is evicted first).
+    // Sets isempty = true when the returned slot is fresh (caller must
+    // construct into it), false when the key already exists there.
+    template<typename K=KeyT>
+    size_type find_or_allocate(const K& key, bool& isempty)
+    {
+        const auto bucket = hash_key(key) & _mask;
+        const auto& bucket_key = EMH_KEY(_pairs, bucket);
+        if (EMH_EMPTY(_pairs, bucket)) {
+            isempty = true;
+            return bucket;
+        }
+        else if (_eq(key, bucket_key)) {
+            isempty = false;
+            return bucket;
+        }
+
+        isempty = true;
+        auto next_bucket = EMH_BUCKET(_pairs, bucket);
+        //check whether the current occupant is in its main bucket or not
+        const auto kmain_bucket = hash_key(bucket_key) & _mask;
+        if (kmain_bucket != bucket)
+            return kickout_bucket(kmain_bucket, bucket);    // evict squatter, take its slot
+        else if (next_bucket == bucket)
+            return EMH_BUCKET(_pairs, next_bucket) = find_empty_bucket(next_bucket, bucket);
+
+#if EMH_LRU_SET
+        auto prev_bucket = bucket;
+#endif
+        //walk the chain checking keys; with EMH_LRU_SET a hit is swapped toward the front
+        while (true) {
+            if (EMH_UNLIKELY(_eq(key, EMH_KEY(_pairs, next_bucket)))) {
+                isempty = false;
+#if EMH_LRU_SET
+                EMH_PKV(_pairs, next_bucket).swap(EMH_PKV(_pairs, prev_bucket));
+                return prev_bucket;
+#else
+                return next_bucket;
+#endif
+            }
+
+#if EMH_LRU_SET
+            prev_bucket = next_bucket;
+#endif
+
+            const auto nbucket = EMH_BUCKET(_pairs, next_bucket);
+            if (nbucket == next_bucket)
+                break;
+            next_bucket = nbucket;
+        }
+
+        //find a new empty slot and link it to the tail, TODO link after main bucket?
+        const auto new_bucket = find_empty_bucket(next_bucket, bucket);// : find_empty_bucket(next_bucket);
+        return EMH_BUCKET(_pairs, next_bucket) = new_bucket;
+    }
+
+    // key is not in this map. Find a place to put it: scan the empty-bitmask
+    // near the main bucket first (one CTZ per 64 empty-bits), then probe
+    // pseudo-randomly using the _last cursor stored in the sentinel slot.
+    size_type find_empty_bucket(const size_type bucket_from, const size_type main_bucket)
+    {
+#ifdef EMH_ALIGN64 // only works 64bit
+        const auto boset = bucket_from % MASK_BIT;
+        auto* const align = _bitmask + bucket_from / MASK_BIT;
+        const auto bmask = ((size_t)align[1] << (MASK_BIT - boset)) | (align[0] >> boset);
+        if (EMH_LIKELY(bmask != 0))
+            return bucket_from + CTZ(bmask);
+#elif EMH_ITER_SAFE
+        // memcpy avoids an unaligned (strict-aliasing-unsafe) word load
+        const auto boset = bucket_from % 8;
+        auto* const start = (uint8_t*)_bitmask + bucket_from / 8;
+        size_t bmask; memcpy(&bmask, start + 0, sizeof(bmask)); bmask >>= boset;// bmask |= ((size_t)start[8] << (SIZE_BIT - boset));
+        if (EMH_LIKELY(bmask != 0))
+            return bucket_from + CTZ(bmask);
+#else
+        // NOTE(review): unaligned size_t load through a cast -- fast but
+        // formally UB on strict-alignment targets
+        const auto boset = main_bucket % 8;
+        auto* const align = (uint8_t*)_bitmask + main_bucket / 8; (void)bucket_from;
+        const size_t bmask = (*(size_t*)(align) >> boset);// & 0xF0F0F0F0FF0FF0FFull;//
+        if (EMH_LIKELY(bmask != 0))
+            return main_bucket + CTZ(bmask);
+#endif
+
+        const auto qmask = _mask / SIZE_BIT;
+        // one probe a quarter-word below the main bucket before falling back
+        if (1) {
+            const size_type step = (main_bucket - SIZE_BIT / 4) & qmask;
+            const auto bmask3 = *((size_t*)_bitmask + step);
+            if (bmask3 != 0)
+                return step * SIZE_BIT + CTZ(bmask3);
+        }
+
+        // fallback: advance the persistent _last cursor (kept in the sentinel
+        // pair past the table) word by word until an empty bit is found
+        for (; ;) {
+            auto& _last = EMH_BUCKET(_pairs, _num_buckets);
+            const auto bmask2 = *((size_t*)_bitmask + _last);
+            if (bmask2 != 0)
+                return _last * SIZE_BIT + CTZ(bmask2);
+
+            const auto next1 = (qmask / 2 + _last) & qmask;
+            const auto bmask1 = *((size_t*)_bitmask + next1);
+            if (bmask1 != 0) {
+                //_last = next1;
+                return next1 * SIZE_BIT + CTZ(bmask1);
+            }
+            _last = (_last + 1) & qmask;
+        }
+
+        return 0;   // unreachable: the loop above only exits via return
+    }
+
+    // key is not in this map. Find a place to put it -- variant used on the
+    // rehash/unique-insert path (no _last cursor, simple wrap-around scan).
+    size_type find_unique_empty(const size_type bucket_from, const size_t main_bucket)
+    {
+#ifdef EMH_ALIGN64
+        const auto boset = bucket_from % MASK_BIT;
+        auto* const align = _bitmask + bucket_from / MASK_BIT;
+        const auto bmask = ((size_t)align[1] << (MASK_BIT - boset)) | (align[0] >> boset);
+        static_assert(sizeof(size_t) > 4);
+#elif EMH_ITER_SAFE
+        // memcpy avoids an unaligned word load
+        const auto boset = bucket_from % 8;
+        auto* const start = (uint8_t*)_bitmask + bucket_from / 8;
+        size_t bmask; memcpy(&bmask, start + 0, sizeof(bmask)); bmask >>= boset;
+#else
+        const auto boset = bucket_from % 8; (void)main_bucket;
+        auto* const align = (uint8_t*)_bitmask + bucket_from / 8;
+        const auto bmask = (*(size_t*)(align) >> boset); //maybe not aligned and warning
+#endif
+        if (EMH_LIKELY(bmask != 0))
+            return bucket_from + CTZ(bmask);
+
+        // nothing free near bucket_from: scan whole bitmask words, wrapping
+        const auto qmask = _mask / SIZE_BIT;
+        for (auto last = (bucket_from + _mask) & qmask; ;) {
+            const auto bmask2 = *((size_t*)_bitmask + last);// & 0xF0F0F0F0FF0FF0FFull;
+            if (EMH_LIKELY(bmask2 != 0))
+                return last * SIZE_BIT + CTZ(bmask2);
+            last = (last + 1) & qmask;
+        }
+
+        return 0;   // unreachable: the loop above only exits via return
+    }
+
+    // Follow the collision chain from main_bucket and return its tail slot.
+    size_type find_last_bucket(size_type main_bucket) const
+    {
+        auto curr = EMH_BUCKET(_pairs, main_bucket);
+        if (curr == main_bucket)
+            return main_bucket;
+
+        for (auto next = EMH_BUCKET(_pairs, curr); next != curr; next = EMH_BUCKET(_pairs, curr))
+            curr = next;
+        return curr;
+    }
+
+    // Return the chain node whose next-link points at `bucket`,
+    // walking from main_bucket (bucket must be on that chain).
+    size_type find_prev_bucket(size_type main_bucket, const size_type bucket) const
+    {
+        auto prev = main_bucket;
+        while (true) {
+            const auto next = EMH_BUCKET(_pairs, prev);
+            if (next == bucket)
+                return prev;
+            prev = next;
+        }
+    }
+
+    // Allocate a slot for a key known to be absent: take the main bucket if
+    // free, evict a squatting collider, or append to the chain tail.
+    size_type find_unique_bucket(const KeyT& key)
+    {
+        const size_type bucket = hash_key(key) & _mask;
+        if (EMH_EMPTY(_pairs, bucket))
+            return bucket;
+
+        //check whether the current occupant is in its main bucket or not
+        const auto kmain_bucket = hash_key(EMH_KEY(_pairs, bucket)) & _mask;
+        if (EMH_UNLIKELY(kmain_bucket != bucket))
+            return kickout_bucket(kmain_bucket, bucket);
+
+        auto next_bucket = EMH_BUCKET(_pairs, bucket);
+        if (next_bucket != bucket)
+            next_bucket = find_last_bucket(next_bucket);
+
+        //find a new empty slot and link it to the chain tail
+        return EMH_BUCKET(_pairs, next_bucket) = find_unique_empty(next_bucket, bucket);
+    }
+
+#if EMH_INT_HASH
+    // Fibonacci-hashing multiplier (2^64 / golden ratio).
+    static constexpr uint64_t KC = UINT64_C(11400714819323198485);
+    // Integer mixer selected by EMH_INT_HASH: 1 = 128-bit multiply fold,
+    // 2 = MurmurHash3 finalizer, 3 = rotate-multiply mix, default = splitmix64.
+    static inline uint64_t hash64(uint64_t key)
+    {
+#if __SIZEOF_INT128__ && EMH_INT_HASH == 1
+        __uint128_t r = key; r *= KC;
+        return (uint64_t)(r >> 64) + (uint64_t)r;
+#elif EMH_INT_HASH == 2
+        //MurmurHash3Mixer
+        uint64_t h = key;
+        h ^= h >> 33;
+        h *= 0xff51afd7ed558ccd;
+        h ^= h >> 33;
+        h *= 0xc4ceb9fe1a85ec53;
+        h ^= h >> 33;
+        return h;
+#elif _WIN64 && EMH_INT_HASH == 1
+        uint64_t high;
+        return _umul128(key, KC, &high) + high;
+#elif EMH_INT_HASH == 3
+        auto ror  = (key >> 32) | (key << 32);
+        auto low  = key * 0xA24BAED4963EE407ull;
+        auto high = ror * 0x9FB21C651E98DF25ull;
+        auto mix  = low + high;
+        return mix;
+#elif EMH_INT_HASH == 1
+        uint64_t r = key * UINT64_C(0xca4bcaa75ec3f625);
+        return (r >> 32) + r;
+#elif EMH_WYHASH64
+        return wyhash64(key, KC);
+#else
+        // splitmix64 finalizer
+        uint64_t x = key;
+        x = (x ^ (x >> 30)) * UINT64_C(0xbf58476d1ce4e5b9);
+        x = (x ^ (x >> 27)) * UINT64_C(0x94d049bb133111eb);
+        x = x ^ (x >> 31);
+        return x;
+#endif
+    }
+#endif
+
+    // Hash for integral keys: strong mixer, identity-ish, or the user hasher,
+    // selected by EMH_INT_HASH / EMH_IDENTITY_HASH.
+    template<typename UType, typename std::enable_if<std::is_integral<UType>::value, size_type>::type = 0>
+    inline size_type hash_key(const UType key) const
+    {
+#if EMH_INT_HASH
+        return hash64(key);
+#elif EMH_IDENTITY_HASH
+        return key + (key >> 24);
+#else
+        return (size_type)_hasher(key);
+#endif
+    }
+
+    // Hash for std::string keys: wyhash when enabled, otherwise the user hasher.
+    template<typename UType, typename std::enable_if<std::is_same<UType, std::string>::value, size_type>::type = 0>
+    inline size_type hash_key(const UType& key) const
+    {
+#if EMH_WY_HASH
+        return wyhash(key.data(), key.size(), 0);
+#else
+        return (size_type)_hasher(key);
+#endif
+    }
+
+    // Fallback hash for all other key types: delegate to the user hasher.
+    template<typename UType, typename std::enable_if<!std::is_integral<UType>::value && !std::is_same<UType, std::string>::value, size_type>::type = 0>
+    inline size_type hash_key(const UType& key) const
+    {
+        return (size_type)_hasher(key);
+    }
+
+private:
+    uint32_t* _bitmask;     // 1 bit per bucket; bit set == bucket is empty
+    PairT*    _pairs;       // bucket array (+ EPACK_SIZE sentinel pairs, then the bitmask)
+    HashT     _hasher;      // user hash functor
+    EqT       _eq;          // user key-equality functor
+    size_type _mask;        // _num_buckets - 1 (capacity is always a power of two)
+    size_type _num_buckets; // current bucket count
+
+    size_type _num_filled;  // number of stored elements
+    uint32_t  _mlf;         // max load factor in fixed point (see reserve: n * _mlf >> 27)
+
+private:
+    static constexpr uint32_t BIT_PACK = sizeof(_bitmask[0]) * 2;
+    static constexpr uint32_t MASK_BIT = sizeof(_bitmask[0]) * 8;
+    static constexpr uint32_t SIZE_BIT = sizeof(size_t) * 8;
+    // NOTE(review): parses as ((sizeof(PairT) >= sizeof(size_t)) == 0) ? 1 : 2,
+    // i.e. 2 sentinels for large pairs and 1 for small -- the trailing "// > 1"
+    // comment suggests the precedence may not match the intent; confirm.
+    static constexpr uint32_t EPACK_SIZE = sizeof(PairT) >= sizeof(size_t) == 0 ? 1 : 2; // > 1
+};
+}
+// namespace emhash7
+#if __cplusplus >= 201103L
+//template <class Key, class Val> using ehmap7 = emhash7::HashMap<Key, Val, std::hash<Key>, std::equal_to<Key>>;
+#endif
+
+//TODO
+//2. improve rehash and find miss performance(reduce peak memory)
+//3. dump or Serialization interface
+//4. node hash map support
+//5. load_factor > 1.0 && add grow ratio
+//... https://godbolt.org/
diff --git a/misc/benchmarks/external/khash.h b/misc/benchmarks/external/khash.h
new file mode 100644
index 00000000..61dabc4d
--- /dev/null
+++ b/misc/benchmarks/external/khash.h
@@ -0,0 +1,595 @@
+/* The MIT License
+ Copyright (c) 2008, 2009, 2011 by Attractive Chaos <[email protected]>
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
+*/
+
+/*
+ An example:
+#include "khash.h"
+KHASH_MAP_INIT_INT(32, char)
+int main() {
+ int ret, is_missing;
+ khiter_t k;
+ khash_t(32) *h = kh_init(32);
+ k = kh_put(32, h, 5, &ret);
+ kh_value(h, k) = 10;
+ k = kh_get(32, h, 10);
+ is_missing = (k == kh_end(h));
+ k = kh_get(32, h, 5);
+ kh_del(32, h, k);
+ for (k = kh_begin(h); k != kh_end(h); ++k)
+ if (kh_exist(h, k)) kh_value(h, k) = 1;
+ kh_destroy(32, h);
+ return 0;
+}
+*/
+
+/*
+ 2013-05-02 (0.2.8):
+ * Use quadratic probing. When the capacity is power of 2, stepping function
+ i*(i+1)/2 guarantees to traverse each bucket. It is better than double
+ hashing on cache performance and is more robust than linear probing.
+ In theory, double hashing should be more robust than quadratic probing.
+ However, my implementation is probably not for large hash tables, because
+ the second hash function is closely tied to the first hash function,
+ which reduce the effectiveness of double hashing.
+ Reference: http://research.cs.vt.edu/AVresearch/hashing/quadratic.php
+ 2011-12-29 (0.2.7):
+ * Minor code clean up; no actual effect.
+ 2011-09-16 (0.2.6):
+ * The capacity is a power of 2. This seems to dramatically improve the
+ speed for simple keys. Thank Zilong Tan for the suggestion. Reference:
+ - http://code.google.com/p/ulib/
+ - http://nothings.org/computer/judy/
+ * Allow to optionally use linear probing which usually has better
+ performance for random input. Double hashing is still the default as it
+ is more robust to certain non-random input.
+ * Added Wang's integer hash function (not used by default). This hash
+ function is more robust to certain non-random input.
+ 2011-02-14 (0.2.5):
+ * Allow to declare global functions.
+ 2009-09-26 (0.2.4):
+ * Improve portability
+ 2008-09-19 (0.2.3):
+ * Corrected the example
+ * Improved interfaces
+ 2008-09-11 (0.2.2):
+ * Improved speed a little in kh_put()
+ 2008-09-10 (0.2.1):
+ * Added kh_clear()
+ * Fixed a compiling error
+ 2008-09-02 (0.2.0):
+ * Changed to token concatenation which increases flexibility.
+ 2008-08-31 (0.1.2):
+ * Fixed a bug in kh_get(), which has not been tested previously.
+ 2008-08-31 (0.1.1):
+ * Added destructor
+*/
+
+
+#ifndef __AC_KHASH_H
+#define __AC_KHASH_H
+
+/*!
+ @header
+ Generic hash table library.
+ */
+
+#define AC_VERSION_KHASH_H "0.2.8"
+
+#include <stdlib.h>
+#include <string.h>
+#include <limits.h>
+
+/* compiler specific configuration */
+
+#if UINT_MAX == 0xffffffffu
+typedef unsigned int khint32_t;
+#elif ULONG_MAX == 0xffffffffu
+typedef unsigned long khint32_t;
+#endif
+
+#if ULONG_MAX == ULLONG_MAX
+typedef unsigned long khint64_t;
+#else
+typedef unsigned long long khint64_t;
+#endif
+
+#ifndef kh_inline
+#ifdef _MSC_VER
+#define kh_inline __inline
+#else
+#define kh_inline inline
+#endif
+#endif /* kh_inline */
+
+#ifndef klib_unused
+#if (defined __clang__ && __clang_major__ >= 3) || (defined __GNUC__ && __GNUC__ >= 3)
+#define klib_unused __attribute__ ((__unused__))
+#else
+#define klib_unused
+#endif
+#endif /* klib_unused */
+
/* Bucket indices are 32-bit; khiter_t doubles as the iterator type. */
typedef khint32_t khint_t;
typedef khint_t khiter_t;

/* Two metadata bits per bucket, 16 buckets packed per khint32_t flags word:
 *   bit 1 (mask 2) = "empty"   -- bucket never used
 *   bit 0 (mask 1) = "deleted" -- bucket held a key that was removed
 * A bucket holds live data iff both bits are clear (__ac_iseither == 0). */
#define __ac_isempty(flag, i) ((flag[i>>4]>>((i&0xfU)<<1))&2)
#define __ac_isdel(flag, i) ((flag[i>>4]>>((i&0xfU)<<1))&1)
#define __ac_iseither(flag, i) ((flag[i>>4]>>((i&0xfU)<<1))&3)
#define __ac_set_isdel_false(flag, i) (flag[i>>4]&=~(1ul<<((i&0xfU)<<1)))
#define __ac_set_isempty_false(flag, i) (flag[i>>4]&=~(2ul<<((i&0xfU)<<1)))
#define __ac_set_isboth_false(flag, i) (flag[i>>4]&=~(3ul<<((i&0xfU)<<1)))
#define __ac_set_isdel_true(flag, i) (flag[i>>4]|=1ul<<((i&0xfU)<<1))

/* Number of khint32_t flag words needed for m buckets: m/16, at least 1. */
#define __ac_fsize(m) ((m) < 16? 1 : (m)>>4)

/* Round a 32-bit value up to the next power of two, in place (values that
 * are already powers of two are left unchanged). */
#ifndef kroundup32
#define kroundup32(x) (--(x), (x)|=(x)>>1, (x)|=(x)>>2, (x)|=(x)>>4, (x)|=(x)>>8, (x)|=(x)>>16, ++(x))
#endif
+
+#ifndef kcalloc
+#define kcalloc(N,Z) calloc(N,Z)
+#endif
+#ifndef kmalloc
+#define kmalloc(Z) malloc(Z)
+#endif
+#ifndef krealloc
+#define krealloc(P,Z) realloc(P,Z)
+#endif
+#ifndef kfree
+#define kfree(P) free(P)
+#endif
+
+static const double __ac_HASH_UPPER = 0.77;
+
/* Expand the struct for table type `name`: parallel bucket arrays for keys
 * and values, the 2-bit-per-bucket `flags` metadata, and the counters
 * (size = live keys; n_occupied = live + deleted buckets; upper_bound =
 * resize threshold, set in kh_resize from __ac_HASH_UPPER). */
#define __KHASH_TYPE(name, khkey_t, khval_t) \
	typedef struct kh_##name##_s { \
		khint_t n_buckets, size, n_occupied, upper_bound; \
		khint32_t *flags; \
		khkey_t *keys; \
		khval_t *vals; \
	} kh_##name##_t;

/* Expand extern declarations of the API, for use with KHASH_DECLARE when
 * the implementation lives in a separate translation unit. */
#define __KHASH_PROTOTYPES(name, khkey_t, khval_t) \
	extern kh_##name##_t *kh_init_##name(void); \
	extern void kh_destroy_##name(kh_##name##_t *h); \
	extern void kh_clear_##name(kh_##name##_t *h); \
	extern khint_t kh_get_##name(const kh_##name##_t *h, khkey_t key); \
	extern int kh_resize_##name(kh_##name##_t *h, khint_t new_n_buckets); \
	extern khint_t kh_put_##name(kh_##name##_t *h, khkey_t key, int *ret); \
	extern void kh_del_##name(kh_##name##_t *h, khint_t x);
+
/*
 * Expand the complete implementation for table type `name`:
 *   kh_init_*    -- allocate a zero-initialized table
 *   kh_destroy_* -- free keys/vals/flags and the table itself
 *   kh_clear_*   -- mark every bucket "empty" (0xaa = all empty-bits set)
 *   kh_get_*     -- lookup; returns n_buckets when the key is absent
 *   kh_resize_*  -- grow/shrink to a power of two (>= 4) and rehash in
 *                   place with a kick-out relocation loop, using only the
 *                   new flags array as working space
 *   kh_put_*     -- insert; *ret is -1 on resize failure, 0 if the key was
 *                   already present, 1 if an empty bucket was used, 2 if a
 *                   deleted bucket was reused
 *   kh_del_*     -- mark a bucket deleted and decrement size
 * Probing is quadratic, i = (i + (++step)) & mask, which visits every
 * bucket when n_buckets is a power of two (see the 0.2.8 changelog note).
 */
#define __KHASH_IMPL(name, SCOPE, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal) \
	SCOPE kh_##name##_t *kh_init_##name(void) { \
		return (kh_##name##_t*)kcalloc(1, sizeof(kh_##name##_t)); \
	} \
	SCOPE void kh_destroy_##name(kh_##name##_t *h) \
	{ \
		if (h) { \
			kfree((void *)h->keys); kfree(h->flags); \
			kfree((void *)h->vals); \
			kfree(h); \
		} \
	} \
	SCOPE void kh_clear_##name(kh_##name##_t *h) \
	{ \
		if (h && h->flags) { \
			memset(h->flags, 0xaa, __ac_fsize(h->n_buckets) * sizeof(khint32_t)); \
			h->size = h->n_occupied = 0; \
		} \
	} \
	SCOPE khint_t kh_get_##name(const kh_##name##_t *h, khkey_t key) \
	{ \
		if (h->n_buckets) { \
			khint_t k, i, last, mask, step = 0; \
			mask = h->n_buckets - 1; \
			k = __hash_func(key); i = k & mask; \
			last = i; \
			while (!__ac_isempty(h->flags, i) && (__ac_isdel(h->flags, i) || !__hash_equal(h->keys[i], key))) { \
				i = (i + (++step)) & mask; \
				if (i == last) return h->n_buckets; \
			} \
			return __ac_iseither(h->flags, i)? h->n_buckets : i; \
		} else return 0; \
	} \
	SCOPE int kh_resize_##name(kh_##name##_t *h, khint_t new_n_buckets) \
	{ /* This function uses 0.25*n_buckets bytes of working space instead of [sizeof(key_t+val_t)+.25]*n_buckets. */ \
		khint32_t *new_flags = 0; \
		khint_t j = 1; \
		{ \
			kroundup32(new_n_buckets); \
			if (new_n_buckets < 4) new_n_buckets = 4; \
			if (h->size >= (khint_t)(new_n_buckets * __ac_HASH_UPPER + 0.5)) j = 0; /* requested size is too small */ \
			else { /* hash table size to be changed (shrink or expand); rehash */ \
				new_flags = (khint32_t*)kmalloc(__ac_fsize(new_n_buckets) * sizeof(khint32_t)); \
				if (!new_flags) return -1; \
				memset(new_flags, 0xaa, __ac_fsize(new_n_buckets) * sizeof(khint32_t)); \
				if (h->n_buckets < new_n_buckets) { /* expand */ \
					khkey_t *new_keys = (khkey_t*)krealloc((void *)h->keys, new_n_buckets * sizeof(khkey_t)); \
					if (!new_keys) { kfree(new_flags); return -1; } \
					h->keys = new_keys; \
					if (kh_is_map) { \
						khval_t *new_vals = (khval_t*)krealloc((void *)h->vals, new_n_buckets * sizeof(khval_t)); \
						if (!new_vals) { kfree(new_flags); return -1; } \
						h->vals = new_vals; \
					} \
				} /* otherwise shrink */ \
			} \
		} \
		if (j) { /* rehashing is needed */ \
			for (j = 0; j != h->n_buckets; ++j) { \
				if (__ac_iseither(h->flags, j) == 0) { \
					khkey_t key = h->keys[j]; \
					khval_t val; \
					khint_t new_mask; \
					new_mask = new_n_buckets - 1; \
					if (kh_is_map) val = h->vals[j]; \
					__ac_set_isdel_true(h->flags, j); \
					while (1) { /* kick-out process; sort of like in Cuckoo hashing */ \
						khint_t k, i, step = 0; \
						k = __hash_func(key); \
						i = k & new_mask; \
						while (!__ac_isempty(new_flags, i)) i = (i + (++step)) & new_mask; \
						__ac_set_isempty_false(new_flags, i); \
						if (i < h->n_buckets && __ac_iseither(h->flags, i) == 0) { /* kick out the existing element */ \
							{ khkey_t tmp = h->keys[i]; h->keys[i] = key; key = tmp; } \
							if (kh_is_map) { khval_t tmp = h->vals[i]; h->vals[i] = val; val = tmp; } \
							__ac_set_isdel_true(h->flags, i); /* mark it as deleted in the old hash table */ \
						} else { /* write the element and jump out of the loop */ \
							h->keys[i] = key; \
							if (kh_is_map) h->vals[i] = val; \
							break; \
						} \
					} \
				} \
			} \
			if (h->n_buckets > new_n_buckets) { /* shrink the hash table */ \
				h->keys = (khkey_t*)krealloc((void *)h->keys, new_n_buckets * sizeof(khkey_t)); \
				if (kh_is_map) h->vals = (khval_t*)krealloc((void *)h->vals, new_n_buckets * sizeof(khval_t)); \
			} \
			kfree(h->flags); /* free the working space */ \
			h->flags = new_flags; \
			h->n_buckets = new_n_buckets; \
			h->n_occupied = h->size; \
			h->upper_bound = (khint_t)(h->n_buckets * __ac_HASH_UPPER + 0.5); \
		} \
		return 0; \
	} \
	SCOPE khint_t kh_put_##name(kh_##name##_t *h, khkey_t key, int *ret) \
	{ \
		khint_t x; \
		if (h->n_occupied >= h->upper_bound) { /* update the hash table */ \
			if (h->n_buckets > (h->size<<1)) { \
				if (kh_resize_##name(h, h->n_buckets - 1) < 0) { /* clear "deleted" elements */ \
					*ret = -1; return h->n_buckets; \
				} \
			} else if (kh_resize_##name(h, h->n_buckets + 1) < 0) { /* expand the hash table */ \
				*ret = -1; return h->n_buckets; \
			} \
		} /* TODO: to implement automatically shrinking; resize() already support shrinking */ \
		{ \
			khint_t k, i, site, last, mask = h->n_buckets - 1, step = 0; \
			x = site = h->n_buckets; k = __hash_func(key); i = k & mask; \
			if (__ac_isempty(h->flags, i)) x = i; /* for speed up */ \
			else { \
				last = i; \
				while (!__ac_isempty(h->flags, i) && (__ac_isdel(h->flags, i) || !__hash_equal(h->keys[i], key))) { \
					if (__ac_isdel(h->flags, i)) site = i; \
					i = (i + (++step)) & mask; \
					if (i == last) { x = site; break; } \
				} \
				if (x == h->n_buckets) { \
					if (__ac_isempty(h->flags, i) && site != h->n_buckets) x = site; \
					else x = i; \
				} \
			} \
		} \
		if (__ac_isempty(h->flags, x)) { /* not present at all */ \
			h->keys[x] = key; \
			__ac_set_isboth_false(h->flags, x); \
			++h->size; ++h->n_occupied; \
			*ret = 1; \
		} else if (__ac_isdel(h->flags, x)) { /* deleted */ \
			h->keys[x] = key; \
			__ac_set_isboth_false(h->flags, x); \
			++h->size; \
			*ret = 2; \
		} else *ret = 0; /* Don't touch h->keys[x] if present and not deleted */ \
		return x; \
	} \
	SCOPE void kh_del_##name(kh_##name##_t *h, khint_t x) \
	{ \
		if (x != h->n_buckets && !__ac_iseither(h->flags, x)) { \
			__ac_set_isdel_true(h->flags, x); \
			--h->size; \
		} \
	}
+
/* Declare the type and extern API only (no code); presumably paired with a
 * KHASH_INIT2 expansion in a single .c file -- confirm against klib docs. */
#define KHASH_DECLARE(name, khkey_t, khval_t) \
	__KHASH_TYPE(name, khkey_t, khval_t) \
	__KHASH_PROTOTYPES(name, khkey_t, khval_t)

/* Instantiate type + implementation with an explicit linkage SCOPE. */
#define KHASH_INIT2(name, SCOPE, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal) \
	__KHASH_TYPE(name, khkey_t, khval_t) \
	__KHASH_IMPL(name, SCOPE, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal)

/* Instantiate with static-inline linkage: the common header-only case. */
#define KHASH_INIT(name, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal) \
	KHASH_INIT2(name, static kh_inline klib_unused, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal)
+
+/* --- BEGIN OF HASH FUNCTIONS --- */
+
+/*! @function
+ @abstract Integer hash function
+ @param key The integer [khint32_t]
+ @return The hash value [khint_t]
+ */
+#define kh_int_hash_func(key) (khint32_t)(key)
+/*! @function
+ @abstract Integer comparison function
+ */
+#define kh_int_hash_equal(a, b) ((a) == (b))
+/*! @function
+ @abstract 64-bit integer hash function
+ @param key The integer [khint64_t]
+ @return The hash value [khint_t]
+ */
+#define kh_int64_hash_func(key) (khint32_t)((key)>>33^(key)^(key)<<11)
+/*! @function
+ @abstract 64-bit integer comparison function
+ */
+#define kh_int64_hash_equal(a, b) ((a) == (b))
+/*! @function
+ @abstract const char* hash function
+ @param s Pointer to a null terminated string
+ @return The hash value
+ */
+static kh_inline khint_t __ac_X31_hash_string(const char *s)
+{
+ khint_t h = (khint_t)*s;
+ if (h) for (++s ; *s; ++s) h = (h << 5) - h + (khint_t)*s;
+ return h;
+}
+/*! @function
+ @abstract Another interface to const char* hash function
+ @param key Pointer to a null terminated string [const char*]
+ @return The hash value [khint_t]
+ */
+#define kh_str_hash_func(key) __ac_X31_hash_string(key)
+/*! @function
+ @abstract Const char* comparison function
+ */
+#define kh_str_hash_equal(a, b) (strcmp(a, b) == 0)
+
/* Thomas Wang's 32-bit integer mixer: a fixed sequence of add/xor/shift
 * steps that diffuses all input bits into the result.  Exposed below as
 * kh_int_hash_func2 for keys where the identity hash clusters badly; the
 * step order is part of the algorithm and must not be rearranged. */
static kh_inline khint_t __ac_Wang_hash(khint_t key)
{
	key += ~(key << 15);
	key ^= (key >> 10);
	key += (key << 3);
	key ^= (key >> 6);
	key += ~(key << 11);
	key ^= (key >> 16);
	return key;
}
+#define kh_int_hash_func2(key) __ac_Wang_hash((khint_t)key)
+
+/* --- END OF HASH FUNCTIONS --- */
+
+/* Other convenient macros... */
+
+/*!
+ @abstract Type of the hash table.
+ @param name Name of the hash table [symbol]
+ */
+#define khash_t(name) kh_##name##_t
+
+/*! @function
+ @abstract Initiate a hash table.
+ @param name Name of the hash table [symbol]
+ @return Pointer to the hash table [khash_t(name)*]
+ */
+#define kh_init(name) kh_init_##name()
+
+/*! @function
+ @abstract Destroy a hash table.
+ @param name Name of the hash table [symbol]
+ @param h Pointer to the hash table [khash_t(name)*]
+ */
+#define kh_destroy(name, h) kh_destroy_##name(h)
+
+/*! @function
+ @abstract Reset a hash table without deallocating memory.
+ @param name Name of the hash table [symbol]
+ @param h Pointer to the hash table [khash_t(name)*]
+ */
+#define kh_clear(name, h) kh_clear_##name(h)
+
+/*! @function
+ @abstract Resize a hash table.
+ @param name Name of the hash table [symbol]
+ @param h Pointer to the hash table [khash_t(name)*]
+ @param s New size [khint_t]
+ */
+#define kh_resize(name, h, s) kh_resize_##name(h, s)
+
+/*! @function
+ @abstract Insert a key to the hash table.
+ @param name Name of the hash table [symbol]
+ @param h Pointer to the hash table [khash_t(name)*]
+ @param k Key [type of keys]
+ @param r Extra return code: -1 if the operation failed;
+ 0 if the key is present in the hash table;
+ 1 if the bucket is empty (never used); 2 if the element in
+ the bucket has been deleted [int*]
+ @return Iterator to the inserted element [khint_t]
+ */
+#define kh_put(name, h, k, r) kh_put_##name(h, k, r)
+
+/*! @function
+ @abstract Retrieve a key from the hash table.
+ @param name Name of the hash table [symbol]
+ @param h Pointer to the hash table [khash_t(name)*]
+ @param k Key [type of keys]
+ @return Iterator to the found element, or kh_end(h) if the element is absent [khint_t]
+ */
+#define kh_get(name, h, k) kh_get_##name(h, k)
+
+/*! @function
+ @abstract Remove a key from the hash table.
+ @param name Name of the hash table [symbol]
+ @param h Pointer to the hash table [khash_t(name)*]
+ @param k Iterator to the element to be deleted [khint_t]
+ */
+#define kh_del(name, h, k) kh_del_##name(h, k)
+
+/*! @function
+ @abstract Test whether a bucket contains data.
+ @param h Pointer to the hash table [khash_t(name)*]
+ @param x Iterator to the bucket [khint_t]
+ @return 1 if containing data; 0 otherwise [int]
+ */
+#define kh_exist(h, x) (!__ac_iseither((h)->flags, (x)))
+
+/*! @function
+ @abstract Get key given an iterator
+ @param h Pointer to the hash table [khash_t(name)*]
+ @param x Iterator to the bucket [khint_t]
+ @return Key [type of keys]
+ */
+#define kh_key(h, x) ((h)->keys[x])
+
+/*! @function
+ @abstract Get value given an iterator
+ @param h Pointer to the hash table [khash_t(name)*]
+ @param x Iterator to the bucket [khint_t]
+ @return Value [type of values]
+ @discussion For hash sets, calling this results in segfault.
+ */
+#define kh_val(h, x) ((h)->vals[x])
+
+/*! @function
+ @abstract Alias of kh_val()
+ */
+#define kh_value(h, x) ((h)->vals[x])
+
+/*! @function
+ @abstract Get the start iterator
+ @param h Pointer to the hash table [khash_t(name)*]
+ @return The start iterator [khint_t]
+ */
+#define kh_begin(h) (khint_t)(0)
+
+/*! @function
+ @abstract Get the end iterator
+ @param h Pointer to the hash table [khash_t(name)*]
+ @return The end iterator [khint_t]
+ */
+#define kh_end(h) ((h)->n_buckets)
+
+/*! @function
+ @abstract Get the number of elements in the hash table
+ @param h Pointer to the hash table [khash_t(name)*]
+ @return Number of elements in the hash table [khint_t]
+ */
+#define kh_size(h) ((h)->size)
+
+/*! @function
+ @abstract Get the number of buckets in the hash table
+ @param h Pointer to the hash table [khash_t(name)*]
+ @return Number of buckets in the hash table [khint_t]
+ */
+#define kh_n_buckets(h) ((h)->n_buckets)
+
+/*! @function
+ @abstract Iterate over the entries in the hash table
+ @param h Pointer to the hash table [khash_t(name)*]
+ @param kvar Variable to which key will be assigned
+ @param vvar Variable to which value will be assigned
+ @param code Block of code to execute
+ */
+#define kh_foreach(h, kvar, vvar, code) { khint_t __i; \
+ for (__i = kh_begin(h); __i != kh_end(h); ++__i) { \
+ if (!kh_exist(h,__i)) continue; \
+ (kvar) = kh_key(h,__i); \
+ (vvar) = kh_val(h,__i); \
+ code; \
+ } }
+
+/*! @function
+ @abstract Iterate over the values in the hash table
+ @param h Pointer to the hash table [khash_t(name)*]
+ @param vvar Variable to which value will be assigned
+ @param code Block of code to execute
+ */
+#define kh_foreach_value(h, vvar, code) { khint_t __i; \
+ for (__i = kh_begin(h); __i != kh_end(h); ++__i) { \
+ if (!kh_exist(h,__i)) continue; \
+ (vvar) = kh_val(h,__i); \
+ code; \
+ } }
+
+/* More convenient interfaces */
+
+/*! @function
+ @abstract Instantiate a hash set containing integer keys
+ @param name Name of the hash table [symbol]
+ */
+#define KHASH_SET_INIT_INT(name) \
+ KHASH_INIT(name, khint32_t, char, 0, kh_int_hash_func, kh_int_hash_equal)
+
+/*! @function
+ @abstract Instantiate a hash map containing integer keys
+ @param name Name of the hash table [symbol]
+ @param khval_t Type of values [type]
+ */
+#define KHASH_MAP_INIT_INT(name, khval_t) \
+ KHASH_INIT(name, khint32_t, khval_t, 1, kh_int_hash_func, kh_int_hash_equal)
+
+/*! @function
+ @abstract Instantiate a hash set containing 64-bit integer keys
+ @param name Name of the hash table [symbol]
+ */
+#define KHASH_SET_INIT_INT64(name) \
+ KHASH_INIT(name, khint64_t, char, 0, kh_int64_hash_func, kh_int64_hash_equal)
+
+/*! @function
+ @abstract Instantiate a hash map containing 64-bit integer keys
+ @param name Name of the hash table [symbol]
+ @param khval_t Type of values [type]
+ */
+#define KHASH_MAP_INIT_INT64(name, khval_t) \
+ KHASH_INIT(name, khint64_t, khval_t, 1, kh_int64_hash_func, kh_int64_hash_equal)
+
+typedef const char *kh_cstr_t;
+/*! @function
+ @abstract Instantiate a hash map containing const char* keys
+ @param name Name of the hash table [symbol]
+ */
+#define KHASH_SET_INIT_STR(name) \
+ KHASH_INIT(name, kh_cstr_t, char, 0, kh_str_hash_func, kh_str_hash_equal)
+
+/*! @function
+ @abstract Instantiate a hash map containing const char* keys
+ @param name Name of the hash table [symbol]
+ @param khval_t Type of values [type]
+ */
+#define KHASH_MAP_INIT_STR(name, khval_t) \
+ KHASH_INIT(name, kh_cstr_t, khval_t, 1, kh_str_hash_func, kh_str_hash_equal)
+
+#endif /* __AC_KHASH_H */
diff --git a/misc/benchmarks/external/skarupke/flat_hash_map.hpp b/misc/benchmarks/external/skarupke/flat_hash_map.hpp
new file mode 100644
index 00000000..a8723ee8
--- /dev/null
+++ b/misc/benchmarks/external/skarupke/flat_hash_map.hpp
@@ -0,0 +1,1496 @@
+// Copyright Malte Skarupke 2017.
+// Distributed under the Boost Software License, Version 1.0.
+// (See http://www.boost.org/LICENSE_1_0.txt)
+
+#pragma once
+
+#include <cstdint>
+#include <cstddef>
+#include <functional>
+#include <cmath>
+#include <algorithm>
+#include <iterator>
+#include <utility>
+#include <type_traits>
+
+#ifdef _MSC_VER
+#define SKA_NOINLINE(...) __declspec(noinline) __VA_ARGS__
+#else
+#define SKA_NOINLINE(...) __VA_ARGS__ __attribute__((noinline))
+#endif
+
+namespace ska
+{
+struct prime_number_hash_policy;
+struct power_of_two_hash_policy;
+struct fibonacci_hash_policy;
+
+namespace detailv3
+{
// Stores a functor by inheriting from it so that empty (stateless)
// functors -- the usual case for hashers and comparators -- contribute no
// storage (empty-base optimization).  Calls are forwarded unchanged.
template<typename Result, typename Functor>
struct functor_storage : Functor
{
    functor_storage() = default;
    functor_storage(const Functor & functor)
        : Functor(functor)
    {
    }
    template<typename... Args>
    Result operator()(Args &&... args)
    {
        return static_cast<Functor &>(*this)(std::forward<Args>(args)...);
    }
    template<typename... Args>
    Result operator()(Args &&... args) const
    {
        return static_cast<const Functor &>(*this)(std::forward<Args>(args)...);
    }
};
// Specialization for raw function pointers, which cannot be used as a base
// class: the pointer is stored by value, invoked through operator(), and
// recoverable via the conversion operators.
template<typename Result, typename... Args>
struct functor_storage<Result, Result (*)(Args...)>
{
    typedef Result (*function_ptr)(Args...);
    function_ptr function;
    functor_storage(function_ptr function)
        : function(function)
    {
    }
    Result operator()(Args... args) const
    {
        return function(std::forward<Args>(args)...);
    }
    operator function_ptr &()
    {
        return function;
    }
    operator const function_ptr &()
    {
        return function;
    }
};
// Adapts the user's hasher so the table can hash either a bare key, the
// stored value_type, or any std::pair -- pairs are always hashed by their
// .first member.  Needed because the map stores pairs but looks up by key.
template<typename key_type, typename value_type, typename hasher>
struct KeyOrValueHasher : functor_storage<size_t, hasher>
{
    typedef functor_storage<size_t, hasher> hasher_storage;
    KeyOrValueHasher() = default;
    KeyOrValueHasher(const hasher & hash)
        : hasher_storage(hash)
    {
    }
    size_t operator()(const key_type & key)
    {
        return static_cast<hasher_storage &>(*this)(key);
    }
    size_t operator()(const key_type & key) const
    {
        return static_cast<const hasher_storage &>(*this)(key);
    }
    size_t operator()(const value_type & value)
    {
        return static_cast<hasher_storage &>(*this)(value.first);
    }
    size_t operator()(const value_type & value) const
    {
        return static_cast<const hasher_storage &>(*this)(value.first);
    }
    template<typename F, typename S>
    size_t operator()(const std::pair<F, S> & value)
    {
        return static_cast<hasher_storage &>(*this)(value.first);
    }
    template<typename F, typename S>
    size_t operator()(const std::pair<F, S> & value) const
    {
        return static_cast<const hasher_storage &>(*this)(value.first);
    }
};
// Companion to KeyOrValueHasher: adapts the user's key_equal so any
// combination of bare key, stored value_type, and foreign std::pair can be
// compared -- pairs always compare through their .first member.
template<typename key_type, typename value_type, typename key_equal>
struct KeyOrValueEquality : functor_storage<bool, key_equal>
{
    typedef functor_storage<bool, key_equal> equality_storage;
    KeyOrValueEquality() = default;
    KeyOrValueEquality(const key_equal & equality)
        : equality_storage(equality)
    {
    }
    bool operator()(const key_type & lhs, const key_type & rhs)
    {
        return static_cast<equality_storage &>(*this)(lhs, rhs);
    }
    bool operator()(const key_type & lhs, const value_type & rhs)
    {
        return static_cast<equality_storage &>(*this)(lhs, rhs.first);
    }
    bool operator()(const value_type & lhs, const key_type & rhs)
    {
        return static_cast<equality_storage &>(*this)(lhs.first, rhs);
    }
    bool operator()(const value_type & lhs, const value_type & rhs)
    {
        return static_cast<equality_storage &>(*this)(lhs.first, rhs.first);
    }
    template<typename F, typename S>
    bool operator()(const key_type & lhs, const std::pair<F, S> & rhs)
    {
        return static_cast<equality_storage &>(*this)(lhs, rhs.first);
    }
    template<typename F, typename S>
    bool operator()(const std::pair<F, S> & lhs, const key_type & rhs)
    {
        return static_cast<equality_storage &>(*this)(lhs.first, rhs);
    }
    template<typename F, typename S>
    bool operator()(const value_type & lhs, const std::pair<F, S> & rhs)
    {
        return static_cast<equality_storage &>(*this)(lhs.first, rhs.first);
    }
    template<typename F, typename S>
    bool operator()(const std::pair<F, S> & lhs, const value_type & rhs)
    {
        return static_cast<equality_storage &>(*this)(lhs.first, rhs.first);
    }
    template<typename FL, typename SL, typename FR, typename SR>
    bool operator()(const std::pair<FL, SL> & lhs, const std::pair<FR, SR> & rhs)
    {
        return static_cast<equality_storage &>(*this)(lhs.first, rhs.first);
    }
};
// Size of the shared empty table below; presumably also the minimum probe
// window for real tables (the allocation code is outside this view).
static constexpr int8_t min_lookups = 4;
// One table slot.  distance_from_desired is -1 for an empty slot,
// otherwise the distance (in buckets) from the value's ideal position
// (0 = exactly in place).  The payload lives in an unnamed union so its
// constructor/destructor only run while the slot is occupied: emplace()
// placement-news it, destroy_value() runs ~T() and re-marks the slot empty.
template<typename T>
struct sherwood_v3_entry
{
    sherwood_v3_entry()
    {
    }
    sherwood_v3_entry(int8_t distance_from_desired)
        : distance_from_desired(distance_from_desired)
    {
    }
    ~sherwood_v3_entry()
    {
    }
    // Shared storage for default-constructed tables: three empty slots and
    // a terminal entry with distance 0 (special_end_value), which reads as
    // "occupied" so iteration terminates without real allocation.
    static sherwood_v3_entry * empty_default_table()
    {
        static sherwood_v3_entry result[min_lookups] = { {}, {}, {}, {special_end_value} };
        return result;
    }

    bool has_value() const
    {
        return distance_from_desired >= 0;
    }
    bool is_empty() const
    {
        return distance_from_desired < 0;
    }
    bool is_at_desired_position() const
    {
        return distance_from_desired <= 0;
    }
    template<typename... Args>
    void emplace(int8_t distance, Args &&... args)
    {
        new (std::addressof(value)) T(std::forward<Args>(args)...);
        distance_from_desired = distance;
    }

    void destroy_value()
    {
        value.~T();
        distance_from_desired = -1;
    }

    int8_t distance_from_desired = -1;
    static constexpr int8_t special_end_value = 0;
    union { T value; };
};
+
// Floor of the base-2 logarithm of value.  Matches the de Bruijn
// multiply-and-table lookup it replaces bit for bit, including the edge
// case log2(0) == 63 (the original's table[0] entry).
inline int8_t log2(size_t value)
{
    if (value == 0)
        return 63;
    int8_t highest_bit = 0;
    while (value >>= 1)
        ++highest_bit;
    return highest_bit;
}
+
// Conditional assignment: the primary template assigns rhs to lhs, the
// <T, false> specialization is a no-op.  Used by the container assignment
// operators to honour the allocator propagate_on_container_*_assignment
// traits without C++17 if-constexpr.
template<typename T, bool>
struct AssignIfTrue
{
    void operator()(T & lhs, const T & rhs)
    {
        lhs = rhs;
    }
    void operator()(T & lhs, T && rhs)
    {
        lhs = std::move(rhs);
    }
};
template<typename T>
struct AssignIfTrue<T, false>
{
    void operator()(T &, const T &)
    {
    }
    void operator()(T &, T &&)
    {
    }
};
+
// Rounds i up to the nearest power of two (i itself when it already is
// one).  Smears the high bit of i-1 into every lower position, then adds
// one; next_power_of_two(0) wraps around to 0, as in the original.
inline size_t next_power_of_two(size_t i)
{
    size_t smeared = i - 1;
    for (unsigned shift = 1; shift <= 32; shift <<= 1)
        smeared |= smeared >> shift;
    return smeared + 1;
}
+
// C++14-compatible void_t for detection idioms below.
template<typename...> using void_t = void;

// Detects a nested T::hash_policy typedef on the hasher via void_t SFINAE;
// when the hasher does not advertise one, fall back to
// fibonacci_hash_policy (forward-declared at the top of this file).
template<typename T, typename = void>
struct HashPolicySelector
{
    typedef fibonacci_hash_policy type;
};
template<typename T>
struct HashPolicySelector<T, void_t<typename T::hash_policy>>
{
    typedef typename T::hash_policy type;
};
+
+template<typename T, typename FindKey, typename ArgumentHash, typename Hasher, typename ArgumentEqual, typename Equal, typename ArgumentAlloc, typename EntryAlloc>
+class sherwood_v3_table : private EntryAlloc, private Hasher, private Equal
+{
+ using Entry = detailv3::sherwood_v3_entry<T>;
+ using AllocatorTraits = std::allocator_traits<EntryAlloc>;
+ using EntryPointer = typename AllocatorTraits::pointer;
+ struct convertible_to_iterator;
+
+public:
+
+ using value_type = T;
+ using size_type = size_t;
+ using difference_type = std::ptrdiff_t;
+ using hasher = ArgumentHash;
+ using key_equal = ArgumentEqual;
+ using allocator_type = EntryAlloc;
+ using reference = value_type &;
+ using const_reference = const value_type &;
+ using pointer = value_type *;
+ using const_pointer = const value_type *;
+
    // Constructors.  Every variant delegates to the
    // (bucket_count, hash, equal, alloc) form; a nonzero bucket_count
    // rehashes immediately, and the initializer_list overloads size the
    // table to il.size() when no bucket_count was given.
    sherwood_v3_table()
    {
    }
    explicit sherwood_v3_table(size_type bucket_count, const ArgumentHash & hash = ArgumentHash(), const ArgumentEqual & equal = ArgumentEqual(), const ArgumentAlloc & alloc = ArgumentAlloc())
        : EntryAlloc(alloc), Hasher(hash), Equal(equal)
    {
        rehash(bucket_count);
    }
    sherwood_v3_table(size_type bucket_count, const ArgumentAlloc & alloc)
        : sherwood_v3_table(bucket_count, ArgumentHash(), ArgumentEqual(), alloc)
    {
    }
    sherwood_v3_table(size_type bucket_count, const ArgumentHash & hash, const ArgumentAlloc & alloc)
        : sherwood_v3_table(bucket_count, hash, ArgumentEqual(), alloc)
    {
    }
    explicit sherwood_v3_table(const ArgumentAlloc & alloc)
        : EntryAlloc(alloc)
    {
    }
    template<typename It>
    sherwood_v3_table(It first, It last, size_type bucket_count = 0, const ArgumentHash & hash = ArgumentHash(), const ArgumentEqual & equal = ArgumentEqual(), const ArgumentAlloc & alloc = ArgumentAlloc())
        : sherwood_v3_table(bucket_count, hash, equal, alloc)
    {
        insert(first, last);
    }
    template<typename It>
    sherwood_v3_table(It first, It last, size_type bucket_count, const ArgumentAlloc & alloc)
        : sherwood_v3_table(first, last, bucket_count, ArgumentHash(), ArgumentEqual(), alloc)
    {
    }
    template<typename It>
    sherwood_v3_table(It first, It last, size_type bucket_count, const ArgumentHash & hash, const ArgumentAlloc & alloc)
        : sherwood_v3_table(first, last, bucket_count, hash, ArgumentEqual(), alloc)
    {
    }
    sherwood_v3_table(std::initializer_list<T> il, size_type bucket_count = 0, const ArgumentHash & hash = ArgumentHash(), const ArgumentEqual & equal = ArgumentEqual(), const ArgumentAlloc & alloc = ArgumentAlloc())
        : sherwood_v3_table(bucket_count, hash, equal, alloc)
    {
        if (bucket_count == 0)
            rehash(il.size());
        insert(il.begin(), il.end());
    }
    sherwood_v3_table(std::initializer_list<T> il, size_type bucket_count, const ArgumentAlloc & alloc)
        : sherwood_v3_table(il, bucket_count, ArgumentHash(), ArgumentEqual(), alloc)
    {
    }
    sherwood_v3_table(std::initializer_list<T> il, size_type bucket_count, const ArgumentHash & hash, const ArgumentAlloc & alloc)
        : sherwood_v3_table(il, bucket_count, hash, ArgumentEqual(), alloc)
    {
    }
    // Copy construction: pick the (possibly propagated) allocator copy,
    // size this table for other's contents, then deep-copy its elements;
    // if an element copy throws, the freshly allocated storage is released
    // before rethrowing.
    sherwood_v3_table(const sherwood_v3_table & other)
        : sherwood_v3_table(other, AllocatorTraits::select_on_container_copy_construction(other.get_allocator()))
    {
    }
    sherwood_v3_table(const sherwood_v3_table & other, const ArgumentAlloc & alloc)
        : EntryAlloc(alloc), Hasher(other), Equal(other), _max_load_factor(other._max_load_factor)
    {
        rehash_for_other_container(other);
        try
        {
            insert(other.begin(), other.end());
        }
        catch(...)
        {
            clear();
            deallocate_data(entries, num_slots_minus_one, max_lookups);
            throw;
        }
    }
    // Move construction steals other's buffers via swap_pointers, leaving
    // other in the default (empty) state.
    sherwood_v3_table(sherwood_v3_table && other) noexcept
        : EntryAlloc(std::move(other)), Hasher(std::move(other)), Equal(std::move(other))
    {
        swap_pointers(other);
    }
    // NOTE(review): the allocator-extended move also just swaps pointers,
    // even when alloc is unequal to other's allocator -- this assumes the
    // allocators can free each other's memory; confirm against upstream.
    sherwood_v3_table(sherwood_v3_table && other, const ArgumentAlloc & alloc) noexcept
        : EntryAlloc(alloc), Hasher(std::move(other)), Equal(std::move(other))
    {
        swap_pointers(other);
    }
    // Copy assignment.  Honours propagate_on_container_copy_assignment:
    // when the allocator propagates and the two allocators differ, the
    // current storage is dropped before the allocator is replaced; the
    // elements are then re-inserted after a rehash sized for `other`.
    sherwood_v3_table & operator=(const sherwood_v3_table & other)
    {
        if (this == std::addressof(other))
            return *this;

        clear();
        if (AllocatorTraits::propagate_on_container_copy_assignment::value)
        {
            if (static_cast<EntryAlloc &>(*this) != static_cast<const EntryAlloc &>(other))
            {
                reset_to_empty_state();
            }
            AssignIfTrue<EntryAlloc, AllocatorTraits::propagate_on_container_copy_assignment::value>()(*this, other);
        }
        _max_load_factor = other._max_load_factor;
        static_cast<Hasher &>(*this) = other;
        static_cast<Equal &>(*this) = other;
        rehash_for_other_container(other);
        insert(other.begin(), other.end());
        return *this;
    }
    // Move assignment: propagate-allocator-and-swap when POCMA allows,
    // plain pointer swap when the allocators already compare equal, and
    // only in the incompatible-allocator case a per-element move.  The
    // hasher/equality functors are move-assigned in every branch.
    sherwood_v3_table & operator=(sherwood_v3_table && other) noexcept
    {
        if (this == std::addressof(other))
            return *this;
        else if (AllocatorTraits::propagate_on_container_move_assignment::value)
        {
            clear();
            reset_to_empty_state();
            AssignIfTrue<EntryAlloc, AllocatorTraits::propagate_on_container_move_assignment::value>()(*this, std::move(other));
            swap_pointers(other);
        }
        else if (static_cast<EntryAlloc &>(*this) == static_cast<EntryAlloc &>(other))
        {
            swap_pointers(other);
        }
        else
        {
            clear();
            _max_load_factor = other._max_load_factor;
            rehash_for_other_container(other);
            for (T & elem : other)
                emplace(std::move(elem));
            other.clear();
        }
        static_cast<Hasher &>(*this) = std::move(other);
        static_cast<Equal &>(*this) = std::move(other);
        return *this;
    }
// Destroys all stored values, then releases the bucket array.
~sherwood_v3_table()
{
    clear();
    deallocate_data(entries, num_slots_minus_one, max_lookups);
}
+
// The allocator, equality predicate and hasher are stored as base-class
// subobjects (see the constructors' init-lists); these accessors expose
// them as the user-facing types.
const allocator_type & get_allocator() const
{
    return static_cast<const allocator_type &>(*this);
}
const ArgumentEqual & key_eq() const
{
    return static_cast<const ArgumentEqual &>(*this);
}
const ArgumentHash & hash_function() const
{
    return static_cast<const ArgumentHash &>(*this);
}
+
// Forward iterator over occupied entries. Instantiated twice below:
// templated_iterator<value_type> (iterator) and
// templated_iterator<const value_type> (const_iterator).
template<typename ValueType>
struct templated_iterator
{
    templated_iterator() = default;
    templated_iterator(EntryPointer current)
        : current(current)
    {
    }
    // Slot this iterator currently points at.
    EntryPointer current = EntryPointer();

    using iterator_category = std::forward_iterator_tag;
    using value_type = ValueType;
    using difference_type = ptrdiff_t;
    using pointer = ValueType *;
    using reference = ValueType &;

    friend bool operator==(const templated_iterator & lhs, const templated_iterator & rhs)
    {
        return lhs.current == rhs.current;
    }
    friend bool operator!=(const templated_iterator & lhs, const templated_iterator & rhs)
    {
        return !(lhs == rhs);
    }

    // Advance to the next occupied slot. No bounds check: the scan is
    // expected to stop at the special end entry written by rehash() —
    // presumably is_empty() is false for it (TODO confirm in Entry).
    templated_iterator & operator++()
    {
        do
        {
            ++current;
        }
        while(current->is_empty());
        return *this;
    }
    templated_iterator operator++(int)
    {
        templated_iterator copy(*this);
        ++*this;
        return copy;
    }

    ValueType & operator*() const
    {
        return current->value;
    }
    ValueType * operator->() const
    {
        return std::addressof(current->value);
    }

    // Implicit iterator -> const_iterator conversion.
    operator templated_iterator<const value_type>() const
    {
        return { current };
    }
};
using iterator = templated_iterator<value_type>;
using const_iterator = templated_iterator<const value_type>;
+
// begin(): scan forward from the first slot to the first occupied
// entry. The unbounded loop relies on the table always containing a
// terminating entry (the sentinel written by rehash / the default
// table) for which has_value() stops the scan — so on an empty table
// begin() == end().
iterator begin()
{
    for (EntryPointer it = entries;; ++it)
    {
        if (it->has_value())
            return { it };
    }
}
const_iterator begin() const
{
    for (EntryPointer it = entries;; ++it)
    {
        if (it->has_value())
            return { it };
    }
}
const_iterator cbegin() const
{
    return begin();
}
// end() addresses the slot num_slots_minus_one + max_lookups past the
// start of the array (the sentinel position).
iterator end()
{
    return { entries + static_cast<ptrdiff_t>(num_slots_minus_one + max_lookups) };
}
const_iterator end() const
{
    return { entries + static_cast<ptrdiff_t>(num_slots_minus_one + max_lookups) };
}
const_iterator cend() const
{
    return end();
}
+
+ iterator find(const FindKey & key)
+ {
+ size_t index = hash_policy.index_for_hash(hash_object(key), num_slots_minus_one);
+ EntryPointer it = entries + ptrdiff_t(index);
+ for (int8_t distance = 0; it->distance_from_desired >= distance; ++distance, ++it)
+ {
+ if (compares_equal(key, it->value))
+ return { it };
+ }
+ return end();
+ }
// Const lookup delegates to the non-const overload; the const_cast is
// safe because find() does not modify the table.
const_iterator find(const FindKey & key) const
{
    return const_cast<sherwood_v3_table *>(this)->find(key);
}
// Keys are unique, so the count is 0 or 1.
size_t count(const FindKey & key) const
{
    return find(key) == end() ? 0 : 1;
}
+ std::pair<iterator, iterator> equal_range(const FindKey & key)
+ {
+ iterator found = find(key);
+ if (found == end())
+ return { found, found };
+ else
+ return { found, std::next(found) };
+ }
+ std::pair<const_iterator, const_iterator> equal_range(const FindKey & key) const
+ {
+ const_iterator found = find(key);
+ if (found == end())
+ return { found, found };
+ else
+ return { found, std::next(found) };
+ }
+
// Insert an element constructed from (key, args...) unless an equal key
// already exists. Fast path: probe from the key's home slot; as in
// find(), the scan stops once a slot's stored probe distance drops
// below the current one, proving the key absent. The actual insertion
// is done by the out-of-line slow path emplace_new_key.
template<typename Key, typename... Args>
std::pair<iterator, bool> emplace(Key && key, Args &&... args)
{
    size_t index = hash_policy.index_for_hash(hash_object(key), num_slots_minus_one);
    EntryPointer current_entry = entries + ptrdiff_t(index);
    int8_t distance_from_desired = 0;
    for (; current_entry->distance_from_desired >= distance_from_desired; ++current_entry, ++distance_from_desired)
    {
        if (compares_equal(key, current_entry->value))
            return { { current_entry }, false };
    }
    return emplace_new_key(distance_from_desired, current_entry, std::forward<Key>(key), std::forward<Args>(args)...);
}
+
// All insert overloads funnel into emplace(); hint iterators are
// accepted for interface compatibility with std::unordered_map but
// are ignored.
std::pair<iterator, bool> insert(const value_type & value)
{
    return emplace(value);
}
std::pair<iterator, bool> insert(value_type && value)
{
    return emplace(std::move(value));
}
template<typename... Args>
iterator emplace_hint(const_iterator, Args &&... args)
{
    return emplace(std::forward<Args>(args)...).first;
}
iterator insert(const_iterator, const value_type & value)
{
    return emplace(value).first;
}
iterator insert(const_iterator, value_type && value)
{
    return emplace(std::move(value)).first;
}

// Range insert: emplace each element in [begin, end).
template<typename It>
void insert(It begin, It end)
{
    for (; begin != end; ++begin)
    {
        emplace(*begin);
    }
}
void insert(std::initializer_list<value_type> il)
{
    insert(il.begin(), il.end());
}
+
// Resize to at least num_buckets buckets (bounded below by the count
// needed for the current elements at _max_load_factor). Allocates a new
// array with max_lookups extra overflow slots, writes the end sentinel,
// swaps it in, re-inserts every surviving element, then frees the old
// array.
void rehash(size_t num_buckets)
{
    num_buckets = std::max(num_buckets, static_cast<size_t>(std::ceil(num_elements / static_cast<double>(_max_load_factor))));
    if (num_buckets == 0)
    {
        reset_to_empty_state();
        return;
    }
    auto new_prime_index = hash_policy.next_size_over(num_buckets);
    if (num_buckets == bucket_count())
        return;
    int8_t new_max_lookups = compute_max_lookups(num_buckets);
    EntryPointer new_buckets(AllocatorTraits::allocate(*this, num_buckets + new_max_lookups));
    EntryPointer special_end_item = new_buckets + static_cast<ptrdiff_t>(num_buckets + new_max_lookups - 1);
    // mark every new slot empty (-1) except the last, which becomes the
    // sentinel that terminates iteration
    for (EntryPointer it = new_buckets; it != special_end_item; ++it)
        it->distance_from_desired = -1;
    special_end_item->distance_from_desired = Entry::special_end_value;
    std::swap(entries, new_buckets);
    std::swap(num_slots_minus_one, num_buckets);
    --num_slots_minus_one;
    hash_policy.commit(new_prime_index);
    int8_t old_max_lookups = max_lookups;
    max_lookups = new_max_lookups;
    num_elements = 0;
    // after the two swaps, new_buckets points at the OLD array and
    // num_buckets holds the old num_slots_minus_one; walk the old array
    // and move each element into the new table
    for (EntryPointer it = new_buckets, end = it + static_cast<ptrdiff_t>(num_buckets + old_max_lookups); it != end; ++it)
    {
        if (it->has_value())
        {
            emplace(std::move(it->value));
            it->destroy_value();
        }
    }
    deallocate_data(new_buckets, num_buckets, old_max_lookups);
}
+
+ void reserve(size_t num_elements)
+ {
+ size_t required_buckets = num_buckets_for_reserve(num_elements);
+ if (required_buckets > bucket_count())
+ rehash(required_buckets);
+ }
+
// the return value is a type that can be converted to an iterator
// the reason for doing this is that it's not free to find the
// iterator pointing at the next element. if you care about the
// next iterator, turn the return value into an iterator
convertible_to_iterator erase(const_iterator to_erase)
{
    EntryPointer current = to_erase.current;
    current->destroy_value();
    --num_elements;
    // backward-shift deletion: pull each following displaced element one
    // slot closer to its home until an element already at its desired
    // position (or an empty slot) ends the run
    for (EntryPointer next = current + ptrdiff_t(1); !next->is_at_desired_position(); ++current, ++next)
    {
        current->emplace(next->distance_from_desired - 1, std::move(next->value));
        next->destroy_value();
    }
    return { to_erase.current };
}
+
// Erase [begin_it, end_it). First destroys every value in the range,
// then backward-shifts the displaced elements that follow the range so
// the probe-distance invariant is restored. Returns an iterator to the
// element that now occupies the first position after the erased range.
iterator erase(const_iterator begin_it, const_iterator end_it)
{
    if (begin_it == end_it)
        return { begin_it.current };
    for (EntryPointer it = begin_it.current, end = end_it.current; it != end; ++it)
    {
        if (it->has_value())
        {
            it->destroy_value();
            --num_elements;
        }
    }
    if (end_it == this->end())
        return this->end();
    // each survivor can move back by at most its own probe distance and
    // at most the width of the erased gap
    ptrdiff_t num_to_move = std::min(static_cast<ptrdiff_t>(end_it.current->distance_from_desired), end_it.current - begin_it.current);
    EntryPointer to_return = end_it.current - num_to_move;
    for (EntryPointer it = end_it.current; !it->is_at_desired_position();)
    {
        EntryPointer target = it - num_to_move;
        target->emplace(it->distance_from_desired - num_to_move, std::move(it->value));
        it->destroy_value();
        ++it;
        // shrink the shift once a later element is closer to home
        num_to_move = std::min(static_cast<ptrdiff_t>(it->distance_from_desired), num_to_move);
    }
    return { to_return };
}
+
+ size_t erase(const FindKey & key)
+ {
+ auto found = find(key);
+ if (found == end())
+ return 0;
+ else
+ {
+ erase(found);
+ return 1;
+ }
+ }
+
+ void clear()
+ {
+ for (EntryPointer it = entries, end = it + static_cast<ptrdiff_t>(num_slots_minus_one + max_lookups); it != end; ++it)
+ {
+ if (it->has_value())
+ it->destroy_value();
+ }
+ num_elements = 0;
+ }
+
// Shrink the bucket array to the smallest size that still satisfies the
// max load factor, reusing the sizing logic of the copy path.
void shrink_to_fit()
{
    rehash_for_other_container(*this);
}
+
// Swap contents, hasher and equality predicate with `other`; allocators
// are swapped only when propagate_on_container_swap says so.
// NOTE(review): swapping with unequal non-propagating allocators is
// undefined per the standard allocator rules — callers must avoid it.
void swap(sherwood_v3_table & other)
{
    using std::swap;
    swap_pointers(other);
    swap(static_cast<ArgumentHash &>(*this), static_cast<ArgumentHash &>(other));
    swap(static_cast<ArgumentEqual &>(*this), static_cast<ArgumentEqual &>(other));
    if (AllocatorTraits::propagate_on_container_swap::value)
        swap(static_cast<EntryAlloc &>(*this), static_cast<EntryAlloc &>(other));
}
+
size_t size() const
{
    return num_elements;
}
size_t max_size() const
{
    return (AllocatorTraits::max_size(*this)) / sizeof(Entry);
}
// Number of primary slots; the max_lookups overflow slots at the end of
// the array are an implementation detail and are not counted. 0 means
// the table is in the unallocated default state.
size_t bucket_count() const
{
    return num_slots_minus_one ? num_slots_minus_one + 1 : 0;
}
size_type max_bucket_count() const
{
    return (AllocatorTraits::max_size(*this) - min_lookups) / sizeof(Entry);
}
// Home slot index for `key` under the current hash policy.
size_t bucket(const FindKey & key) const
{
    return hash_policy.index_for_hash(hash_object(key), num_slots_minus_one);
}
+ float load_factor() const
+ {
+ size_t buckets = bucket_count();
+ if (buckets)
+ return static_cast<float>(num_elements) / bucket_count();
+ else
+ return 0;
+ }
// Set the load-factor threshold that triggers growth.
// NOTE(review): the value is stored unclamped; out-of-range values
// (<= 0 or > 1) flow directly into the sizing math in rehash.
void max_load_factor(float value)
{
    _max_load_factor = value;
}
float max_load_factor() const
{
    return _max_load_factor;
}

bool empty() const
{
    return num_elements == 0;
}
+
private:
    // Bucket array: num_slots_minus_one + 1 primary slots followed by
    // max_lookups overflow slots, the last being the end sentinel.
    // Points at the shared empty table while unallocated.
    EntryPointer entries = Entry::empty_default_table();
    size_t num_slots_minus_one = 0;   // 0 in the unallocated state
    typename HashPolicySelector<ArgumentHash>::type hash_policy;
    int8_t max_lookups = detailv3::min_lookups - 1;  // probe-length limit
    float _max_load_factor = 0.5f;
    size_t num_elements = 0;          // live element count
+
+ static int8_t compute_max_lookups(size_t num_buckets)
+ {
+ int8_t desired = detailv3::log2(num_buckets);
+ return std::max(detailv3::min_lookups, desired);
+ }
+
// Buckets needed to hold num_elements without exceeding the effective
// load factor; for sizing purposes the factor is capped at 0.5.
size_t num_buckets_for_reserve(size_t num_elements) const
{
    return static_cast<size_t>(std::ceil(num_elements / std::min(0.5, static_cast<double>(_max_load_factor))));
}
// Size this table appropriately before copying other's elements in
// (also used by shrink_to_fit with other == *this).
void rehash_for_other_container(const sherwood_v3_table & other)
{
    rehash(std::min(num_buckets_for_reserve(other.size()), other.bucket_count()));
}
+
+ void swap_pointers(sherwood_v3_table & other)
+ {
+ using std::swap;
+ swap(hash_policy, other.hash_policy);
+ swap(entries, other.entries);
+ swap(num_slots_minus_one, other.num_slots_minus_one);
+ swap(num_elements, other.num_elements);
+ swap(max_lookups, other.max_lookups);
+ swap(_max_load_factor, other._max_load_factor);
+ }
+
// Slow path for emplace: the key is known to be absent, and
// (distance_from_desired, current_entry) mark where the fast-path probe
// stopped. Kept out of line (SKA_NOINLINE) so the hot emplace loop
// stays small.
template<typename Key, typename... Args>
SKA_NOINLINE(std::pair<iterator, bool>) emplace_new_key(int8_t distance_from_desired, EntryPointer current_entry, Key && key, Args &&... args)
{
    using std::swap;
    // grow when the table is empty, the probe limit was reached, or the
    // insert would push the load factor past the configured maximum
    if (num_slots_minus_one == 0 || distance_from_desired == max_lookups || num_elements + 1 > (num_slots_minus_one + 1) * static_cast<double>(_max_load_factor))
    {
        grow();
        return emplace(std::forward<Key>(key), std::forward<Args>(args)...);
    }
    else if (current_entry->is_empty())
    {
        // free slot at the probe position: construct in place
        current_entry->emplace(distance_from_desired, std::forward<Key>(key), std::forward<Args>(args)...);
        ++num_elements;
        return { { current_entry }, true };
    }
    // robin-hood displacement: the new element takes this slot; the
    // evicted occupant (now in to_insert) is pushed down the table
    value_type to_insert(std::forward<Key>(key), std::forward<Args>(args)...);
    swap(distance_from_desired, current_entry->distance_from_desired);
    swap(to_insert, current_entry->value);
    iterator result = { current_entry };
    for (++distance_from_desired, ++current_entry;; ++current_entry)
    {
        if (current_entry->is_empty())
        {
            current_entry->emplace(distance_from_desired, std::move(to_insert));
            ++num_elements;
            return { result, true };
        }
        else if (current_entry->distance_from_desired < distance_from_desired)
        {
            // this slot's occupant is closer to home than the carried
            // element: give the slot to the needier element and carry
            // the occupant onward
            swap(distance_from_desired, current_entry->distance_from_desired);
            swap(to_insert, current_entry->value);
            ++distance_from_desired;
        }
        else
        {
            ++distance_from_desired;
            if (distance_from_desired == max_lookups)
            {
                // displacement chain hit the probe limit: put the carried
                // element back into the result slot, grow, and retry
                swap(to_insert, result.current->value);
                grow();
                return emplace(std::move(to_insert));
            }
        }
    }
}
+
+ void grow()
+ {
+ rehash(std::max(size_t(4), 2 * bucket_count()));
+ }
+
// Release a bucket array obtained from the allocator; the allocation
// size is num_slots_minus_one + max_lookups + 1 entries. The default
// (unallocated) table is shared and must never be deallocated — hence
// the guard.
void deallocate_data(EntryPointer begin, size_t num_slots_minus_one, int8_t max_lookups)
{
    if (begin != Entry::empty_default_table())
    {
        AllocatorTraits::deallocate(*this, begin, num_slots_minus_one + max_lookups + 1);
    }
}
+
// Return to the default-constructed (unallocated) state: free the
// current array, point at the shared empty table, and reset the hash
// policy and probe limit. Does not destroy values — callers clear()
// first.
void reset_to_empty_state()
{
    deallocate_data(entries, num_slots_minus_one, max_lookups);
    entries = Entry::empty_default_table();
    num_slots_minus_one = 0;
    hash_policy.reset();
    max_lookups = detailv3::min_lookups - 1;
}
+
// Hash and compare through the Hasher/Equal base subobjects. The
// templated argument types allow any key the functors accept.
template<typename U>
size_t hash_object(const U & key)
{
    return static_cast<Hasher &>(*this)(key);
}
template<typename U>
size_t hash_object(const U & key) const
{
    return static_cast<const Hasher &>(*this)(key);
}
template<typename L, typename R>
bool compares_equal(const L & lhs, const R & rhs)
{
    return static_cast<Equal &>(*this)(lhs, rhs);
}
+
// Lazy result type returned by erase(const_iterator): converts to a
// (const_)iterator on demand, advancing past the erased slot if it is
// now empty — so the cost of finding the next element is only paid when
// the caller actually asks for an iterator.
struct convertible_to_iterator
{
    EntryPointer it;

    operator iterator()
    {
        if (it->has_value())
            return { it };
        else
            return ++iterator{it};
    }
    operator const_iterator()
    {
        if (it->has_value())
            return { it };
        else
            return ++const_iterator{it};
    }
};
+
+};
+}
+
+struct prime_number_hash_policy
+{
+ static size_t mod0(size_t) { return 0llu; }
+ static size_t mod2(size_t hash) { return hash % 2llu; }
+ static size_t mod3(size_t hash) { return hash % 3llu; }
+ static size_t mod5(size_t hash) { return hash % 5llu; }
+ static size_t mod7(size_t hash) { return hash % 7llu; }
+ static size_t mod11(size_t hash) { return hash % 11llu; }
+ static size_t mod13(size_t hash) { return hash % 13llu; }
+ static size_t mod17(size_t hash) { return hash % 17llu; }
+ static size_t mod23(size_t hash) { return hash % 23llu; }
+ static size_t mod29(size_t hash) { return hash % 29llu; }
+ static size_t mod37(size_t hash) { return hash % 37llu; }
+ static size_t mod47(size_t hash) { return hash % 47llu; }
+ static size_t mod59(size_t hash) { return hash % 59llu; }
+ static size_t mod73(size_t hash) { return hash % 73llu; }
+ static size_t mod97(size_t hash) { return hash % 97llu; }
+ static size_t mod127(size_t hash) { return hash % 127llu; }
+ static size_t mod151(size_t hash) { return hash % 151llu; }
+ static size_t mod197(size_t hash) { return hash % 197llu; }
+ static size_t mod251(size_t hash) { return hash % 251llu; }
+ static size_t mod313(size_t hash) { return hash % 313llu; }
+ static size_t mod397(size_t hash) { return hash % 397llu; }
+ static size_t mod499(size_t hash) { return hash % 499llu; }
+ static size_t mod631(size_t hash) { return hash % 631llu; }
+ static size_t mod797(size_t hash) { return hash % 797llu; }
+ static size_t mod1009(size_t hash) { return hash % 1009llu; }
+ static size_t mod1259(size_t hash) { return hash % 1259llu; }
+ static size_t mod1597(size_t hash) { return hash % 1597llu; }
+ static size_t mod2011(size_t hash) { return hash % 2011llu; }
+ static size_t mod2539(size_t hash) { return hash % 2539llu; }
+ static size_t mod3203(size_t hash) { return hash % 3203llu; }
+ static size_t mod4027(size_t hash) { return hash % 4027llu; }
+ static size_t mod5087(size_t hash) { return hash % 5087llu; }
+ static size_t mod6421(size_t hash) { return hash % 6421llu; }
+ static size_t mod8089(size_t hash) { return hash % 8089llu; }
+ static size_t mod10193(size_t hash) { return hash % 10193llu; }
+ static size_t mod12853(size_t hash) { return hash % 12853llu; }
+ static size_t mod16193(size_t hash) { return hash % 16193llu; }
+ static size_t mod20399(size_t hash) { return hash % 20399llu; }
+ static size_t mod25717(size_t hash) { return hash % 25717llu; }
+ static size_t mod32401(size_t hash) { return hash % 32401llu; }
+ static size_t mod40823(size_t hash) { return hash % 40823llu; }
+ static size_t mod51437(size_t hash) { return hash % 51437llu; }
+ static size_t mod64811(size_t hash) { return hash % 64811llu; }
+ static size_t mod81649(size_t hash) { return hash % 81649llu; }
+ static size_t mod102877(size_t hash) { return hash % 102877llu; }
+ static size_t mod129607(size_t hash) { return hash % 129607llu; }
+ static size_t mod163307(size_t hash) { return hash % 163307llu; }
+ static size_t mod205759(size_t hash) { return hash % 205759llu; }
+ static size_t mod259229(size_t hash) { return hash % 259229llu; }
+ static size_t mod326617(size_t hash) { return hash % 326617llu; }
+ static size_t mod411527(size_t hash) { return hash % 411527llu; }
+ static size_t mod518509(size_t hash) { return hash % 518509llu; }
+ static size_t mod653267(size_t hash) { return hash % 653267llu; }
+ static size_t mod823117(size_t hash) { return hash % 823117llu; }
+ static size_t mod1037059(size_t hash) { return hash % 1037059llu; }
+ static size_t mod1306601(size_t hash) { return hash % 1306601llu; }
+ static size_t mod1646237(size_t hash) { return hash % 1646237llu; }
+ static size_t mod2074129(size_t hash) { return hash % 2074129llu; }
+ static size_t mod2613229(size_t hash) { return hash % 2613229llu; }
+ static size_t mod3292489(size_t hash) { return hash % 3292489llu; }
+ static size_t mod4148279(size_t hash) { return hash % 4148279llu; }
+ static size_t mod5226491(size_t hash) { return hash % 5226491llu; }
+ static size_t mod6584983(size_t hash) { return hash % 6584983llu; }
+ static size_t mod8296553(size_t hash) { return hash % 8296553llu; }
+ static size_t mod10453007(size_t hash) { return hash % 10453007llu; }
+ static size_t mod13169977(size_t hash) { return hash % 13169977llu; }
+ static size_t mod16593127(size_t hash) { return hash % 16593127llu; }
+ static size_t mod20906033(size_t hash) { return hash % 20906033llu; }
+ static size_t mod26339969(size_t hash) { return hash % 26339969llu; }
+ static size_t mod33186281(size_t hash) { return hash % 33186281llu; }
+ static size_t mod41812097(size_t hash) { return hash % 41812097llu; }
+ static size_t mod52679969(size_t hash) { return hash % 52679969llu; }
+ static size_t mod66372617(size_t hash) { return hash % 66372617llu; }
+ static size_t mod83624237(size_t hash) { return hash % 83624237llu; }
+ static size_t mod105359939(size_t hash) { return hash % 105359939llu; }
+ static size_t mod132745199(size_t hash) { return hash % 132745199llu; }
+ static size_t mod167248483(size_t hash) { return hash % 167248483llu; }
+ static size_t mod210719881(size_t hash) { return hash % 210719881llu; }
+ static size_t mod265490441(size_t hash) { return hash % 265490441llu; }
+ static size_t mod334496971(size_t hash) { return hash % 334496971llu; }
+ static size_t mod421439783(size_t hash) { return hash % 421439783llu; }
+ static size_t mod530980861(size_t hash) { return hash % 530980861llu; }
+ static size_t mod668993977(size_t hash) { return hash % 668993977llu; }
+ static size_t mod842879579(size_t hash) { return hash % 842879579llu; }
+ static size_t mod1061961721(size_t hash) { return hash % 1061961721llu; }
+ static size_t mod1337987929(size_t hash) { return hash % 1337987929llu; }
+ static size_t mod1685759167(size_t hash) { return hash % 1685759167llu; }
+ static size_t mod2123923447(size_t hash) { return hash % 2123923447llu; }
+ static size_t mod2675975881(size_t hash) { return hash % 2675975881llu; }
+ static size_t mod3371518343(size_t hash) { return hash % 3371518343llu; }
+ static size_t mod4247846927(size_t hash) { return hash % 4247846927llu; }
+ static size_t mod5351951779(size_t hash) { return hash % 5351951779llu; }
+ static size_t mod6743036717(size_t hash) { return hash % 6743036717llu; }
+ static size_t mod8495693897(size_t hash) { return hash % 8495693897llu; }
+ static size_t mod10703903591(size_t hash) { return hash % 10703903591llu; }
+ static size_t mod13486073473(size_t hash) { return hash % 13486073473llu; }
+ static size_t mod16991387857(size_t hash) { return hash % 16991387857llu; }
+ static size_t mod21407807219(size_t hash) { return hash % 21407807219llu; }
+ static size_t mod26972146961(size_t hash) { return hash % 26972146961llu; }
+ static size_t mod33982775741(size_t hash) { return hash % 33982775741llu; }
+ static size_t mod42815614441(size_t hash) { return hash % 42815614441llu; }
+ static size_t mod53944293929(size_t hash) { return hash % 53944293929llu; }
+ static size_t mod67965551447(size_t hash) { return hash % 67965551447llu; }
+ static size_t mod85631228929(size_t hash) { return hash % 85631228929llu; }
+ static size_t mod107888587883(size_t hash) { return hash % 107888587883llu; }
+ static size_t mod135931102921(size_t hash) { return hash % 135931102921llu; }
+ static size_t mod171262457903(size_t hash) { return hash % 171262457903llu; }
+ static size_t mod215777175787(size_t hash) { return hash % 215777175787llu; }
+ static size_t mod271862205833(size_t hash) { return hash % 271862205833llu; }
+ static size_t mod342524915839(size_t hash) { return hash % 342524915839llu; }
+ static size_t mod431554351609(size_t hash) { return hash % 431554351609llu; }
+ static size_t mod543724411781(size_t hash) { return hash % 543724411781llu; }
+ static size_t mod685049831731(size_t hash) { return hash % 685049831731llu; }
+ static size_t mod863108703229(size_t hash) { return hash % 863108703229llu; }
+ static size_t mod1087448823553(size_t hash) { return hash % 1087448823553llu; }
+ static size_t mod1370099663459(size_t hash) { return hash % 1370099663459llu; }
+ static size_t mod1726217406467(size_t hash) { return hash % 1726217406467llu; }
+ static size_t mod2174897647073(size_t hash) { return hash % 2174897647073llu; }
+ static size_t mod2740199326961(size_t hash) { return hash % 2740199326961llu; }
+ static size_t mod3452434812973(size_t hash) { return hash % 3452434812973llu; }
+ static size_t mod4349795294267(size_t hash) { return hash % 4349795294267llu; }
+ static size_t mod5480398654009(size_t hash) { return hash % 5480398654009llu; }
+ static size_t mod6904869625999(size_t hash) { return hash % 6904869625999llu; }
+ static size_t mod8699590588571(size_t hash) { return hash % 8699590588571llu; }
+ static size_t mod10960797308051(size_t hash) { return hash % 10960797308051llu; }
+ static size_t mod13809739252051(size_t hash) { return hash % 13809739252051llu; }
+ static size_t mod17399181177241(size_t hash) { return hash % 17399181177241llu; }
+ static size_t mod21921594616111(size_t hash) { return hash % 21921594616111llu; }
+ static size_t mod27619478504183(size_t hash) { return hash % 27619478504183llu; }
+ static size_t mod34798362354533(size_t hash) { return hash % 34798362354533llu; }
+ static size_t mod43843189232363(size_t hash) { return hash % 43843189232363llu; }
+ static size_t mod55238957008387(size_t hash) { return hash % 55238957008387llu; }
+ static size_t mod69596724709081(size_t hash) { return hash % 69596724709081llu; }
+ static size_t mod87686378464759(size_t hash) { return hash % 87686378464759llu; }
+ static size_t mod110477914016779(size_t hash) { return hash % 110477914016779llu; }
+ static size_t mod139193449418173(size_t hash) { return hash % 139193449418173llu; }
+ static size_t mod175372756929481(size_t hash) { return hash % 175372756929481llu; }
+ static size_t mod220955828033581(size_t hash) { return hash % 220955828033581llu; }
+ static size_t mod278386898836457(size_t hash) { return hash % 278386898836457llu; }
+ static size_t mod350745513859007(size_t hash) { return hash % 350745513859007llu; }
+ static size_t mod441911656067171(size_t hash) { return hash % 441911656067171llu; }
+ static size_t mod556773797672909(size_t hash) { return hash % 556773797672909llu; }
+ static size_t mod701491027718027(size_t hash) { return hash % 701491027718027llu; }
+ static size_t mod883823312134381(size_t hash) { return hash % 883823312134381llu; }
+ static size_t mod1113547595345903(size_t hash) { return hash % 1113547595345903llu; }
+ static size_t mod1402982055436147(size_t hash) { return hash % 1402982055436147llu; }
+ static size_t mod1767646624268779(size_t hash) { return hash % 1767646624268779llu; }
+ static size_t mod2227095190691797(size_t hash) { return hash % 2227095190691797llu; }
+ static size_t mod2805964110872297(size_t hash) { return hash % 2805964110872297llu; }
+ static size_t mod3535293248537579(size_t hash) { return hash % 3535293248537579llu; }
+ static size_t mod4454190381383713(size_t hash) { return hash % 4454190381383713llu; }
+ static size_t mod5611928221744609(size_t hash) { return hash % 5611928221744609llu; }
+ static size_t mod7070586497075177(size_t hash) { return hash % 7070586497075177llu; }
+ static size_t mod8908380762767489(size_t hash) { return hash % 8908380762767489llu; }
+ static size_t mod11223856443489329(size_t hash) { return hash % 11223856443489329llu; }
+ static size_t mod14141172994150357(size_t hash) { return hash % 14141172994150357llu; }
+ static size_t mod17816761525534927(size_t hash) { return hash % 17816761525534927llu; }
+ static size_t mod22447712886978529(size_t hash) { return hash % 22447712886978529llu; }
+ static size_t mod28282345988300791(size_t hash) { return hash % 28282345988300791llu; }
+ static size_t mod35633523051069991(size_t hash) { return hash % 35633523051069991llu; }
+ static size_t mod44895425773957261(size_t hash) { return hash % 44895425773957261llu; }
+ static size_t mod56564691976601587(size_t hash) { return hash % 56564691976601587llu; }
+ static size_t mod71267046102139967(size_t hash) { return hash % 71267046102139967llu; }
+ static size_t mod89790851547914507(size_t hash) { return hash % 89790851547914507llu; }
+ static size_t mod113129383953203213(size_t hash) { return hash % 113129383953203213llu; }
+ static size_t mod142534092204280003(size_t hash) { return hash % 142534092204280003llu; }
+ static size_t mod179581703095829107(size_t hash) { return hash % 179581703095829107llu; }
+ static size_t mod226258767906406483(size_t hash) { return hash % 226258767906406483llu; }
+ static size_t mod285068184408560057(size_t hash) { return hash % 285068184408560057llu; }
+ static size_t mod359163406191658253(size_t hash) { return hash % 359163406191658253llu; }
+ static size_t mod452517535812813007(size_t hash) { return hash % 452517535812813007llu; }
+ static size_t mod570136368817120201(size_t hash) { return hash % 570136368817120201llu; }
+ static size_t mod718326812383316683(size_t hash) { return hash % 718326812383316683llu; }
+ static size_t mod905035071625626043(size_t hash) { return hash % 905035071625626043llu; }
+ static size_t mod1140272737634240411(size_t hash) { return hash % 1140272737634240411llu; }
+ static size_t mod1436653624766633509(size_t hash) { return hash % 1436653624766633509llu; }
+ static size_t mod1810070143251252131(size_t hash) { return hash % 1810070143251252131llu; }
+ static size_t mod2280545475268481167(size_t hash) { return hash % 2280545475268481167llu; }
+ static size_t mod2873307249533267101(size_t hash) { return hash % 2873307249533267101llu; }
+ static size_t mod3620140286502504283(size_t hash) { return hash % 3620140286502504283llu; }
+ static size_t mod4561090950536962147(size_t hash) { return hash % 4561090950536962147llu; }
+ static size_t mod5746614499066534157(size_t hash) { return hash % 5746614499066534157llu; }
+ static size_t mod7240280573005008577(size_t hash) { return hash % 7240280573005008577llu; }
+ static size_t mod9122181901073924329(size_t hash) { return hash % 9122181901073924329llu; }
+ static size_t mod11493228998133068689(size_t hash) { return hash % 11493228998133068689llu; }
+ static size_t mod14480561146010017169(size_t hash) { return hash % 14480561146010017169llu; }
+ static size_t mod18446744073709551557(size_t hash) { return hash % 18446744073709551557llu; }
+
+ using mod_function = size_t (*)(size_t);
+
+ mod_function next_size_over(size_t & size) const
+ {
+ // prime numbers generated by the following method:
+ // 1. start with a prime p = 2
+ // 2. go to wolfram alpha and get p = NextPrime(2 * p)
+ // 3. repeat 2. until you overflow 64 bits
+ // you now have large gaps which you would hit if somebody called reserve() with an unlucky number.
+ // 4. to fill the gaps for every prime p go to wolfram alpha and get ClosestPrime(p * 2^(1/3)) and ClosestPrime(p * 2^(2/3)) and put those in the gaps
+ // 5. get PrevPrime(2^64) and put it at the end
+ static constexpr const size_t prime_list[] =
+ {
+ 2llu, 3llu, 5llu, 7llu, 11llu, 13llu, 17llu, 23llu, 29llu, 37llu, 47llu,
+ 59llu, 73llu, 97llu, 127llu, 151llu, 197llu, 251llu, 313llu, 397llu,
+ 499llu, 631llu, 797llu, 1009llu, 1259llu, 1597llu, 2011llu, 2539llu,
+ 3203llu, 4027llu, 5087llu, 6421llu, 8089llu, 10193llu, 12853llu, 16193llu,
+ 20399llu, 25717llu, 32401llu, 40823llu, 51437llu, 64811llu, 81649llu,
+ 102877llu, 129607llu, 163307llu, 205759llu, 259229llu, 326617llu,
+ 411527llu, 518509llu, 653267llu, 823117llu, 1037059llu, 1306601llu,
+ 1646237llu, 2074129llu, 2613229llu, 3292489llu, 4148279llu, 5226491llu,
+ 6584983llu, 8296553llu, 10453007llu, 13169977llu, 16593127llu, 20906033llu,
+ 26339969llu, 33186281llu, 41812097llu, 52679969llu, 66372617llu,
+ 83624237llu, 105359939llu, 132745199llu, 167248483llu, 210719881llu,
+ 265490441llu, 334496971llu, 421439783llu, 530980861llu, 668993977llu,
+ 842879579llu, 1061961721llu, 1337987929llu, 1685759167llu, 2123923447llu,
+ 2675975881llu, 3371518343llu, 4247846927llu, 5351951779llu, 6743036717llu,
+ 8495693897llu, 10703903591llu, 13486073473llu, 16991387857llu,
+ 21407807219llu, 26972146961llu, 33982775741llu, 42815614441llu,
+ 53944293929llu, 67965551447llu, 85631228929llu, 107888587883llu,
+ 135931102921llu, 171262457903llu, 215777175787llu, 271862205833llu,
+ 342524915839llu, 431554351609llu, 543724411781llu, 685049831731llu,
+ 863108703229llu, 1087448823553llu, 1370099663459llu, 1726217406467llu,
+ 2174897647073llu, 2740199326961llu, 3452434812973llu, 4349795294267llu,
+ 5480398654009llu, 6904869625999llu, 8699590588571llu, 10960797308051llu,
+ 13809739252051llu, 17399181177241llu, 21921594616111llu, 27619478504183llu,
+ 34798362354533llu, 43843189232363llu, 55238957008387llu, 69596724709081llu,
+ 87686378464759llu, 110477914016779llu, 139193449418173llu,
+ 175372756929481llu, 220955828033581llu, 278386898836457llu,
+ 350745513859007llu, 441911656067171llu, 556773797672909llu,
+ 701491027718027llu, 883823312134381llu, 1113547595345903llu,
+ 1402982055436147llu, 1767646624268779llu, 2227095190691797llu,
+ 2805964110872297llu, 3535293248537579llu, 4454190381383713llu,
+ 5611928221744609llu, 7070586497075177llu, 8908380762767489llu,
+ 11223856443489329llu, 14141172994150357llu, 17816761525534927llu,
+ 22447712886978529llu, 28282345988300791llu, 35633523051069991llu,
+ 44895425773957261llu, 56564691976601587llu, 71267046102139967llu,
+ 89790851547914507llu, 113129383953203213llu, 142534092204280003llu,
+ 179581703095829107llu, 226258767906406483llu, 285068184408560057llu,
+ 359163406191658253llu, 452517535812813007llu, 570136368817120201llu,
+ 718326812383316683llu, 905035071625626043llu, 1140272737634240411llu,
+ 1436653624766633509llu, 1810070143251252131llu, 2280545475268481167llu,
+ 2873307249533267101llu, 3620140286502504283llu, 4561090950536962147llu,
+ 5746614499066534157llu, 7240280573005008577llu, 9122181901073924329llu,
+ 11493228998133068689llu, 14480561146010017169llu, 18446744073709551557llu
+ };
+ static constexpr size_t (* const mod_functions[])(size_t) =
+ {
+ &mod0, &mod2, &mod3, &mod5, &mod7, &mod11, &mod13, &mod17, &mod23, &mod29, &mod37,
+ &mod47, &mod59, &mod73, &mod97, &mod127, &mod151, &mod197, &mod251, &mod313, &mod397,
+ &mod499, &mod631, &mod797, &mod1009, &mod1259, &mod1597, &mod2011, &mod2539, &mod3203,
+ &mod4027, &mod5087, &mod6421, &mod8089, &mod10193, &mod12853, &mod16193, &mod20399,
+ &mod25717, &mod32401, &mod40823, &mod51437, &mod64811, &mod81649, &mod102877,
+ &mod129607, &mod163307, &mod205759, &mod259229, &mod326617, &mod411527, &mod518509,
+ &mod653267, &mod823117, &mod1037059, &mod1306601, &mod1646237, &mod2074129,
+ &mod2613229, &mod3292489, &mod4148279, &mod5226491, &mod6584983, &mod8296553,
+ &mod10453007, &mod13169977, &mod16593127, &mod20906033, &mod26339969, &mod33186281,
+ &mod41812097, &mod52679969, &mod66372617, &mod83624237, &mod105359939, &mod132745199,
+ &mod167248483, &mod210719881, &mod265490441, &mod334496971, &mod421439783,
+ &mod530980861, &mod668993977, &mod842879579, &mod1061961721, &mod1337987929,
+ &mod1685759167, &mod2123923447, &mod2675975881, &mod3371518343, &mod4247846927,
+ &mod5351951779, &mod6743036717, &mod8495693897, &mod10703903591, &mod13486073473,
+ &mod16991387857, &mod21407807219, &mod26972146961, &mod33982775741, &mod42815614441,
+ &mod53944293929, &mod67965551447, &mod85631228929, &mod107888587883, &mod135931102921,
+ &mod171262457903, &mod215777175787, &mod271862205833, &mod342524915839,
+ &mod431554351609, &mod543724411781, &mod685049831731, &mod863108703229,
+ &mod1087448823553, &mod1370099663459, &mod1726217406467, &mod2174897647073,
+ &mod2740199326961, &mod3452434812973, &mod4349795294267, &mod5480398654009,
+ &mod6904869625999, &mod8699590588571, &mod10960797308051, &mod13809739252051,
+ &mod17399181177241, &mod21921594616111, &mod27619478504183, &mod34798362354533,
+ &mod43843189232363, &mod55238957008387, &mod69596724709081, &mod87686378464759,
+ &mod110477914016779, &mod139193449418173, &mod175372756929481, &mod220955828033581,
+ &mod278386898836457, &mod350745513859007, &mod441911656067171, &mod556773797672909,
+ &mod701491027718027, &mod883823312134381, &mod1113547595345903, &mod1402982055436147,
+ &mod1767646624268779, &mod2227095190691797, &mod2805964110872297, &mod3535293248537579,
+ &mod4454190381383713, &mod5611928221744609, &mod7070586497075177, &mod8908380762767489,
+ &mod11223856443489329, &mod14141172994150357, &mod17816761525534927,
+ &mod22447712886978529, &mod28282345988300791, &mod35633523051069991,
+ &mod44895425773957261, &mod56564691976601587, &mod71267046102139967,
+ &mod89790851547914507, &mod113129383953203213, &mod142534092204280003,
+ &mod179581703095829107, &mod226258767906406483, &mod285068184408560057,
+ &mod359163406191658253, &mod452517535812813007, &mod570136368817120201,
+ &mod718326812383316683, &mod905035071625626043, &mod1140272737634240411,
+ &mod1436653624766633509, &mod1810070143251252131, &mod2280545475268481167,
+ &mod2873307249533267101, &mod3620140286502504283, &mod4561090950536962147,
+ &mod5746614499066534157, &mod7240280573005008577, &mod9122181901073924329,
+ &mod11493228998133068689, &mod14480561146010017169, &mod18446744073709551557
+ };
+ const size_t * found = std::lower_bound(std::begin(prime_list), std::end(prime_list) - 1, size);
+ size = *found;
+ return mod_functions[1 + found - prime_list];
+ }
    // Adopt the modulo function chosen by next_size_over() once the rehash
    // to the corresponding prime bucket count has succeeded.
    void commit(mod_function new_mod_function)
    {
        current_mod_function = new_mod_function;
    }
    // Return to the initial state (&mod0), matching a table with no buckets.
    void reset()
    {
        current_mod_function = &mod0;
    }
+
    // The prime policy ignores the slot count argument: the committed modulo
    // function already reduces the hash into the current table's range.
    size_t index_for_hash(size_t hash, size_t /*num_slots_minus_one*/) const
    {
        return current_mod_function(hash);
    }
    size_t keep_in_range(size_t index, size_t num_slots_minus_one) const
    {
        // Only pay for a modulo when the index actually escaped the table.
        return index > num_slots_minus_one ? current_mod_function(index) : index;
    }
+
+private:
+ mod_function current_mod_function = &mod0;
+};
+
+struct power_of_two_hash_policy
+{
+ size_t index_for_hash(size_t hash, size_t num_slots_minus_one) const
+ {
+ return hash & num_slots_minus_one;
+ }
+ size_t keep_in_range(size_t index, size_t num_slots_minus_one) const
+ {
+ return index_for_hash(index, num_slots_minus_one);
+ }
+ int8_t next_size_over(size_t & size) const
+ {
+ size = detailv3::next_power_of_two(size);
+ return 0;
+ }
+ void commit(int8_t)
+ {
+ }
+ void reset()
+ {
+ }
+
+};
+
+struct fibonacci_hash_policy
+{
+ size_t index_for_hash(size_t hash, size_t /*num_slots_minus_one*/) const
+ {
+ return (11400714819323198485ull * hash) >> shift;
+ }
+ size_t keep_in_range(size_t index, size_t num_slots_minus_one) const
+ {
+ return index & num_slots_minus_one;
+ }
+
+ int8_t next_size_over(size_t & size) const
+ {
+ size = std::max(size_t(2), detailv3::next_power_of_two(size));
+ return 64 - detailv3::log2(size);
+ }
+ void commit(int8_t shift)
+ {
+ this->shift = shift;
+ }
+ void reset()
+ {
+ shift = 63;
+ }
+
+private:
+ int8_t shift = 63;
+};
+
// Hash map with open addressing and robin-hood bucket stealing. All storage
// and probing logic lives in detailv3::sherwood_v3_table; this wrapper only
// adds the map-specific conveniences (operator[], at, insert_or_assign, ==).
template<typename K, typename V, typename H = std::hash<K>, typename E = std::equal_to<K>, typename A = std::allocator<std::pair<K, V> > >
class flat_hash_map
    : public detailv3::sherwood_v3_table
    <
        std::pair<K, V>,
        K,
        H,
        detailv3::KeyOrValueHasher<K, std::pair<K, V>, H>,
        E,
        detailv3::KeyOrValueEquality<K, std::pair<K, V>, E>,
        A,
        typename std::allocator_traits<A>::template rebind_alloc<detailv3::sherwood_v3_entry<std::pair<K, V>>>
    >
{
    using Table = detailv3::sherwood_v3_table
    <
        std::pair<K, V>,
        K,
        H,
        detailv3::KeyOrValueHasher<K, std::pair<K, V>, H>,
        E,
        detailv3::KeyOrValueEquality<K, std::pair<K, V>, E>,
        A,
        typename std::allocator_traits<A>::template rebind_alloc<detailv3::sherwood_v3_entry<std::pair<K, V>>>
    >;
public:

    using key_type = K;
    using mapped_type = V;

    using Table::Table;
    flat_hash_map()
    {
    }

    // Returns a reference to the mapped value, inserting a value-initialized
    // V (via the convertible_to_value placeholder below) when key is absent.
    inline V & operator[](const K & key)
    {
        return emplace(key, convertible_to_value()).first->second;
    }
    inline V & operator[](K && key)
    {
        return emplace(std::move(key), convertible_to_value()).first->second;
    }
    // Like operator[], but throws std::out_of_range instead of inserting
    // when the key is absent.
    V & at(const K & key)
    {
        auto found = this->find(key);
        if (found == this->end())
            throw std::out_of_range("Argument passed to at() was not in the map.");
        return found->second;
    }
    const V & at(const K & key) const
    {
        auto found = this->find(key);
        if (found == this->end())
            throw std::out_of_range("Argument passed to at() was not in the map.");
        return found->second;
    }

    using Table::emplace;
    // Zero-argument emplace: inserts a default key mapped to a
    // value-initialized V.
    std::pair<typename Table::iterator, bool> emplace()
    {
        return emplace(key_type(), convertible_to_value());
    }
    // C++17-style insert_or_assign: emplaces when the key is new, otherwise
    // overwrites the existing mapped value with std::forward<M>(m).
    template<typename M>
    std::pair<typename Table::iterator, bool> insert_or_assign(const key_type & key, M && m)
    {
        auto emplace_result = emplace(key, std::forward<M>(m));
        if (!emplace_result.second)
            emplace_result.first->second = std::forward<M>(m);
        return emplace_result;
    }
    template<typename M>
    std::pair<typename Table::iterator, bool> insert_or_assign(key_type && key, M && m)
    {
        auto emplace_result = emplace(std::move(key), std::forward<M>(m));
        if (!emplace_result.second)
            emplace_result.first->second = std::forward<M>(m);
        return emplace_result;
    }
    // Hinted overloads: the hint iterator is accepted for standard-interface
    // compatibility but ignored.
    template<typename M>
    typename Table::iterator insert_or_assign(typename Table::const_iterator, const key_type & key, M && m)
    {
        return insert_or_assign(key, std::forward<M>(m)).first;
    }
    template<typename M>
    typename Table::iterator insert_or_assign(typename Table::const_iterator, key_type && key, M && m)
    {
        return insert_or_assign(std::move(key), std::forward<M>(m)).first;
    }

    // Order-independent equality: equal sizes, and every (key, value) of lhs
    // must be present with an equal value in rhs.
    friend bool operator==(const flat_hash_map & lhs, const flat_hash_map & rhs)
    {
        if (lhs.size() != rhs.size())
            return false;
        for (const typename Table::value_type & value : lhs)
        {
            auto found = rhs.find(value.first);
            if (found == rhs.end())
                return false;
            else if (value.second != found->second)
                return false;
        }
        return true;
    }
    friend bool operator!=(const flat_hash_map & lhs, const flat_hash_map & rhs)
    {
        return !(lhs == rhs);
    }

private:
    // Placeholder that converts to a value-initialized V on demand; used by
    // operator[] and emplace() as the "default mapped value" argument.
    struct convertible_to_value
    {
        operator V() const
        {
            return V();
        }
    };
};
+
// Hash set counterpart of flat_hash_map, backed by the same
// detailv3::sherwood_v3_table with the element itself acting as the key.
template<typename T, typename H = std::hash<T>, typename E = std::equal_to<T>, typename A = std::allocator<T> >
class flat_hash_set
    : public detailv3::sherwood_v3_table
    <
        T,
        T,
        H,
        detailv3::functor_storage<size_t, H>,
        E,
        detailv3::functor_storage<bool, E>,
        A,
        typename std::allocator_traits<A>::template rebind_alloc<detailv3::sherwood_v3_entry<T>>
    >
{
    using Table = detailv3::sherwood_v3_table
    <
        T,
        T,
        H,
        detailv3::functor_storage<size_t, H>,
        E,
        detailv3::functor_storage<bool, E>,
        A,
        typename std::allocator_traits<A>::template rebind_alloc<detailv3::sherwood_v3_entry<T>>
    >;
public:

    using key_type = T;

    using Table::Table;
    flat_hash_set()
    {
    }

    // Generic emplace: builds a T from the arguments, then inserts it.
    template<typename... Args>
    std::pair<typename Table::iterator, bool> emplace(Args &&... args)
    {
        return Table::emplace(T(std::forward<Args>(args)...));
    }
    // Overloads for arguments that already are a T: forwarded directly so no
    // extra temporary is constructed in this wrapper.
    std::pair<typename Table::iterator, bool> emplace(const key_type & arg)
    {
        return Table::emplace(arg);
    }
    std::pair<typename Table::iterator, bool> emplace(key_type & arg)
    {
        return Table::emplace(arg);
    }
    std::pair<typename Table::iterator, bool> emplace(const key_type && arg)
    {
        return Table::emplace(std::move(arg));
    }
    std::pair<typename Table::iterator, bool> emplace(key_type && arg)
    {
        return Table::emplace(std::move(arg));
    }

    // Order-independent equality: equal sizes, and every element of lhs must
    // be found in rhs.
    friend bool operator==(const flat_hash_set & lhs, const flat_hash_set & rhs)
    {
        if (lhs.size() != rhs.size())
            return false;
        for (const T & value : lhs)
        {
            if (rhs.find(value) == rhs.end())
                return false;
        }
        return true;
    }
    friend bool operator!=(const flat_hash_set & lhs, const flat_hash_set & rhs)
    {
        return !(lhs == rhs);
    }
};
+
+
+template<typename T>
+struct power_of_two_std_hash : std::hash<T>
+{
+ typedef ska::power_of_two_hash_policy hash_policy;
+};
+
+} // end namespace ska
diff --git a/misc/benchmarks/external/tsl/robin_growth_policy.h b/misc/benchmarks/external/tsl/robin_growth_policy.h
new file mode 100644
index 00000000..eba8cdfa
--- /dev/null
+++ b/misc/benchmarks/external/tsl/robin_growth_policy.h
@@ -0,0 +1,406 @@
+/**
+ * MIT License
+ *
+ * Copyright (c) 2017 Thibaut Goetghebuer-Planchon <[email protected]>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef TSL_ROBIN_GROWTH_POLICY_H
+#define TSL_ROBIN_GROWTH_POLICY_H
+
+#include <algorithm>
+#include <array>
+#include <climits>
+#include <cmath>
+#include <cstddef>
+#include <cstdint>
+#include <iterator>
+#include <limits>
+#include <ratio>
+#include <stdexcept>
+
+#ifdef TSL_DEBUG
+#define tsl_rh_assert(expr) assert(expr)
+#else
+#define tsl_rh_assert(expr) (static_cast<void>(0))
+#endif
+
+/**
+ * If exceptions are enabled, throw the exception passed in parameter, otherwise
+ * call std::terminate.
+ */
+#if (defined(__cpp_exceptions) || defined(__EXCEPTIONS) || \
+ (defined(_MSC_VER) && defined(_CPPUNWIND))) && \
+ !defined(TSL_NO_EXCEPTIONS)
+#define TSL_RH_THROW_OR_TERMINATE(ex, msg) throw ex(msg)
+#else
+#define TSL_RH_NO_EXCEPTIONS
+#ifdef TSL_DEBUG
+#include <iostream>
+#define TSL_RH_THROW_OR_TERMINATE(ex, msg) \
+ do { \
+ std::cerr << msg << std::endl; \
+ std::terminate(); \
+ } while (0)
+#else
+#define TSL_RH_THROW_OR_TERMINATE(ex, msg) std::terminate()
+#endif
+#endif
+
+#if defined(__GNUC__) || defined(__clang__)
+#define TSL_RH_LIKELY(exp) (__builtin_expect(!!(exp), true))
+#else
+#define TSL_RH_LIKELY(exp) (exp)
+#endif
+
+#define TSL_RH_UNUSED(x) static_cast<void>(x)
+
+namespace tsl {
+namespace rh {
+
+/**
+ * Grow the hash table by a factor of GrowthFactor keeping the bucket count to a
+ * power of two. It allows the table to use a mask operation instead of a modulo
+ * operation to map a hash to a bucket.
+ *
+ * GrowthFactor must be a power of two >= 2.
+ */
+template <std::size_t GrowthFactor>
+class power_of_two_growth_policy {
+ public:
+ /**
+ * Called on the hash table creation and on rehash. The number of buckets for
+ * the table is passed in parameter. This number is a minimum, the policy may
+ * update this value with a higher value if needed (but not lower).
+ *
+ * If 0 is given, min_bucket_count_in_out must still be 0 after the policy
+ * creation and bucket_for_hash must always return 0 in this case.
+ */
+ explicit power_of_two_growth_policy(std::size_t& min_bucket_count_in_out) {
+ if (min_bucket_count_in_out > max_bucket_count()) {
+ TSL_RH_THROW_OR_TERMINATE(std::length_error,
+ "The hash table exceeds its maximum size.");
+ }
+
+ if (min_bucket_count_in_out > 0) {
+ min_bucket_count_in_out =
+ round_up_to_power_of_two(min_bucket_count_in_out);
+ m_mask = min_bucket_count_in_out - 1;
+ } else {
+ m_mask = 0;
+ }
+ }
+
+ /**
+ * Return the bucket [0, bucket_count()) to which the hash belongs.
+ * If bucket_count() is 0, it must always return 0.
+ */
+ std::size_t bucket_for_hash(std::size_t hash) const noexcept {
+ return hash & m_mask;
+ }
+
+ /**
+ * Return the number of buckets that should be used on next growth.
+ */
+ std::size_t next_bucket_count() const {
+ if ((m_mask + 1) > max_bucket_count() / GrowthFactor) {
+ TSL_RH_THROW_OR_TERMINATE(std::length_error,
+ "The hash table exceeds its maximum size.");
+ }
+
+ return (m_mask + 1) * GrowthFactor;
+ }
+
+ /**
+ * Return the maximum number of buckets supported by the policy.
+ */
+ std::size_t max_bucket_count() const {
+ // Largest power of two.
+ return (std::numeric_limits<std::size_t>::max() / 2) + 1;
+ }
+
+ /**
+ * Reset the growth policy as if it was created with a bucket count of 0.
+ * After a clear, the policy must always return 0 when bucket_for_hash is
+ * called.
+ */
+ void clear() noexcept { m_mask = 0; }
+
+ private:
+ static std::size_t round_up_to_power_of_two(std::size_t value) {
+ if (is_power_of_two(value)) {
+ return value;
+ }
+
+ if (value == 0) {
+ return 1;
+ }
+
+ --value;
+ for (std::size_t i = 1; i < sizeof(std::size_t) * CHAR_BIT; i *= 2) {
+ value |= value >> i;
+ }
+
+ return value + 1;
+ }
+
+ static constexpr bool is_power_of_two(std::size_t value) {
+ return value != 0 && (value & (value - 1)) == 0;
+ }
+
+ protected:
+ static_assert(is_power_of_two(GrowthFactor) && GrowthFactor >= 2,
+ "GrowthFactor must be a power of two >= 2.");
+
+ std::size_t m_mask;
+};
+
+/**
+ * Grow the hash table by GrowthFactor::num / GrowthFactor::den and use a modulo
+ * to map a hash to a bucket. Slower but it can be useful if you want a slower
+ * growth.
+ */
template <class GrowthFactor = std::ratio<3, 2>>
class mod_growth_policy {
 public:
  // Same contract as power_of_two_growth_policy's constructor:
  // min_bucket_count_in_out is a minimum and may only be raised. A modulo of
  // 1 maps every hash to bucket 0, which covers the empty-table case.
  explicit mod_growth_policy(std::size_t& min_bucket_count_in_out) {
    if (min_bucket_count_in_out > max_bucket_count()) {
      TSL_RH_THROW_OR_TERMINATE(std::length_error,
                                "The hash table exceeds its maximum size.");
    }

    if (min_bucket_count_in_out > 0) {
      m_mod = min_bucket_count_in_out;
    } else {
      m_mod = 1;
    }
  }

  std::size_t bucket_for_hash(std::size_t hash) const noexcept {
    return hash % m_mod;
  }

  std::size_t next_bucket_count() const {
    if (m_mod == max_bucket_count()) {
      TSL_RH_THROW_OR_TERMINATE(std::length_error,
                                "The hash table exceeds its maximum size.");
    }

    // Grow in floating point so fractional GrowthFactor ratios (e.g. 3/2)
    // round up via std::ceil.
    const double next_bucket_count =
        std::ceil(double(m_mod) * REHASH_SIZE_MULTIPLICATION_FACTOR);
    // std::isnormal rejects inf/NaN produced if the multiplication escapes
    // the double range.
    if (!std::isnormal(next_bucket_count)) {
      TSL_RH_THROW_OR_TERMINATE(std::length_error,
                                "The hash table exceeds its maximum size.");
    }

    if (next_bucket_count > double(max_bucket_count())) {
      return max_bucket_count();
    } else {
      return std::size_t(next_bucket_count);
    }
  }

  std::size_t max_bucket_count() const { return MAX_BUCKET_COUNT; }

  // Reset to the empty-table state (modulo of 1 -> every hash maps to 0).
  void clear() noexcept { m_mod = 1; }

 private:
  static constexpr double REHASH_SIZE_MULTIPLICATION_FACTOR =
      1.0 * GrowthFactor::num / GrowthFactor::den;
  // Capped so that one more multiplication by the growth factor cannot
  // overflow std::size_t.
  static const std::size_t MAX_BUCKET_COUNT =
      std::size_t(double(std::numeric_limits<std::size_t>::max() /
                         REHASH_SIZE_MULTIPLICATION_FACTOR));

  static_assert(REHASH_SIZE_MULTIPLICATION_FACTOR >= 1.1,
                "Growth factor should be >= 1.1.");

  std::size_t m_mod;
};
+
+namespace detail {
+
+#if SIZE_MAX >= ULLONG_MAX
+#define TSL_RH_NB_PRIMES 51
+#elif SIZE_MAX >= ULONG_MAX
+#define TSL_RH_NB_PRIMES 40
+#else
+#define TSL_RH_NB_PRIMES 23
+#endif
+
// Bucket counts used by prime_growth_policy, in increasing order. The list
// widens with the platform so it matches TSL_RH_NB_PRIMES above: 23 entries
// when size_t is 32-bit or smaller, 40 when it can hold ULONG_MAX, 51 when
// it can hold ULLONG_MAX.
static constexpr const std::array<std::size_t, TSL_RH_NB_PRIMES> PRIMES = {{
    1u,
    5u,
    17u,
    29u,
    37u,
    53u,
    67u,
    79u,
    97u,
    131u,
    193u,
    257u,
    389u,
    521u,
    769u,
    1031u,
    1543u,
    2053u,
    3079u,
    6151u,
    12289u,
    24593u,
    49157u,
#if SIZE_MAX >= ULONG_MAX
    98317ul,
    196613ul,
    393241ul,
    786433ul,
    1572869ul,
    3145739ul,
    6291469ul,
    12582917ul,
    25165843ul,
    50331653ul,
    100663319ul,
    201326611ul,
    402653189ul,
    805306457ul,
    1610612741ul,
    3221225473ul,
    4294967291ul,
#endif
#if SIZE_MAX >= ULLONG_MAX
    6442450939ull,
    12884901893ull,
    25769803751ull,
    51539607551ull,
    103079215111ull,
    206158430209ull,
    412316860441ull,
    824633720831ull,
    1649267441651ull,
    3298534883309ull,
    6597069766657ull,
#endif
}};
+
// Returns hash % PRIMES[IPrime]. The index is a template parameter so the
// divisor is a compile-time constant the compiler can strength-reduce.
template <unsigned int IPrime>
static constexpr std::size_t mod(std::size_t hash) {
  return hash % PRIMES[IPrime];
}
+
// MOD_PRIME[iprime](hash) returns hash % PRIMES[iprime]. This table allows for
// faster modulo as the compiler can optimize the modulo code better with a
// constant known at the compilation.
// The preprocessor sections mirror the platform-dependent tail of PRIMES.
static constexpr const std::array<std::size_t (*)(std::size_t),
                                  TSL_RH_NB_PRIMES>
    MOD_PRIME = {{
        &mod<0>, &mod<1>, &mod<2>, &mod<3>, &mod<4>, &mod<5>,
        &mod<6>, &mod<7>, &mod<8>, &mod<9>, &mod<10>, &mod<11>,
        &mod<12>, &mod<13>, &mod<14>, &mod<15>, &mod<16>, &mod<17>,
        &mod<18>, &mod<19>, &mod<20>, &mod<21>, &mod<22>,
#if SIZE_MAX >= ULONG_MAX
        &mod<23>, &mod<24>, &mod<25>, &mod<26>, &mod<27>, &mod<28>,
        &mod<29>, &mod<30>, &mod<31>, &mod<32>, &mod<33>, &mod<34>,
        &mod<35>, &mod<36>, &mod<37>, &mod<38>, &mod<39>,
#endif
#if SIZE_MAX >= ULLONG_MAX
        &mod<40>, &mod<41>, &mod<42>, &mod<43>, &mod<44>, &mod<45>,
        &mod<46>, &mod<47>, &mod<48>, &mod<49>, &mod<50>,
#endif
    }};
+
+} // namespace detail
+
+/**
+ * Grow the hash table by using prime numbers as bucket count. Slower than
+ * tsl::rh::power_of_two_growth_policy in general but will probably distribute
+ * the values around better in the buckets with a poor hash function.
+ *
+ * To allow the compiler to optimize the modulo operation, a lookup table is
+ * used with constant primes numbers.
+ *
+ * With a switch the code would look like:
+ * \code
+ * switch(iprime) { // iprime is the current prime of the hash table
+ * case 0: hash % 5ul;
+ * break;
+ * case 1: hash % 17ul;
+ * break;
+ * case 2: hash % 29ul;
+ * break;
+ * ...
+ * }
+ * \endcode
+ *
 * Because the modulo divisor is a compile-time constant, the compiler is able
 * to optimize the operation into a series of multiplications, subtractions
 * and shifts.
+ *
 * The 'hash % 5' could become something like 'hash - ((hash * 0xCCCCCCCD) >> 34)
 * * 5' in a 64-bit environment.
+ */
+class prime_growth_policy {
+ public:
+ explicit prime_growth_policy(std::size_t& min_bucket_count_in_out) {
+ auto it_prime = std::lower_bound(
+ detail::PRIMES.begin(), detail::PRIMES.end(), min_bucket_count_in_out);
+ if (it_prime == detail::PRIMES.end()) {
+ TSL_RH_THROW_OR_TERMINATE(std::length_error,
+ "The hash table exceeds its maximum size.");
+ }
+
+ m_iprime = static_cast<unsigned int>(
+ std::distance(detail::PRIMES.begin(), it_prime));
+ if (min_bucket_count_in_out > 0) {
+ min_bucket_count_in_out = *it_prime;
+ } else {
+ min_bucket_count_in_out = 0;
+ }
+ }
+
+ std::size_t bucket_for_hash(std::size_t hash) const noexcept {
+ return detail::MOD_PRIME[m_iprime](hash);
+ }
+
+ std::size_t next_bucket_count() const {
+ if (m_iprime + 1 >= detail::PRIMES.size()) {
+ TSL_RH_THROW_OR_TERMINATE(std::length_error,
+ "The hash table exceeds its maximum size.");
+ }
+
+ return detail::PRIMES[m_iprime + 1];
+ }
+
+ std::size_t max_bucket_count() const { return detail::PRIMES.back(); }
+
+ void clear() noexcept { m_iprime = 0; }
+
+ private:
+ unsigned int m_iprime;
+
+ static_assert(std::numeric_limits<decltype(m_iprime)>::max() >=
+ detail::PRIMES.size(),
+ "The type of m_iprime is not big enough.");
+};
+
+} // namespace rh
+} // namespace tsl
+
+#endif
diff --git a/misc/benchmarks/external/tsl/robin_hash.h b/misc/benchmarks/external/tsl/robin_hash.h
new file mode 100644
index 00000000..89c7c96f
--- /dev/null
+++ b/misc/benchmarks/external/tsl/robin_hash.h
@@ -0,0 +1,1639 @@
+/**
+ * MIT License
+ *
+ * Copyright (c) 2017 Thibaut Goetghebuer-Planchon <[email protected]>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef TSL_ROBIN_HASH_H
+#define TSL_ROBIN_HASH_H
+
+#include <algorithm>
+#include <cassert>
+#include <cmath>
+#include <cstddef>
+#include <cstdint>
+#include <exception>
+#include <iterator>
+#include <limits>
+#include <memory>
+#include <new>
+#include <stdexcept>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+#include "robin_growth_policy.h"
+
+namespace tsl {
+
+namespace detail_robin_hash {
+
// Backport of std::void_t (C++17); this header also targets C++11.
template <typename T>
struct make_void {
  using type = void;
};

// Detects whether T exposes a nested `is_transparent` typedef — the marker
// the standard library uses to opt comparators/hashers into heterogeneous
// lookup. Primary template: no such typedef.
template <typename T, typename = void>
struct has_is_transparent : std::false_type {};

// Specialization selected via SFINAE when T::is_transparent names a type.
template <typename T>
struct has_is_transparent<T,
                          typename make_void<typename T::is_transparent>::type>
    : std::true_type {};
+
// Trait that is true_type only for tsl::rh::power_of_two_growth_policy (any
// GrowthFactor) and false_type for every other growth policy.
template <typename U>
struct is_power_of_two_policy : std::false_type {};

template <std::size_t GrowthFactor>
struct is_power_of_two_policy<tsl::rh::power_of_two_growth_policy<GrowthFactor>>
    : std::true_type {};
+
// Backport of std::clamp (C++17 only; we need to stay C++11-compatible):
// returns v bounded to [lo, hi]. Requires lo <= hi.
template <class T>
const T& clamp(const T& v, const T& lo, const T& hi) {
  const T& bounded_below = (v < lo) ? lo : v;
  return (hi < bounded_below) ? hi : bounded_below;
}
+
+template <typename T, typename U>
+static T numeric_cast(U value,
+ const char* error_message = "numeric_cast() failed.") {
+ T ret = static_cast<T>(value);
+ if (static_cast<U>(ret) != value) {
+ TSL_RH_THROW_OR_TERMINATE(std::runtime_error, error_message);
+ }
+
+ const bool is_same_signedness =
+ (std::is_unsigned<T>::value && std::is_unsigned<U>::value) ||
+ (std::is_signed<T>::value && std::is_signed<U>::value);
+ if (!is_same_signedness && (ret < T{}) != (value < U{})) {
+ TSL_RH_THROW_OR_TERMINATE(std::runtime_error, error_message);
+ }
+
+ return ret;
+}
+
// Reads a value of type T from `deserializer` by invoking its templated call
// operator, i.e. deserializer.operator()<T>().
template <class T, class Deserializer>
static T deserialize_value(Deserializer& deserializer) {
  // MSVC < 2017 is not conformant, circumvent the problem by removing the
  // template keyword
#if defined(_MSC_VER) && _MSC_VER < 1910
  return deserializer.Deserializer::operator()<T>();
#else
  return deserializer.Deserializer::template operator()<T>();
#endif
}
+
/**
 * Fixed size type used to represent size_type values on serialization. Need to
 * be big enough to represent a std::size_t on 32 and 64 bits platforms, and
 * must be the same size on both platforms.
 */
using slz_size_type = std::uint64_t;
static_assert(std::numeric_limits<slz_size_type>::max() >=
                  std::numeric_limits<std::size_t>::max(),
              "slz_size_type must be >= std::size_t");

// 32-bit truncation of a std::size_t hash; this is what bucket_entry_hash
// stores per bucket when StoreHash is enabled (see below).
using truncated_hash_type = std::uint32_t;
+
/**
 * Helper class that stores a truncated hash if StoreHash is true and nothing
 * otherwise.
 */
template <bool StoreHash>
class bucket_entry_hash {
 public:
  // No hash is stored, so every hash "matches" and lookups must fall back to
  // the full key comparison.
  bool bucket_hash_equal(std::size_t /*hash*/) const noexcept { return true; }

  truncated_hash_type truncated_hash() const noexcept { return 0; }

 protected:
  void set_hash(truncated_hash_type /*hash*/) noexcept {}
};

// StoreHash == true: keeps the truncated (low 32) bits of the hash so buckets
// whose stored bits differ can be rejected cheaply.
template <>
class bucket_entry_hash<true> {
 public:
  bool bucket_hash_equal(std::size_t hash) const noexcept {
    return m_hash == truncated_hash_type(hash);
  }

  truncated_hash_type truncated_hash() const noexcept { return m_hash; }

 protected:
  void set_hash(truncated_hash_type hash) noexcept {
    m_hash = truncated_hash_type(hash);
  }

 private:
  truncated_hash_type m_hash;
};
+
+/**
+ * Each bucket entry has:
+ * - A value of type `ValueType`.
+ * - An integer to store how far the value of the bucket, if any, is from its
+ * ideal bucket (ex: if the current bucket 5 has the value 'foo' and
+ * `hash('foo') % nb_buckets` == 3, `dist_from_ideal_bucket()` will return 2 as
+ * the current value of the bucket is two buckets away from its ideal bucket) If
+ * there is no value in the bucket (i.e. `empty()` is true)
+ * `dist_from_ideal_bucket()` will be < 0.
+ * - A marker which tells us if the bucket is the last bucket of the bucket
+ * array (useful for the iterator of the hash table).
+ * - If `StoreHash` is true, 32 bits of the hash of the value, if any, are also
+ * stored in the bucket. If the size of the hash is more than 32 bits, it is
+ * truncated. We don't store the full hash as storing the hash is a potential
+ * opportunity to use the unused space due to the alignment of the bucket_entry
+ * structure. We can thus potentially store the hash without any extra space
+ * (which would not be possible with 64 bits of the hash).
+ */
template <typename ValueType, bool StoreHash>
class bucket_entry : public bucket_entry_hash<StoreHash> {
  using bucket_hash = bucket_entry_hash<StoreHash>;

 public:
  using value_type = ValueType;
  using distance_type = std::int16_t;

  // Constructs an empty, non-last bucket.
  bucket_entry() noexcept
      : bucket_hash(),
        m_dist_from_ideal_bucket(EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET),
        m_last_bucket(false) {
    tsl_rh_assert(empty());
  }

  bucket_entry(bool last_bucket) noexcept
      : bucket_hash(),
        m_dist_from_ideal_bucket(EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET),
        m_last_bucket(last_bucket) {
    tsl_rh_assert(empty());
  }

  // Copy: the value lives in raw aligned storage, so it has to be
  // copy-constructed in place with placement new, not copied memberwise.
  bucket_entry(const bucket_entry& other) noexcept(
      std::is_nothrow_copy_constructible<value_type>::value)
      : bucket_hash(other),
        m_dist_from_ideal_bucket(EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET),
        m_last_bucket(other.m_last_bucket) {
    if (!other.empty()) {
      ::new (static_cast<void*>(std::addressof(m_value)))
          value_type(other.value());
      m_dist_from_ideal_bucket = other.m_dist_from_ideal_bucket;
    }
    tsl_rh_assert(empty() == other.empty());
  }

  /**
   * Never really used, but still necessary as we must call resize on an empty
   * `std::vector<bucket_entry>` and we need to support move-only types. See
   * robin_hash constructor for details.
   */
  bucket_entry(bucket_entry&& other) noexcept(
      std::is_nothrow_move_constructible<value_type>::value)
      : bucket_hash(std::move(other)),
        m_dist_from_ideal_bucket(EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET),
        m_last_bucket(other.m_last_bucket) {
    if (!other.empty()) {
      ::new (static_cast<void*>(std::addressof(m_value)))
          value_type(std::move(other.value()));
      m_dist_from_ideal_bucket = other.m_dist_from_ideal_bucket;
    }
    tsl_rh_assert(empty() == other.empty());
  }

  bucket_entry& operator=(const bucket_entry& other) noexcept(
      std::is_nothrow_copy_constructible<value_type>::value) {
    if (this != &other) {
      // Destroy any currently stored value first, then copy-construct the
      // other's value (if any) into the raw storage.
      clear();

      bucket_hash::operator=(other);
      if (!other.empty()) {
        ::new (static_cast<void*>(std::addressof(m_value)))
            value_type(other.value());
      }

      m_dist_from_ideal_bucket = other.m_dist_from_ideal_bucket;
      m_last_bucket = other.m_last_bucket;
    }

    return *this;
  }

  // Move-assignment is deleted; buckets are only ever move-constructed.
  bucket_entry& operator=(bucket_entry&&) = delete;

  ~bucket_entry() noexcept { clear(); }

  // Destroys the stored value (if any) and marks the bucket empty. The
  // last-bucket flag is deliberately left untouched.
  void clear() noexcept {
    if (!empty()) {
      destroy_value();
      m_dist_from_ideal_bucket = EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET;
    }
  }

  bool empty() const noexcept {
    return m_dist_from_ideal_bucket == EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET;
  }

  value_type& value() noexcept {
    tsl_rh_assert(!empty());
#if defined(__cplusplus) && __cplusplus >= 201703L
    // C++17 requires std::launder to access an object created by placement
    // new inside raw storage.
    return *std::launder(
        reinterpret_cast<value_type*>(std::addressof(m_value)));
#else
    return *reinterpret_cast<value_type*>(std::addressof(m_value));
#endif
  }

  const value_type& value() const noexcept {
    tsl_rh_assert(!empty());
#if defined(__cplusplus) && __cplusplus >= 201703L
    return *std::launder(
        reinterpret_cast<const value_type*>(std::addressof(m_value)));
#else
    return *reinterpret_cast<const value_type*>(std::addressof(m_value));
#endif
  }

  distance_type dist_from_ideal_bucket() const noexcept {
    return m_dist_from_ideal_bucket;
  }

  bool last_bucket() const noexcept { return m_last_bucket; }

  void set_as_last_bucket() noexcept { m_last_bucket = true; }

  // Constructs a value into an empty bucket (placement new) and records its
  // probe distance and truncated hash.
  template <typename... Args>
  void set_value_of_empty_bucket(distance_type dist_from_ideal_bucket,
                                 truncated_hash_type hash,
                                 Args&&... value_type_args) {
    tsl_rh_assert(dist_from_ideal_bucket >= 0);
    tsl_rh_assert(empty());

    ::new (static_cast<void*>(std::addressof(m_value)))
        value_type(std::forward<Args>(value_type_args)...);
    this->set_hash(hash);
    m_dist_from_ideal_bucket = dist_from_ideal_bucket;

    tsl_rh_assert(!empty());
  }

  // Robin-hood exchange: swaps the in-flight (value, distance, hash) triple
  // with this bucket's. Per the assert, only called when the incoming
  // element is farther from its ideal bucket than the resident one.
  void swap_with_value_in_bucket(distance_type& dist_from_ideal_bucket,
                                 truncated_hash_type& hash, value_type& value) {
    tsl_rh_assert(!empty());
    tsl_rh_assert(dist_from_ideal_bucket > m_dist_from_ideal_bucket);

    using std::swap;
    swap(value, this->value());
    swap(dist_from_ideal_bucket, m_dist_from_ideal_bucket);

    if (StoreHash) {
      const truncated_hash_type tmp_hash = this->truncated_hash();
      this->set_hash(hash);
      hash = tmp_hash;
    } else {
      // Avoid warning of unused variable if StoreHash is false
      TSL_RH_UNUSED(hash);
    }
  }

  static truncated_hash_type truncate_hash(std::size_t hash) noexcept {
    return truncated_hash_type(hash);
  }

 private:
  void destroy_value() noexcept {
    tsl_rh_assert(!empty());
    value().~value_type();
  }

 public:
  // Sentinel distance marking an empty bucket.
  static const distance_type EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET = -1;
  // Upper bound on the probe distance; enforcement happens in robin_hash
  // (not shown in this header section).
  static const distance_type DIST_FROM_IDEAL_BUCKET_LIMIT = 4096;
  static_assert(DIST_FROM_IDEAL_BUCKET_LIMIT <=
                    std::numeric_limits<distance_type>::max() - 1,
                "DIST_FROM_IDEAL_BUCKET_LIMIT must be <= "
                "std::numeric_limits<distance_type>::max() - 1.");

 private:
  // Raw, correctly aligned storage for the value; constructed and destroyed
  // manually so that empty buckets hold no object at all.
  using storage = typename std::aligned_storage<sizeof(value_type),
                                                alignof(value_type)>::type;

  distance_type m_dist_from_ideal_bucket;
  bool m_last_bucket;
  storage m_value;
};
+
+/**
+ * Internal common class used by `robin_map` and `robin_set`.
+ *
+ * ValueType is what will be stored by `robin_hash` (usually `std::pair<Key, T>`
+ * for map and `Key` for set).
+ *
+ * `KeySelect` should be a `FunctionObject` which takes a `ValueType` in
+ * parameter and returns a reference to the key.
+ *
+ * `ValueSelect` should be a `FunctionObject` which takes a `ValueType` in
+ * parameter and returns a reference to the value. `ValueSelect` should be void
+ * if there is no value (in a set for example).
+ *
+ * The strong exception guarantee only holds if the expression
+ * `std::is_nothrow_swappable<ValueType>::value &&
+ * std::is_nothrow_move_constructible<ValueType>::value` is true.
+ *
+ * Behaviour is undefined if the destructor of `ValueType` throws.
+ */
+template <class ValueType, class KeySelect, class ValueSelect, class Hash,
+ class KeyEqual, class Allocator, bool StoreHash, class GrowthPolicy>
+class robin_hash : private Hash, private KeyEqual, private GrowthPolicy {
+ private:
+ template <typename U>
+ using has_mapped_type =
+ typename std::integral_constant<bool, !std::is_same<U, void>::value>;
+
+ static_assert(
+ noexcept(std::declval<GrowthPolicy>().bucket_for_hash(std::size_t(0))),
+ "GrowthPolicy::bucket_for_hash must be noexcept.");
+ static_assert(noexcept(std::declval<GrowthPolicy>().clear()),
+ "GrowthPolicy::clear must be noexcept.");
+
+ public:
+ template <bool IsConst>
+ class robin_iterator;
+
+ using key_type = typename KeySelect::key_type;
+ using value_type = ValueType;
+ using size_type = std::size_t;
+ using difference_type = std::ptrdiff_t;
+ using hasher = Hash;
+ using key_equal = KeyEqual;
+ using allocator_type = Allocator;
+ using reference = value_type&;
+ using const_reference = const value_type&;
+ using pointer = value_type*;
+ using const_pointer = const value_type*;
+ using iterator = robin_iterator<false>;
+ using const_iterator = robin_iterator<true>;
+
+ private:
+ /**
+ * Either store the hash because we are asked by the `StoreHash` template
+ * parameter or store the hash because it doesn't cost us anything in size and
+ * can be used to speed up rehash.
+ */
+ static constexpr bool STORE_HASH =
+ StoreHash ||
+ ((sizeof(tsl::detail_robin_hash::bucket_entry<value_type, true>) ==
+ sizeof(tsl::detail_robin_hash::bucket_entry<value_type, false>)) &&
+ (sizeof(std::size_t) == sizeof(truncated_hash_type) ||
+ is_power_of_two_policy<GrowthPolicy>::value) &&
+ // Don't store the hash for primitive types with default hash.
+ (!std::is_arithmetic<key_type>::value ||
+ !std::is_same<Hash, std::hash<key_type>>::value));
+
+ /**
+ * Only use the stored hash on lookup if we are explicitly asked. We are not
+ * sure how slow the KeyEqual operation is. An extra comparison may slow
+ * things down with a fast KeyEqual.
+ */
+ static constexpr bool USE_STORED_HASH_ON_LOOKUP = StoreHash;
+
+  /**
+   * The stored hash can only be reused during a rehash when it wasn't
+   * truncated: either truncated_hash_type is as wide as std::size_t, or the
+   * growth policy is a power-of-two modulo and every bucket index of the new
+   * table still fits in truncated_hash_type.
+   */
+  static bool USE_STORED_HASH_ON_REHASH(size_type bucket_count) {
+    if (STORE_HASH && sizeof(std::size_t) == sizeof(truncated_hash_type)) {
+      TSL_RH_UNUSED(bucket_count);
+      return true;
+    }
+
+    if (STORE_HASH && is_power_of_two_policy<GrowthPolicy>::value) {
+      return bucket_count == 0 ||
+             (bucket_count - 1) <=
+                 std::numeric_limits<truncated_hash_type>::max();
+    }
+
+    TSL_RH_UNUSED(bucket_count);
+    return false;
+  }
+
+ using bucket_entry =
+ tsl::detail_robin_hash::bucket_entry<value_type, STORE_HASH>;
+ using distance_type = typename bucket_entry::distance_type;
+
+ using buckets_allocator = typename std::allocator_traits<
+ allocator_type>::template rebind_alloc<bucket_entry>;
+ using buckets_container_type = std::vector<bucket_entry, buckets_allocator>;
+
+ public:
+ /**
+ * The 'operator*()' and 'operator->()' methods return a const reference and
+ * const pointer respectively to the stored value type.
+ *
+ * In case of a map, to get a mutable reference to the value associated to a
+ * key (the '.second' in the stored pair), you have to call 'value()'.
+ *
+ * The main reason for this is that if we returned a `std::pair<Key, T>&`
+ * instead of a `const std::pair<Key, T>&`, the user may modify the key which
+ * will put the map in a undefined state.
+ */
+  template <bool IsConst>
+  class robin_iterator {
+    friend class robin_hash;
+
+   private:
+    using bucket_entry_ptr =
+        typename std::conditional<IsConst, const bucket_entry*,
+                                  bucket_entry*>::type;
+
+    robin_iterator(bucket_entry_ptr bucket) noexcept : m_bucket(bucket) {}
+
+   public:
+    using iterator_category = std::forward_iterator_tag;
+    using value_type = const typename robin_hash::value_type;
+    using difference_type = std::ptrdiff_t;
+    using reference = value_type&;
+    using pointer = value_type*;
+
+    // Value-initialize m_bucket so that two default-constructed (singular)
+    // iterators compare equal, as the C++14 forward-iterator requirements
+    // mandate; the original left the pointer indeterminate, making such a
+    // comparison undefined behaviour.
+    robin_iterator() noexcept : m_bucket(nullptr) {}
+
+    // Copy constructor from iterator to const_iterator.
+    template <bool TIsConst = IsConst,
+              typename std::enable_if<TIsConst>::type* = nullptr>
+    robin_iterator(const robin_iterator<!TIsConst>& other) noexcept
+        : m_bucket(other.m_bucket) {}
+
+    robin_iterator(const robin_iterator& other) = default;
+    robin_iterator(robin_iterator&& other) = default;
+    robin_iterator& operator=(const robin_iterator& other) = default;
+    robin_iterator& operator=(robin_iterator&& other) = default;
+
+    // Key of the element the iterator points to.
+    const typename robin_hash::key_type& key() const {
+      return KeySelect()(m_bucket->value());
+    }
+
+    // Mapped value (map only); const overload.
+    template <class U = ValueSelect,
+              typename std::enable_if<has_mapped_type<U>::value &&
+                                      IsConst>::type* = nullptr>
+    const typename U::value_type& value() const {
+      return U()(m_bucket->value());
+    }
+
+    // Mapped value (map only); mutable overload for non-const iterators.
+    template <class U = ValueSelect,
+              typename std::enable_if<has_mapped_type<U>::value &&
+                                      !IsConst>::type* = nullptr>
+    typename U::value_type& value() const {
+      return U()(m_bucket->value());
+    }
+
+    reference operator*() const { return m_bucket->value(); }
+
+    pointer operator->() const { return std::addressof(m_bucket->value()); }
+
+    // Advance to the next non-empty bucket, or one-past-the-last bucket
+    // (== end()) when the sentinel is reached.
+    robin_iterator& operator++() {
+      while (true) {
+        if (m_bucket->last_bucket()) {
+          ++m_bucket;
+          return *this;
+        }
+
+        ++m_bucket;
+        if (!m_bucket->empty()) {
+          return *this;
+        }
+      }
+    }
+
+    robin_iterator operator++(int) {
+      robin_iterator tmp(*this);
+      ++*this;
+
+      return tmp;
+    }
+
+    friend bool operator==(const robin_iterator& lhs,
+                           const robin_iterator& rhs) {
+      return lhs.m_bucket == rhs.m_bucket;
+    }
+
+    friend bool operator!=(const robin_iterator& lhs,
+                           const robin_iterator& rhs) {
+      return !(lhs == rhs);
+    }
+
+   private:
+    bucket_entry_ptr m_bucket;
+  };
+
+ public:
+#if defined(__cplusplus) && __cplusplus >= 201402L
+  /**
+   * Build a table with `bucket_count` buckets. Throws (or terminates,
+   * depending on TSL_RH_THROW_OR_TERMINATE) with std::length_error when the
+   * requested count exceeds max_bucket_count().
+   */
+  robin_hash(size_type bucket_count, const Hash& hash, const KeyEqual& equal,
+             const Allocator& alloc,
+             float min_load_factor = DEFAULT_MIN_LOAD_FACTOR,
+             float max_load_factor = DEFAULT_MAX_LOAD_FACTOR)
+      : Hash(hash),
+        KeyEqual(equal),
+        GrowthPolicy(bucket_count),
+        m_buckets_data(bucket_count, alloc),
+        m_buckets(m_buckets_data.empty() ? static_empty_bucket_ptr()
+                                         : m_buckets_data.data()),
+        m_bucket_count(bucket_count),
+        m_nb_elements(0),
+        m_grow_on_next_insert(false),
+        m_try_shrink_on_next_insert(false) {
+    if (bucket_count > max_bucket_count()) {
+      TSL_RH_THROW_OR_TERMINATE(std::length_error,
+                                "The map exceeds its maximum bucket count.");
+    }
+
+    if (m_bucket_count > 0) {
+      tsl_rh_assert(!m_buckets_data.empty());
+      // The sentinel tells iterators where the bucket array ends.
+      m_buckets_data.back().set_as_last_bucket();
+    }
+
+    this->min_load_factor(min_load_factor);
+    this->max_load_factor(max_load_factor);
+  }
+#else
+  /**
+   * C++11 doesn't support the creation of a std::vector with a custom allocator
+   * and 'count' default-inserted elements. The needed contructor `explicit
+   * vector(size_type count, const Allocator& alloc = Allocator());` is only
+   * available in C++14 and later. We thus must resize after using the
+   * `vector(const Allocator& alloc)` constructor.
+   *
+   * We can't use `vector(size_type count, const T& value, const Allocator&
+   * alloc)` as it requires the value T to be copyable.
+   */
+  robin_hash(size_type bucket_count, const Hash& hash, const KeyEqual& equal,
+             const Allocator& alloc,
+             float min_load_factor = DEFAULT_MIN_LOAD_FACTOR,
+             float max_load_factor = DEFAULT_MAX_LOAD_FACTOR)
+      : Hash(hash),
+        KeyEqual(equal),
+        GrowthPolicy(bucket_count),
+        m_buckets_data(alloc),
+        m_buckets(static_empty_bucket_ptr()),
+        m_bucket_count(bucket_count),
+        m_nb_elements(0),
+        m_grow_on_next_insert(false),
+        m_try_shrink_on_next_insert(false) {
+    if (bucket_count > max_bucket_count()) {
+      TSL_RH_THROW_OR_TERMINATE(std::length_error,
+                                "The map exceeds its maximum bucket count.");
+    }
+
+    if (m_bucket_count > 0) {
+      m_buckets_data.resize(m_bucket_count);
+      m_buckets = m_buckets_data.data();
+
+      tsl_rh_assert(!m_buckets_data.empty());
+      // The sentinel tells iterators where the bucket array ends.
+      m_buckets_data.back().set_as_last_bucket();
+    }
+
+    this->min_load_factor(min_load_factor);
+    this->max_load_factor(max_load_factor);
+  }
+#endif
+
+  // Copy constructor: deep-copies the bucket vector, then re-derives
+  // m_buckets since the copy lives at a different address.
+  robin_hash(const robin_hash& other)
+      : Hash(other),
+        KeyEqual(other),
+        GrowthPolicy(other),
+        m_buckets_data(other.m_buckets_data),
+        m_buckets(m_buckets_data.empty() ? static_empty_bucket_ptr()
+                                         : m_buckets_data.data()),
+        m_bucket_count(other.m_bucket_count),
+        m_nb_elements(other.m_nb_elements),
+        m_load_threshold(other.m_load_threshold),
+        m_min_load_factor(other.m_min_load_factor),
+        m_max_load_factor(other.m_max_load_factor),
+        m_grow_on_next_insert(other.m_grow_on_next_insert),
+        m_try_shrink_on_next_insert(other.m_try_shrink_on_next_insert) {}
+
+  // Move constructor: steals the bucket storage and leaves `other` in a
+  // valid, empty state via clear_and_shrink().
+  robin_hash(robin_hash&& other) noexcept(
+      std::is_nothrow_move_constructible<
+          Hash>::value&& std::is_nothrow_move_constructible<KeyEqual>::value&&
+          std::is_nothrow_move_constructible<GrowthPolicy>::value&&
+              std::is_nothrow_move_constructible<buckets_container_type>::value)
+      : Hash(std::move(static_cast<Hash&>(other))),
+        KeyEqual(std::move(static_cast<KeyEqual&>(other))),
+        GrowthPolicy(std::move(static_cast<GrowthPolicy&>(other))),
+        m_buckets_data(std::move(other.m_buckets_data)),
+        m_buckets(m_buckets_data.empty() ? static_empty_bucket_ptr()
+                                         : m_buckets_data.data()),
+        m_bucket_count(other.m_bucket_count),
+        m_nb_elements(other.m_nb_elements),
+        m_load_threshold(other.m_load_threshold),
+        m_min_load_factor(other.m_min_load_factor),
+        m_max_load_factor(other.m_max_load_factor),
+        m_grow_on_next_insert(other.m_grow_on_next_insert),
+        m_try_shrink_on_next_insert(other.m_try_shrink_on_next_insert) {
+    other.clear_and_shrink();
+  }
+
+  // Copy assignment: self-assignment safe; m_buckets must be re-derived from
+  // the freshly copied vector, not copied from `other`.
+  robin_hash& operator=(const robin_hash& other) {
+    if (&other != this) {
+      Hash::operator=(other);
+      KeyEqual::operator=(other);
+      GrowthPolicy::operator=(other);
+
+      m_buckets_data = other.m_buckets_data;
+      m_buckets = m_buckets_data.empty() ? static_empty_bucket_ptr()
+                                         : m_buckets_data.data();
+      m_bucket_count = other.m_bucket_count;
+      m_nb_elements = other.m_nb_elements;
+
+      m_load_threshold = other.m_load_threshold;
+      m_min_load_factor = other.m_min_load_factor;
+      m_max_load_factor = other.m_max_load_factor;
+
+      m_grow_on_next_insert = other.m_grow_on_next_insert;
+      m_try_shrink_on_next_insert = other.m_try_shrink_on_next_insert;
+    }
+
+    return *this;
+  }
+
+  // Move assignment via swap; `other` ends up empty (clear_and_shrink), not
+  // holding this table's old contents.
+  robin_hash& operator=(robin_hash&& other) {
+    other.swap(*this);
+    other.clear_and_shrink();
+
+    return *this;
+  }
+
+  // Copy of the allocator used by the underlying bucket storage.
+  allocator_type get_allocator() const {
+    return m_buckets_data.get_allocator();
+  }
+
+  /*
+   * Iterators
+   */
+  iterator begin() noexcept {
+    // Skip leading empty buckets to reach the first element (or end()).
+    std::size_t i = 0;
+    while (i < m_bucket_count && m_buckets[i].empty()) {
+      i++;
+    }
+
+    return iterator(m_buckets + i);
+  }
+
+  const_iterator begin() const noexcept { return cbegin(); }
+
+  const_iterator cbegin() const noexcept {
+    // Same scan as begin(), const flavour.
+    std::size_t i = 0;
+    while (i < m_bucket_count && m_buckets[i].empty()) {
+      i++;
+    }
+
+    return const_iterator(m_buckets + i);
+  }
+
+  iterator end() noexcept { return iterator(m_buckets + m_bucket_count); }
+
+  const_iterator end() const noexcept { return cend(); }
+
+  const_iterator cend() const noexcept {
+    return const_iterator(m_buckets + m_bucket_count);
+  }
+
+  /*
+   * Capacity
+   */
+  bool empty() const noexcept { return m_nb_elements == 0; }
+
+  size_type size() const noexcept { return m_nb_elements; }
+
+  size_type max_size() const noexcept { return m_buckets_data.max_size(); }
+
+  /*
+   * Modifiers
+   */
+  // Remove all elements. When a min_load_factor is set, also release the
+  // bucket storage (clear_and_shrink) so the table doesn't stay oversized;
+  // otherwise keep the buckets and just empty them.
+  void clear() noexcept {
+    if (m_min_load_factor > 0.0f) {
+      clear_and_shrink();
+    } else {
+      for (auto& bucket : m_buckets_data) {
+        bucket.clear();
+      }
+
+      m_nb_elements = 0;
+      m_grow_on_next_insert = false;
+    }
+  }
+
+  // Insert `value`; returns {iterator to element, true if inserted}.
+  template <typename P>
+  std::pair<iterator, bool> insert(P&& value) {
+    return insert_impl(KeySelect()(value), std::forward<P>(value));
+  }
+
+  // Hinted insert: if the hint already points at an equal key, return it
+  // directly and skip the lookup.
+  template <typename P>
+  iterator insert_hint(const_iterator hint, P&& value) {
+    if (hint != cend() &&
+        compare_keys(KeySelect()(*hint), KeySelect()(value))) {
+      return mutable_iterator(hint);
+    }
+
+    return insert(std::forward<P>(value)).first;
+  }
+
+  // Range insert; for forward iterators, pre-reserve so the loop doesn't
+  // trigger repeated rehashes.
+  template <class InputIt>
+  void insert(InputIt first, InputIt last) {
+    if (std::is_base_of<
+            std::forward_iterator_tag,
+            typename std::iterator_traits<InputIt>::iterator_category>::value) {
+      const auto nb_elements_insert = std::distance(first, last);
+      const size_type nb_free_buckets = m_load_threshold - size();
+      tsl_rh_assert(m_load_threshold >= size());
+
+      if (nb_elements_insert > 0 &&
+          nb_free_buckets < size_type(nb_elements_insert)) {
+        reserve(size() + size_type(nb_elements_insert));
+      }
+    }
+
+    for (; first != last; ++first) {
+      insert(*first);
+    }
+  }
+
+  // Insert, or overwrite the mapped value when the key already exists.
+  // Safe double-forward: try_emplace only consumes `obj` when it inserts.
+  template <class K, class M>
+  std::pair<iterator, bool> insert_or_assign(K&& key, M&& obj) {
+    auto it = try_emplace(std::forward<K>(key), std::forward<M>(obj));
+    if (!it.second) {
+      it.first.value() = std::forward<M>(obj);
+    }
+
+    return it;
+  }
+
+  // Hinted insert_or_assign; uses the hint only when it matches the key.
+  template <class K, class M>
+  iterator insert_or_assign(const_iterator hint, K&& key, M&& obj) {
+    if (hint != cend() && compare_keys(KeySelect()(*hint), key)) {
+      auto it = mutable_iterator(hint);
+      it.value() = std::forward<M>(obj);
+
+      return it;
+    }
+
+    return insert_or_assign(std::forward<K>(key), std::forward<M>(obj)).first;
+  }
+
+  // emplace builds the value eagerly: the key must be known for the lookup.
+  template <class... Args>
+  std::pair<iterator, bool> emplace(Args&&... args) {
+    return insert(value_type(std::forward<Args>(args)...));
+  }
+
+  template <class... Args>
+  iterator emplace_hint(const_iterator hint, Args&&... args) {
+    return insert_hint(hint, value_type(std::forward<Args>(args)...));
+  }
+
+  // try_emplace constructs the mapped value only if the key is absent.
+  template <class K, class... Args>
+  std::pair<iterator, bool> try_emplace(K&& key, Args&&... args) {
+    return insert_impl(key, std::piecewise_construct,
+                       std::forward_as_tuple(std::forward<K>(key)),
+                       std::forward_as_tuple(std::forward<Args>(args)...));
+  }
+
+  template <class K, class... Args>
+  iterator try_emplace_hint(const_iterator hint, K&& key, Args&&... args) {
+    if (hint != cend() && compare_keys(KeySelect()(*hint), key)) {
+      return mutable_iterator(hint);
+    }
+
+    return try_emplace(std::forward<K>(key), std::forward<Args>(args)...).first;
+  }
+
+  /**
+   * Here to avoid `template<class K> size_type erase(const K& key)` being used
+   * when we use an `iterator` instead of a `const_iterator`.
+   */
+  iterator erase(iterator pos) {
+    erase_from_bucket(pos);
+
+    /**
+     * Erase bucket used a backward shift after clearing the bucket.
+     * Check if there is a new value in the bucket, if not get the next
+     * non-empty.
+     */
+    if (pos.m_bucket->empty()) {
+      ++pos;
+    }
+
+    // An element is gone; the next insert may shrink the table if the load
+    // factor dropped below min_load_factor.
+    m_try_shrink_on_next_insert = true;
+
+    return pos;
+  }
+
+  iterator erase(const_iterator pos) { return erase(mutable_iterator(pos)); }
+
+  // Range erase: clear every bucket in [first, last), then backward-shift the
+  // elements that follow so probe distances stay consistent.
+  iterator erase(const_iterator first, const_iterator last) {
+    if (first == last) {
+      return mutable_iterator(first);
+    }
+
+    auto first_mutable = mutable_iterator(first);
+    auto last_mutable = mutable_iterator(last);
+    for (auto it = first_mutable.m_bucket; it != last_mutable.m_bucket; ++it) {
+      if (!it->empty()) {
+        it->clear();
+        m_nb_elements--;
+      }
+    }
+
+    if (last_mutable == end()) {
+      m_try_shrink_on_next_insert = true;
+      return end();
+    }
+
+    /*
+     * Backward shift on the values which come after the deleted values.
+     * We try to move the values closer to their ideal bucket.
+     */
+    std::size_t icloser_bucket =
+        static_cast<std::size_t>(first_mutable.m_bucket - m_buckets);
+    std::size_t ito_move_closer_value =
+        static_cast<std::size_t>(last_mutable.m_bucket - m_buckets);
+    tsl_rh_assert(ito_move_closer_value > icloser_bucket);
+
+    // Where the first element after the erased range will land; this is the
+    // iterator we must return.
+    const std::size_t ireturn_bucket =
+        ito_move_closer_value -
+        std::min(
+            ito_move_closer_value - icloser_bucket,
+            std::size_t(
+                m_buckets[ito_move_closer_value].dist_from_ideal_bucket()));
+
+    while (ito_move_closer_value < m_bucket_count &&
+           m_buckets[ito_move_closer_value].dist_from_ideal_bucket() > 0) {
+      // Move each trailing element as close to its ideal bucket as the gap
+      // allows, without ever moving it before that ideal bucket.
+      icloser_bucket =
+          ito_move_closer_value -
+          std::min(
+              ito_move_closer_value - icloser_bucket,
+              std::size_t(
+                  m_buckets[ito_move_closer_value].dist_from_ideal_bucket()));
+
+      tsl_rh_assert(m_buckets[icloser_bucket].empty());
+      const distance_type new_distance = distance_type(
+          m_buckets[ito_move_closer_value].dist_from_ideal_bucket() -
+          (ito_move_closer_value - icloser_bucket));
+      m_buckets[icloser_bucket].set_value_of_empty_bucket(
+          new_distance, m_buckets[ito_move_closer_value].truncated_hash(),
+          std::move(m_buckets[ito_move_closer_value].value()));
+      m_buckets[ito_move_closer_value].clear();
+
+      ++icloser_bucket;
+      ++ito_move_closer_value;
+    }
+
+    m_try_shrink_on_next_insert = true;
+
+    return iterator(m_buckets + ireturn_bucket);
+  }
+
+  // Erase by key; returns the number of elements removed (0 or 1).
+  template <class K>
+  size_type erase(const K& key) {
+    return erase(key, hash_key(key));
+  }
+
+  // Erase by key with a precomputed hash.
+  template <class K>
+  size_type erase(const K& key, std::size_t hash) {
+    auto it = find(key, hash);
+    if (it != end()) {
+      erase_from_bucket(it);
+      m_try_shrink_on_next_insert = true;
+
+      return 1;
+    } else {
+      return 0;
+    }
+  }
+
+  // Member-wise swap, including the Hash/KeyEqual/GrowthPolicy bases.
+  void swap(robin_hash& other) {
+    using std::swap;
+
+    swap(static_cast<Hash&>(*this), static_cast<Hash&>(other));
+    swap(static_cast<KeyEqual&>(*this), static_cast<KeyEqual&>(other));
+    swap(static_cast<GrowthPolicy&>(*this), static_cast<GrowthPolicy&>(other));
+    swap(m_buckets_data, other.m_buckets_data);
+    swap(m_buckets, other.m_buckets);
+    swap(m_bucket_count, other.m_bucket_count);
+    swap(m_nb_elements, other.m_nb_elements);
+    swap(m_load_threshold, other.m_load_threshold);
+    swap(m_min_load_factor, other.m_min_load_factor);
+    swap(m_max_load_factor, other.m_max_load_factor);
+    swap(m_grow_on_next_insert, other.m_grow_on_next_insert);
+    swap(m_try_shrink_on_next_insert, other.m_try_shrink_on_next_insert);
+  }
+
+  /*
+   * Lookup
+   */
+  // at(): checked access to the mapped value (map only, enabled via
+  // has_mapped_type). Throws/terminates with std::out_of_range when absent.
+  template <class K, class U = ValueSelect,
+            typename std::enable_if<has_mapped_type<U>::value>::type* = nullptr>
+  typename U::value_type& at(const K& key) {
+    return at(key, hash_key(key));
+  }
+
+  // Non-const at() delegates to the const overload and casts away const.
+  template <class K, class U = ValueSelect,
+            typename std::enable_if<has_mapped_type<U>::value>::type* = nullptr>
+  typename U::value_type& at(const K& key, std::size_t hash) {
+    return const_cast<typename U::value_type&>(
+        static_cast<const robin_hash*>(this)->at(key, hash));
+  }
+
+  template <class K, class U = ValueSelect,
+            typename std::enable_if<has_mapped_type<U>::value>::type* = nullptr>
+  const typename U::value_type& at(const K& key) const {
+    return at(key, hash_key(key));
+  }
+
+  template <class K, class U = ValueSelect,
+            typename std::enable_if<has_mapped_type<U>::value>::type* = nullptr>
+  const typename U::value_type& at(const K& key, std::size_t hash) const {
+    auto it = find(key, hash);
+    if (it != cend()) {
+      return it.value();
+    } else {
+      TSL_RH_THROW_OR_TERMINATE(std::out_of_range, "Couldn't find key.");
+    }
+  }
+
+  // operator[]: default-construct the mapped value when the key is absent.
+  template <class K, class U = ValueSelect,
+            typename std::enable_if<has_mapped_type<U>::value>::type* = nullptr>
+  typename U::value_type& operator[](K&& key) {
+    return try_emplace(std::forward<K>(key)).first.value();
+  }
+
+  // Number of elements with the given key: 0 or 1 (keys are unique).
+  template <class K>
+  size_type count(const K& key) const {
+    return count(key, hash_key(key));
+  }
+
+  template <class K>
+  size_type count(const K& key, std::size_t hash) const {
+    if (find(key, hash) != cend()) {
+      return 1;
+    } else {
+      return 0;
+    }
+  }
+
+  // find() overloads, with and without a precomputed hash.
+  template <class K>
+  iterator find(const K& key) {
+    return find_impl(key, hash_key(key));
+  }
+
+  template <class K>
+  iterator find(const K& key, std::size_t hash) {
+    return find_impl(key, hash);
+  }
+
+  template <class K>
+  const_iterator find(const K& key) const {
+    return find_impl(key, hash_key(key));
+  }
+
+  template <class K>
+  const_iterator find(const K& key, std::size_t hash) const {
+    return find_impl(key, hash);
+  }
+
+  template <class K>
+  bool contains(const K& key) const {
+    return contains(key, hash_key(key));
+  }
+
+  template <class K>
+  bool contains(const K& key, std::size_t hash) const {
+    return count(key, hash) != 0;
+  }
+
+  // equal_range: keys are unique, so the range is empty or a single element.
+  template <class K>
+  std::pair<iterator, iterator> equal_range(const K& key) {
+    return equal_range(key, hash_key(key));
+  }
+
+  template <class K>
+  std::pair<iterator, iterator> equal_range(const K& key, std::size_t hash) {
+    iterator it = find(key, hash);
+    return std::make_pair(it, (it == end()) ? it : std::next(it));
+  }
+
+  template <class K>
+  std::pair<const_iterator, const_iterator> equal_range(const K& key) const {
+    return equal_range(key, hash_key(key));
+  }
+
+  template <class K>
+  std::pair<const_iterator, const_iterator> equal_range(
+      const K& key, std::size_t hash) const {
+    const_iterator it = find(key, hash);
+    return std::make_pair(it, (it == cend()) ? it : std::next(it));
+  }
+
+  /*
+   * Bucket interface
+   */
+  size_type bucket_count() const { return m_bucket_count; }
+
+  size_type max_bucket_count() const {
+    return std::min(GrowthPolicy::max_bucket_count(),
+                    m_buckets_data.max_size());
+  }
+
+  /*
+   * Hash policy
+   */
+  float load_factor() const {
+    if (bucket_count() == 0) {
+      return 0;
+    }
+
+    return float(m_nb_elements) / float(bucket_count());
+  }
+
+  float min_load_factor() const { return m_min_load_factor; }
+
+  float max_load_factor() const { return m_max_load_factor; }
+
+  // Setters clamp the requested factor into the supported range; the max
+  // setter also recomputes the element-count threshold that triggers growth.
+  void min_load_factor(float ml) {
+    m_min_load_factor = clamp(ml, float(MINIMUM_MIN_LOAD_FACTOR),
+                              float(MAXIMUM_MIN_LOAD_FACTOR));
+  }
+
+  void max_load_factor(float ml) {
+    m_max_load_factor = clamp(ml, float(MINIMUM_MAX_LOAD_FACTOR),
+                              float(MAXIMUM_MAX_LOAD_FACTOR));
+    m_load_threshold = size_type(float(bucket_count()) * m_max_load_factor);
+    tsl_rh_assert(bucket_count() == 0 || m_load_threshold < bucket_count());
+  }
+
+  // rehash never shrinks below what the current size and max_load_factor
+  // require.
+  void rehash(size_type count_) {
+    count_ = std::max(count_,
+                      size_type(std::ceil(float(size()) / max_load_factor())));
+    rehash_impl(count_);
+  }
+
+  void reserve(size_type count_) {
+    rehash(size_type(std::ceil(float(count_) / max_load_factor())));
+  }
+
+  /*
+   * Observers
+   */
+  hasher hash_function() const { return static_cast<const Hash&>(*this); }
+
+  key_equal key_eq() const { return static_cast<const KeyEqual&>(*this); }
+
+  /*
+   * Other
+   */
+  // Strip constness from a const_iterator; safe because *this is non-const.
+  iterator mutable_iterator(const_iterator pos) {
+    return iterator(const_cast<bucket_entry*>(pos.m_bucket));
+  }
+
+  template <class Serializer>
+  void serialize(Serializer& serializer) const {
+    serialize_impl(serializer);
+  }
+
+  template <class Deserializer>
+  void deserialize(Deserializer& deserializer, bool hash_compatible) {
+    deserialize_impl(deserializer, hash_compatible);
+  }
+
+ private:
+  template <class K>
+  std::size_t hash_key(const K& key) const {
+    return Hash::operator()(key);
+  }
+
+  template <class K1, class K2>
+  bool compare_keys(const K1& key1, const K2& key2) const {
+    return KeyEqual::operator()(key1, key2);
+  }
+
+  // Map a hash to a bucket index through the growth policy.
+  std::size_t bucket_for_hash(std::size_t hash) const {
+    const std::size_t bucket = GrowthPolicy::bucket_for_hash(hash);
+    tsl_rh_assert(bucket < m_bucket_count ||
+                  (bucket == 0 && m_bucket_count == 0));
+
+    return bucket;
+  }
+
+  // Wrapping increment; the power-of-two policy can use a cheap bitmask.
+  template <class U = GrowthPolicy,
+            typename std::enable_if<is_power_of_two_policy<U>::value>::type* =
+                nullptr>
+  std::size_t next_bucket(std::size_t index) const noexcept {
+    tsl_rh_assert(index < bucket_count());
+
+    return (index + 1) & this->m_mask;
+  }
+
+  // Generic fallback: increment and wrap with an explicit comparison.
+  template <class U = GrowthPolicy,
+            typename std::enable_if<!is_power_of_two_policy<U>::value>::type* =
+                nullptr>
+  std::size_t next_bucket(std::size_t index) const noexcept {
+    tsl_rh_assert(index < bucket_count());
+
+    index++;
+    return (index != bucket_count()) ? index : 0;
+  }
+
+  template <class K>
+  iterator find_impl(const K& key, std::size_t hash) {
+    return mutable_iterator(
+        static_cast<const robin_hash*>(this)->find(key, hash));
+  }
+
+  // Core probe loop: robin-hood invariant lets us stop as soon as our probe
+  // distance exceeds the occupant's, since the key cannot be further on.
+  template <class K>
+  const_iterator find_impl(const K& key, std::size_t hash) const {
+    std::size_t ibucket = bucket_for_hash(hash);
+    distance_type dist_from_ideal_bucket = 0;
+
+    while (dist_from_ideal_bucket <=
+           m_buckets[ibucket].dist_from_ideal_bucket()) {
+      if (TSL_RH_LIKELY(
+              (!USE_STORED_HASH_ON_LOOKUP ||
+               m_buckets[ibucket].bucket_hash_equal(hash)) &&
+              compare_keys(KeySelect()(m_buckets[ibucket].value()), key))) {
+        return const_iterator(m_buckets + ibucket);
+      }
+
+      ibucket = next_bucket(ibucket);
+      dist_from_ideal_bucket++;
+    }
+
+    return cend();
+  }
+
+  // Clear the bucket at `pos` and repair the probe sequence that follows.
+  void erase_from_bucket(iterator pos) {
+    pos.m_bucket->clear();
+    m_nb_elements--;
+
+    /**
+     * Backward shift, swap the empty bucket, previous_ibucket, with the values
+     * on its right, ibucket, until we cross another empty bucket or if the
+     * other bucket has a distance_from_ideal_bucket == 0.
+     *
+     * We try to move the values closer to their ideal bucket.
+     */
+    std::size_t previous_ibucket =
+        static_cast<std::size_t>(pos.m_bucket - m_buckets);
+    std::size_t ibucket = next_bucket(previous_ibucket);
+
+    while (m_buckets[ibucket].dist_from_ideal_bucket() > 0) {
+      tsl_rh_assert(m_buckets[previous_ibucket].empty());
+
+      // Each shifted element moves one slot left, so its distance drops by 1.
+      const distance_type new_distance =
+          distance_type(m_buckets[ibucket].dist_from_ideal_bucket() - 1);
+      m_buckets[previous_ibucket].set_value_of_empty_bucket(
+          new_distance, m_buckets[ibucket].truncated_hash(),
+          std::move(m_buckets[ibucket].value()));
+      m_buckets[ibucket].clear();
+
+      previous_ibucket = ibucket;
+      ibucket = next_bucket(ibucket);
+    }
+  }
+
+  // Shared insertion path for insert/emplace/try_emplace: probe for an
+  // existing key first, possibly rehash, then place (or robin-hood steal) a
+  // slot for the new element.
+  template <class K, class... Args>
+  std::pair<iterator, bool> insert_impl(const K& key,
+                                        Args&&... value_type_args) {
+    const std::size_t hash = hash_key(key);
+
+    std::size_t ibucket = bucket_for_hash(hash);
+    distance_type dist_from_ideal_bucket = 0;
+
+    // Duplicate-key probe: same early-out invariant as find_impl.
+    while (dist_from_ideal_bucket <=
+           m_buckets[ibucket].dist_from_ideal_bucket()) {
+      if ((!USE_STORED_HASH_ON_LOOKUP ||
+           m_buckets[ibucket].bucket_hash_equal(hash)) &&
+          compare_keys(KeySelect()(m_buckets[ibucket].value()), key)) {
+        return std::make_pair(iterator(m_buckets + ibucket), false);
+      }
+
+      ibucket = next_bucket(ibucket);
+      dist_from_ideal_bucket++;
+    }
+
+    // If a grow/shrink happened, the insertion point must be recomputed in
+    // the new bucket array.
+    if (rehash_on_extreme_load()) {
+      ibucket = bucket_for_hash(hash);
+      dist_from_ideal_bucket = 0;
+
+      while (dist_from_ideal_bucket <=
+             m_buckets[ibucket].dist_from_ideal_bucket()) {
+        ibucket = next_bucket(ibucket);
+        dist_from_ideal_bucket++;
+      }
+    }
+
+    if (m_buckets[ibucket].empty()) {
+      m_buckets[ibucket].set_value_of_empty_bucket(
+          dist_from_ideal_bucket, bucket_entry::truncate_hash(hash),
+          std::forward<Args>(value_type_args)...);
+    } else {
+      insert_value(ibucket, dist_from_ideal_bucket,
+                   bucket_entry::truncate_hash(hash),
+                   std::forward<Args>(value_type_args)...);
+    }
+
+    m_nb_elements++;
+    /*
+     * The value will be inserted in ibucket in any case, either because it was
+     * empty or by stealing the bucket (robin hood).
+     */
+    return std::make_pair(iterator(m_buckets + ibucket), true);
+  }
+
+  // Materialize the value, then run the displacement chain below.
+  template <class... Args>
+  void insert_value(std::size_t ibucket, distance_type dist_from_ideal_bucket,
+                    truncated_hash_type hash, Args&&... value_type_args) {
+    value_type value(std::forward<Args>(value_type_args)...);
+    insert_value_impl(ibucket, dist_from_ideal_bucket, hash, value);
+  }
+
+  void insert_value(std::size_t ibucket, distance_type dist_from_ideal_bucket,
+                    truncated_hash_type hash, value_type&& value) {
+    insert_value_impl(ibucket, dist_from_ideal_bucket, hash, value);
+  }
+
+  /*
+   * We don't use `value_type&& value` as last argument due to a bug in MSVC
+   * when `value_type` is a pointer, The compiler is not able to see the
+   * difference between `std::string*` and `std::string*&&` resulting in a
+   * compilation error.
+   *
+   * The `value` will be in a moved state at the end of the function.
+   */
+  void insert_value_impl(std::size_t ibucket,
+                         distance_type dist_from_ideal_bucket,
+                         truncated_hash_type hash, value_type& value) {
+    tsl_rh_assert(dist_from_ideal_bucket >
+                  m_buckets[ibucket].dist_from_ideal_bucket());
+    m_buckets[ibucket].swap_with_value_in_bucket(dist_from_ideal_bucket, hash,
+                                                 value);
+    ibucket = next_bucket(ibucket);
+    dist_from_ideal_bucket++;
+
+    // Carry the displaced element forward, stealing again whenever it is
+    // poorer (further from ideal) than the bucket's occupant.
+    while (!m_buckets[ibucket].empty()) {
+      if (dist_from_ideal_bucket >
+          m_buckets[ibucket].dist_from_ideal_bucket()) {
+        if (dist_from_ideal_bucket >=
+            bucket_entry::DIST_FROM_IDEAL_BUCKET_LIMIT) {
+          /**
+           * The number of probes is really high, rehash the map on the next
+           * insert. Difficult to do now as rehash may throw an exception.
+           */
+          m_grow_on_next_insert = true;
+        }
+
+        m_buckets[ibucket].swap_with_value_in_bucket(dist_from_ideal_bucket,
+                                                     hash, value);
+      }
+
+      ibucket = next_bucket(ibucket);
+      dist_from_ideal_bucket++;
+    }
+
+    m_buckets[ibucket].set_value_of_empty_bucket(dist_from_ideal_bucket, hash,
+                                                 std::move(value));
+  }
+
+  // Rebuild into a fresh table of `count_` buckets and swap it in. Elements
+  // are moved; stored hashes are reused when USE_STORED_HASH_ON_REHASH allows.
+  void rehash_impl(size_type count_) {
+    robin_hash new_table(count_, static_cast<Hash&>(*this),
+                         static_cast<KeyEqual&>(*this), get_allocator(),
+                         m_min_load_factor, m_max_load_factor);
+    tsl_rh_assert(size() <= new_table.m_load_threshold);
+
+    const bool use_stored_hash =
+        USE_STORED_HASH_ON_REHASH(new_table.bucket_count());
+    for (auto& bucket : m_buckets_data) {
+      if (bucket.empty()) {
+        continue;
+      }
+
+      const std::size_t hash =
+          use_stored_hash ? bucket.truncated_hash()
+                          : new_table.hash_key(KeySelect()(bucket.value()));
+
+      new_table.insert_value_on_rehash(new_table.bucket_for_hash(hash), 0,
+                                       bucket_entry::truncate_hash(hash),
+                                       std::move(bucket.value()));
+    }
+
+    new_table.m_nb_elements = m_nb_elements;
+    new_table.swap(*this);
+  }
+
+  // Drop all elements AND the bucket storage, returning to the empty state.
+  void clear_and_shrink() noexcept {
+    GrowthPolicy::clear();
+    m_buckets_data.clear();
+    m_buckets = static_empty_bucket_ptr();
+    m_bucket_count = 0;
+    m_nb_elements = 0;
+    m_load_threshold = 0;
+    m_grow_on_next_insert = false;
+    m_try_shrink_on_next_insert = false;
+  }
+
+  // Rehash-time insert: keys are known unique, so no duplicate probe is
+  // needed — just robin-hood placement.
+  void insert_value_on_rehash(std::size_t ibucket,
+                              distance_type dist_from_ideal_bucket,
+                              truncated_hash_type hash, value_type&& value) {
+    while (true) {
+      if (dist_from_ideal_bucket >
+          m_buckets[ibucket].dist_from_ideal_bucket()) {
+        if (m_buckets[ibucket].empty()) {
+          m_buckets[ibucket].set_value_of_empty_bucket(dist_from_ideal_bucket,
+                                                       hash, std::move(value));
+          return;
+        } else {
+          m_buckets[ibucket].swap_with_value_in_bucket(dist_from_ideal_bucket,
+                                                       hash, value);
+        }
+      }
+
+      dist_from_ideal_bucket++;
+      ibucket = next_bucket(ibucket);
+    }
+  }
+
+  /**
+   * Grow the table if m_grow_on_next_insert is true or we reached the
+   * max_load_factor. Shrink the table if m_try_shrink_on_next_insert is true
+   * (an erase occurred) and we're below the min_load_factor.
+   *
+   * Return true if the table has been rehashed.
+   */
+  bool rehash_on_extreme_load() {
+    if (m_grow_on_next_insert || size() >= m_load_threshold) {
+      rehash_impl(GrowthPolicy::next_bucket_count());
+      m_grow_on_next_insert = false;
+
+      return true;
+    }
+
+    if (m_try_shrink_on_next_insert) {
+      m_try_shrink_on_next_insert = false;
+      if (m_min_load_factor != 0.0f && load_factor() < m_min_load_factor) {
+        // reserve() computes the smallest bucket count that fits size() + 1
+        // within max_load_factor, effectively shrinking the table.
+        reserve(size() + 1);
+
+        return true;
+      }
+    }
+
+    return false;
+  }
+
+  // Write the table in the versioned serialization format: header (version,
+  // StoreHash flag, counts, load factors) followed by one record per bucket.
+  template <class Serializer>
+  void serialize_impl(Serializer& serializer) const {
+    const slz_size_type version = SERIALIZATION_PROTOCOL_VERSION;
+    serializer(version);
+
+    // Indicate if the truncated hash of each bucket is stored. Use a
+    // std::int16_t instead of a bool to avoid the need for the serializer to
+    // support an extra 'bool' type.
+    const std::int16_t hash_stored_for_bucket =
+        static_cast<std::int16_t>(STORE_HASH);
+    serializer(hash_stored_for_bucket);
+
+    const slz_size_type nb_elements = m_nb_elements;
+    serializer(nb_elements);
+
+    const slz_size_type bucket_count = m_buckets_data.size();
+    serializer(bucket_count);
+
+    const float min_load_factor = m_min_load_factor;
+    serializer(min_load_factor);
+
+    const float max_load_factor = m_max_load_factor;
+    serializer(max_load_factor);
+
+    // Per-bucket record: the probe distance (the empty marker for empty
+    // buckets), then optionally the truncated hash, then the value itself.
+    for (const bucket_entry& bucket : m_buckets_data) {
+      if (bucket.empty()) {
+        const std::int16_t empty_bucket =
+            bucket_entry::EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET;
+        serializer(empty_bucket);
+      } else {
+        const std::int16_t dist_from_ideal_bucket =
+            bucket.dist_from_ideal_bucket();
+        serializer(dist_from_ideal_bucket);
+        if (STORE_HASH) {
+          const std::uint32_t truncated_hash = bucket.truncated_hash();
+          serializer(truncated_hash);
+        }
+        serializer(bucket.value());
+      }
+    }
+  }
+
+  /**
+   * Rebuild the table from `deserializer` (counterpart of serialize_impl).
+   *
+   * The current table must be empty. When `hash_compatible` is false the
+   * values are re-inserted one by one (re-hashing every key); when true the
+   * serialized bucket layout is restored verbatim without calling the hash
+   * function, which is only valid if Hash/KeyEqual/GrowthPolicy/StoreHash
+   * match the ones used for serialization.
+   */
+  template <class Deserializer>
+  void deserialize_impl(Deserializer& deserializer, bool hash_compatible) {
+    tsl_rh_assert(m_buckets_data.empty());  // Current hash table must be empty
+
+    const slz_size_type version =
+        deserialize_value<slz_size_type>(deserializer);
+    // For now we only have one version of the serialization protocol.
+    // If it doesn't match there is a problem with the file.
+    if (version != SERIALIZATION_PROTOCOL_VERSION) {
+      // NOTE(review): the message says 'ordered_map/set' — wording kept as-is
+      // from upstream (looks like a copy-paste from tsl::ordered_map).
+      TSL_RH_THROW_OR_TERMINATE(std::runtime_error,
+                                "Can't deserialize the ordered_map/set. "
+                                "The protocol version header is invalid.");
+    }
+
+    const bool hash_stored_for_bucket =
+        deserialize_value<std::int16_t>(deserializer) ? true : false;
+    if (hash_compatible && STORE_HASH != hash_stored_for_bucket) {
+      TSL_RH_THROW_OR_TERMINATE(
+          std::runtime_error,
+          "Can't deserialize a map with a different StoreHash "
+          "than the one used during the serialization when "
+          "hash compatibility is used");
+    }
+
+    const slz_size_type nb_elements =
+        deserialize_value<slz_size_type>(deserializer);
+    const slz_size_type bucket_count_ds =
+        deserialize_value<slz_size_type>(deserializer);
+    const float min_load_factor = deserialize_value<float>(deserializer);
+    const float max_load_factor = deserialize_value<float>(deserializer);
+
+    // Range-check the load factors: a serializer that implicitly converted
+    // the floats to ints would typically produce out-of-range values here.
+    if (min_load_factor < MINIMUM_MIN_LOAD_FACTOR ||
+        min_load_factor > MAXIMUM_MIN_LOAD_FACTOR) {
+      TSL_RH_THROW_OR_TERMINATE(
+          std::runtime_error,
+          "Invalid min_load_factor. Check that the serializer "
+          "and deserializer support floats correctly as they "
+          "can be converted implicitly to ints.");
+    }
+
+    if (max_load_factor < MINIMUM_MAX_LOAD_FACTOR ||
+        max_load_factor > MAXIMUM_MAX_LOAD_FACTOR) {
+      TSL_RH_THROW_OR_TERMINATE(
+          std::runtime_error,
+          "Invalid max_load_factor. Check that the serializer "
+          "and deserializer support floats correctly as they "
+          "can be converted implicitly to ints.");
+    }
+
+    this->min_load_factor(min_load_factor);
+    this->max_load_factor(max_load_factor);
+
+    // An empty serialized table: nothing more to read.
+    if (bucket_count_ds == 0) {
+      tsl_rh_assert(nb_elements == 0);
+      return;
+    }
+
+    if (!hash_compatible) {
+      // Slow path: re-insert every value, recomputing its hash. Any stored
+      // truncated hash is read and discarded.
+      reserve(numeric_cast<size_type>(nb_elements,
+                                      "Deserialized nb_elements is too big."));
+      for (slz_size_type ibucket = 0; ibucket < bucket_count_ds; ibucket++) {
+        const distance_type dist_from_ideal_bucket =
+            deserialize_value<std::int16_t>(deserializer);
+        if (dist_from_ideal_bucket !=
+            bucket_entry::EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET) {
+          if (hash_stored_for_bucket) {
+            TSL_RH_UNUSED(deserialize_value<std::uint32_t>(deserializer));
+          }
+
+          insert(deserialize_value<value_type>(deserializer));
+        }
+      }
+
+      tsl_rh_assert(nb_elements == size());
+    } else {
+      // Fast path: restore the serialized bucket layout verbatim.
+      m_bucket_count = numeric_cast<size_type>(
+          bucket_count_ds, "Deserialized bucket_count is too big.");
+
+      GrowthPolicy::operator=(GrowthPolicy(m_bucket_count));
+      // GrowthPolicy should not modify the bucket count we got from
+      // deserialization
+      if (m_bucket_count != bucket_count_ds) {
+        TSL_RH_THROW_OR_TERMINATE(std::runtime_error,
+                                  "The GrowthPolicy is not the same even "
+                                  "though hash_compatible is true.");
+      }
+
+      m_nb_elements = numeric_cast<size_type>(
+          nb_elements, "Deserialized nb_elements is too big.");
+      m_buckets_data.resize(m_bucket_count);
+      m_buckets = m_buckets_data.data();
+
+      for (bucket_entry& bucket : m_buckets_data) {
+        const distance_type dist_from_ideal_bucket =
+            deserialize_value<std::int16_t>(deserializer);
+        if (dist_from_ideal_bucket !=
+            bucket_entry::EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET) {
+          truncated_hash_type truncated_hash = 0;
+          if (hash_stored_for_bucket) {
+            tsl_rh_assert(hash_stored_for_bucket);
+            truncated_hash = deserialize_value<std::uint32_t>(deserializer);
+          }
+
+          bucket.set_value_of_empty_bucket(
+              dist_from_ideal_bucket, truncated_hash,
+              deserialize_value<value_type>(deserializer));
+        }
+      }
+
+      // The last bucket must carry the end-of-table marker used by iteration.
+      if (!m_buckets_data.empty()) {
+        m_buckets_data.back().set_as_last_bucket();
+      }
+    }
+  }
+
+ public:
+  // 0 buckets by default: no allocation until the first insert/reserve.
+  static const size_type DEFAULT_INIT_BUCKETS_SIZE = 0;
+
+  static constexpr float DEFAULT_MAX_LOAD_FACTOR = 0.5f;
+  static constexpr float MINIMUM_MAX_LOAD_FACTOR = 0.2f;
+  static constexpr float MAXIMUM_MAX_LOAD_FACTOR = 0.95f;
+
+  static constexpr float DEFAULT_MIN_LOAD_FACTOR = 0.0f;
+  static constexpr float MINIMUM_MIN_LOAD_FACTOR = 0.0f;
+  static constexpr float MAXIMUM_MIN_LOAD_FACTOR = 0.15f;
+
+  // Sanity checks: the allowed [min, max] load-factor windows must be
+  // well-formed and non-overlapping.
+  static_assert(MINIMUM_MAX_LOAD_FACTOR < MAXIMUM_MAX_LOAD_FACTOR,
+                "MINIMUM_MAX_LOAD_FACTOR should be < MAXIMUM_MAX_LOAD_FACTOR");
+  static_assert(MINIMUM_MIN_LOAD_FACTOR < MAXIMUM_MIN_LOAD_FACTOR,
+                "MINIMUM_MIN_LOAD_FACTOR should be < MAXIMUM_MIN_LOAD_FACTOR");
+  static_assert(MAXIMUM_MIN_LOAD_FACTOR < MINIMUM_MAX_LOAD_FACTOR,
+                "MAXIMUM_MIN_LOAD_FACTOR should be < MINIMUM_MAX_LOAD_FACTOR");
+
+ private:
+  /**
+   * Protocol version currently used for serialization.
+   */
+  static const slz_size_type SERIALIZATION_PROTOCOL_VERSION = 1;
+
+  /**
+   * Return an always valid pointer to a static empty bucket_entry with
+   * last_bucket() == true. Used so m_buckets never has to be null even when
+   * no storage is allocated.
+   */
+  bucket_entry* static_empty_bucket_ptr() noexcept {
+    static bucket_entry empty_bucket(true);
+    tsl_rh_assert(empty_bucket.empty());
+    return &empty_bucket;
+  }
+
+ private:
+  buckets_container_type m_buckets_data;
+
+  /**
+   * Points to m_buckets_data.data() if !m_buckets_data.empty() otherwise points
+   * to static_empty_bucket_ptr. This variable is useful to avoid the cost of
+   * checking if m_buckets_data is empty when trying to find an element.
+   *
+   * TODO Remove m_buckets_data and only use a pointer instead of a
+   * pointer+vector to save some space in the robin_hash object. Manage the
+   * Allocator manually.
+   */
+  bucket_entry* m_buckets;
+
+  /**
+   * Used a lot in find, avoid the call to m_buckets_data.size() which is a bit
+   * slower.
+   */
+  size_type m_bucket_count;
+
+  // Number of elements currently stored in the table.
+  size_type m_nb_elements;
+
+  // Element count at/above which the next insert triggers a grow-rehash
+  // (see rehash_on_extreme_load).
+  size_type m_load_threshold;
+
+  float m_min_load_factor;
+  float m_max_load_factor;
+
+  // Forces a grow on the next insert even below the load threshold.
+  bool m_grow_on_next_insert;
+
+  /**
+   * We can't shrink down the map on erase operations as the erase methods need
+   * to return the next iterator. Shrinking the map would invalidate all the
+   * iterators and we could not return the next iterator in a meaningful way. On
+   * erase, we thus just indicate on erase that we should try to shrink the hash
+   * table on the next insert if we go below the min_load_factor.
+   */
+  bool m_try_shrink_on_next_insert;
+};
+
+} // namespace detail_robin_hash
+
+} // namespace tsl
+
+#endif
diff --git a/misc/benchmarks/external/tsl/robin_map.h b/misc/benchmarks/external/tsl/robin_map.h
new file mode 100644
index 00000000..aeb354c3
--- /dev/null
+++ b/misc/benchmarks/external/tsl/robin_map.h
@@ -0,0 +1,807 @@
+/**
+ * MIT License
+ *
+ * Copyright (c) 2017 Thibaut Goetghebuer-Planchon <[email protected]>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef TSL_ROBIN_MAP_H
+#define TSL_ROBIN_MAP_H
+
+#include <cstddef>
+#include <functional>
+#include <initializer_list>
+#include <memory>
+#include <type_traits>
+#include <utility>
+
+#include "robin_hash.h"
+
+namespace tsl {
+
+/**
+ * Implementation of a hash map using open-addressing and the robin hood hashing
+ * algorithm with backward shift deletion.
+ *
+ * For operations modifying the hash map (insert, erase, rehash, ...), the
+ * strong exception guarantee is only guaranteed when the expression
+ * `std::is_nothrow_swappable<std::pair<Key, T>>::value &&
+ * std::is_nothrow_move_constructible<std::pair<Key, T>>::value` is true,
+ * otherwise if an exception is thrown during the swap or the move, the hash map
+ * may end up in an undefined state. Per the standard a `Key` or `T` with a
+ * noexcept copy constructor and no move constructor also satisfies the
+ * `std::is_nothrow_move_constructible<std::pair<Key, T>>::value` criterion (and
+ * will thus guarantee the strong exception for the map).
+ *
+ * When `StoreHash` is true, 32 bits of the hash are stored alongside the
+ * values. It can improve the performance during lookups if the `KeyEqual`
+ * function takes time (if it engenders a cache-miss for example) as we then
+ * compare the stored hashes before comparing the keys. When
+ * `tsl::rh::power_of_two_growth_policy` is used as `GrowthPolicy`, it may also
+ * speed-up the rehash process as we can avoid to recalculate the hash. When it
+ * is detected that storing the hash will not incur any memory penalty due to
+ * alignment (i.e. `sizeof(tsl::detail_robin_hash::bucket_entry<ValueType,
+ * true>) == sizeof(tsl::detail_robin_hash::bucket_entry<ValueType, false>)`)
+ * and `tsl::rh::power_of_two_growth_policy` is used, the hash will be stored
+ * even if `StoreHash` is false so that we can speed-up the rehash (but it will
+ * not be used on lookups unless `StoreHash` is true).
+ *
+ * `GrowthPolicy` defines how the map grows and consequently how a hash value is
+ * mapped to a bucket. By default the map uses
+ * `tsl::rh::power_of_two_growth_policy`. This policy keeps the number of
+ * buckets to a power of two and uses a mask to map the hash to a bucket instead
+ * of the slow modulo. Other growth policies are available and you may define
+ * your own growth policy, check `tsl::rh::power_of_two_growth_policy` for the
+ * interface.
+ *
+ * `std::pair<Key, T>` must be swappable.
+ *
+ * `Key` and `T` must be copy and/or move constructible.
+ *
+ * If the destructor of `Key` or `T` throws an exception, the behaviour of the
+ * class is undefined.
+ *
+ * Iterators invalidation:
+ * - clear, operator=, reserve, rehash: always invalidate the iterators.
+ * - insert, emplace, emplace_hint, operator[]: if there is an effective
+ * insert, invalidate the iterators.
+ * - erase: always invalidate the iterators.
+ */
+template <class Key, class T, class Hash = std::hash<Key>,
+ class KeyEqual = std::equal_to<Key>,
+ class Allocator = std::allocator<std::pair<Key, T>>,
+ bool StoreHash = false,
+ class GrowthPolicy = tsl::rh::power_of_two_growth_policy<2>>
+class robin_map {
+ private:
+ template <typename U>
+ using has_is_transparent = tsl::detail_robin_hash::has_is_transparent<U>;
+
+ class KeySelect {
+ public:
+ using key_type = Key;
+
+ const key_type& operator()(
+ const std::pair<Key, T>& key_value) const noexcept {
+ return key_value.first;
+ }
+
+ key_type& operator()(std::pair<Key, T>& key_value) noexcept {
+ return key_value.first;
+ }
+ };
+
+ class ValueSelect {
+ public:
+ using value_type = T;
+
+ const value_type& operator()(
+ const std::pair<Key, T>& key_value) const noexcept {
+ return key_value.second;
+ }
+
+ value_type& operator()(std::pair<Key, T>& key_value) noexcept {
+ return key_value.second;
+ }
+ };
+
+ using ht = detail_robin_hash::robin_hash<std::pair<Key, T>, KeySelect,
+ ValueSelect, Hash, KeyEqual,
+ Allocator, StoreHash, GrowthPolicy>;
+
+ public:
+ using key_type = typename ht::key_type;
+ using mapped_type = T;
+ using value_type = typename ht::value_type;
+ using size_type = typename ht::size_type;
+ using difference_type = typename ht::difference_type;
+ using hasher = typename ht::hasher;
+ using key_equal = typename ht::key_equal;
+ using allocator_type = typename ht::allocator_type;
+ using reference = typename ht::reference;
+ using const_reference = typename ht::const_reference;
+ using pointer = typename ht::pointer;
+ using const_pointer = typename ht::const_pointer;
+ using iterator = typename ht::iterator;
+ using const_iterator = typename ht::const_iterator;
+
+ public:
+ /*
+ * Constructors
+ */
+ robin_map() : robin_map(ht::DEFAULT_INIT_BUCKETS_SIZE) {}
+
+ explicit robin_map(size_type bucket_count, const Hash& hash = Hash(),
+ const KeyEqual& equal = KeyEqual(),
+ const Allocator& alloc = Allocator())
+ : m_ht(bucket_count, hash, equal, alloc) {}
+
+ robin_map(size_type bucket_count, const Allocator& alloc)
+ : robin_map(bucket_count, Hash(), KeyEqual(), alloc) {}
+
+ robin_map(size_type bucket_count, const Hash& hash, const Allocator& alloc)
+ : robin_map(bucket_count, hash, KeyEqual(), alloc) {}
+
+ explicit robin_map(const Allocator& alloc)
+ : robin_map(ht::DEFAULT_INIT_BUCKETS_SIZE, alloc) {}
+
+ template <class InputIt>
+ robin_map(InputIt first, InputIt last,
+ size_type bucket_count = ht::DEFAULT_INIT_BUCKETS_SIZE,
+ const Hash& hash = Hash(), const KeyEqual& equal = KeyEqual(),
+ const Allocator& alloc = Allocator())
+ : robin_map(bucket_count, hash, equal, alloc) {
+ insert(first, last);
+ }
+
+ template <class InputIt>
+ robin_map(InputIt first, InputIt last, size_type bucket_count,
+ const Allocator& alloc)
+ : robin_map(first, last, bucket_count, Hash(), KeyEqual(), alloc) {}
+
+ template <class InputIt>
+ robin_map(InputIt first, InputIt last, size_type bucket_count,
+ const Hash& hash, const Allocator& alloc)
+ : robin_map(first, last, bucket_count, hash, KeyEqual(), alloc) {}
+
+ robin_map(std::initializer_list<value_type> init,
+ size_type bucket_count = ht::DEFAULT_INIT_BUCKETS_SIZE,
+ const Hash& hash = Hash(), const KeyEqual& equal = KeyEqual(),
+ const Allocator& alloc = Allocator())
+ : robin_map(init.begin(), init.end(), bucket_count, hash, equal, alloc) {}
+
+ robin_map(std::initializer_list<value_type> init, size_type bucket_count,
+ const Allocator& alloc)
+ : robin_map(init.begin(), init.end(), bucket_count, Hash(), KeyEqual(),
+ alloc) {}
+
+ robin_map(std::initializer_list<value_type> init, size_type bucket_count,
+ const Hash& hash, const Allocator& alloc)
+ : robin_map(init.begin(), init.end(), bucket_count, hash, KeyEqual(),
+ alloc) {}
+
+ robin_map& operator=(std::initializer_list<value_type> ilist) {
+ m_ht.clear();
+
+ m_ht.reserve(ilist.size());
+ m_ht.insert(ilist.begin(), ilist.end());
+
+ return *this;
+ }
+
+ allocator_type get_allocator() const { return m_ht.get_allocator(); }
+
+ /*
+ * Iterators
+ */
+ iterator begin() noexcept { return m_ht.begin(); }
+ const_iterator begin() const noexcept { return m_ht.begin(); }
+ const_iterator cbegin() const noexcept { return m_ht.cbegin(); }
+
+ iterator end() noexcept { return m_ht.end(); }
+ const_iterator end() const noexcept { return m_ht.end(); }
+ const_iterator cend() const noexcept { return m_ht.cend(); }
+
+ /*
+ * Capacity
+ */
+ bool empty() const noexcept { return m_ht.empty(); }
+ size_type size() const noexcept { return m_ht.size(); }
+ size_type max_size() const noexcept { return m_ht.max_size(); }
+
+ /*
+ * Modifiers
+ */
+ void clear() noexcept { m_ht.clear(); }
+
+ std::pair<iterator, bool> insert(const value_type& value) {
+ return m_ht.insert(value);
+ }
+
+ template <class P, typename std::enable_if<std::is_constructible<
+ value_type, P&&>::value>::type* = nullptr>
+ std::pair<iterator, bool> insert(P&& value) {
+ return m_ht.emplace(std::forward<P>(value));
+ }
+
+ std::pair<iterator, bool> insert(value_type&& value) {
+ return m_ht.insert(std::move(value));
+ }
+
+ iterator insert(const_iterator hint, const value_type& value) {
+ return m_ht.insert_hint(hint, value);
+ }
+
+ template <class P, typename std::enable_if<std::is_constructible<
+ value_type, P&&>::value>::type* = nullptr>
+ iterator insert(const_iterator hint, P&& value) {
+ return m_ht.emplace_hint(hint, std::forward<P>(value));
+ }
+
+ iterator insert(const_iterator hint, value_type&& value) {
+ return m_ht.insert_hint(hint, std::move(value));
+ }
+
+ template <class InputIt>
+ void insert(InputIt first, InputIt last) {
+ m_ht.insert(first, last);
+ }
+
+ void insert(std::initializer_list<value_type> ilist) {
+ m_ht.insert(ilist.begin(), ilist.end());
+ }
+
+ template <class M>
+ std::pair<iterator, bool> insert_or_assign(const key_type& k, M&& obj) {
+ return m_ht.insert_or_assign(k, std::forward<M>(obj));
+ }
+
+ template <class M>
+ std::pair<iterator, bool> insert_or_assign(key_type&& k, M&& obj) {
+ return m_ht.insert_or_assign(std::move(k), std::forward<M>(obj));
+ }
+
+ template <class M>
+ iterator insert_or_assign(const_iterator hint, const key_type& k, M&& obj) {
+ return m_ht.insert_or_assign(hint, k, std::forward<M>(obj));
+ }
+
+ template <class M>
+ iterator insert_or_assign(const_iterator hint, key_type&& k, M&& obj) {
+ return m_ht.insert_or_assign(hint, std::move(k), std::forward<M>(obj));
+ }
+
+ /**
+ * Due to the way elements are stored, emplace will need to move or copy the
+ * key-value once. The method is equivalent to
+ * insert(value_type(std::forward<Args>(args)...));
+ *
+ * Mainly here for compatibility with the std::unordered_map interface.
+ */
+ template <class... Args>
+ std::pair<iterator, bool> emplace(Args&&... args) {
+ return m_ht.emplace(std::forward<Args>(args)...);
+ }
+
+ /**
+ * Due to the way elements are stored, emplace_hint will need to move or copy
+ * the key-value once. The method is equivalent to insert(hint,
+ * value_type(std::forward<Args>(args)...));
+ *
+ * Mainly here for compatibility with the std::unordered_map interface.
+ */
+ template <class... Args>
+ iterator emplace_hint(const_iterator hint, Args&&... args) {
+ return m_ht.emplace_hint(hint, std::forward<Args>(args)...);
+ }
+
+ template <class... Args>
+ std::pair<iterator, bool> try_emplace(const key_type& k, Args&&... args) {
+ return m_ht.try_emplace(k, std::forward<Args>(args)...);
+ }
+
+ template <class... Args>
+ std::pair<iterator, bool> try_emplace(key_type&& k, Args&&... args) {
+ return m_ht.try_emplace(std::move(k), std::forward<Args>(args)...);
+ }
+
+ template <class... Args>
+ iterator try_emplace(const_iterator hint, const key_type& k, Args&&... args) {
+ return m_ht.try_emplace_hint(hint, k, std::forward<Args>(args)...);
+ }
+
+ template <class... Args>
+ iterator try_emplace(const_iterator hint, key_type&& k, Args&&... args) {
+ return m_ht.try_emplace_hint(hint, std::move(k),
+ std::forward<Args>(args)...);
+ }
+
+ iterator erase(iterator pos) { return m_ht.erase(pos); }
+ iterator erase(const_iterator pos) { return m_ht.erase(pos); }
+ iterator erase(const_iterator first, const_iterator last) {
+ return m_ht.erase(first, last);
+ }
+ size_type erase(const key_type& key) { return m_ht.erase(key); }
+
+ /**
+ * Use the hash value 'precalculated_hash' instead of hashing the key. The
+ * hash value should be the same as hash_function()(key). Useful to speed-up
+ * the lookup to the value if you already have the hash.
+ */
+ size_type erase(const key_type& key, std::size_t precalculated_hash) {
+ return m_ht.erase(key, precalculated_hash);
+ }
+
+ /**
+ * This overload only participates in the overload resolution if the typedef
+ * KeyEqual::is_transparent exists. If so, K must be hashable and comparable
+ * to Key.
+ */
+ template <
+ class K, class KE = KeyEqual,
+ typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr>
+ size_type erase(const K& key) {
+ return m_ht.erase(key);
+ }
+
+ /**
+ * @copydoc erase(const K& key)
+ *
+ * Use the hash value 'precalculated_hash' instead of hashing the key. The
+ * hash value should be the same as hash_function()(key). Useful to speed-up
+ * the lookup to the value if you already have the hash.
+ */
+ template <
+ class K, class KE = KeyEqual,
+ typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr>
+ size_type erase(const K& key, std::size_t precalculated_hash) {
+ return m_ht.erase(key, precalculated_hash);
+ }
+
+ void swap(robin_map& other) { other.m_ht.swap(m_ht); }
+
+ /*
+ * Lookup
+ */
+ T& at(const Key& key) { return m_ht.at(key); }
+
+ /**
+ * Use the hash value 'precalculated_hash' instead of hashing the key. The
+ * hash value should be the same as hash_function()(key). Useful to speed-up
+ * the lookup if you already have the hash.
+ */
+ T& at(const Key& key, std::size_t precalculated_hash) {
+ return m_ht.at(key, precalculated_hash);
+ }
+
+ const T& at(const Key& key) const { return m_ht.at(key); }
+
+ /**
+ * @copydoc at(const Key& key, std::size_t precalculated_hash)
+ */
+ const T& at(const Key& key, std::size_t precalculated_hash) const {
+ return m_ht.at(key, precalculated_hash);
+ }
+
+ /**
+ * This overload only participates in the overload resolution if the typedef
+ * KeyEqual::is_transparent exists. If so, K must be hashable and comparable
+ * to Key.
+ */
+ template <
+ class K, class KE = KeyEqual,
+ typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr>
+ T& at(const K& key) {
+ return m_ht.at(key);
+ }
+
+ /**
+ * @copydoc at(const K& key)
+ *
+ * Use the hash value 'precalculated_hash' instead of hashing the key. The
+ * hash value should be the same as hash_function()(key). Useful to speed-up
+ * the lookup if you already have the hash.
+ */
+ template <
+ class K, class KE = KeyEqual,
+ typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr>
+ T& at(const K& key, std::size_t precalculated_hash) {
+ return m_ht.at(key, precalculated_hash);
+ }
+
+ /**
+ * @copydoc at(const K& key)
+ */
+ template <
+ class K, class KE = KeyEqual,
+ typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr>
+ const T& at(const K& key) const {
+ return m_ht.at(key);
+ }
+
+ /**
+ * @copydoc at(const K& key, std::size_t precalculated_hash)
+ */
+ template <
+ class K, class KE = KeyEqual,
+ typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr>
+ const T& at(const K& key, std::size_t precalculated_hash) const {
+ return m_ht.at(key, precalculated_hash);
+ }
+
+ T& operator[](const Key& key) { return m_ht[key]; }
+ T& operator[](Key&& key) { return m_ht[std::move(key)]; }
+
+ size_type count(const Key& key) const { return m_ht.count(key); }
+
+ /**
+ * Use the hash value 'precalculated_hash' instead of hashing the key. The
+ * hash value should be the same as hash_function()(key). Useful to speed-up
+ * the lookup if you already have the hash.
+ */
+ size_type count(const Key& key, std::size_t precalculated_hash) const {
+ return m_ht.count(key, precalculated_hash);
+ }
+
+ /**
+ * This overload only participates in the overload resolution if the typedef
+ * KeyEqual::is_transparent exists. If so, K must be hashable and comparable
+ * to Key.
+ */
+ template <
+ class K, class KE = KeyEqual,
+ typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr>
+ size_type count(const K& key) const {
+ return m_ht.count(key);
+ }
+
+ /**
+ * @copydoc count(const K& key) const
+ *
+ * Use the hash value 'precalculated_hash' instead of hashing the key. The
+ * hash value should be the same as hash_function()(key). Useful to speed-up
+ * the lookup if you already have the hash.
+ */
+ template <
+ class K, class KE = KeyEqual,
+ typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr>
+ size_type count(const K& key, std::size_t precalculated_hash) const {
+ return m_ht.count(key, precalculated_hash);
+ }
+
+ iterator find(const Key& key) { return m_ht.find(key); }
+
+ /**
+ * Use the hash value 'precalculated_hash' instead of hashing the key. The
+ * hash value should be the same as hash_function()(key). Useful to speed-up
+ * the lookup if you already have the hash.
+ */
+ iterator find(const Key& key, std::size_t precalculated_hash) {
+ return m_ht.find(key, precalculated_hash);
+ }
+
+ const_iterator find(const Key& key) const { return m_ht.find(key); }
+
+ /**
+ * @copydoc find(const Key& key, std::size_t precalculated_hash)
+ */
+ const_iterator find(const Key& key, std::size_t precalculated_hash) const {
+ return m_ht.find(key, precalculated_hash);
+ }
+
+ /**
+ * This overload only participates in the overload resolution if the typedef
+ * KeyEqual::is_transparent exists. If so, K must be hashable and comparable
+ * to Key.
+ */
+ template <
+ class K, class KE = KeyEqual,
+ typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr>
+ iterator find(const K& key) {
+ return m_ht.find(key);
+ }
+
+ /**
+ * @copydoc find(const K& key)
+ *
+ * Use the hash value 'precalculated_hash' instead of hashing the key. The
+ * hash value should be the same as hash_function()(key). Useful to speed-up
+ * the lookup if you already have the hash.
+ */
+ template <
+ class K, class KE = KeyEqual,
+ typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr>
+ iterator find(const K& key, std::size_t precalculated_hash) {
+ return m_ht.find(key, precalculated_hash);
+ }
+
+ /**
+ * @copydoc find(const K& key)
+ */
+ template <
+ class K, class KE = KeyEqual,
+ typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr>
+ const_iterator find(const K& key) const {
+ return m_ht.find(key);
+ }
+
+ /**
+ * @copydoc find(const K& key)
+ *
+ * Use the hash value 'precalculated_hash' instead of hashing the key. The
+ * hash value should be the same as hash_function()(key). Useful to speed-up
+ * the lookup if you already have the hash.
+ */
+ template <
+ class K, class KE = KeyEqual,
+ typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr>
+ const_iterator find(const K& key, std::size_t precalculated_hash) const {
+ return m_ht.find(key, precalculated_hash);
+ }
+
+ bool contains(const Key& key) const { return m_ht.contains(key); }
+
+ /**
+ * Use the hash value 'precalculated_hash' instead of hashing the key. The
+ * hash value should be the same as hash_function()(key). Useful to speed-up
+ * the lookup if you already have the hash.
+ */
+ bool contains(const Key& key, std::size_t precalculated_hash) const {
+ return m_ht.contains(key, precalculated_hash);
+ }
+
+ /**
+ * This overload only participates in the overload resolution if the typedef
+ * KeyEqual::is_transparent exists. If so, K must be hashable and comparable
+ * to Key.
+ */
+ template <
+ class K, class KE = KeyEqual,
+ typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr>
+ bool contains(const K& key) const {
+ return m_ht.contains(key);
+ }
+
+ /**
+ * @copydoc contains(const K& key) const
+ *
+ * Use the hash value 'precalculated_hash' instead of hashing the key. The
+ * hash value should be the same as hash_function()(key). Useful to speed-up
+ * the lookup if you already have the hash.
+ */
+ template <
+ class K, class KE = KeyEqual,
+ typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr>
+ bool contains(const K& key, std::size_t precalculated_hash) const {
+ return m_ht.contains(key, precalculated_hash);
+ }
+
+ std::pair<iterator, iterator> equal_range(const Key& key) {
+ return m_ht.equal_range(key);
+ }
+
+ /**
+ * Use the hash value 'precalculated_hash' instead of hashing the key. The
+ * hash value should be the same as hash_function()(key). Useful to speed-up
+ * the lookup if you already have the hash.
+ */
+ std::pair<iterator, iterator> equal_range(const Key& key,
+ std::size_t precalculated_hash) {
+ return m_ht.equal_range(key, precalculated_hash);
+ }
+
+ std::pair<const_iterator, const_iterator> equal_range(const Key& key) const {
+ return m_ht.equal_range(key);
+ }
+
+ /**
+ * @copydoc equal_range(const Key& key, std::size_t precalculated_hash)
+ */
+ std::pair<const_iterator, const_iterator> equal_range(
+ const Key& key, std::size_t precalculated_hash) const {
+ return m_ht.equal_range(key, precalculated_hash);
+ }
+
+ /**
+ * This overload only participates in the overload resolution if the typedef
+ * KeyEqual::is_transparent exists. If so, K must be hashable and comparable
+ * to Key.
+ */
+ template <
+ class K, class KE = KeyEqual,
+ typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr>
+ std::pair<iterator, iterator> equal_range(const K& key) {
+ return m_ht.equal_range(key);
+ }
+
+ /**
+ * @copydoc equal_range(const K& key)
+ *
+ * Use the hash value 'precalculated_hash' instead of hashing the key. The
+ * hash value should be the same as hash_function()(key). Useful to speed-up
+ * the lookup if you already have the hash.
+ */
+ template <
+ class K, class KE = KeyEqual,
+ typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr>
+ std::pair<iterator, iterator> equal_range(const K& key,
+ std::size_t precalculated_hash) {
+ return m_ht.equal_range(key, precalculated_hash);
+ }
+
+ /**
+ * @copydoc equal_range(const K& key)
+ */
+ template <
+ class K, class KE = KeyEqual,
+ typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr>
+ std::pair<const_iterator, const_iterator> equal_range(const K& key) const {
+ return m_ht.equal_range(key);
+ }
+
+ /**
+ * @copydoc equal_range(const K& key, std::size_t precalculated_hash)
+ */
+ template <
+ class K, class KE = KeyEqual,
+ typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr>
+ std::pair<const_iterator, const_iterator> equal_range(
+ const K& key, std::size_t precalculated_hash) const {
+ return m_ht.equal_range(key, precalculated_hash);
+ }
+
+ /*
+ * Bucket interface
+ */
+ size_type bucket_count() const { return m_ht.bucket_count(); }
+ size_type max_bucket_count() const { return m_ht.max_bucket_count(); }
+
+ /*
+ * Hash policy
+ */
+ float load_factor() const { return m_ht.load_factor(); }
+
+ float min_load_factor() const { return m_ht.min_load_factor(); }
+ float max_load_factor() const { return m_ht.max_load_factor(); }
+
+ /**
+ * Set the `min_load_factor` to `ml`. When the `load_factor` of the map goes
+ * below `min_load_factor` after some erase operations, the map will be
+ * shrunk when an insertion occurs. The erase method itself never shrinks
+ * the map.
+ *
+ * The default value of `min_load_factor` is 0.0f, the map never shrinks by
+ * default.
+ */
+ void min_load_factor(float ml) { m_ht.min_load_factor(ml); }
+ void max_load_factor(float ml) { m_ht.max_load_factor(ml); }
+
+ void rehash(size_type count_) { m_ht.rehash(count_); }
+ void reserve(size_type count_) { m_ht.reserve(count_); }
+
+ /*
+ * Observers
+ */
+ hasher hash_function() const { return m_ht.hash_function(); }
+ key_equal key_eq() const { return m_ht.key_eq(); }
+
+ /*
+ * Other
+ */
+
+ /**
+ * Convert a const_iterator to an iterator.
+ */
+ iterator mutable_iterator(const_iterator pos) {
+ return m_ht.mutable_iterator(pos);
+ }
+
+ /**
+ * Serialize the map through the `serializer` parameter.
+ *
+ * The `serializer` parameter must be a function object that supports the
+ * following call:
+ * - `template<typename U> void operator()(const U& value);` where the types
+ * `std::int16_t`, `std::uint32_t`, `std::uint64_t`, `float` and
+ * `std::pair<Key, T>` must be supported for U.
+ *
+ * The implementation leaves binary compatibility (endianness, IEEE 754 for
+ * floats, ...) of the types it serializes in the hands of the `Serializer`
+ * function object if compatibility is required.
+ */
+ template <class Serializer>
+ void serialize(Serializer& serializer) const {
+ m_ht.serialize(serializer);
+ }
+
+ /**
+ * Deserialize a previously serialized map through the `deserializer`
+ * parameter.
+ *
+ * The `deserializer` parameter must be a function object that supports the
+ * following call:
+ * - `template<typename U> U operator()();` where the types `std::int16_t`,
+ * `std::uint32_t`, `std::uint64_t`, `float` and `std::pair<Key, T>` must be
+ * supported for U.
+ *
+ * If the deserialized hash map type is hash compatible with the serialized
+ * map, the deserialization process can be sped up by setting
+ * `hash_compatible` to true. To be hash compatible, the Hash, KeyEqual and
+ * GrowthPolicy must behave the same way as the ones used on the serialized
+ * map and the StoreHash must have the same value. The `std::size_t` must also
+ * be of the same size as the one on the platform used to serialize the map.
+ * If these criteria are not met, the behaviour is undefined with
+ * `hash_compatible` set to true.
+ *
+ * The behaviour is undefined if the type `Key` and `T` of the `robin_map` are
+ * not the same as the types used during serialization.
+ *
+ * The implementation leaves binary compatibility (endianness, IEEE 754 for
+ * floats, size of int, ...) of the types it deserializes in the hands of the
+ * `Deserializer` function object if compatibility is required.
+ */
+ template <class Deserializer>
+ static robin_map deserialize(Deserializer& deserializer,
+ bool hash_compatible = false) {
+ robin_map map(0);
+ map.m_ht.deserialize(deserializer, hash_compatible);
+
+ return map;
+ }
+
+ friend bool operator==(const robin_map& lhs, const robin_map& rhs) {
+ if (lhs.size() != rhs.size()) {
+ return false;
+ }
+
+ for (const auto& element_lhs : lhs) {
+ const auto it_element_rhs = rhs.find(element_lhs.first);
+ if (it_element_rhs == rhs.cend() ||
+ element_lhs.second != it_element_rhs->second) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ friend bool operator!=(const robin_map& lhs, const robin_map& rhs) {
+ return !operator==(lhs, rhs);
+ }
+
+ friend void swap(robin_map& lhs, robin_map& rhs) { lhs.swap(rhs); }
+
+ private:
+ ht m_ht;
+};
+
+/**
+ * Same as `tsl::robin_map<Key, T, Hash, KeyEqual, Allocator, StoreHash,
+ * tsl::rh::prime_growth_policy>`.
+ */
+template <class Key, class T, class Hash = std::hash<Key>,
+ class KeyEqual = std::equal_to<Key>,
+ class Allocator = std::allocator<std::pair<Key, T>>,
+ bool StoreHash = false>
+using robin_pg_map = robin_map<Key, T, Hash, KeyEqual, Allocator, StoreHash,
+ tsl::rh::prime_growth_policy>;
+
+} // end namespace tsl
+
+#endif
diff --git a/misc/benchmarks/external/update.sh b/misc/benchmarks/external/update.sh
new file mode 100644
index 00000000..45b472b1
--- /dev/null
+++ b/misc/benchmarks/external/update.sh
@@ -0,0 +1,38 @@
+tsl_h="https://raw.github.com/Tessil/hopscotch-map/master/include/tsl"
+tsl_r="https://raw.github.com/Tessil/robin-map/master/include/tsl"
+greg_s="https://raw.github.com/greg7mdp/sparsepp/master/sparsepp"
+greg_p="https://raw.github.com/greg7mdp/parallel-hashmap"
+martinus_r="https://raw.github.com/martinus/robin-hood-hashing/master/src/include"
+martinus_d="https://raw.github.com/martinus/unordered_dense/master/include/ankerl"
+skarupke="https://raw.github.com/skarupke/flat_hash_map/master"
+ktprime="https://raw.github.com/ktprime/emhash/master"
+
+mkdir -p ankerl skarupke tsl emhash # sparsepp
+
+wget "$martinus_r/robin_hood.h" -O "ankerl/robin_hood.h"
+wget "$martinus_d/unordered_dense.h" -O "ankerl/unordered_dense.h"
+
+wget "$skarupke/flat_hash_map.hpp" -O "skarupke/flat_hash_map.hpp"
+
+wget "$tsl_r/robin_growth_policy.h" -O "tsl/robin_growth_policy.h"
+wget "$tsl_r/robin_hash.h" -O "tsl/robin_hash.h"
+wget "$tsl_r/robin_map.h" -O "tsl/robin_map.h"
+
+#wget "$ktprime/thirdparty/wyhash.h" -O "emhash/wyhash.h"
+wget "$ktprime/hash_table7.hpp" -O "emhash/hash_table7.hpp"
+
+#wget "$tsl_h/hopscotch_growth_policy.h" -O "tsl/hopscotch_growth_policy.h"
+#wget "$tsl_h/hopscotch_hash.h" -O "tsl/hopscotch_hash.h"
+#wget "$tsl_h/hopscotch_map.h" -O "tsl/hopscotch_map.h"
+
+#wget "$skarupke/bytell_hash_map.hpp" -O "skarupke/bytell_hash_map.hpp"
+
+#wget "$greg_s/spp.h" -O "sparsepp/spp.h"
+#wget "$greg_s/spp_config.h" -O "sparsepp/spp_config.h"
+#wget "$greg_s/spp_dlalloc.h" -O "sparsepp/spp_dlalloc.h"
+#wget "$greg_s/spp_memory.h" -O "sparsepp/spp_memory.h"
+#wget "$greg_s/spp_smartptr.h" -O "sparsepp/spp_smartptr.h"
+#wget "$greg_s/spp_stdint.h" -O "sparsepp/spp_stdint.h"
+#wget "$greg_s/spp_timer.h" -O "sparsepp/spp_timer.h"
+#wget "$greg_s/spp_traits.h" -O "sparsepp/spp_traits.h"
+#wget "$greg_s/spp_utils.h" -O "sparsepp/spp_utils.h"