summaryrefslogtreecommitdiffhomepage
path: root/misc/benchmarks
diff options
context:
space:
mode:
authorTyge Lovset <[email protected]>2022-12-20 23:31:51 +0100
committerTyge Lovset <[email protected]>2022-12-20 23:31:51 +0100
commit5f57d597cd27aef55adbcb3b452973b0c6e33667 (patch)
treedfd59c2fd0e36a6ef37912a9d0cc5a65970f1524 /misc/benchmarks
parent1763be8c8cbbc0896477fcf924edd4180d1345a9 (diff)
downloadSTC-modified-5f57d597cd27aef55adbcb3b452973b0c6e33667.tar.gz
STC-modified-5f57d597cd27aef55adbcb3b452973b0c6e33667.zip
Restructured folders: examples, benchmarks, tests into misc folder.
Diffstat (limited to 'misc/benchmarks')
-rw-r--r--misc/benchmarks/build_all.sh32
-rw-r--r--misc/benchmarks/external/ankerl/robin_hood.h2544
-rw-r--r--misc/benchmarks/external/ankerl/unordered_dense.h1503
-rw-r--r--misc/benchmarks/external/emhash/hash_table7.hpp1876
-rw-r--r--misc/benchmarks/external/khash.h595
-rw-r--r--misc/benchmarks/external/skarupke/flat_hash_map.hpp1496
-rw-r--r--misc/benchmarks/external/tsl/robin_growth_policy.h406
-rw-r--r--misc/benchmarks/external/tsl/robin_hash.h1639
-rw-r--r--misc/benchmarks/external/tsl/robin_map.h807
-rw-r--r--misc/benchmarks/external/update.sh38
-rw-r--r--misc/benchmarks/misc/cbits_benchmark.cpp123
-rw-r--r--misc/benchmarks/misc/names.txt5163
-rw-r--r--misc/benchmarks/misc/prng_bench.cpp223
-rw-r--r--misc/benchmarks/misc/rust_cmap.c61
-rw-r--r--misc/benchmarks/misc/rust_hashmap.rs82
-rw-r--r--misc/benchmarks/misc/sso_bench.cpp135
-rw-r--r--misc/benchmarks/misc/string_bench_STC.cpp300
-rw-r--r--misc/benchmarks/misc/string_bench_STD.cpp371
-rw-r--r--misc/benchmarks/picobench/picobench.hpp1479
-rw-r--r--misc/benchmarks/picobench/picobench_cmap.cpp284
-rw-r--r--misc/benchmarks/picobench/picobench_csmap.cpp320
-rw-r--r--misc/benchmarks/pics/benchmark.gifbin0 -> 160087 bytes
-rw-r--r--misc/benchmarks/plotbench/cdeq_benchmark.cpp138
-rw-r--r--misc/benchmarks/plotbench/clist_benchmark.cpp135
-rw-r--r--misc/benchmarks/plotbench/cmap_benchmark.cpp142
-rw-r--r--misc/benchmarks/plotbench/cpque_benchmark.cpp71
-rw-r--r--misc/benchmarks/plotbench/csmap_benchmark.cpp143
-rw-r--r--misc/benchmarks/plotbench/cvec_benchmark.cpp134
-rw-r--r--misc/benchmarks/plotbench/plot.py24
-rw-r--r--misc/benchmarks/plotbench/run_all.bat5
-rw-r--r--misc/benchmarks/plotbench/run_all.sh4
-rw-r--r--misc/benchmarks/plotbench/run_clang.sh14
-rw-r--r--misc/benchmarks/plotbench/run_gcc.sh12
-rw-r--r--misc/benchmarks/plotbench/run_vc.bat15
-rw-r--r--misc/benchmarks/shootout_hashmaps.cpp349
35 files changed, 20663 insertions, 0 deletions
diff --git a/misc/benchmarks/build_all.sh b/misc/benchmarks/build_all.sh
new file mode 100644
index 00000000..54340998
--- /dev/null
+++ b/misc/benchmarks/build_all.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+cc='g++ -I../../include -s -O3 -Wall -pedantic -x c++ -std=c++20'
+#cc='clang++ -I../include -s -O3 -Wall -pedantic -x c++ -std=c++20'
+#cc='cl -nologo -I../include -O2 -TP -EHsc -std:c++20'
+run=0
+if [ "$1" == '-h' -o "$1" == '--help' ]; then
+ echo usage: build_all.sh [-run] [compiler + options]
+ exit
+fi
+if [ "$1" == '-run' ]; then
+ run=1
+ shift
+fi
+if [ ! -z "$1" ] ; then
+ cc=$@
+fi
+if [ $run = 0 ] ; then
+ for i in *.cpp misc/*.c* picobench/*.cpp plotbench/*.cpp ; do
+ echo $cc -I../include $i -o $(basename -s .cpp $i).exe
+ $cc -I../include $i -o $(basename -s .cpp $i).exe
+ done
+else
+ for i in misc/*.c* picobench/*.cpp ; do
+ echo $cc -O3 -I../include $i
+ $cc -O3 -I../include $i
+ if [ -f $(basename -s .c $i).exe ]; then ./$(basename -s .c $i).exe; fi
+ if [ -f ./a.exe ]; then ./a.exe; fi
+ if [ -f ./a.out ]; then ./a.out; fi
+ done
+fi
+
+rm -f a.out *.o *.obj # *.exe
diff --git a/misc/benchmarks/external/ankerl/robin_hood.h b/misc/benchmarks/external/ankerl/robin_hood.h
new file mode 100644
index 00000000..0af031f5
--- /dev/null
+++ b/misc/benchmarks/external/ankerl/robin_hood.h
@@ -0,0 +1,2544 @@
+// ______ _____ ______ _________
+// ______________ ___ /_ ___(_)_______ ___ /_ ______ ______ ______ /
+// __ ___/_ __ \__ __ \__ / __ __ \ __ __ \_ __ \_ __ \_ __ /
+// _ / / /_/ /_ /_/ /_ / _ / / / _ / / // /_/ // /_/ // /_/ /
+// /_/ \____/ /_.___/ /_/ /_/ /_/ ________/_/ /_/ \____/ \____/ \__,_/
+// _/_____/
+//
+// Fast & memory efficient hashtable based on robin hood hashing for C++11/14/17/20
+// https://github.com/martinus/robin-hood-hashing
+//
+// Licensed under the MIT License <http://opensource.org/licenses/MIT>.
+// SPDX-License-Identifier: MIT
+// Copyright (c) 2018-2021 Martin Ankerl <http://martin.ankerl.com>
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+#ifndef ROBIN_HOOD_H_INCLUDED
+#define ROBIN_HOOD_H_INCLUDED
+
+// see https://semver.org/
+#define ROBIN_HOOD_VERSION_MAJOR 3 // for incompatible API changes
+#define ROBIN_HOOD_VERSION_MINOR 11 // for adding functionality in a backwards-compatible manner
+#define ROBIN_HOOD_VERSION_PATCH 5 // for backwards-compatible bug fixes
+
+#include <algorithm>
+#include <cstdlib>
+#include <cstring>
+#include <functional>
+#include <limits>
+#include <memory> // only to support hash of smart pointers
+#include <stdexcept>
+#include <string>
+#include <type_traits>
+#include <utility>
+#if __cplusplus >= 201703L
+# include <string_view>
+#endif
+
+// #define ROBIN_HOOD_LOG_ENABLED
+#ifdef ROBIN_HOOD_LOG_ENABLED
+# include <iostream>
+# define ROBIN_HOOD_LOG(...) \
+ std::cout << __FUNCTION__ << "@" << __LINE__ << ": " << __VA_ARGS__ << std::endl;
+#else
+# define ROBIN_HOOD_LOG(x)
+#endif
+
+// #define ROBIN_HOOD_TRACE_ENABLED
+#ifdef ROBIN_HOOD_TRACE_ENABLED
+# include <iostream>
+# define ROBIN_HOOD_TRACE(...) \
+ std::cout << __FUNCTION__ << "@" << __LINE__ << ": " << __VA_ARGS__ << std::endl;
+#else
+# define ROBIN_HOOD_TRACE(x)
+#endif
+
+// #define ROBIN_HOOD_COUNT_ENABLED
+#ifdef ROBIN_HOOD_COUNT_ENABLED
+# include <iostream>
+# define ROBIN_HOOD_COUNT(x) ++counts().x;
+namespace robin_hood {
+struct Counts {
+ uint64_t shiftUp{};
+ uint64_t shiftDown{};
+};
+inline std::ostream& operator<<(std::ostream& os, Counts const& c) {
+ return os << c.shiftUp << " shiftUp" << std::endl << c.shiftDown << " shiftDown" << std::endl;
+}
+
+static Counts& counts() {
+ static Counts counts{};
+ return counts;
+}
+} // namespace robin_hood
+#else
+# define ROBIN_HOOD_COUNT(x)
+#endif
+
+// all non-argument macros should use this facility. See
+// https://www.fluentcpp.com/2019/05/28/better-macros-better-flags/
+#define ROBIN_HOOD(x) ROBIN_HOOD_PRIVATE_DEFINITION_##x()
+
+// mark unused members with this macro
+#define ROBIN_HOOD_UNUSED(identifier)
+
+// bitness
+#if SIZE_MAX == UINT32_MAX
+# define ROBIN_HOOD_PRIVATE_DEFINITION_BITNESS() 32
+#elif SIZE_MAX == UINT64_MAX
+# define ROBIN_HOOD_PRIVATE_DEFINITION_BITNESS() 64
+#else
+# error Unsupported bitness
+#endif
+
+// endianness
+#ifdef _MSC_VER
+# define ROBIN_HOOD_PRIVATE_DEFINITION_LITTLE_ENDIAN() 1
+# define ROBIN_HOOD_PRIVATE_DEFINITION_BIG_ENDIAN() 0
+#else
+# define ROBIN_HOOD_PRIVATE_DEFINITION_LITTLE_ENDIAN() \
+ (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
+# define ROBIN_HOOD_PRIVATE_DEFINITION_BIG_ENDIAN() (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
+#endif
+
+// inline
+#ifdef _MSC_VER
+# define ROBIN_HOOD_PRIVATE_DEFINITION_NOINLINE() __declspec(noinline)
+#else
+# define ROBIN_HOOD_PRIVATE_DEFINITION_NOINLINE() __attribute__((noinline))
+#endif
+
+// exceptions
+#if !defined(__cpp_exceptions) && !defined(__EXCEPTIONS) && !defined(_CPPUNWIND)
+# define ROBIN_HOOD_PRIVATE_DEFINITION_HAS_EXCEPTIONS() 0
+#else
+# define ROBIN_HOOD_PRIVATE_DEFINITION_HAS_EXCEPTIONS() 1
+#endif
+
+// count leading/trailing bits
+#if !defined(ROBIN_HOOD_DISABLE_INTRINSICS)
+# ifdef _MSC_VER
+# if ROBIN_HOOD(BITNESS) == 32
+# define ROBIN_HOOD_PRIVATE_DEFINITION_BITSCANFORWARD() _BitScanForward
+# else
+# define ROBIN_HOOD_PRIVATE_DEFINITION_BITSCANFORWARD() _BitScanForward64
+# endif
+# include <intrin.h>
+# pragma intrinsic(ROBIN_HOOD(BITSCANFORWARD))
+# define ROBIN_HOOD_COUNT_TRAILING_ZEROES(x) \
+ [](size_t mask) noexcept -> int { \
+ unsigned long index; \
+ return ROBIN_HOOD(BITSCANFORWARD)(&index, mask) ? static_cast<int>(index) \
+ : ROBIN_HOOD(BITNESS); \
+ }(x)
+# else
+# if ROBIN_HOOD(BITNESS) == 32
+# define ROBIN_HOOD_PRIVATE_DEFINITION_CTZ() __builtin_ctzl
+# define ROBIN_HOOD_PRIVATE_DEFINITION_CLZ() __builtin_clzl
+# else
+# define ROBIN_HOOD_PRIVATE_DEFINITION_CTZ() __builtin_ctzll
+# define ROBIN_HOOD_PRIVATE_DEFINITION_CLZ() __builtin_clzll
+# endif
+# define ROBIN_HOOD_COUNT_LEADING_ZEROES(x) ((x) ? ROBIN_HOOD(CLZ)(x) : ROBIN_HOOD(BITNESS))
+# define ROBIN_HOOD_COUNT_TRAILING_ZEROES(x) ((x) ? ROBIN_HOOD(CTZ)(x) : ROBIN_HOOD(BITNESS))
+# endif
+#endif
+
+// fallthrough
+#ifndef __has_cpp_attribute // For backwards compatibility
+# define __has_cpp_attribute(x) 0
+#endif
+#if __has_cpp_attribute(clang::fallthrough)
+# define ROBIN_HOOD_PRIVATE_DEFINITION_FALLTHROUGH() [[clang::fallthrough]]
+#elif __has_cpp_attribute(gnu::fallthrough)
+# define ROBIN_HOOD_PRIVATE_DEFINITION_FALLTHROUGH() [[gnu::fallthrough]]
+#else
+# define ROBIN_HOOD_PRIVATE_DEFINITION_FALLTHROUGH()
+#endif
+
+// likely/unlikely
+#ifdef _MSC_VER
+# define ROBIN_HOOD_LIKELY(condition) condition
+# define ROBIN_HOOD_UNLIKELY(condition) condition
+#else
+# define ROBIN_HOOD_LIKELY(condition) __builtin_expect(condition, 1)
+# define ROBIN_HOOD_UNLIKELY(condition) __builtin_expect(condition, 0)
+#endif
+
+// detect if native wchar_t type is available in MSVC
+#ifdef _MSC_VER
+# ifdef _NATIVE_WCHAR_T_DEFINED
+# define ROBIN_HOOD_PRIVATE_DEFINITION_HAS_NATIVE_WCHART() 1
+# else
+# define ROBIN_HOOD_PRIVATE_DEFINITION_HAS_NATIVE_WCHART() 0
+# endif
+#else
+# define ROBIN_HOOD_PRIVATE_DEFINITION_HAS_NATIVE_WCHART() 1
+#endif
+
+// detect if MSVC supports the pair(std::piecewise_construct_t,...) constructor being constexpr
+#ifdef _MSC_VER
+# if _MSC_VER <= 1900
+# define ROBIN_HOOD_PRIVATE_DEFINITION_BROKEN_CONSTEXPR() 1
+# else
+# define ROBIN_HOOD_PRIVATE_DEFINITION_BROKEN_CONSTEXPR() 0
+# endif
+#else
+# define ROBIN_HOOD_PRIVATE_DEFINITION_BROKEN_CONSTEXPR() 0
+#endif
+
+// workaround missing "is_trivially_copyable" in g++ < 5.0
+// See https://stackoverflow.com/a/31798726/48181
+#if defined(__GNUC__) && __GNUC__ < 5
+# define ROBIN_HOOD_IS_TRIVIALLY_COPYABLE(...) __has_trivial_copy(__VA_ARGS__)
+#else
+# define ROBIN_HOOD_IS_TRIVIALLY_COPYABLE(...) std::is_trivially_copyable<__VA_ARGS__>::value
+#endif
+
+// helpers for C++ versions, see https://gcc.gnu.org/onlinedocs/cpp/Standard-Predefined-Macros.html
+#define ROBIN_HOOD_PRIVATE_DEFINITION_CXX() __cplusplus
+#define ROBIN_HOOD_PRIVATE_DEFINITION_CXX98() 199711L
+#define ROBIN_HOOD_PRIVATE_DEFINITION_CXX11() 201103L
+#define ROBIN_HOOD_PRIVATE_DEFINITION_CXX14() 201402L
+#define ROBIN_HOOD_PRIVATE_DEFINITION_CXX17() 201703L
+
+#if ROBIN_HOOD(CXX) >= ROBIN_HOOD(CXX17)
+# define ROBIN_HOOD_PRIVATE_DEFINITION_NODISCARD() [[nodiscard]]
+#else
+# define ROBIN_HOOD_PRIVATE_DEFINITION_NODISCARD()
+#endif
+
+namespace robin_hood {
+
+#if ROBIN_HOOD(CXX) >= ROBIN_HOOD(CXX14)
+# define ROBIN_HOOD_STD std
+#else
+
+// c++11 compatibility layer
+namespace ROBIN_HOOD_STD {
+template <class T>
+struct alignment_of
+ : std::integral_constant<std::size_t, alignof(typename std::remove_all_extents<T>::type)> {};
+
+template <class T, T... Ints>
+class integer_sequence {
+public:
+ using value_type = T;
+ static_assert(std::is_integral<value_type>::value, "not integral type");
+ static constexpr std::size_t size() noexcept {
+ return sizeof...(Ints);
+ }
+};
+template <std::size_t... Inds>
+using index_sequence = integer_sequence<std::size_t, Inds...>;
+
+namespace detail_ {
+template <class T, T Begin, T End, bool>
+struct IntSeqImpl {
+ using TValue = T;
+ static_assert(std::is_integral<TValue>::value, "not integral type");
+ static_assert(Begin >= 0 && Begin < End, "unexpected argument (Begin<0 || Begin<=End)");
+
+ template <class, class>
+ struct IntSeqCombiner;
+
+ template <TValue... Inds0, TValue... Inds1>
+ struct IntSeqCombiner<integer_sequence<TValue, Inds0...>, integer_sequence<TValue, Inds1...>> {
+ using TResult = integer_sequence<TValue, Inds0..., Inds1...>;
+ };
+
+ using TResult =
+ typename IntSeqCombiner<typename IntSeqImpl<TValue, Begin, Begin + (End - Begin) / 2,
+ (End - Begin) / 2 == 1>::TResult,
+ typename IntSeqImpl<TValue, Begin + (End - Begin) / 2, End,
+ (End - Begin + 1) / 2 == 1>::TResult>::TResult;
+};
+
+template <class T, T Begin>
+struct IntSeqImpl<T, Begin, Begin, false> {
+ using TValue = T;
+ static_assert(std::is_integral<TValue>::value, "not integral type");
+ static_assert(Begin >= 0, "unexpected argument (Begin<0)");
+ using TResult = integer_sequence<TValue>;
+};
+
+template <class T, T Begin, T End>
+struct IntSeqImpl<T, Begin, End, true> {
+ using TValue = T;
+ static_assert(std::is_integral<TValue>::value, "not integral type");
+ static_assert(Begin >= 0, "unexpected argument (Begin<0)");
+ using TResult = integer_sequence<TValue, Begin>;
+};
+} // namespace detail_
+
+template <class T, T N>
+using make_integer_sequence = typename detail_::IntSeqImpl<T, 0, N, (N - 0) == 1>::TResult;
+
+template <std::size_t N>
+using make_index_sequence = make_integer_sequence<std::size_t, N>;
+
+template <class... T>
+using index_sequence_for = make_index_sequence<sizeof...(T)>;
+
+} // namespace ROBIN_HOOD_STD
+
+#endif
+
+namespace detail {
+
+// make sure we static_cast to the correct type for hash_int
+#if ROBIN_HOOD(BITNESS) == 64
+using SizeT = uint64_t;
+#else
+using SizeT = uint32_t;
+#endif
+
+template <typename T>
+T rotr(T x, unsigned k) {
+ return (x >> k) | (x << (8U * sizeof(T) - k));
+}
+
+// This cast gets rid of warnings like "cast from 'uint8_t*' {aka 'unsigned char*'} to
+// 'uint64_t*' {aka 'long unsigned int*'} increases required alignment of target type". Use with
+// care!
+template <typename T>
+inline T reinterpret_cast_no_cast_align_warning(void* ptr) noexcept {
+ return reinterpret_cast<T>(ptr);
+}
+
+template <typename T>
+inline T reinterpret_cast_no_cast_align_warning(void const* ptr) noexcept {
+ return reinterpret_cast<T>(ptr);
+}
+
+// make sure this is not inlined as it is slow and dramatically enlarges code, thus making other
+// inlinings more difficult. Throws are also generally the slow path.
+template <typename E, typename... Args>
+[[noreturn]] ROBIN_HOOD(NOINLINE)
+#if ROBIN_HOOD(HAS_EXCEPTIONS)
+ void doThrow(Args&&... args) {
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-array-to-pointer-decay)
+ throw E(std::forward<Args>(args)...);
+}
+#else
+ void doThrow(Args&&... ROBIN_HOOD_UNUSED(args) /*unused*/) {
+ abort();
+}
+#endif
+
+template <typename E, typename T, typename... Args>
+T* assertNotNull(T* t, Args&&... args) {
+ if (ROBIN_HOOD_UNLIKELY(nullptr == t)) {
+ doThrow<E>(std::forward<Args>(args)...);
+ }
+ return t;
+}
+
+template <typename T>
+inline T unaligned_load(void const* ptr) noexcept {
+ // using memcpy so we don't get into unaligned load problems.
+ // compiler should optimize this very well anyways.
+ T t;
+ std::memcpy(&t, ptr, sizeof(T));
+ return t;
+}
+
+// Allocates bulks of memory for objects of type T. This deallocates the memory in the destructor,
+// and keeps a linked list of the allocated memory around. Overhead per allocation is the size of a
+// pointer.
+template <typename T, size_t MinNumAllocs = 4, size_t MaxNumAllocs = 256>
+class BulkPoolAllocator {
+public:
+ BulkPoolAllocator() noexcept = default;
+
+ // does not copy anything, just creates a new allocator.
+ BulkPoolAllocator(const BulkPoolAllocator& ROBIN_HOOD_UNUSED(o) /*unused*/) noexcept
+ : mHead(nullptr)
+ , mListForFree(nullptr) {}
+
+ BulkPoolAllocator(BulkPoolAllocator&& o) noexcept
+ : mHead(o.mHead)
+ , mListForFree(o.mListForFree) {
+ o.mListForFree = nullptr;
+ o.mHead = nullptr;
+ }
+
+ BulkPoolAllocator& operator=(BulkPoolAllocator&& o) noexcept {
+ reset();
+ mHead = o.mHead;
+ mListForFree = o.mListForFree;
+ o.mListForFree = nullptr;
+ o.mHead = nullptr;
+ return *this;
+ }
+
+ BulkPoolAllocator&
+ // NOLINTNEXTLINE(bugprone-unhandled-self-assignment,cert-oop54-cpp)
+ operator=(const BulkPoolAllocator& ROBIN_HOOD_UNUSED(o) /*unused*/) noexcept {
+ // does not do anything
+ return *this;
+ }
+
+ ~BulkPoolAllocator() noexcept {
+ reset();
+ }
+
+ // Deallocates all allocated memory.
+ void reset() noexcept {
+ while (mListForFree) {
+ T* tmp = *mListForFree;
+ ROBIN_HOOD_LOG("std::free")
+ std::free(mListForFree);
+ mListForFree = reinterpret_cast_no_cast_align_warning<T**>(tmp);
+ }
+ mHead = nullptr;
+ }
+
+ // allocates, but does NOT initialize. Use in-place new constructor, e.g.
+ // T* obj = pool.allocate();
+ // ::new (static_cast<void*>(obj)) T();
+ T* allocate() {
+ T* tmp = mHead;
+ if (!tmp) {
+ tmp = performAllocation();
+ }
+
+ mHead = *reinterpret_cast_no_cast_align_warning<T**>(tmp);
+ return tmp;
+ }
+
+ // does not actually deallocate but puts it in store.
+ // make sure you have already called the destructor! e.g. with
+ // obj->~T();
+ // pool.deallocate(obj);
+ void deallocate(T* obj) noexcept {
+ *reinterpret_cast_no_cast_align_warning<T**>(obj) = mHead;
+ mHead = obj;
+ }
+
+ // Adds an already allocated block of memory to the allocator. This allocator is from now on
+ // responsible for freeing the data (with free()). If the provided data is not large enough to
+ // make use of, it is immediately freed. Otherwise it is reused and freed in the destructor.
+ void addOrFree(void* ptr, const size_t numBytes) noexcept {
+ // calculate number of available elements in ptr
+ if (numBytes < ALIGNMENT + ALIGNED_SIZE) {
+ // not enough data for at least one element. Free and return.
+ ROBIN_HOOD_LOG("std::free")
+ std::free(ptr);
+ } else {
+ ROBIN_HOOD_LOG("add to buffer")
+ add(ptr, numBytes);
+ }
+ }
+
+ void swap(BulkPoolAllocator<T, MinNumAllocs, MaxNumAllocs>& other) noexcept {
+ using std::swap;
+ swap(mHead, other.mHead);
+ swap(mListForFree, other.mListForFree);
+ }
+
+private:
+ // iterates the list of allocated memory to calculate how many to alloc next.
+ // Recalculating this each time saves us a size_t member.
+ // This ignores the fact that memory blocks might have been added manually with addOrFree. In
+ // practice, this should not matter much.
+ ROBIN_HOOD(NODISCARD) size_t calcNumElementsToAlloc() const noexcept {
+ auto tmp = mListForFree;
+ size_t numAllocs = MinNumAllocs;
+
+ while (numAllocs * 2 <= MaxNumAllocs && tmp) {
+ auto x = reinterpret_cast<T***>(tmp);
+ tmp = *x;
+ numAllocs *= 2;
+ }
+
+ return numAllocs;
+ }
+
+ // WARNING: Underflow if numBytes < ALIGNMENT! This is guarded in addOrFree().
+ void add(void* ptr, const size_t numBytes) noexcept {
+ const size_t numElements = (numBytes - ALIGNMENT) / ALIGNED_SIZE;
+
+ auto data = reinterpret_cast<T**>(ptr);
+
+ // link free list
+ auto x = reinterpret_cast<T***>(data);
+ *x = mListForFree;
+ mListForFree = data;
+
+ // create linked list for newly allocated data
+ auto* const headT =
+ reinterpret_cast_no_cast_align_warning<T*>(reinterpret_cast<char*>(ptr) + ALIGNMENT);
+
+ auto* const head = reinterpret_cast<char*>(headT);
+
+ // Visual Studio compiler automatically unrolls this loop, which is pretty cool
+ for (size_t i = 0; i < numElements; ++i) {
+ *reinterpret_cast_no_cast_align_warning<char**>(head + i * ALIGNED_SIZE) =
+ head + (i + 1) * ALIGNED_SIZE;
+ }
+
+ // last one points to 0
+ *reinterpret_cast_no_cast_align_warning<T**>(head + (numElements - 1) * ALIGNED_SIZE) =
+ mHead;
+ mHead = headT;
+ }
+
+ // Called when no memory is available (mHead == 0).
+ // Don't inline this slow path.
+ ROBIN_HOOD(NOINLINE) T* performAllocation() {
+ size_t const numElementsToAlloc = calcNumElementsToAlloc();
+
+ // alloc new memory: [prev |T, T, ... T]
+ size_t const bytes = ALIGNMENT + ALIGNED_SIZE * numElementsToAlloc;
+ ROBIN_HOOD_LOG("std::malloc " << bytes << " = " << ALIGNMENT << " + " << ALIGNED_SIZE
+ << " * " << numElementsToAlloc)
+ add(assertNotNull<std::bad_alloc>(std::malloc(bytes)), bytes);
+ return mHead;
+ }
+
+ // enforce byte alignment of the T's
+#if ROBIN_HOOD(CXX) >= ROBIN_HOOD(CXX14)
+ static constexpr size_t ALIGNMENT =
+ (std::max)(std::alignment_of<T>::value, std::alignment_of<T*>::value);
+#else
+ static const size_t ALIGNMENT =
+ (ROBIN_HOOD_STD::alignment_of<T>::value > ROBIN_HOOD_STD::alignment_of<T*>::value)
+ ? ROBIN_HOOD_STD::alignment_of<T>::value
+ : +ROBIN_HOOD_STD::alignment_of<T*>::value; // the + is a workaround
+#endif
+
+ static constexpr size_t ALIGNED_SIZE = ((sizeof(T) - 1) / ALIGNMENT + 1) * ALIGNMENT;
+
+ static_assert(MinNumAllocs >= 1, "MinNumAllocs");
+ static_assert(MaxNumAllocs >= MinNumAllocs, "MaxNumAllocs");
+ static_assert(ALIGNED_SIZE >= sizeof(T*), "ALIGNED_SIZE");
+ static_assert(0 == (ALIGNED_SIZE % sizeof(T*)), "ALIGNED_SIZE mod");
+ static_assert(ALIGNMENT >= sizeof(T*), "ALIGNMENT");
+
+ T* mHead{nullptr};
+ T** mListForFree{nullptr};
+};
+
+template <typename T, size_t MinSize, size_t MaxSize, bool IsFlat>
+struct NodeAllocator;
+
+// dummy allocator that does nothing
+template <typename T, size_t MinSize, size_t MaxSize>
+struct NodeAllocator<T, MinSize, MaxSize, true> {
+
+ // we are not using the data, so just free it.
+ void addOrFree(void* ptr, size_t ROBIN_HOOD_UNUSED(numBytes) /*unused*/) noexcept {
+ ROBIN_HOOD_LOG("std::free")
+ std::free(ptr);
+ }
+};
+
+template <typename T, size_t MinSize, size_t MaxSize>
+struct NodeAllocator<T, MinSize, MaxSize, false> : public BulkPoolAllocator<T, MinSize, MaxSize> {};
+
+// c++14 doesn't have is_nothrow_swappable, and clang++ 6.0.1 doesn't like it either, so I'm making
+// my own here.
+namespace swappable {
+#if ROBIN_HOOD(CXX) < ROBIN_HOOD(CXX17)
+using std::swap;
+template <typename T>
+struct nothrow {
+ static const bool value = noexcept(swap(std::declval<T&>(), std::declval<T&>()));
+};
+#else
+template <typename T>
+struct nothrow {
+ static const bool value = std::is_nothrow_swappable<T>::value;
+};
+#endif
+} // namespace swappable
+
+} // namespace detail
+
+struct is_transparent_tag {};
+
+// A custom pair implementation is used in the map because std::pair is not is_trivially_copyable,
+// which means it would not be allowed to be used in std::memcpy. This struct is copyable, which is
+// also tested.
+template <typename T1, typename T2>
+struct pair {
+ using first_type = T1;
+ using second_type = T2;
+
+ template <typename U1 = T1, typename U2 = T2,
+ typename = typename std::enable_if<std::is_default_constructible<U1>::value &&
+ std::is_default_constructible<U2>::value>::type>
+ constexpr pair() noexcept(noexcept(U1()) && noexcept(U2()))
+ : first()
+ , second() {}
+
+ // pair constructors are explicit so we don't accidentally call this ctor when we don't have to.
+ explicit constexpr pair(std::pair<T1, T2> const& o) noexcept(
+ noexcept(T1(std::declval<T1 const&>())) && noexcept(T2(std::declval<T2 const&>())))
+ : first(o.first)
+ , second(o.second) {}
+
+ // pair constructors are explicit so we don't accidentally call this ctor when we don't have to.
+ explicit constexpr pair(std::pair<T1, T2>&& o) noexcept(noexcept(
+ T1(std::move(std::declval<T1&&>()))) && noexcept(T2(std::move(std::declval<T2&&>()))))
+ : first(std::move(o.first))
+ , second(std::move(o.second)) {}
+
+ constexpr pair(T1&& a, T2&& b) noexcept(noexcept(
+ T1(std::move(std::declval<T1&&>()))) && noexcept(T2(std::move(std::declval<T2&&>()))))
+ : first(std::move(a))
+ , second(std::move(b)) {}
+
+ template <typename U1, typename U2>
+ constexpr pair(U1&& a, U2&& b) noexcept(noexcept(T1(std::forward<U1>(
+ std::declval<U1&&>()))) && noexcept(T2(std::forward<U2>(std::declval<U2&&>()))))
+ : first(std::forward<U1>(a))
+ , second(std::forward<U2>(b)) {}
+
+ template <typename... U1, typename... U2>
+ // MSVC 2015 produces error "C2476: ‘constexpr’ constructor does not initialize all members"
+ // if this constructor is constexpr
+#if !ROBIN_HOOD(BROKEN_CONSTEXPR)
+ constexpr
+#endif
+ pair(std::piecewise_construct_t /*unused*/, std::tuple<U1...> a,
+ std::tuple<U2...>
+ b) noexcept(noexcept(pair(std::declval<std::tuple<U1...>&>(),
+ std::declval<std::tuple<U2...>&>(),
+ ROBIN_HOOD_STD::index_sequence_for<U1...>(),
+ ROBIN_HOOD_STD::index_sequence_for<U2...>())))
+ : pair(a, b, ROBIN_HOOD_STD::index_sequence_for<U1...>(),
+ ROBIN_HOOD_STD::index_sequence_for<U2...>()) {
+ }
+
+ // constructor called from the std::piecewise_construct_t ctor
+ template <typename... U1, size_t... I1, typename... U2, size_t... I2>
+ pair(std::tuple<U1...>& a, std::tuple<U2...>& b, ROBIN_HOOD_STD::index_sequence<I1...> /*unused*/, ROBIN_HOOD_STD::index_sequence<I2...> /*unused*/) noexcept(
+ noexcept(T1(std::forward<U1>(std::get<I1>(
+ std::declval<std::tuple<
+ U1...>&>()))...)) && noexcept(T2(std::
+ forward<U2>(std::get<I2>(
+ std::declval<std::tuple<U2...>&>()))...)))
+ : first(std::forward<U1>(std::get<I1>(a))...)
+ , second(std::forward<U2>(std::get<I2>(b))...) {
+ // make visual studio compiler happy about warning about unused a & b.
+ // Visual studio's pair implementation disables warning 4100.
+ (void)a;
+ (void)b;
+ }
+
+ void swap(pair<T1, T2>& o) noexcept((detail::swappable::nothrow<T1>::value) &&
+ (detail::swappable::nothrow<T2>::value)) {
+ using std::swap;
+ swap(first, o.first);
+ swap(second, o.second);
+ }
+
+ T1 first; // NOLINT(misc-non-private-member-variables-in-classes)
+ T2 second; // NOLINT(misc-non-private-member-variables-in-classes)
+};
+
+template <typename A, typename B>
+inline void swap(pair<A, B>& a, pair<A, B>& b) noexcept(
+ noexcept(std::declval<pair<A, B>&>().swap(std::declval<pair<A, B>&>()))) {
+ a.swap(b);
+}
+
+template <typename A, typename B>
+inline constexpr bool operator==(pair<A, B> const& x, pair<A, B> const& y) {
+ return (x.first == y.first) && (x.second == y.second);
+}
+template <typename A, typename B>
+inline constexpr bool operator!=(pair<A, B> const& x, pair<A, B> const& y) {
+ return !(x == y);
+}
+template <typename A, typename B>
+inline constexpr bool operator<(pair<A, B> const& x, pair<A, B> const& y) noexcept(noexcept(
+ std::declval<A const&>() < std::declval<A const&>()) && noexcept(std::declval<B const&>() <
+ std::declval<B const&>())) {
+ return x.first < y.first || (!(y.first < x.first) && x.second < y.second);
+}
+template <typename A, typename B>
+inline constexpr bool operator>(pair<A, B> const& x, pair<A, B> const& y) {
+ return y < x;
+}
+template <typename A, typename B>
+inline constexpr bool operator<=(pair<A, B> const& x, pair<A, B> const& y) {
+ return !(x > y);
+}
+template <typename A, typename B>
+inline constexpr bool operator>=(pair<A, B> const& x, pair<A, B> const& y) {
+ return !(x < y);
+}
+
+inline size_t hash_bytes(void const* ptr, size_t len) noexcept {
+ static constexpr uint64_t m = UINT64_C(0xc6a4a7935bd1e995);
+ static constexpr uint64_t seed = UINT64_C(0xe17a1465);
+ static constexpr unsigned int r = 47;
+
+ auto const* const data64 = static_cast<uint64_t const*>(ptr);
+ uint64_t h = seed ^ (len * m);
+
+ size_t const n_blocks = len / 8;
+ for (size_t i = 0; i < n_blocks; ++i) {
+ auto k = detail::unaligned_load<uint64_t>(data64 + i);
+
+ k *= m;
+ k ^= k >> r;
+ k *= m;
+
+ h ^= k;
+ h *= m;
+ }
+
+ auto const* const data8 = reinterpret_cast<uint8_t const*>(data64 + n_blocks);
+ switch (len & 7U) {
+ case 7:
+ h ^= static_cast<uint64_t>(data8[6]) << 48U;
+ ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
+ case 6:
+ h ^= static_cast<uint64_t>(data8[5]) << 40U;
+ ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
+ case 5:
+ h ^= static_cast<uint64_t>(data8[4]) << 32U;
+ ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
+ case 4:
+ h ^= static_cast<uint64_t>(data8[3]) << 24U;
+ ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
+ case 3:
+ h ^= static_cast<uint64_t>(data8[2]) << 16U;
+ ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
+ case 2:
+ h ^= static_cast<uint64_t>(data8[1]) << 8U;
+ ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
+ case 1:
+ h ^= static_cast<uint64_t>(data8[0]);
+ h *= m;
+ ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
+ default:
+ break;
+ }
+
+ h ^= h >> r;
+
+ // not doing the final step here, because this will be done by keyToIdx anyways
+ // h *= m;
+ // h ^= h >> r;
+ return static_cast<size_t>(h);
+}
+
+inline size_t hash_int(uint64_t x) noexcept {
+ // tried lots of different hashes, let's stick with murmurhash3. It's simple, fast, well tested,
+ // and doesn't need any special 128bit operations.
+ x ^= x >> 33U;
+ x *= UINT64_C(0xff51afd7ed558ccd);
+ x ^= x >> 33U;
+
+ // not doing the final step here, because this will be done by keyToIdx anyways
+ // x *= UINT64_C(0xc4ceb9fe1a85ec53);
+ // x ^= x >> 33U;
+ return static_cast<size_t>(x);
+}
+
+// A thin wrapper around std::hash, performing an additional simple mixing step of the result.
+template <typename T, typename Enable = void>
+struct hash : public std::hash<T> {
+ size_t operator()(T const& obj) const
+ noexcept(noexcept(std::declval<std::hash<T>>().operator()(std::declval<T const&>()))) {
+ // call base hash
+ auto result = std::hash<T>::operator()(obj);
+ // return mixed of that, to be safe against identity hash
+ return hash_int(static_cast<detail::SizeT>(result));
+ }
+};
+
+template <typename CharT>
+struct hash<std::basic_string<CharT>> {
+ size_t operator()(std::basic_string<CharT> const& str) const noexcept {
+ return hash_bytes(str.data(), sizeof(CharT) * str.size());
+ }
+};
+
+#if ROBIN_HOOD(CXX) >= ROBIN_HOOD(CXX17)
+template <typename CharT>
+struct hash<std::basic_string_view<CharT>> {
+ size_t operator()(std::basic_string_view<CharT> const& sv) const noexcept {
+ return hash_bytes(sv.data(), sizeof(CharT) * sv.size());
+ }
+};
+#endif
+
+template <class T>
+struct hash<T*> {
+ size_t operator()(T* ptr) const noexcept {
+ return hash_int(reinterpret_cast<detail::SizeT>(ptr));
+ }
+};
+
+template <class T>
+struct hash<std::unique_ptr<T>> {
+ size_t operator()(std::unique_ptr<T> const& ptr) const noexcept {
+ return hash_int(reinterpret_cast<detail::SizeT>(ptr.get()));
+ }
+};
+
+template <class T>
+struct hash<std::shared_ptr<T>> {
+ size_t operator()(std::shared_ptr<T> const& ptr) const noexcept {
+ return hash_int(reinterpret_cast<detail::SizeT>(ptr.get()));
+ }
+};
+
+template <typename Enum>
+struct hash<Enum, typename std::enable_if<std::is_enum<Enum>::value>::type> {
+ size_t operator()(Enum e) const noexcept {
+ using Underlying = typename std::underlying_type<Enum>::type;
+ return hash<Underlying>{}(static_cast<Underlying>(e));
+ }
+};
+
+#define ROBIN_HOOD_HASH_INT(T) \
+ template <> \
+ struct hash<T> { \
+ size_t operator()(T const& obj) const noexcept { \
+ return hash_int(static_cast<uint64_t>(obj)); \
+ } \
+ }
+
+#if defined(__GNUC__) && !defined(__clang__)
+// GCC warns when static_cast<uint64_t> is applied to a type that is already
+// 64 bits wide; silence -Wuseless-cast around the instantiations below.
+#    pragma GCC diagnostic push
+#    pragma GCC diagnostic ignored "-Wuseless-cast"
+#endif
+// see https://en.cppreference.com/w/cpp/utility/hash
+ROBIN_HOOD_HASH_INT(bool);
+ROBIN_HOOD_HASH_INT(char);
+ROBIN_HOOD_HASH_INT(signed char);
+ROBIN_HOOD_HASH_INT(unsigned char);
+ROBIN_HOOD_HASH_INT(char16_t);
+ROBIN_HOOD_HASH_INT(char32_t);
+#if ROBIN_HOOD(HAS_NATIVE_WCHART)
+ROBIN_HOOD_HASH_INT(wchar_t);
+#endif
+ROBIN_HOOD_HASH_INT(short);
+ROBIN_HOOD_HASH_INT(unsigned short);
+ROBIN_HOOD_HASH_INT(int);
+ROBIN_HOOD_HASH_INT(unsigned int);
+ROBIN_HOOD_HASH_INT(long);
+ROBIN_HOOD_HASH_INT(long long);
+ROBIN_HOOD_HASH_INT(unsigned long);
+ROBIN_HOOD_HASH_INT(unsigned long long);
+#if defined(__GNUC__) && !defined(__clang__)
+#    pragma GCC diagnostic pop
+#endif
+namespace detail {
+
+// void_t-style helper: maps any well-formed type to void (detection idiom).
+template <typename T>
+struct void_type {
+    using type = void;
+};
+
+// Detects whether T declares a nested `is_transparent` type. Used to enable
+// heterogeneous (transparent) lookup when both Hash and KeyEqual opt in.
+template <typename T, typename = void>
+struct has_is_transparent : public std::false_type {};
+
+template <typename T>
+struct has_is_transparent<T, typename void_type<typename T::is_transparent>::type>
+    : public std::true_type {};
+
+// using wrapper classes for hash and key_equal prevents the diamond problem when the same type
+// is used. see https://stackoverflow.com/a/28771920/48181
+// NOTE(review): inheriting from the functor (instead of storing a member) also
+// allows empty functors to take no storage via the empty-base optimization.
+template <typename T>
+struct WrapHash : public T {
+    WrapHash() = default;
+    // Propagates the noexcept-ness of T's copy constructor.
+    explicit WrapHash(T const& o) noexcept(noexcept(T(std::declval<T const&>())))
+        : T(o) {}
+};
+
+template <typename T>
+struct WrapKeyEqual : public T {
+    WrapKeyEqual() = default;
+    explicit WrapKeyEqual(T const& o) noexcept(noexcept(T(std::declval<T const&>())))
+        : T(o) {}
+};
+
+// A highly optimized hashmap implementation, using the Robin Hood algorithm.
+//
+// In most cases, this map should be usable as a drop-in replacement for std::unordered_map, but
+// be about 2x faster in most cases and require much less allocations.
+//
+// This implementation uses the following memory layout:
+//
+// [Node, Node, ... Node | info, info, ... infoSentinel ]
+//
+// * Node: either a DataNode that directly has the std::pair<key, val> as member,
+// or a DataNode with a pointer to std::pair<key,val>. Which DataNode representation to use
+//   depends on how fast the swap() operation is. Heuristically, this is automatically chosen
+// based on sizeof(). there are always 2^n Nodes.
+//
+// * info: Each Node in the map has a corresponding info byte, so there are 2^n info bytes.
+// Each byte is initialized to 0, meaning the corresponding Node is empty. Set to 1 means the
+// corresponding node contains data. Set to 2 means the corresponding Node is filled, but it
+// actually belongs to the previous position and was pushed out because that place is already
+// taken.
+//
+// * infoSentinel: Sentinel byte set to 1, so that iterator's ++ can stop at end() without the
+// need for a idx variable.
+//
+// According to STL, order of templates has effect on throughput. That's why I've moved the
+// boolean to the front.
+// https://www.reddit.com/r/cpp/comments/ahp6iu/compile_time_binary_size_reductions_and_cs_future/eeguck4/
+template <bool IsFlat, size_t MaxLoadFactor100, typename Key, typename T, typename Hash,
+ typename KeyEqual>
+class Table
+ : public WrapHash<Hash>,
+ public WrapKeyEqual<KeyEqual>,
+ detail::NodeAllocator<
+ typename std::conditional<
+ std::is_void<T>::value, Key,
+ robin_hood::pair<typename std::conditional<IsFlat, Key, Key const>::type, T>>::type,
+ 4, 16384, IsFlat> {
+public:
+ static constexpr bool is_flat = IsFlat;
+ static constexpr bool is_map = !std::is_void<T>::value;
+ static constexpr bool is_set = !is_map;
+ static constexpr bool is_transparent =
+ has_is_transparent<Hash>::value && has_is_transparent<KeyEqual>::value;
+
+ using key_type = Key;
+ using mapped_type = T;
+ using value_type = typename std::conditional<
+ is_set, Key,
+ robin_hood::pair<typename std::conditional<is_flat, Key, Key const>::type, T>>::type;
+ using size_type = size_t;
+ using hasher = Hash;
+ using key_equal = KeyEqual;
+ using Self = Table<IsFlat, MaxLoadFactor100, key_type, mapped_type, hasher, key_equal>;
+
+private:
+ static_assert(MaxLoadFactor100 > 10 && MaxLoadFactor100 < 100,
+ "MaxLoadFactor100 needs to be >10 && < 100");
+
+ using WHash = WrapHash<Hash>;
+ using WKeyEqual = WrapKeyEqual<KeyEqual>;
+
+ // configuration defaults
+
+ // make sure we have 8 elements, needed to quickly rehash mInfo
+ static constexpr size_t InitialNumElements = sizeof(uint64_t);
+ static constexpr uint32_t InitialInfoNumBits = 5;
+ static constexpr uint8_t InitialInfoInc = 1U << InitialInfoNumBits;
+ static constexpr size_t InfoMask = InitialInfoInc - 1U;
+ static constexpr uint8_t InitialInfoHashShift = 0;
+ using DataPool = detail::NodeAllocator<value_type, 4, 16384, IsFlat>;
+
+ // type needs to be wider than uint8_t.
+ using InfoType = uint32_t;
+
+ // DataNode ////////////////////////////////////////////////////////
+
+ // Primary template for the data node. We have special implementations for small and big
+ // objects. For large objects it is assumed that swap() is fairly slow, so we allocate these
+ // on the heap so swap merely swaps a pointer.
+ template <typename M, bool>
+ class DataNode {};
+
+ // Small: just allocate on the stack.
+ template <typename M>
+ class DataNode<M, true> final {
+ public:
+ template <typename... Args>
+ explicit DataNode(M& ROBIN_HOOD_UNUSED(map) /*unused*/, Args&&... args) noexcept(
+ noexcept(value_type(std::forward<Args>(args)...)))
+ : mData(std::forward<Args>(args)...) {}
+
+ DataNode(M& ROBIN_HOOD_UNUSED(map) /*unused*/, DataNode<M, true>&& n) noexcept(
+ std::is_nothrow_move_constructible<value_type>::value)
+ : mData(std::move(n.mData)) {}
+
+ // doesn't do anything
+ void destroy(M& ROBIN_HOOD_UNUSED(map) /*unused*/) noexcept {}
+ void destroyDoNotDeallocate() noexcept {}
+
+ value_type const* operator->() const noexcept {
+ return &mData;
+ }
+ value_type* operator->() noexcept {
+ return &mData;
+ }
+
+ const value_type& operator*() const noexcept {
+ return mData;
+ }
+
+ value_type& operator*() noexcept {
+ return mData;
+ }
+
+ template <typename VT = value_type>
+ ROBIN_HOOD(NODISCARD)
+ typename std::enable_if<is_map, typename VT::first_type&>::type getFirst() noexcept {
+ return mData.first;
+ }
+ template <typename VT = value_type>
+ ROBIN_HOOD(NODISCARD)
+ typename std::enable_if<is_set, VT&>::type getFirst() noexcept {
+ return mData;
+ }
+
+ template <typename VT = value_type>
+ ROBIN_HOOD(NODISCARD)
+ typename std::enable_if<is_map, typename VT::first_type const&>::type
+ getFirst() const noexcept {
+ return mData.first;
+ }
+ template <typename VT = value_type>
+ ROBIN_HOOD(NODISCARD)
+ typename std::enable_if<is_set, VT const&>::type getFirst() const noexcept {
+ return mData;
+ }
+
+ template <typename MT = mapped_type>
+ ROBIN_HOOD(NODISCARD)
+ typename std::enable_if<is_map, MT&>::type getSecond() noexcept {
+ return mData.second;
+ }
+
+ template <typename MT = mapped_type>
+ ROBIN_HOOD(NODISCARD)
+ typename std::enable_if<is_set, MT const&>::type getSecond() const noexcept {
+ return mData.second;
+ }
+
+ void swap(DataNode<M, true>& o) noexcept(
+ noexcept(std::declval<value_type>().swap(std::declval<value_type>()))) {
+ mData.swap(o.mData);
+ }
+
+ private:
+ value_type mData;
+ };
+
+ // big object: allocate on heap.
+ template <typename M>
+ class DataNode<M, false> {
+ public:
+ template <typename... Args>
+ explicit DataNode(M& map, Args&&... args)
+ : mData(map.allocate()) {
+ ::new (static_cast<void*>(mData)) value_type(std::forward<Args>(args)...);
+ }
+
+ DataNode(M& ROBIN_HOOD_UNUSED(map) /*unused*/, DataNode<M, false>&& n) noexcept
+ : mData(std::move(n.mData)) {}
+
+ void destroy(M& map) noexcept {
+ // don't deallocate, just put it into list of datapool.
+ mData->~value_type();
+ map.deallocate(mData);
+ }
+
+ void destroyDoNotDeallocate() noexcept {
+ mData->~value_type();
+ }
+
+ value_type const* operator->() const noexcept {
+ return mData;
+ }
+
+ value_type* operator->() noexcept {
+ return mData;
+ }
+
+ const value_type& operator*() const {
+ return *mData;
+ }
+
+ value_type& operator*() {
+ return *mData;
+ }
+
+ template <typename VT = value_type>
+ ROBIN_HOOD(NODISCARD)
+ typename std::enable_if<is_map, typename VT::first_type&>::type getFirst() noexcept {
+ return mData->first;
+ }
+ template <typename VT = value_type>
+ ROBIN_HOOD(NODISCARD)
+ typename std::enable_if<is_set, VT&>::type getFirst() noexcept {
+ return *mData;
+ }
+
+ template <typename VT = value_type>
+ ROBIN_HOOD(NODISCARD)
+ typename std::enable_if<is_map, typename VT::first_type const&>::type
+ getFirst() const noexcept {
+ return mData->first;
+ }
+ template <typename VT = value_type>
+ ROBIN_HOOD(NODISCARD)
+ typename std::enable_if<is_set, VT const&>::type getFirst() const noexcept {
+ return *mData;
+ }
+
+ template <typename MT = mapped_type>
+ ROBIN_HOOD(NODISCARD)
+ typename std::enable_if<is_map, MT&>::type getSecond() noexcept {
+ return mData->second;
+ }
+
+ template <typename MT = mapped_type>
+ ROBIN_HOOD(NODISCARD)
+ typename std::enable_if<is_map, MT const&>::type getSecond() const noexcept {
+ return mData->second;
+ }
+
+ void swap(DataNode<M, false>& o) noexcept {
+ using std::swap;
+ swap(mData, o.mData);
+ }
+
+ private:
+ value_type* mData;
+ };
+
+ using Node = DataNode<Self, IsFlat>;
+
+ // helpers for insertKeyPrepareEmptySpot: extract first entry (only const required)
+ ROBIN_HOOD(NODISCARD) key_type const& getFirstConst(Node const& n) const noexcept {
+ return n.getFirst();
+ }
+
+ // in case we have void mapped_type, we are not using a pair, thus we just route k through.
+ // No need to disable this because it's just not used if not applicable.
+ ROBIN_HOOD(NODISCARD) key_type const& getFirstConst(key_type const& k) const noexcept {
+ return k;
+ }
+
+ // in case we have non-void mapped_type, we have a standard robin_hood::pair
+ template <typename Q = mapped_type>
+ ROBIN_HOOD(NODISCARD)
+ typename std::enable_if<!std::is_void<Q>::value, key_type const&>::type
+ getFirstConst(value_type const& vt) const noexcept {
+ return vt.first;
+ }
+
+ // Cloner //////////////////////////////////////////////////////////
+
+ template <typename M, bool UseMemcpy>
+ struct Cloner;
+
+ // fast path: Just copy data, without allocating anything.
+ template <typename M>
+ struct Cloner<M, true> {
+ void operator()(M const& source, M& target) const {
+ auto const* const src = reinterpret_cast<char const*>(source.mKeyVals);
+ auto* tgt = reinterpret_cast<char*>(target.mKeyVals);
+ auto const numElementsWithBuffer = target.calcNumElementsWithBuffer(target.mMask + 1);
+ std::copy(src, src + target.calcNumBytesTotal(numElementsWithBuffer), tgt);
+ }
+ };
+
+ template <typename M>
+ struct Cloner<M, false> {
+ void operator()(M const& s, M& t) const {
+ auto const numElementsWithBuffer = t.calcNumElementsWithBuffer(t.mMask + 1);
+ std::copy(s.mInfo, s.mInfo + t.calcNumBytesInfo(numElementsWithBuffer), t.mInfo);
+
+ for (size_t i = 0; i < numElementsWithBuffer; ++i) {
+ if (t.mInfo[i]) {
+ ::new (static_cast<void*>(t.mKeyVals + i)) Node(t, *s.mKeyVals[i]);
+ }
+ }
+ }
+ };
+
+ // Destroyer ///////////////////////////////////////////////////////
+
+ template <typename M, bool IsFlatAndTrivial>
+ struct Destroyer {};
+
+ template <typename M>
+ struct Destroyer<M, true> {
+ void nodes(M& m) const noexcept {
+ m.mNumElements = 0;
+ }
+
+ void nodesDoNotDeallocate(M& m) const noexcept {
+ m.mNumElements = 0;
+ }
+ };
+
+ template <typename M>
+ struct Destroyer<M, false> {
+ void nodes(M& m) const noexcept {
+ m.mNumElements = 0;
+ // clear also resets mInfo to 0, that's sometimes not necessary.
+ auto const numElementsWithBuffer = m.calcNumElementsWithBuffer(m.mMask + 1);
+
+ for (size_t idx = 0; idx < numElementsWithBuffer; ++idx) {
+ if (0 != m.mInfo[idx]) {
+ Node& n = m.mKeyVals[idx];
+ n.destroy(m);
+ n.~Node();
+ }
+ }
+ }
+
+ void nodesDoNotDeallocate(M& m) const noexcept {
+ m.mNumElements = 0;
+ // clear also resets mInfo to 0, that's sometimes not necessary.
+ auto const numElementsWithBuffer = m.calcNumElementsWithBuffer(m.mMask + 1);
+ for (size_t idx = 0; idx < numElementsWithBuffer; ++idx) {
+ if (0 != m.mInfo[idx]) {
+ Node& n = m.mKeyVals[idx];
+ n.destroyDoNotDeallocate();
+ n.~Node();
+ }
+ }
+ }
+ };
+
+ // Iter ////////////////////////////////////////////////////////////
+
+ struct fast_forward_tag {};
+
+ // generic iterator for both const_iterator and iterator.
+ template <bool IsConst>
+ // NOLINTNEXTLINE(hicpp-special-member-functions,cppcoreguidelines-special-member-functions)
+ class Iter {
+ private:
+ using NodePtr = typename std::conditional<IsConst, Node const*, Node*>::type;
+
+ public:
+ using difference_type = std::ptrdiff_t;
+ using value_type = typename Self::value_type;
+ using reference = typename std::conditional<IsConst, value_type const&, value_type&>::type;
+ using pointer = typename std::conditional<IsConst, value_type const*, value_type*>::type;
+ using iterator_category = std::forward_iterator_tag;
+
+ // default constructed iterator can be compared to itself, but WON'T return true when
+ // compared to end().
+ Iter() = default;
+
+ // Rule of zero: nothing specified. The conversion constructor is only enabled for
+ // iterator to const_iterator, so it doesn't accidentally work as a copy ctor.
+
+ // Conversion constructor from iterator to const_iterator.
+ template <bool OtherIsConst,
+ typename = typename std::enable_if<IsConst && !OtherIsConst>::type>
+ // NOLINTNEXTLINE(hicpp-explicit-conversions)
+ Iter(Iter<OtherIsConst> const& other) noexcept
+ : mKeyVals(other.mKeyVals)
+ , mInfo(other.mInfo) {}
+
+ Iter(NodePtr valPtr, uint8_t const* infoPtr) noexcept
+ : mKeyVals(valPtr)
+ , mInfo(infoPtr) {}
+
+ Iter(NodePtr valPtr, uint8_t const* infoPtr,
+ fast_forward_tag ROBIN_HOOD_UNUSED(tag) /*unused*/) noexcept
+ : mKeyVals(valPtr)
+ , mInfo(infoPtr) {
+ fastForward();
+ }
+
+ template <bool OtherIsConst,
+ typename = typename std::enable_if<IsConst && !OtherIsConst>::type>
+ Iter& operator=(Iter<OtherIsConst> const& other) noexcept {
+ mKeyVals = other.mKeyVals;
+ mInfo = other.mInfo;
+ return *this;
+ }
+
+ // prefix increment. Undefined behavior if we are at end()!
+ Iter& operator++() noexcept {
+ mInfo++;
+ mKeyVals++;
+ fastForward();
+ return *this;
+ }
+
+ Iter operator++(int) noexcept {
+ Iter tmp = *this;
+ ++(*this);
+ return tmp;
+ }
+
+ reference operator*() const {
+ return **mKeyVals;
+ }
+
+ pointer operator->() const {
+ return &**mKeyVals;
+ }
+
+ template <bool O>
+ bool operator==(Iter<O> const& o) const noexcept {
+ return mKeyVals == o.mKeyVals;
+ }
+
+ template <bool O>
+ bool operator!=(Iter<O> const& o) const noexcept {
+ return mKeyVals != o.mKeyVals;
+ }
+
+ private:
+ // fast forward to the next non-free info byte
+ // I've tried a few variants that don't depend on intrinsics, but unfortunately they are
+ // quite a bit slower than this one. So I've reverted that change again. See map_benchmark.
+ void fastForward() noexcept {
+ size_t n = 0;
+ while (0U == (n = detail::unaligned_load<size_t>(mInfo))) {
+ mInfo += sizeof(size_t);
+ mKeyVals += sizeof(size_t);
+ }
+#if defined(ROBIN_HOOD_DISABLE_INTRINSICS)
+ // we know for certain that within the next 8 bytes we'll find a non-zero one.
+ if (ROBIN_HOOD_UNLIKELY(0U == detail::unaligned_load<uint32_t>(mInfo))) {
+ mInfo += 4;
+ mKeyVals += 4;
+ }
+ if (ROBIN_HOOD_UNLIKELY(0U == detail::unaligned_load<uint16_t>(mInfo))) {
+ mInfo += 2;
+ mKeyVals += 2;
+ }
+ if (ROBIN_HOOD_UNLIKELY(0U == *mInfo)) {
+ mInfo += 1;
+ mKeyVals += 1;
+ }
+#else
+# if ROBIN_HOOD(LITTLE_ENDIAN)
+ auto inc = ROBIN_HOOD_COUNT_TRAILING_ZEROES(n) / 8;
+# else
+ auto inc = ROBIN_HOOD_COUNT_LEADING_ZEROES(n) / 8;
+# endif
+ mInfo += inc;
+ mKeyVals += inc;
+#endif
+ }
+
+ friend class Table<IsFlat, MaxLoadFactor100, key_type, mapped_type, hasher, key_equal>;
+ NodePtr mKeyVals{nullptr};
+ uint8_t const* mInfo{nullptr};
+ };
+
+ ////////////////////////////////////////////////////////////////////
+
+ // highly performance relevant code.
+ // Lower bits are used for indexing into the array (2^n size)
+ // The upper 1-5 bits need to be a reasonable good hash, to save comparisons.
+ template <typename HashKey>
+ void keyToIdx(HashKey&& key, size_t* idx, InfoType* info) const {
+ // In addition to whatever hash is used, add another mul & shift so we get better hashing.
+ // This serves as a bad hash prevention, if the given data is
+ // badly mixed.
+ auto h = static_cast<uint64_t>(WHash::operator()(key));
+
+ h *= mHashMultiplier;
+ h ^= h >> 33U;
+
+ // the lower InitialInfoNumBits are reserved for info.
+ *info = mInfoInc + static_cast<InfoType>((h & InfoMask) >> mInfoHashShift);
+ *idx = (static_cast<size_t>(h) >> InitialInfoNumBits) & mMask;
+ }
+
+ // forwards the index by one, wrapping around at the end
+ void next(InfoType* info, size_t* idx) const noexcept {
+ *idx = *idx + 1;
+ *info += mInfoInc;
+ }
+
+ void nextWhileLess(InfoType* info, size_t* idx) const noexcept {
+ // unrolling this by hand did not bring any speedups.
+ while (*info < mInfo[*idx]) {
+ next(info, idx);
+ }
+ }
+
+ // Shift everything up by one element. Tries to move stuff around.
+ void
+ shiftUp(size_t startIdx,
+ size_t const insertion_idx) noexcept(std::is_nothrow_move_assignable<Node>::value) {
+ auto idx = startIdx;
+ ::new (static_cast<void*>(mKeyVals + idx)) Node(std::move(mKeyVals[idx - 1]));
+ while (--idx != insertion_idx) {
+ mKeyVals[idx] = std::move(mKeyVals[idx - 1]);
+ }
+
+ idx = startIdx;
+ while (idx != insertion_idx) {
+ ROBIN_HOOD_COUNT(shiftUp)
+ mInfo[idx] = static_cast<uint8_t>(mInfo[idx - 1] + mInfoInc);
+ if (ROBIN_HOOD_UNLIKELY(mInfo[idx] + mInfoInc > 0xFF)) {
+ mMaxNumElementsAllowed = 0;
+ }
+ --idx;
+ }
+ }
+
+ void shiftDown(size_t idx) noexcept(std::is_nothrow_move_assignable<Node>::value) {
+ // until we find one that is either empty or has zero offset.
+ // TODO(martinus) we don't need to move everything, just the last one for the same
+ // bucket.
+ mKeyVals[idx].destroy(*this);
+
+ // until we find one that is either empty or has zero offset.
+ while (mInfo[idx + 1] >= 2 * mInfoInc) {
+ ROBIN_HOOD_COUNT(shiftDown)
+ mInfo[idx] = static_cast<uint8_t>(mInfo[idx + 1] - mInfoInc);
+ mKeyVals[idx] = std::move(mKeyVals[idx + 1]);
+ ++idx;
+ }
+
+ mInfo[idx] = 0;
+ // don't destroy, we've moved it
+ // mKeyVals[idx].destroy(*this);
+ mKeyVals[idx].~Node();
+ }
+
+ // copy of find(), except that it returns iterator instead of const_iterator.
+ template <typename Other>
+ ROBIN_HOOD(NODISCARD)
+ size_t findIdx(Other const& key) const {
+ size_t idx{};
+ InfoType info{};
+ keyToIdx(key, &idx, &info);
+
+ do {
+ // unrolling this twice gives a bit of a speedup. More unrolling did not help.
+ if (info == mInfo[idx] &&
+ ROBIN_HOOD_LIKELY(WKeyEqual::operator()(key, mKeyVals[idx].getFirst()))) {
+ return idx;
+ }
+ next(&info, &idx);
+ if (info == mInfo[idx] &&
+ ROBIN_HOOD_LIKELY(WKeyEqual::operator()(key, mKeyVals[idx].getFirst()))) {
+ return idx;
+ }
+ next(&info, &idx);
+ } while (info <= mInfo[idx]);
+
+ // nothing found!
+ return mMask == 0 ? 0
+ : static_cast<size_t>(std::distance(
+ mKeyVals, reinterpret_cast_no_cast_align_warning<Node*>(mInfo)));
+ }
+
+ void cloneData(const Table& o) {
+ Cloner<Table, IsFlat && ROBIN_HOOD_IS_TRIVIALLY_COPYABLE(Node)>()(o, *this);
+ }
+
+ // inserts a keyval that is guaranteed to be new, e.g. when the hashmap is resized.
+ // @return True on success, false if something went wrong
+ void insert_move(Node&& keyval) {
+ // we don't retry, fail if overflowing
+ // don't need to check max num elements
+ if (0 == mMaxNumElementsAllowed && !try_increase_info()) {
+ throwOverflowError();
+ }
+
+ size_t idx{};
+ InfoType info{};
+ keyToIdx(keyval.getFirst(), &idx, &info);
+
+ // skip forward. Use <= because we are certain that the element is not there.
+ while (info <= mInfo[idx]) {
+ idx = idx + 1;
+ info += mInfoInc;
+ }
+
+ // key not found, so we are now exactly where we want to insert it.
+ auto const insertion_idx = idx;
+ auto const insertion_info = static_cast<uint8_t>(info);
+ if (ROBIN_HOOD_UNLIKELY(insertion_info + mInfoInc > 0xFF)) {
+ mMaxNumElementsAllowed = 0;
+ }
+
+ // find an empty spot
+ while (0 != mInfo[idx]) {
+ next(&info, &idx);
+ }
+
+ auto& l = mKeyVals[insertion_idx];
+ if (idx == insertion_idx) {
+ ::new (static_cast<void*>(&l)) Node(std::move(keyval));
+ } else {
+ shiftUp(idx, insertion_idx);
+ l = std::move(keyval);
+ }
+
+ // put at empty spot
+ mInfo[insertion_idx] = insertion_info;
+
+ ++mNumElements;
+ }
+
+public:
+ using iterator = Iter<false>;
+ using const_iterator = Iter<true>;
+
+ Table() noexcept(noexcept(Hash()) && noexcept(KeyEqual()))
+ : WHash()
+ , WKeyEqual() {
+ ROBIN_HOOD_TRACE(this)
+ }
+
+ // Creates an empty hash map. Nothing is allocated yet, this happens at the first insert.
+ // This tremendously speeds up ctor & dtor of a map that never receives an element. The
+    // penalty is paid at the first insert, and not before. Lookup of this empty map works
+ // because everybody points to DummyInfoByte::b. parameter bucket_count is dictated by the
+ // standard, but we can ignore it.
+ explicit Table(
+ size_t ROBIN_HOOD_UNUSED(bucket_count) /*unused*/, const Hash& h = Hash{},
+ const KeyEqual& equal = KeyEqual{}) noexcept(noexcept(Hash(h)) && noexcept(KeyEqual(equal)))
+ : WHash(h)
+ , WKeyEqual(equal) {
+ ROBIN_HOOD_TRACE(this)
+ }
+
+ template <typename Iter>
+ Table(Iter first, Iter last, size_t ROBIN_HOOD_UNUSED(bucket_count) /*unused*/ = 0,
+ const Hash& h = Hash{}, const KeyEqual& equal = KeyEqual{})
+ : WHash(h)
+ , WKeyEqual(equal) {
+ ROBIN_HOOD_TRACE(this)
+ insert(first, last);
+ }
+
+ Table(std::initializer_list<value_type> initlist,
+ size_t ROBIN_HOOD_UNUSED(bucket_count) /*unused*/ = 0, const Hash& h = Hash{},
+ const KeyEqual& equal = KeyEqual{})
+ : WHash(h)
+ , WKeyEqual(equal) {
+ ROBIN_HOOD_TRACE(this)
+ insert(initlist.begin(), initlist.end());
+ }
+
+ Table(Table&& o) noexcept
+ : WHash(std::move(static_cast<WHash&>(o)))
+ , WKeyEqual(std::move(static_cast<WKeyEqual&>(o)))
+ , DataPool(std::move(static_cast<DataPool&>(o))) {
+ ROBIN_HOOD_TRACE(this)
+ if (o.mMask) {
+ mHashMultiplier = std::move(o.mHashMultiplier);
+ mKeyVals = std::move(o.mKeyVals);
+ mInfo = std::move(o.mInfo);
+ mNumElements = std::move(o.mNumElements);
+ mMask = std::move(o.mMask);
+ mMaxNumElementsAllowed = std::move(o.mMaxNumElementsAllowed);
+ mInfoInc = std::move(o.mInfoInc);
+ mInfoHashShift = std::move(o.mInfoHashShift);
+ // set other's mask to 0 so its destructor won't do anything
+ o.init();
+ }
+ }
+
+ Table& operator=(Table&& o) noexcept {
+ ROBIN_HOOD_TRACE(this)
+ if (&o != this) {
+ if (o.mMask) {
+ // only move stuff if the other map actually has some data
+ destroy();
+ mHashMultiplier = std::move(o.mHashMultiplier);
+ mKeyVals = std::move(o.mKeyVals);
+ mInfo = std::move(o.mInfo);
+ mNumElements = std::move(o.mNumElements);
+ mMask = std::move(o.mMask);
+ mMaxNumElementsAllowed = std::move(o.mMaxNumElementsAllowed);
+ mInfoInc = std::move(o.mInfoInc);
+ mInfoHashShift = std::move(o.mInfoHashShift);
+ WHash::operator=(std::move(static_cast<WHash&>(o)));
+ WKeyEqual::operator=(std::move(static_cast<WKeyEqual&>(o)));
+ DataPool::operator=(std::move(static_cast<DataPool&>(o)));
+
+ o.init();
+
+ } else {
+ // nothing in the other map => just clear us.
+ clear();
+ }
+ }
+ return *this;
+ }
+
+ Table(const Table& o)
+ : WHash(static_cast<const WHash&>(o))
+ , WKeyEqual(static_cast<const WKeyEqual&>(o))
+ , DataPool(static_cast<const DataPool&>(o)) {
+ ROBIN_HOOD_TRACE(this)
+ if (!o.empty()) {
+ // not empty: create an exact copy. it is also possible to just iterate through all
+ // elements and insert them, but copying is probably faster.
+
+ auto const numElementsWithBuffer = calcNumElementsWithBuffer(o.mMask + 1);
+ auto const numBytesTotal = calcNumBytesTotal(numElementsWithBuffer);
+
+ ROBIN_HOOD_LOG("std::malloc " << numBytesTotal << " = calcNumBytesTotal("
+ << numElementsWithBuffer << ")")
+ mHashMultiplier = o.mHashMultiplier;
+ mKeyVals = static_cast<Node*>(
+ detail::assertNotNull<std::bad_alloc>(std::malloc(numBytesTotal)));
+        // no need for calloc because cloneData does memcpy
+ mInfo = reinterpret_cast<uint8_t*>(mKeyVals + numElementsWithBuffer);
+ mNumElements = o.mNumElements;
+ mMask = o.mMask;
+ mMaxNumElementsAllowed = o.mMaxNumElementsAllowed;
+ mInfoInc = o.mInfoInc;
+ mInfoHashShift = o.mInfoHashShift;
+ cloneData(o);
+ }
+ }
+
+ // Creates a copy of the given map. Copy constructor of each entry is used.
+ // Not sure why clang-tidy thinks this doesn't handle self assignment, it does
+ // NOLINTNEXTLINE(bugprone-unhandled-self-assignment,cert-oop54-cpp)
+ Table& operator=(Table const& o) {
+ ROBIN_HOOD_TRACE(this)
+ if (&o == this) {
+ // prevent assigning of itself
+ return *this;
+ }
+
+ // we keep using the old allocator and not assign the new one, because we want to keep
+ // the memory available. when it is the same size.
+ if (o.empty()) {
+ if (0 == mMask) {
+ // nothing to do, we are empty too
+ return *this;
+ }
+
+ // not empty: destroy what we have there
+ // clear also resets mInfo to 0, that's sometimes not necessary.
+ destroy();
+ init();
+ WHash::operator=(static_cast<const WHash&>(o));
+ WKeyEqual::operator=(static_cast<const WKeyEqual&>(o));
+ DataPool::operator=(static_cast<DataPool const&>(o));
+
+ return *this;
+ }
+
+ // clean up old stuff
+ Destroyer<Self, IsFlat && std::is_trivially_destructible<Node>::value>{}.nodes(*this);
+
+ if (mMask != o.mMask) {
+ // no luck: we don't have the same array size allocated, so we need to realloc.
+ if (0 != mMask) {
+ // only deallocate if we actually have data!
+ ROBIN_HOOD_LOG("std::free")
+ std::free(mKeyVals);
+ }
+
+ auto const numElementsWithBuffer = calcNumElementsWithBuffer(o.mMask + 1);
+ auto const numBytesTotal = calcNumBytesTotal(numElementsWithBuffer);
+ ROBIN_HOOD_LOG("std::malloc " << numBytesTotal << " = calcNumBytesTotal("
+ << numElementsWithBuffer << ")")
+ mKeyVals = static_cast<Node*>(
+ detail::assertNotNull<std::bad_alloc>(std::malloc(numBytesTotal)));
+
+ // no need for calloc here because cloneData performs a memcpy.
+ mInfo = reinterpret_cast<uint8_t*>(mKeyVals + numElementsWithBuffer);
+ // sentinel is set in cloneData
+ }
+ WHash::operator=(static_cast<const WHash&>(o));
+ WKeyEqual::operator=(static_cast<const WKeyEqual&>(o));
+ DataPool::operator=(static_cast<DataPool const&>(o));
+ mHashMultiplier = o.mHashMultiplier;
+ mNumElements = o.mNumElements;
+ mMask = o.mMask;
+ mMaxNumElementsAllowed = o.mMaxNumElementsAllowed;
+ mInfoInc = o.mInfoInc;
+ mInfoHashShift = o.mInfoHashShift;
+ cloneData(o);
+
+ return *this;
+ }
+
+ // Swaps everything between the two maps.
+ void swap(Table& o) {
+ ROBIN_HOOD_TRACE(this)
+ using std::swap;
+ swap(o, *this);
+ }
+
+ // Clears all data, without resizing.
+ void clear() {
+ ROBIN_HOOD_TRACE(this)
+ if (empty()) {
+ // don't do anything! also important because we don't want to write to
+ // DummyInfoByte::b, even though we would just write 0 to it.
+ return;
+ }
+
+ Destroyer<Self, IsFlat && std::is_trivially_destructible<Node>::value>{}.nodes(*this);
+
+ auto const numElementsWithBuffer = calcNumElementsWithBuffer(mMask + 1);
+ // clear everything, then set the sentinel again
+ uint8_t const z = 0;
+ std::fill(mInfo, mInfo + calcNumBytesInfo(numElementsWithBuffer), z);
+ mInfo[numElementsWithBuffer] = 1;
+
+ mInfoInc = InitialInfoInc;
+ mInfoHashShift = InitialInfoHashShift;
+ }
+
+ // Destroys the map and all it's contents.
+ ~Table() {
+ ROBIN_HOOD_TRACE(this)
+ destroy();
+ }
+
+ // Checks if both tables contain the same entries. Order is irrelevant.
+ bool operator==(const Table& other) const {
+ ROBIN_HOOD_TRACE(this)
+ if (other.size() != size()) {
+ return false;
+ }
+ for (auto const& otherEntry : other) {
+ if (!has(otherEntry)) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ bool operator!=(const Table& other) const {
+ ROBIN_HOOD_TRACE(this)
+ return !operator==(other);
+ }
+
+ template <typename Q = mapped_type>
+ typename std::enable_if<!std::is_void<Q>::value, Q&>::type operator[](const key_type& key) {
+ ROBIN_HOOD_TRACE(this)
+ auto idxAndState = insertKeyPrepareEmptySpot(key);
+ switch (idxAndState.second) {
+ case InsertionState::key_found:
+ break;
+
+ case InsertionState::new_node:
+ ::new (static_cast<void*>(&mKeyVals[idxAndState.first]))
+ Node(*this, std::piecewise_construct, std::forward_as_tuple(key),
+ std::forward_as_tuple());
+ break;
+
+ case InsertionState::overwrite_node:
+ mKeyVals[idxAndState.first] = Node(*this, std::piecewise_construct,
+ std::forward_as_tuple(key), std::forward_as_tuple());
+ break;
+
+ case InsertionState::overflow_error:
+ throwOverflowError();
+ }
+
+ return mKeyVals[idxAndState.first].getSecond();
+ }
+
+ template <typename Q = mapped_type>
+ typename std::enable_if<!std::is_void<Q>::value, Q&>::type operator[](key_type&& key) {
+ ROBIN_HOOD_TRACE(this)
+ auto idxAndState = insertKeyPrepareEmptySpot(key);
+ switch (idxAndState.second) {
+ case InsertionState::key_found:
+ break;
+
+ case InsertionState::new_node:
+ ::new (static_cast<void*>(&mKeyVals[idxAndState.first]))
+ Node(*this, std::piecewise_construct, std::forward_as_tuple(std::move(key)),
+ std::forward_as_tuple());
+ break;
+
+ case InsertionState::overwrite_node:
+ mKeyVals[idxAndState.first] =
+ Node(*this, std::piecewise_construct, std::forward_as_tuple(std::move(key)),
+ std::forward_as_tuple());
+ break;
+
+ case InsertionState::overflow_error:
+ throwOverflowError();
+ }
+
+ return mKeyVals[idxAndState.first].getSecond();
+ }
+
+ template <typename Iter>
+ void insert(Iter first, Iter last) {
+ for (; first != last; ++first) {
+ // value_type ctor needed because this might be called with std::pair's
+ insert(value_type(*first));
+ }
+ }
+
+ void insert(std::initializer_list<value_type> ilist) {
+ for (auto&& vt : ilist) {
+ insert(std::move(vt));
+ }
+ }
+
+ template <typename... Args>
+ std::pair<iterator, bool> emplace(Args&&... args) {
+ ROBIN_HOOD_TRACE(this)
+ Node n{*this, std::forward<Args>(args)...};
+ auto idxAndState = insertKeyPrepareEmptySpot(getFirstConst(n));
+ switch (idxAndState.second) {
+ case InsertionState::key_found:
+ n.destroy(*this);
+ break;
+
+ case InsertionState::new_node:
+ ::new (static_cast<void*>(&mKeyVals[idxAndState.first])) Node(*this, std::move(n));
+ break;
+
+ case InsertionState::overwrite_node:
+ mKeyVals[idxAndState.first] = std::move(n);
+ break;
+
+ case InsertionState::overflow_error:
+ n.destroy(*this);
+ throwOverflowError();
+ break;
+ }
+
+ return std::make_pair(iterator(mKeyVals + idxAndState.first, mInfo + idxAndState.first),
+ InsertionState::key_found != idxAndState.second);
+ }
+
+ template <typename... Args>
+ iterator emplace_hint(const_iterator position, Args&&... args) {
+ (void)position;
+ return emplace(std::forward<Args>(args)...).first;
+ }
+
+ template <typename... Args>
+ std::pair<iterator, bool> try_emplace(const key_type& key, Args&&... args) {
+ return try_emplace_impl(key, std::forward<Args>(args)...);
+ }
+
+ template <typename... Args>
+ std::pair<iterator, bool> try_emplace(key_type&& key, Args&&... args) {
+ return try_emplace_impl(std::move(key), std::forward<Args>(args)...);
+ }
+
+ template <typename... Args>
+ iterator try_emplace(const_iterator hint, const key_type& key, Args&&... args) {
+ (void)hint;
+ return try_emplace_impl(key, std::forward<Args>(args)...).first;
+ }
+
+ template <typename... Args>
+ iterator try_emplace(const_iterator hint, key_type&& key, Args&&... args) {
+ (void)hint;
+ return try_emplace_impl(std::move(key), std::forward<Args>(args)...).first;
+ }
+
+ template <typename Mapped>
+ std::pair<iterator, bool> insert_or_assign(const key_type& key, Mapped&& obj) {
+ return insertOrAssignImpl(key, std::forward<Mapped>(obj));
+ }
+
+ template <typename Mapped>
+ std::pair<iterator, bool> insert_or_assign(key_type&& key, Mapped&& obj) {
+ return insertOrAssignImpl(std::move(key), std::forward<Mapped>(obj));
+ }
+
+ template <typename Mapped>
+ iterator insert_or_assign(const_iterator hint, const key_type& key, Mapped&& obj) {
+ (void)hint;
+ return insertOrAssignImpl(key, std::forward<Mapped>(obj)).first;
+ }
+
+ template <typename Mapped>
+ iterator insert_or_assign(const_iterator hint, key_type&& key, Mapped&& obj) {
+ (void)hint;
+ return insertOrAssignImpl(std::move(key), std::forward<Mapped>(obj)).first;
+ }
+
+ std::pair<iterator, bool> insert(const value_type& keyval) {
+ ROBIN_HOOD_TRACE(this)
+ return emplace(keyval);
+ }
+
+ iterator insert(const_iterator hint, const value_type& keyval) {
+ (void)hint;
+ return emplace(keyval).first;
+ }
+
+ std::pair<iterator, bool> insert(value_type&& keyval) {
+ return emplace(std::move(keyval));
+ }
+
+ iterator insert(const_iterator hint, value_type&& keyval) {
+ (void)hint;
+ return emplace(std::move(keyval)).first;
+ }
+
+ // Returns 1 if key is found, 0 otherwise.
+ size_t count(const key_type& key) const { // NOLINT(modernize-use-nodiscard)
+ ROBIN_HOOD_TRACE(this)
+ auto kv = mKeyVals + findIdx(key);
+ if (kv != reinterpret_cast_no_cast_align_warning<Node*>(mInfo)) {
+ return 1;
+ }
+ return 0;
+ }
+
+ template <typename OtherKey, typename Self_ = Self>
+ // NOLINTNEXTLINE(modernize-use-nodiscard)
+ typename std::enable_if<Self_::is_transparent, size_t>::type count(const OtherKey& key) const {
+ ROBIN_HOOD_TRACE(this)
+ auto kv = mKeyVals + findIdx(key);
+ if (kv != reinterpret_cast_no_cast_align_warning<Node*>(mInfo)) {
+ return 1;
+ }
+ return 0;
+ }
+
+ bool contains(const key_type& key) const { // NOLINT(modernize-use-nodiscard)
+ return 1U == count(key);
+ }
+
+ template <typename OtherKey, typename Self_ = Self>
+ // NOLINTNEXTLINE(modernize-use-nodiscard)
+ typename std::enable_if<Self_::is_transparent, bool>::type contains(const OtherKey& key) const {
+ return 1U == count(key);
+ }
+
+ // Returns a reference to the value found for key.
+ // Throws std::out_of_range if element cannot be found
+ template <typename Q = mapped_type>
+ // NOLINTNEXTLINE(modernize-use-nodiscard)
+ typename std::enable_if<!std::is_void<Q>::value, Q&>::type at(key_type const& key) {
+ ROBIN_HOOD_TRACE(this)
+ auto kv = mKeyVals + findIdx(key);
+ if (kv == reinterpret_cast_no_cast_align_warning<Node*>(mInfo)) {
+ doThrow<std::out_of_range>("key not found");
+ }
+ return kv->getSecond();
+ }
+
+ // Returns a reference to the value found for key.
+ // Throws std::out_of_range if element cannot be found
+ template <typename Q = mapped_type>
+ // NOLINTNEXTLINE(modernize-use-nodiscard)
+ typename std::enable_if<!std::is_void<Q>::value, Q const&>::type at(key_type const& key) const {
+ ROBIN_HOOD_TRACE(this)
+ auto kv = mKeyVals + findIdx(key);
+ if (kv == reinterpret_cast_no_cast_align_warning<Node*>(mInfo)) {
+ doThrow<std::out_of_range>("key not found");
+ }
+ return kv->getSecond();
+ }
+
+ const_iterator find(const key_type& key) const { // NOLINT(modernize-use-nodiscard)
+ ROBIN_HOOD_TRACE(this)
+ const size_t idx = findIdx(key);
+ return const_iterator{mKeyVals + idx, mInfo + idx};
+ }
+
+ template <typename OtherKey>
+ const_iterator find(const OtherKey& key, is_transparent_tag /*unused*/) const {
+ ROBIN_HOOD_TRACE(this)
+ const size_t idx = findIdx(key);
+ return const_iterator{mKeyVals + idx, mInfo + idx};
+ }
+
+ template <typename OtherKey, typename Self_ = Self>
+ typename std::enable_if<Self_::is_transparent, // NOLINT(modernize-use-nodiscard)
+ const_iterator>::type // NOLINT(modernize-use-nodiscard)
+ find(const OtherKey& key) const { // NOLINT(modernize-use-nodiscard)
+ ROBIN_HOOD_TRACE(this)
+ const size_t idx = findIdx(key);
+ return const_iterator{mKeyVals + idx, mInfo + idx};
+ }
+
+ iterator find(const key_type& key) {
+ ROBIN_HOOD_TRACE(this)
+ const size_t idx = findIdx(key);
+ return iterator{mKeyVals + idx, mInfo + idx};
+ }
+
+ template <typename OtherKey>
+ iterator find(const OtherKey& key, is_transparent_tag /*unused*/) {
+ ROBIN_HOOD_TRACE(this)
+ const size_t idx = findIdx(key);
+ return iterator{mKeyVals + idx, mInfo + idx};
+ }
+
+ template <typename OtherKey, typename Self_ = Self>
+ typename std::enable_if<Self_::is_transparent, iterator>::type find(const OtherKey& key) {
+ ROBIN_HOOD_TRACE(this)
+ const size_t idx = findIdx(key);
+ return iterator{mKeyVals + idx, mInfo + idx};
+ }
+
+ iterator begin() {
+ ROBIN_HOOD_TRACE(this)
+ if (empty()) {
+ return end();
+ }
+ return iterator(mKeyVals, mInfo, fast_forward_tag{});
+ }
+ const_iterator begin() const { // NOLINT(modernize-use-nodiscard)
+ ROBIN_HOOD_TRACE(this)
+ return cbegin();
+ }
+ const_iterator cbegin() const { // NOLINT(modernize-use-nodiscard)
+ ROBIN_HOOD_TRACE(this)
+ if (empty()) {
+ return cend();
+ }
+ return const_iterator(mKeyVals, mInfo, fast_forward_tag{});
+ }
+
+ iterator end() {
+ ROBIN_HOOD_TRACE(this)
+ // no need to supply valid info pointer: end() must not be dereferenced, and only node
+ // pointer is compared.
+ return iterator{reinterpret_cast_no_cast_align_warning<Node*>(mInfo), nullptr};
+ }
+ const_iterator end() const { // NOLINT(modernize-use-nodiscard)
+ ROBIN_HOOD_TRACE(this)
+ return cend();
+ }
+ const_iterator cend() const { // NOLINT(modernize-use-nodiscard)
+ ROBIN_HOOD_TRACE(this)
+ return const_iterator{reinterpret_cast_no_cast_align_warning<Node*>(mInfo), nullptr};
+ }
+
+ iterator erase(const_iterator pos) {
+ ROBIN_HOOD_TRACE(this)
+        // it's safe to perform the const_cast here
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
+ return erase(iterator{const_cast<Node*>(pos.mKeyVals), const_cast<uint8_t*>(pos.mInfo)});
+ }
+
+ // Erases element at pos, returns iterator to the next element.
+ iterator erase(iterator pos) {
+ ROBIN_HOOD_TRACE(this)
+ // we assume that pos always points to a valid entry, and not end().
+ auto const idx = static_cast<size_t>(pos.mKeyVals - mKeyVals);
+
+ shiftDown(idx);
+ --mNumElements;
+
+ if (*pos.mInfo) {
+ // we've backward shifted, return this again
+ return pos;
+ }
+
+ // no backward shift, return next element
+ return ++pos;
+ }
+
+ size_t erase(const key_type& key) {
+ ROBIN_HOOD_TRACE(this)
+ size_t idx{};
+ InfoType info{};
+ keyToIdx(key, &idx, &info);
+
+ // check while info matches with the source idx
+ do {
+ if (info == mInfo[idx] && WKeyEqual::operator()(key, mKeyVals[idx].getFirst())) {
+ shiftDown(idx);
+ --mNumElements;
+ return 1;
+ }
+ next(&info, &idx);
+ } while (info <= mInfo[idx]);
+
+ // nothing found to delete
+ return 0;
+ }
+
+ // reserves space for the specified number of elements. Makes sure the old data fits.
+ // exactly the same as reserve(c).
+ void rehash(size_t c) {
+ // forces a reserve
+ reserve(c, true);
+ }
+
+ // reserves space for the specified number of elements. Makes sure the old data fits.
+ // Exactly the same as rehash(c). Use rehash(0) to shrink to fit.
+ void reserve(size_t c) {
+ // reserve, but don't force rehash
+ reserve(c, false);
+ }
+
+ // If possible reallocates the map to a smaller one. This frees the underlying table.
+ // Does not do anything if load_factor is too large for decreasing the table's size.
+ void compact() {
+ ROBIN_HOOD_TRACE(this)
+ auto newSize = InitialNumElements;
+ while (calcMaxNumElementsAllowed(newSize) < mNumElements && newSize != 0) {
+ newSize *= 2;
+ }
+ if (ROBIN_HOOD_UNLIKELY(newSize == 0)) {
+ throwOverflowError();
+ }
+
+ ROBIN_HOOD_LOG("newSize > mMask + 1: " << newSize << " > " << mMask << " + 1")
+
+        // only actually do anything when the new size is smaller than the old one.
+        // Otherwise there is nothing to compact and we avoid a pointless reallocation.
+ if (newSize < mMask + 1) {
+ rehashPowerOfTwo(newSize, true);
+ }
+ }
+
+ size_type size() const noexcept { // NOLINT(modernize-use-nodiscard)
+ ROBIN_HOOD_TRACE(this)
+ return mNumElements;
+ }
+
+ size_type max_size() const noexcept { // NOLINT(modernize-use-nodiscard)
+ ROBIN_HOOD_TRACE(this)
+ return static_cast<size_type>(-1);
+ }
+
+ ROBIN_HOOD(NODISCARD) bool empty() const noexcept {
+ ROBIN_HOOD_TRACE(this)
+ return 0 == mNumElements;
+ }
+
+ float max_load_factor() const noexcept { // NOLINT(modernize-use-nodiscard)
+ ROBIN_HOOD_TRACE(this)
+ return MaxLoadFactor100 / 100.0F;
+ }
+
+ // Average number of elements per bucket. Since we allow only 1 per bucket
+ float load_factor() const noexcept { // NOLINT(modernize-use-nodiscard)
+ ROBIN_HOOD_TRACE(this)
+ return static_cast<float>(size()) / static_cast<float>(mMask + 1);
+ }
+
+ ROBIN_HOOD(NODISCARD) size_t mask() const noexcept {
+ ROBIN_HOOD_TRACE(this)
+ return mMask;
+ }
+
+ ROBIN_HOOD(NODISCARD) size_t calcMaxNumElementsAllowed(size_t maxElements) const noexcept {
+ if (ROBIN_HOOD_LIKELY(maxElements <= (std::numeric_limits<size_t>::max)() / 100)) {
+ return maxElements * MaxLoadFactor100 / 100;
+ }
+
+        // we might be a bit imprecise, but since maxElements is quite large that doesn't matter
+ return (maxElements / 100) * MaxLoadFactor100;
+ }
+
+ ROBIN_HOOD(NODISCARD) size_t calcNumBytesInfo(size_t numElements) const noexcept {
+ // we add a uint64_t, which houses the sentinel (first byte) and padding so we can load
+ // 64bit types.
+ return numElements + sizeof(uint64_t);
+ }
+
+ ROBIN_HOOD(NODISCARD)
+ size_t calcNumElementsWithBuffer(size_t numElements) const noexcept {
+ auto maxNumElementsAllowed = calcMaxNumElementsAllowed(numElements);
+ return numElements + (std::min)(maxNumElementsAllowed, (static_cast<size_t>(0xFF)));
+ }
+
+ // calculation only allowed for 2^n values
+ ROBIN_HOOD(NODISCARD) size_t calcNumBytesTotal(size_t numElements) const {
+#if ROBIN_HOOD(BITNESS) == 64
+ return numElements * sizeof(Node) + calcNumBytesInfo(numElements);
+#else
+ // make sure we're doing 64bit operations, so we are at least safe against 32bit overflows.
+ auto const ne = static_cast<uint64_t>(numElements);
+ auto const s = static_cast<uint64_t>(sizeof(Node));
+ auto const infos = static_cast<uint64_t>(calcNumBytesInfo(numElements));
+
+ auto const total64 = ne * s + infos;
+ auto const total = static_cast<size_t>(total64);
+
+ if (ROBIN_HOOD_UNLIKELY(static_cast<uint64_t>(total) != total64)) {
+ throwOverflowError();
+ }
+ return total;
+#endif
+ }
+
+private:
+ template <typename Q = mapped_type>
+ ROBIN_HOOD(NODISCARD)
+ typename std::enable_if<!std::is_void<Q>::value, bool>::type has(const value_type& e) const {
+ ROBIN_HOOD_TRACE(this)
+ auto it = find(e.first);
+ return it != end() && it->second == e.second;
+ }
+
+ template <typename Q = mapped_type>
+ ROBIN_HOOD(NODISCARD)
+ typename std::enable_if<std::is_void<Q>::value, bool>::type has(const value_type& e) const {
+ ROBIN_HOOD_TRACE(this)
+ return find(e) != end();
+ }
+
+ void reserve(size_t c, bool forceRehash) {
+ ROBIN_HOOD_TRACE(this)
+ auto const minElementsAllowed = (std::max)(c, mNumElements);
+ auto newSize = InitialNumElements;
+ while (calcMaxNumElementsAllowed(newSize) < minElementsAllowed && newSize != 0) {
+ newSize *= 2;
+ }
+ if (ROBIN_HOOD_UNLIKELY(newSize == 0)) {
+ throwOverflowError();
+ }
+
+ ROBIN_HOOD_LOG("newSize > mMask + 1: " << newSize << " > " << mMask << " + 1")
+
+ // only actually do anything when the new size is bigger than the old one. This prevents to
+ // continuously allocate for each reserve() call.
+ if (forceRehash || newSize > mMask + 1) {
+ rehashPowerOfTwo(newSize, false);
+ }
+ }
+
+ // reserves space for at least the specified number of elements.
+    // only works if numBuckets is a power of two
+ // True on success, false otherwise
+ void rehashPowerOfTwo(size_t numBuckets, bool forceFree) {
+ ROBIN_HOOD_TRACE(this)
+
+ Node* const oldKeyVals = mKeyVals;
+ uint8_t const* const oldInfo = mInfo;
+
+ const size_t oldMaxElementsWithBuffer = calcNumElementsWithBuffer(mMask + 1);
+
+ // resize operation: move stuff
+ initData(numBuckets);
+ if (oldMaxElementsWithBuffer > 1) {
+ for (size_t i = 0; i < oldMaxElementsWithBuffer; ++i) {
+ if (oldInfo[i] != 0) {
+ // might throw an exception, which is really bad since we are in the middle of
+ // moving stuff.
+ insert_move(std::move(oldKeyVals[i]));
+ // destroy the node but DON'T destroy the data.
+ oldKeyVals[i].~Node();
+ }
+ }
+
+ // this check is not necessary as it's guarded by the previous if, but it helps
+ // silence g++'s overeager "attempt to free a non-heap object 'map'
+ // [-Werror=free-nonheap-object]" warning.
+ if (oldKeyVals != reinterpret_cast_no_cast_align_warning<Node*>(&mMask)) {
+ // don't destroy old data: put it into the pool instead
+ if (forceFree) {
+ std::free(oldKeyVals);
+ } else {
+ DataPool::addOrFree(oldKeyVals, calcNumBytesTotal(oldMaxElementsWithBuffer));
+ }
+ }
+ }
+ }
+
+ ROBIN_HOOD(NOINLINE) void throwOverflowError() const {
+#if ROBIN_HOOD(HAS_EXCEPTIONS)
+ throw std::overflow_error("robin_hood::map overflow");
+#else
+ abort();
+#endif
+ }
+
+ template <typename OtherKey, typename... Args>
+ std::pair<iterator, bool> try_emplace_impl(OtherKey&& key, Args&&... args) {
+ ROBIN_HOOD_TRACE(this)
+ auto idxAndState = insertKeyPrepareEmptySpot(key);
+ switch (idxAndState.second) {
+ case InsertionState::key_found:
+ break;
+
+ case InsertionState::new_node:
+ ::new (static_cast<void*>(&mKeyVals[idxAndState.first])) Node(
+ *this, std::piecewise_construct, std::forward_as_tuple(std::forward<OtherKey>(key)),
+ std::forward_as_tuple(std::forward<Args>(args)...));
+ break;
+
+ case InsertionState::overwrite_node:
+ mKeyVals[idxAndState.first] = Node(*this, std::piecewise_construct,
+ std::forward_as_tuple(std::forward<OtherKey>(key)),
+ std::forward_as_tuple(std::forward<Args>(args)...));
+ break;
+
+ case InsertionState::overflow_error:
+ throwOverflowError();
+ break;
+ }
+
+ return std::make_pair(iterator(mKeyVals + idxAndState.first, mInfo + idxAndState.first),
+ InsertionState::key_found != idxAndState.second);
+ }
+
+ template <typename OtherKey, typename Mapped>
+ std::pair<iterator, bool> insertOrAssignImpl(OtherKey&& key, Mapped&& obj) {
+ ROBIN_HOOD_TRACE(this)
+ auto idxAndState = insertKeyPrepareEmptySpot(key);
+ switch (idxAndState.second) {
+ case InsertionState::key_found:
+ mKeyVals[idxAndState.first].getSecond() = std::forward<Mapped>(obj);
+ break;
+
+ case InsertionState::new_node:
+ ::new (static_cast<void*>(&mKeyVals[idxAndState.first])) Node(
+ *this, std::piecewise_construct, std::forward_as_tuple(std::forward<OtherKey>(key)),
+ std::forward_as_tuple(std::forward<Mapped>(obj)));
+ break;
+
+ case InsertionState::overwrite_node:
+ mKeyVals[idxAndState.first] = Node(*this, std::piecewise_construct,
+ std::forward_as_tuple(std::forward<OtherKey>(key)),
+ std::forward_as_tuple(std::forward<Mapped>(obj)));
+ break;
+
+ case InsertionState::overflow_error:
+ throwOverflowError();
+ break;
+ }
+
+ return std::make_pair(iterator(mKeyVals + idxAndState.first, mInfo + idxAndState.first),
+ InsertionState::key_found != idxAndState.second);
+ }
+
+ void initData(size_t max_elements) {
+ mNumElements = 0;
+ mMask = max_elements - 1;
+ mMaxNumElementsAllowed = calcMaxNumElementsAllowed(max_elements);
+
+ auto const numElementsWithBuffer = calcNumElementsWithBuffer(max_elements);
+
+ // malloc & zero mInfo. Faster than calloc everything.
+ auto const numBytesTotal = calcNumBytesTotal(numElementsWithBuffer);
+ ROBIN_HOOD_LOG("std::calloc " << numBytesTotal << " = calcNumBytesTotal("
+ << numElementsWithBuffer << ")")
+ mKeyVals = reinterpret_cast<Node*>(
+ detail::assertNotNull<std::bad_alloc>(std::malloc(numBytesTotal)));
+ mInfo = reinterpret_cast<uint8_t*>(mKeyVals + numElementsWithBuffer);
+ std::memset(mInfo, 0, numBytesTotal - numElementsWithBuffer * sizeof(Node));
+
+ // set sentinel
+ mInfo[numElementsWithBuffer] = 1;
+
+ mInfoInc = InitialInfoInc;
+ mInfoHashShift = InitialInfoHashShift;
+ }
+
+ enum class InsertionState { overflow_error, key_found, new_node, overwrite_node };
+
+    // Finds key, and if not already present prepares a spot where to put the key & value.
+ // This potentially shifts nodes out of the way, updates mInfo and number of inserted
+ // elements, so the only operation left to do is create/assign a new node at that spot.
+ template <typename OtherKey>
+ std::pair<size_t, InsertionState> insertKeyPrepareEmptySpot(OtherKey&& key) {
+ for (int i = 0; i < 256; ++i) {
+ size_t idx{};
+ InfoType info{};
+ keyToIdx(key, &idx, &info);
+ nextWhileLess(&info, &idx);
+
+ // while we potentially have a match
+ while (info == mInfo[idx]) {
+ if (WKeyEqual::operator()(key, mKeyVals[idx].getFirst())) {
+ // key already exists, do NOT insert.
+ // see http://en.cppreference.com/w/cpp/container/unordered_map/insert
+ return std::make_pair(idx, InsertionState::key_found);
+ }
+ next(&info, &idx);
+ }
+
+ // unlikely that this evaluates to true
+ if (ROBIN_HOOD_UNLIKELY(mNumElements >= mMaxNumElementsAllowed)) {
+ if (!increase_size()) {
+ return std::make_pair(size_t(0), InsertionState::overflow_error);
+ }
+ continue;
+ }
+
+ // key not found, so we are now exactly where we want to insert it.
+ auto const insertion_idx = idx;
+ auto const insertion_info = info;
+ if (ROBIN_HOOD_UNLIKELY(insertion_info + mInfoInc > 0xFF)) {
+ mMaxNumElementsAllowed = 0;
+ }
+
+ // find an empty spot
+ while (0 != mInfo[idx]) {
+ next(&info, &idx);
+ }
+
+ if (idx != insertion_idx) {
+ shiftUp(idx, insertion_idx);
+ }
+ // put at empty spot
+ mInfo[insertion_idx] = static_cast<uint8_t>(insertion_info);
+ ++mNumElements;
+ return std::make_pair(insertion_idx, idx == insertion_idx
+ ? InsertionState::new_node
+ : InsertionState::overwrite_node);
+ }
+
+ // enough attempts failed, so finally give up.
+ return std::make_pair(size_t(0), InsertionState::overflow_error);
+ }
+
+ bool try_increase_info() {
+ ROBIN_HOOD_LOG("mInfoInc=" << mInfoInc << ", numElements=" << mNumElements
+ << ", maxNumElementsAllowed="
+ << calcMaxNumElementsAllowed(mMask + 1))
+ if (mInfoInc <= 2) {
+ // need to be > 2 so that shift works (otherwise undefined behavior!)
+ return false;
+ }
+ // we got space left, try to make info smaller
+ mInfoInc = static_cast<uint8_t>(mInfoInc >> 1U);
+
+ // remove one bit of the hash, leaving more space for the distance info.
+ // This is extremely fast because we can operate on 8 bytes at once.
+ ++mInfoHashShift;
+ auto const numElementsWithBuffer = calcNumElementsWithBuffer(mMask + 1);
+
+ for (size_t i = 0; i < numElementsWithBuffer; i += 8) {
+ auto val = unaligned_load<uint64_t>(mInfo + i);
+ val = (val >> 1U) & UINT64_C(0x7f7f7f7f7f7f7f7f);
+ std::memcpy(mInfo + i, &val, sizeof(val));
+ }
+ // update sentinel, which might have been cleared out!
+ mInfo[numElementsWithBuffer] = 1;
+
+ mMaxNumElementsAllowed = calcMaxNumElementsAllowed(mMask + 1);
+ return true;
+ }
+
+ // True if resize was possible, false otherwise
+ bool increase_size() {
+ // nothing allocated yet? just allocate InitialNumElements
+ if (0 == mMask) {
+ initData(InitialNumElements);
+ return true;
+ }
+
+ auto const maxNumElementsAllowed = calcMaxNumElementsAllowed(mMask + 1);
+ if (mNumElements < maxNumElementsAllowed && try_increase_info()) {
+ return true;
+ }
+
+ ROBIN_HOOD_LOG("mNumElements=" << mNumElements << ", maxNumElementsAllowed="
+ << maxNumElementsAllowed << ", load="
+ << (static_cast<double>(mNumElements) * 100.0 /
+ (static_cast<double>(mMask) + 1)))
+
+ if (mNumElements * 2 < calcMaxNumElementsAllowed(mMask + 1)) {
+ // we have to resize, even though there would still be plenty of space left!
+            // Try to rehash instead. Delete freed memory so we don't steadily increase mem in case
+ // we have to rehash a few times
+ nextHashMultiplier();
+ rehashPowerOfTwo(mMask + 1, true);
+ } else {
+ // we've reached the capacity of the map, so the hash seems to work nice. Keep using it.
+ rehashPowerOfTwo((mMask + 1) * 2, false);
+ }
+ return true;
+ }
+
+ void nextHashMultiplier() {
+ // adding an *even* number, so that the multiplier will always stay odd. This is necessary
+ // so that the hash stays a mixing function (and thus doesn't have any information loss).
+ mHashMultiplier += UINT64_C(0xc4ceb9fe1a85ec54);
+ }
+
+ void destroy() {
+ if (0 == mMask) {
+ // don't deallocate!
+ return;
+ }
+
+ Destroyer<Self, IsFlat && std::is_trivially_destructible<Node>::value>{}
+ .nodesDoNotDeallocate(*this);
+
+ // This protection against not deleting mMask shouldn't be needed as it's sufficiently
+ // protected with the 0==mMask check, but I have this anyways because g++ 7 otherwise
+ // reports a compile error: attempt to free a non-heap object 'fm'
+ // [-Werror=free-nonheap-object]
+ if (mKeyVals != reinterpret_cast_no_cast_align_warning<Node*>(&mMask)) {
+ ROBIN_HOOD_LOG("std::free")
+ std::free(mKeyVals);
+ }
+ }
+
+ void init() noexcept {
+ mKeyVals = reinterpret_cast_no_cast_align_warning<Node*>(&mMask);
+ mInfo = reinterpret_cast<uint8_t*>(&mMask);
+ mNumElements = 0;
+ mMask = 0;
+ mMaxNumElementsAllowed = 0;
+ mInfoInc = InitialInfoInc;
+ mInfoHashShift = InitialInfoHashShift;
+ }
+
+ // members are sorted so no padding occurs
+ uint64_t mHashMultiplier = UINT64_C(0xc4ceb9fe1a85ec53); // 8 byte 8
+ Node* mKeyVals = reinterpret_cast_no_cast_align_warning<Node*>(&mMask); // 8 byte 16
+ uint8_t* mInfo = reinterpret_cast<uint8_t*>(&mMask); // 8 byte 24
+ size_t mNumElements = 0; // 8 byte 32
+ size_t mMask = 0; // 8 byte 40
+ size_t mMaxNumElementsAllowed = 0; // 8 byte 48
+ InfoType mInfoInc = InitialInfoInc; // 4 byte 52
+ InfoType mInfoHashShift = InitialInfoHashShift; // 4 byte 56
+ // 16 byte 56 if NodeAllocator
+};
+
+} // namespace detail
+
+// map
+
+template <typename Key, typename T, typename Hash = hash<Key>,
+ typename KeyEqual = std::equal_to<Key>, size_t MaxLoadFactor100 = 80>
+using unordered_flat_map = detail::Table<true, MaxLoadFactor100, Key, T, Hash, KeyEqual>;
+
+template <typename Key, typename T, typename Hash = hash<Key>,
+ typename KeyEqual = std::equal_to<Key>, size_t MaxLoadFactor100 = 80>
+using unordered_node_map = detail::Table<false, MaxLoadFactor100, Key, T, Hash, KeyEqual>;
+
+template <typename Key, typename T, typename Hash = hash<Key>,
+ typename KeyEqual = std::equal_to<Key>, size_t MaxLoadFactor100 = 80>
+using unordered_map =
+ detail::Table<sizeof(robin_hood::pair<Key, T>) <= sizeof(size_t) * 6 &&
+ std::is_nothrow_move_constructible<robin_hood::pair<Key, T>>::value &&
+ std::is_nothrow_move_assignable<robin_hood::pair<Key, T>>::value,
+ MaxLoadFactor100, Key, T, Hash, KeyEqual>;
+
+// set
+
+template <typename Key, typename Hash = hash<Key>, typename KeyEqual = std::equal_to<Key>,
+ size_t MaxLoadFactor100 = 80>
+using unordered_flat_set = detail::Table<true, MaxLoadFactor100, Key, void, Hash, KeyEqual>;
+
+template <typename Key, typename Hash = hash<Key>, typename KeyEqual = std::equal_to<Key>,
+ size_t MaxLoadFactor100 = 80>
+using unordered_node_set = detail::Table<false, MaxLoadFactor100, Key, void, Hash, KeyEqual>;
+
+template <typename Key, typename Hash = hash<Key>, typename KeyEqual = std::equal_to<Key>,
+ size_t MaxLoadFactor100 = 80>
+using unordered_set = detail::Table<sizeof(Key) <= sizeof(size_t) * 6 &&
+ std::is_nothrow_move_constructible<Key>::value &&
+ std::is_nothrow_move_assignable<Key>::value,
+ MaxLoadFactor100, Key, void, Hash, KeyEqual>;
+
+} // namespace robin_hood
+
+#endif
diff --git a/misc/benchmarks/external/ankerl/unordered_dense.h b/misc/benchmarks/external/ankerl/unordered_dense.h
new file mode 100644
index 00000000..ff902ad4
--- /dev/null
+++ b/misc/benchmarks/external/ankerl/unordered_dense.h
@@ -0,0 +1,1503 @@
+///////////////////////// ankerl::unordered_dense::{map, set} /////////////////////////
+
+// A fast & densely stored hashmap and hashset based on robin-hood backward shift deletion.
+// Version 2.0.0
+// https://github.com/martinus/unordered_dense
+//
+// Licensed under the MIT License <http://opensource.org/licenses/MIT>.
+// SPDX-License-Identifier: MIT
+// Copyright (c) 2022 Martin Leitner-Ankerl <[email protected]>
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+#ifndef ANKERL_UNORDERED_DENSE_H
+#define ANKERL_UNORDERED_DENSE_H
+
+// see https://semver.org/spec/v2.0.0.html
+#define ANKERL_UNORDERED_DENSE_VERSION_MAJOR 2 // NOLINT(cppcoreguidelines-macro-usage) incompatible API changes
+#define ANKERL_UNORDERED_DENSE_VERSION_MINOR 0 // NOLINT(cppcoreguidelines-macro-usage) backwards compatible functionality
+#define ANKERL_UNORDERED_DENSE_VERSION_PATCH 0 // NOLINT(cppcoreguidelines-macro-usage) backwards compatible bug fixes
+
+// API versioning with inline namespace, see https://www.foonathan.net/2018/11/inline-namespaces/
+#define ANKERL_UNORDERED_DENSE_VERSION_CONCAT1(major, minor, patch) v##major##_##minor##_##patch
+#define ANKERL_UNORDERED_DENSE_VERSION_CONCAT(major, minor, patch) ANKERL_UNORDERED_DENSE_VERSION_CONCAT1(major, minor, patch)
+#define ANKERL_UNORDERED_DENSE_NAMESPACE \
+ ANKERL_UNORDERED_DENSE_VERSION_CONCAT( \
+ ANKERL_UNORDERED_DENSE_VERSION_MAJOR, ANKERL_UNORDERED_DENSE_VERSION_MINOR, ANKERL_UNORDERED_DENSE_VERSION_PATCH)
+
+#if defined(_MSVC_LANG)
+# define ANKERL_UNORDERED_DENSE_CPP_VERSION _MSVC_LANG
+#else
+# define ANKERL_UNORDERED_DENSE_CPP_VERSION __cplusplus
+#endif
+
+#if defined(__GNUC__)
+// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
+# define ANKERL_UNORDERED_DENSE_PACK(decl) decl __attribute__((__packed__))
+#elif defined(_MSC_VER)
+// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
+# define ANKERL_UNORDERED_DENSE_PACK(decl) __pragma(pack(push, 1)) decl __pragma(pack(pop))
+#endif
+
+#if ANKERL_UNORDERED_DENSE_CPP_VERSION < 201703L
+# error ankerl::unordered_dense requires C++17 or higher
+#else
+# include <array> // for array
+# include <cstdint> // for uint64_t, uint32_t, uint8_t, UINT64_C
+# include <cstring> // for size_t, memcpy, memset
+# include <functional> // for equal_to, hash
+# include <initializer_list> // for initializer_list
+# include <iterator> // for pair, distance
+# include <limits> // for numeric_limits
+# include <memory> // for allocator, allocator_traits, shared_ptr
+# include <stdexcept> // for out_of_range
+# include <string> // for basic_string
+# include <string_view> // for basic_string_view, hash
+# include <tuple> // for forward_as_tuple
+# include <type_traits> // for enable_if_t, declval, conditional_t, ena...
+# include <utility> // for forward, exchange, pair, as_const, piece...
+# include <vector> // for vector
+
+# define ANKERL_UNORDERED_DENSE_PMR 0 // NOLINT(cppcoreguidelines-macro-usage)
+# if defined(__has_include)
+# if __has_include(<memory_resource>)
+# undef ANKERL_UNORDERED_DENSE_PMR
+# define ANKERL_UNORDERED_DENSE_PMR 1 // NOLINT(cppcoreguidelines-macro-usage)
+# include <memory_resource> // for polymorphic_allocator
+# endif
+# endif
+
+# if defined(_MSC_VER) && defined(_M_X64)
+# include <intrin.h>
+# pragma intrinsic(_umul128)
+# endif
+
+# if defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__clang__)
+# define ANKERL_UNORDERED_DENSE_LIKELY(x) __builtin_expect(x, 1) // NOLINT(cppcoreguidelines-macro-usage)
+# define ANKERL_UNORDERED_DENSE_UNLIKELY(x) __builtin_expect(x, 0) // NOLINT(cppcoreguidelines-macro-usage)
+# else
+# define ANKERL_UNORDERED_DENSE_LIKELY(x) (x) // NOLINT(cppcoreguidelines-macro-usage)
+# define ANKERL_UNORDERED_DENSE_UNLIKELY(x) (x) // NOLINT(cppcoreguidelines-macro-usage)
+# endif
+
+namespace ankerl::unordered_dense {
+inline namespace ANKERL_UNORDERED_DENSE_NAMESPACE {
+
+// hash ///////////////////////////////////////////////////////////////////////
+
+// This is a stripped-down implementation of wyhash: https://github.com/wangyi-fudan/wyhash
+// No big-endian support (because different values on different machines don't matter),
+// hardcodes the seed and the secret, reformats the code, and applies clang-tidy fixes.
+namespace detail::wyhash {
+
+static inline void mum(uint64_t* a, uint64_t* b) {
+# if defined(__SIZEOF_INT128__)
+ __uint128_t r = *a;
+ r *= *b;
+ *a = static_cast<uint64_t>(r);
+ *b = static_cast<uint64_t>(r >> 64U);
+# elif defined(_MSC_VER) && defined(_M_X64)
+ *a = _umul128(*a, *b, b);
+# else
+ uint64_t ha = *a >> 32U;
+ uint64_t hb = *b >> 32U;
+ uint64_t la = static_cast<uint32_t>(*a);
+ uint64_t lb = static_cast<uint32_t>(*b);
+ uint64_t hi{};
+ uint64_t lo{};
+ uint64_t rh = ha * hb;
+ uint64_t rm0 = ha * lb;
+ uint64_t rm1 = hb * la;
+ uint64_t rl = la * lb;
+ uint64_t t = rl + (rm0 << 32U);
+ auto c = static_cast<uint64_t>(t < rl);
+ lo = t + (rm1 << 32U);
+ c += static_cast<uint64_t>(lo < t);
+ hi = rh + (rm0 >> 32U) + (rm1 >> 32U) + c;
+ *a = lo;
+ *b = hi;
+# endif
+}
+
+// multiply and xor mix function, aka MUM
+[[nodiscard]] static inline auto mix(uint64_t a, uint64_t b) -> uint64_t {
+ mum(&a, &b);
+ return a ^ b;
+}
+
+// read functions. WARNING: we don't care about endianness, so results are different on big endian!
+[[nodiscard]] static inline auto r8(const uint8_t* p) -> uint64_t {
+ uint64_t v{};
+ std::memcpy(&v, p, 8U);
+ return v;
+}
+
+[[nodiscard]] static inline auto r4(const uint8_t* p) -> uint64_t {
+ uint32_t v{};
+ std::memcpy(&v, p, 4);
+ return v;
+}
+
+// reads 1, 2, or 3 bytes
+[[nodiscard]] static inline auto r3(const uint8_t* p, size_t k) -> uint64_t {
+ return (static_cast<uint64_t>(p[0]) << 16U) | (static_cast<uint64_t>(p[k >> 1U]) << 8U) | p[k - 1];
+}
+
+[[maybe_unused]] [[nodiscard]] static inline auto hash(void const* key, size_t len) -> uint64_t {
+ static constexpr auto secret = std::array{UINT64_C(0xa0761d6478bd642f),
+ UINT64_C(0xe7037ed1a0b428db),
+ UINT64_C(0x8ebc6af09c88c6e3),
+ UINT64_C(0x589965cc75374cc3)};
+
+ auto const* p = static_cast<uint8_t const*>(key);
+ uint64_t seed = secret[0];
+ uint64_t a{};
+ uint64_t b{};
+ if (ANKERL_UNORDERED_DENSE_LIKELY(len <= 16)) {
+ if (ANKERL_UNORDERED_DENSE_LIKELY(len >= 4)) {
+ a = (r4(p) << 32U) | r4(p + ((len >> 3U) << 2U));
+ b = (r4(p + len - 4) << 32U) | r4(p + len - 4 - ((len >> 3U) << 2U));
+ } else if (ANKERL_UNORDERED_DENSE_LIKELY(len > 0)) {
+ a = r3(p, len);
+ b = 0;
+ } else {
+ a = 0;
+ b = 0;
+ }
+ } else {
+ size_t i = len;
+ if (ANKERL_UNORDERED_DENSE_UNLIKELY(i > 48)) {
+ uint64_t see1 = seed;
+ uint64_t see2 = seed;
+ do {
+ seed = mix(r8(p) ^ secret[1], r8(p + 8) ^ seed);
+ see1 = mix(r8(p + 16) ^ secret[2], r8(p + 24) ^ see1);
+ see2 = mix(r8(p + 32) ^ secret[3], r8(p + 40) ^ see2);
+ p += 48;
+ i -= 48;
+ } while (ANKERL_UNORDERED_DENSE_LIKELY(i > 48));
+ seed ^= see1 ^ see2;
+ }
+ while (ANKERL_UNORDERED_DENSE_UNLIKELY(i > 16)) {
+ seed = mix(r8(p) ^ secret[1], r8(p + 8) ^ seed);
+ i -= 16;
+ p += 16;
+ }
+ a = r8(p + i - 16);
+ b = r8(p + i - 8);
+ }
+
+ return mix(secret[1] ^ len, mix(a ^ secret[1], b ^ seed));
+}
+
+[[nodiscard]] static inline auto hash(uint64_t x) -> uint64_t {
+ return detail::wyhash::mix(x, UINT64_C(0x9E3779B97F4A7C15));
+}
+
+} // namespace detail::wyhash
+
+template <typename T, typename Enable = void>
+struct hash {
+ auto operator()(T const& obj) const noexcept(noexcept(std::declval<std::hash<T>>().operator()(std::declval<T const&>())))
+ -> uint64_t {
+ return std::hash<T>{}(obj);
+ }
+};
+
+template <typename CharT>
+struct hash<std::basic_string<CharT>> {
+ using is_avalanching = void;
+ auto operator()(std::basic_string<CharT> const& str) const noexcept -> uint64_t {
+ return detail::wyhash::hash(str.data(), sizeof(CharT) * str.size());
+ }
+};
+
+template <typename CharT>
+struct hash<std::basic_string_view<CharT>> {
+ using is_avalanching = void;
+ auto operator()(std::basic_string_view<CharT> const& sv) const noexcept -> uint64_t {
+ return detail::wyhash::hash(sv.data(), sizeof(CharT) * sv.size());
+ }
+};
+
+template <class T>
+struct hash<T*> {
+ using is_avalanching = void;
+ auto operator()(T* ptr) const noexcept -> uint64_t {
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-reinterpret-cast)
+ return detail::wyhash::hash(reinterpret_cast<uintptr_t>(ptr));
+ }
+};
+
+template <class T>
+struct hash<std::unique_ptr<T>> {
+ using is_avalanching = void;
+ auto operator()(std::unique_ptr<T> const& ptr) const noexcept -> uint64_t {
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-reinterpret-cast)
+ return detail::wyhash::hash(reinterpret_cast<uintptr_t>(ptr.get()));
+ }
+};
+
+template <class T>
+struct hash<std::shared_ptr<T>> {
+ using is_avalanching = void;
+ auto operator()(std::shared_ptr<T> const& ptr) const noexcept -> uint64_t {
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-reinterpret-cast)
+ return detail::wyhash::hash(reinterpret_cast<uintptr_t>(ptr.get()));
+ }
+};
+
+template <typename Enum>
+struct hash<Enum, typename std::enable_if<std::is_enum<Enum>::value>::type> {
+ using is_avalanching = void;
+ auto operator()(Enum e) const noexcept -> uint64_t {
+ using underlying = typename std::underlying_type_t<Enum>;
+ return detail::wyhash::hash(static_cast<underlying>(e));
+ }
+};
+
+// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
+# define ANKERL_UNORDERED_DENSE_HASH_STATICCAST(T) \
+ template <> \
+ struct hash<T> { \
+ using is_avalanching = void; \
+ auto operator()(T const& obj) const noexcept -> uint64_t { \
+ return detail::wyhash::hash(static_cast<uint64_t>(obj)); \
+ } \
+ }
+
+# if defined(__GNUC__) && !defined(__clang__)
+# pragma GCC diagnostic push
+# pragma GCC diagnostic ignored "-Wuseless-cast"
+# endif
+// see https://en.cppreference.com/w/cpp/utility/hash
+ANKERL_UNORDERED_DENSE_HASH_STATICCAST(bool);
+ANKERL_UNORDERED_DENSE_HASH_STATICCAST(char);
+ANKERL_UNORDERED_DENSE_HASH_STATICCAST(signed char);
+ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned char);
+# if ANKERL_UNORDERED_DENSE_CPP_VERSION >= 202002L
+ANKERL_UNORDERED_DENSE_HASH_STATICCAST(char8_t);
+# endif
+ANKERL_UNORDERED_DENSE_HASH_STATICCAST(char16_t);
+ANKERL_UNORDERED_DENSE_HASH_STATICCAST(char32_t);
+ANKERL_UNORDERED_DENSE_HASH_STATICCAST(wchar_t);
+ANKERL_UNORDERED_DENSE_HASH_STATICCAST(short);
+ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned short);
+ANKERL_UNORDERED_DENSE_HASH_STATICCAST(int);
+ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned int);
+ANKERL_UNORDERED_DENSE_HASH_STATICCAST(long);
+ANKERL_UNORDERED_DENSE_HASH_STATICCAST(long long);
+ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned long);
+ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned long long);
+
+# if defined(__GNUC__) && !defined(__clang__)
+# pragma GCC diagnostic pop
+# endif
+
+// bucket_type //////////////////////////////////////////////////////////
+
+namespace bucket_type {
+
+struct standard {
+ static constexpr uint32_t dist_inc = 1U << 8U; // skip 1 byte fingerprint
+ static constexpr uint32_t fingerprint_mask = dist_inc - 1; // mask for 1 byte of fingerprint
+
+ uint32_t m_dist_and_fingerprint; // upper 3 byte: distance to original bucket. lower byte: fingerprint from hash
+ uint32_t m_value_idx; // index into the m_values vector.
+};
+
+ANKERL_UNORDERED_DENSE_PACK(struct big {
+ static constexpr uint32_t dist_inc = 1U << 8U; // skip 1 byte fingerprint
+ static constexpr uint32_t fingerprint_mask = dist_inc - 1; // mask for 1 byte of fingerprint
+
+ uint32_t m_dist_and_fingerprint; // upper 3 byte: distance to original bucket. lower byte: fingerprint from hash
+ size_t m_value_idx; // index into the m_values vector.
+});
+
+} // namespace bucket_type
+
+namespace detail {
+
+struct nonesuch {};
+
+template <class Default, class AlwaysVoid, template <class...> class Op, class... Args>
+struct detector {
+ using value_t = std::false_type;
+ using type = Default;
+};
+
+template <class Default, template <class...> class Op, class... Args>
+struct detector<Default, std::void_t<Op<Args...>>, Op, Args...> {
+ using value_t = std::true_type;
+ using type = Op<Args...>;
+};
+
+template <template <class...> class Op, class... Args>
+using is_detected = typename detail::detector<detail::nonesuch, void, Op, Args...>::value_t;
+
+template <template <class...> class Op, class... Args>
+constexpr bool is_detected_v = is_detected<Op, Args...>::value;
+
+template <typename T>
+using detect_avalanching = typename T::is_avalanching;
+
+template <typename T>
+using detect_is_transparent = typename T::is_transparent;
+
+template <typename T>
+using detect_iterator = typename T::iterator;
+
+// enable_if helpers
+
+template <typename Mapped>
+constexpr bool is_map_v = !std::is_void_v<Mapped>;
+
+template <typename Hash, typename KeyEqual>
+constexpr bool is_transparent_v = is_detected_v<detect_is_transparent, Hash>&& is_detected_v<detect_is_transparent, KeyEqual>;
+
+template <typename From, typename To1, typename To2>
+constexpr bool is_neither_convertible_v = !std::is_convertible_v<From, To1> && !std::is_convertible_v<From, To2>;
+
+// This is it, the table. Doubles as map and set, and uses `void` for T when it's used as a set.
+template <class Key,
+ class T, // when void, treat it as a set.
+ class Hash,
+ class KeyEqual,
+ class AllocatorOrContainer,
+ class Bucket>
+class table {
+public:
+ using value_container_type = std::conditional_t<
+ is_detected_v<detect_iterator, AllocatorOrContainer>,
+ AllocatorOrContainer,
+ typename std::vector<typename std::conditional_t<std::is_void_v<T>, Key, std::pair<Key, T>>, AllocatorOrContainer>>;
+
+private:
+ using bucket_alloc =
+ typename std::allocator_traits<typename value_container_type::allocator_type>::template rebind_alloc<Bucket>;
+ using bucket_alloc_traits = std::allocator_traits<bucket_alloc>;
+
+ static constexpr uint8_t initial_shifts = 64 - 3; // 2^(64-m_shift) number of buckets
+ static constexpr float default_max_load_factor = 0.8F;
+
+public:
+ using key_type = Key;
+ using mapped_type = T;
+ using value_type = typename value_container_type::value_type;
+ using size_type = typename value_container_type::size_type;
+ using difference_type = typename value_container_type::difference_type;
+ using hasher = Hash;
+ using key_equal = KeyEqual;
+ using allocator_type = typename value_container_type::allocator_type;
+ using reference = typename value_container_type::reference;
+ using const_reference = typename value_container_type::const_reference;
+ using pointer = typename value_container_type::pointer;
+ using const_pointer = typename value_container_type::const_pointer;
+ using iterator = typename value_container_type::iterator;
+ using const_iterator = typename value_container_type::const_iterator;
+ using bucket_type = Bucket;
+
+private:
+ using value_idx_type = decltype(Bucket::m_value_idx);
+ using dist_and_fingerprint_type = decltype(Bucket::m_dist_and_fingerprint);
+
+ static_assert(std::is_trivially_destructible_v<Bucket>, "assert there's no need to call destructor / std::destroy");
+ static_assert(std::is_trivially_copyable_v<Bucket>, "assert we can just memset / memcpy");
+
+ value_container_type m_values{}; // Contains all the key-value pairs in one densely stored container. No holes.
+ typename std::allocator_traits<bucket_alloc>::pointer m_buckets{};
+ size_t m_num_buckets = 0;
+ size_t m_max_bucket_capacity = 0;
+ float m_max_load_factor = default_max_load_factor;
+ Hash m_hash{};
+ KeyEqual m_equal{};
+ uint8_t m_shifts = initial_shifts;
+
+ [[nodiscard]] auto next(value_idx_type bucket_idx) const -> value_idx_type {
+ return ANKERL_UNORDERED_DENSE_UNLIKELY(bucket_idx + 1U == m_num_buckets)
+ ? 0
+ : static_cast<value_idx_type>(bucket_idx + 1U);
+ }
+
+ // Helper to access bucket through pointer types
+ [[nodiscard]] static constexpr auto at(typename std::allocator_traits<bucket_alloc>::pointer bucket_ptr, size_t offset)
+ -> Bucket& {
+ return *(bucket_ptr + static_cast<typename std::allocator_traits<bucket_alloc>::difference_type>(offset));
+ }
+
+ // use the dist_inc and dist_dec functions so that uint16_t types work without warning
+ [[nodiscard]] static constexpr auto dist_inc(dist_and_fingerprint_type x) -> dist_and_fingerprint_type {
+ return static_cast<dist_and_fingerprint_type>(x + Bucket::dist_inc);
+ }
+
+ [[nodiscard]] static constexpr auto dist_dec(dist_and_fingerprint_type x) -> dist_and_fingerprint_type {
+ return static_cast<dist_and_fingerprint_type>(x - Bucket::dist_inc);
+ }
+
+ // The goal of mixed_hash is to always produce a high quality 64bit hash.
+ template <typename K>
+ [[nodiscard]] constexpr auto mixed_hash(K const& key) const -> uint64_t {
+ if constexpr (is_detected_v<detect_avalanching, Hash>) {
+ // we know that the hash is good because is_avalanching.
+ if constexpr (sizeof(decltype(m_hash(key))) < sizeof(uint64_t)) {
+ // 32bit hash and is_avalanching => multiply with a constant to avalanche bits upwards
+ return m_hash(key) * UINT64_C(0x9ddfea08eb382d69);
+ } else {
+ // 64bit and is_avalanching => only use the hash itself.
+ return m_hash(key);
+ }
+ } else {
+ // not is_avalanching => apply wyhash
+ return wyhash::hash(m_hash(key));
+ }
+ }
+
+ [[nodiscard]] constexpr auto dist_and_fingerprint_from_hash(uint64_t hash) const -> dist_and_fingerprint_type {
+ return Bucket::dist_inc | (static_cast<dist_and_fingerprint_type>(hash) & Bucket::fingerprint_mask);
+ }
+
+ [[nodiscard]] constexpr auto bucket_idx_from_hash(uint64_t hash) const -> value_idx_type {
+ return static_cast<value_idx_type>(hash >> m_shifts);
+ }
+
+ [[nodiscard]] static constexpr auto get_key(value_type const& vt) -> key_type const& {
+ if constexpr (std::is_void_v<T>) {
+ return vt;
+ } else {
+ return vt.first;
+ }
+ }
+
+ template <typename K>
+ [[nodiscard]] auto next_while_less(K const& key) const -> Bucket {
+ auto hash = mixed_hash(key);
+ auto dist_and_fingerprint = dist_and_fingerprint_from_hash(hash);
+ auto bucket_idx = bucket_idx_from_hash(hash);
+
+ while (dist_and_fingerprint < at(m_buckets, bucket_idx).m_dist_and_fingerprint) {
+ dist_and_fingerprint = dist_inc(dist_and_fingerprint);
+ bucket_idx = next(bucket_idx);
+ }
+ return {dist_and_fingerprint, bucket_idx};
+ }
+
+ void place_and_shift_up(Bucket bucket, value_idx_type place) {
+ while (0 != at(m_buckets, place).m_dist_and_fingerprint) {
+ bucket = std::exchange(at(m_buckets, place), bucket);
+ bucket.m_dist_and_fingerprint = dist_inc(bucket.m_dist_and_fingerprint);
+ place = next(place);
+ }
+ at(m_buckets, place) = bucket;
+ }
+
+ [[nodiscard]] static constexpr auto calc_num_buckets(uint8_t shifts) -> size_t {
+ return std::min(max_bucket_count(), size_t{1} << (64U - shifts));
+ }
+
+ [[nodiscard]] constexpr auto calc_shifts_for_size(size_t s) const -> uint8_t {
+ auto shifts = initial_shifts;
+ while (shifts > 0 && static_cast<size_t>(static_cast<float>(calc_num_buckets(shifts)) * max_load_factor()) < s) {
+ --shifts;
+ }
+ return shifts;
+ }
+
+    // assumes m_values has data, m_buckets is nullptr, m_shifts is initial_shifts
+ void copy_buckets(table const& other) {
+ if (!empty()) {
+ m_shifts = other.m_shifts;
+ allocate_buckets_from_shift();
+ std::memcpy(m_buckets, other.m_buckets, sizeof(Bucket) * bucket_count());
+ }
+ }
+
+ /**
+ * True when no element can be added any more without increasing the size
+ */
+ [[nodiscard]] auto is_full() const -> bool {
+ return size() >= m_max_bucket_capacity;
+ }
+
+ void deallocate_buckets() {
+ auto ba = bucket_alloc(m_values.get_allocator());
+ if (nullptr != m_buckets) {
+ bucket_alloc_traits::deallocate(ba, m_buckets, bucket_count());
+ }
+ m_buckets = nullptr;
+ m_num_buckets = 0;
+ m_max_bucket_capacity = 0;
+ }
+
+ void allocate_buckets_from_shift() {
+ auto ba = bucket_alloc(m_values.get_allocator());
+ m_num_buckets = calc_num_buckets(m_shifts);
+ m_buckets = bucket_alloc_traits::allocate(ba, m_num_buckets);
+ if (m_num_buckets == max_bucket_count()) {
+ // reached the maximum, make sure we can use each bucket
+ m_max_bucket_capacity = max_bucket_count();
+ } else {
+ m_max_bucket_capacity = static_cast<value_idx_type>(static_cast<float>(m_num_buckets) * max_load_factor());
+ }
+ }
+
+ void clear_buckets() {
+ if (m_buckets != nullptr) {
+ std::memset(&*m_buckets, 0, sizeof(Bucket) * bucket_count());
+ }
+ }
+
+ void clear_and_fill_buckets_from_values() {
+ clear_buckets();
+ for (value_idx_type value_idx = 0, end_idx = static_cast<value_idx_type>(m_values.size()); value_idx < end_idx;
+ ++value_idx) {
+ auto const& key = get_key(m_values[value_idx]);
+ auto [dist_and_fingerprint, bucket] = next_while_less(key);
+
+ // we know for certain that key has not yet been inserted, so no need to check it.
+ place_and_shift_up({dist_and_fingerprint, value_idx}, bucket);
+ }
+ }
+
+ void increase_size() {
+ if (ANKERL_UNORDERED_DENSE_UNLIKELY(m_max_bucket_capacity == max_bucket_count())) {
+ throw std::overflow_error("ankerl::unordered_dense: reached max bucket size, cannot increase size");
+ }
+ --m_shifts;
+ deallocate_buckets();
+ allocate_buckets_from_shift();
+ clear_and_fill_buckets_from_values();
+ }
+
+ void do_erase(value_idx_type bucket_idx) {
+ auto const value_idx_to_remove = at(m_buckets, bucket_idx).m_value_idx;
+
+ // shift down until either empty or an element with correct spot is found
+ auto next_bucket_idx = next(bucket_idx);
+ while (at(m_buckets, next_bucket_idx).m_dist_and_fingerprint >= Bucket::dist_inc * 2) {
+ at(m_buckets, bucket_idx) = {dist_dec(at(m_buckets, next_bucket_idx).m_dist_and_fingerprint),
+ at(m_buckets, next_bucket_idx).m_value_idx};
+ bucket_idx = std::exchange(next_bucket_idx, next(next_bucket_idx));
+ }
+ at(m_buckets, bucket_idx) = {};
+
+ // update m_values
+ if (value_idx_to_remove != m_values.size() - 1) {
+ // no luck, we'll have to replace the value with the last one and update the index accordingly
+ auto& val = m_values[value_idx_to_remove];
+ val = std::move(m_values.back());
+
+ // update the values_idx of the moved entry. No need to play the info game, just look until we find the values_idx
+ auto mh = mixed_hash(get_key(val));
+ bucket_idx = bucket_idx_from_hash(mh);
+
+ auto const values_idx_back = static_cast<value_idx_type>(m_values.size() - 1);
+ while (values_idx_back != at(m_buckets, bucket_idx).m_value_idx) {
+ bucket_idx = next(bucket_idx);
+ }
+ at(m_buckets, bucket_idx).m_value_idx = value_idx_to_remove;
+ }
+ m_values.pop_back();
+ }
+
+ template <typename K>
+ auto do_erase_key(K&& key) -> size_t {
+ if (empty()) {
+ return 0;
+ }
+
+ auto [dist_and_fingerprint, bucket_idx] = next_while_less(key);
+
+ while (dist_and_fingerprint == at(m_buckets, bucket_idx).m_dist_and_fingerprint &&
+ !m_equal(key, get_key(m_values[at(m_buckets, bucket_idx).m_value_idx]))) {
+ dist_and_fingerprint = dist_inc(dist_and_fingerprint);
+ bucket_idx = next(bucket_idx);
+ }
+
+ if (dist_and_fingerprint != at(m_buckets, bucket_idx).m_dist_and_fingerprint) {
+ return 0;
+ }
+ do_erase(bucket_idx);
+ return 1;
+ }
+
+ template <class K, class M>
+ auto do_insert_or_assign(K&& key, M&& mapped) -> std::pair<iterator, bool> {
+ auto it_isinserted = try_emplace(std::forward<K>(key), std::forward<M>(mapped));
+ if (!it_isinserted.second) {
+ it_isinserted.first->second = std::forward<M>(mapped);
+ }
+ return it_isinserted;
+ }
+
+ template <typename K, typename... Args>
+ auto do_place_element(dist_and_fingerprint_type dist_and_fingerprint, value_idx_type bucket_idx, K&& key, Args&&... args)
+ -> std::pair<iterator, bool> {
+
+ // emplace the new value. If that throws an exception, no harm done; index is still in a valid state
+ m_values.emplace_back(std::piecewise_construct,
+ std::forward_as_tuple(std::forward<K>(key)),
+ std::forward_as_tuple(std::forward<Args>(args)...));
+
+ // place element and shift up until we find an empty spot
+ auto value_idx = static_cast<value_idx_type>(m_values.size() - 1);
+ place_and_shift_up({dist_and_fingerprint, value_idx}, bucket_idx);
+ return {begin() + static_cast<difference_type>(value_idx), true};
+ }
+
+ template <typename K, typename... Args>
+ auto do_try_emplace(K&& key, Args&&... args) -> std::pair<iterator, bool> {
+ if (ANKERL_UNORDERED_DENSE_UNLIKELY(is_full())) {
+ increase_size();
+ }
+
+ auto hash = mixed_hash(key);
+ auto dist_and_fingerprint = dist_and_fingerprint_from_hash(hash);
+ auto bucket_idx = bucket_idx_from_hash(hash);
+
+ while (true) {
+ auto* bucket = &at(m_buckets, bucket_idx);
+ if (dist_and_fingerprint == bucket->m_dist_and_fingerprint) {
+ if (m_equal(key, m_values[bucket->m_value_idx].first)) {
+ return {begin() + static_cast<difference_type>(bucket->m_value_idx), false};
+ }
+ } else if (dist_and_fingerprint > bucket->m_dist_and_fingerprint) {
+ return do_place_element(dist_and_fingerprint, bucket_idx, std::forward<K>(key), std::forward<Args>(args)...);
+ }
+ dist_and_fingerprint = dist_inc(dist_and_fingerprint);
+ bucket_idx = next(bucket_idx);
+ }
+ }
+
+ template <typename K>
+ auto do_find(K const& key) -> iterator {
+ if (ANKERL_UNORDERED_DENSE_UNLIKELY(empty())) {
+ return end();
+ }
+
+ auto mh = mixed_hash(key);
+ auto dist_and_fingerprint = dist_and_fingerprint_from_hash(mh);
+ auto bucket_idx = bucket_idx_from_hash(mh);
+ auto* bucket = &at(m_buckets, bucket_idx);
+
+ // unrolled loop. *Always* check a few directly, then enter the loop. This is faster.
+ if (dist_and_fingerprint == bucket->m_dist_and_fingerprint && m_equal(key, get_key(m_values[bucket->m_value_idx]))) {
+ return begin() + static_cast<difference_type>(bucket->m_value_idx);
+ }
+ dist_and_fingerprint = dist_inc(dist_and_fingerprint);
+ bucket_idx = next(bucket_idx);
+ bucket = &at(m_buckets, bucket_idx);
+
+ if (dist_and_fingerprint == bucket->m_dist_and_fingerprint && m_equal(key, get_key(m_values[bucket->m_value_idx]))) {
+ return begin() + static_cast<difference_type>(bucket->m_value_idx);
+ }
+ dist_and_fingerprint = dist_inc(dist_and_fingerprint);
+ bucket_idx = next(bucket_idx);
+ bucket = &at(m_buckets, bucket_idx);
+
+ while (true) {
+ if (dist_and_fingerprint == bucket->m_dist_and_fingerprint) {
+ if (m_equal(key, get_key(m_values[bucket->m_value_idx]))) {
+ return begin() + static_cast<difference_type>(bucket->m_value_idx);
+ }
+ } else if (dist_and_fingerprint > bucket->m_dist_and_fingerprint) {
+ return end();
+ }
+ dist_and_fingerprint = dist_inc(dist_and_fingerprint);
+ bucket_idx = next(bucket_idx);
+ bucket = &at(m_buckets, bucket_idx);
+ }
+ }
+
+ template <typename K>
+ auto do_find(K const& key) const -> const_iterator {
+ return const_cast<table*>(this)->do_find(key); // NOLINT(cppcoreguidelines-pro-type-const-cast)
+ }
+
+ template <typename K, typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
+ auto do_at(K const& key) -> Q& {
+ if (auto it = find(key); end() != it) {
+ return it->second;
+ }
+ throw std::out_of_range("ankerl::unordered_dense::map::at(): key not found");
+ }
+
+ template <typename K, typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
+ auto do_at(K const& key) const -> Q const& {
+ return const_cast<table*>(this)->at(key); // NOLINT(cppcoreguidelines-pro-type-const-cast)
+ }
+
+public:
+ table()
+ : table(0) {}
+
+ explicit table(size_t /*bucket_count*/,
+ Hash const& hash = Hash(),
+ KeyEqual const& equal = KeyEqual(),
+ allocator_type const& alloc_or_container = allocator_type())
+ : m_values(alloc_or_container)
+ , m_hash(hash)
+ , m_equal(equal) {}
+
+ table(size_t bucket_count, allocator_type const& alloc)
+ : table(bucket_count, Hash(), KeyEqual(), alloc) {}
+
+ table(size_t bucket_count, Hash const& hash, allocator_type const& alloc)
+ : table(bucket_count, hash, KeyEqual(), alloc) {}
+
+ explicit table(allocator_type const& alloc)
+ : table(0, Hash(), KeyEqual(), alloc) {}
+
+ template <class InputIt>
+ table(InputIt first,
+ InputIt last,
+ size_type bucket_count = 0,
+ Hash const& hash = Hash(),
+ KeyEqual const& equal = KeyEqual(),
+ allocator_type const& alloc = allocator_type())
+ : table(bucket_count, hash, equal, alloc) {
+ insert(first, last);
+ }
+
+ template <class InputIt>
+ table(InputIt first, InputIt last, size_type bucket_count, allocator_type const& alloc)
+ : table(first, last, bucket_count, Hash(), KeyEqual(), alloc) {}
+
+ template <class InputIt>
+ table(InputIt first, InputIt last, size_type bucket_count, Hash const& hash, allocator_type const& alloc)
+ : table(first, last, bucket_count, hash, KeyEqual(), alloc) {}
+
+ table(table const& other)
+ : table(other, other.m_values.get_allocator()) {}
+
+ table(table const& other, allocator_type const& alloc)
+ : m_values(other.m_values, alloc)
+ , m_max_load_factor(other.m_max_load_factor)
+ , m_hash(other.m_hash)
+ , m_equal(other.m_equal) {
+ copy_buckets(other);
+ }
+
+ table(table&& other) noexcept
+ : table(std::move(other), other.m_values.get_allocator()) {}
+
+ table(table&& other, allocator_type const& alloc) noexcept
+ : m_values(std::move(other.m_values), alloc)
+ , m_buckets(std::exchange(other.m_buckets, nullptr))
+ , m_num_buckets(std::exchange(other.m_num_buckets, 0))
+ , m_max_bucket_capacity(std::exchange(other.m_max_bucket_capacity, 0))
+ , m_max_load_factor(std::exchange(other.m_max_load_factor, default_max_load_factor))
+ , m_hash(std::exchange(other.m_hash, {}))
+ , m_equal(std::exchange(other.m_equal, {}))
+ , m_shifts(std::exchange(other.m_shifts, initial_shifts)) {
+ other.m_values.clear();
+ }
+
+ table(std::initializer_list<value_type> ilist,
+ size_t bucket_count = 0,
+ Hash const& hash = Hash(),
+ KeyEqual const& equal = KeyEqual(),
+ allocator_type const& alloc = allocator_type())
+ : table(bucket_count, hash, equal, alloc) {
+ insert(ilist);
+ }
+
+ table(std::initializer_list<value_type> ilist, size_type bucket_count, allocator_type const& alloc)
+ : table(ilist, bucket_count, Hash(), KeyEqual(), alloc) {}
+
+ table(std::initializer_list<value_type> init, size_type bucket_count, Hash const& hash, allocator_type const& alloc)
+ : table(init, bucket_count, hash, KeyEqual(), alloc) {}
+
+ ~table() {
+ auto ba = bucket_alloc(m_values.get_allocator());
+ bucket_alloc_traits::deallocate(ba, m_buckets, bucket_count());
+ }
+
+    // Copy assignment. Buckets are deallocated *before* m_values is assigned
+    // because the copy may bring a different allocator; buckets are then
+    // rebuilt from the copied contents.
+    auto operator=(table const& other) -> table& {
+        if (&other != this) {
+            deallocate_buckets(); // deallocate before m_values is set (might have another allocator)
+            m_values = other.m_values;
+            m_max_load_factor = other.m_max_load_factor;
+            m_hash = other.m_hash;
+            m_equal = other.m_equal;
+            m_shifts = initial_shifts;
+            copy_buckets(other);
+        }
+        return *this;
+    }
+
+    // Move assignment: steals the other table's storage and resets the source
+    // to a valid empty state (all members std::exchange'd to their defaults).
+    auto operator=(table&& other) noexcept(
+        noexcept(std::is_nothrow_move_assignable_v<value_container_type>&& std::is_nothrow_move_assignable_v<Hash>&&
+                 std::is_nothrow_move_assignable_v<KeyEqual>)) -> table& {
+        if (&other != this) {
+            deallocate_buckets(); // deallocate before m_values is set (might have another allocator)
+            m_values = std::move(other.m_values);
+            m_buckets = std::exchange(other.m_buckets, nullptr);
+            m_num_buckets = std::exchange(other.m_num_buckets, 0);
+            m_max_bucket_capacity = std::exchange(other.m_max_bucket_capacity, 0);
+            m_max_load_factor = std::exchange(other.m_max_load_factor, default_max_load_factor);
+            m_hash = std::exchange(other.m_hash, {});
+            m_equal = std::exchange(other.m_equal, {});
+            m_shifts = std::exchange(other.m_shifts, initial_shifts);
+            other.m_values.clear();
+        }
+        return *this;
+    }
+
+    // Replaces all contents with the elements of the initializer list.
+    auto operator=(std::initializer_list<value_type> ilist) -> table& {
+        clear();
+        insert(ilist);
+        return *this;
+    }
+
+    auto get_allocator() const noexcept -> allocator_type {
+        return m_values.get_allocator();
+    }
+
+    // iterators //////////////////////////////////////////////////////////////
+    // All iterators come straight from the underlying dense value container.
+
+    auto begin() noexcept -> iterator {
+        return m_values.begin();
+    }
+
+    auto begin() const noexcept -> const_iterator {
+        return m_values.begin();
+    }
+
+    auto cbegin() const noexcept -> const_iterator {
+        return m_values.cbegin();
+    }
+
+    auto end() noexcept -> iterator {
+        return m_values.end();
+    }
+
+    auto cend() const noexcept -> const_iterator {
+        return m_values.cend();
+    }
+
+    auto end() const noexcept -> const_iterator {
+        return m_values.end();
+    }
+
+    // capacity ///////////////////////////////////////////////////////////////
+
+    [[nodiscard]] auto empty() const noexcept -> bool {
+        return m_values.empty();
+    }
+
+    [[nodiscard]] auto size() const noexcept -> size_t {
+        return m_values.size();
+    }
+
+    // Largest representable element count, limited by value_idx_type's width.
+    // When value_idx_type is as wide as size_t, one bit less is used so the
+    // shift below stays within the type's width.
+    [[nodiscard]] static constexpr auto max_size() noexcept -> size_t {
+        if constexpr (std::numeric_limits<value_idx_type>::max() == std::numeric_limits<size_t>::max()) {
+            return size_t{1} << (sizeof(value_idx_type) * 8 - 1);
+        } else {
+            return size_t{1} << (sizeof(value_idx_type) * 8);
+        }
+    }
+
+    // modifiers //////////////////////////////////////////////////////////////
+
+    // Removes all elements; bucket storage is kept but reset to empty.
+    void clear() {
+        m_values.clear();
+        clear_buckets();
+    }
+
+    // All insert() overloads funnel into emplace(). Hint overloads ignore the
+    // hint: bucket placement is determined solely by the hash.
+    auto insert(value_type const& value) -> std::pair<iterator, bool> {
+        return emplace(value);
+    }
+
+    auto insert(value_type&& value) -> std::pair<iterator, bool> {
+        return emplace(std::move(value));
+    }
+
+    template <class P, std::enable_if_t<std::is_constructible_v<value_type, P&&>, bool> = true>
+    auto insert(P&& value) -> std::pair<iterator, bool> {
+        return emplace(std::forward<P>(value));
+    }
+
+    auto insert(const_iterator /*hint*/, value_type const& value) -> iterator {
+        return insert(value).first;
+    }
+
+    auto insert(const_iterator /*hint*/, value_type&& value) -> iterator {
+        return insert(std::move(value)).first;
+    }
+
+    template <class P, std::enable_if_t<std::is_constructible_v<value_type, P&&>, bool> = true>
+    auto insert(const_iterator /*hint*/, P&& value) -> iterator {
+        return insert(std::forward<P>(value)).first;
+    }
+
+    template <class InputIt>
+    void insert(InputIt first, InputIt last) {
+        while (first != last) {
+            insert(*first);
+            ++first;
+        }
+    }
+
+    void insert(std::initializer_list<value_type> ilist) {
+        insert(ilist.begin(), ilist.end());
+    }
+
+    // nonstandard API: *this is emptied.
+    // Also see "A Standard flat_map" https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2022/p0429r9.pdf
+    auto extract() && -> value_container_type {
+        return std::move(m_values);
+    }
+
+    // nonstandard API:
+    // Discards the internally held container and replaces it with the one passed. Erases non-unique elements.
+    // Throws std::out_of_range when the container holds more elements than max_size().
+    auto replace(value_container_type&& container) {
+        if (container.size() > max_size()) {
+            throw std::out_of_range("ankerl::unordered_dense::map::replace(): too many elements");
+        }
+
+        auto shifts = calc_shifts_for_size(container.size());
+        if (0 == m_num_buckets || shifts < m_shifts || container.get_allocator() != m_values.get_allocator()) {
+            m_shifts = shifts;
+            deallocate_buckets();
+            allocate_buckets_from_shift();
+        }
+        clear_buckets();
+
+        m_values = std::move(container);
+
+        // can't use clear_and_fill_buckets_from_values() because container elements might not be unique
+        auto value_idx = value_idx_type{};
+
+        // loop until we reach the end of the container. duplicated entries will be replaced with back().
+        while (value_idx != static_cast<value_idx_type>(m_values.size())) {
+            auto const& key = get_key(m_values[value_idx]);
+
+            auto hash = mixed_hash(key);
+            auto dist_and_fingerprint = dist_and_fingerprint_from_hash(hash);
+            auto bucket_idx = bucket_idx_from_hash(hash);
+
+            bool key_found = false;
+            while (true) {
+                auto const& bucket = at(m_buckets, bucket_idx);
+                if (dist_and_fingerprint > bucket.m_dist_and_fingerprint) {
+                    break;
+                }
+                // use get_key() instead of ".first" so this also works for sets
+                // (T == void), consistent with the key access above
+                if (dist_and_fingerprint == bucket.m_dist_and_fingerprint &&
+                    m_equal(key, get_key(m_values[bucket.m_value_idx]))) {
+                    key_found = true;
+                    break;
+                }
+                dist_and_fingerprint = dist_inc(dist_and_fingerprint);
+                bucket_idx = next(bucket_idx);
+            }
+
+            if (key_found) {
+                // duplicate: overwrite with the last element, then shrink
+                if (value_idx != static_cast<value_idx_type>(m_values.size() - 1)) {
+                    m_values[value_idx] = std::move(m_values.back());
+                }
+                m_values.pop_back();
+            } else {
+                place_and_shift_up({dist_and_fingerprint, value_idx}, bucket_idx);
+                ++value_idx;
+            }
+        }
+    }
+
+    // insert_or_assign: insert when the key is absent, otherwise overwrite the
+    // mapped value. Map-only (enabled via is_map_v); all overloads delegate to
+    // do_insert_or_assign(), and hint overloads ignore the hint.
+    template <class M, typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
+    auto insert_or_assign(Key const& key, M&& mapped) -> std::pair<iterator, bool> {
+        return do_insert_or_assign(key, std::forward<M>(mapped));
+    }
+
+    template <class M, typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
+    auto insert_or_assign(Key&& key, M&& mapped) -> std::pair<iterator, bool> {
+        return do_insert_or_assign(std::move(key), std::forward<M>(mapped));
+    }
+
+    // heterogeneous-key overload, available only with transparent Hash/KeyEqual
+    template <typename K,
+              typename M,
+              typename Q = T,
+              typename H = Hash,
+              typename KE = KeyEqual,
+              std::enable_if_t<is_map_v<Q> && is_transparent_v<H, KE>, bool> = true>
+    auto insert_or_assign(K&& key, M&& mapped) -> std::pair<iterator, bool> {
+        return do_insert_or_assign(std::forward<K>(key), std::forward<M>(mapped));
+    }
+
+    template <class M, typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
+    auto insert_or_assign(const_iterator /*hint*/, Key const& key, M&& mapped) -> iterator {
+        return do_insert_or_assign(key, std::forward<M>(mapped)).first;
+    }
+
+    template <class M, typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
+    auto insert_or_assign(const_iterator /*hint*/, Key&& key, M&& mapped) -> iterator {
+        return do_insert_or_assign(std::move(key), std::forward<M>(mapped)).first;
+    }
+
+    template <typename K,
+              typename M,
+              typename Q = T,
+              typename H = Hash,
+              typename KE = KeyEqual,
+              std::enable_if_t<is_map_v<Q> && is_transparent_v<H, KE>, bool> = true>
+    auto insert_or_assign(const_iterator /*hint*/, K&& key, M&& mapped) -> iterator {
+        return do_insert_or_assign(std::forward<K>(key), std::forward<M>(mapped)).first;
+    }
+
+    // Single arguments for unordered_set can be used without having to construct the value_type
+    template <class K,
+              typename Q = T,
+              typename H = Hash,
+              typename KE = KeyEqual,
+              std::enable_if_t<!is_map_v<Q> && is_transparent_v<H, KE>, bool> = true>
+    auto emplace(K&& key) -> std::pair<iterator, bool> {
+        if (is_full()) {
+            increase_size();
+        }
+
+        auto hash = mixed_hash(key);
+        auto dist_and_fingerprint = dist_and_fingerprint_from_hash(hash);
+        auto bucket_idx = bucket_idx_from_hash(hash);
+
+        // probe while our distance-and-fingerprint does not exceed the resident
+        // bucket's; equal fingerprints trigger a full key comparison
+        while (dist_and_fingerprint <= at(m_buckets, bucket_idx).m_dist_and_fingerprint) {
+            if (dist_and_fingerprint == at(m_buckets, bucket_idx).m_dist_and_fingerprint &&
+                m_equal(key, m_values[at(m_buckets, bucket_idx).m_value_idx])) {
+                // found it, return without ever actually creating anything
+                return {begin() + static_cast<difference_type>(at(m_buckets, bucket_idx).m_value_idx), false};
+            }
+            dist_and_fingerprint = dist_inc(dist_and_fingerprint);
+            bucket_idx = next(bucket_idx);
+        }
+
+        // value is new, insert element first, so when exception happens we are in a valid state
+        m_values.emplace_back(std::forward<K>(key));
+        // now place the bucket and shift up until we find an empty spot
+        auto value_idx = static_cast<value_idx_type>(m_values.size() - 1);
+        place_and_shift_up({dist_and_fingerprint, value_idx}, bucket_idx);
+        return {begin() + static_cast<difference_type>(value_idx), true};
+    }
+
+    // Generic emplace: must construct the value first to obtain the key; the
+    // value is popped again if the key turns out to already exist.
+    template <class... Args>
+    auto emplace(Args&&... args) -> std::pair<iterator, bool> {
+        if (is_full()) {
+            increase_size();
+        }
+
+        // we have to instantiate the value_type to be able to access the key.
+        // 1. emplace_back the object so it is constructed. 2. If the key is already there, pop it later in the loop.
+        auto& key = get_key(m_values.emplace_back(std::forward<Args>(args)...));
+        auto hash = mixed_hash(key);
+        auto dist_and_fingerprint = dist_and_fingerprint_from_hash(hash);
+        auto bucket_idx = bucket_idx_from_hash(hash);
+
+        while (dist_and_fingerprint <= at(m_buckets, bucket_idx).m_dist_and_fingerprint) {
+            if (dist_and_fingerprint == at(m_buckets, bucket_idx).m_dist_and_fingerprint &&
+                m_equal(key, get_key(m_values[at(m_buckets, bucket_idx).m_value_idx]))) {
+                m_values.pop_back(); // value was already there, so get rid of it
+                return {begin() + static_cast<difference_type>(at(m_buckets, bucket_idx).m_value_idx), false};
+            }
+            dist_and_fingerprint = dist_inc(dist_and_fingerprint);
+            bucket_idx = next(bucket_idx);
+        }
+
+        // value is new, place the bucket and shift up until we find an empty spot
+        auto value_idx = static_cast<value_idx_type>(m_values.size() - 1);
+        place_and_shift_up({dist_and_fingerprint, value_idx}, bucket_idx);
+
+        return {begin() + static_cast<difference_type>(value_idx), true};
+    }
+
+    // The hint is ignored; placement depends only on the hash.
+    template <class... Args>
+    auto emplace_hint(const_iterator /*hint*/, Args&&... args) -> iterator {
+        return emplace(std::forward<Args>(args)...).first;
+    }
+
+    // try_emplace overloads, maps only. All delegate to do_try_emplace()
+    // (defined elsewhere in this header); hints are ignored. Presumably the
+    // mapped value is only constructed on actual insertion, per the standard
+    // try_emplace contract — confirm against do_try_emplace's definition.
+    template <class... Args, typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
+    auto try_emplace(Key const& key, Args&&... args) -> std::pair<iterator, bool> {
+        return do_try_emplace(key, std::forward<Args>(args)...);
+    }
+
+    template <class... Args, typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
+    auto try_emplace(Key&& key, Args&&... args) -> std::pair<iterator, bool> {
+        return do_try_emplace(std::move(key), std::forward<Args>(args)...);
+    }
+
+    template <class... Args, typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
+    auto try_emplace(const_iterator /*hint*/, Key const& key, Args&&... args) -> iterator {
+        return do_try_emplace(key, std::forward<Args>(args)...).first;
+    }
+
+    template <class... Args, typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
+    auto try_emplace(const_iterator /*hint*/, Key&& key, Args&&... args) -> iterator {
+        return do_try_emplace(std::move(key), std::forward<Args>(args)...).first;
+    }
+
+    // heterogeneous-key overloads; the extra is_neither_convertible_v guard
+    // keeps these from hijacking calls that should pick the hint overloads
+    template <
+        typename K,
+        typename... Args,
+        typename Q = T,
+        typename H = Hash,
+        typename KE = KeyEqual,
+        std::enable_if_t<is_map_v<Q> && is_transparent_v<H, KE> && is_neither_convertible_v<K&&, iterator, const_iterator>,
+                         bool> = true>
+    auto try_emplace(K&& key, Args&&... args) -> std::pair<iterator, bool> {
+        return do_try_emplace(std::forward<K>(key), std::forward<Args>(args)...);
+    }
+
+    template <
+        typename K,
+        typename... Args,
+        typename Q = T,
+        typename H = Hash,
+        typename KE = KeyEqual,
+        std::enable_if_t<is_map_v<Q> && is_transparent_v<H, KE> && is_neither_convertible_v<K&&, iterator, const_iterator>,
+                         bool> = true>
+    auto try_emplace(const_iterator /*hint*/, K&& key, Args&&... args) -> iterator {
+        return do_try_emplace(std::forward<K>(key), std::forward<Args>(args)...).first;
+    }
+
+    // Erases the element at `it`: locates the bucket whose entry points at the
+    // element's dense-array index, then hands removal to do_erase().
+    auto erase(iterator it) -> iterator {
+        auto hash = mixed_hash(get_key(*it));
+        auto bucket_idx = bucket_idx_from_hash(hash);
+
+        auto const value_idx_to_remove = static_cast<value_idx_type>(it - cbegin());
+        while (at(m_buckets, bucket_idx).m_value_idx != value_idx_to_remove) {
+            bucket_idx = next(bucket_idx);
+        }
+
+        do_erase(bucket_idx);
+        return begin() + static_cast<difference_type>(value_idx_to_remove);
+    }
+
+    auto erase(const_iterator it) -> iterator {
+        return erase(begin() + (it - cbegin()));
+    }
+
+    // Range erase. Single-element erase() back-fills a hole with an element
+    // taken from the end, so the range is processed in two phases: left-to-right
+    // up to `mid` (pulling trailing elements in), then the remaining tail
+    // right-to-left.
+    auto erase(const_iterator first, const_iterator last) -> iterator {
+        auto const idx_first = first - cbegin();
+        auto const idx_last = last - cbegin();
+        auto const first_to_last = std::distance(first, last);
+        auto const last_to_end = std::distance(last, cend());
+
+        // remove elements from left to right which moves elements from the end back
+        auto const mid = idx_first + std::min(first_to_last, last_to_end);
+        auto idx = idx_first;
+        while (idx != mid) {
+            erase(begin() + idx);
+            ++idx;
+        }
+
+        // all elements from the right are moved, now remove the last element until all done
+        idx = idx_last;
+        while (idx != mid) {
+            --idx;
+            erase(begin() + idx);
+        }
+
+        return begin() + idx_first;
+    }
+
+    // Erase by key; returns the number of erased elements (0 or 1).
+    auto erase(Key const& key) -> size_t {
+        return do_erase_key(key);
+    }
+
+    template <class K, class H = Hash, class KE = KeyEqual, std::enable_if_t<is_transparent_v<H, KE>, bool> = true>
+    auto erase(K&& key) -> size_t {
+        return do_erase_key(std::forward<K>(key));
+    }
+
+    // NOTE(review): the noexcept operand is a constant expression built from
+    // the traits themselves — evaluating it can never throw — so this is
+    // effectively noexcept(true) regardless of the traits' values; probably
+    // noexcept(is_nothrow_swappable_v<...> && ...) was intended.
+    void swap(table& other) noexcept(noexcept(std::is_nothrow_swappable_v<value_container_type>&&
+                                                  std::is_nothrow_swappable_v<Hash>&& std::is_nothrow_swappable_v<KeyEqual>)) {
+        using std::swap;
+        swap(other, *this);
+    }
+
+    // lookup /////////////////////////////////////////////////////////////////
+
+    // at(): checked element access, maps only. do_at() is defined elsewhere in
+    // this header; presumably it throws for a missing key (standard at()
+    // contract) — confirm against its definition.
+    template <typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
+    auto at(key_type const& key) -> Q& {
+        return do_at(key);
+    }
+
+    template <typename K,
+              typename Q = T,
+              typename H = Hash,
+              typename KE = KeyEqual,
+              std::enable_if_t<is_map_v<Q> && is_transparent_v<H, KE>, bool> = true>
+    auto at(K const& key) -> Q& {
+        return do_at(key);
+    }
+
+    template <typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
+    auto at(key_type const& key) const -> Q const& {
+        return do_at(key);
+    }
+
+    template <typename K,
+              typename Q = T,
+              typename H = Hash,
+              typename KE = KeyEqual,
+              std::enable_if_t<is_map_v<Q> && is_transparent_v<H, KE>, bool> = true>
+    auto at(K const& key) const -> Q const& {
+        return do_at(key);
+    }
+
+    // operator[]: inserts via try_emplace when the key is absent, then returns
+    // a reference to the mapped value.
+    template <typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
+    auto operator[](Key const& key) -> Q& {
+        return try_emplace(key).first->second;
+    }
+
+    template <typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
+    auto operator[](Key&& key) -> Q& {
+        return try_emplace(std::move(key)).first->second;
+    }
+
+    template <typename K,
+              typename Q = T,
+              typename H = Hash,
+              typename KE = KeyEqual,
+              std::enable_if_t<is_map_v<Q> && is_transparent_v<H, KE>, bool> = true>
+    auto operator[](K&& key) -> Q& {
+        return try_emplace(std::forward<K>(key)).first->second;
+    }
+
+    // count() is always 0 or 1: keys are unique.
+    auto count(Key const& key) const -> size_t {
+        return find(key) == end() ? 0 : 1;
+    }
+
+    template <class K, class H = Hash, class KE = KeyEqual, std::enable_if_t<is_transparent_v<H, KE>, bool> = true>
+    auto count(K const& key) const -> size_t {
+        return find(key) == end() ? 0 : 1;
+    }
+
+    // find()/contains() delegate to do_find(); transparent overloads avoid
+    // constructing a Key from compatible types.
+    auto find(Key const& key) -> iterator {
+        return do_find(key);
+    }
+
+    auto find(Key const& key) const -> const_iterator {
+        return do_find(key);
+    }
+
+    template <class K, class H = Hash, class KE = KeyEqual, std::enable_if_t<is_transparent_v<H, KE>, bool> = true>
+    auto find(K const& key) -> iterator {
+        return do_find(key);
+    }
+
+    template <class K, class H = Hash, class KE = KeyEqual, std::enable_if_t<is_transparent_v<H, KE>, bool> = true>
+    auto find(K const& key) const -> const_iterator {
+        return do_find(key);
+    }
+
+    auto contains(Key const& key) const -> bool {
+        return find(key) != end();
+    }
+
+    template <class K, class H = Hash, class KE = KeyEqual, std::enable_if_t<is_transparent_v<H, KE>, bool> = true>
+    auto contains(K const& key) const -> bool {
+        return find(key) != end();
+    }
+
+    // equal_range: keys are unique, so the range has at most one element.
+    auto equal_range(Key const& key) -> std::pair<iterator, iterator> {
+        auto it = do_find(key);
+        return {it, it == end() ? end() : it + 1};
+    }
+
+    auto equal_range(const Key& key) const -> std::pair<const_iterator, const_iterator> {
+        auto it = do_find(key);
+        return {it, it == end() ? end() : it + 1};
+    }
+
+    template <class K, class H = Hash, class KE = KeyEqual, std::enable_if_t<is_transparent_v<H, KE>, bool> = true>
+    auto equal_range(K const& key) -> std::pair<iterator, iterator> {
+        auto it = do_find(key);
+        return {it, it == end() ? end() : it + 1};
+    }
+
+    template <class K, class H = Hash, class KE = KeyEqual, std::enable_if_t<is_transparent_v<H, KE>, bool> = true>
+    auto equal_range(K const& key) const -> std::pair<const_iterator, const_iterator> {
+        auto it = do_find(key);
+        return {it, it == end() ? end() : it + 1};
+    }
+
+    // bucket interface ///////////////////////////////////////////////////////
+
+    auto bucket_count() const noexcept -> size_t { // NOLINT(modernize-use-nodiscard)
+        return m_num_buckets;
+    }
+
+    static constexpr auto max_bucket_count() noexcept -> size_t { // NOLINT(modernize-use-nodiscard)
+        return max_size();
+    }
+
+    // hash policy ////////////////////////////////////////////////////////////
+
+    // current fill ratio; 0 when no buckets have been allocated yet
+    [[nodiscard]] auto load_factor() const -> float {
+        return bucket_count() ? static_cast<float>(size()) / static_cast<float>(bucket_count()) : 0.0F;
+    }
+
+    [[nodiscard]] auto max_load_factor() const -> float {
+        return m_max_load_factor;
+    }
+
+    // Sets the load-factor limit and recomputes the grow threshold, unless the
+    // table is already at its maximum bucket count (growing is then impossible).
+    void max_load_factor(float ml) {
+        m_max_load_factor = ml;
+        if (m_num_buckets != max_bucket_count()) {
+            m_max_bucket_capacity = static_cast<value_idx_type>(static_cast<float>(bucket_count()) * max_load_factor());
+        }
+    }
+
+    // Rebuilds the bucket array for at least `count` slots (never fewer than
+    // required by the current size); also shrinks the value storage.
+    void rehash(size_t count) {
+        count = std::min(count, max_size());
+        auto shifts = calc_shifts_for_size(std::max(count, size()));
+        if (shifts != m_shifts) {
+            m_shifts = shifts;
+            deallocate_buckets();
+            m_values.shrink_to_fit();
+            allocate_buckets_from_shift();
+            clear_and_fill_buckets_from_values();
+        }
+    }
+
+    // Pre-allocates for `capa` elements; only ever grows the bucket array.
+    void reserve(size_t capa) {
+        capa = std::min(capa, max_size());
+        m_values.reserve(capa);
+        auto shifts = calc_shifts_for_size(std::max(capa, size()));
+        if (0 == m_num_buckets || shifts < m_shifts) {
+            m_shifts = shifts;
+            deallocate_buckets();
+            allocate_buckets_from_shift();
+            clear_and_fill_buckets_from_values();
+        }
+    }
+
+    // observers //////////////////////////////////////////////////////////////
+
+    auto hash_function() const -> hasher {
+        return m_hash;
+    }
+
+    auto key_eq() const -> key_equal {
+        return m_equal;
+    }
+
+    // nonstandard API: expose the underlying values container
+    [[nodiscard]] auto values() const noexcept -> value_container_type const& {
+        return m_values;
+    }
+
+    // non-member functions ///////////////////////////////////////////////////
+
+    // Order-independent equality: sizes must match and every element of b must
+    // be found in a (and, for maps, compare equal in the mapped value too).
+    friend auto operator==(table const& a, table const& b) -> bool {
+        if (&a == &b) {
+            return true;
+        }
+        if (a.size() != b.size()) {
+            return false;
+        }
+        for (auto const& b_entry : b) {
+            auto it = a.find(get_key(b_entry));
+            if constexpr (std::is_void_v<T>) {
+                // set: only check that the key is here
+                if (a.end() == it) {
+                    return false;
+                }
+            } else {
+                // map: check that key is here, then also check that value is the same
+                if (a.end() == it || !(b_entry.second == it->second)) {
+                    return false;
+                }
+            }
+        }
+        return true;
+    }
+
+    friend auto operator!=(table const& a, table const& b) -> bool {
+        return !(a == b);
+    }
+};
+
+} // namespace detail
+
+// Public aliases: `map` is detail::table with a mapped type, `set` is the same
+// table with T = void (key-only storage).
+template <class Key,
+          class T,
+          class Hash = hash<Key>,
+          class KeyEqual = std::equal_to<Key>,
+          class AllocatorOrContainer = std::allocator<std::pair<Key, T>>,
+          class Bucket = bucket_type::standard>
+using map = detail::table<Key, T, Hash, KeyEqual, AllocatorOrContainer, Bucket>;
+
+template <class Key,
+          class Hash = hash<Key>,
+          class KeyEqual = std::equal_to<Key>,
+          class AllocatorOrContainer = std::allocator<Key>,
+          class Bucket = bucket_type::standard>
+using set = detail::table<Key, void, Hash, KeyEqual, AllocatorOrContainer, Bucket>;
+
+# if ANKERL_UNORDERED_DENSE_PMR
+
+// Variants backed by std::pmr::polymorphic_allocator, available only when the
+// ANKERL_UNORDERED_DENSE_PMR feature macro (set elsewhere in this header) is on.
+namespace pmr {
+
+template <class Key,
+          class T,
+          class Hash = hash<Key>,
+          class KeyEqual = std::equal_to<Key>,
+          class Bucket = bucket_type::standard>
+using map = detail::table<Key, T, Hash, KeyEqual, std::pmr::polymorphic_allocator<std::pair<Key, T>>, Bucket>;
+
+template <class Key, class Hash = hash<Key>, class KeyEqual = std::equal_to<Key>, class Bucket = bucket_type::standard>
+using set = detail::table<Key, void, Hash, KeyEqual, std::pmr::polymorphic_allocator<Key>, Bucket>;
+
+} // namespace pmr
+
+# endif
+
+// deduction guides ///////////////////////////////////////////////////////////
+
+// deduction guides for alias templates are only possible since C++20
+// see https://en.cppreference.com/w/cpp/language/class_template_argument_deduction
+
+} // namespace ANKERL_UNORDERED_DENSE_NAMESPACE
+} // namespace ankerl::unordered_dense
+
+// std extensions /////////////////////////////////////////////////////////////
+
+namespace std { // NOLINT(cert-dcl58-cpp)
+
+// Erases every element for which pred returns true.
+// Returns the number of erased elements, per the std::erase_if convention.
+template <class Key, class T, class Hash, class KeyEqual, class AllocatorOrContainer, class Bucket, class Pred>
+auto erase_if(ankerl::unordered_dense::detail::table<Key, T, Hash, KeyEqual, AllocatorOrContainer, Bucket>& map, Pred pred)
+    -> size_t {
+    using map_t = ankerl::unordered_dense::detail::table<Key, T, Hash, KeyEqual, AllocatorOrContainer, Bucket>;
+
+    // going back to front because erase() invalidates the end iterator
+    auto const old_size = map.size();
+    auto idx = old_size;
+    while (idx) {
+        --idx;
+        auto it = map.begin() + static_cast<typename map_t::difference_type>(idx);
+        if (pred(*it)) {
+            map.erase(it);
+        }
+    }
+
+    // size only ever shrinks here, so the erased count is old_size - size().
+    // (The original `map.size() - old_size` underflowed size_t whenever at
+    // least one element was erased.)
+    return old_size - map.size();
+}
+
+} // namespace std
+
+#endif
+#endif
diff --git a/misc/benchmarks/external/emhash/hash_table7.hpp b/misc/benchmarks/external/emhash/hash_table7.hpp
new file mode 100644
index 00000000..fdc33fe1
--- /dev/null
+++ b/misc/benchmarks/external/emhash/hash_table7.hpp
@@ -0,0 +1,1876 @@
+// emhash7::HashMap for C++11/14/17
+// version 2.2.3
+// https://github.com/ktprime/ktprime/blob/master/hash_table7.hpp
+//
+// Licensed under the MIT License <http://opensource.org/licenses/MIT>.
+// SPDX-License-Identifier: MIT
+// Copyright (c) 2019-2022 Huang Yuanbing & bailuzhou AT 163.com
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE
+
+// From
+// NUMBER OF PROBES / LOOKUP Successful Unsuccessful
+// Quadratic collision resolution 1 - ln(1-L) - L/2 1/(1-L) - L - ln(1-L)
+// Linear collision resolution [1+1/(1-L)]/2 [1+1/(1-L)2]/2
+// separator chain resolution 1 + L / 2 exp(-L) + L
+
+// -- enlarge_factor -- 0.10 0.50 0.60 0.75 0.80 0.90 0.99
+// QUADRATIC COLLISION RES.
+// probes/successful lookup 1.05 1.44 1.62 2.01 2.21 2.85 5.11
+// probes/unsuccessful lookup 1.11 2.19 2.82 4.64 5.81 11.4 103.6
+// LINEAR COLLISION RES.
+// probes/successful lookup 1.06 1.5 1.75 2.5 3.0 5.5 50.5
+// probes/unsuccessful lookup 1.12 2.5 3.6 8.5 13.0 50.0
+// SEPARATE CHAN RES.
+// probes/successful lookup 1.05 1.25 1.3 1.25 1.4 1.45 1.50
+// probes/unsuccessful lookup 1.00 1.11 1.15 1.22 1.25 1.31 1.37
+// clacul/unsuccessful lookup 1.01 1.25 1.36, 1.56, 1.64, 1.81, 1.97
+
+/****************
+ under random hashCodes, the frequency of nodes in bins follows a Poisson
+distribution(http://en.wikipedia.org/wiki/Poisson_distribution) with a parameter of about 0.5
+on average for the default resizing threshold of 0.75, although with a large variance because
+of resizing granularity. Ignoring variance, the expected occurrences of list size k are
+(exp(-0.5) * pow(0.5, k)/factorial(k)). The first values are:
+0: 0.60653066
+1: 0.30326533
+2: 0.07581633
+3: 0.01263606
+4: 0.00157952
+5: 0.00015795
+6: 0.00001316
+7: 0.00000094
+8: 0.00000006
+
+ ============== buckets size ratio ========
+ 1 1543981 0.36884964|0.36787944 36.885
+ 2 768655 0.36725597|0.36787944 73.611
+ 3 256236 0.18364065|0.18393972 91.975
+ 4 64126 0.06127757|0.06131324 98.102
+ 5 12907 0.01541710|0.01532831 99.644
+ 6 2050 0.00293841|0.00306566 99.938
+ 7 310 0.00051840|0.00051094 99.990
+ 8 49 0.00009365|0.00007299 99.999
+ 9 4 0.00000860|0.00000913 100.000
+========== collision miss ratio ===========
+ _num_filled aver_size k.v size_kv = 4185936, 1.58, x.x 24
+ collision,possion,cache_miss hit_find|hit_miss, load_factor = 36.73%,36.74%,31.31% 1.50|2.00, 1.00
+============== buckets size ratio ========
+*******************************************************/
+
+#pragma once
+
+#include <cstring>
+#include <string>
+#include <cmath>
+#include <cstdlib>
+#include <type_traits>
+#include <cassert>
+#include <utility>
+#include <cstdint>
+#include <functional>
+#include <iterator>
+#include <algorithm>
+
+#if EMH_WY_HASH
+ #include "wyhash.h"
+#endif
+
+#ifdef EMH_KEY
+ #undef EMH_KEY
+ #undef EMH_VAL
+ #undef EMH_PKV
+ #undef EMH_NEW
+ #undef EMH_SET
+ #undef EMH_BUCKET
+ #undef EMH_EMPTY
+#endif
+
+// likely/unlikely
+#if (__GNUC__ >= 4 || __clang__)
+# define EMH_LIKELY(condition) __builtin_expect(condition, 1)
+# define EMH_UNLIKELY(condition) __builtin_expect(condition, 0)
+#else
+# define EMH_LIKELY(condition) condition
+# define EMH_UNLIKELY(condition) condition
+#endif
+
+#ifndef EMH_BUCKET_INDEX
+ #define EMH_BUCKET_INDEX 1
+#endif
+
+#if EMH_BUCKET_INDEX == 0
+ #define EMH_KEY(p,n) p[n].second.first
+ #define EMH_VAL(p,n) p[n].second.second
+ #define EMH_BUCKET(p,n) p[n].first
+ #define EMH_PKV(p,n) p[n].second
+ #define EMH_NEW(key, val, bucket)\
+ new(_pairs + bucket) PairT(bucket, value_type(key, val));\
+ _num_filled ++; EMH_SET(bucket)
+#elif EMH_BUCKET_INDEX == 2
+ #define EMH_KEY(p,n) p[n].first.first
+ #define EMH_VAL(p,n) p[n].first.second
+ #define EMH_BUCKET(p,n) p[n].second
+ #define EMH_PKV(p,n) p[n].first
+ #define EMH_NEW(key, val, bucket)\
+ new(_pairs + bucket) PairT(value_type(key, val), bucket);\
+ _num_filled ++; EMH_SET(bucket)
+#else
+ #define EMH_KEY(p,n) p[n].first
+ #define EMH_VAL(p,n) p[n].second
+ #define EMH_BUCKET(p,n) p[n].bucket
+ #define EMH_PKV(p,n) p[n]
+ #define EMH_NEW(key, val, bucket)\
+ new(_pairs + bucket) PairT(key, val, bucket);\
+ _num_filled ++; EMH_SET(bucket)
+#endif
+
+// Bitmask helpers: EMH_SET clears a bit (mark occupied), EMH_CLS sets it
+// (mark empty), EMH_EMPTY tests it. Arguments and expansions are fully
+// parenthesized so an expression argument such as EMH_MASK(i + 1) expands
+// with the intended precedence (CERT PRE01-C / PRE02-C).
+#define EMH_MASK(bucket) (1 << ((bucket) % MASK_BIT))
+#define EMH_SET(bucket) (_bitmask[(bucket) / MASK_BIT] &= ~EMH_MASK(bucket))
+#define EMH_CLS(bucket) (_bitmask[(bucket) / MASK_BIT] |= EMH_MASK(bucket))
+#define EMH_EMPTY(bitmask, bucket) ((_bitmask[(bucket) / MASK_BIT] & EMH_MASK(bucket)) != 0)
+
+#if _WIN32
+ #include <intrin.h>
+#if _WIN64
+ #pragma intrinsic(_umul128)
+#endif
+#endif
+
+namespace emhash7 {
+
+#ifdef EMH_SIZE_TYPE_16BIT
+ typedef uint16_t size_type;
+ static constexpr size_type INACTIVE = 0xFFFF;
+#elif EMH_SIZE_TYPE_64BIT
+ typedef uint64_t size_type;
+ static constexpr size_type INACTIVE = 0 - 0x1ull;
+#else
+ typedef uint32_t size_type;
+ static constexpr size_type INACTIVE = 0 - 0x1u;
+#endif
+
+#ifndef EMH_SIZE_TYPE_16BIT
+static_assert((int)INACTIVE < 0, "INACTIVE must negative (to int)");
+#endif
+
+// Returns the index of the lowest set bit of n, i.e. the count of *trailing*
+// zeros (the original comment said "leading", but every branch below uses a
+// bit-scan-forward / ctz primitive).
+inline static int CTZ(size_t n)
+{
+#if defined(__x86_64__) || defined(_WIN32) || (__BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
+    // little-endian (or x86/Windows): the word can be scanned as-is
+#elif __BIG_ENDIAN__ || (__BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
+    n = __builtin_bswap64(n);
+#else
+    // endianness unknown at compile time: detect it at run time
+    static uint32_t endianness = 0x12345678;
+    const auto is_big = *(const char *)&endianness == 0x12;
+    if (is_big)
+        n = __builtin_bswap64(n);
+#endif
+
+#if _WIN32
+    unsigned long index;
+    #if defined(_WIN64)
+    _BitScanForward64(&index, n);
+    #else
+    _BitScanForward(&index, n);
+    #endif
+#elif defined (__LP64__) || (SIZE_MAX == UINT64_MAX) || defined (__x86_64__)
+    auto index = __builtin_ctzll(n);
+#elif 1
+    auto index = __builtin_ctzl(n);
+#else
+    // inline-asm fallback (unreachable: the "#elif 1" above always takes over)
+    #if defined (__LP64__) || (SIZE_MAX == UINT64_MAX) || defined (__x86_64__)
+    size_type index;
+    __asm__("bsfq %1, %0\n" : "=r" (index) : "rm" (n) : "cc");
+    #else
+    size_type index;
+    __asm__("bsf %1, %0\n" : "=r" (index) : "rm" (n) : "cc");
+    #endif
+#endif
+
+    return (int)index;
+}
+
+// Storage slot for one key/value pair plus the bucket index used for chaining.
+// Member order (key-first vs. value-first) is switched by EMH_ORDER_KV to
+// control struct layout; `second` is listed before `first` in the initializer
+// lists to match the value-first default layout.
+template <typename First, typename Second>
+struct entry {
+    using first_type = First;
+    using second_type = Second;
+    entry(const First& key, const Second& val, size_type ibucket)
+        :second(val), first(key)
+    {
+        bucket = ibucket;
+    }
+
+    entry(First&& key, Second&& val, size_type ibucket)
+        :second(std::move(val)), first(std::move(key))
+    {
+        bucket = ibucket;
+    }
+
+    template<typename K, typename V>
+    entry(K&& key, V&& val, size_type ibucket)
+        :second(std::forward<V>(val)), first(std::forward<K>(key))
+    {
+        bucket = ibucket;
+    }
+
+    entry(const std::pair<First, Second>& pair)
+        :second(pair.second), first(pair.first)
+    {
+        bucket = INACTIVE;
+    }
+
+    entry(std::pair<First, Second>&& pair)
+        :second(std::move(pair.second)), first(std::move(pair.first))
+    {
+        bucket = INACTIVE;
+    }
+
+    // Fixed: the original used std::get<2>/std::get<1>, but std::get<2> is out
+    // of range for a two-element tuple (ill-formed when instantiated) and the
+    // mapping was off by one. first comes from get<0>, second from get<1>.
+    entry(std::tuple<First, Second>&& tup)
+        :second(std::move(std::get<1>(tup))), first(std::move(std::get<0>(tup)))
+    {
+        bucket = INACTIVE;
+    }
+
+    entry(const entry& rhs)
+        :second(rhs.second), first(rhs.first)
+    {
+        bucket = rhs.bucket;
+    }
+
+    entry(entry&& rhs) noexcept
+        :second(std::move(rhs.second)), first(std::move(rhs.first))
+    {
+        bucket = rhs.bucket;
+    }
+
+    entry& operator = (entry&& rhs) noexcept
+    {
+        second = std::move(rhs.second);
+        bucket = rhs.bucket;
+        first = std::move(rhs.first);
+        return *this;
+    }
+
+    entry& operator = (const entry& rhs)
+    {
+        second = rhs.second;
+        bucket = rhs.bucket;
+        first = rhs.first;
+        return *this;
+    }
+
+    // equality ignores the bucket index: only key and value matter
+    bool operator == (const entry<First, Second>& p) const
+    {
+        return first == p.first && second == p.second;
+    }
+
+    bool operator == (const std::pair<First, Second>& p) const
+    {
+        return first == p.first && second == p.second;
+    }
+
+    // swaps key and value but NOT the bucket index (buckets stay in place)
+    void swap(entry<First, Second>& o)
+    {
+        std::swap(second, o.second);
+        std::swap(first, o.first);
+    }
+
+#if EMH_ORDER_KV || EMH_SIZE_TYPE_64BIT
+    First first;
+    size_type bucket;
+    Second second;
+#else
+    Second second;
+    size_type bucket;
+    First first;
+#endif
+};
+
+/// A cache-friendly hash table with open addressing, linear/qua probing and power-of-two capacity
+template <typename KeyT, typename ValueT, typename HashT = std::hash<KeyT>, typename EqT = std::equal_to<KeyT>>
+class HashMap
+{
+#ifndef EMH_DEFAULT_LOAD_FACTOR
+ constexpr static float EMH_DEFAULT_LOAD_FACTOR = 0.80f;
+ constexpr static float EMH_MIN_LOAD_FACTOR = 0.25f; //< 0.5
+#endif
+
+public:
+ typedef HashMap<KeyT, ValueT, HashT, EqT> htype;
+ typedef std::pair<KeyT, ValueT> value_type;
+
+#if EMH_BUCKET_INDEX == 0
+ typedef value_type value_pair;
+ typedef std::pair<size_type, value_type> PairT;
+#elif EMH_BUCKET_INDEX == 2
+ typedef value_type value_pair;
+ typedef std::pair<value_type, size_type> PairT;
+#else
+ typedef entry<KeyT, ValueT> value_pair;
+ typedef entry<KeyT, ValueT> PairT;
+#endif
+
+ typedef KeyT key_type;
+ typedef ValueT val_type;
+ typedef ValueT mapped_type;
+ typedef HashT hasher;
+ typedef EqT key_equal;
+ typedef PairT& reference;
+ typedef const PairT& const_reference;
+
+ class const_iterator;
+    // Forward iterator that scans occupied buckets via the occupancy bitmask.
+    // _bmask caches the (inverted) bitmask word beginning at bucket _from, so
+    // set bits in _bmask mark occupied buckets; CTZ finds the next one.
+    class iterator
+    {
+    public:
+        typedef std::forward_iterator_tag iterator_category;
+        typedef std::ptrdiff_t            difference_type;
+        typedef value_pair                value_type;
+
+        typedef value_pair*               pointer;
+        typedef value_pair&               reference;
+
+        iterator() = default;
+        iterator(const const_iterator& it) : _map(it._map), _bucket(it._bucket), _from(it._from), _bmask(it._bmask) { }
+        // The extra bool tag forces init(); used when _bucket is known occupied.
+        iterator(const htype* hash_map, size_type bucket, bool) : _map(hash_map), _bucket(bucket) { init(); }
+#if EMH_ITER_SAFE
+        iterator(const htype* hash_map, size_type bucket) : _map(hash_map), _bucket(bucket) { init(); }
+#else
+        // Cheap ctor for end()/find(): no mask setup (iteration not intended).
+        iterator(const htype* hash_map, size_type bucket) : _map(hash_map), _bucket(bucket) { _bmask = _from = 0; }
+#endif
+
+        // Load the bitmask word containing _bucket, mask off buckets below
+        // _bucket, and invert so set bits = occupied buckets at/after _bucket.
+        void init()
+        {
+            _from = (_bucket / SIZE_BIT) * SIZE_BIT;
+            if (_bucket < _map->bucket_count()) {
+                _bmask = *(size_t*)((size_t*)_map->_bitmask + _from / SIZE_BIT);
+                _bmask |= (1ull << _bucket % SIZE_BIT) - 1;
+                _bmask = ~_bmask;
+            } else {
+                _bmask = 0;
+            }
+        }
+
+        size_type bucket() const
+        {
+            return _bucket;
+        }
+
+        // Drop `bucket` from the cached mask so erase() can skip a slot that
+        // was refilled by bucket relocation (see HashMap::erase(iterator)).
+        void clear(size_type bucket)
+        {
+            if (_bucket / SIZE_BIT == bucket / SIZE_BIT)
+                _bmask &= ~(1ull << (bucket % SIZE_BIT));
+        }
+
+        // Advance to the next occupied bucket WITHOUT clearing the current bit
+        // (used by begin() where _bucket itself is not yet consumed).
+        iterator& next()
+        {
+            goto_next_element();
+            return *this;
+        }
+
+        iterator& operator++()
+        {
+            _bmask &= _bmask - 1;   // clear lowest set bit = current bucket
+            goto_next_element();
+            return *this;
+        }
+
+        iterator operator++(int)
+        {
+            iterator old = *this;
+            _bmask &= _bmask - 1;
+            goto_next_element();
+            return old;
+        }
+
+        reference operator*() const
+        {
+            return _map->EMH_PKV(_pairs, _bucket);
+        }
+
+        pointer operator->() const
+        {
+            return &(_map->EMH_PKV(_pairs, _bucket));
+        }
+
+        // Equality compares bucket position only; comparing iterators from
+        // different maps is not meaningful.
+        bool operator==(const iterator& rhs) const { return _bucket == rhs._bucket; }
+        bool operator!=(const iterator& rhs) const { return _bucket != rhs._bucket; }
+        bool operator==(const const_iterator& rhs) const { return _bucket == rhs._bucket; }
+        bool operator!=(const const_iterator& rhs) const { return _bucket != rhs._bucket; }
+
+    private:
+        // Jump to the next set bit; fetch further bitmask words as needed.
+        // Assumes a set bit exists before running off the mask (the map keeps
+        // sentinel state past the last bucket) — relies on table invariants.
+        void goto_next_element()
+        {
+            if (EMH_LIKELY(_bmask != 0)) {
+                _bucket = _from + CTZ(_bmask);
+                return;
+            }
+
+            do {
+                _bmask = ~*(size_t*)((size_t*)_map->_bitmask + (_from += SIZE_BIT) / SIZE_BIT);
+            } while (_bmask == 0);
+
+            _bucket = _from + CTZ(_bmask);
+        }
+
+    public:
+        const htype* _map;
+        size_type _bucket;
+        size_type _from;
+        size_t    _bmask;
+    };
+
+    // Const counterpart of iterator; same bitmask-scan mechanics, but
+    // operator++ folds the clear-lowest-bit step into goto_next_element().
+    class const_iterator
+    {
+    public:
+        typedef std::forward_iterator_tag iterator_category;
+        typedef std::ptrdiff_t            difference_type;
+        typedef value_pair                value_type;
+
+        typedef const value_pair*         pointer;
+        typedef const value_pair&         reference;
+
+        const_iterator(const iterator& it) : _map(it._map), _bucket(it._bucket), _from(it._from), _bmask(it._bmask) { }
+        const_iterator(const htype* hash_map, size_type bucket, bool) : _map(hash_map), _bucket(bucket) { init(); }
+#if EMH_ITER_SAFE
+        const_iterator(const htype* hash_map, size_type bucket) : _map(hash_map), _bucket(bucket) { init(); }
+#else
+        const_iterator(const htype* hash_map, size_type bucket) : _map(hash_map), _bucket(bucket) { _bmask = _from = 0; }
+#endif
+
+        // See iterator::init(): cache the inverted occupancy word for _bucket.
+        void init()
+        {
+            _from = (_bucket / SIZE_BIT) * SIZE_BIT;
+            if (_bucket < _map->bucket_count()) {
+                _bmask = *(size_t*)((size_t*)_map->_bitmask + _from / SIZE_BIT);
+                _bmask |= (1ull << _bucket % SIZE_BIT) - 1;
+                _bmask = ~_bmask;
+            } else {
+                _bmask = 0;
+            }
+        }
+
+        size_type bucket() const
+        {
+            return _bucket;
+        }
+
+        const_iterator& operator++()
+        {
+            goto_next_element();
+            return *this;
+        }
+
+        const_iterator operator++(int)
+        {
+            const_iterator old(*this);
+            goto_next_element();
+            return old;
+        }
+
+        reference operator*() const
+        {
+            return _map->EMH_PKV(_pairs, _bucket);
+        }
+
+        pointer operator->() const
+        {
+            return &(_map->EMH_PKV(_pairs, _bucket));
+        }
+
+        bool operator==(const const_iterator& rhs) const { return _bucket == rhs._bucket; }
+        bool operator!=(const const_iterator& rhs) const { return _bucket != rhs._bucket; }
+
+    private:
+        void goto_next_element()
+        {
+            _bmask &= _bmask - 1;   // consume current bucket's bit
+            if (EMH_LIKELY(_bmask != 0)) {
+                _bucket = _from + CTZ(_bmask);
+                return;
+            }
+
+            do {
+                _bmask = ~*(size_t*)((size_t*)_map->_bitmask + (_from += SIZE_BIT) / SIZE_BIT);
+            } while (_bmask == 0);
+
+            _bucket = _from + CTZ(_bmask);
+        }
+
+    public:
+        const htype* _map;
+        size_type _bucket;
+        size_type _from;
+        size_t    _bmask;
+    };
+
+    // Reset to an empty table and allocate `bucket` buckets via rehash();
+    // `mlf` sets the max load factor before sizing.
+    void init(size_type bucket, float mlf = EMH_DEFAULT_LOAD_FACTOR)
+    {
+        _pairs = nullptr;
+        _bitmask = nullptr;
+        _num_buckets = _num_filled = 0;
+        max_load_factor(mlf);
+        rehash(bucket);
+    }
+
+    HashMap(size_type bucket = 2, float mlf = EMH_DEFAULT_LOAD_FACTOR) noexcept
+    {
+        init(bucket, mlf);
+    }
+
+    // Total allocation: pair slots + EPACK_SIZE sentinel slots, followed by the
+    // occupancy bitmask ((n+7)/8 bytes) padded by BIT_PACK.
+    size_t AllocSize(uint64_t num_buckets) const
+    {
+        return (num_buckets + EPACK_SIZE) * sizeof(PairT) + (num_buckets + 7) / 8 + BIT_PACK;
+    }
+
+    // Copy ctor: clone the raw bucket array when the source is dense enough;
+    // for a sparse source, rebuild compactly by re-inserting each element.
+    HashMap(const HashMap& rhs) noexcept
+    {
+        if (rhs.load_factor() > EMH_MIN_LOAD_FACTOR) {
+            _pairs = (PairT*)malloc(AllocSize(rhs._num_buckets));
+            clone(rhs);
+        } else {
+            init(rhs._num_filled + 2, EMH_DEFAULT_LOAD_FACTOR);
+            for (auto it = rhs.begin(); it != rhs.end(); ++it)
+                insert_unique(it->first, it->second);
+        }
+    }
+
+    // Move ctor: unless EMH_ZERO_MOVE is defined, allocate a tiny valid table
+    // first so the moved-from rhs remains usable after the swap.
+    HashMap(HashMap&& rhs) noexcept
+    {
+#ifndef EMH_ZERO_MOVE
+        init(4);
+#else
+        _num_buckets = _num_filled = _mask = 0;
+        _pairs = nullptr;
+#endif
+        swap(rhs);
+    }
+
+    HashMap(std::initializer_list<value_type> ilist)
+    {
+        init((size_type)ilist.size());
+        for (auto it = ilist.begin(); it != ilist.end(); ++it)
+            do_insert(*it);
+    }
+
+    template<class InputIt>
+    HashMap(InputIt first, InputIt last, size_type bucket_count=4)
+    {
+        init(std::distance(first, last) + bucket_count);
+        for (; first != last; ++first)
+            emplace(*first);
+    }
+
+    // Copy assignment mirrors the copy ctor: sparse sources are rebuilt
+    // compactly; dense sources are cloned, reusing the buffer when sizes match.
+    HashMap& operator= (const HashMap& rhs) noexcept
+    {
+        if (this == &rhs)
+            return *this;
+
+        if (rhs.load_factor() < EMH_MIN_LOAD_FACTOR) {
+            clear(); free(_pairs); _pairs = nullptr;
+            rehash(rhs._num_filled + 2);
+            for (auto it = rhs.begin(); it != rhs.end(); ++it)
+                insert_unique(it->first, it->second);
+            return *this;
+        }
+
+        if (_num_filled)
+            clearkv();
+
+        if (_num_buckets != rhs._num_buckets) {
+            free(_pairs);
+            _pairs = (PairT*)malloc(AllocSize(rhs._num_buckets));
+        }
+
+        clone(rhs);
+        return *this;
+    }
+
+    HashMap& operator= (HashMap&& rhs) noexcept
+    {
+        if (this != &rhs) {
+            swap(rhs);
+            rhs.clear();
+        }
+        return *this;
+    }
+
+    // Element-wise comparison against any map-like container with find()/end().
+    template<typename Con>
+    bool operator == (const Con& rhs) const
+    {
+        if (size() != rhs.size())
+            return false;
+
+        for (auto it = begin(), last = end(); it != last; ++it) {
+            auto oi = rhs.find(it->first);
+            if (oi == rhs.end() || it->second != oi->second)
+                return false;
+        }
+        return true;
+    }
+
+    template<typename Con>
+    bool operator != (const Con& rhs) const { return !(*this == rhs); }
+
+    // Destroy only the _num_filled live elements (skipping empty buckets),
+    // then release the single allocation that holds pairs + bitmask.
+    ~HashMap() noexcept
+    {
+        if (is_triviall_destructable() && _num_filled) {
+            for (auto it = cbegin(); _num_filled; ++it) {
+                _num_filled --;
+                it->~value_pair();
+            }
+        }
+        free(_pairs);
+    }
+
+    // Copy rhs's contents into our (already allocated, same-sized) buffer.
+    // Precondition: _pairs holds AllocSize(rhs._num_buckets) bytes.
+    void clone(const HashMap& rhs) noexcept
+    {
+        _hasher      = rhs._hasher;
+        //_eq          = rhs._eq;
+
+        _num_filled  = rhs._num_filled;
+        _mask        = rhs._mask;
+        _mlf         = rhs._mlf;
+        _num_buckets = rhs._num_buckets;
+
+        _bitmask     = decltype(_bitmask)(_pairs + EPACK_SIZE + _num_buckets);
+        auto* opairs = rhs._pairs;
+
+        if (is_copy_trivially())
+            memcpy(_pairs, opairs, AllocSize(_num_buckets));
+        else {
+            // Copy sentinels + bitmask raw, then copy-construct live pairs.
+            memcpy(_pairs + _num_buckets, opairs + _num_buckets, EPACK_SIZE * sizeof(PairT) + (_num_buckets + 7) / 8 + BIT_PACK);
+            for (auto it = rhs.cbegin(); it.bucket() <= _mask; ++it) {
+                const auto bucket = it.bucket();
+                EMH_BUCKET(_pairs, bucket) = EMH_BUCKET(opairs, bucket);
+                new(_pairs + bucket) PairT(opairs[bucket]);
+            }
+        }
+    }
+
+    void swap(HashMap& rhs)
+    {
+        std::swap(_hasher, rhs._hasher);
+        //std::swap(_eq, rhs._eq);
+        std::swap(_pairs, rhs._pairs);
+        std::swap(_num_buckets, rhs._num_buckets);
+        std::swap(_num_filled, rhs._num_filled);
+        std::swap(_mask, rhs._mask);
+        std::swap(_mlf, rhs._mlf);
+        std::swap(_bitmask, rhs._bitmask);
+    }
+
+ // -------------------------------------------------------------
+ iterator begin() noexcept
+ {
+#ifdef EMH_ZERO_MOVE
+ if (0 == _num_filled)
+ return {this, _num_buckets};
+#endif
+
+ const auto bmask = ~(*(size_t*)_bitmask);
+ if (bmask != 0)
+ return {this, (size_type)CTZ(bmask), true};
+
+ iterator it(this, sizeof(bmask) * 8 - 1);
+ return it.next();
+ }
+
+ const_iterator cbegin() const noexcept
+ {
+#ifdef EMH_ZERO_MOVE
+ if (0 == _num_filled)
+ return {this, _num_buckets};
+#endif
+
+ const auto bmask = ~(*(size_t*)_bitmask);
+ if (bmask != 0)
+ return {this, (size_type)CTZ(bmask), true};
+
+ iterator it(this, sizeof(bmask) * 8 - 1);
+ return it.next();
+ }
+
+ iterator last() const
+ {
+ if (_num_filled == 0)
+ return end();
+
+ auto bucket = _mask;
+ while (EMH_EMPTY(_pairs, bucket)) bucket--;
+ return {this, bucket, true};
+ }
+
+ const_iterator begin() const noexcept { return cbegin(); }
+
+ iterator end() noexcept { return {this, _num_buckets}; }
+ const_iterator cend() const { return {this, _num_buckets}; }
+ const_iterator end() const { return cend(); }
+
+ size_type size() const { return _num_filled; }
+ bool empty() const { return _num_filled == 0; }
+
+ size_type bucket_count() const { return _num_buckets; }
+ float load_factor() const { return static_cast<float>(_num_filled) / (_mask + 1); }
+
+ HashT& hash_function() const { return _hasher; }
+ EqT& key_eq() const { return _eq; }
+
+ void max_load_factor(float mlf)
+ {
+ if (mlf < 0.999f && mlf > EMH_MIN_LOAD_FACTOR)
+ _mlf = (uint32_t)((1 << 27) / mlf);
+ }
+
+ constexpr float max_load_factor() const { return (1 << 27) / (float)_mlf; }
+ constexpr size_type max_size() const { return 1ull << (sizeof(size_type) * 8 - 1); }
+ constexpr size_type max_bucket_count() const { return max_size(); }
+
+ size_type bucket_main() const
+ {
+ auto main_size = 0;
+ for (size_type bucket = 0; bucket < _num_buckets; ++bucket) {
+ if (EMH_BUCKET(_pairs, bucket) == bucket)
+ main_size ++;
+ }
+ return main_size;
+ }
+
+#if EMH_STATIS
+    //Returns the bucket number where the element with key k is located.
+    // (debug/statistics build only, under EMH_STATIS)
+    size_type bucket(const KeyT& key) const
+    {
+        const auto bucket = hash_key(key) & _mask;
+        const auto next_bucket = EMH_BUCKET(_pairs, bucket);
+        if (EMH_EMPTY(_pairs, bucket))
+            return 0;
+        else if (bucket == next_bucket)
+            return bucket + 1;   // 1-based: 0 means "empty"
+
+        const auto& bucket_key = EMH_KEY(_pairs, bucket);
+        return (hash_key(bucket_key) & _mask) + 1;
+    }
+
+    //Returns the number of elements in bucket n.
+    size_type bucket_size(const size_type bucket) const
+    {
+        if (EMH_EMPTY(_pairs, bucket))
+            return 0;
+
+        auto next_bucket = EMH_BUCKET(_pairs, bucket);
+        next_bucket = hash_key(EMH_KEY(_pairs, bucket)) & _mask;
+        size_type bucket_size = 1;
+
+        //iterator each item in current main bucket
+        while (true) {
+            const auto nbucket = EMH_BUCKET(_pairs, next_bucket);
+            if (nbucket == next_bucket) {
+                break;
+            }
+            bucket_size++;
+            next_bucket = nbucket;
+        }
+        return bucket_size;
+    }
+
+    // Main (home) bucket of the element stored at `bucket`, or INACTIVE.
+    size_type get_main_bucket(const size_type bucket) const
+    {
+        if (EMH_EMPTY(_pairs, bucket))
+            return INACTIVE;
+
+        auto next_bucket = EMH_BUCKET(_pairs, bucket);   // NOTE(review): unused
+        const auto& bucket_key = EMH_KEY(_pairs, bucket);
+        const auto main_bucket = hash_key(bucket_key) & _mask;
+        return main_bucket;
+    }
+
+    // Distance between two chained buckets measured in cache lines, clamped
+    // to slots-1; used to histogram cache-miss distances.
+    size_type get_diss(size_type bucket, size_type next_bucket, const size_type slots) const
+    {
+        const int cahe_line_size = 64;
+        auto pbucket = reinterpret_cast<uint64_t>(&_pairs[bucket]);
+        auto pnext   = reinterpret_cast<uint64_t>(&_pairs[next_bucket]);
+        if (pbucket / cahe_line_size == pnext / cahe_line_size)
+            return 0;
+        size_type diff = pbucket > pnext ? (pbucket - pnext) : (pnext - pbucket);
+        if (diff / cahe_line_size + 1 < slots)
+            return (diff / cahe_line_size + 1);
+        return slots - 1;
+    }
+
+    // Returns chain length of the chain headed at `bucket` (-1 empty,
+    // 0 not a chain head) and accumulates cache-distance stats into steps[].
+    int get_bucket_info(const size_type bucket, size_type steps[], const size_type slots) const
+    {
+        if (EMH_EMPTY(_pairs, bucket))
+            return -1;
+
+        auto next_bucket = EMH_BUCKET(_pairs, bucket);
+        if ((hash_key(EMH_KEY(_pairs, bucket)) & _mask) != bucket)
+            return 0;
+        else if (next_bucket == bucket)
+            return 1;
+
+        steps[get_diss(bucket, next_bucket, slots)] ++;
+        size_type bucket_size = 2;
+        while (true) {
+            const auto nbucket = EMH_BUCKET(_pairs, next_bucket);
+            if (nbucket == next_bucket)
+                break;
+
+            steps[get_diss(nbucket, next_bucket, slots)] ++;
+            bucket_size ++;
+            next_bucket = nbucket;
+        }
+
+        return bucket_size;
+    }
+
+    // Print a histogram of chain lengths vs. the Poisson expectation plus
+    // cache-miss statistics (EMH_STATIS debug builds only).
+    // FIX: `factorial` was an int and `factorial *= i` runs for i up to 128,
+    // overflowing at 13! (signed overflow = UB). It is only ever used in
+    // floating-point math, so compute it as double. The loop index is now
+    // size_t to avoid the signed/unsigned comparison with sizeof().
+    void dump_statics(bool show_cache) const
+    {
+        const int slots = 128;
+        size_type buckets[slots + 1] = {0};
+        size_type steps[slots + 1]   = {0};
+        char buff[1024 * 8];
+        for (size_type bucket = 0; bucket < _num_buckets; ++bucket) {
+            auto bsize = get_bucket_info(bucket, steps, slots);
+            if (bsize >= 0)
+                buckets[bsize] ++;
+        }
+
+        size_type sumb = 0, sums = 0, sumn = 0;
+        size_type miss = 0, finds = 0, bucket_coll = 0;
+        double lf = load_factor(), fk = 1.0 / exp(lf), sum_poisson = 0;
+        double factorial = 1.0;   // double: i! overflows int at i == 13
+        int bsize = sprintf (buff, "============== buckets size ration ========\n");
+
+        miss += _num_buckets - _num_filled;
+        for (size_t i = 1; i < sizeof(buckets) / sizeof(buckets[0]); i++) {
+            double poisson = fk / factorial; factorial *= i; fk *= lf;
+            if (poisson > 1e-13 && i < 20)
+                sum_poisson += poisson * 100.0 * (i - 1) / i;
+
+            const int64_t bucketsi = buckets[i];
+            if (bucketsi == 0)
+                continue;
+
+            sumb += bucketsi;
+            sumn += bucketsi * i;
+            bucket_coll += bucketsi * (i - 1);
+            finds += bucketsi * i * (i + 1) / 2;
+            miss  += bucketsi * i * i;
+            auto errs = (bucketsi * 1.0 * i / _num_filled - poisson) * 100 / poisson;
+            bsize += sprintf(buff + bsize, "  %2d  %8ld  %0.8lf|%0.2lf%%  %2.3lf\n",
+                    (int)i, bucketsi, bucketsi * 1.0 * i / _num_filled, errs, sumn * 100.0 / _num_filled);
+            if (sumn >= _num_filled)
+                break;
+        }
+
+        bsize += sprintf(buff + bsize, "========== collision miss ration ===========\n");
+        for (size_type i = 0; show_cache && i < sizeof(steps) / sizeof(steps[0]); i++) {
+            sums += steps[i];
+            if (steps[i] == 0)
+                continue;
+            if (steps[i] > 10)
+                bsize += sprintf(buff + bsize, "  %2d  %8u  %0.2lf  %.2lf\n", (int)i, steps[i], steps[i] * 100.0 / bucket_coll, sums * 100.0 / bucket_coll);
+        }
+
+        if (sumb == 0)  return;
+
+        bsize += sprintf(buff + bsize, "  _num_filled aver_size k.v size_kv = %u, %.2lf, %s.%s %zd\n",
+                _num_filled, _num_filled * 1.0 / sumb, typeid(KeyT).name(), typeid(ValueT).name(), sizeof(PairT));
+
+        bsize += sprintf(buff + bsize, "  collision, poisson, cache_miss hit_find|hit_miss, load_factor = %.2lf%%,%.2lf%%,%.2lf%%  %.2lf|%.2lf, %.2lf\n",
+                 (bucket_coll * 100.0 / _num_filled), sum_poisson, (bucket_coll - steps[0]) * 100.0 / _num_filled,
+                 finds * 1.0 / _num_filled, miss * 1.0 / _num_buckets, _num_filled * 1.0 / _num_buckets);
+
+        bsize += sprintf(buff + bsize, "============== buckets size end =============\n");
+        buff[bsize] = 0;   // sprintf already terminates; keep the explicit cap in-bounds
+
+#ifdef EMH_LOG
+        EMH_LOG << __FUNCTION__ << "|" << buff << endl;
+#else
+        puts(buff);
+#endif
+        assert(sumn == _num_filled);
+        assert(sums == bucket_coll || !show_cache);
+        assert(bucket_coll == buckets[0]);
+    }
+#endif
+
+ // ------------------------------------------------------------
+ template<typename Key = KeyT>
+ iterator find(const Key& key, size_t key_hash) noexcept
+ {
+ return {this, find_filled_hash(key, key_hash)};
+ }
+
+ template<typename Key = KeyT>
+ const_iterator find(const Key& key, size_t key_hash) const noexcept
+ {
+ return {this, find_filled_hash(key, key_hash)};
+ }
+
+ template<typename Key=KeyT>
+ iterator find(const Key& key) noexcept
+ {
+ return {this, find_filled_bucket(key)};
+ }
+
+ template<typename Key = KeyT>
+ const_iterator find(const Key& key) const noexcept
+ {
+ return {this, find_filled_bucket(key)};
+ }
+
+ template<typename Key = KeyT>
+ ValueT& at(const KeyT& key)
+ {
+ const auto bucket = find_filled_bucket(key);
+ //throw
+ return EMH_VAL(_pairs, bucket);
+ }
+
+ template<typename Key = KeyT>
+ const ValueT& at(const KeyT& key) const
+ {
+ const auto bucket = find_filled_bucket(key);
+ //throw
+ return EMH_VAL(_pairs, bucket);
+ }
+
+ template<typename Key = KeyT>
+ bool contains(const Key& key) const noexcept
+ {
+ return find_filled_bucket(key) != _num_buckets;
+ }
+
+ template<typename Key = KeyT>
+ size_type count(const Key& key) const noexcept
+ {
+ return find_filled_bucket(key) != _num_buckets ? 1 : 0;
+ }
+
+    // Keys are unique, so the range is empty (found == end) or exactly one
+    // element [found, next(found)).
+    // FIX: `const auto found = {this, ...}` deduced std::initializer_list from
+    // a heterogeneous braced list, which is ill-formed — these templates could
+    // not compile when instantiated. Construct the iterator explicitly.
+    template<typename Key = KeyT>
+    std::pair<iterator, iterator> equal_range(const Key& key) const noexcept
+    {
+        iterator found(this, find_filled_bucket(key), true);
+        if (found.bucket() == _num_buckets)
+            return { found, found };
+        else
+            return { found, std::next(found) };
+    }
+
+    template<typename K=KeyT>
+    std::pair<const_iterator, const_iterator> equal_range(const K& key) const
+    {
+        const_iterator found(this, find_filled_bucket(key), true);
+        if (found.bucket() == _num_buckets)
+            return { found, found };
+        else
+            return { found, std::next(found) };
+    }
+
+    // Move every element from rhs whose key is not already present; elements
+    // with colliding keys stay in rhs (std::unordered_map::merge semantics).
+    void merge(HashMap& rhs)
+    {
+        if (empty()) {
+            *this = std::move(rhs);
+            return;
+        }
+
+        for (auto rit = rhs.begin(); rit != rhs.end(); ) {
+            auto fit = find(rit->first);
+            if (fit.bucket() == _num_buckets) {
+                insert_unique(rit->first, std::move(rit->second));
+                rit = rhs.erase(rit);
+            } else {
+                ++rit;
+            }
+        }
+    }
+
+#ifdef EMH_EXT
+    // Copy the value for `key` into `val`; returns whether the key was found.
+    bool try_get(const KeyT& key, ValueT& val) const noexcept
+    {
+        const auto bucket = find_filled_bucket(key);
+        const auto found = bucket != _num_buckets;
+        if (found) {
+            val = EMH_VAL(_pairs, bucket);
+        }
+        return found;
+    }
+
+    /// Returns the matching ValueT or nullptr if k isn't found.
+    ValueT* try_get(const KeyT& key) noexcept
+    {
+        const auto bucket = find_filled_bucket(key);
+        return bucket == _num_buckets ? nullptr : &EMH_VAL(_pairs, bucket);
+    }
+
+    /// Const version of the above
+    // NOTE(review): returns a mutable ValueT* from a const method — callers
+    // can modify values through a const map; consider const ValueT*.
+    ValueT* try_get(const KeyT& key) const noexcept
+    {
+        const auto bucket = find_filled_bucket(key);
+        return bucket == _num_buckets ? nullptr : &EMH_VAL(_pairs, bucket);
+    }
+
+    /// Convenience function.
+    ValueT get_or_return_default(const KeyT& key) const noexcept
+    {
+        const auto bucket = find_filled_bucket(key);
+        return bucket == _num_buckets ? ValueT() : EMH_VAL(_pairs, bucket);
+    }
+#endif
+
+ // -----------------------------------------------------
+ template<typename K = KeyT, typename V = ValueT>
+ std::pair<iterator, bool> do_assign(K&& key, V&& val)
+ {
+ reserve(_num_filled);
+
+ bool isempty;
+ const auto bucket = find_or_allocate(key, isempty);
+ if (isempty) {
+ EMH_NEW(std::forward<K>(key), std::forward<V>(val), bucket);
+ } else {
+ EMH_VAL(_pairs, bucket) = std::move(val);
+ }
+ return { {this, bucket}, isempty };
+ }
+
+ std::pair<iterator, bool> do_insert(const value_type& value)
+ {
+ bool isempty;
+ const auto bucket = find_or_allocate(value.first, isempty);
+ if (isempty) {
+ EMH_NEW(value.first, value.second, bucket);
+ }
+ return { {this, bucket}, isempty };
+ }
+
+ std::pair<iterator, bool> do_insert(value_type&& value)
+ {
+ bool isempty;
+ const auto bucket = find_or_allocate(value.first, isempty);
+ if (isempty) {
+ EMH_NEW(std::move(value.first), std::move(value.second), bucket);
+ }
+ return { {this, bucket}, isempty };
+ }
+
+ template<typename K = KeyT, typename V = ValueT>
+ std::pair<iterator, bool> do_insert(K&& key, V&& val)
+ {
+ bool isempty;
+ const auto bucket = find_or_allocate(key, isempty);
+ if (isempty) {
+ EMH_NEW(std::forward<K>(key), std::forward<V>(val), bucket);
+ }
+ return { {this, bucket}, isempty };
+ }
+
+ std::pair<iterator, bool> insert(const value_type& value)
+ {
+ check_expand_need();
+ return do_insert(value);
+ }
+
+ std::pair<iterator, bool> insert(value_type&& value)
+ {
+ check_expand_need();
+ return do_insert(std::move(value));
+ }
+
+ void insert(std::initializer_list<value_type> ilist)
+ {
+ reserve(ilist.size() + _num_filled);
+ for (auto it = ilist.begin(); it != ilist.end(); ++it)
+ do_insert(*it);
+ }
+
+ template <typename Iter>
+ void insert(Iter first, Iter last)
+ {
+ reserve(std::distance(first, last) + _num_filled);
+ for (auto it = first; it != last; ++it)
+ do_insert(it->first, it->second);
+ }
+
+#if 0
+ template <typename Iter>
+ void insert_unique(Iter begin, Iter end)
+ {
+ reserve(std::distance(begin, end) + _num_filled);
+ for (; begin != end; ++begin)
+ do_insert_unqiue(*begin);
+ }
+#endif
+
+ template<typename K, typename V>
+ size_type insert_unique(K&& key, V&& val)
+ {
+ return do_insert_unqiue(std::forward<K>(key), std::forward<V>(val));
+ }
+
+ size_type insert_unique(value_type&& value)
+ {
+ return do_insert_unqiue(std::move(value.first), std::move(value.second));
+ }
+
+ size_type insert_unique(const value_type& value)
+ {
+ return do_insert_unqiue(value.first, value.second);
+ }
+
+ template<typename K, typename V>
+ inline size_type do_insert_unqiue(K&& key, V&& val)
+ {
+ check_expand_need();
+ auto bucket = find_unique_bucket(key);
+ EMH_NEW(std::forward<K>(key), std::forward<V>(val), bucket);
+ return bucket;
+ }
+
+ std::pair<iterator, bool> insert_or_assign(const KeyT& key, ValueT&& val) { return do_assign(key, std::forward<ValueT>(val)); }
+ std::pair<iterator, bool> insert_or_assign(KeyT&& key, ValueT&& val) { return do_assign(std::move(key), std::forward<ValueT>(val)); }
+
+ template <typename... Args>
+ inline std::pair<iterator, bool> emplace(Args&&... args) noexcept
+ {
+ check_expand_need();
+ return do_insert(std::forward<Args>(args)...);
+ }
+
+ template <class... Args>
+ iterator emplace_hint(const_iterator hint, Args&&... args)
+ {
+ (void)hint;
+ check_expand_need();
+ return do_insert(std::forward<Args>(args)...).first;
+ }
+
+ template<class... Args>
+ std::pair<iterator, bool> try_emplace(const KeyT& key, Args&&... args)
+ {
+ check_expand_need();
+ return do_insert(key, std::forward<Args>(args)...);
+ }
+
+ template<class... Args>
+ std::pair<iterator, bool> try_emplace(KeyT&& key, Args&&... args)
+ {
+ check_expand_need();
+ return do_insert(std::forward<KeyT>(key), std::forward<Args>(args)...);
+ }
+
+ template <class... Args>
+ inline size_type emplace_unique(Args&&... args) noexcept
+ {
+ return insert_unique(std::forward<Args>(args)...);
+ }
+
+ /* Check if inserting a new value rather than overwriting an old entry */
+ ValueT& operator[](const KeyT& key) noexcept
+ {
+ check_expand_need();
+
+ bool isempty;
+ const auto bucket = find_or_allocate(key, isempty);
+ if (isempty) {
+ EMH_NEW(key, std::move(ValueT()), bucket);
+ }
+
+ return EMH_VAL(_pairs, bucket);
+ }
+
+ ValueT& operator[](KeyT&& key) noexcept
+ {
+ check_expand_need();
+
+ bool isempty;
+ const auto bucket = find_or_allocate(key, isempty);
+ if (isempty) {
+ EMH_NEW(std::move(key), std::move(ValueT()), bucket);
+ }
+
+ return EMH_VAL(_pairs, bucket);
+ }
+
+ // -------------------------------------------------------
+ /// Erase an element from the hash table.
+ /// return 0 if element was not found
+ template<typename Key = KeyT>
+ size_type erase(const Key& key)
+ {
+ const auto bucket = erase_key(key);
+ if (bucket == INACTIVE)
+ return 0;
+
+ clear_bucket(bucket);
+ return 1;
+ }
+
+ //iterator erase const_iterator
+ iterator erase(const_iterator cit)
+ {
+ iterator it(cit);
+ return erase(it);
+ }
+
+ /// Erase an element typedef an iterator.
+ /// Returns an iterator to the next element (or end()).
+ iterator erase(iterator it)
+ {
+ const auto bucket = erase_bucket(it._bucket);
+ clear_bucket(bucket);
+ if (bucket == it._bucket) {
+ return ++it;
+ } else {
+ //erase main bucket as next
+ it.clear(bucket);
+ return it;
+ }
+ }
+
+ /// Erase an element typedef an iterator without return next iterator
+ void _erase(const_iterator it)
+ {
+ const auto bucket = erase_bucket(it._bucket);
+ clear_bucket(bucket);
+ }
+
+ template<typename Pred>
+ size_type erase_if(Pred pred)
+ {
+ auto old_size = size();
+ for (auto it = begin(), last = end(); it != last; ) {
+ if (pred(*it))
+ it = erase(it);
+ else
+ ++it;
+ }
+ return old_size - size();
+ }
+
+ static constexpr bool is_triviall_destructable()
+ {
+#if __cplusplus >= 201402L || _MSC_VER > 1600
+ return !(std::is_trivially_destructible<KeyT>::value && std::is_trivially_destructible<ValueT>::value);
+#else
+ return !(std::is_pod<KeyT>::value && std::is_pod<ValueT>::value);
+#endif
+ }
+
+ static constexpr bool is_copy_trivially()
+ {
+#if __cplusplus >= 201402L || _MSC_VER > 1600
+ return (std::is_trivially_copyable<KeyT>::value && std::is_trivially_copyable<ValueT>::value);
+#else
+ return (std::is_pod<KeyT>::value && std::is_pod<ValueT>::value);
+#endif
+ }
+
+ void clearkv()
+ {
+ if (is_triviall_destructable()) {
+ for (auto it = cbegin(); _num_filled; ++it)
+ clear_bucket(it.bucket());
+ }
+ }
+
+ /// Remove all elements, keeping full capacity.
+ void clear()
+ {
+ if (!is_triviall_destructable() && _num_filled) {
+ memset(_bitmask, 0xFFFFFFFF, (_num_buckets + 7) / 8);
+ if (_num_buckets < 8) _bitmask[0] = (1 << _num_buckets) - 1;
+ }
+ else if (_num_filled)
+ clearkv();
+
+ //EMH_BUCKET(_pairs, _num_buckets) = 0; //_last
+ _num_filled = 0;
+ }
+
+ void shrink_to_fit()
+ {
+ rehash(_num_filled + 1);
+ }
+
+    /// Make room for this many elements
+    // Uses _mlf as a fixed-point reciprocal: required = num_elems / mlf.
+    // Returns true when a rehash actually happened.
+    bool reserve(uint64_t num_elems)
+    {
+        const auto required_buckets = (num_elems * _mlf >> 27);
+        if (EMH_LIKELY(required_buckets < _num_buckets))
+            return false;
+
+#if EMH_HIGH_LOAD
+        if (required_buckets < 64 && _num_filled < _num_buckets)
+            return false;
+#endif
+
+#if EMH_STATIS
+        if (_num_filled > EMH_STATIS) dump_statics(true);
+#endif
+        rehash(required_buckets + 2);
+        return true;
+    }
+
+    // Reallocate to the next power-of-two >= required_buckets and re-insert
+    // every element. Bucket count is always a power of two (_mask = n - 1).
+    void rehash(uint64_t required_buckets)
+    {
+        if (required_buckets < _num_filled)
+            return;
+
+        // Start doubling from 64K for big tables to cut the loop short.
+        uint64_t buckets = _num_filled > (1u << 16) ? (1u << 16) : 2u;
+        while (buckets < required_buckets) { buckets *= 2; }
+
+        // no need alloc large bucket for small key sizeof(KeyT) < sizeof(int).
+        // set small a max_load_factor, insert/reserve() will fail and introduce rehash issiue TODO: dothing ?
+        if (sizeof(KeyT) < sizeof(size_type) && buckets >= (1ul << (2 * 8)))
+            buckets = 2ul << (sizeof(KeyT) * 8);
+
+        assert(buckets < max_size() && buckets > _num_filled);
+        //TODO: throwOverflowError
+
+        auto num_buckets = (size_type)buckets;
+        auto old_num_filled  = _num_filled;
+        auto old_mask  = _num_buckets - 1;
+        auto old_pairs = _pairs;
+        auto* obmask   = _bitmask;
+
+        _num_filled  = 0;
+        _num_buckets = num_buckets;
+        _mask        = num_buckets - 1;
+
+        _pairs = (PairT*)malloc(AllocSize(_num_buckets));
+        memset((char*)(_pairs + _num_buckets), 0, sizeof(PairT) * EPACK_SIZE);
+
+        // Bitmask lives right after the pair array + sentinel slots.
+        _bitmask     = decltype(_bitmask)(_pairs + EPACK_SIZE + num_buckets);
+
+        // All buckets start empty: bits set to 1. (memset value truncates to 0xFF.)
+        const auto mask_byte = (num_buckets + 7) / 8;
+        memset(_bitmask, 0xFFFFFFFF, mask_byte);
+        memset(((char*)_bitmask) + mask_byte, 0, BIT_PACK);
+        if (num_buckets < 8)
+            _bitmask[0] = (1 << num_buckets) - 1;
+
+        //for (size_type src_bucket = 0; _num_filled < old_num_filled; src_bucket++) {
+        for (size_type src_bucket = old_mask; _num_filled < old_num_filled; src_bucket --) {
+            if (obmask[src_bucket / MASK_BIT] & (EMH_MASK(src_bucket)))
+                continue;   // bit set = old bucket empty
+
+            auto& key = EMH_KEY(old_pairs, src_bucket);
+            const auto bucket = find_unique_bucket(key);
+            EMH_NEW(std::move(key), std::move(EMH_VAL(old_pairs, src_bucket)), bucket);
+            if (is_triviall_destructable())
+                old_pairs[src_bucket].~PairT();
+        }
+
+#if EMH_REHASH_LOG
+        if (_num_filled > EMH_REHASH_LOG) {
+            auto mbucket = bucket_main();
+            char buff[255] = {0};
+            sprintf(buff, "    _num_filled/collision/main/K.V/pack/ = %u/%.2lf%%(%.2lf%%)/%s.%s/%zd",
+                    _num_filled, 200.0f * (_num_filled - mbucket) / _mask, 100.0f * mbucket / _mask,
+                    typeid(KeyT).name(), typeid(ValueT).name(), sizeof(_pairs[0]));
+#ifdef EMH_LOG
+            static size_t ihashs = 0;
+            EMH_LOG << "rhash_nums = " << ihashs ++ << "|" <<__FUNCTION__ << "|" << buff << endl;
+#else
+            puts(buff);
+#endif
+        }
+#endif
+
+        free(old_pairs);
+        assert(old_num_filled == _num_filled);
+    }
+
+private:
+    // Can we fit another element?
+    inline bool check_expand_need()
+    {
+        return reserve(_num_filled);
+    }
+
+    // Mark `bucket` empty in the bitmask, decrement the count, and run the
+    // element destructor when it is non-trivial.
+    void clear_bucket(size_type bucket)
+    {
+        EMH_CLS(bucket);
+        _num_filled--;
+        if (is_triviall_destructable())
+            _pairs[bucket].~PairT();
+    }
+
+#if 1
+    //template<typename UType, typename std::enable_if<std::is_integral<UType>::value, size_type>::type = 0>
+    // Unlink the element with `key` from its collision chain and return the
+    // bucket that should be cleared (the chain is patched so the head stays
+    // at the key's main bucket); INACTIVE when the key is absent.
+    template<typename UType>
+    size_type erase_key(const UType& key)
+    {
+        const auto bucket = hash_key(key) & _mask;
+        if (EMH_EMPTY(_pairs, bucket))
+            return INACTIVE;
+
+        auto next_bucket = EMH_BUCKET(_pairs, bucket);
+        const auto eqkey = _eq(key, EMH_KEY(_pairs, bucket));
+        if (eqkey) {
+            if (next_bucket == bucket)
+                return bucket;   // single-element chain: clear the head itself
+
+            // Head matches but has a chain: move the second element into the
+            // head slot and free the second slot instead.
+            const auto nbucket = EMH_BUCKET(_pairs, next_bucket);
+            if (is_copy_trivially())
+                EMH_PKV(_pairs, bucket) = EMH_PKV(_pairs, next_bucket);
+            else
+                EMH_PKV(_pairs, bucket).swap(EMH_PKV(_pairs, next_bucket));
+
+            EMH_BUCKET(_pairs, bucket) = (nbucket == next_bucket) ? bucket : nbucket;
+            return next_bucket;
+        } else if (next_bucket == bucket)
+            return INACTIVE;
+        /* else if (EMH_UNLIKELY(bucket != hash_key(EMH_KEY(_pairs, bucket)) & _mask))
+            return INACTIVE;
+        */
+
+        // Walk the chain looking for the key; unlink on match.
+        auto prev_bucket = bucket;
+        while (true) {
+            const auto nbucket = EMH_BUCKET(_pairs, next_bucket);
+            if (_eq(key, EMH_KEY(_pairs, next_bucket))) {
+                EMH_BUCKET(_pairs, prev_bucket) = (nbucket == next_bucket) ? prev_bucket : nbucket;
+                return next_bucket;
+            }
+
+            if (nbucket == next_bucket)
+                break;
+            prev_bucket = next_bucket;
+            next_bucket = nbucket;
+        }
+
+        return INACTIVE;
+    }
+#else
+    // Alternate (disabled) implementation: swaps the found element to the
+    // chain tail before removing it.
+    template<typename UType, typename std::enable_if<!std::is_integral<UType>::value, size_type>::type = 0>
+    size_type erase_key(const UType& key)
+    {
+        const auto bucket = hash_key(key) & _mask;
+        if (EMH_EMPTY(_pairs, bucket))
+            return INACTIVE;
+
+        auto next_bucket = EMH_BUCKET(_pairs, bucket);
+        if (next_bucket == bucket)
+            return _eq(key, EMH_KEY(_pairs, bucket)) ? bucket : INACTIVE;
+//        else if (bucket != hash_key(EMH_KEY(_pairs, bucket)))
+//            return INACTIVE;
+
+        //find erase key and swap to last bucket
+        size_type prev_bucket = bucket, find_bucket = INACTIVE;
+        next_bucket = bucket;
+        while (true) {
+            const auto nbucket = EMH_BUCKET(_pairs, next_bucket);
+            if (_eq(key, EMH_KEY(_pairs, next_bucket))) {
+                find_bucket = next_bucket;
+                if (nbucket == next_bucket) {
+                    EMH_BUCKET(_pairs, prev_bucket) = prev_bucket;
+                    break;
+                }
+            }
+            if (nbucket == next_bucket) {
+                if (find_bucket != INACTIVE) {
+                    EMH_PKV(_pairs, find_bucket).swap(EMH_PKV(_pairs, nbucket));
+//                    EMH_PKV(_pairs, find_bucket) = EMH_PKV(_pairs, nbucket);
+                    EMH_BUCKET(_pairs, prev_bucket) = prev_bucket;
+                    find_bucket = nbucket;
+                }
+                break;
+            }
+            prev_bucket = next_bucket;
+            next_bucket = nbucket;
+        }
+
+        return find_bucket;
+    }
+#endif
+
+    // Remove the element stored at `bucket` (known occupied) from its chain;
+    // returns the bucket whose storage should actually be cleared (which may
+    // differ from `bucket` when a chained element was moved into it).
+    size_type erase_bucket(const size_type bucket)
+    {
+        const auto next_bucket = EMH_BUCKET(_pairs, bucket);
+        const auto main_bucket = hash_key(EMH_KEY(_pairs, bucket)) & _mask;
+        if (bucket == main_bucket) {
+            if (bucket != next_bucket) {
+                const auto nbucket = EMH_BUCKET(_pairs, next_bucket);
+                if (is_copy_trivially())
+                    EMH_PKV(_pairs, bucket) = EMH_PKV(_pairs, next_bucket);
+                else
+                    EMH_PKV(_pairs, bucket).swap(EMH_PKV(_pairs, next_bucket));
+                EMH_BUCKET(_pairs, bucket) = (nbucket == next_bucket) ? bucket : nbucket;
+            }
+            return next_bucket;
+        }
+
+        const auto prev_bucket = find_prev_bucket(main_bucket, bucket);
+        EMH_BUCKET(_pairs, prev_bucket) = (bucket == next_bucket) ? prev_bucket : next_bucket;
+        return bucket;
+    }
+
+    // Find the bucket with this key, or return bucket size
+    // Variant taking a caller-precomputed hash.
+    template<typename K = KeyT>
+    size_type find_filled_hash(const K& key, const size_t key_hash) const
+    {
+        const auto bucket = key_hash & _mask;
+        if (EMH_EMPTY(_pairs, bucket))
+            return _num_buckets;
+
+        auto next_bucket = bucket;
+        while (true) {
+            if (_eq(key, EMH_KEY(_pairs, next_bucket)))
+                return next_bucket;
+
+            const auto nbucket = EMH_BUCKET(_pairs, next_bucket);
+            if (nbucket == next_bucket)
+                break;
+            next_bucket = nbucket;
+        }
+
+        return _num_buckets;
+    }
+
+    // Find the bucket with this key, or return bucket size
+    template<typename K = KeyT>
+    size_type find_filled_bucket(const K& key) const
+    {
+        const auto bucket = hash_key(key) & _mask;
+        if (EMH_EMPTY(_pairs, bucket))
+            return _num_buckets;
+
+        auto next_bucket = bucket;
+//        else if (bucket != (hash_key(bucket_key) & _mask))
+//            return _num_buckets;
+
+        while (true) {
+            if (_eq(key, EMH_KEY(_pairs, next_bucket)))
+                return next_bucket;
+
+            const auto nbucket = EMH_BUCKET(_pairs, next_bucket);
+            if (nbucket == next_bucket)
+                return _num_buckets;
+            next_bucket = nbucket;
+        }
+
+        return 0;   // unreachable: the loop always returns
+    }
+
+    //kick out bucket and find empty to occpuy
+    //it will break the orgin link and relnik again.
+    //before: main_bucket-->prev_bucket --> bucket --> next_bucket
+    //atfer : main_bucket-->prev_bucket --> (removed)--> new_bucket--> next_bucket
+    size_type kickout_bucket(const size_type kmain, const size_type kbucket)
+    {
+        const auto next_bucket = EMH_BUCKET(_pairs, kbucket);
+        const auto new_bucket  = find_empty_bucket(next_bucket, kbucket);
+        const auto prev_bucket = find_prev_bucket(kmain, kbucket);
+        new(_pairs + new_bucket) PairT(std::move(_pairs[kbucket]));
+        if (is_triviall_destructable())
+            _pairs[kbucket].~PairT();
+
+        if (next_bucket == kbucket)
+            EMH_BUCKET(_pairs, new_bucket) = new_bucket;   // was chain tail
+        EMH_BUCKET(_pairs, prev_bucket) = new_bucket;
+
+        EMH_SET(new_bucket);
+        return kbucket;
+    }
+
+/*
+** Inserts a new key into the hash table: first check whether the key's main
+** bucket/position is free. If not, check whether the colliding node/bucket is
+** in its main position or not: if it is not, move the colliding bucket to an
+** empty place and put the new key in its main position; otherwise (the
+** colliding bucket is in its main position), the new key goes to an empty
+** position. ***/
+
+    // Return the bucket where `key` lives or should be inserted; `isempty`
+    // reports whether the returned slot is currently unoccupied.
+    template<typename K=KeyT>
+    size_type find_or_allocate(const K& key, bool& isempty)
+    {
+        const auto bucket = hash_key(key) & _mask;
+        const auto& bucket_key = EMH_KEY(_pairs, bucket);
+        if (EMH_EMPTY(_pairs, bucket)) {
+            isempty = true;
+            return bucket;
+        }
+        else if (_eq(key, bucket_key)) {
+            isempty = false;
+            return bucket;
+        }
+
+        isempty = true;
+        auto next_bucket = EMH_BUCKET(_pairs, bucket);
+        // check whether the occupant of this slot is in its main bucket or not
+        const auto kmain_bucket = hash_key(bucket_key) & _mask;
+        if (kmain_bucket != bucket)
+            return kickout_bucket(kmain_bucket, bucket);
+        else if (next_bucket == bucket)
+            return EMH_BUCKET(_pairs, next_bucket) = find_empty_bucket(next_bucket, bucket);
+
+#if EMH_LRU_SET
+        auto prev_bucket = bucket;
+#endif
+        // walk the chain checking each key; with EMH_LRU_SET the found entry
+        // is swapped toward the chain head to keep hot keys near the front
+        while (true) {
+            if (EMH_UNLIKELY(_eq(key, EMH_KEY(_pairs, next_bucket)))) {
+                isempty = false;
+#if EMH_LRU_SET
+                EMH_PKV(_pairs, next_bucket).swap(EMH_PKV(_pairs, prev_bucket));
+                return prev_bucket;
+#else
+                return next_bucket;
+#endif
+            }
+
+#if EMH_LRU_SET
+            prev_bucket = next_bucket;
+#endif
+
+            const auto nbucket = EMH_BUCKET(_pairs, next_bucket);
+            if (nbucket == next_bucket)
+                break;
+            next_bucket = nbucket;
+        }
+
+        //find a new empty and link it to tail, TODO link after main bucket?
+        const auto new_bucket = find_empty_bucket(next_bucket, bucket);// : find_empty_bucket(next_bucket);
+        return EMH_BUCKET(_pairs, next_bucket) = new_bucket;
+    }
+
+    // key is not in this map. Find a place to put it, preferring slots near
+    // main_bucket for locality; falls back to a persistent cursor (stored in
+    // the sentinel slot EMH_BUCKET(_pairs, _num_buckets)) sweeping the bitmask.
+    size_type find_empty_bucket(const size_type bucket_from, const size_type main_bucket)
+    {
+#ifdef EMH_ALIGN64 // only works 64bit
+        const auto boset = bucket_from % MASK_BIT;
+        auto* const align = _bitmask + bucket_from / MASK_BIT;
+        const auto bmask = ((size_t)align[1] << (MASK_BIT - boset)) | (align[0] >> boset);
+        if (EMH_LIKELY(bmask != 0))
+            return bucket_from + CTZ(bmask);
+#elif EMH_ITER_SAFE
+        // memcpy avoids unaligned/aliasing reads of the bitmask words
+        const auto boset = bucket_from % 8;
+        auto* const start = (uint8_t*)_bitmask + bucket_from / 8;
+        size_t bmask; memcpy(&bmask, start + 0, sizeof(bmask)); bmask >>= boset;// bmask |= ((size_t)start[8] << (SIZE_BIT - boset));
+        if (EMH_LIKELY(bmask != 0))
+            return bucket_from + CTZ(bmask);
+#else
+        // NOTE(review): reads a size_t through a byte-offset cast — possibly
+        // misaligned and aliasing-unsafe on some targets (the author's own
+        // comment acknowledges this); the EMH_ITER_SAFE branch uses memcpy.
+        const auto boset = main_bucket % 8;
+        auto* const align = (uint8_t*)_bitmask + main_bucket / 8; (void)bucket_from;
+        const size_t bmask = (*(size_t*)(align) >> boset);// & 0xF0F0F0F0FF0FF0FFull;//
+        if (EMH_LIKELY(bmask != 0))
+            return main_bucket + CTZ(bmask);
+#endif
+
+        const auto qmask = _mask / SIZE_BIT;
+        // probe one word slightly behind the main bucket before the full sweep
+        if (1) {
+            const size_type step = (main_bucket - SIZE_BIT / 4) & qmask;
+            const auto bmask3 = *((size_t*)_bitmask + step);
+            if (bmask3 != 0)
+                return step * SIZE_BIT + CTZ(bmask3);
+        }
+
+        // sweep bitmask words from the persistent cursor `_last`, also
+        // sampling the word half a table away each round
+        for (; ;) {
+            auto& _last = EMH_BUCKET(_pairs, _num_buckets);
+            const auto bmask2 = *((size_t*)_bitmask + _last);
+            if (bmask2 != 0)
+                return _last * SIZE_BIT + CTZ(bmask2);
+
+            const auto next1 = (qmask / 2 + _last) & qmask;
+            const auto bmask1 = *((size_t*)_bitmask + next1);
+            if (bmask1 != 0) {
+                //_last = next1;
+                return next1 * SIZE_BIT + CTZ(bmask1);
+            }
+            _last = (_last + 1) & qmask;
+        }
+
+        return 0; // unreachable: the loop above only exits via return
+    }
+
+    // key is not in this map. Find a place to put it — fast path used when the
+    // key is known to be unique (e.g. during rehash): no duplicate check, just
+    // the first free bitmask bit at or after bucket_from.
+    size_type find_unique_empty(const size_type bucket_from, const size_t main_bucket)
+    {
+#ifdef EMH_ALIGN64
+        const auto boset = bucket_from % MASK_BIT;
+        auto* const align = _bitmask + bucket_from / MASK_BIT;
+        const auto bmask = ((size_t)align[1] << (MASK_BIT - boset)) | (align[0] >> boset);
+        static_assert(sizeof(size_t) > 4);
+#elif EMH_ITER_SAFE
+        // memcpy avoids unaligned/aliasing reads of the bitmask words
+        const auto boset = bucket_from % 8;
+        auto* const start = (uint8_t*)_bitmask + bucket_from / 8;
+        size_t bmask; memcpy(&bmask, start + 0, sizeof(bmask)); bmask >>= boset;
+#else
+        const auto boset = bucket_from % 8; (void)main_bucket;
+        auto* const align = (uint8_t*)_bitmask + bucket_from / 8;
+        const auto bmask = (*(size_t*)(align) >> boset); //maybe not aligned and warning
+#endif
+        if (EMH_LIKELY(bmask != 0))
+            return bucket_from + CTZ(bmask);
+
+        // slow path: wrap-around sweep of the bitmask words
+        const auto qmask = _mask / SIZE_BIT;
+        for (auto last = (bucket_from + _mask) & qmask; ;) {
+            const auto bmask2 = *((size_t*)_bitmask + last);// & 0xF0F0F0F0FF0FF0FFull;
+            if (EMH_LIKELY(bmask2 != 0))
+                return last * SIZE_BIT + CTZ(bmask2);
+            last = (last + 1) & qmask;
+        }
+
+        return 0; // unreachable: the loop above only exits via return
+    }
+
+    // Walk the collision chain starting at `main_bucket` and return its tail
+    // node (a node whose link points back to itself marks the chain end).
+    size_type find_last_bucket(size_type main_bucket) const
+    {
+        for (auto curr = main_bucket; ; ) {
+            const auto link = EMH_BUCKET(_pairs, curr);
+            if (link == curr)
+                return curr;
+            curr = link;
+        }
+    }
+
+    // Return `bucket`'s predecessor on the collision chain rooted at
+    // `main_bucket` (precondition: `bucket` is on that chain).
+    size_type find_prev_bucket(size_type main_bucket, const size_type bucket) const
+    {
+        for (auto curr = main_bucket; ; ) {
+            const auto link = EMH_BUCKET(_pairs, curr);
+            if (link == bucket)
+                return curr;
+            curr = link;
+        }
+    }
+
+    // Find the slot for a key known to be absent: evict a squatter that is
+    // not in its own main bucket, otherwise append a new node to the chain.
+    size_type find_unique_bucket(const KeyT& key)
+    {
+        const size_type bucket = hash_key(key) & _mask;
+        if (EMH_EMPTY(_pairs, bucket))
+            return bucket;
+
+        //check whether the occupant is in its main bucket or not
+        const auto kmain_bucket = hash_key(EMH_KEY(_pairs, bucket)) & _mask;
+        if (EMH_UNLIKELY(kmain_bucket != bucket))
+            return kickout_bucket(kmain_bucket, bucket);
+
+        auto next_bucket = EMH_BUCKET(_pairs, bucket);
+        if (next_bucket != bucket)
+            next_bucket = find_last_bucket(next_bucket);
+
+        //find a new empty slot and link it to the chain tail
+        return EMH_BUCKET(_pairs, next_bucket) = find_unique_empty(next_bucket, bucket);
+    }
+
+#if EMH_INT_HASH
+    // Fibonacci-hashing constant: 2^64 divided by the golden ratio.
+    static constexpr uint64_t KC = UINT64_C(11400714819323198485);
+    // Mix a 64-bit integer key; the mixer variant is selected at compile time
+    // via EMH_INT_HASH (128-bit multiply-fold, MurmurHash3 finalizer, _umul128
+    // on Win64, mulx-style mix, 32-bit fold, wyhash64, or splitmix64 fallback).
+    static inline uint64_t hash64(uint64_t key)
+    {
+#if __SIZEOF_INT128__ && EMH_INT_HASH == 1
+        __uint128_t r = key; r *= KC;
+        return (uint64_t)(r >> 64) + (uint64_t)r;
+#elif EMH_INT_HASH == 2
+        //MurmurHash3Mixer
+        uint64_t h = key;
+        h ^= h >> 33;
+        h *= 0xff51afd7ed558ccd;
+        h ^= h >> 33;
+        h *= 0xc4ceb9fe1a85ec53;
+        h ^= h >> 33;
+        return h;
+#elif _WIN64 && EMH_INT_HASH == 1
+        uint64_t high;
+        return _umul128(key, KC, &high) + high;
+#elif EMH_INT_HASH == 3
+        auto ror  = (key >> 32) | (key << 32);
+        auto low  = key * 0xA24BAED4963EE407ull;
+        auto high = ror * 0x9FB21C651E98DF25ull;
+        auto mix  = low + high;
+        return mix;
+#elif EMH_INT_HASH == 1
+        uint64_t r = key * UINT64_C(0xca4bcaa75ec3f625);
+        return (r >> 32) + r;
+#elif EMH_WYHASH64
+        return wyhash64(key, KC);
+#else
+        // splitmix64 finalizer
+        uint64_t x = key;
+        x = (x ^ (x >> 30)) * UINT64_C(0xbf58476d1ce4e5b9);
+        x = (x ^ (x >> 27)) * UINT64_C(0x94d049bb133111eb);
+        x = x ^ (x >> 31);
+        return x;
+#endif
+    }
+#endif
+
+    // hash_key overload for integral keys: use the integer mixer, the raw key
+    // (identity hash), or the user-supplied hasher depending on configuration.
+    template<typename UType, typename std::enable_if<std::is_integral<UType>::value, size_type>::type = 0>
+    inline size_type hash_key(const UType key) const
+    {
+#if EMH_INT_HASH
+        return hash64(key);
+#elif EMH_IDENTITY_HASH
+        return key + (key >> 24);
+#else
+        return (size_type)_hasher(key);
+#endif
+    }
+
+    // hash_key overload for std::string keys: optionally route through wyhash.
+    template<typename UType, typename std::enable_if<std::is_same<UType, std::string>::value, size_type>::type = 0>
+    inline size_type hash_key(const UType& key) const
+    {
+#if EMH_WY_HASH
+        return wyhash(key.data(), key.size(), 0);
+#else
+        return (size_type)_hasher(key);
+#endif
+    }
+
+    // hash_key fallback for all other key types: defer to the user hasher.
+    template<typename UType, typename std::enable_if<!std::is_integral<UType>::value && !std::is_same<UType, std::string>::value, size_type>::type = 0>
+    inline size_type hash_key(const UType& key) const
+    {
+        return (size_type)_hasher(key);
+    }
+
+private:
+    uint32_t* _bitmask; // one bit per bucket; a set bit marks a free slot (scanned with CTZ)
+    PairT* _pairs; // bucket storage; the sentinel slot at _num_buckets holds the sweep cursor
+    HashT _hasher;
+    EqT _eq;
+    size_type _mask; // _num_buckets - 1 (capacity is a power of two)
+    size_type _num_buckets;
+
+    size_type _num_filled;
+    uint32_t _mlf; // presumably the max-load-factor setting — not used in this chunk
+
+private:
+    static constexpr uint32_t BIT_PACK = sizeof(_bitmask[0]) * 2;
+    static constexpr uint32_t MASK_BIT = sizeof(_bitmask[0]) * 8;
+    static constexpr uint32_t SIZE_BIT = sizeof(size_t) * 8;
+    // NOTE(review): parses as ((sizeof(PairT) >= sizeof(size_t)) == 0 ? 1 : 2),
+    // i.e. 1 when PairT is smaller than size_t — confirm the `== 0` is intended.
+    static constexpr uint32_t EPACK_SIZE = sizeof(PairT) >= sizeof(size_t) == 0 ? 1 : 2; // > 1
+};
+}
+// namespace emhash7
+#if __cplusplus >= 201103L
+//template <class Key, class Val> using ehmap7 = emhash7::HashMap<Key, Val, std::hash<Key>, std::equal_to<Key>>;
+#endif
+
+//TODO
+//2. improve rehash and find miss performance(reduce peak memory)
+//3. dump or Serialization interface
+//4. node hash map support
+//5. load_factor > 1.0 && add grow ratio
+//... https://godbolt.org/
diff --git a/misc/benchmarks/external/khash.h b/misc/benchmarks/external/khash.h
new file mode 100644
index 00000000..61dabc4d
--- /dev/null
+++ b/misc/benchmarks/external/khash.h
@@ -0,0 +1,595 @@
+/* The MIT License
+ Copyright (c) 2008, 2009, 2011 by Attractive Chaos <[email protected]>
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
+*/
+
+/*
+ An example:
+#include "khash.h"
+KHASH_MAP_INIT_INT(32, char)
+int main() {
+ int ret, is_missing;
+ khiter_t k;
+ khash_t(32) *h = kh_init(32);
+ k = kh_put(32, h, 5, &ret);
+ kh_value(h, k) = 10;
+ k = kh_get(32, h, 10);
+ is_missing = (k == kh_end(h));
+ k = kh_get(32, h, 5);
+ kh_del(32, h, k);
+ for (k = kh_begin(h); k != kh_end(h); ++k)
+ if (kh_exist(h, k)) kh_value(h, k) = 1;
+ kh_destroy(32, h);
+ return 0;
+}
+*/
+
+/*
+ 2013-05-02 (0.2.8):
+ * Use quadratic probing. When the capacity is power of 2, stepping function
+ i*(i+1)/2 guarantees to traverse each bucket. It is better than double
+ hashing on cache performance and is more robust than linear probing.
+ In theory, double hashing should be more robust than quadratic probing.
+ However, my implementation is probably not for large hash tables, because
+ the second hash function is closely tied to the first hash function,
+ which reduce the effectiveness of double hashing.
+ Reference: http://research.cs.vt.edu/AVresearch/hashing/quadratic.php
+ 2011-12-29 (0.2.7):
+ * Minor code clean up; no actual effect.
+ 2011-09-16 (0.2.6):
+ * The capacity is a power of 2. This seems to dramatically improve the
+ speed for simple keys. Thank Zilong Tan for the suggestion. Reference:
+ - http://code.google.com/p/ulib/
+ - http://nothings.org/computer/judy/
+ * Allow to optionally use linear probing which usually has better
+ performance for random input. Double hashing is still the default as it
+ is more robust to certain non-random input.
+ * Added Wang's integer hash function (not used by default). This hash
+ function is more robust to certain non-random input.
+ 2011-02-14 (0.2.5):
+ * Allow to declare global functions.
+ 2009-09-26 (0.2.4):
+ * Improve portability
+ 2008-09-19 (0.2.3):
+ * Corrected the example
+ * Improved interfaces
+ 2008-09-11 (0.2.2):
+ * Improved speed a little in kh_put()
+ 2008-09-10 (0.2.1):
+ * Added kh_clear()
+ * Fixed a compiling error
+ 2008-09-02 (0.2.0):
+ * Changed to token concatenation which increases flexibility.
+ 2008-08-31 (0.1.2):
+ * Fixed a bug in kh_get(), which has not been tested previously.
+ 2008-08-31 (0.1.1):
+ * Added destructor
+*/
+
+
+#ifndef __AC_KHASH_H
+#define __AC_KHASH_H
+
+/*!
+ @header
+ Generic hash table library.
+ */
+
+#define AC_VERSION_KHASH_H "0.2.8"
+
+#include <stdlib.h>
+#include <string.h>
+#include <limits.h>
+
+/* compiler specific configuration */
+
+#if UINT_MAX == 0xffffffffu
+typedef unsigned int khint32_t;
+#elif ULONG_MAX == 0xffffffffu
+typedef unsigned long khint32_t;
+#endif
+
+#if ULONG_MAX == ULLONG_MAX
+typedef unsigned long khint64_t;
+#else
+typedef unsigned long long khint64_t;
+#endif
+
+#ifndef kh_inline
+#ifdef _MSC_VER
+#define kh_inline __inline
+#else
+#define kh_inline inline
+#endif
+#endif /* kh_inline */
+
+#ifndef klib_unused
+#if (defined __clang__ && __clang_major__ >= 3) || (defined __GNUC__ && __GNUC__ >= 3)
+#define klib_unused __attribute__ ((__unused__))
+#else
+#define klib_unused
+#endif
+#endif /* klib_unused */
+
+typedef khint32_t khint_t;
+typedef khint_t khiter_t;
+
+#define __ac_isempty(flag, i) ((flag[i>>4]>>((i&0xfU)<<1))&2)
+#define __ac_isdel(flag, i) ((flag[i>>4]>>((i&0xfU)<<1))&1)
+#define __ac_iseither(flag, i) ((flag[i>>4]>>((i&0xfU)<<1))&3)
+#define __ac_set_isdel_false(flag, i) (flag[i>>4]&=~(1ul<<((i&0xfU)<<1)))
+#define __ac_set_isempty_false(flag, i) (flag[i>>4]&=~(2ul<<((i&0xfU)<<1)))
+#define __ac_set_isboth_false(flag, i) (flag[i>>4]&=~(3ul<<((i&0xfU)<<1)))
+#define __ac_set_isdel_true(flag, i) (flag[i>>4]|=1ul<<((i&0xfU)<<1))
+
+#define __ac_fsize(m) ((m) < 16? 1 : (m)>>4)
+
+#ifndef kroundup32
+#define kroundup32(x) (--(x), (x)|=(x)>>1, (x)|=(x)>>2, (x)|=(x)>>4, (x)|=(x)>>8, (x)|=(x)>>16, ++(x))
+#endif
+
+#ifndef kcalloc
+#define kcalloc(N,Z) calloc(N,Z)
+#endif
+#ifndef kmalloc
+#define kmalloc(Z) malloc(Z)
+#endif
+#ifndef krealloc
+#define krealloc(P,Z) realloc(P,Z)
+#endif
+#ifndef kfree
+#define kfree(P) free(P)
+#endif
+
+static const double __ac_HASH_UPPER = 0.77;
+
+#define __KHASH_TYPE(name, khkey_t, khval_t) \
+ typedef struct kh_##name##_s { \
+ khint_t n_buckets, size, n_occupied, upper_bound; \
+ khint32_t *flags; \
+ khkey_t *keys; \
+ khval_t *vals; \
+ } kh_##name##_t;
+
+#define __KHASH_PROTOTYPES(name, khkey_t, khval_t) \
+ extern kh_##name##_t *kh_init_##name(void); \
+ extern void kh_destroy_##name(kh_##name##_t *h); \
+ extern void kh_clear_##name(kh_##name##_t *h); \
+ extern khint_t kh_get_##name(const kh_##name##_t *h, khkey_t key); \
+ extern int kh_resize_##name(kh_##name##_t *h, khint_t new_n_buckets); \
+ extern khint_t kh_put_##name(kh_##name##_t *h, khkey_t key, int *ret); \
+ extern void kh_del_##name(kh_##name##_t *h, khint_t x);
+
+#define __KHASH_IMPL(name, SCOPE, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal) \
+ SCOPE kh_##name##_t *kh_init_##name(void) { \
+ return (kh_##name##_t*)kcalloc(1, sizeof(kh_##name##_t)); \
+ } \
+ SCOPE void kh_destroy_##name(kh_##name##_t *h) \
+ { \
+ if (h) { \
+ kfree((void *)h->keys); kfree(h->flags); \
+ kfree((void *)h->vals); \
+ kfree(h); \
+ } \
+ } \
+ SCOPE void kh_clear_##name(kh_##name##_t *h) \
+ { \
+ if (h && h->flags) { \
+ memset(h->flags, 0xaa, __ac_fsize(h->n_buckets) * sizeof(khint32_t)); \
+ h->size = h->n_occupied = 0; \
+ } \
+ } \
+ SCOPE khint_t kh_get_##name(const kh_##name##_t *h, khkey_t key) \
+ { \
+ if (h->n_buckets) { \
+ khint_t k, i, last, mask, step = 0; \
+ mask = h->n_buckets - 1; \
+ k = __hash_func(key); i = k & mask; \
+ last = i; \
+ while (!__ac_isempty(h->flags, i) && (__ac_isdel(h->flags, i) || !__hash_equal(h->keys[i], key))) { \
+ i = (i + (++step)) & mask; \
+ if (i == last) return h->n_buckets; \
+ } \
+ return __ac_iseither(h->flags, i)? h->n_buckets : i; \
+ } else return 0; \
+ } \
+ SCOPE int kh_resize_##name(kh_##name##_t *h, khint_t new_n_buckets) \
+ { /* This function uses 0.25*n_buckets bytes of working space instead of [sizeof(key_t+val_t)+.25]*n_buckets. */ \
+ khint32_t *new_flags = 0; \
+ khint_t j = 1; \
+ { \
+ kroundup32(new_n_buckets); \
+ if (new_n_buckets < 4) new_n_buckets = 4; \
+ if (h->size >= (khint_t)(new_n_buckets * __ac_HASH_UPPER + 0.5)) j = 0; /* requested size is too small */ \
+ else { /* hash table size to be changed (shrink or expand); rehash */ \
+ new_flags = (khint32_t*)kmalloc(__ac_fsize(new_n_buckets) * sizeof(khint32_t)); \
+ if (!new_flags) return -1; \
+ memset(new_flags, 0xaa, __ac_fsize(new_n_buckets) * sizeof(khint32_t)); \
+ if (h->n_buckets < new_n_buckets) { /* expand */ \
+ khkey_t *new_keys = (khkey_t*)krealloc((void *)h->keys, new_n_buckets * sizeof(khkey_t)); \
+ if (!new_keys) { kfree(new_flags); return -1; } \
+ h->keys = new_keys; \
+ if (kh_is_map) { \
+ khval_t *new_vals = (khval_t*)krealloc((void *)h->vals, new_n_buckets * sizeof(khval_t)); \
+ if (!new_vals) { kfree(new_flags); return -1; } \
+ h->vals = new_vals; \
+ } \
+ } /* otherwise shrink */ \
+ } \
+ } \
+ if (j) { /* rehashing is needed */ \
+ for (j = 0; j != h->n_buckets; ++j) { \
+ if (__ac_iseither(h->flags, j) == 0) { \
+ khkey_t key = h->keys[j]; \
+ khval_t val; \
+ khint_t new_mask; \
+ new_mask = new_n_buckets - 1; \
+ if (kh_is_map) val = h->vals[j]; \
+ __ac_set_isdel_true(h->flags, j); \
+ while (1) { /* kick-out process; sort of like in Cuckoo hashing */ \
+ khint_t k, i, step = 0; \
+ k = __hash_func(key); \
+ i = k & new_mask; \
+ while (!__ac_isempty(new_flags, i)) i = (i + (++step)) & new_mask; \
+ __ac_set_isempty_false(new_flags, i); \
+ if (i < h->n_buckets && __ac_iseither(h->flags, i) == 0) { /* kick out the existing element */ \
+ { khkey_t tmp = h->keys[i]; h->keys[i] = key; key = tmp; } \
+ if (kh_is_map) { khval_t tmp = h->vals[i]; h->vals[i] = val; val = tmp; } \
+ __ac_set_isdel_true(h->flags, i); /* mark it as deleted in the old hash table */ \
+ } else { /* write the element and jump out of the loop */ \
+ h->keys[i] = key; \
+ if (kh_is_map) h->vals[i] = val; \
+ break; \
+ } \
+ } \
+ } \
+ } \
+ if (h->n_buckets > new_n_buckets) { /* shrink the hash table */ \
+ h->keys = (khkey_t*)krealloc((void *)h->keys, new_n_buckets * sizeof(khkey_t)); \
+ if (kh_is_map) h->vals = (khval_t*)krealloc((void *)h->vals, new_n_buckets * sizeof(khval_t)); \
+ } \
+ kfree(h->flags); /* free the working space */ \
+ h->flags = new_flags; \
+ h->n_buckets = new_n_buckets; \
+ h->n_occupied = h->size; \
+ h->upper_bound = (khint_t)(h->n_buckets * __ac_HASH_UPPER + 0.5); \
+ } \
+ return 0; \
+ } \
+ SCOPE khint_t kh_put_##name(kh_##name##_t *h, khkey_t key, int *ret) \
+ { \
+ khint_t x; \
+ if (h->n_occupied >= h->upper_bound) { /* update the hash table */ \
+ if (h->n_buckets > (h->size<<1)) { \
+ if (kh_resize_##name(h, h->n_buckets - 1) < 0) { /* clear "deleted" elements */ \
+ *ret = -1; return h->n_buckets; \
+ } \
+ } else if (kh_resize_##name(h, h->n_buckets + 1) < 0) { /* expand the hash table */ \
+ *ret = -1; return h->n_buckets; \
+ } \
+ } /* TODO: to implement automatically shrinking; resize() already support shrinking */ \
+ { \
+ khint_t k, i, site, last, mask = h->n_buckets - 1, step = 0; \
+ x = site = h->n_buckets; k = __hash_func(key); i = k & mask; \
+ if (__ac_isempty(h->flags, i)) x = i; /* for speed up */ \
+ else { \
+ last = i; \
+ while (!__ac_isempty(h->flags, i) && (__ac_isdel(h->flags, i) || !__hash_equal(h->keys[i], key))) { \
+ if (__ac_isdel(h->flags, i)) site = i; \
+ i = (i + (++step)) & mask; \
+ if (i == last) { x = site; break; } \
+ } \
+ if (x == h->n_buckets) { \
+ if (__ac_isempty(h->flags, i) && site != h->n_buckets) x = site; \
+ else x = i; \
+ } \
+ } \
+ } \
+ if (__ac_isempty(h->flags, x)) { /* not present at all */ \
+ h->keys[x] = key; \
+ __ac_set_isboth_false(h->flags, x); \
+ ++h->size; ++h->n_occupied; \
+ *ret = 1; \
+ } else if (__ac_isdel(h->flags, x)) { /* deleted */ \
+ h->keys[x] = key; \
+ __ac_set_isboth_false(h->flags, x); \
+ ++h->size; \
+ *ret = 2; \
+ } else *ret = 0; /* Don't touch h->keys[x] if present and not deleted */ \
+ return x; \
+ } \
+ SCOPE void kh_del_##name(kh_##name##_t *h, khint_t x) \
+ { \
+ if (x != h->n_buckets && !__ac_iseither(h->flags, x)) { \
+ __ac_set_isdel_true(h->flags, x); \
+ --h->size; \
+ } \
+ }
+
+#define KHASH_DECLARE(name, khkey_t, khval_t) \
+ __KHASH_TYPE(name, khkey_t, khval_t) \
+ __KHASH_PROTOTYPES(name, khkey_t, khval_t)
+
+#define KHASH_INIT2(name, SCOPE, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal) \
+ __KHASH_TYPE(name, khkey_t, khval_t) \
+ __KHASH_IMPL(name, SCOPE, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal)
+
+#define KHASH_INIT(name, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal) \
+ KHASH_INIT2(name, static kh_inline klib_unused, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal)
+
+/* --- BEGIN OF HASH FUNCTIONS --- */
+
+/*! @function
+ @abstract Integer hash function
+ @param key The integer [khint32_t]
+ @return The hash value [khint_t]
+ */
+#define kh_int_hash_func(key) (khint32_t)(key)
+/*! @function
+ @abstract Integer comparison function
+ */
+#define kh_int_hash_equal(a, b) ((a) == (b))
+/*! @function
+ @abstract 64-bit integer hash function
+ @param key The integer [khint64_t]
+ @return The hash value [khint_t]
+ */
+#define kh_int64_hash_func(key) (khint32_t)((key)>>33^(key)^(key)<<11)
+/*! @function
+ @abstract 64-bit integer comparison function
+ */
+#define kh_int64_hash_equal(a, b) ((a) == (b))
+/*! @function
+ @abstract const char* hash function
+ @param s Pointer to a null terminated string
+ @return The hash value
+ */
+/* X31 string hash: folds every byte of the NUL-terminated string with
+ * h = h*31 + c (31*h computed as (h<<5)-h); an empty string hashes to 0. */
+static kh_inline khint_t __ac_X31_hash_string(const char *s)
+{
+	khint_t h = 0;
+	while (*s) h = (h << 5) - h + (khint_t)*s++;
+	return h;
+}
+/*! @function
+ @abstract Another interface to const char* hash function
+ @param key Pointer to a null terminated string [const char*]
+ @return The hash value [khint_t]
+ */
+#define kh_str_hash_func(key) __ac_X31_hash_string(key)
+/*! @function
+ @abstract Const char* comparison function
+ */
+#define kh_str_hash_equal(a, b) (strcmp(a, b) == 0)
+
+/* Thomas Wang's 32-bit integer mix — optional alternative to the identity
+ * integer hash that is more robust to certain non-random input (see the
+ * 2011-09-16 changelog entry above); not used by default. */
+static kh_inline khint_t __ac_Wang_hash(khint_t key)
+{
+	key += ~(key << 15);
+	key ^= (key >> 10);
+	key += (key << 3);
+	key ^= (key >> 6);
+	key += ~(key << 11);
+	key ^= (key >> 16);
+	return key;
+}
+#define kh_int_hash_func2(key) __ac_Wang_hash((khint_t)key)
+
+/* --- END OF HASH FUNCTIONS --- */
+
+/* Other convenient macros... */
+
+/*!
+ @abstract Type of the hash table.
+ @param name Name of the hash table [symbol]
+ */
+#define khash_t(name) kh_##name##_t
+
+/*! @function
+ @abstract Initiate a hash table.
+ @param name Name of the hash table [symbol]
+ @return Pointer to the hash table [khash_t(name)*]
+ */
+#define kh_init(name) kh_init_##name()
+
+/*! @function
+ @abstract Destroy a hash table.
+ @param name Name of the hash table [symbol]
+ @param h Pointer to the hash table [khash_t(name)*]
+ */
+#define kh_destroy(name, h) kh_destroy_##name(h)
+
+/*! @function
+ @abstract Reset a hash table without deallocating memory.
+ @param name Name of the hash table [symbol]
+ @param h Pointer to the hash table [khash_t(name)*]
+ */
+#define kh_clear(name, h) kh_clear_##name(h)
+
+/*! @function
+ @abstract Resize a hash table.
+ @param name Name of the hash table [symbol]
+ @param h Pointer to the hash table [khash_t(name)*]
+ @param s New size [khint_t]
+ */
+#define kh_resize(name, h, s) kh_resize_##name(h, s)
+
+/*! @function
+ @abstract Insert a key to the hash table.
+ @param name Name of the hash table [symbol]
+ @param h Pointer to the hash table [khash_t(name)*]
+ @param k Key [type of keys]
+ @param r Extra return code: -1 if the operation failed;
+ 0 if the key is present in the hash table;
+ 1 if the bucket is empty (never used); 2 if the element in
+ the bucket has been deleted [int*]
+ @return Iterator to the inserted element [khint_t]
+ */
+#define kh_put(name, h, k, r) kh_put_##name(h, k, r)
+
+/*! @function
+ @abstract Retrieve a key from the hash table.
+ @param name Name of the hash table [symbol]
+ @param h Pointer to the hash table [khash_t(name)*]
+ @param k Key [type of keys]
+ @return Iterator to the found element, or kh_end(h) if the element is absent [khint_t]
+ */
+#define kh_get(name, h, k) kh_get_##name(h, k)
+
+/*! @function
+ @abstract Remove a key from the hash table.
+ @param name Name of the hash table [symbol]
+ @param h Pointer to the hash table [khash_t(name)*]
+ @param k Iterator to the element to be deleted [khint_t]
+ */
+#define kh_del(name, h, k) kh_del_##name(h, k)
+
+/*! @function
+ @abstract Test whether a bucket contains data.
+ @param h Pointer to the hash table [khash_t(name)*]
+ @param x Iterator to the bucket [khint_t]
+ @return 1 if containing data; 0 otherwise [int]
+ */
+#define kh_exist(h, x) (!__ac_iseither((h)->flags, (x)))
+
+/*! @function
+ @abstract Get key given an iterator
+ @param h Pointer to the hash table [khash_t(name)*]
+ @param x Iterator to the bucket [khint_t]
+ @return Key [type of keys]
+ */
+#define kh_key(h, x) ((h)->keys[x])
+
+/*! @function
+ @abstract Get value given an iterator
+ @param h Pointer to the hash table [khash_t(name)*]
+ @param x Iterator to the bucket [khint_t]
+ @return Value [type of values]
+ @discussion For hash sets, calling this results in segfault.
+ */
+#define kh_val(h, x) ((h)->vals[x])
+
+/*! @function
+ @abstract Alias of kh_val()
+ */
+#define kh_value(h, x) ((h)->vals[x])
+
+/*! @function
+ @abstract Get the start iterator
+ @param h Pointer to the hash table [khash_t(name)*]
+ @return The start iterator [khint_t]
+ */
+#define kh_begin(h) (khint_t)(0)
+
+/*! @function
+ @abstract Get the end iterator
+ @param h Pointer to the hash table [khash_t(name)*]
+ @return The end iterator [khint_t]
+ */
+#define kh_end(h) ((h)->n_buckets)
+
+/*! @function
+ @abstract Get the number of elements in the hash table
+ @param h Pointer to the hash table [khash_t(name)*]
+ @return Number of elements in the hash table [khint_t]
+ */
+#define kh_size(h) ((h)->size)
+
+/*! @function
+ @abstract Get the number of buckets in the hash table
+ @param h Pointer to the hash table [khash_t(name)*]
+ @return Number of buckets in the hash table [khint_t]
+ */
+#define kh_n_buckets(h) ((h)->n_buckets)
+
+/*! @function
+ @abstract Iterate over the entries in the hash table
+ @param h Pointer to the hash table [khash_t(name)*]
+ @param kvar Variable to which key will be assigned
+ @param vvar Variable to which value will be assigned
+ @param code Block of code to execute
+ */
+#define kh_foreach(h, kvar, vvar, code) { khint_t __i; \
+ for (__i = kh_begin(h); __i != kh_end(h); ++__i) { \
+ if (!kh_exist(h,__i)) continue; \
+ (kvar) = kh_key(h,__i); \
+ (vvar) = kh_val(h,__i); \
+ code; \
+ } }
+
+/*! @function
+ @abstract Iterate over the values in the hash table
+ @param h Pointer to the hash table [khash_t(name)*]
+ @param vvar Variable to which value will be assigned
+ @param code Block of code to execute
+ */
+#define kh_foreach_value(h, vvar, code) { khint_t __i; \
+ for (__i = kh_begin(h); __i != kh_end(h); ++__i) { \
+ if (!kh_exist(h,__i)) continue; \
+ (vvar) = kh_val(h,__i); \
+ code; \
+ } }
+
+/* More convenient interfaces */
+
+/*! @function
+ @abstract Instantiate a hash set containing integer keys
+ @param name Name of the hash table [symbol]
+ */
+#define KHASH_SET_INIT_INT(name) \
+ KHASH_INIT(name, khint32_t, char, 0, kh_int_hash_func, kh_int_hash_equal)
+
+/*! @function
+ @abstract Instantiate a hash map containing integer keys
+ @param name Name of the hash table [symbol]
+ @param khval_t Type of values [type]
+ */
+#define KHASH_MAP_INIT_INT(name, khval_t) \
+ KHASH_INIT(name, khint32_t, khval_t, 1, kh_int_hash_func, kh_int_hash_equal)
+
+/*! @function
+ @abstract Instantiate a hash set containing 64-bit integer keys
+ @param name Name of the hash table [symbol]
+ */
+#define KHASH_SET_INIT_INT64(name) \
+ KHASH_INIT(name, khint64_t, char, 0, kh_int64_hash_func, kh_int64_hash_equal)
+
+/*! @function
+ @abstract Instantiate a hash map containing 64-bit integer keys
+ @param name Name of the hash table [symbol]
+ @param khval_t Type of values [type]
+ */
+#define KHASH_MAP_INIT_INT64(name, khval_t) \
+ KHASH_INIT(name, khint64_t, khval_t, 1, kh_int64_hash_func, kh_int64_hash_equal)
+
+typedef const char *kh_cstr_t;
+/*! @function
+ @abstract Instantiate a hash map containing const char* keys
+ @param name Name of the hash table [symbol]
+ */
+#define KHASH_SET_INIT_STR(name) \
+ KHASH_INIT(name, kh_cstr_t, char, 0, kh_str_hash_func, kh_str_hash_equal)
+
+/*! @function
+ @abstract Instantiate a hash map containing const char* keys
+ @param name Name of the hash table [symbol]
+ @param khval_t Type of values [type]
+ */
+#define KHASH_MAP_INIT_STR(name, khval_t) \
+ KHASH_INIT(name, kh_cstr_t, khval_t, 1, kh_str_hash_func, kh_str_hash_equal)
+
+#endif /* __AC_KHASH_H */
diff --git a/misc/benchmarks/external/skarupke/flat_hash_map.hpp b/misc/benchmarks/external/skarupke/flat_hash_map.hpp
new file mode 100644
index 00000000..a8723ee8
--- /dev/null
+++ b/misc/benchmarks/external/skarupke/flat_hash_map.hpp
@@ -0,0 +1,1496 @@
+// Copyright Malte Skarupke 2017.
+// Distributed under the Boost Software License, Version 1.0.
+// (See http://www.boost.org/LICENSE_1_0.txt)
+
+#pragma once
+
+#include <cstdint>
+#include <cstddef>
+#include <functional>
+#include <cmath>
+#include <algorithm>
+#include <iterator>
+#include <utility>
+#include <type_traits>
+
+#ifdef _MSC_VER
+#define SKA_NOINLINE(...) __declspec(noinline) __VA_ARGS__
+#else
+#define SKA_NOINLINE(...) __VA_ARGS__ __attribute__((noinline))
+#endif
+
+namespace ska
+{
+struct prime_number_hash_policy;
+struct power_of_two_hash_policy;
+struct fibonacci_hash_policy;
+
+namespace detailv3
+{
+template<typename Result, typename Functor>
+struct functor_storage : Functor
+{
+ functor_storage() = default;
+ functor_storage(const Functor & functor)
+ : Functor(functor)
+ {
+ }
+ template<typename... Args>
+ Result operator()(Args &&... args)
+ {
+ return static_cast<Functor &>(*this)(std::forward<Args>(args)...);
+ }
+ template<typename... Args>
+ Result operator()(Args &&... args) const
+ {
+ return static_cast<const Functor &>(*this)(std::forward<Args>(args)...);
+ }
+};
+template<typename Result, typename... Args>
+struct functor_storage<Result, Result (*)(Args...)>
+{
+ typedef Result (*function_ptr)(Args...);
+ function_ptr function;
+ functor_storage(function_ptr function)
+ : function(function)
+ {
+ }
+ Result operator()(Args... args) const
+ {
+ return function(std::forward<Args>(args)...);
+ }
+ operator function_ptr &()
+ {
+ return function;
+ }
+ operator const function_ptr &()
+ {
+ return function;
+ }
+};
+template<typename key_type, typename value_type, typename hasher>
+struct KeyOrValueHasher : functor_storage<size_t, hasher>
+{
+ typedef functor_storage<size_t, hasher> hasher_storage;
+ KeyOrValueHasher() = default;
+ KeyOrValueHasher(const hasher & hash)
+ : hasher_storage(hash)
+ {
+ }
+ size_t operator()(const key_type & key)
+ {
+ return static_cast<hasher_storage &>(*this)(key);
+ }
+ size_t operator()(const key_type & key) const
+ {
+ return static_cast<const hasher_storage &>(*this)(key);
+ }
+ size_t operator()(const value_type & value)
+ {
+ return static_cast<hasher_storage &>(*this)(value.first);
+ }
+ size_t operator()(const value_type & value) const
+ {
+ return static_cast<const hasher_storage &>(*this)(value.first);
+ }
+ template<typename F, typename S>
+ size_t operator()(const std::pair<F, S> & value)
+ {
+ return static_cast<hasher_storage &>(*this)(value.first);
+ }
+ template<typename F, typename S>
+ size_t operator()(const std::pair<F, S> & value) const
+ {
+ return static_cast<const hasher_storage &>(*this)(value.first);
+ }
+};
+template<typename key_type, typename value_type, typename key_equal>
+struct KeyOrValueEquality : functor_storage<bool, key_equal>
+{
+ typedef functor_storage<bool, key_equal> equality_storage;
+ KeyOrValueEquality() = default;
+ KeyOrValueEquality(const key_equal & equality)
+ : equality_storage(equality)
+ {
+ }
+ bool operator()(const key_type & lhs, const key_type & rhs)
+ {
+ return static_cast<equality_storage &>(*this)(lhs, rhs);
+ }
+ bool operator()(const key_type & lhs, const value_type & rhs)
+ {
+ return static_cast<equality_storage &>(*this)(lhs, rhs.first);
+ }
+ bool operator()(const value_type & lhs, const key_type & rhs)
+ {
+ return static_cast<equality_storage &>(*this)(lhs.first, rhs);
+ }
+ bool operator()(const value_type & lhs, const value_type & rhs)
+ {
+ return static_cast<equality_storage &>(*this)(lhs.first, rhs.first);
+ }
+ template<typename F, typename S>
+ bool operator()(const key_type & lhs, const std::pair<F, S> & rhs)
+ {
+ return static_cast<equality_storage &>(*this)(lhs, rhs.first);
+ }
+ template<typename F, typename S>
+ bool operator()(const std::pair<F, S> & lhs, const key_type & rhs)
+ {
+ return static_cast<equality_storage &>(*this)(lhs.first, rhs);
+ }
+ template<typename F, typename S>
+ bool operator()(const value_type & lhs, const std::pair<F, S> & rhs)
+ {
+ return static_cast<equality_storage &>(*this)(lhs.first, rhs.first);
+ }
+ template<typename F, typename S>
+ bool operator()(const std::pair<F, S> & lhs, const value_type & rhs)
+ {
+ return static_cast<equality_storage &>(*this)(lhs.first, rhs.first);
+ }
+ template<typename FL, typename SL, typename FR, typename SR>
+ bool operator()(const std::pair<FL, SL> & lhs, const std::pair<FR, SR> & rhs)
+ {
+ return static_cast<equality_storage &>(*this)(lhs.first, rhs.first);
+ }
+};
+static constexpr int8_t min_lookups = 4;
+template<typename T>
+struct sherwood_v3_entry
+{
+ sherwood_v3_entry()
+ {
+ }
+ sherwood_v3_entry(int8_t distance_from_desired)
+ : distance_from_desired(distance_from_desired)
+ {
+ }
+ ~sherwood_v3_entry()
+ {
+ }
+ static sherwood_v3_entry * empty_default_table()
+ {
+ static sherwood_v3_entry result[min_lookups] = { {}, {}, {}, {special_end_value} };
+ return result;
+ }
+
+ bool has_value() const
+ {
+ return distance_from_desired >= 0;
+ }
+ bool is_empty() const
+ {
+ return distance_from_desired < 0;
+ }
+ bool is_at_desired_position() const
+ {
+ return distance_from_desired <= 0;
+ }
+ template<typename... Args>
+ void emplace(int8_t distance, Args &&... args)
+ {
+ new (std::addressof(value)) T(std::forward<Args>(args)...);
+ distance_from_desired = distance;
+ }
+
+ void destroy_value()
+ {
+ value.~T();
+ distance_from_desired = -1;
+ }
+
+ int8_t distance_from_desired = -1;
+ static constexpr int8_t special_end_value = 0;
+ union { T value; };
+};
+
+inline int8_t log2(size_t value)
+{
+ static constexpr int8_t table[64] =
+ {
+ 63, 0, 58, 1, 59, 47, 53, 2,
+ 60, 39, 48, 27, 54, 33, 42, 3,
+ 61, 51, 37, 40, 49, 18, 28, 20,
+ 55, 30, 34, 11, 43, 14, 22, 4,
+ 62, 57, 46, 52, 38, 26, 32, 41,
+ 50, 36, 17, 19, 29, 10, 13, 21,
+ 56, 45, 25, 31, 35, 16, 9, 12,
+ 44, 24, 15, 8, 23, 7, 6, 5
+ };
+ value |= value >> 1;
+ value |= value >> 2;
+ value |= value >> 4;
+ value |= value >> 8;
+ value |= value >> 16;
+ value |= value >> 32;
+ return table[((value - (value >> 1)) * 0x07EDD5E59A4E28C2) >> 58];
+}
+
+template<typename T, bool>
+struct AssignIfTrue
+{
+ void operator()(T & lhs, const T & rhs)
+ {
+ lhs = rhs;
+ }
+ void operator()(T & lhs, T && rhs)
+ {
+ lhs = std::move(rhs);
+ }
+};
+template<typename T>
+struct AssignIfTrue<T, false>
+{
+ void operator()(T &, const T &)
+ {
+ }
+ void operator()(T &, T &&)
+ {
+ }
+};
+
+inline size_t next_power_of_two(size_t i)
+{
+ --i;
+ i |= i >> 1;
+ i |= i >> 2;
+ i |= i >> 4;
+ i |= i >> 8;
+ i |= i >> 16;
+ i |= i >> 32;
+ ++i;
+ return i;
+}
+
+template<typename...> using void_t = void;
+
+template<typename T, typename = void>
+struct HashPolicySelector
+{
+ typedef fibonacci_hash_policy type;
+};
+template<typename T>
+struct HashPolicySelector<T, void_t<typename T::hash_policy>>
+{
+ typedef typename T::hash_policy type;
+};
+
+template<typename T, typename FindKey, typename ArgumentHash, typename Hasher, typename ArgumentEqual, typename Equal, typename ArgumentAlloc, typename EntryAlloc>
+class sherwood_v3_table : private EntryAlloc, private Hasher, private Equal
+{
+ using Entry = detailv3::sherwood_v3_entry<T>;
+ using AllocatorTraits = std::allocator_traits<EntryAlloc>;
+ using EntryPointer = typename AllocatorTraits::pointer;
+ struct convertible_to_iterator;
+
+public:
+
+ using value_type = T;
+ using size_type = size_t;
+ using difference_type = std::ptrdiff_t;
+ using hasher = ArgumentHash;
+ using key_equal = ArgumentEqual;
+ using allocator_type = EntryAlloc;
+ using reference = value_type &;
+ using const_reference = const value_type &;
+ using pointer = value_type *;
+ using const_pointer = const value_type *;
+
+ sherwood_v3_table()
+ {
+ }
+ explicit sherwood_v3_table(size_type bucket_count, const ArgumentHash & hash = ArgumentHash(), const ArgumentEqual & equal = ArgumentEqual(), const ArgumentAlloc & alloc = ArgumentAlloc())
+ : EntryAlloc(alloc), Hasher(hash), Equal(equal)
+ {
+ rehash(bucket_count);
+ }
+ sherwood_v3_table(size_type bucket_count, const ArgumentAlloc & alloc)
+ : sherwood_v3_table(bucket_count, ArgumentHash(), ArgumentEqual(), alloc)
+ {
+ }
+ sherwood_v3_table(size_type bucket_count, const ArgumentHash & hash, const ArgumentAlloc & alloc)
+ : sherwood_v3_table(bucket_count, hash, ArgumentEqual(), alloc)
+ {
+ }
+ explicit sherwood_v3_table(const ArgumentAlloc & alloc)
+ : EntryAlloc(alloc)
+ {
+ }
+ template<typename It>
+ sherwood_v3_table(It first, It last, size_type bucket_count = 0, const ArgumentHash & hash = ArgumentHash(), const ArgumentEqual & equal = ArgumentEqual(), const ArgumentAlloc & alloc = ArgumentAlloc())
+ : sherwood_v3_table(bucket_count, hash, equal, alloc)
+ {
+ insert(first, last);
+ }
+ template<typename It>
+ sherwood_v3_table(It first, It last, size_type bucket_count, const ArgumentAlloc & alloc)
+ : sherwood_v3_table(first, last, bucket_count, ArgumentHash(), ArgumentEqual(), alloc)
+ {
+ }
+ template<typename It>
+ sherwood_v3_table(It first, It last, size_type bucket_count, const ArgumentHash & hash, const ArgumentAlloc & alloc)
+ : sherwood_v3_table(first, last, bucket_count, hash, ArgumentEqual(), alloc)
+ {
+ }
+ sherwood_v3_table(std::initializer_list<T> il, size_type bucket_count = 0, const ArgumentHash & hash = ArgumentHash(), const ArgumentEqual & equal = ArgumentEqual(), const ArgumentAlloc & alloc = ArgumentAlloc())
+ : sherwood_v3_table(bucket_count, hash, equal, alloc)
+ {
+ if (bucket_count == 0)
+ rehash(il.size());
+ insert(il.begin(), il.end());
+ }
+ sherwood_v3_table(std::initializer_list<T> il, size_type bucket_count, const ArgumentAlloc & alloc)
+ : sherwood_v3_table(il, bucket_count, ArgumentHash(), ArgumentEqual(), alloc)
+ {
+ }
+ sherwood_v3_table(std::initializer_list<T> il, size_type bucket_count, const ArgumentHash & hash, const ArgumentAlloc & alloc)
+ : sherwood_v3_table(il, bucket_count, hash, ArgumentEqual(), alloc)
+ {
+ }
+ sherwood_v3_table(const sherwood_v3_table & other)
+ : sherwood_v3_table(other, AllocatorTraits::select_on_container_copy_construction(other.get_allocator()))
+ {
+ }
+ sherwood_v3_table(const sherwood_v3_table & other, const ArgumentAlloc & alloc)
+ : EntryAlloc(alloc), Hasher(other), Equal(other), _max_load_factor(other._max_load_factor)
+ {
+ rehash_for_other_container(other);
+ try
+ {
+ insert(other.begin(), other.end());
+ }
+ catch(...)
+ {
+ clear();
+ deallocate_data(entries, num_slots_minus_one, max_lookups);
+ throw;
+ }
+ }
+ sherwood_v3_table(sherwood_v3_table && other) noexcept
+ : EntryAlloc(std::move(other)), Hasher(std::move(other)), Equal(std::move(other))
+ {
+ swap_pointers(other);
+ }
+ sherwood_v3_table(sherwood_v3_table && other, const ArgumentAlloc & alloc) noexcept
+ : EntryAlloc(alloc), Hasher(std::move(other)), Equal(std::move(other))
+ {
+ swap_pointers(other);
+ }
+ sherwood_v3_table & operator=(const sherwood_v3_table & other)
+ {
+ if (this == std::addressof(other))
+ return *this;
+
+ clear();
+ if (AllocatorTraits::propagate_on_container_copy_assignment::value)
+ {
+ if (static_cast<EntryAlloc &>(*this) != static_cast<const EntryAlloc &>(other))
+ {
+ reset_to_empty_state();
+ }
+ AssignIfTrue<EntryAlloc, AllocatorTraits::propagate_on_container_copy_assignment::value>()(*this, other);
+ }
+ _max_load_factor = other._max_load_factor;
+ static_cast<Hasher &>(*this) = other;
+ static_cast<Equal &>(*this) = other;
+ rehash_for_other_container(other);
+ insert(other.begin(), other.end());
+ return *this;
+ }
+ sherwood_v3_table & operator=(sherwood_v3_table && other) noexcept
+ {
+ if (this == std::addressof(other))
+ return *this;
+ else if (AllocatorTraits::propagate_on_container_move_assignment::value)
+ {
+ clear();
+ reset_to_empty_state();
+ AssignIfTrue<EntryAlloc, AllocatorTraits::propagate_on_container_move_assignment::value>()(*this, std::move(other));
+ swap_pointers(other);
+ }
+ else if (static_cast<EntryAlloc &>(*this) == static_cast<EntryAlloc &>(other))
+ {
+ swap_pointers(other);
+ }
+ else
+ {
+ clear();
+ _max_load_factor = other._max_load_factor;
+ rehash_for_other_container(other);
+ for (T & elem : other)
+ emplace(std::move(elem));
+ other.clear();
+ }
+ static_cast<Hasher &>(*this) = std::move(other);
+ static_cast<Equal &>(*this) = std::move(other);
+ return *this;
+ }
+ ~sherwood_v3_table()
+ {
+ clear();
+ deallocate_data(entries, num_slots_minus_one, max_lookups);
+ }
+
+ const allocator_type & get_allocator() const
+ {
+ return static_cast<const allocator_type &>(*this);
+ }
+ const ArgumentEqual & key_eq() const
+ {
+ return static_cast<const ArgumentEqual &>(*this);
+ }
+ const ArgumentHash & hash_function() const
+ {
+ return static_cast<const ArgumentHash &>(*this);
+ }
+
+ template<typename ValueType>
+ struct templated_iterator
+ {
+ templated_iterator() = default;
+ templated_iterator(EntryPointer current)
+ : current(current)
+ {
+ }
+ EntryPointer current = EntryPointer();
+
+ using iterator_category = std::forward_iterator_tag;
+ using value_type = ValueType;
+ using difference_type = ptrdiff_t;
+ using pointer = ValueType *;
+ using reference = ValueType &;
+
+ friend bool operator==(const templated_iterator & lhs, const templated_iterator & rhs)
+ {
+ return lhs.current == rhs.current;
+ }
+ friend bool operator!=(const templated_iterator & lhs, const templated_iterator & rhs)
+ {
+ return !(lhs == rhs);
+ }
+
+ templated_iterator & operator++()
+ {
+ do
+ {
+ ++current;
+ }
+ while(current->is_empty());
+ return *this;
+ }
+ templated_iterator operator++(int)
+ {
+ templated_iterator copy(*this);
+ ++*this;
+ return copy;
+ }
+
+ ValueType & operator*() const
+ {
+ return current->value;
+ }
+ ValueType * operator->() const
+ {
+ return std::addressof(current->value);
+ }
+
+ operator templated_iterator<const value_type>() const
+ {
+ return { current };
+ }
+ };
+ using iterator = templated_iterator<value_type>;
+ using const_iterator = templated_iterator<const value_type>;
+
+ iterator begin()
+ {
+ for (EntryPointer it = entries;; ++it)
+ {
+ if (it->has_value())
+ return { it };
+ }
+ }
+ const_iterator begin() const
+ {
+ for (EntryPointer it = entries;; ++it)
+ {
+ if (it->has_value())
+ return { it };
+ }
+ }
+ const_iterator cbegin() const
+ {
+ return begin();
+ }
+ iterator end()
+ {
+ return { entries + static_cast<ptrdiff_t>(num_slots_minus_one + max_lookups) };
+ }
+ const_iterator end() const
+ {
+ return { entries + static_cast<ptrdiff_t>(num_slots_minus_one + max_lookups) };
+ }
+ const_iterator cend() const
+ {
+ return end();
+ }
+
+ iterator find(const FindKey & key)
+ {
+ size_t index = hash_policy.index_for_hash(hash_object(key), num_slots_minus_one);
+ EntryPointer it = entries + ptrdiff_t(index);
+ for (int8_t distance = 0; it->distance_from_desired >= distance; ++distance, ++it)
+ {
+ if (compares_equal(key, it->value))
+ return { it };
+ }
+ return end();
+ }
+ const_iterator find(const FindKey & key) const
+ {
+ return const_cast<sherwood_v3_table *>(this)->find(key);
+ }
+ size_t count(const FindKey & key) const
+ {
+ return find(key) == end() ? 0 : 1;
+ }
+ std::pair<iterator, iterator> equal_range(const FindKey & key)
+ {
+ iterator found = find(key);
+ if (found == end())
+ return { found, found };
+ else
+ return { found, std::next(found) };
+ }
+ std::pair<const_iterator, const_iterator> equal_range(const FindKey & key) const
+ {
+ const_iterator found = find(key);
+ if (found == end())
+ return { found, found };
+ else
+ return { found, std::next(found) };
+ }
+
+ template<typename Key, typename... Args>
+ std::pair<iterator, bool> emplace(Key && key, Args &&... args)
+ {
+ size_t index = hash_policy.index_for_hash(hash_object(key), num_slots_minus_one);
+ EntryPointer current_entry = entries + ptrdiff_t(index);
+ int8_t distance_from_desired = 0;
+ for (; current_entry->distance_from_desired >= distance_from_desired; ++current_entry, ++distance_from_desired)
+ {
+ if (compares_equal(key, current_entry->value))
+ return { { current_entry }, false };
+ }
+ return emplace_new_key(distance_from_desired, current_entry, std::forward<Key>(key), std::forward<Args>(args)...);
+ }
+
+ std::pair<iterator, bool> insert(const value_type & value)
+ {
+ return emplace(value);
+ }
+ std::pair<iterator, bool> insert(value_type && value)
+ {
+ return emplace(std::move(value));
+ }
+ template<typename... Args>
+ iterator emplace_hint(const_iterator, Args &&... args)
+ {
+ return emplace(std::forward<Args>(args)...).first;
+ }
+ iterator insert(const_iterator, const value_type & value)
+ {
+ return emplace(value).first;
+ }
+ iterator insert(const_iterator, value_type && value)
+ {
+ return emplace(std::move(value)).first;
+ }
+
+ template<typename It>
+ void insert(It begin, It end)
+ {
+ for (; begin != end; ++begin)
+ {
+ emplace(*begin);
+ }
+ }
+ void insert(std::initializer_list<value_type> il)
+ {
+ insert(il.begin(), il.end());
+ }
+
+ void rehash(size_t num_buckets)
+ {
+ num_buckets = std::max(num_buckets, static_cast<size_t>(std::ceil(num_elements / static_cast<double>(_max_load_factor))));
+ if (num_buckets == 0)
+ {
+ reset_to_empty_state();
+ return;
+ }
+ auto new_prime_index = hash_policy.next_size_over(num_buckets);
+ if (num_buckets == bucket_count())
+ return;
+ int8_t new_max_lookups = compute_max_lookups(num_buckets);
+ EntryPointer new_buckets(AllocatorTraits::allocate(*this, num_buckets + new_max_lookups));
+ EntryPointer special_end_item = new_buckets + static_cast<ptrdiff_t>(num_buckets + new_max_lookups - 1);
+ for (EntryPointer it = new_buckets; it != special_end_item; ++it)
+ it->distance_from_desired = -1;
+ special_end_item->distance_from_desired = Entry::special_end_value;
+ std::swap(entries, new_buckets);
+ std::swap(num_slots_minus_one, num_buckets);
+ --num_slots_minus_one;
+ hash_policy.commit(new_prime_index);
+ int8_t old_max_lookups = max_lookups;
+ max_lookups = new_max_lookups;
+ num_elements = 0;
+ for (EntryPointer it = new_buckets, end = it + static_cast<ptrdiff_t>(num_buckets + old_max_lookups); it != end; ++it)
+ {
+ if (it->has_value())
+ {
+ emplace(std::move(it->value));
+ it->destroy_value();
+ }
+ }
+ deallocate_data(new_buckets, num_buckets, old_max_lookups);
+ }
+
+ void reserve(size_t num_elements)
+ {
+ size_t required_buckets = num_buckets_for_reserve(num_elements);
+ if (required_buckets > bucket_count())
+ rehash(required_buckets);
+ }
+
+ // the return value is a type that can be converted to an iterator
+ // the reason for doing this is that it's not free to find the
+ // iterator pointing at the next element. if you care about the
+ // next iterator, turn the return value into an iterator
+ convertible_to_iterator erase(const_iterator to_erase)
+ {
+ EntryPointer current = to_erase.current;
+ current->destroy_value();
+ --num_elements;
+ for (EntryPointer next = current + ptrdiff_t(1); !next->is_at_desired_position(); ++current, ++next)
+ {
+ current->emplace(next->distance_from_desired - 1, std::move(next->value));
+ next->destroy_value();
+ }
+ return { to_erase.current };
+ }
+
+ iterator erase(const_iterator begin_it, const_iterator end_it)
+ {
+ if (begin_it == end_it)
+ return { begin_it.current };
+ for (EntryPointer it = begin_it.current, end = end_it.current; it != end; ++it)
+ {
+ if (it->has_value())
+ {
+ it->destroy_value();
+ --num_elements;
+ }
+ }
+ if (end_it == this->end())
+ return this->end();
+ ptrdiff_t num_to_move = std::min(static_cast<ptrdiff_t>(end_it.current->distance_from_desired), end_it.current - begin_it.current);
+ EntryPointer to_return = end_it.current - num_to_move;
+ for (EntryPointer it = end_it.current; !it->is_at_desired_position();)
+ {
+ EntryPointer target = it - num_to_move;
+ target->emplace(it->distance_from_desired - num_to_move, std::move(it->value));
+ it->destroy_value();
+ ++it;
+ num_to_move = std::min(static_cast<ptrdiff_t>(it->distance_from_desired), num_to_move);
+ }
+ return { to_return };
+ }
+
+ size_t erase(const FindKey & key)
+ {
+ auto found = find(key);
+ if (found == end())
+ return 0;
+ else
+ {
+ erase(found);
+ return 1;
+ }
+ }
+
+ void clear()
+ {
+ for (EntryPointer it = entries, end = it + static_cast<ptrdiff_t>(num_slots_minus_one + max_lookups); it != end; ++it)
+ {
+ if (it->has_value())
+ it->destroy_value();
+ }
+ num_elements = 0;
+ }
+
+ void shrink_to_fit()
+ {
+ rehash_for_other_container(*this);
+ }
+
+ void swap(sherwood_v3_table & other)
+ {
+ using std::swap;
+ swap_pointers(other);
+ swap(static_cast<ArgumentHash &>(*this), static_cast<ArgumentHash &>(other));
+ swap(static_cast<ArgumentEqual &>(*this), static_cast<ArgumentEqual &>(other));
+ if (AllocatorTraits::propagate_on_container_swap::value)
+ swap(static_cast<EntryAlloc &>(*this), static_cast<EntryAlloc &>(other));
+ }
+
+ size_t size() const
+ {
+ return num_elements;
+ }
+ size_t max_size() const
+ {
+ return (AllocatorTraits::max_size(*this)) / sizeof(Entry);
+ }
+ size_t bucket_count() const
+ {
+ return num_slots_minus_one ? num_slots_minus_one + 1 : 0;
+ }
+ size_type max_bucket_count() const
+ {
+ return (AllocatorTraits::max_size(*this) - min_lookups) / sizeof(Entry);
+ }
+ size_t bucket(const FindKey & key) const
+ {
+ return hash_policy.index_for_hash(hash_object(key), num_slots_minus_one);
+ }
+ float load_factor() const
+ {
+ size_t buckets = bucket_count();
+ if (buckets)
+ return static_cast<float>(num_elements) / bucket_count();
+ else
+ return 0;
+ }
+ void max_load_factor(float value)
+ {
+ _max_load_factor = value;
+ }
+ float max_load_factor() const
+ {
+ return _max_load_factor;
+ }
+
+ bool empty() const
+ {
+ return num_elements == 0;
+ }
+
+private:
+ EntryPointer entries = Entry::empty_default_table();
+ size_t num_slots_minus_one = 0;
+ typename HashPolicySelector<ArgumentHash>::type hash_policy;
+ int8_t max_lookups = detailv3::min_lookups - 1;
+ float _max_load_factor = 0.5f;
+ size_t num_elements = 0;
+
+ static int8_t compute_max_lookups(size_t num_buckets)
+ {
+ int8_t desired = detailv3::log2(num_buckets);
+ return std::max(detailv3::min_lookups, desired);
+ }
+
+ size_t num_buckets_for_reserve(size_t num_elements) const
+ {
+ return static_cast<size_t>(std::ceil(num_elements / std::min(0.5, static_cast<double>(_max_load_factor))));
+ }
+ void rehash_for_other_container(const sherwood_v3_table & other)
+ {
+ rehash(std::min(num_buckets_for_reserve(other.size()), other.bucket_count()));
+ }
+
+ void swap_pointers(sherwood_v3_table & other)
+ {
+ using std::swap;
+ swap(hash_policy, other.hash_policy);
+ swap(entries, other.entries);
+ swap(num_slots_minus_one, other.num_slots_minus_one);
+ swap(num_elements, other.num_elements);
+ swap(max_lookups, other.max_lookups);
+ swap(_max_load_factor, other._max_load_factor);
+ }
+
+ template<typename Key, typename... Args>
+ SKA_NOINLINE(std::pair<iterator, bool>) emplace_new_key(int8_t distance_from_desired, EntryPointer current_entry, Key && key, Args &&... args)
+ {
+ using std::swap;
+ if (num_slots_minus_one == 0 || distance_from_desired == max_lookups || num_elements + 1 > (num_slots_minus_one + 1) * static_cast<double>(_max_load_factor))
+ {
+ grow();
+ return emplace(std::forward<Key>(key), std::forward<Args>(args)...);
+ }
+ else if (current_entry->is_empty())
+ {
+ current_entry->emplace(distance_from_desired, std::forward<Key>(key), std::forward<Args>(args)...);
+ ++num_elements;
+ return { { current_entry }, true };
+ }
+ value_type to_insert(std::forward<Key>(key), std::forward<Args>(args)...);
+ swap(distance_from_desired, current_entry->distance_from_desired);
+ swap(to_insert, current_entry->value);
+ iterator result = { current_entry };
+ for (++distance_from_desired, ++current_entry;; ++current_entry)
+ {
+ if (current_entry->is_empty())
+ {
+ current_entry->emplace(distance_from_desired, std::move(to_insert));
+ ++num_elements;
+ return { result, true };
+ }
+ else if (current_entry->distance_from_desired < distance_from_desired)
+ {
+ swap(distance_from_desired, current_entry->distance_from_desired);
+ swap(to_insert, current_entry->value);
+ ++distance_from_desired;
+ }
+ else
+ {
+ ++distance_from_desired;
+ if (distance_from_desired == max_lookups)
+ {
+ swap(to_insert, result.current->value);
+ grow();
+ return emplace(std::move(to_insert));
+ }
+ }
+ }
+ }
+
+ void grow()
+ {
+ rehash(std::max(size_t(4), 2 * bucket_count()));
+ }
+
+ void deallocate_data(EntryPointer begin, size_t num_slots_minus_one, int8_t max_lookups)
+ {
+ if (begin != Entry::empty_default_table())
+ {
+ AllocatorTraits::deallocate(*this, begin, num_slots_minus_one + max_lookups + 1);
+ }
+ }
+
+ void reset_to_empty_state()
+ {
+ deallocate_data(entries, num_slots_minus_one, max_lookups);
+ entries = Entry::empty_default_table();
+ num_slots_minus_one = 0;
+ hash_policy.reset();
+ max_lookups = detailv3::min_lookups - 1;
+ }
+
+ template<typename U>
+ size_t hash_object(const U & key)
+ {
+ return static_cast<Hasher &>(*this)(key);
+ }
+ template<typename U>
+ size_t hash_object(const U & key) const
+ {
+ return static_cast<const Hasher &>(*this)(key);
+ }
+ template<typename L, typename R>
+ bool compares_equal(const L & lhs, const R & rhs)
+ {
+ return static_cast<Equal &>(*this)(lhs, rhs);
+ }
+
+ struct convertible_to_iterator
+ {
+ EntryPointer it;
+
+ operator iterator()
+ {
+ if (it->has_value())
+ return { it };
+ else
+ return ++iterator{it};
+ }
+ operator const_iterator()
+ {
+ if (it->has_value())
+ return { it };
+ else
+ return ++const_iterator{it};
+ }
+ };
+
+};
+}
+
+struct prime_number_hash_policy
+{
+ static size_t mod0(size_t) { return 0llu; }
+ static size_t mod2(size_t hash) { return hash % 2llu; }
+ static size_t mod3(size_t hash) { return hash % 3llu; }
+ static size_t mod5(size_t hash) { return hash % 5llu; }
+ static size_t mod7(size_t hash) { return hash % 7llu; }
+ static size_t mod11(size_t hash) { return hash % 11llu; }
+ static size_t mod13(size_t hash) { return hash % 13llu; }
+ static size_t mod17(size_t hash) { return hash % 17llu; }
+ static size_t mod23(size_t hash) { return hash % 23llu; }
+ static size_t mod29(size_t hash) { return hash % 29llu; }
+ static size_t mod37(size_t hash) { return hash % 37llu; }
+ static size_t mod47(size_t hash) { return hash % 47llu; }
+ static size_t mod59(size_t hash) { return hash % 59llu; }
+ static size_t mod73(size_t hash) { return hash % 73llu; }
+ static size_t mod97(size_t hash) { return hash % 97llu; }
+ static size_t mod127(size_t hash) { return hash % 127llu; }
+ static size_t mod151(size_t hash) { return hash % 151llu; }
+ static size_t mod197(size_t hash) { return hash % 197llu; }
+ static size_t mod251(size_t hash) { return hash % 251llu; }
+ static size_t mod313(size_t hash) { return hash % 313llu; }
+ static size_t mod397(size_t hash) { return hash % 397llu; }
+ static size_t mod499(size_t hash) { return hash % 499llu; }
+ static size_t mod631(size_t hash) { return hash % 631llu; }
+ static size_t mod797(size_t hash) { return hash % 797llu; }
+ static size_t mod1009(size_t hash) { return hash % 1009llu; }
+ static size_t mod1259(size_t hash) { return hash % 1259llu; }
+ static size_t mod1597(size_t hash) { return hash % 1597llu; }
+ static size_t mod2011(size_t hash) { return hash % 2011llu; }
+ static size_t mod2539(size_t hash) { return hash % 2539llu; }
+ static size_t mod3203(size_t hash) { return hash % 3203llu; }
+ static size_t mod4027(size_t hash) { return hash % 4027llu; }
+ static size_t mod5087(size_t hash) { return hash % 5087llu; }
+ static size_t mod6421(size_t hash) { return hash % 6421llu; }
+ static size_t mod8089(size_t hash) { return hash % 8089llu; }
+ static size_t mod10193(size_t hash) { return hash % 10193llu; }
+ static size_t mod12853(size_t hash) { return hash % 12853llu; }
+ static size_t mod16193(size_t hash) { return hash % 16193llu; }
+ static size_t mod20399(size_t hash) { return hash % 20399llu; }
+ static size_t mod25717(size_t hash) { return hash % 25717llu; }
+ static size_t mod32401(size_t hash) { return hash % 32401llu; }
+ static size_t mod40823(size_t hash) { return hash % 40823llu; }
+ static size_t mod51437(size_t hash) { return hash % 51437llu; }
+ static size_t mod64811(size_t hash) { return hash % 64811llu; }
+ static size_t mod81649(size_t hash) { return hash % 81649llu; }
+ static size_t mod102877(size_t hash) { return hash % 102877llu; }
+ static size_t mod129607(size_t hash) { return hash % 129607llu; }
+ static size_t mod163307(size_t hash) { return hash % 163307llu; }
+ static size_t mod205759(size_t hash) { return hash % 205759llu; }
+ static size_t mod259229(size_t hash) { return hash % 259229llu; }
+ static size_t mod326617(size_t hash) { return hash % 326617llu; }
+ static size_t mod411527(size_t hash) { return hash % 411527llu; }
+ static size_t mod518509(size_t hash) { return hash % 518509llu; }
+ static size_t mod653267(size_t hash) { return hash % 653267llu; }
+ static size_t mod823117(size_t hash) { return hash % 823117llu; }
+ static size_t mod1037059(size_t hash) { return hash % 1037059llu; }
+ static size_t mod1306601(size_t hash) { return hash % 1306601llu; }
+ static size_t mod1646237(size_t hash) { return hash % 1646237llu; }
+ static size_t mod2074129(size_t hash) { return hash % 2074129llu; }
+ static size_t mod2613229(size_t hash) { return hash % 2613229llu; }
+ static size_t mod3292489(size_t hash) { return hash % 3292489llu; }
+ static size_t mod4148279(size_t hash) { return hash % 4148279llu; }
+ static size_t mod5226491(size_t hash) { return hash % 5226491llu; }
+ static size_t mod6584983(size_t hash) { return hash % 6584983llu; }
+ static size_t mod8296553(size_t hash) { return hash % 8296553llu; }
+ static size_t mod10453007(size_t hash) { return hash % 10453007llu; }
+ static size_t mod13169977(size_t hash) { return hash % 13169977llu; }
+ static size_t mod16593127(size_t hash) { return hash % 16593127llu; }
+ static size_t mod20906033(size_t hash) { return hash % 20906033llu; }
+ static size_t mod26339969(size_t hash) { return hash % 26339969llu; }
+ static size_t mod33186281(size_t hash) { return hash % 33186281llu; }
+ static size_t mod41812097(size_t hash) { return hash % 41812097llu; }
+ static size_t mod52679969(size_t hash) { return hash % 52679969llu; }
+ static size_t mod66372617(size_t hash) { return hash % 66372617llu; }
+ static size_t mod83624237(size_t hash) { return hash % 83624237llu; }
+ static size_t mod105359939(size_t hash) { return hash % 105359939llu; }
+ static size_t mod132745199(size_t hash) { return hash % 132745199llu; }
+ static size_t mod167248483(size_t hash) { return hash % 167248483llu; }
+ static size_t mod210719881(size_t hash) { return hash % 210719881llu; }
+ static size_t mod265490441(size_t hash) { return hash % 265490441llu; }
+ static size_t mod334496971(size_t hash) { return hash % 334496971llu; }
+ static size_t mod421439783(size_t hash) { return hash % 421439783llu; }
+ static size_t mod530980861(size_t hash) { return hash % 530980861llu; }
+ static size_t mod668993977(size_t hash) { return hash % 668993977llu; }
+ static size_t mod842879579(size_t hash) { return hash % 842879579llu; }
+ static size_t mod1061961721(size_t hash) { return hash % 1061961721llu; }
+ static size_t mod1337987929(size_t hash) { return hash % 1337987929llu; }
+ static size_t mod1685759167(size_t hash) { return hash % 1685759167llu; }
+ static size_t mod2123923447(size_t hash) { return hash % 2123923447llu; }
+ static size_t mod2675975881(size_t hash) { return hash % 2675975881llu; }
+ static size_t mod3371518343(size_t hash) { return hash % 3371518343llu; }
+ static size_t mod4247846927(size_t hash) { return hash % 4247846927llu; }
+ static size_t mod5351951779(size_t hash) { return hash % 5351951779llu; }
+ static size_t mod6743036717(size_t hash) { return hash % 6743036717llu; }
+ static size_t mod8495693897(size_t hash) { return hash % 8495693897llu; }
+ static size_t mod10703903591(size_t hash) { return hash % 10703903591llu; }
+ static size_t mod13486073473(size_t hash) { return hash % 13486073473llu; }
+ static size_t mod16991387857(size_t hash) { return hash % 16991387857llu; }
+ static size_t mod21407807219(size_t hash) { return hash % 21407807219llu; }
+ static size_t mod26972146961(size_t hash) { return hash % 26972146961llu; }
+ static size_t mod33982775741(size_t hash) { return hash % 33982775741llu; }
+ static size_t mod42815614441(size_t hash) { return hash % 42815614441llu; }
+ static size_t mod53944293929(size_t hash) { return hash % 53944293929llu; }
+ static size_t mod67965551447(size_t hash) { return hash % 67965551447llu; }
+ static size_t mod85631228929(size_t hash) { return hash % 85631228929llu; }
+ static size_t mod107888587883(size_t hash) { return hash % 107888587883llu; }
+ static size_t mod135931102921(size_t hash) { return hash % 135931102921llu; }
+ static size_t mod171262457903(size_t hash) { return hash % 171262457903llu; }
+ static size_t mod215777175787(size_t hash) { return hash % 215777175787llu; }
+ static size_t mod271862205833(size_t hash) { return hash % 271862205833llu; }
+ static size_t mod342524915839(size_t hash) { return hash % 342524915839llu; }
+ static size_t mod431554351609(size_t hash) { return hash % 431554351609llu; }
+ static size_t mod543724411781(size_t hash) { return hash % 543724411781llu; }
+ static size_t mod685049831731(size_t hash) { return hash % 685049831731llu; }
+ static size_t mod863108703229(size_t hash) { return hash % 863108703229llu; }
+ static size_t mod1087448823553(size_t hash) { return hash % 1087448823553llu; }
+ static size_t mod1370099663459(size_t hash) { return hash % 1370099663459llu; }
+ static size_t mod1726217406467(size_t hash) { return hash % 1726217406467llu; }
+ static size_t mod2174897647073(size_t hash) { return hash % 2174897647073llu; }
+ static size_t mod2740199326961(size_t hash) { return hash % 2740199326961llu; }
+ static size_t mod3452434812973(size_t hash) { return hash % 3452434812973llu; }
+ static size_t mod4349795294267(size_t hash) { return hash % 4349795294267llu; }
+ static size_t mod5480398654009(size_t hash) { return hash % 5480398654009llu; }
+ static size_t mod6904869625999(size_t hash) { return hash % 6904869625999llu; }
+ static size_t mod8699590588571(size_t hash) { return hash % 8699590588571llu; }
+ static size_t mod10960797308051(size_t hash) { return hash % 10960797308051llu; }
+ static size_t mod13809739252051(size_t hash) { return hash % 13809739252051llu; }
+ static size_t mod17399181177241(size_t hash) { return hash % 17399181177241llu; }
+ static size_t mod21921594616111(size_t hash) { return hash % 21921594616111llu; }
+ static size_t mod27619478504183(size_t hash) { return hash % 27619478504183llu; }
+ static size_t mod34798362354533(size_t hash) { return hash % 34798362354533llu; }
+ static size_t mod43843189232363(size_t hash) { return hash % 43843189232363llu; }
+ static size_t mod55238957008387(size_t hash) { return hash % 55238957008387llu; }
+ static size_t mod69596724709081(size_t hash) { return hash % 69596724709081llu; }
+ static size_t mod87686378464759(size_t hash) { return hash % 87686378464759llu; }
+ static size_t mod110477914016779(size_t hash) { return hash % 110477914016779llu; }
+ static size_t mod139193449418173(size_t hash) { return hash % 139193449418173llu; }
+ static size_t mod175372756929481(size_t hash) { return hash % 175372756929481llu; }
+ static size_t mod220955828033581(size_t hash) { return hash % 220955828033581llu; }
+ static size_t mod278386898836457(size_t hash) { return hash % 278386898836457llu; }
+ static size_t mod350745513859007(size_t hash) { return hash % 350745513859007llu; }
+ static size_t mod441911656067171(size_t hash) { return hash % 441911656067171llu; }
+ static size_t mod556773797672909(size_t hash) { return hash % 556773797672909llu; }
+ static size_t mod701491027718027(size_t hash) { return hash % 701491027718027llu; }
+ static size_t mod883823312134381(size_t hash) { return hash % 883823312134381llu; }
+ static size_t mod1113547595345903(size_t hash) { return hash % 1113547595345903llu; }
+ static size_t mod1402982055436147(size_t hash) { return hash % 1402982055436147llu; }
+ static size_t mod1767646624268779(size_t hash) { return hash % 1767646624268779llu; }
+ static size_t mod2227095190691797(size_t hash) { return hash % 2227095190691797llu; }
+ static size_t mod2805964110872297(size_t hash) { return hash % 2805964110872297llu; }
+ static size_t mod3535293248537579(size_t hash) { return hash % 3535293248537579llu; }
+ static size_t mod4454190381383713(size_t hash) { return hash % 4454190381383713llu; }
+ static size_t mod5611928221744609(size_t hash) { return hash % 5611928221744609llu; }
+ static size_t mod7070586497075177(size_t hash) { return hash % 7070586497075177llu; }
+ static size_t mod8908380762767489(size_t hash) { return hash % 8908380762767489llu; }
+ static size_t mod11223856443489329(size_t hash) { return hash % 11223856443489329llu; }
+ static size_t mod14141172994150357(size_t hash) { return hash % 14141172994150357llu; }
+ static size_t mod17816761525534927(size_t hash) { return hash % 17816761525534927llu; }
+ static size_t mod22447712886978529(size_t hash) { return hash % 22447712886978529llu; }
+ static size_t mod28282345988300791(size_t hash) { return hash % 28282345988300791llu; }
+ static size_t mod35633523051069991(size_t hash) { return hash % 35633523051069991llu; }
+ static size_t mod44895425773957261(size_t hash) { return hash % 44895425773957261llu; }
+ static size_t mod56564691976601587(size_t hash) { return hash % 56564691976601587llu; }
+ static size_t mod71267046102139967(size_t hash) { return hash % 71267046102139967llu; }
+ static size_t mod89790851547914507(size_t hash) { return hash % 89790851547914507llu; }
+ static size_t mod113129383953203213(size_t hash) { return hash % 113129383953203213llu; }
+ static size_t mod142534092204280003(size_t hash) { return hash % 142534092204280003llu; }
+ static size_t mod179581703095829107(size_t hash) { return hash % 179581703095829107llu; }
+ static size_t mod226258767906406483(size_t hash) { return hash % 226258767906406483llu; }
+ static size_t mod285068184408560057(size_t hash) { return hash % 285068184408560057llu; }
+ static size_t mod359163406191658253(size_t hash) { return hash % 359163406191658253llu; }
+ static size_t mod452517535812813007(size_t hash) { return hash % 452517535812813007llu; }
+ static size_t mod570136368817120201(size_t hash) { return hash % 570136368817120201llu; }
+ static size_t mod718326812383316683(size_t hash) { return hash % 718326812383316683llu; }
+ static size_t mod905035071625626043(size_t hash) { return hash % 905035071625626043llu; }
+ static size_t mod1140272737634240411(size_t hash) { return hash % 1140272737634240411llu; }
+ static size_t mod1436653624766633509(size_t hash) { return hash % 1436653624766633509llu; }
+ static size_t mod1810070143251252131(size_t hash) { return hash % 1810070143251252131llu; }
+ static size_t mod2280545475268481167(size_t hash) { return hash % 2280545475268481167llu; }
+ static size_t mod2873307249533267101(size_t hash) { return hash % 2873307249533267101llu; }
+ static size_t mod3620140286502504283(size_t hash) { return hash % 3620140286502504283llu; }
+ static size_t mod4561090950536962147(size_t hash) { return hash % 4561090950536962147llu; }
+ static size_t mod5746614499066534157(size_t hash) { return hash % 5746614499066534157llu; }
+ static size_t mod7240280573005008577(size_t hash) { return hash % 7240280573005008577llu; }
+ static size_t mod9122181901073924329(size_t hash) { return hash % 9122181901073924329llu; }
+ static size_t mod11493228998133068689(size_t hash) { return hash % 11493228998133068689llu; }
+ static size_t mod14480561146010017169(size_t hash) { return hash % 14480561146010017169llu; }
+ static size_t mod18446744073709551557(size_t hash) { return hash % 18446744073709551557llu; }
+
+ using mod_function = size_t (*)(size_t);
+
+ mod_function next_size_over(size_t & size) const
+ {
+ // prime numbers generated by the following method:
+ // 1. start with a prime p = 2
+ // 2. go to wolfram alpha and get p = NextPrime(2 * p)
+ // 3. repeat 2. until you overflow 64 bits
+ // you now have large gaps which you would hit if somebody called reserve() with an unlucky number.
+ // 4. to fill the gaps for every prime p go to wolfram alpha and get ClosestPrime(p * 2^(1/3)) and ClosestPrime(p * 2^(2/3)) and put those in the gaps
+ // 5. get PrevPrime(2^64) and put it at the end
+ static constexpr const size_t prime_list[] =
+ {
+ 2llu, 3llu, 5llu, 7llu, 11llu, 13llu, 17llu, 23llu, 29llu, 37llu, 47llu,
+ 59llu, 73llu, 97llu, 127llu, 151llu, 197llu, 251llu, 313llu, 397llu,
+ 499llu, 631llu, 797llu, 1009llu, 1259llu, 1597llu, 2011llu, 2539llu,
+ 3203llu, 4027llu, 5087llu, 6421llu, 8089llu, 10193llu, 12853llu, 16193llu,
+ 20399llu, 25717llu, 32401llu, 40823llu, 51437llu, 64811llu, 81649llu,
+ 102877llu, 129607llu, 163307llu, 205759llu, 259229llu, 326617llu,
+ 411527llu, 518509llu, 653267llu, 823117llu, 1037059llu, 1306601llu,
+ 1646237llu, 2074129llu, 2613229llu, 3292489llu, 4148279llu, 5226491llu,
+ 6584983llu, 8296553llu, 10453007llu, 13169977llu, 16593127llu, 20906033llu,
+ 26339969llu, 33186281llu, 41812097llu, 52679969llu, 66372617llu,
+ 83624237llu, 105359939llu, 132745199llu, 167248483llu, 210719881llu,
+ 265490441llu, 334496971llu, 421439783llu, 530980861llu, 668993977llu,
+ 842879579llu, 1061961721llu, 1337987929llu, 1685759167llu, 2123923447llu,
+ 2675975881llu, 3371518343llu, 4247846927llu, 5351951779llu, 6743036717llu,
+ 8495693897llu, 10703903591llu, 13486073473llu, 16991387857llu,
+ 21407807219llu, 26972146961llu, 33982775741llu, 42815614441llu,
+ 53944293929llu, 67965551447llu, 85631228929llu, 107888587883llu,
+ 135931102921llu, 171262457903llu, 215777175787llu, 271862205833llu,
+ 342524915839llu, 431554351609llu, 543724411781llu, 685049831731llu,
+ 863108703229llu, 1087448823553llu, 1370099663459llu, 1726217406467llu,
+ 2174897647073llu, 2740199326961llu, 3452434812973llu, 4349795294267llu,
+ 5480398654009llu, 6904869625999llu, 8699590588571llu, 10960797308051llu,
+ 13809739252051llu, 17399181177241llu, 21921594616111llu, 27619478504183llu,
+ 34798362354533llu, 43843189232363llu, 55238957008387llu, 69596724709081llu,
+ 87686378464759llu, 110477914016779llu, 139193449418173llu,
+ 175372756929481llu, 220955828033581llu, 278386898836457llu,
+ 350745513859007llu, 441911656067171llu, 556773797672909llu,
+ 701491027718027llu, 883823312134381llu, 1113547595345903llu,
+ 1402982055436147llu, 1767646624268779llu, 2227095190691797llu,
+ 2805964110872297llu, 3535293248537579llu, 4454190381383713llu,
+ 5611928221744609llu, 7070586497075177llu, 8908380762767489llu,
+ 11223856443489329llu, 14141172994150357llu, 17816761525534927llu,
+ 22447712886978529llu, 28282345988300791llu, 35633523051069991llu,
+ 44895425773957261llu, 56564691976601587llu, 71267046102139967llu,
+ 89790851547914507llu, 113129383953203213llu, 142534092204280003llu,
+ 179581703095829107llu, 226258767906406483llu, 285068184408560057llu,
+ 359163406191658253llu, 452517535812813007llu, 570136368817120201llu,
+ 718326812383316683llu, 905035071625626043llu, 1140272737634240411llu,
+ 1436653624766633509llu, 1810070143251252131llu, 2280545475268481167llu,
+ 2873307249533267101llu, 3620140286502504283llu, 4561090950536962147llu,
+ 5746614499066534157llu, 7240280573005008577llu, 9122181901073924329llu,
+ 11493228998133068689llu, 14480561146010017169llu, 18446744073709551557llu
+ };
+ static constexpr size_t (* const mod_functions[])(size_t) =
+ {
+ &mod0, &mod2, &mod3, &mod5, &mod7, &mod11, &mod13, &mod17, &mod23, &mod29, &mod37,
+ &mod47, &mod59, &mod73, &mod97, &mod127, &mod151, &mod197, &mod251, &mod313, &mod397,
+ &mod499, &mod631, &mod797, &mod1009, &mod1259, &mod1597, &mod2011, &mod2539, &mod3203,
+ &mod4027, &mod5087, &mod6421, &mod8089, &mod10193, &mod12853, &mod16193, &mod20399,
+ &mod25717, &mod32401, &mod40823, &mod51437, &mod64811, &mod81649, &mod102877,
+ &mod129607, &mod163307, &mod205759, &mod259229, &mod326617, &mod411527, &mod518509,
+ &mod653267, &mod823117, &mod1037059, &mod1306601, &mod1646237, &mod2074129,
+ &mod2613229, &mod3292489, &mod4148279, &mod5226491, &mod6584983, &mod8296553,
+ &mod10453007, &mod13169977, &mod16593127, &mod20906033, &mod26339969, &mod33186281,
+ &mod41812097, &mod52679969, &mod66372617, &mod83624237, &mod105359939, &mod132745199,
+ &mod167248483, &mod210719881, &mod265490441, &mod334496971, &mod421439783,
+ &mod530980861, &mod668993977, &mod842879579, &mod1061961721, &mod1337987929,
+ &mod1685759167, &mod2123923447, &mod2675975881, &mod3371518343, &mod4247846927,
+ &mod5351951779, &mod6743036717, &mod8495693897, &mod10703903591, &mod13486073473,
+ &mod16991387857, &mod21407807219, &mod26972146961, &mod33982775741, &mod42815614441,
+ &mod53944293929, &mod67965551447, &mod85631228929, &mod107888587883, &mod135931102921,
+ &mod171262457903, &mod215777175787, &mod271862205833, &mod342524915839,
+ &mod431554351609, &mod543724411781, &mod685049831731, &mod863108703229,
+ &mod1087448823553, &mod1370099663459, &mod1726217406467, &mod2174897647073,
+ &mod2740199326961, &mod3452434812973, &mod4349795294267, &mod5480398654009,
+ &mod6904869625999, &mod8699590588571, &mod10960797308051, &mod13809739252051,
+ &mod17399181177241, &mod21921594616111, &mod27619478504183, &mod34798362354533,
+ &mod43843189232363, &mod55238957008387, &mod69596724709081, &mod87686378464759,
+ &mod110477914016779, &mod139193449418173, &mod175372756929481, &mod220955828033581,
+ &mod278386898836457, &mod350745513859007, &mod441911656067171, &mod556773797672909,
+ &mod701491027718027, &mod883823312134381, &mod1113547595345903, &mod1402982055436147,
+ &mod1767646624268779, &mod2227095190691797, &mod2805964110872297, &mod3535293248537579,
+ &mod4454190381383713, &mod5611928221744609, &mod7070586497075177, &mod8908380762767489,
+ &mod11223856443489329, &mod14141172994150357, &mod17816761525534927,
+ &mod22447712886978529, &mod28282345988300791, &mod35633523051069991,
+ &mod44895425773957261, &mod56564691976601587, &mod71267046102139967,
+ &mod89790851547914507, &mod113129383953203213, &mod142534092204280003,
+ &mod179581703095829107, &mod226258767906406483, &mod285068184408560057,
+ &mod359163406191658253, &mod452517535812813007, &mod570136368817120201,
+ &mod718326812383316683, &mod905035071625626043, &mod1140272737634240411,
+ &mod1436653624766633509, &mod1810070143251252131, &mod2280545475268481167,
+ &mod2873307249533267101, &mod3620140286502504283, &mod4561090950536962147,
+ &mod5746614499066534157, &mod7240280573005008577, &mod9122181901073924329,
+ &mod11493228998133068689, &mod14480561146010017169, &mod18446744073709551557
+ };
+ const size_t * found = std::lower_bound(std::begin(prime_list), std::end(prime_list) - 1, size);
+ size = *found;
+ return mod_functions[1 + found - prime_list];
+ }
+ void commit(mod_function new_mod_function)
+ {
+ current_mod_function = new_mod_function; // adopt the modulo function chosen by next_size_over()
+ }
+ void reset()
+ {
+ current_mod_function = &mod0; // back to the empty-table state
+ }
+
+ size_t index_for_hash(size_t hash, size_t /*num_slots_minus_one*/) const
+ {
+ return current_mod_function(hash); // indirect call; each target has a compile-time-constant divisor
+ }
+ size_t keep_in_range(size_t index, size_t num_slots_minus_one) const
+ {
+ return index > num_slots_minus_one ? current_mod_function(index) : index; // cheap path when already in range
+ }
+
+private:
+ mod_function current_mod_function = &mod0;
+};
+
+struct power_of_two_hash_policy
+{
+ size_t index_for_hash(size_t hash, size_t num_slots_minus_one) const
+ {
+ return hash & num_slots_minus_one; // table size is a power of two, so masking equals modulo
+ }
+ size_t keep_in_range(size_t index, size_t num_slots_minus_one) const
+ {
+ return index_for_hash(index, num_slots_minus_one);
+ }
+ int8_t next_size_over(size_t & size) const
+ {
+ size = detailv3::next_power_of_two(size); // round the requested size up to a power of two
+ return 0; // no per-size state needed; dummy value consumed by commit()
+ }
+ void commit(int8_t)
+ {
+ }
+ void reset()
+ {
+ }
+
+};
+
+struct fibonacci_hash_policy
+{
+ size_t index_for_hash(size_t hash, size_t /*num_slots_minus_one*/) const
+ {
+ return (11400714819323198485ull * hash) >> shift; // multiplier is 2^64 / golden ratio (Fibonacci hashing)
+ }
+ size_t keep_in_range(size_t index, size_t num_slots_minus_one) const
+ {
+ return index & num_slots_minus_one; // power-of-two table, so a mask suffices
+ }
+
+ int8_t next_size_over(size_t & size) const
+ {
+ size = std::max(size_t(2), detailv3::next_power_of_two(size));
+ return 64 - detailv3::log2(size); // shift that keeps the top log2(size) bits of the product
+ }
+ void commit(int8_t shift)
+ {
+ this->shift = shift;
+ }
+ void reset()
+ {
+ shift = 63; // maps every hash to bucket 0 or 1 for the empty table
+ }
+
+private:
+ int8_t shift = 63;
+};
+
+template<typename K, typename V, typename H = std::hash<K>, typename E = std::equal_to<K>, typename A = std::allocator<std::pair<K, V> > >
+class flat_hash_map
+ : public detailv3::sherwood_v3_table
+ <
+ std::pair<K, V>,
+ K,
+ H,
+ detailv3::KeyOrValueHasher<K, std::pair<K, V>, H>,
+ E,
+ detailv3::KeyOrValueEquality<K, std::pair<K, V>, E>,
+ A,
+ typename std::allocator_traits<A>::template rebind_alloc<detailv3::sherwood_v3_entry<std::pair<K, V>>>
+ >
+{
+ using Table = detailv3::sherwood_v3_table
+ <
+ std::pair<K, V>,
+ K,
+ H,
+ detailv3::KeyOrValueHasher<K, std::pair<K, V>, H>,
+ E,
+ detailv3::KeyOrValueEquality<K, std::pair<K, V>, E>,
+ A,
+ typename std::allocator_traits<A>::template rebind_alloc<detailv3::sherwood_v3_entry<std::pair<K, V>>>
+ >;
+public:
+
+ using key_type = K;
+ using mapped_type = V;
+
+ using Table::Table;
+ flat_hash_map() // user-declared default ctor alongside the inherited Table ctors
+ {
+ }
+
+ inline V & operator[](const K & key)
+ {
+ return emplace(key, convertible_to_value()).first->second; // value comes from the conversion helper (see convertible_to_value below)
+ }
+ inline V & operator[](K && key)
+ {
+ return emplace(std::move(key), convertible_to_value()).first->second;
+ }
+ V & at(const K & key)
+ {
+ auto found = this->find(key);
+ if (found == this->end())
+ throw std::out_of_range("Argument passed to at() was not in the map.");
+ return found->second;
+ }
+ const V & at(const K & key) const
+ {
+ auto found = this->find(key);
+ if (found == this->end())
+ throw std::out_of_range("Argument passed to at() was not in the map.");
+ return found->second;
+ }
+
+ using Table::emplace;
+ std::pair<typename Table::iterator, bool> emplace() // no-arg emplace: default key, default value
+ {
+ return emplace(key_type(), convertible_to_value());
+ }
+ template<typename M>
+ std::pair<typename Table::iterator, bool> insert_or_assign(const key_type & key, M && m)
+ {
+ auto emplace_result = emplace(key, std::forward<M>(m));
+ if (!emplace_result.second)
+ emplace_result.first->second = std::forward<M>(m); // key already present: overwrite the mapped value
+ return emplace_result;
+ }
+ template<typename M>
+ std::pair<typename Table::iterator, bool> insert_or_assign(key_type && key, M && m)
+ {
+ auto emplace_result = emplace(std::move(key), std::forward<M>(m));
+ if (!emplace_result.second)
+ emplace_result.first->second = std::forward<M>(m); // key already present: overwrite the mapped value
+ return emplace_result;
+ }
+ template<typename M>
+ typename Table::iterator insert_or_assign(typename Table::const_iterator, const key_type & key, M && m)
+ {
+ return insert_or_assign(key, std::forward<M>(m)).first; // hint iterator is ignored
+ }
+ template<typename M>
+ typename Table::iterator insert_or_assign(typename Table::const_iterator, key_type && key, M && m)
+ {
+ return insert_or_assign(std::move(key), std::forward<M>(m)).first; // hint iterator is ignored
+ }
+
+ friend bool operator==(const flat_hash_map & lhs, const flat_hash_map & rhs)
+ {
+ if (lhs.size() != rhs.size())
+ return false;
+ for (const typename Table::value_type & value : lhs) // order-independent: look each lhs entry up in rhs
+ {
+ auto found = rhs.find(value.first);
+ if (found == rhs.end())
+ return false;
+ else if (value.second != found->second)
+ return false;
+ }
+ return true;
+ }
+ friend bool operator!=(const flat_hash_map & lhs, const flat_hash_map & rhs)
+ {
+ return !(lhs == rhs);
+ }
+
+private:
+ struct convertible_to_value
+ {
+ operator V() const // constructs the V only when the conversion is actually invoked
+ {
+ return V();
+ }
+ };
+};
+
+template<typename T, typename H = std::hash<T>, typename E = std::equal_to<T>, typename A = std::allocator<T> >
+class flat_hash_set
+ : public detailv3::sherwood_v3_table
+ <
+ T,
+ T,
+ H,
+ detailv3::functor_storage<size_t, H>,
+ E,
+ detailv3::functor_storage<bool, E>,
+ A,
+ typename std::allocator_traits<A>::template rebind_alloc<detailv3::sherwood_v3_entry<T>>
+ >
+{
+ using Table = detailv3::sherwood_v3_table
+ <
+ T,
+ T,
+ H,
+ detailv3::functor_storage<size_t, H>,
+ E,
+ detailv3::functor_storage<bool, E>,
+ A,
+ typename std::allocator_traits<A>::template rebind_alloc<detailv3::sherwood_v3_entry<T>>
+ >;
+public:
+
+ using key_type = T;
+
+ using Table::Table;
+ flat_hash_set() // user-declared default ctor alongside the inherited Table ctors
+ {
+ }
+
+ template<typename... Args>
+ std::pair<typename Table::iterator, bool> emplace(Args &&... args)
+ {
+ return Table::emplace(T(std::forward<Args>(args)...)); // materialize the T before handing it to the table
+ }
+ std::pair<typename Table::iterator, bool> emplace(const key_type & arg) // overloads below skip the extra T(...) construction for exact-type arguments
+ {
+ return Table::emplace(arg);
+ }
+ std::pair<typename Table::iterator, bool> emplace(key_type & arg)
+ {
+ return Table::emplace(arg);
+ }
+ std::pair<typename Table::iterator, bool> emplace(const key_type && arg)
+ {
+ return Table::emplace(std::move(arg));
+ }
+ std::pair<typename Table::iterator, bool> emplace(key_type && arg)
+ {
+ return Table::emplace(std::move(arg));
+ }
+
+ friend bool operator==(const flat_hash_set & lhs, const flat_hash_set & rhs)
+ {
+ if (lhs.size() != rhs.size())
+ return false;
+ for (const T & value : lhs) // order-independent: membership check in rhs
+ {
+ if (rhs.find(value) == rhs.end())
+ return false;
+ }
+ return true;
+ }
+ friend bool operator!=(const flat_hash_set & lhs, const flat_hash_set & rhs)
+ {
+ return !(lhs == rhs);
+ }
+};
+
+
+template<typename T>
+struct power_of_two_std_hash : std::hash<T>
+{
+ typedef ska::power_of_two_hash_policy hash_policy; // opts std::hash into the mask-based power-of-two policy
+};
+
+} // end namespace ska
diff --git a/misc/benchmarks/external/tsl/robin_growth_policy.h b/misc/benchmarks/external/tsl/robin_growth_policy.h
new file mode 100644
index 00000000..eba8cdfa
--- /dev/null
+++ b/misc/benchmarks/external/tsl/robin_growth_policy.h
@@ -0,0 +1,406 @@
+/**
+ * MIT License
+ *
+ * Copyright (c) 2017 Thibaut Goetghebuer-Planchon <[email protected]>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef TSL_ROBIN_GROWTH_POLICY_H
+#define TSL_ROBIN_GROWTH_POLICY_H
+
+#include <algorithm>
+#include <array>
+#include <climits>
+#include <cmath>
+#include <cstddef>
+#include <cstdint>
+#include <iterator>
+#include <limits>
+#include <ratio>
+#include <stdexcept>
+
+#ifdef TSL_DEBUG
+#define tsl_rh_assert(expr) assert(expr)
+#else
+#define tsl_rh_assert(expr) (static_cast<void>(0))
+#endif
+
+/**
+ * If exceptions are enabled, throw the exception passed in parameter, otherwise
+ * call std::terminate.
+ */
+#if (defined(__cpp_exceptions) || defined(__EXCEPTIONS) || \
+ (defined(_MSC_VER) && defined(_CPPUNWIND))) && \
+ !defined(TSL_NO_EXCEPTIONS)
+#define TSL_RH_THROW_OR_TERMINATE(ex, msg) throw ex(msg)
+#else
+#define TSL_RH_NO_EXCEPTIONS
+#ifdef TSL_DEBUG
+#include <iostream>
+#define TSL_RH_THROW_OR_TERMINATE(ex, msg) \
+ do { \
+ std::cerr << msg << std::endl; \
+ std::terminate(); \
+ } while (0)
+#else
+#define TSL_RH_THROW_OR_TERMINATE(ex, msg) std::terminate()
+#endif
+#endif
+
+#if defined(__GNUC__) || defined(__clang__)
+#define TSL_RH_LIKELY(exp) (__builtin_expect(!!(exp), true))
+#else
+#define TSL_RH_LIKELY(exp) (exp)
+#endif
+
+#define TSL_RH_UNUSED(x) static_cast<void>(x)
+
+namespace tsl {
+namespace rh {
+
+/**
+ * Grow the hash table by a factor of GrowthFactor keeping the bucket count to a
+ * power of two. It allows the table to use a mask operation instead of a modulo
+ * operation to map a hash to a bucket.
+ *
+ * GrowthFactor must be a power of two >= 2.
+ */
+template <std::size_t GrowthFactor>
+class power_of_two_growth_policy {
+ public:
+ /**
+ * Called on the hash table creation and on rehash. The number of buckets for
+ * the table is passed in parameter. This number is a minimum, the policy may
+ * update this value with a higher value if needed (but not lower).
+ *
+ * If 0 is given, min_bucket_count_in_out must still be 0 after the policy
+ * creation and bucket_for_hash must always return 0 in this case.
+ */
+ explicit power_of_two_growth_policy(std::size_t& min_bucket_count_in_out) {
+ if (min_bucket_count_in_out > max_bucket_count()) {
+ TSL_RH_THROW_OR_TERMINATE(std::length_error,
+ "The hash table exceeds its maximum size.");
+ }
+
+ if (min_bucket_count_in_out > 0) {
+ min_bucket_count_in_out =
+ round_up_to_power_of_two(min_bucket_count_in_out);
+ m_mask = min_bucket_count_in_out - 1; // count is a power of two, so count-1 is an all-ones index mask
+ } else {
+ m_mask = 0;
+ }
+ }
+
+ /**
+ * Return the bucket [0, bucket_count()) to which the hash belongs.
+ * If bucket_count() is 0, it must always return 0.
+ */
+ std::size_t bucket_for_hash(std::size_t hash) const noexcept {
+ return hash & m_mask;
+ }
+
+ /**
+ * Return the number of buckets that should be used on next growth.
+ */
+ std::size_t next_bucket_count() const {
+ if ((m_mask + 1) > max_bucket_count() / GrowthFactor) { // overflow guard before multiplying
+ TSL_RH_THROW_OR_TERMINATE(std::length_error,
+ "The hash table exceeds its maximum size.");
+ }
+
+ return (m_mask + 1) * GrowthFactor;
+ }
+
+ /**
+ * Return the maximum number of buckets supported by the policy.
+ */
+ std::size_t max_bucket_count() const {
+ // Largest power of two.
+ return (std::numeric_limits<std::size_t>::max() / 2) + 1;
+ }
+
+ /**
+ * Reset the growth policy as if it was created with a bucket count of 0.
+ * After a clear, the policy must always return 0 when bucket_for_hash is
+ * called.
+ */
+ void clear() noexcept { m_mask = 0; }
+
+ private:
+ static std::size_t round_up_to_power_of_two(std::size_t value) {
+ if (is_power_of_two(value)) {
+ return value;
+ }
+
+ if (value == 0) {
+ return 1;
+ }
+
+ --value;
+ for (std::size_t i = 1; i < sizeof(std::size_t) * CHAR_BIT; i *= 2) {
+ value |= value >> i; // smear the highest set bit into all lower bits
+ }
+
+ return value + 1; // all-ones pattern + 1 == next power of two
+ }
+
+ static constexpr bool is_power_of_two(std::size_t value) {
+ return value != 0 && (value & (value - 1)) == 0;
+ }
+
+ protected:
+ static_assert(is_power_of_two(GrowthFactor) && GrowthFactor >= 2,
+ "GrowthFactor must be a power of two >= 2.");
+
+ std::size_t m_mask; // bucket_count() - 1; 0 for the empty table
+};
+
+/**
+ * Grow the hash table by GrowthFactor::num / GrowthFactor::den and use a modulo
+ * to map a hash to a bucket. Slower but it can be useful if you want a slower
+ * growth.
+ */
+template <class GrowthFactor = std::ratio<3, 2>>
+class mod_growth_policy {
+ public:
+ explicit mod_growth_policy(std::size_t& min_bucket_count_in_out) {
+ if (min_bucket_count_in_out > max_bucket_count()) {
+ TSL_RH_THROW_OR_TERMINATE(std::length_error,
+ "The hash table exceeds its maximum size.");
+ }
+
+ if (min_bucket_count_in_out > 0) {
+ m_mod = min_bucket_count_in_out;
+ } else {
+ m_mod = 1; // never 0: bucket_for_hash divides by m_mod
+ }
+ }
+
+ std::size_t bucket_for_hash(std::size_t hash) const noexcept {
+ return hash % m_mod;
+ }
+
+ std::size_t next_bucket_count() const {
+ if (m_mod == max_bucket_count()) {
+ TSL_RH_THROW_OR_TERMINATE(std::length_error,
+ "The hash table exceeds its maximum size.");
+ }
+
+ const double next_bucket_count =
+ std::ceil(double(m_mod) * REHASH_SIZE_MULTIPLICATION_FACTOR);
+ if (!std::isnormal(next_bucket_count)) { // rejects inf/NaN produced by overflow
+ TSL_RH_THROW_OR_TERMINATE(std::length_error,
+ "The hash table exceeds its maximum size.");
+ }
+
+ if (next_bucket_count > double(max_bucket_count())) {
+ return max_bucket_count(); // clamp rather than overshoot
+ } else {
+ return std::size_t(next_bucket_count);
+ }
+ }
+
+ std::size_t max_bucket_count() const { return MAX_BUCKET_COUNT; }
+
+ void clear() noexcept { m_mod = 1; } // keep the modulo valid for the empty table
+
+ private:
+ static constexpr double REHASH_SIZE_MULTIPLICATION_FACTOR =
+ 1.0 * GrowthFactor::num / GrowthFactor::den;
+ static const std::size_t MAX_BUCKET_COUNT =
+ std::size_t(double(std::numeric_limits<std::size_t>::max() /
+ REHASH_SIZE_MULTIPLICATION_FACTOR));
+
+ static_assert(REHASH_SIZE_MULTIPLICATION_FACTOR >= 1.1,
+ "Growth factor should be >= 1.1.");
+
+ std::size_t m_mod; // current bucket count (the divisor); 1 when empty
+};
+
+namespace detail {
+
+#if SIZE_MAX >= ULLONG_MAX
+#define TSL_RH_NB_PRIMES 51
+#elif SIZE_MAX >= ULONG_MAX
+#define TSL_RH_NB_PRIMES 40
+#else
+#define TSL_RH_NB_PRIMES 23
+#endif
+
+static constexpr const std::array<std::size_t, TSL_RH_NB_PRIMES> PRIMES = {{
+ 1u,
+ 5u,
+ 17u,
+ 29u,
+ 37u,
+ 53u,
+ 67u,
+ 79u,
+ 97u,
+ 131u,
+ 193u,
+ 257u,
+ 389u,
+ 521u,
+ 769u,
+ 1031u,
+ 1543u,
+ 2053u,
+ 3079u,
+ 6151u,
+ 12289u,
+ 24593u,
+ 49157u,
+#if SIZE_MAX >= ULONG_MAX
+ 98317ul,
+ 196613ul,
+ 393241ul,
+ 786433ul,
+ 1572869ul,
+ 3145739ul,
+ 6291469ul,
+ 12582917ul,
+ 25165843ul,
+ 50331653ul,
+ 100663319ul,
+ 201326611ul,
+ 402653189ul,
+ 805306457ul,
+ 1610612741ul,
+ 3221225473ul,
+ 4294967291ul,
+#endif
+#if SIZE_MAX >= ULLONG_MAX
+ 6442450939ull,
+ 12884901893ull,
+ 25769803751ull,
+ 51539607551ull,
+ 103079215111ull,
+ 206158430209ull,
+ 412316860441ull,
+ 824633720831ull,
+ 1649267441651ull,
+ 3298534883309ull,
+ 6597069766657ull,
+#endif
+}};
+
+template <unsigned int IPrime>
+static constexpr std::size_t mod(std::size_t hash) { // divisor is a compile-time constant, letting the compiler strength-reduce the modulo
+ return hash % PRIMES[IPrime];
+}
+
+// MOD_PRIME[iprime](hash) returns hash % PRIMES[iprime]. This table allows for
+// faster modulo as the compiler can optimize the modulo code better with a
+// constant known at the compilation.
+static constexpr const std::array<std::size_t (*)(std::size_t),
+ TSL_RH_NB_PRIMES>
+ MOD_PRIME = {{
+ &mod<0>, &mod<1>, &mod<2>, &mod<3>, &mod<4>, &mod<5>,
+ &mod<6>, &mod<7>, &mod<8>, &mod<9>, &mod<10>, &mod<11>,
+ &mod<12>, &mod<13>, &mod<14>, &mod<15>, &mod<16>, &mod<17>,
+ &mod<18>, &mod<19>, &mod<20>, &mod<21>, &mod<22>,
+#if SIZE_MAX >= ULONG_MAX
+ &mod<23>, &mod<24>, &mod<25>, &mod<26>, &mod<27>, &mod<28>,
+ &mod<29>, &mod<30>, &mod<31>, &mod<32>, &mod<33>, &mod<34>,
+ &mod<35>, &mod<36>, &mod<37>, &mod<38>, &mod<39>,
+#endif
+#if SIZE_MAX >= ULLONG_MAX
+ &mod<40>, &mod<41>, &mod<42>, &mod<43>, &mod<44>, &mod<45>,
+ &mod<46>, &mod<47>, &mod<48>, &mod<49>, &mod<50>,
+#endif
+ }};
+
+} // namespace detail
+
+/**
+ * Grow the hash table by using prime numbers as bucket count. Slower than
+ * tsl::rh::power_of_two_growth_policy in general but will probably distribute
+ * the values around better in the buckets with a poor hash function.
+ *
+ * To allow the compiler to optimize the modulo operation, a lookup table is
+ * used with constant prime numbers.
+ *
+ * With a switch the code would look like:
+ * \code
+ * switch(iprime) { // iprime is the current prime of the hash table
+ * case 0: hash % 5ul;
+ * break;
+ * case 1: hash % 17ul;
+ * break;
+ * case 2: hash % 29ul;
+ * break;
+ * ...
+ * }
+ * \endcode
+ *
+ * Due to the constant variable in the modulo the compiler is able to optimize
+ * the operation by a series of multiplications, subtractions and shifts.
+ *
+ * The 'hash % 5' could become something like 'hash - ((hash * 0xCCCCCCCD) >> 34)
+ * * 5' in a 64-bit environment.
+ */
+class prime_growth_policy {
+ public:
+ explicit prime_growth_policy(std::size_t& min_bucket_count_in_out) {
+ auto it_prime = std::lower_bound(
+ detail::PRIMES.begin(), detail::PRIMES.end(), min_bucket_count_in_out); // first prime >= requested count
+ if (it_prime == detail::PRIMES.end()) {
+ TSL_RH_THROW_OR_TERMINATE(std::length_error,
+ "The hash table exceeds its maximum size.");
+ }
+
+ m_iprime = static_cast<unsigned int>(
+ std::distance(detail::PRIMES.begin(), it_prime));
+ if (min_bucket_count_in_out > 0) {
+ min_bucket_count_in_out = *it_prime;
+ } else {
+ min_bucket_count_in_out = 0; // contract: a request of 0 stays 0
+ }
+ }
+
+ std::size_t bucket_for_hash(std::size_t hash) const noexcept {
+ return detail::MOD_PRIME[m_iprime](hash); // constant-divisor modulo via the function table
+ }
+
+ std::size_t next_bucket_count() const {
+ if (m_iprime + 1 >= detail::PRIMES.size()) {
+ TSL_RH_THROW_OR_TERMINATE(std::length_error,
+ "The hash table exceeds its maximum size.");
+ }
+
+ return detail::PRIMES[m_iprime + 1];
+ }
+
+ std::size_t max_bucket_count() const { return detail::PRIMES.back(); }
+
+ void clear() noexcept { m_iprime = 0; }
+
+ private:
+ unsigned int m_iprime; // index of the current bucket count in detail::PRIMES
+
+ static_assert(std::numeric_limits<decltype(m_iprime)>::max() >=
+ detail::PRIMES.size(),
+ "The type of m_iprime is not big enough.");
+};
+
+} // namespace rh
+} // namespace tsl
+
+#endif
diff --git a/misc/benchmarks/external/tsl/robin_hash.h b/misc/benchmarks/external/tsl/robin_hash.h
new file mode 100644
index 00000000..89c7c96f
--- /dev/null
+++ b/misc/benchmarks/external/tsl/robin_hash.h
@@ -0,0 +1,1639 @@
+/**
+ * MIT License
+ *
+ * Copyright (c) 2017 Thibaut Goetghebuer-Planchon <[email protected]>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef TSL_ROBIN_HASH_H
+#define TSL_ROBIN_HASH_H
+
+#include <algorithm>
+#include <cassert>
+#include <cmath>
+#include <cstddef>
+#include <cstdint>
+#include <exception>
+#include <iterator>
+#include <limits>
+#include <memory>
+#include <new>
+#include <stdexcept>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+#include "robin_growth_policy.h"
+
+namespace tsl {
+
+namespace detail_robin_hash {
+
+// C++11 stand-in for std::void_t (only available from C++17 on).
+template <typename T>
+struct make_void {
+  using type = void;
+};
+
+// Detects whether `T` declares a nested `is_transparent` type, as transparent
+// comparators do (enables heterogeneous lookup overloads).
+template <typename T, typename = void>
+struct has_is_transparent : std::false_type {};
+
+template <typename T>
+struct has_is_transparent<T,
+                          typename make_void<typename T::is_transparent>::type>
+    : std::true_type {};
+
+// Trait that is true only for tsl::rh::power_of_two_growth_policy (any
+// growth factor); false for every other growth policy.
+template <typename U>
+struct is_power_of_two_policy : std::false_type {};
+
+template <std::size_t GrowthFactor>
+struct is_power_of_two_policy<tsl::rh::power_of_two_growth_policy<GrowthFactor>>
+    : std::true_type {};
+
+// std::clamp is only available in C++17; we need to be compatible with C++11.
+// Returns `v` limited to [lo, hi], computed as min(hi, max(lo, v)).
+template <class T>
+const T& clamp(const T& v, const T& lo, const T& hi) {
+  return std::min(hi, std::max(lo, v));
+}
+
+// Converts `value` to `T`, throwing std::runtime_error (or terminating,
+// depending on TSL_RH_THROW_OR_TERMINATE) if information would be lost: the
+// result must round-trip back to `value` and, when the two types differ in
+// signedness, keep the same sign.
+template <typename T, typename U>
+static T numeric_cast(U value,
+                      const char* error_message = "numeric_cast() failed.") {
+  T ret = static_cast<T>(value);
+  if (static_cast<U>(ret) != value) {
+    TSL_RH_THROW_OR_TERMINATE(std::runtime_error, error_message);
+  }
+
+  const bool is_same_signedness =
+      (std::is_unsigned<T>::value && std::is_unsigned<U>::value) ||
+      (std::is_signed<T>::value && std::is_signed<U>::value);
+  if (!is_same_signedness && (ret < T{}) != (value < U{})) {
+    TSL_RH_THROW_OR_TERMINATE(std::runtime_error, error_message);
+  }
+
+  return ret;
+}
+
+// Reads one value of type `T` from `deserializer` by invoking its templated
+// call operator.
+template <class T, class Deserializer>
+static T deserialize_value(Deserializer& deserializer) {
+  // MSVC < 2017 is not conformant, circumvent the problem by removing the
+  // template keyword
+#if defined(_MSC_VER) && _MSC_VER < 1910
+  return deserializer.Deserializer::operator()<T>();
+#else
+  return deserializer.Deserializer::template operator()<T>();
+#endif
+}
+
+/**
+ * Fixed size type used to represent size_type values on serialization. Need to
+ * be big enough to represent a std::size_t on 32 and 64 bits platforms, and
+ * must be the same size on both platforms.
+ */
+using slz_size_type = std::uint64_t;
+static_assert(std::numeric_limits<slz_size_type>::max() >=
+               std::numeric_limits<std::size_t>::max(),
+              "slz_size_type must be >= std::size_t");
+
+// 32-bit form of the hash as stored in a bucket when hash caching is enabled.
+using truncated_hash_type = std::uint32_t;
+
+/**
+ * Helper class that stores a truncated hash if StoreHash is true and nothing
+ * otherwise.
+ */
+template <bool StoreHash>
+class bucket_entry_hash {
+ public:
+  // No hash is kept in this configuration, so every stored-hash comparison
+  // reports a match.
+  bool bucket_hash_equal(std::size_t /*hash*/) const noexcept { return true; }
+
+  truncated_hash_type truncated_hash() const noexcept { return 0; }
+
+ protected:
+  // No-op: there is nothing to store.
+  void set_hash(truncated_hash_type /*hash*/) noexcept {}
+};
+
+// Specialization that actually stores the truncated hash.
+template <>
+class bucket_entry_hash<true> {
+ public:
+  // Compares the truncated form of `hash` against the stored value.
+  bool bucket_hash_equal(std::size_t hash) const noexcept {
+    return m_hash == truncated_hash_type(hash);
+  }
+
+  truncated_hash_type truncated_hash() const noexcept { return m_hash; }
+
+ protected:
+  void set_hash(truncated_hash_type hash) noexcept {
+    m_hash = truncated_hash_type(hash);
+  }
+
+ private:
+  truncated_hash_type m_hash;
+};
+
+/**
+ * Each bucket entry has:
+ * - A value of type `ValueType`.
+ * - An integer to store how far the value of the bucket, if any, is from its
+ * ideal bucket (ex: if the current bucket 5 has the value 'foo' and
+ * `hash('foo') % nb_buckets` == 3, `dist_from_ideal_bucket()` will return 2 as
+ * the current value of the bucket is two buckets away from its ideal bucket) If
+ * there is no value in the bucket (i.e. `empty()` is true)
+ * `dist_from_ideal_bucket()` will be < 0.
+ * - A marker which tells us if the bucket is the last bucket of the bucket
+ * array (useful for the iterator of the hash table).
+ * - If `StoreHash` is true, 32 bits of the hash of the value, if any, are also
+ * stored in the bucket. If the size of the hash is more than 32 bits, it is
+ * truncated. We don't store the full hash as storing the hash is a potential
+ * opportunity to use the unused space due to the alignment of the bucket_entry
+ * structure. We can thus potentially store the hash without any extra space
+ * (which would not be possible with 64 bits of the hash).
+ */
+template <typename ValueType, bool StoreHash>
+class bucket_entry : public bucket_entry_hash<StoreHash> {
+  using bucket_hash = bucket_entry_hash<StoreHash>;
+
+ public:
+  using value_type = ValueType;
+  using distance_type = std::int16_t;
+
+  // Creates an empty, non-last bucket.
+  bucket_entry() noexcept
+      : bucket_hash(),
+        m_dist_from_ideal_bucket(EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET),
+        m_last_bucket(false) {
+    tsl_rh_assert(empty());
+  }
+
+  // Creates an empty bucket with an explicit last-bucket marker.
+  bucket_entry(bool last_bucket) noexcept
+      : bucket_hash(),
+        m_dist_from_ideal_bucket(EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET),
+        m_last_bucket(last_bucket) {
+    tsl_rh_assert(empty());
+  }
+
+  // Copy-constructs the stored value only when `other` actually holds one.
+  bucket_entry(const bucket_entry& other) noexcept(
+      std::is_nothrow_copy_constructible<value_type>::value)
+      : bucket_hash(other),
+        m_dist_from_ideal_bucket(EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET),
+        m_last_bucket(other.m_last_bucket) {
+    if (!other.empty()) {
+      ::new (static_cast<void*>(std::addressof(m_value)))
+          value_type(other.value());
+      m_dist_from_ideal_bucket = other.m_dist_from_ideal_bucket;
+    }
+    tsl_rh_assert(empty() == other.empty());
+  }
+
+  /**
+   * Never really used, but still necessary as we must call resize on an empty
+   * `std::vector<bucket_entry>`, and we need to support move-only types. See
+   * robin_hash constructor for details.
+   */
+  bucket_entry(bucket_entry&& other) noexcept(
+      std::is_nothrow_move_constructible<value_type>::value)
+      : bucket_hash(std::move(other)),
+        m_dist_from_ideal_bucket(EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET),
+        m_last_bucket(other.m_last_bucket) {
+    if (!other.empty()) {
+      ::new (static_cast<void*>(std::addressof(m_value)))
+          value_type(std::move(other.value()));
+      m_dist_from_ideal_bucket = other.m_dist_from_ideal_bucket;
+    }
+    tsl_rh_assert(empty() == other.empty());
+  }
+
+  // Destroys any currently held value before copying the other bucket's
+  // state (and value, if present).
+  bucket_entry& operator=(const bucket_entry& other) noexcept(
+      std::is_nothrow_copy_constructible<value_type>::value) {
+    if (this != &other) {
+      clear();
+
+      bucket_hash::operator=(other);
+      if (!other.empty()) {
+        ::new (static_cast<void*>(std::addressof(m_value)))
+            value_type(other.value());
+      }
+
+      m_dist_from_ideal_bucket = other.m_dist_from_ideal_bucket;
+      m_last_bucket = other.m_last_bucket;
+    }
+
+    return *this;
+  }
+
+  // Move-assignment is intentionally deleted.
+  bucket_entry& operator=(bucket_entry&&) = delete;
+
+  ~bucket_entry() noexcept { clear(); }
+
+  // Destroys the held value, if any, and marks the bucket empty.
+  void clear() noexcept {
+    if (!empty()) {
+      destroy_value();
+      m_dist_from_ideal_bucket = EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET;
+    }
+  }
+
+  bool empty() const noexcept {
+    return m_dist_from_ideal_bucket == EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET;
+  }
+
+  // Access to the stored value; only valid when !empty(). std::launder is
+  // needed from C++17 to access the object placement-new'd into m_value.
+  value_type& value() noexcept {
+    tsl_rh_assert(!empty());
+#if defined(__cplusplus) && __cplusplus >= 201703L
+    return *std::launder(
+        reinterpret_cast<value_type*>(std::addressof(m_value)));
+#else
+    return *reinterpret_cast<value_type*>(std::addressof(m_value));
+#endif
+  }
+
+  const value_type& value() const noexcept {
+    tsl_rh_assert(!empty());
+#if defined(__cplusplus) && __cplusplus >= 201703L
+    return *std::launder(
+        reinterpret_cast<const value_type*>(std::addressof(m_value)));
+#else
+    return *reinterpret_cast<const value_type*>(std::addressof(m_value));
+#endif
+  }
+
+  distance_type dist_from_ideal_bucket() const noexcept {
+    return m_dist_from_ideal_bucket;
+  }
+
+  bool last_bucket() const noexcept { return m_last_bucket; }
+
+  void set_as_last_bucket() noexcept { m_last_bucket = true; }
+
+  // Constructs a value in place in this (currently empty) bucket and records
+  // its distance and truncated hash.
+  template <typename... Args>
+  void set_value_of_empty_bucket(distance_type dist_from_ideal_bucket,
+                                 truncated_hash_type hash,
+                                 Args&&... value_type_args) {
+    tsl_rh_assert(dist_from_ideal_bucket >= 0);
+    tsl_rh_assert(empty());
+
+    ::new (static_cast<void*>(std::addressof(m_value)))
+        value_type(std::forward<Args>(value_type_args)...);
+    this->set_hash(hash);
+    m_dist_from_ideal_bucket = dist_from_ideal_bucket;
+
+    tsl_rh_assert(!empty());
+  }
+
+  // Exchanges the caller's value/distance/hash with this occupied bucket's
+  // contents. The caller's distance must be strictly greater than ours.
+  void swap_with_value_in_bucket(distance_type& dist_from_ideal_bucket,
+                                 truncated_hash_type& hash, value_type& value) {
+    tsl_rh_assert(!empty());
+    tsl_rh_assert(dist_from_ideal_bucket > m_dist_from_ideal_bucket);
+
+    using std::swap;
+    swap(value, this->value());
+    swap(dist_from_ideal_bucket, m_dist_from_ideal_bucket);
+
+    if (StoreHash) {
+      const truncated_hash_type tmp_hash = this->truncated_hash();
+      this->set_hash(hash);
+      hash = tmp_hash;
+    } else {
+      // Avoid warning of unused variable if StoreHash is false
+      TSL_RH_UNUSED(hash);
+    }
+  }
+
+  static truncated_hash_type truncate_hash(std::size_t hash) noexcept {
+    return truncated_hash_type(hash);
+  }
+
+ private:
+  void destroy_value() noexcept {
+    tsl_rh_assert(!empty());
+    value().~value_type();
+  }
+
+ public:
+  // Sentinel value of m_dist_from_ideal_bucket marking an empty bucket.
+  static const distance_type EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET = -1;
+  static const distance_type DIST_FROM_IDEAL_BUCKET_LIMIT = 4096;
+  static_assert(DIST_FROM_IDEAL_BUCKET_LIMIT <=
+                    std::numeric_limits<distance_type>::max() - 1,
+                "DIST_FROM_IDEAL_BUCKET_LIMIT must be <= "
+                "std::numeric_limits<distance_type>::max() - 1.");
+
+ private:
+  // Properly aligned raw storage; the value is created lazily via placement
+  // new and destroyed explicitly in destroy_value().
+  using storage = typename std::aligned_storage<sizeof(value_type),
+                                                alignof(value_type)>::type;
+
+  distance_type m_dist_from_ideal_bucket;
+  bool m_last_bucket;
+  storage m_value;
+};
+
+/**
+ * Internal common class used by `robin_map` and `robin_set`.
+ *
+ * ValueType is what will be stored by `robin_hash` (usually `std::pair<Key, T>`
+ * for map and `Key` for set).
+ *
+ * `KeySelect` should be a `FunctionObject` which takes a `ValueType` in
+ * parameter and returns a reference to the key.
+ *
+ * `ValueSelect` should be a `FunctionObject` which takes a `ValueType` in
+ * parameter and returns a reference to the value. `ValueSelect` should be void
+ * if there is no value (in a set for example).
+ *
+ * The strong exception guarantee only holds if the expression
+ * `std::is_nothrow_swappable<ValueType>::value &&
+ * std::is_nothrow_move_constructible<ValueType>::value` is true.
+ *
+ * Behaviour is undefined if the destructor of `ValueType` throws.
+ */
+template <class ValueType, class KeySelect, class ValueSelect, class Hash,
+ class KeyEqual, class Allocator, bool StoreHash, class GrowthPolicy>
+class robin_hash : private Hash, private KeyEqual, private GrowthPolicy {
+ private:
+ template <typename U>
+ using has_mapped_type =
+ typename std::integral_constant<bool, !std::is_same<U, void>::value>;
+
+ static_assert(
+ noexcept(std::declval<GrowthPolicy>().bucket_for_hash(std::size_t(0))),
+ "GrowthPolicy::bucket_for_hash must be noexcept.");
+ static_assert(noexcept(std::declval<GrowthPolicy>().clear()),
+ "GrowthPolicy::clear must be noexcept.");
+
+ public:
+ template <bool IsConst>
+ class robin_iterator;
+
+ using key_type = typename KeySelect::key_type;
+ using value_type = ValueType;
+ using size_type = std::size_t;
+ using difference_type = std::ptrdiff_t;
+ using hasher = Hash;
+ using key_equal = KeyEqual;
+ using allocator_type = Allocator;
+ using reference = value_type&;
+ using const_reference = const value_type&;
+ using pointer = value_type*;
+ using const_pointer = const value_type*;
+ using iterator = robin_iterator<false>;
+ using const_iterator = robin_iterator<true>;
+
+ private:
+ /**
+ * Either store the hash because we are asked by the `StoreHash` template
+ * parameter or store the hash because it doesn't cost us anything in size and
+ * can be used to speed up rehash.
+ */
+ static constexpr bool STORE_HASH =
+ StoreHash ||
+ ((sizeof(tsl::detail_robin_hash::bucket_entry<value_type, true>) ==
+ sizeof(tsl::detail_robin_hash::bucket_entry<value_type, false>)) &&
+ (sizeof(std::size_t) == sizeof(truncated_hash_type) ||
+ is_power_of_two_policy<GrowthPolicy>::value) &&
+ // Don't store the hash for primitive types with default hash.
+ (!std::is_arithmetic<key_type>::value ||
+ !std::is_same<Hash, std::hash<key_type>>::value));
+
+ /**
+ * Only use the stored hash on lookup if we are explicitly asked. We are not
+ * sure how slow the KeyEqual operation is. An extra comparison may slow
+ * things down with a fast KeyEqual.
+ */
+ static constexpr bool USE_STORED_HASH_ON_LOOKUP = StoreHash;
+
+ /**
+ * We can only use the hash on rehash if the size of the hash type is the same
+ * as the stored one or if we use a power of two modulo. In the case of the
+   * power of two modulo, we just mask the least significant bytes, so we only
+   * have to check that truncated_hash_type didn't truncate too many bytes.
+ */
+ static bool USE_STORED_HASH_ON_REHASH(size_type bucket_count) {
+ if (STORE_HASH && sizeof(std::size_t) == sizeof(truncated_hash_type)) {
+ TSL_RH_UNUSED(bucket_count);
+ return true;
+ } else if (STORE_HASH && is_power_of_two_policy<GrowthPolicy>::value) {
+ return bucket_count == 0 ||
+ (bucket_count - 1) <=
+ std::numeric_limits<truncated_hash_type>::max();
+ } else {
+ TSL_RH_UNUSED(bucket_count);
+ return false;
+ }
+ }
+
+ using bucket_entry =
+ tsl::detail_robin_hash::bucket_entry<value_type, STORE_HASH>;
+ using distance_type = typename bucket_entry::distance_type;
+
+ using buckets_allocator = typename std::allocator_traits<
+ allocator_type>::template rebind_alloc<bucket_entry>;
+ using buckets_container_type = std::vector<bucket_entry, buckets_allocator>;
+
+ public:
+ /**
+ * The 'operator*()' and 'operator->()' methods return a const reference and
+ * const pointer respectively to the stored value type.
+ *
+ * In case of a map, to get a mutable reference to the value associated to a
+ * key (the '.second' in the stored pair), you have to call 'value()'.
+ *
+ * The main reason for this is that if we returned a `std::pair<Key, T>&`
+ * instead of a `const std::pair<Key, T>&`, the user may modify the key which
+   * will put the map in an undefined state.
+ */
+ template <bool IsConst>
+ class robin_iterator {
+ friend class robin_hash;
+
+ private:
+ using bucket_entry_ptr =
+ typename std::conditional<IsConst, const bucket_entry*,
+ bucket_entry*>::type;
+
+ robin_iterator(bucket_entry_ptr bucket) noexcept : m_bucket(bucket) {}
+
+ public:
+ using iterator_category = std::forward_iterator_tag;
+ using value_type = const typename robin_hash::value_type;
+ using difference_type = std::ptrdiff_t;
+ using reference = value_type&;
+ using pointer = value_type*;
+
+ robin_iterator() noexcept {}
+
+ // Copy constructor from iterator to const_iterator.
+ template <bool TIsConst = IsConst,
+ typename std::enable_if<TIsConst>::type* = nullptr>
+ robin_iterator(const robin_iterator<!TIsConst>& other) noexcept
+ : m_bucket(other.m_bucket) {}
+
+ robin_iterator(const robin_iterator& other) = default;
+ robin_iterator(robin_iterator&& other) = default;
+ robin_iterator& operator=(const robin_iterator& other) = default;
+ robin_iterator& operator=(robin_iterator&& other) = default;
+
+ const typename robin_hash::key_type& key() const {
+ return KeySelect()(m_bucket->value());
+ }
+
+ template <class U = ValueSelect,
+ typename std::enable_if<has_mapped_type<U>::value &&
+ IsConst>::type* = nullptr>
+ const typename U::value_type& value() const {
+ return U()(m_bucket->value());
+ }
+
+ template <class U = ValueSelect,
+ typename std::enable_if<has_mapped_type<U>::value &&
+ !IsConst>::type* = nullptr>
+ typename U::value_type& value() const {
+ return U()(m_bucket->value());
+ }
+
+ reference operator*() const { return m_bucket->value(); }
+
+ pointer operator->() const { return std::addressof(m_bucket->value()); }
+
+ robin_iterator& operator++() {
+ while (true) {
+ if (m_bucket->last_bucket()) {
+ ++m_bucket;
+ return *this;
+ }
+
+ ++m_bucket;
+ if (!m_bucket->empty()) {
+ return *this;
+ }
+ }
+ }
+
+ robin_iterator operator++(int) {
+ robin_iterator tmp(*this);
+ ++*this;
+
+ return tmp;
+ }
+
+ friend bool operator==(const robin_iterator& lhs,
+ const robin_iterator& rhs) {
+ return lhs.m_bucket == rhs.m_bucket;
+ }
+
+ friend bool operator!=(const robin_iterator& lhs,
+ const robin_iterator& rhs) {
+ return !(lhs == rhs);
+ }
+
+ private:
+ bucket_entry_ptr m_bucket;
+ };
+
+ public:
+#if defined(__cplusplus) && __cplusplus >= 201402L
+ robin_hash(size_type bucket_count, const Hash& hash, const KeyEqual& equal,
+ const Allocator& alloc,
+ float min_load_factor = DEFAULT_MIN_LOAD_FACTOR,
+ float max_load_factor = DEFAULT_MAX_LOAD_FACTOR)
+ : Hash(hash),
+ KeyEqual(equal),
+ GrowthPolicy(bucket_count),
+ m_buckets_data(bucket_count, alloc),
+ m_buckets(m_buckets_data.empty() ? static_empty_bucket_ptr()
+ : m_buckets_data.data()),
+ m_bucket_count(bucket_count),
+ m_nb_elements(0),
+ m_grow_on_next_insert(false),
+ m_try_shrink_on_next_insert(false) {
+ if (bucket_count > max_bucket_count()) {
+ TSL_RH_THROW_OR_TERMINATE(std::length_error,
+ "The map exceeds its maximum bucket count.");
+ }
+
+ if (m_bucket_count > 0) {
+ tsl_rh_assert(!m_buckets_data.empty());
+ m_buckets_data.back().set_as_last_bucket();
+ }
+
+ this->min_load_factor(min_load_factor);
+ this->max_load_factor(max_load_factor);
+ }
+#else
+ /**
+ * C++11 doesn't support the creation of a std::vector with a custom allocator
+   * and 'count' default-inserted elements. The needed constructor `explicit
+ * vector(size_type count, const Allocator& alloc = Allocator());` is only
+ * available in C++14 and later. We thus must resize after using the
+ * `vector(const Allocator& alloc)` constructor.
+ *
+ * We can't use `vector(size_type count, const T& value, const Allocator&
+ * alloc)` as it requires the value T to be copyable.
+ */
+ robin_hash(size_type bucket_count, const Hash& hash, const KeyEqual& equal,
+ const Allocator& alloc,
+ float min_load_factor = DEFAULT_MIN_LOAD_FACTOR,
+ float max_load_factor = DEFAULT_MAX_LOAD_FACTOR)
+ : Hash(hash),
+ KeyEqual(equal),
+ GrowthPolicy(bucket_count),
+ m_buckets_data(alloc),
+ m_buckets(static_empty_bucket_ptr()),
+ m_bucket_count(bucket_count),
+ m_nb_elements(0),
+ m_grow_on_next_insert(false),
+ m_try_shrink_on_next_insert(false) {
+ if (bucket_count > max_bucket_count()) {
+ TSL_RH_THROW_OR_TERMINATE(std::length_error,
+ "The map exceeds its maximum bucket count.");
+ }
+
+ if (m_bucket_count > 0) {
+ m_buckets_data.resize(m_bucket_count);
+ m_buckets = m_buckets_data.data();
+
+ tsl_rh_assert(!m_buckets_data.empty());
+ m_buckets_data.back().set_as_last_bucket();
+ }
+
+ this->min_load_factor(min_load_factor);
+ this->max_load_factor(max_load_factor);
+ }
+#endif
+
+ robin_hash(const robin_hash& other)
+ : Hash(other),
+ KeyEqual(other),
+ GrowthPolicy(other),
+ m_buckets_data(other.m_buckets_data),
+ m_buckets(m_buckets_data.empty() ? static_empty_bucket_ptr()
+ : m_buckets_data.data()),
+ m_bucket_count(other.m_bucket_count),
+ m_nb_elements(other.m_nb_elements),
+ m_load_threshold(other.m_load_threshold),
+ m_min_load_factor(other.m_min_load_factor),
+ m_max_load_factor(other.m_max_load_factor),
+ m_grow_on_next_insert(other.m_grow_on_next_insert),
+ m_try_shrink_on_next_insert(other.m_try_shrink_on_next_insert) {}
+
+ robin_hash(robin_hash&& other) noexcept(
+ std::is_nothrow_move_constructible<
+ Hash>::value&& std::is_nothrow_move_constructible<KeyEqual>::value&&
+ std::is_nothrow_move_constructible<GrowthPolicy>::value&&
+ std::is_nothrow_move_constructible<buckets_container_type>::value)
+ : Hash(std::move(static_cast<Hash&>(other))),
+ KeyEqual(std::move(static_cast<KeyEqual&>(other))),
+ GrowthPolicy(std::move(static_cast<GrowthPolicy&>(other))),
+ m_buckets_data(std::move(other.m_buckets_data)),
+ m_buckets(m_buckets_data.empty() ? static_empty_bucket_ptr()
+ : m_buckets_data.data()),
+ m_bucket_count(other.m_bucket_count),
+ m_nb_elements(other.m_nb_elements),
+ m_load_threshold(other.m_load_threshold),
+ m_min_load_factor(other.m_min_load_factor),
+ m_max_load_factor(other.m_max_load_factor),
+ m_grow_on_next_insert(other.m_grow_on_next_insert),
+ m_try_shrink_on_next_insert(other.m_try_shrink_on_next_insert) {
+ other.clear_and_shrink();
+ }
+
+ robin_hash& operator=(const robin_hash& other) {
+ if (&other != this) {
+ Hash::operator=(other);
+ KeyEqual::operator=(other);
+ GrowthPolicy::operator=(other);
+
+ m_buckets_data = other.m_buckets_data;
+ m_buckets = m_buckets_data.empty() ? static_empty_bucket_ptr()
+ : m_buckets_data.data();
+ m_bucket_count = other.m_bucket_count;
+ m_nb_elements = other.m_nb_elements;
+
+ m_load_threshold = other.m_load_threshold;
+ m_min_load_factor = other.m_min_load_factor;
+ m_max_load_factor = other.m_max_load_factor;
+
+ m_grow_on_next_insert = other.m_grow_on_next_insert;
+ m_try_shrink_on_next_insert = other.m_try_shrink_on_next_insert;
+ }
+
+ return *this;
+ }
+
+ robin_hash& operator=(robin_hash&& other) {
+ other.swap(*this);
+ other.clear_and_shrink();
+
+ return *this;
+ }
+
+ allocator_type get_allocator() const {
+ return m_buckets_data.get_allocator();
+ }
+
+ /*
+ * Iterators
+ */
+ iterator begin() noexcept {
+ std::size_t i = 0;
+ while (i < m_bucket_count && m_buckets[i].empty()) {
+ i++;
+ }
+
+ return iterator(m_buckets + i);
+ }
+
+ const_iterator begin() const noexcept { return cbegin(); }
+
+ const_iterator cbegin() const noexcept {
+ std::size_t i = 0;
+ while (i < m_bucket_count && m_buckets[i].empty()) {
+ i++;
+ }
+
+ return const_iterator(m_buckets + i);
+ }
+
+ iterator end() noexcept { return iterator(m_buckets + m_bucket_count); }
+
+ const_iterator end() const noexcept { return cend(); }
+
+ const_iterator cend() const noexcept {
+ return const_iterator(m_buckets + m_bucket_count);
+ }
+
+ /*
+ * Capacity
+ */
+ bool empty() const noexcept { return m_nb_elements == 0; }
+
+ size_type size() const noexcept { return m_nb_elements; }
+
+ size_type max_size() const noexcept { return m_buckets_data.max_size(); }
+
+ /*
+ * Modifiers
+ */
+ void clear() noexcept {
+ if (m_min_load_factor > 0.0f) {
+ clear_and_shrink();
+ } else {
+ for (auto& bucket : m_buckets_data) {
+ bucket.clear();
+ }
+
+ m_nb_elements = 0;
+ m_grow_on_next_insert = false;
+ }
+ }
+
+ template <typename P>
+ std::pair<iterator, bool> insert(P&& value) {
+ return insert_impl(KeySelect()(value), std::forward<P>(value));
+ }
+
+ template <typename P>
+ iterator insert_hint(const_iterator hint, P&& value) {
+ if (hint != cend() &&
+ compare_keys(KeySelect()(*hint), KeySelect()(value))) {
+ return mutable_iterator(hint);
+ }
+
+ return insert(std::forward<P>(value)).first;
+ }
+
+ template <class InputIt>
+ void insert(InputIt first, InputIt last) {
+ if (std::is_base_of<
+ std::forward_iterator_tag,
+ typename std::iterator_traits<InputIt>::iterator_category>::value) {
+ const auto nb_elements_insert = std::distance(first, last);
+ const size_type nb_free_buckets = m_load_threshold - size();
+ tsl_rh_assert(m_load_threshold >= size());
+
+ if (nb_elements_insert > 0 &&
+ nb_free_buckets < size_type(nb_elements_insert)) {
+ reserve(size() + size_type(nb_elements_insert));
+ }
+ }
+
+ for (; first != last; ++first) {
+ insert(*first);
+ }
+ }
+
+ template <class K, class M>
+ std::pair<iterator, bool> insert_or_assign(K&& key, M&& obj) {
+ auto it = try_emplace(std::forward<K>(key), std::forward<M>(obj));
+ if (!it.second) {
+ it.first.value() = std::forward<M>(obj);
+ }
+
+ return it;
+ }
+
+ template <class K, class M>
+ iterator insert_or_assign(const_iterator hint, K&& key, M&& obj) {
+ if (hint != cend() && compare_keys(KeySelect()(*hint), key)) {
+ auto it = mutable_iterator(hint);
+ it.value() = std::forward<M>(obj);
+
+ return it;
+ }
+
+ return insert_or_assign(std::forward<K>(key), std::forward<M>(obj)).first;
+ }
+
+ template <class... Args>
+ std::pair<iterator, bool> emplace(Args&&... args) {
+ return insert(value_type(std::forward<Args>(args)...));
+ }
+
+ template <class... Args>
+ iterator emplace_hint(const_iterator hint, Args&&... args) {
+ return insert_hint(hint, value_type(std::forward<Args>(args)...));
+ }
+
+ template <class K, class... Args>
+ std::pair<iterator, bool> try_emplace(K&& key, Args&&... args) {
+ return insert_impl(key, std::piecewise_construct,
+ std::forward_as_tuple(std::forward<K>(key)),
+ std::forward_as_tuple(std::forward<Args>(args)...));
+ }
+
+ template <class K, class... Args>
+ iterator try_emplace_hint(const_iterator hint, K&& key, Args&&... args) {
+ if (hint != cend() && compare_keys(KeySelect()(*hint), key)) {
+ return mutable_iterator(hint);
+ }
+
+ return try_emplace(std::forward<K>(key), std::forward<Args>(args)...).first;
+ }
+
+ /**
+ * Here to avoid `template<class K> size_type erase(const K& key)` being used
+ * when we use an `iterator` instead of a `const_iterator`.
+ */
+ iterator erase(iterator pos) {
+ erase_from_bucket(pos);
+
+ /**
+ * Erase bucket used a backward shift after clearing the bucket.
+ * Check if there is a new value in the bucket, if not get the next
+ * non-empty.
+ */
+ if (pos.m_bucket->empty()) {
+ ++pos;
+ }
+
+ m_try_shrink_on_next_insert = true;
+
+ return pos;
+ }
+
+ iterator erase(const_iterator pos) { return erase(mutable_iterator(pos)); }
+
+ iterator erase(const_iterator first, const_iterator last) {
+ if (first == last) {
+ return mutable_iterator(first);
+ }
+
+ auto first_mutable = mutable_iterator(first);
+ auto last_mutable = mutable_iterator(last);
+ for (auto it = first_mutable.m_bucket; it != last_mutable.m_bucket; ++it) {
+ if (!it->empty()) {
+ it->clear();
+ m_nb_elements--;
+ }
+ }
+
+ if (last_mutable == end()) {
+ m_try_shrink_on_next_insert = true;
+ return end();
+ }
+
+ /*
+ * Backward shift on the values which come after the deleted values.
+ * We try to move the values closer to their ideal bucket.
+ */
+ std::size_t icloser_bucket =
+ static_cast<std::size_t>(first_mutable.m_bucket - m_buckets);
+ std::size_t ito_move_closer_value =
+ static_cast<std::size_t>(last_mutable.m_bucket - m_buckets);
+ tsl_rh_assert(ito_move_closer_value > icloser_bucket);
+
+ const std::size_t ireturn_bucket =
+ ito_move_closer_value -
+ std::min(
+ ito_move_closer_value - icloser_bucket,
+ std::size_t(
+ m_buckets[ito_move_closer_value].dist_from_ideal_bucket()));
+
+ while (ito_move_closer_value < m_bucket_count &&
+ m_buckets[ito_move_closer_value].dist_from_ideal_bucket() > 0) {
+ icloser_bucket =
+ ito_move_closer_value -
+ std::min(
+ ito_move_closer_value - icloser_bucket,
+ std::size_t(
+ m_buckets[ito_move_closer_value].dist_from_ideal_bucket()));
+
+ tsl_rh_assert(m_buckets[icloser_bucket].empty());
+ const distance_type new_distance = distance_type(
+ m_buckets[ito_move_closer_value].dist_from_ideal_bucket() -
+ (ito_move_closer_value - icloser_bucket));
+ m_buckets[icloser_bucket].set_value_of_empty_bucket(
+ new_distance, m_buckets[ito_move_closer_value].truncated_hash(),
+ std::move(m_buckets[ito_move_closer_value].value()));
+ m_buckets[ito_move_closer_value].clear();
+
+ ++icloser_bucket;
+ ++ito_move_closer_value;
+ }
+
+ m_try_shrink_on_next_insert = true;
+
+ return iterator(m_buckets + ireturn_bucket);
+ }
+
+ template <class K>
+ size_type erase(const K& key) {
+ return erase(key, hash_key(key));
+ }
+
+ template <class K>
+ size_type erase(const K& key, std::size_t hash) {
+ auto it = find(key, hash);
+ if (it != end()) {
+ erase_from_bucket(it);
+ m_try_shrink_on_next_insert = true;
+
+ return 1;
+ } else {
+ return 0;
+ }
+ }
+
+ void swap(robin_hash& other) {
+ using std::swap;
+
+ swap(static_cast<Hash&>(*this), static_cast<Hash&>(other));
+ swap(static_cast<KeyEqual&>(*this), static_cast<KeyEqual&>(other));
+ swap(static_cast<GrowthPolicy&>(*this), static_cast<GrowthPolicy&>(other));
+ swap(m_buckets_data, other.m_buckets_data);
+ swap(m_buckets, other.m_buckets);
+ swap(m_bucket_count, other.m_bucket_count);
+ swap(m_nb_elements, other.m_nb_elements);
+ swap(m_load_threshold, other.m_load_threshold);
+ swap(m_min_load_factor, other.m_min_load_factor);
+ swap(m_max_load_factor, other.m_max_load_factor);
+ swap(m_grow_on_next_insert, other.m_grow_on_next_insert);
+ swap(m_try_shrink_on_next_insert, other.m_try_shrink_on_next_insert);
+ }
+
+ /*
+ * Lookup
+ */
+ template <class K, class U = ValueSelect,
+ typename std::enable_if<has_mapped_type<U>::value>::type* = nullptr>
+ typename U::value_type& at(const K& key) {
+ return at(key, hash_key(key));
+ }
+
+ template <class K, class U = ValueSelect,
+ typename std::enable_if<has_mapped_type<U>::value>::type* = nullptr>
+ typename U::value_type& at(const K& key, std::size_t hash) {
+ return const_cast<typename U::value_type&>(
+ static_cast<const robin_hash*>(this)->at(key, hash));
+ }
+
+ template <class K, class U = ValueSelect,
+ typename std::enable_if<has_mapped_type<U>::value>::type* = nullptr>
+ const typename U::value_type& at(const K& key) const {
+ return at(key, hash_key(key));
+ }
+
+ template <class K, class U = ValueSelect,
+ typename std::enable_if<has_mapped_type<U>::value>::type* = nullptr>
+ const typename U::value_type& at(const K& key, std::size_t hash) const {
+ auto it = find(key, hash);
+ if (it != cend()) {
+ return it.value();
+ } else {
+ TSL_RH_THROW_OR_TERMINATE(std::out_of_range, "Couldn't find key.");
+ }
+ }
+
+ template <class K, class U = ValueSelect,
+ typename std::enable_if<has_mapped_type<U>::value>::type* = nullptr>
+ typename U::value_type& operator[](K&& key) {
+ return try_emplace(std::forward<K>(key)).first.value();
+ }
+
+ template <class K>
+ size_type count(const K& key) const {
+ return count(key, hash_key(key));
+ }
+
+ template <class K>
+ size_type count(const K& key, std::size_t hash) const {
+ if (find(key, hash) != cend()) {
+ return 1;
+ } else {
+ return 0;
+ }
+ }
+
+  /** Mutable lookup; returns end() when the key is absent. */
+  template <class K>
+  iterator find(const K& key) {
+    return find_impl(key, hash_key(key));
+  }
+
+  /** Mutable lookup using a caller-precomputed hash of `key`. */
+  template <class K>
+  iterator find(const K& key, std::size_t hash) {
+    return find_impl(key, hash);
+  }
+
+  /** Const lookup; returns cend() when the key is absent. */
+  template <class K>
+  const_iterator find(const K& key) const {
+    return find_impl(key, hash_key(key));
+  }
+
+  /** Const lookup using a caller-precomputed hash of `key`. */
+  template <class K>
+  const_iterator find(const K& key, std::size_t hash) const {
+    return find_impl(key, hash);
+  }
+
+  /** True iff an element equal to `key` is present. */
+  template <class K>
+  bool contains(const K& key) const {
+    return contains(key, hash_key(key));
+  }
+
+  /** contains() variant taking a precomputed hash. */
+  template <class K>
+  bool contains(const K& key, std::size_t hash) const {
+    return count(key, hash) != 0;
+  }
+
+  /**
+   * std::unordered_map-compatible equal_range. Keys are unique, so the
+   * range is empty or holds exactly one element.
+   */
+  template <class K>
+  std::pair<iterator, iterator> equal_range(const K& key) {
+    return equal_range(key, hash_key(key));
+  }
+
+  template <class K>
+  std::pair<iterator, iterator> equal_range(const K& key, std::size_t hash) {
+    iterator it = find(key, hash);
+    return std::make_pair(it, (it == end()) ? it : std::next(it));
+  }
+
+  template <class K>
+  std::pair<const_iterator, const_iterator> equal_range(const K& key) const {
+    return equal_range(key, hash_key(key));
+  }
+
+  template <class K>
+  std::pair<const_iterator, const_iterator> equal_range(
+      const K& key, std::size_t hash) const {
+    const_iterator it = find(key, hash);
+    return std::make_pair(it, (it == cend()) ? it : std::next(it));
+  }
+
+  /*
+   * Bucket interface
+   */
+  size_type bucket_count() const { return m_bucket_count; }
+
+  size_type max_bucket_count() const {
+    return std::min(GrowthPolicy::max_bucket_count(),
+                    m_buckets_data.max_size());
+  }
+
+  /*
+   * Hash policy
+   */
+  float load_factor() const {
+    // Guard the division: an empty, never-rehashed table has 0 buckets.
+    if (bucket_count() == 0) {
+      return 0;
+    }
+
+    return float(m_nb_elements) / float(bucket_count());
+  }
+
+  float min_load_factor() const { return m_min_load_factor; }
+
+  float max_load_factor() const { return m_max_load_factor; }
+
+  // Out-of-range requests are silently clamped to the supported interval
+  // rather than rejected.
+  void min_load_factor(float ml) {
+    m_min_load_factor = clamp(ml, float(MINIMUM_MIN_LOAD_FACTOR),
+                              float(MAXIMUM_MIN_LOAD_FACTOR));
+  }
+
+  void max_load_factor(float ml) {
+    m_max_load_factor = clamp(ml, float(MINIMUM_MAX_LOAD_FACTOR),
+                              float(MAXIMUM_MAX_LOAD_FACTOR));
+    // Recompute the element count at which the next insert triggers a grow.
+    m_load_threshold = size_type(float(bucket_count()) * m_max_load_factor);
+    tsl_rh_assert(bucket_count() == 0 || m_load_threshold < bucket_count());
+  }
+
+  // Never shrinks below what the current size requires at max_load_factor.
+  void rehash(size_type count_) {
+    count_ = std::max(count_,
+                      size_type(std::ceil(float(size()) / max_load_factor())));
+    rehash_impl(count_);
+  }
+
+  void reserve(size_type count_) {
+    rehash(size_type(std::ceil(float(count_) / max_load_factor())));
+  }
+
+  /*
+   * Observers
+   */
+  // Hash and KeyEqual are inherited (EBO); upcast to return copies.
+  hasher hash_function() const { return static_cast<const Hash&>(*this); }
+
+  key_equal key_eq() const { return static_cast<const KeyEqual&>(*this); }
+
+  /*
+   * Other
+   */
+  iterator mutable_iterator(const_iterator pos) {
+    return iterator(const_cast<bucket_entry*>(pos.m_bucket));
+  }
+
+  template <class Serializer>
+  void serialize(Serializer& serializer) const {
+    serialize_impl(serializer);
+  }
+
+  template <class Deserializer>
+  void deserialize(Deserializer& deserializer, bool hash_compatible) {
+    deserialize_impl(deserializer, hash_compatible);
+  }
+
+ private:
+  template <class K>
+  std::size_t hash_key(const K& key) const {
+    return Hash::operator()(key);
+  }
+
+  template <class K1, class K2>
+  bool compare_keys(const K1& key1, const K2& key2) const {
+    return KeyEqual::operator()(key1, key2);
+  }
+
+  /** Maps a hash to its ideal bucket index via the GrowthPolicy. */
+  std::size_t bucket_for_hash(std::size_t hash) const {
+    const std::size_t bucket = GrowthPolicy::bucket_for_hash(hash);
+    tsl_rh_assert(bucket < m_bucket_count ||
+                  (bucket == 0 && m_bucket_count == 0));
+
+    return bucket;
+  }
+
+  // Power-of-two bucket counts: wrap with a mask instead of a compare.
+  template <class U = GrowthPolicy,
+            typename std::enable_if<is_power_of_two_policy<U>::value>::type* =
+                nullptr>
+  std::size_t next_bucket(std::size_t index) const noexcept {
+    tsl_rh_assert(index < bucket_count());
+
+    return (index + 1) & this->m_mask;
+  }
+
+  // Generic growth policies: wrap around with an explicit comparison.
+  template <class U = GrowthPolicy,
+            typename std::enable_if<!is_power_of_two_policy<U>::value>::type* =
+                nullptr>
+  std::size_t next_bucket(std::size_t index) const noexcept {
+    tsl_rh_assert(index < bucket_count());
+
+    index++;
+    return (index != bucket_count()) ? index : 0;
+  }
+
+  // Non-const find is implemented on top of the const overload.
+  template <class K>
+  iterator find_impl(const K& key, std::size_t hash) {
+    return mutable_iterator(
+        static_cast<const robin_hash*>(this)->find(key, hash));
+  }
+
+  /**
+   * Core probe loop. Robin-hood invariant: an element's probe distance never
+   * exceeds that of the bucket it would displace, so we can stop as soon as
+   * our distance exceeds the stored element's distance — the key cannot be
+   * further along.
+   */
+  template <class K>
+  const_iterator find_impl(const K& key, std::size_t hash) const {
+    std::size_t ibucket = bucket_for_hash(hash);
+    distance_type dist_from_ideal_bucket = 0;
+
+    while (dist_from_ideal_bucket <=
+           m_buckets[ibucket].dist_from_ideal_bucket()) {
+      // Compare the stored truncated hash first (when available) to avoid
+      // a potentially expensive key comparison.
+      if (TSL_RH_LIKELY(
+              (!USE_STORED_HASH_ON_LOOKUP ||
+               m_buckets[ibucket].bucket_hash_equal(hash)) &&
+              compare_keys(KeySelect()(m_buckets[ibucket].value()), key))) {
+        return const_iterator(m_buckets + ibucket);
+      }
+
+      ibucket = next_bucket(ibucket);
+      dist_from_ideal_bucket++;
+    }
+
+    return cend();
+  }
+
+  void erase_from_bucket(iterator pos) {
+    pos.m_bucket->clear();
+    m_nb_elements--;
+
+    /**
+     * Backward shift, swap the empty bucket, previous_ibucket, with the values
+     * on its right, ibucket, until we cross another empty bucket or if the
+     * other bucket has a distance_from_ideal_bucket == 0.
+     *
+     * We try to move the values closer to their ideal bucket.
+     */
+    std::size_t previous_ibucket =
+        static_cast<std::size_t>(pos.m_bucket - m_buckets);
+    std::size_t ibucket = next_bucket(previous_ibucket);
+
+    while (m_buckets[ibucket].dist_from_ideal_bucket() > 0) {
+      tsl_rh_assert(m_buckets[previous_ibucket].empty());
+
+      // Each shifted element moves one bucket closer to its ideal position.
+      const distance_type new_distance =
+          distance_type(m_buckets[ibucket].dist_from_ideal_bucket() - 1);
+      m_buckets[previous_ibucket].set_value_of_empty_bucket(
+          new_distance, m_buckets[ibucket].truncated_hash(),
+          std::move(m_buckets[ibucket].value()));
+      m_buckets[ibucket].clear();
+
+      previous_ibucket = ibucket;
+      ibucket = next_bucket(ibucket);
+    }
+  }
+
+  /**
+   * Inserts `key` (constructing the value from value_type_args) unless it is
+   * already present. Returns {iterator, true} on insertion, {iterator, false}
+   * if an equal key was found.
+   */
+  template <class K, class... Args>
+  std::pair<iterator, bool> insert_impl(const K& key,
+                                        Args&&... value_type_args) {
+    const std::size_t hash = hash_key(key);
+
+    std::size_t ibucket = bucket_for_hash(hash);
+    distance_type dist_from_ideal_bucket = 0;
+
+    // First probe: detect duplicates (same stop condition as find_impl).
+    while (dist_from_ideal_bucket <=
+           m_buckets[ibucket].dist_from_ideal_bucket()) {
+      if ((!USE_STORED_HASH_ON_LOOKUP ||
+           m_buckets[ibucket].bucket_hash_equal(hash)) &&
+          compare_keys(KeySelect()(m_buckets[ibucket].value()), key)) {
+        return std::make_pair(iterator(m_buckets + ibucket), false);
+      }
+
+      ibucket = next_bucket(ibucket);
+      dist_from_ideal_bucket++;
+    }
+
+    // A rehash invalidates the probe position; redo the probe on the new
+    // (or shrunk) bucket array.
+    if (rehash_on_extreme_load()) {
+      ibucket = bucket_for_hash(hash);
+      dist_from_ideal_bucket = 0;
+
+      while (dist_from_ideal_bucket <=
+             m_buckets[ibucket].dist_from_ideal_bucket()) {
+        ibucket = next_bucket(ibucket);
+        dist_from_ideal_bucket++;
+      }
+    }
+
+    if (m_buckets[ibucket].empty()) {
+      m_buckets[ibucket].set_value_of_empty_bucket(
+          dist_from_ideal_bucket, bucket_entry::truncate_hash(hash),
+          std::forward<Args>(value_type_args)...);
+    } else {
+      // Occupied bucket with a smaller probe distance: steal it (robin hood).
+      insert_value(ibucket, dist_from_ideal_bucket,
+                   bucket_entry::truncate_hash(hash),
+                   std::forward<Args>(value_type_args)...);
+    }
+
+    m_nb_elements++;
+    /*
+     * The value will be inserted in ibucket in any case, either because it was
+     * empty or by stealing the bucket (robin hood).
+     */
+    return std::make_pair(iterator(m_buckets + ibucket), true);
+  }
+
+  // Materialize the value_type first so insert_value_impl can swap it along
+  // the probe chain.
+  template <class... Args>
+  void insert_value(std::size_t ibucket, distance_type dist_from_ideal_bucket,
+                    truncated_hash_type hash, Args&&... value_type_args) {
+    value_type value(std::forward<Args>(value_type_args)...);
+    insert_value_impl(ibucket, dist_from_ideal_bucket, hash, value);
+  }
+
+  void insert_value(std::size_t ibucket, distance_type dist_from_ideal_bucket,
+                    truncated_hash_type hash, value_type&& value) {
+    insert_value_impl(ibucket, dist_from_ideal_bucket, hash, value);
+  }
+
+  /*
+   * We don't use `value_type&& value` as last argument due to a bug in MSVC
+   * when `value_type` is a pointer, The compiler is not able to see the
+   * difference between `std::string*` and `std::string*&&` resulting in a
+   * compilation error.
+   *
+   * The `value` will be in a moved state at the end of the function.
+   */
+  void insert_value_impl(std::size_t ibucket,
+                         distance_type dist_from_ideal_bucket,
+                         truncated_hash_type hash, value_type& value) {
+    tsl_rh_assert(dist_from_ideal_bucket >
+                  m_buckets[ibucket].dist_from_ideal_bucket());
+    // Displace the "richer" resident; `value` now holds the evicted element
+    // and is re-inserted further along the chain.
+    m_buckets[ibucket].swap_with_value_in_bucket(dist_from_ideal_bucket, hash,
+                                                 value);
+    ibucket = next_bucket(ibucket);
+    dist_from_ideal_bucket++;
+
+    while (!m_buckets[ibucket].empty()) {
+      if (dist_from_ideal_bucket >
+          m_buckets[ibucket].dist_from_ideal_bucket()) {
+        if (dist_from_ideal_bucket >=
+            bucket_entry::DIST_FROM_IDEAL_BUCKET_LIMIT) {
+          /**
+           * The number of probes is really high, rehash the map on the next
+           * insert. Difficult to do now as rehash may throw an exception.
+           */
+          m_grow_on_next_insert = true;
+        }
+
+        m_buckets[ibucket].swap_with_value_in_bucket(dist_from_ideal_bucket,
+                                                     hash, value);
+      }
+
+      ibucket = next_bucket(ibucket);
+      dist_from_ideal_bucket++;
+    }
+
+    m_buckets[ibucket].set_value_of_empty_bucket(dist_from_ideal_bucket, hash,
+                                                 std::move(value));
+  }
+
+  /** Builds a fresh table of `count_` buckets, moves every element into it,
+   *  then swaps it with *this. */
+  void rehash_impl(size_type count_) {
+    robin_hash new_table(count_, static_cast<Hash&>(*this),
+                         static_cast<KeyEqual&>(*this), get_allocator(),
+                         m_min_load_factor, m_max_load_factor);
+    tsl_rh_assert(size() <= new_table.m_load_threshold);
+
+    // With a power-of-two policy the stored truncated hash can be reused,
+    // skipping a full re-hash of each key.
+    const bool use_stored_hash =
+        USE_STORED_HASH_ON_REHASH(new_table.bucket_count());
+    for (auto& bucket : m_buckets_data) {
+      if (bucket.empty()) {
+        continue;
+      }
+
+      const std::size_t hash =
+          use_stored_hash ? bucket.truncated_hash()
+                          : new_table.hash_key(KeySelect()(bucket.value()));
+
+      new_table.insert_value_on_rehash(new_table.bucket_for_hash(hash), 0,
+                                       bucket_entry::truncate_hash(hash),
+                                       std::move(bucket.value()));
+    }
+
+    new_table.m_nb_elements = m_nb_elements;
+    new_table.swap(*this);
+  }
+
+  /** Resets the table to its default-constructed (zero-bucket) state and
+   *  releases the bucket storage. */
+  void clear_and_shrink() noexcept {
+    GrowthPolicy::clear();
+    m_buckets_data.clear();
+    m_buckets = static_empty_bucket_ptr();
+    m_bucket_count = 0;
+    m_nb_elements = 0;
+    m_load_threshold = 0;
+    m_grow_on_next_insert = false;
+    m_try_shrink_on_next_insert = false;
+  }
+
+  // Simplified insert for rehashing: keys are known to be unique, so no
+  // duplicate check is needed; elements are swapped along the chain until an
+  // empty bucket absorbs the current value.
+  void insert_value_on_rehash(std::size_t ibucket,
+                              distance_type dist_from_ideal_bucket,
+                              truncated_hash_type hash, value_type&& value) {
+    while (true) {
+      if (dist_from_ideal_bucket >
+          m_buckets[ibucket].dist_from_ideal_bucket()) {
+        if (m_buckets[ibucket].empty()) {
+          m_buckets[ibucket].set_value_of_empty_bucket(dist_from_ideal_bucket,
+                                                       hash, std::move(value));
+          return;
+        } else {
+          m_buckets[ibucket].swap_with_value_in_bucket(dist_from_ideal_bucket,
+                                                       hash, value);
+        }
+      }
+
+      dist_from_ideal_bucket++;
+      ibucket = next_bucket(ibucket);
+    }
+  }
+
+  /**
+   * Grow the table if m_grow_on_next_insert is true or we reached the
+   * max_load_factor. Shrink the table if m_try_shrink_on_next_insert is true
+   * (an erase occurred) and we're below the min_load_factor.
+   *
+   * Return true if the table has been rehashed.
+   */
+  bool rehash_on_extreme_load() {
+    if (m_grow_on_next_insert || size() >= m_load_threshold) {
+      rehash_impl(GrowthPolicy::next_bucket_count());
+      m_grow_on_next_insert = false;
+
+      return true;
+    }
+
+    if (m_try_shrink_on_next_insert) {
+      m_try_shrink_on_next_insert = false;
+      if (m_min_load_factor != 0.0f && load_factor() < m_min_load_factor) {
+        // reserve() computes the smallest bucket count that fits size()+1
+        // at max_load_factor, effectively shrinking the table.
+        reserve(size() + 1);
+
+        return true;
+      }
+    }
+
+    return false;
+  }
+
+  /**
+   * Wire format (version 1): protocol version, STORE_HASH flag (int16),
+   * element count, bucket count, min/max load factors, then one record per
+   * bucket: its probe distance (EMPTY_MARKER for empty buckets), the
+   * truncated hash when STORE_HASH, and the value.
+   */
+  template <class Serializer>
+  void serialize_impl(Serializer& serializer) const {
+    const slz_size_type version = SERIALIZATION_PROTOCOL_VERSION;
+    serializer(version);
+
+    // Indicate if the truncated hash of each bucket is stored. Use a
+    // std::int16_t instead of a bool to avoid the need for the serializer to
+    // support an extra 'bool' type.
+    const std::int16_t hash_stored_for_bucket =
+        static_cast<std::int16_t>(STORE_HASH);
+    serializer(hash_stored_for_bucket);
+
+    const slz_size_type nb_elements = m_nb_elements;
+    serializer(nb_elements);
+
+    const slz_size_type bucket_count = m_buckets_data.size();
+    serializer(bucket_count);
+
+    const float min_load_factor = m_min_load_factor;
+    serializer(min_load_factor);
+
+    const float max_load_factor = m_max_load_factor;
+    serializer(max_load_factor);
+
+    for (const bucket_entry& bucket : m_buckets_data) {
+      if (bucket.empty()) {
+        const std::int16_t empty_bucket =
+            bucket_entry::EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET;
+        serializer(empty_bucket);
+      } else {
+        const std::int16_t dist_from_ideal_bucket =
+            bucket.dist_from_ideal_bucket();
+        serializer(dist_from_ideal_bucket);
+        if (STORE_HASH) {
+          const std::uint32_t truncated_hash = bucket.truncated_hash();
+          serializer(truncated_hash);
+        }
+        serializer(bucket.value());
+      }
+    }
+  }
+
+  /**
+   * Reads the format written by serialize_impl into this (empty) table.
+   * With hash_compatible == false each value is re-inserted normally; with
+   * hash_compatible == true the bucket layout is restored verbatim, which
+   * requires the same StoreHash setting and GrowthPolicy behaviour as at
+   * serialization time.
+   */
+  template <class Deserializer>
+  void deserialize_impl(Deserializer& deserializer, bool hash_compatible) {
+    tsl_rh_assert(m_buckets_data.empty());  // Current hash table must be empty
+
+    const slz_size_type version =
+        deserialize_value<slz_size_type>(deserializer);
+    // For now we only have one version of the serialization protocol.
+    // If it doesn't match there is a problem with the file.
+    if (version != SERIALIZATION_PROTOCOL_VERSION) {
+      TSL_RH_THROW_OR_TERMINATE(std::runtime_error,
+                                "Can't deserialize the ordered_map/set. "
+                                "The protocol version header is invalid.");
+    }
+
+    const bool hash_stored_for_bucket =
+        deserialize_value<std::int16_t>(deserializer) ? true : false;
+    if (hash_compatible && STORE_HASH != hash_stored_for_bucket) {
+      TSL_RH_THROW_OR_TERMINATE(
+          std::runtime_error,
+          "Can't deserialize a map with a different StoreHash "
+          "than the one used during the serialization when "
+          "hash compatibility is used");
+    }
+
+    const slz_size_type nb_elements =
+        deserialize_value<slz_size_type>(deserializer);
+    const slz_size_type bucket_count_ds =
+        deserialize_value<slz_size_type>(deserializer);
+    const float min_load_factor = deserialize_value<float>(deserializer);
+    const float max_load_factor = deserialize_value<float>(deserializer);
+
+    // Range checks double as a sanity check that the serializer round-trips
+    // floats instead of silently converting them to ints.
+    if (min_load_factor < MINIMUM_MIN_LOAD_FACTOR ||
+        min_load_factor > MAXIMUM_MIN_LOAD_FACTOR) {
+      TSL_RH_THROW_OR_TERMINATE(
+          std::runtime_error,
+          "Invalid min_load_factor. Check that the serializer "
+          "and deserializer support floats correctly as they "
+          "can be converted implicitly to ints.");
+    }
+
+    if (max_load_factor < MINIMUM_MAX_LOAD_FACTOR ||
+        max_load_factor > MAXIMUM_MAX_LOAD_FACTOR) {
+      TSL_RH_THROW_OR_TERMINATE(
+          std::runtime_error,
+          "Invalid max_load_factor. Check that the serializer "
+          "and deserializer support floats correctly as they "
+          "can be converted implicitly to ints.");
+    }
+
+    this->min_load_factor(min_load_factor);
+    this->max_load_factor(max_load_factor);
+
+    if (bucket_count_ds == 0) {
+      tsl_rh_assert(nb_elements == 0);
+      return;
+    }
+
+    if (!hash_compatible) {
+      reserve(numeric_cast<size_type>(nb_elements,
+                                      "Deserialized nb_elements is too big."));
+      for (slz_size_type ibucket = 0; ibucket < bucket_count_ds; ibucket++) {
+        const distance_type dist_from_ideal_bucket =
+            deserialize_value<std::int16_t>(deserializer);
+        if (dist_from_ideal_bucket !=
+            bucket_entry::EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET) {
+          if (hash_stored_for_bucket) {
+            // Stored hash is unusable here (different Hash may be in effect);
+            // consume and discard it.
+            TSL_RH_UNUSED(deserialize_value<std::uint32_t>(deserializer));
+          }
+
+          insert(deserialize_value<value_type>(deserializer));
+        }
+      }
+
+      tsl_rh_assert(nb_elements == size());
+    } else {
+      m_bucket_count = numeric_cast<size_type>(
+          bucket_count_ds, "Deserialized bucket_count is too big.");
+
+      GrowthPolicy::operator=(GrowthPolicy(m_bucket_count));
+      // GrowthPolicy should not modify the bucket count we got from
+      // deserialization
+      if (m_bucket_count != bucket_count_ds) {
+        TSL_RH_THROW_OR_TERMINATE(std::runtime_error,
+                                  "The GrowthPolicy is not the same even "
+                                  "though hash_compatible is true.");
+      }
+
+      m_nb_elements = numeric_cast<size_type>(
+          nb_elements, "Deserialized nb_elements is too big.");
+      m_buckets_data.resize(m_bucket_count);
+      m_buckets = m_buckets_data.data();
+
+      for (bucket_entry& bucket : m_buckets_data) {
+        const distance_type dist_from_ideal_bucket =
+            deserialize_value<std::int16_t>(deserializer);
+        if (dist_from_ideal_bucket !=
+            bucket_entry::EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET) {
+          truncated_hash_type truncated_hash = 0;
+          if (hash_stored_for_bucket) {
+            // NOTE(review): this assert is trivially true inside the branch;
+            // kept for parity with upstream.
+            tsl_rh_assert(hash_stored_for_bucket);
+            truncated_hash = deserialize_value<std::uint32_t>(deserializer);
+          }
+
+          bucket.set_value_of_empty_bucket(
+              dist_from_ideal_bucket, truncated_hash,
+              deserialize_value<value_type>(deserializer));
+        }
+      }
+
+      if (!m_buckets_data.empty()) {
+        m_buckets_data.back().set_as_last_bucket();
+      }
+    }
+  }
+
+ public:
+  static const size_type DEFAULT_INIT_BUCKETS_SIZE = 0;
+
+  static constexpr float DEFAULT_MAX_LOAD_FACTOR = 0.5f;
+  static constexpr float MINIMUM_MAX_LOAD_FACTOR = 0.2f;
+  static constexpr float MAXIMUM_MAX_LOAD_FACTOR = 0.95f;
+
+  // A min load factor of 0 disables shrinking entirely (the default).
+  static constexpr float DEFAULT_MIN_LOAD_FACTOR = 0.0f;
+  static constexpr float MINIMUM_MIN_LOAD_FACTOR = 0.0f;
+  static constexpr float MAXIMUM_MIN_LOAD_FACTOR = 0.15f;
+
+  static_assert(MINIMUM_MAX_LOAD_FACTOR < MAXIMUM_MAX_LOAD_FACTOR,
+                "MINIMUM_MAX_LOAD_FACTOR should be < MAXIMUM_MAX_LOAD_FACTOR");
+  static_assert(MINIMUM_MIN_LOAD_FACTOR < MAXIMUM_MIN_LOAD_FACTOR,
+                "MINIMUM_MIN_LOAD_FACTOR should be < MAXIMUM_MIN_LOAD_FACTOR");
+  static_assert(MAXIMUM_MIN_LOAD_FACTOR < MINIMUM_MAX_LOAD_FACTOR,
+                "MAXIMUM_MIN_LOAD_FACTOR should be < MINIMUM_MAX_LOAD_FACTOR");
+
+ private:
+  /**
+   * Protocol version currently used for serialization.
+   */
+  static const slz_size_type SERIALIZATION_PROTOCOL_VERSION = 1;
+
+  /**
+   * Return an always valid pointer to an static empty bucket_entry with
+   * last_bucket() == true.
+   */
+  bucket_entry* static_empty_bucket_ptr() noexcept {
+    static bucket_entry empty_bucket(true);
+    tsl_rh_assert(empty_bucket.empty());
+    return &empty_bucket;
+  }
+
+ private:
+  buckets_container_type m_buckets_data;
+
+  /**
+   * Points to m_buckets_data.data() if !m_buckets_data.empty() otherwise points
+   * to static_empty_bucket_ptr. This variable is useful to avoid the cost of
+   * checking if m_buckets_data is empty when trying to find an element.
+   *
+   * TODO Remove m_buckets_data and only use a pointer instead of a
+   * pointer+vector to save some space in the robin_hash object. Manage the
+   * Allocator manually.
+   */
+  bucket_entry* m_buckets;
+
+  /**
+   * Used a lot in find, avoid the call to m_buckets_data.size() which is a bit
+   * slower.
+   */
+  size_type m_bucket_count;
+
+  // Number of stored elements (== size()).
+  size_type m_nb_elements;
+
+  // Element count at which the next insert triggers a grow; derived from
+  // bucket_count() * m_max_load_factor.
+  size_type m_load_threshold;
+
+  float m_min_load_factor;
+  float m_max_load_factor;
+
+  // Set when a probe chain hit DIST_FROM_IDEAL_BUCKET_LIMIT; forces a rehash
+  // on the next insert.
+  bool m_grow_on_next_insert;
+
+  /**
+   * We can't shrink down the map on erase operations as the erase methods need
+   * to return the next iterator. Shrinking the map would invalidate all the
+   * iterators and we could not return the next iterator in a meaningful way.
+   * On erase, we thus just indicate that we should try to shrink the hash
+   * table on the next insert if we go below the min_load_factor.
+   */
+  bool m_try_shrink_on_next_insert;
+};
+
+} // namespace detail_robin_hash
+
+} // namespace tsl
+
+#endif
diff --git a/misc/benchmarks/external/tsl/robin_map.h b/misc/benchmarks/external/tsl/robin_map.h
new file mode 100644
index 00000000..aeb354c3
--- /dev/null
+++ b/misc/benchmarks/external/tsl/robin_map.h
@@ -0,0 +1,807 @@
+/**
+ * MIT License
+ *
+ * Copyright (c) 2017 Thibaut Goetghebuer-Planchon <[email protected]>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef TSL_ROBIN_MAP_H
+#define TSL_ROBIN_MAP_H
+
+#include <cstddef>
+#include <functional>
+#include <initializer_list>
+#include <memory>
+#include <type_traits>
+#include <utility>
+
+#include "robin_hash.h"
+
+namespace tsl {
+
+/**
+ * Implementation of a hash map using open-addressing and the robin hood hashing
+ * algorithm with backward shift deletion.
+ *
+ * For operations modifying the hash map (insert, erase, rehash, ...), the
+ * strong exception guarantee is only guaranteed when the expression
+ * `std::is_nothrow_swappable<std::pair<Key, T>>::value &&
+ * std::is_nothrow_move_constructible<std::pair<Key, T>>::value` is true,
+ * otherwise if an exception is thrown during the swap or the move, the hash map
+ * may end up in an undefined state. Per the standard a `Key` or `T` with a
+ * noexcept copy constructor and no move constructor also satisfies the
+ * `std::is_nothrow_move_constructible<std::pair<Key, T>>::value` criterion (and
+ * will thus guarantee the strong exception for the map).
+ *
+ * When `StoreHash` is true, 32 bits of the hash are stored alongside the
+ * values. It can improve the performance during lookups if the `KeyEqual`
+ * function takes time (if it engenders a cache-miss for example) as we then
+ * compare the stored hashes before comparing the keys. When
+ * `tsl::rh::power_of_two_growth_policy` is used as `GrowthPolicy`, it may also
+ * speed-up the rehash process as we can avoid to recalculate the hash. When it
+ * is detected that storing the hash will not incur any memory penalty due to
+ * alignment (i.e. `sizeof(tsl::detail_robin_hash::bucket_entry<ValueType,
+ * true>) == sizeof(tsl::detail_robin_hash::bucket_entry<ValueType, false>)`)
+ * and `tsl::rh::power_of_two_growth_policy` is used, the hash will be stored
+ * even if `StoreHash` is false so that we can speed-up the rehash (but it will
+ * not be used on lookups unless `StoreHash` is true).
+ *
+ * `GrowthPolicy` defines how the map grows and consequently how a hash value is
+ * mapped to a bucket. By default the map uses
+ * `tsl::rh::power_of_two_growth_policy`. This policy keeps the number of
+ * buckets to a power of two and uses a mask to map the hash to a bucket instead
+ * of the slow modulo. Other growth policies are available and you may define
+ * your own growth policy, check `tsl::rh::power_of_two_growth_policy` for the
+ * interface.
+ *
+ * `std::pair<Key, T>` must be swappable.
+ *
+ * `Key` and `T` must be copy and/or move constructible.
+ *
+ * If the destructor of `Key` or `T` throws an exception, the behaviour of the
+ * class is undefined.
+ *
+ * Iterators invalidation:
+ * - clear, operator=, reserve, rehash: always invalidate the iterators.
+ * - insert, emplace, emplace_hint, operator[]: if there is an effective
+ * insert, invalidate the iterators.
+ * - erase: always invalidate the iterators.
+ */
+template <class Key, class T, class Hash = std::hash<Key>,
+ class KeyEqual = std::equal_to<Key>,
+ class Allocator = std::allocator<std::pair<Key, T>>,
+ bool StoreHash = false,
+ class GrowthPolicy = tsl::rh::power_of_two_growth_policy<2>>
+class robin_map {
+ private:
+ template <typename U>
+ using has_is_transparent = tsl::detail_robin_hash::has_is_transparent<U>;
+
+  // Projects a stored std::pair<Key, T> onto its key for the robin_hash core.
+  class KeySelect {
+   public:
+    using key_type = Key;
+
+    const key_type& operator()(
+        const std::pair<Key, T>& key_value) const noexcept {
+      return key_value.first;
+    }
+
+    key_type& operator()(std::pair<Key, T>& key_value) noexcept {
+      return key_value.first;
+    }
+  };
+
+  // Projects a stored std::pair<Key, T> onto its mapped value.
+  class ValueSelect {
+   public:
+    using value_type = T;
+
+    const value_type& operator()(
+        const std::pair<Key, T>& key_value) const noexcept {
+      return key_value.second;
+    }
+
+    value_type& operator()(std::pair<Key, T>& key_value) noexcept {
+      return key_value.second;
+    }
+  };
+
+  // All map operations forward to this robin_hash instantiation.
+  using ht = detail_robin_hash::robin_hash<std::pair<Key, T>, KeySelect,
+                                           ValueSelect, Hash, KeyEqual,
+                                           Allocator, StoreHash, GrowthPolicy>;
+
+ public:
+  using key_type = typename ht::key_type;
+  using mapped_type = T;
+  using value_type = typename ht::value_type;
+  using size_type = typename ht::size_type;
+  using difference_type = typename ht::difference_type;
+  using hasher = typename ht::hasher;
+  using key_equal = typename ht::key_equal;
+  using allocator_type = typename ht::allocator_type;
+  using reference = typename ht::reference;
+  using const_reference = typename ht::const_reference;
+  using pointer = typename ht::pointer;
+  using const_pointer = typename ht::const_pointer;
+  using iterator = typename ht::iterator;
+  using const_iterator = typename ht::const_iterator;
+
+ public:
+  /*
+   * Constructors
+   */
+  robin_map() : robin_map(ht::DEFAULT_INIT_BUCKETS_SIZE) {}
+
+  explicit robin_map(size_type bucket_count, const Hash& hash = Hash(),
+                     const KeyEqual& equal = KeyEqual(),
+                     const Allocator& alloc = Allocator())
+      : m_ht(bucket_count, hash, equal, alloc) {}
+
+  robin_map(size_type bucket_count, const Allocator& alloc)
+      : robin_map(bucket_count, Hash(), KeyEqual(), alloc) {}
+
+  robin_map(size_type bucket_count, const Hash& hash, const Allocator& alloc)
+      : robin_map(bucket_count, hash, KeyEqual(), alloc) {}
+
+  explicit robin_map(const Allocator& alloc)
+      : robin_map(ht::DEFAULT_INIT_BUCKETS_SIZE, alloc) {}
+
+  // Range constructor: delegates sizing to the bucket-count constructor,
+  // then bulk-inserts [first, last).
+  template <class InputIt>
+  robin_map(InputIt first, InputIt last,
+            size_type bucket_count = ht::DEFAULT_INIT_BUCKETS_SIZE,
+            const Hash& hash = Hash(), const KeyEqual& equal = KeyEqual(),
+            const Allocator& alloc = Allocator())
+      : robin_map(bucket_count, hash, equal, alloc) {
+    insert(first, last);
+  }
+
+  template <class InputIt>
+  robin_map(InputIt first, InputIt last, size_type bucket_count,
+            const Allocator& alloc)
+      : robin_map(first, last, bucket_count, Hash(), KeyEqual(), alloc) {}
+
+  template <class InputIt>
+  robin_map(InputIt first, InputIt last, size_type bucket_count,
+            const Hash& hash, const Allocator& alloc)
+      : robin_map(first, last, bucket_count, hash, KeyEqual(), alloc) {}
+
+  robin_map(std::initializer_list<value_type> init,
+            size_type bucket_count = ht::DEFAULT_INIT_BUCKETS_SIZE,
+            const Hash& hash = Hash(), const KeyEqual& equal = KeyEqual(),
+            const Allocator& alloc = Allocator())
+      : robin_map(init.begin(), init.end(), bucket_count, hash, equal, alloc) {}
+
+  robin_map(std::initializer_list<value_type> init, size_type bucket_count,
+            const Allocator& alloc)
+      : robin_map(init.begin(), init.end(), bucket_count, Hash(), KeyEqual(),
+                  alloc) {}
+
+  robin_map(std::initializer_list<value_type> init, size_type bucket_count,
+            const Hash& hash, const Allocator& alloc)
+      : robin_map(init.begin(), init.end(), bucket_count, hash, KeyEqual(),
+                  alloc) {}
+
+  // Assignment from an initializer list: clear, then reserve up-front to
+  // avoid rehashing during the bulk insert.
+  robin_map& operator=(std::initializer_list<value_type> ilist) {
+    m_ht.clear();
+
+    m_ht.reserve(ilist.size());
+    m_ht.insert(ilist.begin(), ilist.end());
+
+    return *this;
+  }
+
+ allocator_type get_allocator() const { return m_ht.get_allocator(); }
+
+ /*
+ * Iterators
+ */
+ iterator begin() noexcept { return m_ht.begin(); }
+ const_iterator begin() const noexcept { return m_ht.begin(); }
+ const_iterator cbegin() const noexcept { return m_ht.cbegin(); }
+
+ iterator end() noexcept { return m_ht.end(); }
+ const_iterator end() const noexcept { return m_ht.end(); }
+ const_iterator cend() const noexcept { return m_ht.cend(); }
+
+ /*
+ * Capacity
+ */
+ bool empty() const noexcept { return m_ht.empty(); }
+ size_type size() const noexcept { return m_ht.size(); }
+ size_type max_size() const noexcept { return m_ht.max_size(); }
+
+ /*
+ * Modifiers
+ */
+ void clear() noexcept { m_ht.clear(); }
+
+ std::pair<iterator, bool> insert(const value_type& value) {
+ return m_ht.insert(value);
+ }
+
+ template <class P, typename std::enable_if<std::is_constructible<
+ value_type, P&&>::value>::type* = nullptr>
+ std::pair<iterator, bool> insert(P&& value) {
+ return m_ht.emplace(std::forward<P>(value));
+ }
+
+ std::pair<iterator, bool> insert(value_type&& value) {
+ return m_ht.insert(std::move(value));
+ }
+
+ iterator insert(const_iterator hint, const value_type& value) {
+ return m_ht.insert_hint(hint, value);
+ }
+
+ template <class P, typename std::enable_if<std::is_constructible<
+ value_type, P&&>::value>::type* = nullptr>
+ iterator insert(const_iterator hint, P&& value) {
+ return m_ht.emplace_hint(hint, std::forward<P>(value));
+ }
+
+ iterator insert(const_iterator hint, value_type&& value) {
+ return m_ht.insert_hint(hint, std::move(value));
+ }
+
+ template <class InputIt>
+ void insert(InputIt first, InputIt last) {
+ m_ht.insert(first, last);
+ }
+
+ void insert(std::initializer_list<value_type> ilist) {
+ m_ht.insert(ilist.begin(), ilist.end());
+ }
+
+ template <class M>
+ std::pair<iterator, bool> insert_or_assign(const key_type& k, M&& obj) {
+ return m_ht.insert_or_assign(k, std::forward<M>(obj));
+ }
+
+ template <class M>
+ std::pair<iterator, bool> insert_or_assign(key_type&& k, M&& obj) {
+ return m_ht.insert_or_assign(std::move(k), std::forward<M>(obj));
+ }
+
+ template <class M>
+ iterator insert_or_assign(const_iterator hint, const key_type& k, M&& obj) {
+ return m_ht.insert_or_assign(hint, k, std::forward<M>(obj));
+ }
+
+ template <class M>
+ iterator insert_or_assign(const_iterator hint, key_type&& k, M&& obj) {
+ return m_ht.insert_or_assign(hint, std::move(k), std::forward<M>(obj));
+ }
+
+ /**
+ * Due to the way elements are stored, emplace will need to move or copy the
+ * key-value once. The method is equivalent to
+ * insert(value_type(std::forward<Args>(args)...));
+ *
+ * Mainly here for compatibility with the std::unordered_map interface.
+ */
+ template <class... Args>
+ std::pair<iterator, bool> emplace(Args&&... args) {
+ return m_ht.emplace(std::forward<Args>(args)...);
+ }
+
+ /**
+ * Due to the way elements are stored, emplace_hint will need to move or copy
+ * the key-value once. The method is equivalent to insert(hint,
+ * value_type(std::forward<Args>(args)...));
+ *
+ * Mainly here for compatibility with the std::unordered_map interface.
+ */
+ template <class... Args>
+ iterator emplace_hint(const_iterator hint, Args&&... args) {
+ return m_ht.emplace_hint(hint, std::forward<Args>(args)...);
+ }
+
+ template <class... Args>
+ std::pair<iterator, bool> try_emplace(const key_type& k, Args&&... args) {
+ return m_ht.try_emplace(k, std::forward<Args>(args)...);
+ }
+
+ template <class... Args>
+ std::pair<iterator, bool> try_emplace(key_type&& k, Args&&... args) {
+ return m_ht.try_emplace(std::move(k), std::forward<Args>(args)...);
+ }
+
+ template <class... Args>
+ iterator try_emplace(const_iterator hint, const key_type& k, Args&&... args) {
+ return m_ht.try_emplace_hint(hint, k, std::forward<Args>(args)...);
+ }
+
+ template <class... Args>
+ iterator try_emplace(const_iterator hint, key_type&& k, Args&&... args) {
+ return m_ht.try_emplace_hint(hint, std::move(k),
+ std::forward<Args>(args)...);
+ }
+
+ iterator erase(iterator pos) { return m_ht.erase(pos); }
+ iterator erase(const_iterator pos) { return m_ht.erase(pos); }
+ iterator erase(const_iterator first, const_iterator last) {
+ return m_ht.erase(first, last);
+ }
+ size_type erase(const key_type& key) { return m_ht.erase(key); }
+
+ /**
+ * Use the hash value 'precalculated_hash' instead of hashing the key. The
+ * hash value should be the same as hash_function()(key). Useful to speed-up
+ * the lookup to the value if you already have the hash.
+ */
+ size_type erase(const key_type& key, std::size_t precalculated_hash) {
+ return m_ht.erase(key, precalculated_hash);
+ }
+
+ /**
+ * This overload only participates in the overload resolution if the typedef
+ * KeyEqual::is_transparent exists. If so, K must be hashable and comparable
+ * to Key.
+ */
+ template <
+ class K, class KE = KeyEqual,
+ typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr>
+ size_type erase(const K& key) {
+ return m_ht.erase(key);
+ }
+
+ /**
+ * @copydoc erase(const K& key)
+ *
+ * Use the hash value 'precalculated_hash' instead of hashing the key. The
+ * hash value should be the same as hash_function()(key). Useful to speed-up
+ * the lookup to the value if you already have the hash.
+ */
+ template <
+ class K, class KE = KeyEqual,
+ typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr>
+ size_type erase(const K& key, std::size_t precalculated_hash) {
+ return m_ht.erase(key, precalculated_hash);
+ }
+
+ void swap(robin_map& other) { other.m_ht.swap(m_ht); }
+
+ /*
+ * Lookup
+ */
+ T& at(const Key& key) { return m_ht.at(key); } // NOTE(review): presumably throws when 'key' is absent, like std::map::at — verify in robin_hash
+
+ /**
+ * Use the hash value 'precalculated_hash' instead of hashing the key. The
+ * hash value should be the same as hash_function()(key). Useful to speed-up
+ * the lookup if you already have the hash.
+ */
+ T& at(const Key& key, std::size_t precalculated_hash) {
+ return m_ht.at(key, precalculated_hash);
+ }
+
+ const T& at(const Key& key) const { return m_ht.at(key); }
+
+ /**
+ * @copydoc at(const Key& key, std::size_t precalculated_hash)
+ */
+ const T& at(const Key& key, std::size_t precalculated_hash) const {
+ return m_ht.at(key, precalculated_hash);
+ }
+
+ /**
+ * This overload only participates in the overload resolution if the typedef
+ * KeyEqual::is_transparent exists. If so, K must be hashable and comparable
+ * to Key.
+ */
+ template <
+ class K, class KE = KeyEqual,
+ typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr>
+ T& at(const K& key) {
+ return m_ht.at(key);
+ }
+
+ /**
+ * @copydoc at(const K& key)
+ *
+ * Use the hash value 'precalculated_hash' instead of hashing the key. The
+ * hash value should be the same as hash_function()(key). Useful to speed-up
+ * the lookup if you already have the hash.
+ */
+ template <
+ class K, class KE = KeyEqual,
+ typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr>
+ T& at(const K& key, std::size_t precalculated_hash) {
+ return m_ht.at(key, precalculated_hash);
+ }
+
+ /**
+ * @copydoc at(const K& key)
+ */
+ template <
+ class K, class KE = KeyEqual,
+ typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr>
+ const T& at(const K& key) const {
+ return m_ht.at(key);
+ }
+
+ /**
+ * @copydoc at(const K& key, std::size_t precalculated_hash)
+ */
+ template <
+ class K, class KE = KeyEqual,
+ typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr>
+ const T& at(const K& key, std::size_t precalculated_hash) const {
+ return m_ht.at(key, precalculated_hash);
+ }
+
+ T& operator[](const Key& key) { return m_ht[key]; } // delegates to robin_hash's operator[]
+ T& operator[](Key&& key) { return m_ht[std::move(key)]; }
+
+ size_type count(const Key& key) const { return m_ht.count(key); }
+
+ /**
+ * Use the hash value 'precalculated_hash' instead of hashing the key. The
+ * hash value should be the same as hash_function()(key). Useful to speed-up
+ * the lookup if you already have the hash.
+ */
+ size_type count(const Key& key, std::size_t precalculated_hash) const {
+ return m_ht.count(key, precalculated_hash);
+ }
+
+ /**
+ * This overload only participates in the overload resolution if the typedef
+ * KeyEqual::is_transparent exists. If so, K must be hashable and comparable
+ * to Key.
+ */
+ template <
+ class K, class KE = KeyEqual,
+ typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr>
+ size_type count(const K& key) const {
+ return m_ht.count(key);
+ }
+
+ /**
+ * @copydoc count(const K& key) const
+ *
+ * Use the hash value 'precalculated_hash' instead of hashing the key. The
+ * hash value should be the same as hash_function()(key). Useful to speed-up
+ * the lookup if you already have the hash.
+ */
+ template <
+ class K, class KE = KeyEqual,
+ typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr>
+ size_type count(const K& key, std::size_t precalculated_hash) const {
+ return m_ht.count(key, precalculated_hash);
+ }
+
+ iterator find(const Key& key) { return m_ht.find(key); } // find/contains family: thin delegating wrappers over robin_hash
+
+ /**
+ * Use the hash value 'precalculated_hash' instead of hashing the key. The
+ * hash value should be the same as hash_function()(key). Useful to speed-up
+ * the lookup if you already have the hash.
+ */
+ iterator find(const Key& key, std::size_t precalculated_hash) {
+ return m_ht.find(key, precalculated_hash);
+ }
+
+ const_iterator find(const Key& key) const { return m_ht.find(key); }
+
+ /**
+ * @copydoc find(const Key& key, std::size_t precalculated_hash)
+ */
+ const_iterator find(const Key& key, std::size_t precalculated_hash) const {
+ return m_ht.find(key, precalculated_hash);
+ }
+
+ /**
+ * This overload only participates in the overload resolution if the typedef
+ * KeyEqual::is_transparent exists. If so, K must be hashable and comparable
+ * to Key.
+ */
+ template <
+ class K, class KE = KeyEqual,
+ typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr>
+ iterator find(const K& key) {
+ return m_ht.find(key);
+ }
+
+ /**
+ * @copydoc find(const K& key)
+ *
+ * Use the hash value 'precalculated_hash' instead of hashing the key. The
+ * hash value should be the same as hash_function()(key). Useful to speed-up
+ * the lookup if you already have the hash.
+ */
+ template <
+ class K, class KE = KeyEqual,
+ typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr>
+ iterator find(const K& key, std::size_t precalculated_hash) {
+ return m_ht.find(key, precalculated_hash);
+ }
+
+ /**
+ * @copydoc find(const K& key)
+ */
+ template <
+ class K, class KE = KeyEqual,
+ typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr>
+ const_iterator find(const K& key) const {
+ return m_ht.find(key);
+ }
+
+ /**
+ * @copydoc find(const K& key)
+ *
+ * Use the hash value 'precalculated_hash' instead of hashing the key. The
+ * hash value should be the same as hash_function()(key). Useful to speed-up
+ * the lookup if you already have the hash.
+ */
+ template <
+ class K, class KE = KeyEqual,
+ typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr>
+ const_iterator find(const K& key, std::size_t precalculated_hash) const {
+ return m_ht.find(key, precalculated_hash);
+ }
+
+ bool contains(const Key& key) const { return m_ht.contains(key); }
+
+ /**
+ * Use the hash value 'precalculated_hash' instead of hashing the key. The
+ * hash value should be the same as hash_function()(key). Useful to speed-up
+ * the lookup if you already have the hash.
+ */
+ bool contains(const Key& key, std::size_t precalculated_hash) const {
+ return m_ht.contains(key, precalculated_hash);
+ }
+
+ /**
+ * This overload only participates in the overload resolution if the typedef
+ * KeyEqual::is_transparent exists. If so, K must be hashable and comparable
+ * to Key.
+ */
+ template <
+ class K, class KE = KeyEqual,
+ typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr>
+ bool contains(const K& key) const {
+ return m_ht.contains(key);
+ }
+
+ /**
+ * @copydoc contains(const K& key) const
+ *
+ * Use the hash value 'precalculated_hash' instead of hashing the key. The
+ * hash value should be the same as hash_function()(key). Useful to speed-up
+ * the lookup if you already have the hash.
+ */
+ template <
+ class K, class KE = KeyEqual,
+ typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr>
+ bool contains(const K& key, std::size_t precalculated_hash) const {
+ return m_ht.contains(key, precalculated_hash);
+ }
+
+ std::pair<iterator, iterator> equal_range(const Key& key) { // delegates to robin_hash::equal_range
+ return m_ht.equal_range(key);
+ }
+
+ /**
+ * Use the hash value 'precalculated_hash' instead of hashing the key. The
+ * hash value should be the same as hash_function()(key). Useful to speed-up
+ * the lookup if you already have the hash.
+ */
+ std::pair<iterator, iterator> equal_range(const Key& key,
+ std::size_t precalculated_hash) {
+ return m_ht.equal_range(key, precalculated_hash);
+ }
+
+ std::pair<const_iterator, const_iterator> equal_range(const Key& key) const {
+ return m_ht.equal_range(key);
+ }
+
+ /**
+ * @copydoc equal_range(const Key& key, std::size_t precalculated_hash)
+ */
+ std::pair<const_iterator, const_iterator> equal_range(
+ const Key& key, std::size_t precalculated_hash) const {
+ return m_ht.equal_range(key, precalculated_hash);
+ }
+
+ /**
+ * This overload only participates in the overload resolution if the typedef
+ * KeyEqual::is_transparent exists. If so, K must be hashable and comparable
+ * to Key.
+ */
+ template <
+ class K, class KE = KeyEqual,
+ typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr>
+ std::pair<iterator, iterator> equal_range(const K& key) {
+ return m_ht.equal_range(key);
+ }
+
+ /**
+ * @copydoc equal_range(const K& key)
+ *
+ * Use the hash value 'precalculated_hash' instead of hashing the key. The
+ * hash value should be the same as hash_function()(key). Useful to speed-up
+ * the lookup if you already have the hash.
+ */
+ template <
+ class K, class KE = KeyEqual,
+ typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr>
+ std::pair<iterator, iterator> equal_range(const K& key,
+ std::size_t precalculated_hash) {
+ return m_ht.equal_range(key, precalculated_hash);
+ }
+
+ /**
+ * @copydoc equal_range(const K& key)
+ */
+ template <
+ class K, class KE = KeyEqual,
+ typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr>
+ std::pair<const_iterator, const_iterator> equal_range(const K& key) const {
+ return m_ht.equal_range(key);
+ }
+
+ /**
+ * @copydoc equal_range(const K& key, std::size_t precalculated_hash)
+ */
+ template <
+ class K, class KE = KeyEqual,
+ typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr>
+ std::pair<const_iterator, const_iterator> equal_range(
+ const K& key, std::size_t precalculated_hash) const {
+ return m_ht.equal_range(key, precalculated_hash);
+ }
+
+ /*
+ * Bucket interface
+ */
+ size_type bucket_count() const { return m_ht.bucket_count(); } // delegates to robin_hash
+ size_type max_bucket_count() const { return m_ht.max_bucket_count(); }
+
+ /*
+ * Hash policy
+ */
+ float load_factor() const { return m_ht.load_factor(); }
+
+ float min_load_factor() const { return m_ht.min_load_factor(); }
+ float max_load_factor() const { return m_ht.max_load_factor(); }
+
+ /**
+ * Set the `min_load_factor` to `ml`. When the `load_factor` of the map goes
+ * below `min_load_factor` after some erase operations, the map will be
+ * shrunk when an insertion occurs. The erase method itself never shrinks
+ * the map.
+ *
+ * The default value of `min_load_factor` is 0.0f, the map never shrinks by
+ * default.
+ */
+ void min_load_factor(float ml) { m_ht.min_load_factor(ml); }
+ void max_load_factor(float ml) { m_ht.max_load_factor(ml); }
+
+ void rehash(size_type count_) { m_ht.rehash(count_); }
+ void reserve(size_type count_) { m_ht.reserve(count_); }
+
+ /*
+ * Observers
+ */
+ hasher hash_function() const { return m_ht.hash_function(); }
+ key_equal key_eq() const { return m_ht.key_eq(); }
+
+ /*
+ * Other
+ */
+
+ /**
+ * Convert a const_iterator to an iterator.
+ */
+ iterator mutable_iterator(const_iterator pos) {
+ return m_ht.mutable_iterator(pos);
+ }
+
+ /**
+ * Serialize the map through the `serializer` parameter.
+ *
+ * The `serializer` parameter must be a function object that supports the
+ * following call:
+ * - `template<typename U> void operator()(const U& value);` where the types
+ * `std::int16_t`, `std::uint32_t`, `std::uint64_t`, `float` and
+ * `std::pair<Key, T>` must be supported for U.
+ *
+ * The implementation leaves binary compatibility (endianness, IEEE 754 for
+ * floats, ...) of the types it serializes in the hands of the `Serializer`
+ * function object if compatibility is required.
+ */
+ template <class Serializer>
+ void serialize(Serializer& serializer) const {
+ m_ht.serialize(serializer);
+ }
+
+ /**
+ * Deserialize a previously serialized map through the `deserializer`
+ * parameter.
+ *
+ * The `deserializer` parameter must be a function object that supports the
+ * following call:
+ * - `template<typename U> U operator()();` where the types `std::int16_t`,
+ * `std::uint32_t`, `std::uint64_t`, `float` and `std::pair<Key, T>` must be
+ * supported for U.
+ *
+ * If the deserialized hash map type is hash compatible with the serialized
+ * map, the deserialization process can be sped up by setting
+ * `hash_compatible` to true. To be hash compatible, the Hash, KeyEqual and
+ * GrowthPolicy must behave the same way than the ones used on the serialized
+ * map and the StoreHash must have the same value. The `std::size_t` must also
+ * be of the same size as the one on the platform used to serialize the map.
+ * If these criteria are not met, the behaviour is undefined with
+ * `hash_compatible` sets to true.
+ *
+ * The behaviour is undefined if the type `Key` and `T` of the `robin_map` are
+ * not the same as the types used during serialization.
+ *
+ * The implementation leaves binary compatibility (endianness, IEEE 754 for
+ * floats, size of int, ...) of the types it deserializes in the hands of the
+ * `Deserializer` function object if compatibility is required.
+ */
+ template <class Deserializer>
+ static robin_map deserialize(Deserializer& deserializer,
+ bool hash_compatible = false) {
+ robin_map map(0); // 0 initial buckets; robin_hash::deserialize fills the table
+ map.m_ht.deserialize(deserializer, hash_compatible);
+
+ return map;
+ }
+
+ friend bool operator==(const robin_map& lhs, const robin_map& rhs) { // order-independent comparison: each lhs element is looked up in rhs
+ if (lhs.size() != rhs.size()) {
+ return false;
+ }
+
+ for (const auto& element_lhs : lhs) {
+ const auto it_element_rhs = rhs.find(element_lhs.first);
+ if (it_element_rhs == rhs.cend() ||
+ element_lhs.second != it_element_rhs->second) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ friend bool operator!=(const robin_map& lhs, const robin_map& rhs) {
+ return !operator==(lhs, rhs);
+ }
+
+ friend void swap(robin_map& lhs, robin_map& rhs) { lhs.swap(rhs); }
+
+ private:
+ ht m_ht; // the underlying robin-hood hash table; every public member above delegates to it
+};
+
+/**
+ * Same as `tsl::robin_map<Key, T, Hash, KeyEqual, Allocator, StoreHash,
+ * tsl::rh::prime_growth_policy>`.
+ */
+template <class Key, class T, class Hash = std::hash<Key>,
+ class KeyEqual = std::equal_to<Key>,
+ class Allocator = std::allocator<std::pair<Key, T>>,
+ bool StoreHash = false>
+using robin_pg_map = robin_map<Key, T, Hash, KeyEqual, Allocator, StoreHash, // convenience alias: robin_map with prime-sized growth
+ tsl::rh::prime_growth_policy>;
+
+} // end namespace tsl
+
+#endif
diff --git a/misc/benchmarks/external/update.sh b/misc/benchmarks/external/update.sh
new file mode 100644
index 00000000..45b472b1
--- /dev/null
+++ b/misc/benchmarks/external/update.sh
@@ -0,0 +1,41 @@
+#!/bin/sh
+# Refresh the vendored third-party hash-map headers used by the benchmarks.
+set -e # abort on the first failed download so a truncated/empty header is never left behind
+tsl_h="https://raw.github.com/Tessil/hopscotch-map/master/include/tsl"
+tsl_r="https://raw.github.com/Tessil/robin-map/master/include/tsl"
+greg_s="https://raw.github.com/greg7mdp/sparsepp/master/sparsepp"
+greg_p="https://raw.github.com/greg7mdp/parallel-hashmap"
+martinus_r="https://raw.github.com/martinus/robin-hood-hashing/master/src/include"
+martinus_d="https://raw.github.com/martinus/unordered_dense/master/include/ankerl"
+skarupke="https://raw.github.com/skarupke/flat_hash_map/master"
+ktprime="https://raw.github.com/ktprime/emhash/master"
+
+mkdir -p ankerl skarupke tsl emhash # sparsepp
+
+wget "$martinus_r/robin_hood.h" -O "ankerl/robin_hood.h"
+wget "$martinus_d/unordered_dense.h" -O "ankerl/unordered_dense.h"
+
+wget "$skarupke/flat_hash_map.hpp" -O "skarupke/flat_hash_map.hpp"
+
+wget "$tsl_r/robin_growth_policy.h" -O "tsl/robin_growth_policy.h"
+wget "$tsl_r/robin_hash.h" -O "tsl/robin_hash.h"
+wget "$tsl_r/robin_map.h" -O "tsl/robin_map.h"
+
+#wget "$ktprime/thirdparty/wyhash.h" -O "emhash/wyhash.h"
+wget "$ktprime/hash_table7.hpp" -O "emhash/hash_table7.hpp"
+
+#wget "$tsl_h/hopscotch_growth_policy.h" -O "tsl/hopscotch_growth_policy.h"
+#wget "$tsl_h/hopscotch_hash.h" -O "tsl/hopscotch_hash.h"
+#wget "$tsl_h/hopscotch_map.h" -O "tsl/hopscotch_map.h"
+
+#wget "$skarupke/bytell_hash_map.hpp" -O "skarupke/bytell_hash_map.hpp"
+
+#wget "$greg_s/spp.h" -O "sparsepp/spp.h"
+#wget "$greg_s/spp_config.h" -O "sparsepp/spp_config.h"
+#wget "$greg_s/spp_dlalloc.h" -O "sparsepp/spp_dlalloc.h"
+#wget "$greg_s/spp_memory.h" -O "sparsepp/spp_memory.h"
+#wget "$greg_s/spp_smartptr.h" -O "sparsepp/spp_smartptr.h"
+#wget "$greg_s/spp_stdint.h" -O "sparsepp/spp_stdint.h"
+#wget "$greg_s/spp_timer.h" -O "sparsepp/spp_timer.h"
+#wget "$greg_s/spp_traits.h" -O "sparsepp/spp_traits.h"
+#wget "$greg_s/spp_utils.h" -O "sparsepp/spp_utils.h"
diff --git a/misc/benchmarks/misc/cbits_benchmark.cpp b/misc/benchmarks/misc/cbits_benchmark.cpp
new file mode 100644
index 00000000..dd709db1
--- /dev/null
+++ b/misc/benchmarks/misc/cbits_benchmark.cpp
@@ -0,0 +1,123 @@
+#include <iostream>
+#include <bitset>
+#include <cstdlib> // rand
+#include <ctime> // timer
+
+enum{ N=1<<22 }; // 4.2 mill.
+#define i_static
+#include <stc/crandom.h>
+#define i_type cbits
+#define i_len N
+#include <stc/cbits.h>
+
+inline unsigned long get_time_in_ms() // CPU time since program start, in milliseconds
+{
+    return (unsigned long)(clock() * 1000.0 / CLOCKS_PER_SEC); // multiply in double: 1000*clock() overflows 32-bit clock_t after ~35 min
+}
+
+
+void one_sec_delay() // busy-wait for ~1 second so earlier activity settles before the next timed run
+{
+    const unsigned long deadline = get_time_in_ms() + 1000;
+
+    while (get_time_in_ms() < deadline)
+    {
+    }
+}
+
+
+int main(int argc, char **argv) // benchmark: random writes + sequential reads over bool[], std::bitset and STC cbits
+{
+    size_t seed = time(NULL); // one seed reused for all three runs => identical access patterns
+
+    using namespace std;
+    bool *bools = new bool[N](); // value-initialize: summing indeterminate bools is undefined behaviour
+
+    unsigned long current_time, difference1, difference2;
+    uint64_t total;
+
+    one_sec_delay();
+
+    total = 0;
+    csrandom(seed);
+    current_time = get_time_in_ms();
+
+    c_forrange (40 * N)
+    {
+        uint64_t r = crandom();
+        bools[r & (N-1)] = r & 1<<29; // random slot, pseudo-random bit value
+    }
+
+    difference1 = get_time_in_ms() - current_time;
+    current_time = get_time_in_ms();
+
+    c_forrange (100) c_forrange (num, N)
+    {
+        total += bools[num];
+    }
+
+    difference2 = get_time_in_ms() - current_time; // stop the clock before deallocation so all three runs time the same work
+
+    delete [] bools;
+
+    cout << "Bool:" << endl << "sum total = " << total << ", random access time = " << difference1
+         << ", sequential access time = " << difference2 << endl << endl;
+
+    one_sec_delay();
+
+    total = 0;
+    csrandom(seed);
+    current_time = get_time_in_ms();
+    bitset<N> bits; // zero-initialized; lives on the stack (N/8 = 512 KiB)
+
+    c_forrange (40 * N)
+    {
+        uint64_t r = crandom();
+        bits[r & (N-1)] = r & 1<<29;
+    }
+
+    difference1 = get_time_in_ms() - current_time;
+    current_time = get_time_in_ms();
+
+    c_forrange (100) c_forrange (num, N)
+    {
+        total += bits[num];
+    }
+
+    difference2 = get_time_in_ms() - current_time;
+
+    cout << "Bitset:" << endl << "sum total = " << total << ", random access time = " << difference1
+         << ", sequential access time = " << difference2 << endl << endl;
+
+    one_sec_delay();
+
+    total = 0;
+    csrandom(seed);
+    current_time = get_time_in_ms();
+    cbits bits2 = cbits_with_size(N, false);
+
+    c_forrange (40 * N)
+    {
+        uint64_t r = crandom();
+        cbits_set_value(&bits2, r & (N-1), r & 1<<29);
+    }
+
+    difference1 = get_time_in_ms() - current_time;
+    current_time = get_time_in_ms();
+
+    c_forrange (100) c_forrange (num, N)
+    {
+        total += cbits_at(&bits2, num);
+    }
+
+    difference2 = get_time_in_ms() - current_time; // sample the timer before cbits_drop, matching the other two runs
+
+    cbits_drop(&bits2);
+
+    cout << "cbits:" << endl << "sum total = " << total << ", random access time = " << difference1
+         << ", sequential access time = " << difference2 << endl << endl;
+
+    //cin.get();
+
+    return 0;
+}
diff --git a/misc/benchmarks/misc/names.txt b/misc/benchmarks/misc/names.txt
new file mode 100644
index 00000000..561acbbf
--- /dev/null
+++ b/misc/benchmarks/misc/names.txt
@@ -0,0 +1,5163 @@
+Mary
+Patricia
+Linda
+Barbara
+Elizabeth
+Jennifer
+Maria
+Susan
+Margaret
+Dorothy
+Lisa
+Nancy
+Karen
+Betty
+Helen
+Sandra
+Donna
+Carol
+Ruth
+Sharon
+Michelle
+Laura
+Sarah
+Kimberly
+Deborah
+Jessica
+Shirley
+Cynthia
+Angela
+Melissa
+Brenda
+Amy
+Anna
+Rebecca
+Virginia
+Kathleen
+Pamela
+Martha
+Debra
+Amanda
+Stephanie
+Carolyn
+Christine
+Marie
+Janet
+Catherine
+Frances
+Ann
+Joyce
+Diane
+Alice
+Julie
+Heather
+Teresa
+Doris
+Gloria
+Evelyn
+Jean
+Cheryl
+Mildred
+Katherine
+Joan
+Ashley
+Judith
+Rose
+Janice
+Kelly
+Nicole
+Judy
+Christina
+Kathy
+Theresa
+Beverly
+Denise
+Tammy
+Irene
+Jane
+Lori
+Rachel
+Marilyn
+Andrea
+Kathryn
+Louise
+Sara
+Anne
+Jacqueline
+Wanda
+Bonnie
+Julia
+Ruby
+Lois
+Tina
+Phyllis
+Norma
+Paula
+Diana
+Annie
+Lillian
+Emily
+Robin
+Peggy
+Crystal
+Gladys
+Rita
+Dawn
+Connie
+Florence
+Tracy
+Edna
+Tiffany
+Carmen
+Rosa
+Cindy
+Grace
+Wendy
+Victoria
+Edith
+Kim
+Sherry
+Sylvia
+Josephine
+Thelma
+Shannon
+Sheila
+Ethel
+Ellen
+Elaine
+Marjorie
+Carrie
+Charlotte
+Monica
+Esther
+Pauline
+Emma
+Juanita
+Anita
+Rhonda
+Hazel
+Amber
+Eva
+Debbie
+April
+Leslie
+Clara
+Lucille
+Jamie
+Joanne
+Eleanor
+Valerie
+Danielle
+Megan
+Alicia
+Suzanne
+Michele
+Gail
+Bertha
+Darlene
+Veronica
+Jill
+Erin
+Geraldine
+Lauren
+Cathy
+Joann
+Lorraine
+Lynn
+Sally
+Regina
+Erica
+Beatrice
+Dolores
+Bernice
+Audrey
+Yvonne
+Annette
+June
+Samantha
+Marion
+Dana
+Stacy
+Ana
+Renee
+Ida
+Vivian
+Roberta
+Holly
+Brittany
+Melanie
+Loretta
+Yolanda
+Jeanette
+Laurie
+Katie
+Kristen
+Vanessa
+Alma
+Sue
+Elsie
+Beth
+Jeanne
+Vicki
+Carla
+Tara
+Rosemary
+Eileen
+Terri
+Gertrude
+Lucy
+Tonya
+Ella
+Stacey
+Wilma
+Gina
+Kristin
+Jessie
+Natalie
+Agnes
+Vera
+Willie
+Charlene
+Bessie
+Delores
+Melinda
+Pearl
+Arlene
+Maureen
+Colleen
+Allison
+Tamara
+Joy
+Georgia
+Constance
+Lillie
+Claudia
+Jackie
+Marcia
+Tanya
+Nellie
+Minnie
+Marlene
+Heidi
+Glenda
+Lydia
+Viola
+Courtney
+Marian
+Stella
+Caroline
+Dora
+Jo
+Vickie
+Mattie
+Terry
+Maxine
+Irma
+Mabel
+Marsha
+Myrtle
+Lena
+Christy
+Deanna
+Patsy
+Hilda
+Gwendolyn
+Jennie
+Nora
+Margie
+Nina
+Cassandra
+Leah
+Penny
+Kay
+Priscilla
+Naomi
+Carole
+Brandy
+Olga
+Billie
+Dianne
+Tracey
+Leona
+Jenny
+Felicia
+Sonia
+Miriam
+Velma
+Becky
+Bobbie
+Violet
+Kristina
+Toni
+Misty
+Mae
+Shelly
+Daisy
+Ramona
+Sherri
+Erika
+Katrina
+Claire
+Lindsey
+Lindsay
+Geneva
+Guadalupe
+Belinda
+Margarita
+Sheryl
+Cora
+Faye
+Ada
+Natasha
+Sabrina
+Isabel
+Marguerite
+Hattie
+Harriet
+Molly
+Cecilia
+Kristi
+Brandi
+Blanche
+Sandy
+Rosie
+Joanna
+Iris
+Eunice
+Angie
+Inez
+Lynda
+Madeline
+Amelia
+Alberta
+Genevieve
+Monique
+Jodi
+Janie
+Maggie
+Kayla
+Sonya
+Jan
+Lee
+Kristine
+Candace
+Fannie
+Maryann
+Opal
+Alison
+Yvette
+Melody
+Luz
+Susie
+Olivia
+Flora
+Shelley
+Kristy
+Mamie
+Lula
+Lola
+Verna
+Beulah
+Antoinette
+Candice
+Juana
+Jeannette
+Pam
+Kelli
+Hannah
+Whitney
+Bridget
+Karla
+Celia
+Latoya
+Patty
+Shelia
+Gayle
+Della
+Vicky
+Lynne
+Sheri
+Marianne
+Kara
+Jacquelyn
+Erma
+Blanca
+Myra
+Leticia
+Pat
+Krista
+Roxanne
+Angelica
+Johnnie
+Robyn
+Francis
+Adrienne
+Rosalie
+Alexandra
+Brooke
+Bethany
+Sadie
+Bernadette
+Traci
+Jody
+Kendra
+Jasmine
+Nichole
+Rachael
+Chelsea
+Mable
+Ernestine
+Muriel
+Marcella
+Elena
+Krystal
+Angelina
+Nadine
+Kari
+Estelle
+Dianna
+Paulette
+Lora
+Mona
+Doreen
+Rosemarie
+Angel
+Desiree
+Antonia
+Hope
+Ginger
+Janis
+Betsy
+Christie
+Freda
+Mercedes
+Meredith
+Lynette
+Teri
+Cristina
+Eula
+Leigh
+Meghan
+Sophia
+Eloise
+Rochelle
+Gretchen
+Cecelia
+Raquel
+Henrietta
+Alyssa
+Jana
+Kelley
+Gwen
+Kerry
+Jenna
+Tricia
+Laverne
+Olive
+Alexis
+Tasha
+Silvia
+Elvira
+Casey
+Delia
+Sophie
+Kate
+Patti
+Lorena
+Kellie
+Sonja
+Lila
+Lana
+Darla
+May
+Mindy
+Essie
+Mandy
+Lorene
+Elsa
+Josefina
+Jeannie
+Miranda
+Dixie
+Lucia
+Marta
+Faith
+Lela
+Johanna
+Shari
+Camille
+Tami
+Shawna
+Elisa
+Ebony
+Melba
+Ora
+Nettie
+Tabitha
+Ollie
+Jaime
+Winifred
+Kristie
+Marina
+Alisha
+Aimee
+Rena
+Myrna
+Marla
+Tammie
+Latasha
+Bonita
+Patrice
+Ronda
+Sherrie
+Addie
+Francine
+Deloris
+Stacie
+Adriana
+Cheri
+Shelby
+Abigail
+Celeste
+Jewel
+Cara
+Adele
+Rebekah
+Lucinda
+Dorthy
+Chris
+Effie
+Trina
+Reba
+Shawn
+Sallie
+Aurora
+Lenora
+Etta
+Lottie
+Kerri
+Trisha
+Nikki
+Estella
+Francisca
+Josie
+Tracie
+Marissa
+Karin
+Brittney
+Janelle
+Lourdes
+Laurel
+Helene
+Fern
+Elva
+Corinne
+Kelsey
+Ina
+Bettie
+Elisabeth
+Aida
+Caitlin
+Ingrid
+Iva
+Eugenia
+Christa
+Goldie
+Cassie
+Maude
+Jenifer
+Therese
+Frankie
+Dena
+Lorna
+Janette
+Latonya
+Candy
+Morgan
+Consuelo
+Tamika
+Rosetta
+Debora
+Cherie
+Polly
+Dina
+Jewell
+Fay
+Jillian
+Dorothea
+Nell
+Trudy
+Esperanza
+Patrica
+Kimberley
+Shanna
+Helena
+Carolina
+Cleo
+Stefanie
+Rosario
+Ola
+Janine
+Mollie
+Lupe
+Alisa
+Lou
+Maribel
+Susanne
+Bette
+Susana
+Elise
+Cecile
+Isabelle
+Lesley
+Jocelyn
+Paige
+Joni
+Rachelle
+Leola
+Daphne
+Alta
+Ester
+Petra
+Graciela
+Imogene
+Jolene
+Keisha
+Lacey
+Glenna
+Gabriela
+Keri
+Ursula
+Lizzie
+Kirsten
+Shana
+Adeline
+Mayra
+Jayne
+Jaclyn
+Gracie
+Sondra
+Carmela
+Marisa
+Rosalind
+Charity
+Tonia
+Beatriz
+Marisol
+Clarice
+Jeanine
+Sheena
+Angeline
+Frieda
+Lily
+Robbie
+Shauna
+Millie
+Claudette
+Cathleen
+Angelia
+Gabrielle
+Autumn
+Katharine
+Summer
+Jodie
+Staci
+Lea
+Christi
+Jimmie
+Justine
+Elma
+Luella
+Margret
+Dominique
+Socorro
+Rene
+Martina
+Margo
+Mavis
+Callie
+Bobbi
+Maritza
+Lucile
+Leanne
+Jeannine
+Deana
+Aileen
+Lorie
+Ladonna
+Willa
+Manuela
+Gale
+Selma
+Dolly
+Sybil
+Abby
+Lara
+Dale
+Ivy
+Dee
+Winnie
+Marcy
+Luisa
+Jeri
+Magdalena
+Ofelia
+Meagan
+Audra
+Matilda
+Leila
+Cornelia
+Bianca
+Simone
+Bettye
+Randi
+Virgie
+Latisha
+Barbra
+Georgina
+Eliza
+Leann
+Bridgette
+Rhoda
+Haley
+Adela
+Nola
+Bernadine
+Flossie
+Ila
+Greta
+Ruthie
+Nelda
+Minerva
+Lilly
+Terrie
+Letha
+Hilary
+Estela
+Valarie
+Brianna
+Rosalyn
+Earline
+Catalina
+Ava
+Mia
+Clarissa
+Lidia
+Corrine
+Alexandria
+Concepcion
+Tia
+Sharron
+Rae
+Dona
+Ericka
+Jami
+Elnora
+Chandra
+Lenore
+Neva
+Marylou
+Melisa
+Tabatha
+Serena
+Avis
+Allie
+Sofia
+Jeanie
+Odessa
+Nannie
+Harriett
+Loraine
+Penelope
+Milagros
+Emilia
+Benita
+Allyson
+Ashlee
+Tania
+Tommie
+Esmeralda
+Karina
+Eve
+Pearlie
+Zelma
+Malinda
+Noreen
+Tameka
+Saundra
+Hillary
+Amie
+Althea
+Rosalinda
+Jordan
+Lilia
+Alana
+Gay
+Clare
+Alejandra
+Elinor
+Michael
+Lorrie
+Jerri
+Darcy
+Earnestine
+Carmella
+Taylor
+Noemi
+Marcie
+Liza
+Annabelle
+Louisa
+Earlene
+Mallory
+Carlene
+Nita
+Selena
+Tanisha
+Katy
+Julianne
+John
+Lakisha
+Edwina
+Maricela
+Margery
+Kenya
+Dollie
+Roxie
+Roslyn
+Kathrine
+Nanette
+Charmaine
+Lavonne
+Ilene
+Kris
+Tammi
+Suzette
+Corine
+Kaye
+Jerry
+Merle
+Chrystal
+Lina
+Deanne
+Lilian
+Juliana
+Aline
+Luann
+Kasey
+Maryanne
+Evangeline
+Colette
+Melva
+Lawanda
+Yesenia
+Nadia
+Madge
+Kathie
+Eddie
+Ophelia
+Valeria
+Nona
+Mitzi
+Mari
+Georgette
+Claudine
+Fran
+Alissa
+Roseann
+Lakeisha
+Susanna
+Reva
+Deidre
+Chasity
+Sheree
+Carly
+James
+Elvia
+Alyce
+Deirdre
+Gena
+Briana
+Araceli
+Katelyn
+Rosanne
+Wendi
+Tessa
+Berta
+Marva
+Imelda
+Marietta
+Marci
+Leonor
+Arline
+Sasha
+Madelyn
+Janna
+Juliette
+Deena
+Aurelia
+Josefa
+Augusta
+Liliana
+Young
+Christian
+Lessie
+Amalia
+Savannah
+Anastasia
+Vilma
+Natalia
+Rosella
+Lynnette
+Corina
+Alfreda
+Leanna
+Carey
+Amparo
+Coleen
+Tamra
+Aisha
+Wilda
+Karyn
+Cherry
+Queen
+Maura
+Mai
+Evangelina
+Rosanna
+Hallie
+Erna
+Enid
+Mariana
+Lacy
+Juliet
+Jacklyn
+Freida
+Madeleine
+Mara
+Hester
+Cathryn
+Lelia
+Casandra
+Bridgett
+Angelita
+Jannie
+Dionne
+Annmarie
+Katina
+Beryl
+Phoebe
+Millicent
+Katheryn
+Diann
+Carissa
+Maryellen
+Liz
+Lauri
+Helga
+Gilda
+Adrian
+Rhea
+Marquita
+Hollie
+Tisha
+Tamera
+Angelique
+Francesca
+Britney
+Kaitlin
+Lolita
+Florine
+Rowena
+Reyna
+Twila
+Fanny
+Janell
+Ines
+Concetta
+Bertie
+Alba
+Brigitte
+Alyson
+Vonda
+Pansy
+Elba
+Noelle
+Letitia
+Kitty
+Deann
+Brandie
+Louella
+Leta
+Felecia
+Sharlene
+Lesa
+Beverley
+Robert
+Isabella
+Herminia
+Terra
+Celina
+Tori
+Octavia
+Jade
+Denice
+Germaine
+Sierra
+Michell
+Cortney
+Nelly
+Doretha
+Sydney
+Deidra
+Monika
+Lashonda
+Judi
+Chelsey
+Antionette
+Margot
+Bobby
+Adelaide
+Nan
+Leeann
+Elisha
+Dessie
+Libby
+Kathi
+Gayla
+Latanya
+Mina
+Mellisa
+Kimberlee
+Jasmin
+Renae
+Zelda
+Elda
+Ma
+Justina
+Gussie
+Emilie
+Camilla
+Abbie
+Rocio
+Kaitlyn
+Jesse
+Edythe
+Ashleigh
+Selina
+Lakesha
+Geri
+Allene
+Pamala
+Michaela
+Dayna
+Caryn
+Rosalia
+Sun
+Jacquline
+Rebeca
+Marybeth
+Krystle
+Iola
+Dottie
+Bennie
+Belle
+Aubrey
+Griselda
+Ernestina
+Elida
+Adrianne
+Demetria
+Delma
+Chong
+Jaqueline
+Destiny
+Arleen
+Virgina
+Retha
+Fatima
+Tillie
+Eleanore
+Cari
+Treva
+Birdie
+Wilhelmina
+Rosalee
+Maurine
+Latrice
+Yong
+Jena
+Taryn
+Elia
+Debby
+Maudie
+Jeanna
+Delilah
+Catrina
+Shonda
+Hortencia
+Theodora
+Teresita
+Robbin
+Danette
+Maryjane
+Freddie
+Delphine
+Brianne
+Nilda
+Danna
+Cindi
+Bess
+Iona
+Hanna
+Ariel
+Winona
+Vida
+Rosita
+Marianna
+William
+Racheal
+Guillermina
+Eloisa
+Celestine
+Caren
+Malissa
+Lona
+Chantel
+Shellie
+Marisela
+Leora
+Agatha
+Soledad
+Migdalia
+Ivette
+Christen
+Athena
+Janel
+Chloe
+Veda
+Pattie
+Tessie
+Tera
+Marilynn
+Lucretia
+Karrie
+Dinah
+Daniela
+Alecia
+Adelina
+Vernice
+Shiela
+Portia
+Merry
+Lashawn
+Devon
+Dara
+Tawana
+Oma
+Verda
+Christin
+Alene
+Zella
+Sandi
+Rafaela
+Maya
+Kira
+Candida
+Alvina
+Suzan
+Shayla
+Lyn
+Lettie
+Alva
+Samatha
+Oralia
+Matilde
+Madonna
+Larissa
+Vesta
+Renita
+India
+Delois
+Shanda
+Phillis
+Lorri
+Erlinda
+Cruz
+Cathrine
+Barb
+Zoe
+Isabell
+Ione
+Gisela
+Charlie
+Valencia
+Roxanna
+Mayme
+Kisha
+Ellie
+Mellissa
+Dorris
+Dalia
+Bella
+Annetta
+Zoila
+Reta
+Reina
+Lauretta
+Kylie
+Christal
+Pilar
+Charla
+Elissa
+Tiffani
+Tana
+Paulina
+Leota
+Breanna
+Jayme
+Carmel
+Vernell
+Tomasa
+Mandi
+Dominga
+Santa
+Melodie
+Lura
+Alexa
+Tamela
+Ryan
+Mirna
+Kerrie
+Venus
+Noel
+Felicita
+Cristy
+Carmelita
+Berniece
+Annemarie
+Tiara
+Roseanne
+Missy
+Cori
+Roxana
+Pricilla
+Kristal
+Jung
+Elyse
+Haydee
+Aletha
+Bettina
+Marge
+Gillian
+Filomena
+Charles
+Zenaida
+Harriette
+Caridad
+Vada
+Una
+Aretha
+Pearline
+Marjory
+Marcela
+Flor
+Evette
+Elouise
+Alina
+Trinidad
+David
+Damaris
+Catharine
+Carroll
+Belva
+Nakia
+Marlena
+Luanne
+Lorine
+Karon
+Dorene
+Danita
+Brenna
+Tatiana
+Sammie
+Louann
+Loren
+Julianna
+Andria
+Philomena
+Lucila
+Leonora
+Dovie
+Romona
+Mimi
+Jacquelin
+Gaye
+Tonja
+Misti
+Joe
+Gene
+Chastity
+Stacia
+Roxann
+Micaela
+Nikita
+Mei
+Velda
+Marlys
+Johnna
+Aura
+Lavern
+Ivonne
+Hayley
+Nicki
+Majorie
+Herlinda
+George
+Alpha
+Yadira
+Perla
+Gregoria
+Daniel
+Antonette
+Shelli
+Mozelle
+Mariah
+Joelle
+Cordelia
+Josette
+Chiquita
+Trista
+Louis
+Laquita
+Georgiana
+Candi
+Shanon
+Lonnie
+Hildegard
+Cecil
+Valentina
+Stephany
+Magda
+Karol
+Gerry
+Gabriella
+Tiana
+Roma
+Richelle
+Ray
+Princess
+Oleta
+Jacque
+Idella
+Alaina
+Suzanna
+Jovita
+Blair
+Tosha
+Raven
+Nereida
+Marlyn
+Kyla
+Joseph
+Delfina
+Tena
+Stephenie
+Sabina
+Nathalie
+Marcelle
+Gertie
+Darleen
+Thea
+Sharonda
+Shantel
+Belen
+Venessa
+Rosalina
+Ona
+Genoveva
+Corey
+Clementine
+Rosalba
+Renate
+Renata
+Mi
+Ivory
+Georgianna
+Floy
+Dorcas
+Ariana
+Tyra
+Theda
+Mariam
+Juli
+Jesica
+Donnie
+Vikki
+Verla
+Roselyn
+Melvina
+Jannette
+Ginny
+Debrah
+Corrie
+Asia
+Violeta
+Myrtis
+Latricia
+Collette
+Charleen
+Anissa
+Viviana
+Twyla
+Precious
+Nedra
+Latonia
+Lan
+Hellen
+Fabiola
+Annamarie
+Adell
+Sharyn
+Chantal
+Niki
+Maud
+Lizette
+Lindy
+Kia
+Kesha
+Jeana
+Danelle
+Charline
+Chanel
+Carrol
+Valorie
+Lia
+Dortha
+Cristal
+Sunny
+Leone
+Leilani
+Gerri
+Debi
+Andra
+Keshia
+Ima
+Eulalia
+Easter
+Dulce
+Natividad
+Linnie
+Kami
+Georgie
+Catina
+Brook
+Alda
+Winnifred
+Sharla
+Ruthann
+Meaghan
+Magdalene
+Lissette
+Adelaida
+Venita
+Trena
+Shirlene
+Shameka
+Elizebeth
+Dian
+Shanta
+Mickey
+Latosha
+Carlotta
+Windy
+Soon
+Rosina
+Mariann
+Leisa
+Jonnie
+Dawna
+Cathie
+Billy
+Astrid
+Sidney
+Laureen
+Janeen
+Holli
+Fawn
+Vickey
+Teressa
+Shante
+Rubye
+Marcelina
+Chanda
+Cary
+Terese
+Scarlett
+Marty
+Marnie
+Lulu
+Lisette
+Jeniffer
+Elenor
+Dorinda
+Donita
+Carman
+Bernita
+Altagracia
+Aleta
+Adrianna
+Zoraida
+Ronnie
+Nicola
+Lyndsey
+Kendall
+Janina
+Chrissy
+Ami
+Starla
+Phylis
+Phuong
+Kyra
+Charisse
+Blanch
+Sanjuanita
+Rona
+Nanci
+Marilee
+Maranda
+Cory
+Brigette
+Sanjuana
+Marita
+Kassandra
+Joycelyn
+Ira
+Felipa
+Chelsie
+Bonny
+Mireya
+Lorenza
+Kyong
+Ileana
+Candelaria
+Tony
+Toby
+Sherie
+Ok
+Mark
+Lucie
+Leatrice
+Lakeshia
+Gerda
+Edie
+Bambi
+Marylin
+Lavon
+Hortense
+Garnet
+Evie
+Tressa
+Shayna
+Lavina
+Kyung
+Jeanetta
+Sherrill
+Shara
+Phyliss
+Mittie
+Anabel
+Alesia
+Thuy
+Tawanda
+Richard
+Joanie
+Tiffanie
+Lashanda
+Karissa
+Enriqueta
+Daria
+Daniella
+Corinna
+Alanna
+Abbey
+Roxane
+Roseanna
+Magnolia
+Lida
+Kyle
+Joellen
+Era
+Coral
+Carleen
+Tresa
+Peggie
+Novella
+Nila
+Maybelle
+Jenelle
+Carina
+Nova
+Melina
+Marquerite
+Margarette
+Josephina
+Evonne
+Devin
+Cinthia
+Albina
+Toya
+Tawnya
+Sherita
+Santos
+Myriam
+Lizabeth
+Lise
+Keely
+Jenni
+Giselle
+Cheryle
+Ardith
+Ardis
+Alesha
+Adriane
+Shaina
+Linnea
+Karolyn
+Hong
+Florida
+Felisha
+Dori
+Darci
+Artie
+Armida
+Zola
+Xiomara
+Vergie
+Shamika
+Nena
+Nannette
+Maxie
+Lovie
+Jeane
+Jaimie
+Inge
+Farrah
+Elaina
+Caitlyn
+Starr
+Felicitas
+Cherly
+Caryl
+Yolonda
+Yasmin
+Teena
+Prudence
+Pennie
+Nydia
+Mackenzie
+Orpha
+Marvel
+Lizbeth
+Laurette
+Jerrie
+Hermelinda
+Carolee
+Tierra
+Mirian
+Meta
+Melony
+Kori
+Jennette
+Jamila
+Ena
+Anh
+Yoshiko
+Susannah
+Salina
+Rhiannon
+Joleen
+Cristine
+Ashton
+Aracely
+Tomeka
+Shalonda
+Marti
+Lacie
+Kala
+Jada
+Ilse
+Hailey
+Brittani
+Zona
+Syble
+Sherryl
+Randy
+Nidia
+Marlo
+Kandice
+Kandi
+Deb
+Dean
+America
+Alycia
+Tommy
+Ronna
+Norene
+Mercy
+Jose
+Ingeborg
+Giovanna
+Gemma
+Christel
+Audry
+Zora
+Vita
+Van
+Trish
+Stephaine
+Shirlee
+Shanika
+Melonie
+Mazie
+Jazmin
+Inga
+Hoa
+Hettie
+Geralyn
+Fonda
+Estrella
+Adella
+Su
+Sarita
+Rina
+Milissa
+Maribeth
+Golda
+Evon
+Ethelyn
+Enedina
+Cherise
+Chana
+Velva
+Tawanna
+Sade
+Mirta
+Li
+Karie
+Jacinta
+Elna
+Davina
+Cierra
+Ashlie
+Albertha
+Tanesha
+Stephani
+Nelle
+Mindi
+Lu
+Lorinda
+Larue
+Florene
+Demetra
+Dedra
+Ciara
+Chantelle
+Ashly
+Suzy
+Rosalva
+Noelia
+Lyda
+Leatha
+Krystyna
+Kristan
+Karri
+Darline
+Darcie
+Cinda
+Cheyenne
+Cherrie
+Awilda
+Almeda
+Rolanda
+Lanette
+Jerilyn
+Gisele
+Evalyn
+Cyndi
+Cleta
+Carin
+Zina
+Zena
+Velia
+Tanika
+Paul
+Charissa
+Thomas
+Talia
+Margarete
+Lavonda
+Kaylee
+Kathlene
+Jonna
+Irena
+Ilona
+Idalia
+Candis
+Candance
+Brandee
+Anitra
+Alida
+Sigrid
+Nicolette
+Maryjo
+Linette
+Hedwig
+Christiana
+Cassidy
+Alexia
+Tressie
+Modesta
+Lupita
+Lita
+Gladis
+Evelia
+Davida
+Cherri
+Cecily
+Ashely
+Annabel
+Agustina
+Wanita
+Shirly
+Rosaura
+Hulda
+Eun
+Bailey
+Yetta
+Verona
+Thomasina
+Sibyl
+Shannan
+Mechelle
+Lue
+Leandra
+Lani
+Kylee
+Kandy
+Jolynn
+Ferne
+Eboni
+Corene
+Alysia
+Zula
+Nada
+Moira
+Lyndsay
+Lorretta
+Juan
+Jammie
+Hortensia
+Gaynell
+Cameron
+Adria
+Vina
+Vicenta
+Tangela
+Stephine
+Norine
+Nella
+Liana
+Leslee
+Kimberely
+Iliana
+Glory
+Felica
+Emogene
+Elfriede
+Eden
+Eartha
+Carma
+Bea
+Ocie
+Marry
+Lennie
+Kiara
+Jacalyn
+Carlota
+Arielle
+Yu
+Star
+Otilia
+Kirstin
+Kacey
+Johnetta
+Joey
+Joetta
+Jeraldine
+Jaunita
+Elana
+Dorthea
+Cami
+Amada
+Adelia
+Vernita
+Tamar
+Siobhan
+Renea
+Rashida
+Ouida
+Odell
+Nilsa
+Meryl
+Kristyn
+Julieta
+Danica
+Breanne
+Aurea
+Anglea
+Sherron
+Odette
+Malia
+Lorelei
+Lin
+Leesa
+Kenna
+Kathlyn
+Fiona
+Charlette
+Suzie
+Shantell
+Sabra
+Racquel
+Myong
+Mira
+Martine
+Lucienne
+Lavada
+Juliann
+Johnie
+Elvera
+Delphia
+Clair
+Christiane
+Charolette
+Carri
+Augustine
+Asha
+Angella
+Paola
+Ninfa
+Leda
+Lai
+Eda
+Sunshine
+Stefani
+Shanell
+Palma
+Machelle
+Lissa
+Kecia
+Kathryne
+Karlene
+Julissa
+Jettie
+Jenniffer
+Hui
+Corrina
+Christopher
+Carolann
+Alena
+Tess
+Rosaria
+Myrtice
+Marylee
+Liane
+Kenyatta
+Judie
+Janey
+In
+Elmira
+Eldora
+Denna
+Cristi
+Cathi
+Zaida
+Vonnie
+Viva
+Vernie
+Rosaline
+Mariela
+Luciana
+Lesli
+Karan
+Felice
+Deneen
+Adina
+Wynona
+Tarsha
+Sheron
+Shasta
+Shanita
+Shani
+Shandra
+Randa
+Pinkie
+Paris
+Nelida
+Marilou
+Lyla
+Laurene
+Laci
+Joi
+Janene
+Dorotha
+Daniele
+Dani
+Carolynn
+Carlyn
+Berenice
+Ayesha
+Anneliese
+Alethea
+Thersa
+Tamiko
+Rufina
+Oliva
+Mozell
+Marylyn
+Madison
+Kristian
+Kathyrn
+Kasandra
+Kandace
+Janae
+Gabriel
+Domenica
+Debbra
+Dannielle
+Chun
+Buffy
+Barbie
+Arcelia
+Aja
+Zenobia
+Sharen
+Sharee
+Patrick
+Page
+My
+Lavinia
+Kum
+Kacie
+Jackeline
+Huong
+Felisa
+Emelia
+Eleanora
+Cythia
+Cristin
+Clyde
+Claribel
+Caron
+Anastacia
+Zulma
+Zandra
+Yoko
+Tenisha
+Susann
+Sherilyn
+Shay
+Shawanda
+Sabine
+Romana
+Mathilda
+Linsey
+Keiko
+Joana
+Isela
+Gretta
+Georgetta
+Eugenie
+Dusty
+Desirae
+Delora
+Corazon
+Antonina
+Anika
+Willene
+Tracee
+Tamatha
+Regan
+Nichelle
+Mickie
+Maegan
+Luana
+Lanita
+Kelsie
+Edelmira
+Bree
+Afton
+Teodora
+Tamie
+Shena
+Meg
+Linh
+Keli
+Kaci
+Danyelle
+Britt
+Arlette
+Albertine
+Adelle
+Tiffiny
+Stormy
+Simona
+Numbers
+Nicolasa
+Nichol
+Nia
+Nakisha
+Mee
+Maira
+Loreen
+Kizzy
+Johnny
+Jay
+Fallon
+Christene
+Bobbye
+Anthony
+Ying
+Vincenza
+Tanja
+Rubie
+Roni
+Queenie
+Margarett
+Kimberli
+Irmgard
+Idell
+Hilma
+Evelina
+Esta
+Emilee
+Dennise
+Dania
+Carl
+Carie
+Antonio
+Wai
+Sang
+Risa
+Rikki
+Particia
+Mui
+Masako
+Mario
+Luvenia
+Loree
+Loni
+Lien
+Kevin
+Gigi
+Florencia
+Dorian
+Denita
+Dallas
+Chi
+Billye
+Alexander
+Tomika
+Sharita
+Rana
+Nikole
+Neoma
+Margarite
+Madalyn
+Lucina
+Laila
+Kali
+Jenette
+Gabriele
+Evelyne
+Elenora
+Clementina
+Alejandrina
+Zulema
+Violette
+Vannessa
+Thresa
+Retta
+Pia
+Patience
+Noella
+Nickie
+Jonell
+Delta
+Chung
+Chaya
+Camelia
+Bethel
+Anya
+Andrew
+Thanh
+Suzann
+Spring
+Shu
+Mila
+Lilla
+Laverna
+Keesha
+Kattie
+Gia
+Georgene
+Eveline
+Estell
+Elizbeth
+Vivienne
+Vallie
+Trudie
+Stephane
+Michel
+Magaly
+Madie
+Kenyetta
+Karren
+Janetta
+Hermine
+Harmony
+Drucilla
+Debbi
+Celestina
+Candie
+Britni
+Beckie
+Amina
+Zita
+Yun
+Yolande
+Vivien
+Vernetta
+Trudi
+Sommer
+Pearle
+Patrina
+Ossie
+Nicolle
+Loyce
+Letty
+Larisa
+Katharina
+Joselyn
+Jonelle
+Jenell
+Iesha
+Heide
+Florinda
+Florentina
+Flo
+Elodia
+Dorine
+Brunilda
+Brigid
+Ashli
+Ardella
+Twana
+Thu
+Tarah
+Sung
+Shea
+Shavon
+Shane
+Serina
+Rayna
+Ramonita
+Nga
+Margurite
+Lucrecia
+Kourtney
+Kati
+Jesus
+Jesenia
+Diamond
+Crista
+Ayana
+Alica
+Alia
+Vinnie
+Suellen
+Romelia
+Rachell
+Piper
+Olympia
+Michiko
+Kathaleen
+Jolie
+Jessi
+Janessa
+Hana
+Ha
+Elease
+Carletta
+Britany
+Shona
+Salome
+Rosamond
+Regena
+Raina
+Ngoc
+Nelia
+Louvenia
+Lesia
+Latrina
+Laticia
+Larhonda
+Jina
+Jacki
+Hollis
+Holley
+Emmy
+Deeann
+Coretta
+Arnetta
+Velvet
+Thalia
+Shanice
+Neta
+Mikki
+Micki
+Lonna
+Leana
+Lashunda
+Kiley
+Joye
+Jacqulyn
+Ignacia
+Hyun
+Hiroko
+Henry
+Henriette
+Elayne
+Delinda
+Darnell
+Dahlia
+Coreen
+Consuela
+Conchita
+Celine
+Babette
+Ayanna
+Anette
+Albertina
+Skye
+Shawnee
+Shaneka
+Quiana
+Pamelia
+Min
+Merri
+Merlene
+Margit
+Kiesha
+Kiera
+Kaylene
+Jodee
+Jenise
+Erlene
+Emmie
+Else
+Daryl
+Dalila
+Daisey
+Cody
+Casie
+Belia
+Babara
+Versie
+Vanesa
+Shelba
+Shawnda
+Sam
+Norman
+Nikia
+Naoma
+Marna
+Margeret
+Madaline
+Lawana
+Kindra
+Jutta
+Jazmine
+Janett
+Hannelore
+Glendora
+Gertrud
+Garnett
+Freeda
+Frederica
+Florance
+Flavia
+Dennis
+Carline
+Beverlee
+Anjanette
+Valda
+Trinity
+Tamala
+Stevie
+Shonna
+Sha
+Sarina
+Oneida
+Micah
+Merilyn
+Marleen
+Lurline
+Lenna
+Katherin
+Jin
+Jeni
+Hae
+Gracia
+Glady
+Farah
+Eric
+Enola
+Ema
+Dominque
+Devona
+Delana
+Cecila
+Caprice
+Alysha
+Ali
+Alethia
+Vena
+Theresia
+Tawny
+Song
+Shakira
+Samara
+Sachiko
+Rachele
+Pamella
+Nicky
+Marni
+Mariel
+Maren
+Malisa
+Ligia
+Lera
+Latoria
+Larae
+Kimber
+Kathern
+Karey
+Jennefer
+Janeth
+Halina
+Fredia
+Delisa
+Debroah
+Ciera
+Chin
+Angelika
+Andree
+Altha
+Yen
+Vivan
+Terresa
+Tanna
+Suk
+Sudie
+Soo
+Signe
+Salena
+Ronni
+Rebbecca
+Myrtie
+Mckenzie
+Malika
+Maida
+Loan
+Leonarda
+Kayleigh
+France
+Ethyl
+Ellyn
+Dayle
+Cammie
+Brittni
+Birgit
+Avelina
+Asuncion
+Arianna
+Akiko
+Venice
+Tyesha
+Tonie
+Tiesha
+Takisha
+Steffanie
+Sindy
+Santana
+Meghann
+Manda
+Macie
+Lady
+Kellye
+Kellee
+Joslyn
+Jason
+Inger
+Indira
+Glinda
+Glennis
+Fernanda
+Faustina
+Eneida
+Elicia
+Dot
+Digna
+Dell
+Arletta
+Andre
+Willia
+Tammara
+Tabetha
+Sherrell
+Sari
+Refugio
+Rebbeca
+Pauletta
+Nieves
+Natosha
+Nakita
+Mammie
+Kenisha
+Kazuko
+Kassie
+Gary
+Earlean
+Daphine
+Corliss
+Clotilde
+Carolyne
+Bernetta
+Augustina
+Audrea
+Annis
+Annabell
+Yan
+Tennille
+Tamica
+Selene
+Sean
+Rosana
+Regenia
+Qiana
+Markita
+Macy
+Leeanne
+Laurine
+Kym
+Jessenia
+Janita
+Georgine
+Genie
+Emiko
+Elvie
+Deandra
+Dagmar
+Corie
+Collen
+Cherish
+Romaine
+Porsha
+Pearlene
+Micheline
+Merna
+Margorie
+Margaretta
+Lore
+Kenneth
+Jenine
+Hermina
+Fredericka
+Elke
+Drusilla
+Dorathy
+Dione
+Desire
+Celena
+Brigida
+Angeles
+Allegra
+Theo
+Tamekia
+Synthia
+Stephen
+Sook
+Slyvia
+Rosann
+Reatha
+Raye
+Marquetta
+Margart
+Ling
+Layla
+Kymberly
+Kiana
+Kayleen
+Katlyn
+Karmen
+Joella
+Irina
+Emelda
+Eleni
+Detra
+Clemmie
+Cheryll
+Chantell
+Cathey
+Arnita
+Arla
+Angle
+Angelic
+Alyse
+Zofia
+Thomasine
+Tennie
+Son
+Sherly
+Sherley
+Sharyl
+Remedios
+Petrina
+Nickole
+Myung
+Myrle
+Mozella
+Louanne
+Lisha
+Latia
+Lane
+Krysta
+Julienne
+Joel
+Jeanene
+Jacqualine
+Isaura
+Gwenda
+Earleen
+Donald
+Cleopatra
+Carlie
+Audie
+Antonietta
+Alise
+Alex
+Verdell
+Val
+Tyler
+Tomoko
+Thao
+Talisha
+Steven
+So
+Shemika
+Shaun
+Scarlet
+Savanna
+Santina
+Rosia
+Raeann
+Odilia
+Nana
+Minna
+Magan
+Lynelle
+Le
+Karma
+Joeann
+Ivana
+Inell
+Ilana
+Hye
+Honey
+Hee
+Gudrun
+Frank
+Dreama
+Crissy
+Chante
+Carmelina
+Arvilla
+Arthur
+Annamae
+Alvera
+Aleida
+Aaron
+Yee
+Yanira
+Vanda
+Tianna
+Tam
+Stefania
+Shira
+Perry
+Nicol
+Nancie
+Monserrate
+Minh
+Melynda
+Melany
+Matthew
+Lovella
+Laure
+Kirby
+Kacy
+Jacquelynn
+Hyon
+Gertha
+Francisco
+Eliana
+Christena
+Christeen
+Charise
+Caterina
+Carley
+Candyce
+Arlena
+Ammie
+Yang
+Willette
+Vanita
+Tuyet
+Tiny
+Syreeta
+Silva
+Scott
+Ronald
+Penney
+Nyla
+Michal
+Maurice
+Maryam
+Marya
+Magen
+Ludie
+Loma
+Livia
+Lanell
+Kimberlie
+Julee
+Donetta
+Diedra
+Denisha
+Deane
+Dawne
+Clarine
+Cherryl
+Bronwyn
+Brandon
+Alla
+Valery
+Tonda
+Sueann
+Soraya
+Shoshana
+Shela
+Sharleen
+Shanelle
+Nerissa
+Micheal
+Meridith
+Mellie
+Maye
+Maple
+Magaret
+Luis
+Lili
+Leonila
+Leonie
+Leeanna
+Lavonia
+Lavera
+Kristel
+Kathey
+Kathe
+Justin
+Julian
+Jimmy
+Jann
+Ilda
+Hildred
+Hildegarde
+Genia
+Fumiko
+Evelin
+Ermelinda
+Elly
+Dung
+Doloris
+Dionna
+Danae
+Berneice
+Annice
+Alix
+Verena
+Verdie
+Tristan
+Shawnna
+Shawana
+Shaunna
+Rozella
+Randee
+Ranae
+Milagro
+Lynell
+Luise
+Louie
+Loida
+Lisbeth
+Karleen
+Junita
+Jona
+Isis
+Hyacinth
+Hedy
+Gwenn
+Ethelene
+Erline
+Edward
+Donya
+Domonique
+Delicia
+Dannette
+Cicely
+Branda
+Blythe
+Bethann
+Ashlyn
+Annalee
+Alline
+Yuko
+Vella
+Trang
+Towanda
+Tesha
+Sherlyn
+Narcisa
+Miguelina
+Meri
+Maybell
+Marlana
+Marguerita
+Madlyn
+Luna
+Lory
+Loriann
+Liberty
+Leonore
+Leighann
+Laurice
+Latesha
+Laronda
+Katrice
+Kasie
+Karl
+Kaley
+Jadwiga
+Glennie
+Gearldine
+Francina
+Epifania
+Dyan
+Dorie
+Diedre
+Denese
+Demetrice
+Delena
+Darby
+Cristie
+Cleora
+Catarina
+Carisa
+Bernie
+Barbera
+Almeta
+Trula
+Tereasa
+Solange
+Sheilah
+Shavonne
+Sanora
+Rochell
+Mathilde
+Margareta
+Maia
+Lynsey
+Lawanna
+Launa
+Kena
+Keena
+Katia
+Jamey
+Glynda
+Gaylene
+Elvina
+Elanor
+Danuta
+Danika
+Cristen
+Cordie
+Coletta
+Clarita
+Carmon
+Brynn
+Azucena
+Aundrea
+Angele
+Yi
+Walter
+Verlie
+Verlene
+Tamesha
+Silvana
+Sebrina
+Samira
+Reda
+Raylene
+Penni
+Pandora
+Norah
+Noma
+Mireille
+Melissia
+Maryalice
+Laraine
+Kimbery
+Karyl
+Karine
+Kam
+Jolanda
+Johana
+Jesusa
+Jaleesa
+Jae
+Jacquelyne
+Irish
+Iluminada
+Hilaria
+Hanh
+Gennie
+Francie
+Floretta
+Exie
+Edda
+Drema
+Delpha
+Bev
+Barbar
+Assunta
+Ardell
+Annalisa
+Alisia
+Yukiko
+Yolando
+Wonda
+Wei
+Waltraud
+Veta
+Tequila
+Temeka
+Tameika
+Shirleen
+Shenita
+Piedad
+Ozella
+Mirtha
+Marilu
+Kimiko
+Juliane
+Jenice
+Jen
+Janay
+Jacquiline
+Hilde
+Fe
+Fae
+Evan
+Eugene
+Elois
+Echo
+Devorah
+Chau
+Brinda
+Betsey
+Arminda
+Aracelis
+Apryl
+Annett
+Alishia
+Veola
+Usha
+Toshiko
+Theola
+Tashia
+Talitha
+Shery
+Rudy
+Renetta
+Reiko
+Rasheeda
+Omega
+Obdulia
+Mika
+Melaine
+Meggan
+Martin
+Marlen
+Marget
+Marceline
+Mana
+Magdalen
+Librada
+Lezlie
+Lexie
+Latashia
+Lasandra
+Kelle
+Isidra
+Isa
+Inocencia
+Gwyn
+Francoise
+Erminia
+Erinn
+Dimple
+Devora
+Criselda
+Armanda
+Arie
+Ariane
+Angelo
+Angelena
+Allen
+Aliza
+Adriene
+Adaline
+Xochitl
+Twanna
+Tran
+Tomiko
+Tamisha
+Taisha
+Susy
+Siu
+Rutha
+Roxy
+Rhona
+Raymond
+Otha
+Noriko
+Natashia
+Merrie
+Melvin
+Marinda
+Mariko
+Margert
+Loris
+Lizzette
+Leisha
+Kaila
+Ka
+Joannie
+Jerrica
+Jene
+Jannet
+Janee
+Jacinda
+Herta
+Elenore
+Doretta
+Delaine
+Daniell
+Claudie
+China
+Britta
+Apolonia
+Amberly
+Alease
+Yuri
+Yuk
+Wen
+Waneta
+Ute
+Tomi
+Sharri
+Sandie
+Roselle
+Reynalda
+Raguel
+Phylicia
+Patria
+Olimpia
+Odelia
+Mitzie
+Mitchell
+Miss
+Minda
+Mignon
+Mica
+Mendy
+Marivel
+Maile
+Lynetta
+Lavette
+Lauryn
+Latrisha
+Lakiesha
+Kiersten
+Kary
+Josphine
+Jolyn
+Jetta
+Janise
+Jacquie
+Ivelisse
+Glynis
+Gianna
+Gaynelle
+Emerald
+Demetrius
+Danyell
+Danille
+Dacia
+Coralee
+Cher
+Ceola
+Brett
+Bell
+Arianne
+Aleshia
+Yung
+Williemae
+Troy
+Trinh
+Thora
+Tai
+Svetlana
+Sherika
+Shemeka
+Shaunda
+Roseline
+Ricki
+Melda
+Mallie
+Lavonna
+Latina
+Larry
+Laquanda
+Lala
+Lachelle
+Klara
+Kandis
+Johna
+Jeanmarie
+Jaye
+Hang
+Grayce
+Gertude
+Emerita
+Ebonie
+Clorinda
+Ching
+Chery
+Carola
+Breann
+Blossom
+Bernardine
+Becki
+Arletha
+Argelia
+Ara
+Alita
+Yulanda
+Yon
+Yessenia
+Tobi
+Tasia
+Sylvie
+Shirl
+Shirely
+Sheridan
+Shella
+Shantelle
+Sacha
+Royce
+Rebecka
+Reagan
+Providencia
+Paulene
+Misha
+Miki
+Marline
+Marica
+Lorita
+Latoyia
+Lasonya
+Kerstin
+Kenda
+Keitha
+Kathrin
+Jaymie
+Jack
+Gricelda
+Ginette
+Eryn
+Elina
+Elfrieda
+Danyel
+Cheree
+Chanelle
+Barrie
+Avery
+Aurore
+Annamaria
+Alleen
+Ailene
+Aide
+Yasmine
+Vashti
+Valentine
+Treasa
+Tory
+Tiffaney
+Sheryll
+Sharie
+Shanae
+Sau
+Raisa
+Pa
+Neda
+Mitsuko
+Mirella
+Milda
+Maryanna
+Maragret
+Mabelle
+Luetta
+Lorina
+Letisha
+Latarsha
+Lanelle
+Lajuana
+Krissy
+Karly
+Karena
+Jon
+Jessika
+Jerica
+Jeanelle
+January
+Jalisa
+Jacelyn
+Izola
+Ivey
+Gregory
+Euna
+Etha
+Drew
+Domitila
+Dominica
+Daina
+Creola
+Carli
+Camie
+Bunny
+Brittny
+Ashanti
+Anisha
+Aleen
+Adah
+Yasuko
+Winter
+Viki
+Valrie
+Tona
+Tinisha
+Thi
+Terisa
+Tatum
+Taneka
+Simonne
+Shalanda
+Serita
+Ressie
+Refugia
+Paz
+Olene
+Na
+Merrill
+Margherita
+Mandie
+Man
+Maire
+Lyndia
+Luci
+Lorriane
+Loreta
+Leonia
+Lavona
+Lashawnda
+Lakia
+Kyoko
+Krystina
+Krysten
+Kenia
+Kelsi
+Jude
+Jeanice
+Isobel
+Georgiann
+Genny
+Felicidad
+Eilene
+Deon
+Deloise
+Deedee
+Dannie
+Conception
+Clora
+Cherilyn
+Chang
+Calandra
+Berry
+Armandina
+Anisa
+Ula
+Timothy
+Tiera
+Theressa
+Stephania
+Sima
+Shyla
+Shonta
+Shera
+Shaquita
+Shala
+Sammy
+Rossana
+Nohemi
+Nery
+Moriah
+Melita
+Melida
+Melani
+Marylynn
+Marisha
+Mariette
+Malorie
+Madelene
+Ludivina
+Loria
+Lorette
+Loralee
+Lianne
+Leon
+Lavenia
+Laurinda
+Lashon
+Kit
+Kimi
+Keila
+Katelynn
+Kai
+Jone
+Joane
+Ji
+Jayna
+Janella
+Ja
+Hue
+Hertha
+Francene
+Elinore
+Despina
+Delsie
+Deedra
+Clemencia
+Carry
+Carolin
+Carlos
+Bulah
+Brittanie
+Bok
+Blondell
+Bibi
+Beaulah
+Beata
+Annita
+Agripina
+Virgen
+Valene
+Un
+Twanda
+Tommye
+Toi
+Tarra
+Tari
+Tammera
+Shakia
+Sadye
+Ruthanne
+Rochel
+Rivka
+Pura
+Nenita
+Natisha
+Ming
+Merrilee
+Melodee
+Marvis
+Lucilla
+Leena
+Laveta
+Larita
+Lanie
+Keren
+Ileen
+Georgeann
+Genna
+Genesis
+Frida
+Ewa
+Eufemia
+Emely
+Ela
+Edyth
+Deonna
+Deadra
+Darlena
+Chanell
+Chan
+Cathern
+Cassondra
+Cassaundra
+Bernarda
+Berna
+Arlinda
+Anamaria
+Albert
+Wesley
+Vertie
+Valeri
+Torri
+Tatyana
+Stasia
+Sherise
+Sherill
+Season
+Scottie
+Sanda
+Ruthe
+Rosy
+Roberto
+Robbi
+Ranee
+Quyen
+Pearly
+Palmira
+Onita
+Nisha
+Niesha
+Nida
+Nevada
+Nam
+Merlyn
+Mayola
+Marylouise
+Maryland
+Marx
+Marth
+Margene
+Madelaine
+Londa
+Leontine
+Leoma
+Leia
+Lawrence
+Lauralee
+Lanora
+Lakita
+Kiyoko
+Keturah
+Katelin
+Kareen
+Jonie
+Johnette
+Jenee
+Jeanett
+Izetta
+Hiedi
+Heike
+Hassie
+Harold
+Giuseppina
+Georgann
+Fidela
+Fernande
+Elwanda
+Ellamae
+Eliz
+Dusti
+Dotty
+Cyndy
+Coralie
+Celesta
+Argentina
+Alverta
+Xenia
+Wava
+Vanetta
+Torrie
+Tashina
+Tandy
+Tambra
+Tama
+Stepanie
+Shila
+Shaunta
+Sharan
+Shaniqua
+Shae
+Setsuko
+Serafina
+Sandee
+Rosamaria
+Priscila
+Olinda
+Nadene
+Muoi
+Michelina
+Mercedez
+Maryrose
+Marin
+Marcene
+Mao
+Magali
+Mafalda
+Logan
+Linn
+Lannie
+Kayce
+Karoline
+Kamilah
+Kamala
+Justa
+Joline
+Jennine
+Jacquetta
+Iraida
+Gerald
+Georgeanna
+Franchesca
+Fairy
+Emeline
+Elane
+Ehtel
+Earlie
+Dulcie
+Dalene
+Cris
+Classie
+Chere
+Charis
+Caroyln
+Carmina
+Carita
+Brian
+Bethanie
+Ayako
+Arica
+An
+Alysa
+Alessandra
+Akilah
+Adrien
+Zetta
+Youlanda
+Yelena
+Yahaira
+Xuan
+Wendolyn
+Victor
+Tijuana
+Terrell
+Terina
+Teresia
+Suzi
+Sunday
+Sherell
+Shavonda
+Shaunte
+Sharda
+Shakita
+Sena
+Ryann
+Rubi
+Riva
+Reginia
+Rea
+Rachal
+Parthenia
+Pamula
+Monnie
+Monet
+Michaele
+Melia
+Marine
+Malka
+Maisha
+Lisandra
+Leo
+Lekisha
+Lean
+Laurence
+Lakendra
+Krystin
+Kortney
+Kizzie
+Kittie
+Kera
+Kendal
+Kemberly
+Kanisha
+Julene
+Jule
+Joshua
+Johanne
+Jeffrey
+Jamee
+Han
+Halley
+Gidget
+Galina
+Fredricka
+Fleta
+Fatimah
+Eusebia
+Elza
+Eleonore
+Dorthey
+Doria
+Donella
+Dinorah
+Delorse
+Claretha
+Christinia
+Charlyn
+Bong
+Belkis
+Azzie
+Andera
+Aiko
+Adena
+Yer
+Yajaira
+Wan
+Vania
+Ulrike
+Toshia
+Tifany
+Stefany
+Shizue
+Shenika
+Shawanna
+Sharolyn
+Sharilyn
+Shaquana
+Shantay
+See
+Rozanne
+Roselee
+Rickie
+Remona
+Reanna
+Raelene
+Quinn
+Phung
+Petronila
+Natacha
+Nancey
+Myrl
+Miyoko
+Miesha
+Merideth
+Marvella
+Marquitta
+Marhta
+Marchelle
+Lizeth
+Libbie
+Lahoma
+Ladawn
+Kina
+Katheleen
+Katharyn
+Karisa
+Kaleigh
+Junie
+Julieann
+Johnsie
+Janean
+Jaimee
+Jackqueline
+Hisako
+Herma
+Helaine
+Gwyneth
+Glenn
+Gita
+Eustolia
+Emelina
+Elin
+Edris
+Donnette
+Donnetta
+Dierdre
+Denae
+Darcel
+Claude
+Clarisa
+Cinderella
+Chia
+Charlesetta
+Charita
+Celsa
+Cassy
+Cassi
+Carlee
+Bruna
+Brittaney
+Brande
+Billi
+Bao
+Antonetta
+Angla
+Angelyn
+Analisa
+Alane
+Wenona
+Wendie
+Veronique
+Vannesa
+Tobie
+Tempie
+Sumiko
+Sulema
+Sparkle
+Somer
+Sheba
+Shayne
+Sharice
+Shanel
+Shalon
+Sage
+Roy
+Rosio
+Roselia
+Renay
+Rema
+Reena
+Porsche
+Ping
+Peg
+Ozie
+Oretha
+Oralee
+Oda
+Nu
+Ngan
+Nakesha
+Milly
+Marybelle
+Marlin
+Maris
+Margrett
+Maragaret
+Manie
+Lurlene
+Lillia
+Lieselotte
+Lavelle
+Lashaunda
+Lakeesha
+Keith
+Kaycee
+Kalyn
+Joya
+Joette
+Jenae
+Janiece
+Illa
+Grisel
+Glayds
+Genevie
+Gala
+Fredda
+Fred
+Elmer
+Eleonor
+Debera
+Deandrea
+Dan
+Corrinne
+Cordia
+Contessa
+Colene
+Cleotilde
+Charlott
+Chantay
+Cecille
+Beatris
+Azalee
+Arlean
+Ardath
+Anjelica
+Anja
+Alfredia
+Aleisha
+Adam
+Zada
+Yuonne
+Xiao
+Willodean
+Whitley
+Vennie
+Vanna
+Tyisha
+Tova
+Torie
+Tonisha
+Tilda
+Tien
+Temple
+Sirena
+Sherril
+Shanti
+Shan
+Senaida
+Samella
+Robbyn
+Renda
+Reita
+Phebe
+Paulita
+Nobuko
+Nguyet
+Neomi
+Moon
+Mikaela
+Melania
+Maximina
+Marg
+Maisie
+Lynna
+Lilli
+Layne
+Lashaun
+Lakenya
+Lael
+Kirstie
+Kathline
+Kasha
+Karlyn
+Karima
+Jovan
+Josefine
+Jennell
+Jacqui
+Jackelyn
+Hyo
+Hien
+Grazyna
+Florrie
+Floria
+Eleonora
+Dwana
+Dorla
+Dong
+Delmy
+Deja
+Dede
+Dann
+Crysta
+Clelia
+Claris
+Clarence
+Chieko
+Cherlyn
+Cherelle
+Charmain
+Chara
+Cammy
+Bee
+Arnette
+Ardelle
+Annika
+Amiee
+Amee
+Allena
+Yvone
+Yuki
+Yoshie
+Yevette
+Yael
+Willetta
+Voncile
+Venetta
+Tula
+Tonette
+Timika
+Temika
+Telma
+Teisha
+Taren
+Ta
+Stacee
+Shin
+Shawnta
+Saturnina
+Ricarda
+Pok
+Pasty
+Onie
+Nubia
+Mora
+Mike
+Marielle
+Mariella
+Marianela
+Mardell
+Many
+Luanna
+Loise
+Lisabeth
+Lindsy
+Lilliana
+Lilliam
+Lelah
+Leigha
+Leanora
+Lang
+Kristeen
+Khalilah
+Keeley
+Kandra
+Junko
+Joaquina
+Jerlene
+Jani
+Jamika
+Jame
+Hsiu
+Hermila
+Golden
+Genevive
+Evia
+Eugena
+Emmaline
+Elfreda
+Elene
+Donette
+Delcie
+Deeanna
+Darcey
+Cuc
+Clarinda
+Cira
+Chae
+Celinda
+Catheryn
+Catherin
+Casimira
+Carmelia
+Camellia
+Breana
+Bobette
+Bernardina
+Bebe
+Basilia
+Arlyne
+Amal
+Alayna
+Zonia
+Zenia
+Yuriko
+Yaeko
+Wynell
+Willow
+Willena
+Vernia
+Tu
+Travis
+Tora
+Terrilyn
+Terica
+Tenesha
+Tawna
+Tajuana
+Taina
+Stephnie
+Sona
+Sol
+Sina
+Shondra
+Shizuko
+Sherlene
+Sherice
+Sharika
+Rossie
+Rosena
+Rory
+Rima
+Ria
+Rheba
+Renna
+Peter
+Natalya
+Nancee
+Melodi
+Meda
+Maxima
+Matha
+Marketta
+Maricruz
+Marcelene
+Malvina
+Luba
+Louetta
+Leida
+Lecia
+Lauran
+Lashawna
+Laine
+Khadijah
+Katerine
+Kasi
+Kallie
+Julietta
+Jesusita
+Jestine
+Jessia
+Jeremy
+Jeffie
+Janyce
+Isadora
+Georgianne
+Fidelia
+Evita
+Eura
+Eulah
+Estefana
+Elsy
+Elizabet
+Eladia
+Dodie
+Dion
+Dia
+Denisse
+Deloras
+Delila
+Daysi
+Dakota
+Curtis
+Crystle
+Concha
+Colby
+Claretta
+Chu
+Christia
+Charlsie
+Charlena
+Carylon
+Bettyann
+Asley
+Ashlea
+Amira
+Ai
+Agueda
+Agnus
+Yuette
+Vinita
+Victorina
+Tynisha
+Treena
+Toccara
+Tish
+Thomasena
+Tegan
+Soila
+Shiloh
+Shenna
+Sharmaine
+Shantae
+Shandi
+September
+Saran
+Sarai
+Sana
+Samuel
+Salley
+Rosette
+Rolande
+Regine
+Otelia
+Oscar
+Olevia
+Nicholle
+Necole
+Naida
+Myrta
+Myesha
+Mitsue
+Minta
+Mertie
+Margy
+Mahalia
+Madalene
+Love
+Loura
+Lorean
+Lewis
+Lesha
+Leonida
+Lenita
+Lavone
+Lashell
+Lashandra
+Lamonica
+Kimbra
+Katherina
+Karry
+Kanesha
+Julio
+Jong
+Jeneva
+Jaquelyn
+Hwa
+Gilma
+Ghislaine
+Gertrudis
+Fransisca
+Fermina
+Ettie
+Etsuko
+Ellis
+Ellan
+Elidia
+Edra
+Dorethea
+Doreatha
+Denyse
+Denny
+Deetta
+Daine
+Cyrstal
+Corrin
+Cayla
+Carlita
+Camila
+Burma
+Bula
+Buena
+Blake
+Barabara
+Avril
+Austin
+Alaine
+Zana
+Wilhemina
+Wanetta
+Virgil
+Vi
+Veronika
+Vernon
+Verline
+Vasiliki
+Tonita
+Tisa
+Teofila
+Tayna
+Taunya
+Tandra
+Takako
+Sunni
+Suanne
+Sixta
+Sharell
+Seema
+Russell
+Rosenda
+Robena
+Raymonde
+Pei
+Pamila
+Ozell
+Neida
+Neely
+Mistie
+Micha
+Merissa
+Maurita
+Maryln
+Maryetta
+Marshall
+Marcell
+Malena
+Makeda
+Maddie
+Lovetta
+Lourie
+Lorrine
+Lorilee
+Lester
+Laurena
+Lashay
+Larraine
+Laree
+Lacresha
+Kristle
+Krishna
+Keva
+Keira
+Karole
+Joie
+Jinny
+Jeannetta
+Jama
+Heidy
+Gilberte
+Gema
+Faviola
+Evelynn
+Enda
+Elli
+Ellena
+Divina
+Dagny
+Collene
+Codi
+Cindie
+Chassidy
+Chasidy
+Catrice
+Catherina
+Cassey
+Caroll
+Carlena
+Candra
+Calista
+Bryanna
+Britteny
+Beula
+Bari
+Audrie
+Audria
+Ardelia
+Annelle
+Angila
+Alona
+Allyn
+Douglas
+Roger
+Jonathan
+Ralph
+Nicholas
+Benjamin
+Bruce
+Harry
+Wayne
+Steve
+Howard
+Ernest
+Phillip
+Todd
+Craig
+Alan
+Philip
+Earl
+Danny
+Bryan
+Stanley
+Leonard
+Nathan
+Manuel
+Rodney
+Marvin
+Vincent
+Jeffery
+Jeff
+Chad
+Jacob
+Alfred
+Bradley
+Herbert
+Frederick
+Edwin
+Don
+Ricky
+Randall
+Barry
+Bernard
+Leroy
+Marcus
+Theodore
+Clifford
+Miguel
+Jim
+Tom
+Calvin
+Bill
+Lloyd
+Derek
+Warren
+Darrell
+Jerome
+Floyd
+Alvin
+Tim
+Gordon
+Greg
+Jorge
+Dustin
+Pedro
+Derrick
+Zachary
+Herman
+Glen
+Hector
+Ricardo
+Rick
+Brent
+Ramon
+Gilbert
+Marc
+Reginald
+Ruben
+Nathaniel
+Rafael
+Edgar
+Milton
+Raul
+Ben
+Chester
+Duane
+Franklin
+Brad
+Ron
+Roland
+Arnold
+Harvey
+Jared
+Erik
+Darryl
+Neil
+Javier
+Fernando
+Clinton
+Ted
+Mathew
+Tyrone
+Darren
+Lance
+Kurt
+Allan
+Nelson
+Guy
+Clayton
+Hugh
+Max
+Dwayne
+Dwight
+Armando
+Felix
+Everett
+Ian
+Wallace
+Ken
+Bob
+Alfredo
+Alberto
+Dave
+Ivan
+Byron
+Isaac
+Morris
+Clifton
+Willard
+Ross
+Andy
+Salvador
+Kirk
+Sergio
+Seth
+Kent
+Terrance
+Eduardo
+Terrence
+Enrique
+Wade
+Stuart
+Fredrick
+Arturo
+Alejandro
+Nick
+Luther
+Wendell
+Jeremiah
+Julius
+Otis
+Trevor
+Oliver
+Luke
+Homer
+Gerard
+Doug
+Kenny
+Hubert
+Lyle
+Matt
+Alfonso
+Orlando
+Rex
+Carlton
+Ernesto
+Neal
+Pablo
+Lorenzo
+Omar
+Wilbur
+Grant
+Horace
+Roderick
+Abraham
+Willis
+Rickey
+Andres
+Cesar
+Johnathan
+Malcolm
+Rudolph
+Damon
+Kelvin
+Preston
+Alton
+Archie
+Marco
+Wm
+Pete
+Randolph
+Garry
+Geoffrey
+Jonathon
+Felipe
+Gerardo
+Ed
+Dominic
+Delbert
+Colin
+Guillermo
+Earnest
+Lucas
+Benny
+Spencer
+Rodolfo
+Myron
+Edmund
+Garrett
+Salvatore
+Cedric
+Lowell
+Gregg
+Sherman
+Wilson
+Sylvester
+Roosevelt
+Israel
+Jermaine
+Forrest
+Wilbert
+Leland
+Simon
+Clark
+Irving
+Bryant
+Owen
+Rufus
+Woodrow
+Kristopher
+Mack
+Levi
+Marcos
+Gustavo
+Jake
+Lionel
+Gilberto
+Clint
+Nicolas
+Ismael
+Orville
+Ervin
+Dewey
+Al
+Wilfred
+Josh
+Hugo
+Ignacio
+Caleb
+Tomas
+Sheldon
+Erick
+Stewart
+Doyle
+Darrel
+Rogelio
+Terence
+Santiago
+Alonzo
+Elias
+Bert
+Elbert
+Ramiro
+Conrad
+Noah
+Grady
+Phil
+Cornelius
+Lamar
+Rolando
+Clay
+Percy
+Dexter
+Bradford
+Darin
+Amos
+Moses
+Irvin
+Saul
+Roman
+Randal
+Timmy
+Darrin
+Winston
+Brendan
+Abel
+Dominick
+Boyd
+Emilio
+Elijah
+Domingo
+Emmett
+Marlon
+Emanuel
+Jerald
+Edmond
+Emil
+Dewayne
+Will
+Otto
+Teddy
+Reynaldo
+Bret
+Jess
+Trent
+Humberto
+Emmanuel
+Stephan
+Vicente
+Lamont
+Garland
+Miles
+Efrain
+Heath
+Rodger
+Harley
+Ethan
+Eldon
+Rocky
+Pierre
+Junior
+Freddy
+Eli
+Bryce
+Antoine
+Sterling
+Chase
+Grover
+Elton
+Cleveland
+Dylan
+Chuck
+Damian
+Reuben
+Stan
+August
+Leonardo
+Jasper
+Russel
+Erwin
+Benito
+Hans
+Monte
+Blaine
+Ernie
+Curt
+Quentin
+Agustin
+Murray
+Jamal
+Adolfo
+Harrison
+Tyson
+Burton
+Brady
+Elliott
+Wilfredo
+Bart
+Jarrod
+Vance
+Denis
+Damien
+Joaquin
+Harlan
+Desmond
+Elliot
+Darwin
+Gregorio
+Buddy
+Xavier
+Kermit
+Roscoe
+Esteban
+Anton
+Solomon
+Scotty
+Norbert
+Elvin
+Williams
+Nolan
+Rod
+Quinton
+Hal
+Brain
+Rob
+Elwood
+Kendrick
+Darius
+Moises
+Fidel
+Thaddeus
+Cliff
+Marcel
+Jackson
+Raphael
+Bryon
+Armand
+Alvaro
+Jeffry
+Dane
+Joesph
+Thurman
+Ned
+Rusty
+Monty
+Fabian
+Reggie
+Mason
+Graham
+Isaiah
+Vaughn
+Gus
+Loyd
+Diego
+Adolph
+Norris
+Millard
+Rocco
+Gonzalo
+Derick
+Rodrigo
+Wiley
+Rigoberto
+Alphonso
+Ty
+Noe
+Vern
+Reed
+Jefferson
+Elvis
+Bernardo
+Mauricio
+Hiram
+Donovan
+Basil
+Riley
+Nickolas
+Maynard
+Scot
+Vince
+Quincy
+Eddy
+Sebastian
+Federico
+Ulysses
+Heriberto
+Donnell
+Cole
+Davis
+Gavin
+Emery
+Ward
+Romeo
+Jayson
+Dante
+Clement
+Coy
+Maxwell
+Jarvis
+Bruno
+Issac
+Dudley
+Brock
+Sanford
+Carmelo
+Barney
+Nestor
+Stefan
+Donny
+Art
+Linwood
+Beau
+Weldon
+Galen
+Isidro
+Truman
+Delmar
+Johnathon
+Silas
+Frederic
+Dick
+Irwin
+Merlin
+Charley
+Marcelino
+Harris
+Carlo
+Trenton
+Kurtis
+Hunter
+Aurelio
+Winfred
+Vito
+Collin
+Denver
+Carter
+Leonel
+Emory
+Pasquale
+Mohammad
+Mariano
+Danial
+Landon
+Dirk
+Branden
+Adan
+Buford
+German
+Wilmer
+Emerson
+Zachery
+Fletcher
+Jacques
+Errol
+Dalton
+Monroe
+Josue
+Edwardo
+Booker
+Wilford
+Sonny
+Shelton
+Carson
+Theron
+Raymundo
+Daren
+Houston
+Robby
+Lincoln
+Genaro
+Bennett
+Octavio
+Cornell
+Hung
+Arron
+Antony
+Herschel
+Giovanni
+Garth
+Cyrus
+Cyril
+Ronny
+Lon
+Freeman
+Duncan
+Kennith
+Carmine
+Erich
+Chadwick
+Wilburn
+Russ
+Reid
+Myles
+Anderson
+Morton
+Jonas
+Forest
+Mitchel
+Mervin
+Zane
+Rich
+Jamel
+Lazaro
+Alphonse
+Randell
+Major
+Jarrett
+Brooks
+Abdul
+Luciano
+Seymour
+Eugenio
+Mohammed
+Valentin
+Chance
+Arnulfo
+Lucien
+Ferdinand
+Thad
+Ezra
+Aldo
+Rubin
+Royal
+Mitch
+Earle
+Abe
+Wyatt
+Marquis
+Lanny
+Kareem
+Jamar
+Boris
+Isiah
+Emile
+Elmo
+Aron
+Leopoldo
+Everette
+Josef
+Eloy
+Rodrick
+Reinaldo
+Lucio
+Jerrod
+Weston
+Hershel
+Barton
+Parker
+Lemuel
+Burt
+Jules
+Gil
+Eliseo
+Ahmad
+Nigel
+Efren
+Antwan
+Alden
+Margarito
+Coleman
+Dino
+Osvaldo
+Les
+Deandre
+Normand
+Kieth
+Trey
+Norberto
+Napoleon
+Jerold
+Fritz
+Rosendo
+Milford
+Christoper
+Alfonzo
+Lyman
+Josiah
+Brant
+Wilton
+Rico
+Jamaal
+Dewitt
+Brenton
+Olin
+Foster
+Faustino
+Claudio
+Judson
+Gino
+Edgardo
+Alec
+Tanner
+Jarred
+Donn
+Tad
+Prince
+Porfirio
+Odis
+Lenard
+Chauncey
+Tod
+Mel
+Marcelo
+Kory
+Augustus
+Keven
+Hilario
+Bud
+Sal
+Orval
+Mauro
+Zachariah
+Olen
+Anibal
+Milo
+Jed
+Dillon
+Amado
+Newton
+Lenny
+Richie
+Horacio
+Brice
+Mohamed
+Delmer
+Dario
+Reyes
+Mac
+Jonah
+Jerrold
+Robt
+Hank
+Rupert
+Rolland
+Kenton
+Damion
+Antone
+Waldo
+Fredric
+Bradly
+Kip
+Burl
+Walker
+Tyree
+Jefferey
+Ahmed
+Willy
+Stanford
+Oren
+Noble
+Moshe
+Mikel
+Enoch
+Brendon
+Quintin
+Jamison
+Florencio
+Darrick
+Tobias
+Hassan
+Giuseppe
+Demarcus
+Cletus
+Tyrell
+Lyndon
+Keenan
+Werner
+Geraldo
+Columbus
+Chet
+Bertram
+Markus
+Huey
+Hilton
+Dwain
+Donte
+Tyron
+Omer
+Isaias
+Hipolito
+Fermin
+Adalberto
+Bo
+Barrett
+Teodoro
+Mckinley
+Maximo
+Garfield
+Raleigh
+Lawerence
+Abram
+Rashad
+King
+Emmitt
+Daron
+Samual
+Miquel
+Eusebio
+Domenic
+Darron
+Buster
+Wilber
+Renato
+Jc
+Hoyt
+Haywood
+Ezekiel
+Chas
+Florentino
+Elroy
+Clemente
+Arden
+Neville
+Edison
+Deshawn
+Nathanial
+Jordon
+Danilo
+Claud
+Sherwood
+Raymon
+Rayford
+Cristobal
+Ambrose
+Titus
+Hyman
+Felton
+Ezequiel
+Erasmo
+Stanton
+Lonny
+Len
+Ike
+Milan
+Lino
+Jarod
+Herb
+Andreas
+Walton
+Rhett
+Palmer
+Douglass
+Cordell
+Oswaldo
+Ellsworth
+Virgilio
+Toney
+Nathanael
+Del
+Benedict
+Mose
+Johnson
+Isreal
+Garret
+Fausto
+Asa
+Arlen
+Zack
+Warner
+Modesto
+Francesco
+Manual
+Gaylord
+Gaston
+Filiberto
+Deangelo
+Michale
+Granville
+Wes
+Malik
+Zackary
+Tuan
+Eldridge
+Cristopher
+Cortez
+Antione
+Malcom
+Long
+Korey
+Jospeh
+Colton
+Waylon
+Von
+Hosea
+Shad
+Santo
+Rudolf
+Rolf
+Rey
+Renaldo
+Marcellus
+Lucius
+Kristofer
+Boyce
+Benton
+Hayden
+Harland
+Arnoldo
+Rueben
+Leandro
+Kraig
+Jerrell
+Jeromy
+Hobert
+Cedrick
+Arlie
+Winford
+Wally
+Luigi
+Keneth
+Jacinto
+Graig
+Franklyn
+Edmundo
+Sid
+Porter
+Leif
+Jeramy
+Buck
+Willian
+Vincenzo
+Shon
+Lynwood
+Jere
+Hai
+Elden
+Dorsey
+Darell
+Broderick
+Alonso
diff --git a/misc/benchmarks/misc/prng_bench.cpp b/misc/benchmarks/misc/prng_bench.cpp
new file mode 100644
index 00000000..6f4e0e47
--- /dev/null
+++ b/misc/benchmarks/misc/prng_bench.cpp
@@ -0,0 +1,223 @@
+#include <cstdint>
+#include <iostream>
+#include <ctime>
+#include <random>
+#include <stc/crandom.h>
+
+static inline uint64_t rotl64(const uint64_t x, const int k)
+ { return (x << k) | (x >> (64 - k)); }
+
+static uint64_t splitmix64_x = 87213627321ull; /* The state can be seeded with any value. */
+
+uint64_t splitmix64(void) {
+ uint64_t z = (splitmix64_x += 0x9e3779b97f4a7c15);
+ z = (z ^ (z >> 30)) * 0xbf58476d1ce4e5b9;
+ z = (z ^ (z >> 27)) * 0x94d049bb133111eb;
+ return z ^ (z >> 31);
+}
+
+static void init_state(uint64_t *rng, uint64_t seed) {
+ splitmix64_x = seed;
+ for (int i=0; i<4; ++i) rng[i] = splitmix64();
+}
+
+/* romu_trio */
+
+uint64_t romu_trio(uint64_t s[3]) {
+ uint64_t xp = s[0], yp = s[1], zp = s[2];
+ s[0] = 15241094284759029579u * zp;
+ s[1] = yp - xp; s[1] = rotl64(s[1], 12);
+ s[2] = zp - yp; s[2] = rotl64(s[2], 44);
+ return xp;
+}
+
+/* sfc64 */
+
+static inline uint64_t sfc64(uint64_t s[4]) {
+ uint64_t result = s[0] + s[1] + s[3]++;
+ s[0] = s[1] ^ (s[1] >> 11);
+ s[1] = s[2] + (s[2] << 3);
+ s[2] = rotl64(s[2], 24) + result;
+ return result;
+}
+
+uint32_t sfc32(uint32_t s[4]) {
+ uint32_t t = s[0] + s[1] + s[3]++;
+ s[0] = s[1] ^ (s[1] >> 9);
+ s[1] = s[2] + (s[2] << 3);
+ s[2] = ((s[2] << 21) | (s[2] >> 11)) + t;
+ return t;
+}
+
+uint32_t stc32(uint32_t s[5]) {
+ uint32_t t = (s[0] ^ (s[3] += s[4])) + s[1];
+ s[0] = s[1] ^ (s[1] >> 9);
+ s[1] = s[2] + (s[2] << 3);
+ s[2] = ((s[2] << 21) | (s[2] >> 11)) + t;
+ return t;
+}
+
+uint32_t pcg32(uint32_t s[2]) {
+ uint64_t oldstate = s[0];
+ s[0] = oldstate * 6364136223846793005ULL + (s[1]|1);
+ uint32_t xorshifted = ((oldstate >> 18u) ^ oldstate) >> 27u;
+ uint32_t rot = oldstate >> 59u;
+ return (xorshifted >> rot) | (xorshifted << ((-rot) & 31));
+}
+
+
+/* xoshiro128+ */
+
+uint64_t xoroshiro128plus(uint64_t s[2]) {
+ const uint64_t s0 = s[0];
+ uint64_t s1 = s[1];
+ const uint64_t result = s0 + s1;
+
+ s1 ^= s0;
+ s[0] = rotl64(s0, 24) ^ s1 ^ (s1 << 16); // a, b
+ s[1] = rotl64(s1, 37); // c
+
+ return result;
+}
+
+
+/* xoshiro256** */
+
+static inline uint64_t xoshiro256starstar(uint64_t s[4]) {
+ const uint64_t result = rotl64(s[1] * 5, 7) * 9;
+ const uint64_t t = s[1] << 17;
+ s[2] ^= s[0];
+ s[3] ^= s[1];
+ s[1] ^= s[2];
+ s[0] ^= s[3];
+ s[2] ^= t;
+ s[3] = rotl64(s[3], 45);
+ return result;
+}
+
+// wyrand - 2020-12-07
+static inline void _wymum(uint64_t *A, uint64_t *B){
+#if defined(__SIZEOF_INT128__)
+ __uint128_t r = *A; r *= *B;
+ *A = (uint64_t) r; *B = (uint64_t ) (r >> 64);
+#elif defined(_MSC_VER) && defined(_M_X64)
+ *A = _umul128(*A, *B, B);
+#else
+ uint64_t ha=*A>>32, hb=*B>>32, la=(uint32_t)*A, lb=(uint32_t)*B, hi, lo;
+ uint64_t rh=ha*hb, rm0=ha*lb, rm1=hb*la, rl=la*lb, t=rl+(rm0<<32), c=t<rl;
+ lo=t+(rm1<<32); c+=lo<t; hi=rh+(rm0>>32)+(rm1>>32)+c;
+ *A=lo; *B=hi;
+#endif
+}
+static inline uint64_t _wymix(uint64_t A, uint64_t B){
+ _wymum(&A,&B); return A^B;
+}
+static inline uint64_t wyrand64(uint64_t *seed){
+ static const uint64_t _wyp[] = {0xa0761d6478bd642full, 0xe7037ed1a0b428dbull};
+ *seed+=_wyp[0]; return _wymix(*seed,*seed^_wyp[1]);
+}
+
+
+using namespace std;
+
+int main(void)
+{
+ enum {N = 2000000000};
+ uint16_t* recipient = new uint16_t[N];
+ static stc64_t rng;
+ init_state(rng.state, 12345123);
+ std::mt19937 mt(12345123);
+
+ cout << "WARMUP" << endl;
+ for (size_t i = 0; i < N; i++)
+ recipient[i] = wyrand64(rng.state);
+
+ clock_t beg, end;
+ for (size_t ti = 0; ti < 2; ti++) {
+ init_state(rng.state, 12345123);
+ cout << endl << "ROUND " << ti+1 << " ---------" << endl;
+
+ beg = clock();
+ for (size_t i = 0; i < N; i++)
+ recipient[i] = romu_trio(rng.state);
+ end = clock();
+ cout << "romu_trio:\t"
+ << (float(end - beg) / CLOCKS_PER_SEC)
+ << "s: " << recipient[312] << endl;
+
+ beg = clock();
+ for (size_t i = 0; i < N; i++)
+ recipient[i] = wyrand64(rng.state);
+ end = clock();
+ cout << "wyrand64:\t"
+ << (float(end - beg) / CLOCKS_PER_SEC)
+ << "s: " << recipient[312] << endl;
+
+ beg = clock();
+ for (size_t i = 0; i < N; i++)
+ recipient[i] = sfc32((uint32_t *)rng.state);
+ end = clock();
+ cout << "sfc32:\t\t"
+ << (float(end - beg) / CLOCKS_PER_SEC)
+ << "s: " << recipient[312] << endl;
+
+ beg = clock();
+ for (size_t i = 0; i < N; i++)
+ recipient[i] = stc32((uint32_t *)rng.state);
+ end = clock();
+ cout << "stc32:\t\t"
+ << (float(end - beg) / CLOCKS_PER_SEC)
+ << "s: " << recipient[312] << endl;
+
+ beg = clock();
+ for (size_t i = 0; i < N; i++)
+ recipient[i] = pcg32((uint32_t *)rng.state);
+ end = clock();
+ cout << "pcg32:\t\t"
+ << (float(end - beg) / CLOCKS_PER_SEC)
+ << "s: " << recipient[312] << endl;
+
+ beg = clock();
+ for (size_t i = 0; i < N; i++)
+ recipient[i] = sfc64(rng.state);
+ end = clock();
+ cout << "sfc64:\t\t"
+ << (float(end - beg) / CLOCKS_PER_SEC)
+ << "s: " << recipient[312] << endl;
+
+ beg = clock();
+ for (size_t i = 0; i < N; i++)
+ recipient[i] = stc64_rand(&rng);
+ end = clock();
+ cout << "stc64:\t\t"
+ << (float(end - beg) / CLOCKS_PER_SEC)
+ << "s: " << recipient[312] << endl;
+
+
+ beg = clock();
+ for (size_t i = 0; i < N; i++)
+ recipient[i] = xoroshiro128plus(rng.state);
+ end = clock();
+ cout << "xoroshiro128+:\t"
+ << (float(end - beg) / CLOCKS_PER_SEC)
+ << "s: " << recipient[312] << endl;
+
+ beg = clock();
+ for (size_t i = 0; i < N; i++)
+ recipient[i] = xoshiro256starstar(rng.state);
+ end = clock();
+ cout << "xoshiro256**:\t"
+ << (float(end - beg) / CLOCKS_PER_SEC)
+ << "s: " << recipient[312] << endl;
+
+ beg = clock();
+ for (size_t i = 0; i < N; i++)
+ recipient[i] = mt();
+ end = clock();
+ cout << "std::mt19937:\t"
+ << (float(end - beg) / CLOCKS_PER_SEC)
+ << "s: " << recipient[312] << endl;
+ }
+ delete[] recipient;
+ return 0;
+}
diff --git a/misc/benchmarks/misc/rust_cmap.c b/misc/benchmarks/misc/rust_cmap.c
new file mode 100644
index 00000000..83b7dd19
--- /dev/null
+++ b/misc/benchmarks/misc/rust_cmap.c
@@ -0,0 +1,61 @@
+#include <time.h>
+#include <stdio.h>
+#define i_key uint64_t
+#define i_val uint64_t
+#define i_tag u64
+#define i_max_load_factor 0.8f
+#include <stc/cmap.h>
+
/* Rotate `val` left by `r` bits.
 * Fix: the original computed `val >> (64 - r)`, which is undefined behavior
 * when r == 0 (shift by the full width of the type). The rotate count is
 * masked to 0..63 and the r == 0 case returns `val` unchanged. */
uint64_t romu_rotl(uint64_t val, uint32_t r) {
    r &= 63u;
    return r ? (val << r) | (val >> (64u - r)) : val;
}
+
/* One step of the RomuTrio PRNG: returns the previous s[0] and advances the
 * three-word state in place. Unsigned arithmetic wraps by definition. */
uint64_t romu_trio(uint64_t s[3]) {
    const uint64_t xp = s[0];
    const uint64_t yp = s[1];
    const uint64_t zp = s[2];
    s[0] = 15241094284759029579u * zp;
    s[1] = romu_rotl(yp - xp, 12);
    s[2] = romu_rotl(zp - yp, 44);
    return xp;
}
+
+int main()
+{
+ c_auto (cmap_u64, m) {
+ const size_t n = 50000000,
+ mask = (1 << 25) - 1,
+ ms = CLOCKS_PER_SEC/1000;
+ cmap_u64_reserve(&m, n);
+ printf("STC cmap n = %" c_ZU ", mask = 0x%" PRIxMAX "\n", n, mask);
+
+ uint64_t rng[3] = {1872361123, 123879177, 87739234}, sum;
+ clock_t now = clock();
+ c_forrange (n) {
+ uint64_t key = romu_trio(rng) & mask;
+ cmap_u64_insert(&m, key, 0).ref->second += 1;
+ }
+ printf("insert : %" c_ZU "ms \tsize : %" c_ZU "\n", (clock() - now)/ms, cmap_u64_size(&m));
+
+ now = clock();
+ sum = 0;
+ c_forrange (key, mask + 1) { sum += cmap_u64_contains(&m, key); }
+ printf("lookup : %" c_ZU "ms \tsum : %" c_ZU "\n", (clock() - now)/ms, sum);
+
+ now = clock();
+ sum = 0;
+ c_foreach (i, cmap_u64, m) { sum += i.ref->second; }
+ printf("iterate : %" c_ZU "ms \tsum : %" c_ZU "\n", (clock() - now)/ms, sum);
+
+ uint64_t rng2[3] = {1872361123, 123879177, 87739234};
+ now = clock();
+ c_forrange (n) {
+ uint64_t key = romu_trio(rng2) & mask;
+ cmap_u64_erase(&m, key);
+ }
+ printf("remove : %" c_ZU "ms \tsize : %" c_ZU "\n", (clock() - now)/ms, cmap_u64_size(&m));
+ printf("press a key:\n"); getchar();
+ }
+}
diff --git a/misc/benchmarks/misc/rust_hashmap.rs b/misc/benchmarks/misc/rust_hashmap.rs
new file mode 100644
index 00000000..5394a7c3
--- /dev/null
+++ b/misc/benchmarks/misc/rust_hashmap.rs
@@ -0,0 +1,82 @@
+use std::{
+ hash::{BuildHasherDefault, Hasher},
+ io::Read,
+ time::Instant,
+};
+
/// Minimal hasher state: a single 64-bit value that serves both as the
/// running state and as the finished hash.
struct MyHasher {
    seed: u64,
}
+
impl Default for MyHasher {
    /// Start every hasher from the same fixed constant so hash values are
    /// deterministic across runs (unlike the std SipHash random keys).
    fn default() -> Self {
        Self { seed: 0xb5ad4eceda1ce2a9_u64 }
    }
}
+
impl Hasher for MyHasher {
    /// Multiplicative hash over exactly 8 bytes.
    /// NOTE(review): `try_into().unwrap()` panics unless `bytes` is exactly
    /// 8 bytes long — fine while keys are u64, but verify before reusing
    /// this hasher with any other key type.
    fn write(&mut self, bytes: &[u8]) {
        use std::convert::TryInto;
        self.seed = u64::from_ne_bytes(bytes.try_into().unwrap()).wrapping_mul(0xc6a4a7935bd1e99d);
    }

    /// Fast path for u64 keys: a single wrapping multiply.
    #[inline]
    fn write_u64(&mut self, i: u64) {
        self.seed = i.wrapping_mul(0xc6a4a7935bd1e99d);
    }

    /// The accumulated state is the hash value.
    #[inline]
    fn finish(&self) -> u64 {
        self.seed
    }
}
+
+type MyBuildHasher = BuildHasherDefault<MyHasher>;
+
/// One step of the RomuTrio PRNG: returns the previous `s[0]` and advances
/// the three-word state in place (wrapping arithmetic throughout).
fn romu_trio(s: &mut [u64]) -> u64 {
    let (xp, yp, zp) = (s[0], s[1], s[2]);
    s[0] = zp.wrapping_mul(15241094284759029579_u64);
    s[1] = yp.wrapping_sub(xp).rotate_left(12);
    s[2] = zp.wrapping_sub(yp).rotate_left(44);
    xp
}
+
/// Benchmark std HashMap<u64, u64> with the custom multiplicative hasher:
/// timed insert / lookup / iterate / remove over pseudo-random keys limited
/// to a 2^25 key space (mirrors the STC cmap benchmark in rust_cmap.c).
fn main() {
    let n = 50_000_000;
    let mask = (1 << 25) - 1;

    let mut m = std::collections::HashMap::<u64, u64, MyBuildHasher>::default();
    m.reserve(n);

    let mut rng: [u64; 3] = [1872361123, 123879177, 87739234];
    println!("Rust HashMap n = {}, mask = {:#x}", n, mask);
    let now = Instant::now();
    for _i in 0..n {
        let key: u64 = romu_trio(&mut rng) & mask;
        // Count occurrences per key; duplicates bump the counter.
        *m.entry(key).or_insert(0) += 1;
    }
    println!("insert : {}ms \tsize : {}", now.elapsed().as_millis(), m.len());
    let now = Instant::now();
    let mut sum = 0;
    // Probe every key in the 2^25 key space once.
    for i in 0..mask + 1 { if m.contains_key(&i) { sum += 1; }}
    println!("lookup : {}ms \tsum : {}", now.elapsed().as_millis(), sum);

    let now = Instant::now();
    let mut sum = 0;
    for (_, value) in &m { sum += value; }
    println!("iterate : {}ms \tsum : {}", now.elapsed().as_millis(), sum);

    // Re-seed with the identical state so removal visits the same key sequence.
    let mut rng: [u64; 3] = [1872361123, 123879177, 87739234];
    let now = Instant::now();
    for _ in 0..n {
        let key: u64 = romu_trio(&mut rng) & mask;
        m.remove(&key);
    }
    println!("remove : {}ms \tsize : {}", now.elapsed().as_millis(), m.len());
    println!("press a key:");
    // Block until the user presses a key (keeps a console window open).
    std::io::stdin().bytes().next();
}
diff --git a/misc/benchmarks/misc/sso_bench.cpp b/misc/benchmarks/misc/sso_bench.cpp
new file mode 100644
index 00000000..0fffef7a
--- /dev/null
+++ b/misc/benchmarks/misc/sso_bench.cpp
@@ -0,0 +1,135 @@
+#include <string>
+#include <iostream>
+#include <chrono>
+
+#include <stc/crandom.h>
+#include <stc/cstr.h>
+
+#define i_type StcVec
+#define i_val_str
+#include <stc/cstack.h>
+
+#define i_type StcSet
+#define i_val_str
+#include <stc/csset.h>
+
+#include <vector>
+using StdVec = std::vector<std::string>;
+#include <set>
+using StdSet = std::set<std::string>;
+
+
+static const int BENCHMARK_SIZE = 2000000;
+static const int MAX_STRING_SIZE = 50;
+static const char CHARS[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz=+-";
+using time_point = std::chrono::high_resolution_clock::time_point;
+
+
// Build a random std::string of `strsize` chars drawn from the CHARS table.
// One crandom() call yields 8 bytes, each masked to 0..63 and consumed one
// per output character.
// NOTE(review): reading r.b after writing r.u8 is union type-punning —
// well-defined in C, technically UB in C++ although GCC/Clang/MSVC support
// it; confirm this matches the project's portability bar.
static inline std::string randomString_STD(int strsize) {
    std::string s(strsize, 0);
    char* p = &s[0];
    union { uint64_t u8; uint8_t b[8]; } r;
    for (int i = 0; i < strsize; ++i) {
        if ((i & 7) == 0) r.u8 = crandom() & 0x3f3f3f3f3f3f3f3f;
        p[i] = CHARS[r.b[i & 7]];
    }
    return s;
}
+
// STC counterpart of randomString_STD: fills a cstr of length `strsize`
// with random characters from CHARS. The caller owns the returned cstr.
// NOTE(review): same union type-punning caveat as randomString_STD.
static inline cstr randomString_STC(int strsize) {
    cstr s = cstr_with_size(strsize, 0);
    char* p = cstr_data(&s);
    union { uint64_t u8; uint8_t b[8]; } r;
    for (int i = 0; i < strsize; ++i) {
        if ((i & 7) == 0) r.u8 = crandom() & 0x3f3f3f3f3f3f3f3f;
        p[i] = CHARS[r.b[i & 7]];
    }
    return s;
}
+
+
+void addRandomString(StdVec& vec, int strsize) {
+ vec.push_back(std::move(randomString_STD(strsize)));
+}
+
+void addRandomString(StcVec& vec, int strsize) {
+ StcVec_push(&vec, randomString_STC(strsize));
+}
+
+void addRandomString(StdSet& set, int strsize) {
+ set.insert(std::move(randomString_STD(strsize)));
+}
+
+void addRandomString(StcSet& set, int strsize) {
+ StcSet_insert(&set, randomString_STC(strsize));
+}
+
+
// Time n insertions of random strings of length `strsize` into `container`.
// Prints "<strsize>\t<ms>" to stderr (0 is reported as 32, the sentinel for
// "default length") and returns the elapsed milliseconds.
template <class C>
int benchmark(C& container, const int n, const int strsize) {
    using hrclock = std::chrono::high_resolution_clock;
    const auto t0 = hrclock::now();

    for (int k = 0; k < n; ++k)
        addRandomString(container, strsize);

    const auto elapsed_ms =
        std::chrono::duration_cast<std::chrono::milliseconds>(hrclock::now() - t0).count();
    std::cerr << (strsize ? strsize : 32) << "\t" << elapsed_ms;
    return (int)elapsed_ms;
}
+
+
// Compare push/insert throughput of std::string vs STC cstr for growing
// string sizes, first into a vector, then into a sorted set. Each section
// re-seeds the PRNG with the same seed so the STD and STC runs consume
// identical random sequences.
int main() {
    uint64_t seed = 4321;
    int sum, n;

    // VECTOR WITH STRINGS

    csrandom(seed);
    sum = 0, n = 0;
    std::cerr << "\nstrsize\tmsecs\tstd::vector<std::string>, size=" << BENCHMARK_SIZE << "\n";
    for (int strsize = 1; strsize <= MAX_STRING_SIZE; strsize += 2) {
        StdVec vec; vec.reserve(BENCHMARK_SIZE);
        sum += benchmark(vec, BENCHMARK_SIZE, strsize), ++n;
        std::cout << '\t' << vec.front() << '\n'; // print one element so work isn't optimized away
    }
    std::cout << "Avg:\t" << sum/n << '\n';

    csrandom(seed);
    sum = 0, n = 0;
    std::cerr << "\nstrsize\tmsecs\tcvec<cstr>, size=" << BENCHMARK_SIZE << "\n";
    for (int strsize = 1; strsize <= MAX_STRING_SIZE; strsize += 2) {
        StcVec vec = StcVec_with_capacity(BENCHMARK_SIZE);
        sum += benchmark(vec, BENCHMARK_SIZE, strsize), ++n;
        std::cout << '\t' << cstr_str(&vec.data[0]) << '\n';
        StcVec_drop(&vec); // STC containers need explicit cleanup
    }
    std::cout << "Avg:\t" << sum/n << '\n';

    // SORTED SET WITH STRINGS (1/16th of the elements: set insertion is slower)

    csrandom(seed);
    sum = 0, n = 0;
    std::cerr << "\nstrsize\tmsecs\tstd::set<std::string>, size=" << BENCHMARK_SIZE/16 << "\n";
    for (int strsize = 1; strsize <= MAX_STRING_SIZE; strsize += 2) {
        StdSet set;
        sum += benchmark(set, BENCHMARK_SIZE/16, strsize), ++n;
        std::cout << '\t' << *set.begin() << '\n';
    }
    std::cout << "Avg:\t" << sum/n << '\n';

    csrandom(seed);
    sum = 0, n = 0;
    std::cerr << "\nstrsize\tmsecs\tcsset<cstr>, size=" << BENCHMARK_SIZE/16 << "\n";
    for (int strsize = 1; strsize <= MAX_STRING_SIZE; strsize += 2) {
        StcSet set = StcSet_with_capacity(BENCHMARK_SIZE/16);
        sum += benchmark(set, BENCHMARK_SIZE/16, strsize), ++n;
        std::cout << '\t' << cstr_str(StcSet_front(&set)) << '\n';
        StcSet_drop(&set);
    }
    std::cout << "Avg:\t" << sum/n << '\n';

    // The footprint difference explains part of the throughput gap.
    std::cerr << "sizeof(std::string) : " << sizeof(std::string) << std::endl
              << "sizeof(cstr)        : " << sizeof(cstr) << std::endl;
    return 0;
}
diff --git a/misc/benchmarks/misc/string_bench_STC.cpp b/misc/benchmarks/misc/string_bench_STC.cpp
new file mode 100644
index 00000000..ae8e4c38
--- /dev/null
+++ b/misc/benchmarks/misc/string_bench_STC.cpp
@@ -0,0 +1,300 @@
+// https://www.codeproject.com/Tips/5255442/Cplusplus14-20-Heterogeneous-Lookup-Benchmark
+// https://github.com/shaovoon/cpp_hetero_lookup_bench
+
+#include <iostream>
+#include <iomanip>
+#include <chrono>
+#define i_static
+#include <stc/cstr.h> // string
+#define i_static
+#include <stc/csview.h> // string_view
+
+#define i_key_str
+#include <stc/cvec.h> // vec of cstr with const char* lookup
+
+#define i_type cvec_sv // override default type name (cvec_csview)
+#define i_key csview
+#define i_cmp csview_cmp
+#include <stc/cvec.h> // cvec_vs: vec of csview
+
+#define i_key_str
+#define i_val size_t
+#include <stc/csmap.h> // sorted map of cstr, const char* lookup
+
+#define i_key_ssv
+#define i_val size_t
+#include <stc/csmap.h> // sorted map of cstr, csview lookup
+
+#define i_key_str
+#define i_val size_t
+#include <stc/cmap.h> // unordered map of cstr, const char* lookup
+
+#define i_key_ssv
+#define i_val size_t
+#include <stc/cmap.h> // unordered map of cstr, csview lookup
+
+
+cvec_str read_file(const char* name)
+{
+ cvec_str data = cvec_str_init();
+ c_auto (cstr, line)
+ c_with (FILE* f = fopen(name, "r"), fclose(f))
+ while (cstr_getline(&line, f))
+ cvec_str_emplace_back(&data, cstr_str(&line));
+ return data;
+}
+
// Tiny labelled stopwatch: start() records a label and a timestamp,
// stop() prints "<label> timing: <ms>ms" to stdout.
class timer
{
public:
    timer() = default;

    // Remember the label and take the starting timestamp.
    void start(const std::string& text_)
    {
        text = text_;
        begin = std::chrono::high_resolution_clock::now();
    }

    // Report the interval since the last start() in milliseconds.
    void stop()
    {
        using std::chrono::duration_cast;
        using std::chrono::milliseconds;
        const auto elapsed = std::chrono::high_resolution_clock::now() - begin;
        const auto ms = duration_cast<milliseconds>(elapsed).count();
        std::cout << std::setw(32) << text << " timing:" << std::setw(5) << ms << "ms" << std::endl;
    }

private:
    std::string text;
    std::chrono::high_resolution_clock::time_point begin;
};
+
// Load the short-string corpus from names.txt into `vs` (owning cstrs) and
// mirror each element into `vsv` as a non-owning csview.
// NOTE(review): the views in *vsv point into *vs — they stay valid only as
// long as *vs is not modified; confirm callers respect that.
void initShortStringVec(cvec_str* vs, cvec_sv* vsv)
{
    cvec_str_drop(vs);  // release previous contents; *vs is reassigned below
    cvec_sv_clear(vsv);

    *vs = read_file("names.txt");
/*
    cvec_str_emplace_back(vs, "Susan");
    cvec_str_emplace_back(vs, "Jason");
    cvec_str_emplace_back(vs, "Lily");
    cvec_str_emplace_back(vs, "Michael");
    cvec_str_emplace_back(vs, "Mary");

    cvec_str_emplace_back(vs, "Jerry");
    cvec_str_emplace_back(vs, "Jenny");
    cvec_str_emplace_back(vs, "Klaus");
    cvec_str_emplace_back(vs, "Celine");
    cvec_str_emplace_back(vs, "Kenny");

    cvec_str_emplace_back(vs, "Kelly");
    cvec_str_emplace_back(vs, "Jackson");
    cvec_str_emplace_back(vs, "Mandy");
    cvec_str_emplace_back(vs, "Terry");
    cvec_str_emplace_back(vs, "Sandy");

    cvec_str_emplace_back(vs, "Billy");
    cvec_str_emplace_back(vs, "Cindy");
    cvec_str_emplace_back(vs, "Phil");
    cvec_str_emplace_back(vs, "Lindy");
    cvec_str_emplace_back(vs, "David");
*/
    // Build the view mirror and gather stats for the report below.
    size_t num = 0;
    c_foreach (i, cvec_str, *vs)
    {
        cvec_sv_push_back(vsv, cstr_sv(i.ref));
        num += cstr_size(i.ref);
    }
    std::cout << "num strings: " << cvec_sv_size(vsv) << std::endl;
    std::cout << "avg str len: " << num / (float)cvec_sv_size(vsv) << std::endl;
}
+
// Load names.txt and append every string to itself three times (8x length)
// to build the long-string corpus, then mirror the elements into `vsv` as
// non-owning csviews.
// NOTE(review): cstr_append_s(i.ref, *i.ref) appends a cstr to itself —
// verify the STC implementation handles the aliased source when it
// reallocates the destination buffer.
void initLongStringVec(cvec_str* vs, cvec_sv* vsv)
{
    cvec_str_drop(vs);  // release previous contents; *vs is reassigned below
    cvec_sv_clear(vsv);

    *vs = read_file("names.txt");
    c_foreach (i, cvec_str, *vs) {
        cstr_append_s(i.ref, *i.ref);
        cstr_append_s(i.ref, *i.ref);
        cstr_append_s(i.ref, *i.ref);
    }
/*
    cvec_str_emplace_back(vs, "Susan Susan Susan Susan Susan Susan");
    cvec_str_emplace_back(vs, "Jason Jason Jason Jason Jason Jason");
    cvec_str_emplace_back(vs, "Lily Lily Lily Lily Lily Lily");
    cvec_str_emplace_back(vs, "Michael Michael Michael Michael Michael Michael");
    cvec_str_emplace_back(vs, "Mary Mary Mary Mary Mary Mary");

    cvec_str_emplace_back(vs, "Jerry Jerry Jerry Jerry Jerry Jerry");
    cvec_str_emplace_back(vs, "Jenny Jenny Jenny Jenny Jenny Jenny");
    cvec_str_emplace_back(vs, "Klaus Klaus Klaus Klaus Klaus Klaus");
    cvec_str_emplace_back(vs, "Celine Celine Celine Celine Celine Celine");
    cvec_str_emplace_back(vs, "Kenny Kenny Kenny Kenny Kenny Kenny");

    cvec_str_emplace_back(vs, "Kelly Kelly Kelly Kelly Kelly Kelly");
    cvec_str_emplace_back(vs, "Jackson Jackson Jackson Jackson Jackson Jackson");
    cvec_str_emplace_back(vs, "Mandy Mandy Mandy Mandy Mandy Mandy");
    cvec_str_emplace_back(vs, "Terry Terry Terry Terry Terry Terry");
    cvec_str_emplace_back(vs, "Sandy Sandy Sandy Sandy Sandy Sandy");

    cvec_str_emplace_back(vs, "Billy Billy Billy Billy Billy Billy");
    cvec_str_emplace_back(vs, "Cindy Cindy Cindy Cindy Cindy Cindy");
    cvec_str_emplace_back(vs, "Phil Phil Phil Phil Phil Phil");
    cvec_str_emplace_back(vs, "Lindy Lindy Lindy Lindy Lindy Lindy");
    cvec_str_emplace_back(vs, "David David David David David David");
*/
    // Build the view mirror and gather stats for the report below.
    size_t num = 0;
    c_foreach (i, cvec_str, *vs)
    {
        cvec_sv_push_back(vsv, cstr_sv(i.ref));
        num += cstr_size(i.ref);
    }
    std::cout << "num strings: " << cvec_sv_size(vsv) << std::endl;
    std::cout << "avg str len: " << num / (float)cvec_sv_size(vsv) << std::endl;
}
+
+void initMaps(const cvec_str* vs, csmap_str* mapTrans, csmap_ssv* mapSview,
+ cmap_str* unordmapTrans, cmap_ssv* unordmapSview)
+{
+ csmap_str_clear(mapTrans);
+ csmap_ssv_clear(mapSview);
+ cmap_str_clear(unordmapTrans);
+ cmap_ssv_clear(unordmapSview);
+
+ size_t n = 0;
+ c_foreach (i, cvec_str, *vs)
+ {
+ csmap_str_insert(mapTrans, cstr_clone(*i.ref), n);
+ csmap_ssv_insert(mapSview, cstr_clone(*i.ref), n);
+ cmap_str_insert(unordmapTrans, cstr_clone(*i.ref), n);
+ cmap_ssv_insert(unordmapSview, cstr_clone(*i.ref), n);
+ ++n;
+ }
+}
+
+void benchmark(
+ const cvec_str* vec_string,
+ const cvec_sv* vec_stringview,
+ const csmap_str* mapTrans,
+ const csmap_ssv* mapSview,
+ const cmap_str* unordmapTrans,
+ const cmap_ssv* unordmapSview);
+
+//const size_t MAX_LOOP = 1000000;
+const size_t MAX_LOOP = 2000;
+
// Run the heterogeneous-lookup benchmark three times over the short-string
// corpus, then three times over the 8x-length corpus. All containers are
// declared with c_auto so they are dropped automatically at block exit.
int main()
{
    c_auto (cvec_str, vec_string)
    c_auto (cvec_sv, vec_stringview)
    c_auto (csmap_str, mapTrans)
    c_auto (csmap_ssv, mapSview)
    c_auto (cmap_str, unordmapTrans)
    c_auto (cmap_ssv, unordmapSview)
    {
        std::cout << "Short String Benchmark" << std::endl;
        std::cout << "======================" << std::endl;

        initShortStringVec(&vec_string, &vec_stringview);
        initMaps(&vec_string, &mapTrans, &mapSview,
            &unordmapTrans, &unordmapSview);

        // Three repetitions to expose warm-up / cache effects.
        for (int i=0; i<3; ++i)
            benchmark(
                &vec_string,
                &vec_stringview,
                &mapTrans,
                &mapSview,
                &unordmapTrans,
                &unordmapSview);

        std::cout << "Long String Benchmark" << std::endl;
        std::cout << "=====================" << std::endl;

        initLongStringVec(&vec_string, &vec_stringview);
        initMaps(&vec_string, &mapTrans, &mapSview,
            &unordmapTrans, &unordmapSview);
        for (int i=0; i<3; ++i)
            benchmark(
                &vec_string,
                &vec_stringview,
                &mapTrans,
                &mapSview,
                &unordmapTrans,
                &unordmapSview);
    }
    return 0;
}
+
// Time MAX_LOOP lookup sweeps of every input string against the four map
// flavors (sorted/unordered x char*/csview lookup). `grandtotal` accumulates
// the found values purely so the optimizer cannot discard the lookups.
void benchmark(
    const cvec_str* vec_string,
    const cvec_sv* vec_stringview,
    const csmap_str* mapTrans,
    const csmap_ssv* mapSview,
    const cmap_str* unordmapTrans,
    const cmap_ssv* unordmapSview)
{
    size_t grandtotal = 0;

    size_t total = 0;

    timer stopwatch;
    // Sorted map, heterogeneous lookup by const char*.
    total = 0;
    stopwatch.start("Trans Map with char*");
    for (size_t i = 0; i < MAX_LOOP; ++i)
    {
        c_foreach (j, cvec_str, *vec_string)
        {
            const csmap_str_value* v = csmap_str_get(mapTrans, cstr_str(j.ref));
            if (v)
                total += v->second;
        }
    }
    grandtotal += total;
    stopwatch.stop();

    // Sorted map, heterogeneous lookup by csview (string_view analogue).
    total = 0;
    stopwatch.start("Trans Map with string_view");
    for (size_t i = 0; i < MAX_LOOP; ++i)
    {
        c_foreach (j, cvec_sv, *vec_stringview)
        {
            const csmap_ssv_value* v = csmap_ssv_get(mapSview, *j.ref);
            if (v)
                total += v->second;
        }
    }
    grandtotal += total;
    stopwatch.stop();

    // Unordered map, heterogeneous lookup by const char*.
    total = 0;
    stopwatch.start("Trans Unord Map with char*");
    for (size_t i = 0; i < MAX_LOOP; ++i)
    {
        c_foreach (j, cvec_str, *vec_string)
        {
            const cmap_str_value* v = cmap_str_get(unordmapTrans, cstr_str(j.ref));
            if (v)
                total += v->second;
        }
    }
    grandtotal += total;
    stopwatch.stop();

    // Unordered map, heterogeneous lookup by csview.
    total = 0;
    stopwatch.start("Trans Unord Map with string_view");
    for (size_t i = 0; i < MAX_LOOP; ++i)
    {
        c_foreach (j, cvec_sv, *vec_stringview)
        {
            const cmap_ssv_value* v = cmap_ssv_get(unordmapSview, *j.ref);
            if (v)
                total += v->second;
        }
    }
    grandtotal += total;
    stopwatch.stop();

    std::cout << "grandtotal:" << grandtotal << " <--- Ignore this\n" << std::endl;

}
diff --git a/misc/benchmarks/misc/string_bench_STD.cpp b/misc/benchmarks/misc/string_bench_STD.cpp
new file mode 100644
index 00000000..8bb87937
--- /dev/null
+++ b/misc/benchmarks/misc/string_bench_STD.cpp
@@ -0,0 +1,371 @@
+// https://www.codeproject.com/Tips/5255442/Cplusplus14-20-Heterogeneous-Lookup-Benchmark
+// https://github.com/shaovoon/cpp_hetero_lookup_bench
+// Requires c++20, e.g. g++ -std=c++20
+
+#include <iostream>
+#include <iomanip>
+#include <chrono>
+#include <string>
+#include <string_view>
+#include <vector>
+#include <map>
+#include <unordered_map>
+#define i_static
+#include <stc/cstr.h>
+
+std::vector<std::string> read_file(const char* name)
+{
+ std::vector<std::string> data;
+ c_auto (cstr, line)
+ c_with (FILE* f = fopen(name, "r"), fclose(f))
+ while (cstr_getline(&line, f))
+ data.emplace_back(cstr_str(&line));
+ return data;
+}
+
// Simple labelled stopwatch; stop() reports elapsed milliseconds on stdout.
class timer
{
public:
    timer() = default;

    // Remember the label and take the starting timestamp.
    void start(const std::string& text_)
    {
        text = text_;
        begin = std::chrono::high_resolution_clock::now();
    }

    // Print "<label> timing: <n>ms" for the interval since start().
    void stop()
    {
        const auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(
            std::chrono::high_resolution_clock::now() - begin).count();
        std::cout << std::setw(32) << text << " timing:" << std::setw(5) << ms << "ms" << std::endl;
    }

private:
    std::string text;
    std::chrono::high_resolution_clock::time_point begin;
};
+
+void initShortStringVec(std::vector<std::string>& vs, std::vector<std::string_view>& vsv)
+{
+ vs.clear();
+ vsv.clear();
+
+ vs = read_file("names.txt");
+/*
+ vs.push_back("Susan");
+ vs.push_back("Jason");
+ vs.push_back("Lily");
+ vs.push_back("Michael");
+ vs.push_back("Mary");
+
+ vs.push_back("Jerry");
+ vs.push_back("Jenny");
+ vs.push_back("Klaus");
+ vs.push_back("Celine");
+ vs.push_back("Kenny");
+
+ vs.push_back("Kelly");
+ vs.push_back("Jackson");
+ vs.push_back("Mandy");
+ vs.push_back("Terry");
+ vs.push_back("Sandy");
+
+ vs.push_back("Billy");
+ vs.push_back("Cindy");
+ vs.push_back("Phil");
+ vs.push_back("Lindy");
+ vs.push_back("David");
+*/
+ size_t num = 0;
+ for (size_t i = 0; i < vs.size(); ++i)
+ {
+ vsv.push_back(vs.at(i));
+ num += vs.at(i).size();
+ }
+ std::cout << "num strings: " << vsv.size() << std::endl;
+ std::cout << "avg str len: " << num / (float)vsv.size() << std::endl;
+}
+
+void initLongStringVec(std::vector<std::string>& vs, std::vector<std::string_view>& vsv)
+{
+ vs.clear();
+ vsv.clear();
+
+ vs = read_file("names.txt");
+ for (size_t i = 1; i < vs.size(); ++i) {
+ vs[i] += vs[i];
+ vs[i] += vs[i];
+ vs[i] += vs[i];
+ }
+/*
+ vs.push_back("Susan Susan Susan Susan Susan Susan");
+ vs.push_back("Jason Jason Jason Jason Jason Jason");
+ vs.push_back("Lily Lily Lily Lily Lily Lily");
+ vs.push_back("Michael Michael Michael Michael Michael Michael");
+ vs.push_back("Mary Mary Mary Mary Mary Mary");
+
+ vs.push_back("Jerry Jerry Jerry Jerry Jerry Jerry");
+ vs.push_back("Jenny Jenny Jenny Jenny Jenny Jenny");
+ vs.push_back("Klaus Klaus Klaus Klaus Klaus Klaus");
+ vs.push_back("Celine Celine Celine Celine Celine Celine");
+ vs.push_back("Kenny Kenny Kenny Kenny Kenny Kenny");
+
+ vs.push_back("Kelly Kelly Kelly Kelly Kelly Kelly");
+ vs.push_back("Jackson Jackson Jackson Jackson Jackson Jackson");
+ vs.push_back("Mandy Mandy Mandy Mandy Mandy Mandy");
+ vs.push_back("Terry Terry Terry Terry Terry Terry");
+ vs.push_back("Sandy Sandy Sandy Sandy Sandy Sandy");
+
+ vs.push_back("Billy Billy Billy Billy Billy Billy");
+ vs.push_back("Cindy Cindy Cindy Cindy Cindy Cindy");
+ vs.push_back("Phil Phil Phil Phil Phil Phil");
+ vs.push_back("Lindy Lindy Lindy Lindy Lindy Lindy");
+ vs.push_back("David David David David David David");
+*/
+ size_t num = 0;
+ for (size_t i = 0; i < vs.size(); ++i)
+ {
+ vsv.push_back(vs.at(i));
+ num += vs.at(i).size();
+ }
+ std::cout << "num strings: " << vsv.size() << std::endl;
+ std::cout << "avg str len: " << num / (float)vsv.size() << std::endl;
+}
+
// Fill a plain std::map<string,size_t>: each string maps to its first
// position in `vs` (insert/emplace never overwrites an existing key, so
// duplicates keep the earliest index).
void initMapNormal(const std::vector<std::string>& vs, std::map<std::string, size_t>& mapNormal)
{
    mapNormal.clear();
    size_t idx = 0;
    for (const std::string& s : vs)
        mapNormal.emplace(s, idx++);
}
+
// Fill the transparent-comparator std::map (std::less<> enables
// heterogeneous find with char* / string_view). Each string maps to its
// first position in `vs`; duplicates keep the earliest index.
void initMapTrans(const std::vector<std::string>& vs, std::map<std::string, size_t, std::less<> >& mapTrans)
{
    mapTrans.clear();
    size_t idx = 0;
    for (const std::string& s : vs)
        mapTrans.emplace(s, idx++);
}
+
// Transparent equality predicate for the unordered maps: the is_transparent
// tag lets find() accept any type comparable with std::string (char*,
// string_view) without constructing a temporary std::string.
struct MyEqual : public std::equal_to<>
{
    using is_transparent = void;
};
+
// Transparent hash functor: routes char*, std::string and std::string_view
// through one std::hash<std::string_view>, enabling heterogeneous
// unordered_map lookup (C++20) without temporary string allocations.
struct string_hash {
    using is_transparent = void;
    using key_equal = std::equal_to<>; // Pred to use
    using hash_type = std::hash<std::string_view>; // just a helper local type
    size_t operator()(std::string_view txt) const { return hash_type{}(txt); }
    size_t operator()(const std::string& txt) const { return hash_type{}(txt); }
    size_t operator()(const char* txt) const { return hash_type{}(txt); }
};
+
// Fill a plain unordered_map<string,size_t>: each string maps to its first
// position in `vs`; duplicates keep the earliest index (insert/emplace
// never overwrite an existing key).
void initUnorderedMapNormal(const std::vector<std::string>& vs, std::unordered_map<std::string, size_t>& unordmapNormal)
{
    unordmapNormal.clear();
    size_t idx = 0;
    for (const std::string& s : vs)
        unordmapNormal.emplace(s, idx++);
}
+
+void initUnorderedMapTrans(const std::vector<std::string>& vs, std::unordered_map<std::string, size_t, string_hash, MyEqual>& unordmapTrans)
+{
+ unordmapTrans.clear();
+ for (size_t i = 0; i < vs.size(); ++i)
+ {
+ unordmapTrans.insert(std::make_pair(vs.at(i), i));
+ }
+}
+
+void benchmark(
+ const std::vector<std::string>& vec_shortstr,
+ const std::vector<std::string_view>& vec_shortstrview,
+ const std::map<std::string, size_t>& mapNormal,
+ const std::map<std::string, size_t, std::less<> >& mapTrans,
+ const std::unordered_map<std::string, size_t>& unordmapNormal,
+ const std::unordered_map<std::string, size_t, string_hash, MyEqual>& unordmapTrans);
+
+//const size_t MAX_LOOP = 1000000;
+const size_t MAX_LOOP = 2000;
+
// Run the std heterogeneous-lookup benchmark: build all container flavors
// from the short-string corpus, benchmark three times, then rebuild from
// the 8x-length corpus and benchmark three more times.
int main()
{
    std::vector<std::string> vec_shortstr;
    std::vector<std::string_view> vec_shortstrview;

    std::map<std::string, size_t> mapNormal;
    std::map<std::string, size_t, std::less<> > mapTrans;
    initShortStringVec(vec_shortstr, vec_shortstrview);
    initMapNormal(vec_shortstr, mapNormal);
    initMapTrans(vec_shortstr, mapTrans);

    std::unordered_map<std::string, size_t> unordmapNormal;
    std::unordered_map<std::string, size_t, string_hash, MyEqual> unordmapTrans;
    initUnorderedMapNormal(vec_shortstr, unordmapNormal);
    initUnorderedMapTrans(vec_shortstr, unordmapTrans);

    std::cout << "Short String Benchmark" << std::endl;
    std::cout << "======================" << std::endl;

    // Three repetitions to expose warm-up / cache effects.
    for (int i=0; i<3; ++i) benchmark(
        vec_shortstr,
        vec_shortstrview,
        mapNormal,
        mapTrans,
        unordmapNormal,
        unordmapTrans);

    std::cout << "Long String Benchmark" << std::endl;
    std::cout << "=====================" << std::endl;

    initLongStringVec(vec_shortstr, vec_shortstrview);
    initMapNormal(vec_shortstr, mapNormal);
    initMapTrans(vec_shortstr, mapTrans);

    initUnorderedMapNormal(vec_shortstr, unordmapNormal);
    initUnorderedMapTrans(vec_shortstr, unordmapTrans);

    for (int i=0; i<3; ++i) benchmark(
        vec_shortstr,
        vec_shortstrview,
        mapNormal,
        mapTrans,
        unordmapNormal,
        unordmapTrans);

    return 0;
}
+
// Time MAX_LOOP lookup sweeps of every input string against the transparent
// map variants (the non-transparent "Normal" sections are kept but disabled).
// `grandtotal` accumulates found values purely so the optimizer cannot
// discard the lookups.
void benchmark(
    const std::vector<std::string>& vec_shortstr,
    const std::vector<std::string_view>& vec_shortstrview,
    const std::map<std::string, size_t>& mapNormal,
    const std::map<std::string, size_t, std::less<> >& mapTrans,
    const std::unordered_map<std::string, size_t>& unordmapNormal,
    const std::unordered_map<std::string, size_t, string_hash, MyEqual>& unordmapTrans)
{
    size_t grandtotal = 0;
    size_t total = 0;
    timer stopwatch;
/*
    total = 0;
    stopwatch.start("Normal Map with string");
    for (size_t i = 0; i < MAX_LOOP; ++i)
    {
        for (size_t j = 0; j < vec_shortstr.size(); ++j)
        {
            const auto& it = mapNormal.find(vec_shortstr[j]);
            if(it!=mapNormal.cend())
                total += it->second;
        }
    }
    grandtotal += total;
    stopwatch.stop();

    total = 0;
    stopwatch.start("Normal Map with char*");
    for (size_t i = 0; i < MAX_LOOP; ++i)
    {
        for (size_t j = 0; j < vec_shortstr.size(); ++j)
        {
            const auto& it = mapNormal.find(vec_shortstr[j].c_str());
            if (it != mapNormal.cend())
                total += it->second;
        }
    }
    grandtotal += total;
    stopwatch.stop();
*/
    // Sorted map with transparent comparator, lookup by const char*.
    total = 0;
    stopwatch.start("Trans Map with char*");
    for (size_t i = 0; i < MAX_LOOP; ++i)
    {
        for (size_t j = 0; j < vec_shortstr.size(); ++j)
        {
            const auto& it = mapTrans.find(vec_shortstr[j].c_str());
            if (it != mapTrans.cend())
                total += it->second;
        }
    }
    grandtotal += total;
    stopwatch.stop();

    // Sorted map with transparent comparator, lookup by string_view.
    total = 0;
    stopwatch.start("Trans Map with string_view");
    for (size_t i = 0; i < MAX_LOOP; ++i)
    {
        for (size_t j = 0; j < vec_shortstrview.size(); ++j)
        {
            const auto& it = mapTrans.find(vec_shortstrview[j]);
            if (it != mapTrans.cend())
                total += it->second;
        }
    }
    grandtotal += total;
    stopwatch.stop();
/*
    total = 0;
    stopwatch.start("Normal Unord Map with string");
    for (size_t i = 0; i < MAX_LOOP; ++i)
    {
        for (size_t j = 0; j < vec_shortstr.size(); ++j)
        {
            const auto& it = unordmapNormal.find(vec_shortstr[j]);
            if (it != unordmapNormal.cend())
                total += it->second;
        }
    }
    grandtotal += total;
    stopwatch.stop();

    total = 0;
    stopwatch.start("Normal Unord Map with char*");
    for (size_t i = 0; i < MAX_LOOP; ++i)
    {
        for (size_t j = 0; j < vec_shortstr.size(); ++j)
        {
            const auto& it = unordmapNormal.find(vec_shortstr[j].c_str());
            if (it != unordmapNormal.cend())
                total += it->second;
        }
    }
    grandtotal += total;
    stopwatch.stop();
*/
    // Unordered map with transparent hash/equal, lookup by const char*.
    total = 0;
    stopwatch.start("Trans Unord Map with char*");
    for (size_t i = 0; i < MAX_LOOP; ++i)
    {
        for (size_t j = 0; j < vec_shortstr.size(); ++j)
        {
            const auto& it = unordmapTrans.find(vec_shortstr[j].c_str());
            if (it != unordmapTrans.cend())
                total += it->second;
        }
    }
    grandtotal += total;
    stopwatch.stop();

    // Unordered map with transparent hash/equal, lookup by string_view.
    total = 0;
    stopwatch.start("Trans Unord Map with string_view");
    for (size_t i = 0; i < MAX_LOOP; ++i)
    {
        for (size_t j = 0; j < vec_shortstrview.size(); ++j)
        {
            const auto& it = unordmapTrans.find(vec_shortstrview[j]);
            if (it != unordmapTrans.cend())
                total += it->second;
        }
    }
    grandtotal += total;

    stopwatch.stop();

    std::cout << "grandtotal:" << grandtotal << " <--- Ignore this\n" << std::endl;

}
diff --git a/misc/benchmarks/picobench/picobench.hpp b/misc/benchmarks/picobench/picobench.hpp
new file mode 100644
index 00000000..2e4541e0
--- /dev/null
+++ b/misc/benchmarks/picobench/picobench.hpp
@@ -0,0 +1,1479 @@
+// picobench v2.00
+// https://github.com/iboB/picobench
+//
+// A micro microbenchmarking library in a single header file
+//
+// MIT License
+//
+// Copyright(c) 2017-2018 Borislav Stanimirov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files(the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions :
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+//
+//
+// VERSION HISTORY
+//
+// 2.x1 (2021-01-19) * Added user data per iter, changed text output.
+// 2.01 (2019-03-03) * Fixed android build when binding to a single core
+// 2.00 (2018-10-30) * Breaking change! runner::run_benchmarks doesn't return
+// a report anymore. The report is generated by
+// runner::generate_report instead
+// * Breaking change! report_output_format doesn't accept
+// output streams as arguments. Use set_output_streams.
+// * Potentially breaking change (gcc and clang)! Always set
+// thread affinity to first core. Macro to turn this off.
+// * Added runner::run which performs a full execution
+// * Added benchmark results and results comparison
+// * Added error enum
+// * Macro option to allow a std::function as a benchmark
+// * Macros for default iterations and samples
+// * Allowing local registration of benchmarks in a runner
+// * Added local_runner which doesn't consume registry
+// * More force-inline functions in states
+// * Fixed some potential compilation warnings
+// * Removed tests from header
+// * Anonymous namespace for impl-only classes and funcs
+// * Added setters and getters for every config option
+// 1.05 (2018-07-17) * Counting iterations of state
+// * Optionally set thread affinity when running benchmarks
+// so as not to miss cpu cycles with the high res clock
+// 1.04 (2018-02-06) * User data for benchmarks, which can be seen from states
+// * `add_custom_duration` to states so the user can modify time
+// * Text table format fixes
+// * Custom cmd opts in runner
+// * --version CLI command
+// 1.03 (2018-01-05) Added helper methods for easier browsing of reports
+// 1.02 (2018-01-04) Added parsing of command line
+// 1.01 (2018-01-03) * Only taking the fastest sample into account
+// * Set default number of samples to 2
+// * Added CSV output
+// 1.00 (2018-01-01) Initial release
+// 0.01 (2017-12-28) Initial prototype release
+//
+//
+// EXAMPLE
+//
+// void my_function(); // the function you want to benchmark
+//
+// // write your benchmarking code in a function like this
+// static void benchmark_my_function(picobench::state& state)
+// {
+// // use the state in a range-based for loop to call your code
+// for (auto _ : state)
+// my_function();
+// }
+// // create a picobench with your benchmarking code
+// PICOBENCH(benchmark_my_function);
+//
+//
+// BASIC DOCUMENTATION
+//
+// A very brief usage guide follows. For more detailed documentation see the
+// README here: https://github.com/iboB/picobench/blob/master/README.md
+//
+// Simply include this file wherever you need.
+// You need to define PICOBENCH_IMPLEMENT_WITH_MAIN (or PICOBENCH_IMPLEMENT if
+// you want to write your own main function) in one compilation unit to have
+// the implementation compiled there.
+//
+// The benchmark code must be a `void (picobench::state&)` function which
+// you have written. Benchmarks are registered using the `PICOBENCH` macro
+// where the only argument is the function's name.
+//
+// You can have multiple benchmarks in multiple files. All will be run when the
+// executable starts.
+//
+// Typically a benchmark has a loop. To run the loop use the state argument in
+// a range-based for loop in your function. The time spent looping is measured
+// for the benchmark. You can have initialization/deinitialization code outside
+// of the loop and it won't be measured.
+//
+#pragma once
+
+#include <cstdint>
+#include <chrono>
+#include <vector>
+
+#if defined(PICOBENCH_STD_FUNCTION_BENCHMARKS)
+# include <functional>
+#endif
+
+#define PICOBENCH_VERSION 2.x1
+#define PICOBENCH_VERSION_STR "2.x1"
+
+#if defined(PICOBENCH_DEBUG)
+# include <cassert>
+# define I_PICOBENCH_ASSERT assert
+#else
+# define I_PICOBENCH_ASSERT(...)
+#endif
+
+#if defined(__GNUC__)
+# define PICOBENCH_INLINE __attribute__((always_inline))
+#elif defined(_MSC_VER)
+# define PICOBENCH_INLINE __forceinline
+#else
+# define PICOBENCH_INLINE inline
+#endif
+
+namespace picobench
+{
+
+#if defined(_MSC_VER) || defined(__MINGW32__) || defined(PICOBENCH_TEST)
+struct high_res_clock
+{
+ typedef long long rep;
+ typedef std::nano period;
+ typedef std::chrono::duration<rep, period> duration;
+ typedef std::chrono::time_point<high_res_clock> time_point;
+ static const bool is_steady = true;
+
+ static time_point now();
+};
+#else
+using high_res_clock = std::chrono::high_resolution_clock;
+#endif
+
+using result_t = intptr_t;
+using udata_t = uintptr_t;
+
+class state
+{
+public:
+ explicit state(size_t num_iterations, udata_t user_data = 0, udata_t arg = 0)
+ : _user_data(user_data)
+ , _arg(arg)
+ , _iterations(num_iterations)
+ {
+ I_PICOBENCH_ASSERT(_iterations > 0);
+ }
+
+ size_t iterations() const { return _iterations; }
+
+ uint64_t duration_ns() const { return _duration_ns; }
+ void add_custom_duration(uint64_t duration_ns) { _duration_ns += duration_ns; }
+
+ udata_t user_data() const { return _user_data; }
+ udata_t arg() const { return _arg; }
+
+ // optionally set result of benchmark
+    // this can be used as a value sink to prevent optimizations
+ // or a way to check whether benchmarks produce the same results
+ void set_result(uintptr_t data) { _result = data; }
+ result_t result() const { return _result; }
+
+ PICOBENCH_INLINE
+ void start_timer()
+ {
+ _start = high_res_clock::now();
+ }
+
+ PICOBENCH_INLINE
+ void stop_timer()
+ {
+ auto duration = high_res_clock::now() - _start;
+ _duration_ns = std::chrono::duration_cast<std::chrono::nanoseconds>(duration).count();
+ }
+
+ struct iterator
+ {
+ PICOBENCH_INLINE
+ iterator(state* parent)
+ : _counter(0)
+ , _lim(parent->iterations())
+ , _state(parent)
+ {
+ I_PICOBENCH_ASSERT(_counter < _lim);
+ }
+
+ PICOBENCH_INLINE
+ iterator()
+ : _counter(0)
+ , _lim(0)
+ , _state(nullptr)
+ {}
+
+ PICOBENCH_INLINE
+ iterator& operator++()
+ {
+ I_PICOBENCH_ASSERT(_counter < _lim);
+ ++_counter;
+ return *this;
+ }
+
+ PICOBENCH_INLINE
+ bool operator!=(const iterator&) const
+ {
+ if (_counter < _lim) return true;
+ _state->stop_timer();
+ return false;
+ }
+
+ PICOBENCH_INLINE
+ size_t operator*() const
+ {
+ return _counter;
+ }
+
+ private:
+ size_t _counter;
+ const size_t _lim;
+ state* _state;
+ };
+
+ PICOBENCH_INLINE
+ iterator begin()
+ {
+ start_timer();
+ return iterator(this);
+ }
+
+ PICOBENCH_INLINE
+ iterator end()
+ {
+ return iterator();
+ }
+
+private:
+ high_res_clock::time_point _start;
+ uint64_t _duration_ns = 0;
+ udata_t _user_data;
+ udata_t _arg;
+ size_t _iterations;
+ result_t _result = 0;
+};
+
+// this can be used for manual measurement
+class scope
+{
+public:
+ PICOBENCH_INLINE
+ scope(state& s)
+ : _state(s)
+ {
+ _state.start_timer();
+ }
+
+ PICOBENCH_INLINE
+ ~scope()
+ {
+ _state.stop_timer();
+ }
+private:
+ state& _state;
+};
+
+#if defined(PICOBENCH_STD_FUNCTION_BENCHMARKS)
+using benchmark_proc = std::function<void(state&)>;
+#else
+using benchmark_proc = void(*)(state&);
+#endif
+
+class benchmark
+{
+public:
+ const char* name() const { return _name; }
+
+ benchmark& iterations(std::vector<size_t> data) { _state_iterations = std::move(data); return *this; }
+ benchmark& samples(int n) { _samples = n; return *this; }
+ benchmark& label(const char* label) { _name = label; return *this; }
+ benchmark& baseline(bool b = true) { _baseline = b; return *this; }
+ benchmark& user_data(udata_t data) { _user_data = data; return *this; }
+ benchmark& args(std::vector<udata_t> data) { _args = std::move(data); return *this; }
+
+protected:
+ friend class runner;
+
+ benchmark(const char* name, benchmark_proc proc);
+
+ const char* _name;
+ const benchmark_proc _proc;
+ bool _baseline = false;
+
+ udata_t _user_data = 0;
+ std::vector<size_t> _state_iterations;
+ std::vector<udata_t> _args;
+ int _samples = 0;
+};
+
+// used for globally registered functions
+// note that you can instantiate a runner and register local benchmarks for it alone
+class global_registry
+{
+public:
+ static int set_bench_suite(const char* name);
+ static benchmark& new_benchmark(const char* name, benchmark_proc proc);
+};
+
+}
+
+#define I_PICOBENCH_PP_CAT(a, b) I_PICOBENCH_PP_INTERNAL_CAT(a, b)
+#define I_PICOBENCH_PP_INTERNAL_CAT(a, b) a##b
+
+#define PICOBENCH_SUITE(name) \
+ static int I_PICOBENCH_PP_CAT(picobench_suite, __LINE__) = \
+ picobench::global_registry::set_bench_suite(name)
+
+#define PICOBENCH(func) \
+ static auto& I_PICOBENCH_PP_CAT(picobench, __LINE__) = \
+ picobench::global_registry::new_benchmark(#func, func)
+
+#if defined(PICOBENCH_IMPLEMENT_WITH_MAIN)
+# define PICOBENCH_IMPLEMENT
+# define PICOBENCH_IMPLEMENT_MAIN
+#endif
+
+#if defined(PICOBENCH_IMPLEMENT)
+
+#include <random>
+#include <iostream>
+#include <fstream>
+#include <iomanip>
+#include <map>
+#include <memory>
+#include <cstring>
+#include <cstdlib>
+
+#if defined(_WIN32)
+# define WIN32_LEAN_AND_MEAN
+# include <Windows.h>
+#else
+# if !defined(PICOBENCH_DONT_BIND_TO_ONE_CORE)
+# if defined(__APPLE__)
+# include <mach/mach.h>
+# else
+# include <sched.h>
+# endif
+# endif
+#endif
+
+namespace picobench
+{
+
+// namespace
+// {
+
+enum error_t
+{
+ no_error,
+ error_bad_cmd_line_argument, // ill-formed command-line argument
+ error_unknown_cmd_line_argument, // command argument looks like a picobench one, but isn't
+ error_sample_compare, // benchmark produced different results across samples
+ error_benchmark_compare, // two benchmarks of the same suite and dimension produced different results
+};
+
+class report
+{
+public:
+ struct benchmark_problem_space
+ {
+ size_t dimension; // number of iterations for the problem space
+ udata_t user_data;
+ udata_t arg;
+ int samples; // number of samples taken
+ uint64_t total_time_ns; // fastest sample!!!
+ result_t result; // result of fastest sample
+ };
+ struct benchmark
+ {
+ const char* name;
+ bool is_baseline;
+ std::vector<benchmark_problem_space> data;
+ };
+
+ struct suite
+ {
+ const char* name;
+ std::vector<benchmark> benchmarks; // benchmark view
+
+ const benchmark* find_benchmark(const char* name) const
+ {
+ for (auto& b : benchmarks)
+ {
+ if (strcmp(b.name, name) == 0)
+ return &b;
+ }
+
+ return nullptr;
+ }
+
+ const benchmark* find_baseline() const
+ {
+ for (auto& b : benchmarks)
+ {
+ if (b.is_baseline)
+ return &b;
+ }
+
+ return nullptr;
+ }
+ };
+
+ std::vector<suite> suites;
+ error_t error = no_error;
+
+ const suite* find_suite(const char* name) const
+ {
+ for (auto& s : suites)
+ {
+ if (strcmp(s.name, name) == 0)
+ return &s;
+ }
+
+ return nullptr;
+ }
+
+ void to_text(std::ostream& out) const
+ {
+ using namespace std;
+ int width = 100;
+ for (auto& suite : suites)
+ {
+ if (suite.name)
+ {
+ out << suite.name << ":\n";
+ }
+ line(out, width);
+ out <<
+ " Name (* = baseline) |Iterations | Arg |Baseline | ns/op | Total ms | Ops/second\n";
+ line(out, width);
+
+ auto problem_space_view = get_problem_space_view(suite);
+ for (auto& ps : problem_space_view)
+ {
+ const problem_space_benchmark* baseline = nullptr;
+ for (auto& bm : ps.second)
+ {
+ if (bm.is_baseline)
+ {
+ baseline = &bm;
+ break;
+ }
+ }
+
+ for (auto& bm : ps.second)
+ {
+ out << (bm.is_baseline ? "* " : " ") << left << setw(26) << bm.name << right;
+
+ out << " |"
+ << setw(10) << ps.first.first << " |"
+ << setw(10) << bm.arg << " |";
+
+ if (bm.is_baseline) //(baseline == &bm)
+ {
+ baseline = &bm;
+ out << " - |";
+ }
+ else if (baseline)
+ {
+ out << setw(8) << fixed << setprecision(3)
+ << double(bm.total_time_ns) / double(baseline->total_time_ns) << " |";
+ }
+ else
+ {
+ // no baseline to compare to
+ out << " ? |";
+ }
+
+ auto ns_op = (bm.total_time_ns / ps.first.first);
+ if (ns_op > 99999999)
+ {
+ int e = 0;
+ while (ns_op > 999999)
+ {
+ ++e;
+ ns_op /= 10;
+ }
+ out << setw(8) << ns_op << 'e' << e;
+ }
+ else
+ {
+ out << setw(10) << ns_op;
+ }
+ out << " |";
+ out << setw(10) << fixed << setprecision(2) << double(bm.total_time_ns) / 1000000.0 << " |";
+
+ auto ops_per_sec = ps.first.first * (1000000000.0 / double(bm.total_time_ns));
+ out << setw(12) << fixed << setprecision(1) << ops_per_sec << "\n";
+ }
+ }
+ line(out, width);
+ }
+ }
+
+ void to_text_concise(std::ostream& out)
+ {
+ using namespace std;
+ int width = 65;
+ for (auto& suite : suites)
+ {
+ if (suite.name)
+ {
+ out << suite.name << ":\n";
+ }
+
+ line(out, width);
+
+ out <<
+ " Name (* = baseline) | Baseline | ns/op | Total ms\n"; // | Ops/second\n";
+
+ line(out, width);
+
+ const benchmark* baseline = nullptr;
+ for (auto& bm : suite.benchmarks)
+ {
+ if (bm.is_baseline)
+ {
+ baseline = &bm;
+ break;
+ }
+ }
+ I_PICOBENCH_ASSERT(baseline);
+ uint64_t baseline_total_time = 0;
+ size_t baseline_total_iterations = 0;
+ for (auto& d : baseline->data)
+ {
+ baseline_total_time += d.total_time_ns;
+ baseline_total_iterations += d.dimension;
+ }
+
+ for (auto& bm : suite.benchmarks)
+ {
+ out << (bm.is_baseline ? "* " : " ") << left << setw(26) << bm.name << right
+ << " |";
+
+ uint64_t total_time = 0;
+ size_t total_iterations = 0;
+ for (auto& d : bm.data)
+ {
+ total_time += d.total_time_ns;
+ total_iterations += d.dimension;
+ }
+ uint64_t ns_per_op = total_time / total_iterations;
+
+ if (bm.is_baseline)
+ {
+ out << " -";
+ baseline = &bm;
+ baseline_total_time = total_time;
+ baseline_total_iterations = total_iterations;
+ }
+ else
+ {
+ out << setw(9) << fixed << setprecision(3)
+ << double(total_time) / baseline_total_time;
+ }
+
+ out << " |" << setw(10) << ns_per_op << " |";
+ out << setw(12) << fixed << setprecision(2) << double(total_time) / 1000000.0 << "\n";
+
+ //auto ops_per_sec = total_iterations * (1000000000.0 / total_time);
+ //out << setw(12) << fixed << setprecision(1) << ops_per_sec << "\n";
+ }
+
+ line(out, width);
+ }
+ }
+
+ void to_csv(std::ostream& out) const
+ {
+ using namespace std;
+ const char* sep = ",";
+
+ for (auto& suite : suites)
+ {
+ out << "Suite, Baseline, Benchmark, Iterations, Arg, Ratio, Total ms, ns/op, Ops/second\n";
+
+ auto problem_space_view = get_problem_space_view(suite);
+ for (auto& ps : problem_space_view)
+ {
+ const problem_space_benchmark* baseline = nullptr;
+ for (auto& bm : ps.second)
+ {
+ if (bm.is_baseline)
+ {
+ baseline = &bm;
+ break;
+ }
+ }
+
+ for (auto& bm : ps.second)
+ {
+ out << '"' << (suite.name ? suite.name : "") << '"';
+ out << sep << (bm.is_baseline ? "true" : "false");
+ out << sep << '"' << bm.name << '"';
+ out << sep << ps.first.first
+ << sep << bm.arg << sep;
+
+ if (baseline == &bm)
+ {
+ out << 1.0;
+ }
+ else if (baseline)
+ {
+ out << fixed << setprecision(3) << double(bm.total_time_ns) / baseline->total_time_ns;
+ }
+ else
+ {
+ out << -1.0; // no baseline to compare to
+ }
+
+ out << sep << fixed << setprecision(3) << bm.total_time_ns / 1000000.0;
+
+ auto ns_op = (bm.total_time_ns / ps.first.first);
+ out << sep << ns_op;
+
+ auto ops_per_sec = ps.first.first * (1000000000.0 / bm.total_time_ns);
+ out << sep << fixed << setprecision(1) << ops_per_sec << "\n";
+ }
+ }
+ }
+ }
+
+
+
+ struct problem_space_benchmark
+ {
+ const char* name;
+ bool is_baseline;
+ udata_t user_data;
+ udata_t arg;
+ uint64_t total_time_ns; // fastest sample!!!
+ result_t result; // result of fastest sample
+ };
+ using problem_space_view_map = std::map<std::pair<size_t, udata_t>,
+ std::vector<problem_space_benchmark>>;
+ static problem_space_view_map get_problem_space_view(const suite& s)
+ {
+ problem_space_view_map res;
+ for (auto& bm : s.benchmarks)
+ {
+ for (auto& d : bm.data)
+ {
+ auto& pvbs = res[{d.dimension, d.arg}];
+ pvbs.push_back({ bm.name, bm.is_baseline, d.user_data, d.arg, d.total_time_ns, d.result });
+ }
+ }
+ return res;
+ }
+
+private:
+
+ static void line(std::ostream& out, int width = 79)
+ {
+ for (int i = 0; i < width; ++i) out.put('=');
+ out.put('\n');
+ }
+};
+
+class benchmark_impl : public benchmark
+{
+public:
+ benchmark_impl(const char* name, benchmark_proc proc)
+ : benchmark(name, proc)
+ {}
+
+private:
+ friend class runner;
+
+ // state
+ std::vector<state> _states; // length is _samples * _state_iterations.size()
+ std::vector<state>::iterator _istate;
+};
+
+class picostring
+{
+public:
+ picostring() = default;
+ explicit picostring(const char* text)
+ {
+ str = text;
+ len = int(strlen(text));
+ }
+
+ const char* str;
+ int len = 0;
+
+ // checks whether other begins with this string
+ bool cmp(const char* other) const
+ {
+ return strncmp(str, other, size_t(len)) == 0;
+ }
+};
+
+class null_streambuf : public std::streambuf
+{
+public:
+ virtual int overflow(int c) override { return c; }
+};
+
+struct null_stream : public std::ostream
+{
+ null_stream() : std::ostream(&_buf) {}
+private:
+ null_streambuf _buf;
+} cnull;
+
+enum class report_output_format
+{
+ text,
+ concise_text,
+ csv,
+ all,
+};
+
+#if !defined(PICOBENCH_DEFAULT_ITERATIONS)
+# define PICOBENCH_DEFAULT_ITERATIONS { 8, 64, 512, 4096, 8192 }
+#endif
+
+#if !defined(PICOBENCH_DEFAULT_SAMPLES)
+# define PICOBENCH_DEFAULT_SAMPLES 2
+#endif
+
+using benchmarks_vector = std::vector<std::unique_ptr<benchmark_impl>>;
+struct rsuite
+{
+ const char* name;
+ benchmarks_vector benchmarks;
+};
+
+class registry
+{
+public:
+ benchmark& add_benchmark(const char* name, benchmark_proc proc)
+ {
+ auto b = new benchmark_impl(name, proc);
+ benchmarks_for_current_suite().emplace_back(b);
+ return *b;
+ }
+
+ void set_suite(const char* name)
+ {
+ _current_suite_name = name;
+ }
+
+ const char*& current_suite_name()
+ {
+ return _current_suite_name;
+ }
+
+ benchmarks_vector& benchmarks_for_current_suite()
+ {
+ for (auto& s : _suites)
+ {
+ if (s.name == _current_suite_name)
+ return s.benchmarks;
+
+ if (s.name && _current_suite_name && strcmp(s.name, _current_suite_name) == 0)
+ return s.benchmarks;
+ }
+ _suites.push_back({ _current_suite_name, {} });
+ return _suites.back().benchmarks;
+ }
+
+protected:
+ friend class runner;
+ const char* _current_suite_name = nullptr;
+ std::vector<rsuite> _suites;
+};
+
+registry& g_registry()
+{
+ static registry r;
+ return r;
+}
+
+class runner : public registry
+{
+public:
+ runner(bool local = false)
+ : _default_state_iterations(PICOBENCH_DEFAULT_ITERATIONS)
+ , _default_samples(PICOBENCH_DEFAULT_SAMPLES)
+ {
+ if (!local)
+ {
+ _suites = std::move(g_registry()._suites);
+ }
+ }
+
+ int run(int benchmark_random_seed = -1)
+ {
+ if (should_run())
+ {
+ run_benchmarks(benchmark_random_seed);
+ auto report = generate_report();
+ std::ostream* out = _stdout;
+ std::ofstream fout;
+ report_output_format fmt[] = {report_output_format::csv,
+ report_output_format::text,
+ report_output_format::concise_text};
+ const char *ext[] = {".csv", ".txt", ".lst"}, *fn = preferred_output_filename();
+ bool all = preferred_output_format() == report_output_format::all;
+ for (int i = 0; i < 3; ++i)
+ {
+ if (all || preferred_output_format() == fmt[i])
+ {
+ if (fn)
+ {
+ std::string name(fn);
+
+ if (all || name.find(".") == std::string::npos)
+ {
+ name += ext[i];
+ }
+ fout.close();
+ fout.open(name.c_str());
+ if (!fout.is_open())
+ {
+ std::cerr << "Error: Could not open output file `" << fn << "`\n";
+ return 1;
+ }
+ out = &fout;
+ }
+
+ switch (fmt[i])
+ {
+ case report_output_format::text:
+ report.to_text(*out);
+ break;
+ case report_output_format::concise_text:
+ report.to_text_concise(*out);
+ break;
+ case report_output_format::csv:
+ report.to_csv(*out);
+ break;
+ default: break;
+ }
+ }
+ }
+ }
+ return error();
+ }
+
+ void run_benchmarks(int random_seed = -1)
+ {
+ I_PICOBENCH_ASSERT(_error == no_error && _should_run);
+
+ if (random_seed == -1)
+ {
+ random_seed = int(std::random_device()());
+ }
+
+ std::minstd_rand rnd(random_seed);
+
+ // vector of all benchmarks
+ std::vector<benchmark_impl*> benchmarks;
+ for (auto& suite : _suites)
+ {
+ // also identify a baseline in this loop
+ // if there is no explicit one, set the first one as a baseline
+ bool found_baseline = false;
+ for (auto irb = suite.benchmarks.begin(); irb != suite.benchmarks.end(); ++irb)
+ {
+ auto& rb = *irb;
+ rb->_states.clear(); // clear states so we can safely call run_benchmarks multiple times
+ benchmarks.push_back(rb.get());
+ if (rb->_baseline)
+ {
+ found_baseline = true;
+ }
+
+#if !defined(PICOBENCH_STD_FUNCTION_BENCHMARKS)
+ // check for same func
+ for (auto ib = irb+1; ib != suite.benchmarks.end(); ++ib)
+ {
+ auto& b = *ib;
+ if (rb->_proc == b->_proc)
+ {
+ *_stdwarn << "Warning: " << rb->name() << " and " << b->name()
+ << " are benchmarks of the same function.\n";
+ }
+ }
+#endif
+ }
+
+ if (!found_baseline && !suite.benchmarks.empty())
+ {
+ suite.benchmarks.front()->_baseline = true;
+ }
+ }
+
+ // initialize benchmarks
+ for (auto b : benchmarks)
+ {
+ if (b->_state_iterations.empty())
+ b->_state_iterations = _default_state_iterations;
+
+ udata_t arg = b->_args.empty() ? udata_t() : b->_args.back();
+ b->_args.resize(b->_state_iterations.size(), arg);
+
+ if (b->_samples == 0)
+ b->_samples = _default_samples;
+
+ b->_states.reserve(b->_state_iterations.size() * b->_samples);
+
+ // fill states while random shuffling them
+ for (size_t iter = 0; iter < b->_state_iterations.size(); ++iter)
+ {
+ for (int i = 0; i < b->_samples; ++i)
+ {
+ auto index = rnd() % (b->_states.size() + 1);
+ auto pos = b->_states.begin() + long(index);
+ b->_states.emplace(pos, b->_state_iterations[iter], b->_user_data, b->_args[iter]);
+ }
+ }
+
+ b->_istate = b->_states.begin();
+ }
+
+#if !defined(PICOBENCH_DONT_BIND_TO_ONE_CORE)
+ // set thread affinity to first cpu
+ // so the high resolution clock doesn't miss cycles
+ {
+#if defined(_WIN32)
+ SetThreadAffinityMask(GetCurrentThread(), 1);
+#elif defined(__APPLE__)
+ thread_affinity_policy_data_t policy = {0};
+ thread_policy_set(
+ pthread_mach_thread_np(pthread_self()),
+ THREAD_AFFINITY_POLICY,
+ (thread_policy_t)&policy, 1);
+#else
+ cpu_set_t cpuset;
+ CPU_ZERO(&cpuset);
+ CPU_SET(0, &cpuset);
+
+ sched_setaffinity(0, sizeof(cpu_set_t), &cpuset);
+#endif
+ }
+#endif
+
+ // we run a random benchmark from it incrementing _istate for each
+ // when _istate reaches _states.end(), we erase the benchmark
+ // when the vector becomes empty, we're done
+ while (!benchmarks.empty())
+ {
+ auto i = benchmarks.begin() + long(rnd() % benchmarks.size());
+ auto& b = *i;
+ std::cerr << '.';
+ b->_proc(*b->_istate);
+
+ ++b->_istate;
+
+ if (b->_istate == b->_states.end())
+ {
+ benchmarks.erase(i);
+ }
+ }
+ std::cerr << '\n';
+ }
+
+ // function to compare results
+ template <typename CompareResult = std::equal_to<result_t>>
+ report generate_report(CompareResult cmp = std::equal_to<result_t>()) const
+ {
+ report rpt;
+
+ rpt.suites.resize(_suites.size());
+ auto rpt_suite = rpt.suites.begin();
+
+ for (auto& suite : _suites)
+ {
+ rpt_suite->name = suite.name;
+
+ // build benchmark view
+ rpt_suite->benchmarks.resize(suite.benchmarks.size());
+ auto rpt_benchmark = rpt_suite->benchmarks.begin();
+
+ for (auto& b : suite.benchmarks)
+ {
+ rpt_benchmark->name = b->_name;
+ rpt_benchmark->is_baseline = b->_baseline;
+
+ rpt_benchmark->data.reserve(b->_state_iterations.size());
+ for (size_t i = 0; i < b->_state_iterations.size(); ++i)
+ {
+ rpt_benchmark->data.push_back({ b->_state_iterations[i], b->_user_data, b->_args[i], 0, 0ll });
+ }
+
+ for (auto& state : b->_states)
+ {
+ for (auto& d : rpt_benchmark->data)
+ {
+ if (state.iterations() == d.dimension && state.arg() == d.arg)
+ {
+ if (d.total_time_ns == 0 || d.total_time_ns > state.duration_ns())
+ {
+ d.total_time_ns = state.duration_ns();
+ d.result = state.result();
+ }
+
+ if (_compare_results_across_samples)
+ {
+ if (d.result != state.result() && !cmp(d.result, state.result()))
+ {
+ *_stderr << "Error: Two samples of " << b->name() << " @" << d.dimension << " produced different results: "
+ << d.result << " and " << state.result() << '\n';
+ _error = error_sample_compare;
+ }
+ }
+
+ ++d.samples;
+ }
+ }
+ }
+
+#if defined(PICOBENCH_DEBUG)
+ for (auto& d : rpt_benchmark->data)
+ {
+ I_PICOBENCH_ASSERT(d.samples == b->_samples);
+ }
+#endif
+
+ ++rpt_benchmark;
+ }
+
+ ++rpt_suite;
+ }
+
+ if (_compare_results_across_benchmarks)
+ {
+ for(auto& suite : rpt.suites)
+ {
+ auto psview = report::get_problem_space_view(suite);
+
+ for (auto& space : psview)
+ {
+ I_PICOBENCH_ASSERT(!space.second.empty());
+
+ if (space.second.size() == 1)
+ {
+ auto& b = space.second.front();
+ *_stdwarn << "Warning: Benchmark " << b.name << " @" << space.first.first
+ << " has a single instance and cannot be compared to others.\n";
+ continue;
+ }
+
+ auto result0 = space.second.front().result;
+
+ for (auto& b : space.second)
+ {
+ if (result0 != b.result && !cmp(result0, b.result))
+ {
+ auto& f = space.second.front();
+ *_stderr << "Error: Benchmarks " << f.name << " and " << b.name
+ << " @" << space.first.first << " produce different results: "
+ << result0 << " and " << b.result << '\n';
+ _error = error_benchmark_compare;
+ }
+ }
+ }
+ }
+ }
+
+ return rpt;
+ }
+
+ void set_default_state_iterations(const std::vector<size_t>& data)
+ {
+ _default_state_iterations = data;
+ }
+
+ const std::vector<size_t>& default_state_iterations() const
+ {
+ return _default_state_iterations;
+ }
+
+ void set_default_samples(int n)
+ {
+ _default_samples = n;
+ }
+
+ int default_samples() const
+ {
+ return _default_samples;
+ }
+
+ void add_cmd_opt(const char* cmd, const char* arg_desc, const char* cmd_desc, bool(*handler)(uintptr_t, const char*), udata_t user_data = 0)
+ {
+ cmd_line_option opt;
+ opt.cmd = picostring(cmd);
+ opt.arg_desc = picostring(arg_desc);
+ opt.desc = cmd_desc;
+ opt.handler = nullptr;
+ opt.user_data = user_data;
+ opt.user_handler = handler;
+ _opts.push_back(opt);
+ }
+
+ // returns false if there were errors parsing the command line
+ // all args starting with prefix are parsed
+ // the others are ignored
+ bool parse_cmd_line(int argc, const char* const argv[], const char* cmd_prefix = "-")
+ {
+ _cmd_prefix = picostring(cmd_prefix);
+
+ if (!_has_opts)
+ {
+ _opts.emplace_back("-iters=", "<n1,n2,n3,...>",
+ "Sets default iterations for benchmarks",
+ &runner::cmd_iters);
+ _opts.emplace_back("-samples=", "<n>",
+ "Sets default number of samples for benchmarks",
+ &runner::cmd_samples);
+ _opts.emplace_back("-out-fmt=", "<txt|con|csv>",
+ "Outputs text, concise, csv or all",
+ &runner::cmd_out_fmt);
+ _opts.emplace_back("-all", "",
+ "Outputs all formats: text, con, csv",
+ &runner::cmd_out_all);
+ _opts.emplace_back("-output=", "<filename>",
+ "Sets output filename or `stdout`",
+ &runner::cmd_output);
+ _opts.emplace_back("-no-compare-results", "",
+ "Doesn't compare benchmark results",
+ &runner::cmd_compare_results);
+ _opts.emplace_back("-no-run", "",
+ "Doesn't run benchmarks",
+ &runner::cmd_no_run);
+ _opts.emplace_back("-version", "",
+ "Show version info",
+ &runner::cmd_version);
+ _opts.emplace_back("-help", "",
+ "Prints help",
+ &runner::cmd_help);
+ _has_opts = true;
+ }
+
+ for (int i = 1; i < argc; ++i)
+ {
+ if (!_cmd_prefix.cmp(argv[i]))
+ continue;
+
+ auto arg = argv[i] + _cmd_prefix.len;
+
+ bool found = false;
+ for (auto& opt : _opts)
+ {
+ if (opt.cmd.cmp(arg))
+ {
+ found = true;
+ bool success = false;
+ if (opt.handler)
+ {
+ success = (this->*opt.handler)(arg + opt.cmd.len);
+ }
+ else
+ {
+ I_PICOBENCH_ASSERT(opt.user_handler);
+ success = opt.user_handler(opt.user_data, arg + opt.cmd.len);
+ }
+
+ if (!success)
+ {
+ *_stderr << "Error: Bad command-line argument: " << argv[i] << "\n";
+ _error = error_bad_cmd_line_argument;
+ return false;
+ }
+ break;
+ }
+ }
+
+ if (!found)
+ {
+ *_stderr << "Error: Unknown command-line argument: " << argv[i] << "\n";
+ _error = error_unknown_cmd_line_argument;
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ void set_should_run(bool set) { _should_run = set; }
+ bool should_run() const { return _error == no_error && _should_run; }
+ void set_error(error_t e) { _error = e; }
+ error_t error() const { return _error; }
+
+ void set_output_streams(std::ostream& out, std::ostream& err)
+ {
+ _stdout = &out;
+ _stderr = &err;
+ _stdwarn = &out;
+ }
+
+ void set_preferred_output_format(report_output_format fmt) { _output_format = fmt; }
+ report_output_format preferred_output_format() const { return _output_format; }
+
+ // can be nullptr (run will interpret it as stdout)
+ void set_preferred_output_filename(const char* path) { _output_file = path; }
+ const char* preferred_output_filename() const { return _output_file; }
+
+ void set_compare_results_across_samples(bool b) { _compare_results_across_samples = b; }
+ bool compare_results_across_samples() const { return _compare_results_across_samples; }
+
+ void set_compare_results_across_benchmarks(bool b) { _compare_results_across_benchmarks = b; }
+ bool compare_results_across_benchmarks() const { return _compare_results_across_benchmarks; }
+
+private:
+ // runner's suites and benchmarks come from its parent: registry
+
+ // state and configuration
+ mutable error_t _error = no_error;
+ bool _should_run = true;
+
+ bool _compare_results_across_samples = true;
+ bool _compare_results_across_benchmarks = true;
+
+ report_output_format _output_format = report_output_format::concise_text;
+ const char* _output_file = nullptr; // nullptr means stdout
+
+ std::ostream* _stdout = &std::cout;
+ std::ostream* _stderr = &std::cerr;
+ std::ostream* _stdwarn = &std::cout;
+
+ // default data
+
+ // default iterations per state per benchmark
+ std::vector<size_t> _default_state_iterations;
+
+ // default samples per benchmark
+ int _default_samples;
+
+ // command line parsing
+ picostring _cmd_prefix;
+ typedef bool (runner::*cmd_handler)(const char*); // internal handler
+ typedef bool(*ext_handler)(udata_t user_data, const char* cmd_line); // external (user) handler
+ struct cmd_line_option
+ {
+ cmd_line_option() = default;
+ cmd_line_option(const char* c, const char* a, const char* d, cmd_handler h)
+ : cmd(c)
+ , arg_desc(a)
+ , desc(d)
+ , handler(h)
+ , user_data(0)
+ , user_handler(nullptr)
+ {}
+ picostring cmd;
+ picostring arg_desc;
+ const char* desc;
+ cmd_handler handler; // may be nullptr for external handlers
+ udata_t user_data; // passed as an argument to user handlers
+ ext_handler user_handler;
+ };
+ bool _has_opts = false; // have opts been added to list
+ std::vector<cmd_line_option> _opts;
+
+ bool cmd_iters(const char* line)
+ {
+ std::vector<size_t> iters;
+ auto p = line;
+ while (true)
+ {
+ auto i = strtoull(p, nullptr, 10);
+ if (i <= 0) return false;
+ iters.push_back(i);
+ p = strchr(p + 1, ',');
+ if (!p) break;
+ ++p;
+ }
+ if (iters.empty()) return false;
+ _default_state_iterations = iters;
+ return true;
+ }
+
+ bool cmd_samples(const char* line)
+ {
+ int samples = int(strtol(line, nullptr, 10));
+ if (samples <= 0) return false;
+ _default_samples = samples;
+ return true;
+ }
+
+ bool cmd_no_run(const char* line)
+ {
+ if (*line) return false;
+ _should_run = false;
+ return true;
+ }
+
+ bool cmd_version(const char* line)
+ {
+ if (*line) return false;
+ *_stdout << "picobench " PICOBENCH_VERSION_STR << "\n";
+ _should_run = false;
+ return true;
+ }
+
+ bool cmd_help(const char* line)
+ {
+ if (*line) return false;
+ cmd_version(line);
+ auto& cout = *_stdout;
+ for (auto& opt : _opts)
+ {
+ cout << ' ' << _cmd_prefix.str << opt.cmd.str << opt.arg_desc.str;
+ int w = 27 - (_cmd_prefix.len + opt.cmd.len + opt.arg_desc.len);
+ for (int i = 0; i < w; ++i)
+ {
+ cout.put(' ');
+ }
+ cout << opt.desc << "\n";
+ }
+ _should_run = false;
+ return true;
+ }
+
+ bool cmd_out_all(const char* line)
+ {
+ _output_format = report_output_format::all;
+ return true;
+ }
+ bool cmd_out_fmt(const char* line)
+ {
+ if (strcmp(line, "txt") == 0)
+ {
+ _output_format = report_output_format::text;
+ }
+ else if (strcmp(line, "con") == 0)
+ {
+ _output_format = report_output_format::concise_text;
+ }
+ else if (strcmp(line, "csv") == 0)
+ {
+ _output_format = report_output_format::csv;
+ }
+ else
+ {
+ return false;
+ }
+ return true;
+ }
+
+ bool cmd_output(const char* line)
+ {
+ if (strcmp(line, "stdout") != 0)
+ {
+ _output_file = line;
+ }
+ else
+ {
+ _output_file = nullptr;
+ }
+ return true;
+ }
+
+ bool cmd_compare_results(const char* line)
+ {
+ if (*line) return false;
+ _compare_results_across_samples = false;
+ _compare_results_across_benchmarks = false;
+ return true;
+ }
+};
+
+class local_runner : public runner
+{
+public:
+ local_runner() : runner(true)
+ {}
+};
+
+// } // anonymous namespace
+
+benchmark::benchmark(const char* name, benchmark_proc proc)
+ : _name(name)
+ , _proc(proc)
+{}
+
+benchmark& global_registry::new_benchmark(const char* name, benchmark_proc proc)
+{
+ return g_registry().add_benchmark(name, proc);
+}
+
+int global_registry::set_bench_suite(const char* name)
+{
+ g_registry().current_suite_name() = name;
+ return 0;
+}
+
+#if (defined(_MSC_VER) || defined(__MINGW32__)) && !defined(PICOBENCH_TEST)
+
+static const long long high_res_clock_freq = []() -> long long
+{
+ LARGE_INTEGER frequency;
+ QueryPerformanceFrequency(&frequency);
+ return frequency.QuadPart;
+}();
+
+high_res_clock::time_point high_res_clock::now()
+{
+ LARGE_INTEGER t;
+ QueryPerformanceCounter(&t);
+ return time_point(duration((t.QuadPart * rep(period::den)) / high_res_clock_freq));
+}
+#endif
+}
+
+#endif
+
+#if defined(PICOBENCH_IMPLEMENT_MAIN)
+int main(int argc, char* argv[])
+{
+ picobench::runner r;
+ r.parse_cmd_line(argc, argv);
+ return r.run();
+}
+#endif
+
+#if defined(PICOBENCH_TEST)
+
+// fake time keeping functions for the tests
+namespace picobench
+{
+
+void this_thread_sleep_for_ns(uint64_t ns);
+
+template <class Rep, class Period>
+void this_thread_sleep_for(const std::chrono::duration<Rep, Period>& duration)
+{
+ this_thread_sleep_for_ns(std::chrono::duration_cast<std::chrono::nanoseconds>(duration).count());
+}
+
+#if defined(PICOBENCH_IMPLEMENT)
+static struct fake_time
+{
+ uint64_t now;
+} the_time;
+
+void this_thread_sleep_for_ns(uint64_t ns)
+{
+ the_time.now += ns;
+}
+
+high_res_clock::time_point high_res_clock::now()
+{
+ auto ret = time_point(duration(the_time.now));
+ return ret;
+}
+#endif
+
+}
+
+#endif
diff --git a/misc/benchmarks/picobench/picobench_cmap.cpp b/misc/benchmarks/picobench/picobench_cmap.cpp
new file mode 100644
index 00000000..3ffba5b9
--- /dev/null
+++ b/misc/benchmarks/picobench/picobench_cmap.cpp
@@ -0,0 +1,284 @@
+#define i_static
+#include <stc/crandom.h>
+#define i_static
+#include <stc/cstr.h>
+#include <cmath>
+#include <string>
+#include <unordered_map>
+#include <stdexcept>
+#include "../external/ankerl/unordered_dense.h"
+#include "../external/skarupke/flat_hash_map.hpp"
+#include "../external/tsl/robin_map.h"
+
+#define PICOBENCH_IMPLEMENT_WITH_MAIN
+#include "picobench.hpp"
+
+enum {N1 = 4000000, S1 = 1, MaxLoadFactor100 = 80};
+uint64_t seed = time(NULL);
+
+template <class K, class V> using umap = std::unordered_map<K, V>;
+template <class K, class V> using fmap = ska::flat_hash_map<K, V>;
+template <class K, class V> using tmap = tsl::robin_map<K, V>;
+template <class K, class V> using dmap = ankerl::unordered_dense::map<K, V>;
+#define DEFMAP(map, ...) \
+ using u##map = umap __VA_ARGS__; \
+ using f##map = fmap __VA_ARGS__; \
+ using t##map = tmap __VA_ARGS__; \
+ using d##map = dmap __VA_ARGS__
+
+DEFMAP(map_i, <int32_t, int32_t>);
+DEFMAP(map_x, <uint64_t, uint64_t>);
+DEFMAP(map_s, <std::string, std::string>);
+
+#define i_key int32_t
+#define i_val int32_t
+#define i_tag i
+#define i_max_load_factor float(MaxLoadFactor100) / 100.0f
+#include <stc/cmap.h>
+
+#define i_key uint64_t
+#define i_val uint64_t
+#define i_tag x
+#define i_max_load_factor float(MaxLoadFactor100) / 100.0f
+#include <stc/cmap.h>
+
+#define i_key_str
+#define i_val_str
+#define i_max_load_factor float(MaxLoadFactor100) / 100.0f
+#include <stc/cmap.h>
+
+PICOBENCH_SUITE("Map1");
+
+template <class MapInt>
+static void ins_and_erase_i(picobench::state& s)
+{
+ MapInt map;
+ map.max_load_factor((int)MaxLoadFactor100 / 100.0);
+ csrandom(seed);
+
+ picobench::scope scope(s);
+ c_forrange (s.iterations())
+ map[crandom()];
+ map.clear();
+ csrandom(seed);
+ c_forrange (s.iterations())
+ map[crandom()];
+ csrandom(seed);
+ c_forrange (s.iterations())
+ map.erase(crandom());
+ s.set_result(map.size());
+}
+/*
+static void ins_and_erase_cmap_i(picobench::state& s)
+{
+ cmap_i map = cmap_i_init();
+ csrandom(seed);
+
+ picobench::scope scope(s);
+ c_forrange (s.iterations())
+ cmap_i_insert(&map, crandom(), 0);
+ cmap_i_clear(&map);
+ csrandom(seed);
+ c_forrange (s.iterations())
+ cmap_i_insert(&map, crandom(), 0);
+ csrandom(seed);
+ c_forrange (s.iterations())
+ cmap_i_erase(&map, crandom());
+ s.set_result(cmap_i_size(&map));
+ cmap_i_drop(&map);
+}
+*/
+static void ins_and_erase_cmap_x(picobench::state& s)
+{
+ cmap_x map = cmap_x_init();
+ csrandom(seed);
+
+ picobench::scope scope(s);
+ c_forrange (s.iterations())
+ cmap_x_insert(&map, crandom(), 0);
+ cmap_x_clear(&map);
+ csrandom(seed);
+ c_forrange (s.iterations())
+ cmap_x_insert(&map, crandom(), 0);
+ csrandom(seed);
+ c_forrange (s.iterations())
+ cmap_x_erase(&map, crandom());
+ s.set_result(cmap_x_size(&map));
+ cmap_x_drop(&map);
+}
+
+#define P samples(S1).iterations({N1/4})
+PICOBENCH(ins_and_erase_i<umap_x>).P;
+PICOBENCH(ins_and_erase_i<dmap_x>).P;
+PICOBENCH(ins_and_erase_i<fmap_x>).P;
+PICOBENCH(ins_and_erase_i<tmap_x>).P;
+PICOBENCH(ins_and_erase_cmap_x).P;
+#undef P
+
+PICOBENCH_SUITE("Map2");
+
+template <class MapInt>
+static void ins_and_access_i(picobench::state& s)
+{
+ uint64_t mask = (1ull << s.arg()) - 1;
+ size_t result = 0;
+ MapInt map;
+ map.max_load_factor((int)MaxLoadFactor100 / 100.0);
+ csrandom(seed);
+
+ picobench::scope scope(s);
+ c_forrange (N1)
+ result += ++map[crandom() & mask];
+ s.set_result(result);
+}
+
+static void ins_and_access_cmap_i(picobench::state& s)
+{
+ uint64_t mask = (1ull << s.arg()) - 1;
+ size_t result = 0;
+ cmap_i map = cmap_i_init();
+ csrandom(seed);
+
+ picobench::scope scope(s);
+ c_forrange (N1)
+ result += ++cmap_i_insert(&map, crandom() & mask, 0).ref->second;
+ s.set_result(result);
+ cmap_i_drop(&map);
+}
+
+#define P samples(S1).iterations({N1, N1, N1, N1}).args({18, 23, 25, 31})
+PICOBENCH(ins_and_access_i<umap_i>).P;
+PICOBENCH(ins_and_access_i<dmap_i>).P;
+PICOBENCH(ins_and_access_i<fmap_i>).P;
+PICOBENCH(ins_and_access_i<tmap_i>).P;
+PICOBENCH(ins_and_access_cmap_i).P;
+#undef P
+
+PICOBENCH_SUITE("Map3");
+
+static void randomize(char* str, size_t len) {
+ for (size_t k=0; k < len; ++k) {
+ union {uint64_t i; char c[8];} r = {.i = crandom()};
+ for (unsigned i=0; i<8 && k<len; ++k, ++i)
+ str[k] = (r.c[i] & 63) + 48;
+ }
+}
+
+template <class MapStr>
+static void ins_and_access_s(picobench::state& s)
+{
+ std::string str(s.arg(), 'x');
+ size_t result = 0;
+ MapStr map;
+ map.max_load_factor((int)MaxLoadFactor100 / 100.0);
+ csrandom(seed);
+
+ picobench::scope scope(s);
+ c_forrange (s.iterations()) {
+ randomize(&str[0], str.size());
+ map.emplace(str, str);
+ randomize(&str[0], str.size());
+ result += map.erase(str);
+ }
+ s.set_result(result + map.size());
+}
+
+static void ins_and_access_cmap_s(picobench::state& s)
+{
+ cstr str = cstr_with_size(s.arg(), 'x');
+ char* buf = cstr_data(&str);
+ size_t result = 0;
+ cmap_str map = cmap_str_init();
+ csrandom(seed);
+
+ picobench::scope scope(s);
+ c_forrange (s.iterations()) {
+ randomize(buf, s.arg());
+ //if (s.arg() > 30) { printf("%s\n", buf); exit(0); }
+ cmap_str_emplace(&map, buf, buf);
+
+ randomize(buf, s.arg());
+ result += cmap_str_erase(&map, buf);
+ }
+ s.set_result(result + cmap_str_size(&map));
+ cstr_drop(&str);
+ cmap_str_drop(&map);
+}
+
+#define P samples(S1).iterations({N1/5, N1/5, N1/5, N1/10, N1/40}).args({13, 7, 8, 100, 1000})
+PICOBENCH(ins_and_access_s<umap_s>).P;
+PICOBENCH(ins_and_access_s<dmap_s>).P;
+PICOBENCH(ins_and_access_s<fmap_s>).P;
+PICOBENCH(ins_and_access_s<tmap_s>).P;
+PICOBENCH(ins_and_access_cmap_s).P;
+#undef P
+
+PICOBENCH_SUITE("Map4");
+
+template <class MapX>
+static void iterate_x(picobench::state& s)
+{
+ MapX map;
+ map.max_load_factor((int)MaxLoadFactor100 / 100.0);
+ uint64_t K = (1ull << s.arg()) - 1;
+
+ picobench::scope scope(s);
+ csrandom(seed);
+ size_t result = 0;
+
+ // measure insert then iterate whole map
+ c_forrange (n, s.iterations()) {
+ map[crandom()] = n;
+ if (!(n & K)) for (auto const& keyVal : map)
+ result += keyVal.second;
+ }
+
+    // reset rng back to initial state
+ csrandom(seed);
+
+ // measure erase then iterate whole map
+ c_forrange (n, s.iterations()) {
+ map.erase(crandom());
+ if (!(n & K)) for (auto const& keyVal : map)
+ result += keyVal.second;
+ }
+ s.set_result(result);
+}
+
+static void iterate_cmap_x(picobench::state& s)
+{
+ cmap_x map = cmap_x_init();
+ uint64_t K = (1ull << s.arg()) - 1;
+
+ picobench::scope scope(s);
+ csrandom(seed);
+ size_t result = 0;
+
+ // measure insert then iterate whole map
+ c_forrange (n, s.iterations()) {
+ cmap_x_insert_or_assign(&map, crandom(), n);
+ if (!(n & K)) c_foreach (i, cmap_x, map)
+ result += i.ref->second;
+ }
+
+    // reset rng back to initial state
+ csrandom(seed);
+
+ // measure erase then iterate whole map
+ c_forrange (n, s.iterations()) {
+ cmap_x_erase(&map, crandom());
+ if (!(n & K)) c_foreach (i, cmap_x, map)
+ result += i.ref->second;
+ }
+ s.set_result(result);
+ cmap_x_drop(&map);
+}
+
+
+#define P samples(S1).iterations({N1/20}).args({12})
+PICOBENCH(iterate_x<umap_x>).P;
+PICOBENCH(iterate_x<dmap_x>).P;
+PICOBENCH(iterate_x<fmap_x>).P;
+PICOBENCH(iterate_x<tmap_x>).P;
+PICOBENCH(iterate_cmap_x).P;
+#undef P
diff --git a/misc/benchmarks/picobench/picobench_csmap.cpp b/misc/benchmarks/picobench/picobench_csmap.cpp
new file mode 100644
index 00000000..5caab6cc
--- /dev/null
+++ b/misc/benchmarks/picobench/picobench_csmap.cpp
@@ -0,0 +1,320 @@
+#include <iostream>
+#define i_static
+#include <stc/crandom.h>
+#define i_static
+#include <stc/cstr.h>
+#include <cmath>
+#include <string>
+#include <map>
+
+#define PICOBENCH_IMPLEMENT_WITH_MAIN
+#include "picobench.hpp"
+
+enum {N1 = 1000000, S1 = 1};
+uint64_t seed = time(NULL); // 18237129837891;
+
+using omap_i = std::map<int, int>;
+using omap_x = std::map<uint64_t, uint64_t>;
+using omap_s = std::map<std::string, std::string>;
+
+#define i_key int
+#define i_val int
+#define i_tag i
+#include <stc/csmap.h>
+
+#define i_key size_t
+#define i_val size_t
+#define i_tag x
+#include <stc/csmap.h>
+
+#define i_key_str
+#define i_val_str
+#include <stc/csmap.h>
+
+PICOBENCH_SUITE("Map1");
+
+template <class MapInt>
+static void ctor_and_ins_one_i(picobench::state& s)
+{
+ size_t result = 0;
+ picobench::scope scope(s);
+ c_forrange (n, s.iterations()) {
+ MapInt map;
+ map[n];
+ result += map.size();
+ }
+ s.set_result(result);
+}
+/*
+static void ctor_and_ins_one_csmap_i(picobench::state& s)
+{
+ size_t result = 0;
+ picobench::scope scope(s);
+ c_forrange (n, s.iterations()) {
+ csmap_i map = csmap_i_init();
+ csmap_i_insert(&map, n, 0);
+ result += csmap_i_size(&map);
+ csmap_i_drop(&map);
+ }
+ s.set_result(result);
+}
+
+#define P samples(S1).iterations({N1})
+PICOBENCH(ctor_and_ins_one_i<omap_i>).P;
+PICOBENCH(ctor_and_ins_one_csmap_i).P;
+#undef P
+*/
+
+PICOBENCH_SUITE("Map_insert_only");
+
+template <class MapInt>
+static void insert_i(picobench::state& s)
+{
+ MapInt map;
+ csrandom(seed);
+ picobench::scope scope(s);
+ c_forrange (n, s.iterations())
+ map.emplace(crandom() & 0xfffffff, n);
+ s.set_result(map.size());
+}
+
+static void insert_csmap_i(picobench::state& s)
+{
+ csmap_i map = csmap_i_init();
+ csrandom(seed);
+ picobench::scope scope(s);
+ c_forrange (n, s.iterations())
+ csmap_i_insert(&map, crandom() & 0xfffffff, n);
+ s.set_result(csmap_i_size(&map));
+ csmap_i_drop(&map);
+}
+
+#define P samples(S1).iterations({N1})
+PICOBENCH(insert_i<omap_i>).P;
+PICOBENCH(insert_csmap_i).P;
+#undef P
+
+
+PICOBENCH_SUITE("Map2");
+
+template <class MapInt>
+static void ins_and_erase_i(picobench::state& s)
+{
+ size_t result = 0;
+ uint64_t mask = (1ull << s.arg()) - 1;
+ MapInt map;
+ csrandom(seed);
+
+ picobench::scope scope(s);
+ c_forrange (i, s.iterations())
+ map.emplace(crandom() & mask, i);
+ result = map.size();
+
+ map.clear();
+ csrandom(seed);
+ c_forrange (i, s.iterations())
+ map[crandom() & mask] = i;
+
+ csrandom(seed);
+ c_forrange (s.iterations())
+ map.erase(crandom() & mask);
+ s.set_result(result);
+}
+
+static void ins_and_erase_csmap_i(picobench::state& s)
+{
+ size_t result = 0;
+ uint64_t mask = (1ull << s.arg()) - 1;
+ csmap_i map = csmap_i_init();
+ csrandom(seed);
+
+ picobench::scope scope(s);
+ c_forrange (i, s.iterations())
+ csmap_i_insert(&map, crandom() & mask, i);
+ result = csmap_i_size(&map);
+
+ csmap_i_clear(&map);
+ csrandom(seed);
+ c_forrange (i, s.iterations())
+ csmap_i_insert_or_assign(&map, crandom() & mask, i);
+
+ csrandom(seed);
+ c_forrange (s.iterations())
+ csmap_i_erase(&map, crandom() & mask);
+ s.set_result(result);
+ csmap_i_drop(&map);
+}
+
+#define P samples(S1).iterations({N1/2, N1/2, N1/2, N1/2}).args({18, 23, 25, 31})
+PICOBENCH(ins_and_erase_i<omap_i>).P;
+PICOBENCH(ins_and_erase_csmap_i).P;
+#undef P
+
+PICOBENCH_SUITE("Map3");
+
+template <class MapInt>
+static void ins_and_access_i(picobench::state& s)
+{
+ uint64_t mask = (1ull << s.arg()) - 1;
+ size_t result = 0;
+ MapInt map;
+ csrandom(seed);
+
+ picobench::scope scope(s);
+ c_forrange (s.iterations()) {
+ result += ++map[crandom() & mask];
+ auto it = map.find(crandom() & mask);
+ if (it != map.end()) map.erase(it->first);
+ }
+ s.set_result(result + map.size());
+}
+
+static void ins_and_access_csmap_i(picobench::state& s)
+{
+ uint64_t mask = (1ull << s.arg()) - 1;
+ size_t result = 0;
+ csmap_i map = csmap_i_init();
+ csrandom(seed);
+
+ picobench::scope scope(s);
+ c_forrange (s.iterations()) {
+ result += ++csmap_i_insert(&map, crandom() & mask, 0).ref->second;
+ const csmap_i_value* val = csmap_i_get(&map, crandom() & mask);
+ if (val) csmap_i_erase(&map, val->first);
+ }
+ s.set_result(result + csmap_i_size(&map));
+ csmap_i_drop(&map);
+}
+
+#define P samples(S1).iterations({N1, N1, N1, N1}).args({18, 23, 25, 31})
+PICOBENCH(ins_and_access_i<omap_i>).P;
+PICOBENCH(ins_and_access_csmap_i).P;
+#undef P
+
+PICOBENCH_SUITE("Map4");
+
+static void randomize(char* str, int len) {
+ union {uint64_t i; char c[8];} r = {.i = crandom()};
+ for (int i = len - 7, j = 0; i < len; ++j, ++i)
+ str[i] = (r.c[j] & 63) + 48;
+}
+
+template <class MapStr>
+static void ins_and_access_s(picobench::state& s)
+{
+ std::string str(s.arg(), 'x');
+ size_t result = 0;
+ MapStr map;
+
+ picobench::scope scope(s);
+ csrandom(seed);
+ c_forrange (s.iterations()) {
+ randomize(&str[0], str.size());
+ map.emplace(str, str);
+ }
+ csrandom(seed);
+ c_forrange (s.iterations()) {
+ randomize(&str[0], str.size());
+ result += map.erase(str);
+ }
+ s.set_result(result + map.size());
+}
+
+static void ins_and_access_csmap_s(picobench::state& s)
+{
+ cstr str = cstr_with_size(s.arg(), 'x');
+ char* buf = cstr_data(&str);
+ size_t result = 0;
+ csmap_str map = csmap_str_init();
+
+ picobench::scope scope(s);
+ csrandom(seed);
+ c_forrange (s.iterations()) {
+ randomize(buf, s.arg());
+ csmap_str_emplace(&map, buf, buf);
+ }
+ csrandom(seed);
+ c_forrange (s.iterations()) {
+ randomize(buf, s.arg());
+ result += csmap_str_erase(&map, buf);
+ /*csmap_str_iter it = csmap_str_find(&map, buf);
+ if (it.ref) {
+ ++result;
+ csmap_str_erase(&map, cstr_str(&it.ref->first));
+ }*/
+ }
+ s.set_result(result + csmap_str_size(&map));
+ cstr_drop(&str);
+ csmap_str_drop(&map);
+}
+
+#define P samples(S1).iterations({N1/5, N1/5, N1/5, N1/10, N1/40}).args({13, 7, 8, 100, 1000})
+PICOBENCH(ins_and_access_s<omap_s>).P;
+PICOBENCH(ins_and_access_csmap_s).P;
+#undef P
+
+PICOBENCH_SUITE("Map5");
+
+template <class MapX>
+static void iterate_x(picobench::state& s)
+{
+ MapX map;
+ uint64_t K = (1ull << s.arg()) - 1;
+
+ picobench::scope scope(s);
+ csrandom(seed);
+ size_t result = 0;
+
+ // measure insert then iterate whole map
+ c_forrange (n, s.iterations()) {
+ map[crandom()] = n;
+ if (!(n & K)) for (auto const& keyVal : map)
+ result += keyVal.second;
+ }
+
+    // reset rng back to initial state
+ csrandom(seed);
+
+ // measure erase then iterate whole map
+ c_forrange (n, s.iterations()) {
+ map.erase(crandom());
+ if (!(n & K)) for (auto const& keyVal : map)
+ result += keyVal.second;
+ }
+ s.set_result(result);
+}
+/*
+static void iterate_csmap_x(picobench::state& s)
+{
+ csmap_x map = csmap_x_init();
+ uint64_t K = (1ull << s.arg()) - 1;
+
+ picobench::scope scope(s);
+ csrandom(seed);
+ size_t result = 0;
+
+ // measure insert then iterate whole map
+ c_forrange (n, s.iterations()) {
+ csmap_x_insert_or_assign(&map, crandom(), n);
+ if (!(n & K)) c_foreach (i, csmap_x, map)
+ result += i.ref->second;
+ }
+
+    // reset rng back to initial state
+ csrandom(seed);
+
+ // measure erase then iterate whole map
+ c_forrange (n, s.iterations()) {
+ csmap_x_erase(&map, crandom());
+ if (!(n & K)) c_foreach (i, csmap_x, map)
+ result += i.ref->second;
+ }
+ s.set_result(result);
+ csmap_x_drop(&map);
+}
+
+#define P samples(S1).iterations({N1/20}).args({12})
+PICOBENCH(iterate_x<omap_x>).P;
+PICOBENCH(iterate_csmap_x).P;
+#undef P
+*/ \ No newline at end of file
diff --git a/misc/benchmarks/pics/benchmark.gif b/misc/benchmarks/pics/benchmark.gif
new file mode 100644
index 00000000..90f47fff
--- /dev/null
+++ b/misc/benchmarks/pics/benchmark.gif
Binary files differ
diff --git a/misc/benchmarks/plotbench/cdeq_benchmark.cpp b/misc/benchmarks/plotbench/cdeq_benchmark.cpp
new file mode 100644
index 00000000..1259cc07
--- /dev/null
+++ b/misc/benchmarks/plotbench/cdeq_benchmark.cpp
@@ -0,0 +1,138 @@
+#include <stdio.h>
+#include <time.h>
+#define i_static
+#include <stc/crandom.h>
+
+#ifdef __cplusplus
+#include <deque>
+#include <algorithm>
+#endif
+
+enum {INSERT, ERASE, FIND, ITER, DESTRUCT, N_TESTS};
+const char* operations[] = {"insert", "erase", "find", "iter", "destruct"};
+typedef struct { time_t t1, t2; uint64_t sum; float fac; } Range;
+typedef struct { const char* name; Range test[N_TESTS]; } Sample;
+enum {SAMPLES = 2, N = 100000000, S = 0x3ffc, R = 4};
+uint64_t seed = 1, mask1 = 0xfffffff, mask2 = 0xffff;
+
+static float secs(Range s) { return (float)(s.t2 - s.t1) / CLOCKS_PER_SEC; }
+
+#define i_tag x
+#define i_val size_t
+#include <stc/cdeq.h>
+
+#ifdef __cplusplus
+Sample test_std_deque() {
+ typedef std::deque<size_t> container;
+ Sample s = {"std,deque"};
+ {
+ s.test[INSERT].t1 = clock();
+ container con;
+ csrandom(seed);
+ c_forrange (N/3) con.push_front(crandom() & mask1);
+ c_forrange (N/3) {con.push_back(crandom() & mask1); con.pop_front();}
+ c_forrange (N/3) con.push_back(crandom() & mask1);
+ s.test[INSERT].t2 = clock();
+ s.test[INSERT].sum = con.size();
+ s.test[ERASE].t1 = clock();
+ c_forrange (con.size()/2) { con.pop_front(); con.pop_back(); }
+ s.test[ERASE].t2 = clock();
+ s.test[ERASE].sum = con.size();
+ }{
+ container con;
+ csrandom(seed);
+ c_forrange (N) con.push_back(crandom() & mask2);
+ s.test[FIND].t1 = clock();
+ size_t sum = 0;
+ // Iteration - not inherent find - skipping
+ //container::iterator it;
+ //c_forrange (S) if ((it = std::find(con.begin(), con.end(), crandom() & mask2)) != con.end()) sum += *it;
+ s.test[FIND].t2 = clock();
+ s.test[FIND].sum = sum;
+ s.test[ITER].t1 = clock();
+ sum = 0;
+ c_forrange (R) c_forrange (i, N) sum += con[i];
+ s.test[ITER].t2 = clock();
+ s.test[ITER].sum = sum;
+ s.test[DESTRUCT].t1 = clock();
+ }
+ s.test[DESTRUCT].t2 = clock();
+ s.test[DESTRUCT].sum = 0;
+ return s;
+}
+#else
+Sample test_std_deque() { Sample s = {"std-deque"}; return s;}
+#endif
+
+
+Sample test_stc_deque() {
+ typedef cdeq_x container;
+ Sample s = {"STC,deque"};
+ {
+ s.test[INSERT].t1 = clock();
+ container con = cdeq_x_init();
+ //cdeq_x_reserve(&con, N);
+ csrandom(seed);
+ c_forrange (N/3) cdeq_x_push_front(&con, crandom() & mask1);
+ c_forrange (N/3) {cdeq_x_push_back(&con, crandom() & mask1); cdeq_x_pop_front(&con);}
+ c_forrange (N/3) cdeq_x_push_back(&con, crandom() & mask1);
+ s.test[INSERT].t2 = clock();
+ s.test[INSERT].sum = cdeq_x_size(&con);
+ s.test[ERASE].t1 = clock();
+ c_forrange (cdeq_x_size(&con)/2) { cdeq_x_pop_front(&con); cdeq_x_pop_back(&con); }
+ s.test[ERASE].t2 = clock();
+ s.test[ERASE].sum = cdeq_x_size(&con);
+ cdeq_x_drop(&con);
+ }{
+ csrandom(seed);
+ container con = cdeq_x_init();
+ c_forrange (N) cdeq_x_push_back(&con, crandom() & mask2);
+ s.test[FIND].t1 = clock();
+ size_t sum = 0;
+ //cdeq_x_iter it, end = cdeq_x_end(&con);
+ //c_forrange (S) if ((it = cdeq_x_find(&con, crandom() & mask2)).ref != end.ref) sum += *it.ref;
+ s.test[FIND].t2 = clock();
+ s.test[FIND].sum = sum;
+ s.test[ITER].t1 = clock();
+ sum = 0;
+ c_forrange (R) c_forrange (i, N) sum += con.data[i];
+ s.test[ITER].t2 = clock();
+ s.test[ITER].sum = sum;
+ s.test[DESTRUCT].t1 = clock();
+ cdeq_x_drop(&con);
+ }
+ s.test[DESTRUCT].t2 = clock();
+ s.test[DESTRUCT].sum = 0;
+ return s;
+}
+
+int main(int argc, char* argv[])
+{
+ Sample std_s[SAMPLES + 1], stc_s[SAMPLES + 1];
+ c_forrange (i, SAMPLES) {
+ std_s[i] = test_std_deque();
+ stc_s[i] = test_stc_deque();
+ if (i > 0) c_forrange (j, N_TESTS) {
+ if (secs(std_s[i].test[j]) < secs(std_s[0].test[j])) std_s[0].test[j] = std_s[i].test[j];
+ if (secs(stc_s[i].test[j]) < secs(stc_s[0].test[j])) stc_s[0].test[j] = stc_s[i].test[j];
+ if (stc_s[i].test[j].sum != stc_s[0].test[j].sum) printf("Error in sum: test %lld, sample %lld\n", i, j);
+ }
+ }
+ const char* comp = argc > 1 ? argv[1] : "test";
+ bool header = (argc > 2 && argv[2][0] == '1');
+ float std_sum = 0, stc_sum = 0;
+
+ c_forrange (j, N_TESTS) {
+ std_sum += secs(std_s[0].test[j]);
+ stc_sum += secs(stc_s[0].test[j]);
+ }
+ if (header) printf("Compiler,Library,C,Method,Seconds,Ratio\n");
+
+ c_forrange (j, N_TESTS)
+ printf("%s,%s n:%d,%s,%.3f,%.3f\n", comp, std_s[0].name, N, operations[j], secs(std_s[0].test[j]), 1.0f);
+ printf("%s,%s n:%d,%s,%.3f,%.3f\n", comp, std_s[0].name, N, "total", std_sum, 1.0f);
+
+ c_forrange (j, N_TESTS)
+ printf("%s,%s n:%d,%s,%.3f,%.3f\n", comp, stc_s[0].name, N, operations[j], secs(stc_s[0].test[j]), secs(std_s[0].test[j]) ? secs(stc_s[0].test[j])/secs(std_s[0].test[j]) : 1.0f);
+ printf("%s,%s n:%d,%s,%.3f,%.3f\n", comp, stc_s[0].name, N, "total", stc_sum, stc_sum/std_sum);
+}
diff --git a/misc/benchmarks/plotbench/clist_benchmark.cpp b/misc/benchmarks/plotbench/clist_benchmark.cpp
new file mode 100644
index 00000000..04c8e8cd
--- /dev/null
+++ b/misc/benchmarks/plotbench/clist_benchmark.cpp
@@ -0,0 +1,135 @@
+#include <stdio.h>
+#include <time.h>
+#define i_static
+#include <stc/crandom.h>
+
+#ifdef __cplusplus
+#include <forward_list>
+#include <algorithm>
+#endif
+
+enum {INSERT, ERASE, FIND, ITER, DESTRUCT, N_TESTS};
+const char* operations[] = {"insert", "erase", "find", "iter", "destruct"};
+typedef struct { time_t t1, t2; uint64_t sum; float fac; } Range;
+typedef struct { const char* name; Range test[N_TESTS]; } Sample;
+enum {SAMPLES = 2, N = 50000000, S = 0x3ffc, R = 4};
+uint64_t seed = 1, mask1 = 0xfffffff, mask2 = 0xffff;
+
+static float secs(Range s) { return (float)(s.t2 - s.t1) / CLOCKS_PER_SEC; }
+
+#define i_val size_t
+#define i_tag x
+#include <stc/clist.h>
+
+#ifdef __cplusplus
+Sample test_std_forward_list() {
+ typedef std::forward_list<size_t> container;
+ Sample s = {"std,forward_list"};
+ {
+ s.test[INSERT].t1 = clock();
+ container con;
+ csrandom(seed);
+ c_forrange (N/2) con.push_front(crandom() & mask1);
+ c_forrange (N/2) con.push_front(crandom() & mask1);
+ s.test[INSERT].t2 = clock();
+ s.test[INSERT].sum = 0;
+ s.test[ERASE].t1 = clock();
+ c_forrange (N) con.pop_front();
+ s.test[ERASE].t2 = clock();
+ s.test[ERASE].sum = 0;
+ }{
+ container con;
+ csrandom(seed);
+ c_forrange (N) con.push_front(crandom() & mask2);
+ s.test[FIND].t1 = clock();
+ size_t sum = 0;
+ container::iterator it;
+ // Iteration - not inherent find - skipping
+ //c_forrange (S) if ((it = std::find(con.begin(), con.end(), crandom() & mask2)) != con.end()) sum += *it;
+ s.test[FIND].t2 = clock();
+ s.test[FIND].sum = sum;
+ s.test[ITER].t1 = clock();
+ sum = 0;
+ c_forrange (R) for (auto i: con) sum += i;
+ s.test[ITER].t2 = clock();
+ s.test[ITER].sum = sum;
+ s.test[DESTRUCT].t1 = clock();
+ }
+ s.test[DESTRUCT].t2 = clock();
+ s.test[DESTRUCT].sum = 0;
+ return s;
+}
+#else
+Sample test_std_forward_list() { Sample s = {"std-forward_list"}; return s;}
+#endif
+
+
+Sample test_stc_forward_list() {
+ typedef clist_x container;
+ Sample s = {"STC,forward_list"};
+ {
+ s.test[INSERT].t1 = clock();
+ container con = clist_x_init();
+ csrandom(seed);
+ c_forrange (N/2) clist_x_push_front(&con, crandom() & mask1);
+ c_forrange (N/2) clist_x_push_back(&con, crandom() & mask1);
+ s.test[INSERT].t2 = clock();
+ s.test[INSERT].sum = 0;
+ s.test[ERASE].t1 = clock();
+ c_forrange (N) clist_x_pop_front(&con);
+ s.test[ERASE].t2 = clock();
+ s.test[ERASE].sum = 0;
+ clist_x_drop(&con);
+ }{
+ csrandom(seed);
+ container con = clist_x_init();
+ c_forrange (N) clist_x_push_front(&con, crandom() & mask2);
+ s.test[FIND].t1 = clock();
+ size_t sum = 0;
+ //clist_x_iter it, end = clist_x_end(&con);
+ //c_forrange (S) if ((it = clist_x_find(&con, crandom() & mask2)).ref != end.ref) sum += *it.ref;
+ s.test[FIND].t2 = clock();
+ s.test[FIND].sum = sum;
+ s.test[ITER].t1 = clock();
+ sum = 0;
+ c_forrange (R) c_foreach (i, clist_x, con) sum += *i.ref;
+ s.test[ITER].t2 = clock();
+ s.test[ITER].sum = sum;
+ s.test[DESTRUCT].t1 = clock();
+ clist_x_drop(&con);
+ }
+ s.test[DESTRUCT].t2 = clock();
+ s.test[DESTRUCT].sum = 0;
+ return s;
+}
+
+int main(int argc, char* argv[])
+{
+ Sample std_s[SAMPLES + 1], stc_s[SAMPLES + 1];
+ c_forrange (i, SAMPLES) {
+ std_s[i] = test_std_forward_list();
+ stc_s[i] = test_stc_forward_list();
+ if (i > 0) c_forrange (j, N_TESTS) {
+ if (secs(std_s[i].test[j]) < secs(std_s[0].test[j])) std_s[0].test[j] = std_s[i].test[j];
+ if (secs(stc_s[i].test[j]) < secs(stc_s[0].test[j])) stc_s[0].test[j] = stc_s[i].test[j];
+ if (stc_s[i].test[j].sum != stc_s[0].test[j].sum) printf("Error in sum: test %lld, sample %lld\n", i, j);
+ }
+ }
+ const char* comp = argc > 1 ? argv[1] : "test";
+ bool header = (argc > 2 && argv[2][0] == '1');
+ float std_sum = 0, stc_sum = 0;
+
+ c_forrange (j, N_TESTS) {
+ std_sum += secs(std_s[0].test[j]);
+ stc_sum += secs(stc_s[0].test[j]);
+ }
+ if (header) printf("Compiler,Library,C,Method,Seconds,Ratio\n");
+
+ c_forrange (j, N_TESTS)
+ printf("%s,%s n:%d,%s,%.3f,%.3f\n", comp, std_s[0].name, N, operations[j], secs(std_s[0].test[j]), 1.0f);
+ printf("%s,%s n:%d,%s,%.3f,%.3f\n", comp, std_s[0].name, N, "total", std_sum, 1.0f);
+
+ c_forrange (j, N_TESTS)
+ printf("%s,%s n:%d,%s,%.3f,%.3f\n", comp, stc_s[0].name, N, operations[j], secs(stc_s[0].test[j]), secs(std_s[0].test[j]) ? secs(stc_s[0].test[j])/secs(std_s[0].test[j]) : 1.0f);
+ printf("%s,%s n:%d,%s,%.3f,%.3f\n", comp, stc_s[0].name, N, "total", stc_sum, stc_sum/std_sum);
+} \ No newline at end of file
diff --git a/misc/benchmarks/plotbench/cmap_benchmark.cpp b/misc/benchmarks/plotbench/cmap_benchmark.cpp
new file mode 100644
index 00000000..7a8f29d2
--- /dev/null
+++ b/misc/benchmarks/plotbench/cmap_benchmark.cpp
@@ -0,0 +1,142 @@
+#include <stdio.h>
+#include <time.h>
+#define i_static
+#include <stc/crandom.h>
+
+#ifdef __cplusplus
+#include <unordered_map>
+#endif
+
+enum {INSERT, ERASE, FIND, ITER, DESTRUCT, N_TESTS};
+const char* operations[] = {"insert", "erase", "find", "iter", "destruct"};
+typedef struct { time_t t1, t2; uint64_t sum; float fac; } Range;
+typedef struct { const char* name; Range test[N_TESTS]; } Sample;
+enum {SAMPLES = 2, N = 8000000, R = 4};
+uint64_t seed = 1, mask1 = 0xffffffff;
+
+static float secs(Range s) { return (float)(s.t2 - s.t1) / CLOCKS_PER_SEC; }
+
+#define i_key uint64_t
+#define i_val uint64_t
+#define i_tag x
+#include <stc/cmap.h>
+
+#ifdef __cplusplus
+Sample test_std_unordered_map() {
+ typedef std::unordered_map<uint64_t, uint64_t> container;
+ Sample s = {"std,unordered_map"};
+ {
+ csrandom(seed);
+ s.test[INSERT].t1 = clock();
+ container con;
+ c_forrange (i, N/2) con.emplace(crandom() & mask1, i);
+ c_forrange (i, N/2) con.emplace(i, i);
+ s.test[INSERT].t2 = clock();
+ s.test[INSERT].sum = con.size();
+ csrandom(seed);
+ s.test[ERASE].t1 = clock();
+ c_forrange (N) con.erase(crandom() & mask1);
+ s.test[ERASE].t2 = clock();
+ s.test[ERASE].sum = con.size();
+ }{
+ container con;
+ csrandom(seed);
+ c_forrange (i, N/2) con.emplace(crandom() & mask1, i);
+ c_forrange (i, N/2) con.emplace(i, i);
+ csrandom(seed);
+ s.test[FIND].t1 = clock();
+ size_t sum = 0;
+ container::iterator it;
+ c_forrange (N) if ((it = con.find(crandom() & mask1)) != con.end()) sum += it->second;
+ s.test[FIND].t2 = clock();
+ s.test[FIND].sum = sum;
+ s.test[ITER].t1 = clock();
+ sum = 0;
+ c_forrange (R) for (auto i: con) sum += i.second;
+ s.test[ITER].t2 = clock();
+ s.test[ITER].sum = sum;
+ s.test[DESTRUCT].t1 = clock();
+ }
+ s.test[DESTRUCT].t2 = clock();
+ s.test[DESTRUCT].sum = 0;
+ return s;
+}
+#else
+Sample test_std_unordered_map() { Sample s = {"std-unordered_map"}; return s;}
+#endif
+
+
+Sample test_stc_unordered_map() {
+ typedef cmap_x container;
+ Sample s = {"STC,unordered_map"};
+ {
+ csrandom(seed);
+ s.test[INSERT].t1 = clock();
+ container con = cmap_x_init();
+ c_forrange (i, N/2) cmap_x_insert(&con, crandom() & mask1, i);
+ c_forrange (i, N/2) cmap_x_insert(&con, i, i);
+ s.test[INSERT].t2 = clock();
+ s.test[INSERT].sum = cmap_x_size(&con);
+ csrandom(seed);
+ s.test[ERASE].t1 = clock();
+ c_forrange (N) cmap_x_erase(&con, crandom() & mask1);
+ s.test[ERASE].t2 = clock();
+ s.test[ERASE].sum = cmap_x_size(&con);
+ cmap_x_drop(&con);
+ }{
+ container con = cmap_x_init();
+ csrandom(seed);
+ c_forrange (i, N/2) cmap_x_insert(&con, crandom() & mask1, i);
+ c_forrange (i, N/2) cmap_x_insert(&con, i, i);
+ csrandom(seed);
+ s.test[FIND].t1 = clock();
+ size_t sum = 0;
+ const cmap_x_value* val;
+ c_forrange (N)
+ if ((val = cmap_x_get(&con, crandom() & mask1)))
+ sum += val->second;
+ s.test[FIND].t2 = clock();
+ s.test[FIND].sum = sum;
+ s.test[ITER].t1 = clock();
+ sum = 0;
+ c_forrange (R) c_foreach (i, cmap_x, con) sum += i.ref->second;
+ s.test[ITER].t2 = clock();
+ s.test[ITER].sum = sum;
+ s.test[DESTRUCT].t1 = clock();
+ cmap_x_drop(&con);
+ }
+ s.test[DESTRUCT].t2 = clock();
+ s.test[DESTRUCT].sum = 0;
+ return s;
+}
+
+int main(int argc, char* argv[])
+{
+ Sample std_s[SAMPLES + 1], stc_s[SAMPLES + 1];
+ c_forrange (i, SAMPLES) {
+ std_s[i] = test_std_unordered_map();
+ stc_s[i] = test_stc_unordered_map();
+ if (i > 0) c_forrange (j, N_TESTS) {
+ if (secs(std_s[i].test[j]) < secs(std_s[0].test[j])) std_s[0].test[j] = std_s[i].test[j];
+ if (secs(stc_s[i].test[j]) < secs(stc_s[0].test[j])) stc_s[0].test[j] = stc_s[i].test[j];
+ if (stc_s[i].test[j].sum != stc_s[0].test[j].sum) printf("Error in sum: test %lld, sample %lld\n", i, j);
+ }
+ }
+ const char* comp = argc > 1 ? argv[1] : "test";
+ bool header = (argc > 2 && argv[2][0] == '1');
+ float std_sum = 0, stc_sum = 0;
+
+ c_forrange (j, N_TESTS) {
+ std_sum += secs(std_s[0].test[j]);
+ stc_sum += secs(stc_s[0].test[j]);
+ }
+ if (header) printf("Compiler,Library,C,Method,Seconds,Ratio\n");
+
+ c_forrange (j, N_TESTS)
+ printf("%s,%s n:%d,%s,%.3f,%.3f\n", comp, std_s[0].name, N, operations[j], secs(std_s[0].test[j]), 1.0f);
+ printf("%s,%s n:%d,%s,%.3f,%.3f\n", comp, std_s[0].name, N, "total", std_sum, 1.0f);
+
+ c_forrange (j, N_TESTS)
+ printf("%s,%s n:%d,%s,%.3f,%.3f\n", comp, stc_s[0].name, N, operations[j], secs(stc_s[0].test[j]), secs(std_s[0].test[j]) ? secs(stc_s[0].test[j])/secs(std_s[0].test[j]) : 1.0f);
+ printf("%s,%s n:%d,%s,%.3f,%.3f\n", comp, stc_s[0].name, N, "total", stc_sum, stc_sum/std_sum);
+} \ No newline at end of file
diff --git a/misc/benchmarks/plotbench/cpque_benchmark.cpp b/misc/benchmarks/plotbench/cpque_benchmark.cpp
new file mode 100644
index 00000000..a729c09f
--- /dev/null
+++ b/misc/benchmarks/plotbench/cpque_benchmark.cpp
@@ -0,0 +1,71 @@
+#include <stdio.h>
+#include <time.h>
+#define i_static
+#include <stc/crandom.h>
+
+#define i_val float
+#define i_cmp -c_default_cmp
+#define i_tag f
+#include <stc/cpque.h>
+
+#include <queue>
+
+static const uint32_t seed = 1234;
+
+void std_test()
+{
+ stc64_t rng;
+ int N = 10000000;
+
+ std::priority_queue<float, std::vector<float>, std::greater<float>> pq;
+ rng = stc64_new(seed);
+ clock_t start = clock();
+ c_forrange (i, N)
+ pq.push((float) stc64_randf(&rng)*100000);
+
+ printf("Built priority queue: %f secs\n", (clock() - start) / (float) CLOCKS_PER_SEC);
+ printf("%g ", pq.top());
+
+ start = clock();
+ c_forrange (i, N) {
+ pq.pop();
+ }
+
+ printf("\npopped PQ: %f secs\n\n", (clock() - start) / (float) CLOCKS_PER_SEC);
+}
+
+
+void stc_test()
+{
+ stc64_t rng;
+ int N = 10000000, M = 10;
+
+ c_auto (cpque_f, pq)
+ {
+ rng = stc64_new(seed);
+ clock_t start = clock();
+ c_forrange (i, N)
+ cpque_f_push(&pq, (float) stc64_randf(&rng)*100000);
+
+ printf("Built priority queue: %f secs\n", (clock() - start) / (float) CLOCKS_PER_SEC);
+ printf("%g ", *cpque_f_top(&pq));
+
+ c_forrange (i, M) {
+ cpque_f_pop(&pq);
+ }
+
+ start = clock();
+ c_forrange (i, M, N)
+ cpque_f_pop(&pq);
+ printf("\npopped PQ: %f secs\n", (clock() - start) / (float) CLOCKS_PER_SEC);
+ }
+}
+
+
+int main()
+{
+ puts("STD P.QUEUE:");
+ std_test();
+ puts("\nSTC P.QUEUE:");
+ stc_test();
+}
diff --git a/misc/benchmarks/plotbench/csmap_benchmark.cpp b/misc/benchmarks/plotbench/csmap_benchmark.cpp
new file mode 100644
index 00000000..46bd695c
--- /dev/null
+++ b/misc/benchmarks/plotbench/csmap_benchmark.cpp
@@ -0,0 +1,143 @@
+#include <stdio.h>
+#include <time.h>
+#define i_static
+#include <stc/crandom.h>
+
+#ifdef __cplusplus
+#include <map>
+#endif
+
+enum {INSERT, ERASE, FIND, ITER, DESTRUCT, N_TESTS};
+const char* operations[] = {"insert", "erase", "find", "iter", "destruct"};
+typedef struct { time_t t1, t2; uint64_t sum; float fac; } Range;
+typedef struct { const char* name; Range test[N_TESTS]; } Sample;
+enum {SAMPLES = 2, N = 4000000, R = 4};
+uint64_t seed = 1, mask1 = 0xfffffff;
+
+static float secs(Range s) { return (float)(s.t2 - s.t1) / CLOCKS_PER_SEC; }
+
+#define i_key size_t
+#define i_val size_t
+#define i_tag x
+#include <stc/csmap.h>
+
+#ifdef __cplusplus
+// Time std::map over the benchmark operations. First scope: insert (half
+// random keys masked to mask1, half sequential) then erase by random key.
+// Second scope: rebuild the same content, then time find, iteration and
+// destruction. The name "std,map" deliberately contains a comma: it fills
+// the two CSV columns Library ("std") and C ("map").
+Sample test_std_map() {
+ typedef std::map<size_t, size_t> container;
+ Sample s = {"std,map"};
+ {
+ csrandom(seed);
+ s.test[INSERT].t1 = clock();
+ container con;
+ c_forrange (i, N/2) con.emplace(crandom() & mask1, i);
+ c_forrange (i, N/2) con.emplace(i, i);
+ s.test[INSERT].t2 = clock();
+ s.test[INSERT].sum = con.size();
+ csrandom(seed); // replay the same key sequence for erase
+ s.test[ERASE].t1 = clock();
+ c_forrange (N) con.erase(crandom() & mask1);
+ s.test[ERASE].t2 = clock();
+ s.test[ERASE].sum = con.size();
+ }{
+ container con;
+ csrandom(seed);
+ c_forrange (i, N/2) con.emplace(crandom() & mask1, i);
+ c_forrange (i, N/2) con.emplace(i, i);
+ csrandom(seed);
+ s.test[FIND].t1 = clock();
+ size_t sum = 0;
+ container::iterator it;
+ c_forrange (N) if ((it = con.find(crandom() & mask1)) != con.end()) sum += it->second;
+ s.test[FIND].t2 = clock();
+ s.test[FIND].sum = sum;
+ s.test[ITER].t1 = clock();
+ sum = 0;
+ c_forrange (R) for (auto i: con) sum += i.second;
+ s.test[ITER].t2 = clock();
+ s.test[ITER].sum = sum;
+ s.test[DESTRUCT].t1 = clock(); // destruction timed from here to scope exit
+ }
+ s.test[DESTRUCT].t2 = clock();
+ s.test[DESTRUCT].sum = 0;
+ return s;
+}
+#else
+// C build: no std::map available; return a zero-initialized sample (all
+// timings read as 0). NOTE(review): the name "std-map" has no comma, unlike
+// "std,map" in the C++ branch, so a C build emits one CSV column fewer --
+// confirm intended.
+Sample test_std_map() { Sample s = {"std-map"}; return s;}
+#endif
+
+
+
+// Time STC csmap_x over the same operation sequence as test_std_map:
+// insert+erase in the first scope, find/iter/destruct on a rebuilt map in
+// the second. Same RNG seed so both libraries see identical key streams.
+Sample test_stc_map() {
+ typedef csmap_x container;
+ Sample s = {"STC,map"};
+ {
+ csrandom(seed);
+ s.test[INSERT].t1 = clock();
+ container con = csmap_x_init();
+ c_forrange (i, N/2) csmap_x_insert(&con, crandom() & mask1, i);
+ c_forrange (i, N/2) csmap_x_insert(&con, i, i);
+ s.test[INSERT].t2 = clock();
+ s.test[INSERT].sum = csmap_x_size(&con);
+ csrandom(seed); // replay the same key sequence for erase
+ s.test[ERASE].t1 = clock();
+ c_forrange (N) csmap_x_erase(&con, crandom() & mask1);
+ s.test[ERASE].t2 = clock();
+ s.test[ERASE].sum = csmap_x_size(&con);
+ csmap_x_drop(&con);
+ }{
+ container con = csmap_x_init();
+ csrandom(seed);
+ c_forrange (i, N/2) csmap_x_insert(&con, crandom() & mask1, i);
+ c_forrange (i, N/2) csmap_x_insert(&con, i, i);
+ csrandom(seed);
+ s.test[FIND].t1 = clock();
+ size_t sum = 0;
+ const csmap_x_value* val;
+ c_forrange (N)
+ if ((val = csmap_x_get(&con, crandom() & mask1)))
+ sum += val->second;
+ s.test[FIND].t2 = clock();
+ s.test[FIND].sum = sum;
+ s.test[ITER].t1 = clock();
+ sum = 0;
+ c_forrange (R) c_foreach (i, csmap_x, con) sum += i.ref->second;
+ s.test[ITER].t2 = clock();
+ s.test[ITER].sum = sum;
+ s.test[DESTRUCT].t1 = clock(); // destruction timed from here through drop
+ csmap_x_drop(&con);
+ }
+ s.test[DESTRUCT].t2 = clock();
+ s.test[DESTRUCT].sum = 0;
+ return s;
+}
+
+// Run each benchmark SAMPLES times, keep the fastest time per operation in
+// slot [0], verify checksums agree across samples, then print CSV rows:
+// Compiler,Library,C,Method,Seconds,Ratio (ratio = STC time / std time).
+int main(int argc, char* argv[])
+{
+ Sample std_s[SAMPLES + 1] = {{NULL}}, stc_s[SAMPLES + 1] = {{NULL}}; // zero-init, consistent with cvec_benchmark
+ c_forrange (i, SAMPLES) {
+ std_s[i] = test_std_map();
+ stc_s[i] = test_stc_map();
+ if (i > 0) c_forrange (j, N_TESTS) {
+ if (secs(std_s[i].test[j]) < secs(std_s[0].test[j])) std_s[0].test[j] = std_s[i].test[j];
+ if (secs(stc_s[i].test[j]) < secs(stc_s[0].test[j])) stc_s[0].test[j] = stc_s[i].test[j];
+ // Fixed: arguments were swapped (i is the sample index, j the test
+ // index), and both are cast so they match the %lld specifier.
+ if (stc_s[i].test[j].sum != stc_s[0].test[j].sum) printf("Error in sum: test %lld, sample %lld\n", (long long) j, (long long) i);
+ }
+ }
+ const char* comp = argc > 1 ? argv[1] : "test";
+ bool header = (argc > 2 && argv[2][0] == '1');
+ float std_sum = 0, stc_sum = 0;
+
+ c_forrange (j, N_TESTS) {
+ std_sum += secs(std_s[0].test[j]);
+ stc_sum += secs(stc_s[0].test[j]);
+ }
+ if (header) printf("Compiler,Library,C,Method,Seconds,Ratio\n");
+
+ c_forrange (j, N_TESTS)
+ printf("%s,%s n:%d,%s,%.3f,%.3f\n", comp, std_s[0].name, N, operations[j], secs(std_s[0].test[j]), 1.0f);
+ printf("%s,%s n:%d,%s,%.3f,%.3f\n", comp, std_s[0].name, N, "total", std_sum, 1.0f);
+
+ c_forrange (j, N_TESTS)
+ printf("%s,%s n:%d,%s,%.3f,%.3f\n", comp, stc_s[0].name, N, operations[j], secs(stc_s[0].test[j]), secs(std_s[0].test[j]) ? secs(stc_s[0].test[j])/secs(std_s[0].test[j]) : 1.0f);
+ // Guard the total ratio: std_sum is 0 in a C-only build.
+ printf("%s,%s n:%d,%s,%.3f,%.3f\n", comp, stc_s[0].name, N, "total", stc_sum, std_sum != 0 ? stc_sum/std_sum : 1.0f);
+}
diff --git a/misc/benchmarks/plotbench/cvec_benchmark.cpp b/misc/benchmarks/plotbench/cvec_benchmark.cpp
new file mode 100644
index 00000000..fe7e09fb
--- /dev/null
+++ b/misc/benchmarks/plotbench/cvec_benchmark.cpp
@@ -0,0 +1,134 @@
+#include <stdio.h>
+#include <time.h>
+#define i_static
+#include <stc/crandom.h>
+
+#ifdef __cplusplus
+#include <vector>
+#include <algorithm>
+#endif
+
+enum {INSERT, ERASE, FIND, ITER, DESTRUCT, N_TESTS};
+const char* operations[] = {"insert", "erase", "find", "iter", "destruct"};
+typedef struct { time_t t1, t2; uint64_t sum; float fac; } Range;
+typedef struct { const char* name; Range test[N_TESTS]; } Sample;
+enum {SAMPLES = 2, N = 150000000, S = 0x3ffc, R = 4};
+uint64_t seed = 1, mask1 = 0xfffffff, mask2 = 0xffff;
+
+static float secs(Range s) { return (float)(s.t2 - s.t1) / CLOCKS_PER_SEC; }
+
+#define i_val size_t
+#define i_tag x
+#include <stc/cvec.h>
+
+#ifdef __cplusplus
+// Time std::vector: push_back/pop_back in the first scope; indexed iteration
+// and destruction on a rebuilt vector in the second. FIND is intentionally a
+// no-op (linear find is not inherent to a vector), so its time reads as ~0.
+// Name "std,vector" fills the CSV columns Library ("std") and C ("vector").
+Sample test_std_vector() {
+ typedef std::vector<size_t> container;
+ Sample s = {"std,vector"};
+ {
+ s.test[INSERT].t1 = clock();
+ container con;
+ csrandom(seed);
+ c_forrange (N) con.push_back(crandom() & mask1);
+ s.test[INSERT].t2 = clock();
+ s.test[INSERT].sum = con.size();
+ s.test[ERASE].t1 = clock();
+ c_forrange (N) con.pop_back();
+ s.test[ERASE].t2 = clock();
+ s.test[ERASE].sum = con.size();
+ }{
+ container con;
+ csrandom(seed);
+ c_forrange (N) con.push_back(crandom() & mask2);
+ s.test[FIND].t1 = clock();
+ size_t sum = 0;
+ //container::iterator it;
+ // Iteration - not inherent find - skipping
+ //c_forrange (S) if ((it = std::find(con.begin(), con.end(), crandom() & mask2)) != con.end()) sum += *it;
+ s.test[FIND].t2 = clock();
+ s.test[FIND].sum = sum;
+ s.test[ITER].t1 = clock();
+ sum = 0;
+ c_forrange (R) c_forrange (i, N) sum += con[i];
+ s.test[ITER].t2 = clock();
+ s.test[ITER].sum = sum;
+ s.test[DESTRUCT].t1 = clock(); // destruction timed from here to scope exit
+ }
+ s.test[DESTRUCT].t2 = clock();
+ s.test[DESTRUCT].sum = 0;
+ return s;
+}
+#else
+// C build: no std::vector; return a zero-initialized sample. NOTE(review):
+// "std-vector" lacks the comma used by "std,vector" above, so a C build
+// emits one CSV column fewer -- confirm intended.
+Sample test_std_vector() { Sample s = {"std-vector"}; return s;}
+#endif
+
+
+
+// Time STC cvec_x over the same operation sequence as test_std_vector,
+// using the same seed so both see identical value streams. FIND is a no-op
+// here too, matching the std version.
+Sample test_stc_vector() {
+ typedef cvec_x container;
+ Sample s = {"STC,vector"};
+ {
+ s.test[INSERT].t1 = clock();
+ container con = cvec_x_init();
+ csrandom(seed);
+ c_forrange (N) cvec_x_push_back(&con, crandom() & mask1);
+ s.test[INSERT].t2 = clock();
+ s.test[INSERT].sum = cvec_x_size(&con);
+ s.test[ERASE].t1 = clock();
+ c_forrange (N) { cvec_x_pop_back(&con); }
+ s.test[ERASE].t2 = clock();
+ s.test[ERASE].sum = cvec_x_size(&con);
+ cvec_x_drop(&con);
+ }{
+ csrandom(seed);
+ container con = cvec_x_init();
+ c_forrange (N) cvec_x_push_back(&con, crandom() & mask2);
+ s.test[FIND].t1 = clock();
+ size_t sum = 0;
+ //cvec_x_iter it, end = cvec_x_end(&con);
+ //c_forrange (S) if ((it = cvec_x_find(&con, crandom() & mask2)).ref != end.ref) sum += *it.ref;
+ s.test[FIND].t2 = clock();
+ s.test[FIND].sum = sum;
+ s.test[ITER].t1 = clock();
+ sum = 0;
+ c_forrange (R) c_forrange (i, N) sum += con.data[i]; // direct data[] access, like con[i] above
+ s.test[ITER].t2 = clock();
+ s.test[ITER].sum = sum;
+ s.test[DESTRUCT].t1 = clock(); // destruction timed from here through drop
+ cvec_x_drop(&con);
+ }
+ s.test[DESTRUCT].t2 = clock();
+ s.test[DESTRUCT].sum = 0;
+ return s;
+}
+
+// Run each benchmark SAMPLES times, keep the fastest time per operation in
+// slot [0], verify checksums agree across samples, then print CSV rows:
+// Compiler,Library,C,Method,Seconds,Ratio (ratio = STC time / std time).
+int main(int argc, char* argv[])
+{
+ Sample std_s[SAMPLES + 1] = {{NULL}}, stc_s[SAMPLES + 1] = {{NULL}};
+ c_forrange (i, SAMPLES) {
+ std_s[i] = test_std_vector();
+ stc_s[i] = test_stc_vector();
+ if (i > 0) c_forrange (j, N_TESTS) {
+ if (secs(std_s[i].test[j]) < secs(std_s[0].test[j])) std_s[0].test[j] = std_s[i].test[j];
+ if (secs(stc_s[i].test[j]) < secs(stc_s[0].test[j])) stc_s[0].test[j] = stc_s[i].test[j];
+ // Fixed: arguments were swapped (i is the sample index, j the test
+ // index), and both are cast so they match the %lld specifier.
+ if (stc_s[i].test[j].sum != stc_s[0].test[j].sum) printf("Error in sum: test %lld, sample %lld\n", (long long) j, (long long) i);
+ }
+ }
+ const char* comp = argc > 1 ? argv[1] : "test";
+ bool header = (argc > 2 && argv[2][0] == '1');
+ float std_sum = 0, stc_sum = 0;
+
+ c_forrange (j, N_TESTS) {
+ std_sum += secs(std_s[0].test[j]);
+ stc_sum += secs(stc_s[0].test[j]);
+ }
+ if (header) printf("Compiler,Library,C,Method,Seconds,Ratio\n");
+
+ c_forrange (j, N_TESTS)
+ printf("%s,%s n:%d,%s,%.3f,%.3f\n", comp, std_s[0].name, N, operations[j], secs(std_s[0].test[j]), 1.0f);
+ printf("%s,%s n:%d,%s,%.3f,%.3f\n", comp, std_s[0].name, N, "total", std_sum, 1.0f);
+
+ c_forrange (j, N_TESTS)
+ printf("%s,%s n:%d,%s,%.3f,%.3f\n", comp, stc_s[0].name, N, operations[j], secs(stc_s[0].test[j]), secs(std_s[0].test[j]) ? secs(stc_s[0].test[j])/secs(std_s[0].test[j]) : 1.0f);
+ // Guard the total ratio: std_sum is 0 in a C-only build.
+ printf("%s,%s n:%d,%s,%.3f,%.3f\n", comp, stc_s[0].name, N, "total", stc_sum, std_sum != 0 ? stc_sum/std_sum : 1.0f);
+}
diff --git a/misc/benchmarks/plotbench/plot.py b/misc/benchmarks/plotbench/plot.py
new file mode 100644
index 00000000..fa538285
--- /dev/null
+++ b/misc/benchmarks/plotbench/plot.py
@@ -0,0 +1,24 @@
+# Plot the benchmark CSV (Compiler,Library,C,Method,Seconds,Ratio) as grouped bars.
+# Usage: plot.py [compiler-index 0-3] [csv-file]
+import sys
+import seaborn as sns
+import pandas as pd
+import matplotlib.pyplot as plt
+#sns.set_theme(style="whitegrid")
+
+comp = ['All compilers', 'Mingw-g++-10.30', 'Win-Clang-12', 'VC-19.28']
+n = int(sys.argv[1]) if len(sys.argv) > 1 else 0  # NOTE(review): n > 3 raises IndexError below
+file = sys.argv[2] if len(sys.argv) > 2 else 'plot_win.csv'
+df = pd.read_csv(file)
+df = df[df.Method != 'total']  # 'total' rows would dwarf the per-operation bars
+if n > 0:
+ df = df[df.Compiler == comp[n]]  # index 0 keeps all compilers
+
+g = sns.catplot(data=df, x='Method', y='Seconds', hue='Library', col='C', kind='bar',
+ ci=68, legend=False, col_wrap=2, sharex=False, aspect=1.4, height=3.1)
+g.set_xlabels('')
+
+g.add_legend(bbox_to_anchor=(0.75, 0.2), borderaxespad=0.)
+
+g.fig.subplots_adjust(top=0.90, left=0.06, bottom=0.07)
+g.fig.suptitle('Benchmark STC vs c++ std containers: %s' % comp[n], fontsize=15, y=0.98)
+
+plt.show()
diff --git a/misc/benchmarks/plotbench/run_all.bat b/misc/benchmarks/plotbench/run_all.bat
new file mode 100644
index 00000000..2edd0a1e
--- /dev/null
+++ b/misc/benchmarks/plotbench/run_all.bat
@@ -0,0 +1,5 @@
+rem Run all plotbench benchmarks (gcc, clang, VC) and collect CSV rows into plot_win.csv.
+set out=plot_win.csv
+echo Compiler,Library,C,Method,Seconds,Ratio> %out%
+sh run_gcc.sh >> %out%
+sh run_clang.sh >> %out%
+call run_vc.bat >> %out%
diff --git a/misc/benchmarks/plotbench/run_all.sh b/misc/benchmarks/plotbench/run_all.sh
new file mode 100644
index 00000000..f15a5881
--- /dev/null
+++ b/misc/benchmarks/plotbench/run_all.sh
@@ -0,0 +1,4 @@
+out="plot_linux.csv"
+echo "Compiler,Library,C,Method,Seconds,Ratio"> $out
+sh run_gcc.sh >> $out
+sh run_clang.sh >> $out
diff --git a/misc/benchmarks/plotbench/run_clang.sh b/misc/benchmarks/plotbench/run_clang.sh
new file mode 100644
index 00000000..ae19486e
--- /dev/null
+++ b/misc/benchmarks/plotbench/run_clang.sh
@@ -0,0 +1,14 @@
+# Build all plotbench benchmarks with clang++ and run them, passing the
+# compiler label as argv[1] for the CSV output.
+# NOTE(review): -I../include assumes the STC headers one level up; after the
+# move into misc/benchmarks this path may need updating -- verify.
+exe=''
+if [ "$OS" = "Windows_NT" ] ; then exe=".exe" ; fi
+clang++ -I../include -O3 -o cdeq_benchmark$exe cdeq_benchmark.cpp
+clang++ -I../include -O3 -o clist_benchmark$exe clist_benchmark.cpp
+clang++ -I../include -O3 -o cmap_benchmark$exe cmap_benchmark.cpp
+clang++ -I../include -O3 -o csmap_benchmark$exe csmap_benchmark.cpp
+clang++ -I../include -O3 -o cvec_benchmark$exe cvec_benchmark.cpp
+
+c='Win-Clang-12'
+./cdeq_benchmark$exe $c
+./clist_benchmark$exe $c
+./cmap_benchmark$exe $c
+./csmap_benchmark$exe $c
+./cvec_benchmark$exe $c
diff --git a/misc/benchmarks/plotbench/run_gcc.sh b/misc/benchmarks/plotbench/run_gcc.sh
new file mode 100644
index 00000000..6a6472c0
--- /dev/null
+++ b/misc/benchmarks/plotbench/run_gcc.sh
@@ -0,0 +1,12 @@
+g++ -I../include -O3 -o cdeq_benchmark cdeq_benchmark.cpp
+g++ -I../include -O3 -o clist_benchmark clist_benchmark.cpp
+g++ -I../include -O3 -o cmap_benchmark cmap_benchmark.cpp
+g++ -I../include -O3 -o csmap_benchmark csmap_benchmark.cpp
+g++ -I../include -O3 -o cvec_benchmark cvec_benchmark.cpp
+
+c='Mingw-g++-10.30'
+./cdeq_benchmark $c
+./clist_benchmark $c
+./cmap_benchmark $c
+./csmap_benchmark $c
+./cvec_benchmark $c \ No newline at end of file
diff --git a/misc/benchmarks/plotbench/run_vc.bat b/misc/benchmarks/plotbench/run_vc.bat
new file mode 100644
index 00000000..3dca925b
--- /dev/null
+++ b/misc/benchmarks/plotbench/run_vc.bat
@@ -0,0 +1,15 @@
+@echo off
+rem Build all plotbench benchmarks with MSVC (loading vcvars64 if not already
+rem in a VS shell) and run them, passing the compiler label for the CSV output.
+if "%VSINSTALLDIR%"=="" call "C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Auxiliary\Build\vcvars64.bat" >nul
+cl.exe -nologo -EHsc -std:c++latest -I../include -O2 cdeq_benchmark.cpp >nul
+cl.exe -nologo -EHsc -std:c++latest -I../include -O2 clist_benchmark.cpp >nul
+cl.exe -nologo -EHsc -std:c++latest -I../include -O2 cmap_benchmark.cpp >nul
+cl.exe -nologo -EHsc -std:c++latest -I../include -O2 csmap_benchmark.cpp >nul
+cl.exe -nologo -EHsc -std:c++latest -I../include -O2 cvec_benchmark.cpp >nul
+del *.obj >nul
+
+set c=VC-19.28
+cdeq_benchmark.exe %c%
+clist_benchmark.exe %c%
+cmap_benchmark.exe %c%
+csmap_benchmark.exe %c%
+cvec_benchmark.exe %c% \ No newline at end of file
diff --git a/misc/benchmarks/shootout_hashmaps.cpp b/misc/benchmarks/shootout_hashmaps.cpp
new file mode 100644
index 00000000..78d7bce2
--- /dev/null
+++ b/misc/benchmarks/shootout_hashmaps.cpp
@@ -0,0 +1,349 @@
+#include <stdio.h>
+#include <time.h>
+#include <stc/crandom.h>
+
+#define MAX_LOAD_FACTOR 77
+
+#ifdef __cplusplus
+#include <limits>
+#include <unordered_map>
+#include "external/ankerl/robin_hood.h"
+#include "external/ankerl/unordered_dense.h"
+#include "external/skarupke/flat_hash_map.hpp"
+#include "external/tsl/robin_map.h"
+#include "external/emhash/hash_table7.hpp"
+#ifdef HAVE_BOOST
+#include <boost/unordered/unordered_flat_map.hpp>
+#endif
+//#include "external/skarupke/bytell_hash_map.hpp"
+//#include "external/parallel_hashmap/phmap.h"
+//#include "external/tsl/hopscotch_map.h"
+
+template<typename C> inline void std_destroy(C& c) { C().swap(c); }
+
+template <class K, class V> using robin_hood_flat_map = robin_hood::unordered_flat_map<
+ K, V, robin_hood::hash<K>, std::equal_to<K>, MAX_LOAD_FACTOR>;
+#endif
+
+// All maps in the shootout use 64-bit integer keys and values.
+typedef int64_t IKey;
+typedef int64_t IValue;
+
+// khash template expansion
+#include "external/khash.h"
+KHASH_MAP_INIT_INT64(ii, IValue)
+
+// cmap template expansion
+#define i_key IKey
+#define i_val IValue
+#define i_size uint32_t // optional, enables 2x expand
+#define i_tag ii
+#define i_max_load_factor MAX_LOAD_FACTOR / 100.0f
+#include <stc/cmap.h>
+
+// SEED reseeds the shared stc64 generator; RAND(N) yields a key in [0, 2^N).
+#define SEED(s) rng = stc64_new(s)
+#define RAND(N) (stc64_rand(&rng) & (((uint64_t)1 << N) - 1))
+
+// Adapter macros: every library under test exposes the same <M>_SETUP/_PUT/
+// _EMPLACE/_ERASE/_FIND/_FOR/_ITEM/_SIZE/_BUCKETS/_CLEAR/_DTOR operations,
+// so the MAP_TESTn drivers below are library-agnostic. This group: STC cmap.
+#define CMAP_SETUP(X, Key, Value) cmap_##X map = cmap_##X##_init()
+#define CMAP_PUT(X, key, val) cmap_##X##_insert_or_assign(&map, key, val).ref->second
+#define CMAP_EMPLACE(X, key, val) cmap_##X##_insert(&map, key, val).ref->second
+#define CMAP_ERASE(X, key) cmap_##X##_erase(&map, key)
+#define CMAP_FIND(X, key) cmap_##X##_contains(&map, key)
+#define CMAP_FOR(X, i) c_foreach (i, cmap_##X, map)
+#define CMAP_ITEM(X, i) i.ref->second
+#define CMAP_SIZE(X) cmap_##X##_size(&map)
+#define CMAP_BUCKETS(X) cmap_##X##_bucket_count(&map)
+#define CMAP_CLEAR(X) cmap_##X##_clear(&map)
+#define CMAP_DTOR(X) cmap_##X##_drop(&map)
+
+// Adapter group: klib khash. ERASE returns 1 if the key existed, 0 otherwise,
+// matching the erase-count semantics of the other adapters.
+#define KMAP_SETUP(X, Key, Value) khash_t(X)* map = kh_init(X); khiter_t ki; int ret
+#define KMAP_PUT(X, key, val) (ki = kh_put(X, map, key, &ret), \
+ map->vals[ki] = val, map->vals[ki])
+#define KMAP_EMPLACE(X, key, val) (ki = kh_put(X, map, key, &ret), \
+ (ret ? map->vals[ki] = val, 1 : 0), map->vals[ki])
+#define KMAP_ERASE(X, key) (ki = kh_get(X, map, key), \
+ ki != kh_end(map) ? (kh_del(X, map, ki), 1) : 0)
+#define KMAP_FOR(X, i) for (khint_t i = kh_begin(map); i != kh_end(map); ++i) \
+ if (kh_exist(map, i))
+#define KMAP_ITEM(X, i) map->vals[i]
+#define KMAP_FIND(X, key) (kh_get(X, map, key) != kh_end(map))
+#define KMAP_SIZE(X) kh_size(map)
+#define KMAP_BUCKETS(X) kh_n_buckets(map)
+#define KMAP_CLEAR(X) kh_clear(X, map)
+#define KMAP_DTOR(X) kh_destroy(X, map)
+
+// Adapter group: std::unordered_map. The remaining C++ adapters below reuse
+// these UMAP_* operations wherever the container's API is drop-in compatible.
+#define UMAP_SETUP(X, Key, Value) std::unordered_map<Key, Value> map; \
+ map.max_load_factor(MAX_LOAD_FACTOR/100.0f)
+#define UMAP_PUT(X, key, val) (map[key] = val)
+#define UMAP_EMPLACE(X, key, val) map.emplace(key, val).first->second
+#define UMAP_FIND(X, key) int(map.find(key) != map.end())
+#define UMAP_ERASE(X, key) map.erase(key)
+#define UMAP_FOR(X, i) for (const auto& i: map)
+#define UMAP_ITEM(X, i) i.second
+#define UMAP_SIZE(X) map.size()
+#define UMAP_BUCKETS(X) map.bucket_count()
+#define UMAP_CLEAR(X) map.clear()
+#define UMAP_DTOR(X) std_destroy(map)
+
+// Adapter group: skarupke ska::flat_hash_map.
+#define FMAP_SETUP(X, Key, Value) ska::flat_hash_map<Key, Value> map; \
+ map.max_load_factor(MAX_LOAD_FACTOR/100.0f)
+#define FMAP_PUT(X, key, val) UMAP_PUT(X, key, val)
+#define FMAP_EMPLACE(X, key, val) UMAP_EMPLACE(X, key, val)
+#define FMAP_FIND(X, key) UMAP_FIND(X, key)
+#define FMAP_ERASE(X, key) UMAP_ERASE(X, key)
+#define FMAP_FOR(X, i) UMAP_FOR(X, i)
+#define FMAP_ITEM(X, i) UMAP_ITEM(X, i)
+#define FMAP_SIZE(X) UMAP_SIZE(X)
+#define FMAP_BUCKETS(X) UMAP_BUCKETS(X)
+#define FMAP_CLEAR(X) UMAP_CLEAR(X)
+#define FMAP_DTOR(X) UMAP_DTOR(X)
+
+// Adapter group: boost::unordered_flat_map (only compiled with HAVE_BOOST).
+#define BMAP_SETUP(X, Key, Value) boost::unordered_flat_map<Key, Value> map; \
+ map.max_load_factor(MAX_LOAD_FACTOR/100.0f)
+#define BMAP_PUT(X, key, val) UMAP_PUT(X, key, val)
+#define BMAP_EMPLACE(X, key, val) UMAP_EMPLACE(X, key, val)
+#define BMAP_FIND(X, key) UMAP_FIND(X, key)
+#define BMAP_ERASE(X, key) UMAP_ERASE(X, key)
+#define BMAP_FOR(X, i) UMAP_FOR(X, i)
+#define BMAP_ITEM(X, i) UMAP_ITEM(X, i)
+#define BMAP_SIZE(X) UMAP_SIZE(X)
+#define BMAP_BUCKETS(X) UMAP_BUCKETS(X)
+#define BMAP_CLEAR(X) UMAP_CLEAR(X)
+#define BMAP_DTOR(X) UMAP_DTOR(X)
+
+// Adapter group: tsl::hopscotch_map. Currently unused: its header include is
+// commented out above and RUN_TEST does not reference HMAP.
+#define HMAP_SETUP(X, Key, Value) tsl::hopscotch_map<Key, Value> map; \
+ map.max_load_factor(MAX_LOAD_FACTOR/100.0f)
+#define HMAP_PUT(X, key, val) UMAP_PUT(X, key, val)
+#define HMAP_EMPLACE(X, key, val) map.emplace(key, val).first.value()
+#define HMAP_FIND(X, key) UMAP_FIND(X, key)
+#define HMAP_ERASE(X, key) UMAP_ERASE(X, key)
+#define HMAP_FOR(X, i) UMAP_FOR(X, i)
+#define HMAP_ITEM(X, i) UMAP_ITEM(X, i)
+#define HMAP_SIZE(X) UMAP_SIZE(X)
+#define HMAP_BUCKETS(X) UMAP_BUCKETS(X)
+#define HMAP_CLEAR(X) UMAP_CLEAR(X)
+#define HMAP_DTOR(X) UMAP_DTOR(X)
+
+// Adapter group: tsl::robin_map (emplace goes through iterator .value()).
+#define TMAP_SETUP(X, Key, Value) tsl::robin_map<Key, Value> map; \
+ map.max_load_factor(MAX_LOAD_FACTOR/100.0f)
+#define TMAP_PUT(X, key, val) UMAP_PUT(X, key, val)
+#define TMAP_EMPLACE(X, key, val) map.emplace(key, val).first.value()
+#define TMAP_FIND(X, key) UMAP_FIND(X, key)
+#define TMAP_ERASE(X, key) UMAP_ERASE(X, key)
+#define TMAP_FOR(X, i) UMAP_FOR(X, i)
+#define TMAP_ITEM(X, i) UMAP_ITEM(X, i)
+#define TMAP_SIZE(X) UMAP_SIZE(X)
+#define TMAP_BUCKETS(X) UMAP_BUCKETS(X)
+#define TMAP_CLEAR(X) UMAP_CLEAR(X)
+#define TMAP_DTOR(X) UMAP_DTOR(X)
+
+// Adapter group: martinus robin_hood (load factor fixed via template arg above).
+#define RMAP_SETUP(X, Key, Value) robin_hood_flat_map<Key, Value> map
+#define RMAP_PUT(X, key, val) UMAP_PUT(X, key, val)
+#define RMAP_EMPLACE(X, key, val) UMAP_EMPLACE(X, key, val)
+#define RMAP_FIND(X, key) UMAP_FIND(X, key)
+#define RMAP_ERASE(X, key) UMAP_ERASE(X, key)
+#define RMAP_FOR(X, i) UMAP_FOR(X, i)
+#define RMAP_ITEM(X, i) UMAP_ITEM(X, i)
+#define RMAP_SIZE(X) UMAP_SIZE(X)
+#define RMAP_BUCKETS(X) (map.mask() + 1)
+#define RMAP_CLEAR(X) UMAP_CLEAR(X)
+#define RMAP_DTOR(X) UMAP_DTOR(X)
+
+// Adapter group: ankerl::unordered_dense.
+#define DMAP_SETUP(X, Key, Value) ankerl::unordered_dense::map<Key, Value> map; \
+ map.max_load_factor(MAX_LOAD_FACTOR/100.0f)
+#define DMAP_PUT(X, key, val) UMAP_PUT(X, key, val)
+#define DMAP_EMPLACE(X, key, val) UMAP_EMPLACE(X, key, val)
+#define DMAP_FIND(X, key) UMAP_FIND(X, key)
+#define DMAP_ERASE(X, key) UMAP_ERASE(X, key)
+#define DMAP_FOR(X, i) UMAP_FOR(X, i)
+#define DMAP_ITEM(X, i) UMAP_ITEM(X, i)
+#define DMAP_SIZE(X) UMAP_SIZE(X)
+#define DMAP_BUCKETS(X) UMAP_BUCKETS(X)
+#define DMAP_CLEAR(X) UMAP_CLEAR(X)
+#define DMAP_DTOR(X) UMAP_DTOR(X)
+
+// Adapter group: emhash7::HashMap.
+#define EMAP_SETUP(X, Key, Value) emhash7::HashMap<Key, Value> map; \
+ map.max_load_factor(MAX_LOAD_FACTOR/100.0f)
+#define EMAP_PUT(X, key, val) UMAP_PUT(X, key, val)
+#define EMAP_EMPLACE(X, key, val) UMAP_EMPLACE(X, key, val)
+#define EMAP_FIND(X, key) UMAP_FIND(X, key)
+#define EMAP_ERASE(X, key) UMAP_ERASE(X, key)
+#define EMAP_FOR(X, i) UMAP_FOR(X, i)
+#define EMAP_ITEM(X, i) UMAP_ITEM(X, i)
+#define EMAP_SIZE(X) UMAP_SIZE(X)
+#define EMAP_BUCKETS(X) UMAP_BUCKETS(X)
+#define EMAP_CLEAR(X) UMAP_CLEAR(X)
+#define EMAP_DTOR(X) UMAP_DTOR(X)
+
+// Adapter group: phmap::flat_hash_map. Currently unused: its header include
+// is commented out above and RUN_TEST does not reference PMAP.
+#define PMAP_SETUP(X, Key, Value) phmap::flat_hash_map<Key, Value> map; \
+ map.max_load_factor(MAX_LOAD_FACTOR/100.0f)
+#define PMAP_PUT(X, key, val) UMAP_PUT(X, key, val)
+#define PMAP_EMPLACE(X, key, val) UMAP_EMPLACE(X, key, val)
+#define PMAP_FIND(X, key) UMAP_FIND(X, key)
+#define PMAP_ERASE(X, key) UMAP_ERASE(X, key)
+#define PMAP_FOR(X, i) UMAP_FOR(X, i)
+#define PMAP_ITEM(X, i) UMAP_ITEM(X, i)
+#define PMAP_SIZE(X) UMAP_SIZE(X)
+#define PMAP_BUCKETS(X) UMAP_BUCKETS(X)
+#define PMAP_CLEAR(X) UMAP_CLEAR(X)
+#define PMAP_DTOR(X) UMAP_DTOR(X)
+
+
+// The five benchmark drivers. Comments are kept outside the macro bodies:
+// a // comment before a line-continuation backslash would swallow the next
+// line, so the #define bodies below are byte-identical.
+
+// TEST1: insert/update n random keys; the sum of returned mapped values
+// serves as a cross-library checksum.
+#define MAP_TEST1(M, X, n) \
+{ /* Insert, update */ \
+ M##_SETUP(X, IKey, IValue); \
+ uint64_t sum = 0; \
+ SEED(seed); \
+ clock_t difference, before = clock(); \
+ for (size_t i = 0; i < n; ++i) { \
+ sum += M##_PUT(X, RAND(keybits), i); \
+ } \
+ difference = clock() - before; \
+ printf(#M ": %5.03f s, size: %" c_ZU ", buckets: %8" c_ZU ", sum: %" c_ZU "\n", \
+ (float) difference / CLOCKS_PER_SEC, (size_t) M##_SIZE(X), (size_t) M##_BUCKETS(X), sum); \
+ M##_DTOR(X); \
+}
+
+// TEST2: insert n sequential keys, then erase them in the same order.
+#define MAP_TEST2(M, X, n) \
+{ /* Insert sequential keys, then erase them */ \
+ M##_SETUP(X, IKey, IValue); \
+ size_t erased = 0; \
+ clock_t difference, before = clock(); \
+ for (size_t i = 0; i < n; ++i) \
+ M##_EMPLACE(X, i, i); \
+ for (size_t i = 0; i < n; ++i) \
+ erased += M##_ERASE(X, i); \
+ difference = clock() - before; \
+ printf(#M ": %5.03f s, size: %" c_ZU ", buckets: %8" c_ZU ", erased %" c_ZU "\n", \
+ (float) difference / CLOCKS_PER_SEC, (size_t) M##_SIZE(X), (size_t) M##_BUCKETS(X), erased); \
+ M##_DTOR(X); \
+}
+
+// TEST3: insert 2n random keys, reseed, then time erasing them by lookup.
+#define MAP_TEST3(M, X, n) \
+{ /* Erase elements */ \
+ M##_SETUP(X, IKey, IValue); \
+ size_t erased = 0, _n = (n)*2; \
+ clock_t difference, before; \
+ SEED(seed); \
+ for (size_t i = 0; i < _n; ++i) \
+ M##_EMPLACE(X, RAND(keybits), i); \
+ SEED(seed); \
+ before = clock(); \
+ for (size_t i = 0; i < _n; ++i) \
+ erased += M##_ERASE(X, RAND(keybits)); \
+ difference = clock() - before; \
+ printf(#M ": %5.03f s, size: %" c_ZU ", buckets: %8" c_ZU ", erased %" c_ZU "\n", \
+ (float) difference / CLOCKS_PER_SEC, (size_t) M##_SIZE(X), (size_t) M##_BUCKETS(X), erased); \
+ M##_DTOR(X); \
+}
+
+// TEST4: iterate the whole map repeatedly; repeat count scales inversely
+// with map size so total visited elements is roughly constant.
+#define MAP_TEST4(M, X, n) \
+{ /* Iterate */ \
+ M##_SETUP(X, IKey, IValue); \
+ size_t sum = 0, m = 1ull << (keybits + 1), _n = n; \
+ if (_n < m) m = _n; \
+ SEED(seed); \
+ for (size_t i = 0; i < m; ++i) \
+ M##_EMPLACE(X, RAND(keybits), i); \
+ size_t rep = 60000000ull/M##_SIZE(X); \
+ clock_t difference, before = clock(); \
+ for (size_t k=0; k < rep; k++) M##_FOR (X, it) \
+ sum += M##_ITEM(X, it); \
+ difference = clock() - before; \
+ printf(#M ": %5.03f s, size: %" c_ZU ", buckets: %8" c_ZU ", repeats: %" c_ZU ", check: %" c_ZU "\n", \
+ (float) difference / CLOCKS_PER_SEC, (size_t) M##_SIZE(X), (size_t) M##_BUCKETS(X), rep, sum & 0xffff); \
+ M##_DTOR(X); \
+}
+
+// TEST5: lookup x random keys plus x known-present keys (same seed replay);
+// x scales inversely with map size.
+#define MAP_TEST5(M, X, n) \
+{ /* Lookup */ \
+ M##_SETUP(X, IKey, IValue); \
+ size_t found = 0, m = 1ull << (keybits + 1), _n = n; \
+ clock_t difference, before; \
+ if (_n < m) m = _n; \
+ SEED(seed); \
+ for (size_t i = 0; i < m; ++i) \
+ M##_EMPLACE(X, RAND(keybits), i); \
+ before = clock(); \
+ /* Lookup x random keys */ \
+ size_t x = m * 8000000ull/M##_SIZE(X); \
+ for (size_t i = 0; i < x; ++i) \
+ found += M##_FIND(X, RAND(keybits)); \
+ /* Lookup x existing keys by resetting seed */ \
+ SEED(seed); \
+ for (size_t i = 0; i < x; ++i) \
+ found += M##_FIND(X, RAND(keybits)); \
+ difference = clock() - before; \
+ printf(#M ": %5.03f s, size: %" c_ZU ", lookups: %" c_ZU ", found: %" c_ZU "\n", \
+ (float) difference / CLOCKS_PER_SEC, (size_t) M##_SIZE(X), x*2, found); \
+ M##_DTOR(X); \
+}
+
+
+#ifdef __cplusplus
+#ifdef HAVE_BOOST
+#define MAP_TEST_BOOST(n, X) MAP_TEST##n(BMAP, X, N##n)
+#else
+#define MAP_TEST_BOOST(n, X)
+#endif
+// C++ build: run every adapter (boost only with HAVE_BOOST).
+#define RUN_TEST(n) MAP_TEST##n(KMAP, ii, N##n) \
+ MAP_TEST_BOOST(n, ii) \
+ MAP_TEST##n(CMAP, ii, N##n) \
+ MAP_TEST##n(FMAP, ii, N##n) \
+ MAP_TEST##n(TMAP, ii, N##n) \
+ MAP_TEST##n(RMAP, ii, N##n) \
+ MAP_TEST##n(DMAP, ii, N##n) \
+ MAP_TEST##n(EMAP, ii, N##n) \
+ MAP_TEST##n(UMAP, ii, N##n)
+#else
+// C build: only khash and STC cmap are available.
+#define RUN_TEST(n) MAP_TEST##n(KMAP, ii, N##n) \
+ MAP_TEST##n(CMAP, ii, N##n)
+#endif
+
+// Command-line defaults (n-million inserts, key range [0, 2^keybits)).
+enum {
+ DEFAULT_N_MILL = 10,
+ DEFAULT_KEYBITS = 22,
+};
+
+// Parse n-million / key-bits from argv and run the five shootout tests.
+// NOTE(review): the DEFAULT_N_MILL fallback below is unreachable -- argc < 2
+// already returned. atoi() is unchecked, and n_mill * 1000000 is unsigned
+// arithmetic (wraps for very large arguments) -- confirm acceptable here.
+int main(int argc, char* argv[])
+{
+ if (argc < 2) {
+ printf("Usage %s n-million [key-bits (default %d)]\n\n", argv[0], DEFAULT_KEYBITS);
+ return 0;
+ }
+ unsigned n_mill = argc >= 2 ? atoi(argv[1]) : DEFAULT_N_MILL;
+ unsigned keybits = argc >= 3 ? atoi(argv[2]) : DEFAULT_KEYBITS;
+ unsigned n = n_mill * 1000000;
+ unsigned N1 = n, N2 = n, N3 = n, N4 = n, N5 = n; // per-test sizes used by RUN_TEST(n) via N##n
+ stc64_t rng; // shared generator used by the SEED/RAND macros
+ size_t seed = time(NULL);
+
+ printf("\nUnordered hash map shootout\n");
+ printf("KMAP = https://github.com/attractivechaos/klib\n"
+ "BMAP = https://www.boost.org (unordered_flat_map)\n"
+ "CMAP = https://github.com/tylov/STC (**)\n"
+ //"PMAP = https://github.com/greg7mdp/parallel-hashmap\n"
+ "FMAP = https://github.com/skarupke/flat_hash_map\n"
+ "TMAP = https://github.com/Tessil/robin-map\n"
+ //"HMAP = https://github.com/Tessil/hopscotch-map\n"
+ "RMAP = https://github.com/martinus/robin-hood-hashing\n"
+ "DMAP = https://github.com/martinus/unordered_dense\n"
+ "EMAP = https://github.com//ktprime/emhash\n"
+ "UMAP = std::unordered_map\n\n");
+
+ printf("Seed = %" c_ZU ":\n", seed);
+
+ printf("\nT1: Insert %g mill. random keys range [0, 2^%u): map[rnd] = i;\n", N1/1000000.0, keybits);
+ RUN_TEST(1)
+
+ printf("\nT2: Insert %g mill. SEQUENTIAL keys, erase them in same order:\n", N2/1000000.0);
+ RUN_TEST(2)
+
+ printf("\nT3: Erase all elements by lookup (%u mill. random inserts), key range [0, 2^%u)\n", n_mill*2, keybits);
+ RUN_TEST(3)
+
+ printf("\nT4: Iterate map with Min(%u mill, 2^%u) inserts repeated times:\n", n_mill, keybits+1);
+ RUN_TEST(4)
+
+ printf("\nT5: Lookup mix of random/existing keys in range [0, 2^%u). Num lookups depends on size.\n", keybits);
+ RUN_TEST(5)
+}