path: root/contrib/libs/hyperscan/src/util
author    bnagaev <bnagaev@yandex-team.ru>  2022-02-10 16:47:04 +0300
committer Daniil Cherednik <dcherednik@yandex-team.ru>  2022-02-10 16:47:04 +0300
commit    d6449ba66291ff0c0d352c82e6eb3efb4c8a7e8d (patch)
tree      d5dca6d44593f5e52556a1cc7b1ab0386e096ebe /contrib/libs/hyperscan/src/util
parent    1861d4c1402bb2c67a3e6b43b51706081b74508a (diff)
download  ydb-d6449ba66291ff0c0d352c82e6eb3efb4c8a7e8d.tar.gz
Restoring authorship annotation for <bnagaev@yandex-team.ru>. Commit 1 of 2.
Diffstat (limited to 'contrib/libs/hyperscan/src/util')
-rw-r--r--  contrib/libs/hyperscan/src/util/alloc.cpp | 266
-rw-r--r--  contrib/libs/hyperscan/src/util/alloc.h | 146
-rw-r--r--  contrib/libs/hyperscan/src/util/bitfield.h | 760
-rw-r--r--  contrib/libs/hyperscan/src/util/bitutils.h | 798
-rw-r--r--  contrib/libs/hyperscan/src/util/boundary_reports.h | 96
-rw-r--r--  contrib/libs/hyperscan/src/util/charreach.cpp | 296
-rw-r--r--  contrib/libs/hyperscan/src/util/charreach.h | 356
-rw-r--r--  contrib/libs/hyperscan/src/util/charreach_util.h | 110
-rw-r--r--  contrib/libs/hyperscan/src/util/compare.h | 338
-rw-r--r--  contrib/libs/hyperscan/src/util/compile_context.cpp | 92
-rw-r--r--  contrib/libs/hyperscan/src/util/compile_context.h | 118
-rw-r--r--  contrib/libs/hyperscan/src/util/compile_error.cpp | 118
-rw-r--r--  contrib/libs/hyperscan/src/util/compile_error.h | 136
-rw-r--r--  contrib/libs/hyperscan/src/util/container.h | 432
-rw-r--r--  contrib/libs/hyperscan/src/util/cpuid_flags.c | 246
-rw-r--r--  contrib/libs/hyperscan/src/util/cpuid_flags.h | 90
-rw-r--r--  contrib/libs/hyperscan/src/util/depth.cpp | 182
-rw-r--r--  contrib/libs/hyperscan/src/util/depth.h | 466
-rw-r--r--  contrib/libs/hyperscan/src/util/determinise.h | 332
-rw-r--r--  contrib/libs/hyperscan/src/util/dump_charclass.h | 118
-rw-r--r--  contrib/libs/hyperscan/src/util/dump_mask.cpp | 126
-rw-r--r--  contrib/libs/hyperscan/src/util/dump_mask.h | 110
-rw-r--r--  contrib/libs/hyperscan/src/util/exhaust.h | 76
-rw-r--r--  contrib/libs/hyperscan/src/util/fatbit.h | 162
-rw-r--r--  contrib/libs/hyperscan/src/util/graph.h | 392
-rw-r--r--  contrib/libs/hyperscan/src/util/graph_range.h | 220
-rw-r--r--  contrib/libs/hyperscan/src/util/join.h | 68
-rw-r--r--  contrib/libs/hyperscan/src/util/make_unique.h | 92
-rw-r--r--  contrib/libs/hyperscan/src/util/masked_move.c | 174
-rw-r--r--  contrib/libs/hyperscan/src/util/masked_move.h | 140
-rw-r--r--  contrib/libs/hyperscan/src/util/multibit.c | 278
-rw-r--r--  contrib/libs/hyperscan/src/util/multibit.h | 2818
-rw-r--r--  contrib/libs/hyperscan/src/util/multibit_build.cpp | 584
-rw-r--r--  contrib/libs/hyperscan/src/util/multibit_build.h | 108
-rw-r--r--  contrib/libs/hyperscan/src/util/multibit_internal.h | 154
-rw-r--r--  contrib/libs/hyperscan/src/util/order_check.h | 74
-rw-r--r--  contrib/libs/hyperscan/src/util/pack_bits.h | 454
-rw-r--r--  contrib/libs/hyperscan/src/util/partial_store.h | 326
-rw-r--r--  contrib/libs/hyperscan/src/util/partitioned_set.h | 510
-rw-r--r--  contrib/libs/hyperscan/src/util/popcount.h | 122
-rw-r--r--  contrib/libs/hyperscan/src/util/pqueue.h | 218
-rw-r--r--  contrib/libs/hyperscan/src/util/queue_index_factory.h | 98
-rw-r--r--  contrib/libs/hyperscan/src/util/report.h | 424
-rw-r--r--  contrib/libs/hyperscan/src/util/report_manager.cpp | 476
-rw-r--r--  contrib/libs/hyperscan/src/util/report_manager.h | 286
-rw-r--r--  contrib/libs/hyperscan/src/util/scatter.h | 110
-rw-r--r--  contrib/libs/hyperscan/src/util/scatter_runtime.h | 148
-rw-r--r--  contrib/libs/hyperscan/src/util/simd_types.h | 90
-rw-r--r--  contrib/libs/hyperscan/src/util/simd_utils.h | 1692
-rw-r--r--  contrib/libs/hyperscan/src/util/state_compress.c | 1182
-rw-r--r--  contrib/libs/hyperscan/src/util/state_compress.h | 136
-rw-r--r--  contrib/libs/hyperscan/src/util/target_info.cpp | 120
-rw-r--r--  contrib/libs/hyperscan/src/util/target_info.h | 116
-rw-r--r--  contrib/libs/hyperscan/src/util/ue2string.cpp | 678
-rw-r--r--  contrib/libs/hyperscan/src/util/ue2string.h | 446
-rw-r--r--  contrib/libs/hyperscan/src/util/unaligned.h | 196
-rw-r--r--  contrib/libs/hyperscan/src/util/unicode_def.h | 170
-rw-r--r--  contrib/libs/hyperscan/src/util/unicode_set.h | 282
-rw-r--r--  contrib/libs/hyperscan/src/util/uniform_ops.h | 386
-rw-r--r--  contrib/libs/hyperscan/src/util/verify_types.h | 102
60 files changed, 9920 insertions(+), 9920 deletions(-)
diff --git a/contrib/libs/hyperscan/src/util/alloc.cpp b/contrib/libs/hyperscan/src/util/alloc.cpp
index ace26ed5d2..bd0812d0d3 100644
--- a/contrib/libs/hyperscan/src/util/alloc.cpp
+++ b/contrib/libs/hyperscan/src/util/alloc.cpp
@@ -1,133 +1,133 @@
-/*
- * Copyright (c) 2015, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \file
- * \brief Aligned memory alloc/free.
- */
-#include "ue2common.h"
-#include "alloc.h"
-
-#include <cstdlib>
-#include <cstring>
-
-namespace ue2 {
-
-// This is one of the simplest ways to catch failure where we aren't using an
-// aligned_(zmalloc|free) pair - it will force death if the wrong free is used.
-// We use this whenever assertions are switched on.
-#if !defined(NDEBUG)
-#define HACK_OFFSET 64
-#else
-#define HACK_OFFSET 0
-#endif
-
-/* get us a posix_memalign from somewhere */
-#if !defined(HAVE_POSIX_MEMALIGN)
-# if defined(HAVE_MEMALIGN)
- #define posix_memalign(A, B, C) ((*A = (void *)memalign(B, C)) == nullptr)
-# elif defined(HAVE__ALIGNED_MALLOC)
- /* on Windows */
- #include <malloc.h>
- #define posix_memalign(A, B, C) ((*A = (void *)_aligned_malloc(C, B)) == nullptr)
-# else
- #error no posix_memalign or memalign aligned malloc
-# endif
-#endif
-
-void *aligned_malloc_internal(size_t size, size_t align) {
- void *mem;
-#if !defined(_WIN32)
- int rv = posix_memalign(&mem, align, size);
- if (rv != 0) {
- DEBUG_PRINTF("posix_memalign returned %d when asked for %zu bytes\n",
- rv, size);
- return nullptr;
- }
-#else
- if (nullptr == (mem = _aligned_malloc(size, align))) {
- DEBUG_PRINTF("_aligned_malloc failed when asked for %zu bytes\n",
- size);
- return nullptr;
- }
-#endif
-
- assert(mem);
- return mem;
-}
-
-void aligned_free_internal(void *ptr) {
- if (!ptr) {
- return;
- }
-
-#if defined(_WIN32)
- _aligned_free(ptr);
-#else
- free(ptr);
-#endif
-}
-
-/** \brief 64-byte aligned, zeroed malloc.
- *
- * Pointers should be freed with \ref aligned_free. If we are unable to
- * allocate the requested number of bytes, this function will throw
- * std::bad_alloc. */
-void *aligned_zmalloc(size_t size) {
- // Really huge allocations are probably an indication that we've
- // done something wrong.
- assert(size < 1024 * 1024 * 1024); // 1GB
-
- const size_t alloc_size = size + HACK_OFFSET;
-
- void *mem = aligned_malloc_internal(alloc_size, 64);
- if (!mem) {
- DEBUG_PRINTF("unable to allocate %zu bytes\n", alloc_size);
- throw std::bad_alloc();
- }
-
- DEBUG_PRINTF("alloced %p reporting %p\n", mem, (char *)mem + HACK_OFFSET);
- assert(ISALIGNED_N(mem, 64));
-
- memset(mem, 0, alloc_size);
- return (void *)((char *)mem + HACK_OFFSET);
-}
-
-/** \brief Free a pointer allocated with \ref aligned_zmalloc. */
-void aligned_free(void *ptr) {
- if (!ptr) {
- return;
- }
-
- void *addr = (void *)((char *)ptr - HACK_OFFSET);
- DEBUG_PRINTF("asked to free %p freeing %p\n", ptr, addr);
-
- assert(ISALIGNED_N(addr, 64));
- aligned_free_internal(addr);
-}
-
-} // namespace ue2
+/*
+ * Copyright (c) 2015, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * \brief Aligned memory alloc/free.
+ */
+#include "ue2common.h"
+#include "alloc.h"
+
+#include <cstdlib>
+#include <cstring>
+
+namespace ue2 {
+
+// This is one of the simplest ways to catch failure where we aren't using an
+// aligned_(zmalloc|free) pair - it will force death if the wrong free is used.
+// We use this whenever assertions are switched on.
+#if !defined(NDEBUG)
+#define HACK_OFFSET 64
+#else
+#define HACK_OFFSET 0
+#endif
+
+/* get us a posix_memalign from somewhere */
+#if !defined(HAVE_POSIX_MEMALIGN)
+# if defined(HAVE_MEMALIGN)
+ #define posix_memalign(A, B, C) ((*A = (void *)memalign(B, C)) == nullptr)
+# elif defined(HAVE__ALIGNED_MALLOC)
+ /* on Windows */
+ #include <malloc.h>
+ #define posix_memalign(A, B, C) ((*A = (void *)_aligned_malloc(C, B)) == nullptr)
+# else
+ #error no posix_memalign or memalign aligned malloc
+# endif
+#endif
+
+void *aligned_malloc_internal(size_t size, size_t align) {
+ void *mem;
+#if !defined(_WIN32)
+ int rv = posix_memalign(&mem, align, size);
+ if (rv != 0) {
+ DEBUG_PRINTF("posix_memalign returned %d when asked for %zu bytes\n",
+ rv, size);
+ return nullptr;
+ }
+#else
+ if (nullptr == (mem = _aligned_malloc(size, align))) {
+ DEBUG_PRINTF("_aligned_malloc failed when asked for %zu bytes\n",
+ size);
+ return nullptr;
+ }
+#endif
+
+ assert(mem);
+ return mem;
+}
+
+void aligned_free_internal(void *ptr) {
+ if (!ptr) {
+ return;
+ }
+
+#if defined(_WIN32)
+ _aligned_free(ptr);
+#else
+ free(ptr);
+#endif
+}
+
+/** \brief 64-byte aligned, zeroed malloc.
+ *
+ * Pointers should be freed with \ref aligned_free. If we are unable to
+ * allocate the requested number of bytes, this function will throw
+ * std::bad_alloc. */
+void *aligned_zmalloc(size_t size) {
+ // Really huge allocations are probably an indication that we've
+ // done something wrong.
+ assert(size < 1024 * 1024 * 1024); // 1GB
+
+ const size_t alloc_size = size + HACK_OFFSET;
+
+ void *mem = aligned_malloc_internal(alloc_size, 64);
+ if (!mem) {
+ DEBUG_PRINTF("unable to allocate %zu bytes\n", alloc_size);
+ throw std::bad_alloc();
+ }
+
+ DEBUG_PRINTF("alloced %p reporting %p\n", mem, (char *)mem + HACK_OFFSET);
+ assert(ISALIGNED_N(mem, 64));
+
+ memset(mem, 0, alloc_size);
+ return (void *)((char *)mem + HACK_OFFSET);
+}
+
+/** \brief Free a pointer allocated with \ref aligned_zmalloc. */
+void aligned_free(void *ptr) {
+ if (!ptr) {
+ return;
+ }
+
+ void *addr = (void *)((char *)ptr - HACK_OFFSET);
+ DEBUG_PRINTF("asked to free %p freeing %p\n", ptr, addr);
+
+ assert(ISALIGNED_N(addr, 64));
+ aligned_free_internal(addr);
+}
+
+} // namespace ue2
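A minimal usage sketch of the allocation API above (hypothetical caller; the "util/alloc.h" include path assumes the in-tree layout shown in this diff):

    #include "util/alloc.h"   // ue2::aligned_zmalloc / ue2::aligned_free
    #include <cassert>
    #include <cstdint>

    static void alloc_example() {
        // 64-byte aligned and zero-filled; throws std::bad_alloc on failure.
        void *p = ue2::aligned_zmalloc(256);
        assert(reinterpret_cast<std::uintptr_t>(p) % 64 == 0);
        assert(static_cast<char *>(p)[0] == 0);
        // Pair with aligned_free, never plain free(): in assertion builds the
        // HACK_OFFSET shift hands out a pointer 64 bytes past the real
        // allocation, so using the wrong free dies loudly - which is the point.
        ue2::aligned_free(p);
    }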
diff --git a/contrib/libs/hyperscan/src/util/alloc.h b/contrib/libs/hyperscan/src/util/alloc.h
index de20c8d028..6651437169 100644
--- a/contrib/libs/hyperscan/src/util/alloc.h
+++ b/contrib/libs/hyperscan/src/util/alloc.h
@@ -1,101 +1,101 @@
-/*
+/*
* Copyright (c) 2015-2017, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
/**
* \file
- * \brief Aligned memory alloc/free.
- */
-
-#ifndef UTIL_ALLOC_H
-#define UTIL_ALLOC_H
-
-#include <cassert>
-#include <cstddef> // size_t
-#include <limits>
-#include <memory>
-#include <new> // std::bad_alloc
-
-namespace ue2 {
-
-/** \brief 64-byte aligned, zeroed malloc.
- *
- * Pointers should be freed with \ref aligned_free. If we are unable to
- * allocate the requested number of bytes, this function will throw
- * std::bad_alloc. */
-void *aligned_zmalloc(size_t size);
-
-/** \brief Free a pointer allocated with \ref aligned_zmalloc. */
-void aligned_free(void *ptr);
-
-/** \brief Internal use only, used by AlignedAllocator. */
-void *aligned_malloc_internal(size_t size, size_t align);
-
-/** \brief Internal use only, used by AlignedAllocator. */
-void aligned_free_internal(void *ptr);
-
-/** \brief Aligned allocator class for use with STL containers. Ensures that
- * your objects are aligned to N bytes. */
+ * \brief Aligned memory alloc/free.
+ */
+
+#ifndef UTIL_ALLOC_H
+#define UTIL_ALLOC_H
+
+#include <cassert>
+#include <cstddef> // size_t
+#include <limits>
+#include <memory>
+#include <new> // std::bad_alloc
+
+namespace ue2 {
+
+/** \brief 64-byte aligned, zeroed malloc.
+ *
+ * Pointers should be freed with \ref aligned_free. If we are unable to
+ * allocate the requested number of bytes, this function will throw
+ * std::bad_alloc. */
+void *aligned_zmalloc(size_t size);
+
+/** \brief Free a pointer allocated with \ref aligned_zmalloc. */
+void aligned_free(void *ptr);
+
+/** \brief Internal use only, used by AlignedAllocator. */
+void *aligned_malloc_internal(size_t size, size_t align);
+
+/** \brief Internal use only, used by AlignedAllocator. */
+void aligned_free_internal(void *ptr);
+
+/** \brief Aligned allocator class for use with STL containers. Ensures that
+ * your objects are aligned to N bytes. */
template <class T, std::size_t N>
class AlignedAllocator {
-public:
+public:
using value_type = T;
-
+
AlignedAllocator() noexcept {}
-
+
template <class U, std::size_t N2>
AlignedAllocator(const AlignedAllocator<U, N2> &) noexcept {}
-
+
template <class U> struct rebind {
using other = AlignedAllocator<U, N>;
};
-
+
T *allocate(std::size_t size) const {
size_t alloc_size = size * sizeof(T);
return static_cast<T *>(aligned_malloc_internal(alloc_size, N));
- }
-
+ }
+
void deallocate(T *x, std::size_t) const noexcept {
aligned_free_internal(x);
- }
+ }
};
-
+
template <class T, class U, std::size_t N, std::size_t N2>
bool operator==(const AlignedAllocator<T, N> &,
const AlignedAllocator<U, N2> &) {
return true;
}
-
+
template <class T, class U, std::size_t N, std::size_t N2>
bool operator!=(const AlignedAllocator<T, N> &a,
const AlignedAllocator<U, N2> &b) {
return !(a == b);
}
-
-} // namespace ue2
-
-#endif
+
+} // namespace ue2
+
+#endif
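The AlignedAllocator above drops straight into STL containers; a sketch with an illustrative element type and the 64-byte alignment used elsewhere in this file:

    #include "util/alloc.h"
    #include <vector>

    // Vector whose backing store is 64-byte aligned (useful for SIMD loads).
    template <typename T>
    using aligned_vector = std::vector<T, ue2::AlignedAllocator<T, 64>>;

    static void allocator_example() {
        aligned_vector<int> v(1024);  // storage comes via aligned_malloc_internal(.., 64)
        v[0] = 42;
    }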
diff --git a/contrib/libs/hyperscan/src/util/bitfield.h b/contrib/libs/hyperscan/src/util/bitfield.h
index a580da7b60..f8c2a3336c 100644
--- a/contrib/libs/hyperscan/src/util/bitfield.h
+++ b/contrib/libs/hyperscan/src/util/bitfield.h
@@ -1,318 +1,318 @@
-/*
+/*
* Copyright (c) 2015-2018, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \file
- * \brief Fast bitset class with find_first and find_next operations.
- */
-
-#ifndef BITFIELD_H
-#define BITFIELD_H
-
-#include "ue2common.h"
-#include "popcount.h"
-#include "util/bitutils.h"
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * \brief Fast bitset class with find_first and find_next operations.
+ */
+
+#ifndef BITFIELD_H
+#define BITFIELD_H
+
+#include "ue2common.h"
+#include "popcount.h"
+#include "util/bitutils.h"
#include "util/hash.h"
-
-#include <array>
-#include <cassert>
-
-#include <boost/dynamic_bitset.hpp>
-
-namespace ue2 {
-
-/**
- * \brief Templated bitset class with find_first and find_next operations.
- *
- * This is a simple (but hopefully fast) class to replace our use of
- * std::bitset<>.
- *
- * Note: underlying storage is allocated as an array of 64-bit blocks. All
- * mutating operations MUST ensure that the trailer (the bits between
- * requested_size and the end of the array) is filled with zeroes; there's a
- * clear_trailer member function for this.
- */
-template<size_t requested_size>
-class bitfield {
-public:
- /// Empty constructor, zero initializes all bits.
- bitfield() : bits{{0}} {
- assert(none());
- }
-
- bitfield(const boost::dynamic_bitset<> &a) : bits{{0}} {
- assert(a.size() == requested_size);
- assert(none());
- for (auto i = a.find_first(); i != a.npos; i = a.find_next(i)) {
- set(i);
- }
- }
-
- /// Complete bitset equality.
- bool operator==(const bitfield &a) const {
- return bits == a.bits;
- }
-
- /// Inequality.
- bool operator!=(const bitfield &a) const {
- return bits != a.bits;
- }
-
- /// Ordering.
- bool operator<(const bitfield &a) const {
- return bits < a.bits;
- }
-
- /// Set all bits.
- void setall() {
- for (auto &e : bits) {
- e = all_ones;
- }
- clear_trailer();
- }
-
- /// Set all bits (alias for bitset::setall, to match dynamic_bitset).
- void set() {
- setall();
- }
-
- /// Clear all bits.
- void clear() {
- for (auto &e : bits) {
- e = 0;
- }
- }
-
- /// Clear all bits (alias for bitset::clear).
- void reset() {
- clear();
- }
-
- /// Clear bit N.
- void clear(size_t n) {
- assert(n < size());
- bits[getword(n)] &= ~maskbit(n);
- }
-
- /// Set bit N.
- void set(size_t n) {
- assert(n < size());
- bits[getword(n)] |= maskbit(n);
- }
-
- /// Test bit N.
- bool test(size_t n) const {
- assert(n < size());
- return bits[getword(n)] & maskbit(n);
- }
-
- /// Flip bit N.
- void flip(size_t n) {
- assert(n < size());
- bits[getword(n)] ^= maskbit(n);
- }
-
- /// Flip all bits.
- void flip() {
- for (auto &e : bits) {
- e = ~e;
- }
- clear_trailer();
- }
-
-    /// Switch on the bits in the range [from, to], inclusive.
- void set_range(size_t from, size_t to) {
- assert(from <= to);
- assert(to < requested_size);
-
- if (from / block_size == to / block_size) {
- // Small case, our indices are in the same block.
- block_type block = all_ones << (from % block_size);
- if (to % block_size != block_size - 1) {
- block &= maskbit(to + 1) - 1;
- }
- bits[from / block_size] |= block;
- return;
- }
-
- // Large case, work in block units. Write a partial mask, then a
- // run of all-ones blocks, then a partial mask at the end.
- size_t i = from;
- if (i % block_size) {
- block_type block = all_ones << (i % block_size);
- bits[i / block_size] |= block;
- i = ROUNDUP_N(i, block_size);
- }
-
- for (; i + block_size <= to + 1; i += block_size) {
- bits[i / block_size] = all_ones;
- }
-
- if (i <= to) {
- assert(to - i + 1 < block_size);
- bits[i / block_size] |= (maskbit(to + 1) - 1);
- }
- }
-
- /// Returns total number of bits.
- static constexpr size_t size() {
- return requested_size;
- }
-
- /// Returns number of bits set on.
- size_t count() const {
- static_assert(block_size == 64, "adjust popcount for block_type");
- size_t sum = 0;
+
+#include <array>
+#include <cassert>
+
+#include <boost/dynamic_bitset.hpp>
+
+namespace ue2 {
+
+/**
+ * \brief Templated bitset class with find_first and find_next operations.
+ *
+ * This is a simple (but hopefully fast) class to replace our use of
+ * std::bitset<>.
+ *
+ * Note: underlying storage is allocated as an array of 64-bit blocks. All
+ * mutating operations MUST ensure that the trailer (the bits between
+ * requested_size and the end of the array) is filled with zeroes; there's a
+ * clear_trailer member function for this.
+ */
+template<size_t requested_size>
+class bitfield {
+public:
+ /// Empty constructor, zero initializes all bits.
+ bitfield() : bits{{0}} {
+ assert(none());
+ }
+
+ bitfield(const boost::dynamic_bitset<> &a) : bits{{0}} {
+ assert(a.size() == requested_size);
+ assert(none());
+ for (auto i = a.find_first(); i != a.npos; i = a.find_next(i)) {
+ set(i);
+ }
+ }
+
+ /// Complete bitset equality.
+ bool operator==(const bitfield &a) const {
+ return bits == a.bits;
+ }
+
+ /// Inequality.
+ bool operator!=(const bitfield &a) const {
+ return bits != a.bits;
+ }
+
+ /// Ordering.
+ bool operator<(const bitfield &a) const {
+ return bits < a.bits;
+ }
+
+ /// Set all bits.
+ void setall() {
+ for (auto &e : bits) {
+ e = all_ones;
+ }
+ clear_trailer();
+ }
+
+ /// Set all bits (alias for bitset::setall, to match dynamic_bitset).
+ void set() {
+ setall();
+ }
+
+ /// Clear all bits.
+ void clear() {
+ for (auto &e : bits) {
+ e = 0;
+ }
+ }
+
+ /// Clear all bits (alias for bitset::clear).
+ void reset() {
+ clear();
+ }
+
+ /// Clear bit N.
+ void clear(size_t n) {
+ assert(n < size());
+ bits[getword(n)] &= ~maskbit(n);
+ }
+
+ /// Set bit N.
+ void set(size_t n) {
+ assert(n < size());
+ bits[getword(n)] |= maskbit(n);
+ }
+
+ /// Test bit N.
+ bool test(size_t n) const {
+ assert(n < size());
+ return bits[getword(n)] & maskbit(n);
+ }
+
+ /// Flip bit N.
+ void flip(size_t n) {
+ assert(n < size());
+ bits[getword(n)] ^= maskbit(n);
+ }
+
+ /// Flip all bits.
+ void flip() {
+ for (auto &e : bits) {
+ e = ~e;
+ }
+ clear_trailer();
+ }
+
+    /// Switch on the bits in the range [from, to], inclusive.
+ void set_range(size_t from, size_t to) {
+ assert(from <= to);
+ assert(to < requested_size);
+
+ if (from / block_size == to / block_size) {
+ // Small case, our indices are in the same block.
+ block_type block = all_ones << (from % block_size);
+ if (to % block_size != block_size - 1) {
+ block &= maskbit(to + 1) - 1;
+ }
+ bits[from / block_size] |= block;
+ return;
+ }
+
+ // Large case, work in block units. Write a partial mask, then a
+ // run of all-ones blocks, then a partial mask at the end.
+ size_t i = from;
+ if (i % block_size) {
+ block_type block = all_ones << (i % block_size);
+ bits[i / block_size] |= block;
+ i = ROUNDUP_N(i, block_size);
+ }
+
+ for (; i + block_size <= to + 1; i += block_size) {
+ bits[i / block_size] = all_ones;
+ }
+
+ if (i <= to) {
+ assert(to - i + 1 < block_size);
+ bits[i / block_size] |= (maskbit(to + 1) - 1);
+ }
+ }
+
+ /// Returns total number of bits.
+ static constexpr size_t size() {
+ return requested_size;
+ }
+
+ /// Returns number of bits set on.
+ size_t count() const {
+ static_assert(block_size == 64, "adjust popcount for block_type");
+ size_t sum = 0;
size_t i = 0;
for (; i + 4 <= num_blocks; i += 4) {
sum += popcount64(bits[i]);
sum += popcount64(bits[i + 1]);
sum += popcount64(bits[i + 2]);
sum += popcount64(bits[i + 3]);
- }
+ }
for (; i < num_blocks; i++) {
sum += popcount64(bits[i]);
}
- assert(sum <= size());
- return sum;
- }
-
- /// Are no bits set?
- bool none() const {
- for (const auto &e : bits) {
- if (e != 0) {
- return false;
- }
- }
- return true;
- }
-
- /// Is any bit set?
- bool any() const {
- return !none();
- }
-
- /// Are all bits set?
- bool all() const {
- for (size_t i = 0; i < bits.size() - 1; i++) {
- if (bits[i] != all_ones) {
- return false;
- }
- }
- size_t rem = requested_size % block_size;
- block_type exp = rem ? ((block_type{1} << rem) - 1) : all_ones;
- return *bits.rbegin() == exp;
- }
-
- /// Returns first bit set, or bitfield::npos if none set.
- size_t find_first() const {
- for (size_t i = 0; i < bits.size(); i++) {
- if (bits[i] != 0) {
- return (i * block_size) + word_ctz(i);
- }
- }
- return npos;
- }
-
- // Returns last bit set, or bitfield::npos if none set.
- size_t find_last() const {
- for (int i = bits.size() - 1; i >= 0; i--) {
- if (bits[i]) {
- static_assert(block_size == 64, "adjust clz for block_type");
- return (i * block_size) + block_size - 1 - clz64(bits[i]);
- }
- }
- return npos;
- }
-
- /// Returns next bit set, or bitfield::npos if none set after 'last'.
- size_t find_next(size_t last) const {
- if (last >= size()) {
- return npos;
- }
-
- // check current word.
- size_t i = getword(last);
- block_type lastword = bits[i];
-
- if ((last % block_size) != (block_size - 1)) {
- lastword &= (all_ones << ((last % block_size) + 1));
-
- if (lastword) {
- static_assert(block_size == 64, "adjust ctz for block_type");
- return (i * block_size) + ctz64(lastword);
- }
- }
-
- // check the rest.
- for (i++; i < bits.size(); i++) {
- if (bits[i]) {
- return (i * block_size) + word_ctz(i);
- }
- }
-
- return npos;
- }
-
- size_t find_nth(size_t n) const {
- assert(n < npos);
-
- static_assert(block_size == 64, "adjust for block_type");
-
- size_t sum = 0;
- for (size_t i = 0; i < bits.size(); i++) {
- block_type block = bits[i];
- size_t aftersum = sum + popcount64(block);
- if (aftersum > n) { // Block contains the nth bit.
- for (; sum < n; sum++) {
- assert(block);
- block &= (block - 1);
- }
- assert(block);
- size_t bit = (i * block_size) + ctz64(block);
- assert(test(bit));
- return bit;
- }
- sum = aftersum;
- }
-
- assert(count() < n + 1);
- return npos;
- }
-
- /// Bitwise OR.
+ assert(sum <= size());
+ return sum;
+ }
+
+ /// Are no bits set?
+ bool none() const {
+ for (const auto &e : bits) {
+ if (e != 0) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ /// Is any bit set?
+ bool any() const {
+ return !none();
+ }
+
+ /// Are all bits set?
+ bool all() const {
+ for (size_t i = 0; i < bits.size() - 1; i++) {
+ if (bits[i] != all_ones) {
+ return false;
+ }
+ }
+ size_t rem = requested_size % block_size;
+ block_type exp = rem ? ((block_type{1} << rem) - 1) : all_ones;
+ return *bits.rbegin() == exp;
+ }
+
+ /// Returns first bit set, or bitfield::npos if none set.
+ size_t find_first() const {
+ for (size_t i = 0; i < bits.size(); i++) {
+ if (bits[i] != 0) {
+ return (i * block_size) + word_ctz(i);
+ }
+ }
+ return npos;
+ }
+
+ // Returns last bit set, or bitfield::npos if none set.
+ size_t find_last() const {
+ for (int i = bits.size() - 1; i >= 0; i--) {
+ if (bits[i]) {
+ static_assert(block_size == 64, "adjust clz for block_type");
+ return (i * block_size) + block_size - 1 - clz64(bits[i]);
+ }
+ }
+ return npos;
+ }
+
+ /// Returns next bit set, or bitfield::npos if none set after 'last'.
+ size_t find_next(size_t last) const {
+ if (last >= size()) {
+ return npos;
+ }
+
+ // check current word.
+ size_t i = getword(last);
+ block_type lastword = bits[i];
+
+ if ((last % block_size) != (block_size - 1)) {
+ lastword &= (all_ones << ((last % block_size) + 1));
+
+ if (lastword) {
+ static_assert(block_size == 64, "adjust ctz for block_type");
+ return (i * block_size) + ctz64(lastword);
+ }
+ }
+
+ // check the rest.
+ for (i++; i < bits.size(); i++) {
+ if (bits[i]) {
+ return (i * block_size) + word_ctz(i);
+ }
+ }
+
+ return npos;
+ }
+
+ size_t find_nth(size_t n) const {
+ assert(n < npos);
+
+ static_assert(block_size == 64, "adjust for block_type");
+
+ size_t sum = 0;
+ for (size_t i = 0; i < bits.size(); i++) {
+ block_type block = bits[i];
+ size_t aftersum = sum + popcount64(block);
+ if (aftersum > n) { // Block contains the nth bit.
+ for (; sum < n; sum++) {
+ assert(block);
+ block &= (block - 1);
+ }
+ assert(block);
+ size_t bit = (i * block_size) + ctz64(block);
+ assert(test(bit));
+ return bit;
+ }
+ sum = aftersum;
+ }
+
+ assert(count() < n + 1);
+ return npos;
+ }
+
+ /// Bitwise OR.
bitfield operator|(const bitfield &a) const {
bitfield b = a;
b |= *this;
return b;
- }
-
- /// Bitwise OR-equals.
- void operator|=(const bitfield &a) {
+ }
+
+ /// Bitwise OR-equals.
+ void operator|=(const bitfield &a) {
size_t i = 0;
for (; i + 4 <= num_blocks; i += 4) {
bits[i] |= a.bits[i];
@@ -321,19 +321,19 @@ public:
bits[i + 3] |= a.bits[i + 3];
}
for (; i < num_blocks; i++) {
- bits[i] |= a.bits[i];
- }
- }
-
- /// Bitwise AND.
+ bits[i] |= a.bits[i];
+ }
+ }
+
+ /// Bitwise AND.
bitfield operator&(const bitfield &a) const {
bitfield b = a;
b &= *this;
return b;
- }
-
- /// Bitwise AND-equals.
- void operator&=(const bitfield &a) {
+ }
+
+ /// Bitwise AND-equals.
+ void operator&=(const bitfield &a) {
size_t i = 0;
for (; i + 4 <= num_blocks; i += 4) {
bits[i] &= a.bits[i];
@@ -342,17 +342,17 @@ public:
bits[i + 3] &= a.bits[i + 3];
}
for (; i < num_blocks; i++) {
- bits[i] &= a.bits[i];
- }
- }
-
- /// Bitwise XOR.
+ bits[i] &= a.bits[i];
+ }
+ }
+
+ /// Bitwise XOR.
bitfield operator^(bitfield a) const {
a ^= *this;
return a;
- }
-
- /// Bitwise XOR-equals.
+ }
+
+ /// Bitwise XOR-equals.
void operator^=(bitfield a) {
size_t i = 0;
for (; i + 4 <= num_blocks; i += 4) {
@@ -362,77 +362,77 @@ public:
bits[i + 3] ^= a.bits[i + 3];
}
for (; i < num_blocks; i++) {
- bits[i] ^= a.bits[i];
- }
- }
-
- /// Bitwise complement.
- bitfield operator~(void) const {
- bitfield cr(*this);
- cr.flip();
- return cr;
- }
-
- /// Simple hash.
- size_t hash() const {
+ bits[i] ^= a.bits[i];
+ }
+ }
+
+ /// Bitwise complement.
+ bitfield operator~(void) const {
+ bitfield cr(*this);
+ cr.flip();
+ return cr;
+ }
+
+ /// Simple hash.
+ size_t hash() const {
return ue2_hasher()(bits);
- }
-
- /// Sentinel value meaning "no more bits", used by find_first and
- /// find_next.
- static constexpr size_t npos = requested_size;
-
-private:
- /// Underlying block type.
- using block_type = u64a;
-
- /// A block filled with on bits.
- static constexpr block_type all_ones = ~block_type{0};
-
- /// Size of a block.
- static constexpr size_t block_size = sizeof(block_type) * 8;
-
- static size_t getword(size_t n) {
- return n / block_size;
- }
-
- static block_type maskbit(size_t n) {
- return (block_type{1} << (n % block_size));
- }
-
- size_t word_ctz(size_t n) const {
- static_assert(block_size == 64, "adjust ctz call for block type");
- return ctz64(bits[n]);
- }
-
- /// Ensures that bits between our requested size and the end of storage are
- /// zero.
- void clear_trailer() {
- size_t final_bits = requested_size % block_size;
- if (final_bits) {
- bits.back() &= ((block_type{1} << final_bits) - 1);
- }
- }
-
- /// Size of storage array of blocks.
- static constexpr size_t num_blocks =
- (requested_size + block_size - 1) / block_size;
-
- /// Underlying storage.
- std::array<block_type, num_blocks> bits;
-};
-
+ }
+
+ /// Sentinel value meaning "no more bits", used by find_first and
+ /// find_next.
+ static constexpr size_t npos = requested_size;
+
+private:
+ /// Underlying block type.
+ using block_type = u64a;
+
+ /// A block filled with on bits.
+ static constexpr block_type all_ones = ~block_type{0};
+
+ /// Size of a block.
+ static constexpr size_t block_size = sizeof(block_type) * 8;
+
+ static size_t getword(size_t n) {
+ return n / block_size;
+ }
+
+ static block_type maskbit(size_t n) {
+ return (block_type{1} << (n % block_size));
+ }
+
+ size_t word_ctz(size_t n) const {
+ static_assert(block_size == 64, "adjust ctz call for block type");
+ return ctz64(bits[n]);
+ }
+
+ /// Ensures that bits between our requested size and the end of storage are
+ /// zero.
+ void clear_trailer() {
+ size_t final_bits = requested_size % block_size;
+ if (final_bits) {
+ bits.back() &= ((block_type{1} << final_bits) - 1);
+ }
+ }
+
+ /// Size of storage array of blocks.
+ static constexpr size_t num_blocks =
+ (requested_size + block_size - 1) / block_size;
+
+ /// Underlying storage.
+ std::array<block_type, num_blocks> bits;
+};
+
} // namespace ue2
namespace std {
-template<size_t requested_size>
+template<size_t requested_size>
struct hash<ue2::bitfield<requested_size>> {
size_t operator()(const ue2::bitfield<requested_size> &b) const {
return b.hash();
}
};
-
+
} // namespace std
-
-#endif // BITFIELD_H
+
+#endif // BITFIELD_H
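The class above is built for the find_first/find_next scan idiom (mirroring boost::dynamic_bitset); a sketch with an illustrative size:

    #include "util/bitfield.h"

    static void bitfield_example() {
        ue2::bitfield<300> bf;    // 300 bits -> five u64a blocks; trailer bits stay zero
        bf.set(2);
        bf.set_range(100, 103);   // inclusive on both ends
        for (size_t i = bf.find_first(); i != bf.npos; i = bf.find_next(i)) {
            // i visits 2, 100, 101, 102, 103 in ascending order
        }
    }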
diff --git a/contrib/libs/hyperscan/src/util/bitutils.h b/contrib/libs/hyperscan/src/util/bitutils.h
index c545ee1872..0bb468d9dd 100644
--- a/contrib/libs/hyperscan/src/util/bitutils.h
+++ b/contrib/libs/hyperscan/src/util/bitutils.h
@@ -1,67 +1,67 @@
-/*
+/*
* Copyright (c) 2015-2017, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \file
- * \brief Bit-twiddling primitives (ctz, compress etc)
- */
-
-#ifndef BITUTILS_H
-#define BITUTILS_H
-
-#include "ue2common.h"
-#include "popcount.h"
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * \brief Bit-twiddling primitives (ctz, compress etc)
+ */
+
+#ifndef BITUTILS_H
+#define BITUTILS_H
+
+#include "ue2common.h"
+#include "popcount.h"
#include "util/arch.h"
#include "util/intrinsics.h"
-
-#define CASE_BIT 0x20
-#define CASE_CLEAR 0xdf
-#define DOUBLE_CASE_CLEAR 0xdfdf
+
+#define CASE_BIT 0x20
+#define CASE_CLEAR 0xdf
+#define DOUBLE_CASE_CLEAR 0xdfdf
#define OCTO_CASE_CLEAR 0xdfdfdfdfdfdfdfdfULL
-
-static really_inline
-u32 clz32(u32 x) {
- assert(x); // behaviour not defined for x == 0
-#if defined(_WIN32)
- unsigned long r;
- _BitScanReverse(&r, x);
- return 31 - r;
-#else
- return (u32)__builtin_clz(x);
-#endif
-}
-
-static really_inline
-u32 clz64(u64a x) {
- assert(x); // behaviour not defined for x == 0
+
+static really_inline
+u32 clz32(u32 x) {
+ assert(x); // behaviour not defined for x == 0
+#if defined(_WIN32)
+ unsigned long r;
+ _BitScanReverse(&r, x);
+ return 31 - r;
+#else
+ return (u32)__builtin_clz(x);
+#endif
+}
+
+static really_inline
+u32 clz64(u64a x) {
+ assert(x); // behaviour not defined for x == 0
#if defined(_WIN64)
- unsigned long r;
- _BitScanReverse64(&r, x);
- return 63 - r;
+ unsigned long r;
+ _BitScanReverse64(&r, x);
+ return 63 - r;
#elif defined(_WIN32)
unsigned long x1 = (u32)x;
unsigned long x2 = (u32)(x >> 32);
@@ -72,31 +72,31 @@ u32 clz64(u64a x) {
}
_BitScanReverse(&r, (u32)x1);
return (u32)(63 - r);
-#else
- return (u32)__builtin_clzll(x);
-#endif
-}
-
-// CTZ (count trailing zero) implementations.
-static really_inline
-u32 ctz32(u32 x) {
- assert(x); // behaviour not defined for x == 0
-#if defined(_WIN32)
- unsigned long r;
- _BitScanForward(&r, x);
- return r;
-#else
- return (u32)__builtin_ctz(x);
-#endif
-}
-
-static really_inline
-u32 ctz64(u64a x) {
- assert(x); // behaviour not defined for x == 0
+#else
+ return (u32)__builtin_clzll(x);
+#endif
+}
+
+// CTZ (count trailing zero) implementations.
+static really_inline
+u32 ctz32(u32 x) {
+ assert(x); // behaviour not defined for x == 0
+#if defined(_WIN32)
+ unsigned long r;
+ _BitScanForward(&r, x);
+ return r;
+#else
+ return (u32)__builtin_ctz(x);
+#endif
+}
+
+static really_inline
+u32 ctz64(u64a x) {
+ assert(x); // behaviour not defined for x == 0
#if defined(_WIN64)
- unsigned long r;
- _BitScanForward64(&r, x);
- return r;
+ unsigned long r;
+ _BitScanForward64(&r, x);
+ return r;
#elif defined(_WIN32)
unsigned long r;
if (_BitScanForward(&r, (u32)x)) {
@@ -104,328 +104,328 @@ u32 ctz64(u64a x) {
}
_BitScanForward(&r, x >> 32);
return (u32)(r + 32);
-#else
- return (u32)__builtin_ctzll(x);
-#endif
-}
-
-static really_inline
-u32 lg2(u32 x) {
- if (!x) {
- return 0;
- }
- return 31 - clz32(x);
-}
-
-static really_inline
-u64a lg2_64(u64a x) {
- if (!x) {
- return 0;
- }
- return 63 - clz64(x);
-}
-
-static really_inline
-u32 findAndClearLSB_32(u32 *v) {
- assert(*v != 0); // behaviour not defined in this case
-#ifndef NO_ASM
- u32 val = *v, offset;
- __asm__ ("bsf %1, %0\n"
- "btr %0, %1\n"
- : "=r" (offset), "=r" (val)
- : "1" (val));
- *v = val;
-#else
- u32 val = *v;
- u32 offset = ctz32(val);
- *v = val & (val - 1);
-#endif
-
- assert(offset < 32);
- return offset;
-}
-
-static really_inline
-u32 findAndClearLSB_64(u64a *v) {
- assert(*v != 0); // behaviour not defined in this case
-
-#ifdef ARCH_64_BIT
-#if defined(ARCH_X86_64) && !defined(NO_ASM)
- u64a val = *v, offset;
- __asm__ ("bsfq %1, %0\n"
- "btrq %0, %1\n"
- : "=r" (offset), "=r" (val)
- : "1" (val));
- *v = val;
-#else
- // generic variant using gcc's builtin on 64-bit
- u64a val = *v, offset;
- offset = ctz64(val);
- *v = val & (val - 1);
-#endif // ARCH_X86_64
-#else
- // fall back to doing things with two 32-bit cases, since gcc-4.1 doesn't
- // inline calls to __builtin_ctzll
+#else
+ return (u32)__builtin_ctzll(x);
+#endif
+}
+
+static really_inline
+u32 lg2(u32 x) {
+ if (!x) {
+ return 0;
+ }
+ return 31 - clz32(x);
+}
+
+static really_inline
+u64a lg2_64(u64a x) {
+ if (!x) {
+ return 0;
+ }
+ return 63 - clz64(x);
+}
+
+static really_inline
+u32 findAndClearLSB_32(u32 *v) {
+ assert(*v != 0); // behaviour not defined in this case
+#ifndef NO_ASM
+ u32 val = *v, offset;
+ __asm__ ("bsf %1, %0\n"
+ "btr %0, %1\n"
+ : "=r" (offset), "=r" (val)
+ : "1" (val));
+ *v = val;
+#else
+ u32 val = *v;
+ u32 offset = ctz32(val);
+ *v = val & (val - 1);
+#endif
+
+ assert(offset < 32);
+ return offset;
+}
+
+static really_inline
+u32 findAndClearLSB_64(u64a *v) {
+ assert(*v != 0); // behaviour not defined in this case
+
+#ifdef ARCH_64_BIT
+#if defined(ARCH_X86_64) && !defined(NO_ASM)
+ u64a val = *v, offset;
+ __asm__ ("bsfq %1, %0\n"
+ "btrq %0, %1\n"
+ : "=r" (offset), "=r" (val)
+ : "1" (val));
+ *v = val;
+#else
+ // generic variant using gcc's builtin on 64-bit
+ u64a val = *v, offset;
+ offset = ctz64(val);
+ *v = val & (val - 1);
+#endif // ARCH_X86_64
+#else
+ // fall back to doing things with two 32-bit cases, since gcc-4.1 doesn't
+ // inline calls to __builtin_ctzll
u32 v1 = (u32)*v;
u32 v2 = (u32)(*v >> 32);
- u32 offset;
- if (v1) {
- offset = findAndClearLSB_32(&v1);
- *v = (u64a)v1 | ((u64a)v2 << 32);
- } else {
- offset = findAndClearLSB_32(&v2) + 32;
- *v = (u64a)v2 << 32;
- }
-#endif
-
- assert(offset < 64);
- return (u32)offset;
-}
-
-static really_inline
-u32 findAndClearMSB_32(u32 *v) {
- assert(*v != 0); // behaviour not defined in this case
-#ifndef NO_ASM
- u32 val = *v, offset;
- __asm__ ("bsr %1, %0\n"
- "btr %0, %1\n"
- : "=r" (offset), "=r" (val)
- : "1" (val));
- *v = val;
-#else
- u32 val = *v;
- u32 offset = 31 - clz32(val);
- *v = val & ~(1 << offset);
-#endif
- assert(offset < 32);
- return offset;
-}
-
-static really_inline
-u32 findAndClearMSB_64(u64a *v) {
- assert(*v != 0); // behaviour not defined in this case
-
-#ifdef ARCH_64_BIT
-#if defined(ARCH_X86_64) && !defined(NO_ASM)
- u64a val = *v, offset;
- __asm__ ("bsrq %1, %0\n"
- "btrq %0, %1\n"
- : "=r" (offset), "=r" (val)
- : "1" (val));
- *v = val;
-#else
- // generic variant using gcc's builtin on 64-bit
- u64a val = *v, offset;
- offset = 63 - clz64(val);
- *v = val & ~(1ULL << offset);
-#endif // ARCH_X86_64
-#else
- // fall back to doing things with two 32-bit cases, since gcc-4.1 doesn't
- // inline calls to __builtin_ctzll
+ u32 offset;
+ if (v1) {
+ offset = findAndClearLSB_32(&v1);
+ *v = (u64a)v1 | ((u64a)v2 << 32);
+ } else {
+ offset = findAndClearLSB_32(&v2) + 32;
+ *v = (u64a)v2 << 32;
+ }
+#endif
+
+ assert(offset < 64);
+ return (u32)offset;
+}
+
+static really_inline
+u32 findAndClearMSB_32(u32 *v) {
+ assert(*v != 0); // behaviour not defined in this case
+#ifndef NO_ASM
+ u32 val = *v, offset;
+ __asm__ ("bsr %1, %0\n"
+ "btr %0, %1\n"
+ : "=r" (offset), "=r" (val)
+ : "1" (val));
+ *v = val;
+#else
+ u32 val = *v;
+ u32 offset = 31 - clz32(val);
+ *v = val & ~(1 << offset);
+#endif
+ assert(offset < 32);
+ return offset;
+}
+
+static really_inline
+u32 findAndClearMSB_64(u64a *v) {
+ assert(*v != 0); // behaviour not defined in this case
+
+#ifdef ARCH_64_BIT
+#if defined(ARCH_X86_64) && !defined(NO_ASM)
+ u64a val = *v, offset;
+ __asm__ ("bsrq %1, %0\n"
+ "btrq %0, %1\n"
+ : "=r" (offset), "=r" (val)
+ : "1" (val));
+ *v = val;
+#else
+ // generic variant using gcc's builtin on 64-bit
+ u64a val = *v, offset;
+ offset = 63 - clz64(val);
+ *v = val & ~(1ULL << offset);
+#endif // ARCH_X86_64
+#else
+ // fall back to doing things with two 32-bit cases, since gcc-4.1 doesn't
+ // inline calls to __builtin_ctzll
u32 v1 = (u32)*v;
- u32 v2 = (*v >> 32);
- u32 offset;
- if (v2) {
- offset = findAndClearMSB_32(&v2) + 32;
- *v = ((u64a)v2 << 32) | (u64a)v1;
- } else {
- offset = findAndClearMSB_32(&v1);
- *v = (u64a)v1;
- }
-#endif
-
- assert(offset < 64);
- return (u32)offset;
-}
-
-static really_inline
-u32 compress32(u32 x, u32 m) {
+ u32 v2 = (*v >> 32);
+ u32 offset;
+ if (v2) {
+ offset = findAndClearMSB_32(&v2) + 32;
+ *v = ((u64a)v2 << 32) | (u64a)v1;
+ } else {
+ offset = findAndClearMSB_32(&v1);
+ *v = (u64a)v1;
+ }
+#endif
+
+ assert(offset < 64);
+ return (u32)offset;
+}
+
+static really_inline
+u32 compress32(u32 x, u32 m) {
#if defined(HAVE_BMI2)
- // BMI2 has a single instruction for this operation.
- return _pext_u32(x, m);
-#else
-
- // Return zero quickly on trivial cases
- if ((x & m) == 0) {
- return 0;
- }
-
- u32 mk, mp, mv, t;
-
- x &= m; // clear irrelevant bits
-
- mk = ~m << 1; // we will count 0's to right
- for (u32 i = 0; i < 5; i++) {
- mp = mk ^ (mk << 1);
- mp ^= mp << 2;
- mp ^= mp << 4;
- mp ^= mp << 8;
- mp ^= mp << 16;
-
- mv = mp & m; // bits to move
- m = (m ^ mv) | (mv >> (1 << i)); // compress m
- t = x & mv;
- x = (x ^ t) | (t >> (1 << i)); // compress x
- mk = mk & ~mp;
- }
-
- return x;
-#endif
-}
-
-static really_inline
-u64a compress64(u64a x, u64a m) {
+ // BMI2 has a single instruction for this operation.
+ return _pext_u32(x, m);
+#else
+
+ // Return zero quickly on trivial cases
+ if ((x & m) == 0) {
+ return 0;
+ }
+
+ u32 mk, mp, mv, t;
+
+ x &= m; // clear irrelevant bits
+
+ mk = ~m << 1; // we will count 0's to right
+ for (u32 i = 0; i < 5; i++) {
+ mp = mk ^ (mk << 1);
+ mp ^= mp << 2;
+ mp ^= mp << 4;
+ mp ^= mp << 8;
+ mp ^= mp << 16;
+
+ mv = mp & m; // bits to move
+ m = (m ^ mv) | (mv >> (1 << i)); // compress m
+ t = x & mv;
+ x = (x ^ t) | (t >> (1 << i)); // compress x
+ mk = mk & ~mp;
+ }
+
+ return x;
+#endif
+}
+
+static really_inline
+u64a compress64(u64a x, u64a m) {
#if defined(ARCH_X86_64) && defined(HAVE_BMI2)
- // BMI2 has a single instruction for this operation.
- return _pext_u64(x, m);
-#else
-
- // Return zero quickly on trivial cases
- if ((x & m) == 0) {
- return 0;
- }
-
- u64a mk, mp, mv, t;
-
- x &= m; // clear irrelevant bits
-
- mk = ~m << 1; // we will count 0's to right
- for (u32 i = 0; i < 6; i++) {
- mp = mk ^ (mk << 1);
- mp ^= mp << 2;
- mp ^= mp << 4;
- mp ^= mp << 8;
- mp ^= mp << 16;
- mp ^= mp << 32;
-
- mv = mp & m; // bits to move
- m = (m ^ mv) | (mv >> (1 << i)); // compress m
- t = x & mv;
- x = (x ^ t) | (t >> (1 << i)); // compress x
- mk = mk & ~mp;
- }
-
- return x;
-#endif
-}
-
-static really_inline
-u32 expand32(u32 x, u32 m) {
+ // BMI2 has a single instruction for this operation.
+ return _pext_u64(x, m);
+#else
+
+ // Return zero quickly on trivial cases
+ if ((x & m) == 0) {
+ return 0;
+ }
+
+ u64a mk, mp, mv, t;
+
+ x &= m; // clear irrelevant bits
+
+ mk = ~m << 1; // we will count 0's to right
+ for (u32 i = 0; i < 6; i++) {
+ mp = mk ^ (mk << 1);
+ mp ^= mp << 2;
+ mp ^= mp << 4;
+ mp ^= mp << 8;
+ mp ^= mp << 16;
+ mp ^= mp << 32;
+
+ mv = mp & m; // bits to move
+ m = (m ^ mv) | (mv >> (1 << i)); // compress m
+ t = x & mv;
+ x = (x ^ t) | (t >> (1 << i)); // compress x
+ mk = mk & ~mp;
+ }
+
+ return x;
+#endif
+}
+
+static really_inline
+u32 expand32(u32 x, u32 m) {
#if defined(HAVE_BMI2)
- // BMI2 has a single instruction for this operation.
- return _pdep_u32(x, m);
-#else
-
- // Return zero quickly on trivial cases
- if (!x || !m) {
- return 0;
- }
-
- u32 m0, mk, mp, mv, t;
- u32 array[5];
-
- m0 = m; // save original mask
- mk = ~m << 1; // we will count 0's to right
-
- for (int i = 0; i < 5; i++) {
- mp = mk ^ (mk << 1); // parallel suffix
- mp = mp ^ (mp << 2);
- mp = mp ^ (mp << 4);
- mp = mp ^ (mp << 8);
- mp = mp ^ (mp << 16);
- mv = mp & m; // bits to move
- array[i] = mv;
- m = (m ^ mv) | (mv >> (1 << i)); // compress m
- mk = mk & ~mp;
- }
-
- for (int i = 4; i >= 0; i--) {
- mv = array[i];
- t = x << (1 << i);
- x = (x & ~mv) | (t & mv);
- }
-
- return x & m0; // clear out extraneous bits
-#endif
-}
-
-static really_inline
-u64a expand64(u64a x, u64a m) {
+ // BMI2 has a single instruction for this operation.
+ return _pdep_u32(x, m);
+#else
+
+ // Return zero quickly on trivial cases
+ if (!x || !m) {
+ return 0;
+ }
+
+ u32 m0, mk, mp, mv, t;
+ u32 array[5];
+
+ m0 = m; // save original mask
+ mk = ~m << 1; // we will count 0's to right
+
+ for (int i = 0; i < 5; i++) {
+ mp = mk ^ (mk << 1); // parallel suffix
+ mp = mp ^ (mp << 2);
+ mp = mp ^ (mp << 4);
+ mp = mp ^ (mp << 8);
+ mp = mp ^ (mp << 16);
+ mv = mp & m; // bits to move
+ array[i] = mv;
+ m = (m ^ mv) | (mv >> (1 << i)); // compress m
+ mk = mk & ~mp;
+ }
+
+ for (int i = 4; i >= 0; i--) {
+ mv = array[i];
+ t = x << (1 << i);
+ x = (x & ~mv) | (t & mv);
+ }
+
+ return x & m0; // clear out extraneous bits
+#endif
+}
+
+static really_inline
+u64a expand64(u64a x, u64a m) {
#if defined(ARCH_X86_64) && defined(HAVE_BMI2)
- // BMI2 has a single instruction for this operation.
- return _pdep_u64(x, m);
-#else
-
- // Return zero quickly on trivial cases
- if (!x || !m) {
- return 0;
- }
-
- u64a m0, mk, mp, mv, t;
- u64a array[6];
-
- m0 = m; // save original mask
- mk = ~m << 1; // we will count 0's to right
-
- for (int i = 0; i < 6; i++) {
- mp = mk ^ (mk << 1); // parallel suffix
- mp = mp ^ (mp << 2);
- mp = mp ^ (mp << 4);
- mp = mp ^ (mp << 8);
- mp = mp ^ (mp << 16);
- mp = mp ^ (mp << 32);
- mv = mp & m; // bits to move
- array[i] = mv;
- m = (m ^ mv) | (mv >> (1 << i)); // compress m
- mk = mk & ~mp;
- }
-
- for (int i = 5; i >= 0; i--) {
- mv = array[i];
- t = x << (1 << i);
- x = (x & ~mv) | (t & mv);
- }
-
- return x & m0; // clear out extraneous bits
-#endif
-}
-
-
-/* returns the first set bit after begin (if not ~0U). If no bit is set after
- * begin returns ~0U
- */
-static really_inline
-u32 bf64_iterate(u64a bitfield, u32 begin) {
- if (begin != ~0U) {
- /* switch off all bits at or below begin. Note: not legal to shift by
-     * the size of the datatype or larger. */
- assert(begin <= 63);
- bitfield &= ~((2ULL << begin) - 1);
- }
-
- if (!bitfield) {
- return ~0U;
- }
-
- return ctz64(bitfield);
-}
-
-static really_inline
-char bf64_set(u64a *bitfield, u32 i) {
- assert(i < 64);
- u64a mask = 1ULL << i;
- char was_set = !!(*bitfield & mask);
- *bitfield |= mask;
-
- return was_set;
-}
-
-static really_inline
-void bf64_unset(u64a *bitfield, u32 i) {
- assert(i < 64);
- *bitfield &= ~(1ULL << i);
-}
-
+ // BMI2 has a single instruction for this operation.
+ return _pdep_u64(x, m);
+#else
+
+ // Return zero quickly on trivial cases
+ if (!x || !m) {
+ return 0;
+ }
+
+ u64a m0, mk, mp, mv, t;
+ u64a array[6];
+
+ m0 = m; // save original mask
+ mk = ~m << 1; // we will count 0's to right
+
+ for (int i = 0; i < 6; i++) {
+ mp = mk ^ (mk << 1); // parallel suffix
+ mp = mp ^ (mp << 2);
+ mp = mp ^ (mp << 4);
+ mp = mp ^ (mp << 8);
+ mp = mp ^ (mp << 16);
+ mp = mp ^ (mp << 32);
+ mv = mp & m; // bits to move
+ array[i] = mv;
+ m = (m ^ mv) | (mv >> (1 << i)); // compress m
+ mk = mk & ~mp;
+ }
+
+ for (int i = 5; i >= 0; i--) {
+ mv = array[i];
+ t = x << (1 << i);
+ x = (x & ~mv) | (t & mv);
+ }
+
+ return x & m0; // clear out extraneous bits
+#endif
+}
+
+
+/* Returns the first set bit strictly after begin (pass ~0U to scan from
+ * bit 0). If no bit is set after begin, returns ~0U.
+ */
+static really_inline
+u32 bf64_iterate(u64a bitfield, u32 begin) {
+ if (begin != ~0U) {
+        /* switch off all bits at or below begin. Note: it is not legal to
+         * shift by the size of the datatype or larger. */
+ assert(begin <= 63);
+ bitfield &= ~((2ULL << begin) - 1);
+ }
+
+ if (!bitfield) {
+ return ~0U;
+ }
+
+ return ctz64(bitfield);
+}
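bf64_iterate() is written so that seeding with ~0U starts the scan at bit 0, and feeding each result back in visits every set bit in ascending order. A hedged usage sketch:

static void visit_set_bits(u64a bf) {
    for (u32 i = bf64_iterate(bf, ~0U); i != ~0U; i = bf64_iterate(bf, i)) {
        /* bit i of bf is set; process it here */
    }
}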
+
+static really_inline
+char bf64_set(u64a *bitfield, u32 i) {
+ assert(i < 64);
+ u64a mask = 1ULL << i;
+ char was_set = !!(*bitfield & mask);
+ *bitfield |= mask;
+
+ return was_set;
+}
+
+static really_inline
+void bf64_unset(u64a *bitfield, u32 i) {
+ assert(i < 64);
+ *bitfield &= ~(1ULL << i);
+}
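Note that bf64_set() returns the bit's previous state, which lets callers detect a first-time insertion without a separate test. An illustrative sketch:

static void bf64_example(void) {
    u64a bf = 0;
    char was_set = bf64_set(&bf, 3); /* was_set == 0, bf == 0x8 */
    was_set = bf64_set(&bf, 3);      /* was_set == 1, bf unchanged */
    bf64_unset(&bf, 3);              /* bf == 0 again */
    (void)was_set;
}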
+
static really_inline
u32 rank_in_mask32(u32 mask, u32 bit) {
assert(bit < sizeof(u32) * 8);
@@ -489,4 +489,4 @@ u64a pdep64(u64a x, u64a mask) {
}
#endif
-#endif // BITUTILS_H
+#endif // BITUTILS_H
diff --git a/contrib/libs/hyperscan/src/util/boundary_reports.h b/contrib/libs/hyperscan/src/util/boundary_reports.h
index b2bb1c9b0a..61ce42b043 100644
--- a/contrib/libs/hyperscan/src/util/boundary_reports.h
+++ b/contrib/libs/hyperscan/src/util/boundary_reports.h
@@ -1,51 +1,51 @@
-/*
+/*
* Copyright (c) 2015-2017, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef BOUNDARY_REPORTS_H
-#define BOUNDARY_REPORTS_H
-
-#include "ue2common.h"
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef BOUNDARY_REPORTS_H
+#define BOUNDARY_REPORTS_H
+
+#include "ue2common.h"
#include "util/noncopyable.h"
-
-#include <set>
-
-namespace ue2 {
-
+
+#include <set>
+
+namespace ue2 {
+
struct BoundaryReports : noncopyable {
- std::set<ReportID> report_at_0; /* set of internal reports to fire
- * unconditionally at offset 0 */
- std::set<ReportID> report_at_0_eod; /* set of internal reports to fire
- * unconditionally at offset 0 if it is
- * eod */
- std::set<ReportID> report_at_eod; /* set of internal reports to fire
- * unconditionally at eod */
-};
-
-} // namespace ue2
-
-#endif
+ std::set<ReportID> report_at_0; /* set of internal reports to fire
+ * unconditionally at offset 0 */
+ std::set<ReportID> report_at_0_eod; /* set of internal reports to fire
+ * unconditionally at offset 0 if it is
+ * eod */
+ std::set<ReportID> report_at_eod; /* set of internal reports to fire
+ * unconditionally at eod */
+};
+
+} // namespace ue2
+
+#endif
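As a rough illustration of how the three sets partition boundary behaviour (the report IDs below are invented for the example):

void boundary_example() {
    ue2::BoundaryReports br;
    br.report_at_0.insert(10);     // fires whenever scanning starts at offset 0
    br.report_at_eod.insert(11);   // fires at end of data
    br.report_at_0_eod.insert(12); // fires only when the buffer is empty
}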
diff --git a/contrib/libs/hyperscan/src/util/charreach.cpp b/contrib/libs/hyperscan/src/util/charreach.cpp
index 9116b719db..28379c2759 100644
--- a/contrib/libs/hyperscan/src/util/charreach.cpp
+++ b/contrib/libs/hyperscan/src/util/charreach.cpp
@@ -1,149 +1,149 @@
-/*
+/*
* Copyright (c) 2015-2016, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \file
- * \brief Class for representing character reachability.
- *
- * This is a simple (but hopefully fast) class for representing 8-bit character
- * reachability, along with a bunch of useful operations.
- */
-#include "ue2common.h"
-#include "charreach.h"
-#include "charreach_util.h"
-#include "compare.h"
-#include "unicode_def.h"
-
-#include <cassert>
-#include <string>
-
-namespace ue2 {
-
-/// Switch on the bits corresponding to the characters in \a s.
-void CharReach::set(const std::string &s) {
- for (const auto &c : s) {
- set(c);
- }
-}
-
-/// Do we only contain bits representing alpha characters?
-bool CharReach::isAlpha() const {
- if (none()) {
- return false;
- }
- for (size_t i = find_first(); i != npos; i = find_next(i)) {
- if (!ourisalpha((char)i)) {
- return false;
- }
- }
- return true;
-}
-
-/// Do we represent an uppercase/lowercase pair?
-bool CharReach::isCaselessChar() const {
- if (count() != 2) {
- return false;
- }
- size_t first = find_first();
- size_t second = find_next(first);
- assert(first != npos && second != npos);
- return (char)first == mytoupper((char)second);
-}
-
-/// Do we represent a cheapskate caseless set?
-bool CharReach::isBit5Insensitive() const {
- for (size_t i = find_first(); i != npos; i = find_next(i)) {
- if (!test((char)i ^ 0x20)) {
- return false;
- }
- }
- return true;
-}
-
-/// Return a string containing the characters that are switched on.
-std::string CharReach::to_string() const {
- std::string s;
- for (size_t i = find_first(); i != npos; i = find_next(i)) {
- s += (char)i;
- }
- return s;
-}
-
-/** \brief True iff there is a non-empty intersection between \a a and \a b */
-bool overlaps(const CharReach &a, const CharReach &b) {
- return (a & b).any();
-}
-
-/** \brief True iff \a small is a subset of \a big. */
-bool isSubsetOf(const CharReach &small, const CharReach &big) {
- return small.isSubsetOf(big);
-}
-
-/// True if this character class is a subset of \a other.
-bool CharReach::isSubsetOf(const CharReach &other) const {
- return (bits & other.bits) == bits;
-}
-
-void make_caseless(CharReach *cr) {
- for (char c = 'A'; c <= 'Z'; c++) {
- if (cr->test(c) || cr->test(mytolower(c))) {
- cr->set(c);
- cr->set(mytolower(c));
- }
- }
-}
-
-bool isutf8ascii(const CharReach &cr) {
- return (cr & ~CharReach(0x0, 0x7f)).none();
-}
-
-bool isutf8start(const CharReach &cr) {
- return (cr & CharReach(0x0, UTF_CONT_MAX)).none();
-}
-
-void fill_bitvector(const CharReach &cr, u8 *bits) {
- assert(bits);
- std::fill_n(bits, 32, 0);
- for (size_t i = cr.find_first(); i != cr.npos; i = cr.find_next(i)) {
- bits[i / 8U] |= (u8)1U << (i % 8U);
- }
-}
-
-void make_and_cmp_mask(const CharReach &cr, u8 *and_mask, u8 *cmp_mask) {
- u8 lo = 0xff;
- u8 hi = 0;
-
- for (size_t c = cr.find_first(); c != cr.npos; c = cr.find_next(c)) {
- hi |= (u8)c;
- lo &= (u8)c;
- }
-
- *and_mask = ~(lo ^ hi);
- *cmp_mask = lo;
-}
-
-} // namespace ue2
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * \brief Class for representing character reachability.
+ *
+ * This is a simple (but hopefully fast) class for representing 8-bit character
+ * reachability, along with a bunch of useful operations.
+ */
+#include "ue2common.h"
+#include "charreach.h"
+#include "charreach_util.h"
+#include "compare.h"
+#include "unicode_def.h"
+
+#include <cassert>
+#include <string>
+
+namespace ue2 {
+
+/// Switch on the bits corresponding to the characters in \a s.
+void CharReach::set(const std::string &s) {
+ for (const auto &c : s) {
+ set(c);
+ }
+}
+
+/// Do we only contain bits representing alpha characters?
+bool CharReach::isAlpha() const {
+ if (none()) {
+ return false;
+ }
+ for (size_t i = find_first(); i != npos; i = find_next(i)) {
+ if (!ourisalpha((char)i)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+/// Do we represent an uppercase/lowercase pair?
+bool CharReach::isCaselessChar() const {
+ if (count() != 2) {
+ return false;
+ }
+ size_t first = find_first();
+ size_t second = find_next(first);
+ assert(first != npos && second != npos);
+ return (char)first == mytoupper((char)second);
+}
+
+/// Do we represent a cheapskate caseless set?
+bool CharReach::isBit5Insensitive() const {
+ for (size_t i = find_first(); i != npos; i = find_next(i)) {
+ if (!test((char)i ^ 0x20)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+/// Return a string containing the characters that are switched on.
+std::string CharReach::to_string() const {
+ std::string s;
+ for (size_t i = find_first(); i != npos; i = find_next(i)) {
+ s += (char)i;
+ }
+ return s;
+}
+
+/** \brief True iff there is a non-empty intersection between \a a and \a b */
+bool overlaps(const CharReach &a, const CharReach &b) {
+ return (a & b).any();
+}
+
+/** \brief True iff \a small is a subset of \a big. */
+bool isSubsetOf(const CharReach &small, const CharReach &big) {
+ return small.isSubsetOf(big);
+}
+
+/// True if this character class is a subset of \a other.
+bool CharReach::isSubsetOf(const CharReach &other) const {
+ return (bits & other.bits) == bits;
+}
+
+void make_caseless(CharReach *cr) {
+ for (char c = 'A'; c <= 'Z'; c++) {
+ if (cr->test(c) || cr->test(mytolower(c))) {
+ cr->set(c);
+ cr->set(mytolower(c));
+ }
+ }
+}
+
+bool isutf8ascii(const CharReach &cr) {
+ return (cr & ~CharReach(0x0, 0x7f)).none();
+}
+
+bool isutf8start(const CharReach &cr) {
+ return (cr & CharReach(0x0, UTF_CONT_MAX)).none();
+}
+
+void fill_bitvector(const CharReach &cr, u8 *bits) {
+ assert(bits);
+ std::fill_n(bits, 32, 0);
+ for (size_t i = cr.find_first(); i != cr.npos; i = cr.find_next(i)) {
+ bits[i / 8U] |= (u8)1U << (i % 8U);
+ }
+}
+
+void make_and_cmp_mask(const CharReach &cr, u8 *and_mask, u8 *cmp_mask) {
+ u8 lo = 0xff;
+ u8 hi = 0;
+
+ for (size_t c = cr.find_first(); c != cr.npos; c = cr.find_next(c)) {
+ hi |= (u8)c;
+ lo &= (u8)c;
+ }
+
+ *and_mask = ~(lo ^ hi);
+ *cmp_mask = lo;
+}
+
+} // namespace ue2
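A worked example of make_and_cmp_mask() may help: the masks capture only the bit positions on which every member of the class agrees, so some outside characters can slip through, as the note in charreach_util.h warns. A hedged sketch, assuming the ue2 headers are in scope:

#include <cassert>

static void and_cmp_example() {
    ue2::CharReach cr(std::string("ab")); // {'a','b'} = {0x61, 0x62}
    u8 and_mask, cmp_mask;
    ue2::make_and_cmp_mask(cr, &and_mask, &cmp_mask);
    // lo = 0x61 & 0x62 = 0x60, hi = 0x61 | 0x62 = 0x63,
    // so and_mask = ~(lo ^ hi) = 0xfc and cmp_mask = 0x60.
    assert(and_mask == 0xfc && cmp_mask == 0x60);
    assert(('a' & and_mask) == cmp_mask);
    // '`' (0x60) is not in cr but still passes the and/cmp check.
    assert(('`' & and_mask) == cmp_mask);
}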
diff --git a/contrib/libs/hyperscan/src/util/charreach.h b/contrib/libs/hyperscan/src/util/charreach.h
index f6d3a2af3e..f25e940769 100644
--- a/contrib/libs/hyperscan/src/util/charreach.h
+++ b/contrib/libs/hyperscan/src/util/charreach.h
@@ -1,203 +1,203 @@
-/*
+/*
* Copyright (c) 2015-2017, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \file
- * \brief Class for representing character reachability.
- *
- * This is a simple (but hopefully fast) class for representing 8-bit character
- * reachability, along with a bunch of useful operations.
- */
-
-#ifndef NG_CHARREACH_H
-#define NG_CHARREACH_H
-
-#include "ue2common.h"
-#include "util/bitfield.h"
-
-#include <string>
-
-namespace ue2 {
-
-class CharReach {
-private:
- /// Underlying storage.
- ue2::bitfield<256> bits;
-
-public:
- static constexpr size_t npos = decltype(bits)::npos; //!< One past the max value.
-
- /// Empty constructor.
- CharReach() {}
-
- /// Constructor for a character class containing a single char.
- explicit CharReach(unsigned char c) { set(c); }
-
- /// Constructor for a character class representing a contiguous range of
- /// chars, inclusive.
- CharReach(unsigned char from, unsigned char to) { setRange(from, to); }
-
- /// Constructor for a character class based on the set of chars in a
- /// string.
- explicit CharReach(const std::string &str) { set(str); }
-
- /// Returns total capacity.
- static constexpr size_t size() { return npos; }
-
- /// Returns a CharReach with complete reachability (a "dot").
- static CharReach dot() { return CharReach(0, 255); }
-
- /// Complete bitset equality.
- bool operator==(const CharReach &a) const { return bits == a.bits; }
-
- /// Inequality.
- bool operator!=(const CharReach &a) const { return bits != a.bits; }
-
- /// Ordering.
- bool operator<(const CharReach &a) const { return bits < a.bits; }
-
- /// Set all bits.
- void setall() { bits.setall(); }
-
- /// Clear all bits.
- void clear() { bits.clear(); }
-
- /// Clear bit N.
- void clear(unsigned char n) { bits.clear(n); }
-
- /// Set bit N.
- void set(unsigned char n) { bits.set(n); }
-
- /// Test bit N.
- bool test(unsigned char n) const { return bits.test(n); }
-
- /// Flip bit N.
- void flip(unsigned char n) { bits.flip(n); }
-
- /// Flip all bits.
- void flip() { bits.flip(); }
-
-    // Switch on the bits in the range [from, to], inclusive.
- void setRange(unsigned char from, unsigned char to) {
- bits.set_range(from, to);
- }
-
- // Switch on the bits corresponding to the characters in \a s.
- void set(const std::string &s);
-
- /// Returns number of bits set on.
- size_t count() const { return bits.count(); }
-
- /// Are no bits set?
- bool none() const { return bits.none(); }
-
- /// Is any bit set?
- bool any() const { return bits.any(); }
-
- /// Are all bits set?
- bool all() const { return bits.all(); }
-
- /// Returns first bit set, or CharReach::npos if none set.
- size_t find_first() const { return bits.find_first(); }
-
- /// Returns last bit set, or CharReach::npos if none set.
- size_t find_last() const { return bits.find_last(); }
-
-    /// Returns next bit set, or CharReach::npos if none set after \a last.
- size_t find_next(size_t last) const { return bits.find_next(last); }
-
- /// Returns (zero-based) N'th bit set, or CharReach::npos if fewer than
- /// N + 1 bits are on.
- size_t find_nth(size_t n) const { return bits.find_nth(n); }
-
- /// Bitwise OR.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * \brief Class for representing character reachability.
+ *
+ * This is a simple (but hopefully fast) class for representing 8-bit character
+ * reachability, along with a bunch of useful operations.
+ */
+
+#ifndef NG_CHARREACH_H
+#define NG_CHARREACH_H
+
+#include "ue2common.h"
+#include "util/bitfield.h"
+
+#include <string>
+
+namespace ue2 {
+
+class CharReach {
+private:
+ /// Underlying storage.
+ ue2::bitfield<256> bits;
+
+public:
+ static constexpr size_t npos = decltype(bits)::npos; //!< One past the max value.
+
+ /// Empty constructor.
+ CharReach() {}
+
+ /// Constructor for a character class containing a single char.
+ explicit CharReach(unsigned char c) { set(c); }
+
+ /// Constructor for a character class representing a contiguous range of
+ /// chars, inclusive.
+ CharReach(unsigned char from, unsigned char to) { setRange(from, to); }
+
+ /// Constructor for a character class based on the set of chars in a
+ /// string.
+ explicit CharReach(const std::string &str) { set(str); }
+
+ /// Returns total capacity.
+ static constexpr size_t size() { return npos; }
+
+ /// Returns a CharReach with complete reachability (a "dot").
+ static CharReach dot() { return CharReach(0, 255); }
+
+ /// Complete bitset equality.
+ bool operator==(const CharReach &a) const { return bits == a.bits; }
+
+ /// Inequality.
+ bool operator!=(const CharReach &a) const { return bits != a.bits; }
+
+ /// Ordering.
+ bool operator<(const CharReach &a) const { return bits < a.bits; }
+
+ /// Set all bits.
+ void setall() { bits.setall(); }
+
+ /// Clear all bits.
+ void clear() { bits.clear(); }
+
+ /// Clear bit N.
+ void clear(unsigned char n) { bits.clear(n); }
+
+ /// Set bit N.
+ void set(unsigned char n) { bits.set(n); }
+
+ /// Test bit N.
+ bool test(unsigned char n) const { return bits.test(n); }
+
+ /// Flip bit N.
+ void flip(unsigned char n) { bits.flip(n); }
+
+ /// Flip all bits.
+ void flip() { bits.flip(); }
+
+    // Switch on the bits in the range [from, to], inclusive.
+ void setRange(unsigned char from, unsigned char to) {
+ bits.set_range(from, to);
+ }
+
+ // Switch on the bits corresponding to the characters in \a s.
+ void set(const std::string &s);
+
+ /// Returns number of bits set on.
+ size_t count() const { return bits.count(); }
+
+ /// Are no bits set?
+ bool none() const { return bits.none(); }
+
+ /// Is any bit set?
+ bool any() const { return bits.any(); }
+
+ /// Are all bits set?
+ bool all() const { return bits.all(); }
+
+ /// Returns first bit set, or CharReach::npos if none set.
+ size_t find_first() const { return bits.find_first(); }
+
+ /// Returns last bit set, or CharReach::npos if none set.
+ size_t find_last() const { return bits.find_last(); }
+
+    /// Returns next bit set, or CharReach::npos if none set after \a last.
+ size_t find_next(size_t last) const { return bits.find_next(last); }
+
+ /// Returns (zero-based) N'th bit set, or CharReach::npos if fewer than
+ /// N + 1 bits are on.
+ size_t find_nth(size_t n) const { return bits.find_nth(n); }
+
+ /// Bitwise OR.
CharReach operator|(const CharReach &a) const {
CharReach cr(*this);
cr.bits |= a.bits;
return cr;
}
-
- /// Bitwise OR-equals.
+
+ /// Bitwise OR-equals.
void operator|=(const CharReach &a) { bits |= a.bits; }
-
- /// Bitwise AND.
+
+ /// Bitwise AND.
CharReach operator&(const CharReach &a) const {
CharReach cr(*this);
cr.bits &= a.bits;
return cr;
}
-
- /// Bitwise AND-equals.
+
+ /// Bitwise AND-equals.
void operator&=(const CharReach &a) { bits &= a.bits; }
-
- /// Bitwise XOR.
+
+ /// Bitwise XOR.
CharReach operator^(const CharReach &a) const {
CharReach cr(*this);
cr.bits ^= a.bits;
return cr;
}
-
- /// Bitwise complement.
+
+ /// Bitwise complement.
CharReach operator~(void) const {
CharReach cr(*this);
cr.flip();
return cr;
}
-
- /// Do we only contain bits representing alpha characters?
- bool isAlpha() const;
-
- /// Do we represent an uppercase/lowercase pair?
- bool isCaselessChar() const;
-
- /// Do we represent a cheapskate caseless set?
- bool isBit5Insensitive() const;
-
- /// Return a string containing the characters that are switched on.
- std::string to_string() const;
-
- /// Hash of enabled bits.
- size_t hash() const { return bits.hash(); }
-
- /// True if this character class is a subset of \a other.
- bool isSubsetOf(const CharReach &other) const;
-};
-
-/** \brief True iff there is a non-empty intersection between \a a and \a b */
-bool overlaps(const CharReach &a, const CharReach &b);
-
-/** \brief True iff \a small is a subset of \a big. */
-bool isSubsetOf(const CharReach &small, const CharReach &big);
-
-bool isutf8ascii(const CharReach &cr);
-bool isutf8start(const CharReach &cr);
-
-} // namespace ue2
-
+
+ /// Do we only contain bits representing alpha characters?
+ bool isAlpha() const;
+
+ /// Do we represent an uppercase/lowercase pair?
+ bool isCaselessChar() const;
+
+ /// Do we represent a cheapskate caseless set?
+ bool isBit5Insensitive() const;
+
+ /// Return a string containing the characters that are switched on.
+ std::string to_string() const;
+
+ /// Hash of enabled bits.
+ size_t hash() const { return bits.hash(); }
+
+ /// True if this character class is a subset of \a other.
+ bool isSubsetOf(const CharReach &other) const;
+};
+
+/** \brief True iff there is a non-empty intersection between \a a and \a b */
+bool overlaps(const CharReach &a, const CharReach &b);
+
+/** \brief True iff \a small is a subset of \a big. */
+bool isSubsetOf(const CharReach &small, const CharReach &big);
+
+bool isutf8ascii(const CharReach &cr);
+bool isutf8start(const CharReach &cr);
+
+} // namespace ue2
+
namespace std {
template<>
@@ -209,4 +209,4 @@ struct hash<ue2::CharReach> {
} // namespace std
-#endif // NG_CHARREACH_H
+#endif // NG_CHARREACH_H
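To make the API above concrete, a small usage sketch (illustrative only, assuming the header is included):

#include <cstdio>

static void charreach_demo() {
    ue2::CharReach digits('0', '9');   // contiguous range, inclusive
    digits |= ue2::CharReach('_');     // union with a single character
    for (size_t i = digits.find_first(); i != ue2::CharReach::npos;
         i = digits.find_next(i)) {
        std::printf("%c", (char)i);    // prints 0123456789_
    }
}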
diff --git a/contrib/libs/hyperscan/src/util/charreach_util.h b/contrib/libs/hyperscan/src/util/charreach_util.h
index f0dc4227b0..0f2fbbb70c 100644
--- a/contrib/libs/hyperscan/src/util/charreach_util.h
+++ b/contrib/libs/hyperscan/src/util/charreach_util.h
@@ -1,57 +1,57 @@
-/*
- * Copyright (c) 2015, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef CHARREACH_UTIL_H
-#define CHARREACH_UTIL_H
-
+/*
+ * Copyright (c) 2015, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef CHARREACH_UTIL_H
+#define CHARREACH_UTIL_H
+
#include "ue2common.h"
-
-namespace ue2 {
-
+
+namespace ue2 {
+
class CharReach;
-
-void make_caseless(CharReach *cr);
-
-/**
- * \brief Fill a bitvector with the contents of the given CharReach.
- *
- * \a bits should point at an array of 32 bytes.
- */
-void fill_bitvector(const CharReach &cr, u8 *bits);
-
-/**
- * \brief Generate the and/cmp masks for checking the char reach.
- *
- * Any character c in cr will result in (c & and_mask) == cmp_mask being true.
- * Note: characters not in cr may also pass the and/cmp checks.
- */
-void make_and_cmp_mask(const CharReach &cr, u8 *and_mask, u8 *cmp_mask);
-
-} // namespace ue2
-
-#endif
+
+void make_caseless(CharReach *cr);
+
+/**
+ * \brief Fill a bitvector with the contents of the given CharReach.
+ *
+ * \a bits should point at an array of 32 bytes.
+ */
+void fill_bitvector(const CharReach &cr, u8 *bits);
+
+/**
+ * \brief Generate the and/cmp masks for checking the char reach.
+ *
+ * Any character c in cr will result in (c & and_mask) == cmp_mask being true.
+ * Note: characters not in cr may also pass the and/cmp checks.
+ */
+void make_and_cmp_mask(const CharReach &cr, u8 *and_mask, u8 *cmp_mask);
+
+} // namespace ue2
+
+#endif
diff --git a/contrib/libs/hyperscan/src/util/compare.h b/contrib/libs/hyperscan/src/util/compare.h
index eaa717a4c2..8717e5c1ee 100644
--- a/contrib/libs/hyperscan/src/util/compare.h
+++ b/contrib/libs/hyperscan/src/util/compare.h
@@ -1,183 +1,183 @@
-/*
+/*
* Copyright (c) 2015-2016, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef COMPARE_H
-#define COMPARE_H
-
-#include "unaligned.h"
-#include "ue2common.h"
-
-/* Our own definitions of tolower, toupper and isalpha are provided to prevent
- * us from going out to libc for these tests. */
-
-static really_inline
-char myisupper(const char c) {
- return ((c >= 'A') && (c <= 'Z'));
-}
-
-static really_inline
-char myislower(const char c) {
- return ((c >= 'a') && (c <= 'z'));
-}
-
-static really_inline
-char mytolower(const char c) {
- if (myisupper(c)) {
- return c + 0x20;
- }
- return c;
-}
-
-static really_inline
-char mytoupper(const char c) {
- if (myislower(c)) {
- return c - 0x20;
- }
- return c;
-}
-
-/* this is a slightly warped definition of `alpha'. What we really
- * mean is: does this character have different uppercase and lowercase forms?
- */
-static really_inline char ourisalpha(const char c) {
- return mytolower(c) != mytoupper(c);
-}
-
-static really_inline char ourisprint(const char c) {
- return c >= 0x20 && c <= 0x7e;
-}
-
-// Paul Hsieh's SWAR toupper; used because it doesn't
-// matter whether we go toupper or tolower. We should
-// probably change the other one.
-static really_inline
-u32 theirtoupper32(const u32 x) {
- u32 b = 0x80808080ul | x;
- u32 c = b - 0x61616161ul;
- u32 d = ~(b - 0x7b7b7b7bul);
- u32 e = (c & d) & (~x & 0x80808080ul);
- return x - (e >> 2);
-}
-
-// 64-bit variant.
-static really_inline
-u64a theirtoupper64(const u64a x) {
- u64a b = 0x8080808080808080ull | x;
- u64a c = b - 0x6161616161616161ull;
- u64a d = ~(b - 0x7b7b7b7b7b7b7b7bull);
- u64a e = (c & d) & (~x & 0x8080808080808080ull);
- u64a v = x - (e >> 2);
- return v;
-}
-
-static really_inline
-int cmpNocaseNaive(const u8 *p1, const u8 *p2, size_t len) {
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef COMPARE_H
+#define COMPARE_H
+
+#include "unaligned.h"
+#include "ue2common.h"
+
+/* Our own definitions of tolower, toupper and isalpha are provided to prevent
+ * us from going out to libc for these tests. */
+
+static really_inline
+char myisupper(const char c) {
+ return ((c >= 'A') && (c <= 'Z'));
+}
+
+static really_inline
+char myislower(const char c) {
+ return ((c >= 'a') && (c <= 'z'));
+}
+
+static really_inline
+char mytolower(const char c) {
+ if (myisupper(c)) {
+ return c + 0x20;
+ }
+ return c;
+}
+
+static really_inline
+char mytoupper(const char c) {
+ if (myislower(c)) {
+ return c - 0x20;
+ }
+ return c;
+}
+
+/* this is a slightly warped definition of `alpha'. What we really
+ * mean is: does this character have different uppercase and lowercase forms?
+ */
+static really_inline char ourisalpha(const char c) {
+ return mytolower(c) != mytoupper(c);
+}
+
+static really_inline char ourisprint(const char c) {
+ return c >= 0x20 && c <= 0x7e;
+}
+
+// Paul Hsieh's SWAR toupper; used because it doesn't
+// matter whether we go toupper or tolower. We should
+// probably change the other one.
+static really_inline
+u32 theirtoupper32(const u32 x) {
+ u32 b = 0x80808080ul | x;
+ u32 c = b - 0x61616161ul;
+ u32 d = ~(b - 0x7b7b7b7bul);
+ u32 e = (c & d) & (~x & 0x80808080ul);
+ return x - (e >> 2);
+}
+
+// 64-bit variant.
+static really_inline
+u64a theirtoupper64(const u64a x) {
+ u64a b = 0x8080808080808080ull | x;
+ u64a c = b - 0x6161616161616161ull;
+ u64a d = ~(b - 0x7b7b7b7b7b7b7b7bull);
+ u64a e = (c & d) & (~x & 0x8080808080808080ull);
+ u64a v = x - (e >> 2);
+ return v;
+}
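In the SWAR routines above, e ends up with bit 7 set in exactly those bytes that hold a lowercase letter, so (e >> 2) contributes 0x20 per such byte and the final subtraction clears bit 5. A hedged check of the expected behaviour:

#include <assert.h>

static void swar_toupper_example(void) {
    /* bytes 0x61 'a' and 0x62 'b' are upper-cased; 0x43 'C' and
     * 0x44 'D' pass through untouched */
    assert(theirtoupper32(0x61624344u) == 0x41424344u);
    /* non-letter bytes are unchanged */
    assert(theirtoupper32(0x31323334u) == 0x31323334u);
}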
+
+static really_inline
+int cmpNocaseNaive(const u8 *p1, const u8 *p2, size_t len) {
const u8 *pEnd = p1 + len;
- for (; p1 < pEnd; p1++, p2++) {
+ for (; p1 < pEnd; p1++, p2++) {
assert(!ourisalpha(*p2) || myisupper(*p2)); // Already upper-case.
if ((u8)mytoupper(*p1) != *p2) {
- return 1;
- }
- }
- return 0;
-}
-
-static really_inline
-int cmpCaseNaive(const u8 *p1, const u8 *p2, size_t len) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static really_inline
+int cmpCaseNaive(const u8 *p1, const u8 *p2, size_t len) {
const u8 *pEnd = p1 + len;
- for (; p1 < pEnd; p1++, p2++) {
- if (*p1 != *p2) {
- return 1;
- }
- }
- return 0;
-}
-
-#ifdef ARCH_64_BIT
-# define CMP_T u64a
-# define ULOAD(x) unaligned_load_u64a(x)
-# define TOUPPER(x) theirtoupper64(x)
-#else
-# define CMP_T u32
-# define ULOAD(x) unaligned_load_u32(x)
-# define TOUPPER(x) theirtoupper32(x)
-#endif
-
-#define CMP_SIZE sizeof(CMP_T)
-
+ for (; p1 < pEnd; p1++, p2++) {
+ if (*p1 != *p2) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+#ifdef ARCH_64_BIT
+# define CMP_T u64a
+# define ULOAD(x) unaligned_load_u64a(x)
+# define TOUPPER(x) theirtoupper64(x)
+#else
+# define CMP_T u32
+# define ULOAD(x) unaligned_load_u32(x)
+# define TOUPPER(x) theirtoupper32(x)
+#endif
+
+#define CMP_SIZE sizeof(CMP_T)
+
/**
* \brief Compare two strings, optionally caselessly.
*
* Note: If nocase is true, p2 is assumed to be already upper-case.
*/
-#if defined(ARCH_IA32)
-static UNUSED never_inline
-#else
-static really_inline
-#endif
-int cmpForward(const u8 *p1, const u8 *p2, size_t len, char nocase) {
- if (len < CMP_SIZE) {
- return nocase ? cmpNocaseNaive(p1, p2, len)
- : cmpCaseNaive(p1, p2, len);
- }
-
- const u8 *p1_end = p1 + len - CMP_SIZE;
- const u8 *p2_end = p2 + len - CMP_SIZE;
-
- if (nocase) { // Case-insensitive version.
- for (; p1 < p1_end; p1 += CMP_SIZE, p2 += CMP_SIZE) {
+#if defined(ARCH_IA32)
+static UNUSED never_inline
+#else
+static really_inline
+#endif
+int cmpForward(const u8 *p1, const u8 *p2, size_t len, char nocase) {
+ if (len < CMP_SIZE) {
+ return nocase ? cmpNocaseNaive(p1, p2, len)
+ : cmpCaseNaive(p1, p2, len);
+ }
+
+ const u8 *p1_end = p1 + len - CMP_SIZE;
+ const u8 *p2_end = p2 + len - CMP_SIZE;
+
+ if (nocase) { // Case-insensitive version.
+ for (; p1 < p1_end; p1 += CMP_SIZE, p2 += CMP_SIZE) {
assert(ULOAD(p2) == TOUPPER(ULOAD(p2))); // Already upper-case.
if (TOUPPER(ULOAD(p1)) != ULOAD(p2)) {
- return 1;
- }
- }
+ return 1;
+ }
+ }
assert(ULOAD(p2_end) == TOUPPER(ULOAD(p2_end))); // Already upper-case.
if (TOUPPER(ULOAD(p1_end)) != ULOAD(p2_end)) {
- return 1;
- }
- } else { // Case-sensitive version.
- for (; p1 < p1_end; p1 += CMP_SIZE, p2 += CMP_SIZE) {
- if (ULOAD(p1) != ULOAD(p2)) {
- return 1;
- }
- }
- if (ULOAD(p1_end) != ULOAD(p2_end)) {
- return 1;
- }
- }
-
- return 0;
-}
-
-#undef CMP_T
-#undef ULOAD
-#undef TOUPPER
-#undef CMP_SIZE
-
-#endif
-
+ return 1;
+ }
+ } else { // Case-sensitive version.
+ for (; p1 < p1_end; p1 += CMP_SIZE, p2 += CMP_SIZE) {
+ if (ULOAD(p1) != ULOAD(p2)) {
+ return 1;
+ }
+ }
+ if (ULOAD(p1_end) != ULOAD(p2_end)) {
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+#undef CMP_T
+#undef ULOAD
+#undef TOUPPER
+#undef CMP_SIZE
+
+#endif
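A hedged usage sketch of cmpForward(): when nocase is nonzero, the second operand must already be upper-cased, which the asserts above enforce in debug builds. The helper below is invented for illustration:

static int matches_hello(const u8 *input, size_t len) {
    static const u8 pattern[] = "HELLO"; /* pre-upper-cased, as required */
    if (len < 5) {
        return 0;
    }
    return cmpForward(input, pattern, 5, 1) == 0; /* cmpForward: 0 == equal */
}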
+
diff --git a/contrib/libs/hyperscan/src/util/compile_context.cpp b/contrib/libs/hyperscan/src/util/compile_context.cpp
index d18f645389..b8a957db7c 100644
--- a/contrib/libs/hyperscan/src/util/compile_context.cpp
+++ b/contrib/libs/hyperscan/src/util/compile_context.cpp
@@ -1,46 +1,46 @@
-/*
- * Copyright (c) 2015, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \file
- * \brief Global compile context, describes compile environment.
- */
-#include "compile_context.h"
-#include "grey.h"
-
-namespace ue2 {
-
-CompileContext::CompileContext(bool in_isStreaming, bool in_isVectored,
- const target_t &in_target_info,
- const Grey &in_grey)
- : streaming(in_isStreaming || in_isVectored),
- vectored(in_isVectored),
- target_info(in_target_info),
- grey(in_grey) {
-}
-
-} // namespace ue2
+/*
+ * Copyright (c) 2015, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * \brief Global compile context, describes compile environment.
+ */
+#include "compile_context.h"
+#include "grey.h"
+
+namespace ue2 {
+
+CompileContext::CompileContext(bool in_isStreaming, bool in_isVectored,
+ const target_t &in_target_info,
+ const Grey &in_grey)
+ : streaming(in_isStreaming || in_isVectored),
+ vectored(in_isVectored),
+ target_info(in_target_info),
+ grey(in_grey) {
+}
+
+} // namespace ue2
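One behavioural detail worth noting from the constructor above: vectored mode forces streaming on. An illustrative construction (the target_t and Grey arguments are assumed to come from elsewhere in the compiler):

void context_example(const ue2::target_t &ti, const ue2::Grey &g) {
    ue2::CompileContext cc(/* isStreaming */ false, /* isVectored */ true,
                           ti, g);
    // cc.streaming == true here, because vectored implies streaming.
}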
diff --git a/contrib/libs/hyperscan/src/util/compile_context.h b/contrib/libs/hyperscan/src/util/compile_context.h
index 481b1d3760..211f95a411 100644
--- a/contrib/libs/hyperscan/src/util/compile_context.h
+++ b/contrib/libs/hyperscan/src/util/compile_context.h
@@ -1,59 +1,59 @@
-/*
- * Copyright (c) 2015, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \file
- * \brief Global compile context, describes compile environment.
- */
-
-#ifndef COMPILE_CONTEXT_H
-#define COMPILE_CONTEXT_H
-
-#include "target_info.h"
-#include "grey.h"
-
-namespace ue2 {
-
-/** \brief Structure for describing the compile environment: grey box settings,
- * target arch, mode flags, etc. */
-struct CompileContext {
- CompileContext(bool isStreaming, bool isVectored,
- const target_t &target_info, const Grey &grey);
-
- const bool streaming; /* streaming or vectored mode */
- const bool vectored;
-
- /** \brief Target platform info. */
- const target_t target_info;
-
- /** \brief Greybox structure, allows tuning of all sorts of behaviour. */
- const Grey grey;
-};
-
-} // namespace ue2
-
-#endif
+/*
+ * Copyright (c) 2015, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * \brief Global compile context, describes compile environment.
+ */
+
+#ifndef COMPILE_CONTEXT_H
+#define COMPILE_CONTEXT_H
+
+#include "target_info.h"
+#include "grey.h"
+
+namespace ue2 {
+
+/** \brief Structure for describing the compile environment: grey box settings,
+ * target arch, mode flags, etc. */
+struct CompileContext {
+ CompileContext(bool isStreaming, bool isVectored,
+ const target_t &target_info, const Grey &grey);
+
+ const bool streaming; /* streaming or vectored mode */
+ const bool vectored;
+
+ /** \brief Target platform info. */
+ const target_t target_info;
+
+ /** \brief Greybox structure, allows tuning of all sorts of behaviour. */
+ const Grey grey;
+};
+
+} // namespace ue2
+
+#endif
diff --git a/contrib/libs/hyperscan/src/util/compile_error.cpp b/contrib/libs/hyperscan/src/util/compile_error.cpp
index 6519c1bb2f..8a916837c5 100644
--- a/contrib/libs/hyperscan/src/util/compile_error.cpp
+++ b/contrib/libs/hyperscan/src/util/compile_error.cpp
@@ -1,59 +1,59 @@
-/*
- * Copyright (c) 2015, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "compile_error.h"
-
-using namespace std;
-
-namespace ue2 {
-
-CompileError::CompileError(const string &why)
- : reason(why), hasIndex(false), index(0) {
- assert(!why.empty());
- assert(*why.rbegin() == '.');
-}
-
-CompileError::CompileError(unsigned int idx, const string &why)
- : reason(why), hasIndex(true), index(idx) {
- assert(!why.empty());
- assert(*why.rbegin() == '.');
-}
-
-void CompileError::setExpressionIndex(u32 expr_index) {
- hasIndex = true;
- index = expr_index;
-}
-
-CompileError::~CompileError() {}
-
-ResourceLimitError::ResourceLimitError()
- : CompileError("Resource limit exceeded.") {}
-
-ResourceLimitError::~ResourceLimitError() {}
-
-} // namespace ue2
+/*
+ * Copyright (c) 2015, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "compile_error.h"
+
+using namespace std;
+
+namespace ue2 {
+
+CompileError::CompileError(const string &why)
+ : reason(why), hasIndex(false), index(0) {
+ assert(!why.empty());
+ assert(*why.rbegin() == '.');
+}
+
+CompileError::CompileError(unsigned int idx, const string &why)
+ : reason(why), hasIndex(true), index(idx) {
+ assert(!why.empty());
+ assert(*why.rbegin() == '.');
+}
+
+void CompileError::setExpressionIndex(u32 expr_index) {
+ hasIndex = true;
+ index = expr_index;
+}
+
+CompileError::~CompileError() {}
+
+ResourceLimitError::ResourceLimitError()
+ : CompileError("Resource limit exceeded.") {}
+
+ResourceLimitError::~ResourceLimitError() {}
+
+} // namespace ue2
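A hedged sketch of the intended usage: construct the error with a reason ending in a full stop (the asserts above check this), optionally attach the offending expression index, and throw:

void report_failure() {
    ue2::CompileError err("Pattern too large.");
    err.setExpressionIndex(3);
    throw err; // callers catch by const reference and read reason/index
}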
diff --git a/contrib/libs/hyperscan/src/util/compile_error.h b/contrib/libs/hyperscan/src/util/compile_error.h
index a38220526b..87e156a5f9 100644
--- a/contrib/libs/hyperscan/src/util/compile_error.h
+++ b/contrib/libs/hyperscan/src/util/compile_error.h
@@ -1,68 +1,68 @@
-/*
- * Copyright (c) 2015, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef UTIL_COMPILE_ERROR_H
-#define UTIL_COMPILE_ERROR_H
-
-#include <cassert>
-#include <stdexcept>
-#include <string>
-
-#include "ue2common.h"
-
-namespace ue2 {
-
-/** \brief Error thrown by the compiler, can reference a specific expression
- * index. */
-class CompileError {
-public:
- // Note: 'why' should describe why the error occurred and end with a
- // full stop, but no line break.
- explicit CompileError(const std::string &why);
- CompileError(u32 index, const std::string &why);
-
- virtual ~CompileError();
-
- void setExpressionIndex(u32 index);
-
- std::string reason; //!< Reason for the error
- bool hasIndex; //!< Does it reference a specific expression?
- u32 index; //!< The index of the expression referred to
-};
-
-/** \brief Error thrown by the compiler when an arbitrary resource limit (as
- * specified in the grey box) is exceeded. */
-class ResourceLimitError : public CompileError {
-public:
- ResourceLimitError();
- ~ResourceLimitError() override;
-};
-
-} // namespace ue2
-
-#endif
+/*
+ * Copyright (c) 2015, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef UTIL_COMPILE_ERROR_H
+#define UTIL_COMPILE_ERROR_H
+
+#include <cassert>
+#include <stdexcept>
+#include <string>
+
+#include "ue2common.h"
+
+namespace ue2 {
+
+/** \brief Error thrown by the compiler, can reference a specific expression
+ * index. */
+class CompileError {
+public:
+ // Note: 'why' should describe why the error occurred and end with a
+ // full stop, but no line break.
+ explicit CompileError(const std::string &why);
+ CompileError(u32 index, const std::string &why);
+
+ virtual ~CompileError();
+
+ void setExpressionIndex(u32 index);
+
+ std::string reason; //!< Reason for the error
+ bool hasIndex; //!< Does it reference a specific expression?
+ u32 index; //!< The index of the expression referred to
+};
+
+/** \brief Error thrown by the compiler when an arbitrary resource limit (as
+ * specified in the grey box) is exceeded. */
+class ResourceLimitError : public CompileError {
+public:
+ ResourceLimitError();
+ ~ResourceLimitError() override;
+};
+
+} // namespace ue2
+
+#endif
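For context, a minimal usage sketch (not part of this diff) of the error types above. CompileError is not derived from std::exception, so callers catch it by its own type; compileExpression here is a hypothetical helper.

    #include "compile_error.h"

    #include <cstdio>

    // Hypothetical compiler pass: raises CompileError with an expression
    // index, or ResourceLimitError when a grey-box limit is exceeded.
    static void compileExpression(unsigned int idx, bool tooBig) {
        if (tooBig) {
            throw ue2::ResourceLimitError(); // reason: "Resource limit exceeded."
        }
        throw ue2::CompileError(idx, "Invalid expression."); // ends with a full stop
    }

    int main() {
        try {
            compileExpression(42, false);
        } catch (const ue2::CompileError &e) {
            if (e.hasIndex) {
                std::printf("expression %u: %s\n", e.index, e.reason.c_str());
            } else {
                std::printf("error: %s\n", e.reason.c_str());
            }
        }
        return 0;
    }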
diff --git a/contrib/libs/hyperscan/src/util/container.h b/contrib/libs/hyperscan/src/util/container.h
index 68f60e99ee..83aa318b6c 100644
--- a/contrib/libs/hyperscan/src/util/container.h
+++ b/contrib/libs/hyperscan/src/util/container.h
@@ -1,94 +1,94 @@
-/*
+/*
* Copyright (c) 2015-2017, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \file
- * \brief Convenience template functions for containers.
- */
-
-#ifndef UTIL_CONTAINER_H
-#define UTIL_CONTAINER_H
-
-#include "ue2common.h"
-
-#include <algorithm>
-#include <cassert>
-#include <cstring>
-#include <set>
-#include <type_traits>
-#include <utility>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * \brief Convenience template functions for containers.
+ */
+
+#ifndef UTIL_CONTAINER_H
+#define UTIL_CONTAINER_H
+
+#include "ue2common.h"
+
+#include <algorithm>
+#include <cassert>
+#include <cstring>
+#include <set>
+#include <type_traits>
+#include <utility>
#include <vector>
-
-namespace ue2 {
-
-// Existence check for associative containers.
-template<typename C>
-bool contains(const C &container, const typename C::key_type &key) {
- return container.find(key) != container.end();
-}
-
-template<typename C, typename It>
-bool contains_any_of(const C &container, const std::pair<It, It> &range) {
- return std::find_first_of(range.first, range.second, container.begin(),
- container.end()) != range.second;
-}
-
-template<typename C, typename It>
-void insert(C *container, const std::pair<It, It> &range) {
- container->insert(range.first, range.second);
-}
-
-template<typename C, typename It>
-void insert(C *container, typename C::iterator pos,
- const std::pair<It, It> &range) {
- container->insert(pos, range.first, range.second);
-}
-
-template<typename C, typename D>
-void insert(C *container, const D &donor) {
- container->insert(donor.begin(), donor.end());
-}
-
-template<typename C, typename D>
-void insert(C *container, typename C::iterator pos, const D &donor) {
- container->insert(pos, donor.begin(), donor.end());
-}
-
-/**
+
+namespace ue2 {
+
+// Existence check for associative containers.
+template<typename C>
+bool contains(const C &container, const typename C::key_type &key) {
+ return container.find(key) != container.end();
+}
+
+template<typename C, typename It>
+bool contains_any_of(const C &container, const std::pair<It, It> &range) {
+ return std::find_first_of(range.first, range.second, container.begin(),
+ container.end()) != range.second;
+}
+
+template<typename C, typename It>
+void insert(C *container, const std::pair<It, It> &range) {
+ container->insert(range.first, range.second);
+}
+
+template<typename C, typename It>
+void insert(C *container, typename C::iterator pos,
+ const std::pair<It, It> &range) {
+ container->insert(pos, range.first, range.second);
+}
+
+template<typename C, typename D>
+void insert(C *container, const D &donor) {
+ container->insert(donor.begin(), donor.end());
+}
+
+template<typename C, typename D>
+void insert(C *container, typename C::iterator pos, const D &donor) {
+ container->insert(pos, donor.begin(), donor.end());
+}
+
+/**
* \brief Constructs a vector from a range bounded by the given pair of
* iterators.
*/
-template <typename It>
-auto make_vector_from(const std::pair<It, It> &range)
- -> std::vector<decltype(*range.first)> {
- using T = decltype(*range.first);
- return std::vector<T>(range.first, range.second);
-}
-
+template <typename It>
+auto make_vector_from(const std::pair<It, It> &range)
+ -> std::vector<decltype(*range.first)> {
+ using T = decltype(*range.first);
+ return std::vector<T>(range.first, range.second);
+}
+
/** \brief Sort a sequence container and remove duplicates. */
template <typename C, typename Compare = std::less<typename C::value_type>>
void sort_and_unique(C &container, Compare comp = Compare()) {
@@ -97,111 +97,111 @@ void sort_and_unique(C &container, Compare comp = Compare()) {
std::end(container));
}
-/** \brief Returns a set containing the keys in the given associative
- * container. */
-template <typename C>
-std::set<typename C::key_type> assoc_keys(const C &container) {
- std::set<typename C::key_type> keys;
- for (const auto &elem : container) {
- keys.insert(elem.first);
- }
- return keys;
-}
-
-/**
- * \brief Return the length in bytes of the given vector of (POD) objects.
- */
+/** \brief Returns a set containing the keys in the given associative
+ * container. */
+template <typename C>
+std::set<typename C::key_type> assoc_keys(const C &container) {
+ std::set<typename C::key_type> keys;
+ for (const auto &elem : container) {
+ keys.insert(elem.first);
+ }
+ return keys;
+}
+
+/**
+ * \brief Return the length in bytes of the given vector of (POD) objects.
+ */
template <typename T, typename Alloc>
typename std::vector<T, Alloc>::size_type
byte_length(const std::vector<T, Alloc> &vec) {
- static_assert(std::is_pod<T>::value, "should be pod");
- return vec.size() * sizeof(T);
-}
-
-/**
- * \brief Copy the given vector of POD objects to the given location in memory.
- * It is safe to give this function an empty vector.
- */
+ static_assert(std::is_pod<T>::value, "should be pod");
+ return vec.size() * sizeof(T);
+}
+
+/**
+ * \brief Copy the given vector of POD objects to the given location in memory.
+ * It is safe to give this function an empty vector.
+ */
template<typename T, typename Alloc>
void *copy_bytes(void *dest, const std::vector<T, Alloc> &vec) {
- static_assert(std::is_pod<T>::value, "should be pod");
- assert(dest);
-
- // Since we're generally using this function to write into the bytecode,
- // dest should be appropriately aligned for T.
- assert(ISALIGNED_N(dest, alignof(T)));
-
- if (vec.empty()) {
- return dest; // Protect memcpy against null pointers.
- }
- assert(vec.data() != nullptr);
- return std::memcpy(dest, vec.data(), byte_length(vec));
-}
-
-template<typename OrderedContainer1, typename OrderedContainer2>
-bool is_subset_of(const OrderedContainer1 &small, const OrderedContainer2 &big) {
- static_assert(std::is_same<typename OrderedContainer1::value_type,
- typename OrderedContainer2::value_type>::value,
- "Both containers should have the same value_type");
- auto sit = small.begin();
- auto bit = big.begin();
- if (small.size() > big.size()) {
- return false;
- }
-
- while (sit != small.end()) {
- if (bit == big.end()) {
- return false;
- }
-
- if (*sit == *bit) {
- ++sit;
- ++bit;
- continue;
- }
- if (*bit < *sit) {
- ++bit;
- continue;
- }
-
- return false;
- }
- return true;
-}
-
-template<typename OrderedContainer1, typename OrderedContainer2>
-bool has_intersection(const OrderedContainer1 &a, const OrderedContainer2 &b) {
- static_assert(std::is_same<typename OrderedContainer1::value_type,
- typename OrderedContainer2::value_type>::value,
- "Both containers should have the same value_type");
- auto ait = a.begin();
- auto bit = b.begin();
- while (ait != a.end() && bit != b.end()) {
- if (*ait == *bit) {
- return true;
- }
-
- if (*ait < *bit) {
- ++ait;
- } else {
- ++bit;
- }
- }
-
- return false;
-}
-
-/**
- * \brief Erase the elements (by value) in the donor container from the given
- * container.
- */
-template<typename C, typename D>
-void erase_all(C *container, const D &donor) {
- for (const auto &elem : donor) {
- container->erase(elem);
- }
-}
-
+ static_assert(std::is_pod<T>::value, "should be pod");
+ assert(dest);
+
+ // Since we're generally using this function to write into the bytecode,
+ // dest should be appropriately aligned for T.
+ assert(ISALIGNED_N(dest, alignof(T)));
+
+ if (vec.empty()) {
+ return dest; // Protect memcpy against null pointers.
+ }
+ assert(vec.data() != nullptr);
+ return std::memcpy(dest, vec.data(), byte_length(vec));
+}
+
+template<typename OrderedContainer1, typename OrderedContainer2>
+bool is_subset_of(const OrderedContainer1 &small, const OrderedContainer2 &big) {
+ static_assert(std::is_same<typename OrderedContainer1::value_type,
+ typename OrderedContainer2::value_type>::value,
+ "Both containers should have the same value_type");
+ auto sit = small.begin();
+ auto bit = big.begin();
+ if (small.size() > big.size()) {
+ return false;
+ }
+
+ while (sit != small.end()) {
+ if (bit == big.end()) {
+ return false;
+ }
+
+ if (*sit == *bit) {
+ ++sit;
+ ++bit;
+ continue;
+ }
+ if (*bit < *sit) {
+ ++bit;
+ continue;
+ }
+
+ return false;
+ }
+ return true;
+}
+
+template<typename OrderedContainer1, typename OrderedContainer2>
+bool has_intersection(const OrderedContainer1 &a, const OrderedContainer2 &b) {
+ static_assert(std::is_same<typename OrderedContainer1::value_type,
+ typename OrderedContainer2::value_type>::value,
+ "Both containers should have the same value_type");
+ auto ait = a.begin();
+ auto bit = b.begin();
+ while (ait != a.end() && bit != b.end()) {
+ if (*ait == *bit) {
+ return true;
+ }
+
+ if (*ait < *bit) {
+ ++ait;
+ } else {
+ ++bit;
+ }
+ }
+
+ return false;
+}
+
+/**
+ * \brief Erase the elements (by value) in the donor container from the given
+ * container.
+ */
+template<typename C, typename D>
+void erase_all(C *container, const D &donor) {
+ for (const auto &elem : donor) {
+ container->erase(elem);
+ }
+}
+
template<typename C, typename Pred>
bool any_of_in(const C &c, Pred p) {
@@ -213,33 +213,33 @@ bool all_of_in(const C &c, Pred p) {
return std::all_of(c.begin(), c.end(), std::move(p));
}
-} // namespace ue2
-
-#ifdef DUMP_SUPPORT
-
-#include <sstream>
-#include <string>
-
-namespace ue2 {
-
-/**
- * \brief Dump a container of stream-printable objects into a comma-separated
- * list in a string.
- */
-template<class C>
-std::string as_string_list(const C &c) {
- std::ostringstream oss;
- for (auto it = c.begin(); it != c.end(); ++it) {
- if (it != c.begin()) {
- oss << ", ";
- }
- oss << *it;
- }
- return oss.str();
-}
-
-} // namespace ue2
-
-#endif // DUMP_SUPPORT
-
-#endif // UTIL_CONTAINER_H
+} // namespace ue2
+
+#ifdef DUMP_SUPPORT
+
+#include <sstream>
+#include <string>
+
+namespace ue2 {
+
+/**
+ * \brief Dump a container of stream-printable objects into a comma-separated
+ * list in a string.
+ */
+template<class C>
+std::string as_string_list(const C &c) {
+ std::ostringstream oss;
+ for (auto it = c.begin(); it != c.end(); ++it) {
+ if (it != c.begin()) {
+ oss << ", ";
+ }
+ oss << *it;
+ }
+ return oss.str();
+}
+
+} // namespace ue2
+
+#endif // DUMP_SUPPORT
+
+#endif // UTIL_CONTAINER_H
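A minimal sketch (not part of this diff) exercising a few of the helpers above, assuming util/container.h is on the include path:

    #include "util/container.h"

    #include <cstdio>
    #include <set>
    #include <vector>

    int main() {
        std::set<int> keys = {1, 2, 3};
        std::vector<int> extra = {3, 5, 2, 2};

        // Existence check for associative containers.
        std::printf("contains 2: %d\n", ue2::contains(keys, 2)); // 1

        ue2::insert(&keys, extra);   // keys = {1, 2, 3, 5}
        ue2::sort_and_unique(extra); // extra = {2, 3, 5}

        // Both arguments must be ordered over the same value_type.
        std::printf("subset: %d\n", ue2::is_subset_of(extra, keys));         // 1
        std::printf("intersects: %d\n", ue2::has_intersection(extra, keys)); // 1
        return 0;
    }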
diff --git a/contrib/libs/hyperscan/src/util/cpuid_flags.c b/contrib/libs/hyperscan/src/util/cpuid_flags.c
index c00ce58e2d..84f6077d32 100644
--- a/contrib/libs/hyperscan/src/util/cpuid_flags.c
+++ b/contrib/libs/hyperscan/src/util/cpuid_flags.c
@@ -1,50 +1,50 @@
-/*
+/*
* Copyright (c) 2015-2020, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "cpuid_flags.h"
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "cpuid_flags.h"
#include "cpuid_inline.h"
-#include "ue2common.h"
-#include "hs_compile.h" // for HS_MODE_ flags
-#include "hs_internal.h"
+#include "ue2common.h"
+#include "hs_compile.h" // for HS_MODE_ flags
+#include "hs_internal.h"
#include "util/arch.h"
-
+
#if !defined(_WIN32) && !defined(CPUID_H_)
-#include <cpuid.h>
-#endif
-
-u64a cpuid_flags(void) {
- u64a cap = 0;
-
+#include <cpuid.h>
+#endif
+
+u64a cpuid_flags(void) {
+ u64a cap = 0;
+
if (check_avx2()) {
DEBUG_PRINTF("AVX2 enabled\n");
- cap |= HS_CPU_FEATURES_AVX2;
- }
-
+ cap |= HS_CPU_FEATURES_AVX2;
+ }
+
if (check_avx512()) {
DEBUG_PRINTF("AVX512 enabled\n");
cap |= HS_CPU_FEATURES_AVX512;
@@ -56,9 +56,9 @@ u64a cpuid_flags(void) {
}
#if !defined(FAT_RUNTIME) && !defined(HAVE_AVX2)
- cap &= ~HS_CPU_FEATURES_AVX2;
-#endif
-
+ cap &= ~HS_CPU_FEATURES_AVX2;
+#endif
+
#if (!defined(FAT_RUNTIME) && !defined(HAVE_AVX512)) || \
(defined(FAT_RUNTIME) && !defined(BUILD_AVX512))
cap &= ~HS_CPU_FEATURES_AVX512;
@@ -69,108 +69,108 @@ u64a cpuid_flags(void) {
cap &= ~HS_CPU_FEATURES_AVX512VBMI;
#endif
- return cap;
-}
-
-struct family_id {
- u32 full_family;
- u32 full_model;
- u32 tune;
-};
-
-/* from table 35-1 of the Intel 64 and IA32 Arch. Software Developer's Manual
- * and "Intel Architecture and Processor Identification With CPUID Model and
- * Family Numbers" */
-static const struct family_id known_microarch[] = {
- { 0x6, 0x37, HS_TUNE_FAMILY_SLM }, /* baytrail */
+ return cap;
+}
+
+struct family_id {
+ u32 full_family;
+ u32 full_model;
+ u32 tune;
+};
+
+/* from table 35-1 of the Intel 64 and IA32 Arch. Software Developer's Manual
+ * and "Intel Architecture and Processor Identification With CPUID Model and
+ * Family Numbers" */
+static const struct family_id known_microarch[] = {
+ { 0x6, 0x37, HS_TUNE_FAMILY_SLM }, /* baytrail */
{ 0x6, 0x4A, HS_TUNE_FAMILY_SLM }, /* silvermont */
{ 0x6, 0x4C, HS_TUNE_FAMILY_SLM }, /* silvermont */
- { 0x6, 0x4D, HS_TUNE_FAMILY_SLM }, /* avoton, rangley */
+ { 0x6, 0x4D, HS_TUNE_FAMILY_SLM }, /* avoton, rangley */
{ 0x6, 0x5A, HS_TUNE_FAMILY_SLM }, /* silvermont */
{ 0x6, 0x5D, HS_TUNE_FAMILY_SLM }, /* silvermont */
-
+
{ 0x6, 0x5C, HS_TUNE_FAMILY_GLM }, /* goldmont */
{ 0x6, 0x5F, HS_TUNE_FAMILY_GLM }, /* denverton */
- { 0x6, 0x3C, HS_TUNE_FAMILY_HSW }, /* haswell */
- { 0x6, 0x45, HS_TUNE_FAMILY_HSW }, /* haswell */
- { 0x6, 0x46, HS_TUNE_FAMILY_HSW }, /* haswell */
+ { 0x6, 0x3C, HS_TUNE_FAMILY_HSW }, /* haswell */
+ { 0x6, 0x45, HS_TUNE_FAMILY_HSW }, /* haswell */
+ { 0x6, 0x46, HS_TUNE_FAMILY_HSW }, /* haswell */
{ 0x6, 0x3F, HS_TUNE_FAMILY_HSW }, /* haswell Xeon */
-
+
{ 0x6, 0x3E, HS_TUNE_FAMILY_IVB }, /* ivybridge Xeon */
- { 0x6, 0x3A, HS_TUNE_FAMILY_IVB }, /* ivybridge */
-
- { 0x6, 0x2A, HS_TUNE_FAMILY_SNB }, /* sandybridge */
+ { 0x6, 0x3A, HS_TUNE_FAMILY_IVB }, /* ivybridge */
+
+ { 0x6, 0x2A, HS_TUNE_FAMILY_SNB }, /* sandybridge */
{ 0x6, 0x2D, HS_TUNE_FAMILY_SNB }, /* sandybridge Xeon */
-
- { 0x6, 0x3D, HS_TUNE_FAMILY_BDW }, /* broadwell Core-M */
+
+ { 0x6, 0x3D, HS_TUNE_FAMILY_BDW }, /* broadwell Core-M */
{ 0x6, 0x47, HS_TUNE_FAMILY_BDW }, /* broadwell */
- { 0x6, 0x4F, HS_TUNE_FAMILY_BDW }, /* broadwell xeon */
- { 0x6, 0x56, HS_TUNE_FAMILY_BDW }, /* broadwell xeon-d */
-
+ { 0x6, 0x4F, HS_TUNE_FAMILY_BDW }, /* broadwell xeon */
+ { 0x6, 0x56, HS_TUNE_FAMILY_BDW }, /* broadwell xeon-d */
+
{ 0x6, 0x4E, HS_TUNE_FAMILY_SKL }, /* Skylake Mobile */
{ 0x6, 0x5E, HS_TUNE_FAMILY_SKL }, /* Skylake Core/E3 Xeon */
{ 0x6, 0x55, HS_TUNE_FAMILY_SKX }, /* Skylake Xeon */
-
+
{ 0x6, 0x8E, HS_TUNE_FAMILY_SKL }, /* Kabylake Mobile */
{ 0x6, 0x9E, HS_TUNE_FAMILY_SKL }, /* Kabylake desktop */
-
+
{ 0x6, 0x7D, HS_TUNE_FAMILY_ICL }, /* Icelake */
{ 0x6, 0x7E, HS_TUNE_FAMILY_ICL }, /* Icelake */
{ 0x6, 0x6A, HS_TUNE_FAMILY_ICX }, /* Icelake Xeon-D */
{ 0x6, 0x6C, HS_TUNE_FAMILY_ICX }, /* Icelake Xeon */
-};
-
-#ifdef DUMP_SUPPORT
-static UNUSED
-const char *dumpTune(u32 tune) {
-#define T_CASE(x) case x: return #x;
- switch (tune) {
- T_CASE(HS_TUNE_FAMILY_SLM);
+};
+
+#ifdef DUMP_SUPPORT
+static UNUSED
+const char *dumpTune(u32 tune) {
+#define T_CASE(x) case x: return #x;
+ switch (tune) {
+ T_CASE(HS_TUNE_FAMILY_SLM);
T_CASE(HS_TUNE_FAMILY_GLM);
- T_CASE(HS_TUNE_FAMILY_HSW);
- T_CASE(HS_TUNE_FAMILY_SNB);
- T_CASE(HS_TUNE_FAMILY_IVB);
- T_CASE(HS_TUNE_FAMILY_BDW);
+ T_CASE(HS_TUNE_FAMILY_HSW);
+ T_CASE(HS_TUNE_FAMILY_SNB);
+ T_CASE(HS_TUNE_FAMILY_IVB);
+ T_CASE(HS_TUNE_FAMILY_BDW);
T_CASE(HS_TUNE_FAMILY_SKL);
T_CASE(HS_TUNE_FAMILY_SKX);
T_CASE(HS_TUNE_FAMILY_ICL);
T_CASE(HS_TUNE_FAMILY_ICX);
- }
-#undef T_CASE
- return "unknown";
-}
-#endif
-
-u32 cpuid_tune(void) {
- unsigned int eax, ebx, ecx, edx;
-
- cpuid(1, 0, &eax, &ebx, &ecx, &edx);
-
- u32 family = (eax >> 8) & 0xf;
- u32 model = 0;
-
- if (family == 0x6 || family == 0xf) {
- model = ((eax >> 4) & 0xf) | ((eax >> 12) & 0xf0);
- } else {
- model = (eax >> 4) & 0xf;
- }
-
- DEBUG_PRINTF("family = %xh model = %xh\n", family, model);
- for (u32 i = 0; i < ARRAY_LENGTH(known_microarch); i++) {
- if (family != known_microarch[i].full_family) {
- continue;
- }
-
- if (model != known_microarch[i].full_model) {
- continue;
- }
-
- u32 tune = known_microarch[i].tune;
- DEBUG_PRINTF("found tune flag %s\n", dumpTune(tune) );
- return tune;
- }
-
- return HS_TUNE_FAMILY_GENERIC;
-}
+ }
+#undef T_CASE
+ return "unknown";
+}
+#endif
+
+u32 cpuid_tune(void) {
+ unsigned int eax, ebx, ecx, edx;
+
+ cpuid(1, 0, &eax, &ebx, &ecx, &edx);
+
+ u32 family = (eax >> 8) & 0xf;
+ u32 model = 0;
+
+ if (family == 0x6 || family == 0xf) {
+ model = ((eax >> 4) & 0xf) | ((eax >> 12) & 0xf0);
+ } else {
+ model = (eax >> 4) & 0xf;
+ }
+
+ DEBUG_PRINTF("family = %xh model = %xh\n", family, model);
+ for (u32 i = 0; i < ARRAY_LENGTH(known_microarch); i++) {
+ if (family != known_microarch[i].full_family) {
+ continue;
+ }
+
+ if (model != known_microarch[i].full_model) {
+ continue;
+ }
+
+ u32 tune = known_microarch[i].tune;
+ DEBUG_PRINTF("found tune flag %s\n", dumpTune(tune) );
+ return tune;
+ }
+
+ return HS_TUNE_FAMILY_GENERIC;
+}
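The decode in cpuid_tune() above merges the extended model bits into the base model only for families 0x6 and 0xf. A standalone sketch (not part of this diff) of that decode, with a hard-coded example EAX value rather than a real CPUID read:

    #include <cstdio>

    int main() {
        unsigned int eax = 0x000506E3; // example CPUID.1:EAX: family 6, model 0x5E
        unsigned int family = (eax >> 8) & 0xf;
        unsigned int model = (eax >> 4) & 0xf;
        if (family == 0x6 || family == 0xf) {
            // Extended model bits [19:16] form the high nibble of the model.
            model |= (eax >> 12) & 0xf0;
        }
        std::printf("family = %xh model = %xh\n", family, model);
        // prints: family = 6h model = 5eh
        return 0;
    }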
diff --git a/contrib/libs/hyperscan/src/util/cpuid_flags.h b/contrib/libs/hyperscan/src/util/cpuid_flags.h
index 527c6d52f3..c7ab2522d9 100644
--- a/contrib/libs/hyperscan/src/util/cpuid_flags.h
+++ b/contrib/libs/hyperscan/src/util/cpuid_flags.h
@@ -1,55 +1,55 @@
-/*
+/*
* Copyright (c) 2015-2017, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
#ifndef UTIL_CPUID_H_
#define UTIL_CPUID_H_
-
-#include "ue2common.h"
-
+
+#include "ue2common.h"
+
#if !defined(_WIN32) && !defined(CPUID_H_)
#include <cpuid.h>
/* system header doesn't have a header guard */
#define CPUID_H_
#endif
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
-/* returns HS_CPU_FEATURES_* mask. */
-u64a cpuid_flags(void);
-
-u32 cpuid_tune(void);
-
-#ifdef __cplusplus
-} /* extern "C" */
-#endif
-
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/* returns HS_CPU_FEATURES_* mask. */
+u64a cpuid_flags(void);
+
+u32 cpuid_tune(void);
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
#endif /* UTIL_CPUID_H_ */
-
+
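Both entry points are plain C functions (hence the extern "C" block) and are callable from C or C++. A minimal sketch (not part of this diff), assuming the ue2 build environment:

    #include "util/cpuid_flags.h"

    #include <cstdio>

    int main() {
        u64a features = cpuid_flags(); // HS_CPU_FEATURES_* bit mask
        u32 tune = cpuid_tune();       // HS_TUNE_FAMILY_* value
        std::printf("features = %llx tune = %u\n",
                    (unsigned long long)features, tune);
        return 0;
    }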
diff --git a/contrib/libs/hyperscan/src/util/depth.cpp b/contrib/libs/hyperscan/src/util/depth.cpp
index 475458763a..fdfc66546a 100644
--- a/contrib/libs/hyperscan/src/util/depth.cpp
+++ b/contrib/libs/hyperscan/src/util/depth.cpp
@@ -1,91 +1,91 @@
-/*
- * Copyright (c) 2015, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \file
- * \brief Data types used to represent depth quantities.
- */
-#include "depth.h"
-#include "ue2common.h"
-
-#include <algorithm> // std::min, std::max
-
-namespace ue2 {
-
-DepthMinMax unionDepthMinMax(const DepthMinMax &a, const DepthMinMax &b) {
- DepthMinMax rv;
-
- if (a.min.is_unreachable()) {
- rv.min = b.min;
- } else if (b.min.is_unreachable()) {
- rv.min = a.min;
- } else {
- rv.min = std::min(a.min, b.min);
- }
-
- if (a.max.is_infinite() || b.max.is_infinite()) {
- rv.max = depth::infinity();
- } else if (a.max.is_unreachable()) {
- rv.max = b.max;
- } else if (b.max.is_unreachable()) {
- rv.max = a.max;
- } else {
- rv.max = std::max(a.max, b.max);
- }
-
- return rv;
-}
-
-} // namespace ue2
-
-#ifdef DUMP_SUPPORT
-
-#include <sstream>
-#include <string>
-
-namespace ue2 {
-
-std::string depth::str() const {
- if (is_unreachable()) {
- return "unr";
- } else if (is_infinite()) {
- return "inf";
- }
- std::ostringstream oss;
- oss << val;
- return oss.str();
-}
-
-std::string DepthMinMax::str() const {
- std::ostringstream oss;
- oss << "[" << min.str() << "," << max.str() << "]";
- return oss.str();
-}
-
-} // namespace ue2
-
-#endif // DUMP_SUPPORT
+/*
+ * Copyright (c) 2015, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * \brief Data types used to represent depth quantities.
+ */
+#include "depth.h"
+#include "ue2common.h"
+
+#include <algorithm> // std::min, std::max
+
+namespace ue2 {
+
+DepthMinMax unionDepthMinMax(const DepthMinMax &a, const DepthMinMax &b) {
+ DepthMinMax rv;
+
+ if (a.min.is_unreachable()) {
+ rv.min = b.min;
+ } else if (b.min.is_unreachable()) {
+ rv.min = a.min;
+ } else {
+ rv.min = std::min(a.min, b.min);
+ }
+
+ if (a.max.is_infinite() || b.max.is_infinite()) {
+ rv.max = depth::infinity();
+ } else if (a.max.is_unreachable()) {
+ rv.max = b.max;
+ } else if (b.max.is_unreachable()) {
+ rv.max = a.max;
+ } else {
+ rv.max = std::max(a.max, b.max);
+ }
+
+ return rv;
+}
+
+} // namespace ue2
+
+#ifdef DUMP_SUPPORT
+
+#include <sstream>
+#include <string>
+
+namespace ue2 {
+
+std::string depth::str() const {
+ if (is_unreachable()) {
+ return "unr";
+ } else if (is_infinite()) {
+ return "inf";
+ }
+ std::ostringstream oss;
+ oss << val;
+ return oss.str();
+}
+
+std::string DepthMinMax::str() const {
+ std::ostringstream oss;
+ oss << "[" << min.str() << "," << max.str() << "]";
+ return oss.str();
+}
+
+} // namespace ue2
+
+#endif // DUMP_SUPPORT
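In unionDepthMinMax() above, "unreachable" acts as an identity on either bound and "infinity" absorbs the max. A small sketch (not part of this diff) of the resulting semantics:

    #include "util/depth.h"

    #include <cassert>

    int main() {
        using ue2::depth;
        using ue2::DepthMinMax;

        DepthMinMax a(depth(2), depth(5));
        DepthMinMax b(depth(4), depth::infinity());

        DepthMinMax u = ue2::unionDepthMinMax(a, b);
        assert(u.min == depth(2));   // min of the two finite minima
        assert(u.max.is_infinite()); // infinity absorbs the finite max
        return 0;
    }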
diff --git a/contrib/libs/hyperscan/src/util/depth.h b/contrib/libs/hyperscan/src/util/depth.h
index 5305c6f1b3..ad06184647 100644
--- a/contrib/libs/hyperscan/src/util/depth.h
+++ b/contrib/libs/hyperscan/src/util/depth.h
@@ -1,198 +1,198 @@
-/*
+/*
* Copyright (c) 2015-2017, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \file
- * \brief Data types used to represent depth quantities.
- */
-
-#ifndef DEPTH_H
-#define DEPTH_H
-
-#include "ue2common.h"
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * \brief Data types used to represent depth quantities.
+ */
+
+#ifndef DEPTH_H
+#define DEPTH_H
+
+#include "ue2common.h"
#include "util/hash.h"
#include "util/operators.h"
-
-#ifdef DUMP_SUPPORT
-#include <string>
-#endif
-
-namespace ue2 {
-
-/**
- * \brief Exception thrown if a depth operation overflows.
- */
-struct DepthOverflowError {};
-
-/**
- * \brief Type used to represent depth information; value is either a count,
- * or one of the special values "infinity" and "unreachable".
- */
+
+#ifdef DUMP_SUPPORT
+#include <string>
+#endif
+
+namespace ue2 {
+
+/**
+ * \brief Exception thrown if a depth operation overflows.
+ */
+struct DepthOverflowError {};
+
+/**
+ * \brief Type used to represent depth information; value is either a count,
+ * or one of the special values "infinity" and "unreachable".
+ */
class depth : totally_ordered<depth> {
-public:
+public:
/** \brief The default depth is special value "unreachable". */
depth() = default;
-
+
explicit depth(u32 v) : val(v) {
- if (v > max_value()) {
- DEBUG_PRINTF("depth %u too large to represent!\n", v);
- throw DepthOverflowError();
- }
- }
-
- static depth unreachable() {
- depth d;
- d.val = val_unreachable;
- return d;
- }
-
- static depth infinity() {
- depth d;
- d.val = val_infinity;
- return d;
- }
-
- /** \brief Returns the max finite value representable as a depth. */
- static constexpr u32 max_value() { return val_infinity - 1; }
-
- bool is_finite() const { return val < val_infinity; }
- bool is_infinite() const { return val == val_infinity; }
- bool is_unreachable() const { return val == val_unreachable; }
- bool is_reachable() const { return !is_unreachable(); }
-
- /** \brief Convert a finite depth to an integer. */
- operator u32() const {
- if (!is_finite()) {
- throw DepthOverflowError();
- }
- return val;
- }
-
- bool operator<(const depth &d) const { return val < d.val; }
- bool operator==(const depth &d) const { return val == d.val; }
-
- // The following comparison operators exist for use against integer types
- // that are bigger than what we can safely convert to depth (such as those
- // in extparam).
-
- bool operator<(u64a d) const {
- if (!is_finite()) {
- return false;
- }
- return val < d;
- }
- bool operator<=(u64a d) const {
- if (!is_finite()) {
- return false;
- }
- return val <= d;
- }
- bool operator==(u64a d) const {
- if (!is_finite()) {
- return false;
- }
- return val == d;
- }
- bool operator>(u64a d) const { return !(*this <= d); }
- bool operator>=(u64a d) const { return !(*this < d); }
- bool operator!=(u64a d) const { return !(*this == d); }
-
- depth operator+(const depth &d) const {
- if (is_unreachable() || d.is_unreachable()) {
- return unreachable();
- }
- if (is_infinite() || d.is_infinite()) {
- return infinity();
- }
-
- u64a rv = val + d.val;
- if (rv >= val_infinity) {
- DEBUG_PRINTF("depth %llu too large to represent!\n", rv);
- throw DepthOverflowError();
- }
-
- return depth((u32)rv);
- }
-
- depth &operator+=(const depth &d) {
- depth rv = *this + d;
- *this = rv;
- return *this;
- }
-
- depth operator-(const depth &d) const {
- if (!d.is_finite()) {
- throw DepthOverflowError();
- }
-
- if (is_unreachable()) {
- return unreachable();
- }
- if (is_infinite()) {
- return infinity();
- }
-
- if (val < d.val) {
- throw DepthOverflowError();
- }
-
- u32 rv = val - d.val;
- return depth(rv);
- }
-
- depth &operator-=(const depth &d) {
- depth rv = *this - d;
- *this = rv;
- return *this;
- }
-
- depth operator+(s32 d) const {
- if (is_unreachable()) {
- return unreachable();
- }
- if (is_infinite()) {
- return infinity();
- }
-
- s64a rv = val + d;
- if (rv < 0 || (u64a)rv >= val_infinity) {
- DEBUG_PRINTF("depth %lld too large to represent!\n", rv);
- throw DepthOverflowError();
- }
-
- return depth((u32)rv);
- }
-
- depth operator+=(s32 d) {
- depth rv = *this + d;
- *this = rv;
- return *this;
- }
-
+ if (v > max_value()) {
+ DEBUG_PRINTF("depth %u too large to represent!\n", v);
+ throw DepthOverflowError();
+ }
+ }
+
+ static depth unreachable() {
+ depth d;
+ d.val = val_unreachable;
+ return d;
+ }
+
+ static depth infinity() {
+ depth d;
+ d.val = val_infinity;
+ return d;
+ }
+
+ /** \brief Returns the max finite value representable as a depth. */
+ static constexpr u32 max_value() { return val_infinity - 1; }
+
+ bool is_finite() const { return val < val_infinity; }
+ bool is_infinite() const { return val == val_infinity; }
+ bool is_unreachable() const { return val == val_unreachable; }
+ bool is_reachable() const { return !is_unreachable(); }
+
+ /** \brief Convert a finite depth to an integer. */
+ operator u32() const {
+ if (!is_finite()) {
+ throw DepthOverflowError();
+ }
+ return val;
+ }
+
+ bool operator<(const depth &d) const { return val < d.val; }
+ bool operator==(const depth &d) const { return val == d.val; }
+
+ // The following comparison operators exist for use against integer types
+ // that are bigger than what we can safely convert to depth (such as those
+ // in extparam).
+
+ bool operator<(u64a d) const {
+ if (!is_finite()) {
+ return false;
+ }
+ return val < d;
+ }
+ bool operator<=(u64a d) const {
+ if (!is_finite()) {
+ return false;
+ }
+ return val <= d;
+ }
+ bool operator==(u64a d) const {
+ if (!is_finite()) {
+ return false;
+ }
+ return val == d;
+ }
+ bool operator>(u64a d) const { return !(*this <= d); }
+ bool operator>=(u64a d) const { return !(*this < d); }
+ bool operator!=(u64a d) const { return !(*this == d); }
+
+ depth operator+(const depth &d) const {
+ if (is_unreachable() || d.is_unreachable()) {
+ return unreachable();
+ }
+ if (is_infinite() || d.is_infinite()) {
+ return infinity();
+ }
+
+ u64a rv = val + d.val;
+ if (rv >= val_infinity) {
+ DEBUG_PRINTF("depth %llu too large to represent!\n", rv);
+ throw DepthOverflowError();
+ }
+
+ return depth((u32)rv);
+ }
+
+ depth &operator+=(const depth &d) {
+ depth rv = *this + d;
+ *this = rv;
+ return *this;
+ }
+
+ depth operator-(const depth &d) const {
+ if (!d.is_finite()) {
+ throw DepthOverflowError();
+ }
+
+ if (is_unreachable()) {
+ return unreachable();
+ }
+ if (is_infinite()) {
+ return infinity();
+ }
+
+ if (val < d.val) {
+ throw DepthOverflowError();
+ }
+
+ u32 rv = val - d.val;
+ return depth(rv);
+ }
+
+ depth &operator-=(const depth &d) {
+ depth rv = *this - d;
+ *this = rv;
+ return *this;
+ }
+
+ depth operator+(s32 d) const {
+ if (is_unreachable()) {
+ return unreachable();
+ }
+ if (is_infinite()) {
+ return infinity();
+ }
+
+ s64a rv = val + d;
+ if (rv < 0 || (u64a)rv >= val_infinity) {
+ DEBUG_PRINTF("depth %lld too large to represent!\n", rv);
+ throw DepthOverflowError();
+ }
+
+ return depth((u32)rv);
+ }
+
+ depth operator+=(s32 d) {
+ depth rv = *this + d;
+ *this = rv;
+ return *this;
+ }
+
depth operator-(s32 d) const {
if (is_unreachable()) {
return unreachable();
@@ -216,57 +216,57 @@ public:
return *this;
}
-#ifdef DUMP_SUPPORT
- /** \brief Render as a string, useful for debugging. */
- std::string str() const;
-#endif
-
+#ifdef DUMP_SUPPORT
+ /** \brief Render as a string, useful for debugging. */
+ std::string str() const;
+#endif
+
size_t hash() const {
return val;
- }
-
-private:
- static constexpr u32 val_infinity = (1u << 31) - 1;
- static constexpr u32 val_unreachable = 1u << 31;
-
+ }
+
+private:
+ static constexpr u32 val_infinity = (1u << 31) - 1;
+ static constexpr u32 val_unreachable = 1u << 31;
+
u32 val = val_unreachable;
-};
-
-/**
- * \brief Encapsulates a min/max pair.
- */
+};
+
+/**
+ * \brief Encapsulates a min/max pair.
+ */
struct DepthMinMax : totally_ordered<DepthMinMax> {
depth min{depth::infinity()};
depth max{0};
-
+
DepthMinMax() = default;
- DepthMinMax(const depth &mn, const depth &mx) : min(mn), max(mx) {}
-
- bool operator<(const DepthMinMax &b) const {
- if (min != b.min) {
- return min < b.min;
- }
- return max < b.max;
- }
-
- bool operator==(const DepthMinMax &b) const {
- return min == b.min && max == b.max;
- }
-
-#ifdef DUMP_SUPPORT
- /** \brief Render as a string, useful for debugging. */
- std::string str() const;
-#endif
-
-};
-
-/**
- * \brief Merge two DepthMinMax values together to produce their union.
- */
-DepthMinMax unionDepthMinMax(const DepthMinMax &a, const DepthMinMax &b);
-
-} // namespace ue2
-
+ DepthMinMax(const depth &mn, const depth &mx) : min(mn), max(mx) {}
+
+ bool operator<(const DepthMinMax &b) const {
+ if (min != b.min) {
+ return min < b.min;
+ }
+ return max < b.max;
+ }
+
+ bool operator==(const DepthMinMax &b) const {
+ return min == b.min && max == b.max;
+ }
+
+#ifdef DUMP_SUPPORT
+ /** \brief Render as a string, useful for debugging. */
+ std::string str() const;
+#endif
+
+};
+
+/**
+ * \brief Merge two DepthMinMax values together to produce their union.
+ */
+DepthMinMax unionDepthMinMax(const DepthMinMax &a, const DepthMinMax &b);
+
+} // namespace ue2
+
namespace std {
template<>
@@ -285,4 +285,4 @@ struct hash<ue2::DepthMinMax> {
} // namespace
-#endif // DEPTH_H
+#endif // DEPTH_H
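The arithmetic operators above propagate the special values ("unreachable" dominates, then "infinity") and throw DepthOverflowError instead of wrapping. A small sketch (not part of this diff):

    #include "util/depth.h"

    #include <cstdio>

    int main() {
        using ue2::depth;

        depth d = depth(10) + depth(5);         // finite arithmetic: 15
        depth inf = depth::infinity() + d;      // infinity absorbs finite values
        depth unr = depth::unreachable() + inf; // unreachable dominates

        std::printf("%u %d %d\n", (u32)d, inf.is_infinite(), unr.is_unreachable());

        try {
            depth big(depth::max_value());
            big += depth(1); // would reach the "infinity" encoding
        } catch (const ue2::DepthOverflowError &) {
            std::printf("overflow\n");
        }
        return 0;
    }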
diff --git a/contrib/libs/hyperscan/src/util/determinise.h b/contrib/libs/hyperscan/src/util/determinise.h
index 102a197441..0beeeef0a8 100644
--- a/contrib/libs/hyperscan/src/util/determinise.h
+++ b/contrib/libs/hyperscan/src/util/determinise.h
@@ -1,205 +1,205 @@
-/*
+/*
* Copyright (c) 2015-2017, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \file
- * \brief DFA determinisation code.
- */
-
-#ifndef DETERMINISE_H
-#define DETERMINISE_H
-
-#include "nfagraph/ng_holder.h"
-#include "charreach.h"
-#include "container.h"
-#include "ue2common.h"
-
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * \brief DFA determinisation code.
+ */
+
+#ifndef DETERMINISE_H
+#define DETERMINISE_H
+
+#include "nfagraph/ng_holder.h"
+#include "charreach.h"
+#include "container.h"
+#include "ue2common.h"
+
#include <algorithm>
-#include <array>
+#include <array>
#include <queue>
-#include <vector>
-
-namespace ue2 {
-
-/* Automaton details:
- *
- * const vector<StateSet> initial()
- * returns initial states to start determinising from. StateSets in the
- * initial() vector will be given consecutive ids starting from 1, in the order
- * that they appear.
- *
- * void reports(StateSet s, flat_set<ReportID> *out)
- * fills out with any reports that need to be raised for the stateset.
- *
- * void reportsEod(StateSet s, flat_set<ReportID> *out)
- * fills out with any reports that need to be raised for the stateset at EOD.
- *
- * void transition(const StateSet &in, StateSet *next)
- * fills the next array such that next[i] is the stateset that in transitions to
- * on seeing symbol i (i is in the compressed alphabet of the automaton).
- *
- * u16 alphasize
- * size of the compressed alphabet
- */
-
-/** \brief determinises an automaton (an NFA or similar)
- * \param n the automaton to determinise
- * \param dstates output dfa states
- * \param state_limit limit on the number of dfa states to construct
- * \param statesets_out a mapping from DFA state to the set of NFA states in
- * the automaton
+#include <vector>
+
+namespace ue2 {
+
+/* Automaton details:
+ *
+ * const vector<StateSet> initial()
+ * returns initial states to start determinising from. StateSets in the
+ * initial() vector will be given consecutive ids starting from 1, in the order
+ * that they appear.
+ *
+ * void reports(StateSet s, flat_set<ReportID> *out)
+ * fills out with any reports that need to be raised for the stateset.
+ *
+ * void reportsEod(StateSet s, flat_set<ReportID> *out)
+ * fills out with any reports that need to be raised for the stateset at EOD.
+ *
+ * void transition(const StateSet &in, StateSet *next)
+ * fills the next array such that next[i] is the stateset that in transitions to
+ * on seeing symbol i (i is in the compressed alphabet of the automaton).
+ *
+ * u16 alphasize
+ * size of the compressed alphabet
+ */
+
+/** \brief determinises an automaton (an NFA or similar)
+ * \param n the automaton to determinise
+ * \param dstates output dfa states
+ * \param state_limit limit on the number of dfa states to construct
+ * \param statesets_out a mapping from DFA state to the set of NFA states in
+ * the automaton
* \return true on success, false if state limit exceeded
- */
-template<class Auto, class ds>
-never_inline
+ */
+template<class Auto, class ds>
+never_inline
bool determinise(Auto &n, std::vector<ds> &dstates, size_t state_limit,
- std::vector<typename Auto::StateSet> *statesets_out = nullptr) {
- DEBUG_PRINTF("the determinator\n");
+ std::vector<typename Auto::StateSet> *statesets_out = nullptr) {
+ DEBUG_PRINTF("the determinator\n");
using StateSet = typename Auto::StateSet;
typename Auto::StateMap dstate_ids;
-
- const size_t alphabet_size = n.alphasize;
-
+
+ const size_t alphabet_size = n.alphasize;
+
dstates.clear();
dstates.reserve(state_limit);
-
+
dstate_ids.emplace(n.dead, DEAD_STATE);
- dstates.push_back(ds(alphabet_size));
- std::fill_n(dstates[0].next.begin(), alphabet_size, DEAD_STATE);
-
+ dstates.push_back(ds(alphabet_size));
+ std::fill_n(dstates[0].next.begin(), alphabet_size, DEAD_STATE);
+
std::queue<std::pair<StateSet, dstate_id_t>> q;
q.emplace(n.dead, DEAD_STATE);
-
- const std::vector<StateSet> &init = n.initial();
- for (u32 i = 0; i < init.size(); i++) {
+
+ const std::vector<StateSet> &init = n.initial();
+ for (u32 i = 0; i < init.size(); i++) {
q.emplace(init[i], dstates.size());
- assert(!contains(dstate_ids, init[i]));
+ assert(!contains(dstate_ids, init[i]));
dstate_ids.emplace(init[i], dstates.size());
- dstates.push_back(ds(alphabet_size));
- }
-
- std::vector<StateSet> succs(alphabet_size, n.dead);
-
+ dstates.push_back(ds(alphabet_size));
+ }
+
+ std::vector<StateSet> succs(alphabet_size, n.dead);
+
while (!q.empty()) {
auto m = std::move(q.front());
q.pop();
StateSet &curr = m.first;
dstate_id_t curr_id = m.second;
- DEBUG_PRINTF("curr: %hu\n", curr_id);
-
- /* fill in accepts */
- n.reports(curr, dstates[curr_id].reports);
- n.reportsEod(curr, dstates[curr_id].reports_eod);
-
- if (!dstates[curr_id].reports.empty()) {
- DEBUG_PRINTF("curr: %hu: is accept\n", curr_id);
- }
-
- if (!dstates[curr_id].reports.empty()) {
- /* only external reports set ekeys */
- if (n.canPrune(dstates[curr_id].reports)) {
- /* we only transition to dead on characters, TOPs leave us
- * alone */
- std::fill_n(dstates[curr_id].next.begin(), alphabet_size,
- DEAD_STATE);
- dstates[curr_id].next[n.alpha[TOP]] = curr_id;
- continue;
- }
- }
-
- /* fill in successor states */
- n.transition(curr, &succs[0]);
- for (symbol_t s = 0; s < n.alphasize; s++) {
- dstate_id_t succ_id;
- if (s && succs[s] == succs[s - 1]) {
- succ_id = dstates[curr_id].next[s - 1];
- } else {
+ DEBUG_PRINTF("curr: %hu\n", curr_id);
+
+ /* fill in accepts */
+ n.reports(curr, dstates[curr_id].reports);
+ n.reportsEod(curr, dstates[curr_id].reports_eod);
+
+ if (!dstates[curr_id].reports.empty()) {
+ DEBUG_PRINTF("curr: %hu: is accept\n", curr_id);
+ }
+
+ if (!dstates[curr_id].reports.empty()) {
+ /* only external reports set ekeys */
+ if (n.canPrune(dstates[curr_id].reports)) {
+ /* we only transition to dead on characters, TOPs leave us
+ * alone */
+ std::fill_n(dstates[curr_id].next.begin(), alphabet_size,
+ DEAD_STATE);
+ dstates[curr_id].next[n.alpha[TOP]] = curr_id;
+ continue;
+ }
+ }
+
+ /* fill in successor states */
+ n.transition(curr, &succs[0]);
+ for (symbol_t s = 0; s < n.alphasize; s++) {
+ dstate_id_t succ_id;
+ if (s && succs[s] == succs[s - 1]) {
+ succ_id = dstates[curr_id].next[s - 1];
+ } else {
auto p = dstate_ids.find(succs[s]);
if (p != dstate_ids.end()) { // succ[s] is already present
succ_id = p->second;
- if (succ_id > curr_id && !dstates[succ_id].daddy
- && n.unalpha[s] < N_CHARS) {
- dstates[succ_id].daddy = curr_id;
- }
- } else {
+ if (succ_id > curr_id && !dstates[succ_id].daddy
+ && n.unalpha[s] < N_CHARS) {
+ dstates[succ_id].daddy = curr_id;
+ }
+ } else {
succ_id = dstate_ids.size();
dstate_ids.emplace(succs[s], succ_id);
- dstates.push_back(ds(alphabet_size));
- dstates.back().daddy = n.unalpha[s] < N_CHARS ? curr_id : 0;
+ dstates.push_back(ds(alphabet_size));
+ dstates.back().daddy = n.unalpha[s] < N_CHARS ? curr_id : 0;
q.emplace(succs[s], succ_id);
- }
-
- DEBUG_PRINTF("-->%hu on %02hx\n", succ_id, n.unalpha[s]);
- }
-
- if (succ_id >= state_limit) {
+ }
+
+ DEBUG_PRINTF("-->%hu on %02hx\n", succ_id, n.unalpha[s]);
+ }
+
+ if (succ_id >= state_limit) {
DEBUG_PRINTF("succ_id %hu >= state_limit %zu\n",
- succ_id, state_limit);
+ succ_id, state_limit);
dstates.clear();
return false;
- }
-
- dstates[curr_id].next[s] = succ_id;
- }
- }
-
+ }
+
+ dstates[curr_id].next[s] = succ_id;
+ }
+ }
+
// The dstates vector will persist in the raw_dfa.
dstates.shrink_to_fit();
- if (statesets_out) {
+ if (statesets_out) {
auto &statesets = *statesets_out;
statesets.resize(dstate_ids.size());
for (auto &m : dstate_ids) {
statesets[m.second] = std::move(m.first);
}
- }
+ }
- DEBUG_PRINTF("ok\n");
+ DEBUG_PRINTF("ok\n");
return true;
-}
-
-static inline
-std::vector<CharReach> populateCR(const NGHolder &g,
- const std::vector<NFAVertex> &v_by_index,
- const std::array<u16, ALPHABET_SIZE> &alpha) {
- std::vector<CharReach> cr_by_index(v_by_index.size());
-
- for (size_t i = 0; i < v_by_index.size(); i++) {
- const CharReach &cr = g[v_by_index[i]].char_reach;
- CharReach &cr_out = cr_by_index[i];
- for (size_t s = cr.find_first(); s != cr.npos; s = cr.find_next(s)) {
- cr_out.set(alpha[s]);
- }
- }
-
- return cr_by_index;
-}
-
-} // namespace ue2
-
-#endif
+}
+
+static inline
+std::vector<CharReach> populateCR(const NGHolder &g,
+ const std::vector<NFAVertex> &v_by_index,
+ const std::array<u16, ALPHABET_SIZE> &alpha) {
+ std::vector<CharReach> cr_by_index(v_by_index.size());
+
+ for (size_t i = 0; i < v_by_index.size(); i++) {
+ const CharReach &cr = g[v_by_index[i]].char_reach;
+ CharReach &cr_out = cr_by_index[i];
+ for (size_t s = cr.find_first(); s != cr.npos; s = cr.find_next(s)) {
+ cr_out.set(alpha[s]);
+ }
+ }
+
+ return cr_by_index;
+}
+
+} // namespace ue2
+
+#endif
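
For orientation, the Automaton interface documented above is small enough to sketch. The toy below is illustrative only: ToyAuto and its transition rule are invented, std::set stands in for the library's flat_set<ReportID>, and the real determinise() body additionally consults n.alpha, n.unalpha and n.canPrune(), which are omitted here.

    #include <map>
    #include <set>
    #include <vector>

    struct ToyAuto {
        using StateSet = unsigned;                  // bitmask of NFA states 0..7
        using StateMap = std::map<StateSet, unsigned short>;

        StateSet dead = 0;                          // empty set: nothing alive
        unsigned short alphasize = 2;               // two compressed symbols

        std::vector<StateSet> initial() const {     // assigned ids 1.. in order
            return {1u << 0};                       // start in NFA state 0
        }
        void reports(StateSet s, std::set<unsigned> *out) const {
            if (s & (1u << 7)) { out->insert(42); } // NFA state 7 accepts
        }
        void reportsEod(StateSet, std::set<unsigned> *) const {}
        void transition(const StateSet &in, StateSet *next) const {
            next[0] = in;                           // symbol 0: self-loop
            next[1] = (in << 1) & 0xffu;            // symbol 1: advance a state
        }
    };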
diff --git a/contrib/libs/hyperscan/src/util/dump_charclass.h b/contrib/libs/hyperscan/src/util/dump_charclass.h
index 999641340a..aa6b3b4d56 100644
--- a/contrib/libs/hyperscan/src/util/dump_charclass.h
+++ b/contrib/libs/hyperscan/src/util/dump_charclass.h
@@ -1,61 +1,61 @@
-/*
+/*
* Copyright (c) 2015-2017, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \file
- * \brief Dump code for character classes (expressed as CharReach objects).
- */
-
-#ifndef DUMP_CHARCLASS_H
-#define DUMP_CHARCLASS_H
-
-#include "ue2common.h"
-
-#include <cstdio>
-#include <ostream>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * \brief Dump code for character classes (expressed as CharReach objects).
+ */
+
+#ifndef DUMP_CHARCLASS_H
+#define DUMP_CHARCLASS_H
+
+#include "ue2common.h"
+
+#include <cstdio>
+#include <ostream>
#include <sstream>
-#include <string>
+#include <string>
#include <vector>
-
-namespace ue2 {
-
-enum cc_output_t {
- CC_OUT_TEXT, //!< unescaped text output
- CC_OUT_DOT //!< escaped DOT label output
-};
-
-class CharReach;
-
+
+namespace ue2 {
+
+enum cc_output_t {
+ CC_OUT_TEXT, //!< unescaped text output
+ CC_OUT_DOT //!< escaped DOT label output
+};
+
+class CharReach;
+
void describeClass(std::ostream &os, const CharReach &cr, size_t maxLength = 16,
enum cc_output_t out_type = CC_OUT_TEXT);
-
-std::string describeClass(const CharReach &cr, size_t maxLength = 16,
- enum cc_output_t out_type = CC_OUT_TEXT);
-
+
+std::string describeClass(const CharReach &cr, size_t maxLength = 16,
+ enum cc_output_t out_type = CC_OUT_TEXT);
+
template<typename Container>
std::string describeClasses(const Container &container,
size_t maxClassLength = 16,
@@ -67,9 +67,9 @@ std::string describeClasses(const Container &container,
return oss.str();
}
-void describeClass(FILE *f, const CharReach &cr, size_t maxLength,
- enum cc_output_t out_type);
-
-} // namespace ue2
-
-#endif // DUMP_CHARCLASS_H
+void describeClass(FILE *f, const CharReach &cr, size_t maxLength,
+ enum cc_output_t out_type);
+
+} // namespace ue2
+
+#endif // DUMP_CHARCLASS_H
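
A hedged usage sketch of the describe helpers above; the printed forms are indicative only, since rendering depends on maxLength and the output mode:

    #include <iostream>
    #include <vector>
    #include "util/charreach.h"
    #include "util/dump_charclass.h"

    int main() {
        ue2::CharReach cr;
        cr.setRange('0', '9');                        // digits
        cr.set('a');
        std::cout << ue2::describeClass(cr) << '\n';  // e.g. "[0-9a]"
        std::vector<ue2::CharReach> v = {cr, ue2::CharReach('x')};
        std::cout << ue2::describeClasses(v) << '\n'; // classes concatenated
    }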
diff --git a/contrib/libs/hyperscan/src/util/dump_mask.cpp b/contrib/libs/hyperscan/src/util/dump_mask.cpp
index 445f79b3af..bc704805db 100644
--- a/contrib/libs/hyperscan/src/util/dump_mask.cpp
+++ b/contrib/libs/hyperscan/src/util/dump_mask.cpp
@@ -1,63 +1,63 @@
-/*
- * Copyright (c) 2015, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \file
- * \brief Dump code for bitmasks.
- *
- * Note that these functions are only emitted in builds with DUMP_SUPPORT.
- */
-
-#include "config.h"
-
-#ifdef DUMP_SUPPORT
-
-#include "ue2common.h"
-#include "util/dump_mask.h"
-
-#include <string>
-
-namespace ue2 {
-
-std::string dumpMask(const u8 *mask, size_t len) {
- std::string s;
- s.reserve(len + len / 8);
-
- for (size_t i = 0; i < len; i++) {
- if ((i % 8) == 0 && i != 0) {
- s.push_back(' ');
- }
-
- s.push_back((mask[i / 8] >> (i % 8)) & 0x1 ? '1' : '0');
- }
-
- return s;
-}
-
-} // namespace ue2
-
-#endif // DUMP_SUPPORT
+/*
+ * Copyright (c) 2015, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * \brief Dump code for bitmasks.
+ *
+ * Note that these functions are only emitted in builds with DUMP_SUPPORT.
+ */
+
+#include "config.h"
+
+#ifdef DUMP_SUPPORT
+
+#include "ue2common.h"
+#include "util/dump_mask.h"
+
+#include <string>
+
+namespace ue2 {
+
+std::string dumpMask(const u8 *mask, size_t len) {
+ std::string s;
+ s.reserve(len + len / 8);
+
+ for (size_t i = 0; i < len; i++) {
+ if ((i % 8) == 0 && i != 0) {
+ s.push_back(' ');
+ }
+
+ s.push_back((mask[i / 8] >> (i % 8)) & 0x1 ? '1' : '0');
+ }
+
+ return s;
+}
+
+} // namespace ue2
+
+#endif // DUMP_SUPPORT
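
Note the bit order in the loop above: bits are emitted LSB-first within each byte, space-separated in groups of eight. A small check, illustrative only and available in DUMP_SUPPORT builds:

    u8 mask[2] = {0x01, 0x80};        // bit 0 of byte 0, bit 7 of byte 1
    std::string s = ue2::dumpMask(mask, 16);
    assert(s == "10000000 00000001");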
diff --git a/contrib/libs/hyperscan/src/util/dump_mask.h b/contrib/libs/hyperscan/src/util/dump_mask.h
index 04792ba7cc..791f2e4f88 100644
--- a/contrib/libs/hyperscan/src/util/dump_mask.h
+++ b/contrib/libs/hyperscan/src/util/dump_mask.h
@@ -1,55 +1,55 @@
-/*
- * Copyright (c) 2015, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \file
- * \brief Dump code for bitmasks.
- *
- * Note that these functions are only emitted in builds with DUMP_SUPPORT.
- */
-
-#ifndef DUMP_MASK_H
-#define DUMP_MASK_H
-
-#ifdef DUMP_SUPPORT
-
-#include "ue2common.h"
-#include <string>
-
-namespace ue2 {
-
-/**
- * Returns a representation of the given mask in binary, as a string of 1s and
- * 0s.
- */
-std::string dumpMask(const u8 *mask, size_t len);
-
-} // namespace ue2
-
-#endif // DUMP_SUPPORT
-
-#endif // DUMP_MASK_H
+/*
+ * Copyright (c) 2015, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * \brief Dump code for bitmasks.
+ *
+ * Note that these functions are only emitted in builds with DUMP_SUPPORT.
+ */
+
+#ifndef DUMP_MASK_H
+#define DUMP_MASK_H
+
+#ifdef DUMP_SUPPORT
+
+#include "ue2common.h"
+#include <string>
+
+namespace ue2 {
+
+/**
+ * Returns a representation of the given mask in binary, as a string of 1s and
+ * 0s.
+ */
+std::string dumpMask(const u8 *mask, size_t len);
+
+} // namespace ue2
+
+#endif // DUMP_SUPPORT
+
+#endif // DUMP_MASK_H
diff --git a/contrib/libs/hyperscan/src/util/exhaust.h b/contrib/libs/hyperscan/src/util/exhaust.h
index d6f2ac06d9..3f1eab4a0a 100644
--- a/contrib/libs/hyperscan/src/util/exhaust.h
+++ b/contrib/libs/hyperscan/src/util/exhaust.h
@@ -1,41 +1,41 @@
-/*
+/*
* Copyright (c) 2015-2016, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \file
- * \brief Inline functions for manipulating exhaustion vector.
- */
-
-#ifndef EXHAUST_H
-#define EXHAUST_H
-
-#include "ue2common.h"
-
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * \brief Inline functions for manipulating exhaustion vector.
+ */
+
+#ifndef EXHAUST_H
+#define EXHAUST_H
+
+#include "ue2common.h"
+
/** Index meaning a given exhaustion key is invalid. */
#define INVALID_EKEY (~(u32)0)
-
-#endif
+
+#endif
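
Illustrative only (the surrounding report fields are not shown): code that carries an exhaustion key uses the sentinel to mean "no key applies".

    u32 ekey = INVALID_EKEY;          // this report has no exhaustion key
    if (ekey != INVALID_EKEY) {
        /* mark the key in the exhaustion vector */
    }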
diff --git a/contrib/libs/hyperscan/src/util/fatbit.h b/contrib/libs/hyperscan/src/util/fatbit.h
index 3c65db1a59..b8860d7734 100644
--- a/contrib/libs/hyperscan/src/util/fatbit.h
+++ b/contrib/libs/hyperscan/src/util/fatbit.h
@@ -1,93 +1,93 @@
-/*
+/*
* Copyright (c) 2015-2016, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef FATBIT_H
-#define FATBIT_H
-
-/** \file
- * \brief Fatbit: fast bitset structure for use in scratch.
- * Uses more space than mmbit, avoiding partial words for (hopefully) a tad
- * more performance.
- *
- * API is also trimmed down.
- */
-
-#include "multibit.h"
-#include "ue2common.h"
-
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef FATBIT_H
+#define FATBIT_H
+
+/** \file
+ * \brief Fatbit: fast bitset structure for use in scratch.
+ * Uses more space than mmbit, avoiding partial words for (hopefully) a tad
+ * more performance.
+ *
+ * API is also trimmed down.
+ */
+
+#include "multibit.h"
+#include "ue2common.h"
+
#ifdef __cplusplus
extern "C" {
#endif
-#define MIN_FAT_SIZE 32
-
-struct fatbit {
- union {
- u64a flat[MIN_FAT_SIZE / sizeof(u64a)];
- u8 raw[MIN_FAT_SIZE];
- } fb_int;
- u64a tail[];
-};
-
-static really_inline
-void fatbit_clear(struct fatbit *bits) {
- assert(ISALIGNED(bits));
- memset(bits, 0, sizeof(struct fatbit));
-}
-
-static really_inline
-char fatbit_set(struct fatbit *bits, u32 total_bits, u32 key) {
+#define MIN_FAT_SIZE 32
+
+struct fatbit {
+ union {
+ u64a flat[MIN_FAT_SIZE / sizeof(u64a)];
+ u8 raw[MIN_FAT_SIZE];
+ } fb_int;
+ u64a tail[];
+};
+
+static really_inline
+void fatbit_clear(struct fatbit *bits) {
+ assert(ISALIGNED(bits));
+ memset(bits, 0, sizeof(struct fatbit));
+}
+
+static really_inline
+char fatbit_set(struct fatbit *bits, u32 total_bits, u32 key) {
assert(ISALIGNED(bits));
- return mmbit_set(bits->fb_int.raw, total_bits, key);
-}
-
-static really_inline
-void fatbit_unset(struct fatbit *bits, u32 total_bits, u32 key) {
+ return mmbit_set(bits->fb_int.raw, total_bits, key);
+}
+
+static really_inline
+void fatbit_unset(struct fatbit *bits, u32 total_bits, u32 key) {
assert(ISALIGNED(bits));
- mmbit_unset(bits->fb_int.raw, total_bits, key);
-}
-
-static really_inline
-char fatbit_isset(const struct fatbit *bits, u32 total_bits, u32 key) {
+ mmbit_unset(bits->fb_int.raw, total_bits, key);
+}
+
+static really_inline
+char fatbit_isset(const struct fatbit *bits, u32 total_bits, u32 key) {
assert(ISALIGNED(bits));
- return mmbit_isset(bits->fb_int.raw, total_bits, key);
-}
-
-static really_inline
-u32 fatbit_iterate(const struct fatbit *bits, u32 total_bits, u32 it_in) {
+ return mmbit_isset(bits->fb_int.raw, total_bits, key);
+}
+
+static really_inline
+u32 fatbit_iterate(const struct fatbit *bits, u32 total_bits, u32 it_in) {
assert(ISALIGNED(bits));
- /* TODO: iterate_flat could be specialised as we don't have to worry about
- * partial blocks. */
- return mmbit_iterate(bits->fb_int.raw, total_bits, it_in);
-}
-
+ /* TODO: iterate_flat could be specialised as we don't have to worry about
+ * partial blocks. */
+ return mmbit_iterate(bits->fb_int.raw, total_bits, it_in);
+}
+
#ifdef __cplusplus
} // extern "C"
#endif
-
-#endif
+
+#endif
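
A sketch of the API above under a stated assumption: for a set this small, the MIN_FAT_SIZE header alone is big enough, so stack storage suffices (larger sets must be sized at build time). MMB_INVALID is the multibit iteration sentinel.

    alignas(16) char storage[sizeof(struct fatbit)];
    struct fatbit *bits = reinterpret_cast<struct fatbit *>(storage);
    fatbit_clear(bits);
    fatbit_set(bits, 64, 3);                     // returns 0: was clear
    fatbit_set(bits, 64, 17);
    assert(fatbit_isset(bits, 64, 3));
    fatbit_unset(bits, 64, 3);
    for (u32 i = fatbit_iterate(bits, 64, MMB_INVALID); i != MMB_INVALID;
         i = fatbit_iterate(bits, 64, i)) {
        // visits key 17 only
    }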
diff --git a/contrib/libs/hyperscan/src/util/graph.h b/contrib/libs/hyperscan/src/util/graph.h
index 3e18dae552..15f5694cab 100644
--- a/contrib/libs/hyperscan/src/util/graph.h
+++ b/contrib/libs/hyperscan/src/util/graph.h
@@ -1,151 +1,151 @@
-/*
+/*
* Copyright (c) 2015-2017, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \file
- * \brief Functions for graph manipulation that aren't in the base BGL toolkit.
- */
-
-#ifndef UTIL_GRAPH_H
-#define UTIL_GRAPH_H
-
-#include "container.h"
-#include "ue2common.h"
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * \brief Functions for graph manipulation that aren't in the base BGL toolkit.
+ */
+
+#ifndef UTIL_GRAPH_H
+#define UTIL_GRAPH_H
+
+#include "container.h"
+#include "ue2common.h"
#include "util/flat_containers.h"
-#include "util/graph_range.h"
+#include "util/graph_range.h"
#include "util/unordered.h"
-
-#include <boost/graph/depth_first_search.hpp>
+
+#include <boost/graph/depth_first_search.hpp>
#include <boost/graph/strong_components.hpp>
#include <boost/range/adaptor/map.hpp>
-
+
#include <algorithm>
#include <map>
#include <set>
#include <utility>
#include <vector>
-namespace ue2 {
-
-/** \brief True if the given vertex has no out-edges. */
-template<class Graph>
-bool isLeafNode(const typename Graph::vertex_descriptor& v, const Graph& g) {
+namespace ue2 {
+
+/** \brief True if the given vertex has no out-edges. */
+template<class Graph>
+bool isLeafNode(const typename Graph::vertex_descriptor& v, const Graph& g) {
return out_degree(v, g) == 0;
-}
-
-/** \brief True if vertex \a v has an edge to itself. */
-template<class Graph>
-bool hasSelfLoop(const typename Graph::vertex_descriptor &v, const Graph &g) {
- return edge(v, v, g).second;
-}
-
-/** \brief True if any vertex in [it, end) has an edge to itself. */
-template<class Graph, class Iterator>
-bool anySelfLoop(const Graph &g, Iterator it, const Iterator &end) {
- for (; it != end; ++it) {
- if (hasSelfLoop(*it, g)) {
- return true;
- }
- }
-
- return false;
-}
-
-/** \brief Returns the out-degree of vertex \a v, ignoring self-loops. */
-template<class Graph>
-size_t proper_out_degree(const typename Graph::vertex_descriptor &v,
- const Graph &g) {
- return out_degree(v, g) - (edge(v, v, g).second ? 1 : 0);
-}
-
-/** \brief Returns the in-degree of vertex \a v, ignoring self-loops. */
-template<class Graph>
-size_t proper_in_degree(const typename Graph::vertex_descriptor &v,
- const Graph &g) {
- return in_degree(v, g) - (edge(v, v, g).second ? 1 : 0);
-}
-
-/** \brief True if vertex \a v has at least one successor. */
-template<class Graph>
-bool has_successor(const typename Graph::vertex_descriptor &v, const Graph &g) {
+}
+
+/** \brief True if vertex \a v has an edge to itself. */
+template<class Graph>
+bool hasSelfLoop(const typename Graph::vertex_descriptor &v, const Graph &g) {
+ return edge(v, v, g).second;
+}
+
+/** \brief True if any vertex in [it, end) has an edge to itself. */
+template<class Graph, class Iterator>
+bool anySelfLoop(const Graph &g, Iterator it, const Iterator &end) {
+ for (; it != end; ++it) {
+ if (hasSelfLoop(*it, g)) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/** \brief Returns the out-degree of vertex \a v, ignoring self-loops. */
+template<class Graph>
+size_t proper_out_degree(const typename Graph::vertex_descriptor &v,
+ const Graph &g) {
+ return out_degree(v, g) - (edge(v, v, g).second ? 1 : 0);
+}
+
+/** \brief Returns the in-degree of vertex \a v, ignoring self-loops. */
+template<class Graph>
+size_t proper_in_degree(const typename Graph::vertex_descriptor &v,
+ const Graph &g) {
+ return in_degree(v, g) - (edge(v, v, g).second ? 1 : 0);
+}
+
+/** \brief True if vertex \a v has at least one successor. */
+template<class Graph>
+bool has_successor(const typename Graph::vertex_descriptor &v, const Graph &g) {
return out_degree(v, g) > 0;
-}
-
-/** \brief True if vertex \a v has at least one successor other than itself. */
-template<class Graph>
-bool has_proper_successor(const typename Graph::vertex_descriptor &v,
- const Graph &g) {
- typename Graph::adjacency_iterator ai, ae;
- std::tie(ai, ae) = adjacent_vertices(v, g);
- if (ai == ae) {
- return false;
- }
- if (*ai == v) {
- ++ai; // skip self-loop
- }
-
- return ai != ae;
-}
-
-/** \brief Find the set of vertices that are reachable from the vertices in \a
- * sources. */
-template<class Graph, class SourceCont, class OutCont>
-void find_reachable(const Graph &g, const SourceCont &sources, OutCont *out) {
- using vertex_descriptor = typename Graph::vertex_descriptor;
+}
+
+/** \brief True if vertex \a v has at least one successor other than itself. */
+template<class Graph>
+bool has_proper_successor(const typename Graph::vertex_descriptor &v,
+ const Graph &g) {
+ typename Graph::adjacency_iterator ai, ae;
+ std::tie(ai, ae) = adjacent_vertices(v, g);
+ if (ai == ae) {
+ return false;
+ }
+ if (*ai == v) {
+ ++ai; // skip self-loop
+ }
+
+ return ai != ae;
+}
+
+/** \brief Find the set of vertices that are reachable from the vertices in \a
+ * sources. */
+template<class Graph, class SourceCont, class OutCont>
+void find_reachable(const Graph &g, const SourceCont &sources, OutCont *out) {
+ using vertex_descriptor = typename Graph::vertex_descriptor;
std::unordered_map<vertex_descriptor, boost::default_color_type> colours;
-
- for (auto v : sources) {
- boost::depth_first_visit(g, v,
- boost::make_dfs_visitor(boost::null_visitor()),
- boost::make_assoc_property_map(colours));
- }
-
- for (const auto &e : colours) {
- out->insert(e.first);
- }
-}
-
-/** \brief Find the set of vertices that are NOT reachable from the vertices in
- * \a sources. */
-template<class Graph, class SourceCont, class OutCont>
-void find_unreachable(const Graph &g, const SourceCont &sources, OutCont *out) {
- using vertex_descriptor = typename Graph::vertex_descriptor;
+
+ for (auto v : sources) {
+ boost::depth_first_visit(g, v,
+ boost::make_dfs_visitor(boost::null_visitor()),
+ boost::make_assoc_property_map(colours));
+ }
+
+ for (const auto &e : colours) {
+ out->insert(e.first);
+ }
+}
+
+/** \brief Find the set of vertices that are NOT reachable from the vertices in
+ * \a sources. */
+template<class Graph, class SourceCont, class OutCont>
+void find_unreachable(const Graph &g, const SourceCont &sources, OutCont *out) {
+ using vertex_descriptor = typename Graph::vertex_descriptor;
std::unordered_set<vertex_descriptor> reachable;
-
- find_reachable(g, sources, &reachable);
-
- for (const auto &v : vertices_range(g)) {
- if (!contains(reachable, v)) {
- out->insert(v);
- }
- }
-}
-
-template <class Graph>
+
+ find_reachable(g, sources, &reachable);
+
+ for (const auto &v : vertices_range(g)) {
+ if (!contains(reachable, v)) {
+ out->insert(v);
+ }
+ }
+}
+
+template <class Graph>
flat_set<typename Graph::vertex_descriptor>
find_vertices_in_cycles(const Graph &g) {
using vertex_descriptor = typename Graph::vertex_descriptor;
@@ -182,46 +182,46 @@ find_vertices_in_cycles(const Graph &g) {
}
template <class Graph>
-bool has_parallel_edge(const Graph &g) {
- using vertex_descriptor = typename Graph::vertex_descriptor;
+bool has_parallel_edge(const Graph &g) {
+ using vertex_descriptor = typename Graph::vertex_descriptor;
ue2_unordered_set<std::pair<vertex_descriptor, vertex_descriptor>> seen;
- for (const auto &e : edges_range(g)) {
- auto u = source(e, g);
- auto v = target(e, g);
- if (!seen.emplace(u, v).second) {
- return true;
- }
- }
- return false;
-}
-
-struct found_back_edge {};
-struct detect_back_edges : public boost::default_dfs_visitor {
- explicit detect_back_edges(bool ignore_self_in)
- : ignore_self(ignore_self_in) {}
- template <class Graph>
- void back_edge(const typename Graph::edge_descriptor &e,
- const Graph &g) const {
- if (ignore_self && source(e, g) == target(e, g)) {
- return;
- }
- throw found_back_edge();
- }
- bool ignore_self;
-};
-
-template <class Graph>
-bool is_dag(const Graph &g, bool ignore_self_loops = false) {
- try {
- depth_first_search(g, visitor(detect_back_edges(ignore_self_loops)));
- } catch (const found_back_edge &) {
- return false;
- }
-
- return true;
-}
-
+ for (const auto &e : edges_range(g)) {
+ auto u = source(e, g);
+ auto v = target(e, g);
+ if (!seen.emplace(u, v).second) {
+ return true;
+ }
+ }
+ return false;
+}
+
+struct found_back_edge {};
+struct detect_back_edges : public boost::default_dfs_visitor {
+ explicit detect_back_edges(bool ignore_self_in)
+ : ignore_self(ignore_self_in) {}
+ template <class Graph>
+ void back_edge(const typename Graph::edge_descriptor &e,
+ const Graph &g) const {
+ if (ignore_self && source(e, g) == target(e, g)) {
+ return;
+ }
+ throw found_back_edge();
+ }
+ bool ignore_self;
+};
+
+template <class Graph>
+bool is_dag(const Graph &g, bool ignore_self_loops = false) {
+ try {
+ depth_first_search(g, visitor(detect_back_edges(ignore_self_loops)));
+ } catch (const found_back_edge &) {
+ return false;
+ }
+
+ return true;
+}
+
template<typename Cont>
class vertex_recorder : public boost::default_dfs_visitor {
public:
@@ -261,28 +261,28 @@ make_vertex_index_bitset_recorder(Bitset &o) {
return vertex_index_bitset_recorder<Bitset>(o);
}
-template <class Graph>
-std::pair<typename Graph::edge_descriptor, bool>
-add_edge_if_not_present(typename Graph::vertex_descriptor u,
- typename Graph::vertex_descriptor v, Graph &g) {
- std::pair<typename Graph::edge_descriptor, bool> e = edge(u, v, g);
- if (!e.second) {
- e = add_edge(u, v, g);
- }
- return e;
-}
-
-template <class Graph>
-std::pair<typename Graph::edge_descriptor, bool> add_edge_if_not_present(
- typename Graph::vertex_descriptor u, typename Graph::vertex_descriptor v,
- const typename Graph::edge_property_type &prop, Graph &g) {
- std::pair<typename Graph::edge_descriptor, bool> e = edge(u, v, g);
- if (!e.second) {
- e = add_edge(u, v, prop, g);
- }
- return e;
-}
-
+template <class Graph>
+std::pair<typename Graph::edge_descriptor, bool>
+add_edge_if_not_present(typename Graph::vertex_descriptor u,
+ typename Graph::vertex_descriptor v, Graph &g) {
+ std::pair<typename Graph::edge_descriptor, bool> e = edge(u, v, g);
+ if (!e.second) {
+ e = add_edge(u, v, g);
+ }
+ return e;
+}
+
+template <class Graph>
+std::pair<typename Graph::edge_descriptor, bool> add_edge_if_not_present(
+ typename Graph::vertex_descriptor u, typename Graph::vertex_descriptor v,
+ const typename Graph::edge_property_type &prop, Graph &g) {
+ std::pair<typename Graph::edge_descriptor, bool> e = edge(u, v, g);
+ if (!e.second) {
+ e = add_edge(u, v, prop, g);
+ }
+ return e;
+}
+
#ifndef NDEBUG
template <class Graph>
@@ -317,6 +317,6 @@ bool hasCorrectlyNumberedEdges(const Graph &g) {
#endif
-} // namespace ue2
-
-#endif // UTIL_GRAPH_H
+} // namespace ue2
+
+#endif // UTIL_GRAPH_H
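
A quick, illustrative exercise of the helpers above on a plain BGL graph; the graph type is chosen for the example, and any model providing the needed typedefs works:

    #include <boost/graph/adjacency_list.hpp>
    #include <cassert>
    #include "util/graph.h"

    int main() {
        using G = boost::adjacency_list<boost::vecS, boost::vecS,
                                        boost::bidirectionalS>;
        G g;
        auto a = add_vertex(g), b = add_vertex(g);
        add_edge(a, b, g);
        add_edge(a, a, g);                         // self-loop on a
        assert(ue2::hasSelfLoop(a, g));
        assert(ue2::proper_out_degree(a, g) == 1); // self-loop not counted
        assert(!ue2::is_dag(g));                   // the self-loop is a cycle
        assert(ue2::is_dag(g, true));              // ...unless ignored
        ue2::add_edge_if_not_present(a, b, g);     // no duplicate a->b edge
        assert(!ue2::has_parallel_edge(g));
    }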
diff --git a/contrib/libs/hyperscan/src/util/graph_range.h b/contrib/libs/hyperscan/src/util/graph_range.h
index 3df06911a7..05b8f76896 100644
--- a/contrib/libs/hyperscan/src/util/graph_range.h
+++ b/contrib/libs/hyperscan/src/util/graph_range.h
@@ -1,111 +1,111 @@
-/*
+/*
* Copyright (c) 2015-2016, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \file
- * \brief Convenience functions allowing range-based-for over BGL graphs.
- *
- * Normally with the BGL in C++98 you need to do this to iterate over graph
- * elements:
- *
- * Graph::out_edge_iterator ei, ee;
- * for (tie(ei, ee) = out_edges(v, g); ei != ee; ++ei) {
- * do_thing_with_edge(*ei, g);
- * }
- *
- * But now, with C++11 range-based-for and these functions, you can do this
- * instead:
- *
- * for (auto e : out_edges_range(v, g)) {
- * do_thing_with_edge(e, g);
- * }
- *
- * This syntax is much more compact and keeps the iterator vars from cluttering
- * the outer scope.
- */
-
-#ifndef UTIL_GRAPH_RANGE_H
-#define UTIL_GRAPH_RANGE_H
-
-#include <boost/range/iterator_range.hpp>
-
-namespace ue2 {
-
-/** Adapts a pair of iterators into a range. */
-template <class Iter>
-inline boost::iterator_range<Iter> pair_range(const std::pair<Iter, Iter> &p) {
- return boost::make_iterator_range(p.first, p.second);
-}
-
-/** vertices(g) */
-template <class Graph>
-inline auto vertices_range(const Graph &g)
- -> decltype(pair_range(vertices(g))) {
- return pair_range(vertices(g));
-}
-
-/** edges(g) */
-template <class Graph>
-inline auto edges_range(const Graph &g) -> decltype(pair_range(edges(g))) {
- return pair_range(edges(g));
-}
-
-/** out_edges(v, g) */
-template <class Graph>
-inline auto out_edges_range(const typename Graph::vertex_descriptor &v,
- const Graph &g)
- -> decltype(pair_range(out_edges(v, g))) {
- return pair_range(out_edges(v, g));
-}
-
-/** in_edges(v, g) */
-template <class Graph>
-inline auto in_edges_range(const typename Graph::vertex_descriptor &v,
- const Graph &g)
- -> decltype(pair_range(in_edges(v, g))) {
- return pair_range(in_edges(v, g));
-}
-
-/** adjacent_vertices(v, g) */
-template <class Graph>
-inline auto adjacent_vertices_range(const typename Graph::vertex_descriptor &v,
- const Graph &g)
- -> decltype(pair_range(adjacent_vertices(v, g))) {
- return pair_range(adjacent_vertices(v, g));
-}
-
-/** inv_adjacent_vertices(v, g) */
-template <class Graph>
-inline auto inv_adjacent_vertices_range(
- const typename Graph::vertex_descriptor &v, const Graph &g)
- -> decltype(pair_range(inv_adjacent_vertices(v, g))) {
- return pair_range(inv_adjacent_vertices(v, g));
-}
-
-} // namespace ue2
-
-#endif // UTIL_GRAPH_RANGE_H
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * \brief Convenience functions allowing range-based-for over BGL graphs.
+ *
+ * Normally with the BGL in C++98 you need to do this to iterate over graph
+ * elements:
+ *
+ * Graph::out_edge_iterator ei, ee;
+ * for (tie(ei, ee) = out_edges(v, g); ei != ee; ++ei) {
+ * do_thing_with_edge(*ei, g);
+ * }
+ *
+ * But now, with C++11 range-based-for and these functions, you can do this
+ * instead:
+ *
+ * for (auto e : out_edges_range(v, g)) {
+ * do_thing_with_edge(e, g);
+ * }
+ *
+ * This syntax is much more compact and keeps the iterator vars from cluttering
+ * the outer scope.
+ */
+
+#ifndef UTIL_GRAPH_RANGE_H
+#define UTIL_GRAPH_RANGE_H
+
+#include <boost/range/iterator_range.hpp>
+
+namespace ue2 {
+
+/** Adapts a pair of iterators into a range. */
+template <class Iter>
+inline boost::iterator_range<Iter> pair_range(const std::pair<Iter, Iter> &p) {
+ return boost::make_iterator_range(p.first, p.second);
+}
+
+/** vertices(g) */
+template <class Graph>
+inline auto vertices_range(const Graph &g)
+ -> decltype(pair_range(vertices(g))) {
+ return pair_range(vertices(g));
+}
+
+/** edges(g) */
+template <class Graph>
+inline auto edges_range(const Graph &g) -> decltype(pair_range(edges(g))) {
+ return pair_range(edges(g));
+}
+
+/** out_edges(v, g) */
+template <class Graph>
+inline auto out_edges_range(const typename Graph::vertex_descriptor &v,
+ const Graph &g)
+ -> decltype(pair_range(out_edges(v, g))) {
+ return pair_range(out_edges(v, g));
+}
+
+/** in_edges(v, g) */
+template <class Graph>
+inline auto in_edges_range(const typename Graph::vertex_descriptor &v,
+ const Graph &g)
+ -> decltype(pair_range(in_edges(v, g))) {
+ return pair_range(in_edges(v, g));
+}
+
+/** adjacent_vertices(v, g) */
+template <class Graph>
+inline auto adjacent_vertices_range(const typename Graph::vertex_descriptor &v,
+ const Graph &g)
+ -> decltype(pair_range(adjacent_vertices(v, g))) {
+ return pair_range(adjacent_vertices(v, g));
+}
+
+/** inv_adjacent_vertices(v, g) */
+template <class Graph>
+inline auto inv_adjacent_vertices_range(
+ const typename Graph::vertex_descriptor &v, const Graph &g)
+ -> decltype(pair_range(inv_adjacent_vertices(v, g))) {
+ return pair_range(inv_adjacent_vertices(v, g));
+}
+
+} // namespace ue2
+
+#endif // UTIL_GRAPH_RANGE_H
diff --git a/contrib/libs/hyperscan/src/util/join.h b/contrib/libs/hyperscan/src/util/join.h
index 7d5a30c39a..2073ef7a5a 100644
--- a/contrib/libs/hyperscan/src/util/join.h
+++ b/contrib/libs/hyperscan/src/util/join.h
@@ -1,40 +1,40 @@
-/*
- * Copyright (c) 2015, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef JOIN_H
-
-#define JOIN(x, y) JOIN_AGAIN(x, y)
-#define JOIN_AGAIN(x, y) x ## y
-
+/*
+ * Copyright (c) 2015, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JOIN_H
+
+#define JOIN(x, y) JOIN_AGAIN(x, y)
+#define JOIN_AGAIN(x, y) x ## y
+
#define JOIN3(x, y, z) JOIN_AGAIN3(x, y, z)
#define JOIN_AGAIN3(x, y, z) x ## y ## z
#define JOIN4(w, x, y, z) JOIN_AGAIN4(w, x, y, z)
#define JOIN_AGAIN4(w, x, y, z) w ## x ## y ## z
-#endif
+#endif
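
The indirection through JOIN_AGAIN is what makes the macro useful: arguments are macro-expanded before pasting. A quick illustration:

    #define WIDTH 64
    int JOIN(counter_, WIDTH);   /* expands to: int counter_64; */
    /* A one-level `x ## y` would paste first and yield counter_WIDTH. */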
diff --git a/contrib/libs/hyperscan/src/util/make_unique.h b/contrib/libs/hyperscan/src/util/make_unique.h
index 651e8c5cf9..86de7cecc1 100644
--- a/contrib/libs/hyperscan/src/util/make_unique.h
+++ b/contrib/libs/hyperscan/src/util/make_unique.h
@@ -1,49 +1,49 @@
-/*
+/*
* Copyright (c) 2015-2017, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef UTIL_MAKE_UNIQUE_H
-#define UTIL_MAKE_UNIQUE_H
-
-#if (defined(_WIN32) || defined(_WIN64)) && (_MSC_VER > 1700)
-// VC++ 2013 onwards has make_unique in the STL
-#define USE_STD
-#include <memory>
-#else
-#include <boost/smart_ptr/make_unique.hpp>
-#endif
-
-namespace ue2 {
-#if defined(USE_STD)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef UTIL_MAKE_UNIQUE_H
+#define UTIL_MAKE_UNIQUE_H
+
+#if (defined(_WIN32) || defined(_WIN64)) && (_MSC_VER > 1700)
+// VC++ 2013 onwards has make_unique in the STL
+#define USE_STD
+#include <memory>
+#else
+#include <boost/smart_ptr/make_unique.hpp>
+#endif
+
+namespace ue2 {
+#if defined(USE_STD)
using std::make_unique;
-#else
+#else
using boost::make_unique;
-#endif
-}
-
-#undef USE_STD
-#endif // UTIL_MAKE_UNIQUE_H
+#endif
+}
+
+#undef USE_STD
+#endif // UTIL_MAKE_UNIQUE_H
diff --git a/contrib/libs/hyperscan/src/util/masked_move.c b/contrib/libs/hyperscan/src/util/masked_move.c
index 001cd49f28..8172d443c4 100644
--- a/contrib/libs/hyperscan/src/util/masked_move.c
+++ b/contrib/libs/hyperscan/src/util/masked_move.c
@@ -1,91 +1,91 @@
-/*
+/*
* Copyright (c) 2015-2017, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#include "ue2common.h"
-#include "masked_move.h"
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include "ue2common.h"
+#include "masked_move.h"
#include "util/arch.h"
-
+
#if defined(HAVE_AVX2)
-/* masks for masked moves */
-
-/* magic mask for maskload (vmmaskmovq) - described in UE-2424 */
+/* masks for masked moves */
+
+/* magic mask for maskload (vmmaskmovq) - described in UE-2424 */
const ALIGN_CL_DIRECTIVE u32 mm_mask_mask[16] = {
- 0x00000000U,
- 0x00000000U,
- 0x00000000U,
- 0x00000000U,
- 0x00000000U,
- 0x00000000U,
- 0x00000000U,
- 0x00000000U,
- 0xff000000U,
- 0xfe000000U,
- 0xfc000000U,
- 0xf8000000U,
- 0xf0000000U,
- 0xe0000000U,
- 0xc0000000U,
- 0x80000000U,
-};
-
-const u32 mm_shuffle_end[32][8] = {
- { 0x03020100U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, },
- { 0x02010080U, 0x80808003U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, },
- { 0x01008080U, 0x80800302U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, },
- { 0x00808080U, 0x80030201U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, },
- { 0x80808080U, 0x03020100U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, },
- { 0x80808080U, 0x02010080U, 0x80808003U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, },
- { 0x80808080U, 0x01008080U, 0x80800302U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, },
- { 0x80808080U, 0x00808080U, 0x80030201U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, },
- { 0x80808080U, 0x80808080U, 0x03020100U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, },
- { 0x80808080U, 0x80808080U, 0x02010080U, 0x80808003U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, },
- { 0x80808080U, 0x80808080U, 0x01008080U, 0x80800302U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, },
- { 0x80808080U, 0x80808080U, 0x00808080U, 0x80030201U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, },
- { 0x80808080U, 0x80808080U, 0x80808080U, 0x03020100U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, },
- { 0x80808080U, 0x80808080U, 0x80808080U, 0x02010080U, 0x80808003U, 0x80808080U, 0x80808080U, 0x80808080U, },
- { 0x80808080U, 0x80808080U, 0x80808080U, 0x01008080U, 0x80800302U, 0x80808080U, 0x80808080U, 0x80808080U, },
- { 0x80808080U, 0x80808080U, 0x80808080U, 0x00808080U, 0x80030201U, 0x80808080U, 0x80808080U, 0x80808080U, },
- { 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x03020100U, 0x80808080U, 0x80808080U, 0x80808080U, },
- { 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x02010080U, 0x80808003U, 0x80808080U, 0x80808080U, },
- { 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x01008080U, 0x80800302U, 0x80808080U, 0x80808080U, },
- { 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x00808080U, 0x80030201U, 0x80808080U, 0x80808080U, },
- { 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x03020100U, 0x80808080U, 0x80808080U, },
- { 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x02010080U, 0x80808003U, 0x80808080U, },
- { 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x01008080U, 0x80800302U, 0x80808080U, },
- { 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x00808080U, 0x80030201U, 0x80808080U, },
- { 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x03020100U, 0x80808080U, },
- { 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x02010080U, 0x80808003U, },
- { 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x01008080U, 0x80800302U, },
- { 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x00808080U, 0x80030201U, },
- { 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x03020100U, },
- { 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x02010080U, },
- { 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x01008080U, },
- { 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x00808080U, },
-};
-#endif // AVX2
+ 0x00000000U,
+ 0x00000000U,
+ 0x00000000U,
+ 0x00000000U,
+ 0x00000000U,
+ 0x00000000U,
+ 0x00000000U,
+ 0x00000000U,
+ 0xff000000U,
+ 0xfe000000U,
+ 0xfc000000U,
+ 0xf8000000U,
+ 0xf0000000U,
+ 0xe0000000U,
+ 0xc0000000U,
+ 0x80000000U,
+};
+
+const u32 mm_shuffle_end[32][8] = {
+ { 0x03020100U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, },
+ { 0x02010080U, 0x80808003U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, },
+ { 0x01008080U, 0x80800302U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, },
+ { 0x00808080U, 0x80030201U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, },
+ { 0x80808080U, 0x03020100U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, },
+ { 0x80808080U, 0x02010080U, 0x80808003U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, },
+ { 0x80808080U, 0x01008080U, 0x80800302U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, },
+ { 0x80808080U, 0x00808080U, 0x80030201U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, },
+ { 0x80808080U, 0x80808080U, 0x03020100U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, },
+ { 0x80808080U, 0x80808080U, 0x02010080U, 0x80808003U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, },
+ { 0x80808080U, 0x80808080U, 0x01008080U, 0x80800302U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, },
+ { 0x80808080U, 0x80808080U, 0x00808080U, 0x80030201U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, },
+ { 0x80808080U, 0x80808080U, 0x80808080U, 0x03020100U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, },
+ { 0x80808080U, 0x80808080U, 0x80808080U, 0x02010080U, 0x80808003U, 0x80808080U, 0x80808080U, 0x80808080U, },
+ { 0x80808080U, 0x80808080U, 0x80808080U, 0x01008080U, 0x80800302U, 0x80808080U, 0x80808080U, 0x80808080U, },
+ { 0x80808080U, 0x80808080U, 0x80808080U, 0x00808080U, 0x80030201U, 0x80808080U, 0x80808080U, 0x80808080U, },
+ { 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x03020100U, 0x80808080U, 0x80808080U, 0x80808080U, },
+ { 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x02010080U, 0x80808003U, 0x80808080U, 0x80808080U, },
+ { 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x01008080U, 0x80800302U, 0x80808080U, 0x80808080U, },
+ { 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x00808080U, 0x80030201U, 0x80808080U, 0x80808080U, },
+ { 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x03020100U, 0x80808080U, 0x80808080U, },
+ { 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x02010080U, 0x80808003U, 0x80808080U, },
+ { 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x01008080U, 0x80800302U, 0x80808080U, },
+ { 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x00808080U, 0x80030201U, 0x80808080U, },
+ { 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x03020100U, 0x80808080U, },
+ { 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x02010080U, 0x80808003U, },
+ { 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x01008080U, 0x80800302U, },
+ { 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x00808080U, 0x80030201U, },
+ { 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x03020100U, },
+ { 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x02010080U, },
+ { 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x01008080U, },
+ { 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x80808080U, 0x00808080U, },
+};
+#endif // AVX2
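
A scalar sketch of how _get_mm_mask_end() in masked_move.h consumes this table (illustrative: the name mask_hi and the standalone main() below are invented for the example). Entry 8 + i of mm_mask_mask has bits (24 + i)..31 set, so shifting every dword lane left by 8 - len/4 leaves bit 31 -- the bit the AVX2 maskload keys on per dword lane -- set in exactly the first len/4 lanes; the len % 4 trailing bytes are covered separately via mm_shuffle_end:

    #include <assert.h>
    #include <stdio.h>

    int main(void) {
        /* upper half of mm_mask_mask: entry i has bits (24 + i)..31 set */
        const unsigned mask_hi[8] = {
            0xff000000u, 0xfe000000u, 0xfc000000u, 0xf8000000u,
            0xf0000000u, 0xe0000000u, 0xc0000000u, 0x80000000u,
        };
        for (unsigned len = 4; len <= 32; len += 4) {
            unsigned shift = 8 - (len >> 2);
            for (unsigned lane = 0; lane < 8; lane++) {
                /* after the left shift, bit 31 survives iff lane < len/4 */
                unsigned enabled = (mask_hi[lane] << shift) >> 31;
                assert(enabled == (lane < len / 4 ? 1u : 0u));
            }
        }
        puts("mask model ok");
        return 0;
    }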
diff --git a/contrib/libs/hyperscan/src/util/masked_move.h b/contrib/libs/hyperscan/src/util/masked_move.h
index 4c877ca9e5..2db31e6009 100644
--- a/contrib/libs/hyperscan/src/util/masked_move.h
+++ b/contrib/libs/hyperscan/src/util/masked_move.h
@@ -1,82 +1,82 @@
-/*
+/*
* Copyright (c) 2015-2017, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef MASKED_MOVE_H
-#define MASKED_MOVE_H
-
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MASKED_MOVE_H
+#define MASKED_MOVE_H
+
#include "arch.h"
-
+
#if defined(HAVE_AVX2)
-#include "unaligned.h"
-#include "simd_utils.h"
-
+#include "unaligned.h"
+#include "simd_utils.h"
+
#ifdef __cplusplus
extern "C" {
#endif
-extern const u32 mm_mask_mask[16];
-extern const u32 mm_shuffle_end[32][8];
+extern const u32 mm_mask_mask[16];
+extern const u32 mm_shuffle_end[32][8];
#ifdef __cplusplus
}
#endif
-
-/* load mask for len bytes from start of buffer */
-static really_inline m256
-_get_mm_mask_end(u32 len) {
- assert(len <= 32);
- const u8 *masky = (const u8 *)mm_mask_mask;
- m256 mask = load256(masky + 32);
- mask = _mm256_sll_epi32(mask, _mm_cvtsi32_si128(8 - (len >> 2)));
- return mask;
-}
-
-/*
- * masked_move256_len: loads len bytes from *buf into an m256
- * _______________________________
- * |0<----len---->| 32|
- * -------------------------------
- */
-static really_inline m256
-masked_move256_len(const u8 *buf, const u32 len) {
- assert(len >= 4);
-
- m256 lmask = _get_mm_mask_end(len);
-
- u32 end = unaligned_load_u32(buf + len - 4);
- m256 preshufend = _mm256_broadcastq_epi64(_mm_cvtsi32_si128(end));
- m256 v = _mm256_maskload_epi32((const int *)buf, lmask);
+
+/* load mask for len bytes from start of buffer */
+static really_inline m256
+_get_mm_mask_end(u32 len) {
+ assert(len <= 32);
+ const u8 *masky = (const u8 *)mm_mask_mask;
+ m256 mask = load256(masky + 32);
+ mask = _mm256_sll_epi32(mask, _mm_cvtsi32_si128(8 - (len >> 2)));
+ return mask;
+}
+
+/*
+ * masked_move256_len: loads len bytes from *buf into an m256
+ * _______________________________
+ * |0<----len---->| 32|
+ * -------------------------------
+ */
+static really_inline m256
+masked_move256_len(const u8 *buf, const u32 len) {
+ assert(len >= 4);
+
+ m256 lmask = _get_mm_mask_end(len);
+
+ u32 end = unaligned_load_u32(buf + len - 4);
+ m256 preshufend = _mm256_broadcastq_epi64(_mm_cvtsi32_si128(end));
+ m256 v = _mm256_maskload_epi32((const int *)buf, lmask);
m256 shufend = pshufb_m256(preshufend,
loadu256(&mm_shuffle_end[len - 4]));
- m256 target = or256(v, shufend);
-
- return target;
-}
-
-#endif /* AVX2 */
-#endif /* MASKED_MOVE_H */
-
+ m256 target = or256(v, shufend);
+
+ return target;
+}
+
+#endif /* AVX2 */
+#endif /* MASKED_MOVE_H */
+
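
A portable scalar sketch of what masked_move256_len() computes (illustrative: model() and main() are invented for the example, and the real routine additionally requires AVX2 and hyperscan's m256 helpers). The maskload contributes the leading (len/4) full dwords; the broadcast-and-shuffle of the trailing unaligned dword contributes bytes len-4..len-1; the two parts are OR'd together, and any overlapping bytes are identical, so bytes [0, len) of the result always match the input:

    #include <assert.h>
    #include <stdio.h>
    #include <string.h>

    static void model(unsigned char out[32], const unsigned char *buf,
                      unsigned len) {
        unsigned char part1[32] = { 0 }, part2[32] = { 0 };
        assert(len >= 4 && len <= 32);
        /* part 1: dword-granular maskload covers bytes [0, (len/4)*4) */
        memcpy(part1, buf, len & ~3u);
        /* part 2: the shuffled trailing dword lands at positions len-4..len-1 */
        memcpy(part2 + len - 4, buf + len - 4, 4);
        for (unsigned i = 0; i < 32; i++) {
            out[i] = part1[i] | part2[i]; /* overlapping bytes are identical */
        }
    }

    int main(void) {
        unsigned char buf[32], out[32];
        for (unsigned i = 0; i < 32; i++) {
            buf[i] = (unsigned char)(i + 1);
        }
        for (unsigned len = 4; len <= 32; len++) {
            model(out, buf, len);
            assert(memcmp(out, buf, len) == 0); /* bytes [0, len) are valid */
        }
        puts("masked move model ok");
        return 0;
    }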
diff --git a/contrib/libs/hyperscan/src/util/multibit.c b/contrib/libs/hyperscan/src/util/multibit.c
index de192d7dd7..9a648528d3 100644
--- a/contrib/libs/hyperscan/src/util/multibit.c
+++ b/contrib/libs/hyperscan/src/util/multibit.c
@@ -1,140 +1,140 @@
-/*
+/*
* Copyright (c) 2015-2016, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \file
- * \brief Multibit: lookup tables and support code.
- *
- * This C file contains the constant tables used by multibit, so we don't end
- * up creating copies of them for every unit that uses it.
- */
-
-#include "multibit.h"
-#include "ue2common.h"
-
-const u8 mmbit_keyshift_lut[32] = {
- 30, 30, 24, 24, 24, 24, 24, 24, 18, 18, 18,
- 18, 18, 18, 12, 12, 12, 12, 12, 12, 6, 6,
- 6, 6, 6, 6, 0, 0, 0, 0, 0, 0
-};
-
-// The only actually valid values of ks are as shown in the LUT above, but a
-// division is just too expensive.
-const u8 mmbit_maxlevel_from_keyshift_lut[32] = {
- 0, 0, 0, 0, 0, 0,
- 1, 1, 1, 1, 1, 1,
- 2, 2, 2, 2, 2, 2,
- 3, 3, 3, 3, 3, 3,
- 4, 4, 4, 4, 4, 4,
- 5, 5
-};
-
-const u8 mmbit_maxlevel_direct_lut[32] = {
- 5, 5, 4, 4, 4, 4, 4, 4, 3, 3, 3,
- 3, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1,
- 1, 1, 1, 1, 0, 0, 0, 0, 0, 0
-};
-
-#define ZERO_TO_LUT(x) ((1ULL << x) - 1)
-
-const u64a mmbit_zero_to_lut[65] = {
- ZERO_TO_LUT(0),
- ZERO_TO_LUT(1),
- ZERO_TO_LUT(2),
- ZERO_TO_LUT(3),
- ZERO_TO_LUT(4),
- ZERO_TO_LUT(5),
- ZERO_TO_LUT(6),
- ZERO_TO_LUT(7),
- ZERO_TO_LUT(8),
- ZERO_TO_LUT(9),
- ZERO_TO_LUT(10),
- ZERO_TO_LUT(11),
- ZERO_TO_LUT(12),
- ZERO_TO_LUT(13),
- ZERO_TO_LUT(14),
- ZERO_TO_LUT(15),
- ZERO_TO_LUT(16),
- ZERO_TO_LUT(17),
- ZERO_TO_LUT(18),
- ZERO_TO_LUT(19),
- ZERO_TO_LUT(20),
- ZERO_TO_LUT(21),
- ZERO_TO_LUT(22),
- ZERO_TO_LUT(23),
- ZERO_TO_LUT(24),
- ZERO_TO_LUT(25),
- ZERO_TO_LUT(26),
- ZERO_TO_LUT(27),
- ZERO_TO_LUT(28),
- ZERO_TO_LUT(29),
- ZERO_TO_LUT(30),
- ZERO_TO_LUT(31),
- ZERO_TO_LUT(32),
- ZERO_TO_LUT(33),
- ZERO_TO_LUT(34),
- ZERO_TO_LUT(35),
- ZERO_TO_LUT(36),
- ZERO_TO_LUT(37),
- ZERO_TO_LUT(38),
- ZERO_TO_LUT(39),
- ZERO_TO_LUT(40),
- ZERO_TO_LUT(41),
- ZERO_TO_LUT(42),
- ZERO_TO_LUT(43),
- ZERO_TO_LUT(44),
- ZERO_TO_LUT(45),
- ZERO_TO_LUT(46),
- ZERO_TO_LUT(47),
- ZERO_TO_LUT(48),
- ZERO_TO_LUT(49),
- ZERO_TO_LUT(50),
- ZERO_TO_LUT(51),
- ZERO_TO_LUT(52),
- ZERO_TO_LUT(53),
- ZERO_TO_LUT(54),
- ZERO_TO_LUT(55),
- ZERO_TO_LUT(56),
- ZERO_TO_LUT(57),
- ZERO_TO_LUT(58),
- ZERO_TO_LUT(59),
- ZERO_TO_LUT(60),
- ZERO_TO_LUT(61),
- ZERO_TO_LUT(62),
- ZERO_TO_LUT(63),
- ~0ULL
-};
-
-const u32 mmbit_root_offset_from_level[7] = {
- 0,
- 1,
- 1 + (1 << MMB_KEY_SHIFT),
- 1 + (1 << MMB_KEY_SHIFT) + (1 << MMB_KEY_SHIFT * 2),
- 1 + (1 << MMB_KEY_SHIFT) + (1 << MMB_KEY_SHIFT * 2) + (1 << MMB_KEY_SHIFT * 3),
- 1 + (1 << MMB_KEY_SHIFT) + (1 << MMB_KEY_SHIFT * 2) + (1 << MMB_KEY_SHIFT * 3) + (1 << MMB_KEY_SHIFT * 4),
- 1 + (1 << MMB_KEY_SHIFT) + (1 << MMB_KEY_SHIFT * 2) + (1 << MMB_KEY_SHIFT * 3) + (1 << MMB_KEY_SHIFT * 4) + (1 << MMB_KEY_SHIFT * 5),
-};
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * \brief Multibit: lookup tables and support code.
+ *
+ * This C file contains the constant tables used by multibit, so we don't end
+ * up creating copies of them for every unit that uses it.
+ */
+
+#include "multibit.h"
+#include "ue2common.h"
+
+const u8 mmbit_keyshift_lut[32] = {
+ 30, 30, 24, 24, 24, 24, 24, 24, 18, 18, 18,
+ 18, 18, 18, 12, 12, 12, 12, 12, 12, 6, 6,
+ 6, 6, 6, 6, 0, 0, 0, 0, 0, 0
+};
+
+// The only actually valid values of ks are as shown in the LUT above, but a
+// division is just too expensive.
+const u8 mmbit_maxlevel_from_keyshift_lut[32] = {
+ 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2,
+ 3, 3, 3, 3, 3, 3,
+ 4, 4, 4, 4, 4, 4,
+ 5, 5
+};
+
+const u8 mmbit_maxlevel_direct_lut[32] = {
+ 5, 5, 4, 4, 4, 4, 4, 4, 3, 3, 3,
+ 3, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1,
+ 1, 1, 1, 1, 0, 0, 0, 0, 0, 0
+};
+
+#define ZERO_TO_LUT(x) ((1ULL << x) - 1)
+
+const u64a mmbit_zero_to_lut[65] = {
+ ZERO_TO_LUT(0),
+ ZERO_TO_LUT(1),
+ ZERO_TO_LUT(2),
+ ZERO_TO_LUT(3),
+ ZERO_TO_LUT(4),
+ ZERO_TO_LUT(5),
+ ZERO_TO_LUT(6),
+ ZERO_TO_LUT(7),
+ ZERO_TO_LUT(8),
+ ZERO_TO_LUT(9),
+ ZERO_TO_LUT(10),
+ ZERO_TO_LUT(11),
+ ZERO_TO_LUT(12),
+ ZERO_TO_LUT(13),
+ ZERO_TO_LUT(14),
+ ZERO_TO_LUT(15),
+ ZERO_TO_LUT(16),
+ ZERO_TO_LUT(17),
+ ZERO_TO_LUT(18),
+ ZERO_TO_LUT(19),
+ ZERO_TO_LUT(20),
+ ZERO_TO_LUT(21),
+ ZERO_TO_LUT(22),
+ ZERO_TO_LUT(23),
+ ZERO_TO_LUT(24),
+ ZERO_TO_LUT(25),
+ ZERO_TO_LUT(26),
+ ZERO_TO_LUT(27),
+ ZERO_TO_LUT(28),
+ ZERO_TO_LUT(29),
+ ZERO_TO_LUT(30),
+ ZERO_TO_LUT(31),
+ ZERO_TO_LUT(32),
+ ZERO_TO_LUT(33),
+ ZERO_TO_LUT(34),
+ ZERO_TO_LUT(35),
+ ZERO_TO_LUT(36),
+ ZERO_TO_LUT(37),
+ ZERO_TO_LUT(38),
+ ZERO_TO_LUT(39),
+ ZERO_TO_LUT(40),
+ ZERO_TO_LUT(41),
+ ZERO_TO_LUT(42),
+ ZERO_TO_LUT(43),
+ ZERO_TO_LUT(44),
+ ZERO_TO_LUT(45),
+ ZERO_TO_LUT(46),
+ ZERO_TO_LUT(47),
+ ZERO_TO_LUT(48),
+ ZERO_TO_LUT(49),
+ ZERO_TO_LUT(50),
+ ZERO_TO_LUT(51),
+ ZERO_TO_LUT(52),
+ ZERO_TO_LUT(53),
+ ZERO_TO_LUT(54),
+ ZERO_TO_LUT(55),
+ ZERO_TO_LUT(56),
+ ZERO_TO_LUT(57),
+ ZERO_TO_LUT(58),
+ ZERO_TO_LUT(59),
+ ZERO_TO_LUT(60),
+ ZERO_TO_LUT(61),
+ ZERO_TO_LUT(62),
+ ZERO_TO_LUT(63),
+ ~0ULL
+};
+
+const u32 mmbit_root_offset_from_level[7] = {
+ 0,
+ 1,
+ 1 + (1 << MMB_KEY_SHIFT),
+ 1 + (1 << MMB_KEY_SHIFT) + (1 << MMB_KEY_SHIFT * 2),
+ 1 + (1 << MMB_KEY_SHIFT) + (1 << MMB_KEY_SHIFT * 2) + (1 << MMB_KEY_SHIFT * 3),
+ 1 + (1 << MMB_KEY_SHIFT) + (1 << MMB_KEY_SHIFT * 2) + (1 << MMB_KEY_SHIFT * 3) + (1 << MMB_KEY_SHIFT * 4),
+ 1 + (1 << MMB_KEY_SHIFT) + (1 << MMB_KEY_SHIFT * 2) + (1 << MMB_KEY_SHIFT * 3) + (1 << MMB_KEY_SHIFT * 4) + (1 << MMB_KEY_SHIFT * 5),
+};
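
A worked example of the keyshift LUT above (illustrative: the wrapper keyshift() is invented for the example, and __builtin_clz stands in for the clz32() used by multibit.h; it is a GCC/Clang builtin). The index is clz32(total_bits - 1), and each entry is MMB_KEY_SHIFT (6) times the number of levels below the root:

    #include <assert.h>
    #include <stdio.h>

    static unsigned keyshift(unsigned total_bits) {
        static const unsigned char lut[32] = {
            30, 30, 24, 24, 24, 24, 24, 24, 18, 18, 18,
            18, 18, 18, 12, 12, 12, 12, 12, 12, 6, 6,
            6, 6, 6, 6, 0, 0, 0, 0, 0, 0
        };
        assert(total_bits > 1);
        return lut[__builtin_clz(total_bits - 1)];
    }

    int main(void) {
        /* 999 has 10 significant bits -> clz = 22 -> keyshift 6: one 64-way
         * root block over a single leaf level (max_level = 6/6 = 1) */
        assert(keyshift(1000) == 6);
        /* 2^18 bits -> clz(2^18 - 1) = 14 -> keyshift 12: three levels */
        assert(keyshift(1u << 18) == 12);
        /* the largest sizes use the full six levels (keyshift 30) */
        assert(keyshift(0x80000000u) == 30);
        puts("keyshift examples ok");
        return 0;
    }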
diff --git a/contrib/libs/hyperscan/src/util/multibit.h b/contrib/libs/hyperscan/src/util/multibit.h
index c3a4ba461a..2fb3c948a2 100644
--- a/contrib/libs/hyperscan/src/util/multibit.h
+++ b/contrib/libs/hyperscan/src/util/multibit.h
@@ -1,670 +1,670 @@
-/*
+/*
* Copyright (c) 2015-2018, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \file
- * \brief Multibit: fast bitset structure, main runtime.
- *
- * *Structure*
- *
- * For sizes <= MMB_FLAT_MAX_BITS, a flat bit vector is used, stored as N
- * 64-bit blocks followed by one "runt block".
- *
- * In larger cases, we use a sequence of blocks forming a tree. Each bit in an
- * internal block indicates whether its child block contains valid data. Every
- * level bar the last is complete. The last level is just a basic bit vector.
- *
- * -----------------------------------------------------------------------------
- * WARNING:
- *
- * mmbit code assumes that it is legal to load 8 bytes before the end of the
- * mmbit. This means that for small mmbits (< 8 bytes), data may be read from
- * before the base pointer. It is the user's responsibility to ensure that this
- * is possible.
- * -----------------------------------------------------------------------------
- */
-#ifndef MULTIBIT_H
-#define MULTIBIT_H
-
-#include "config.h"
-#include "ue2common.h"
-#include "bitutils.h"
-#include "partial_store.h"
-#include "unaligned.h"
-#include "multibit_internal.h"
-
-#include <string.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define MMB_ONE (1ULL)
-#define MMB_ALL_ONES (0xffffffffffffffffULL)
-
-/** \brief Number of bits in a block. */
-#define MMB_KEY_BITS (sizeof(MMB_TYPE) * 8)
-
-#define MMB_KEY_MASK (MMB_KEY_BITS - 1)
-
-// Key structure defines
-#define MMB_KEY_SHIFT 6
-
-/** \brief Max size of a flat multibit. */
-#define MMB_FLAT_MAX_BITS 256
-
-// Utility functions and data
-// see multibit.c for contents
-extern const u8 mmbit_keyshift_lut[32];
-extern const u8 mmbit_maxlevel_from_keyshift_lut[32];
-extern const u8 mmbit_maxlevel_direct_lut[32];
-extern const u32 mmbit_root_offset_from_level[7];
-extern const u64a mmbit_zero_to_lut[65];
-
-static really_inline
-MMB_TYPE mmb_load(const u8 * bits) {
- return unaligned_load_u64a(bits);
-}
-
-static really_inline
-void mmb_store(u8 *bits, MMB_TYPE val) {
- unaligned_store_u64a(bits, val);
-}
-
-static really_inline
-void mmb_store_partial(u8 *bits, MMB_TYPE val, u32 block_bits) {
- assert(block_bits <= MMB_KEY_BITS);
- partial_store_u64a(bits, val, ROUNDUP_N(block_bits, 8U) / 8U);
-}
-
-static really_inline
-MMB_TYPE mmb_single_bit(u32 bit) {
- assert(bit < MMB_KEY_BITS);
- return MMB_ONE << bit;
-}
-
-static really_inline
-MMB_TYPE mmb_mask_zero_to(u32 bit) {
- assert(bit <= MMB_KEY_BITS);
-#ifdef ARCH_32_BIT
- return mmbit_zero_to_lut[bit];
-#else
- if (bit == MMB_KEY_BITS) {
- return MMB_ALL_ONES;
- } else {
- return mmb_single_bit(bit) - MMB_ONE;
- }
-#endif
-}
-
-/** \brief Returns a mask of set bits up to position \a bit. Does not handle
- * the case where bit == MMB_KEY_BITS. */
-static really_inline
-MMB_TYPE mmb_mask_zero_to_nocheck(u32 bit) {
- assert(bit < MMB_KEY_BITS);
-#ifdef ARCH_32_BIT
- return mmbit_zero_to_lut[bit];
-#else
- return mmb_single_bit(bit) - MMB_ONE;
-#endif
-}
-
-static really_inline
-u32 mmb_test(MMB_TYPE val, u32 bit) {
- assert(bit < MMB_KEY_BITS);
- return (val >> bit) & MMB_ONE;
-}
-
-static really_inline
-void mmb_set(MMB_TYPE * val, u32 bit) {
- assert(bit < MMB_KEY_BITS);
- *val |= mmb_single_bit(bit);
-}
-
-static really_inline
-void mmb_clear(MMB_TYPE * val, u32 bit) {
- assert(bit < MMB_KEY_BITS);
- *val &= ~mmb_single_bit(bit);
-}
-
-static really_inline
-u32 mmb_ctz(MMB_TYPE val) {
- return ctz64(val);
-}
-
-static really_inline
-u32 mmb_popcount(MMB_TYPE val) {
- return popcount64(val);
-}
-
-#ifndef MMMB_DEBUG
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * \brief Multibit: fast bitset structure, main runtime.
+ *
+ * *Structure*
+ *
+ * For sizes <= MMB_FLAT_MAX_BITS, a flat bit vector is used, stored as N
+ * 64-bit blocks followed by one "runt block".
+ *
+ * In larger cases, we use a sequence of blocks forming a tree. Each bit in an
+ * internal block indicates whether its child block contains valid data. Every
+ * level bar the last is complete. The last level is just a basic bit vector.
+ *
+ * -----------------------------------------------------------------------------
+ * WARNING:
+ *
+ * mmbit code assumes that it is legal to load 8 bytes before the end of the
+ * mmbit. This means that for small mmbits (< 8 bytes), data may be read from
+ * before the base pointer. It is the user's responsibility to ensure that this
+ * is possible.
+ * -----------------------------------------------------------------------------
+ */
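+
+/* Illustrative example (not part of the original header): a 1000-bit
+ * multibit exceeds MMB_FLAT_MAX_BITS (256), so it is laid out as a
+ * two-level tree: one 64-bit root block whose bit i summarises leaf
+ * block i, followed by ceil(1000 / 64) = 16 leaf blocks. Key 130 lives
+ * in leaf block 130 >> 6 == 2 at bit 130 & 63 == 2, and counts as set
+ * only while root bit 2 is also set. */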
+#ifndef MULTIBIT_H
+#define MULTIBIT_H
+
+#include "config.h"
+#include "ue2common.h"
+#include "bitutils.h"
+#include "partial_store.h"
+#include "unaligned.h"
+#include "multibit_internal.h"
+
+#include <string.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MMB_ONE (1ULL)
+#define MMB_ALL_ONES (0xffffffffffffffffULL)
+
+/** \brief Number of bits in a block. */
+#define MMB_KEY_BITS (sizeof(MMB_TYPE) * 8)
+
+#define MMB_KEY_MASK (MMB_KEY_BITS - 1)
+
+// Key structure defines
+#define MMB_KEY_SHIFT 6
+
+/** \brief Max size of a flat multibit. */
+#define MMB_FLAT_MAX_BITS 256
+
+// Utility functions and data
+// see multibit.c for contents
+extern const u8 mmbit_keyshift_lut[32];
+extern const u8 mmbit_maxlevel_from_keyshift_lut[32];
+extern const u8 mmbit_maxlevel_direct_lut[32];
+extern const u32 mmbit_root_offset_from_level[7];
+extern const u64a mmbit_zero_to_lut[65];
+
+static really_inline
+MMB_TYPE mmb_load(const u8 * bits) {
+ return unaligned_load_u64a(bits);
+}
+
+static really_inline
+void mmb_store(u8 *bits, MMB_TYPE val) {
+ unaligned_store_u64a(bits, val);
+}
+
+static really_inline
+void mmb_store_partial(u8 *bits, MMB_TYPE val, u32 block_bits) {
+ assert(block_bits <= MMB_KEY_BITS);
+ partial_store_u64a(bits, val, ROUNDUP_N(block_bits, 8U) / 8U);
+}
+
+static really_inline
+MMB_TYPE mmb_single_bit(u32 bit) {
+ assert(bit < MMB_KEY_BITS);
+ return MMB_ONE << bit;
+}
+
+static really_inline
+MMB_TYPE mmb_mask_zero_to(u32 bit) {
+ assert(bit <= MMB_KEY_BITS);
+#ifdef ARCH_32_BIT
+ return mmbit_zero_to_lut[bit];
+#else
+ if (bit == MMB_KEY_BITS) {
+ return MMB_ALL_ONES;
+ } else {
+ return mmb_single_bit(bit) - MMB_ONE;
+ }
+#endif
+}
+
+/** \brief Returns a mask of set bits up to position \a bit. Does not handle
+ * the case where bit == MMB_KEY_BITS. */
+static really_inline
+MMB_TYPE mmb_mask_zero_to_nocheck(u32 bit) {
+ assert(bit < MMB_KEY_BITS);
+#ifdef ARCH_32_BIT
+ return mmbit_zero_to_lut[bit];
+#else
+ return mmb_single_bit(bit) - MMB_ONE;
+#endif
+}
+
+static really_inline
+u32 mmb_test(MMB_TYPE val, u32 bit) {
+ assert(bit < MMB_KEY_BITS);
+ return (val >> bit) & MMB_ONE;
+}
+
+static really_inline
+void mmb_set(MMB_TYPE * val, u32 bit) {
+ assert(bit < MMB_KEY_BITS);
+ *val |= mmb_single_bit(bit);
+}
+
+static really_inline
+void mmb_clear(MMB_TYPE * val, u32 bit) {
+ assert(bit < MMB_KEY_BITS);
+ *val &= ~mmb_single_bit(bit);
+}
+
+static really_inline
+u32 mmb_ctz(MMB_TYPE val) {
+ return ctz64(val);
+}
+
+static really_inline
+u32 mmb_popcount(MMB_TYPE val) {
+ return popcount64(val);
+}
+
+#ifndef MMMB_DEBUG
#define MDEBUG_PRINTF(x, ...) do { } while(0)
-#else
-#define MDEBUG_PRINTF DEBUG_PRINTF
-#endif
-
-// Switch the following define on to trace writes to multibit.
-//#define MMB_TRACE_WRITES
-#ifdef MMB_TRACE_WRITES
-#define MMB_TRACE(format, ...) \
- printf("mmb [%u bits @ %p] " format, total_bits, bits, ##__VA_ARGS__)
-#else
-#define MMB_TRACE(format, ...) \
- do { \
- } while (0)
-#endif
-
-static really_inline
-u32 mmbit_keyshift(u32 total_bits) {
- assert(total_bits > 1);
- u32 n = clz32(total_bits - 1); // subtract one as we're rounding down
- return mmbit_keyshift_lut[n];
-}
-
-static really_inline
-u32 mmbit_maxlevel(u32 total_bits) {
- assert(total_bits > 1);
- u32 n = clz32(total_bits - 1); // subtract one as we're rounding down
- u32 max_level = mmbit_maxlevel_direct_lut[n];
- assert(max_level <= MMB_MAX_LEVEL);
- return max_level;
-}
-
-static really_inline
-u32 mmbit_maxlevel_from_keyshift(u32 ks) {
- assert(ks <= 30);
- assert(ks % MMB_KEY_SHIFT == 0);
-
- u32 max_level = mmbit_maxlevel_from_keyshift_lut[ks];
- assert(max_level <= MMB_MAX_LEVEL);
- return max_level;
-}
-
-/** \brief get our keyshift for the current level */
-static really_inline
-u32 mmbit_get_ks(u32 max_level, u32 level) {
- assert(max_level <= MMB_MAX_LEVEL);
- assert(level <= max_level);
- return (max_level - level) * MMB_KEY_SHIFT;
-}
-
-/** \brief get our key value for the current level */
-static really_inline
-u32 mmbit_get_key_val(u32 max_level, u32 level, u32 key) {
- return (key >> mmbit_get_ks(max_level, level)) & MMB_KEY_MASK;
-}
-
-/** \brief get the level root for the current level */
-static really_inline
-u8 *mmbit_get_level_root(u8 *bits, u32 level) {
- assert(level < ARRAY_LENGTH(mmbit_root_offset_from_level));
- return bits + mmbit_root_offset_from_level[level] * sizeof(MMB_TYPE);
-}
-
-/** \brief get the level root for the current level as const */
-static really_inline
-const u8 *mmbit_get_level_root_const(const u8 *bits, u32 level) {
- assert(level < ARRAY_LENGTH(mmbit_root_offset_from_level));
- return bits + mmbit_root_offset_from_level[level] * sizeof(MMB_TYPE);
-}
-
-/** \brief get the block for this key on the current level as a u8 ptr */
-static really_inline
-u8 *mmbit_get_block_ptr(u8 *bits, u32 max_level, u32 level, u32 key) {
- u8 *level_root = mmbit_get_level_root(bits, level);
- u32 ks = mmbit_get_ks(max_level, level);
- return level_root + ((u64a)key >> (ks + MMB_KEY_SHIFT)) * sizeof(MMB_TYPE);
-}
-
-/** \brief get the block for this key on the current level as a const u8 ptr */
-static really_inline
-const u8 *mmbit_get_block_ptr_const(const u8 *bits, u32 max_level, u32 level,
- u32 key) {
- const u8 *level_root = mmbit_get_level_root_const(bits, level);
- u32 ks = mmbit_get_ks(max_level, level);
- return level_root + ((u64a)key >> (ks + MMB_KEY_SHIFT)) * sizeof(MMB_TYPE);
-}
-
-/** \brief get the _byte_ for this key on the current level as a u8 ptr */
-static really_inline
-u8 *mmbit_get_byte_ptr(u8 *bits, u32 max_level, u32 level, u32 key) {
- u8 *level_root = mmbit_get_level_root(bits, level);
- u32 ks = mmbit_get_ks(max_level, level);
- return level_root + ((u64a)key >> (ks + MMB_KEY_SHIFT - 3));
-}
-
-/** \brief get our key value for the current level */
-static really_inline
-u32 mmbit_get_key_val_byte(u32 max_level, u32 level, u32 key) {
- return (key >> (mmbit_get_ks(max_level, level))) & 0x7;
-}
-
-/** \brief Load a flat bitvector block corresponding to N bits. */
-static really_inline
-MMB_TYPE mmbit_get_flat_block(const u8 *bits, u32 n_bits) {
- assert(n_bits <= MMB_KEY_BITS);
- u32 n_bytes = ROUNDUP_N(n_bits, 8) / 8;
- switch (n_bytes) {
- case 1:
- return *bits;
- case 2:
- return unaligned_load_u16(bits);
- case 3:
- case 4: {
- u32 rv;
- assert(n_bytes <= sizeof(rv));
- memcpy(&rv, bits + n_bytes - sizeof(rv), sizeof(rv));
- rv >>= (sizeof(rv) - n_bytes) * 8; /* need to shift to get things in
- * the right position and remove
- * junk */
- assert(rv == partial_load_u32(bits, n_bytes));
- return rv;
- }
- default: {
- u64a rv;
- assert(n_bytes <= sizeof(rv));
- memcpy(&rv, bits + n_bytes - sizeof(rv), sizeof(rv));
- rv >>= (sizeof(rv) - n_bytes) * 8; /* need to shift to get things in
- * the right position and remove
- * junk */
- assert(rv == partial_load_u64a(bits, n_bytes));
- return rv;
- }
- }
-}
-
-/** \brief True if this multibit is small enough to use a flat model */
-static really_inline
-u32 mmbit_is_flat_model(u32 total_bits) {
- return total_bits <= MMB_FLAT_MAX_BITS;
-}
-
-static really_inline
-u32 mmbit_flat_size(u32 total_bits) {
- assert(mmbit_is_flat_model(total_bits));
- return ROUNDUP_N(total_bits, 8) / 8;
-}
-
-static really_inline
-u32 mmbit_flat_select_byte(u32 key, UNUSED u32 total_bits) {
- return key / 8;
-}
-
-/** \brief returns the dense index of the bit in the given mask. */
-static really_inline
-u32 mmbit_mask_index(u32 bit, MMB_TYPE mask) {
- assert(bit < MMB_KEY_BITS);
- assert(mmb_test(mask, bit));
-
- mask &= mmb_mask_zero_to(bit);
- if (mask == 0ULL) {
- return 0; // Common case.
- }
- return mmb_popcount(mask);
-}
-
-/** \brief Clear all bits. */
-static really_inline
-void mmbit_clear(u8 *bits, u32 total_bits) {
- MDEBUG_PRINTF("%p total_bits %u\n", bits, total_bits);
- MMB_TRACE("CLEAR\n");
- if (!total_bits) {
- return;
- }
- if (mmbit_is_flat_model(total_bits)) {
- memset(bits, 0, mmbit_flat_size(total_bits));
- return;
- }
- mmb_store(bits, 0);
-}
-
-/** \brief Specialisation of \ref mmbit_set for flat models. */
-static really_inline
-char mmbit_set_flat(u8 *bits, u32 total_bits, u32 key) {
- bits += mmbit_flat_select_byte(key, total_bits);
- u8 mask = 1U << (key % 8);
- char was_set = !!(*bits & mask);
- *bits |= mask;
- return was_set;
-}
-
-static really_inline
-char mmbit_set_big(u8 *bits, u32 total_bits, u32 key) {
- const u32 max_level = mmbit_maxlevel(total_bits);
- u32 level = 0;
- do {
- u8 * byte_ptr = mmbit_get_byte_ptr(bits, max_level, level, key);
- u8 keymask = 1U << mmbit_get_key_val_byte(max_level, level, key);
- u8 byte = *byte_ptr;
- if (likely(!(byte & keymask))) {
- *byte_ptr = byte | keymask;
- while (level++ != max_level) {
- u8 *block_ptr_1 = mmbit_get_block_ptr(bits, max_level, level, key);
- MMB_TYPE keymask_1 = mmb_single_bit(mmbit_get_key_val(max_level, level, key));
- mmb_store(block_ptr_1, keymask_1);
- }
- return 0;
- }
- } while (level++ != max_level);
- return 1;
-}
-
-/** Internal version of \ref mmbit_set without MMB_TRACE, so it can be used by
- * \ref mmbit_sparse_iter_dump. */
-static really_inline
-char mmbit_set_i(u8 *bits, u32 total_bits, u32 key) {
- assert(key < total_bits);
- if (mmbit_is_flat_model(total_bits)) {
- return mmbit_set_flat(bits, total_bits, key);
- } else {
- return mmbit_set_big(bits, total_bits, key);
- }
-}
-
-static really_inline
-char mmbit_isset(const u8 *bits, u32 total_bits, u32 key);
-
-/** \brief Sets the given key in the multibit. Returns 0 if the key was NOT
- * already set, 1 otherwise. */
-static really_inline
-char mmbit_set(u8 *bits, u32 total_bits, u32 key) {
- MDEBUG_PRINTF("%p total_bits %u key %u\n", bits, total_bits, key);
- char status = mmbit_set_i(bits, total_bits, key);
- MMB_TRACE("SET %u (prev status: %d)\n", key, (int)status);
- assert(mmbit_isset(bits, total_bits, key));
- return status;
-}
-
-/** \brief Specialisation of \ref mmbit_isset for flat models. */
-static really_inline
-char mmbit_isset_flat(const u8 *bits, u32 total_bits, u32 key) {
- bits += mmbit_flat_select_byte(key, total_bits);
- return !!(*bits & (1U << (key % 8U)));
-}
-
-static really_inline
-char mmbit_isset_big(const u8 *bits, u32 total_bits, u32 key) {
- const u32 max_level = mmbit_maxlevel(total_bits);
- u32 level = 0;
- do {
- const u8 *block_ptr = mmbit_get_block_ptr_const(bits, max_level, level, key);
- MMB_TYPE block = mmb_load(block_ptr);
- if (!mmb_test(block, mmbit_get_key_val(max_level, level, key))) {
- return 0;
- }
- } while (level++ != max_level);
- return 1;
-}
-
-/** \brief Returns whether the given key is set. */
-static really_inline
-char mmbit_isset(const u8 *bits, u32 total_bits, u32 key) {
- MDEBUG_PRINTF("%p total_bits %u key %u\n", bits, total_bits, key);
- assert(key < total_bits);
- if (mmbit_is_flat_model(total_bits)) {
- return mmbit_isset_flat(bits, total_bits, key);
- } else {
- return mmbit_isset_big(bits, total_bits, key);
- }
-}
-
-/** \brief Specialisation of \ref mmbit_unset for flat models. */
-static really_inline
-void mmbit_unset_flat(u8 *bits, u32 total_bits, u32 key) {
- bits += mmbit_flat_select_byte(key, total_bits);
- *bits &= ~(1U << (key % 8U));
-}
-
-// TODO:
-// build two versions of this - unset_dangerous that doesn't clear the summary
-// block and a regular unset that actually clears ALL the way up the levels if
-// possible - might make a utility function for the clear
-static really_inline
-void mmbit_unset_big(u8 *bits, u32 total_bits, u32 key) {
- /* This function is lazy as it does not clear the summary block
- * entry if the child becomes empty. This is not a correctness problem as the
- * summary block entries are used to mean that their children are valid
- * rather than that they have a set child. */
- const u32 max_level = mmbit_maxlevel(total_bits);
- u32 level = 0;
- do {
- u8 *block_ptr = mmbit_get_block_ptr(bits, max_level, level, key);
- u32 key_val = mmbit_get_key_val(max_level, level, key);
- MMB_TYPE block = mmb_load(block_ptr);
- if (!mmb_test(block, key_val)) {
- return;
- }
- if (level == max_level) {
- mmb_clear(&block, key_val);
- mmb_store(block_ptr, block);
- }
- } while (level++ != max_level);
-}
-
-/** \brief Switch off a given key. */
-static really_inline
-void mmbit_unset(u8 *bits, u32 total_bits, u32 key) {
- MDEBUG_PRINTF("%p total_bits %u key %u\n", bits, total_bits, key);
- assert(key < total_bits);
- MMB_TRACE("UNSET %u (prev status: %d)\n", key,
- (int)mmbit_isset(bits, total_bits, key));
-
- if (mmbit_is_flat_model(total_bits)) {
- mmbit_unset_flat(bits, total_bits, key);
- } else {
- mmbit_unset_big(bits, total_bits, key);
- }
-}
-
-/** \brief Specialisation of \ref mmbit_iterate for flat models. */
-static really_inline
-u32 mmbit_iterate_flat(const u8 *bits, u32 total_bits, u32 it_in) {
- // Short cut for single-block cases.
- if (total_bits <= MMB_KEY_BITS) {
- MMB_TYPE block = mmbit_get_flat_block(bits, total_bits);
- if (it_in != MMB_INVALID) {
- it_in++;
- assert(it_in < total_bits);
- block &= ~mmb_mask_zero_to(it_in);
- }
- if (block) {
- return mmb_ctz(block);
- }
- return MMB_INVALID;
- }
-
- const u32 last_block = total_bits / MMB_KEY_BITS;
- u32 start; // starting block index
-
- if (it_in != MMB_INVALID) {
- it_in++;
- assert(it_in < total_bits);
-
- start = (ROUNDUP_N(it_in, MMB_KEY_BITS) / MMB_KEY_BITS) - 1;
- u32 start_key = start * MMB_KEY_BITS;
- u32 block_size = MIN(MMB_KEY_BITS, total_bits - start_key);
- MMB_TYPE block =
- mmbit_get_flat_block(bits + (start * sizeof(MMB_TYPE)), block_size);
- block &= ~mmb_mask_zero_to(it_in - start_key);
-
- if (block) {
- return start_key + mmb_ctz(block);
- } else if (start_key + MMB_KEY_BITS >= total_bits) {
- return MMB_INVALID; // That was the final block.
- }
- start++;
- } else {
- start = 0;
- }
-
- // Remaining full-sized blocks.
- for (; start < last_block; start++) {
- MMB_TYPE block = mmb_load(bits + (start * sizeof(MMB_TYPE)));
- if (block) {
- return (start * MMB_KEY_BITS) + mmb_ctz(block);
- }
- }
-
- // We may have a final, smaller than full-sized, block to deal with at the
- // end.
- if (total_bits % MMB_KEY_BITS) {
- u32 start_key = start * MMB_KEY_BITS;
- u32 block_size = MIN(MMB_KEY_BITS, total_bits - start_key);
- MMB_TYPE block =
- mmbit_get_flat_block(bits + (start * sizeof(MMB_TYPE)), block_size);
- if (block) {
- return start_key + mmb_ctz(block);
- }
- }
-
- return MMB_INVALID;
-}
-
-static really_inline
-u32 mmbit_iterate_big(const u8 * bits, u32 total_bits, u32 it_in) {
- const u32 max_level = mmbit_maxlevel(total_bits);
- u32 level = 0;
- u32 key = 0;
- u32 key_rem = 0;
-
- if (it_in != MMB_INVALID) {
- // We're continuing a previous iteration, so we need to go
- // to max_level so we can pick up where we left off.
- // NOTE: assumes that we're valid down the whole tree
- key = it_in >> MMB_KEY_SHIFT;
- key_rem = (it_in & MMB_KEY_MASK) + 1;
- level = max_level;
- }
- while (1) {
- if (key_rem < MMB_KEY_BITS) {
- const u8 *block_ptr = mmbit_get_level_root_const(bits, level) +
- key * sizeof(MMB_TYPE);
- MMB_TYPE block
- = mmb_load(block_ptr) & ~mmb_mask_zero_to_nocheck(key_rem);
- if (block) {
- key = (key << MMB_KEY_SHIFT) + mmb_ctz(block);
- if (level++ == max_level) {
- break;
- }
- key_rem = 0;
- continue; // jump the rootwards step if we found a 'tree' non-zero bit
- }
- }
- // rootwards step (block is zero or key_rem == MMB_KEY_BITS)
- if (level-- == 0) {
- return MMB_INVALID; // if we don't find anything and we're at the top level, we're done
- }
- key_rem = (key & MMB_KEY_MASK) + 1;
- key >>= MMB_KEY_SHIFT;
- }
- assert(key < total_bits);
- assert(mmbit_isset(bits, total_bits, key));
- return key;
-}
-
-/** \brief Unbounded iterator. Returns the index of the next set bit after \a
- * it_in, or MMB_INVALID.
- *
- * Note: assumes that if you pass in a value of it_in other than MMB_INVALID,
- * that bit must be on (assumes all its summary blocks are set).
- */
-static really_inline
-u32 mmbit_iterate(const u8 *bits, u32 total_bits, u32 it_in) {
- MDEBUG_PRINTF("%p total_bits %u it_in %u\n", bits, total_bits, it_in);
- assert(it_in < total_bits || it_in == MMB_INVALID);
- if (!total_bits) {
- return MMB_INVALID;
- }
- if (it_in == total_bits - 1) {
- return MMB_INVALID; // it_in is the last key.
- }
-
- u32 key;
- if (mmbit_is_flat_model(total_bits)) {
- key = mmbit_iterate_flat(bits, total_bits, it_in);
- } else {
- key = mmbit_iterate_big(bits, total_bits, it_in);
- }
- assert(key == MMB_INVALID || mmbit_isset(bits, total_bits, key));
- return key;
-}
-
-/** \brief Specialisation of \ref mmbit_any and \ref mmbit_any_precise for flat
- * models. */
-static really_inline
-char mmbit_any_flat(const u8 *bits, u32 total_bits) {
- if (total_bits <= MMB_KEY_BITS) {
- return !!mmbit_get_flat_block(bits, total_bits);
- }
-
- const u8 *end = bits + mmbit_flat_size(total_bits);
- for (const u8 *last = end - sizeof(MMB_TYPE); bits < last;
- bits += sizeof(MMB_TYPE)) {
- if (mmb_load(bits)) {
- return 1;
- }
- }
-
- // Overlapping load at the end.
- return !!mmb_load(end - sizeof(MMB_TYPE));
-}
-
-/** \brief True if any keys are (or might be) on in the given multibit.
- *
- * NOTE: mmbit_any is sloppy (may return true when only summary bits are set).
- * Use \ref mmbit_any_precise if you need/want a correct answer.
- */
-static really_inline
-char mmbit_any(const u8 *bits, u32 total_bits) {
- MDEBUG_PRINTF("%p total_bits %u\n", bits, total_bits);
- if (!total_bits) {
- return 0;
- }
- if (mmbit_is_flat_model(total_bits)) {
- return mmbit_any_flat(bits, total_bits);
- }
- return !!mmb_load(bits);
-}
-
-/** \brief True if there are any keys on. Guaranteed precise. */
-static really_inline
-char mmbit_any_precise(const u8 *bits, u32 total_bits) {
- MDEBUG_PRINTF("%p total_bits %u\n", bits, total_bits);
- if (!total_bits) {
- return 0;
- }
- if (mmbit_is_flat_model(total_bits)) {
- return mmbit_any_flat(bits, total_bits);
- }
-
- return mmbit_iterate_big(bits, total_bits, MMB_INVALID) != MMB_INVALID;
-}
-
-static really_inline
+#else
+#define MDEBUG_PRINTF DEBUG_PRINTF
+#endif
+
+// Switch the following define on to trace writes to multibit.
+//#define MMB_TRACE_WRITES
+#ifdef MMB_TRACE_WRITES
+#define MMB_TRACE(format, ...) \
+ printf("mmb [%u bits @ %p] " format, total_bits, bits, ##__VA_ARGS__)
+#else
+#define MMB_TRACE(format, ...) \
+ do { \
+ } while (0)
+#endif
+
+static really_inline
+u32 mmbit_keyshift(u32 total_bits) {
+ assert(total_bits > 1);
+ u32 n = clz32(total_bits - 1); // subtract one as we're rounding down
+ return mmbit_keyshift_lut[n];
+}
+
+static really_inline
+u32 mmbit_maxlevel(u32 total_bits) {
+ assert(total_bits > 1);
+ u32 n = clz32(total_bits - 1); // subtract one as we're rounding down
+ u32 max_level = mmbit_maxlevel_direct_lut[n];
+ assert(max_level <= MMB_MAX_LEVEL);
+ return max_level;
+}
+
+static really_inline
+u32 mmbit_maxlevel_from_keyshift(u32 ks) {
+ assert(ks <= 30);
+ assert(ks % MMB_KEY_SHIFT == 0);
+
+ u32 max_level = mmbit_maxlevel_from_keyshift_lut[ks];
+ assert(max_level <= MMB_MAX_LEVEL);
+ return max_level;
+}
+
+/** \brief get our keyshift for the current level */
+static really_inline
+u32 mmbit_get_ks(u32 max_level, u32 level) {
+ assert(max_level <= MMB_MAX_LEVEL);
+ assert(level <= max_level);
+ return (max_level - level) * MMB_KEY_SHIFT;
+}
+
+/** \brief get our key value for the current level */
+static really_inline
+u32 mmbit_get_key_val(u32 max_level, u32 level, u32 key) {
+ return (key >> mmbit_get_ks(max_level, level)) & MMB_KEY_MASK;
+}
+
+/** \brief get the level root for the current level */
+static really_inline
+u8 *mmbit_get_level_root(u8 *bits, u32 level) {
+ assert(level < ARRAY_LENGTH(mmbit_root_offset_from_level));
+ return bits + mmbit_root_offset_from_level[level] * sizeof(MMB_TYPE);
+}
+
+/** \brief get the level root for the current level as const */
+static really_inline
+const u8 *mmbit_get_level_root_const(const u8 *bits, u32 level) {
+ assert(level < ARRAY_LENGTH(mmbit_root_offset_from_level));
+ return bits + mmbit_root_offset_from_level[level] * sizeof(MMB_TYPE);
+}
+
+/** \brief get the block for this key on the current level as a u8 ptr */
+static really_inline
+u8 *mmbit_get_block_ptr(u8 *bits, u32 max_level, u32 level, u32 key) {
+ u8 *level_root = mmbit_get_level_root(bits, level);
+ u32 ks = mmbit_get_ks(max_level, level);
+ return level_root + ((u64a)key >> (ks + MMB_KEY_SHIFT)) * sizeof(MMB_TYPE);
+}
+
+/** \brief get the block for this key on the current level as a const u8 ptr */
+static really_inline
+const u8 *mmbit_get_block_ptr_const(const u8 *bits, u32 max_level, u32 level,
+ u32 key) {
+ const u8 *level_root = mmbit_get_level_root_const(bits, level);
+ u32 ks = mmbit_get_ks(max_level, level);
+ return level_root + ((u64a)key >> (ks + MMB_KEY_SHIFT)) * sizeof(MMB_TYPE);
+}
+
+/** \brief get the _byte_ for this key on the current level as a u8 ptr */
+static really_inline
+u8 *mmbit_get_byte_ptr(u8 *bits, u32 max_level, u32 level, u32 key) {
+ u8 *level_root = mmbit_get_level_root(bits, level);
+ u32 ks = mmbit_get_ks(max_level, level);
+ return level_root + ((u64a)key >> (ks + MMB_KEY_SHIFT - 3));
+}
+
+/** \brief get our key value for the current level */
+static really_inline
+u32 mmbit_get_key_val_byte(u32 max_level, u32 level, u32 key) {
+ return (key >> (mmbit_get_ks(max_level, level))) & 0x7;
+}
+
+/** \brief Load a flat bitvector block corresponding to N bits. */
+static really_inline
+MMB_TYPE mmbit_get_flat_block(const u8 *bits, u32 n_bits) {
+ assert(n_bits <= MMB_KEY_BITS);
+ u32 n_bytes = ROUNDUP_N(n_bits, 8) / 8;
+ switch (n_bytes) {
+ case 1:
+ return *bits;
+ case 2:
+ return unaligned_load_u16(bits);
+ case 3:
+ case 4: {
+ u32 rv;
+ assert(n_bytes <= sizeof(rv));
+ memcpy(&rv, bits + n_bytes - sizeof(rv), sizeof(rv));
+ rv >>= (sizeof(rv) - n_bytes) * 8; /* need to shift to get things in
+ * the right position and remove
+ * junk */
+ assert(rv == partial_load_u32(bits, n_bytes));
+ return rv;
+ }
+ default: {
+ u64a rv;
+ assert(n_bytes <= sizeof(rv));
+ memcpy(&rv, bits + n_bytes - sizeof(rv), sizeof(rv));
+ rv >>= (sizeof(rv) - n_bytes) * 8; /* need to shift to get things in
+ * the right position and remove
+ * junk */
+ assert(rv == partial_load_u64a(bits, n_bytes));
+ return rv;
+ }
+ }
+}
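+
+/* Worked example for the loads above (illustrative, not from the original
+ * source): with n_bits = 20, n_bytes = 3, the memcpy reads the four bytes
+ * at bits[-1..2] -- one byte *before* the block, which is the overread the
+ * file-level WARNING licenses -- and, on the little-endian targets assumed
+ * here, the shift by (4 - 3) * 8 discards the stray low byte, leaving
+ * bits[0..2] in the low 24 bits of rv. */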
+
+/** \brief True if this multibit is small enough to use a flat model */
+static really_inline
+u32 mmbit_is_flat_model(u32 total_bits) {
+ return total_bits <= MMB_FLAT_MAX_BITS;
+}
+
+static really_inline
+u32 mmbit_flat_size(u32 total_bits) {
+ assert(mmbit_is_flat_model(total_bits));
+ return ROUNDUP_N(total_bits, 8) / 8;
+}
+
+static really_inline
+u32 mmbit_flat_select_byte(u32 key, UNUSED u32 total_bits) {
+ return key / 8;
+}
+
+/** \brief returns the dense index of the bit in the given mask. */
+static really_inline
+u32 mmbit_mask_index(u32 bit, MMB_TYPE mask) {
+ assert(bit < MMB_KEY_BITS);
+ assert(mmb_test(mask, bit));
+
+ mask &= mmb_mask_zero_to(bit);
+ if (mask == 0ULL) {
+ return 0; // Common case.
+ }
+ return mmb_popcount(mask);
+}
+
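+/* Worked example (annotation): with mask = 0b10110 (keys 1, 2 and 4 set),
+ * mmbit_mask_index(4, mask) keeps only the bits below key 4 (0b00110) and
+ * popcounts them, giving dense index 2: key 4 is the third set key. */
+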
+/** \brief Clear all bits. */
+static really_inline
+void mmbit_clear(u8 *bits, u32 total_bits) {
+ MDEBUG_PRINTF("%p total_bits %u\n", bits, total_bits);
+ MMB_TRACE("CLEAR\n");
+ if (!total_bits) {
+ return;
+ }
+ if (mmbit_is_flat_model(total_bits)) {
+ memset(bits, 0, mmbit_flat_size(total_bits));
+ return;
+ }
+ mmb_store(bits, 0);
+}
+
+/** \brief Specialisation of \ref mmbit_set for flat models. */
+static really_inline
+char mmbit_set_flat(u8 *bits, u32 total_bits, u32 key) {
+ bits += mmbit_flat_select_byte(key, total_bits);
+ u8 mask = 1U << (key % 8);
+ char was_set = !!(*bits & mask);
+ *bits |= mask;
+ return was_set;
+}
+
+static really_inline
+char mmbit_set_big(u8 *bits, u32 total_bits, u32 key) {
+ const u32 max_level = mmbit_maxlevel(total_bits);
+ u32 level = 0;
+ do {
+ u8 *byte_ptr = mmbit_get_byte_ptr(bits, max_level, level, key);
+ u8 keymask = 1U << mmbit_get_key_val_byte(max_level, level, key);
+ u8 byte = *byte_ptr;
+ if (likely(!(byte & keymask))) {
+ *byte_ptr = byte | keymask;
+ while (level++ != max_level) {
+ u8 *block_ptr_1 = mmbit_get_block_ptr(bits, max_level, level, key);
+ MMB_TYPE keymask_1 = mmb_single_bit(mmbit_get_key_val(max_level, level, key));
+ mmb_store(block_ptr_1, keymask_1);
+ }
+ return 0;
+ }
+ } while (level++ != max_level);
+ return 1;
+}
+
+/** Internal version of \ref mmbit_set without MMB_TRACE, so it can be used by
+ * \ref mmbit_sparse_iter_dump. */
+static really_inline
+char mmbit_set_i(u8 *bits, u32 total_bits, u32 key) {
+ assert(key < total_bits);
+ if (mmbit_is_flat_model(total_bits)) {
+ return mmbit_set_flat(bits, total_bits, key);
+ } else {
+ return mmbit_set_big(bits, total_bits, key);
+ }
+}
+
+static really_inline
+char mmbit_isset(const u8 *bits, u32 total_bits, u32 key);
+
+/** \brief Sets the given key in the multibit. Returns 0 if the key was NOT
+ * already set, 1 otherwise. */
+static really_inline
+char mmbit_set(u8 *bits, u32 total_bits, u32 key) {
+ MDEBUG_PRINTF("%p total_bits %u key %u\n", bits, total_bits, key);
+ char status = mmbit_set_i(bits, total_bits, key);
+ MMB_TRACE("SET %u (prev status: %d)\n", key, (int)status);
+ assert(mmbit_isset(bits, total_bits, key));
+ return status;
+}
+
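+/* Usage sketch (annotation, not part of the original header; the wrapper
+ * name and the 32-byte buffer are our assumptions, since real callers size
+ * the storage with the library's sizing helper). */
+static really_inline
+void mmbit_set_usage_example(void) {
+ u8 storage[32]; // assumed large enough for a 64-bit multibit
+ const u32 total_bits = 64;
+ mmbit_clear(storage, total_bits); // all keys start off
+ char was_set = mmbit_set(storage, total_bits, 42); // 0: key was off
+ assert(!was_set);
+ was_set = mmbit_set(storage, total_bits, 42); // 1: key already on
+ assert(was_set);
+ assert(mmbit_isset(storage, total_bits, 42));
+}
+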
+/** \brief Specialisation of \ref mmbit_isset for flat models. */
+static really_inline
+char mmbit_isset_flat(const u8 *bits, u32 total_bits, u32 key) {
+ bits += mmbit_flat_select_byte(key, total_bits);
+ return !!(*bits & (1U << (key % 8U)));
+}
+
+static really_inline
+char mmbit_isset_big(const u8 *bits, u32 total_bits, u32 key) {
+ const u32 max_level = mmbit_maxlevel(total_bits);
+ u32 level = 0;
+ do {
+ const u8 *block_ptr = mmbit_get_block_ptr_const(bits, max_level, level, key);
+ MMB_TYPE block = mmb_load(block_ptr);
+ if (!mmb_test(block, mmbit_get_key_val(max_level, level, key))) {
+ return 0;
+ }
+ } while (level++ != max_level);
+ return 1;
+}
+
+/** \brief Returns whether the given key is set. */
+static really_inline
+char mmbit_isset(const u8 *bits, u32 total_bits, u32 key) {
+ MDEBUG_PRINTF("%p total_bits %u key %u\n", bits, total_bits, key);
+ assert(key < total_bits);
+ if (mmbit_is_flat_model(total_bits)) {
+ return mmbit_isset_flat(bits, total_bits, key);
+ } else {
+ return mmbit_isset_big(bits, total_bits, key);
+ }
+}
+
+/** \brief Specialisation of \ref mmbit_unset for flat models. */
+static really_inline
+void mmbit_unset_flat(u8 *bits, u32 total_bits, u32 key) {
+ bits += mmbit_flat_select_byte(key, total_bits);
+ *bits &= ~(1U << (key % 8U));
+}
+
+// TODO:
+// build two versions of this: an unset_dangerous that doesn't clear the
+// summary block, and a regular unset that actually clears ALL the way up the
+// levels where possible - might make a utility function for the clear
+static really_inline
+void mmbit_unset_big(u8 *bits, u32 total_bits, u32 key) {
+ /* This function is lazy as it does not clear the summary block
+ * entry if the child becomes empty. This is not a correctness problem as the
+ * summary block entries are used to mean that their children are valid
+ * rather than that they have a set child. */
+ const u32 max_level = mmbit_maxlevel(total_bits);
+ u32 level = 0;
+ do {
+ u8 *block_ptr = mmbit_get_block_ptr(bits, max_level, level, key);
+ u32 key_val = mmbit_get_key_val(max_level, level, key);
+ MMB_TYPE block = mmb_load(block_ptr);
+ if (!mmb_test(block, key_val)) {
+ return;
+ }
+ if (level == max_level) {
+ mmb_clear(&block, key_val);
+ mmb_store(block_ptr, block);
+ }
+ } while (level++ != max_level);
+}
+
+/** \brief Switch off a given key. */
+static really_inline
+void mmbit_unset(u8 *bits, u32 total_bits, u32 key) {
+ MDEBUG_PRINTF("%p total_bits %u key %u\n", bits, total_bits, key);
+ assert(key < total_bits);
+ MMB_TRACE("UNSET %u (prev status: %d)\n", key,
+ (int)mmbit_isset(bits, total_bits, key));
+
+ if (mmbit_is_flat_model(total_bits)) {
+ mmbit_unset_flat(bits, total_bits, key);
+ } else {
+ mmbit_unset_big(bits, total_bits, key);
+ }
+}
+
+/** \brief Specialisation of \ref mmbit_iterate for flat models. */
+static really_inline
+u32 mmbit_iterate_flat(const u8 *bits, u32 total_bits, u32 it_in) {
+ // Short cut for single-block cases.
+ if (total_bits <= MMB_KEY_BITS) {
+ MMB_TYPE block = mmbit_get_flat_block(bits, total_bits);
+ if (it_in != MMB_INVALID) {
+ it_in++;
+ assert(it_in < total_bits);
+ block &= ~mmb_mask_zero_to(it_in);
+ }
+ if (block) {
+ return mmb_ctz(block);
+ }
+ return MMB_INVALID;
+ }
+
+ const u32 last_block = total_bits / MMB_KEY_BITS;
+ u32 start; // starting block index
+
+ if (it_in != MMB_INVALID) {
+ it_in++;
+ assert(it_in < total_bits);
+
+ start = (ROUNDUP_N(it_in, MMB_KEY_BITS) / MMB_KEY_BITS) - 1;
+ u32 start_key = start * MMB_KEY_BITS;
+ u32 block_size = MIN(MMB_KEY_BITS, total_bits - start_key);
+ MMB_TYPE block =
+ mmbit_get_flat_block(bits + (start * sizeof(MMB_TYPE)), block_size);
+ block &= ~mmb_mask_zero_to(it_in - start_key);
+
+ if (block) {
+ return start_key + mmb_ctz(block);
+ } else if (start_key + MMB_KEY_BITS >= total_bits) {
+ return MMB_INVALID; // That was the final block.
+ }
+ start++;
+ } else {
+ start = 0;
+ }
+
+ // Remaining full-sized blocks.
+ for (; start < last_block; start++) {
+ MMB_TYPE block = mmb_load(bits + (start * sizeof(MMB_TYPE)));
+ if (block) {
+ return (start * MMB_KEY_BITS) + mmb_ctz(block);
+ }
+ }
+
+ // We may have a final, smaller than full-sized, block to deal with at the
+ // end.
+ if (total_bits % MMB_KEY_BITS) {
+ u32 start_key = start * MMB_KEY_BITS;
+ u32 block_size = MIN(MMB_KEY_BITS, total_bits - start_key);
+ MMB_TYPE block =
+ mmbit_get_flat_block(bits + (start * sizeof(MMB_TYPE)), block_size);
+ if (block) {
+ return start_key + mmb_ctz(block);
+ }
+ }
+
+ return MMB_INVALID;
+}
+
+static really_inline
+u32 mmbit_iterate_big(const u8 *bits, u32 total_bits, u32 it_in) {
+ const u32 max_level = mmbit_maxlevel(total_bits);
+ u32 level = 0;
+ u32 key = 0;
+ u32 key_rem = 0;
+
+ if (it_in != MMB_INVALID) {
+ // We're continuing a previous iteration, so we need to go
+ // to max_level so we can pick up where we left off.
+ // NOTE: assumes that we're valid down the whole tree
+ key = it_in >> MMB_KEY_SHIFT;
+ key_rem = (it_in & MMB_KEY_MASK) + 1;
+ level = max_level;
+ }
+ while (1) {
+ if (key_rem < MMB_KEY_BITS) {
+ const u8 *block_ptr = mmbit_get_level_root_const(bits, level) +
+ key * sizeof(MMB_TYPE);
+ MMB_TYPE block
+ = mmb_load(block_ptr) & ~mmb_mask_zero_to_nocheck(key_rem);
+ if (block) {
+ key = (key << MMB_KEY_SHIFT) + mmb_ctz(block);
+ if (level++ == max_level) {
+ break;
+ }
+ key_rem = 0;
+ continue; // skip the rootwards step if we found a non-zero 'tree' bit
+ }
+ }
+ // rootwards step (block is zero or key_rem == MMB_KEY_BITS)
+ if (level-- == 0) {
+ return MMB_INVALID; // if we don't find anything and we're at the top level, we're done
+ }
+ key_rem = (key & MMB_KEY_MASK) + 1;
+ key >>= MMB_KEY_SHIFT;
+ }
+ assert(key < total_bits);
+ assert(mmbit_isset(bits, total_bits, key));
+ return key;
+}
+
+/** \brief Unbounded iterator. Returns the index of the next set bit after \a
+ * it_in, or MMB_INVALID.
+ *
+ * Note: assumes that if you pass in a value of it_in other than MMB_INVALID,
+ * that bit must be on (assumes all its summary blocks are set).
+ */
+static really_inline
+u32 mmbit_iterate(const u8 *bits, u32 total_bits, u32 it_in) {
+ MDEBUG_PRINTF("%p total_bits %u it_in %u\n", bits, total_bits, it_in);
+ assert(it_in < total_bits || it_in == MMB_INVALID);
+ if (!total_bits) {
+ return MMB_INVALID;
+ }
+ if (it_in == total_bits - 1) {
+ return MMB_INVALID; // it_in is the last key.
+ }
+
+ u32 key;
+ if (mmbit_is_flat_model(total_bits)) {
+ key = mmbit_iterate_flat(bits, total_bits, it_in);
+ } else {
+ key = mmbit_iterate_big(bits, total_bits, it_in);
+ }
+ assert(key == MMB_INVALID || mmbit_isset(bits, total_bits, key));
+ return key;
+}
+
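+/* Usage sketch (annotation; the wrapper name is ours): the canonical scan
+ * loop. MMB_INVALID doubles as both the "start at the beginning" input and
+ * the "no more keys" result, so this visits every set key in increasing
+ * order exactly once. */
+static really_inline
+void mmbit_iterate_usage_example(const u8 *bits, u32 total_bits) {
+ for (u32 key = mmbit_iterate(bits, total_bits, MMB_INVALID);
+ key != MMB_INVALID;
+ key = mmbit_iterate(bits, total_bits, key)) {
+ // `key` is a set bit; do something with it here
+ }
+}
+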
+/** \brief Specialisation of \ref mmbit_any and \ref mmbit_any_precise for flat
+ * models. */
+static really_inline
+char mmbit_any_flat(const u8 *bits, u32 total_bits) {
+ if (total_bits <= MMB_KEY_BITS) {
+ return !!mmbit_get_flat_block(bits, total_bits);
+ }
+
+ const u8 *end = bits + mmbit_flat_size(total_bits);
+ for (const u8 *last = end - sizeof(MMB_TYPE); bits < last;
+ bits += sizeof(MMB_TYPE)) {
+ if (mmb_load(bits)) {
+ return 1;
+ }
+ }
+
+ // Overlapping load at the end.
+ return !!mmb_load(end - sizeof(MMB_TYPE));
+}
+
+/** \brief True if any keys are (or might be) on in the given multibit.
+ *
+ * NOTE: mmbit_any is sloppy (may return true when only summary bits are set).
+ * Use \ref mmbit_any_precise if you need/want a correct answer.
+ */
+static really_inline
+char mmbit_any(const u8 *bits, u32 total_bits) {
+ MDEBUG_PRINTF("%p total_bits %u\n", bits, total_bits);
+ if (!total_bits) {
+ return 0;
+ }
+ if (mmbit_is_flat_model(total_bits)) {
+ return mmbit_any_flat(bits, total_bits);
+ }
+ return !!mmb_load(bits);
+}
+
+/** \brief True if there are any keys on. Guaranteed precise. */
+static really_inline
+char mmbit_any_precise(const u8 *bits, u32 total_bits) {
+ MDEBUG_PRINTF("%p total_bits %u\n", bits, total_bits);
+ if (!total_bits) {
+ return 0;
+ }
+ if (mmbit_is_flat_model(total_bits)) {
+ return mmbit_any_flat(bits, total_bits);
+ }
+
+ return mmbit_iterate_big(bits, total_bits, MMB_INVALID) != MMB_INVALID;
+}
+
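+/* Annotation: why the two variants differ. \ref mmbit_unset_big is lazy and
+ * leaves summary bits on, so after setting and then unsetting the only key
+ * in a multi-level multibit, mmbit_any() still sees the stale summary while
+ * mmbit_any_precise() walks down and reports 0. A sketch (the wrapper name,
+ * buffer size and 1024 > MMB_FLAT_MAX_BITS are our assumptions):
+ */
+static really_inline
+void mmbit_any_example(void) {
+ u8 storage[256]; // assumed large enough for a 1024-bit multibit
+ const u32 total_bits = 1024; // assumed to select the multi-level model
+ mmbit_clear(storage, total_bits);
+ mmbit_set(storage, total_bits, 7);
+ mmbit_unset(storage, total_bits, 7); // lazy: summary bits stay on
+ // mmbit_any() may still return 1 here, but:
+ assert(!mmbit_any_precise(storage, total_bits));
+}
+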
+static really_inline
char mmbit_all_flat(const u8 *bits, u32 total_bits) {
while (total_bits > MMB_KEY_BITS) {
if (mmb_load(bits) != MMB_ALL_ONES) {
@@ -743,760 +743,760 @@ char mmbit_all(const u8 *bits, u32 total_bits) {
}
static really_inline
-MMB_TYPE get_flat_masks(u32 base, u32 it_start, u32 it_end) {
- if (it_end <= base) {
- return 0;
- }
- u32 udiff = it_end - base;
- MMB_TYPE mask = udiff < 64 ? mmb_mask_zero_to_nocheck(udiff) : MMB_ALL_ONES;
- if (it_start >= base) {
- u32 ldiff = it_start - base;
- MMB_TYPE lmask = ldiff < 64 ? ~mmb_mask_zero_to_nocheck(ldiff) : 0;
- mask &= lmask;
- }
- return mask;
-}
-
-/** \brief Specialisation of \ref mmbit_iterate_bounded for flat models. */
-static really_inline
-u32 mmbit_iterate_bounded_flat(const u8 *bits, u32 total_bits, u32 begin,
- u32 end) {
- // Short cut for single-block cases.
- if (total_bits <= MMB_KEY_BITS) {
- MMB_TYPE block = mmbit_get_flat_block(bits, total_bits);
- block &= get_flat_masks(0, begin, end);
- if (block) {
- return mmb_ctz(block);
- }
- return MMB_INVALID;
- }
-
- const u32 last_block = ROUNDDOWN_N(total_bits, MMB_KEY_BITS);
-
- // Iterate over full-sized blocks.
- for (u32 i = ROUNDDOWN_N(begin, MMB_KEY_BITS), e = MIN(end, last_block);
- i < e; i += MMB_KEY_BITS) {
- const u8 *block_ptr = bits + i / 8;
- MMB_TYPE block = mmb_load(block_ptr);
- block &= get_flat_masks(i, begin, end);
- if (block) {
- return i + mmb_ctz(block);
- }
- }
-
- // Final block, which is less than full-sized.
- if (end > last_block) {
- const u8 *block_ptr = bits + last_block / 8;
- u32 num_bits = total_bits - last_block;
- MMB_TYPE block = mmbit_get_flat_block(block_ptr, num_bits);
- block &= get_flat_masks(last_block, begin, end);
- if (block) {
- return last_block + mmb_ctz(block);
- }
- }
-
- return MMB_INVALID;
-}
-
-static really_inline
-MMB_TYPE get_lowhi_masks(u32 level, u32 max_level, u64a block_min, u64a block_max,
- u64a block_base) {
- const u32 level_shift = (max_level - level) * MMB_KEY_SHIFT;
- u64a lshift = (block_min - block_base) >> level_shift;
- u64a ushift = (block_max - block_base) >> level_shift;
- MMB_TYPE lmask = lshift < 64 ? ~mmb_mask_zero_to_nocheck(lshift) : 0;
- MMB_TYPE umask =
- ushift < 63 ? mmb_mask_zero_to_nocheck(ushift + 1) : MMB_ALL_ONES;
- return lmask & umask;
-}
-
-static really_inline
-u32 mmbit_iterate_bounded_big(const u8 *bits, u32 total_bits, u32 it_start, u32 it_end) {
- u64a key = 0;
- u32 ks = mmbit_keyshift(total_bits);
- const u32 max_level = mmbit_maxlevel_from_keyshift(ks);
- u32 level = 0;
- --it_end; // make end-limit inclusive
- for (;;) {
- assert(level <= max_level);
-
+MMB_TYPE get_flat_masks(u32 base, u32 it_start, u32 it_end) {
+ if (it_end <= base) {
+ return 0;
+ }
+ u32 udiff = it_end - base;
+ MMB_TYPE mask = udiff < 64 ? mmb_mask_zero_to_nocheck(udiff) : MMB_ALL_ONES;
+ if (it_start >= base) {
+ u32 ldiff = it_start - base;
+ MMB_TYPE lmask = ldiff < 64 ? ~mmb_mask_zero_to_nocheck(ldiff) : 0;
+ mask &= lmask;
+ }
+ return mask;
+}
+
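+/* Worked example (annotation): get_flat_masks(64, 70, 80) builds the mask
+ * for the block covering keys [64, 128). udiff = 80 - 64 = 16 yields an
+ * upper mask of bits 0..15; ldiff = 70 - 64 = 6 strips bits 0..5; the
+ * result selects bits 6..15, i.e. keys 70..79 (the end key is exclusive). */
+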
+/** \brief Specialisation of \ref mmbit_iterate_bounded for flat models. */
+static really_inline
+u32 mmbit_iterate_bounded_flat(const u8 *bits, u32 total_bits, u32 begin,
+ u32 end) {
+ // Short cut for single-block cases.
+ if (total_bits <= MMB_KEY_BITS) {
+ MMB_TYPE block = mmbit_get_flat_block(bits, total_bits);
+ block &= get_flat_masks(0, begin, end);
+ if (block) {
+ return mmb_ctz(block);
+ }
+ return MMB_INVALID;
+ }
+
+ const u32 last_block = ROUNDDOWN_N(total_bits, MMB_KEY_BITS);
+
+ // Iterate over full-sized blocks.
+ for (u32 i = ROUNDDOWN_N(begin, MMB_KEY_BITS), e = MIN(end, last_block);
+ i < e; i += MMB_KEY_BITS) {
+ const u8 *block_ptr = bits + i / 8;
+ MMB_TYPE block = mmb_load(block_ptr);
+ block &= get_flat_masks(i, begin, end);
+ if (block) {
+ return i + mmb_ctz(block);
+ }
+ }
+
+ // Final block, which is less than full-sized.
+ if (end > last_block) {
+ const u8 *block_ptr = bits + last_block / 8;
+ u32 num_bits = total_bits - last_block;
+ MMB_TYPE block = mmbit_get_flat_block(block_ptr, num_bits);
+ block &= get_flat_masks(last_block, begin, end);
+ if (block) {
+ return last_block + mmb_ctz(block);
+ }
+ }
+
+ return MMB_INVALID;
+}
+
+static really_inline
+MMB_TYPE get_lowhi_masks(u32 level, u32 max_level, u64a block_min, u64a block_max,
+ u64a block_base) {
+ const u32 level_shift = (max_level - level) * MMB_KEY_SHIFT;
+ u64a lshift = (block_min - block_base) >> level_shift;
+ u64a ushift = (block_max - block_base) >> level_shift;
+ MMB_TYPE lmask = lshift < 64 ? ~mmb_mask_zero_to_nocheck(lshift) : 0;
+ MMB_TYPE umask =
+ ushift < 63 ? mmb_mask_zero_to_nocheck(ushift + 1) : MMB_ALL_ONES;
+ return lmask & umask;
+}
+
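+/* Worked example (annotation): with max_level = 1 at level = 0, each root
+ * bit summarises 64 keys (level_shift = 6). For block_base = 0,
+ * block_min = 100 and block_max = 200: lshift = 100 >> 6 = 1 and
+ * ushift = 200 >> 6 = 3, so the mask keeps root bits 1..3, exactly the
+ * children that can hold keys in the inclusive range [100, 200]. */
+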
+static really_inline
+u32 mmbit_iterate_bounded_big(const u8 *bits, u32 total_bits, u32 it_start, u32 it_end) {
+ u64a key = 0;
+ u32 ks = mmbit_keyshift(total_bits);
+ const u32 max_level = mmbit_maxlevel_from_keyshift(ks);
+ u32 level = 0;
+ --it_end; // make end-limit inclusive
+ for (;;) {
+ assert(level <= max_level);
+
u64a block_width = MMB_KEY_BITS << ks;
- u64a block_base = key * block_width;
- u64a block_min = MAX(it_start, block_base);
- u64a block_max = MIN(it_end, block_base + block_width - 1);
- const u8 *block_ptr =
- mmbit_get_level_root_const(bits, level) + key * sizeof(MMB_TYPE);
- MMB_TYPE block = mmb_load(block_ptr);
- block &= get_lowhi_masks(level, max_level, block_min, block_max, block_base);
- if (block) {
- // Found a bit, go down a level
- key = (key << MMB_KEY_SHIFT) + mmb_ctz(block);
- if (level++ == max_level) {
- return key;
- }
- ks -= MMB_KEY_SHIFT;
- } else {
- // No bit found, go up a level
- // we know that this block didn't have any answers, so we can push
- // our start iterator forward.
- u64a next_start = block_base + block_width;
- if (next_start > it_end) {
- break;
- }
- if (level-- == 0) {
- break;
- }
- it_start = next_start;
- key >>= MMB_KEY_SHIFT;
- ks += MMB_KEY_SHIFT;
- }
- }
- return MMB_INVALID;
-}
-
-/** \brief Bounded iterator. Returns the index of the first set bit between
- * it_start (inclusive) and it_end (exclusive) or MMB_INVALID if no bits are
- * set in that range.
- */
-static really_inline
-u32 mmbit_iterate_bounded(const u8 *bits, u32 total_bits, u32 it_start,
- u32 it_end) {
- MDEBUG_PRINTF("%p total_bits %u it_start %u it_end %u\n", bits, total_bits,
- it_start, it_end);
- assert(it_start <= it_end);
- assert(it_end <= total_bits);
- if (!total_bits || it_end == it_start) {
- return MMB_INVALID;
- }
- assert(it_start < total_bits);
- u32 key;
- if (mmbit_is_flat_model(total_bits)) {
- key = mmbit_iterate_bounded_flat(bits, total_bits, it_start, it_end);
- } else {
- key = mmbit_iterate_bounded_big(bits, total_bits, it_start, it_end);
- }
- assert(key == MMB_INVALID || mmbit_isset(bits, total_bits, key));
- return key;
-}
-
-/** \brief Specialisation of \ref mmbit_unset_range for flat models. */
-static really_inline
-void mmbit_unset_range_flat(u8 *bits, u32 total_bits, u32 begin, u32 end) {
- const u32 last_block = ROUNDDOWN_N(total_bits, MMB_KEY_BITS);
-
- // Iterate over full-sized blocks.
- for (u32 i = ROUNDDOWN_N(begin, MMB_KEY_BITS), e = MIN(end, last_block);
- i < e; i += MMB_KEY_BITS) {
- u8 *block_ptr = bits + i / 8;
- MMB_TYPE block = mmb_load(block_ptr);
- MMB_TYPE mask = get_flat_masks(i, begin, end);
- mmb_store(block_ptr, block & ~mask);
- }
-
- // Final block, which is less than full-sized.
- if (end > last_block) {
- u8 *block_ptr = bits + last_block / 8;
- u32 num_bits = total_bits - last_block;
- MMB_TYPE block = mmbit_get_flat_block(block_ptr, num_bits);
- MMB_TYPE mask = get_flat_masks(last_block, begin, end);
- mmb_store_partial(block_ptr, block & ~mask, num_bits);
- }
-}
-
-static really_inline
-void mmbit_unset_range_big(u8 *bits, const u32 total_bits, u32 begin,
- u32 end) {
- // TODO: combine iterator and unset operation; completely replace this
- u32 i = begin;
- for (;;) {
- i = mmbit_iterate_bounded(bits, total_bits, i, end);
- if (i == MMB_INVALID) {
- break;
- }
- mmbit_unset_big(bits, total_bits, i);
- if (++i == end) {
- break;
- }
- }
-}
-
-/** \brief Unset a whole range of bits. Ensures that all bits between \a begin
- * (inclusive) and \a end (exclusive) are switched off. */
-static really_inline
-void mmbit_unset_range(u8 *bits, const u32 total_bits, u32 begin, u32 end) {
- MDEBUG_PRINTF("%p total_bits %u begin %u end %u\n", bits, total_bits, begin,
- end);
- assert(begin <= end);
- assert(end <= total_bits);
- if (mmbit_is_flat_model(total_bits)) {
- mmbit_unset_range_flat(bits, total_bits, begin, end);
- } else {
- mmbit_unset_range_big(bits, total_bits, begin, end);
- }
- // No bits are on in [begin, end) once we're done.
- assert(MMB_INVALID == mmbit_iterate_bounded(bits, total_bits, begin, end));
-}
-
-/** \brief Specialisation of \ref mmbit_init_range for flat models. */
-static really_inline
-void mmbit_init_range_flat(u8 *bits, const u32 total_bits, u32 begin, u32 end) {
- const u32 last_block = ROUNDDOWN_N(total_bits, MMB_KEY_BITS);
-
- // Iterate over full-sized blocks.
- for (u32 i = 0; i < last_block; i += MMB_KEY_BITS) {
- mmb_store(bits + i / 8, get_flat_masks(i, begin, end));
- }
-
- // Final block, which is less than full-sized.
- if (total_bits % MMB_KEY_BITS) {
- u32 num_bits = total_bits - last_block;
- MMB_TYPE block = get_flat_masks(last_block, begin, end);
- mmb_store_partial(bits + last_block / 8, block, num_bits);
- }
-}
-
-static really_inline
-void mmbit_init_range_big(u8 *bits, const u32 total_bits, u32 begin, u32 end) {
- u32 ks = mmbit_keyshift(total_bits);
- u32 level = 0;
-
- for (;;) {
- u8 *block = mmbit_get_level_root(bits, level);
- u32 k1 = begin >> ks, k2 = end >> ks;
-
- // Summary blocks need to account for the runt block on the end.
- if ((k2 << ks) != end) {
- k2++;
- }
-
- // Partial block to deal with beginning.
- block += (k1 / MMB_KEY_BITS) * sizeof(MMB_TYPE);
- if (k1 % MMB_KEY_BITS) {
- u32 idx = k1 / MMB_KEY_BITS;
- u32 block_end = (idx + 1) * MMB_KEY_BITS;
-
- // Because k1 % MMB_KEY_BITS != 0, we can avoid checking edge cases
- // here (see the branch in mmb_mask_zero_to).
- MMB_TYPE mask = MMB_ALL_ONES << (k1 % MMB_KEY_BITS);
-
- if (k2 < block_end) {
- assert(k2 % MMB_KEY_BITS);
- mask &= mmb_mask_zero_to_nocheck(k2 % MMB_KEY_BITS);
- mmb_store(block, mask);
- goto next_level;
- } else {
- mmb_store(block, mask);
- k1 = block_end;
- block += sizeof(MMB_TYPE);
- }
- }
-
- // Write blocks filled with ones until we get to the last block.
- for (; k1 < (k2 & ~MMB_KEY_MASK); k1 += MMB_KEY_BITS) {
- mmb_store(block, MMB_ALL_ONES);
- block += sizeof(MMB_TYPE);
- }
-
- // Final block.
- if (likely(k1 < k2)) {
- // Again, if k2 was at a block boundary, it would have been handled
- // by the previous loop, so we know k2 % MMB_KEY_BITS != 0 and can
- // avoid the branch in mmb_mask_zero_to here.
- assert(k2 % MMB_KEY_BITS);
- MMB_TYPE mask = mmb_mask_zero_to_nocheck(k2 % MMB_KEY_BITS);
- mmb_store(block, mask);
- }
-
- next_level:
- if (ks == 0) {
- break; // Last level is done, finished.
- }
-
- ks -= MMB_KEY_SHIFT;
- level++;
- }
-}
-
-/** \brief Initialises the multibit so that only the given range of bits are
- * set.
- *
- * Ensures that all bits between \a begin (inclusive) and \a end (exclusive)
- * are switched on.
- */
-static really_inline
-void mmbit_init_range(u8 *bits, const u32 total_bits, u32 begin, u32 end) {
- MDEBUG_PRINTF("%p total_bits %u begin %u end %u\n", bits, total_bits, begin,
- end);
- assert(begin <= end);
- assert(end <= total_bits);
-
- if (!total_bits) {
- return;
- }
-
- // Short cut for cases where we're not actually setting any bits; just
- // clear the multibit.
- if (begin == end) {
- mmbit_clear(bits, total_bits);
- return;
- }
-
- if (mmbit_is_flat_model(total_bits)) {
- mmbit_init_range_flat(bits, total_bits, begin, end);
- } else {
- mmbit_init_range_big(bits, total_bits, begin, end);
- }
-
- assert(begin == end ||
- mmbit_iterate(bits, total_bits, MMB_INVALID) == begin);
- assert(!end || begin == end ||
- mmbit_iterate(bits, total_bits, end - 1) == MMB_INVALID);
-}
-
-/** \brief Determine the number of \ref mmbit_sparse_state elements required.
- */
-static really_inline
-u32 mmbit_sparse_iter_state_size(u32 total_bits) {
- if (mmbit_is_flat_model(total_bits)) {
- return 2;
- }
- u32 levels = mmbit_maxlevel(total_bits);
- return levels + 1;
-}
-
-#ifdef DUMP_SUPPORT
-// Dump function, defined in multibit.c.
-void mmbit_sparse_iter_dump(const struct mmbit_sparse_iter *it, u32 total_bits);
-#endif
-
-/** Internal: common loop used by mmbit_sparse_iter_{begin,next}_big. Returns
- * matching next key given starting state, or MMB_INVALID. */
-static really_inline
-u32 mmbit_sparse_iter_exec(const u8 *bits, u32 key, u32 *idx, u32 level,
- const u32 max_level, struct mmbit_sparse_state *s,
- const struct mmbit_sparse_iter *it_root,
- const struct mmbit_sparse_iter *it) {
- for (;;) {
- MMB_TYPE block = s[level].mask;
- if (block) {
- u32 bit = mmb_ctz(block);
- key = (key << MMB_KEY_SHIFT) + bit;
- u32 bit_idx = mmbit_mask_index(bit, it->mask);
- if (level++ == max_level) {
- // we've found a key
- *idx = it->val + bit_idx;
- return key;
- } else {
- // iterator record is the start of the level (current it->val)
- // plus N, where N is the dense index of the bit in the current
- // level's itmask
- u32 iter_key = it->val + bit_idx;
- it = it_root + iter_key;
- MMB_TYPE nextblock =
- mmb_load(mmbit_get_level_root_const(bits, level) +
- key * sizeof(MMB_TYPE));
- s[level].mask = nextblock & it->mask;
- s[level].itkey = iter_key;
- }
- } else {
- // No bits set in this block
- if (level-- == 0) {
- break; // no key available
- }
- key >>= MMB_KEY_SHIFT;
- // Update state mask and iterator
- s[level].mask &= (s[level].mask - 1);
- it = it_root + s[level].itkey;
- }
- }
- return MMB_INVALID;
-}
-
-static really_inline
-u32 mmbit_sparse_iter_begin_big(const u8 *bits, u32 total_bits, u32 *idx,
- const struct mmbit_sparse_iter *it_root,
- struct mmbit_sparse_state *s) {
- const struct mmbit_sparse_iter *it = it_root;
- u32 key = 0;
- MMB_TYPE block = mmb_load(bits) & it->mask;
- if (!block) {
- return MMB_INVALID;
- }
-
- // Load first block into top level state.
- const u32 max_level = mmbit_maxlevel(total_bits);
- s[0].mask = block;
- s[0].itkey = 0;
- return mmbit_sparse_iter_exec(bits, key, idx, 0, max_level,
- s, it_root, it);
-}
-
-/** \brief Specialisation of \ref mmbit_sparse_iter_begin for flat models. */
-static really_inline
-u32 mmbit_sparse_iter_begin_flat(const u8 *bits, u32 total_bits, u32 *idx,
- const struct mmbit_sparse_iter *it_root,
- struct mmbit_sparse_state *s) {
- // Small cases have everything in the root iterator mask.
- if (total_bits <= MMB_KEY_BITS) {
- MMB_TYPE block = mmbit_get_flat_block(bits, total_bits);
- block &= it_root->mask;
- if (!block) {
- return MMB_INVALID;
- }
-
- s->mask = block;
- u32 key = mmb_ctz(block);
- *idx = mmbit_mask_index(key, it_root->mask);
- return key;
- }
-
- // Otherwise, the root iterator mask tells us which blocks (which we lay out
- // linearly in the flat model) could contain keys.
- assert(mmbit_maxlevel(total_bits) == 1); // Should only be two levels
- MMB_TYPE root = it_root->mask;
- for (; root; root &= (root - 1)) {
- u32 bit = mmb_ctz(root);
- u32 bit_idx = mmbit_mask_index(bit, it_root->mask);
- u32 iter_key = it_root->val + bit_idx;
- const struct mmbit_sparse_iter *it = it_root + iter_key;
- u32 block_key_min = bit * MMB_KEY_BITS;
- u32 block_key_max = block_key_min + MMB_KEY_BITS;
- MMB_TYPE block;
- if (block_key_max > total_bits) {
- block_key_max = total_bits;
- block = mmbit_get_flat_block(bits + (bit * sizeof(MMB_TYPE)),
- block_key_max - block_key_min);
- } else {
- block = mmb_load(bits + (bit * sizeof(MMB_TYPE)));
- }
-
- block &= it->mask;
- if (block) {
- s[0].mask = root;
- s[1].mask = block;
- s[1].itkey = iter_key;
- u32 key = mmb_ctz(block);
- *idx = it->val + mmbit_mask_index(key, it->mask);
- return key + block_key_min;
- }
- }
-
- return MMB_INVALID;
-}
-
-/** \brief Sparse iterator, find first key.
- *
- * Returns the first of the bits specified by the iterator \a it_root that is
- * on, and initialises the state \a s. If none of the bits specified by the
- * iterator are on, returns MMB_INVALID.
- */
-static really_inline
-u32 mmbit_sparse_iter_begin(const u8 *bits, u32 total_bits, u32 *idx,
- const struct mmbit_sparse_iter *it_root,
- struct mmbit_sparse_state *s) {
- assert(ISALIGNED_N(it_root, alignof(struct mmbit_sparse_iter)));
-
- // Our state _may_ be on the stack
+ u64a block_base = key * block_width;
+ u64a block_min = MAX(it_start, block_base);
+ u64a block_max = MIN(it_end, block_base + block_width - 1);
+ const u8 *block_ptr =
+ mmbit_get_level_root_const(bits, level) + key * sizeof(MMB_TYPE);
+ MMB_TYPE block = mmb_load(block_ptr);
+ block &= get_lowhi_masks(level, max_level, block_min, block_max, block_base);
+ if (block) {
+ // Found a bit, go down a level
+ key = (key << MMB_KEY_SHIFT) + mmb_ctz(block);
+ if (level++ == max_level) {
+ return key;
+ }
+ ks -= MMB_KEY_SHIFT;
+ } else {
+ // No bit found, go up a level
+ // we know that this block didn't have any answers, so we can push
+ // our start iterator forward.
+ u64a next_start = block_base + block_width;
+ if (next_start > it_end) {
+ break;
+ }
+ if (level-- == 0) {
+ break;
+ }
+ it_start = next_start;
+ key >>= MMB_KEY_SHIFT;
+ ks += MMB_KEY_SHIFT;
+ }
+ }
+ return MMB_INVALID;
+}
+
+/** \brief Bounded iterator. Returns the index of the first set bit between
+ * it_start (inclusive) and it_end (exclusive) or MMB_INVALID if no bits are
+ * set in that range.
+ */
+static really_inline
+u32 mmbit_iterate_bounded(const u8 *bits, u32 total_bits, u32 it_start,
+ u32 it_end) {
+ MDEBUG_PRINTF("%p total_bits %u it_start %u it_end %u\n", bits, total_bits,
+ it_start, it_end);
+ assert(it_start <= it_end);
+ assert(it_end <= total_bits);
+ if (!total_bits || it_end == it_start) {
+ return MMB_INVALID;
+ }
+ assert(it_start < total_bits);
+ u32 key;
+ if (mmbit_is_flat_model(total_bits)) {
+ key = mmbit_iterate_bounded_flat(bits, total_bits, it_start, it_end);
+ } else {
+ key = mmbit_iterate_bounded_big(bits, total_bits, it_start, it_end);
+ }
+ assert(key == MMB_INVALID || mmbit_isset(bits, total_bits, key));
+ return key;
+}
+
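+/* Usage sketch (annotation; the wrapper name is ours): counting the set
+ * keys in [begin, end) by re-entering the bounded iterator one past each
+ * hit. When key + 1 == end the next call returns MMB_INVALID at once. */
+static really_inline
+u32 mmbit_count_range_example(const u8 *bits, u32 total_bits, u32 begin,
+ u32 end) {
+ u32 count = 0;
+ for (u32 key = mmbit_iterate_bounded(bits, total_bits, begin, end);
+ key != MMB_INVALID;
+ key = mmbit_iterate_bounded(bits, total_bits, key + 1, end)) {
+ count++;
+ }
+ return count;
+}
+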
+/** \brief Specialisation of \ref mmbit_unset_range for flat models. */
+static really_inline
+void mmbit_unset_range_flat(u8 *bits, u32 total_bits, u32 begin, u32 end) {
+ const u32 last_block = ROUNDDOWN_N(total_bits, MMB_KEY_BITS);
+
+ // Iterate over full-sized blocks.
+ for (u32 i = ROUNDDOWN_N(begin, MMB_KEY_BITS), e = MIN(end, last_block);
+ i < e; i += MMB_KEY_BITS) {
+ u8 *block_ptr = bits + i / 8;
+ MMB_TYPE block = mmb_load(block_ptr);
+ MMB_TYPE mask = get_flat_masks(i, begin, end);
+ mmb_store(block_ptr, block & ~mask);
+ }
+
+ // Final block, which is less than full-sized.
+ if (end > last_block) {
+ u8 *block_ptr = bits + last_block / 8;
+ u32 num_bits = total_bits - last_block;
+ MMB_TYPE block = mmbit_get_flat_block(block_ptr, num_bits);
+ MMB_TYPE mask = get_flat_masks(last_block, begin, end);
+ mmb_store_partial(block_ptr, block & ~mask, num_bits);
+ }
+}
+
+static really_inline
+void mmbit_unset_range_big(u8 *bits, const u32 total_bits, u32 begin,
+ u32 end) {
+ // TODO: combine iterator and unset operation; completely replace this
+ u32 i = begin;
+ for (;;) {
+ i = mmbit_iterate_bounded(bits, total_bits, i, end);
+ if (i == MMB_INVALID) {
+ break;
+ }
+ mmbit_unset_big(bits, total_bits, i);
+ if (++i == end) {
+ break;
+ }
+ }
+}
+
+/** \brief Unset a whole range of bits. Ensures that all bits between \a begin
+ * (inclusive) and \a end (exclusive) are switched off. */
+static really_inline
+void mmbit_unset_range(u8 *bits, const u32 total_bits, u32 begin, u32 end) {
+ MDEBUG_PRINTF("%p total_bits %u begin %u end %u\n", bits, total_bits, begin,
+ end);
+ assert(begin <= end);
+ assert(end <= total_bits);
+ if (mmbit_is_flat_model(total_bits)) {
+ mmbit_unset_range_flat(bits, total_bits, begin, end);
+ } else {
+ mmbit_unset_range_big(bits, total_bits, begin, end);
+ }
+ // No bits are on in [begin, end) once we're done.
+ assert(MMB_INVALID == mmbit_iterate_bounded(bits, total_bits, begin, end));
+}
+
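+/* Annotation: in the multi-level model this is currently just the bounded
+ * iterator driving \ref mmbit_unset_big key by key (see the TODO above), so
+ * mmbit_unset_range(bits, total_bits, 10, 20) behaves like unsetting keys
+ * 10..19 individually, with the same laziness about summary bits. */
+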
+/** \brief Specialisation of \ref mmbit_init_range for flat models. */
+static really_inline
+void mmbit_init_range_flat(u8 *bits, const u32 total_bits, u32 begin, u32 end) {
+ const u32 last_block = ROUNDDOWN_N(total_bits, MMB_KEY_BITS);
+
+ // Iterate over full-sized blocks.
+ for (u32 i = 0; i < last_block; i += MMB_KEY_BITS) {
+ mmb_store(bits + i / 8, get_flat_masks(i, begin, end));
+ }
+
+ // Final block, which is less than full-sized.
+ if (total_bits % MMB_KEY_BITS) {
+ u32 num_bits = total_bits - last_block;
+ MMB_TYPE block = get_flat_masks(last_block, begin, end);
+ mmb_store_partial(bits + last_block / 8, block, num_bits);
+ }
+}
+
+static really_inline
+void mmbit_init_range_big(u8 *bits, const u32 total_bits, u32 begin, u32 end) {
+ u32 ks = mmbit_keyshift(total_bits);
+ u32 level = 0;
+
+ for (;;) {
+ u8 *block = mmbit_get_level_root(bits, level);
+ u32 k1 = begin >> ks, k2 = end >> ks;
+
+ // Summary blocks need to account for the runt block on the end.
+ if ((k2 << ks) != end) {
+ k2++;
+ }
+
+ // Partial block to deal with beginning.
+ block += (k1 / MMB_KEY_BITS) * sizeof(MMB_TYPE);
+ if (k1 % MMB_KEY_BITS) {
+ u32 idx = k1 / MMB_KEY_BITS;
+ u32 block_end = (idx + 1) * MMB_KEY_BITS;
+
+ // Because k1 % MMB_KEY_BITS != 0, we can avoid checking edge cases
+ // here (see the branch in mmb_mask_zero_to).
+ MMB_TYPE mask = MMB_ALL_ONES << (k1 % MMB_KEY_BITS);
+
+ if (k2 < block_end) {
+ assert(k2 % MMB_KEY_BITS);
+ mask &= mmb_mask_zero_to_nocheck(k2 % MMB_KEY_BITS);
+ mmb_store(block, mask);
+ goto next_level;
+ } else {
+ mmb_store(block, mask);
+ k1 = block_end;
+ block += sizeof(MMB_TYPE);
+ }
+ }
+
+ // Write blocks filled with ones until we get to the last block.
+ for (; k1 < (k2 & ~MMB_KEY_MASK); k1 += MMB_KEY_BITS) {
+ mmb_store(block, MMB_ALL_ONES);
+ block += sizeof(MMB_TYPE);
+ }
+
+ // Final block.
+ if (likely(k1 < k2)) {
+ // Again, if k2 was at a block boundary, it would have been handled
+ // by the previous loop, so we know k2 % MMB_KEY_BITS != 0 and can
+ // avoid the branch in mmb_mask_zero_to here.
+ assert(k2 % MMB_KEY_BITS);
+ MMB_TYPE mask = mmb_mask_zero_to_nocheck(k2 % MMB_KEY_BITS);
+ mmb_store(block, mask);
+ }
+
+ next_level:
+ if (ks == 0) {
+ break; // Last level is done, finished.
+ }
+
+ ks -= MMB_KEY_SHIFT;
+ level++;
+ }
+}
+
+/** \brief Initialises the multibit so that only the given range of bits are
+ * set.
+ *
+ * Ensures that all bits between \a begin (inclusive) and \a end (exclusive)
+ * are switched on.
+ */
+static really_inline
+void mmbit_init_range(u8 *bits, const u32 total_bits, u32 begin, u32 end) {
+ MDEBUG_PRINTF("%p total_bits %u begin %u end %u\n", bits, total_bits, begin,
+ end);
+ assert(begin <= end);
+ assert(end <= total_bits);
+
+ if (!total_bits) {
+ return;
+ }
+
+ // Short cut for cases where we're not actually setting any bits; just
+ // clear the multibit.
+ if (begin == end) {
+ mmbit_clear(bits, total_bits);
+ return;
+ }
+
+ if (mmbit_is_flat_model(total_bits)) {
+ mmbit_init_range_flat(bits, total_bits, begin, end);
+ } else {
+ mmbit_init_range_big(bits, total_bits, begin, end);
+ }
+
+ assert(begin == end ||
+ mmbit_iterate(bits, total_bits, MMB_INVALID) == begin);
+ assert(!end || begin == end ||
+ mmbit_iterate(bits, total_bits, end - 1) == MMB_INVALID);
+}
+
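+/* Usage sketch (annotation; the wrapper name is ours): initialising a
+ * multibit so that exactly the keys [100, 200) start switched on. Unlike a
+ * clear followed by 100 calls to \ref mmbit_set, this writes each level of
+ * the tree only once. */
+static really_inline
+void mmbit_init_range_example(u8 *storage, u32 total_bits) {
+ assert(total_bits >= 200);
+ mmbit_init_range(storage, total_bits, 100, 200);
+ assert(mmbit_iterate(storage, total_bits, MMB_INVALID) == 100);
+ assert(mmbit_isset(storage, total_bits, 100));
+ assert(!mmbit_isset(storage, total_bits, 99));
+}
+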
+/** \brief Determine the number of \ref mmbit_sparse_state elements required.
+ */
+static really_inline
+u32 mmbit_sparse_iter_state_size(u32 total_bits) {
+ if (mmbit_is_flat_model(total_bits)) {
+ return 2;
+ }
+ u32 levels = mmbit_maxlevel(total_bits);
+ return levels + 1;
+}
+
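+/* Annotation: with 64-bit blocks, a multi-level multibit stores up to
+ * 64^(L+1) keys in L+1 levels. For example, total_bits = 100000 needs three
+ * levels (64^2 = 4096 < 100000 <= 64^3 = 262144), so mmbit_maxlevel() is 2
+ * and three state elements are required. Callers typically just size the
+ * array with MAX_SPARSE_ITER_STATES, as the trace code further below does. */
+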
+#ifdef DUMP_SUPPORT
+// Dump function, defined in multibit.c.
+void mmbit_sparse_iter_dump(const struct mmbit_sparse_iter *it, u32 total_bits);
+#endif
+
+/** Internal: common loop used by mmbit_sparse_iter_{begin,next}_big. Returns
+ * matching next key given starting state, or MMB_INVALID. */
+static really_inline
+u32 mmbit_sparse_iter_exec(const u8 *bits, u32 key, u32 *idx, u32 level,
+ const u32 max_level, struct mmbit_sparse_state *s,
+ const struct mmbit_sparse_iter *it_root,
+ const struct mmbit_sparse_iter *it) {
+ for (;;) {
+ MMB_TYPE block = s[level].mask;
+ if (block) {
+ u32 bit = mmb_ctz(block);
+ key = (key << MMB_KEY_SHIFT) + bit;
+ u32 bit_idx = mmbit_mask_index(bit, it->mask);
+ if (level++ == max_level) {
+ // we've found a key
+ *idx = it->val + bit_idx;
+ return key;
+ } else {
+ // iterator record is the start of the level (current it->val)
+ // plus N, where N is the dense index of the bit in the current
+ // level's itmask
+ u32 iter_key = it->val + bit_idx;
+ it = it_root + iter_key;
+ MMB_TYPE nextblock =
+ mmb_load(mmbit_get_level_root_const(bits, level) +
+ key * sizeof(MMB_TYPE));
+ s[level].mask = nextblock & it->mask;
+ s[level].itkey = iter_key;
+ }
+ } else {
+ // No bits set in this block
+ if (level-- == 0) {
+ break; // no key available
+ }
+ key >>= MMB_KEY_SHIFT;
+ // Update state mask and iterator
+ s[level].mask &= (s[level].mask - 1);
+ it = it_root + s[level].itkey;
+ }
+ }
+ return MMB_INVALID;
+}
+
+static really_inline
+u32 mmbit_sparse_iter_begin_big(const u8 *bits, u32 total_bits, u32 *idx,
+ const struct mmbit_sparse_iter *it_root,
+ struct mmbit_sparse_state *s) {
+ const struct mmbit_sparse_iter *it = it_root;
+ u32 key = 0;
+ MMB_TYPE block = mmb_load(bits) & it->mask;
+ if (!block) {
+ return MMB_INVALID;
+ }
+
+ // Load first block into top level state.
+ const u32 max_level = mmbit_maxlevel(total_bits);
+ s[0].mask = block;
+ s[0].itkey = 0;
+ return mmbit_sparse_iter_exec(bits, key, idx, 0, max_level,
+ s, it_root, it);
+}
+
+/** \brief Specialisation of \ref mmbit_sparse_iter_begin for flat models. */
+static really_inline
+u32 mmbit_sparse_iter_begin_flat(const u8 *bits, u32 total_bits, u32 *idx,
+ const struct mmbit_sparse_iter *it_root,
+ struct mmbit_sparse_state *s) {
+ // Small cases have everything in the root iterator mask.
+ if (total_bits <= MMB_KEY_BITS) {
+ MMB_TYPE block = mmbit_get_flat_block(bits, total_bits);
+ block &= it_root->mask;
+ if (!block) {
+ return MMB_INVALID;
+ }
+
+ s->mask = block;
+ u32 key = mmb_ctz(block);
+ *idx = mmbit_mask_index(key, it_root->mask);
+ return key;
+ }
+
+ // Otherwise, the root iterator mask tells us which blocks (which we lay out
+ // linearly in the flat model) could contain keys.
+ assert(mmbit_maxlevel(total_bits) == 1); // Should only be two levels
+ MMB_TYPE root = it_root->mask;
+ for (; root; root &= (root - 1)) {
+ u32 bit = mmb_ctz(root);
+ u32 bit_idx = mmbit_mask_index(bit, it_root->mask);
+ u32 iter_key = it_root->val + bit_idx;
+ const struct mmbit_sparse_iter *it = it_root + iter_key;
+ u32 block_key_min = bit * MMB_KEY_BITS;
+ u32 block_key_max = block_key_min + MMB_KEY_BITS;
+ MMB_TYPE block;
+ if (block_key_max > total_bits) {
+ block_key_max = total_bits;
+ block = mmbit_get_flat_block(bits + (bit * sizeof(MMB_TYPE)),
+ block_key_max - block_key_min);
+ } else {
+ block = mmb_load(bits + (bit * sizeof(MMB_TYPE)));
+ }
+
+ block &= it->mask;
+ if (block) {
+ s[0].mask = root;
+ s[1].mask = block;
+ s[1].itkey = iter_key;
+ u32 key = mmb_ctz(block);
+ *idx = it->val + mmbit_mask_index(key, it->mask);
+ return key + block_key_min;
+ }
+ }
+
+ return MMB_INVALID;
+}
+
+/** \brief Sparse iterator, find first key.
+ *
+ * Returns the first of the bits specified by the iterator \a it_root that is
+ * on, and initialises the state \a s. If none of the bits specified by the
+ * iterator are on, returns MMB_INVALID.
+ */
+static really_inline
+u32 mmbit_sparse_iter_begin(const u8 *bits, u32 total_bits, u32 *idx,
+ const struct mmbit_sparse_iter *it_root,
+ struct mmbit_sparse_state *s) {
+ assert(ISALIGNED_N(it_root, alignof(struct mmbit_sparse_iter)));
+
+ // Our state _may_ be on the stack
#ifndef _WIN32
- assert(ISALIGNED_N(s, alignof(struct mmbit_sparse_state)));
+ assert(ISALIGNED_N(s, alignof(struct mmbit_sparse_state)));
#else
assert(ISALIGNED_N(s, 4));
#endif
-
- MDEBUG_PRINTF("%p total_bits %u\n", bits, total_bits);
- // iterator should have _something_ at the root level
- assert(it_root->mask != 0);
- u32 key;
- if (mmbit_is_flat_model(total_bits)) {
- key = mmbit_sparse_iter_begin_flat(bits, total_bits, idx, it_root, s);
- } else {
- key = mmbit_sparse_iter_begin_big(bits, total_bits, idx, it_root, s);
- }
- if (key != MMB_INVALID) {
- assert(key < total_bits);
- assert(mmbit_isset(bits, total_bits, key));
- }
- return key;
-}
-
-static really_inline
-u32 mmbit_sparse_iter_next_big(const u8 *bits, u32 total_bits, u32 last_key,
- u32 *idx,
- const struct mmbit_sparse_iter *it_root,
- struct mmbit_sparse_state *s) {
- const u32 max_level = mmbit_maxlevel(total_bits);
- u32 key = last_key >> MMB_KEY_SHIFT;
- s[max_level].mask &= (s[max_level].mask - 1);
- const struct mmbit_sparse_iter *it = it_root + s[max_level].itkey;
- return mmbit_sparse_iter_exec(bits, key, idx, max_level, max_level, s,
- it_root, it);
-}
-
-/** \brief Specialisation of \ref mmbit_sparse_iter_next for flat models. */
-static really_inline
-u32 mmbit_sparse_iter_next_flat(const u8 *bits, const u32 total_bits, u32 *idx,
- const struct mmbit_sparse_iter *it_root,
- struct mmbit_sparse_state *s) {
- if (total_bits <= MMB_KEY_BITS) {
- // All of our data is already in the s->mask, so we just need to scrape
- // off the next match.
- s->mask &= (s->mask - 1);
- if (s->mask) {
- u32 key = mmb_ctz(s->mask);
- *idx = mmbit_mask_index(key, it_root->mask);
- return key;
- }
- } else {
- assert(s[0].mask);
-
- s[1].mask &= (s[1].mask - 1); // Remove previous key from iter state.
- u32 bit = mmb_ctz(s[0].mask); // Flat block currently being accessed.
-
- for (;;) {
- if (s[1].mask) {
- u32 key = mmb_ctz(s[1].mask);
- const struct mmbit_sparse_iter *it = it_root + s[1].itkey;
- *idx = it->val + mmbit_mask_index(key, it->mask);
- key += (bit * MMB_KEY_BITS);
- return key;
- }
-
- // Otherwise, we have no keys left in this block. Consult the root
- // mask and find the next one.
-
- s[0].mask &= s[0].mask - 1;
- if (!s[0].mask) {
- break;
- }
-
- bit = mmb_ctz(s[0].mask);
- u32 bit_idx = mmbit_mask_index(bit, it_root->mask);
- u32 iter_key = it_root->val + bit_idx;
- const struct mmbit_sparse_iter *it = it_root + iter_key;
- u32 block_key_min = bit * MMB_KEY_BITS;
- u32 block_key_max = block_key_min + MMB_KEY_BITS;
- MMB_TYPE block;
- if (block_key_max > total_bits) {
- block_key_max = total_bits;
- block = mmbit_get_flat_block(bits + (bit * sizeof(MMB_TYPE)),
- block_key_max - block_key_min);
- } else {
- block = mmb_load(bits + (bit * sizeof(MMB_TYPE)));
- }
-
- s[1].mask = block & it->mask;
- s[1].itkey = iter_key;
- }
- }
-
- return MMB_INVALID;
-}
-
-/** \brief Sparse iterator, find next key.
- *
- * Takes in a sparse iterator tree structure \a it_root and a state array, and
- * finds the next on bit (from the set of bits specified in the iterator).
- *
- * NOTE: The sparse iterator stores copies of the multibit blocks in its state,
- * so it is not necessarily safe to set or unset bits in the multibit while
- * iterating: the changes you make may or may not be taken into account
- * by the iterator.
- */
-static really_inline
-u32 mmbit_sparse_iter_next(const u8 *bits, u32 total_bits, u32 last_key,
- u32 *idx, const struct mmbit_sparse_iter *it_root,
- struct mmbit_sparse_state *s) {
- assert(ISALIGNED_N(it_root, alignof(struct mmbit_sparse_iter)));
-
- // Our state _may_ be on the stack
+
+ MDEBUG_PRINTF("%p total_bits %u\n", bits, total_bits);
+ // iterator should have _something_ at the root level
+ assert(it_root->mask != 0);
+ u32 key;
+ if (mmbit_is_flat_model(total_bits)) {
+ key = mmbit_sparse_iter_begin_flat(bits, total_bits, idx, it_root, s);
+ } else {
+ key = mmbit_sparse_iter_begin_big(bits, total_bits, idx, it_root, s);
+ }
+ if (key != MMB_INVALID) {
+ assert(key < total_bits);
+ assert(mmbit_isset(bits, total_bits, key));
+ }
+ return key;
+}
+
+static really_inline
+u32 mmbit_sparse_iter_next_big(const u8 *bits, u32 total_bits, u32 last_key,
+ u32 *idx,
+ const struct mmbit_sparse_iter *it_root,
+ struct mmbit_sparse_state *s) {
+ const u32 max_level = mmbit_maxlevel(total_bits);
+ u32 key = last_key >> MMB_KEY_SHIFT;
+ s[max_level].mask &= (s[max_level].mask - 1);
+ const struct mmbit_sparse_iter *it = it_root + s[max_level].itkey;
+ return mmbit_sparse_iter_exec(bits, key, idx, max_level, max_level, s,
+ it_root, it);
+}
+
+/** \brief Specialisation of \ref mmbit_sparse_iter_next for flat models. */
+static really_inline
+u32 mmbit_sparse_iter_next_flat(const u8 *bits, const u32 total_bits, u32 *idx,
+ const struct mmbit_sparse_iter *it_root,
+ struct mmbit_sparse_state *s) {
+ if (total_bits <= MMB_KEY_BITS) {
+ // All of our data is already in the s->mask, so we just need to scrape
+ // off the next match.
+ s->mask &= (s->mask - 1);
+ if (s->mask) {
+ u32 key = mmb_ctz(s->mask);
+ *idx = mmbit_mask_index(key, it_root->mask);
+ return key;
+ }
+ } else {
+ assert(s[0].mask);
+
+ s[1].mask &= (s[1].mask - 1); // Remove previous key from iter state.
+ u32 bit = mmb_ctz(s[0].mask); // Flat block currently being accessed.
+
+ for (;;) {
+ if (s[1].mask) {
+ u32 key = mmb_ctz(s[1].mask);
+ const struct mmbit_sparse_iter *it = it_root + s[1].itkey;
+ *idx = it->val + mmbit_mask_index(key, it->mask);
+ key += (bit * MMB_KEY_BITS);
+ return key;
+ }
+
+ // Otherwise, we have no keys left in this block. Consult the root
+ // mask and find the next one.
+
+ s[0].mask &= s[0].mask - 1;
+ if (!s[0].mask) {
+ break;
+ }
+
+ bit = mmb_ctz(s[0].mask);
+ u32 bit_idx = mmbit_mask_index(bit, it_root->mask);
+ u32 iter_key = it_root->val + bit_idx;
+ const struct mmbit_sparse_iter *it = it_root + iter_key;
+ u32 block_key_min = bit * MMB_KEY_BITS;
+ u32 block_key_max = block_key_min + MMB_KEY_BITS;
+ MMB_TYPE block;
+ if (block_key_max > total_bits) {
+ block_key_max = total_bits;
+ block = mmbit_get_flat_block(bits + (bit * sizeof(MMB_TYPE)),
+ block_key_max - block_key_min);
+ } else {
+ block = mmb_load(bits + (bit * sizeof(MMB_TYPE)));
+ }
+
+ s[1].mask = block & it->mask;
+ s[1].itkey = iter_key;
+ }
+ }
+
+ return MMB_INVALID;
+}
+
+/** \brief Sparse iterator, find next key.
+ *
+ * Takes in a sparse iterator tree structure \a it_root and a state array, and
+ * finds the next on bit (from the set of bits specified in the iterator).
+ *
+ * NOTE: The sparse iterator stores copies of the multibit blocks in its state,
+ * so it is not necessarily safe to set or unset bits in the multibit while
+ * iterating: the changes you make may or may not be taken into account
+ * by the iterator.
+ */
+static really_inline
+u32 mmbit_sparse_iter_next(const u8 *bits, u32 total_bits, u32 last_key,
+ u32 *idx, const struct mmbit_sparse_iter *it_root,
+ struct mmbit_sparse_state *s) {
+ assert(ISALIGNED_N(it_root, alignof(struct mmbit_sparse_iter)));
+
+ // Our state _may_ be on the stack
#ifndef _WIN32
- assert(ISALIGNED_N(s, alignof(struct mmbit_sparse_state)));
+ assert(ISALIGNED_N(s, alignof(struct mmbit_sparse_state)));
#else
assert(ISALIGNED_N(s, 4));
#endif
-
- MDEBUG_PRINTF("%p total_bits %u\n", bits, total_bits);
- MDEBUG_PRINTF("NEXT (total_bits=%u, last_key=%u)\n", total_bits, last_key);
- UNUSED u32 last_idx = *idx; // for assertion at the end
- // our iterator should have _something_ at the root level
- assert(it_root->mask != 0);
- assert(last_key < total_bits);
-
- u32 key;
- if (mmbit_is_flat_model(total_bits)) {
- key = mmbit_sparse_iter_next_flat(bits, total_bits, idx, it_root, s);
- } else {
- key = mmbit_sparse_iter_next_big(bits, total_bits, last_key, idx,
- it_root, s);
- }
- if (key != MMB_INVALID) {
- MDEBUG_PRINTF("END NEXT: key=%u, idx=%u\n", key, *idx);
- assert(key < total_bits);
- assert(key > last_key);
- assert(mmbit_isset(bits, total_bits, key));
- assert(*idx > last_idx);
- } else {
- MDEBUG_PRINTF("END NEXT: no more keys\n");
- }
- return key;
-}
-
-/** \brief Specialisation of \ref mmbit_sparse_iter_unset for flat models. */
-static really_inline
-void mmbit_sparse_iter_unset_flat(u8 *bits, u32 total_bits,
- const struct mmbit_sparse_iter *it_root) {
- if (total_bits <= MMB_KEY_BITS) {
- // Everything is in the root mask: we can just mask those bits off.
- MMB_TYPE block = mmbit_get_flat_block(bits, total_bits);
- block &= ~it_root->mask;
- mmb_store_partial(bits, block, total_bits);
- return;
- }
-
- // Larger case, we have two iterator levels to worry about.
- u32 bit_idx = 0;
- for (MMB_TYPE root = it_root->mask; root; root &= (root - 1), bit_idx++) {
- u32 bit = mmb_ctz(root);
- u32 block_key_min = bit * MMB_KEY_BITS;
- u32 block_key_max = block_key_min + MMB_KEY_BITS;
- u8 *block_ptr = bits + (bit * sizeof(MMB_TYPE));
- u32 iter_key = it_root->val + bit_idx;
- const struct mmbit_sparse_iter *it = it_root + iter_key;
- if (block_key_max <= total_bits) {
- // Full-sized block.
- MMB_TYPE block = mmb_load(block_ptr);
- block &= ~it->mask;
- mmb_store(block_ptr, block);
- } else {
- // Runt (final) block.
- u32 num_bits = total_bits - block_key_min;
- MMB_TYPE block = mmbit_get_flat_block(block_ptr, num_bits);
- block &= ~it->mask;
- mmb_store_partial(block_ptr, block, num_bits);
- break; // We know this is the last block.
- }
- }
-}
-
-static really_inline
-void mmbit_sparse_iter_unset_big(u8 *bits, u32 total_bits,
- const struct mmbit_sparse_iter *it_root,
- struct mmbit_sparse_state *s) {
- const struct mmbit_sparse_iter *it = it_root;
- MMB_TYPE block = mmb_load(bits) & it->mask;
- if (!block) {
- return;
- }
-
- u32 key = 0;
- const u32 max_level = mmbit_maxlevel(total_bits);
- u32 level = 0;
-
- // Load first block into top level state
- s[level].mask = block;
- s[level].itkey = 0;
- for (;;) {
- block = s[level].mask;
- if (block) {
- if (level == max_level) {
- // bottom level block: we want to mask out the bits specified
- // by the iterator mask and then go back up a level.
- u8 *block_ptr =
- mmbit_get_level_root(bits, level) + key * sizeof(MMB_TYPE);
- MMB_TYPE real_block = mmb_load(block_ptr);
- real_block &= ~(it->mask);
- mmb_store(block_ptr, real_block);
- goto uplevel; // still cheap and nasty
- } else {
- u32 bit = mmb_ctz(block);
- key = (key << MMB_KEY_SHIFT) + bit;
- level++;
-
- // iterator record is the start of the level (current it->val)
- // plus N, where N is the dense index of the bit in the current
- // level's itmask
- u32 iter_key = it->val + mmbit_mask_index(bit, it->mask);
- it = it_root + iter_key;
- MMB_TYPE nextblock =
- mmb_load(mmbit_get_level_root_const(bits, level) +
- key * sizeof(MMB_TYPE));
- s[level].mask = nextblock & it->mask;
- s[level].itkey = iter_key;
- }
- } else {
-uplevel:
- // No bits set in this block
- if (level == 0) {
- return; // we are done
- }
- u8 *block_ptr =
- mmbit_get_level_root(bits, level) + key * sizeof(MMB_TYPE);
- MMB_TYPE real_block = mmb_load(block_ptr);
- key >>= MMB_KEY_SHIFT;
- level--;
-
- if (real_block == 0) {
- // If we've zeroed our block For Real (unmasked by iterator),
- // we can clear the parent bit that led us to it, so that
- // we don't go down this particular garden path again later.
- u32 bit = mmb_ctz(s[level].mask);
- u8 *parent_ptr =
- mmbit_get_level_root(bits, level) + key * sizeof(MMB_TYPE);
- MMB_TYPE parent_block = mmb_load(parent_ptr);
- mmb_clear(&parent_block, bit);
- mmb_store(parent_ptr, parent_block);
- }
-
- // Update state mask and iterator
- s[level].mask &= (s[level].mask - 1);
- it = it_root + s[level].itkey;
- }
- }
-}
-
-/** \brief Sparse iterator, unset all bits.
- *
- * Takes in a sparse iterator tree structure and switches off any entries found
- * therein.
- */
-static really_inline
-void mmbit_sparse_iter_unset(u8 *bits, u32 total_bits,
- const struct mmbit_sparse_iter *it,
- struct mmbit_sparse_state *s) {
- assert(ISALIGNED_N(it, alignof(struct mmbit_sparse_iter)));
-
- // Our state _may_ be on the stack
+
+ MDEBUG_PRINTF("%p total_bits %u\n", bits, total_bits);
+ MDEBUG_PRINTF("NEXT (total_bits=%u, last_key=%u)\n", total_bits, last_key);
+ UNUSED u32 last_idx = *idx; // for assertion at the end
+ // our iterator should have _something_ at the root level
+ assert(it_root->mask != 0);
+ assert(last_key < total_bits);
+
+ u32 key;
+ if (mmbit_is_flat_model(total_bits)) {
+ key = mmbit_sparse_iter_next_flat(bits, total_bits, idx, it_root, s);
+ } else {
+ key = mmbit_sparse_iter_next_big(bits, total_bits, last_key, idx,
+ it_root, s);
+ }
+ if (key != MMB_INVALID) {
+ MDEBUG_PRINTF("END NEXT: key=%u, idx=%u\n", key, *idx);
+ assert(key < total_bits);
+ assert(key > last_key);
+ assert(mmbit_isset(bits, total_bits, key));
+ assert(*idx > last_idx);
+ } else {
+ MDEBUG_PRINTF("END NEXT: no more keys\n");
+ }
+ return key;
+}
+
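+/* Usage sketch (annotation; the wrapper name is ours, mirroring the trace
+ * loop further below): begin/next visit only the keys named by the
+ * compile-time sparse iterator. `idx` is the dense rank of the key within
+ * the iterator's key set, which callers use to index side tables. */
+static really_inline
+void mmbit_sparse_iter_usage_example(const u8 *bits, u32 total_bits,
+ const struct mmbit_sparse_iter *it) {
+ struct mmbit_sparse_state s[MAX_SPARSE_ITER_STATES];
+ u32 idx = 0;
+ for (u32 key = mmbit_sparse_iter_begin(bits, total_bits, &idx, it, s);
+ key != MMB_INVALID;
+ key = mmbit_sparse_iter_next(bits, total_bits, key, &idx, it, s)) {
+ // `key` is on and belongs to the iterator; `idx` is its dense rank
+ }
+}
+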
+/** \brief Specialisation of \ref mmbit_sparse_iter_unset for flat models. */
+static really_inline
+void mmbit_sparse_iter_unset_flat(u8 *bits, u32 total_bits,
+ const struct mmbit_sparse_iter *it_root) {
+ if (total_bits <= MMB_KEY_BITS) {
+ // Everything is in the root mask: we can just mask those bits off.
+ MMB_TYPE block = mmbit_get_flat_block(bits, total_bits);
+ block &= ~it_root->mask;
+ mmb_store_partial(bits, block, total_bits);
+ return;
+ }
+
+ // Larger case, we have two iterator levels to worry about.
+ u32 bit_idx = 0;
+ for (MMB_TYPE root = it_root->mask; root; root &= (root - 1), bit_idx++) {
+ u32 bit = mmb_ctz(root);
+ u32 block_key_min = bit * MMB_KEY_BITS;
+ u32 block_key_max = block_key_min + MMB_KEY_BITS;
+ u8 *block_ptr = bits + (bit * sizeof(MMB_TYPE));
+ u32 iter_key = it_root->val + bit_idx;
+ const struct mmbit_sparse_iter *it = it_root + iter_key;
+ if (block_key_max <= total_bits) {
+ // Full-sized block.
+ MMB_TYPE block = mmb_load(block_ptr);
+ block &= ~it->mask;
+ mmb_store(block_ptr, block);
+ } else {
+ // Runt (final) block.
+ u32 num_bits = total_bits - block_key_min;
+ MMB_TYPE block = mmbit_get_flat_block(block_ptr, num_bits);
+ block &= ~it->mask;
+ mmb_store_partial(block_ptr, block, num_bits);
+ break; // We know this is the last block.
+ }
+ }
+}
+
+static really_inline
+void mmbit_sparse_iter_unset_big(u8 *bits, u32 total_bits,
+ const struct mmbit_sparse_iter *it_root,
+ struct mmbit_sparse_state *s) {
+ const struct mmbit_sparse_iter *it = it_root;
+ MMB_TYPE block = mmb_load(bits) & it->mask;
+ if (!block) {
+ return;
+ }
+
+ u32 key = 0;
+ const u32 max_level = mmbit_maxlevel(total_bits);
+ u32 level = 0;
+
+ // Load first block into top level state
+ s[level].mask = block;
+ s[level].itkey = 0;
+ for (;;) {
+ block = s[level].mask;
+ if (block) {
+ if (level == max_level) {
+ // bottom level block: we want to mask out the bits specified
+ // by the iterator mask and then go back up a level.
+ u8 *block_ptr =
+ mmbit_get_level_root(bits, level) + key * sizeof(MMB_TYPE);
+ MMB_TYPE real_block = mmb_load(block_ptr);
+ real_block &= ~(it->mask);
+ mmb_store(block_ptr, real_block);
+ goto uplevel; // still cheap and nasty
+ } else {
+ u32 bit = mmb_ctz(block);
+ key = (key << MMB_KEY_SHIFT) + bit;
+ level++;
+
+ // iterator record is the start of the level (current it->val)
+ // plus N, where N is the dense index of the bit in the current
+ // level's itmask
+ u32 iter_key = it->val + mmbit_mask_index(bit, it->mask);
+ it = it_root + iter_key;
+ MMB_TYPE nextblock =
+ mmb_load(mmbit_get_level_root_const(bits, level) +
+ key * sizeof(MMB_TYPE));
+ s[level].mask = nextblock & it->mask;
+ s[level].itkey = iter_key;
+ }
+ } else {
+uplevel:
+ // No bits set in this block
+ if (level == 0) {
+ return; // we are done
+ }
+ u8 *block_ptr =
+ mmbit_get_level_root(bits, level) + key * sizeof(MMB_TYPE);
+ MMB_TYPE real_block = mmb_load(block_ptr);
+ key >>= MMB_KEY_SHIFT;
+ level--;
+
+ if (real_block == 0) {
+ // If we've zeroed our block For Real (unmasked by iterator),
+ // we can clear the parent bit that led us to it, so that
+ // we don't go down this particular garden path again later.
+ u32 bit = mmb_ctz(s[level].mask);
+ u8 *parent_ptr =
+ mmbit_get_level_root(bits, level) + key * sizeof(MMB_TYPE);
+ MMB_TYPE parent_block = mmb_load(parent_ptr);
+ mmb_clear(&parent_block, bit);
+ mmb_store(parent_ptr, parent_block);
+ }
+
+ // Update state mask and iterator
+ s[level].mask &= (s[level].mask - 1);
+ it = it_root + s[level].itkey;
+ }
+ }
+}
+
+/** \brief Sparse iterator, unset all bits.
+ *
+ * Takes in a sparse iterator tree structure and switches off any entries found
+ * therein.
+ */
+static really_inline
+void mmbit_sparse_iter_unset(u8 *bits, u32 total_bits,
+ const struct mmbit_sparse_iter *it,
+ struct mmbit_sparse_state *s) {
+ assert(ISALIGNED_N(it, alignof(struct mmbit_sparse_iter)));
+
+ // Our state _may_ be on the stack
#ifndef _WIN32
- assert(ISALIGNED_N(s, alignof(struct mmbit_sparse_state)));
+ assert(ISALIGNED_N(s, alignof(struct mmbit_sparse_state)));
#else
assert(ISALIGNED_N(s, 4));
#endif
-
- MDEBUG_PRINTF("%p total_bits %u\n", bits, total_bits);
-
-#ifdef MMB_TRACE_WRITES
- MMB_TRACE("ITER-UNSET iter=[");
- mmbit_sparse_iter_dump(it, total_bits);
- printf("] actually on=[");
- struct mmbit_sparse_state tmp[MAX_SPARSE_ITER_STATES];
- u32 idx = 0;
- u32 i = mmbit_sparse_iter_begin(bits, total_bits, &idx, it, tmp);
- for (; i != MMB_INVALID;
- i = mmbit_sparse_iter_next(bits, total_bits, i, &idx, it, tmp)) {
- printf(" %u", i);
- }
- printf("]\n");
-#endif
-
- if (mmbit_is_flat_model(total_bits)) {
- mmbit_sparse_iter_unset_flat(bits, total_bits, it);
- } else {
- mmbit_sparse_iter_unset_big(bits, total_bits, it, s);
- }
-}
-
-#ifdef __cplusplus
-} // extern "C"
-#endif
-
-#endif // MULTIBIT_H
+
+ MDEBUG_PRINTF("%p total_bits %u\n", bits, total_bits);
+
+#ifdef MMB_TRACE_WRITES
+ MMB_TRACE("ITER-UNSET iter=[");
+ mmbit_sparse_iter_dump(it, total_bits);
+ printf("] actually on=[");
+ struct mmbit_sparse_state tmp[MAX_SPARSE_ITER_STATES];
+ u32 idx = 0;
+ u32 i = mmbit_sparse_iter_begin(bits, total_bits, &idx, it, tmp);
+ for (; i != MMB_INVALID;
+ i = mmbit_sparse_iter_next(bits, total_bits, i, &idx, it, tmp)) {
+ printf(" %u", i);
+ }
+ printf("]\n");
+#endif
+
+ if (mmbit_is_flat_model(total_bits)) {
+ mmbit_sparse_iter_unset_flat(bits, total_bits, it);
+ } else {
+ mmbit_sparse_iter_unset_big(bits, total_bits, it, s);
+ }
+}
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // MULTIBIT_H
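End of multibit.h. The begin/next walk in the MMB_TRACE_WRITES block above doubles as the public consumption pattern for a sparse iterator; a minimal sketch, assuming bits, total_bits and a built iterator it are set up elsewhere:

    struct mmbit_sparse_state s[MAX_SPARSE_ITER_STATES];
    u32 idx = 0;
    for (u32 i = mmbit_sparse_iter_begin(bits, total_bits, &idx, it, s);
         i != MMB_INVALID;
         i = mmbit_sparse_iter_next(bits, total_bits, i, &idx, it, s)) {
        // i is the next set key covered by the iterator
    }
    mmbit_sparse_iter_unset(bits, total_bits, it, s); // then switch them all off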
diff --git a/contrib/libs/hyperscan/src/util/multibit_build.cpp b/contrib/libs/hyperscan/src/util/multibit_build.cpp
index 67bb9ec702..f1a88de63d 100644
--- a/contrib/libs/hyperscan/src/util/multibit_build.cpp
+++ b/contrib/libs/hyperscan/src/util/multibit_build.cpp
@@ -1,51 +1,51 @@
-/*
+/*
* Copyright (c) 2015-2017, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \file
- * \brief Multibit: build code (for sparse iterators)
- */
-#include "multibit.h"
-#include "multibit_build.h"
-#include "scatter.h"
-#include "ue2common.h"
-#include "rose/rose_build_scatter.h"
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * \brief Multibit: build code (for sparse iterators)
+ */
+#include "multibit.h"
+#include "multibit_build.h"
+#include "scatter.h"
+#include "ue2common.h"
+#include "rose/rose_build_scatter.h"
#include "util/compile_error.h"
-
-#include <cassert>
-#include <cstring> // for memset
-#include <map>
-#include <queue>
-#include <vector>
-
-using namespace std;
-
-namespace ue2 {
-
+
+#include <cassert>
+#include <cstring> // for memset
+#include <map>
+#include <queue>
+#include <vector>
+
+using namespace std;
+
+namespace ue2 {
+
u32 mmbit_size(u32 total_bits) {
if (total_bits > MMB_MAX_BITS) {
throw ResourceLimitError();
@@ -72,257 +72,257 @@ u32 mmbit_size(u32 total_bits) {
return (u32)(total * sizeof(MMB_TYPE));
}
-namespace {
-struct TreeNode {
- MMB_TYPE mask = 0;
- u32 depth = 0;
- map<u32, TreeNode> children; // keyed by rkey
-};
-} // namespace
-
-static
-void addNode(TreeNode &tree, u32 depth, u32 key, s32 ks, u32 rkey) {
- u32 bit = (key >> ks) & MMB_KEY_MASK;
- DEBUG_PRINTF("depth=%u, key=%u, ks=%d, rkey=%u, bit=%u\n", depth, key, ks,
- rkey, bit);
- mmb_set(&tree.mask, bit); // add bit to this level
- tree.depth = depth; // record depth
- // next level
- rkey = (rkey << MMB_KEY_SHIFT) + bit;
- ks -= MMB_KEY_SHIFT;
- depth++;
- if (ks >= 0) {
- addNode(tree.children[rkey], depth, key, ks, rkey);
- }
-}
-
-static
-void bfs(vector<mmbit_sparse_iter> &out, const TreeNode &tree) {
- queue<const TreeNode *> q;
- q.push(&tree);
-
- vector<u32> levels;
- u32 depth = 0;
-
- DEBUG_PRINTF("walking q\n");
-
- while (!q.empty()) {
- const TreeNode *t = q.front();
- q.pop();
-
- if (depth != t->depth) {
- depth = t->depth;
- levels.push_back(out.size());
- }
-
- DEBUG_PRINTF("pop: mask=0x%08llx, depth=%u, children.size()=%zu\n",
- t->mask, t->depth, t->children.size());
-
- out.push_back(mmbit_sparse_iter());
- memset(&out.back(), 0, sizeof(mmbit_sparse_iter));
- mmbit_sparse_iter &record = out.back();
- record.mask = t->mask;
- record.val = 0;
-
- for (auto &e : t->children) {
- q.push(&e.second);
- }
- }
-
- // val for records in non-last levels is the iterator array start offset
- // for that iterator record's children
- u32 start = 0;
- for (size_t i = 0; i < levels.size(); i++) {
- u32 start_next = levels[i];
- u32 population = 0;
- DEBUG_PRINTF("next level starts at %u\n", start_next);
- for (u32 j = start; j < start_next; j++) {
- out[j].val = start_next + population;
- DEBUG_PRINTF(" children of %u start at %u\n", j, out[j].val);
- population += mmb_popcount(out[j].mask);
- }
- start = start_next;
- }
-
- // val for records in the last level is the cumulative popcount
- u32 population = 0;
- for (size_t i = start; i < out.size(); i++) {
- DEBUG_PRINTF("last level: i=%zu, population=%u\n", i, population);
- out[i].val = population;
- population += mmb_popcount(out[i].mask);
- }
-}
-
-/** \brief Construct a sparse iterator over the values in \a bits for a
- * multibit of size \a total_bits. */
+namespace {
+struct TreeNode {
+ MMB_TYPE mask = 0;
+ u32 depth = 0;
+ map<u32, TreeNode> children; // keyed by rkey
+};
+} // namespace
+
+static
+void addNode(TreeNode &tree, u32 depth, u32 key, s32 ks, u32 rkey) {
+ u32 bit = (key >> ks) & MMB_KEY_MASK;
+ DEBUG_PRINTF("depth=%u, key=%u, ks=%d, rkey=%u, bit=%u\n", depth, key, ks,
+ rkey, bit);
+ mmb_set(&tree.mask, bit); // add bit to this level
+ tree.depth = depth; // record depth
+ // next level
+ rkey = (rkey << MMB_KEY_SHIFT) + bit;
+ ks -= MMB_KEY_SHIFT;
+ depth++;
+ if (ks >= 0) {
+ addNode(tree.children[rkey], depth, key, ks, rkey);
+ }
+}
+
+static
+void bfs(vector<mmbit_sparse_iter> &out, const TreeNode &tree) {
+ queue<const TreeNode *> q;
+ q.push(&tree);
+
+ vector<u32> levels;
+ u32 depth = 0;
+
+ DEBUG_PRINTF("walking q\n");
+
+ while (!q.empty()) {
+ const TreeNode *t = q.front();
+ q.pop();
+
+ if (depth != t->depth) {
+ depth = t->depth;
+ levels.push_back(out.size());
+ }
+
+ DEBUG_PRINTF("pop: mask=0x%08llx, depth=%u, children.size()=%zu\n",
+ t->mask, t->depth, t->children.size());
+
+ out.push_back(mmbit_sparse_iter());
+ memset(&out.back(), 0, sizeof(mmbit_sparse_iter));
+ mmbit_sparse_iter &record = out.back();
+ record.mask = t->mask;
+ record.val = 0;
+
+ for (auto &e : t->children) {
+ q.push(&e.second);
+ }
+ }
+
+ // val for records in non-last levels is the iterator array start offset
+ // for that iterator record's children
+ u32 start = 0;
+ for (size_t i = 0; i < levels.size(); i++) {
+ u32 start_next = levels[i];
+ u32 population = 0;
+ DEBUG_PRINTF("next level starts at %u\n", start_next);
+ for (u32 j = start; j < start_next; j++) {
+ out[j].val = start_next + population;
+ DEBUG_PRINTF(" children of %u start at %u\n", j, out[j].val);
+ population += mmb_popcount(out[j].mask);
+ }
+ start = start_next;
+ }
+
+ // val for records in the last level is the cumulative popcount
+ u32 population = 0;
+ for (size_t i = start; i < out.size(); i++) {
+ DEBUG_PRINTF("last level: i=%zu, population=%u\n", i, population);
+ out[i].val = population;
+ population += mmb_popcount(out[i].mask);
+ }
+}
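A worked example of the val assignment above, for a hypothetical two-level multibit of 4096 keys (64 x 64 fan-out) with keys {1, 64} set:

    // record 0 (root): mask = 0b11 (child blocks 0 and 1), val = 1
    //                  -> its children start at iterator index 1
    // record 1 (leaf): mask has bit 1 set (key 1),  val = 0 (no keys before it)
    // record 2 (leaf): mask has bit 0 set (key 64), val = 1 (one key before it)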
+
+/** \brief Construct a sparse iterator over the values in \a bits for a
+ * multibit of size \a total_bits. */
vector<mmbit_sparse_iter> mmbBuildSparseIterator(const vector<u32> &bits,
u32 total_bits) {
vector<mmbit_sparse_iter> out;
- assert(!bits.empty());
- assert(total_bits > 0);
+ assert(!bits.empty());
+ assert(total_bits > 0);
assert(total_bits <= MMB_MAX_BITS);
-
- DEBUG_PRINTF("building sparse iter for %zu of %u bits\n",
- bits.size(), total_bits);
-
- s32 ks = (total_bits > 1 ? mmbit_keyshift(total_bits) : 0);
-
- // Construct an intermediate tree
- TreeNode tree;
- for (const auto &bit : bits) {
- assert(bit < total_bits);
- addNode(tree, 0, bit, ks, 0);
- }
-
- // From our intermediate tree, lay the data out with a breadth-first walk
- bfs(out, tree);
- assert(!out.empty());
-
-#ifdef DEBUG
- DEBUG_PRINTF("dump of iterator tree:\n");
- for (size_t i = 0; i < out.size(); ++i) {
- printf(" %zu:\tmask=0x%08llx, val=%u\n", i, out[i].mask, out[i].val);
- }
-#endif
-
- DEBUG_PRINTF("iter has %zu records\n", out.size());
+
+ DEBUG_PRINTF("building sparse iter for %zu of %u bits\n",
+ bits.size(), total_bits);
+
+ s32 ks = (total_bits > 1 ? mmbit_keyshift(total_bits) : 0);
+
+ // Construct an intermediate tree
+ TreeNode tree;
+ for (const auto &bit : bits) {
+ assert(bit < total_bits);
+ addNode(tree, 0, bit, ks, 0);
+ }
+
+ // From our intermediate tree, lay the data out with a breadth-first walk
+ bfs(out, tree);
+ assert(!out.empty());
+
+#ifdef DEBUG
+ DEBUG_PRINTF("dump of iterator tree:\n");
+ for (size_t i = 0; i < out.size(); ++i) {
+ printf(" %zu:\tmask=0x%08llx, val=%u\n", i, out[i].mask, out[i].val);
+ }
+#endif
+
+ DEBUG_PRINTF("iter has %zu records\n", out.size());
return out;
-}
-
-template<typename T>
-static
-void add_scatter(vector<T> *out, u32 offset, u64a mask) {
+}
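A build-side sketch matching the worked example above (the 4096-key size is arbitrary; the record count follows from the BFS layout):

    std::vector<u32> keys = {1, 64};
    std::vector<mmbit_sparse_iter> iter = ue2::mmbBuildSparseIterator(keys, 4096);
    assert(iter.size() == 3); // one root record plus two leaf records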
+
+template<typename T>
+static
+void add_scatter(vector<T> *out, u32 offset, u64a mask) {
out->emplace_back();
T &su = out->back();
- memset(&su, 0, sizeof(su));
- su.offset = offset;
- su.val = mask;
- DEBUG_PRINTF("add %llu at offset %u\n", mask, offset);
-}
-
-static
-u32 mmbit_get_level_root_offset(u32 level) {
- return mmbit_root_offset_from_level[level] * sizeof(MMB_TYPE);
-}
-
-void mmbBuildInitRangePlan(u32 total_bits, u32 begin, u32 end,
- scatter_plan_raw *out) {
- DEBUG_PRINTF("building scatter plan for [%u, %u]/%u\n", begin, end,
- total_bits);
- if (!total_bits) {
- return;
- }
-
- if (total_bits <= MMB_FLAT_MAX_BITS) {
- // Handle flat model cases: first a bunch of 64-bit full-sized blocks,
- // then a single runt block at the end.
- u32 dest = 0; // dest offset
- u32 bits = total_bits;
- u32 base = 0;
- for (; bits > 64; bits -= 64, base += 64, dest += 8) {
- MMB_TYPE mask = get_flat_masks(base, begin, end);
- add_scatter(&out->p_u64a, dest, mask);
- }
-
- // Last chunk.
- assert(bits > 0 && bits <= 64);
-
- MMB_TYPE mask = get_flat_masks(base, begin, end);
- if (bits <= 8) {
- add_scatter(&out->p_u8, dest + 0, mask);
- } else if (bits <= 16) {
- add_scatter(&out->p_u16, dest + 0, mask);
- } else if (bits <= 24) {
- add_scatter(&out->p_u16, dest + 0, mask);
- add_scatter(&out->p_u8, dest + 2, mask >> 16);
- } else if (bits <= 32) {
- add_scatter(&out->p_u32, dest + 0, mask);
- } else if (bits <= 40) {
- add_scatter(&out->p_u32, dest + 0, mask);
- add_scatter(&out->p_u8, dest + 4, mask >> 32);
- } else if (bits <= 48) {
- add_scatter(&out->p_u32, dest + 0, mask);
- add_scatter(&out->p_u16, dest + 4, mask >> 32);
- } else if (bits <= 56) {
- add_scatter(&out->p_u32, dest + 0, mask);
- add_scatter(&out->p_u16, dest + 4, mask >> 32);
- add_scatter(&out->p_u8, dest + 6, mask >> 48);
- } else {
- add_scatter(&out->p_u64a, dest + 0, mask);
- }
- return;
- }
-
- /* handle the multilevel case */
- s32 ks = mmbit_keyshift(total_bits);
- u32 level = 0;
- assert(sizeof(MMB_TYPE) == sizeof(u64a));
-
- if (begin == end) {
- add_scatter(&out->p_u64a, 0, 0);
- return;
- }
-
- for (;;) {
- u32 block_offset = mmbit_get_level_root_offset(level);
- u32 k1 = begin >> ks, k2 = end >> ks;
-
- // Summary blocks need to account for the runt block on the end.
- if ((k2 << ks) != end) {
- k2++;
- }
-
- // Partial block to deal with beginning.
+ memset(&su, 0, sizeof(su));
+ su.offset = offset;
+ su.val = mask;
+ DEBUG_PRINTF("add %llu at offset %u\n", mask, offset);
+}
+
+static
+u32 mmbit_get_level_root_offset(u32 level) {
+ return mmbit_root_offset_from_level[level] * sizeof(MMB_TYPE);
+}
+
+void mmbBuildInitRangePlan(u32 total_bits, u32 begin, u32 end,
+ scatter_plan_raw *out) {
+ DEBUG_PRINTF("building scatter plan for [%u, %u]/%u\n", begin, end,
+ total_bits);
+ if (!total_bits) {
+ return;
+ }
+
+ if (total_bits <= MMB_FLAT_MAX_BITS) {
+ // Handle flat model cases: first a bunch of 64-bit full-sized blocks,
+ // then a single runt block at the end.
+ u32 dest = 0; // dest offset
+ u32 bits = total_bits;
+ u32 base = 0;
+ for (; bits > 64; bits -= 64, base += 64, dest += 8) {
+ MMB_TYPE mask = get_flat_masks(base, begin, end);
+ add_scatter(&out->p_u64a, dest, mask);
+ }
+
+ // Last chunk.
+ assert(bits > 0 && bits <= 64);
+
+ MMB_TYPE mask = get_flat_masks(base, begin, end);
+ if (bits <= 8) {
+ add_scatter(&out->p_u8, dest + 0, mask);
+ } else if (bits <= 16) {
+ add_scatter(&out->p_u16, dest + 0, mask);
+ } else if (bits <= 24) {
+ add_scatter(&out->p_u16, dest + 0, mask);
+ add_scatter(&out->p_u8, dest + 2, mask >> 16);
+ } else if (bits <= 32) {
+ add_scatter(&out->p_u32, dest + 0, mask);
+ } else if (bits <= 40) {
+ add_scatter(&out->p_u32, dest + 0, mask);
+ add_scatter(&out->p_u8, dest + 4, mask >> 32);
+ } else if (bits <= 48) {
+ add_scatter(&out->p_u32, dest + 0, mask);
+ add_scatter(&out->p_u16, dest + 4, mask >> 32);
+ } else if (bits <= 56) {
+ add_scatter(&out->p_u32, dest + 0, mask);
+ add_scatter(&out->p_u16, dest + 4, mask >> 32);
+ add_scatter(&out->p_u8, dest + 6, mask >> 48);
+ } else {
+ add_scatter(&out->p_u64a, dest + 0, mask);
+ }
+ return;
+ }
+
+ /* handle the multilevel case */
+ s32 ks = mmbit_keyshift(total_bits);
+ u32 level = 0;
+ assert(sizeof(MMB_TYPE) == sizeof(u64a));
+
+ if (begin == end) {
+ add_scatter(&out->p_u64a, 0, 0);
+ return;
+ }
+
+ for (;;) {
+ u32 block_offset = mmbit_get_level_root_offset(level);
+ u32 k1 = begin >> ks, k2 = end >> ks;
+
+ // Summary blocks need to account for the runt block on the end.
+ if ((k2 << ks) != end) {
+ k2++;
+ }
+
+ // Partial block to deal with beginning.
block_offset += (k1 / MMB_KEY_BITS) * sizeof(MMB_TYPE);
- if (k1 % MMB_KEY_BITS) {
- u32 idx = k1 / MMB_KEY_BITS;
- u32 block_end = (idx + 1) * MMB_KEY_BITS;
-
- // Because k1 % MMB_KEY_BITS != 0, we can avoid checking edge cases
- // here (see the branch in mmb_mask_zero_to).
- MMB_TYPE mask = (-MMB_ONE) << (k1 % MMB_KEY_BITS);
-
- if (k2 < block_end) {
- assert(k2 % MMB_KEY_BITS);
- mask &= mmb_mask_zero_to_nocheck(k2 % MMB_KEY_BITS);
- add_scatter(&out->p_u64a, block_offset, mask);
- goto next_level;
- } else {
- add_scatter(&out->p_u64a, block_offset, mask);
- k1 = block_end;
- block_offset += sizeof(MMB_TYPE);
- }
- }
-
- // Write blocks filled with ones until we get to the last block.
- for (; k1 < (k2 & ~MMB_KEY_MASK); k1 += MMB_KEY_BITS) {
- add_scatter(&out->p_u64a, block_offset, -MMB_ONE);
- block_offset += sizeof(MMB_TYPE);
- }
-
- // Final block.
- if (likely(k1 < k2)) {
- // Again, if k2 was at a block boundary, it would have been handled
- // by the previous loop, so we know k2 % MMB_KEY_BITS != 0 and can
- // avoid the branch in mmb_mask_zero_to here.
- assert(k2 % MMB_KEY_BITS);
- MMB_TYPE mask = mmb_mask_zero_to_nocheck(k2 % MMB_KEY_BITS);
-
- add_scatter(&out->p_u64a, block_offset, mask);
- }
-
- next_level:
- if (ks == 0) {
-        break; // Last level is done.
- }
-
- ks -= MMB_KEY_SHIFT;
- level++;
- }
-}
-
-void mmbBuildClearPlan(u32 total_bits, scatter_plan_raw *out) {
- return mmbBuildInitRangePlan(total_bits, 0, 0, out);
-}
-
-} // namespace ue2
+ if (k1 % MMB_KEY_BITS) {
+ u32 idx = k1 / MMB_KEY_BITS;
+ u32 block_end = (idx + 1) * MMB_KEY_BITS;
+
+ // Because k1 % MMB_KEY_BITS != 0, we can avoid checking edge cases
+ // here (see the branch in mmb_mask_zero_to).
+ MMB_TYPE mask = (-MMB_ONE) << (k1 % MMB_KEY_BITS);
+
+ if (k2 < block_end) {
+ assert(k2 % MMB_KEY_BITS);
+ mask &= mmb_mask_zero_to_nocheck(k2 % MMB_KEY_BITS);
+ add_scatter(&out->p_u64a, block_offset, mask);
+ goto next_level;
+ } else {
+ add_scatter(&out->p_u64a, block_offset, mask);
+ k1 = block_end;
+ block_offset += sizeof(MMB_TYPE);
+ }
+ }
+
+ // Write blocks filled with ones until we get to the last block.
+ for (; k1 < (k2 & ~MMB_KEY_MASK); k1 += MMB_KEY_BITS) {
+ add_scatter(&out->p_u64a, block_offset, -MMB_ONE);
+ block_offset += sizeof(MMB_TYPE);
+ }
+
+ // Final block.
+ if (likely(k1 < k2)) {
+ // Again, if k2 was at a block boundary, it would have been handled
+ // by the previous loop, so we know k2 % MMB_KEY_BITS != 0 and can
+ // avoid the branch in mmb_mask_zero_to here.
+ assert(k2 % MMB_KEY_BITS);
+ MMB_TYPE mask = mmb_mask_zero_to_nocheck(k2 % MMB_KEY_BITS);
+
+ add_scatter(&out->p_u64a, block_offset, mask);
+ }
+
+ next_level:
+ if (ks == 0) {
+            break; // Last level is done.
+ }
+
+ ks -= MMB_KEY_SHIFT;
+ level++;
+ }
+}
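The partial-block masks in the loop above compose two pieces: ones at and above the begin key, and ones strictly below the end key. On a single 64-bit block that is, assuming (as the code's use implies) that mmb_mask_zero_to_nocheck(n) yields the low-n-bits mask for n != 0:

    u64a ones_from  = (~(u64a)0) << (k1 % 64);    // (-MMB_ONE) << (k1 % MMB_KEY_BITS)
    u64a ones_below = ((u64a)1 << (k2 % 64)) - 1; // mmb_mask_zero_to_nocheck(k2 % 64)
    u64a mask = ones_from & ones_below;           // keys [k1, k2) within this block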
+
+void mmbBuildClearPlan(u32 total_bits, scatter_plan_raw *out) {
+ return mmbBuildInitRangePlan(total_bits, 0, 0, out);
+}
+
+} // namespace ue2
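End of multibit_build.cpp. Consuming a scatter_plan_raw happens elsewhere; a hedged sketch, inferring only from add_scatter above that each unit carries an offset into the multibit and a value to write (the apply loop itself is illustrative, not the library's actual runtime code):

    static void apply_plan_sketch(u8 *mmbit, const scatter_plan_raw &plan) {
        for (const auto &su : plan.p_u64a) {
            unaligned_store_u64a(mmbit + su.offset, su.val);
        }
        for (const auto &su : plan.p_u32) {
            unaligned_store_u32(mmbit + su.offset, (u32)su.val);
        }
        // ... likewise for plan.p_u16 and plan.p_u8
    }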
diff --git a/contrib/libs/hyperscan/src/util/multibit_build.h b/contrib/libs/hyperscan/src/util/multibit_build.h
index 24f1bb55b0..595350a59f 100644
--- a/contrib/libs/hyperscan/src/util/multibit_build.h
+++ b/contrib/libs/hyperscan/src/util/multibit_build.h
@@ -1,49 +1,49 @@
-/*
+/*
* Copyright (c) 2015-2017, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \file
- * \brief Multibit: build code (for sparse iterators)
- */
-
-#ifndef MULTIBIT_BUILD_H
-#define MULTIBIT_BUILD_H
-
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * \brief Multibit: build code (for sparse iterators)
+ */
+
+#ifndef MULTIBIT_BUILD_H
+#define MULTIBIT_BUILD_H
+
#include "hs_common.h"
-#include "multibit_internal.h"
+#include "multibit_internal.h"
#include "hash.h"
-
-#include <vector>
-
+
+#include <vector>
+
inline
bool operator==(const mmbit_sparse_iter &a, const mmbit_sparse_iter &b) {
return a.mask == b.mask && a.val == b.val;
-}
-
+}
+
namespace std {
template<>
@@ -55,8 +55,8 @@ struct hash<mmbit_sparse_iter> {
} // namespace std
-namespace ue2 {
-
+namespace ue2 {
+
/**
* \brief Return the size in bytes of a multibit that can store the given
* number of bits.
@@ -66,17 +66,17 @@ namespace ue2 {
*/
u32 mmbit_size(u32 total_bits);
-/** \brief Construct a sparse iterator over the values in \a bits for a
- * multibit of size \a total_bits. */
+/** \brief Construct a sparse iterator over the values in \a bits for a
+ * multibit of size \a total_bits. */
std::vector<mmbit_sparse_iter>
mmbBuildSparseIterator(const std::vector<u32> &bits, u32 total_bits);
-
-struct scatter_plan_raw;
-
-void mmbBuildInitRangePlan(u32 total_bits, u32 begin, u32 end,
- scatter_plan_raw *out);
-void mmbBuildClearPlan(u32 total_bits, scatter_plan_raw *out);
-
-} // namespace ue2
-
-#endif // MULTIBIT_BUILD_H
+
+struct scatter_plan_raw;
+
+void mmbBuildInitRangePlan(u32 total_bits, u32 begin, u32 end,
+ scatter_plan_raw *out);
+void mmbBuildClearPlan(u32 total_bits, scatter_plan_raw *out);
+
+} // namespace ue2
+
+#endif // MULTIBIT_BUILD_H
diff --git a/contrib/libs/hyperscan/src/util/multibit_internal.h b/contrib/libs/hyperscan/src/util/multibit_internal.h
index 350f3bfd47..0f74442e84 100644
--- a/contrib/libs/hyperscan/src/util/multibit_internal.h
+++ b/contrib/libs/hyperscan/src/util/multibit_internal.h
@@ -1,81 +1,81 @@
-/*
+/*
* Copyright (c) 2015-2016, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \file
- * \brief Multibit: data structures.
- *
- * If all you need is the sizes of multibit's few structures, then including
- * this file is a much better idea than including all of multibit.h.
- */
-#ifndef MULTIBIT_INTERNAL_H
-#define MULTIBIT_INTERNAL_H
-
-#include "ue2common.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/** \brief Sentinel value meaning "no key found". */
-#define MMB_INVALID 0xffffffffu
-
-typedef u64a MMB_TYPE; /**< Basic block type for mmbit operations. */
-#define MMB_MAX_LEVEL 6 /**< Maximum level in the mmbit pyramid. */
-
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * \brief Multibit: data structures.
+ *
+ * If all you need is the sizes of multibit's few structures, then including
+ * this file is a much better idea than including all of multibit.h.
+ */
+#ifndef MULTIBIT_INTERNAL_H
+#define MULTIBIT_INTERNAL_H
+
+#include "ue2common.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** \brief Sentinel value meaning "no key found". */
+#define MMB_INVALID 0xffffffffu
+
+typedef u64a MMB_TYPE; /**< Basic block type for mmbit operations. */
+#define MMB_MAX_LEVEL 6 /**< Maximum level in the mmbit pyramid. */
+
/** \brief Maximum number of keys (bits) in a multibit. */
#define MMB_MAX_BITS (1U << 31)
-/** \brief Sparse iterator record type.
- *
- * A sparse iterator is a tree of these records, where val identifies the
- * offset of the result for leaf nodes and points to the next record for
- * intermediate nodes. Built by the code in multibit_build.cpp.
- */
-struct mmbit_sparse_iter {
- MMB_TYPE mask;
- u32 val;
-};
-
-/** \brief Sparse iterator runtime state type.
- *
- * An array of these records (one per "level" in the multibit pyramid) is used
- * to store the current iteration state.
- */
-struct mmbit_sparse_state {
- MMB_TYPE mask; //!< \brief masked last block read at this level.
- u32 itkey; //!< \brief iterator offset for this level.
-};
-
-/** \brief Maximum number of \ref mmbit_sparse_state that could be needed. */
-#define MAX_SPARSE_ITER_STATES (6 + 1)
-
-#ifdef __cplusplus
-} // extern "C"
-#endif
-
-#endif // MULTIBIT_INTERNAL_H
+/** \brief Sparse iterator record type.
+ *
+ * A sparse iterator is a tree of these records, where val identifies the
+ * offset of the result for leaf nodes and points to the next record for
+ * intermediate nodes. Built by the code in multibit_build.cpp.
+ */
+struct mmbit_sparse_iter {
+ MMB_TYPE mask;
+ u32 val;
+};
+
+/** \brief Sparse iterator runtime state type.
+ *
+ * An array of these records (one per "level" in the multibit pyramid) is used
+ * to store the current iteration state.
+ */
+struct mmbit_sparse_state {
+ MMB_TYPE mask; //!< \brief masked last block read at this level.
+ u32 itkey; //!< \brief iterator offset for this level.
+};
+
+/** \brief Maximum number of \ref mmbit_sparse_state that could be needed. */
+#define MAX_SPARSE_ITER_STATES (6 + 1)
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // MULTIBIT_INTERNAL_H
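End of multibit_internal.h. The limits above are mutually consistent: with 64-way (2^6) fan-out per level, MMB_MAX_LEVEL = 6 levels address 2^36 keys, covering MMB_MAX_BITS = 2^31, and the state array needs at most one record per level plus one. An illustration for a C++ translation unit that includes this header:

    static_assert((1ULL << 36) >= MMB_MAX_BITS,
                  "six 64-ary levels address 2^36 keys, enough for 2^31");
    static_assert(MAX_SPARSE_ITER_STATES == MMB_MAX_LEVEL + 1,
                  "one state record per level, plus one");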
diff --git a/contrib/libs/hyperscan/src/util/order_check.h b/contrib/libs/hyperscan/src/util/order_check.h
index 33f3869d73..1a8fc2a3fa 100644
--- a/contrib/libs/hyperscan/src/util/order_check.h
+++ b/contrib/libs/hyperscan/src/util/order_check.h
@@ -1,37 +1,37 @@
-/*
- * Copyright (c) 2015, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/* for implementing operator<, assumes objects are a and b */
-#define ORDER_CHECK(field) do { \
- if (a.field < b.field) { \
- return 1; \
- } \
- if (b.field < a.field) { \
- return 0; \
- } \
- } while (0)
+/*
+ * Copyright (c) 2015, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* for implementing operator<, assumes objects are a and b */
+#define ORDER_CHECK(field) do { \
+ if (a.field < b.field) { \
+ return 1; \
+ } \
+ if (b.field < a.field) { \
+ return 0; \
+ } \
+ } while (0)
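End of order_check.h. A minimal usage sketch (Key is a hypothetical struct; the macro's 1/0 returns convert to bool):

    struct Key {
        u32 block;
        u32 bit;
    };

    bool operator<(const Key &a, const Key &b) {
        ORDER_CHECK(block); // returns immediately if the first fields differ
        ORDER_CHECK(bit);   // otherwise tie-break on the second field
        return false;       // all fields equal
    }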
diff --git a/contrib/libs/hyperscan/src/util/pack_bits.h b/contrib/libs/hyperscan/src/util/pack_bits.h
index 800ce25ec7..301c2664c0 100644
--- a/contrib/libs/hyperscan/src/util/pack_bits.h
+++ b/contrib/libs/hyperscan/src/util/pack_bits.h
@@ -1,227 +1,227 @@
-/*
- * Copyright (c) 2015, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \file
- * \brief Functions for packing/unpacking arrays.
- */
-
-#ifndef UTIL_PACK_BITS_H
-#define UTIL_PACK_BITS_H
-
-#include "ue2common.h"
-#include "unaligned.h"
-#include "partial_store.h"
-
-/**
- * \brief Pack bits from an array of 32-bit words into \a out.
- *
- * \param out Output array. Must be large enough to store sum(bits).
- * \param v Input array.
- * \param bits Number of low bits in the corresponding element of \a v to pack.
- * \param elements Size of the \a v and \a bits arrays.
- */
-static really_inline
-void pack_bits_32(char *out, const u32 *v, const u32 *bits,
- const unsigned int elements);
-
-/**
- * \brief Pack bits from an array of 64-bit words into \a out.
- *
- * \param out Output array. Must be large enough to store sum(bits).
- * \param v Input array.
- * \param bits Number of low bits in the corresponding element of \a v to pack.
- * \param elements Size of the \a v and \a bits arrays.
- */
-static really_inline
-void pack_bits_64(char *out, const u64a *v, const u32 *bits,
- const unsigned int elements);
-
-/**
- * \brief Unpack bits into an array of 32-bit words according to the counts
- * given.
- *
- * \param v Output array.
- * \param in Packed input array.
- * \param bits Number of bits to unpack into the corresponding element of \a v.
- * \param elements Size of the \a v and \a bits arrays.
- */
-static really_inline
-void unpack_bits_32(u32 *v, const u8 *in, const u32 *bits,
- const unsigned int elements);
-
-/**
- * \brief Unpack bits into an array of 64-bit words according to the counts
- * given.
- *
- * \param v Output array.
- * \param in Packed input array.
- * \param bits Number of bits to unpack into the corresponding element of \a v.
- * \param elements Size of the \a v and \a bits arrays.
- */
-static really_inline
-void unpack_bits_64(u64a *v, const u8 *in, const u32 *bits,
- const unsigned int elements);
-
-/*
- * Inline implementations follow.
- */
-
-static really_inline
-void pack_bits_32(char *out, const u32 *v, const u32 *bits,
- const unsigned int elements) {
- u32 write = 0; // accumulator
- u32 idx = 0; // acc holds this many bits
-
- for (unsigned int i = 0; i < elements; i++) {
- assert(bits[i] <= 32);
- write |= (v[i] << idx);
- idx += bits[i];
- if (idx >= 32) {
- unaligned_store_u32(out, write);
- out += 4;
- idx -= 32;
- u32 leftover = bits[i] - idx;
- if (leftover == 32) {
- write = 0;
- } else {
- assert(leftover < 32);
- write = v[i] >> leftover;
- }
- }
- }
-
- // There might be a write left over.
- partial_store_u32(out, write, (idx + 7) / 8);
-}
-
-static really_inline
-void pack_bits_64(char *out, const u64a *v, const u32 *bits,
- const unsigned int elements) {
- u64a write = 0; // accumulator
- u32 idx = 0; // acc holds this many bits
-
- for (unsigned int i = 0; i < elements; i++) {
- assert(bits[i] <= 64);
- write |= (v[i] << idx);
- idx += bits[i];
- if (idx >= 64) {
- unaligned_store_u64a(out, write);
- out += 8;
- idx -= 64;
- u32 leftover = bits[i] - idx;
- if (leftover == 64) {
- write = 0;
- } else {
- assert(leftover < 64);
- write = v[i] >> leftover;
- }
- }
- }
-
- // There might be a write left over.
- DEBUG_PRINTF("partial store of idx=%u\n", idx);
- partial_store_u64a(out, write, (idx + 7) / 8);
-}
-
-static really_inline
-void unpack_bits_32(u32 *v, const u8 *in, const u32 *bits,
- const unsigned int elements) {
- u32 used = 0; // bits used from *in
-
- for (unsigned int i = 0; i < elements; i++) {
- assert(bits[i] <= 32);
- u32 v_out = 0; // accumulator for v[i]
- u32 b = bits[i]; // bits left to read for v[i]
- u32 vidx = 0; // bits written to v[i]
-
- while (b) {
- u32 read = *in >> used;
- u32 bits_read = 8 - used;
-
- if (b <= bits_read) {
- u32 mask = read & ((1U << b) - 1);
- v_out |= mask << vidx;
- vidx += b;
- used += b;
- b = 0;
- if (used < 8) {
- continue; // more from this *in
- }
- } else {
- v_out |= read << vidx;
- vidx += bits_read;
- b -= bits_read;
- }
-
- used = 0;
- in++;
- }
-
- v[i] = v_out;
- }
-}
-
-static really_inline
-void unpack_bits_64(u64a *v, const u8 *in, const u32 *bits,
- const unsigned int elements) {
- u32 used = 0; // bits used from *in
-
- for (unsigned int i = 0; i < elements; i++) {
- assert(bits[i] <= 64);
- u64a v_out = 0; // accumulator for v[i]
- u32 b = bits[i]; // bits left to read for v[i]
- u32 vidx = 0; // bits written to v[i]
-
- while (b) {
- u64a read = *in >> used;
- u32 bits_read = 8 - used;
-
- if (b <= bits_read) {
- u64a mask = read & ((1U << b) - 1);
- v_out |= mask << vidx;
- vidx += b;
- used += b;
- b = 0;
- if (used < 8) {
- continue; // more from this *in
- }
- } else {
- v_out |= read << vidx;
- vidx += bits_read;
- b -= bits_read;
- }
-
- used = 0;
- in++;
- }
-
- v[i] = v_out;
- }
-}
-
-#endif // UTIL_PACK_BITS_H
+/*
+ * Copyright (c) 2015, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * \brief Functions for packing/unpacking arrays.
+ */
+
+#ifndef UTIL_PACK_BITS_H
+#define UTIL_PACK_BITS_H
+
+#include "ue2common.h"
+#include "unaligned.h"
+#include "partial_store.h"
+
+/**
+ * \brief Pack bits from an array of 32-bit words into \a out.
+ *
+ * \param out Output array. Must be large enough to store sum(bits).
+ * \param v Input array.
+ * \param bits Number of low bits in the corresponding element of \a v to pack.
+ * \param elements Size of the \a v and \a bits arrays.
+ */
+static really_inline
+void pack_bits_32(char *out, const u32 *v, const u32 *bits,
+ const unsigned int elements);
+
+/**
+ * \brief Pack bits from an array of 64-bit words into \a out.
+ *
+ * \param out Output array. Must be large enough to store sum(bits).
+ * \param v Input array.
+ * \param bits Number of low bits in the corresponding element of \a v to pack.
+ * \param elements Size of the \a v and \a bits arrays.
+ */
+static really_inline
+void pack_bits_64(char *out, const u64a *v, const u32 *bits,
+ const unsigned int elements);
+
+/**
+ * \brief Unpack bits into an array of 32-bit words according to the counts
+ * given.
+ *
+ * \param v Output array.
+ * \param in Packed input array.
+ * \param bits Number of bits to unpack into the corresponding element of \a v.
+ * \param elements Size of the \a v and \a bits arrays.
+ */
+static really_inline
+void unpack_bits_32(u32 *v, const u8 *in, const u32 *bits,
+ const unsigned int elements);
+
+/**
+ * \brief Unpack bits into an array of 64-bit words according to the counts
+ * given.
+ *
+ * \param v Output array.
+ * \param in Packed input array.
+ * \param bits Number of bits to unpack into the corresponding element of \a v.
+ * \param elements Size of the \a v and \a bits arrays.
+ */
+static really_inline
+void unpack_bits_64(u64a *v, const u8 *in, const u32 *bits,
+ const unsigned int elements);
+
+/*
+ * Inline implementations follow.
+ */
+
+static really_inline
+void pack_bits_32(char *out, const u32 *v, const u32 *bits,
+ const unsigned int elements) {
+ u32 write = 0; // accumulator
+ u32 idx = 0; // acc holds this many bits
+
+ for (unsigned int i = 0; i < elements; i++) {
+ assert(bits[i] <= 32);
+ write |= (v[i] << idx);
+ idx += bits[i];
+ if (idx >= 32) {
+ unaligned_store_u32(out, write);
+ out += 4;
+ idx -= 32;
+ u32 leftover = bits[i] - idx;
+ if (leftover == 32) {
+ write = 0;
+ } else {
+ assert(leftover < 32);
+ write = v[i] >> leftover;
+ }
+ }
+ }
+
+ // There might be a write left over.
+ partial_store_u32(out, write, (idx + 7) / 8);
+}
+
+static really_inline
+void pack_bits_64(char *out, const u64a *v, const u32 *bits,
+ const unsigned int elements) {
+ u64a write = 0; // accumulator
+ u32 idx = 0; // acc holds this many bits
+
+ for (unsigned int i = 0; i < elements; i++) {
+ assert(bits[i] <= 64);
+ write |= (v[i] << idx);
+ idx += bits[i];
+ if (idx >= 64) {
+ unaligned_store_u64a(out, write);
+ out += 8;
+ idx -= 64;
+ u32 leftover = bits[i] - idx;
+ if (leftover == 64) {
+ write = 0;
+ } else {
+ assert(leftover < 64);
+ write = v[i] >> leftover;
+ }
+ }
+ }
+
+ // There might be a write left over.
+ DEBUG_PRINTF("partial store of idx=%u\n", idx);
+ partial_store_u64a(out, write, (idx + 7) / 8);
+}
+
+static really_inline
+void unpack_bits_32(u32 *v, const u8 *in, const u32 *bits,
+ const unsigned int elements) {
+ u32 used = 0; // bits used from *in
+
+ for (unsigned int i = 0; i < elements; i++) {
+ assert(bits[i] <= 32);
+ u32 v_out = 0; // accumulator for v[i]
+ u32 b = bits[i]; // bits left to read for v[i]
+ u32 vidx = 0; // bits written to v[i]
+
+ while (b) {
+ u32 read = *in >> used;
+ u32 bits_read = 8 - used;
+
+ if (b <= bits_read) {
+ u32 mask = read & ((1U << b) - 1);
+ v_out |= mask << vidx;
+ vidx += b;
+ used += b;
+ b = 0;
+ if (used < 8) {
+ continue; // more from this *in
+ }
+ } else {
+ v_out |= read << vidx;
+ vidx += bits_read;
+ b -= bits_read;
+ }
+
+ used = 0;
+ in++;
+ }
+
+ v[i] = v_out;
+ }
+}
+
+static really_inline
+void unpack_bits_64(u64a *v, const u8 *in, const u32 *bits,
+ const unsigned int elements) {
+ u32 used = 0; // bits used from *in
+
+ for (unsigned int i = 0; i < elements; i++) {
+ assert(bits[i] <= 64);
+ u64a v_out = 0; // accumulator for v[i]
+ u32 b = bits[i]; // bits left to read for v[i]
+ u32 vidx = 0; // bits written to v[i]
+
+ while (b) {
+ u64a read = *in >> used;
+ u32 bits_read = 8 - used;
+
+ if (b <= bits_read) {
+ u64a mask = read & ((1U << b) - 1);
+ v_out |= mask << vidx;
+ vidx += b;
+ used += b;
+ b = 0;
+ if (used < 8) {
+ continue; // more from this *in
+ }
+ } else {
+ v_out |= read << vidx;
+ vidx += bits_read;
+ b -= bits_read;
+ }
+
+ used = 0;
+ in++;
+ }
+
+ v[i] = v_out;
+ }
+}
+
+#endif // UTIL_PACK_BITS_H
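End of pack_bits.h. A round-trip sketch of the 32-bit variants; the values are chosen to fit their declared bit counts, and 3 + 9 + 2 = 14 bits pack into two bytes:

    u32 vals[3] = {0x5, 0x1ff, 0x3};
    u32 bits[3] = {3, 9, 2};
    char buf[2]; // ceil(14 / 8) bytes
    pack_bits_32(buf, vals, bits, 3);

    u32 out[3];
    unpack_bits_32(out, (const u8 *)buf, bits, 3);
    // out[i] now equals vals[i], since each value fit in its bit count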
diff --git a/contrib/libs/hyperscan/src/util/partial_store.h b/contrib/libs/hyperscan/src/util/partial_store.h
index a49d1fae1d..8ee23bdb1e 100644
--- a/contrib/libs/hyperscan/src/util/partial_store.h
+++ b/contrib/libs/hyperscan/src/util/partial_store.h
@@ -1,163 +1,163 @@
-/*
- * Copyright (c) 2015, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef PARTIAL_STORE_H
-#define PARTIAL_STORE_H
-
-#include "ue2common.h"
-#include "unaligned.h"
-
-/* loads/stores the least significant bytes of the values. */
-
-static really_inline
-void partial_store_u32(void *ptr, u32 value, u32 numBytes) {
- assert(numBytes <= 4);
- switch (numBytes) {
- case 4:
- unaligned_store_u32(ptr, value);
- break;
- case 3:
- unaligned_store_u16(ptr, (u16)value);
- *((u8 *)ptr + 2) = (u8)(value >> 16);
- break;
- case 2:
- unaligned_store_u16(ptr, (u16)value);
- break;
- case 1:
- *(u8 *)ptr = (u8)value;
- break;
- case 0:
- break;
- }
-}
-
-static really_inline
-u32 partial_load_u32(const void *ptr, u32 numBytes) {
- u32 value;
- assert(numBytes <= 4);
- switch (numBytes) {
- case 4:
- value = unaligned_load_u32(ptr);
- return value;
- case 3:
- value = unaligned_load_u16(ptr);
- value |= ((u32)(*((const u8 *)ptr + 2)) << 16);
- return value;
- case 2:
- value = unaligned_load_u16(ptr);
- return value;
- case 1:
- value = *(const u8 *)ptr;
- return value;
- case 0:
- break;
- }
-
- return 0;
-}
-
-static really_inline
-void partial_store_u64a(void *ptr, u64a value, u32 numBytes) {
- assert(numBytes <= 8);
- switch (numBytes) {
- case 8:
- unaligned_store_u64a(ptr, value);
- break;
- case 7:
- unaligned_store_u32(ptr, (u32)value);
- unaligned_store_u16((u8 *)ptr + 4, (u16)(value >> 32));
- *((u8 *)ptr + 6) = (u8)(value >> 48);
- break;
- case 6:
- unaligned_store_u32(ptr, (u32)value);
- unaligned_store_u16((u8 *)ptr + 4, (u16)(value >> 32));
- break;
- case 5:
- unaligned_store_u32(ptr, (u32)value);
- *((u8 *)ptr + 4) = (u8)(value >> 32);
- break;
- case 4:
- unaligned_store_u32(ptr, (u32)value);
- break;
- case 3:
- unaligned_store_u16(ptr, (u16)value);
- *((u8 *)ptr + 2) = (u8)(value >> 16);
- break;
- case 2:
- unaligned_store_u16(ptr, (u16)value);
- break;
- case 1:
- *(u8 *)ptr = (u8)value;
- break;
- case 0:
- break;
- }
-}
-
-static really_inline
-u64a partial_load_u64a(const void *ptr, u32 numBytes) {
- u64a value;
- assert(numBytes <= 8);
- switch (numBytes) {
- case 8:
- value = unaligned_load_u64a(ptr);
- return value;
- case 7:
- value = unaligned_load_u32(ptr);
- value |= (u64a)unaligned_load_u16((const u8 *)ptr + 4) << 32;
- value |= (u64a)(*((const u8 *)ptr + 6)) << 48;
- return value;
- case 6:
- value = unaligned_load_u32(ptr);
- value |= (u64a)unaligned_load_u16((const u8 *)ptr + 4) << 32;
- return value;
- case 5:
- value = unaligned_load_u32(ptr);
- value |= (u64a)(*((const u8 *)ptr + 4)) << 32;
- return value;
- case 4:
- value = unaligned_load_u32(ptr);
- return value;
- case 3:
- value = unaligned_load_u16(ptr);
- value |= (u64a)(*((const u8 *)ptr + 2)) << 16;
- return value;
- case 2:
- value = unaligned_load_u16(ptr);
- return value;
- case 1:
- value = *(const u8 *)ptr;
- return value;
- case 0:
- break;
- }
-
- return 0;
-}
-
-#endif
+/*
+ * Copyright (c) 2015, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PARTIAL_STORE_H
+#define PARTIAL_STORE_H
+
+#include "ue2common.h"
+#include "unaligned.h"
+
+/* loads/stores the least significant bytes of the values. */
+
+static really_inline
+void partial_store_u32(void *ptr, u32 value, u32 numBytes) {
+ assert(numBytes <= 4);
+ switch (numBytes) {
+ case 4:
+ unaligned_store_u32(ptr, value);
+ break;
+ case 3:
+ unaligned_store_u16(ptr, (u16)value);
+ *((u8 *)ptr + 2) = (u8)(value >> 16);
+ break;
+ case 2:
+ unaligned_store_u16(ptr, (u16)value);
+ break;
+ case 1:
+ *(u8 *)ptr = (u8)value;
+ break;
+ case 0:
+ break;
+ }
+}
+
+static really_inline
+u32 partial_load_u32(const void *ptr, u32 numBytes) {
+ u32 value;
+ assert(numBytes <= 4);
+ switch (numBytes) {
+ case 4:
+ value = unaligned_load_u32(ptr);
+ return value;
+ case 3:
+ value = unaligned_load_u16(ptr);
+ value |= ((u32)(*((const u8 *)ptr + 2)) << 16);
+ return value;
+ case 2:
+ value = unaligned_load_u16(ptr);
+ return value;
+ case 1:
+ value = *(const u8 *)ptr;
+ return value;
+ case 0:
+ break;
+ }
+
+ return 0;
+}
+
+static really_inline
+void partial_store_u64a(void *ptr, u64a value, u32 numBytes) {
+ assert(numBytes <= 8);
+ switch (numBytes) {
+ case 8:
+ unaligned_store_u64a(ptr, value);
+ break;
+ case 7:
+ unaligned_store_u32(ptr, (u32)value);
+ unaligned_store_u16((u8 *)ptr + 4, (u16)(value >> 32));
+ *((u8 *)ptr + 6) = (u8)(value >> 48);
+ break;
+ case 6:
+ unaligned_store_u32(ptr, (u32)value);
+ unaligned_store_u16((u8 *)ptr + 4, (u16)(value >> 32));
+ break;
+ case 5:
+ unaligned_store_u32(ptr, (u32)value);
+ *((u8 *)ptr + 4) = (u8)(value >> 32);
+ break;
+ case 4:
+ unaligned_store_u32(ptr, (u32)value);
+ break;
+ case 3:
+ unaligned_store_u16(ptr, (u16)value);
+ *((u8 *)ptr + 2) = (u8)(value >> 16);
+ break;
+ case 2:
+ unaligned_store_u16(ptr, (u16)value);
+ break;
+ case 1:
+ *(u8 *)ptr = (u8)value;
+ break;
+ case 0:
+ break;
+ }
+}
+
+static really_inline
+u64a partial_load_u64a(const void *ptr, u32 numBytes) {
+ u64a value;
+ assert(numBytes <= 8);
+ switch (numBytes) {
+ case 8:
+ value = unaligned_load_u64a(ptr);
+ return value;
+ case 7:
+ value = unaligned_load_u32(ptr);
+ value |= (u64a)unaligned_load_u16((const u8 *)ptr + 4) << 32;
+ value |= (u64a)(*((const u8 *)ptr + 6)) << 48;
+ return value;
+ case 6:
+ value = unaligned_load_u32(ptr);
+ value |= (u64a)unaligned_load_u16((const u8 *)ptr + 4) << 32;
+ return value;
+ case 5:
+ value = unaligned_load_u32(ptr);
+ value |= (u64a)(*((const u8 *)ptr + 4)) << 32;
+ return value;
+ case 4:
+ value = unaligned_load_u32(ptr);
+ return value;
+ case 3:
+ value = unaligned_load_u16(ptr);
+ value |= (u64a)(*((const u8 *)ptr + 2)) << 16;
+ return value;
+ case 2:
+ value = unaligned_load_u16(ptr);
+ return value;
+ case 1:
+ value = *(const u8 *)ptr;
+ return value;
+ case 0:
+ break;
+ }
+
+ return 0;
+}
+
+#endif
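End of partial_store.h. A round-trip sketch: five bytes hold any value below 2^40:

    char buf[5];
    u64a v = 0x1234567890ULL; // 37 significant bits, fits in 5 bytes
    partial_store_u64a(buf, v, 5);
    assert(partial_load_u64a(buf, 5) == v);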
diff --git a/contrib/libs/hyperscan/src/util/partitioned_set.h b/contrib/libs/hyperscan/src/util/partitioned_set.h
index 8a4d3dd9e1..e6f907edd1 100644
--- a/contrib/libs/hyperscan/src/util/partitioned_set.h
+++ b/contrib/libs/hyperscan/src/util/partitioned_set.h
@@ -1,263 +1,263 @@
-/*
+/*
* Copyright (c) 2015-2017, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef PARTITIONED_SET_H
-#define PARTITIONED_SET_H
-
-#include "container.h"
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PARTITIONED_SET_H
+#define PARTITIONED_SET_H
+
+#include "container.h"
#include "noncopyable.h"
#include "flat_containers.h"
-#include "ue2common.h"
-
-#include <algorithm>
-#include <vector>
-
-#include <boost/dynamic_bitset.hpp>
-
-namespace ue2 {
-
-static constexpr size_t INVALID_SUBSET = ~(size_t)0;
-
-/**
- * partition_set represents a partitioning of a set of integers [0, n) into
- * disjoint non-empty subsets.
- *
- * The subsets themselves are also indexed by integers.
- *
- * The underlying integer type for the set members is parameterized.
- */
-
-template<typename T>
+#include "ue2common.h"
+
+#include <algorithm>
+#include <vector>
+
+#include <boost/dynamic_bitset.hpp>
+
+namespace ue2 {
+
+static constexpr size_t INVALID_SUBSET = ~(size_t)0;
+
+/**
+ * partitioned_set represents a partitioning of a set of integers [0, n) into
+ * disjoint non-empty subsets.
+ *
+ * The subsets themselves are also indexed by integers.
+ *
+ * The underlying integer type for the set members is parameterized.
+ */
+
+template<typename T>
class partitioned_set : noncopyable {
-public:
- class subset {
- public:
- typedef typename std::vector<T>::const_iterator const_iterator;
-
- size_t size() const {
- assert(members.size());
- return members.size();
- }
-
- const_iterator begin() const {
- return members.begin();
- }
-
- const_iterator end() const {
- return members.end();
- }
-
- private:
- std::vector<T> members; /**< sorted members of the subset */
-
- friend class partitioned_set;
- };
-
- /** returns the number of subsets in the partition */
- size_t size() const { return subsets.size(); }
-
- /** returns the subset with the given index */
- const subset &operator[](size_t subset_index) const {
- assert(subset_index < size());
- return subsets[subset_index];
- }
-
- /**
- * Splits the subset with the given subset_index based on whether its
- * members are also members of the splitter set.
- *
- * The smaller of the intersection and difference is placed into a new
- * subset, the index of which is returned. The larger part remains with the
- * subset index.
- *
- * If the set was not split (due to there being no overlap with splitter or
- * being a complete subset), INVALID_SUBSET is returned.
- */
+public:
+ class subset {
+ public:
+ typedef typename std::vector<T>::const_iterator const_iterator;
+
+ size_t size() const {
+ assert(members.size());
+ return members.size();
+ }
+
+ const_iterator begin() const {
+ return members.begin();
+ }
+
+ const_iterator end() const {
+ return members.end();
+ }
+
+ private:
+ std::vector<T> members; /**< sorted members of the subset */
+
+ friend class partitioned_set;
+ };
+
+ /** returns the number of subsets in the partition */
+ size_t size() const { return subsets.size(); }
+
+ /** returns the subset with the given index */
+ const subset &operator[](size_t subset_index) const {
+ assert(subset_index < size());
+ return subsets[subset_index];
+ }
+
+ /**
+ * Splits the subset with the given subset_index based on whether its
+ * members are also members of the splitter set.
+ *
+ * The smaller of the intersection and difference is placed into a new
+ * subset, the index of which is returned. The larger part remains under
+ * the original subset index.
+ *
+ * If the set was not split (due to there being no overlap with splitter or
+ * being a complete subset), INVALID_SUBSET is returned.
+ */
size_t split(size_t subset_index, const flat_set<T> &splitter) {
- assert(!splitter.empty());
- if (splitter.empty()) {
- return INVALID_SUBSET;
- }
-
- subset &orig = subsets[subset_index];
-
- assert(orig.size());
-
- split_temp_diff.clear();
- split_temp_inter.clear();
-
- auto sp_it = splitter.begin();
- auto sp_e = splitter.end();
-
- /* subset members are always in sorted order. */
- assert(std::is_sorted(orig.members.begin(), orig.members.end()));
-
- if (orig.members.back() < *sp_it) {
- /* first splitter is greater than all our members */
- return INVALID_SUBSET;
- }
-
- if (orig.members.front() > *splitter.rbegin()) {
- /* last splitter is less than all our members */
- return INVALID_SUBSET;
- }
-
- for (auto it = orig.members.begin(); it != orig.members.end(); ++it) {
+ assert(!splitter.empty());
+ if (splitter.empty()) {
+ return INVALID_SUBSET;
+ }
+
+ subset &orig = subsets[subset_index];
+
+ assert(orig.size());
+
+ split_temp_diff.clear();
+ split_temp_inter.clear();
+
+ auto sp_it = splitter.begin();
+ auto sp_e = splitter.end();
+
+ /* subset members are always in sorted order. */
+ assert(std::is_sorted(orig.members.begin(), orig.members.end()));
+
+ if (orig.members.back() < *sp_it) {
+ /* first splitter is greater than all our members */
+ return INVALID_SUBSET;
+ }
+
+ if (orig.members.front() > *splitter.rbegin()) {
+ /* last splitter is less than all our members */
+ return INVALID_SUBSET;
+ }
+
+ for (auto it = orig.members.begin(); it != orig.members.end(); ++it) {
const auto &member = *it;
- assert(member < member_to_subset.size());
-
+ assert(member < member_to_subset.size());
+
sp_it = std::lower_bound(sp_it, sp_e, member);
- if (sp_it == sp_e) {
- split_temp_diff.insert(split_temp_diff.end(), it,
- orig.members.end());
- break;
- }
-
- if (*sp_it > member) {
- split_temp_diff.push_back(member);
- } else {
- split_temp_inter.push_back(member);
- }
- }
-
- assert(split_temp_diff.size() + split_temp_inter.size() == orig.size());
-
- if (split_temp_inter.empty()) {
- assert(split_temp_diff == orig.members);
- return INVALID_SUBSET;
- }
-
- if (split_temp_diff.empty()) {
- assert(split_temp_inter == orig.members);
- return INVALID_SUBSET;
- }
-
- assert(MIN(split_temp_inter[0], split_temp_diff[0]) == orig.members[0]);
-
- /* work out which is the bigger half */
- std::vector<T> *big;
- std::vector<T> *small;
- if (split_temp_diff.size() > split_temp_inter.size()) {
- big = &split_temp_diff;
- small = &split_temp_inter;
- } else {
- big = &split_temp_inter;
- small = &split_temp_diff;
- }
-
- /* larger subset replaces the input subset */
- std::vector<T> temp_i;
- insert(&temp_i, temp_i.end(), *big);
- orig.members.swap(temp_i);
-
- /* smaller subset is placed in the new subset */
- size_t new_index = subsets.size();
- subsets.push_back(subset());
- insert(&subsets.back().members, subsets.back().members.end(), *small);
-
- for (const auto &e : *small) {
- member_to_subset[e] = new_index;
- }
-
- return new_index;
- }
-
- /**
- * Returns all subsets which have a member in keys.
- */
+ if (sp_it == sp_e) {
+ split_temp_diff.insert(split_temp_diff.end(), it,
+ orig.members.end());
+ break;
+ }
+
+ if (*sp_it > member) {
+ split_temp_diff.push_back(member);
+ } else {
+ split_temp_inter.push_back(member);
+ }
+ }
+
+ assert(split_temp_diff.size() + split_temp_inter.size() == orig.size());
+
+ if (split_temp_inter.empty()) {
+ assert(split_temp_diff == orig.members);
+ return INVALID_SUBSET;
+ }
+
+ if (split_temp_diff.empty()) {
+ assert(split_temp_inter == orig.members);
+ return INVALID_SUBSET;
+ }
+
+ assert(MIN(split_temp_inter[0], split_temp_diff[0]) == orig.members[0]);
+
+ /* work out which is the bigger half */
+ std::vector<T> *big;
+ std::vector<T> *small;
+ if (split_temp_diff.size() > split_temp_inter.size()) {
+ big = &split_temp_diff;
+ small = &split_temp_inter;
+ } else {
+ big = &split_temp_inter;
+ small = &split_temp_diff;
+ }
+
+ /* larger subset replaces the input subset */
+ std::vector<T> temp_i;
+ insert(&temp_i, temp_i.end(), *big);
+ orig.members.swap(temp_i);
+
+ /* smaller subset is placed in the new subset */
+ size_t new_index = subsets.size();
+ subsets.push_back(subset());
+ insert(&subsets.back().members, subsets.back().members.end(), *small);
+
+ for (const auto &e : *small) {
+ member_to_subset[e] = new_index;
+ }
+
+ return new_index;
+ }
+
+ /**
+ * Returns all subsets which have a member in keys.
+ */
void find_overlapping(const flat_set<T> &keys,
- std::vector<size_t> *containing) const {
- boost::dynamic_bitset<> seen(subsets.size()); // all zero by default.
-
- for (const auto &key : keys) {
- assert(key < member_to_subset.size());
- size_t sub = member_to_subset[key];
- assert(sub < subsets.size());
- seen.set(sub);
- }
-
- for (size_t i = seen.find_first(); i != seen.npos;
- i = seen.find_next(i)) {
- containing->push_back(i);
- }
- }
-
- /**
- * Creates a partitioned set containing elements [0, state_to_subset.size() )
- *
- * The initial subset that an element belongs to is given by the
- * corresponding entry in state_to_subset. The subsets should be identified
- * by a dense range of indices starting from 0.
- */
- explicit partitioned_set(const std::vector<size_t> &state_to_subset) {
- assert(!state_to_subset.empty());
-
- subsets.reserve(state_to_subset.size());
- member_to_subset.resize(state_to_subset.size());
-
- split_temp_inter.reserve(state_to_subset.size());
- split_temp_diff.reserve(state_to_subset.size());
-
- size_t subset_count = 0;
- for (const auto &sub : state_to_subset) {
- assert(sub != INVALID_SUBSET);
- ENSURE_AT_LEAST(&subset_count, sub + 1);
- }
- assert(subset_count <= state_to_subset.size());
-
- subsets.resize(subset_count);
- for (size_t i = 0; i < state_to_subset.size(); i++) {
- /* ensure that our underlying type is big enough to hold all our
- * set members */
- assert(i == (size_t)(T)i);
-
- size_t sub = state_to_subset[i];
- assert(sub < subsets.size());
-
- member_to_subset[i] = sub;
- subsets[sub].members.push_back(i);
- }
-
- /* none of the subsets should be empty */
- assert(std::all_of(subsets.begin(), subsets.end(),
- [](const subset &sub){ return sub.size() > 0; }));
- }
-
-private:
- std::vector<size_t> member_to_subset;
- std::vector<subset> subsets;
-
- std::vector<T> split_temp_inter; /**< used internally by split to hold the
- * intersection. */
- std::vector<T> split_temp_diff; /**< used internally by split to hold the
- * set difference. */
-};
-
-} // namespace
-
-#endif
+ std::vector<size_t> *containing) const {
+ boost::dynamic_bitset<> seen(subsets.size()); // all zero by default.
+
+ for (const auto &key : keys) {
+ assert(key < member_to_subset.size());
+ size_t sub = member_to_subset[key];
+ assert(sub < subsets.size());
+ seen.set(sub);
+ }
+
+ for (size_t i = seen.find_first(); i != seen.npos;
+ i = seen.find_next(i)) {
+ containing->push_back(i);
+ }
+ }
+
+ /**
+ * Creates a partitioned set containing elements [0, state_to_subset.size())
+ *
+ * The initial subset that an element belongs to is given by the
+ * corresponding entry in state_to_subset. The subsets should be identified
+ * by a dense range of indices starting from 0.
+ */
+ explicit partitioned_set(const std::vector<size_t> &state_to_subset) {
+ assert(!state_to_subset.empty());
+
+ subsets.reserve(state_to_subset.size());
+ member_to_subset.resize(state_to_subset.size());
+
+ split_temp_inter.reserve(state_to_subset.size());
+ split_temp_diff.reserve(state_to_subset.size());
+
+ size_t subset_count = 0;
+ for (const auto &sub : state_to_subset) {
+ assert(sub != INVALID_SUBSET);
+ ENSURE_AT_LEAST(&subset_count, sub + 1);
+ }
+ assert(subset_count <= state_to_subset.size());
+
+ subsets.resize(subset_count);
+ for (size_t i = 0; i < state_to_subset.size(); i++) {
+ /* ensure that our underlying type is big enough to hold all our
+ * set members */
+ assert(i == (size_t)(T)i);
+
+ size_t sub = state_to_subset[i];
+ assert(sub < subsets.size());
+
+ member_to_subset[i] = sub;
+ subsets[sub].members.push_back(i);
+ }
+
+ /* none of the subsets should be empty */
+ assert(std::all_of(subsets.begin(), subsets.end(),
+ [](const subset &sub){ return sub.size() > 0; }));
+ }
+
+private:
+ std::vector<size_t> member_to_subset;
+ std::vector<subset> subsets;
+
+ std::vector<T> split_temp_inter; /**< used internally by split to hold the
+ * intersection. */
+ std::vector<T> split_temp_diff; /**< used internally by split to hold the
+ * set difference. */
+};
+
+} // namespace
+
+#endif
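split() above is the core step of partition refinement (as used in
Hopcroft-style DFA minimisation): a subset is divided into the members that
are also in the splitter and those that are not, and the smaller half moves
to a freshly allocated index. A standalone sketch of that refinement step
with standard algorithms (illustrative only, not the ue2 class):

```cpp
#include <algorithm>
#include <cassert>
#include <iterator>
#include <vector>

int main() {
    // Both sequences are sorted, as partitioned_set guarantees for members.
    std::vector<int> subset = {1, 2, 3, 5, 8};
    std::vector<int> splitter = {2, 3, 9};

    std::vector<int> inter, diff;
    std::set_intersection(subset.begin(), subset.end(),
                          splitter.begin(), splitter.end(),
                          std::back_inserter(inter));
    std::set_difference(subset.begin(), subset.end(),
                        splitter.begin(), splitter.end(),
                        std::back_inserter(diff));

    assert(inter == (std::vector<int>{2, 3}));
    assert(diff == (std::vector<int>{1, 5, 8}));

    // split() keeps the larger part ({1, 5, 8}) under the original subset
    // index and moves the smaller part ({2, 3}) into a new subset.
    return 0;
}
```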
diff --git a/contrib/libs/hyperscan/src/util/popcount.h b/contrib/libs/hyperscan/src/util/popcount.h
index eb08f6b1b2..f75397dd35 100644
--- a/contrib/libs/hyperscan/src/util/popcount.h
+++ b/contrib/libs/hyperscan/src/util/popcount.h
@@ -1,74 +1,74 @@
-/*
+/*
* Copyright (c) 2015-2017, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \file
- * \brief Platform specific popcount functions
- */
-
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * \brief Platform specific popcount functions
+ */
+
#ifndef UTIL_POPCOUNT_H_
#define UTIL_POPCOUNT_H_
-
-#include "ue2common.h"
+
+#include "ue2common.h"
#include "util/arch.h"
-
-static really_inline
-u32 popcount32(u32 x) {
-#if defined(HAVE_POPCOUNT_INSTR)
- // Single-instruction builtin.
+
+static really_inline
+u32 popcount32(u32 x) {
+#if defined(HAVE_POPCOUNT_INSTR)
+ // Single-instruction builtin.
return _mm_popcnt_u32(x);
-#else
+#else
// Fast branch-free version from bit-twiddling hacks as older Intel
- // processors do not have a POPCNT instruction.
- x -= (x >> 1) & 0x55555555;
- x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
- return (((x + (x >> 4)) & 0xf0f0f0f) * 0x1010101) >> 24;
-#endif
-}
-
-static really_inline
-u32 popcount64(u64a x) {
+ // processors do not have a POPCNT instruction.
+ x -= (x >> 1) & 0x55555555;
+ x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
+ return (((x + (x >> 4)) & 0xf0f0f0f) * 0x1010101) >> 24;
+#endif
+}
+
+static really_inline
+u32 popcount64(u64a x) {
#if defined(ARCH_X86_64)
# if defined(HAVE_POPCOUNT_INSTR)
- // Single-instruction builtin.
+ // Single-instruction builtin.
return (u32)_mm_popcnt_u64(x);
# else
// Fast branch-free version from bit-twiddling hacks as older Intel
- // processors do not have a POPCNT instruction.
- x -= (x >> 1) & 0x5555555555555555;
- x = (x & 0x3333333333333333) + ((x >> 2) & 0x3333333333333333);
- x = (x + (x >> 4)) & 0x0f0f0f0f0f0f0f0f;
- return (x * 0x0101010101010101) >> 56;
+ // processors do not have a POPCNT instruction.
+ x -= (x >> 1) & 0x5555555555555555;
+ x = (x & 0x3333333333333333) + ((x >> 2) & 0x3333333333333333);
+ x = (x + (x >> 4)) & 0x0f0f0f0f0f0f0f0f;
+ return (x * 0x0101010101010101) >> 56;
# endif
-#else
- // Synthesise from two 32-bit cases.
- return popcount32(x >> 32) + popcount32(x);
-#endif
-}
-
+#else
+ // Synthesise from two 32-bit cases.
+ return popcount32(x >> 32) + popcount32(x);
+#endif
+}
+
#endif /* UTIL_POPCOUNT_H_ */
-
+
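The fallback path above is the classic SWAR bit count. A standalone sanity
check of that exact sequence against the GCC/Clang builtin (assuming such a
compiler is available):

```cpp
#include <cassert>
#include <cstdint>

// Same branch-free sequence as the non-POPCNT path of popcount32().
static uint32_t popcount32_fallback(uint32_t x) {
    x -= (x >> 1) & 0x55555555;
    x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
    return (((x + (x >> 4)) & 0x0f0f0f0f) * 0x01010101) >> 24;
}

int main() {
    for (uint32_t x : {0u, 1u, 0x80000000u, 0x12345678u, 0xffffffffu}) {
        assert(popcount32_fallback(x) == (uint32_t)__builtin_popcount(x));
    }
    return 0;
}
```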
diff --git a/contrib/libs/hyperscan/src/util/pqueue.h b/contrib/libs/hyperscan/src/util/pqueue.h
index f0ba12e70f..8e7055bd76 100644
--- a/contrib/libs/hyperscan/src/util/pqueue.h
+++ b/contrib/libs/hyperscan/src/util/pqueue.h
@@ -1,109 +1,109 @@
-/*
- * Copyright (c) 2015, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef PQUEUE_H
-#define PQUEUE_H
-
-#include "ue2common.h"
-
-static really_inline u32
-pq_left(u32 i) {
- return (i << 1) + 1;
-}
-
-static really_inline u32
-pq_right(u32 i) {
- return (i << 1) + 2;
-}
-
-static really_inline
-u32 pq_parent(u32 i) {
- return (i - 1) >> 1;
-}
-
-static really_inline
-void pq_sift(PQ_T *items, u32 start, u32 end) {
- u32 j = start;
- PQ_T j_temp = items[j];
-
- while (pq_left(j) < end) {
- u32 max_child;
-
- if (pq_right(j) < end && PQ_COMP(items, pq_right(j), pq_left(j))) {
- max_child = pq_right(j);
- } else {
- max_child = pq_left(j);
- }
-
- if (PQ_COMP_B(items, max_child, j_temp)) {
- items[j] = items[max_child];
- j = max_child;
- } else {
- /* j is already less than its children. We know heap property
- * is already maintained for children we are done */
- break;
- }
- }
- items[j] = j_temp;
-}
-
-static really_inline
-PQ_T *pq_top(PQ_T *items) {
- return items;
-}
-
-static really_inline
-void pq_pop(PQ_T *items, u32 item_count) {
- item_count--;
- items[0] = items[item_count];
- pq_sift(items, 0, item_count);
-}
-
-static really_inline
-void pq_insert(PQ_T *items, u32 item_count, PQ_T new_item) {
- u32 pos = item_count;
- while (pos) {
- u32 parent = pq_parent(pos);
- if (!PQ_COMP_B(items, parent, new_item)) {
- items[pos] = items[parent];
- pos = parent;
- } else {
- break;
- }
- }
- items[pos] = new_item;
-}
-
-static really_inline
-void pq_replace_top(PQ_T *items, u32 item_count, PQ_T new_item) {
- items[0] = new_item;
- pq_sift(items, 0, item_count);
-}
-
-#endif
-
+/*
+ * Copyright (c) 2015, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PQUEUE_H
+#define PQUEUE_H
+
+#include "ue2common.h"
+
+static really_inline u32
+pq_left(u32 i) {
+ return (i << 1) + 1;
+}
+
+static really_inline u32
+pq_right(u32 i) {
+ return (i << 1) + 2;
+}
+
+static really_inline
+u32 pq_parent(u32 i) {
+ return (i - 1) >> 1;
+}
+
+static really_inline
+void pq_sift(PQ_T *items, u32 start, u32 end) {
+ u32 j = start;
+ PQ_T j_temp = items[j];
+
+ while (pq_left(j) < end) {
+ u32 max_child;
+
+ if (pq_right(j) < end && PQ_COMP(items, pq_right(j), pq_left(j))) {
+ max_child = pq_right(j);
+ } else {
+ max_child = pq_left(j);
+ }
+
+ if (PQ_COMP_B(items, max_child, j_temp)) {
+ items[j] = items[max_child];
+ j = max_child;
+ } else {
+ /* j is already less than its children. We know the heap property
+ * is already maintained for the children, so we are done */
+ break;
+ }
+ }
+ items[j] = j_temp;
+}
+
+static really_inline
+PQ_T *pq_top(PQ_T *items) {
+ return items;
+}
+
+static really_inline
+void pq_pop(PQ_T *items, u32 item_count) {
+ item_count--;
+ items[0] = items[item_count];
+ pq_sift(items, 0, item_count);
+}
+
+static really_inline
+void pq_insert(PQ_T *items, u32 item_count, PQ_T new_item) {
+ u32 pos = item_count;
+ while (pos) {
+ u32 parent = pq_parent(pos);
+ if (!PQ_COMP_B(items, parent, new_item)) {
+ items[pos] = items[parent];
+ pos = parent;
+ } else {
+ break;
+ }
+ }
+ items[pos] = new_item;
+}
+
+static really_inline
+void pq_replace_top(PQ_T *items, u32 item_count, PQ_T new_item) {
+ items[0] = new_item;
+ pq_sift(items, 0, item_count);
+}
+
+#endif
+
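pqueue.h is a macro template: clients define PQ_T (the element type) and the
PQ_COMP/PQ_COMP_B comparators before including it. A standalone sketch of a
max-heap instantiation, with pq_insert's sift-up expanded inline (the demo
names and harness are illustrative, not part of the header):

```cpp
#include <cassert>
#include <cstdint>

// Hypothetical client parameters: a max-heap of uint32_t.
typedef uint32_t PQ_T;
#define PQ_COMP(items, i, j) ((items)[i] > (items)[j])
#define PQ_COMP_B(items, i, val) ((items)[i] > (val))

// Same sift-up logic as pq_insert() in the header.
static void pq_insert_demo(PQ_T *items, uint32_t item_count, PQ_T new_item) {
    uint32_t pos = item_count;
    while (pos) {
        uint32_t parent = (pos - 1) >> 1; // pq_parent()
        if (!PQ_COMP_B(items, parent, new_item)) {
            items[pos] = items[parent];   // shift parent down
            pos = parent;
        } else {
            break;
        }
    }
    items[pos] = new_item;
}

int main() {
    PQ_T heap[8];
    uint32_t count = 0;
    for (PQ_T v : {3u, 1u, 4u, 1u, 5u}) {
        pq_insert_demo(heap, count++, v);
    }
    assert(heap[0] == 5); // pq_top() returns &items[0]: the maximum
    return 0;
}
```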
diff --git a/contrib/libs/hyperscan/src/util/queue_index_factory.h b/contrib/libs/hyperscan/src/util/queue_index_factory.h
index e8f7028ec5..5db03a1003 100644
--- a/contrib/libs/hyperscan/src/util/queue_index_factory.h
+++ b/contrib/libs/hyperscan/src/util/queue_index_factory.h
@@ -1,52 +1,52 @@
-/*
+/*
* Copyright (c) 2015-2017, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \file
- * \brief QueueIndexFactory used to hand out NFA queues at compile time.
- */
-#ifndef UTIL_QUEUE_INDEX_FACTORY_H
-#define UTIL_QUEUE_INDEX_FACTORY_H
-
-#include "ue2common.h"
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * \brief QueueIndexFactory used to hand out NFA queues at compile time.
+ */
+#ifndef UTIL_QUEUE_INDEX_FACTORY_H
+#define UTIL_QUEUE_INDEX_FACTORY_H
+
+#include "ue2common.h"
#include "util/noncopyable.h"
-
-namespace ue2 {
-
+
+namespace ue2 {
+
class QueueIndexFactory : noncopyable {
-public:
- QueueIndexFactory() : val(0) {}
- u32 get_queue() { return val++; }
- u32 allocated_count() const { return val; }
-
-private:
- u32 val;
-};
-
-} // namespace ue2
-
-#endif // UTIL_QUEUE_INDEX_FACTORY_H
+public:
+ QueueIndexFactory() : val(0) {}
+ u32 get_queue() { return val++; }
+ u32 allocated_count() const { return val; }
+
+private:
+ u32 val;
+};
+
+} // namespace ue2
+
+#endif // UTIL_QUEUE_INDEX_FACTORY_H
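QueueIndexFactory is a monotonic counter behind a factory interface; each NFA
engine placed into the bytecode takes the next index. A hypothetical usage
sketch (assumes the ue2 headers; not a standalone program):

```cpp
ue2::QueueIndexFactory qif;
u32 q0 = qif.get_queue();            // 0
u32 q1 = qif.get_queue();            // 1
assert(q0 != q1);
assert(qif.allocated_count() == 2);  // total queue indices handed out
```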
diff --git a/contrib/libs/hyperscan/src/util/report.h b/contrib/libs/hyperscan/src/util/report.h
index ee830d0f10..3006527f8b 100644
--- a/contrib/libs/hyperscan/src/util/report.h
+++ b/contrib/libs/hyperscan/src/util/report.h
@@ -1,51 +1,51 @@
-/*
+/*
* Copyright (c) 2015-2018, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \file
- * \brief Report structure used to manage data associated with a report at
- * compile time.
- */
-
-#ifndef UTIL_REPORT_H
-#define UTIL_REPORT_H
-
-#include "ue2common.h"
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * \brief Report structure used to manage data associated with a report at
+ * compile time.
+ */
+
+#ifndef UTIL_REPORT_H
+#define UTIL_REPORT_H
+
+#include "ue2common.h"
#include "util/exhaust.h" // for INVALID_EKEY
#include "util/logical.h" // for INVALID_LKEY
#include "util/hash.h"
#include "util/order_check.h"
-
-#include <cassert>
-
-namespace ue2 {
-
-class ReportManager;
-
+
+#include <cassert>
+
+namespace ue2 {
+
+class ReportManager;
+
enum ReportType {
EXTERNAL_CALLBACK,
EXTERNAL_CALLBACK_SOM_REL,
@@ -67,47 +67,47 @@ enum ReportType {
EXTERNAL_CALLBACK_SOM_PASS
};
-/**
- * \brief All the data we use for handling a match.
- *
- * Includes extparam constraints and bounds, exhaustion/dedupe keys, offset
- * adjustment and SOM information.
- *
+/**
+ * \brief All the data we use for handling a match.
+ *
+ * Includes extparam constraints and bounds, exhaustion/dedupe keys, offset
+ * adjustment and SOM information.
+ *
 * The data in this structure eventually becomes a list of Rose program
 * instructions.
- */
-struct Report {
+ */
+struct Report {
Report(ReportType type_in, u32 onmatch_in)
: type(type_in), onmatch(onmatch_in) {}
-
- /** \brief True if this report has bounds from extended parameters, i.e.
- * min offset, max offset, min length. */
- bool hasBounds() const {
- return minOffset > 0 || maxOffset < MAX_OFFSET || minLength > 0;
- }
-
+
+ /** \brief True if this report has bounds from extended parameters, i.e.
+ * min offset, max offset, min length. */
+ bool hasBounds() const {
+ return minOffset > 0 || maxOffset < MAX_OFFSET || minLength > 0;
+ }
+
/** \brief Type of this report. */
ReportType type;
-
- /** \brief use SOM for minLength, but don't report it to user callback. */
- bool quashSom = false;
-
- /** \brief min offset in the stream at which this report can match. */
- u64a minOffset = 0;
-
- /** \brief max offset in the stream at which this report can match. */
- u64a maxOffset = MAX_OFFSET;
-
- /** \brief min match length (start of match to current offset) */
- u64a minLength = 0;
-
- /** \brief Exhaustion key.
- *
- * If exhaustible, the ekey to check before reporting a match.
- * Additionally after reporting a match the ekey will be set. If not
- * exhaustible, this will be INVALID_EKEY. */
- u32 ekey = INVALID_EKEY;
-
+
+ /** \brief use SOM for minLength, but don't report it to user callback. */
+ bool quashSom = false;
+
+ /** \brief min offset in the stream at which this report can match. */
+ u64a minOffset = 0;
+
+ /** \brief max offset in the stream at which this report can match. */
+ u64a maxOffset = MAX_OFFSET;
+
+ /** \brief min match length (start of match to current offset) */
+ u64a minLength = 0;
+
+ /** \brief Exhaustion key.
+ *
+ * If exhaustible, the ekey to check before reporting a match.
+ * Additionally after reporting a match the ekey will be set. If not
+ * exhaustible, this will be INVALID_EKEY. */
+ u32 ekey = INVALID_EKEY;
+
/** \brief Logical Combination key in each combination.
*
* If in Logical Combination, the lkey to check before reporting a match.
@@ -118,95 +118,95 @@ struct Report {
/** \brief Quiet flag for expressions in any logical combination. */
bool quiet = false;
- /** \brief Adjustment to add to the match offset when we report a match.
- *
- * This is usually used for reports attached to states that form part of a
- * zero-width assertion, like '$'. */
- s32 offsetAdjust = 0;
-
- /** \brief Match report ID, for external reports.
- *
- * - external callback -> external report id
- * - internal_som_* -> som loc to modify
- * - INTERNAL_ROSE_CHAIN -> top event to push on
- * - otherwise -> target subnfa */
- u32 onmatch;
-
- /** \brief Index of the reverse nfa.
- *
- * Used by EXTERNAL_CALLBACK_SOM_REV_NFA and
- * INTERNAL_SOM_LOC_SET_SOM_REV_NFA*.
- */
- u32 revNfaIndex = 0;
-
- /** \brief SOM distance value, use varies according to type.
- *
- * - for EXTERNAL_CALLBACK_SOM_REL, from-offset is this many bytes
- * before the to-offset.
- * - for EXTERNAL_CALLBACK_SOM_ABS, set from-offset to this value.
- * - for INTERNAL_SOM_LOC_COPY*, som location read_from.
- */
- u64a somDistance = 0;
-
- /** \brief Number of bytes behind us that we are allowed to squash
- * identical top events on the queue.
- *
- * Used by INTERNAL_ROSE_CHAIN.
- */
- u64a topSquashDistance = 0;
-};
-
-static inline
-bool isExternalReport(const Report &r) {
- switch (r.type) {
- case INTERNAL_SOM_LOC_SET:
- case INTERNAL_SOM_LOC_SET_IF_UNSET:
- case INTERNAL_SOM_LOC_SET_IF_WRITABLE:
- case INTERNAL_SOM_LOC_SET_SOM_REV_NFA:
- case INTERNAL_SOM_LOC_SET_SOM_REV_NFA_IF_UNSET:
- case INTERNAL_SOM_LOC_SET_SOM_REV_NFA_IF_WRITABLE:
- case INTERNAL_SOM_LOC_COPY:
- case INTERNAL_SOM_LOC_COPY_IF_WRITABLE:
- case INTERNAL_SOM_LOC_MAKE_WRITABLE:
- case INTERNAL_SOM_LOC_SET_FROM:
- case INTERNAL_SOM_LOC_SET_FROM_IF_WRITABLE:
- case INTERNAL_ROSE_CHAIN:
- return false;
- case EXTERNAL_CALLBACK:
- case EXTERNAL_CALLBACK_SOM_REL:
- case EXTERNAL_CALLBACK_SOM_STORED:
- case EXTERNAL_CALLBACK_SOM_ABS:
- case EXTERNAL_CALLBACK_SOM_REV_NFA:
+ /** \brief Adjustment to add to the match offset when we report a match.
+ *
+ * This is usually used for reports attached to states that form part of a
+ * zero-width assertion, like '$'. */
+ s32 offsetAdjust = 0;
+
+ /** \brief Match report ID, for external reports.
+ *
+ * - external callback -> external report id
+ * - internal_som_* -> som loc to modify
+ * - INTERNAL_ROSE_CHAIN -> top event to push on
+ * - otherwise -> target subnfa */
+ u32 onmatch;
+
+ /** \brief Index of the reverse nfa.
+ *
+ * Used by EXTERNAL_CALLBACK_SOM_REV_NFA and
+ * INTERNAL_SOM_LOC_SET_SOM_REV_NFA*.
+ */
+ u32 revNfaIndex = 0;
+
+ /** \brief SOM distance value, use varies according to type.
+ *
+ * - for EXTERNAL_CALLBACK_SOM_REL, from-offset is this many bytes
+ * before the to-offset.
+ * - for EXTERNAL_CALLBACK_SOM_ABS, set from-offset to this value.
+ * - for INTERNAL_SOM_LOC_COPY*, som location read_from.
+ */
+ u64a somDistance = 0;
+
+ /** \brief Number of bytes behind us that we are allowed to squash
+ * identical top events on the queue.
+ *
+ * Used by INTERNAL_ROSE_CHAIN.
+ */
+ u64a topSquashDistance = 0;
+};
+
+static inline
+bool isExternalReport(const Report &r) {
+ switch (r.type) {
+ case INTERNAL_SOM_LOC_SET:
+ case INTERNAL_SOM_LOC_SET_IF_UNSET:
+ case INTERNAL_SOM_LOC_SET_IF_WRITABLE:
+ case INTERNAL_SOM_LOC_SET_SOM_REV_NFA:
+ case INTERNAL_SOM_LOC_SET_SOM_REV_NFA_IF_UNSET:
+ case INTERNAL_SOM_LOC_SET_SOM_REV_NFA_IF_WRITABLE:
+ case INTERNAL_SOM_LOC_COPY:
+ case INTERNAL_SOM_LOC_COPY_IF_WRITABLE:
+ case INTERNAL_SOM_LOC_MAKE_WRITABLE:
+ case INTERNAL_SOM_LOC_SET_FROM:
+ case INTERNAL_SOM_LOC_SET_FROM_IF_WRITABLE:
+ case INTERNAL_ROSE_CHAIN:
+ return false;
+ case EXTERNAL_CALLBACK:
+ case EXTERNAL_CALLBACK_SOM_REL:
+ case EXTERNAL_CALLBACK_SOM_STORED:
+ case EXTERNAL_CALLBACK_SOM_ABS:
+ case EXTERNAL_CALLBACK_SOM_REV_NFA:
case EXTERNAL_CALLBACK_SOM_PASS:
- return true;
- default:
- break; // fall through
- }
- assert(0); // unknown?
- return true;
-}
-
-static inline
+ return true;
+ default:
+ break; // fall through
+ }
+ assert(0); // unknown?
+ return true;
+}
+
+static inline
bool isExternalSomReport(const Report &r) {
return r.type != EXTERNAL_CALLBACK && isExternalReport(r);
}
static inline
-bool operator<(const Report &a, const Report &b) {
- ORDER_CHECK(type);
- ORDER_CHECK(quashSom);
- ORDER_CHECK(ekey);
- ORDER_CHECK(offsetAdjust);
- ORDER_CHECK(onmatch);
- ORDER_CHECK(minOffset);
- ORDER_CHECK(maxOffset);
- ORDER_CHECK(minLength);
- ORDER_CHECK(somDistance);
- ORDER_CHECK(revNfaIndex);
- ORDER_CHECK(topSquashDistance);
- return false;
-}
-
+bool operator<(const Report &a, const Report &b) {
+ ORDER_CHECK(type);
+ ORDER_CHECK(quashSom);
+ ORDER_CHECK(ekey);
+ ORDER_CHECK(offsetAdjust);
+ ORDER_CHECK(onmatch);
+ ORDER_CHECK(minOffset);
+ ORDER_CHECK(maxOffset);
+ ORDER_CHECK(minLength);
+ ORDER_CHECK(somDistance);
+ ORDER_CHECK(revNfaIndex);
+ ORDER_CHECK(topSquashDistance);
+ return false;
+}
+
inline
bool operator==(const Report &a, const Report &b) {
return a.type == b.type && a.quashSom == b.quashSom &&
@@ -217,60 +217,60 @@ bool operator==(const Report &a, const Report &b) {
a.topSquashDistance == b.topSquashDistance;
}
-static inline
+static inline
Report makeECallback(u32 report, s32 offsetAdjust, u32 ekey, bool quiet) {
- Report ir(EXTERNAL_CALLBACK, report);
- ir.offsetAdjust = offsetAdjust;
- ir.ekey = ekey;
+ Report ir(EXTERNAL_CALLBACK, report);
+ ir.offsetAdjust = offsetAdjust;
+ ir.ekey = ekey;
ir.quiet = (u8)quiet;
- return ir;
-}
-
-static inline
-Report makeCallback(u32 report, s32 offsetAdjust) {
+ return ir;
+}
+
+static inline
+Report makeCallback(u32 report, s32 offsetAdjust) {
return makeECallback(report, offsetAdjust, INVALID_EKEY, false);
-}
-
-static inline
-Report makeSomRelativeCallback(u32 report, s32 offsetAdjust, u64a distance) {
- Report ir(EXTERNAL_CALLBACK_SOM_REL, report);
- ir.offsetAdjust = offsetAdjust;
- ir.ekey = INVALID_EKEY;
- ir.somDistance = distance;
- return ir;
-}
-
-static inline
+}
+
+static inline
+Report makeSomRelativeCallback(u32 report, s32 offsetAdjust, u64a distance) {
+ Report ir(EXTERNAL_CALLBACK_SOM_REL, report);
+ ir.offsetAdjust = offsetAdjust;
+ ir.ekey = INVALID_EKEY;
+ ir.somDistance = distance;
+ return ir;
+}
+
+static inline
Report makeMpvTrigger(u32 event, u64a squashDistance) {
- Report ir(INTERNAL_ROSE_CHAIN, event);
- ir.ekey = INVALID_EKEY;
- ir.topSquashDistance = squashDistance;
- return ir;
-}
-
-/** simple exhaustible: exhaustible and if the first attempted match does not
- * succeed, no later matches will succeed either */
-static inline
-bool isSimpleExhaustible(const Report &ir) {
- if (ir.ekey == INVALID_EKEY) {
- return false;
- }
-
- if (ir.hasBounds() && (ir.minOffset || ir.minLength)) {
- return false;
- }
-
- if (!isExternalReport(ir)) {
- return false;
- }
-
- return true;
-}
-
+ Report ir(INTERNAL_ROSE_CHAIN, event);
+ ir.ekey = INVALID_EKEY;
+ ir.topSquashDistance = squashDistance;
+ return ir;
+}
+
+/** Simple exhaustible: exhaustible, and if the first attempted match does
+ * not succeed, no later matches will succeed either. */
+static inline
+bool isSimpleExhaustible(const Report &ir) {
+ if (ir.ekey == INVALID_EKEY) {
+ return false;
+ }
+
+ if (ir.hasBounds() && (ir.minOffset || ir.minLength)) {
+ return false;
+ }
+
+ if (!isExternalReport(ir)) {
+ return false;
+ }
+
+ return true;
+}
+
} // namespace ue2
-
+
namespace std {
-
+
template<>
struct hash<ue2::Report> {
std::size_t operator()(const ue2::Report &r) const {
@@ -279,7 +279,7 @@ struct hash<ue2::Report> {
r.revNfaIndex, r.somDistance, r.topSquashDistance);
}
};
-
+
} // namespace std
-
-#endif // UTIL_REPORT_H
+
+#endif // UTIL_REPORT_H
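The factory helpers above encode the common report shapes. A sketch of how
compile-time code builds and classifies a plain external report (assumes the
ue2 headers; not a standalone program):

```cpp
using namespace ue2;

Report r = makeCallback(/*report=*/42, /*offsetAdjust=*/0);
assert(isExternalReport(r));      // EXTERNAL_CALLBACK is user-visible
assert(!r.hasBounds());           // no extended-parameter constraints set
assert(!isSimpleExhaustible(r));  // no ekey assigned (INVALID_EKEY)
```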
diff --git a/contrib/libs/hyperscan/src/util/report_manager.cpp b/contrib/libs/hyperscan/src/util/report_manager.cpp
index 78b9b73dfc..10f8975e37 100644
--- a/contrib/libs/hyperscan/src/util/report_manager.cpp
+++ b/contrib/libs/hyperscan/src/util/report_manager.cpp
@@ -1,100 +1,100 @@
-/*
+/*
* Copyright (c) 2015-2018, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \file
- * \brief ReportManager: tracks Report structures, exhaustion and dedupe keys.
- */
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * \brief ReportManager: tracks Report structures, exhaustion and dedupe keys.
+ */
#include "report_manager.h"
-#include "grey.h"
-#include "ue2common.h"
+#include "grey.h"
+#include "ue2common.h"
#include "compiler/compiler.h"
-#include "nfagraph/ng.h"
-#include "rose/rose_build.h"
-#include "util/compile_error.h"
-#include "util/container.h"
-
-#include <deque>
-#include <map>
-#include <sstream>
-#include <vector>
-
-using namespace std;
-
-namespace ue2 {
-
-ReportManager::ReportManager(const Grey &g)
- : grey(g), freeEIndex(0), global_exhaust(true) {}
-
-u32 ReportManager::getInternalId(const Report &ir) {
- auto it = reportIdToInternalMap.find(ir);
- if (it != reportIdToInternalMap.end()) {
- DEBUG_PRINTF("existing report %zu\n", it->second);
- return it->second;
- }
-
- // Construct a new internal report and assign it a ReportID.
-
- if (numReports() >= grey.limitReportCount) {
- throw ResourceLimitError();
- }
-
- u32 size = reportIds.size();
- reportIds.push_back(ir);
+#include "nfagraph/ng.h"
+#include "rose/rose_build.h"
+#include "util/compile_error.h"
+#include "util/container.h"
+
+#include <deque>
+#include <map>
+#include <sstream>
+#include <vector>
+
+using namespace std;
+
+namespace ue2 {
+
+ReportManager::ReportManager(const Grey &g)
+ : grey(g), freeEIndex(0), global_exhaust(true) {}
+
+u32 ReportManager::getInternalId(const Report &ir) {
+ auto it = reportIdToInternalMap.find(ir);
+ if (it != reportIdToInternalMap.end()) {
+ DEBUG_PRINTF("existing report %zu\n", it->second);
+ return it->second;
+ }
+
+ // Construct a new internal report and assign it a ReportID.
+
+ if (numReports() >= grey.limitReportCount) {
+ throw ResourceLimitError();
+ }
+
+ u32 size = reportIds.size();
+ reportIds.push_back(ir);
reportIdToInternalMap.emplace(ir, size);
- DEBUG_PRINTF("new report %u\n", size);
- return size;
-}
-
-const Report &ReportManager::getReport(u32 id) const {
- assert(id < reportIds.size());
- return reportIds.at(id);
-}
-
-size_t ReportManager::numReports() const {
- return reportIds.size();
-}
-
-u32 ReportManager::getExhaustibleKey(u32 a) {
- auto it = toExhaustibleKeyMap.find(a);
- if (it == toExhaustibleKeyMap.end()) {
- // get size before assigning to avoid wacky LHS shenanigans
- u32 size = toExhaustibleKeyMap.size();
- bool inserted;
- tie(it, inserted) = toExhaustibleKeyMap.emplace(s64a{a}, size);
- assert(inserted);
- }
-
- DEBUG_PRINTF("%lld -> ekey %u\n", it->first, it->second);
- return it->second;
-}
-
+ DEBUG_PRINTF("new report %u\n", size);
+ return size;
+}
+
+const Report &ReportManager::getReport(u32 id) const {
+ assert(id < reportIds.size());
+ return reportIds.at(id);
+}
+
+size_t ReportManager::numReports() const {
+ return reportIds.size();
+}
+
+u32 ReportManager::getExhaustibleKey(u32 a) {
+ auto it = toExhaustibleKeyMap.find(a);
+ if (it == toExhaustibleKeyMap.end()) {
+ // get size before assigning to avoid wacky LHS shenanigans
+ u32 size = toExhaustibleKeyMap.size();
+ bool inserted;
+ tie(it, inserted) = toExhaustibleKeyMap.emplace(s64a{a}, size);
+ assert(inserted);
+ }
+
+ DEBUG_PRINTF("%lld -> ekey %u\n", it->first, it->second);
+ return it->second;
+}
+
const set<u32> &ReportManager::getRelateCKeys(u32 lkey) {
auto it = pl.lkey2ckeys.find(lkey);
assert(it != pl.lkey2ckeys.end());
@@ -120,26 +120,26 @@ const vector<CombInfo> &ReportManager::getCombInfoMap() const {
return pl.combInfoMap;
}
-u32 ReportManager::getUnassociatedExhaustibleKey(void) {
- u32 rv = toExhaustibleKeyMap.size();
- bool inserted;
- map<s64a, u32>::const_iterator it;
- tie(it, inserted) = toExhaustibleKeyMap.emplace(--freeEIndex, rv);
- assert(inserted);
- assert(it->second == rv);
-
- return rv;
-}
-
-u32 ReportManager::numDkeys() const {
- DEBUG_PRINTF("%zu dkeys\n", reportIdToDedupeKey.size());
- return reportIdToDedupeKey.size();
-}
-
-u32 ReportManager::numEkeys() const {
- return (u32) toExhaustibleKeyMap.size();
-}
-
+u32 ReportManager::getUnassociatedExhaustibleKey(void) {
+ u32 rv = toExhaustibleKeyMap.size();
+ bool inserted;
+ map<s64a, u32>::const_iterator it;
+ tie(it, inserted) = toExhaustibleKeyMap.emplace(--freeEIndex, rv);
+ assert(inserted);
+ assert(it->second == rv);
+
+ return rv;
+}
+
+u32 ReportManager::numDkeys() const {
+ DEBUG_PRINTF("%zu dkeys\n", reportIdToDedupeKey.size());
+ return reportIdToDedupeKey.size();
+}
+
+u32 ReportManager::numEkeys() const {
+ return (u32) toExhaustibleKeyMap.size();
+}
+
u32 ReportManager::numLogicalKeys() const {
return (u32) pl.toLogicalKeyMap.size();
}
@@ -152,113 +152,113 @@ u32 ReportManager::numCkeys() const {
return (u32) pl.toCombKeyMap.size();
}
-bool ReportManager::patternSetCanExhaust() const {
- return global_exhaust && !toExhaustibleKeyMap.empty();
-}
-
-vector<ReportID> ReportManager::getDkeyToReportTable() const {
- vector<ReportID> rv(reportIdToDedupeKey.size());
-
- for (const auto &m : reportIdToDedupeKey) {
- assert(m.second < rv.size());
- rv[m.second] = m.first;
- }
-
- return rv;
-}
-
-void ReportManager::assignDkeys(const RoseBuild *rose) {
- DEBUG_PRINTF("assigning...\n");
-
+bool ReportManager::patternSetCanExhaust() const {
+ return global_exhaust && !toExhaustibleKeyMap.empty();
+}
+
+vector<ReportID> ReportManager::getDkeyToReportTable() const {
+ vector<ReportID> rv(reportIdToDedupeKey.size());
+
+ for (const auto &m : reportIdToDedupeKey) {
+ assert(m.second < rv.size());
+ rv[m.second] = m.first;
+ }
+
+ return rv;
+}
+
+void ReportManager::assignDkeys(const RoseBuild *rose) {
+ DEBUG_PRINTF("assigning...\n");
+
map<u32, flat_set<ReportID>> ext_to_int;
-
- for (u32 i = 0; i < reportIds.size(); i++) {
- const Report &ir = reportIds[i];
-
- /* need to populate dkey */
- if (isExternalReport(ir)) {
- ext_to_int[ir.onmatch].insert(i);
- }
- }
-
- auto dedupe = rose->generateDedupeAux();
-
- for (const auto &m : ext_to_int) {
- u32 ext = m.first;
-
- if (!dedupe->requiresDedupeSupport(m.second)) {
- DEBUG_PRINTF("%u does not require dedupe\n", ext);
- continue; /* no dedupe required for this set */
- }
-
- u32 dkey = reportIdToDedupeKey.size();
- reportIdToDedupeKey[ext] = dkey;
- DEBUG_PRINTF("ext=%u -> dkey=%u\n", ext, dkey);
- }
-}
-
-u32 ReportManager::getDkey(const Report &r) const {
- if (!isExternalReport(r)) {
- return ~u32{0};
- }
-
- auto it = reportIdToDedupeKey.find(r.onmatch);
- if (it == reportIdToDedupeKey.end()) {
- return ~u32{0};
- }
- return it->second;
-}
-
-void ReportManager::registerExtReport(ReportID id,
- const external_report_info &ext) {
+
+ for (u32 i = 0; i < reportIds.size(); i++) {
+ const Report &ir = reportIds[i];
+
+ /* need to populate dkey */
+ if (isExternalReport(ir)) {
+ ext_to_int[ir.onmatch].insert(i);
+ }
+ }
+
+ auto dedupe = rose->generateDedupeAux();
+
+ for (const auto &m : ext_to_int) {
+ u32 ext = m.first;
+
+ if (!dedupe->requiresDedupeSupport(m.second)) {
+ DEBUG_PRINTF("%u does not require dedupe\n", ext);
+ continue; /* no dedupe required for this set */
+ }
+
+ u32 dkey = reportIdToDedupeKey.size();
+ reportIdToDedupeKey[ext] = dkey;
+ DEBUG_PRINTF("ext=%u -> dkey=%u\n", ext, dkey);
+ }
+}
+
+u32 ReportManager::getDkey(const Report &r) const {
+ if (!isExternalReport(r)) {
+ return ~u32{0};
+ }
+
+ auto it = reportIdToDedupeKey.find(r.onmatch);
+ if (it == reportIdToDedupeKey.end()) {
+ return ~u32{0};
+ }
+ return it->second;
+}
+
+void ReportManager::registerExtReport(ReportID id,
+ const external_report_info &ext) {
auto it = externalIdMap.find(id);
if (it != externalIdMap.end()) {
const external_report_info &eri = it->second;
- if (eri.highlander != ext.highlander) {
- /* we have a problem */
- ostringstream out;
- out << "Expression (index " << ext.first_pattern_index
- << ") with match ID " << id << " ";
- if (!ext.highlander) {
- out << "did not specify ";
- } else {
- out << "specified ";
- }
- out << "HS_FLAG_SINGLEMATCH whereas previous expression (index "
- << eri.first_pattern_index << ") with the same match ID did";
- if (ext.highlander) {
- out << " not";
- }
- out << ".";
- throw CompileError(ext.first_pattern_index, out.str());
- }
- } else {
- externalIdMap.emplace(id, ext);
- }
-
- // Any non-highlander pattern will render us not globally exhaustible.
- if (!ext.highlander) {
- global_exhaust = false;
- }
-}
-
+ if (eri.highlander != ext.highlander) {
+ /* we have a problem */
+ ostringstream out;
+ out << "Expression (index " << ext.first_pattern_index
+ << ") with match ID " << id << " ";
+ if (!ext.highlander) {
+ out << "did not specify ";
+ } else {
+ out << "specified ";
+ }
+ out << "HS_FLAG_SINGLEMATCH whereas previous expression (index "
+ << eri.first_pattern_index << ") with the same match ID did";
+ if (ext.highlander) {
+ out << " not";
+ }
+ out << ".";
+ throw CompileError(ext.first_pattern_index, out.str());
+ }
+ } else {
+ externalIdMap.emplace(id, ext);
+ }
+
+ // Any non-highlander pattern will render us not globally exhaustible.
+ if (!ext.highlander) {
+ global_exhaust = false;
+ }
+}
+
Report ReportManager::getBasicInternalReport(const ExpressionInfo &expr,
s32 adj) {
- /* validate that we are not violating highlander constraints, this will
- * throw a CompileError if so. */
+ /* validate that we are not violating highlander constraints; this will
+ * throw a CompileError if so. */
registerExtReport(expr.report,
external_report_info(expr.highlander, expr.index));
-
- /* create the internal report */
- u32 ekey = INVALID_EKEY;
+
+ /* create the internal report */
+ u32 ekey = INVALID_EKEY;
if (expr.highlander) {
- /* all patterns with the same report id share an ekey */
+ /* all patterns with the same report id share an ekey */
ekey = getExhaustibleKey(expr.report);
- }
-
+ }
+
return makeECallback(expr.report, adj, ekey, expr.quiet);
-}
-
+}
+
void ReportManager::setProgramOffset(ReportID id, u32 programOffset) {
assert(id < reportIds.size());
assert(!contains(reportIdToProgramOffset, id));
@@ -271,34 +271,34 @@ u32 ReportManager::getProgramOffset(ReportID id) const {
return reportIdToProgramOffset.at(id);
}
-static
-void ekeysUnion(std::set<u32> *ekeys, u32 more) {
- if (!ekeys->empty()) {
- if (more == INVALID_EKEY) {
- ekeys->clear();
- } else {
- ekeys->insert(more);
- }
- }
-}
-
-set<u32> reportsToEkeys(const set<ReportID> &reports, const ReportManager &rm) {
- assert(!reports.empty());
-
- set<u32> ekeys;
-
- for (auto it = reports.begin(), ite = reports.end(); it != ite; ++it) {
- u32 e = rm.getReport(*it).ekey;
- if (it == reports.begin()) {
- if (e != INVALID_EKEY) {
- ekeys.insert(e);
- }
- } else {
- ekeysUnion(&ekeys, e);
- }
- }
-
- return ekeys;
-}
-
-} // namespace ue2
+static
+void ekeysUnion(std::set<u32> *ekeys, u32 more) {
+ if (!ekeys->empty()) {
+ if (more == INVALID_EKEY) {
+ ekeys->clear();
+ } else {
+ ekeys->insert(more);
+ }
+ }
+}
+
+set<u32> reportsToEkeys(const set<ReportID> &reports, const ReportManager &rm) {
+ assert(!reports.empty());
+
+ set<u32> ekeys;
+
+ for (auto it = reports.begin(), ite = reports.end(); it != ite; ++it) {
+ u32 e = rm.getReport(*it).ekey;
+ if (it == reports.begin()) {
+ if (e != INVALID_EKEY) {
+ ekeys.insert(e);
+ }
+ } else {
+ ekeysUnion(&ekeys, e);
+ }
+ }
+
+ return ekeys;
+}
+
+} // namespace ue2
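The ekeysUnion() helper above encodes an all-or-nothing rule: the accumulated ekey set survives only while every report seen so far carries a valid ekey, because a single non-exhaustible report means the combined set of reports can never be treated as exhausted. A minimal standalone sketch of the same rule, with plain integers standing in for Hyperscan's types (INVALID_EKEY here is a stand-in sentinel, not the library's definition):

    #include <cassert>
    #include <set>

    static const unsigned INVALID_EKEY = ~0U; // stand-in sentinel: no ekey

    // Mirror of ekeysUnion(): once any report lacks an ekey, the set collapses
    // and stays empty for all later reports.
    static void ekeys_union(std::set<unsigned> *ekeys, unsigned more) {
        if (!ekeys->empty()) {
            if (more == INVALID_EKEY) {
                ekeys->clear(); // one non-exhaustible report poisons the union
            } else {
                ekeys->insert(more);
            }
        }
    }

    int main() {
        std::set<unsigned> ekeys{7};       // first report contributed ekey 7
        ekeys_union(&ekeys, 9);            // {7, 9}
        assert(ekeys.size() == 2);
        ekeys_union(&ekeys, INVALID_EKEY); // collapses to {}
        ekeys_union(&ekeys, 3);            // stays empty
        assert(ekeys.empty());
        return 0;
    }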
diff --git a/contrib/libs/hyperscan/src/util/report_manager.h b/contrib/libs/hyperscan/src/util/report_manager.h
index 015dc9c855..08e7dd65d8 100644
--- a/contrib/libs/hyperscan/src/util/report_manager.h
+++ b/contrib/libs/hyperscan/src/util/report_manager.h
@@ -1,86 +1,86 @@
-/*
+/*
* Copyright (c) 2015-2018, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \file
- * \brief ReportManager: tracks Report structures, exhaustion and
- * dedupe keys.
- */
-
-#ifndef REPORT_MANAGER_H
-#define REPORT_MANAGER_H
-
-#include "ue2common.h"
-#include "util/compile_error.h"
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * \brief ReportManager: tracks Report structures, exhaustion and
+ * dedupe keys.
+ */
+
+#ifndef REPORT_MANAGER_H
+#define REPORT_MANAGER_H
+
+#include "ue2common.h"
+#include "util/compile_error.h"
#include "util/noncopyable.h"
-#include "util/report.h"
+#include "util/report.h"
#include "parser/logical_combination.h"
-
-#include <map>
-#include <set>
+
+#include <map>
+#include <set>
#include <unordered_map>
-#include <vector>
-
-namespace ue2 {
-
-struct Grey;
-class RoseBuild;
+#include <vector>
+
+namespace ue2 {
+
+struct Grey;
+class RoseBuild;
class ExpressionInfo;
-
-struct external_report_info {
- external_report_info(bool h, u32 fpi)
- : highlander(h), first_pattern_index(fpi) { }
- const bool highlander;
- const u32 first_pattern_index;
-};
-
-/** \brief Tracks Report structures, exhaustion and dedupe keys. */
+
+struct external_report_info {
+ external_report_info(bool h, u32 fpi)
+ : highlander(h), first_pattern_index(fpi) { }
+ const bool highlander;
+ const u32 first_pattern_index;
+};
+
+/** \brief Tracks Report structures, exhaustion and dedupe keys. */
class ReportManager : noncopyable {
-public:
- explicit ReportManager(const Grey &g);
-
- /** \brief Fetch the ID associated with the given Report. */
- u32 getInternalId(const Report &r);
-
- /** \brief Fetch the Report associated with \a id. */
- const Report &getReport(u32 id) const;
-
- /** \brief Total number of reports. */
- size_t numReports() const;
-
- /** \brief Return an unused exhaustion key (the next available one). */
- u32 getUnassociatedExhaustibleKey(void);
-
- /** \brief Total number of dedupe keys. */
- u32 numDkeys() const;
-
- /** \brief Total number of exhaustion keys. */
- u32 numEkeys() const;
-
+public:
+ explicit ReportManager(const Grey &g);
+
+ /** \brief Fetch the ID associated with the given Report. */
+ u32 getInternalId(const Report &r);
+
+ /** \brief Fetch the Report associated with \a id. */
+ const Report &getReport(u32 id) const;
+
+ /** \brief Total number of reports. */
+ size_t numReports() const;
+
+ /** \brief Return an unused exhaustion key (the next available one). */
+ u32 getUnassociatedExhaustibleKey(void);
+
+ /** \brief Total number of dedupe keys. */
+ u32 numDkeys() const;
+
+ /** \brief Total number of exhaustion keys. */
+ u32 numEkeys() const;
+
/** \brief Total number of logical keys. */
u32 numLogicalKeys() const;
@@ -90,36 +90,36 @@ public:
/** \brief Total number of combination keys. */
u32 numCkeys() const;
- /** \brief True if the pattern set can exhaust (i.e. all patterns are
- * highlander). */
- bool patternSetCanExhaust() const;
-
- void assignDkeys(const RoseBuild *rose);
-
- std::vector<ReportID> getDkeyToReportTable() const;
-
- /** \brief Return a const reference to the table of Report
- * structures. */
- const std::vector<Report> &reports() const { return reportIds; }
-
- /**
+ /** \brief True if the pattern set can exhaust (i.e. all patterns are
+ * highlander). */
+ bool patternSetCanExhaust() const;
+
+ void assignDkeys(const RoseBuild *rose);
+
+ std::vector<ReportID> getDkeyToReportTable() const;
+
+ /** \brief Return a const reference to the table of Report
+ * structures. */
+ const std::vector<Report> &reports() const { return reportIds; }
+
+ /**
* Get a simple internal report corresponding to the expression. An ekey
* will be setup if required.
- *
-     * Note: this function may throw a CompileError if constraints on external
-     * match IDs are violated (for example, mixed highlander status).
- */
+ *
+     * Note: this function may throw a CompileError if constraints on external
+     * match IDs are violated (for example, mixed highlander status).
+ */
Report getBasicInternalReport(const ExpressionInfo &expr, s32 adj = 0);
-
- /** \brief Register an external report and validate that we are not
- * violating highlander constraints (which will cause an exception to be
- * thrown). */
- void registerExtReport(ReportID id, const external_report_info &ext);
-
- /** \brief Fetch the ekey associated with the given expression index,
- * assigning one if necessary. */
- u32 getExhaustibleKey(u32 expressionIndex);
-
+
+ /** \brief Register an external report and validate that we are not
+ * violating highlander constraints (which will cause an exception to be
+ * thrown). */
+ void registerExtReport(ReportID id, const external_report_info &ext);
+
+ /** \brief Fetch the ekey associated with the given expression index,
+ * assigning one if necessary. */
+ u32 getExhaustibleKey(u32 expressionIndex);
+
/** \brief Get lkey's corresponding ckeys. */
const std::set<u32> &getRelateCKeys(u32 lkey);
@@ -133,10 +133,10 @@ public:
/** \brief Used in Rose for writing bytecode. */
const std::vector<CombInfo> &getCombInfoMap() const;
- /** \brief Fetch the dedupe key associated with the given report. Returns
- * ~0U if no dkey is needed. */
- u32 getDkey(const Report &r) const;
-
+ /** \brief Fetch the dedupe key associated with the given report. Returns
+ * ~0U if no dkey is needed. */
+ u32 getDkey(const Report &r) const;
+
/** \brief Register a Rose program offset with the given report. */
void setProgramOffset(ReportID id, u32 programOffset);
@@ -148,45 +148,45 @@ public:
/** \brief Parsed logical combination structure. */
ParsedLogical pl;
-private:
- /** \brief Grey box ref, for checking resource limits. */
- const Grey &grey;
-
- /** \brief Report structures, indexed by ID. */
- std::vector<Report> reportIds;
-
- /** \brief Mapping from Report to ID (inverse of \ref reportIds
+private:
+ /** \brief Grey box ref, for checking resource limits. */
+ const Grey &grey;
+
+ /** \brief Report structures, indexed by ID. */
+ std::vector<Report> reportIds;
+
+ /** \brief Mapping from Report to ID (inverse of \ref reportIds
* vector). */
std::unordered_map<Report, size_t> reportIdToInternalMap;
-
- /** \brief Mapping from ReportID to dedupe key. */
+
+ /** \brief Mapping from ReportID to dedupe key. */
std::unordered_map<ReportID, u32> reportIdToDedupeKey;
-
+
/** \brief Mapping from ReportID to Rose program offset in bytecode. */
std::unordered_map<ReportID, u32> reportIdToProgramOffset;
-    /** \brief Mapping from each external match ID to information about
-     * that ID. */
+    /** \brief Mapping from each external match ID to information about
+     * that ID. */
std::unordered_map<ReportID, external_report_info> externalIdMap;
-
- /** \brief Mapping from expression index to exhaustion key. */
- std::map<s64a, u32> toExhaustibleKeyMap;
-
- /** \brief Unallocated expression index, used for \ref
- * getUnassociatedExhaustibleKey.
- *
- * TODO: work out why this is signed.
- */
- s64a freeEIndex;
-
- /** \brief True if database is globally exhaustible (all patterns must be
- * highlander for this to be the case). */
- bool global_exhaust;
-};
-
-std::set<u32> reportsToEkeys(const std::set<ReportID> &reports,
- const ReportManager &rm);
-
-} // namespace ue2
-
-#endif
+
+ /** \brief Mapping from expression index to exhaustion key. */
+ std::map<s64a, u32> toExhaustibleKeyMap;
+
+ /** \brief Unallocated expression index, used for \ref
+ * getUnassociatedExhaustibleKey.
+ *
+ * TODO: work out why this is signed.
+ */
+ s64a freeEIndex;
+
+ /** \brief True if database is globally exhaustible (all patterns must be
+ * highlander for this to be the case). */
+ bool global_exhaust;
+};
+
+std::set<u32> reportsToEkeys(const std::set<ReportID> &reports,
+ const ReportManager &rm);
+
+} // namespace ue2
+
+#endif
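registerExtReport() above enforces one invariant worth spelling out: every expression sharing an external match ID must agree on HS_FLAG_SINGLEMATCH (highlander) status, since dedupe and exhaustion keys are allocated per external ID. A rough standalone model of that check, with std::runtime_error standing in for CompileError and all names illustrative:

    #include <map>
    #include <sstream>
    #include <stdexcept>

    struct ExtInfo {
        bool highlander;              // HS_FLAG_SINGLEMATCH set?
        unsigned first_pattern_index; // expression that first used this ID
    };

    // Model of registerExtReport(): the first registration wins; later
    // registrations must agree on the highlander flag or compilation fails.
    void register_ext(std::map<unsigned, ExtInfo> &ext_ids, unsigned id,
                      bool highlander, unsigned pattern_index) {
        auto it = ext_ids.find(id);
        if (it == ext_ids.end()) {
            ext_ids.emplace(id, ExtInfo{highlander, pattern_index});
            return;
        }
        if (it->second.highlander != highlander) {
            std::ostringstream out;
            out << "pattern " << pattern_index << " disagrees with pattern "
                << it->second.first_pattern_index
                << " on HS_FLAG_SINGLEMATCH for match ID " << id;
            throw std::runtime_error(out.str());
        }
    }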
diff --git a/contrib/libs/hyperscan/src/util/scatter.h b/contrib/libs/hyperscan/src/util/scatter.h
index 40a1ab248d..f651439452 100644
--- a/contrib/libs/hyperscan/src/util/scatter.h
+++ b/contrib/libs/hyperscan/src/util/scatter.h
@@ -1,55 +1,55 @@
-/*
- * Copyright (c) 2015, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef UTIL_SCATTER_H
-#define UTIL_SCATTER_H
-
-#include "ue2common.h"
-
-#define SCATTER_STRUCT(t) \
- struct scatter_unit_##t { u32 offset; t val; };
-
-SCATTER_STRUCT(u64a)
-SCATTER_STRUCT(u32)
-SCATTER_STRUCT(u16)
-SCATTER_STRUCT(u8)
-
-struct scatter_full_plan {
- u32 s_u64a_offset;
- u32 s_u64a_count;
- u32 s_u32_offset;
- u32 s_u32_count;
- u32 s_u16_offset;
- u32 s_u16_count;
- u32 s_u8_count;
- u32 s_u8_offset;
-};
-
-#undef SCATTER_STRUCT
-
-#endif
+/*
+ * Copyright (c) 2015, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef UTIL_SCATTER_H
+#define UTIL_SCATTER_H
+
+#include "ue2common.h"
+
+#define SCATTER_STRUCT(t) \
+ struct scatter_unit_##t { u32 offset; t val; };
+
+SCATTER_STRUCT(u64a)
+SCATTER_STRUCT(u32)
+SCATTER_STRUCT(u16)
+SCATTER_STRUCT(u8)
+
+struct scatter_full_plan {
+ u32 s_u64a_offset;
+ u32 s_u64a_count;
+ u32 s_u32_offset;
+ u32 s_u32_count;
+ u32 s_u16_offset;
+ u32 s_u16_count;
+ u32 s_u8_count;
+ u32 s_u8_offset;
+};
+
+#undef SCATTER_STRUCT
+
+#endif
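A scatter_full_plan is best read as the header of a larger serialized buffer: each s_*_offset locates a packed array of the matching scatter_unit_* records relative to some base pointer, and the paired s_*_count gives the array length. A hedged sketch of laying out such a buffer for two u32 writes (the layout choices here are illustrative, not the exact format the Hyperscan compiler emits):

    #include <cstdint>
    #include <cstring>
    #include <vector>

    struct unit_u32 { uint32_t offset; uint32_t val; }; // cf. scatter_unit_u32

    struct plan_header {            // cf. scatter_full_plan, u32 part only
        uint32_t s_u32_offset;      // byte offset of the unit array in buf
        uint32_t s_u32_count;       // number of u32 units
    };

    int main() {
        unit_u32 units[2] = {
            {0, 0xdeadbeefu}, // write 0xdeadbeef at state offset 0
            {8, 42u},         // write 42 at state offset 8
        };
        plan_header hdr;
        hdr.s_u32_offset = sizeof(hdr); // units packed right after the header
        hdr.s_u32_count = 2;

        std::vector<char> buf(sizeof(hdr) + sizeof(units));
        std::memcpy(buf.data(), &hdr, sizeof(hdr));
        std::memcpy(buf.data() + hdr.s_u32_offset, units, sizeof(units));
        return 0;
    }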
diff --git a/contrib/libs/hyperscan/src/util/scatter_runtime.h b/contrib/libs/hyperscan/src/util/scatter_runtime.h
index 09bc742d97..d839199192 100644
--- a/contrib/libs/hyperscan/src/util/scatter_runtime.h
+++ b/contrib/libs/hyperscan/src/util/scatter_runtime.h
@@ -1,74 +1,74 @@
-/*
- * Copyright (c) 2015, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef UTIL_SCATTER_RUNTIME_H
-#define UTIL_SCATTER_RUNTIME_H
-
-#include "scatter.h"
-
-#include "uniform_ops.h"
-
-#define SCATTER_DEF(t) \
-static really_inline \
-void scatter_##t(void *out, const struct scatter_unit_##t *plan, u32 count) { \
- for (u32 i = 0; i < count; i++) { \
- const struct scatter_unit_##t *item = plan + i; \
- DEBUG_PRINTF("storing %llu into offset %u\n", (u64a)item->val, \
- item->offset); \
- storeu_##t((char *)out + item->offset, item->val); \
- } \
-}
-
-SCATTER_DEF(u64a)
-SCATTER_DEF(u32)
-SCATTER_DEF(u16)
-SCATTER_DEF(u8)
-
-#undef SCATTER_DEF
-
-static really_inline
-void scatter(void *out, const void *base, const struct scatter_full_plan *p) {
-#define RUN_SUB(t) \
- if (p->s_##t##_offset) { \
- assert(p->s_##t##_count); \
- const struct scatter_unit_##t *pp \
- = (const void *)(b + p->s_##t##_offset); \
- scatter_##t(out, pp, p->s_##t##_count); \
- }
-
- const char *b = base;
-
- RUN_SUB(u64a);
- RUN_SUB(u32);
- RUN_SUB(u16);
- RUN_SUB(u8);
-
-#undef RUN_SUB
-}
-
-#endif
+/*
+ * Copyright (c) 2015, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef UTIL_SCATTER_RUNTIME_H
+#define UTIL_SCATTER_RUNTIME_H
+
+#include "scatter.h"
+
+#include "uniform_ops.h"
+
+#define SCATTER_DEF(t) \
+static really_inline \
+void scatter_##t(void *out, const struct scatter_unit_##t *plan, u32 count) { \
+ for (u32 i = 0; i < count; i++) { \
+ const struct scatter_unit_##t *item = plan + i; \
+ DEBUG_PRINTF("storing %llu into offset %u\n", (u64a)item->val, \
+ item->offset); \
+ storeu_##t((char *)out + item->offset, item->val); \
+ } \
+}
+
+SCATTER_DEF(u64a)
+SCATTER_DEF(u32)
+SCATTER_DEF(u16)
+SCATTER_DEF(u8)
+
+#undef SCATTER_DEF
+
+static really_inline
+void scatter(void *out, const void *base, const struct scatter_full_plan *p) {
+#define RUN_SUB(t) \
+ if (p->s_##t##_offset) { \
+ assert(p->s_##t##_count); \
+ const struct scatter_unit_##t *pp \
+ = (const void *)(b + p->s_##t##_offset); \
+ scatter_##t(out, pp, p->s_##t##_count); \
+ }
+
+ const char *b = base;
+
+ RUN_SUB(u64a);
+ RUN_SUB(u32);
+ RUN_SUB(u16);
+ RUN_SUB(u8);
+
+#undef RUN_SUB
+}
+
+#endif
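scatter() above walks the four typed unit arrays in turn and performs each (offset, value) store into the output state; a zero s_*_offset means that width is absent from the plan. A simplified, self-contained interpreter for the u32 case only (the real routine also handles u64a/u16/u8 and uses the unaligned-store helpers from uniform_ops.h):

    #include <cstdint>
    #include <cstring>

    struct unit_u32 { uint32_t offset; uint32_t val; };

    // Apply `count` u32 scatter units to the state buffer `out`.
    static void scatter_u32_sketch(void *out, const unit_u32 *plan,
                                   uint32_t count) {
        for (uint32_t i = 0; i < count; i++) {
            // storeu_u32() in the real code: an unaligned 4-byte store.
            std::memcpy(static_cast<char *>(out) + plan[i].offset,
                        &plan[i].val, sizeof(uint32_t));
        }
    }

    int main() {
        char state[16] = {0};
        unit_u32 units[2] = {{0, 1u}, {12, 0xffffffffu}};
        scatter_u32_sketch(state, units, 2);
        return 0;
    }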
diff --git a/contrib/libs/hyperscan/src/util/simd_types.h b/contrib/libs/hyperscan/src/util/simd_types.h
index 962cad6c97..331026dc9b 100644
--- a/contrib/libs/hyperscan/src/util/simd_types.h
+++ b/contrib/libs/hyperscan/src/util/simd_types.h
@@ -1,57 +1,57 @@
-/*
+/*
* Copyright (c) 2015-2017, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef SIMD_TYPES_H
-#define SIMD_TYPES_H
-
-#include "config.h"
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef SIMD_TYPES_H
+#define SIMD_TYPES_H
+
+#include "config.h"
#include "util/arch.h"
#include "util/intrinsics.h"
-#include "ue2common.h"
-
+#include "ue2common.h"
+
#if defined(HAVE_SSE2)
typedef __m128i m128;
-#else
+#else
typedef struct ALIGN_DIRECTIVE {u64a hi; u64a lo;} m128;
-#endif
-
+#endif
+
#if defined(HAVE_AVX2)
-typedef __m256i m256;
-#else
-typedef struct ALIGN_AVX_DIRECTIVE {m128 lo; m128 hi;} m256;
-#endif
-
-typedef struct {m128 lo; m128 mid; m128 hi;} m384;
+typedef __m256i m256;
+#else
+typedef struct ALIGN_AVX_DIRECTIVE {m128 lo; m128 hi;} m256;
+#endif
+
+typedef struct {m128 lo; m128 mid; m128 hi;} m384;
#if defined(HAVE_AVX512)
typedef __m512i m512;
#else
typedef struct ALIGN_ATTR(64) {m256 lo; m256 hi;} m512;
#endif
-
-#endif /* SIMD_TYPES_H */
-
+
+#endif /* SIMD_TYPES_H */
+
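Whichever preprocessor branch is taken, each mNNN type must come out at exactly NNN/8 bytes with the expected alignment, since the runtime stores these types directly into fixed-layout bytecode structures. A quick compile-time sanity check one could drop into a test translation unit (a sketch; it assumes the header is on the include path):

    #include "util/simd_types.h"

    static_assert(sizeof(m128) == 16, "m128 must be 16 bytes in both branches");
    static_assert(sizeof(m256) == 32, "m256 must be 32 bytes in both branches");
    static_assert(sizeof(m384) == 48, "m384 is three m128 parts");
    static_assert(sizeof(m512) == 64, "m512 must be 64 bytes in both branches");
    static_assert(alignof(m128) >= 16, "aligned loads/stores rely on this");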
diff --git a/contrib/libs/hyperscan/src/util/simd_utils.h b/contrib/libs/hyperscan/src/util/simd_utils.h
index d1f060b070..4928065131 100644
--- a/contrib/libs/hyperscan/src/util/simd_utils.h
+++ b/contrib/libs/hyperscan/src/util/simd_utils.h
@@ -1,68 +1,68 @@
-/*
+/*
* Copyright (c) 2015-2020, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \file
- * \brief SIMD types and primitive operations.
- */
-
-#ifndef SIMD_UTILS
-#define SIMD_UTILS
-
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * \brief SIMD types and primitive operations.
+ */
+
+#ifndef SIMD_UTILS
+#define SIMD_UTILS
+
#if !defined(_WIN32) && !defined(__SSSE3__)
#error SSSE3 instructions must be enabled
-#endif
-
+#endif
+
#include "config.h"
-#include "ue2common.h"
-#include "simd_types.h"
+#include "ue2common.h"
+#include "simd_types.h"
#include "unaligned.h"
#include "util/arch.h"
#include "util/intrinsics.h"
-
+
#include <string.h> // for memcpy
-
-// Define a common assume_aligned using an appropriate compiler built-in, if
-// it's available. Note that we need to handle C or C++ compilation.
-#ifdef __cplusplus
-# ifdef HAVE_CXX_BUILTIN_ASSUME_ALIGNED
-# define assume_aligned(x, y) __builtin_assume_aligned((x), (y))
-# endif
-#else
-# ifdef HAVE_CC_BUILTIN_ASSUME_ALIGNED
-# define assume_aligned(x, y) __builtin_assume_aligned((x), (y))
-# endif
-#endif
-
-// Fall back to the identity case.
-#ifndef assume_aligned
-#define assume_aligned(x, y) (x)
-#endif
-
+
+// Define a common assume_aligned using an appropriate compiler built-in, if
+// it's available. Note that we need to handle C or C++ compilation.
+#ifdef __cplusplus
+# ifdef HAVE_CXX_BUILTIN_ASSUME_ALIGNED
+# define assume_aligned(x, y) __builtin_assume_aligned((x), (y))
+# endif
+#else
+# ifdef HAVE_CC_BUILTIN_ASSUME_ALIGNED
+# define assume_aligned(x, y) __builtin_assume_aligned((x), (y))
+# endif
+#endif
+
+// Fall back to the identity case.
+#ifndef assume_aligned
+#define assume_aligned(x, y) (x)
+#endif
+
#ifdef __cplusplus
extern "C" {
#endif
@@ -71,58 +71,58 @@ extern const char vbs_mask_data[];
}
#endif
-static really_inline m128 ones128(void) {
+static really_inline m128 ones128(void) {
#if defined(__GNUC__) || defined(__INTEL_COMPILER)
/* gcc gets this right */
return _mm_set1_epi8(0xFF);
-#else
+#else
/* trick from Intel's optimization guide to generate all-ones.
* ICC converts this to the single cmpeq instruction */
- return _mm_cmpeq_epi8(_mm_setzero_si128(), _mm_setzero_si128());
-#endif
-}
-
-static really_inline m128 zeroes128(void) {
- return _mm_setzero_si128();
-}
-
-/** \brief Bitwise not for m128 */
-static really_inline m128 not128(m128 a) {
- return _mm_xor_si128(a, ones128());
-}
-
-/** \brief Return 1 if a and b are different, otherwise 0 */
-static really_inline int diff128(m128 a, m128 b) {
- return (_mm_movemask_epi8(_mm_cmpeq_epi8(a, b)) ^ 0xffff);
-}
-
-static really_inline int isnonzero128(m128 a) {
- return !!diff128(a, zeroes128());
-}
-
-/**
- * "Rich" version of diff128(). Takes two vectors a and b and returns a 4-bit
- * mask indicating which 32-bit words contain differences.
- */
-static really_inline u32 diffrich128(m128 a, m128 b) {
- a = _mm_cmpeq_epi32(a, b);
- return ~(_mm_movemask_ps(_mm_castsi128_ps(a))) & 0xf;
-}
-
-/**
- * "Rich" version of diff128(), 64-bit variant. Takes two vectors a and b and
- * returns a 4-bit mask indicating which 64-bit words contain differences.
- */
-static really_inline u32 diffrich64_128(m128 a, m128 b) {
+ return _mm_cmpeq_epi8(_mm_setzero_si128(), _mm_setzero_si128());
+#endif
+}
+
+static really_inline m128 zeroes128(void) {
+ return _mm_setzero_si128();
+}
+
+/** \brief Bitwise not for m128 */
+static really_inline m128 not128(m128 a) {
+ return _mm_xor_si128(a, ones128());
+}
+
+/** \brief Return 1 if a and b are different, otherwise 0 */
+static really_inline int diff128(m128 a, m128 b) {
+ return (_mm_movemask_epi8(_mm_cmpeq_epi8(a, b)) ^ 0xffff);
+}
+
+static really_inline int isnonzero128(m128 a) {
+ return !!diff128(a, zeroes128());
+}
+
+/**
+ * "Rich" version of diff128(). Takes two vectors a and b and returns a 4-bit
+ * mask indicating which 32-bit words contain differences.
+ */
+static really_inline u32 diffrich128(m128 a, m128 b) {
+ a = _mm_cmpeq_epi32(a, b);
+ return ~(_mm_movemask_ps(_mm_castsi128_ps(a))) & 0xf;
+}
+
+/**
+ * "Rich" version of diff128(), 64-bit variant. Takes two vectors a and b and
+ * returns a 4-bit mask indicating which 64-bit words contain differences.
+ */
+static really_inline u32 diffrich64_128(m128 a, m128 b) {
#if defined(HAVE_SSE41)
- a = _mm_cmpeq_epi64(a, b);
- return ~(_mm_movemask_ps(_mm_castsi128_ps(a))) & 0x5;
-#else
- u32 d = diffrich128(a, b);
- return (d | (d >> 1)) & 0x5;
-#endif
-}
-
+ a = _mm_cmpeq_epi64(a, b);
+ return ~(_mm_movemask_ps(_mm_castsi128_ps(a))) & 0x5;
+#else
+ u32 d = diffrich128(a, b);
+ return (d | (d >> 1)) & 0x5;
+#endif
+}
+
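diffrich128() reports differences per 32-bit lane as a 4-bit mask (bit 0 = least-significant lane), and diffrich64_128() folds adjacent lane bits so that each 64-bit lane maps to an even bit position, which is why both paths mask with 0x5. A small worked check (hypothetical values; assumes an SSE build with this header included):

    #include <cassert>
    #include "util/simd_utils.h"

    void diffrich_example(void) {
        m128 a = set4x32(7);                // 32-bit lanes: [7, 7, 7, 7]
        m128 b = _mm_set_epi32(7, 9, 7, 7); // lane 2 differs
        u32 d = diffrich128(a, b);
        assert(d == 0x4);                   // 0b0100: only lane 2 differs
        // A 64-bit lane differs iff either of its 32-bit halves differs;
        // folding odd bits onto even ones keeps one bit per 64-bit lane.
        assert(diffrich64_128(a, b) == ((d | (d >> 1)) & 0x5)); // 0b0100
    }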
static really_really_inline
m128 lshift64_m128(m128 a, unsigned b) {
#if defined(HAVE__BUILTIN_CONSTANT_P)
@@ -132,30 +132,30 @@ m128 lshift64_m128(m128 a, unsigned b) {
#endif
m128 x = _mm_cvtsi32_si128(b);
return _mm_sll_epi64(a, x);
-}
-
+}
+
#define rshift64_m128(a, b) _mm_srli_epi64((a), (b))
-#define eq128(a, b) _mm_cmpeq_epi8((a), (b))
-#define movemask128(a) ((u32)_mm_movemask_epi8((a)))
-
+#define eq128(a, b) _mm_cmpeq_epi8((a), (b))
+#define movemask128(a) ((u32)_mm_movemask_epi8((a)))
+
#if defined(HAVE_AVX512)
static really_inline m128 cast512to128(const m512 in) {
return _mm512_castsi512_si128(in);
}
#endif
-static really_inline m128 set16x8(u8 c) {
- return _mm_set1_epi8(c);
-}
-
+static really_inline m128 set16x8(u8 c) {
+ return _mm_set1_epi8(c);
+}
+
static really_inline m128 set4x32(u32 c) {
return _mm_set1_epi32(c);
}
-static really_inline u32 movd(const m128 in) {
- return _mm_cvtsi128_si32(in);
-}
-
+static really_inline u32 movd(const m128 in) {
+ return _mm_cvtsi128_si32(in);
+}
+
#if defined(HAVE_AVX512)
static really_inline u32 movd512(const m512 in) {
// NOTE: seems gcc doesn't support _mm512_cvtsi512_si32(in),
@@ -170,25 +170,25 @@ static really_inline u64a movq512(const m512 in) {
}
#endif
-static really_inline u64a movq(const m128 in) {
-#if defined(ARCH_X86_64)
- return _mm_cvtsi128_si64(in);
-#else // 32-bit - this is horrific
- u32 lo = movd(in);
- u32 hi = movd(_mm_srli_epi64(in, 32));
- return (u64a)hi << 32 | lo;
-#endif
-}
-
+static really_inline u64a movq(const m128 in) {
+#if defined(ARCH_X86_64)
+ return _mm_cvtsi128_si64(in);
+#else // 32-bit - this is horrific
+ u32 lo = movd(in);
+ u32 hi = movd(_mm_srli_epi64(in, 32));
+ return (u64a)hi << 32 | lo;
+#endif
+}
+
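On 32-bit targets movq() has no _mm_cvtsi128_si64(), so it reassembles the low 64 bits from two 32-bit extractions: movd() reads bits 31..0 directly, and shifting each 64-bit lane right by 32 exposes bits 63..32 to a second movd(). Spelled out on a concrete value (a sketch; the check holds on either path):

    #include <cassert>
    #include "util/simd_utils.h"

    void movq_example(void) {
        u64a x = 0x1122334455667788ULL;
        m128 v = load_m128_from_u64a(&x);
        u32 lo = movd(v);                     // 0x55667788 (bits 31..0)
        u32 hi = movd(_mm_srli_epi64(v, 32)); // 0x11223344 (bits 63..32)
        assert(((u64a)hi << 32 | lo) == movq(v));
    }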
/* another form of movq */
static really_inline
m128 load_m128_from_u64a(const u64a *p) {
return _mm_set_epi64x(0LL, *p);
-}
-
+}
+
#define rshiftbyte_m128(a, count_immed) _mm_srli_si128(a, count_immed)
#define lshiftbyte_m128(a, count_immed) _mm_slli_si128(a, count_immed)
-
+
#if defined(HAVE_SSE41)
#define extract32from128(a, imm) _mm_extract_epi32(a, imm)
#define extract64from128(a, imm) _mm_extract_epi64(a, imm)
@@ -196,33 +196,33 @@ m128 load_m128_from_u64a(const u64a *p) {
#define extract32from128(a, imm) movd(_mm_srli_si128(a, imm << 2))
#define extract64from128(a, imm) movq(_mm_srli_si128(a, imm << 3))
#endif
-
+
#if !defined(HAVE_AVX2)
-// TODO: this entire file needs restructuring - this carveout is awful
-#define extractlow64from256(a) movq(a.lo)
-#define extractlow32from256(a) movd(a.lo)
+// TODO: this entire file needs restructuring - this carveout is awful
+#define extractlow64from256(a) movq(a.lo)
+#define extractlow32from256(a) movd(a.lo)
#if defined(HAVE_SSE41)
-#define extract32from256(a, imm) _mm_extract_epi32((imm >> 2) ? a.hi : a.lo, imm % 4)
+#define extract32from256(a, imm) _mm_extract_epi32((imm >> 2) ? a.hi : a.lo, imm % 4)
#define extract64from256(a, imm) _mm_extract_epi64((imm >> 1) ? a.hi : a.lo, imm % 2)
-#else
+#else
#define extract32from256(a, imm) movd(_mm_srli_si128((imm >> 2) ? a.hi : a.lo, (imm % 4) * 4))
#define extract64from256(a, imm) movq(_mm_srli_si128((imm >> 1) ? a.hi : a.lo, (imm % 2) * 8))
-#endif
-
-#endif // !AVX2
-
-static really_inline m128 and128(m128 a, m128 b) {
- return _mm_and_si128(a,b);
-}
-
-static really_inline m128 xor128(m128 a, m128 b) {
- return _mm_xor_si128(a,b);
-}
-
-static really_inline m128 or128(m128 a, m128 b) {
- return _mm_or_si128(a,b);
-}
-
+#endif
+
+#endif // !AVX2
+
+static really_inline m128 and128(m128 a, m128 b) {
+ return _mm_and_si128(a,b);
+}
+
+static really_inline m128 xor128(m128 a, m128 b) {
+ return _mm_xor_si128(a,b);
+}
+
+static really_inline m128 or128(m128 a, m128 b) {
+ return _mm_or_si128(a,b);
+}
+
#if defined(HAVE_AVX512VBMI)
static really_inline m512 expand128(m128 a) {
return _mm512_broadcast_i32x4(a);
@@ -241,50 +241,50 @@ static really_inline m512 expand384(m384 a) {
}
#endif
-static really_inline m128 andnot128(m128 a, m128 b) {
- return _mm_andnot_si128(a, b);
-}
-
-// aligned load
-static really_inline m128 load128(const void *ptr) {
- assert(ISALIGNED_N(ptr, alignof(m128)));
- ptr = assume_aligned(ptr, 16);
- return _mm_load_si128((const m128 *)ptr);
-}
-
-// aligned store
-static really_inline void store128(void *ptr, m128 a) {
- assert(ISALIGNED_N(ptr, alignof(m128)));
- ptr = assume_aligned(ptr, 16);
- *(m128 *)ptr = a;
-}
-
-// unaligned load
-static really_inline m128 loadu128(const void *ptr) {
- return _mm_loadu_si128((const m128 *)ptr);
-}
-
-// unaligned store
-static really_inline void storeu128(void *ptr, m128 a) {
-    _mm_storeu_si128((m128 *)ptr, a);
-}
-
-// packed unaligned store of first N bytes
-static really_inline
-void storebytes128(void *ptr, m128 a, unsigned int n) {
- assert(n <= sizeof(a));
- memcpy(ptr, &a, n);
-}
-
-// packed unaligned load of first N bytes, pad with zero
-static really_inline
-m128 loadbytes128(const void *ptr, unsigned int n) {
- m128 a = zeroes128();
- assert(n <= sizeof(a));
- memcpy(&a, ptr, n);
- return a;
-}
-
+static really_inline m128 andnot128(m128 a, m128 b) {
+ return _mm_andnot_si128(a, b);
+}
+
+// aligned load
+static really_inline m128 load128(const void *ptr) {
+ assert(ISALIGNED_N(ptr, alignof(m128)));
+ ptr = assume_aligned(ptr, 16);
+ return _mm_load_si128((const m128 *)ptr);
+}
+
+// aligned store
+static really_inline void store128(void *ptr, m128 a) {
+ assert(ISALIGNED_N(ptr, alignof(m128)));
+ ptr = assume_aligned(ptr, 16);
+ *(m128 *)ptr = a;
+}
+
+// unaligned load
+static really_inline m128 loadu128(const void *ptr) {
+ return _mm_loadu_si128((const m128 *)ptr);
+}
+
+// unaligned store
+static really_inline void storeu128(void *ptr, m128 a) {
+    _mm_storeu_si128((m128 *)ptr, a);
+}
+
+// packed unaligned store of first N bytes
+static really_inline
+void storebytes128(void *ptr, m128 a, unsigned int n) {
+ assert(n <= sizeof(a));
+ memcpy(ptr, &a, n);
+}
+
+// packed unaligned load of first N bytes, pad with zero
+static really_inline
+m128 loadbytes128(const void *ptr, unsigned int n) {
+ m128 a = zeroes128();
+ assert(n <= sizeof(a));
+ memcpy(&a, ptr, n);
+ return a;
+}
+
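loadbytes128()/storebytes128() let callers treat a vector as a plain byte buffer of length n <= 16: the load zero-pads the unused tail, and the store writes only the first n bytes. A quick round-trip sketch:

    #include <cassert>
    #include <cstring>
    #include "util/simd_utils.h"

    void loadbytes_example(void) {
        const char msg[5] = {'h', 'e', 'l', 'l', 'o'};
        m128 v = loadbytes128(msg, sizeof(msg)); // bytes 5..15 are zero
        char out[16];
        storebytes128(out, v, 5);                // writes exactly 5 bytes
        assert(std::memcmp(out, msg, 5) == 0);
    }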
#ifdef __cplusplus
extern "C" {
#endif
@@ -301,18 +301,18 @@ m128 mask1bit128(unsigned int n) {
return loadu128(&simd_onebit_masks[mask_idx]);
}
-// switches on bit N in the given vector.
-static really_inline
-void setbit128(m128 *ptr, unsigned int n) {
+// switches on bit N in the given vector.
+static really_inline
+void setbit128(m128 *ptr, unsigned int n) {
*ptr = or128(mask1bit128(n), *ptr);
-}
-
-// switches off bit N in the given vector.
-static really_inline
-void clearbit128(m128 *ptr, unsigned int n) {
+}
+
+// switches off bit N in the given vector.
+static really_inline
+void clearbit128(m128 *ptr, unsigned int n) {
*ptr = andnot128(mask1bit128(n), *ptr);
}
-
+
// tests bit N in the given vector.
static really_inline
char testbit128(m128 val, unsigned int n) {
@@ -323,7 +323,7 @@ char testbit128(m128 val, unsigned int n) {
return isnonzero128(and128(mask, val));
#endif
}
-
+
// offset must be an immediate
#define palignr(r, l, offset) _mm_alignr_epi8(r, l, offset)
@@ -332,9 +332,9 @@ m128 pshufb_m128(m128 a, m128 b) {
m128 result;
result = _mm_shuffle_epi8(a, b);
return result;
-}
-
-static really_inline
+}
+
+static really_inline
m256 pshufb_m256(m256 a, m256 b) {
#if defined(HAVE_AVX2)
return _mm256_shuffle_epi8(a, b);
@@ -344,8 +344,8 @@ m256 pshufb_m256(m256 a, m256 b) {
rv.hi = pshufb_m128(a.hi, b.hi);
return rv;
#endif
-}
-
+}
+
#if defined(HAVE_AVX512)
static really_inline
m512 pshufb_m512(m512 a, m512 b) {
@@ -396,12 +396,12 @@ m128 set64x2(u64a hi, u64a lo) {
return _mm_set_epi64x(hi, lo);
}
-/****
- **** 256-bit Primitives
- ****/
-
+/****
+ **** 256-bit Primitives
+ ****/
+
#if defined(HAVE_AVX2)
-
+
static really_really_inline
m256 lshift64_m256(m256 a, unsigned b) {
#if defined(HAVE__BUILTIN_CONSTANT_P)
@@ -415,44 +415,44 @@ m256 lshift64_m256(m256 a, unsigned b) {
#define rshift64_m256(a, b) _mm256_srli_epi64((a), (b))
-static really_inline
-m256 set32x8(u32 in) {
+static really_inline
+m256 set32x8(u32 in) {
return _mm256_set1_epi8(in);
-}
-
-#define eq256(a, b) _mm256_cmpeq_epi8((a), (b))
-#define movemask256(a) ((u32)_mm256_movemask_epi8((a)))
-
-static really_inline
-m256 set2x128(m128 a) {
- return _mm256_broadcastsi128_si256(a);
-}
-
-#else
-
+}
+
+#define eq256(a, b) _mm256_cmpeq_epi8((a), (b))
+#define movemask256(a) ((u32)_mm256_movemask_epi8((a)))
+
+static really_inline
+m256 set2x128(m128 a) {
+ return _mm256_broadcastsi128_si256(a);
+}
+
+#else
+
static really_really_inline
m256 lshift64_m256(m256 a, int b) {
- m256 rv = a;
+ m256 rv = a;
rv.lo = lshift64_m128(rv.lo, b);
rv.hi = lshift64_m128(rv.hi, b);
- return rv;
-}
-
-static really_inline
+ return rv;
+}
+
+static really_inline
m256 rshift64_m256(m256 a, int b) {
- m256 rv = a;
+ m256 rv = a;
rv.lo = rshift64_m128(rv.lo, b);
rv.hi = rshift64_m128(rv.hi, b);
- return rv;
-}
-static really_inline
-m256 set32x8(u32 in) {
- m256 rv;
- rv.lo = set16x8((u8) in);
- rv.hi = rv.lo;
- return rv;
-}
-
+ return rv;
+}
+static really_inline
+m256 set32x8(u32 in) {
+ m256 rv;
+ rv.lo = set16x8((u8) in);
+ rv.hi = rv.lo;
+ return rv;
+}
+
static really_inline
m256 eq256(m256 a, m256 b) {
m256 rv;
@@ -473,207 +473,207 @@ m256 set2x128(m128 a) {
m256 rv = {a, a};
return rv;
}
-#endif
-
-static really_inline m256 zeroes256(void) {
+#endif
+
+static really_inline m256 zeroes256(void) {
#if defined(HAVE_AVX2)
- return _mm256_setzero_si256();
-#else
- m256 rv = {zeroes128(), zeroes128()};
- return rv;
-#endif
-}
-
-static really_inline m256 ones256(void) {
+ return _mm256_setzero_si256();
+#else
+ m256 rv = {zeroes128(), zeroes128()};
+ return rv;
+#endif
+}
+
+static really_inline m256 ones256(void) {
#if defined(HAVE_AVX2)
m256 rv = _mm256_set1_epi8(0xFF);
-#else
- m256 rv = {ones128(), ones128()};
-#endif
- return rv;
-}
-
+#else
+ m256 rv = {ones128(), ones128()};
+#endif
+ return rv;
+}
+
#if defined(HAVE_AVX2)
-static really_inline m256 and256(m256 a, m256 b) {
- return _mm256_and_si256(a, b);
-}
-#else
-static really_inline m256 and256(m256 a, m256 b) {
- m256 rv;
- rv.lo = and128(a.lo, b.lo);
- rv.hi = and128(a.hi, b.hi);
- return rv;
-}
-#endif
-
+static really_inline m256 and256(m256 a, m256 b) {
+ return _mm256_and_si256(a, b);
+}
+#else
+static really_inline m256 and256(m256 a, m256 b) {
+ m256 rv;
+ rv.lo = and128(a.lo, b.lo);
+ rv.hi = and128(a.hi, b.hi);
+ return rv;
+}
+#endif
+
#if defined(HAVE_AVX2)
-static really_inline m256 or256(m256 a, m256 b) {
- return _mm256_or_si256(a, b);
-}
-#else
-static really_inline m256 or256(m256 a, m256 b) {
- m256 rv;
- rv.lo = or128(a.lo, b.lo);
- rv.hi = or128(a.hi, b.hi);
- return rv;
-}
-#endif
-
+static really_inline m256 or256(m256 a, m256 b) {
+ return _mm256_or_si256(a, b);
+}
+#else
+static really_inline m256 or256(m256 a, m256 b) {
+ m256 rv;
+ rv.lo = or128(a.lo, b.lo);
+ rv.hi = or128(a.hi, b.hi);
+ return rv;
+}
+#endif
+
#if defined(HAVE_AVX2)
-static really_inline m256 xor256(m256 a, m256 b) {
- return _mm256_xor_si256(a, b);
-}
-#else
-static really_inline m256 xor256(m256 a, m256 b) {
- m256 rv;
- rv.lo = xor128(a.lo, b.lo);
- rv.hi = xor128(a.hi, b.hi);
- return rv;
-}
-#endif
-
+static really_inline m256 xor256(m256 a, m256 b) {
+ return _mm256_xor_si256(a, b);
+}
+#else
+static really_inline m256 xor256(m256 a, m256 b) {
+ m256 rv;
+ rv.lo = xor128(a.lo, b.lo);
+ rv.hi = xor128(a.hi, b.hi);
+ return rv;
+}
+#endif
+
#if defined(HAVE_AVX2)
-static really_inline m256 not256(m256 a) {
- return _mm256_xor_si256(a, ones256());
-}
-#else
-static really_inline m256 not256(m256 a) {
- m256 rv;
- rv.lo = not128(a.lo);
- rv.hi = not128(a.hi);
- return rv;
-}
-#endif
-
+static really_inline m256 not256(m256 a) {
+ return _mm256_xor_si256(a, ones256());
+}
+#else
+static really_inline m256 not256(m256 a) {
+ m256 rv;
+ rv.lo = not128(a.lo);
+ rv.hi = not128(a.hi);
+ return rv;
+}
+#endif
+
#if defined(HAVE_AVX2)
-static really_inline m256 andnot256(m256 a, m256 b) {
- return _mm256_andnot_si256(a, b);
-}
-#else
-static really_inline m256 andnot256(m256 a, m256 b) {
- m256 rv;
- rv.lo = andnot128(a.lo, b.lo);
- rv.hi = andnot128(a.hi, b.hi);
- return rv;
-}
-#endif
-
-static really_inline int diff256(m256 a, m256 b) {
+static really_inline m256 andnot256(m256 a, m256 b) {
+ return _mm256_andnot_si256(a, b);
+}
+#else
+static really_inline m256 andnot256(m256 a, m256 b) {
+ m256 rv;
+ rv.lo = andnot128(a.lo, b.lo);
+ rv.hi = andnot128(a.hi, b.hi);
+ return rv;
+}
+#endif
+
+static really_inline int diff256(m256 a, m256 b) {
#if defined(HAVE_AVX2)
- return !!(_mm256_movemask_epi8(_mm256_cmpeq_epi8(a, b)) ^ (int)-1);
-#else
- return diff128(a.lo, b.lo) || diff128(a.hi, b.hi);
-#endif
-}
-
-static really_inline int isnonzero256(m256 a) {
+ return !!(_mm256_movemask_epi8(_mm256_cmpeq_epi8(a, b)) ^ (int)-1);
+#else
+ return diff128(a.lo, b.lo) || diff128(a.hi, b.hi);
+#endif
+}
+
+static really_inline int isnonzero256(m256 a) {
#if defined(HAVE_AVX2)
- return !!diff256(a, zeroes256());
-#else
- return isnonzero128(or128(a.lo, a.hi));
-#endif
-}
-
-/**
- * "Rich" version of diff256(). Takes two vectors a and b and returns an 8-bit
- * mask indicating which 32-bit words contain differences.
- */
-static really_inline u32 diffrich256(m256 a, m256 b) {
+ return !!diff256(a, zeroes256());
+#else
+ return isnonzero128(or128(a.lo, a.hi));
+#endif
+}
+
+/**
+ * "Rich" version of diff256(). Takes two vectors a and b and returns an 8-bit
+ * mask indicating which 32-bit words contain differences.
+ */
+static really_inline u32 diffrich256(m256 a, m256 b) {
#if defined(HAVE_AVX2)
- a = _mm256_cmpeq_epi32(a, b);
- return ~(_mm256_movemask_ps(_mm256_castsi256_ps(a))) & 0xFF;
-#else
- m128 z = zeroes128();
- a.lo = _mm_cmpeq_epi32(a.lo, b.lo);
- a.hi = _mm_cmpeq_epi32(a.hi, b.hi);
- m128 packed = _mm_packs_epi16(_mm_packs_epi32(a.lo, a.hi), z);
- return ~(_mm_movemask_epi8(packed)) & 0xff;
-#endif
-}
-
-/**
- * "Rich" version of diff256(), 64-bit variant. Takes two vectors a and b and
- * returns an 8-bit mask indicating which 64-bit words contain differences.
- */
-static really_inline u32 diffrich64_256(m256 a, m256 b) {
- u32 d = diffrich256(a, b);
- return (d | (d >> 1)) & 0x55555555;
-}
-
-// aligned load
-static really_inline m256 load256(const void *ptr) {
- assert(ISALIGNED_N(ptr, alignof(m256)));
+ a = _mm256_cmpeq_epi32(a, b);
+ return ~(_mm256_movemask_ps(_mm256_castsi256_ps(a))) & 0xFF;
+#else
+ m128 z = zeroes128();
+ a.lo = _mm_cmpeq_epi32(a.lo, b.lo);
+ a.hi = _mm_cmpeq_epi32(a.hi, b.hi);
+ m128 packed = _mm_packs_epi16(_mm_packs_epi32(a.lo, a.hi), z);
+ return ~(_mm_movemask_epi8(packed)) & 0xff;
+#endif
+}
+
+/**
+ * "Rich" version of diff256(), 64-bit variant. Takes two vectors a and b and
+ * returns an 8-bit mask indicating which 64-bit words contain differences.
+ */
+static really_inline u32 diffrich64_256(m256 a, m256 b) {
+ u32 d = diffrich256(a, b);
+ return (d | (d >> 1)) & 0x55555555;
+}
+
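The fold in diffrich64_256() is the 256-bit version of the trick used by diffrich64_128(): diffrich256() yields one bit per 32-bit lane (8 bits total), and OR-ing each odd bit into the even bit below it leaves exactly one indicator bit per 64-bit lane. Worked on a hypothetical mask (plain integer arithmetic, no vectors needed):

    #include <cassert>

    void fold_example(void) {
        unsigned d = 0x0c; // 32-bit lanes 2 and 3 differ = 64-bit lane 1
        unsigned folded = (d | (d >> 1)) & 0x55555555;
        assert(folded == 0x4); // even bit 2 set: 64-bit lane 1 differs
        // d never exceeds 0xff here, so masking with 0x55555555 is
        // equivalent to masking with 0x55.
    }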
+// aligned load
+static really_inline m256 load256(const void *ptr) {
+ assert(ISALIGNED_N(ptr, alignof(m256)));
#if defined(HAVE_AVX2)
- return _mm256_load_si256((const m256 *)ptr);
-#else
- m256 rv = { load128(ptr), load128((const char *)ptr + 16) };
- return rv;
-#endif
-}
-
-// aligned load of a 128-bit value into both the low and high halves of a 256-bit value
-static really_inline m256 load2x128(const void *ptr) {
+ return _mm256_load_si256((const m256 *)ptr);
+#else
+ m256 rv = { load128(ptr), load128((const char *)ptr + 16) };
+ return rv;
+#endif
+}
+
+// aligned load of a 128-bit value into both the low and high halves of a 256-bit value
+static really_inline m256 load2x128(const void *ptr) {
#if defined(HAVE_AVX2)
- return set2x128(load128(ptr));
-#else
- assert(ISALIGNED_N(ptr, alignof(m128)));
- m256 rv;
- rv.hi = rv.lo = load128(ptr);
- return rv;
-#endif
-}
-
+ return set2x128(load128(ptr));
+#else
+ assert(ISALIGNED_N(ptr, alignof(m128)));
+ m256 rv;
+ rv.hi = rv.lo = load128(ptr);
+ return rv;
+#endif
+}
+
static really_inline m256 loadu2x128(const void *ptr) {
return set2x128(loadu128(ptr));
}
-// aligned store
-static really_inline void store256(void *ptr, m256 a) {
- assert(ISALIGNED_N(ptr, alignof(m256)));
+// aligned store
+static really_inline void store256(void *ptr, m256 a) {
+ assert(ISALIGNED_N(ptr, alignof(m256)));
#if defined(HAVE_AVX2)
- _mm256_store_si256((m256 *)ptr, a);
-#else
- ptr = assume_aligned(ptr, 16);
- *(m256 *)ptr = a;
-#endif
-}
-
-// unaligned load
-static really_inline m256 loadu256(const void *ptr) {
+ _mm256_store_si256((m256 *)ptr, a);
+#else
+ ptr = assume_aligned(ptr, 16);
+ *(m256 *)ptr = a;
+#endif
+}
+
+// unaligned load
+static really_inline m256 loadu256(const void *ptr) {
#if defined(HAVE_AVX2)
- return _mm256_loadu_si256((const m256 *)ptr);
-#else
- m256 rv = { loadu128(ptr), loadu128((const char *)ptr + 16) };
- return rv;
-#endif
-}
-
+ return _mm256_loadu_si256((const m256 *)ptr);
+#else
+ m256 rv = { loadu128(ptr), loadu128((const char *)ptr + 16) };
+ return rv;
+#endif
+}
+
// unaligned store
static really_inline void storeu256(void *ptr, m256 a) {
#if defined(HAVE_AVX2)
_mm256_storeu_si256((m256 *)ptr, a);
-#else
+#else
storeu128(ptr, a.lo);
storeu128((char *)ptr + 16, a.hi);
-#endif
-}
-
-// packed unaligned store of first N bytes
-static really_inline
-void storebytes256(void *ptr, m256 a, unsigned int n) {
- assert(n <= sizeof(a));
- memcpy(ptr, &a, n);
-}
-
-// packed unaligned load of first N bytes, pad with zero
-static really_inline
-m256 loadbytes256(const void *ptr, unsigned int n) {
- m256 a = zeroes256();
- assert(n <= sizeof(a));
- memcpy(&a, ptr, n);
- return a;
-}
-
+#endif
+}
+
+// packed unaligned store of first N bytes
+static really_inline
+void storebytes256(void *ptr, m256 a, unsigned int n) {
+ assert(n <= sizeof(a));
+ memcpy(ptr, &a, n);
+}
+
+// packed unaligned load of first N bytes, pad with zero
+static really_inline
+m256 loadbytes256(const void *ptr, unsigned int n) {
+ m256 a = zeroes256();
+ assert(n <= sizeof(a));
+ memcpy(&a, ptr, n);
+ return a;
+}
+
static really_inline
m256 mask1bit256(unsigned int n) {
assert(n < sizeof(m256) * 8);
@@ -695,48 +695,48 @@ m256 set64x4(u64a hi_1, u64a hi_0, u64a lo_1, u64a lo_0) {
}
#if !defined(HAVE_AVX2)
-// switches on bit N in the given vector.
-static really_inline
-void setbit256(m256 *ptr, unsigned int n) {
- assert(n < sizeof(*ptr) * 8);
- m128 *sub;
- if (n < 128) {
- sub = &ptr->lo;
- } else {
- sub = &ptr->hi;
- n -= 128;
- }
- setbit128(sub, n);
-}
-
-// switches off bit N in the given vector.
-static really_inline
-void clearbit256(m256 *ptr, unsigned int n) {
- assert(n < sizeof(*ptr) * 8);
- m128 *sub;
- if (n < 128) {
- sub = &ptr->lo;
- } else {
- sub = &ptr->hi;
- n -= 128;
- }
- clearbit128(sub, n);
-}
-
-// tests bit N in the given vector.
-static really_inline
+// switches on bit N in the given vector.
+static really_inline
+void setbit256(m256 *ptr, unsigned int n) {
+ assert(n < sizeof(*ptr) * 8);
+ m128 *sub;
+ if (n < 128) {
+ sub = &ptr->lo;
+ } else {
+ sub = &ptr->hi;
+ n -= 128;
+ }
+ setbit128(sub, n);
+}
+
+// switches off bit N in the given vector.
+static really_inline
+void clearbit256(m256 *ptr, unsigned int n) {
+ assert(n < sizeof(*ptr) * 8);
+ m128 *sub;
+ if (n < 128) {
+ sub = &ptr->lo;
+ } else {
+ sub = &ptr->hi;
+ n -= 128;
+ }
+ clearbit128(sub, n);
+}
+
+// tests bit N in the given vector.
+static really_inline
char testbit256(m256 val, unsigned int n) {
assert(n < sizeof(val) * 8);
m128 sub;
- if (n < 128) {
+ if (n < 128) {
sub = val.lo;
- } else {
+ } else {
sub = val.hi;
- n -= 128;
- }
- return testbit128(sub, n);
-}
-
+ n -= 128;
+ }
+ return testbit128(sub, n);
+}
+
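Without AVX2 a 256-bit vector is a pair of m128 halves, so these helpers route bit index n to the right half: indices 0..127 address lo, and 128..255 address hi after subtracting 128. A small sketch exercising the split (the comments describe the fallback path, but the code is valid on either):

    #include <cassert>
    #include "util/simd_utils.h"

    void bit256_example(void) {
        m256 v = zeroes256();
        setbit256(&v, 5);    // lands in the low half
        setbit256(&v, 200);  // lands in the high half as bit 72
        assert(testbit256(v, 5) && testbit256(v, 200));
        clearbit256(&v, 200);
        assert(!testbit256(v, 200));
    }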
static really_really_inline
m128 movdq_hi(m256 x) {
return x.hi;
@@ -753,50 +753,50 @@ m256 combine2x128(m128 hi, m128 lo) {
return rv;
}
-#else // AVX2
-
-// switches on bit N in the given vector.
-static really_inline
-void setbit256(m256 *ptr, unsigned int n) {
+#else // AVX2
+
+// switches on bit N in the given vector.
+static really_inline
+void setbit256(m256 *ptr, unsigned int n) {
*ptr = or256(mask1bit256(n), *ptr);
-}
-
-static really_inline
-void clearbit256(m256 *ptr, unsigned int n) {
+}
+
+static really_inline
+void clearbit256(m256 *ptr, unsigned int n) {
*ptr = andnot256(mask1bit256(n), *ptr);
-}
-
-// tests bit N in the given vector.
-static really_inline
+}
+
+// tests bit N in the given vector.
+static really_inline
char testbit256(m256 val, unsigned int n) {
const m256 mask = mask1bit256(n);
return !_mm256_testz_si256(mask, val);
-}
-
-static really_really_inline
-m128 movdq_hi(m256 x) {
- return _mm256_extracti128_si256(x, 1);
-}
-
-static really_really_inline
-m128 movdq_lo(m256 x) {
- return _mm256_extracti128_si256(x, 0);
-}
-
-#define cast256to128(a) _mm256_castsi256_si128(a)
-#define cast128to256(a) _mm256_castsi128_si256(a)
-#define swap128in256(a) _mm256_permute4x64_epi64(a, 0x4E)
-#define insert128to256(a, b, imm) _mm256_inserti128_si256(a, b, imm)
+}
+
+static really_really_inline
+m128 movdq_hi(m256 x) {
+ return _mm256_extracti128_si256(x, 1);
+}
+
+static really_really_inline
+m128 movdq_lo(m256 x) {
+ return _mm256_extracti128_si256(x, 0);
+}
+
+#define cast256to128(a) _mm256_castsi256_si128(a)
+#define cast128to256(a) _mm256_castsi128_si256(a)
+#define swap128in256(a) _mm256_permute4x64_epi64(a, 0x4E)
+#define insert128to256(a, b, imm) _mm256_inserti128_si256(a, b, imm)
#define rshift128_m256(a, count_immed) _mm256_srli_si256(a, count_immed)
#define lshift128_m256(a, count_immed) _mm256_slli_si256(a, count_immed)
-#define extract64from256(a, imm) _mm_extract_epi64(_mm256_extracti128_si256(a, imm >> 1), imm % 2)
-#define extract32from256(a, imm) _mm_extract_epi32(_mm256_extracti128_si256(a, imm >> 2), imm % 4)
-#define extractlow64from256(a) _mm_cvtsi128_si64(cast256to128(a))
-#define extractlow32from256(a) movd(cast256to128(a))
+#define extract64from256(a, imm) _mm_extract_epi64(_mm256_extracti128_si256(a, imm >> 1), imm % 2)
+#define extract32from256(a, imm) _mm_extract_epi32(_mm256_extracti128_si256(a, imm >> 2), imm % 4)
+#define extractlow64from256(a) _mm_cvtsi128_si64(cast256to128(a))
+#define extractlow32from256(a) movd(cast256to128(a))
#define interleave256hi(a, b) _mm256_unpackhi_epi8(a, b)
#define interleave256lo(a, b) _mm256_unpacklo_epi8(a, b)
#define vpalignr(r, l, offset) _mm256_alignr_epi8(r, l, offset)
-
+
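The extract64from256() macro above splits its immediate in two: imm >> 1 selects the 128-bit lane and imm % 2 selects the 64-bit word within it, which together address word imm of four. A plain-C model of that index arithmetic (illustrative only):

#include <assert.h>
#include <stdint.h>

/* Model of extract64from256(a, imm) over four 64-bit words. */
static uint64_t model_extract64(const uint64_t w[4], unsigned imm) {
    unsigned lane = imm >> 1;          /* which 128-bit half: 0 or 1 */
    unsigned half = imm % 2;           /* which word inside that half */
    return w[lane * 2 + half];         /* equals w[imm] */
}

int main(void) {
    uint64_t w[4] = {10, 11, 12, 13};
    assert(model_extract64(w, 0) == 10 && model_extract64(w, 3) == 13);
    /* Related: swap128in256 passes 0x4E = 0b01001110 to
     * _mm256_permute4x64_epi64, i.e. qword order (2, 3, 0, 1), which
     * exchanges the two 128-bit halves. */
    return 0;
}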
static really_inline
m256 combine2x128(m128 hi, m128 lo) {
#if defined(_mm256_set_m128i)
@@ -805,8 +805,8 @@ m256 combine2x128(m128 hi, m128 lo) {
return insert128to256(cast128to256(lo), hi, 1);
#endif
}
-#endif //AVX2
-
+#endif //AVX2
+
#if defined(HAVE_AVX512)
#define extract128from512(a, imm) _mm512_extracti32x4_epi32(a, imm)
#define interleave512hi(a, b) _mm512_unpackhi_epi8(a, b)
@@ -816,185 +816,185 @@ m256 combine2x128(m128 hi, m128 lo) {
#define vpermq512(idx, a) _mm512_permutexvar_epi64(idx, a)
#endif
-/****
- **** 384-bit Primitives
- ****/
-
-static really_inline m384 and384(m384 a, m384 b) {
- m384 rv;
- rv.lo = and128(a.lo, b.lo);
- rv.mid = and128(a.mid, b.mid);
- rv.hi = and128(a.hi, b.hi);
- return rv;
-}
-
-static really_inline m384 or384(m384 a, m384 b) {
- m384 rv;
- rv.lo = or128(a.lo, b.lo);
- rv.mid = or128(a.mid, b.mid);
- rv.hi = or128(a.hi, b.hi);
- return rv;
-}
-
-static really_inline m384 xor384(m384 a, m384 b) {
- m384 rv;
- rv.lo = xor128(a.lo, b.lo);
- rv.mid = xor128(a.mid, b.mid);
- rv.hi = xor128(a.hi, b.hi);
- return rv;
-}
-static really_inline m384 not384(m384 a) {
- m384 rv;
- rv.lo = not128(a.lo);
- rv.mid = not128(a.mid);
- rv.hi = not128(a.hi);
- return rv;
-}
-static really_inline m384 andnot384(m384 a, m384 b) {
- m384 rv;
- rv.lo = andnot128(a.lo, b.lo);
- rv.mid = andnot128(a.mid, b.mid);
- rv.hi = andnot128(a.hi, b.hi);
- return rv;
-}
-
+/****
+ **** 384-bit Primitives
+ ****/
+
+static really_inline m384 and384(m384 a, m384 b) {
+ m384 rv;
+ rv.lo = and128(a.lo, b.lo);
+ rv.mid = and128(a.mid, b.mid);
+ rv.hi = and128(a.hi, b.hi);
+ return rv;
+}
+
+static really_inline m384 or384(m384 a, m384 b) {
+ m384 rv;
+ rv.lo = or128(a.lo, b.lo);
+ rv.mid = or128(a.mid, b.mid);
+ rv.hi = or128(a.hi, b.hi);
+ return rv;
+}
+
+static really_inline m384 xor384(m384 a, m384 b) {
+ m384 rv;
+ rv.lo = xor128(a.lo, b.lo);
+ rv.mid = xor128(a.mid, b.mid);
+ rv.hi = xor128(a.hi, b.hi);
+ return rv;
+}
+static really_inline m384 not384(m384 a) {
+ m384 rv;
+ rv.lo = not128(a.lo);
+ rv.mid = not128(a.mid);
+ rv.hi = not128(a.hi);
+ return rv;
+}
+static really_inline m384 andnot384(m384 a, m384 b) {
+ m384 rv;
+ rv.lo = andnot128(a.lo, b.lo);
+ rv.mid = andnot128(a.mid, b.mid);
+ rv.hi = andnot128(a.hi, b.hi);
+ return rv;
+}
+
static really_really_inline
m384 lshift64_m384(m384 a, unsigned b) {
- m384 rv;
+ m384 rv;
rv.lo = lshift64_m128(a.lo, b);
rv.mid = lshift64_m128(a.mid, b);
rv.hi = lshift64_m128(a.hi, b);
- return rv;
-}
-
-static really_inline m384 zeroes384(void) {
- m384 rv = {zeroes128(), zeroes128(), zeroes128()};
- return rv;
-}
-
-static really_inline m384 ones384(void) {
- m384 rv = {ones128(), ones128(), ones128()};
- return rv;
-}
-
-static really_inline int diff384(m384 a, m384 b) {
- return diff128(a.lo, b.lo) || diff128(a.mid, b.mid) || diff128(a.hi, b.hi);
-}
-
-static really_inline int isnonzero384(m384 a) {
- return isnonzero128(or128(or128(a.lo, a.mid), a.hi));
-}
-
-/**
- * "Rich" version of diff384(). Takes two vectors a and b and returns a 12-bit
- * mask indicating which 32-bit words contain differences.
- */
-static really_inline u32 diffrich384(m384 a, m384 b) {
- m128 z = zeroes128();
- a.lo = _mm_cmpeq_epi32(a.lo, b.lo);
- a.mid = _mm_cmpeq_epi32(a.mid, b.mid);
- a.hi = _mm_cmpeq_epi32(a.hi, b.hi);
- m128 packed = _mm_packs_epi16(_mm_packs_epi32(a.lo, a.mid),
- _mm_packs_epi32(a.hi, z));
- return ~(_mm_movemask_epi8(packed)) & 0xfff;
-}
-
-/**
- * "Rich" version of diff384(), 64-bit variant. Takes two vectors a and b and
- * returns a 12-bit mask indicating which 64-bit words contain differences.
- */
-static really_inline u32 diffrich64_384(m384 a, m384 b) {
- u32 d = diffrich384(a, b);
- return (d | (d >> 1)) & 0x55555555;
-}
-
-// aligned load
-static really_inline m384 load384(const void *ptr) {
- assert(ISALIGNED_16(ptr));
- m384 rv = { load128(ptr), load128((const char *)ptr + 16),
- load128((const char *)ptr + 32) };
- return rv;
-}
-
-// aligned store
-static really_inline void store384(void *ptr, m384 a) {
- assert(ISALIGNED_16(ptr));
- ptr = assume_aligned(ptr, 16);
- *(m384 *)ptr = a;
-}
-
-// unaligned load
-static really_inline m384 loadu384(const void *ptr) {
- m384 rv = { loadu128(ptr), loadu128((const char *)ptr + 16),
- loadu128((const char *)ptr + 32)};
- return rv;
-}
-
-// packed unaligned store of first N bytes
-static really_inline
-void storebytes384(void *ptr, m384 a, unsigned int n) {
- assert(n <= sizeof(a));
- memcpy(ptr, &a, n);
-}
-
-// packed unaligned load of first N bytes, pad with zero
-static really_inline
-m384 loadbytes384(const void *ptr, unsigned int n) {
- m384 a = zeroes384();
- assert(n <= sizeof(a));
- memcpy(&a, ptr, n);
- return a;
-}
-
-// switches on bit N in the given vector.
-static really_inline
-void setbit384(m384 *ptr, unsigned int n) {
- assert(n < sizeof(*ptr) * 8);
- m128 *sub;
- if (n < 128) {
- sub = &ptr->lo;
- } else if (n < 256) {
- sub = &ptr->mid;
- } else {
- sub = &ptr->hi;
- }
- setbit128(sub, n % 128);
-}
-
-// switches off bit N in the given vector.
-static really_inline
-void clearbit384(m384 *ptr, unsigned int n) {
- assert(n < sizeof(*ptr) * 8);
- m128 *sub;
- if (n < 128) {
- sub = &ptr->lo;
- } else if (n < 256) {
- sub = &ptr->mid;
- } else {
- sub = &ptr->hi;
- }
- clearbit128(sub, n % 128);
-}
-
-// tests bit N in the given vector.
-static really_inline
+ return rv;
+}
+
+static really_inline m384 zeroes384(void) {
+ m384 rv = {zeroes128(), zeroes128(), zeroes128()};
+ return rv;
+}
+
+static really_inline m384 ones384(void) {
+ m384 rv = {ones128(), ones128(), ones128()};
+ return rv;
+}
+
+static really_inline int diff384(m384 a, m384 b) {
+ return diff128(a.lo, b.lo) || diff128(a.mid, b.mid) || diff128(a.hi, b.hi);
+}
+
+static really_inline int isnonzero384(m384 a) {
+ return isnonzero128(or128(or128(a.lo, a.mid), a.hi));
+}
+
+/**
+ * "Rich" version of diff384(). Takes two vectors a and b and returns a 12-bit
+ * mask indicating which 32-bit words contain differences.
+ */
+static really_inline u32 diffrich384(m384 a, m384 b) {
+ m128 z = zeroes128();
+ a.lo = _mm_cmpeq_epi32(a.lo, b.lo);
+ a.mid = _mm_cmpeq_epi32(a.mid, b.mid);
+ a.hi = _mm_cmpeq_epi32(a.hi, b.hi);
+ m128 packed = _mm_packs_epi16(_mm_packs_epi32(a.lo, a.mid),
+ _mm_packs_epi32(a.hi, z));
+ return ~(_mm_movemask_epi8(packed)) & 0xfff;
+}
+
+/**
+ * "Rich" version of diff384(), 64-bit variant. Takes two vectors a and b and
+ * returns a 12-bit mask indicating which 64-bit words contain differences.
+ */
+static really_inline u32 diffrich64_384(m384 a, m384 b) {
+ u32 d = diffrich384(a, b);
+ return (d | (d >> 1)) & 0x55555555;
+}
+
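The fold in diffrich64_384() relies on adjacent result bits covering the two 32-bit halves of each 64-bit word: OR-ing d with d >> 1 and masking with 0x55555555 leaves each even bit set iff either half differed. A worked check (stand-alone, illustrative):

#include <assert.h>
#include <stdint.h>

/* Bit 2k of the result covers 32-bit words 2k and 2k + 1. */
static uint32_t fold_to_64bit_words(uint32_t d) {
    return (d | (d >> 1)) & 0x55555555;
}

int main(void) {
    /* 32-bit words 1 and 4 differ: d = 0b10010. Word 1 sits in 64-bit
     * word 0 (result bit 0); word 4 sits in 64-bit word 2 (result bit 4). */
    assert(fold_to_64bit_words(0x12) == 0x11);
    return 0;
}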
+// aligned load
+static really_inline m384 load384(const void *ptr) {
+ assert(ISALIGNED_16(ptr));
+ m384 rv = { load128(ptr), load128((const char *)ptr + 16),
+ load128((const char *)ptr + 32) };
+ return rv;
+}
+
+// aligned store
+static really_inline void store384(void *ptr, m384 a) {
+ assert(ISALIGNED_16(ptr));
+ ptr = assume_aligned(ptr, 16);
+ *(m384 *)ptr = a;
+}
+
+// unaligned load
+static really_inline m384 loadu384(const void *ptr) {
+ m384 rv = { loadu128(ptr), loadu128((const char *)ptr + 16),
+ loadu128((const char *)ptr + 32)};
+ return rv;
+}
+
+// packed unaligned store of first N bytes
+static really_inline
+void storebytes384(void *ptr, m384 a, unsigned int n) {
+ assert(n <= sizeof(a));
+ memcpy(ptr, &a, n);
+}
+
+// packed unaligned load of first N bytes, pad with zero
+static really_inline
+m384 loadbytes384(const void *ptr, unsigned int n) {
+ m384 a = zeroes384();
+ assert(n <= sizeof(a));
+ memcpy(&a, ptr, n);
+ return a;
+}
+
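loadbytes384()/storebytes384() move only the live prefix of the state through memory; the load zero-fills the tail because it starts from zeroes384(). The same contract in miniature, with a 16-byte buffer standing in for an m384 (illustrative):

#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void) {
    uint8_t state[16], packed[16], back[16];
    memset(state, 0xAB, sizeof(state));

    unsigned n = 5;                   /* only 5 bytes of state are live */
    memcpy(packed, state, n);         /* storebytes: packed partial store */

    memset(back, 0, sizeof(back));    /* loadbytes: zero, then copy n */
    memcpy(back, packed, n);

    assert(back[4] == 0xAB && back[5] == 0x00);
    return 0;
}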
+// switches on bit N in the given vector.
+static really_inline
+void setbit384(m384 *ptr, unsigned int n) {
+ assert(n < sizeof(*ptr) * 8);
+ m128 *sub;
+ if (n < 128) {
+ sub = &ptr->lo;
+ } else if (n < 256) {
+ sub = &ptr->mid;
+ } else {
+ sub = &ptr->hi;
+ }
+ setbit128(sub, n % 128);
+}
+
+// switches off bit N in the given vector.
+static really_inline
+void clearbit384(m384 *ptr, unsigned int n) {
+ assert(n < sizeof(*ptr) * 8);
+ m128 *sub;
+ if (n < 128) {
+ sub = &ptr->lo;
+ } else if (n < 256) {
+ sub = &ptr->mid;
+ } else {
+ sub = &ptr->hi;
+ }
+ clearbit128(sub, n % 128);
+}
+
+// tests bit N in the given vector.
+static really_inline
char testbit384(m384 val, unsigned int n) {
assert(n < sizeof(val) * 8);
m128 sub;
- if (n < 128) {
+ if (n < 128) {
sub = val.lo;
- } else if (n < 256) {
+ } else if (n < 256) {
sub = val.mid;
- } else {
+ } else {
sub = val.hi;
- }
- return testbit128(sub, n % 128);
-}
-
-/****
- **** 512-bit Primitives
- ****/
-
+ }
+ return testbit128(sub, n % 128);
+}
+
+/****
+ **** 512-bit Primitives
+ ****/
+
#define eq512mask(a, b) _mm512_cmpeq_epi8_mask((a), (b))
#define masked_eq512mask(k, a, b) _mm512_mask_cmpeq_epi8_mask((k), (a), (b))
@@ -1002,7 +1002,7 @@ static really_inline
m512 zeroes512(void) {
#if defined(HAVE_AVX512)
return _mm512_setzero_si512();
-#else
+#else
m512 rv = {zeroes256(), zeroes256()};
return rv;
#endif
@@ -1079,60 +1079,60 @@ m512 and512(m512 a, m512 b) {
#if defined(HAVE_AVX512)
return _mm512_and_si512(a, b);
#else
- m512 rv;
- rv.lo = and256(a.lo, b.lo);
- rv.hi = and256(a.hi, b.hi);
- return rv;
+ m512 rv;
+ rv.lo = and256(a.lo, b.lo);
+ rv.hi = and256(a.hi, b.hi);
+ return rv;
#endif
-}
-
+}
+
static really_inline
m512 or512(m512 a, m512 b) {
#if defined(HAVE_AVX512)
return _mm512_or_si512(a, b);
-#else
- m512 rv;
- rv.lo = or256(a.lo, b.lo);
- rv.hi = or256(a.hi, b.hi);
- return rv;
+#else
+ m512 rv;
+ rv.lo = or256(a.lo, b.lo);
+ rv.hi = or256(a.hi, b.hi);
+ return rv;
#endif
-}
-
+}
+
static really_inline
m512 xor512(m512 a, m512 b) {
#if defined(HAVE_AVX512)
return _mm512_xor_si512(a, b);
-#else
- m512 rv;
- rv.lo = xor256(a.lo, b.lo);
- rv.hi = xor256(a.hi, b.hi);
- return rv;
+#else
+ m512 rv;
+ rv.lo = xor256(a.lo, b.lo);
+ rv.hi = xor256(a.hi, b.hi);
+ return rv;
#endif
-}
-
+}
+
static really_inline
m512 not512(m512 a) {
#if defined(HAVE_AVX512)
return _mm512_xor_si512(a, ones512());
-#else
- m512 rv;
- rv.lo = not256(a.lo);
- rv.hi = not256(a.hi);
- return rv;
+#else
+ m512 rv;
+ rv.lo = not256(a.lo);
+ rv.hi = not256(a.hi);
+ return rv;
#endif
-}
-
+}
+
static really_inline
m512 andnot512(m512 a, m512 b) {
#if defined(HAVE_AVX512)
return _mm512_andnot_si512(a, b);
-#else
- m512 rv;
- rv.lo = andnot256(a.lo, b.lo);
- rv.hi = andnot256(a.hi, b.hi);
- return rv;
+#else
+ m512 rv;
+ rv.lo = andnot256(a.lo, b.lo);
+ rv.hi = andnot256(a.hi, b.hi);
+ return rv;
#endif
-}
+}
#if defined(HAVE_AVX512)
static really_really_inline
@@ -1141,39 +1141,39 @@ m512 lshift64_m512(m512 a, unsigned b) {
if (__builtin_constant_p(b)) {
return _mm512_slli_epi64(a, b);
}
-#endif
+#endif
m128 x = _mm_cvtsi32_si128(b);
return _mm512_sll_epi64(a, x);
}
-#else
+#else
static really_really_inline
m512 lshift64_m512(m512 a, unsigned b) {
- m512 rv;
+ m512 rv;
rv.lo = lshift64_m256(a.lo, b);
rv.hi = lshift64_m256(a.hi, b);
- return rv;
-}
-#endif
-
+ return rv;
+}
+#endif
+
#if defined(HAVE_AVX512)
#define rshift64_m512(a, b) _mm512_srli_epi64((a), (b))
#define rshift128_m512(a, count_immed) _mm512_bsrli_epi128(a, count_immed)
#define lshift128_m512(a, count_immed) _mm512_bslli_epi128(a, count_immed)
#endif
-
+
#if !defined(_MM_CMPINT_NE)
#define _MM_CMPINT_NE 0x4
#endif
-
+
static really_inline
int diff512(m512 a, m512 b) {
#if defined(HAVE_AVX512)
return !!_mm512_cmp_epi8_mask(a, b, _MM_CMPINT_NE);
#else
- return diff256(a.lo, b.lo) || diff256(a.hi, b.hi);
+ return diff256(a.lo, b.lo) || diff256(a.hi, b.hi);
#endif
-}
-
+}
+
static really_inline
int isnonzero512(m512 a) {
#if defined(HAVE_AVX512)
@@ -1182,83 +1182,83 @@ int isnonzero512(m512 a) {
m256 x = or256(a.lo, a.hi);
return !!diff256(x, zeroes256());
#else
- m128 x = or128(a.lo.lo, a.lo.hi);
- m128 y = or128(a.hi.lo, a.hi.hi);
- return isnonzero128(or128(x, y));
-#endif
-}
-
-/**
- * "Rich" version of diff512(). Takes two vectors a and b and returns a 16-bit
- * mask indicating which 32-bit words contain differences.
- */
+ m128 x = or128(a.lo.lo, a.lo.hi);
+ m128 y = or128(a.hi.lo, a.hi.hi);
+ return isnonzero128(or128(x, y));
+#endif
+}
+
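The pre-AVX2 isnonzero512() path avoids four separate tests by OR-reducing the quarters pairwise and testing once. The reduction shape, modeled with 64-bit words (illustrative):

#include <assert.h>
#include <stdint.h>

static int toy_isnonzero512(const uint64_t q[4]) {
    uint64_t x = q[0] | q[1];       /* mirrors or128(a.lo.lo, a.lo.hi) */
    uint64_t y = q[2] | q[3];       /* mirrors or128(a.hi.lo, a.hi.hi) */
    return (x | y) != 0;            /* one final test */
}

int main(void) {
    uint64_t zero[4] = {0, 0, 0, 0};
    uint64_t one[4] = {0, 0, 1, 0};
    assert(!toy_isnonzero512(zero) && toy_isnonzero512(one));
    return 0;
}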
+/**
+ * "Rich" version of diff512(). Takes two vectors a and b and returns a 16-bit
+ * mask indicating which 32-bit words contain differences.
+ */
static really_inline
u32 diffrich512(m512 a, m512 b) {
#if defined(HAVE_AVX512)
return _mm512_cmp_epi32_mask(a, b, _MM_CMPINT_NE);
#elif defined(HAVE_AVX2)
- return diffrich256(a.lo, b.lo) | (diffrich256(a.hi, b.hi) << 8);
-#else
- a.lo.lo = _mm_cmpeq_epi32(a.lo.lo, b.lo.lo);
- a.lo.hi = _mm_cmpeq_epi32(a.lo.hi, b.lo.hi);
- a.hi.lo = _mm_cmpeq_epi32(a.hi.lo, b.hi.lo);
- a.hi.hi = _mm_cmpeq_epi32(a.hi.hi, b.hi.hi);
- m128 packed = _mm_packs_epi16(_mm_packs_epi32(a.lo.lo, a.lo.hi),
- _mm_packs_epi32(a.hi.lo, a.hi.hi));
- return ~(_mm_movemask_epi8(packed)) & 0xffff;
-#endif
-}
-
-/**
- * "Rich" version of diffrich(), 64-bit variant. Takes two vectors a and b and
- * returns a 16-bit mask indicating which 64-bit words contain differences.
- */
+ return diffrich256(a.lo, b.lo) | (diffrich256(a.hi, b.hi) << 8);
+#else
+ a.lo.lo = _mm_cmpeq_epi32(a.lo.lo, b.lo.lo);
+ a.lo.hi = _mm_cmpeq_epi32(a.lo.hi, b.lo.hi);
+ a.hi.lo = _mm_cmpeq_epi32(a.hi.lo, b.hi.lo);
+ a.hi.hi = _mm_cmpeq_epi32(a.hi.hi, b.hi.hi);
+ m128 packed = _mm_packs_epi16(_mm_packs_epi32(a.lo.lo, a.lo.hi),
+ _mm_packs_epi32(a.hi.lo, a.hi.hi));
+ return ~(_mm_movemask_epi8(packed)) & 0xffff;
+#endif
+}
+
+/**
+ * "Rich" version of diffrich(), 64-bit variant. Takes two vectors a and b and
+ * returns a 16-bit mask indicating which 64-bit words contain differences.
+ */
static really_inline
u32 diffrich64_512(m512 a, m512 b) {
//TODO: cmp_epi64?
- u32 d = diffrich512(a, b);
- return (d | (d >> 1)) & 0x55555555;
-}
-
-// aligned load
+ u32 d = diffrich512(a, b);
+ return (d | (d >> 1)) & 0x55555555;
+}
+
+// aligned load
static really_inline
m512 load512(const void *ptr) {
#if defined(HAVE_AVX512)
return _mm512_load_si512(ptr);
#else
assert(ISALIGNED_N(ptr, alignof(m256)));
- m512 rv = { load256(ptr), load256((const char *)ptr + 32) };
- return rv;
+ m512 rv = { load256(ptr), load256((const char *)ptr + 32) };
+ return rv;
#endif
-}
-
-// aligned store
+}
+
+// aligned store
static really_inline
void store512(void *ptr, m512 a) {
assert(ISALIGNED_N(ptr, alignof(m512)));
#if defined(HAVE_AVX512)
return _mm512_store_si512(ptr, a);
#elif defined(HAVE_AVX2)
- m512 *x = (m512 *)ptr;
- store256(&x->lo, a.lo);
- store256(&x->hi, a.hi);
-#else
- ptr = assume_aligned(ptr, 16);
- *(m512 *)ptr = a;
-#endif
-}
-
-// unaligned load
+ m512 *x = (m512 *)ptr;
+ store256(&x->lo, a.lo);
+ store256(&x->hi, a.hi);
+#else
+ ptr = assume_aligned(ptr, 16);
+ *(m512 *)ptr = a;
+#endif
+}
+
+// unaligned load
static really_inline
m512 loadu512(const void *ptr) {
#if defined(HAVE_AVX512)
return _mm512_loadu_si512(ptr);
#else
- m512 rv = { loadu256(ptr), loadu256((const char *)ptr + 32) };
- return rv;
+ m512 rv = { loadu256(ptr), loadu256((const char *)ptr + 32) };
+ return rv;
#endif
-}
-
+}
+
// unaligned store
static really_inline
void storeu512(void *ptr, m512 a) {
@@ -1302,22 +1302,22 @@ m256 loadu_maskz_m256(__mmask32 k, const void *ptr) {
}
#endif
-// packed unaligned store of first N bytes
-static really_inline
-void storebytes512(void *ptr, m512 a, unsigned int n) {
- assert(n <= sizeof(a));
- memcpy(ptr, &a, n);
-}
-
-// packed unaligned load of first N bytes, pad with zero
-static really_inline
-m512 loadbytes512(const void *ptr, unsigned int n) {
- m512 a = zeroes512();
- assert(n <= sizeof(a));
- memcpy(&a, ptr, n);
- return a;
-}
-
+// packed unaligned store of first N bytes
+static really_inline
+void storebytes512(void *ptr, m512 a, unsigned int n) {
+ assert(n <= sizeof(a));
+ memcpy(ptr, &a, n);
+}
+
+// packed unaligned load of first N bytes, pad with zero
+static really_inline
+m512 loadbytes512(const void *ptr, unsigned int n) {
+ m512 a = zeroes512();
+ assert(n <= sizeof(a));
+ memcpy(&a, ptr, n);
+ return a;
+}
+
static really_inline
m512 mask1bit512(unsigned int n) {
assert(n < sizeof(m512) * 8);
@@ -1326,95 +1326,95 @@ m512 mask1bit512(unsigned int n) {
return loadu512(&simd_onebit_masks[mask_idx]);
}
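mask1bit512() builds a single-bit vector without shifting: it takes an unaligned 64-byte window from the precomputed simd_onebit_masks table, positioned so exactly the right bit lands in the result. A toy of the sliding-window idea at byte granularity (invented table layout, little-endian host assumed; the real table also encodes the bit within the byte):

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* One nonzero byte in a sea of zeroes; the window offset chooses where
 * that byte appears in the loaded value. */
static const uint8_t onebyte_table[7] = {0, 0, 0, 1, 0, 0, 0};

static uint32_t toy_mask1byte32(unsigned byte_idx) {  /* byte_idx in 0..3 */
    uint32_t v;
    memcpy(&v, &onebyte_table[3 - byte_idx], sizeof(v));
    return v;
}

int main(void) {
    assert(toy_mask1byte32(0) == 0x00000001u);
    assert(toy_mask1byte32(2) == 0x00010000u);
    return 0;
}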
-// switches on bit N in the given vector.
-static really_inline
-void setbit512(m512 *ptr, unsigned int n) {
- assert(n < sizeof(*ptr) * 8);
+// switches on bit N in the given vector.
+static really_inline
+void setbit512(m512 *ptr, unsigned int n) {
+ assert(n < sizeof(*ptr) * 8);
#if !defined(HAVE_AVX2)
- m128 *sub;
- if (n < 128) {
- sub = &ptr->lo.lo;
- } else if (n < 256) {
- sub = &ptr->lo.hi;
- } else if (n < 384) {
- sub = &ptr->hi.lo;
- } else {
- sub = &ptr->hi.hi;
- }
- setbit128(sub, n % 128);
+ m128 *sub;
+ if (n < 128) {
+ sub = &ptr->lo.lo;
+ } else if (n < 256) {
+ sub = &ptr->lo.hi;
+ } else if (n < 384) {
+ sub = &ptr->hi.lo;
+ } else {
+ sub = &ptr->hi.hi;
+ }
+ setbit128(sub, n % 128);
#elif defined(HAVE_AVX512)
*ptr = or512(mask1bit512(n), *ptr);
-#else
- m256 *sub;
- if (n < 256) {
- sub = &ptr->lo;
- } else {
- sub = &ptr->hi;
- n -= 256;
- }
- setbit256(sub, n);
-#endif
-}
-
-// switches off bit N in the given vector.
-static really_inline
-void clearbit512(m512 *ptr, unsigned int n) {
- assert(n < sizeof(*ptr) * 8);
+#else
+ m256 *sub;
+ if (n < 256) {
+ sub = &ptr->lo;
+ } else {
+ sub = &ptr->hi;
+ n -= 256;
+ }
+ setbit256(sub, n);
+#endif
+}
+
+// switches off bit N in the given vector.
+static really_inline
+void clearbit512(m512 *ptr, unsigned int n) {
+ assert(n < sizeof(*ptr) * 8);
#if !defined(HAVE_AVX2)
- m128 *sub;
- if (n < 128) {
- sub = &ptr->lo.lo;
- } else if (n < 256) {
- sub = &ptr->lo.hi;
- } else if (n < 384) {
- sub = &ptr->hi.lo;
- } else {
- sub = &ptr->hi.hi;
- }
- clearbit128(sub, n % 128);
+ m128 *sub;
+ if (n < 128) {
+ sub = &ptr->lo.lo;
+ } else if (n < 256) {
+ sub = &ptr->lo.hi;
+ } else if (n < 384) {
+ sub = &ptr->hi.lo;
+ } else {
+ sub = &ptr->hi.hi;
+ }
+ clearbit128(sub, n % 128);
#elif defined(HAVE_AVX512)
*ptr = andnot512(mask1bit512(n), *ptr);
-#else
- m256 *sub;
- if (n < 256) {
- sub = &ptr->lo;
- } else {
- sub = &ptr->hi;
- n -= 256;
- }
- clearbit256(sub, n);
-#endif
-}
-
-// tests bit N in the given vector.
-static really_inline
+#else
+ m256 *sub;
+ if (n < 256) {
+ sub = &ptr->lo;
+ } else {
+ sub = &ptr->hi;
+ n -= 256;
+ }
+ clearbit256(sub, n);
+#endif
+}
+
+// tests bit N in the given vector.
+static really_inline
char testbit512(m512 val, unsigned int n) {
assert(n < sizeof(val) * 8);
#if !defined(HAVE_AVX2)
m128 sub;
- if (n < 128) {
+ if (n < 128) {
sub = val.lo.lo;
- } else if (n < 256) {
+ } else if (n < 256) {
sub = val.lo.hi;
- } else if (n < 384) {
+ } else if (n < 384) {
sub = val.hi.lo;
- } else {
+ } else {
sub = val.hi.hi;
- }
- return testbit128(sub, n % 128);
+ }
+ return testbit128(sub, n % 128);
#elif defined(HAVE_AVX512)
const m512 mask = mask1bit512(n);
return !!_mm512_test_epi8_mask(mask, val);
-#else
+#else
m256 sub;
- if (n < 256) {
+ if (n < 256) {
sub = val.lo;
- } else {
+ } else {
sub = val.hi;
- n -= 256;
- }
- return testbit256(sub, n);
-#endif
-}
-
-#endif
+ n -= 256;
+ }
+ return testbit256(sub, n);
+#endif
+}
+
+#endif
diff --git a/contrib/libs/hyperscan/src/util/state_compress.c b/contrib/libs/hyperscan/src/util/state_compress.c
index 7238849e7f..0861aa78ac 100644
--- a/contrib/libs/hyperscan/src/util/state_compress.c
+++ b/contrib/libs/hyperscan/src/util/state_compress.c
@@ -1,552 +1,552 @@
-/*
+/*
* Copyright (c) 2015-2017, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \file
- * \brief Mask-based state compression, used by the NFA.
- */
-#include "config.h"
-#include "ue2common.h"
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * \brief Mask-based state compression, used by the NFA.
+ */
+#include "config.h"
+#include "ue2common.h"
#include "arch.h"
-#include "bitutils.h"
-#include "unaligned.h"
-#include "pack_bits.h"
-#include "partial_store.h"
-#include "popcount.h"
-#include "state_compress.h"
-
-#include <string.h>
-
-/*
- * 32-bit store/load.
- */
-
-void storecompressed32(void *ptr, const u32 *x, const u32 *m, u32 bytes) {
- assert(popcount32(*m) <= bytes * 8);
-
- u32 v = compress32(*x, *m);
- partial_store_u32(ptr, v, bytes);
-}
-
-void loadcompressed32(u32 *x, const void *ptr, const u32 *m, u32 bytes) {
- assert(popcount32(*m) <= bytes * 8);
-
- u32 v = partial_load_u32(ptr, bytes);
- *x = expand32(v, *m);
-}
-
-/*
- * 64-bit store/load.
- */
-
-void storecompressed64(void *ptr, const u64a *x, const u64a *m, u32 bytes) {
- assert(popcount64(*m) <= bytes * 8);
-
- u64a v = compress64(*x, *m);
- partial_store_u64a(ptr, v, bytes);
-}
-
-void loadcompressed64(u64a *x, const void *ptr, const u64a *m, u32 bytes) {
- assert(popcount64(*m) <= bytes * 8);
-
- u64a v = partial_load_u64a(ptr, bytes);
- *x = expand64(v, *m);
-}
-
-/*
- * 128-bit store/load.
- */
-
-#if defined(ARCH_32_BIT)
-static really_inline
-void storecompressed128_32bit(void *ptr, m128 xvec, m128 mvec) {
- // First, decompose our vectors into 32-bit chunks.
- u32 x[4];
- memcpy(x, &xvec, sizeof(xvec));
- u32 m[4];
- memcpy(m, &mvec, sizeof(mvec));
-
- // Count the number of bits of compressed state we're writing out per
- // chunk.
- u32 bits[4] = { popcount32(m[0]), popcount32(m[1]),
- popcount32(m[2]), popcount32(m[3]) };
-
- // Compress each 32-bit chunk individually.
- u32 v[4] = { compress32(x[0], m[0]), compress32(x[1], m[1]),
- compress32(x[2], m[2]), compress32(x[3], m[3]) };
-
- // Write packed data out.
- pack_bits_32(ptr, v, bits, 4);
-}
-#endif
-
-#if defined(ARCH_64_BIT)
-static really_inline
-void storecompressed128_64bit(void *ptr, m128 xvec, m128 mvec) {
- // First, decompose our vectors into 64-bit chunks.
- u64a x[2];
- memcpy(x, &xvec, sizeof(xvec));
- u64a m[2];
- memcpy(m, &mvec, sizeof(mvec));
-
- // Count the number of bits of compressed state we're writing out per
- // chunk.
- u32 bits[2] = { popcount64(m[0]), popcount64(m[1]) };
-
- // Compress each 64-bit chunk individually.
- u64a v[2] = { compress64(x[0], m[0]), compress64(x[1], m[1]) };
-
- // Write packed data out.
- pack_bits_64(ptr, v, bits, 2);
-}
-#endif
-
-void storecompressed128(void *ptr, const m128 *x, const m128 *m,
- UNUSED u32 bytes) {
-#if defined(ARCH_64_BIT)
- storecompressed128_64bit(ptr, *x, *m);
-#else
- storecompressed128_32bit(ptr, *x, *m);
-#endif
-}
-
-#if defined(ARCH_32_BIT)
-static really_inline
-m128 loadcompressed128_32bit(const void *ptr, m128 mvec) {
- // First, decompose our vectors into 32-bit chunks.
- u32 m[8];
- memcpy(m, &mvec, sizeof(mvec));
-
- u32 bits[4] = { popcount32(m[0]), popcount32(m[1]),
- popcount32(m[2]), popcount32(m[3]) };
- u32 v[4];
-
- unpack_bits_32(v, (const u8 *)ptr, bits, 4);
-
- u32 x[4] = { expand32(v[0], m[0]), expand32(v[1], m[1]),
- expand32(v[2], m[2]), expand32(v[3], m[3]) };
-
- return _mm_set_epi32(x[3], x[2], x[1], x[0]);
-}
-#endif
-
-#if defined(ARCH_64_BIT)
-static really_inline
-m128 loadcompressed128_64bit(const void *ptr, m128 mvec) {
- // First, decompose our vectors into 64-bit chunks.
- u64a m[2] = { movq(mvec), movq(_mm_srli_si128(mvec, 8)) };
-
- u32 bits[2] = { popcount64(m[0]), popcount64(m[1]) };
- u64a v[2];
-
- unpack_bits_64(v, (const u8 *)ptr, bits, 2);
-
- u64a x[2] = { expand64(v[0], m[0]), expand64(v[1], m[1]) };
-
- return _mm_set_epi64x(x[1], x[0]);
-}
-#endif
-
-void loadcompressed128(m128 *x, const void *ptr, const m128 *m,
- UNUSED u32 bytes) {
-#if defined(ARCH_64_BIT)
- *x = loadcompressed128_64bit(ptr, *m);
-#else
- *x = loadcompressed128_32bit(ptr, *m);
-#endif
-}
-
-/*
- * 256-bit store/load.
- */
-
-#if defined(ARCH_32_BIT)
-static really_inline
-void storecompressed256_32bit(void *ptr, m256 xvec, m256 mvec) {
- // First, decompose our vectors into 32-bit chunks.
- u32 x[8];
- memcpy(x, &xvec, sizeof(xvec));
- u32 m[8];
- memcpy(m, &mvec, sizeof(mvec));
-
- // Count the number of bits of compressed state we're writing out per
- // chunk.
- u32 bits[8] = { popcount32(m[0]), popcount32(m[1]),
- popcount32(m[2]), popcount32(m[3]),
- popcount32(m[4]), popcount32(m[5]),
- popcount32(m[6]), popcount32(m[7])};
-
- // Compress each 32-bit chunk individually.
- u32 v[8] = { compress32(x[0], m[0]), compress32(x[1], m[1]),
- compress32(x[2], m[2]), compress32(x[3], m[3]),
- compress32(x[4], m[4]), compress32(x[5], m[5]),
- compress32(x[6], m[6]), compress32(x[7], m[7]) };
-
- // Write packed data out.
- pack_bits_32(ptr, v, bits, 8);
-}
-#endif
-
-#if defined(ARCH_64_BIT)
-static really_really_inline
-void storecompressed256_64bit(void *ptr, m256 xvec, m256 mvec) {
- // First, decompose our vectors into 64-bit chunks.
- u64a x[4];
- memcpy(x, &xvec, sizeof(xvec));
- u64a m[4];
- memcpy(m, &mvec, sizeof(mvec));
-
- // Count the number of bits of compressed state we're writing out per
- // chunk.
- u32 bits[4] = { popcount64(m[0]), popcount64(m[1]),
- popcount64(m[2]), popcount64(m[3]) };
-
- // Compress each 64-bit chunk individually.
- u64a v[4] = { compress64(x[0], m[0]), compress64(x[1], m[1]),
- compress64(x[2], m[2]), compress64(x[3], m[3]) };
-
- // Write packed data out.
- pack_bits_64(ptr, v, bits, 4);
-}
-#endif
-
-void storecompressed256(void *ptr, const m256 *x, const m256 *m,
- UNUSED u32 bytes) {
-#if defined(ARCH_64_BIT)
- storecompressed256_64bit(ptr, *x, *m);
-#else
- storecompressed256_32bit(ptr, *x, *m);
-#endif
-}
-
-#if defined(ARCH_32_BIT)
-static really_inline
-m256 loadcompressed256_32bit(const void *ptr, m256 mvec) {
- // First, decompose our vectors into 32-bit chunks.
- u32 m[8];
- memcpy(m, &mvec, sizeof(mvec));
-
- u32 bits[8] = { popcount32(m[0]), popcount32(m[1]),
- popcount32(m[2]), popcount32(m[3]),
- popcount32(m[4]), popcount32(m[5]),
- popcount32(m[6]), popcount32(m[7])};
- u32 v[8];
-
- unpack_bits_32(v, (const u8 *)ptr, bits, 8);
-
- u32 x[8] = { expand32(v[0], m[0]), expand32(v[1], m[1]),
- expand32(v[2], m[2]), expand32(v[3], m[3]),
- expand32(v[4], m[4]), expand32(v[5], m[5]),
- expand32(v[6], m[6]), expand32(v[7], m[7]) };
-
+#include "bitutils.h"
+#include "unaligned.h"
+#include "pack_bits.h"
+#include "partial_store.h"
+#include "popcount.h"
+#include "state_compress.h"
+
+#include <string.h>
+
+/*
+ * 32-bit store/load.
+ */
+
+void storecompressed32(void *ptr, const u32 *x, const u32 *m, u32 bytes) {
+ assert(popcount32(*m) <= bytes * 8);
+
+ u32 v = compress32(*x, *m);
+ partial_store_u32(ptr, v, bytes);
+}
+
+void loadcompressed32(u32 *x, const void *ptr, const u32 *m, u32 bytes) {
+ assert(popcount32(*m) <= bytes * 8);
+
+ u32 v = partial_load_u32(ptr, bytes);
+ *x = expand32(v, *m);
+}
+
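compress32()/expand32() (from bitutils.h, possibly BMI2 PEXT/PDEP under the hood) are what make the store/load pair above lossless: compress gathers the state bits selected by the mask into the low bits, expand scatters them back. Portable reference loops, for illustration only:

#include <assert.h>
#include <stdint.h>

static uint32_t model_compress32(uint32_t x, uint32_t m) {
    uint32_t out = 0, bit = 1;
    for (; m; m &= m - 1) {           /* visit set bits of m, low to high */
        if (x & (m & -m)) {
            out |= bit;
        }
        bit <<= 1;
    }
    return out;
}

static uint32_t model_expand32(uint32_t v, uint32_t m) {
    uint32_t out = 0, bit = 1;
    for (; m; m &= m - 1) {
        if (v & bit) {
            out |= m & -m;
        }
        bit <<= 1;
    }
    return out;
}

int main(void) {
    uint32_t m = 0xF0, x = 0x50;                /* bits 4 and 6 live */
    uint32_t v = model_compress32(x, m);
    assert(v == 0x5);                           /* packed to bits 0 and 2 */
    assert(model_expand32(v, m) == x);          /* round trip under mask */
    return 0;
}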
+/*
+ * 64-bit store/load.
+ */
+
+void storecompressed64(void *ptr, const u64a *x, const u64a *m, u32 bytes) {
+ assert(popcount64(*m) <= bytes * 8);
+
+ u64a v = compress64(*x, *m);
+ partial_store_u64a(ptr, v, bytes);
+}
+
+void loadcompressed64(u64a *x, const void *ptr, const u64a *m, u32 bytes) {
+ assert(popcount64(*m) <= bytes * 8);
+
+ u64a v = partial_load_u64a(ptr, bytes);
+ *x = expand64(v, *m);
+}
+
+/*
+ * 128-bit store/load.
+ */
+
+#if defined(ARCH_32_BIT)
+static really_inline
+void storecompressed128_32bit(void *ptr, m128 xvec, m128 mvec) {
+ // First, decompose our vectors into 32-bit chunks.
+ u32 x[4];
+ memcpy(x, &xvec, sizeof(xvec));
+ u32 m[4];
+ memcpy(m, &mvec, sizeof(mvec));
+
+ // Count the number of bits of compressed state we're writing out per
+ // chunk.
+ u32 bits[4] = { popcount32(m[0]), popcount32(m[1]),
+ popcount32(m[2]), popcount32(m[3]) };
+
+ // Compress each 32-bit chunk individually.
+ u32 v[4] = { compress32(x[0], m[0]), compress32(x[1], m[1]),
+ compress32(x[2], m[2]), compress32(x[3], m[3]) };
+
+ // Write packed data out.
+ pack_bits_32(ptr, v, bits, 4);
+}
+#endif
+
+#if defined(ARCH_64_BIT)
+static really_inline
+void storecompressed128_64bit(void *ptr, m128 xvec, m128 mvec) {
+ // First, decompose our vectors into 64-bit chunks.
+ u64a x[2];
+ memcpy(x, &xvec, sizeof(xvec));
+ u64a m[2];
+ memcpy(m, &mvec, sizeof(mvec));
+
+ // Count the number of bits of compressed state we're writing out per
+ // chunk.
+ u32 bits[2] = { popcount64(m[0]), popcount64(m[1]) };
+
+ // Compress each 64-bit chunk individually.
+ u64a v[2] = { compress64(x[0], m[0]), compress64(x[1], m[1]) };
+
+ // Write packed data out.
+ pack_bits_64(ptr, v, bits, 2);
+}
+#endif
+
+void storecompressed128(void *ptr, const m128 *x, const m128 *m,
+ UNUSED u32 bytes) {
+#if defined(ARCH_64_BIT)
+ storecompressed128_64bit(ptr, *x, *m);
+#else
+ storecompressed128_32bit(ptr, *x, *m);
+#endif
+}
+
+#if defined(ARCH_32_BIT)
+static really_inline
+m128 loadcompressed128_32bit(const void *ptr, m128 mvec) {
+ // First, decompose our vectors into 32-bit chunks.
+ u32 m[8];
+ memcpy(m, &mvec, sizeof(mvec));
+
+ u32 bits[4] = { popcount32(m[0]), popcount32(m[1]),
+ popcount32(m[2]), popcount32(m[3]) };
+ u32 v[4];
+
+ unpack_bits_32(v, (const u8 *)ptr, bits, 4);
+
+ u32 x[4] = { expand32(v[0], m[0]), expand32(v[1], m[1]),
+ expand32(v[2], m[2]), expand32(v[3], m[3]) };
+
+ return _mm_set_epi32(x[3], x[2], x[1], x[0]);
+}
+#endif
+
+#if defined(ARCH_64_BIT)
+static really_inline
+m128 loadcompressed128_64bit(const void *ptr, m128 mvec) {
+ // First, decompose our vectors into 64-bit chunks.
+ u64a m[2] = { movq(mvec), movq(_mm_srli_si128(mvec, 8)) };
+
+ u32 bits[2] = { popcount64(m[0]), popcount64(m[1]) };
+ u64a v[2];
+
+ unpack_bits_64(v, (const u8 *)ptr, bits, 2);
+
+ u64a x[2] = { expand64(v[0], m[0]), expand64(v[1], m[1]) };
+
+ return _mm_set_epi64x(x[1], x[0]);
+}
+#endif
+
+void loadcompressed128(m128 *x, const void *ptr, const m128 *m,
+ UNUSED u32 bytes) {
+#if defined(ARCH_64_BIT)
+ *x = loadcompressed128_64bit(ptr, *m);
+#else
+ *x = loadcompressed128_32bit(ptr, *m);
+#endif
+}
+
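The 128-bit-and-wider variants add one more step: each chunk's compressed bits are packed back to back by pack_bits_32()/pack_bits_64(), so the stream holds popcount(mask) bits total rather than whole words. A toy packer over a single 64-bit accumulator (the real routines emit a byte stream of any length; illustrative only):

#include <assert.h>
#include <stdint.h>

/* Pack the low bits[i] bits of each v[i] consecutively, total <= 64. */
static uint64_t toy_pack(const uint64_t *v, const uint32_t *bits, int n) {
    uint64_t acc = 0;
    unsigned pos = 0;
    for (int i = 0; i < n; i++) {
        uint64_t field = bits[i] >= 64
            ? v[i] : v[i] & (((uint64_t)1 << bits[i]) - 1);
        acc |= field << pos;
        pos += bits[i];
    }
    return acc;
}

int main(void) {
    uint64_t v[2] = {0x5, 0x3};       /* a 3-bit field and a 2-bit field */
    uint32_t bits[2] = {3, 2};
    assert(toy_pack(v, bits, 2) == 0x1D);   /* 0b11 after 0b101 = 0b11101 */
    return 0;
}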
+/*
+ * 256-bit store/load.
+ */
+
+#if defined(ARCH_32_BIT)
+static really_inline
+void storecompressed256_32bit(void *ptr, m256 xvec, m256 mvec) {
+ // First, decompose our vectors into 32-bit chunks.
+ u32 x[8];
+ memcpy(x, &xvec, sizeof(xvec));
+ u32 m[8];
+ memcpy(m, &mvec, sizeof(mvec));
+
+ // Count the number of bits of compressed state we're writing out per
+ // chunk.
+ u32 bits[8] = { popcount32(m[0]), popcount32(m[1]),
+ popcount32(m[2]), popcount32(m[3]),
+ popcount32(m[4]), popcount32(m[5]),
+ popcount32(m[6]), popcount32(m[7])};
+
+ // Compress each 32-bit chunk individually.
+ u32 v[8] = { compress32(x[0], m[0]), compress32(x[1], m[1]),
+ compress32(x[2], m[2]), compress32(x[3], m[3]),
+ compress32(x[4], m[4]), compress32(x[5], m[5]),
+ compress32(x[6], m[6]), compress32(x[7], m[7]) };
+
+ // Write packed data out.
+ pack_bits_32(ptr, v, bits, 8);
+}
+#endif
+
+#if defined(ARCH_64_BIT)
+static really_really_inline
+void storecompressed256_64bit(void *ptr, m256 xvec, m256 mvec) {
+ // First, decompose our vectors into 64-bit chunks.
+ u64a x[4];
+ memcpy(x, &xvec, sizeof(xvec));
+ u64a m[4];
+ memcpy(m, &mvec, sizeof(mvec));
+
+ // Count the number of bits of compressed state we're writing out per
+ // chunk.
+ u32 bits[4] = { popcount64(m[0]), popcount64(m[1]),
+ popcount64(m[2]), popcount64(m[3]) };
+
+ // Compress each 64-bit chunk individually.
+ u64a v[4] = { compress64(x[0], m[0]), compress64(x[1], m[1]),
+ compress64(x[2], m[2]), compress64(x[3], m[3]) };
+
+ // Write packed data out.
+ pack_bits_64(ptr, v, bits, 4);
+}
+#endif
+
+void storecompressed256(void *ptr, const m256 *x, const m256 *m,
+ UNUSED u32 bytes) {
+#if defined(ARCH_64_BIT)
+ storecompressed256_64bit(ptr, *x, *m);
+#else
+ storecompressed256_32bit(ptr, *x, *m);
+#endif
+}
+
+#if defined(ARCH_32_BIT)
+static really_inline
+m256 loadcompressed256_32bit(const void *ptr, m256 mvec) {
+ // First, decompose our vectors into 32-bit chunks.
+ u32 m[8];
+ memcpy(m, &mvec, sizeof(mvec));
+
+ u32 bits[8] = { popcount32(m[0]), popcount32(m[1]),
+ popcount32(m[2]), popcount32(m[3]),
+ popcount32(m[4]), popcount32(m[5]),
+ popcount32(m[6]), popcount32(m[7])};
+ u32 v[8];
+
+ unpack_bits_32(v, (const u8 *)ptr, bits, 8);
+
+ u32 x[8] = { expand32(v[0], m[0]), expand32(v[1], m[1]),
+ expand32(v[2], m[2]), expand32(v[3], m[3]),
+ expand32(v[4], m[4]), expand32(v[5], m[5]),
+ expand32(v[6], m[6]), expand32(v[7], m[7]) };
+
#if !defined(HAVE_AVX2)
- m256 xvec = { .lo = _mm_set_epi32(x[3], x[2], x[1], x[0]),
- .hi = _mm_set_epi32(x[7], x[6], x[5], x[4]) };
-#else
- m256 xvec = _mm256_set_epi32(x[7], x[6], x[5], x[4],
- x[3], x[2], x[1], x[0]);
-#endif
- return xvec;
-}
-#endif
-
-#if defined(ARCH_64_BIT)
-static really_inline
-m256 loadcompressed256_64bit(const void *ptr, m256 mvec) {
- // First, decompose our vectors into 64-bit chunks.
- u64a m[4];
- memcpy(m, &mvec, sizeof(mvec));
-
- u32 bits[4] = { popcount64(m[0]), popcount64(m[1]),
- popcount64(m[2]), popcount64(m[3]) };
- u64a v[4];
-
- unpack_bits_64(v, (const u8 *)ptr, bits, 4);
-
- u64a x[4] = { expand64(v[0], m[0]), expand64(v[1], m[1]),
- expand64(v[2], m[2]), expand64(v[3], m[3]) };
-
+ m256 xvec = { .lo = _mm_set_epi32(x[3], x[2], x[1], x[0]),
+ .hi = _mm_set_epi32(x[7], x[6], x[5], x[4]) };
+#else
+ m256 xvec = _mm256_set_epi32(x[7], x[6], x[5], x[4],
+ x[3], x[2], x[1], x[0]);
+#endif
+ return xvec;
+}
+#endif
+
+#if defined(ARCH_64_BIT)
+static really_inline
+m256 loadcompressed256_64bit(const void *ptr, m256 mvec) {
+ // First, decompose our vectors into 64-bit chunks.
+ u64a m[4];
+ memcpy(m, &mvec, sizeof(mvec));
+
+ u32 bits[4] = { popcount64(m[0]), popcount64(m[1]),
+ popcount64(m[2]), popcount64(m[3]) };
+ u64a v[4];
+
+ unpack_bits_64(v, (const u8 *)ptr, bits, 4);
+
+ u64a x[4] = { expand64(v[0], m[0]), expand64(v[1], m[1]),
+ expand64(v[2], m[2]), expand64(v[3], m[3]) };
+
#if !defined(HAVE_AVX2)
- m256 xvec = { .lo = _mm_set_epi64x(x[1], x[0]),
- .hi = _mm_set_epi64x(x[3], x[2]) };
-#else
- m256 xvec = _mm256_set_epi64x(x[3], x[2], x[1], x[0]);
-#endif
- return xvec;
-}
-#endif
-
-void loadcompressed256(m256 *x, const void *ptr, const m256 *m,
- UNUSED u32 bytes) {
-#if defined(ARCH_64_BIT)
- *x = loadcompressed256_64bit(ptr, *m);
-#else
- *x = loadcompressed256_32bit(ptr, *m);
-#endif
-}
-
-/*
- * 384-bit store/load.
- */
-
-#if defined(ARCH_32_BIT)
-static really_inline
-void storecompressed384_32bit(void *ptr, m384 xvec, m384 mvec) {
- // First, decompose our vectors into 32-bit chunks.
- u32 x[12];
- memcpy(x, &xvec, sizeof(xvec));
- u32 m[12];
- memcpy(m, &mvec, sizeof(mvec));
-
- // Count the number of bits of compressed state we're writing out per
- // chunk.
- u32 bits[12] = { popcount32(m[0]), popcount32(m[1]),
- popcount32(m[2]), popcount32(m[3]),
- popcount32(m[4]), popcount32(m[5]),
- popcount32(m[6]), popcount32(m[7]),
- popcount32(m[8]), popcount32(m[9]),
- popcount32(m[10]), popcount32(m[11]) };
-
- // Compress each 32-bit chunk individually.
- u32 v[12] = { compress32(x[0], m[0]), compress32(x[1], m[1]),
- compress32(x[2], m[2]), compress32(x[3], m[3]),
- compress32(x[4], m[4]), compress32(x[5], m[5]),
- compress32(x[6], m[6]), compress32(x[7], m[7]),
- compress32(x[8], m[8]), compress32(x[9], m[9]),
- compress32(x[10], m[10]), compress32(x[11], m[11])};
-
- // Write packed data out.
- pack_bits_32(ptr, v, bits, 12);
-}
-#endif
-
-#if defined(ARCH_64_BIT)
-static really_inline
-void storecompressed384_64bit(void *ptr, m384 xvec, m384 mvec) {
- // First, decompose our vectors into 64-bit chunks.
- u64a x[6];
- memcpy(x, &xvec, sizeof(xvec));
- u64a m[6];
- memcpy(m, &mvec, sizeof(mvec));
-
- // Count the number of bits of compressed state we're writing out per
- // chunk.
- u32 bits[6] = { popcount64(m[0]), popcount64(m[1]),
- popcount64(m[2]), popcount64(m[3]),
- popcount64(m[4]), popcount64(m[5]) };
-
- // Compress each 64-bit chunk individually.
- u64a v[6] = { compress64(x[0], m[0]), compress64(x[1], m[1]),
- compress64(x[2], m[2]), compress64(x[3], m[3]),
- compress64(x[4], m[4]), compress64(x[5], m[5]) };
-
- // Write packed data out.
- pack_bits_64(ptr, v, bits, 6);
-}
-#endif
-
-void storecompressed384(void *ptr, const m384 *x, const m384 *m,
- UNUSED u32 bytes) {
-#if defined(ARCH_64_BIT)
- storecompressed384_64bit(ptr, *x, *m);
-#else
- storecompressed384_32bit(ptr, *x, *m);
-#endif
-}
-
-#if defined(ARCH_32_BIT)
-static really_inline
-m384 loadcompressed384_32bit(const void *ptr, m384 mvec) {
- // First, decompose our vectors into 32-bit chunks.
- u32 m[12];
- memcpy(m, &mvec, sizeof(mvec));
-
- u32 bits[12] = { popcount32(m[0]), popcount32(m[1]),
- popcount32(m[2]), popcount32(m[3]),
- popcount32(m[4]), popcount32(m[5]),
- popcount32(m[6]), popcount32(m[7]),
- popcount32(m[8]), popcount32(m[9]),
- popcount32(m[10]), popcount32(m[11]) };
- u32 v[12];
-
- unpack_bits_32(v, (const u8 *)ptr, bits, 12);
-
- u32 x[12] = { expand32(v[0], m[0]), expand32(v[1], m[1]),
- expand32(v[2], m[2]), expand32(v[3], m[3]),
- expand32(v[4], m[4]), expand32(v[5], m[5]),
- expand32(v[6], m[6]), expand32(v[7], m[7]),
- expand32(v[8], m[8]), expand32(v[9], m[9]),
- expand32(v[10], m[10]), expand32(v[11], m[11]) };
-
- m384 xvec = { .lo = _mm_set_epi32(x[3], x[2], x[1], x[0]),
- .mid = _mm_set_epi32(x[7], x[6], x[5], x[4]),
- .hi = _mm_set_epi32(x[11], x[10], x[9], x[8]) };
- return xvec;
-}
-#endif
-
-#if defined(ARCH_64_BIT)
-static really_inline
-m384 loadcompressed384_64bit(const void *ptr, m384 mvec) {
- // First, decompose our vectors into 64-bit chunks.
- u64a m[6];
- memcpy(m, &mvec, sizeof(mvec));
-
- u32 bits[6] = { popcount64(m[0]), popcount64(m[1]),
- popcount64(m[2]), popcount64(m[3]),
- popcount64(m[4]), popcount64(m[5]) };
- u64a v[6];
-
- unpack_bits_64(v, (const u8 *)ptr, bits, 6);
-
- u64a x[6] = { expand64(v[0], m[0]), expand64(v[1], m[1]),
- expand64(v[2], m[2]), expand64(v[3], m[3]),
- expand64(v[4], m[4]), expand64(v[5], m[5]) };
-
- m384 xvec = { .lo = _mm_set_epi64x(x[1], x[0]),
- .mid = _mm_set_epi64x(x[3], x[2]),
- .hi = _mm_set_epi64x(x[5], x[4]) };
- return xvec;
-}
-#endif
-
-void loadcompressed384(m384 *x, const void *ptr, const m384 *m,
- UNUSED u32 bytes) {
-#if defined(ARCH_64_BIT)
- *x = loadcompressed384_64bit(ptr, *m);
-#else
- *x = loadcompressed384_32bit(ptr, *m);
-#endif
-}
-
-/*
- * 512-bit store/load.
- */
-
-#if defined(ARCH_32_BIT)
-static really_inline
-void storecompressed512_32bit(void *ptr, m512 xvec, m512 mvec) {
- // First, decompose our vectors into 32-bit chunks.
- u32 x[16];
- memcpy(x, &xvec, sizeof(xvec));
- u32 m[16];
- memcpy(m, &mvec, sizeof(mvec));
-
- // Count the number of bits of compressed state we're writing out per
- // chunk.
- u32 bits[16] = { popcount32(m[0]), popcount32(m[1]),
- popcount32(m[2]), popcount32(m[3]),
- popcount32(m[4]), popcount32(m[5]),
- popcount32(m[6]), popcount32(m[7]),
- popcount32(m[8]), popcount32(m[9]),
- popcount32(m[10]), popcount32(m[11]),
- popcount32(m[12]), popcount32(m[13]),
- popcount32(m[14]), popcount32(m[15])};
-
- // Compress each 32-bit chunk individually.
- u32 v[16] = { compress32(x[0], m[0]), compress32(x[1], m[1]),
- compress32(x[2], m[2]), compress32(x[3], m[3]),
- compress32(x[4], m[4]), compress32(x[5], m[5]),
- compress32(x[6], m[6]), compress32(x[7], m[7]),
- compress32(x[8], m[8]), compress32(x[9], m[9]),
- compress32(x[10], m[10]), compress32(x[11], m[11]),
- compress32(x[12], m[12]), compress32(x[13], m[13]),
- compress32(x[14], m[14]), compress32(x[15], m[15]) };
-
- // Write packed data out.
- pack_bits_32(ptr, v, bits, 16);
-}
-#endif
-
-#if defined(ARCH_64_BIT)
-static really_inline
-void storecompressed512_64bit(void *ptr, m512 xvec, m512 mvec) {
- // First, decompose our vectors into 64-bit chunks.
- u64a m[8];
- memcpy(m, &mvec, sizeof(mvec));
- u64a x[8];
- memcpy(x, &xvec, sizeof(xvec));
-
- // Count the number of bits of compressed state we're writing out per
- // chunk.
- u32 bits[8] = { popcount64(m[0]), popcount64(m[1]),
- popcount64(m[2]), popcount64(m[3]),
- popcount64(m[4]), popcount64(m[5]),
- popcount64(m[6]), popcount64(m[7]) };
-
- // Compress each 64-bit chunk individually.
- u64a v[8] = { compress64(x[0], m[0]), compress64(x[1], m[1]),
- compress64(x[2], m[2]), compress64(x[3], m[3]),
- compress64(x[4], m[4]), compress64(x[5], m[5]),
- compress64(x[6], m[6]), compress64(x[7], m[7]) };
-
- // Write packed data out.
- pack_bits_64(ptr, v, bits, 8);
-}
-#endif
-
-void storecompressed512(void *ptr, const m512 *x, const m512 *m,
- UNUSED u32 bytes) {
-#if defined(ARCH_64_BIT)
- storecompressed512_64bit(ptr, *x, *m);
-#else
- storecompressed512_32bit(ptr, *x, *m);
-#endif
-}
-
-#if defined(ARCH_32_BIT)
-static really_inline
-m512 loadcompressed512_32bit(const void *ptr, m512 mvec) {
- // First, decompose our vectors into 32-bit chunks.
- u32 m[16];
- memcpy(m, &mvec, sizeof(mvec));
-
- u32 bits[16] = { popcount32(m[0]), popcount32(m[1]),
- popcount32(m[2]), popcount32(m[3]),
- popcount32(m[4]), popcount32(m[5]),
- popcount32(m[6]), popcount32(m[7]),
- popcount32(m[8]), popcount32(m[9]),
- popcount32(m[10]), popcount32(m[11]),
- popcount32(m[12]), popcount32(m[13]),
- popcount32(m[14]), popcount32(m[15]) };
- u32 v[16];
-
- unpack_bits_32(v, (const u8 *)ptr, bits, 16);
-
- u32 x[16] = { expand32(v[0], m[0]), expand32(v[1], m[1]),
- expand32(v[2], m[2]), expand32(v[3], m[3]),
- expand32(v[4], m[4]), expand32(v[5], m[5]),
- expand32(v[6], m[6]), expand32(v[7], m[7]),
- expand32(v[8], m[8]), expand32(v[9], m[9]),
- expand32(v[10], m[10]), expand32(v[11], m[11]),
- expand32(v[12], m[12]), expand32(v[13], m[13]),
- expand32(v[14], m[14]), expand32(v[15], m[15]) };
-
- m512 xvec;
+ m256 xvec = { .lo = _mm_set_epi64x(x[1], x[0]),
+ .hi = _mm_set_epi64x(x[3], x[2]) };
+#else
+ m256 xvec = _mm256_set_epi64x(x[3], x[2], x[1], x[0]);
+#endif
+ return xvec;
+}
+#endif
+
+void loadcompressed256(m256 *x, const void *ptr, const m256 *m,
+ UNUSED u32 bytes) {
+#if defined(ARCH_64_BIT)
+ *x = loadcompressed256_64bit(ptr, *m);
+#else
+ *x = loadcompressed256_32bit(ptr, *m);
+#endif
+}
+
+/*
+ * 384-bit store/load.
+ */
+
+#if defined(ARCH_32_BIT)
+static really_inline
+void storecompressed384_32bit(void *ptr, m384 xvec, m384 mvec) {
+ // First, decompose our vectors into 32-bit chunks.
+ u32 x[12];
+ memcpy(x, &xvec, sizeof(xvec));
+ u32 m[12];
+ memcpy(m, &mvec, sizeof(mvec));
+
+ // Count the number of bits of compressed state we're writing out per
+ // chunk.
+ u32 bits[12] = { popcount32(m[0]), popcount32(m[1]),
+ popcount32(m[2]), popcount32(m[3]),
+ popcount32(m[4]), popcount32(m[5]),
+ popcount32(m[6]), popcount32(m[7]),
+ popcount32(m[8]), popcount32(m[9]),
+ popcount32(m[10]), popcount32(m[11]) };
+
+ // Compress each 32-bit chunk individually.
+ u32 v[12] = { compress32(x[0], m[0]), compress32(x[1], m[1]),
+ compress32(x[2], m[2]), compress32(x[3], m[3]),
+ compress32(x[4], m[4]), compress32(x[5], m[5]),
+ compress32(x[6], m[6]), compress32(x[7], m[7]),
+ compress32(x[8], m[8]), compress32(x[9], m[9]),
+ compress32(x[10], m[10]), compress32(x[11], m[11])};
+
+ // Write packed data out.
+ pack_bits_32(ptr, v, bits, 12);
+}
+#endif
+
+#if defined(ARCH_64_BIT)
+static really_inline
+void storecompressed384_64bit(void *ptr, m384 xvec, m384 mvec) {
+ // First, decompose our vectors into 64-bit chunks.
+ u64a x[6];
+ memcpy(x, &xvec, sizeof(xvec));
+ u64a m[6];
+ memcpy(m, &mvec, sizeof(mvec));
+
+ // Count the number of bits of compressed state we're writing out per
+ // chunk.
+ u32 bits[6] = { popcount64(m[0]), popcount64(m[1]),
+ popcount64(m[2]), popcount64(m[3]),
+ popcount64(m[4]), popcount64(m[5]) };
+
+ // Compress each 64-bit chunk individually.
+ u64a v[6] = { compress64(x[0], m[0]), compress64(x[1], m[1]),
+ compress64(x[2], m[2]), compress64(x[3], m[3]),
+ compress64(x[4], m[4]), compress64(x[5], m[5]) };
+
+ // Write packed data out.
+ pack_bits_64(ptr, v, bits, 6);
+}
+#endif
+
+void storecompressed384(void *ptr, const m384 *x, const m384 *m,
+ UNUSED u32 bytes) {
+#if defined(ARCH_64_BIT)
+ storecompressed384_64bit(ptr, *x, *m);
+#else
+ storecompressed384_32bit(ptr, *x, *m);
+#endif
+}
+
+#if defined(ARCH_32_BIT)
+static really_inline
+m384 loadcompressed384_32bit(const void *ptr, m384 mvec) {
+ // First, decompose our vectors into 32-bit chunks.
+ u32 m[12];
+ memcpy(m, &mvec, sizeof(mvec));
+
+ u32 bits[12] = { popcount32(m[0]), popcount32(m[1]),
+ popcount32(m[2]), popcount32(m[3]),
+ popcount32(m[4]), popcount32(m[5]),
+ popcount32(m[6]), popcount32(m[7]),
+ popcount32(m[8]), popcount32(m[9]),
+ popcount32(m[10]), popcount32(m[11]) };
+ u32 v[12];
+
+ unpack_bits_32(v, (const u8 *)ptr, bits, 12);
+
+ u32 x[12] = { expand32(v[0], m[0]), expand32(v[1], m[1]),
+ expand32(v[2], m[2]), expand32(v[3], m[3]),
+ expand32(v[4], m[4]), expand32(v[5], m[5]),
+ expand32(v[6], m[6]), expand32(v[7], m[7]),
+ expand32(v[8], m[8]), expand32(v[9], m[9]),
+ expand32(v[10], m[10]), expand32(v[11], m[11]) };
+
+ m384 xvec = { .lo = _mm_set_epi32(x[3], x[2], x[1], x[0]),
+ .mid = _mm_set_epi32(x[7], x[6], x[5], x[4]),
+ .hi = _mm_set_epi32(x[11], x[10], x[9], x[8]) };
+ return xvec;
+}
+#endif
+
+#if defined(ARCH_64_BIT)
+static really_inline
+m384 loadcompressed384_64bit(const void *ptr, m384 mvec) {
+ // First, decompose our vectors into 64-bit chunks.
+ u64a m[6];
+ memcpy(m, &mvec, sizeof(mvec));
+
+ u32 bits[6] = { popcount64(m[0]), popcount64(m[1]),
+ popcount64(m[2]), popcount64(m[3]),
+ popcount64(m[4]), popcount64(m[5]) };
+ u64a v[6];
+
+ unpack_bits_64(v, (const u8 *)ptr, bits, 6);
+
+ u64a x[6] = { expand64(v[0], m[0]), expand64(v[1], m[1]),
+ expand64(v[2], m[2]), expand64(v[3], m[3]),
+ expand64(v[4], m[4]), expand64(v[5], m[5]) };
+
+ m384 xvec = { .lo = _mm_set_epi64x(x[1], x[0]),
+ .mid = _mm_set_epi64x(x[3], x[2]),
+ .hi = _mm_set_epi64x(x[5], x[4]) };
+ return xvec;
+}
+#endif
+
+void loadcompressed384(m384 *x, const void *ptr, const m384 *m,
+ UNUSED u32 bytes) {
+#if defined(ARCH_64_BIT)
+ *x = loadcompressed384_64bit(ptr, *m);
+#else
+ *x = loadcompressed384_32bit(ptr, *m);
+#endif
+}
+
+/*
+ * 512-bit store/load.
+ */
+
+#if defined(ARCH_32_BIT)
+static really_inline
+void storecompressed512_32bit(void *ptr, m512 xvec, m512 mvec) {
+ // First, decompose our vectors into 32-bit chunks.
+ u32 x[16];
+ memcpy(x, &xvec, sizeof(xvec));
+ u32 m[16];
+ memcpy(m, &mvec, sizeof(mvec));
+
+ // Count the number of bits of compressed state we're writing out per
+ // chunk.
+ u32 bits[16] = { popcount32(m[0]), popcount32(m[1]),
+ popcount32(m[2]), popcount32(m[3]),
+ popcount32(m[4]), popcount32(m[5]),
+ popcount32(m[6]), popcount32(m[7]),
+ popcount32(m[8]), popcount32(m[9]),
+ popcount32(m[10]), popcount32(m[11]),
+ popcount32(m[12]), popcount32(m[13]),
+ popcount32(m[14]), popcount32(m[15])};
+
+ // Compress each 32-bit chunk individually.
+ u32 v[16] = { compress32(x[0], m[0]), compress32(x[1], m[1]),
+ compress32(x[2], m[2]), compress32(x[3], m[3]),
+ compress32(x[4], m[4]), compress32(x[5], m[5]),
+ compress32(x[6], m[6]), compress32(x[7], m[7]),
+ compress32(x[8], m[8]), compress32(x[9], m[9]),
+ compress32(x[10], m[10]), compress32(x[11], m[11]),
+ compress32(x[12], m[12]), compress32(x[13], m[13]),
+ compress32(x[14], m[14]), compress32(x[15], m[15]) };
+
+ // Write packed data out.
+ pack_bits_32(ptr, v, bits, 16);
+}
+#endif
+
+#if defined(ARCH_64_BIT)
+static really_inline
+void storecompressed512_64bit(void *ptr, m512 xvec, m512 mvec) {
+ // First, decompose our vectors into 64-bit chunks.
+ u64a m[8];
+ memcpy(m, &mvec, sizeof(mvec));
+ u64a x[8];
+ memcpy(x, &xvec, sizeof(xvec));
+
+ // Count the number of bits of compressed state we're writing out per
+ // chunk.
+ u32 bits[8] = { popcount64(m[0]), popcount64(m[1]),
+ popcount64(m[2]), popcount64(m[3]),
+ popcount64(m[4]), popcount64(m[5]),
+ popcount64(m[6]), popcount64(m[7]) };
+
+ // Compress each 64-bit chunk individually.
+ u64a v[8] = { compress64(x[0], m[0]), compress64(x[1], m[1]),
+ compress64(x[2], m[2]), compress64(x[3], m[3]),
+ compress64(x[4], m[4]), compress64(x[5], m[5]),
+ compress64(x[6], m[6]), compress64(x[7], m[7]) };
+
+ // Write packed data out.
+ pack_bits_64(ptr, v, bits, 8);
+}
+#endif
+
+void storecompressed512(void *ptr, const m512 *x, const m512 *m,
+ UNUSED u32 bytes) {
+#if defined(ARCH_64_BIT)
+ storecompressed512_64bit(ptr, *x, *m);
+#else
+ storecompressed512_32bit(ptr, *x, *m);
+#endif
+}
+
+#if defined(ARCH_32_BIT)
+static really_inline
+m512 loadcompressed512_32bit(const void *ptr, m512 mvec) {
+ // First, decompose our vectors into 32-bit chunks.
+ u32 m[16];
+ memcpy(m, &mvec, sizeof(mvec));
+
+ u32 bits[16] = { popcount32(m[0]), popcount32(m[1]),
+ popcount32(m[2]), popcount32(m[3]),
+ popcount32(m[4]), popcount32(m[5]),
+ popcount32(m[6]), popcount32(m[7]),
+ popcount32(m[8]), popcount32(m[9]),
+ popcount32(m[10]), popcount32(m[11]),
+ popcount32(m[12]), popcount32(m[13]),
+ popcount32(m[14]), popcount32(m[15]) };
+ u32 v[16];
+
+ unpack_bits_32(v, (const u8 *)ptr, bits, 16);
+
+ u32 x[16] = { expand32(v[0], m[0]), expand32(v[1], m[1]),
+ expand32(v[2], m[2]), expand32(v[3], m[3]),
+ expand32(v[4], m[4]), expand32(v[5], m[5]),
+ expand32(v[6], m[6]), expand32(v[7], m[7]),
+ expand32(v[8], m[8]), expand32(v[9], m[9]),
+ expand32(v[10], m[10]), expand32(v[11], m[11]),
+ expand32(v[12], m[12]), expand32(v[13], m[13]),
+ expand32(v[14], m[14]), expand32(v[15], m[15]) };
+
+ m512 xvec;
#if defined(HAVE_AVX512)
xvec = _mm512_set_epi32(x[15], x[14], x[13], x[12],
x[11], x[10], x[9], x[8],
@@ -558,35 +558,35 @@ m512 loadcompressed512_32bit(const void *ptr, m512 mvec) {
xvec.hi = _mm256_set_epi32(x[15], x[14], x[13], x[12],
x[11], x[10], x[9], x[8]);
#else
- xvec.lo.lo = _mm_set_epi32(x[3], x[2], x[1], x[0]);
- xvec.lo.hi = _mm_set_epi32(x[7], x[6], x[5], x[4]);
- xvec.hi.lo = _mm_set_epi32(x[11], x[10], x[9], x[8]);
- xvec.hi.hi = _mm_set_epi32(x[15], x[14], x[13], x[12]);
-#endif
- return xvec;
-}
-#endif
-
-#if defined(ARCH_64_BIT)
-static really_inline
-m512 loadcompressed512_64bit(const void *ptr, m512 mvec) {
- // First, decompose our vectors into 64-bit chunks.
- u64a m[8];
- memcpy(m, &mvec, sizeof(mvec));
-
- u32 bits[8] = { popcount64(m[0]), popcount64(m[1]),
- popcount64(m[2]), popcount64(m[3]),
- popcount64(m[4]), popcount64(m[5]),
- popcount64(m[6]), popcount64(m[7]) };
- u64a v[8];
-
- unpack_bits_64(v, (const u8 *)ptr, bits, 8);
-
- u64a x[8] = { expand64(v[0], m[0]), expand64(v[1], m[1]),
- expand64(v[2], m[2]), expand64(v[3], m[3]),
- expand64(v[4], m[4]), expand64(v[5], m[5]),
- expand64(v[6], m[6]), expand64(v[7], m[7]) };
-
+ xvec.lo.lo = _mm_set_epi32(x[3], x[2], x[1], x[0]);
+ xvec.lo.hi = _mm_set_epi32(x[7], x[6], x[5], x[4]);
+ xvec.hi.lo = _mm_set_epi32(x[11], x[10], x[9], x[8]);
+ xvec.hi.hi = _mm_set_epi32(x[15], x[14], x[13], x[12]);
+#endif
+ return xvec;
+}
+#endif
+
+#if defined(ARCH_64_BIT)
+static really_inline
+m512 loadcompressed512_64bit(const void *ptr, m512 mvec) {
+ // First, decompose our vectors into 64-bit chunks.
+ u64a m[8];
+ memcpy(m, &mvec, sizeof(mvec));
+
+ u32 bits[8] = { popcount64(m[0]), popcount64(m[1]),
+ popcount64(m[2]), popcount64(m[3]),
+ popcount64(m[4]), popcount64(m[5]),
+ popcount64(m[6]), popcount64(m[7]) };
+ u64a v[8];
+
+ unpack_bits_64(v, (const u8 *)ptr, bits, 8);
+
+ u64a x[8] = { expand64(v[0], m[0]), expand64(v[1], m[1]),
+ expand64(v[2], m[2]), expand64(v[3], m[3]),
+ expand64(v[4], m[4]), expand64(v[5], m[5]),
+ expand64(v[6], m[6]), expand64(v[7], m[7]) };
+
#if defined(HAVE_AVX512)
m512 xvec = _mm512_set_epi64(x[7], x[6], x[5], x[4],
x[3], x[2], x[1], x[0]);
@@ -594,20 +594,20 @@ m512 loadcompressed512_64bit(const void *ptr, m512 mvec) {
m512 xvec = { .lo = _mm256_set_epi64x(x[3], x[2], x[1], x[0]),
.hi = _mm256_set_epi64x(x[7], x[6], x[5], x[4])};
#else
- m512 xvec = { .lo = { _mm_set_epi64x(x[1], x[0]),
- _mm_set_epi64x(x[3], x[2]) },
- .hi = { _mm_set_epi64x(x[5], x[4]),
- _mm_set_epi64x(x[7], x[6]) } };
-#endif
- return xvec;
-}
-#endif
-
-void loadcompressed512(m512 *x, const void *ptr, const m512 *m,
- UNUSED u32 bytes) {
-#if defined(ARCH_64_BIT)
- *x = loadcompressed512_64bit(ptr, *m);
-#else
- *x = loadcompressed512_32bit(ptr, *m);
-#endif
-}
+ m512 xvec = { .lo = { _mm_set_epi64x(x[1], x[0]),
+ _mm_set_epi64x(x[3], x[2]) },
+ .hi = { _mm_set_epi64x(x[5], x[4]),
+ _mm_set_epi64x(x[7], x[6]) } };
+#endif
+ return xvec;
+}
+#endif
+
+void loadcompressed512(m512 *x, const void *ptr, const m512 *m,
+ UNUSED u32 bytes) {
+#if defined(ARCH_64_BIT)
+ *x = loadcompressed512_64bit(ptr, *m);
+#else
+ *x = loadcompressed512_32bit(ptr, *m);
+#endif
+}
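One property holds across every width above: the packed stream needs exactly popcount(mask) bits, rounded up to whole bytes, which is what the bytes argument budgets for in the sub-128-bit variants (their asserts check popcount(*m) <= bytes * 8). A small helper sketch (not from the library):

#include <assert.h>
#include <stdint.h>

static unsigned compressed_bytes(uint64_t mask) {
    unsigned bits = 0;
    for (; mask; mask &= mask - 1) {  /* popcount */
        bits++;
    }
    return (bits + 7) / 8;
}

int main(void) {
    assert(compressed_bytes(0x1FF) == 2);   /* 9 live bits -> 2 bytes */
    assert(compressed_bytes(0) == 0);
    return 0;
}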
diff --git a/contrib/libs/hyperscan/src/util/state_compress.h b/contrib/libs/hyperscan/src/util/state_compress.h
index a17d2355cc..183f173888 100644
--- a/contrib/libs/hyperscan/src/util/state_compress.h
+++ b/contrib/libs/hyperscan/src/util/state_compress.h
@@ -1,68 +1,68 @@
-/*
- * Copyright (c) 2015, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \file
- * \brief Mask-based state compression, used by the NFA.
- */
-
-#ifndef STATE_COMPRESS_H
-#define STATE_COMPRESS_H
-
-#include "simd_utils.h"
-#include "ue2common.h"
-
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
-/* Note: bytes is not used by implementations >= 128 */
-
-void storecompressed32(void *ptr, const u32 *x, const u32 *m, u32 bytes);
-void loadcompressed32(u32 *x, const void *ptr, const u32 *m, u32 bytes);
-
-void storecompressed64(void *ptr, const u64a *x, const u64a *m, u32 bytes);
-void loadcompressed64(u64a *x, const void *ptr, const u64a *m, u32 bytes);
-
-void storecompressed128(void *ptr, const m128 *x, const m128 *m, u32 bytes);
-void loadcompressed128(m128 *x, const void *ptr, const m128 *m, u32 bytes);
-
-void storecompressed256(void *ptr, const m256 *x, const m256 *m, u32 bytes);
-void loadcompressed256(m256 *x, const void *ptr, const m256 *m, u32 bytes);
-
-void storecompressed384(void *ptr, const m384 *x, const m384 *m, u32 bytes);
-void loadcompressed384(m384 *x, const void *ptr, const m384 *m, u32 bytes);
-
-void storecompressed512(void *ptr, const m512 *x, const m512 *m, u32 bytes);
-void loadcompressed512(m512 *x, const void *ptr, const m512 *m, u32 bytes);
-
-#ifdef __cplusplus
-} /* extern "C" */
-#endif
-
-#endif
+/*
+ * Copyright (c) 2015, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * \brief Mask-based state compression, used by the NFA.
+ */
+
+#ifndef STATE_COMPRESS_H
+#define STATE_COMPRESS_H
+
+#include "simd_utils.h"
+#include "ue2common.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/* Note: bytes is not used by implementations >= 128 */
+
+void storecompressed32(void *ptr, const u32 *x, const u32 *m, u32 bytes);
+void loadcompressed32(u32 *x, const void *ptr, const u32 *m, u32 bytes);
+
+void storecompressed64(void *ptr, const u64a *x, const u64a *m, u32 bytes);
+void loadcompressed64(u64a *x, const void *ptr, const u64a *m, u32 bytes);
+
+void storecompressed128(void *ptr, const m128 *x, const m128 *m, u32 bytes);
+void loadcompressed128(m128 *x, const void *ptr, const m128 *m, u32 bytes);
+
+void storecompressed256(void *ptr, const m256 *x, const m256 *m, u32 bytes);
+void loadcompressed256(m256 *x, const void *ptr, const m256 *m, u32 bytes);
+
+void storecompressed384(void *ptr, const m384 *x, const m384 *m, u32 bytes);
+void loadcompressed384(m384 *x, const void *ptr, const m384 *m, u32 bytes);
+
+void storecompressed512(void *ptr, const m512 *x, const m512 *m, u32 bytes);
+void loadcompressed512(m512 *x, const void *ptr, const m512 *m, u32 bytes);
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif
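
Taken together, the declarations pair up: a caller packs the live state bits with storecompressedN and restores them with the matching loadcompressedN, passing the same mask both times. A minimal round-trip sketch for the 32-bit variant (the worst-case buffer sizing and the bytes argument are assumptions here; real callers can size the stream from the mask's popcount):

#include "state_compress.h"
#include "ue2common.h"
#include <cassert>

// Round-trip one u32 of state: only bits under the mask survive.
static void roundtrip32(void) {
    u32 state = 0xdeadbeefU;
    u32 mask  = 0x00ff00ffU;        // which state bits are live
    char stream[sizeof(u32)];       // worst case: all 32 bits live

    storecompressed32(stream, &state, &mask, sizeof(stream));

    u32 restored = 0;
    loadcompressed32(&restored, stream, &mask, sizeof(stream));
    assert(restored == (state & mask));
}
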
diff --git a/contrib/libs/hyperscan/src/util/target_info.cpp b/contrib/libs/hyperscan/src/util/target_info.cpp
index 66ba5f5acc..d30c9ae2ca 100644
--- a/contrib/libs/hyperscan/src/util/target_info.cpp
+++ b/contrib/libs/hyperscan/src/util/target_info.cpp
@@ -1,51 +1,51 @@
-/*
+/*
* Copyright (c) 2015-2020, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#include "hs_compile.h" // for various hs_platform_info flags
-#include "target_info.h"
-#include "util/cpuid_flags.h"
-
-namespace ue2 {
-
-target_t get_current_target(void) {
- hs_platform_info p;
- p.cpu_features = cpuid_flags();
- p.tune = cpuid_tune();
-
- return target_t(p);
-}
-
-bool target_t::can_run_on_code_built_for(const target_t &code_target) const {
- if (!has_avx2() && code_target.has_avx2()) {
- return false;
- }
-
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include "hs_compile.h" // for various hs_platform_info flags
+#include "target_info.h"
+#include "util/cpuid_flags.h"
+
+namespace ue2 {
+
+target_t get_current_target(void) {
+ hs_platform_info p;
+ p.cpu_features = cpuid_flags();
+ p.tune = cpuid_tune();
+
+ return target_t(p);
+}
+
+bool target_t::can_run_on_code_built_for(const target_t &code_target) const {
+ if (!has_avx2() && code_target.has_avx2()) {
+ return false;
+ }
+
if (!has_avx512() && code_target.has_avx512()) {
return false;
}
@@ -54,16 +54,16 @@ bool target_t::can_run_on_code_built_for(const target_t &code_target) const {
return false;
}
- return true;
-}
-
-target_t::target_t(const hs_platform_info &p)
- : tune(p.tune), cpu_features(p.cpu_features) {}
-
-bool target_t::has_avx2(void) const {
+ return true;
+}
+
+target_t::target_t(const hs_platform_info &p)
+ : tune(p.tune), cpu_features(p.cpu_features) {}
+
+bool target_t::has_avx2(void) const {
return cpu_features & HS_CPU_FEATURES_AVX2;
-}
-
+}
+
bool target_t::has_avx512(void) const {
return cpu_features & HS_CPU_FEATURES_AVX512;
}
@@ -72,8 +72,8 @@ bool target_t::has_avx512vbmi(void) const {
return cpu_features & HS_CPU_FEATURES_AVX512VBMI;
}
-bool target_t::is_atom_class(void) const {
+bool target_t::is_atom_class(void) const {
return tune == HS_TUNE_FAMILY_SLM || tune == HS_TUNE_FAMILY_GLM;
-}
-
-} // namespace ue2
+}
+
+} // namespace ue2
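
Note that can_run_on_code_built_for only needs to check one direction: a database compiled for AVX2, AVX512 or AVX512VBMI cannot run on a host that lacks the feature, while running plainer code on a richer host is always fine. A sketch of the intended call pattern (how the database's target_t is obtained is an assumption here):

#include "target_info.h"

namespace ue2 {

// True if the running CPU can execute a database compiled for db_target.
static bool database_usable(const target_t &db_target) {
    const target_t host = get_current_target();
    return host.can_run_on_code_built_for(db_target);
}

} // namespace ue2
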
diff --git a/contrib/libs/hyperscan/src/util/target_info.h b/contrib/libs/hyperscan/src/util/target_info.h
index f64573aeda..7fefbbed16 100644
--- a/contrib/libs/hyperscan/src/util/target_info.h
+++ b/contrib/libs/hyperscan/src/util/target_info.h
@@ -1,63 +1,63 @@
-/*
+/*
* Copyright (c) 2015-2020, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef TARGET_INFO_H
-#define TARGET_INFO_H
-
-#include "ue2common.h"
-
-struct hs_platform_info;
-
-namespace ue2 {
-
-struct target_t {
- explicit target_t(const hs_platform_info &pi);
-
- bool has_avx2(void) const;
-
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TARGET_INFO_H
+#define TARGET_INFO_H
+
+#include "ue2common.h"
+
+struct hs_platform_info;
+
+namespace ue2 {
+
+struct target_t {
+ explicit target_t(const hs_platform_info &pi);
+
+ bool has_avx2(void) const;
+
bool has_avx512(void) const;
bool has_avx512vbmi(void) const;
- bool is_atom_class(void) const;
-
- // This asks: can this target (the object) run on code that was built for
- // "code_target". Very wordy but less likely to be misinterpreted than
- // is_compatible() or some such.
- bool can_run_on_code_built_for(const target_t &code_target) const;
-
-private:
- u32 tune;
- u64a cpu_features;
-};
-
-target_t get_current_target(void);
-
-} // namespace ue2
-
-#endif
+ bool is_atom_class(void) const;
+
+ // This asks: can this target (the object) run on code that was built for
+ // "code_target". Very wordy but less likely to be misinterpreted than
+ // is_compatible() or some such.
+ bool can_run_on_code_built_for(const target_t &code_target) const;
+
+private:
+ u32 tune;
+ u64a cpu_features;
+};
+
+target_t get_current_target(void);
+
+} // namespace ue2
+
+#endif
diff --git a/contrib/libs/hyperscan/src/util/ue2string.cpp b/contrib/libs/hyperscan/src/util/ue2string.cpp
index 50b2bbcc89..3beb222ff2 100644
--- a/contrib/libs/hyperscan/src/util/ue2string.cpp
+++ b/contrib/libs/hyperscan/src/util/ue2string.cpp
@@ -1,184 +1,184 @@
-/*
+/*
* Copyright (c) 2015-2019, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \file
- * \brief Tools for string manipulation, ue2_literal definition.
- */
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * \brief Tools for string manipulation, ue2_literal definition.
+ */
#include "ue2string.h"
-#include "charreach.h"
-#include "compare.h"
+#include "charreach.h"
+#include "compare.h"
#include "hash_dynamic_bitset.h"
-
-#include <algorithm>
+
+#include <algorithm>
#include <cstring>
-#include <iomanip>
-#include <sstream>
-#include <string>
-
-using namespace std;
-
-namespace ue2 {
-
-#if defined(DUMP_SUPPORT) || defined(DEBUG)
-
-// Escape a string so that it's screen-printable
-string escapeString(const string &s) {
- ostringstream os;
- for (unsigned int i = 0; i < s.size(); ++i) {
- char c = s[i];
- if (0x20 <= c && c <= 0x7e && c != '\\') {
- os << c;
- } else if (c == '\n') {
- os << "\\n";
- } else if (c == '\r') {
- os << "\\r";
- } else if (c == '\t') {
- os << "\\t";
- } else {
- os << "\\x" << hex << setw(2) << setfill('0')
- << (unsigned)(c & 0xff) << dec;
- }
- }
- return os.str();
-}
-
-string escapeString(const ue2_literal &lit) {
- ostringstream os;
- for (ue2_literal::const_iterator it = lit.begin(); it != lit.end(); ++it) {
- char c = it->c;
- if (0x20 <= c && c <= 0x7e && c != '\\') {
- os << c;
- } else if (c == '\n') {
- os << "\\n";
- } else {
- os << "\\x" << hex << setw(2) << setfill('0')
- << (unsigned)(c & 0xff) << dec;
- }
- }
- return os.str();
-}
-
-// escape any metacharacters in a literal string
-string escapeStringMeta(const string &s) {
- ostringstream os;
- for (unsigned int i = 0; i < s.size(); ++i) {
- char c = s[i];
- switch (c) {
- case '#': case '$': case '(': case ')':
- case '*': case '+': case '.': case '/':
- case '?': case '[': case ']': case '^':
- case '|':
- os << "\\" << c; break;
- default:
- os << c; break;
- }
- }
- return os.str();
-}
-
-string dotEscapeString(const string &s) {
- string ss = escapeString(s);
- string out;
- out.reserve(ss.size());
- for (size_t i = 0; i != ss.size(); i++) {
- char c = ss[i];
- switch (c) {
- case '\"':
- case '\\':
- out.push_back('\\');
- // fall through
- default:
- out.push_back(c);
- break;
- }
- }
- return out;
-}
-
-string dumpString(const ue2_literal &lit) {
- string s = escapeString(lit.get_string());
- if (lit.any_nocase()) {
- s += " (nocase)";
- }
-
- return s;
-}
-#endif
-
-void upperString(string &s) {
+#include <iomanip>
+#include <sstream>
+#include <string>
+
+using namespace std;
+
+namespace ue2 {
+
+#if defined(DUMP_SUPPORT) || defined(DEBUG)
+
+// Escape a string so that it's screen-printable
+string escapeString(const string &s) {
+ ostringstream os;
+ for (unsigned int i = 0; i < s.size(); ++i) {
+ char c = s[i];
+ if (0x20 <= c && c <= 0x7e && c != '\\') {
+ os << c;
+ } else if (c == '\n') {
+ os << "\\n";
+ } else if (c == '\r') {
+ os << "\\r";
+ } else if (c == '\t') {
+ os << "\\t";
+ } else {
+ os << "\\x" << hex << setw(2) << setfill('0')
+ << (unsigned)(c & 0xff) << dec;
+ }
+ }
+ return os.str();
+}
+
+string escapeString(const ue2_literal &lit) {
+ ostringstream os;
+ for (ue2_literal::const_iterator it = lit.begin(); it != lit.end(); ++it) {
+ char c = it->c;
+ if (0x20 <= c && c <= 0x7e && c != '\\') {
+ os << c;
+ } else if (c == '\n') {
+ os << "\\n";
+ } else {
+ os << "\\x" << hex << setw(2) << setfill('0')
+ << (unsigned)(c & 0xff) << dec;
+ }
+ }
+ return os.str();
+}
+
+// escape any metacharacters in a literal string
+string escapeStringMeta(const string &s) {
+ ostringstream os;
+ for (unsigned int i = 0; i < s.size(); ++i) {
+ char c = s[i];
+ switch (c) {
+ case '#': case '$': case '(': case ')':
+ case '*': case '+': case '.': case '/':
+ case '?': case '[': case ']': case '^':
+ case '|':
+ os << "\\" << c; break;
+ default:
+ os << c; break;
+ }
+ }
+ return os.str();
+}
+
+string dotEscapeString(const string &s) {
+ string ss = escapeString(s);
+ string out;
+ out.reserve(ss.size());
+ for (size_t i = 0; i != ss.size(); i++) {
+ char c = ss[i];
+ switch (c) {
+ case '\"':
+ case '\\':
+ out.push_back('\\');
+ // fall through
+ default:
+ out.push_back(c);
+ break;
+ }
+ }
+ return out;
+}
+
+string dumpString(const ue2_literal &lit) {
+ string s = escapeString(lit.get_string());
+ if (lit.any_nocase()) {
+ s += " (nocase)";
+ }
+
+ return s;
+}
+#endif
+
+void upperString(string &s) {
for (auto &c : s) {
c = mytoupper(c);
}
-}
-
-size_t maxStringOverlap(const string &a, const string &b, bool nocase) {
- size_t lena = a.length(), lenb = b.length();
- const char *astart = a.c_str();
- const char *bstart = b.c_str();
- const char *aend = astart + lena;
- size_t i = lenb;
-
- for (; i > lena; i--) {
- if (!cmp(astart, bstart + i - lena, lena, nocase)) {
- return i;
- }
- }
-
- for (; i && cmp(aend - i, bstart, i, nocase); i--) {
- ;
- }
-
- return i;
-}
-
-size_t maxStringOverlap(const ue2_literal &a, const ue2_literal &b) {
- /* todo: handle nocase better */
- return maxStringOverlap(a.get_string(), b.get_string(),
- a.any_nocase() || b.any_nocase());
-}
-
-size_t maxStringSelfOverlap(const string &a, bool nocase) {
- size_t lena = a.length();
- const char *astart = a.c_str();
- const char *bstart = a.c_str();
- const char *aend = astart + lena;
- size_t i = lena - 1;
-
- for (; i && cmp(aend - i, bstart, i, nocase); i--) {
- ;
- }
-
- return i;
-}
-
-u32 cmp(const char *a, const char *b, size_t len, bool nocase) {
+}
+
+size_t maxStringOverlap(const string &a, const string &b, bool nocase) {
+ size_t lena = a.length(), lenb = b.length();
+ const char *astart = a.c_str();
+ const char *bstart = b.c_str();
+ const char *aend = astart + lena;
+ size_t i = lenb;
+
+ for (; i > lena; i--) {
+ if (!cmp(astart, bstart + i - lena, lena, nocase)) {
+ return i;
+ }
+ }
+
+ for (; i && cmp(aend - i, bstart, i, nocase); i--) {
+ ;
+ }
+
+ return i;
+}
+
+size_t maxStringOverlap(const ue2_literal &a, const ue2_literal &b) {
+ /* todo: handle nocase better */
+ return maxStringOverlap(a.get_string(), b.get_string(),
+ a.any_nocase() || b.any_nocase());
+}
+
+size_t maxStringSelfOverlap(const string &a, bool nocase) {
+ size_t lena = a.length();
+ const char *astart = a.c_str();
+ const char *bstart = a.c_str();
+ const char *aend = astart + lena;
+ size_t i = lena - 1;
+
+ for (; i && cmp(aend - i, bstart, i, nocase); i--) {
+ ;
+ }
+
+ return i;
+}
+
+u32 cmp(const char *a, const char *b, size_t len, bool nocase) {
if (!nocase) {
return memcmp(a, b, len);
}
@@ -189,110 +189,110 @@ u32 cmp(const char *a, const char *b, size_t len, bool nocase) {
}
}
return 0;
-}
-
-case_iter::case_iter(const ue2_literal &ss) : s(ss.get_string()),
- s_orig(ss.get_string()) {
- for (ue2_literal::const_iterator it = ss.begin(); it != ss.end(); ++it) {
- nocase.push_back(it->nocase);
- }
-}
-
-case_iter caseIterateBegin(const ue2_literal &s) {
- return case_iter(s);
-}
-
-case_iter caseIterateEnd() {
- return case_iter(ue2_literal());
-}
-
-case_iter &case_iter::operator++ () {
- for (size_t i = s.length(); i != 0; i--) {
- char lower = mytolower(s[i - 1]);
- if (nocase[i - 1] && lower != s[i - 1]) {
- s[i - 1] = lower;
- copy(s_orig.begin() + i, s_orig.end(), s.begin() + i);
- return *this;
- }
- }
-
- s.clear();
- return *this;
-}
-
-static
-string toUpperString(string s) {
- upperString(s);
- return s;
-}
-
-ue2_literal::elem::operator CharReach () const {
- if (!nocase) {
- return CharReach(c);
- } else {
- CharReach rv;
- rv.set(mytoupper(c));
- rv.set(mytolower(c));
- return rv;
- }
-}
-
+}
+
+case_iter::case_iter(const ue2_literal &ss) : s(ss.get_string()),
+ s_orig(ss.get_string()) {
+ for (ue2_literal::const_iterator it = ss.begin(); it != ss.end(); ++it) {
+ nocase.push_back(it->nocase);
+ }
+}
+
+case_iter caseIterateBegin(const ue2_literal &s) {
+ return case_iter(s);
+}
+
+case_iter caseIterateEnd() {
+ return case_iter(ue2_literal());
+}
+
+case_iter &case_iter::operator++ () {
+ for (size_t i = s.length(); i != 0; i--) {
+ char lower = mytolower(s[i - 1]);
+ if (nocase[i - 1] && lower != s[i - 1]) {
+ s[i - 1] = lower;
+ copy(s_orig.begin() + i, s_orig.end(), s.begin() + i);
+ return *this;
+ }
+ }
+
+ s.clear();
+ return *this;
+}
+
+static
+string toUpperString(string s) {
+ upperString(s);
+ return s;
+}
+
+ue2_literal::elem::operator CharReach () const {
+ if (!nocase) {
+ return CharReach(c);
+ } else {
+ CharReach rv;
+ rv.set(mytoupper(c));
+ rv.set(mytolower(c));
+ return rv;
+ }
+}
+
const ue2_literal::size_type ue2_literal::npos = std::string::npos;
-ue2_literal::ue2_literal(const std::string &s_in, bool nc_in)
+ue2_literal::ue2_literal(const std::string &s_in, bool nc_in)
: s(nc_in ? toUpperString(s_in) : s_in), nocase(s_in.size()) {
- if (nc_in) {
+ if (nc_in) {
// Switch on nocase bit for all alpha characters.
- for (size_t i = 0; i < s.length(); i++) {
+ for (size_t i = 0; i < s.length(); i++) {
if (ourisalpha(s[i])) {
nocase.set(i);
- }
- }
- }
-}
-
-ue2_literal::ue2_literal(char c, bool nc)
- : s(1, nc ? mytoupper(c) : c), nocase(1, ourisalpha(c) ? nc : false) {}
-
-ue2_literal ue2_literal::substr(size_type pos, size_type n) const {
- ue2_literal rv;
- rv.s = s.substr(pos, n);
- size_type upper = nocase.size();
+ }
+ }
+ }
+}
+
+ue2_literal::ue2_literal(char c, bool nc)
+ : s(1, nc ? mytoupper(c) : c), nocase(1, ourisalpha(c) ? nc : false) {}
+
+ue2_literal ue2_literal::substr(size_type pos, size_type n) const {
+ ue2_literal rv;
+ rv.s = s.substr(pos, n);
+ size_type upper = nocase.size();
if (n != npos && n + pos < nocase.size()) {
- upper = n + pos;
- }
+ upper = n + pos;
+ }
rv.nocase.resize(upper - pos, false);
for (size_t i = pos; i < upper; i++) {
rv.nocase.set(i - pos, nocase.test(i));
}
assert(s.size() == nocase.size());
- return rv;
-}
-
-ue2_literal &ue2_literal::erase(size_type pos, size_type n) {
- s.erase(pos, n);
+ return rv;
+}
+
+ue2_literal &ue2_literal::erase(size_type pos, size_type n) {
+ s.erase(pos, n);
if (n != npos) {
for (size_type i = pos + n; i < nocase.size(); i++) {
nocase.set(i - n, nocase.test(i));
}
- }
+ }
nocase.resize(s.size());
- return *this;
-}
-
-void ue2_literal::push_back(char c, bool nc) {
- if (nc) {
- c = mytoupper(c);
- }
- nocase.push_back(nc);
- s.push_back(c);
-}
-
+ return *this;
+}
+
+void ue2_literal::push_back(char c, bool nc) {
+ if (nc) {
+ c = mytoupper(c);
+ }
+ nocase.push_back(nc);
+ s.push_back(c);
+}
+
void ue2_literal::reverse() {
std::reverse(s.begin(), s.end());
-
+
const size_t len = nocase.size();
for (size_t i = 0; i < len / 2; i++) {
size_t j = len - i - 1;
@@ -300,9 +300,9 @@ void ue2_literal::reverse() {
bool b = nocase.test(j);
nocase.set(i, b);
nocase.set(j, a);
- }
-}
-
+ }
+}
+
// Return a copy of this literal in reverse order.
ue2_literal reverse_literal(const ue2_literal &in) {
auto out = in;
@@ -310,94 +310,94 @@ ue2_literal reverse_literal(const ue2_literal &in) {
return out;
}
-bool ue2_literal::operator<(const ue2_literal &b) const {
- if (s < b.s) {
- return true;
- }
- if (s > b.s) {
- return false;
- }
- return nocase < b.nocase;
-}
-
-void ue2_literal::operator+=(const ue2_literal &b) {
- s += b.s;
+bool ue2_literal::operator<(const ue2_literal &b) const {
+ if (s < b.s) {
+ return true;
+ }
+ if (s > b.s) {
+ return false;
+ }
+ return nocase < b.nocase;
+}
+
+void ue2_literal::operator+=(const ue2_literal &b) {
+ s += b.s;
size_t prefix = nocase.size();
nocase.resize(prefix + b.nocase.size());
for (size_t i = 0; i < b.nocase.size(); i++) {
nocase.set(prefix + i, b.nocase[i]);
}
-}
-
-bool ue2_literal::any_nocase() const {
+}
+
+bool ue2_literal::any_nocase() const {
return nocase.any();
-}
-
+}
+
size_t ue2_literal::hash() const {
return hash_all(s, hash_dynamic_bitset()(nocase));
-}
-
-void make_nocase(ue2_literal *lit) {
- ue2_literal rv;
-
+}
+
+void make_nocase(ue2_literal *lit) {
+ ue2_literal rv;
+
for (const auto &elem: *lit) {
rv.push_back(elem.c, ourisalpha(elem.c));
- }
-
- lit->swap(rv);
-}
-
-static
-bool testchar(char c, const CharReach &cr, bool nocase) {
- if (nocase) {
- return cr.test((unsigned char)mytolower(c))
- || cr.test((unsigned char)mytoupper(c));
- } else {
- return cr.test((unsigned char)c);
- }
-}
-
-// Returns true if the given literal contains a char in the given CharReach
-bool contains(const ue2_literal &s, const CharReach &cr) {
- for (ue2_literal::const_iterator it = s.begin(), ite = s.end();
- it != ite; ++it) {
- if (testchar(it->c, cr, it->nocase)) {
- return true;
- }
- }
- return false;
-}
-
-size_t maxStringSelfOverlap(const ue2_literal &a) {
- /* overly conservative if only part of the string is nocase, TODO: fix */
- return maxStringSelfOverlap(a.get_string(), a.any_nocase());
-}
-
-size_t minStringPeriod(const ue2_literal &a) {
- return a.length() - maxStringSelfOverlap(a);
-}
-
-// Returns true if `a' is a suffix of (or equal to) `b'.
-bool isSuffix(const ue2_literal &a, const ue2_literal &b) {
- size_t alen = a.length(), blen = b.length();
- if (alen > blen) {
- return false;
- }
- return equal(a.begin(), a.end(), b.begin() + (blen - alen));
-}
-
-bool is_flood(const ue2_literal &s) {
- assert(!s.empty());
-
- ue2_literal::const_iterator it = s.begin(), ite = s.end();
- ue2_literal::elem f = *it;
- for (++it; it != ite; ++it) {
- if (*it != f) {
- return false;
- }
- }
-
- return true;
-}
-
-} // namespace ue2
+ }
+
+ lit->swap(rv);
+}
+
+static
+bool testchar(char c, const CharReach &cr, bool nocase) {
+ if (nocase) {
+ return cr.test((unsigned char)mytolower(c))
+ || cr.test((unsigned char)mytoupper(c));
+ } else {
+ return cr.test((unsigned char)c);
+ }
+}
+
+// Returns true if the given literal contains a char in the given CharReach
+bool contains(const ue2_literal &s, const CharReach &cr) {
+ for (ue2_literal::const_iterator it = s.begin(), ite = s.end();
+ it != ite; ++it) {
+ if (testchar(it->c, cr, it->nocase)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+size_t maxStringSelfOverlap(const ue2_literal &a) {
+ /* overly conservative if only part of the string is nocase, TODO: fix */
+ return maxStringSelfOverlap(a.get_string(), a.any_nocase());
+}
+
+size_t minStringPeriod(const ue2_literal &a) {
+ return a.length() - maxStringSelfOverlap(a);
+}
+
+// Returns true if `a' is a suffix of (or equal to) `b'.
+bool isSuffix(const ue2_literal &a, const ue2_literal &b) {
+ size_t alen = a.length(), blen = b.length();
+ if (alen > blen) {
+ return false;
+ }
+ return equal(a.begin(), a.end(), b.begin() + (blen - alen));
+}
+
+bool is_flood(const ue2_literal &s) {
+ assert(!s.empty());
+
+ ue2_literal::const_iterator it = s.begin(), ite = s.end();
+ ue2_literal::elem f = *it;
+ for (++it; it != ite; ++it) {
+ if (*it != f) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+} // namespace ue2
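
minStringPeriod falls straight out of maxStringSelfOverlap: the smallest period of a string is its length minus its longest proper border (a prefix that is also a suffix). A worked case, checkable by hand:

#include "ue2string.h"
#include <cassert>

using namespace ue2;

// "abcabcab" has longest border "abcab" (length 5), so its minimal
// period is 8 - 5 = 3: the string is a truncated repeat of "abc".
static void period_example(void) {
    ue2_literal lit("abcabcab", false);   // caseful literal
    assert(maxStringSelfOverlap(lit) == 5);
    assert(minStringPeriod(lit) == 3);
}
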
diff --git a/contrib/libs/hyperscan/src/util/ue2string.h b/contrib/libs/hyperscan/src/util/ue2string.h
index 0aa846896e..2e89b2b478 100644
--- a/contrib/libs/hyperscan/src/util/ue2string.h
+++ b/contrib/libs/hyperscan/src/util/ue2string.h
@@ -1,64 +1,64 @@
-/*
+/*
* Copyright (c) 2015-2019, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \file
- * \brief Tools for string manipulation, ue2_literal definition.
- */
-
-#ifndef UE2STRING_H
-#define UE2STRING_H
-
-#include "ue2common.h"
-#include "util/charreach.h"
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * \brief Tools for string manipulation, ue2_literal definition.
+ */
+
+#ifndef UE2STRING_H
+#define UE2STRING_H
+
+#include "ue2common.h"
+#include "util/charreach.h"
#include "util/compare.h"
#include "util/hash.h"
#include "util/operators.h"
-
-#include <iterator>
-#include <string>
-#include <vector>
-
+
+#include <iterator>
+#include <string>
+#include <vector>
+
#include <boost/dynamic_bitset.hpp>
-#include <boost/iterator/iterator_facade.hpp>
-
-namespace ue2 {
-
-/// Force the given string to upper-case.
-void upperString(std::string &s);
-
-size_t maxStringOverlap(const std::string &a, const std::string &b,
- bool nocase);
-
-size_t maxStringSelfOverlap(const std::string &a, bool nocase);
-
-/// Compares two strings, returns non-zero if they're different.
-u32 cmp(const char *a, const char *b, size_t len, bool nocase);
-
+#include <boost/iterator/iterator_facade.hpp>
+
+namespace ue2 {
+
+/// Force the given string to upper-case.
+void upperString(std::string &s);
+
+size_t maxStringOverlap(const std::string &a, const std::string &b,
+ bool nocase);
+
+size_t maxStringSelfOverlap(const std::string &a, bool nocase);
+
+/// Compares two strings, returns non-zero if they're different.
+u32 cmp(const char *a, const char *b, size_t len, bool nocase);
+
/**
* \brief String type that also records whether the whole string is caseful or
* caseless.
@@ -73,7 +73,7 @@ struct ue2_case_string {
upperString(s);
}
}
-
+
bool operator==(const ue2_case_string &other) const {
return s == other.s && nocase == other.nocase;
}
@@ -83,72 +83,72 @@ struct ue2_case_string {
};
struct ue2_literal : totally_ordered<ue2_literal> {
-public:
- /// Single element proxy, pointed to by our const_iterator.
- struct elem {
- elem() : c(0), nocase(false) {}
- elem(char c_in, bool nc_in) : c(c_in), nocase(nc_in) {}
- bool operator==(const elem &o) const {
- return c == o.c && nocase == o.nocase;
- }
- bool operator!=(const elem &o) const {
- return c != o.c || nocase != o.nocase;
- }
- operator CharReach() const;
- char c;
- bool nocase;
- };
-
- /// Boost iterator_facade lets us synthesize an iterator simply.
- class const_iterator : public boost::iterator_facade<
- const_iterator,
- elem const,
- boost::random_access_traversal_tag,
- elem const> {
- public:
- const_iterator() {}
- private:
- friend class boost::iterator_core_access;
- void increment() {
+public:
+ /// Single element proxy, pointed to by our const_iterator.
+ struct elem {
+ elem() : c(0), nocase(false) {}
+ elem(char c_in, bool nc_in) : c(c_in), nocase(nc_in) {}
+ bool operator==(const elem &o) const {
+ return c == o.c && nocase == o.nocase;
+ }
+ bool operator!=(const elem &o) const {
+ return c != o.c || nocase != o.nocase;
+ }
+ operator CharReach() const;
+ char c;
+ bool nocase;
+ };
+
+ /// Boost iterator_facade lets us synthesize an iterator simply.
+ class const_iterator : public boost::iterator_facade<
+ const_iterator,
+ elem const,
+ boost::random_access_traversal_tag,
+ elem const> {
+ public:
+ const_iterator() {}
+ private:
+ friend class boost::iterator_core_access;
+ void increment() {
++idx;
- }
- void decrement() {
+ }
+ void decrement() {
--idx;
- }
- void advance(size_t n) {
+ }
+ void advance(size_t n) {
idx += n;
- }
- difference_type distance_to(const const_iterator &other) const {
+ }
+ difference_type distance_to(const const_iterator &other) const {
return other.idx - idx;
- }
- bool equal(const const_iterator &other) const {
+ }
+ bool equal(const const_iterator &other) const {
return idx == other.idx && lit == other.lit;
- }
- const elem dereference() const {
+ }
+ const elem dereference() const {
return elem(lit->s[idx], lit->nocase[idx]);
- }
-
- friend struct ue2_literal;
+ }
+
+ friend struct ue2_literal;
const_iterator(const ue2_literal &lit_in, size_t idx_in)
: lit(&lit_in), idx(idx_in) {}
-
+
const ue2_literal *lit = nullptr;
size_t idx;
- };
-
- using const_reverse_iterator = std::reverse_iterator<const_iterator>;
+ };
+
+ using const_reverse_iterator = std::reverse_iterator<const_iterator>;
using size_type = std::string::size_type;
-
+
static const size_type npos;
-
+
ue2_literal() = default;
- ue2_literal(const std::string &s_in, bool nc_in);
- ue2_literal(char c, bool nc_in);
- ue2_literal(const ue2_literal &) = default;
- ue2_literal(ue2_literal &&) = default;
- ue2_literal &operator=(const ue2_literal &) = default;
- ue2_literal &operator=(ue2_literal &&) = default;
-
+ ue2_literal(const std::string &s_in, bool nc_in);
+ ue2_literal(char c, bool nc_in);
+ ue2_literal(const ue2_literal &) = default;
+ ue2_literal(ue2_literal &&) = default;
+ ue2_literal &operator=(const ue2_literal &) = default;
+ ue2_literal &operator=(ue2_literal &&) = default;
+
template<typename InputIt>
ue2_literal(InputIt b, InputIt e) {
for (; b != e; ++b) {
@@ -156,36 +156,36 @@ public:
}
}
- size_type length() const { return s.length(); }
- bool empty() const { return s.empty(); }
+ size_type length() const { return s.length(); }
+ bool empty() const { return s.empty(); }
ue2_literal substr(size_type pos, size_type n = npos) const;
- const char *c_str() const { return s.c_str(); }
- bool any_nocase() const;
-
- const_iterator begin() const {
+ const char *c_str() const { return s.c_str(); }
+ bool any_nocase() const;
+
+ const_iterator begin() const {
return const_iterator(*this, 0);
- }
-
- const_iterator end() const {
+ }
+
+ const_iterator end() const {
return const_iterator(*this, s.size());
- }
-
- const_reverse_iterator rbegin() const {
- return const_reverse_iterator(end());
- }
-
- const_reverse_iterator rend() const {
- return const_reverse_iterator(begin());
- }
-
+ }
+
+ const_reverse_iterator rbegin() const {
+ return const_reverse_iterator(end());
+ }
+
+ const_reverse_iterator rend() const {
+ return const_reverse_iterator(begin());
+ }
+
ue2_literal &erase(size_type pos = 0, size_type n = npos);
- void push_back(const elem &e) {
- push_back(e.c, e.nocase);
- }
-
- void push_back(char c, bool nc);
+ void push_back(const elem &e) {
+ push_back(e.c, e.nocase);
+ }
+
+ void push_back(char c, bool nc);
const elem back() const { return *rbegin(); }
-
+
friend ue2_literal operator+(ue2_literal a, const ue2_literal &b) {
a += b;
return a;
@@ -194,40 +194,40 @@ public:
/// Reverse this literal in-place.
void reverse();
- void operator+=(const ue2_literal &b);
- bool operator==(const ue2_literal &b) const {
- return s == b.s && nocase == b.nocase;
- }
- bool operator<(const ue2_literal &b) const;
-
- void clear(void) { s.clear(); nocase.clear(); }
-
- const std::string &get_string() const { return s; }
-
- void swap(ue2_literal &other) {
- s.swap(other.s);
- nocase.swap(other.nocase);
- }
-
+ void operator+=(const ue2_literal &b);
+ bool operator==(const ue2_literal &b) const {
+ return s == b.s && nocase == b.nocase;
+ }
+ bool operator<(const ue2_literal &b) const;
+
+ void clear(void) { s.clear(); nocase.clear(); }
+
+ const std::string &get_string() const { return s; }
+
+ void swap(ue2_literal &other) {
+ s.swap(other.s);
+ nocase.swap(other.nocase);
+ }
+
size_t hash() const;
-private:
+private:
friend const_iterator;
- std::string s;
+ std::string s;
boost::dynamic_bitset<> nocase;
-};
-
-/// Return a reversed copy of this literal.
-ue2_literal reverse_literal(const ue2_literal &in);
-
-// Escape any meta characters in a string
-std::string escapeStringMeta(const std::string &s);
-
-/** Note: may be overly conservative if only partially nocase */
-size_t maxStringSelfOverlap(const ue2_literal &a);
-size_t minStringPeriod(const ue2_literal &a);
-size_t maxStringOverlap(const ue2_literal &a, const ue2_literal &b);
-
+};
+
+/// Return a reversed copy of this literal.
+ue2_literal reverse_literal(const ue2_literal &in);
+
+// Escape any meta characters in a string
+std::string escapeStringMeta(const std::string &s);
+
+/** Note: may be overly conservative if only partially nocase */
+size_t maxStringSelfOverlap(const ue2_literal &a);
+size_t minStringPeriod(const ue2_literal &a);
+size_t maxStringOverlap(const ue2_literal &a, const ue2_literal &b);
+
/**
* \brief True iff the range of a literal given cannot be considered entirely
* case-sensitive nor entirely case-insensitive.
@@ -246,7 +246,7 @@ bool mixed_sensitivity_in(Iter begin, Iter end) {
cs = true;
}
}
-
+
return cs && nc;
}
@@ -259,62 +259,62 @@ bool mixed_sensitivity(const ue2_literal &s) {
return mixed_sensitivity_in(s.begin(), s.end());
}
-void make_nocase(ue2_literal *lit);
-
-struct case_iter {
- explicit case_iter(const ue2_literal &ss);
- const std::string &operator*() const { return s; } /* limited lifetime */
- case_iter &operator++ ();
- bool operator!=(const case_iter &b) const { return s != b.s; }
-private:
- std::string s;
- std::string s_orig;
- std::vector<bool> nocase;
-};
-
-case_iter caseIterateBegin(const ue2_literal &lit);
-case_iter caseIterateEnd();
-
-/** \brief True if there is any overlap between the characters in \a s and the
- * set characters in \a cr.
- *
- * Note: this means that if \a s is nocase, then \a cr only needs to have
- * either the lower-case or upper-case version of a letter set. */
-bool contains(const ue2_literal &s, const CharReach &cr);
-
-/// Returns true if \a a is a suffix of (or equal to) \a b.
-bool isSuffix(const ue2_literal &a, const ue2_literal &b);
-
-static inline
-std::vector<CharReach> as_cr_seq(const ue2_literal &s) {
- std::vector<CharReach> rv;
- rv.reserve(s.length());
- rv.insert(rv.end(), s.begin(), s.end());
- return rv;
-}
-
-/** \brief True if the given literal consists entirely of a flood of the same
- * character. */
-bool is_flood(const ue2_literal &s);
-
-#if defined(DUMP_SUPPORT) || defined(DEBUG)
-/* Utility functions for debugging/dumping */
-
-/// Escape a string so it's dot-printable.
-std::string dotEscapeString(const std::string &s);
-
-std::string dumpString(const ue2_literal &lit);
-
-/// Escape a string so that it's screen-printable.
-std::string escapeString(const std::string &s);
-
-/// Escape a ue2_literal so that it's screen-printable.
-std::string escapeString(const ue2_literal &lit);
-
-#endif
-
-} // namespace ue2
-
+void make_nocase(ue2_literal *lit);
+
+struct case_iter {
+ explicit case_iter(const ue2_literal &ss);
+ const std::string &operator*() const { return s; } /* limited lifetime */
+ case_iter &operator++ ();
+ bool operator!=(const case_iter &b) const { return s != b.s; }
+private:
+ std::string s;
+ std::string s_orig;
+ std::vector<bool> nocase;
+};
+
+case_iter caseIterateBegin(const ue2_literal &lit);
+case_iter caseIterateEnd();
+
+/** \brief True if there is any overlap between the characters in \a s and the
+ * set characters in \a cr.
+ *
+ * Note: this means that if \a s is nocase, then \a cr only needs to have
+ * either the lower-case or upper-case version of a letter set. */
+bool contains(const ue2_literal &s, const CharReach &cr);
+
+/// Returns true if \a a is a suffix of (or equal to) \a b.
+bool isSuffix(const ue2_literal &a, const ue2_literal &b);
+
+static inline
+std::vector<CharReach> as_cr_seq(const ue2_literal &s) {
+ std::vector<CharReach> rv;
+ rv.reserve(s.length());
+ rv.insert(rv.end(), s.begin(), s.end());
+ return rv;
+}
+
+/** \brief True if the given literal consists entirely of a flood of the same
+ * character. */
+bool is_flood(const ue2_literal &s);
+
+#if defined(DUMP_SUPPORT) || defined(DEBUG)
+/* Utility functions for debugging/dumping */
+
+/// Escape a string so it's dot-printable.
+std::string dotEscapeString(const std::string &s);
+
+std::string dumpString(const ue2_literal &lit);
+
+/// Escape a string so that it's screen-printable.
+std::string escapeString(const std::string &s);
+
+/// Escape a ue2_literal so that it's screen-printable.
+std::string escapeString(const ue2_literal &lit);
+
+#endif
+
+} // namespace ue2
+
namespace std {
template<>
@@ -333,4 +333,4 @@ struct hash<ue2::ue2_literal> {
} // namespace std
-#endif
+#endif
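
Because the nocase flag is tracked per character rather than per string, a single ue2_literal can be partly caseful, which is exactly what mixed_sensitivity() and case_iter exist to handle. A small usage sketch (the variant count is easy to verify by hand):

#include "ue2string.h"
#include <cassert>

using namespace ue2;

static void literal_example(void) {
    ue2_literal lit;
    lit.push_back('a', true);    // caseless: matches 'a' or 'A'
    lit.push_back('b', false);   // caseful: matches 'b' only

    assert(lit.any_nocase());
    assert(mixed_sensitivity(lit));

    // case_iter enumerates the concrete case variants: "Ab" then "ab".
    int variants = 0;
    for (case_iter it = caseIterateBegin(lit), ite = caseIterateEnd();
         it != ite; ++it) {
        ++variants;
    }
    assert(variants == 2);
}
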
diff --git a/contrib/libs/hyperscan/src/util/unaligned.h b/contrib/libs/hyperscan/src/util/unaligned.h
index 299e5677c3..dd7f3b5abb 100644
--- a/contrib/libs/hyperscan/src/util/unaligned.h
+++ b/contrib/libs/hyperscan/src/util/unaligned.h
@@ -1,98 +1,98 @@
-/*
- * Copyright (c) 2015, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \file
- * \brief Helper functions for unaligned loads and stores.
- */
-
-#ifndef UNALIGNED_H
-#define UNALIGNED_H
-
-#include "ue2common.h"
-
-#if !defined(_WIN32)
-#define PACKED__MAY_ALIAS __attribute__((packed, may_alias))
-#else
-#define PACKED__MAY_ALIAS
-#pragma pack(push, 1) // pack everything until told otherwise
-#endif
-
-/// Perform an unaligned 16-bit load
-static really_inline
-u16 unaligned_load_u16(const void *ptr) {
- struct unaligned { u16 u; } PACKED__MAY_ALIAS;
- const struct unaligned *uptr = (const struct unaligned *)ptr;
- return uptr->u;
-}
-
-/// Perform an unaligned 32-bit load
-static really_inline
-u32 unaligned_load_u32(const void *ptr) {
- struct unaligned { u32 u; } PACKED__MAY_ALIAS;
- const struct unaligned *uptr = (const struct unaligned *)ptr;
- return uptr->u;
-}
-
-/// Perform an unaligned 64-bit load
-static really_inline
-u64a unaligned_load_u64a(const void *ptr) {
- struct unaligned { u64a u; } PACKED__MAY_ALIAS;
- const struct unaligned *uptr = (const struct unaligned *)ptr;
- return uptr->u;
-}
-
-/// Perform an unaligned 16-bit store
-static really_inline
-void unaligned_store_u16(void *ptr, u16 val) {
- struct unaligned { u16 u; } PACKED__MAY_ALIAS;
- struct unaligned *uptr = (struct unaligned *)ptr;
- uptr->u = val;
-}
-
-/// Perform an unaligned 32-bit store
-static really_inline
-void unaligned_store_u32(void *ptr, u32 val) {
- struct unaligned { u32 u; } PACKED__MAY_ALIAS;
- struct unaligned *uptr = (struct unaligned *)ptr;
- uptr->u = val;
-}
-
-/// Perform an unaligned 64-bit store
-static really_inline
-void unaligned_store_u64a(void *ptr, u64a val) {
- struct unaligned { u64a u; } PACKED__MAY_ALIAS;
- struct unaligned *uptr = (struct unaligned *)ptr;
- uptr->u = val;
-}
-#if defined(_WIN32)
-#pragma pack(pop)
-#endif // win32
-
-#undef PACKED__MAY_ALIAS
-
-#endif // UNALIGNED_H
+/*
+ * Copyright (c) 2015, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * \brief Helper functions for unaligned loads and stores.
+ */
+
+#ifndef UNALIGNED_H
+#define UNALIGNED_H
+
+#include "ue2common.h"
+
+#if !defined(_WIN32)
+#define PACKED__MAY_ALIAS __attribute__((packed, may_alias))
+#else
+#define PACKED__MAY_ALIAS
+#pragma pack(push, 1) // pack everything until told otherwise
+#endif
+
+/// Perform an unaligned 16-bit load
+static really_inline
+u16 unaligned_load_u16(const void *ptr) {
+ struct unaligned { u16 u; } PACKED__MAY_ALIAS;
+ const struct unaligned *uptr = (const struct unaligned *)ptr;
+ return uptr->u;
+}
+
+/// Perform an unaligned 32-bit load
+static really_inline
+u32 unaligned_load_u32(const void *ptr) {
+ struct unaligned { u32 u; } PACKED__MAY_ALIAS;
+ const struct unaligned *uptr = (const struct unaligned *)ptr;
+ return uptr->u;
+}
+
+/// Perform an unaligned 64-bit load
+static really_inline
+u64a unaligned_load_u64a(const void *ptr) {
+ struct unaligned { u64a u; } PACKED__MAY_ALIAS;
+ const struct unaligned *uptr = (const struct unaligned *)ptr;
+ return uptr->u;
+}
+
+/// Perform an unaligned 16-bit store
+static really_inline
+void unaligned_store_u16(void *ptr, u16 val) {
+ struct unaligned { u16 u; } PACKED__MAY_ALIAS;
+ struct unaligned *uptr = (struct unaligned *)ptr;
+ uptr->u = val;
+}
+
+/// Perform an unaligned 32-bit store
+static really_inline
+void unaligned_store_u32(void *ptr, u32 val) {
+ struct unaligned { u32 u; } PACKED__MAY_ALIAS;
+ struct unaligned *uptr = (struct unaligned *)ptr;
+ uptr->u = val;
+}
+
+/// Perform an unaligned 64-bit store
+static really_inline
+void unaligned_store_u64a(void *ptr, u64a val) {
+ struct unaligned { u64a u; } PACKED__MAY_ALIAS;
+ struct unaligned *uptr = (struct unaligned *)ptr;
+ uptr->u = val;
+}
+#if defined(_WIN32)
+#pragma pack(pop)
+#endif // win32
+
+#undef PACKED__MAY_ALIAS
+
+#endif // UNALIGNED_H
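
The one-member packed, may_alias struct is the point of these helpers: dereferencing a plain u16/u32/u64a pointer at a misaligned address is undefined behaviour and can fault on strict-alignment targets, whereas the wrapper tells the compiler to emit an access that is safe at any address. A minimal round-trip sketch:

#include "unaligned.h"
#include <cassert>
#include <cstring>

// Store and reload a u32 at a deliberately odd offset in a byte buffer.
static void unaligned_roundtrip(void) {
    char buf[16];
    std::memset(buf, 0, sizeof(buf));

    unaligned_store_u32(buf + 1, 0xcafef00dU);
    assert(unaligned_load_u32(buf + 1) == 0xcafef00dU);
}
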
diff --git a/contrib/libs/hyperscan/src/util/unicode_def.h b/contrib/libs/hyperscan/src/util/unicode_def.h
index 73ff5e87d4..e817cb42b2 100644
--- a/contrib/libs/hyperscan/src/util/unicode_def.h
+++ b/contrib/libs/hyperscan/src/util/unicode_def.h
@@ -1,85 +1,85 @@
-/*
- * Copyright (c) 2015, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef UNICODE_DEF_H
-#define UNICODE_DEF_H
-
-#include "ue2common.h"
-
-#define MAX_UNICODE 0x10FFFF
-#define INVALID_UNICODE 0xffffffff /* unicode could never go above 2^31 */
-
-#define UTF_2CHAR_MIN (1U << 7)
-#define UTF_3CHAR_MIN (1U << 11)
-#define UTF_4CHAR_MIN (1U << 16)
-#define UTF_CONT_SHIFT 6
-#define UTF_CONT_BYTE_RANGE (1U << UTF_CONT_SHIFT)
-#define UTF_CONT_BYTE_HEADER ((u8)0x80) /* 10xx xxxx */
-#define UTF_TWO_BYTE_HEADER ((u8)0xc0) /* 110x xxxx */
-#define UTF_THREE_BYTE_HEADER ((u8)0xe0) /* 1110 xxxx */
-#define UTF_FOUR_BYTE_HEADER ((u8)0xf0) /* 1111 0xxx */
-
-#define UTF_CONT_BYTE_VALUE_MASK 0x3f
-
-#define UTF_CONT_MIN UTF_CONT_BYTE_HEADER
-#define UTF_CONT_MAX (UTF_TWO_BYTE_HEADER - 1)
-
-#define UTF_TWO_BYTE_MIN UTF_TWO_BYTE_HEADER
-#define UTF_TWO_BYTE_MAX (UTF_THREE_BYTE_HEADER - 1)
-
-#define UTF_THREE_BYTE_MIN UTF_THREE_BYTE_HEADER
-#define UTF_THREE_BYTE_MAX (UTF_FOUR_BYTE_HEADER - 1)
-
-#define UTF_FOUR_BYTE_MIN UTF_FOUR_BYTE_HEADER
-#define UTF_FOUR_BYTE_MAX ((u8)0xf4)
-
-#define UTF_CONT_CR CharReach(UTF_CONT_MIN, UTF_CONT_MAX)
-#define UTF_ASCII_CR CharReach(0, 127)
-#define UTF_START_CR CharReach(UTF_TWO_BYTE_MIN, UTF_FOUR_BYTE_MAX)
-#define UTF_TWO_START_CR CharReach(UTF_TWO_BYTE_MIN, UTF_TWO_BYTE_MAX)
-#define UTF_THREE_START_CR CharReach(UTF_THREE_BYTE_MIN, UTF_THREE_BYTE_MAX)
-#define UTF_FOUR_START_CR CharReach(UTF_FOUR_BYTE_MIN, UTF_FOUR_BYTE_MAX)
-
-#define UNICODE_SURROGATE_MIN 0xd800
-#define UNICODE_SURROGATE_MAX 0xdfff
-
-#ifdef __cplusplus
-
-namespace ue2 {
-typedef u32 unichar; /* represents a unicode code point */
-
-static UNUSED
-u8 makeContByte(u8 val) {
- return UTF_CONT_BYTE_HEADER | (val & UTF_CONT_BYTE_VALUE_MASK);
-}
-
-} // namespace
-
-#endif // __cplusplus
-
-#endif
+/*
+ * Copyright (c) 2015, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef UNICODE_DEF_H
+#define UNICODE_DEF_H
+
+#include "ue2common.h"
+
+#define MAX_UNICODE 0x10FFFF
+#define INVALID_UNICODE 0xffffffff /* unicode could never go above 2^31 */
+
+#define UTF_2CHAR_MIN (1U << 7)
+#define UTF_3CHAR_MIN (1U << 11)
+#define UTF_4CHAR_MIN (1U << 16)
+#define UTF_CONT_SHIFT 6
+#define UTF_CONT_BYTE_RANGE (1U << UTF_CONT_SHIFT)
+#define UTF_CONT_BYTE_HEADER ((u8)0x80) /* 10xx xxxx */
+#define UTF_TWO_BYTE_HEADER ((u8)0xc0) /* 110x xxxx */
+#define UTF_THREE_BYTE_HEADER ((u8)0xe0) /* 1110 xxxx */
+#define UTF_FOUR_BYTE_HEADER ((u8)0xf0) /* 1111 0xxx */
+
+#define UTF_CONT_BYTE_VALUE_MASK 0x3f
+
+#define UTF_CONT_MIN UTF_CONT_BYTE_HEADER
+#define UTF_CONT_MAX (UTF_TWO_BYTE_HEADER - 1)
+
+#define UTF_TWO_BYTE_MIN UTF_TWO_BYTE_HEADER
+#define UTF_TWO_BYTE_MAX (UTF_THREE_BYTE_HEADER - 1)
+
+#define UTF_THREE_BYTE_MIN UTF_THREE_BYTE_HEADER
+#define UTF_THREE_BYTE_MAX (UTF_FOUR_BYTE_HEADER - 1)
+
+#define UTF_FOUR_BYTE_MIN UTF_FOUR_BYTE_HEADER
+#define UTF_FOUR_BYTE_MAX ((u8)0xf4)
+
+#define UTF_CONT_CR CharReach(UTF_CONT_MIN, UTF_CONT_MAX)
+#define UTF_ASCII_CR CharReach(0, 127)
+#define UTF_START_CR CharReach(UTF_TWO_BYTE_MIN, UTF_FOUR_BYTE_MAX)
+#define UTF_TWO_START_CR CharReach(UTF_TWO_BYTE_MIN, UTF_TWO_BYTE_MAX)
+#define UTF_THREE_START_CR CharReach(UTF_THREE_BYTE_MIN, UTF_THREE_BYTE_MAX)
+#define UTF_FOUR_START_CR CharReach(UTF_FOUR_BYTE_MIN, UTF_FOUR_BYTE_MAX)
+
+#define UNICODE_SURROGATE_MIN 0xd800
+#define UNICODE_SURROGATE_MAX 0xdfff
+
+#ifdef __cplusplus
+
+namespace ue2 {
+typedef u32 unichar; /* represents a unicode code point */
+
+static UNUSED
+u8 makeContByte(u8 val) {
+ return UTF_CONT_BYTE_HEADER | (val & UTF_CONT_BYTE_VALUE_MASK);
+}
+
+} // namespace
+
+#endif // __cplusplus
+
+#endif
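
[Editor's note on the file above] These constants fully determine the UTF-8 byte layout, so a small encoder can be written against nothing but the header. A sketch for the C++ section of the header; the function name encodeUtf8 is illustrative and not part of the file, and surrogate rejection is deliberately left to the caller:

    /* Sketch: encode a code point to UTF-8 using only the header's
     * constants. Returns bytes written (1-4), or 0 past MAX_UNICODE. */
    static UNUSED
    u32 encodeUtf8(unichar c, u8 *out) {
        if (c < UTF_2CHAR_MIN) {            /* 0xxxxxxx */
            out[0] = (u8)c;
            return 1;
        } else if (c < UTF_3CHAR_MIN) {     /* 110xxxxx 10xxxxxx */
            out[0] = UTF_TWO_BYTE_HEADER | (u8)(c >> UTF_CONT_SHIFT);
            out[1] = makeContByte((u8)c);
            return 2;
        } else if (c < UTF_4CHAR_MIN) {     /* 1110xxxx 10xxxxxx x2 */
            out[0] = UTF_THREE_BYTE_HEADER | (u8)(c >> (2 * UTF_CONT_SHIFT));
            out[1] = makeContByte((u8)(c >> UTF_CONT_SHIFT));
            out[2] = makeContByte((u8)c);
            return 3;
        } else if (c <= MAX_UNICODE) {      /* 11110xxx 10xxxxxx x3 */
            out[0] = UTF_FOUR_BYTE_HEADER | (u8)(c >> (3 * UTF_CONT_SHIFT));
            out[1] = makeContByte((u8)(c >> (2 * UTF_CONT_SHIFT)));
            out[2] = makeContByte((u8)(c >> UTF_CONT_SHIFT));
            out[3] = makeContByte((u8)c);
            return 4;
        }
        return 0;
    }
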
diff --git a/contrib/libs/hyperscan/src/util/unicode_set.h b/contrib/libs/hyperscan/src/util/unicode_set.h
index e2dd351a62..25a7b6cfbc 100644
--- a/contrib/libs/hyperscan/src/util/unicode_set.h
+++ b/contrib/libs/hyperscan/src/util/unicode_set.h
@@ -1,141 +1,141 @@
-/*
- * Copyright (c) 2015, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef UNICODE_SET
-#define UNICODE_SET
-
-#include "unicode_def.h"
-
-#include <boost/icl/interval_set.hpp>
-
-namespace ue2 {
-
-class CodePointSet {
-public:
- typedef boost::icl::closed_interval<unichar> interval;
- typedef boost::icl::interval_set<unichar, std::less, interval> implT;
- typedef implT::const_iterator const_iterator;
-
- CodePointSet(void) {}
-
- explicit CodePointSet(const interval &st) : impl(st) {}
-
- bool none(void) const {
- return impl.empty();
- }
-
- void set(unichar c) {
- assert(c <= MAX_UNICODE);
- impl.insert(c);
- }
-
- void unset(unichar c) {
- assert(c <= MAX_UNICODE);
- impl.subtract(c);
- }
-
- void setRange(unichar from, unichar to) { /* inclusive */
- assert(from <= to);
- assert(to <= MAX_UNICODE);
- impl.insert(interval(from, to));
- }
-
- void unsetRange(unichar from, unichar to) { /* inclusive */
- assert(from <= to);
- assert(to <= MAX_UNICODE);
- impl.subtract(interval(from, to));
- }
-
- void flip(void) {
- impl = implT(interval(0, MAX_UNICODE)) - impl;
- }
-
- void operator|=(const CodePointSet &a) {
- impl += a.impl;
- }
-
- const_iterator begin(void) const {
- return impl.begin();
- }
-
- const_iterator end(void) const {
- return impl.end();
- }
-
- size_t count(void) const {
- return cardinality(impl);
- }
-
- CodePointSet operator~(void) const {
- CodePointSet rv = *this;
- rv.flip();
- return rv;
- }
-
- bool operator==(const CodePointSet &a) const {
- return is_element_equal(impl, a.impl);
- }
-
- bool operator!=(const CodePointSet &a) const {
- return !is_element_equal(impl, a.impl);
- }
-
- bool isSubset(const CodePointSet &a) const {
- // Check that adding an interval set has no effect
- return ((impl + a.impl) == impl);
- }
-
- void operator-=(const CodePointSet &a) {
- impl -= a.impl;
- }
-
- /* finds the nth set codepoint, returns INVALID_UNICODE on failure */
- unichar at(size_t pos) const {
- for (const_iterator i = begin(), e = end(); i != e; ++i) {
- size_t int_count = cardinality(*i);
- if (int_count <= pos) {
- /* not in this interval, check next */
- pos -= int_count;
- continue;
- } else {
- return lower(*i) + pos;
- }
- }
-
- return INVALID_UNICODE;
- }
-
- void swap(CodePointSet &other) { impl.swap(other.impl); }
-
-private:
- implT impl;
-};
-
-} // namespace ue2
-
-#endif
+/*
+ * Copyright (c) 2015, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef UNICODE_SET
+#define UNICODE_SET
+
+#include "unicode_def.h"
+
+#include <boost/icl/interval_set.hpp>
+
+namespace ue2 {
+
+class CodePointSet {
+public:
+ typedef boost::icl::closed_interval<unichar> interval;
+ typedef boost::icl::interval_set<unichar, std::less, interval> implT;
+ typedef implT::const_iterator const_iterator;
+
+ CodePointSet(void) {}
+
+ explicit CodePointSet(const interval &st) : impl(st) {}
+
+ bool none(void) const {
+ return impl.empty();
+ }
+
+ void set(unichar c) {
+ assert(c <= MAX_UNICODE);
+ impl.insert(c);
+ }
+
+ void unset(unichar c) {
+ assert(c <= MAX_UNICODE);
+ impl.subtract(c);
+ }
+
+ void setRange(unichar from, unichar to) { /* inclusive */
+ assert(from <= to);
+ assert(to <= MAX_UNICODE);
+ impl.insert(interval(from, to));
+ }
+
+ void unsetRange(unichar from, unichar to) { /* inclusive */
+ assert(from <= to);
+ assert(to <= MAX_UNICODE);
+ impl.subtract(interval(from, to));
+ }
+
+ void flip(void) {
+ impl = implT(interval(0, MAX_UNICODE)) - impl;
+ }
+
+ void operator|=(const CodePointSet &a) {
+ impl += a.impl;
+ }
+
+ const_iterator begin(void) const {
+ return impl.begin();
+ }
+
+ const_iterator end(void) const {
+ return impl.end();
+ }
+
+ size_t count(void) const {
+ return cardinality(impl);
+ }
+
+ CodePointSet operator~(void) const {
+ CodePointSet rv = *this;
+ rv.flip();
+ return rv;
+ }
+
+ bool operator==(const CodePointSet &a) const {
+ return is_element_equal(impl, a.impl);
+ }
+
+ bool operator!=(const CodePointSet &a) const {
+ return !is_element_equal(impl, a.impl);
+ }
+
+ bool isSubset(const CodePointSet &a) const {
+ // Check that adding an interval set has no effect
+ return ((impl + a.impl) == impl);
+ }
+
+ void operator-=(const CodePointSet &a) {
+ impl -= a.impl;
+ }
+
+ /* finds the nth set codepoint, returns INVALID_UNICODE on failure */
+ unichar at(size_t pos) const {
+ for (const_iterator i = begin(), e = end(); i != e; ++i) {
+ size_t int_count = cardinality(*i);
+ if (int_count <= pos) {
+ /* not in this interval, check next */
+ pos -= int_count;
+ continue;
+ } else {
+ return lower(*i) + pos;
+ }
+ }
+
+ return INVALID_UNICODE;
+ }
+
+ void swap(CodePointSet &other) { impl.swap(other.impl); }
+
+private:
+ implT impl;
+};
+
+} // namespace ue2
+
+#endif
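
[Editor's note on the file above] Because CodePointSet wraps a boost::icl interval set, setRange/unsetRange over large spans stay cheap: each contiguous run costs one interval node, not one entry per code point, which matters when sets routinely cover most of the 0x110000 code space. A short usage sketch built only from the methods shown above (the ranges chosen are illustrative):

    /* Sketch: the set of all non-ASCII, non-surrogate code points. */
    ue2::CodePointSet cps;
    cps.setRange(0, MAX_UNICODE);
    cps.unsetRange(0, 0x7f);                                      /* drop ASCII */
    cps.unsetRange(UNICODE_SURROGATE_MIN, UNICODE_SURROGATE_MAX); /* drop surrogates */
    assert(!cps.none());
    assert(cps.at(0) == 0x80);   /* first remaining code point */
    assert((~cps).at(0) == 0);   /* complement starts at NUL */
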
diff --git a/contrib/libs/hyperscan/src/util/uniform_ops.h b/contrib/libs/hyperscan/src/util/uniform_ops.h
index 262104aca2..89afe39fc7 100644
--- a/contrib/libs/hyperscan/src/util/uniform_ops.h
+++ b/contrib/libs/hyperscan/src/util/uniform_ops.h
@@ -1,106 +1,106 @@
-/*
+/*
* Copyright (c) 2015-2020, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \file
- * \brief Uniformly-named primitives named by target type.
- *
- * The following are a set of primitives named by target type, so that we can
- * macro the hell out of all our NFA implementations. Hurrah!
- */
-
-#ifndef UNIFORM_OPS_H
-#define UNIFORM_OPS_H
-
-#include "ue2common.h"
-#include "simd_utils.h"
-#include "unaligned.h"
-
-// Aligned loads
-#define load_u8(a) (*(const u8 *)(a))
-#define load_u16(a) (*(const u16 *)(a))
-#define load_u32(a) (*(const u32 *)(a))
-#define load_u64a(a) (*(const u64a *)(a))
-#define load_m128(a) load128(a)
-#define load_m256(a) load256(a)
-#define load_m384(a) load384(a)
-#define load_m512(a) load512(a)
-
-// Unaligned loads
-#define loadu_u8(a) (*(const u8 *)(a))
-#define loadu_u16(a) unaligned_load_u16((const u8 *)(a))
-#define loadu_u32(a) unaligned_load_u32((const u8 *)(a))
-#define loadu_u64a(a) unaligned_load_u64a((const u8 *)(a))
-#define loadu_m128(a) loadu128(a)
-#define loadu_m256(a) loadu256(a)
-#define loadu_m384(a) loadu384(a)
-#define loadu_m512(a) loadu512(a)
-
-// Aligned stores
-#define store_u8(ptr, a) do { *(u8 *)(ptr) = (a); } while(0)
-#define store_u16(ptr, a) do { *(u16 *)(ptr) = (a); } while(0)
-#define store_u32(ptr, a) do { *(u32 *)(ptr) = (a); } while(0)
-#define store_u64a(ptr, a) do { *(u64a *)(ptr) = (a); } while(0)
-#define store_m128(ptr, a) store128(ptr, a)
-#define store_m256(ptr, a) store256(ptr, a)
-#define store_m384(ptr, a) store384(ptr, a)
-#define store_m512(ptr, a) store512(ptr, a)
-
-// Unaligned stores
-#define storeu_u8(ptr, a) do { *(u8 *)(ptr) = (a); } while(0)
-#define storeu_u16(ptr, a) unaligned_store_u16(ptr, a)
-#define storeu_u32(ptr, a) unaligned_store_u32(ptr, a)
-#define storeu_u64a(ptr, a) unaligned_store_u64a(ptr, a)
-#define storeu_m128(ptr, a) storeu128(ptr, a)
-
-#define zero_u8 0
-#define zero_u32 0
-#define zero_u64a 0
-#define zero_m128 zeroes128()
-#define zero_m256 zeroes256()
-#define zero_m384 zeroes384()
-#define zero_m512 zeroes512()
-
-#define ones_u8 0xff
-#define ones_u32 0xfffffffful
-#define ones_u64a 0xffffffffffffffffull
-#define ones_m128 ones128()
-#define ones_m256 ones256()
-#define ones_m384 ones384()
-#define ones_m512 ones512()
-
-#define or_u8(a, b) ((a) | (b))
-#define or_u32(a, b) ((a) | (b))
-#define or_u64a(a, b) ((a) | (b))
-#define or_m128(a, b) (or128(a, b))
-#define or_m256(a, b) (or256(a, b))
-#define or_m384(a, b) (or384(a, b))
-#define or_m512(a, b) (or512(a, b))
-
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * \brief Uniformly-named primitives named by target type.
+ *
+ * The following are a set of primitives named by target type, so that we can
+ * macro the hell out of all our NFA implementations. Hurrah!
+ */
+
+#ifndef UNIFORM_OPS_H
+#define UNIFORM_OPS_H
+
+#include "ue2common.h"
+#include "simd_utils.h"
+#include "unaligned.h"
+
+// Aligned loads
+#define load_u8(a) (*(const u8 *)(a))
+#define load_u16(a) (*(const u16 *)(a))
+#define load_u32(a) (*(const u32 *)(a))
+#define load_u64a(a) (*(const u64a *)(a))
+#define load_m128(a) load128(a)
+#define load_m256(a) load256(a)
+#define load_m384(a) load384(a)
+#define load_m512(a) load512(a)
+
+// Unaligned loads
+#define loadu_u8(a) (*(const u8 *)(a))
+#define loadu_u16(a) unaligned_load_u16((const u8 *)(a))
+#define loadu_u32(a) unaligned_load_u32((const u8 *)(a))
+#define loadu_u64a(a) unaligned_load_u64a((const u8 *)(a))
+#define loadu_m128(a) loadu128(a)
+#define loadu_m256(a) loadu256(a)
+#define loadu_m384(a) loadu384(a)
+#define loadu_m512(a) loadu512(a)
+
+// Aligned stores
+#define store_u8(ptr, a) do { *(u8 *)(ptr) = (a); } while(0)
+#define store_u16(ptr, a) do { *(u16 *)(ptr) = (a); } while(0)
+#define store_u32(ptr, a) do { *(u32 *)(ptr) = (a); } while(0)
+#define store_u64a(ptr, a) do { *(u64a *)(ptr) = (a); } while(0)
+#define store_m128(ptr, a) store128(ptr, a)
+#define store_m256(ptr, a) store256(ptr, a)
+#define store_m384(ptr, a) store384(ptr, a)
+#define store_m512(ptr, a) store512(ptr, a)
+
+// Unaligned stores
+#define storeu_u8(ptr, a) do { *(u8 *)(ptr) = (a); } while(0)
+#define storeu_u16(ptr, a) unaligned_store_u16(ptr, a)
+#define storeu_u32(ptr, a) unaligned_store_u32(ptr, a)
+#define storeu_u64a(ptr, a) unaligned_store_u64a(ptr, a)
+#define storeu_m128(ptr, a) storeu128(ptr, a)
+
+#define zero_u8 0
+#define zero_u32 0
+#define zero_u64a 0
+#define zero_m128 zeroes128()
+#define zero_m256 zeroes256()
+#define zero_m384 zeroes384()
+#define zero_m512 zeroes512()
+
+#define ones_u8 0xff
+#define ones_u32 0xfffffffful
+#define ones_u64a 0xffffffffffffffffull
+#define ones_m128 ones128()
+#define ones_m256 ones256()
+#define ones_m384 ones384()
+#define ones_m512 ones512()
+
+#define or_u8(a, b) ((a) | (b))
+#define or_u32(a, b) ((a) | (b))
+#define or_u64a(a, b) ((a) | (b))
+#define or_m128(a, b) (or128(a, b))
+#define or_m256(a, b) (or256(a, b))
+#define or_m384(a, b) (or384(a, b))
+#define or_m512(a, b) (or512(a, b))
+
#if defined(HAVE_AVX512VBMI)
#define expand_m128(a) (expand128(a))
#define expand_m256(a) (expand256(a))
@@ -113,131 +113,131 @@
#define shuffle_byte_m512(a, b) (vpermb512(a, b))
#endif
-#define and_u8(a, b) ((a) & (b))
-#define and_u32(a, b) ((a) & (b))
-#define and_u64a(a, b) ((a) & (b))
-#define and_m128(a, b) (and128(a, b))
-#define and_m256(a, b) (and256(a, b))
-#define and_m384(a, b) (and384(a, b))
-#define and_m512(a, b) (and512(a, b))
-
-#define not_u8(a) (~(a))
-#define not_u32(a) (~(a))
-#define not_u64a(a) (~(a))
-#define not_m128(a) (not128(a))
-#define not_m256(a) (not256(a))
-#define not_m384(a) (not384(a))
-#define not_m512(a) (not512(a))
-
-#define andnot_u8(a, b) ((~(a)) & (b))
-#define andnot_u32(a, b) ((~(a)) & (b))
-#define andnot_u64a(a, b) ((~(a)) & (b))
-#define andnot_m128(a, b) (andnot128(a, b))
-#define andnot_m256(a, b) (andnot256(a, b))
-#define andnot_m384(a, b) (andnot384(a, b))
-#define andnot_m512(a, b) (andnot512(a, b))
-
+#define and_u8(a, b) ((a) & (b))
+#define and_u32(a, b) ((a) & (b))
+#define and_u64a(a, b) ((a) & (b))
+#define and_m128(a, b) (and128(a, b))
+#define and_m256(a, b) (and256(a, b))
+#define and_m384(a, b) (and384(a, b))
+#define and_m512(a, b) (and512(a, b))
+
+#define not_u8(a) (~(a))
+#define not_u32(a) (~(a))
+#define not_u64a(a) (~(a))
+#define not_m128(a) (not128(a))
+#define not_m256(a) (not256(a))
+#define not_m384(a) (not384(a))
+#define not_m512(a) (not512(a))
+
+#define andnot_u8(a, b) ((~(a)) & (b))
+#define andnot_u32(a, b) ((~(a)) & (b))
+#define andnot_u64a(a, b) ((~(a)) & (b))
+#define andnot_m128(a, b) (andnot128(a, b))
+#define andnot_m256(a, b) (andnot256(a, b))
+#define andnot_m384(a, b) (andnot384(a, b))
+#define andnot_m512(a, b) (andnot512(a, b))
+
#define lshift_u32(a, b) ((a) << (b))
#define lshift_u64a(a, b) ((a) << (b))
#define lshift_m128(a, b) (lshift64_m128(a, b))
#define lshift_m256(a, b) (lshift64_m256(a, b))
#define lshift_m384(a, b) (lshift64_m384(a, b))
#define lshift_m512(a, b) (lshift64_m512(a, b))
-
-#define isZero_u8(a) ((a) == 0)
-#define isZero_u32(a) ((a) == 0)
-#define isZero_u64a(a) ((a) == 0)
-#define isZero_m128(a) (!isnonzero128(a))
-#define isZero_m256(a) (!isnonzero256(a))
-#define isZero_m384(a) (!isnonzero384(a))
-#define isZero_m512(a) (!isnonzero512(a))
-
-#define isNonZero_u8(a) ((a) != 0)
-#define isNonZero_u32(a) ((a) != 0)
-#define isNonZero_u64a(a) ((a) != 0)
-#define isNonZero_m128(a) (isnonzero128(a))
-#define isNonZero_m256(a) (isnonzero256(a))
-#define isNonZero_m384(a) (isnonzero384(a))
-#define isNonZero_m512(a) (isnonzero512(a))
-
-#define diffrich_u32(a, b) ((a) != (b))
-#define diffrich_u64a(a, b) ((a) != (b) ? 3 : 0) //TODO: impl 32bit granularity
-#define diffrich_m128(a, b) (diffrich128(a, b))
-#define diffrich_m256(a, b) (diffrich256(a, b))
-#define diffrich_m384(a, b) (diffrich384(a, b))
-#define diffrich_m512(a, b) (diffrich512(a, b))
-
-#define diffrich64_u32(a, b) ((a) != (b))
-#define diffrich64_u64a(a, b) ((a) != (b) ? 1 : 0)
-#define diffrich64_m128(a, b) (diffrich64_128(a, b))
-#define diffrich64_m256(a, b) (diffrich64_256(a, b))
-#define diffrich64_m384(a, b) (diffrich64_384(a, b))
-#define diffrich64_m512(a, b) (diffrich64_512(a, b))
-
-#define noteq_u8(a, b) ((a) != (b))
-#define noteq_u32(a, b) ((a) != (b))
-#define noteq_u64a(a, b) ((a) != (b))
-#define noteq_m128(a, b) (diff128(a, b))
-#define noteq_m256(a, b) (diff256(a, b))
-#define noteq_m384(a, b) (diff384(a, b))
-#define noteq_m512(a, b) (diff512(a, b))
-
-#define partial_store_m128(ptr, v, sz) storebytes128(ptr, v, sz)
-#define partial_store_m256(ptr, v, sz) storebytes256(ptr, v, sz)
-#define partial_store_m384(ptr, v, sz) storebytes384(ptr, v, sz)
-#define partial_store_m512(ptr, v, sz) storebytes512(ptr, v, sz)
-
-#define partial_load_m128(ptr, sz) loadbytes128(ptr, sz)
-#define partial_load_m256(ptr, sz) loadbytes256(ptr, sz)
-#define partial_load_m384(ptr, sz) loadbytes384(ptr, sz)
-#define partial_load_m512(ptr, sz) loadbytes512(ptr, sz)
-
+
+#define isZero_u8(a) ((a) == 0)
+#define isZero_u32(a) ((a) == 0)
+#define isZero_u64a(a) ((a) == 0)
+#define isZero_m128(a) (!isnonzero128(a))
+#define isZero_m256(a) (!isnonzero256(a))
+#define isZero_m384(a) (!isnonzero384(a))
+#define isZero_m512(a) (!isnonzero512(a))
+
+#define isNonZero_u8(a) ((a) != 0)
+#define isNonZero_u32(a) ((a) != 0)
+#define isNonZero_u64a(a) ((a) != 0)
+#define isNonZero_m128(a) (isnonzero128(a))
+#define isNonZero_m256(a) (isnonzero256(a))
+#define isNonZero_m384(a) (isnonzero384(a))
+#define isNonZero_m512(a) (isnonzero512(a))
+
+#define diffrich_u32(a, b) ((a) != (b))
+#define diffrich_u64a(a, b) ((a) != (b) ? 3 : 0) //TODO: impl 32bit granularity
+#define diffrich_m128(a, b) (diffrich128(a, b))
+#define diffrich_m256(a, b) (diffrich256(a, b))
+#define diffrich_m384(a, b) (diffrich384(a, b))
+#define diffrich_m512(a, b) (diffrich512(a, b))
+
+#define diffrich64_u32(a, b) ((a) != (b))
+#define diffrich64_u64a(a, b) ((a) != (b) ? 1 : 0)
+#define diffrich64_m128(a, b) (diffrich64_128(a, b))
+#define diffrich64_m256(a, b) (diffrich64_256(a, b))
+#define diffrich64_m384(a, b) (diffrich64_384(a, b))
+#define diffrich64_m512(a, b) (diffrich64_512(a, b))
+
+#define noteq_u8(a, b) ((a) != (b))
+#define noteq_u32(a, b) ((a) != (b))
+#define noteq_u64a(a, b) ((a) != (b))
+#define noteq_m128(a, b) (diff128(a, b))
+#define noteq_m256(a, b) (diff256(a, b))
+#define noteq_m384(a, b) (diff384(a, b))
+#define noteq_m512(a, b) (diff512(a, b))
+
+#define partial_store_m128(ptr, v, sz) storebytes128(ptr, v, sz)
+#define partial_store_m256(ptr, v, sz) storebytes256(ptr, v, sz)
+#define partial_store_m384(ptr, v, sz) storebytes384(ptr, v, sz)
+#define partial_store_m512(ptr, v, sz) storebytes512(ptr, v, sz)
+
+#define partial_load_m128(ptr, sz) loadbytes128(ptr, sz)
+#define partial_load_m256(ptr, sz) loadbytes256(ptr, sz)
+#define partial_load_m384(ptr, sz) loadbytes384(ptr, sz)
+#define partial_load_m512(ptr, sz) loadbytes512(ptr, sz)
+
#define store_compressed_u32(ptr, x, m, len) storecompressed32(ptr, x, m, len)
#define store_compressed_u64a(ptr, x, m, len) storecompressed64(ptr, x, m, len)
#define store_compressed_m128(ptr, x, m, len) storecompressed128(ptr, x, m, len)
#define store_compressed_m256(ptr, x, m, len) storecompressed256(ptr, x, m, len)
#define store_compressed_m384(ptr, x, m, len) storecompressed384(ptr, x, m, len)
#define store_compressed_m512(ptr, x, m, len) storecompressed512(ptr, x, m, len)
-
+
#define load_compressed_u32(x, ptr, m, len) loadcompressed32(x, ptr, m, len)
#define load_compressed_u64a(x, ptr, m, len) loadcompressed64(x, ptr, m, len)
#define load_compressed_m128(x, ptr, m, len) loadcompressed128(x, ptr, m, len)
#define load_compressed_m256(x, ptr, m, len) loadcompressed256(x, ptr, m, len)
#define load_compressed_m384(x, ptr, m, len) loadcompressed384(x, ptr, m, len)
#define load_compressed_m512(x, ptr, m, len) loadcompressed512(x, ptr, m, len)
-
+
static really_inline
void clearbit_u32(u32 *p, u32 n) {
- assert(n < sizeof(*p) * 8);
- *p &= ~(1U << n);
-}
+ assert(n < sizeof(*p) * 8);
+ *p &= ~(1U << n);
+}
static really_inline
void clearbit_u64a(u64a *p, u32 n) {
- assert(n < sizeof(*p) * 8);
- *p &= ~(1ULL << n);
-}
-
-#define clearbit_m128(ptr, n) (clearbit128(ptr, n))
-#define clearbit_m256(ptr, n) (clearbit256(ptr, n))
-#define clearbit_m384(ptr, n) (clearbit384(ptr, n))
-#define clearbit_m512(ptr, n) (clearbit512(ptr, n))
-
+ assert(n < sizeof(*p) * 8);
+ *p &= ~(1ULL << n);
+}
+
+#define clearbit_m128(ptr, n) (clearbit128(ptr, n))
+#define clearbit_m256(ptr, n) (clearbit256(ptr, n))
+#define clearbit_m384(ptr, n) (clearbit384(ptr, n))
+#define clearbit_m512(ptr, n) (clearbit512(ptr, n))
+
static really_inline
char testbit_u32(u32 val, u32 n) {
assert(n < sizeof(val) * 8);
return !!(val & (1U << n));
-}
+}
static really_inline
char testbit_u64a(u64a val, u32 n) {
assert(n < sizeof(val) * 8);
return !!(val & (1ULL << n));
-}
-
+}
+
#define testbit_m128(val, n) (testbit128(val, n))
#define testbit_m256(val, n) (testbit256(val, n))
#define testbit_m384(val, n) (testbit384(val, n))
#define testbit_m512(val, n) (testbit512(val, n))
-#endif
+#endif
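
[Editor's note on the file above] The point of the uniform _u32/_u64a/_m128/... suffixes is token pasting: an engine step can be written once and stamped out per state width, exactly as the file comment promises. A hedged sketch of that pattern; GEN_STEP, step_##T, and succ_table are illustrative names, not Hyperscan's, and succ_table is assumed suitably aligned for the SIMD case:

    /* Sketch: one body, many state types, via the suffix convention. */
    #define GEN_STEP(T)                                       \
        static really_inline                                  \
        T step_##T(const T *succ_table, T state, u32 sym) {   \
            T succ = load_##T(&succ_table[sym]);              \
            return and_##T(state, succ);                      \
        }

    GEN_STEP(u32)  /* expands to load_u32 and and_u32 */
    GEN_STEP(u64a) /* expands to load_u64a and and_u64a */
    GEN_STEP(m128) /* same body, now SIMD: load128 and and128 */
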
diff --git a/contrib/libs/hyperscan/src/util/verify_types.h b/contrib/libs/hyperscan/src/util/verify_types.h
index 5833d5ec62..7426acdd29 100644
--- a/contrib/libs/hyperscan/src/util/verify_types.h
+++ b/contrib/libs/hyperscan/src/util/verify_types.h
@@ -1,42 +1,42 @@
-/*
+/*
* Copyright (c) 2015-2017, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef UTIL_VERIFY_TYPES
-#define UTIL_VERIFY_TYPES
-
-#include "ue2common.h"
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef UTIL_VERIFY_TYPES
+#define UTIL_VERIFY_TYPES
+
+#include "ue2common.h"
#include "util/compile_error.h"
-
-#include <cassert>
+
+#include <cassert>
#include <type_traits>
-
-namespace ue2 {
-
+
+namespace ue2 {
+
template<typename To_T, typename From_T>
To_T verify_cast(From_T val) {
static_assert(std::is_integral<To_T>::value,
@@ -53,38 +53,38 @@ To_T verify_cast(From_T val) {
}
return conv_val;
-}
-
+}
+
template<typename T>
s8 verify_s8(T val) {
return verify_cast<s8>(val);
-}
-
+}
+
template<typename T>
u8 verify_u8(T val) {
return verify_cast<u8>(val);
-}
-
+}
+
template<typename T>
s16 verify_s16(T val) {
return verify_cast<s16>(val);
-}
-
+}
+
template<typename T>
u16 verify_u16(T val) {
return verify_cast<u16>(val);
-}
-
+}
+
template<typename T>
s32 verify_s32(T val) {
return verify_cast<s32>(val);
-}
-
+}
+
template<typename T>
u32 verify_u32(T val) {
return verify_cast<u32>(val);
}
-} // namespace ue2
-
-#endif // UTIL_VERIFY_TYPES
+} // namespace ue2
+
+#endif // UTIL_VERIFY_TYPES
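
[Editor's note on the file above] verify_cast is a checked narrowing conversion: the static_assert constrains the destination to an integral type, and the part of the function outside this hunk rejects any value that does not survive the round-trip back to From_T (hence the util/compile_error.h include). A usage sketch, assuming that rejection behaviour; the values are illustrative:

    /* Sketch: checked narrowing while building a pattern database. */
    size_t num_states = 200;
    u8 packed = ue2::verify_u8(num_states);  /* 200 fits in a u8 */
    /* ue2::verify_u8(300) would be rejected rather than silently
     * truncated to 44 (300 mod 256). */
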