path: root/contrib/libs/hyperscan/src/util
author    Ivan Blinkov <ivan@blinkov.ru>  2022-02-10 16:47:11 +0300
committer Daniil Cherednik <dcherednik@yandex-team.ru>  2022-02-10 16:47:11 +0300
commit    5b283123c882433dafbaf6b338adeea16c1a0ea0 (patch)
tree      339adc63bce23800021202ae4a8328a843dc447a /contrib/libs/hyperscan/src/util
parent    1aeb9a455974457866f78722ad98114bafc84e8a (diff)
download  ydb-5b283123c882433dafbaf6b338adeea16c1a0ea0.tar.gz
Restoring authorship annotation for Ivan Blinkov <ivan@blinkov.ru>. Commit 2 of 2.
Diffstat (limited to 'contrib/libs/hyperscan/src/util')
-rw-r--r--  contrib/libs/hyperscan/src/util/accel_scheme.h  102
-rw-r--r--  contrib/libs/hyperscan/src/util/alloc.h  60
-rw-r--r--  contrib/libs/hyperscan/src/util/arch.h  86
-rw-r--r--  contrib/libs/hyperscan/src/util/bitfield.h  98
-rw-r--r--  contrib/libs/hyperscan/src/util/bitutils.h  186
-rw-r--r--  contrib/libs/hyperscan/src/util/boundary_reports.h  6
-rw-r--r--  contrib/libs/hyperscan/src/util/bytecode_ptr.h  322
-rw-r--r--  contrib/libs/hyperscan/src/util/charreach.cpp  2
-rw-r--r--  contrib/libs/hyperscan/src/util/charreach.h  68
-rw-r--r--  contrib/libs/hyperscan/src/util/charreach_util.h  4
-rw-r--r--  contrib/libs/hyperscan/src/util/clique.cpp  260
-rw-r--r--  contrib/libs/hyperscan/src/util/clique.h  120
-rw-r--r--  contrib/libs/hyperscan/src/util/compare.h  28
-rw-r--r--  contrib/libs/hyperscan/src/util/container.h  58
-rw-r--r--  contrib/libs/hyperscan/src/util/copybytes.h  162
-rw-r--r--  contrib/libs/hyperscan/src/util/cpuid_flags.c  70
-rw-r--r--  contrib/libs/hyperscan/src/util/cpuid_flags.h  20
-rw-r--r--  contrib/libs/hyperscan/src/util/cpuid_inline.h  422
-rw-r--r--  contrib/libs/hyperscan/src/util/depth.h  112
-rw-r--r--  contrib/libs/hyperscan/src/util/determinise.h  78
-rw-r--r--  contrib/libs/hyperscan/src/util/dump_charclass.h  32
-rw-r--r--  contrib/libs/hyperscan/src/util/exhaust.h  6
-rw-r--r--  contrib/libs/hyperscan/src/util/fatbit.h  24
-rw-r--r--  contrib/libs/hyperscan/src/util/fatbit_build.cpp  88
-rw-r--r--  contrib/libs/hyperscan/src/util/fatbit_build.h  96
-rw-r--r--  contrib/libs/hyperscan/src/util/flat_containers.h  1328
-rw-r--r--  contrib/libs/hyperscan/src/util/graph.h  252
-rw-r--r--  contrib/libs/hyperscan/src/util/graph_range.h  2
-rw-r--r--  contrib/libs/hyperscan/src/util/graph_small_color_map.h  320
-rw-r--r--  contrib/libs/hyperscan/src/util/hash.h  414
-rw-r--r--  contrib/libs/hyperscan/src/util/hash_dynamic_bitset.h  192
-rw-r--r--  contrib/libs/hyperscan/src/util/insertion_ordered.h  736
-rw-r--r--  contrib/libs/hyperscan/src/util/intrinsics.h  132
-rw-r--r--  contrib/libs/hyperscan/src/util/join.h  12
-rw-r--r--  contrib/libs/hyperscan/src/util/make_unique.h  6
-rw-r--r--  contrib/libs/hyperscan/src/util/masked_move.c  8
-rw-r--r--  contrib/libs/hyperscan/src/util/masked_move.h  24
-rw-r--r--  contrib/libs/hyperscan/src/util/math.h  100
-rw-r--r--  contrib/libs/hyperscan/src/util/multibit.c  2
-rw-r--r--  contrib/libs/hyperscan/src/util/multibit.h  160
-rw-r--r--  contrib/libs/hyperscan/src/util/multibit_build.cpp  68
-rw-r--r--  contrib/libs/hyperscan/src/util/multibit_build.h  54
-rw-r--r--  contrib/libs/hyperscan/src/util/multibit_compress.h  408
-rw-r--r--  contrib/libs/hyperscan/src/util/multibit_internal.h  8
-rw-r--r--  contrib/libs/hyperscan/src/util/noncopyable.h  100
-rw-r--r--  contrib/libs/hyperscan/src/util/operators.h  120
-rw-r--r--  contrib/libs/hyperscan/src/util/partitioned_set.h  16
-rw-r--r--  contrib/libs/hyperscan/src/util/popcount.h  26
-rw-r--r--  contrib/libs/hyperscan/src/util/queue_index_factory.h  6
-rw-r--r--  contrib/libs/hyperscan/src/util/report.h  116
-rw-r--r--  contrib/libs/hyperscan/src/util/report_manager.cpp  54
-rw-r--r--  contrib/libs/hyperscan/src/util/report_manager.h  44
-rw-r--r--  contrib/libs/hyperscan/src/util/simd_types.h  24
-rw-r--r--  contrib/libs/hyperscan/src/util/simd_utils.c  124
-rw-r--r--  contrib/libs/hyperscan/src/util/simd_utils.h  982
-rw-r--r--  contrib/libs/hyperscan/src/util/small_vector.h  140
-rw-r--r--  contrib/libs/hyperscan/src/util/state_compress.c  44
-rw-r--r--  contrib/libs/hyperscan/src/util/target_info.cpp  20
-rw-r--r--  contrib/libs/hyperscan/src/util/target_info.h  4
-rw-r--r--  contrib/libs/hyperscan/src/util/ue2_graph.h  2462
-rw-r--r--  contrib/libs/hyperscan/src/util/ue2string.cpp  126
-rw-r--r--  contrib/libs/hyperscan/src/util/ue2string.h  230
-rw-r--r--  contrib/libs/hyperscan/src/util/uniform_ops.h  80
-rw-r--r--  contrib/libs/hyperscan/src/util/unordered.h  106
-rw-r--r--  contrib/libs/hyperscan/src/util/verify_types.h  78
65 files changed, 5819 insertions, 5819 deletions
diff --git a/contrib/libs/hyperscan/src/util/accel_scheme.h b/contrib/libs/hyperscan/src/util/accel_scheme.h
index 080b77dfda..2a067b30c6 100644
--- a/contrib/libs/hyperscan/src/util/accel_scheme.h
+++ b/contrib/libs/hyperscan/src/util/accel_scheme.h
@@ -1,51 +1,51 @@
-/*
- * Copyright (c) 2016-2017, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef ACCEL_SCHEME_H
-#define ACCEL_SCHEME_H
-
-#include "util/charreach.h"
-#include "util/flat_containers.h"
-
-#include <utility>
-
-namespace ue2 {
-
-#define MAX_ACCEL_DEPTH 4
-
-struct AccelScheme {
- flat_set<std::pair<u8, u8>> double_byte;
- CharReach cr = CharReach::dot();
- CharReach double_cr;
- u32 offset = MAX_ACCEL_DEPTH + 1;
- u32 double_offset = 0;
-};
-
-}
-
-#endif
+/*
+ * Copyright (c) 2016-2017, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ACCEL_SCHEME_H
+#define ACCEL_SCHEME_H
+
+#include "util/charreach.h"
+#include "util/flat_containers.h"
+
+#include <utility>
+
+namespace ue2 {
+
+#define MAX_ACCEL_DEPTH 4
+
+struct AccelScheme {
+ flat_set<std::pair<u8, u8>> double_byte;
+ CharReach cr = CharReach::dot();
+ CharReach double_cr;
+ u32 offset = MAX_ACCEL_DEPTH + 1;
+ u32 double_offset = 0;
+};
+
+}
+
+#endif
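
Note: accel_scheme.h defines AccelScheme, the candidate acceleration description used by the compiler: a single-byte reachability class cr with a search offset, plus an optional double-byte scheme (double_byte, double_cr, double_offset). A minimal sketch of populating one, with hypothetical values and assuming the CharReach range constructor from util/charreach.h:

    ue2::AccelScheme as;
    as.cr = ue2::CharReach('a', 'b'); // accelerate on bytes 'a'..'b' (range ctor assumed)
    as.offset = 0;                    // the default MAX_ACCEL_DEPTH + 1 appears to mark "no scheme"
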
diff --git a/contrib/libs/hyperscan/src/util/alloc.h b/contrib/libs/hyperscan/src/util/alloc.h
index bec3fdb24a..de20c8d028 100644
--- a/contrib/libs/hyperscan/src/util/alloc.h
+++ b/contrib/libs/hyperscan/src/util/alloc.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -26,8 +26,8 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-/**
- * \file
+/**
+ * \file
* \brief Aligned memory alloc/free.
*/
@@ -60,41 +60,41 @@ void aligned_free_internal(void *ptr);
/** \brief Aligned allocator class for use with STL containers. Ensures that
* your objects are aligned to N bytes. */
-template <class T, std::size_t N>
-class AlignedAllocator {
+template <class T, std::size_t N>
+class AlignedAllocator {
public:
- using value_type = T;
+ using value_type = T;
- AlignedAllocator() noexcept {}
+ AlignedAllocator() noexcept {}
- template <class U, std::size_t N2>
- AlignedAllocator(const AlignedAllocator<U, N2> &) noexcept {}
+ template <class U, std::size_t N2>
+ AlignedAllocator(const AlignedAllocator<U, N2> &) noexcept {}
- template <class U> struct rebind {
- using other = AlignedAllocator<U, N>;
- };
+ template <class U> struct rebind {
+ using other = AlignedAllocator<U, N>;
+ };
- T *allocate(std::size_t size) const {
- size_t alloc_size = size * sizeof(T);
- return static_cast<T *>(aligned_malloc_internal(alloc_size, N));
+ T *allocate(std::size_t size) const {
+ size_t alloc_size = size * sizeof(T);
+ return static_cast<T *>(aligned_malloc_internal(alloc_size, N));
}
- void deallocate(T *x, std::size_t) const noexcept {
- aligned_free_internal(x);
+ void deallocate(T *x, std::size_t) const noexcept {
+ aligned_free_internal(x);
}
-};
-
-template <class T, class U, std::size_t N, std::size_t N2>
-bool operator==(const AlignedAllocator<T, N> &,
- const AlignedAllocator<U, N2> &) {
- return true;
-}
-
-template <class T, class U, std::size_t N, std::size_t N2>
-bool operator!=(const AlignedAllocator<T, N> &a,
- const AlignedAllocator<U, N2> &b) {
- return !(a == b);
-}
+};
+
+template <class T, class U, std::size_t N, std::size_t N2>
+bool operator==(const AlignedAllocator<T, N> &,
+ const AlignedAllocator<U, N2> &) {
+ return true;
+}
+
+template <class T, class U, std::size_t N, std::size_t N2>
+bool operator!=(const AlignedAllocator<T, N> &a,
+ const AlignedAllocator<U, N2> &b) {
+ return !(a == b);
+}
} // namespace ue2
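
Note: AlignedAllocator is a minimal C++ allocator whose allocate/deallocate route through the library's aligned_malloc_internal/aligned_free_internal. A usage sketch (alignment value chosen arbitrarily):

    #include <vector>
    #include "util/alloc.h"

    // The vector's backing store is 64-byte aligned, e.g. for aligned SIMD loads.
    std::vector<int, ue2::AlignedAllocator<int, 64>> v(128);

operator== returns true for any pair of instances because the allocator is stateless: memory obtained from one instance may safely be freed by another.
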
diff --git a/contrib/libs/hyperscan/src/util/arch.h b/contrib/libs/hyperscan/src/util/arch.h
index a38dff6e84..6220f12bc1 100644
--- a/contrib/libs/hyperscan/src/util/arch.h
+++ b/contrib/libs/hyperscan/src/util/arch.h
@@ -1,45 +1,45 @@
-/*
+/*
* Copyright (c) 2017-2020, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \file
- * \brief Per-platform architecture definitions
- */
-
-#ifndef UTIL_ARCH_H_
-#define UTIL_ARCH_H_
-
-#define HAVE_SSE2
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
-/*
- * MSVC uses a different form of inline asm
- */
-#if defined(_WIN32) && defined(_MSC_VER)
-#define NO_ASM
-#endif
-
-#endif // UTIL_ARCH_H_
+/** \file
+ * \brief Per-platform architecture definitions
+ */
+
+#ifndef UTIL_ARCH_H_
+#define UTIL_ARCH_H_
+
+#define HAVE_SSE2
+
+/*
+ * MSVC uses a different form of inline asm
+ */
+#if defined(_WIN32) && defined(_MSC_VER)
+#define NO_ASM
+#endif
+
+#endif // UTIL_ARCH_H_
diff --git a/contrib/libs/hyperscan/src/util/bitfield.h b/contrib/libs/hyperscan/src/util/bitfield.h
index a1c735971b..a580da7b60 100644
--- a/contrib/libs/hyperscan/src/util/bitfield.h
+++ b/contrib/libs/hyperscan/src/util/bitfield.h
@@ -36,7 +36,7 @@
#include "ue2common.h"
#include "popcount.h"
#include "util/bitutils.h"
-#include "util/hash.h"
+#include "util/hash.h"
#include <array>
#include <cassert>
@@ -187,16 +187,16 @@ public:
size_t count() const {
static_assert(block_size == 64, "adjust popcount for block_type");
size_t sum = 0;
- size_t i = 0;
- for (; i + 4 <= num_blocks; i += 4) {
- sum += popcount64(bits[i]);
- sum += popcount64(bits[i + 1]);
- sum += popcount64(bits[i + 2]);
- sum += popcount64(bits[i + 3]);
+ size_t i = 0;
+ for (; i + 4 <= num_blocks; i += 4) {
+ sum += popcount64(bits[i]);
+ sum += popcount64(bits[i + 1]);
+ sum += popcount64(bits[i + 2]);
+ sum += popcount64(bits[i + 3]);
+ }
+ for (; i < num_blocks; i++) {
+ sum += popcount64(bits[i]);
}
- for (; i < num_blocks; i++) {
- sum += popcount64(bits[i]);
- }
assert(sum <= size());
return sum;
}
@@ -313,14 +313,14 @@ public:
/// Bitwise OR-equals.
void operator|=(const bitfield &a) {
- size_t i = 0;
- for (; i + 4 <= num_blocks; i += 4) {
- bits[i] |= a.bits[i];
- bits[i + 1] |= a.bits[i + 1];
- bits[i + 2] |= a.bits[i + 2];
- bits[i + 3] |= a.bits[i + 3];
- }
- for (; i < num_blocks; i++) {
+ size_t i = 0;
+ for (; i + 4 <= num_blocks; i += 4) {
+ bits[i] |= a.bits[i];
+ bits[i + 1] |= a.bits[i + 1];
+ bits[i + 2] |= a.bits[i + 2];
+ bits[i + 3] |= a.bits[i + 3];
+ }
+ for (; i < num_blocks; i++) {
bits[i] |= a.bits[i];
}
}
@@ -334,34 +334,34 @@ public:
/// Bitwise AND-equals.
void operator&=(const bitfield &a) {
- size_t i = 0;
- for (; i + 4 <= num_blocks; i += 4) {
- bits[i] &= a.bits[i];
- bits[i + 1] &= a.bits[i + 1];
- bits[i + 2] &= a.bits[i + 2];
- bits[i + 3] &= a.bits[i + 3];
- }
- for (; i < num_blocks; i++) {
+ size_t i = 0;
+ for (; i + 4 <= num_blocks; i += 4) {
+ bits[i] &= a.bits[i];
+ bits[i + 1] &= a.bits[i + 1];
+ bits[i + 2] &= a.bits[i + 2];
+ bits[i + 3] &= a.bits[i + 3];
+ }
+ for (; i < num_blocks; i++) {
bits[i] &= a.bits[i];
}
}
/// Bitwise XOR.
- bitfield operator^(bitfield a) const {
- a ^= *this;
- return a;
+ bitfield operator^(bitfield a) const {
+ a ^= *this;
+ return a;
}
/// Bitwise XOR-equals.
- void operator^=(bitfield a) {
- size_t i = 0;
- for (; i + 4 <= num_blocks; i += 4) {
- bits[i] ^= a.bits[i];
- bits[i + 1] ^= a.bits[i + 1];
- bits[i + 2] ^= a.bits[i + 2];
- bits[i + 3] ^= a.bits[i + 3];
- }
- for (; i < num_blocks; i++) {
+ void operator^=(bitfield a) {
+ size_t i = 0;
+ for (; i + 4 <= num_blocks; i += 4) {
+ bits[i] ^= a.bits[i];
+ bits[i + 1] ^= a.bits[i + 1];
+ bits[i + 2] ^= a.bits[i + 2];
+ bits[i + 3] ^= a.bits[i + 3];
+ }
+ for (; i < num_blocks; i++) {
bits[i] ^= a.bits[i];
}
}
@@ -375,7 +375,7 @@ public:
/// Simple hash.
size_t hash() const {
- return ue2_hasher()(bits);
+ return ue2_hasher()(bits);
}
/// Sentinel value meaning "no more bits", used by find_first and
@@ -422,17 +422,17 @@ private:
std::array<block_type, num_blocks> bits;
};
-} // namespace ue2
-
-namespace std {
-
+} // namespace ue2
+
+namespace std {
+
template<size_t requested_size>
-struct hash<ue2::bitfield<requested_size>> {
- size_t operator()(const ue2::bitfield<requested_size> &b) const {
- return b.hash();
- }
-};
+struct hash<ue2::bitfield<requested_size>> {
+ size_t operator()(const ue2::bitfield<requested_size> &b) const {
+ return b.hash();
+ }
+};
-} // namespace std
+} // namespace std
#endif // BITFIELD_H
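
Note: the std::hash specialization at the end of this header delegates to bitfield::hash(), so a bitfield can key standard unordered containers directly. A short sketch (set() and the equality operator are assumed from parts of the header not shown in this hunk):

    #include <unordered_set>
    #include "util/bitfield.h"

    std::unordered_set<ue2::bitfield<256>> seen;
    ue2::bitfield<256> b;
    b.set(42);      // assumed bitfield API
    seen.insert(b); // hashed via ue2_hasher through std::hash<ue2::bitfield<256>>
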
diff --git a/contrib/libs/hyperscan/src/util/bitutils.h b/contrib/libs/hyperscan/src/util/bitutils.h
index 1baa1c64b4..c545ee1872 100644
--- a/contrib/libs/hyperscan/src/util/bitutils.h
+++ b/contrib/libs/hyperscan/src/util/bitutils.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -35,13 +35,13 @@
#include "ue2common.h"
#include "popcount.h"
-#include "util/arch.h"
-#include "util/intrinsics.h"
+#include "util/arch.h"
+#include "util/intrinsics.h"
#define CASE_BIT 0x20
#define CASE_CLEAR 0xdf
#define DOUBLE_CASE_CLEAR 0xdfdf
-#define OCTO_CASE_CLEAR 0xdfdfdfdfdfdfdfdfULL
+#define OCTO_CASE_CLEAR 0xdfdfdfdfdfdfdfdfULL
static really_inline
u32 clz32(u32 x) {
@@ -58,20 +58,20 @@ u32 clz32(u32 x) {
static really_inline
u32 clz64(u64a x) {
assert(x); // behaviour not defined for x == 0
-#if defined(_WIN64)
+#if defined(_WIN64)
unsigned long r;
_BitScanReverse64(&r, x);
return 63 - r;
-#elif defined(_WIN32)
- unsigned long x1 = (u32)x;
- unsigned long x2 = (u32)(x >> 32);
- unsigned long r;
- if (x2) {
- _BitScanReverse(&r, x2);
- return (u32)(31 - r);
- }
- _BitScanReverse(&r, (u32)x1);
- return (u32)(63 - r);
+#elif defined(_WIN32)
+ unsigned long x1 = (u32)x;
+ unsigned long x2 = (u32)(x >> 32);
+ unsigned long r;
+ if (x2) {
+ _BitScanReverse(&r, x2);
+ return (u32)(31 - r);
+ }
+ _BitScanReverse(&r, (u32)x1);
+ return (u32)(63 - r);
#else
return (u32)__builtin_clzll(x);
#endif
@@ -93,17 +93,17 @@ u32 ctz32(u32 x) {
static really_inline
u32 ctz64(u64a x) {
assert(x); // behaviour not defined for x == 0
-#if defined(_WIN64)
+#if defined(_WIN64)
unsigned long r;
_BitScanForward64(&r, x);
return r;
-#elif defined(_WIN32)
- unsigned long r;
- if (_BitScanForward(&r, (u32)x)) {
- return (u32)r;
- }
- _BitScanForward(&r, x >> 32);
- return (u32)(r + 32);
+#elif defined(_WIN32)
+ unsigned long r;
+ if (_BitScanForward(&r, (u32)x)) {
+ return (u32)r;
+ }
+ _BitScanForward(&r, x >> 32);
+ return (u32)(r + 32);
#else
return (u32)__builtin_ctzll(x);
#endif
@@ -166,8 +166,8 @@ u32 findAndClearLSB_64(u64a *v) {
#else
// fall back to doing things with two 32-bit cases, since gcc-4.1 doesn't
// inline calls to __builtin_ctzll
- u32 v1 = (u32)*v;
- u32 v2 = (u32)(*v >> 32);
+ u32 v1 = (u32)*v;
+ u32 v2 = (u32)(*v >> 32);
u32 offset;
if (v1) {
offset = findAndClearLSB_32(&v1);
@@ -222,7 +222,7 @@ u32 findAndClearMSB_64(u64a *v) {
#else
// fall back to doing things with two 32-bit cases, since gcc-4.1 doesn't
// inline calls to __builtin_ctzll
- u32 v1 = (u32)*v;
+ u32 v1 = (u32)*v;
u32 v2 = (*v >> 32);
u32 offset;
if (v2) {
@@ -240,7 +240,7 @@ u32 findAndClearMSB_64(u64a *v) {
static really_inline
u32 compress32(u32 x, u32 m) {
-#if defined(HAVE_BMI2)
+#if defined(HAVE_BMI2)
// BMI2 has a single instruction for this operation.
return _pext_u32(x, m);
#else
@@ -275,7 +275,7 @@ u32 compress32(u32 x, u32 m) {
static really_inline
u64a compress64(u64a x, u64a m) {
-#if defined(ARCH_X86_64) && defined(HAVE_BMI2)
+#if defined(ARCH_X86_64) && defined(HAVE_BMI2)
// BMI2 has a single instruction for this operation.
return _pext_u64(x, m);
#else
@@ -311,7 +311,7 @@ u64a compress64(u64a x, u64a m) {
static really_inline
u32 expand32(u32 x, u32 m) {
-#if defined(HAVE_BMI2)
+#if defined(HAVE_BMI2)
// BMI2 has a single instruction for this operation.
return _pdep_u32(x, m);
#else
@@ -351,7 +351,7 @@ u32 expand32(u32 x, u32 m) {
static really_inline
u64a expand64(u64a x, u64a m) {
-#if defined(ARCH_X86_64) && defined(HAVE_BMI2)
+#if defined(ARCH_X86_64) && defined(HAVE_BMI2)
// BMI2 has a single instruction for this operation.
return _pdep_u64(x, m);
#else
@@ -426,67 +426,67 @@ void bf64_unset(u64a *bitfield, u32 i) {
*bitfield &= ~(1ULL << i);
}
-static really_inline
-u32 rank_in_mask32(u32 mask, u32 bit) {
- assert(bit < sizeof(u32) * 8);
- assert(mask & (u32)(1U << bit));
- mask &= (u32)(1U << bit) - 1;
- return popcount32(mask);
-}
-
-static really_inline
-u32 rank_in_mask64(u64a mask, u32 bit) {
- assert(bit < sizeof(u64a) * 8);
- assert(mask & (u64a)(1ULL << bit));
- mask &= (u64a)(1ULL << bit) - 1;
- return popcount64(mask);
-}
-
-static really_inline
-u32 pext32(u32 x, u32 mask) {
-#if defined(HAVE_BMI2)
- // Intel BMI2 can do this operation in one instruction.
- return _pext_u32(x, mask);
-#else
-
- u32 result = 0, num = 1;
- while (mask != 0) {
- u32 bit = findAndClearLSB_32(&mask);
- if (x & (1U << bit)) {
- assert(num != 0); // more than 32 bits!
- result |= num;
- }
- num <<= 1;
- }
- return result;
-#endif
-}
-
-static really_inline
-u64a pext64(u64a x, u64a mask) {
-#if defined(HAVE_BMI2) && defined(ARCH_64_BIT)
- // Intel BMI2 can do this operation in one instruction.
- return _pext_u64(x, mask);
-#else
-
- u32 result = 0, num = 1;
- while (mask != 0) {
- u32 bit = findAndClearLSB_64(&mask);
- if (x & (1ULL << bit)) {
- assert(num != 0); // more than 32 bits!
- result |= num;
- }
- num <<= 1;
- }
- return result;
-#endif
-}
-
-#if defined(HAVE_BMI2) && defined(ARCH_64_BIT)
-static really_inline
-u64a pdep64(u64a x, u64a mask) {
- return _pdep_u64(x, mask);
-}
-#endif
-
+static really_inline
+u32 rank_in_mask32(u32 mask, u32 bit) {
+ assert(bit < sizeof(u32) * 8);
+ assert(mask & (u32)(1U << bit));
+ mask &= (u32)(1U << bit) - 1;
+ return popcount32(mask);
+}
+
+static really_inline
+u32 rank_in_mask64(u64a mask, u32 bit) {
+ assert(bit < sizeof(u64a) * 8);
+ assert(mask & (u64a)(1ULL << bit));
+ mask &= (u64a)(1ULL << bit) - 1;
+ return popcount64(mask);
+}
+
+static really_inline
+u32 pext32(u32 x, u32 mask) {
+#if defined(HAVE_BMI2)
+ // Intel BMI2 can do this operation in one instruction.
+ return _pext_u32(x, mask);
+#else
+
+ u32 result = 0, num = 1;
+ while (mask != 0) {
+ u32 bit = findAndClearLSB_32(&mask);
+ if (x & (1U << bit)) {
+ assert(num != 0); // more than 32 bits!
+ result |= num;
+ }
+ num <<= 1;
+ }
+ return result;
+#endif
+}
+
+static really_inline
+u64a pext64(u64a x, u64a mask) {
+#if defined(HAVE_BMI2) && defined(ARCH_64_BIT)
+ // Intel BMI2 can do this operation in one instruction.
+ return _pext_u64(x, mask);
+#else
+
+ u32 result = 0, num = 1;
+ while (mask != 0) {
+ u32 bit = findAndClearLSB_64(&mask);
+ if (x & (1ULL << bit)) {
+ assert(num != 0); // more than 32 bits!
+ result |= num;
+ }
+ num <<= 1;
+ }
+ return result;
+#endif
+}
+
+#if defined(HAVE_BMI2) && defined(ARCH_64_BIT)
+static really_inline
+u64a pdep64(u64a x, u64a mask) {
+ return _pdep_u64(x, mask);
+}
+#endif
+
#endif // BITUTILS_H
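
Note: rank_in_mask32/rank_in_mask64 compute a set bit's rank, i.e. the number of set bits strictly below it, by masking off the bit and everything above it and popcounting the remainder. A worked example:

    // mask = 0xB4 = 0b10110100, set bits at positions 2, 4, 5, 7.
    // For bit 5: mask &= (1U << 5) - 1 leaves 0b00010100, popcount 2.
    u32 r = rank_in_mask32(0xB4, 5); // r == 2; bit 5 is the third set bit
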
diff --git a/contrib/libs/hyperscan/src/util/boundary_reports.h b/contrib/libs/hyperscan/src/util/boundary_reports.h
index f7c70c366a..b2bb1c9b0a 100644
--- a/contrib/libs/hyperscan/src/util/boundary_reports.h
+++ b/contrib/libs/hyperscan/src/util/boundary_reports.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -30,13 +30,13 @@
#define BOUNDARY_REPORTS_H
#include "ue2common.h"
-#include "util/noncopyable.h"
+#include "util/noncopyable.h"
#include <set>
namespace ue2 {
-struct BoundaryReports : noncopyable {
+struct BoundaryReports : noncopyable {
std::set<ReportID> report_at_0; /* set of internal reports to fire
* unconditionally at offset 0 */
std::set<ReportID> report_at_0_eod; /* set of internal reports to fire
diff --git a/contrib/libs/hyperscan/src/util/bytecode_ptr.h b/contrib/libs/hyperscan/src/util/bytecode_ptr.h
index 58b0167ed4..f1f2e5ef8e 100644
--- a/contrib/libs/hyperscan/src/util/bytecode_ptr.h
+++ b/contrib/libs/hyperscan/src/util/bytecode_ptr.h
@@ -1,161 +1,161 @@
-/*
- * Copyright (c) 2017, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * \file
- * \brief bytecode_ptr: Smart pointer with unique ownership that knows its
- * length and alignment.
- */
-
-#ifndef UTIL_BYTECODE_PTR_H
-#define UTIL_BYTECODE_PTR_H
-
-#include "util/alloc.h"
-#include "util/operators.h"
-
-#include <algorithm> // std::max
-#include <cstring>
-#include <memory>
-#include <stdexcept> // std::logic_error
-
-namespace ue2 {
-
-/**
- * \brief Smart pointer that knows its length and alignment and behaves like a
- * std::unique_ptr -- i.e. it retains unique ownership of the memory region.
- *
- * This is intended to be used for flat aligned memory regions that will
- * eventually end up copied into the Hyperscan bytecode.
- */
-template<typename T>
-class bytecode_ptr : totally_ordered<bytecode_ptr<T>> {
-public:
- bytecode_ptr() = default;
- explicit bytecode_ptr(size_t bytes_in, size_t alignment_in = alignof(T))
- : bytes(bytes_in), alignment(alignment_in) {
- // posix_memalign doesn't like us asking for smaller alignment.
- size_t mem_align = std::max(alignment, sizeof(void *));
- ptr.reset(static_cast<T *>(aligned_malloc_internal(bytes, mem_align)));
- if (!ptr) {
- throw std::bad_alloc();
- }
- }
-
- bytecode_ptr(std::nullptr_t) {}
-
- T *get() const { return ptr.get(); }
-
- T &operator*() { return *ptr; }
- const T &operator*() const { return *ptr; }
-
- T *operator->() { return ptr.get(); }
- const T *operator->() const { return ptr.get(); }
-
- explicit operator bool() const { return ptr != nullptr; }
-
- /** \brief Move converter for shared_ptr. */
- template <typename ST, class = typename std::enable_if<
- std::is_convertible<T *, ST *>::value>::type>
- operator std::shared_ptr<ST>() && {
- auto d = ptr.get_deleter();
- return std::shared_ptr<ST>(ptr.release(), d);
- }
-
- void reset(T *p = nullptr) { ptr.reset(p); }
-
- T *release() {
- auto *p = ptr.release();
- bytes = 0;
- alignment = 0;
- return p;
- }
-
- void swap(bytecode_ptr &other) {
- using std::swap;
- swap(ptr, other.ptr);
- swap(bytes, other.bytes);
- swap(alignment, other.alignment);
- }
-
- /**
- * \brief Reduces the apparent size of the memory region. Note that this
- * does not reallocate and copy, it just changes the value returned by
- * size().
- */
- void shrink(size_t new_size) {
- if (new_size > bytes) {
- assert(0);
- throw std::logic_error("Must shrink to a smaller value");
- }
- bytes = new_size;
- }
-
- /** \brief Returns size of the memory region in bytes. */
- size_t size() const { return bytes; }
-
- /** \brief Returns alignment of the memory region in bytes. */
- size_t align() const { return alignment; }
-
- bool operator==(const bytecode_ptr &a) const { return ptr == a.ptr; }
- bool operator<(const bytecode_ptr &a) const { return ptr < a.ptr; }
-
-private:
- /** \brief Deleter function for std::unique_ptr. */
- template <typename DT> struct deleter {
- void operator()(DT *p) const { aligned_free_internal(p); }
- };
-
- std::unique_ptr<T, deleter<T>> ptr; //!< Underlying pointer.
- size_t bytes = 0; //!< Size of memory region in bytes.
- size_t alignment = 0; //!< Alignment of memory region in bytes.
-};
-
-/**
- * \brief Constructs a bytecode_ptr<T> with the given size and alignment.
- */
-template<typename T>
-inline bytecode_ptr<T> make_bytecode_ptr(size_t size,
- size_t align = alignof(T)) {
- return bytecode_ptr<T>(size, align);
-}
-
-/**
- * \brief Constructs a bytecode_ptr<T> with the given size and alignment and
- * fills the memory region with zeroes.
- */
-template<typename T>
-inline bytecode_ptr<T> make_zeroed_bytecode_ptr(size_t size,
- size_t align = alignof(T)) {
- auto ptr = make_bytecode_ptr<T>(size, align);
- std::memset(ptr.get(), 0, size);
- return ptr;
-}
-
-} // namespace ue2
-
-#endif // UTIL_BYTECODE_PTR_H
+/*
+ * Copyright (c) 2017, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * \file
+ * \brief bytecode_ptr: Smart pointer with unique ownership that knows its
+ * length and alignment.
+ */
+
+#ifndef UTIL_BYTECODE_PTR_H
+#define UTIL_BYTECODE_PTR_H
+
+#include "util/alloc.h"
+#include "util/operators.h"
+
+#include <algorithm> // std::max
+#include <cstring>
+#include <memory>
+#include <stdexcept> // std::logic_error
+
+namespace ue2 {
+
+/**
+ * \brief Smart pointer that knows its length and alignment and behaves like a
+ * std::unique_ptr -- i.e. it retains unique ownership of the memory region.
+ *
+ * This is intended to be used for flat aligned memory regions that will
+ * eventually end up copied into the Hyperscan bytecode.
+ */
+template<typename T>
+class bytecode_ptr : totally_ordered<bytecode_ptr<T>> {
+public:
+ bytecode_ptr() = default;
+ explicit bytecode_ptr(size_t bytes_in, size_t alignment_in = alignof(T))
+ : bytes(bytes_in), alignment(alignment_in) {
+ // posix_memalign doesn't like us asking for smaller alignment.
+ size_t mem_align = std::max(alignment, sizeof(void *));
+ ptr.reset(static_cast<T *>(aligned_malloc_internal(bytes, mem_align)));
+ if (!ptr) {
+ throw std::bad_alloc();
+ }
+ }
+
+ bytecode_ptr(std::nullptr_t) {}
+
+ T *get() const { return ptr.get(); }
+
+ T &operator*() { return *ptr; }
+ const T &operator*() const { return *ptr; }
+
+ T *operator->() { return ptr.get(); }
+ const T *operator->() const { return ptr.get(); }
+
+ explicit operator bool() const { return ptr != nullptr; }
+
+ /** \brief Move converter for shared_ptr. */
+ template <typename ST, class = typename std::enable_if<
+ std::is_convertible<T *, ST *>::value>::type>
+ operator std::shared_ptr<ST>() && {
+ auto d = ptr.get_deleter();
+ return std::shared_ptr<ST>(ptr.release(), d);
+ }
+
+ void reset(T *p = nullptr) { ptr.reset(p); }
+
+ T *release() {
+ auto *p = ptr.release();
+ bytes = 0;
+ alignment = 0;
+ return p;
+ }
+
+ void swap(bytecode_ptr &other) {
+ using std::swap;
+ swap(ptr, other.ptr);
+ swap(bytes, other.bytes);
+ swap(alignment, other.alignment);
+ }
+
+ /**
+ * \brief Reduces the apparent size of the memory region. Note that this
+ * does not reallocate and copy, it just changes the value returned by
+ * size().
+ */
+ void shrink(size_t new_size) {
+ if (new_size > bytes) {
+ assert(0);
+ throw std::logic_error("Must shrink to a smaller value");
+ }
+ bytes = new_size;
+ }
+
+ /** \brief Returns size of the memory region in bytes. */
+ size_t size() const { return bytes; }
+
+ /** \brief Returns alignment of the memory region in bytes. */
+ size_t align() const { return alignment; }
+
+ bool operator==(const bytecode_ptr &a) const { return ptr == a.ptr; }
+ bool operator<(const bytecode_ptr &a) const { return ptr < a.ptr; }
+
+private:
+ /** \brief Deleter function for std::unique_ptr. */
+ template <typename DT> struct deleter {
+ void operator()(DT *p) const { aligned_free_internal(p); }
+ };
+
+ std::unique_ptr<T, deleter<T>> ptr; //!< Underlying pointer.
+ size_t bytes = 0; //!< Size of memory region in bytes.
+ size_t alignment = 0; //!< Alignment of memory region in bytes.
+};
+
+/**
+ * \brief Constructs a bytecode_ptr<T> with the given size and alignment.
+ */
+template<typename T>
+inline bytecode_ptr<T> make_bytecode_ptr(size_t size,
+ size_t align = alignof(T)) {
+ return bytecode_ptr<T>(size, align);
+}
+
+/**
+ * \brief Constructs a bytecode_ptr<T> with the given size and alignment and
+ * fills the memory region with zeroes.
+ */
+template<typename T>
+inline bytecode_ptr<T> make_zeroed_bytecode_ptr(size_t size,
+ size_t align = alignof(T)) {
+ auto ptr = make_bytecode_ptr<T>(size, align);
+ std::memset(ptr.get(), 0, size);
+ return ptr;
+}
+
+} // namespace ue2
+
+#endif // UTIL_BYTECODE_PTR_H
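
Note: make_bytecode_ptr/make_zeroed_bytecode_ptr at the end of this header are the intended construction path. A minimal sketch (the payload type here is hypothetical):

    struct header { unsigned length; }; // hypothetical payload type

    auto p = ue2::make_zeroed_bytecode_ptr<header>(1024, 64);
    // p.size() == 1024, p.align() == 64; the region is zeroed and 64-byte
    // aligned, and is released via aligned_free_internal when p goes out of scope.
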
diff --git a/contrib/libs/hyperscan/src/util/charreach.cpp b/contrib/libs/hyperscan/src/util/charreach.cpp
index 18b6b6be74..9116b719db 100644
--- a/contrib/libs/hyperscan/src/util/charreach.cpp
+++ b/contrib/libs/hyperscan/src/util/charreach.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2016, Intel Corporation
+ * Copyright (c) 2015-2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/contrib/libs/hyperscan/src/util/charreach.h b/contrib/libs/hyperscan/src/util/charreach.h
index 12df7d3711..f6d3a2af3e 100644
--- a/contrib/libs/hyperscan/src/util/charreach.h
+++ b/contrib/libs/hyperscan/src/util/charreach.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -135,38 +135,38 @@ public:
size_t find_nth(size_t n) const { return bits.find_nth(n); }
/// Bitwise OR.
- CharReach operator|(const CharReach &a) const {
- CharReach cr(*this);
- cr.bits |= a.bits;
- return cr;
- }
+ CharReach operator|(const CharReach &a) const {
+ CharReach cr(*this);
+ cr.bits |= a.bits;
+ return cr;
+ }
/// Bitwise OR-equals.
- void operator|=(const CharReach &a) { bits |= a.bits; }
+ void operator|=(const CharReach &a) { bits |= a.bits; }
/// Bitwise AND.
- CharReach operator&(const CharReach &a) const {
- CharReach cr(*this);
- cr.bits &= a.bits;
- return cr;
- }
+ CharReach operator&(const CharReach &a) const {
+ CharReach cr(*this);
+ cr.bits &= a.bits;
+ return cr;
+ }
/// Bitwise AND-equals.
- void operator&=(const CharReach &a) { bits &= a.bits; }
+ void operator&=(const CharReach &a) { bits &= a.bits; }
/// Bitwise XOR.
- CharReach operator^(const CharReach &a) const {
- CharReach cr(*this);
- cr.bits ^= a.bits;
- return cr;
- }
+ CharReach operator^(const CharReach &a) const {
+ CharReach cr(*this);
+ cr.bits ^= a.bits;
+ return cr;
+ }
/// Bitwise complement.
- CharReach operator~(void) const {
- CharReach cr(*this);
- cr.flip();
- return cr;
- }
+ CharReach operator~(void) const {
+ CharReach cr(*this);
+ cr.flip();
+ return cr;
+ }
/// Do we only contain bits representing alpha characters?
bool isAlpha() const;
@@ -198,15 +198,15 @@ bool isutf8start(const CharReach &cr);
} // namespace ue2
-namespace std {
-
-template<>
-struct hash<ue2::CharReach> {
- size_t operator()(const ue2::CharReach &cr) const {
- return cr.hash();
- }
-};
-
-} // namespace std
-
+namespace std {
+
+template<>
+struct hash<ue2::CharReach> {
+ size_t operator()(const ue2::CharReach &cr) const {
+ return cr.hash();
+ }
+};
+
+} // namespace std
+
#endif // NG_CHARREACH_H
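
Note: with these operators, CharReach behaves as a 256-bit set over byte values and composes with |, &, ^ and ~. A small sketch (the range constructor and test() are assumed from parts of the header not shown in this hunk):

    ue2::CharReach lower('a', 'z');
    ue2::CharReach digits('0', '9');
    ue2::CharReach word = lower | digits; // union via operator|
    bool q = word.test('q');              // true
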
diff --git a/contrib/libs/hyperscan/src/util/charreach_util.h b/contrib/libs/hyperscan/src/util/charreach_util.h
index b843482dd1..f0dc4227b0 100644
--- a/contrib/libs/hyperscan/src/util/charreach_util.h
+++ b/contrib/libs/hyperscan/src/util/charreach_util.h
@@ -29,11 +29,11 @@
#ifndef CHARREACH_UTIL_H
#define CHARREACH_UTIL_H
-#include "ue2common.h"
+#include "ue2common.h"
namespace ue2 {
-class CharReach;
+class CharReach;
void make_caseless(CharReach *cr);
diff --git a/contrib/libs/hyperscan/src/util/clique.cpp b/contrib/libs/hyperscan/src/util/clique.cpp
index f6e07571ad..c2befea497 100644
--- a/contrib/libs/hyperscan/src/util/clique.cpp
+++ b/contrib/libs/hyperscan/src/util/clique.cpp
@@ -1,130 +1,130 @@
-/*
- * Copyright (c) 2016-2017, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \file
- * \brief An algorithm to find cliques.
- */
-
-#include "clique.h"
-#include "container.h"
-#include "graph_range.h"
-#include "make_unique.h"
-
-#include <map>
-#include <set>
-#include <stack>
-
-using namespace std;
-
-namespace ue2 {
-
-static
-vector<u32> getNeighborInfo(const CliqueGraph &g,
- const CliqueVertex &cv, const set<u32> &group) {
- u32 id = g[cv].stateId;
- vector<u32> neighbor;
- // find neighbors for cv
- for (const auto &v : adjacent_vertices_range(cv, g)) {
- if (g[v].stateId != id && contains(group, g[v].stateId)){
- neighbor.push_back(g[v].stateId);
- DEBUG_PRINTF("Neighbor:%u\n", g[v].stateId);
- }
- }
-
- return neighbor;
-}
-
-static
-vector<u32> findCliqueGroup(CliqueGraph &cg) {
- stack<vector<u32>> gStack;
-
- // Create mapping between vertex and id
- map<u32, CliqueVertex> vertexMap;
- vector<u32> init;
- for (const auto &v : vertices_range(cg)) {
- vertexMap[cg[v].stateId] = v;
- init.push_back(cg[v].stateId);
- }
- gStack.push(init);
-
- // Get the vertex to start from
- vector<u32> clique;
- while (!gStack.empty()) {
- vector<u32> g = move(gStack.top());
- gStack.pop();
-
- // Choose a vertex from the graph
- u32 id = g[0];
- CliqueVertex &n = vertexMap.at(id);
- clique.push_back(id);
- // Corresponding vertex in the original graph
- set<u32> subgraphId(g.begin(), g.end());
- auto neighbor = getNeighborInfo(cg, n, subgraphId);
- // Get graph consisting of neighbors for left branch
- if (!neighbor.empty()) {
- gStack.push(neighbor);
- }
- }
-
- return clique;
-}
-
-template<typename Graph>
-bool graph_empty(const Graph &g) {
- typename Graph::vertex_iterator vi, ve;
- tie(vi, ve) = vertices(g);
- return vi == ve;
-}
-
-vector<vector<u32>> removeClique(CliqueGraph &cg) {
- DEBUG_PRINTF("graph size:%zu\n", num_vertices(cg));
- vector<vector<u32>> cliquesVec = {findCliqueGroup(cg)};
- while (!graph_empty(cg)) {
- const vector<u32> &c = cliquesVec.back();
- vector<CliqueVertex> dead;
- for (const auto &v : vertices_range(cg)) {
- u32 id = cg[v].stateId;
- if (find(c.begin(), c.end(), id) != c.end()) {
- dead.push_back(v);
- }
- }
- for (const auto &v : dead) {
- clear_vertex(v, cg);
- remove_vertex(v, cg);
- }
- if (graph_empty(cg)) {
- break;
- }
- auto clique = findCliqueGroup(cg);
- cliquesVec.push_back(clique);
- }
-
- return cliquesVec;
-}
-
-} // namespace ue2
+/*
+ * Copyright (c) 2016-2017, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * \brief An algorithm to find cliques.
+ */
+
+#include "clique.h"
+#include "container.h"
+#include "graph_range.h"
+#include "make_unique.h"
+
+#include <map>
+#include <set>
+#include <stack>
+
+using namespace std;
+
+namespace ue2 {
+
+static
+vector<u32> getNeighborInfo(const CliqueGraph &g,
+ const CliqueVertex &cv, const set<u32> &group) {
+ u32 id = g[cv].stateId;
+ vector<u32> neighbor;
+ // find neighbors for cv
+ for (const auto &v : adjacent_vertices_range(cv, g)) {
+ if (g[v].stateId != id && contains(group, g[v].stateId)){
+ neighbor.push_back(g[v].stateId);
+ DEBUG_PRINTF("Neighbor:%u\n", g[v].stateId);
+ }
+ }
+
+ return neighbor;
+}
+
+static
+vector<u32> findCliqueGroup(CliqueGraph &cg) {
+ stack<vector<u32>> gStack;
+
+ // Create mapping between vertex and id
+ map<u32, CliqueVertex> vertexMap;
+ vector<u32> init;
+ for (const auto &v : vertices_range(cg)) {
+ vertexMap[cg[v].stateId] = v;
+ init.push_back(cg[v].stateId);
+ }
+ gStack.push(init);
+
+ // Get the vertex to start from
+ vector<u32> clique;
+ while (!gStack.empty()) {
+ vector<u32> g = move(gStack.top());
+ gStack.pop();
+
+ // Choose a vertex from the graph
+ u32 id = g[0];
+ CliqueVertex &n = vertexMap.at(id);
+ clique.push_back(id);
+ // Corresponding vertex in the original graph
+ set<u32> subgraphId(g.begin(), g.end());
+ auto neighbor = getNeighborInfo(cg, n, subgraphId);
+ // Get graph consisting of neighbors for left branch
+ if (!neighbor.empty()) {
+ gStack.push(neighbor);
+ }
+ }
+
+ return clique;
+}
+
+template<typename Graph>
+bool graph_empty(const Graph &g) {
+ typename Graph::vertex_iterator vi, ve;
+ tie(vi, ve) = vertices(g);
+ return vi == ve;
+}
+
+vector<vector<u32>> removeClique(CliqueGraph &cg) {
+ DEBUG_PRINTF("graph size:%zu\n", num_vertices(cg));
+ vector<vector<u32>> cliquesVec = {findCliqueGroup(cg)};
+ while (!graph_empty(cg)) {
+ const vector<u32> &c = cliquesVec.back();
+ vector<CliqueVertex> dead;
+ for (const auto &v : vertices_range(cg)) {
+ u32 id = cg[v].stateId;
+ if (find(c.begin(), c.end(), id) != c.end()) {
+ dead.push_back(v);
+ }
+ }
+ for (const auto &v : dead) {
+ clear_vertex(v, cg);
+ remove_vertex(v, cg);
+ }
+ if (graph_empty(cg)) {
+ break;
+ }
+ auto clique = findCliqueGroup(cg);
+ cliquesVec.push_back(clique);
+ }
+
+ return cliquesVec;
+}
+
+} // namespace ue2
diff --git a/contrib/libs/hyperscan/src/util/clique.h b/contrib/libs/hyperscan/src/util/clique.h
index 4886c8b7dd..89c6d4ed48 100644
--- a/contrib/libs/hyperscan/src/util/clique.h
+++ b/contrib/libs/hyperscan/src/util/clique.h
@@ -1,60 +1,60 @@
-/*
- * Copyright (c) 2016, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \file
- * \brief An algorithm to find cliques.
- */
-
-#ifndef CLIQUE_H
-#define CLIQUE_H
-
-#include "ue2common.h"
-
-#include <vector>
-
-#include <boost/graph/adjacency_list.hpp>
-
-namespace ue2 {
-
-struct CliqueVertexProps {
- CliqueVertexProps() {}
- explicit CliqueVertexProps(u32 state_in) : stateId(state_in) {}
-
- u32 stateId = ~0U;
-};
-
-typedef boost::adjacency_list<boost::listS, boost::listS, boost::undirectedS,
- CliqueVertexProps> CliqueGraph;
-typedef CliqueGraph::vertex_descriptor CliqueVertex;
-
-/** \brief Returns a vector of cliques found in a graph. */
-std::vector<std::vector<u32>> removeClique(CliqueGraph &cg);
-
-} // namespace ue2
-
-#endif
+/*
+ * Copyright (c) 2016, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * \brief An algorithm to find cliques.
+ */
+
+#ifndef CLIQUE_H
+#define CLIQUE_H
+
+#include "ue2common.h"
+
+#include <vector>
+
+#include <boost/graph/adjacency_list.hpp>
+
+namespace ue2 {
+
+struct CliqueVertexProps {
+ CliqueVertexProps() {}
+ explicit CliqueVertexProps(u32 state_in) : stateId(state_in) {}
+
+ u32 stateId = ~0U;
+};
+
+typedef boost::adjacency_list<boost::listS, boost::listS, boost::undirectedS,
+ CliqueVertexProps> CliqueGraph;
+typedef CliqueGraph::vertex_descriptor CliqueVertex;
+
+/** \brief Returns a vector of cliques found in a graph. */
+std::vector<std::vector<u32>> removeClique(CliqueGraph &cg);
+
+} // namespace ue2
+
+#endif
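
Note: given the typedefs above, a caller builds a CliqueGraph, tags each vertex with a stateId, connects compatible pairs, and passes the graph to removeClique, which consumes it. A minimal sketch using the Boost.Graph free functions:

    ue2::CliqueGraph cg;
    ue2::CliqueVertex v0 = add_vertex(ue2::CliqueVertexProps(0), cg);
    ue2::CliqueVertex v1 = add_vertex(ue2::CliqueVertexProps(1), cg);
    add_edge(v0, v1, cg);                 // states 0 and 1 are compatible
    auto cliques = ue2::removeClique(cg); // e.g. {{0, 1}}; cg is emptied

As the implementation in clique.cpp shows, the search is a greedy heuristic, not an exact maximum-clique algorithm.
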
diff --git a/contrib/libs/hyperscan/src/util/compare.h b/contrib/libs/hyperscan/src/util/compare.h
index ca1114aa47..eaa717a4c2 100644
--- a/contrib/libs/hyperscan/src/util/compare.h
+++ b/contrib/libs/hyperscan/src/util/compare.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2016, Intel Corporation
+ * Copyright (c) 2015-2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -97,10 +97,10 @@ u64a theirtoupper64(const u64a x) {
static really_inline
int cmpNocaseNaive(const u8 *p1, const u8 *p2, size_t len) {
- const u8 *pEnd = p1 + len;
+ const u8 *pEnd = p1 + len;
for (; p1 < pEnd; p1++, p2++) {
- assert(!ourisalpha(*p2) || myisupper(*p2)); // Already upper-case.
- if ((u8)mytoupper(*p1) != *p2) {
+ assert(!ourisalpha(*p2) || myisupper(*p2)); // Already upper-case.
+ if ((u8)mytoupper(*p1) != *p2) {
return 1;
}
}
@@ -109,7 +109,7 @@ int cmpNocaseNaive(const u8 *p1, const u8 *p2, size_t len) {
static really_inline
int cmpCaseNaive(const u8 *p1, const u8 *p2, size_t len) {
- const u8 *pEnd = p1 + len;
+ const u8 *pEnd = p1 + len;
for (; p1 < pEnd; p1++, p2++) {
if (*p1 != *p2) {
return 1;
@@ -130,11 +130,11 @@ int cmpCaseNaive(const u8 *p1, const u8 *p2, size_t len) {
#define CMP_SIZE sizeof(CMP_T)
-/**
- * \brief Compare two strings, optionally caselessly.
- *
- * Note: If nocase is true, p2 is assumed to be already upper-case.
- */
+/**
+ * \brief Compare two strings, optionally caselessly.
+ *
+ * Note: If nocase is true, p2 is assumed to be already upper-case.
+ */
#if defined(ARCH_IA32)
static UNUSED never_inline
#else
@@ -151,13 +151,13 @@ int cmpForward(const u8 *p1, const u8 *p2, size_t len, char nocase) {
if (nocase) { // Case-insensitive version.
for (; p1 < p1_end; p1 += CMP_SIZE, p2 += CMP_SIZE) {
- assert(ULOAD(p2) == TOUPPER(ULOAD(p2))); // Already upper-case.
- if (TOUPPER(ULOAD(p1)) != ULOAD(p2)) {
+ assert(ULOAD(p2) == TOUPPER(ULOAD(p2))); // Already upper-case.
+ if (TOUPPER(ULOAD(p1)) != ULOAD(p2)) {
return 1;
}
}
- assert(ULOAD(p2_end) == TOUPPER(ULOAD(p2_end))); // Already upper-case.
- if (TOUPPER(ULOAD(p1_end)) != ULOAD(p2_end)) {
+ assert(ULOAD(p2_end) == TOUPPER(ULOAD(p2_end))); // Already upper-case.
+ if (TOUPPER(ULOAD(p1_end)) != ULOAD(p2_end)) {
return 1;
}
} else { // Case-sensitive version.
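
Note: the comments and assertions make the cmpForward contract explicit: in caseless mode the second operand must already be upper-cased, so only p1 is upper-cased on the fly. A sketch of the calling convention (data hypothetical; the full header appears to route inputs shorter than CMP_SIZE to the naive helpers shown earlier):

    const u8 pattern[] = "NEEDLE";  // pre-upper-cased literal (p2)
    const u8 input[]   = "neEdLe";  // scanned bytes (p1), any case
    int rv = cmpForward(input, pattern, 6, 1); // 0 == match, 1 == mismatch
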
diff --git a/contrib/libs/hyperscan/src/util/container.h b/contrib/libs/hyperscan/src/util/container.h
index dd329d9b80..68f60e99ee 100644
--- a/contrib/libs/hyperscan/src/util/container.h
+++ b/contrib/libs/hyperscan/src/util/container.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -41,7 +41,7 @@
#include <set>
#include <type_traits>
#include <utility>
-#include <vector>
+#include <vector>
namespace ue2 {
@@ -79,9 +79,9 @@ void insert(C *container, typename C::iterator pos, const D &donor) {
}
/**
- * \brief Constructs a vector from a range bounded by the given pair of
- * iterators.
- */
+ * \brief Constructs a vector from a range bounded by the given pair of
+ * iterators.
+ */
template <typename It>
auto make_vector_from(const std::pair<It, It> &range)
-> std::vector<decltype(*range.first)> {
@@ -89,14 +89,14 @@ auto make_vector_from(const std::pair<It, It> &range)
return std::vector<T>(range.first, range.second);
}
-/** \brief Sort a sequence container and remove duplicates. */
-template <typename C, typename Compare = std::less<typename C::value_type>>
-void sort_and_unique(C &container, Compare comp = Compare()) {
- std::sort(std::begin(container), std::end(container), comp);
- container.erase(std::unique(std::begin(container), std::end(container)),
- std::end(container));
-}
-
+/** \brief Sort a sequence container and remove duplicates. */
+template <typename C, typename Compare = std::less<typename C::value_type>>
+void sort_and_unique(C &container, Compare comp = Compare()) {
+ std::sort(std::begin(container), std::end(container), comp);
+ container.erase(std::unique(std::begin(container), std::end(container)),
+ std::end(container));
+}
+
/** \brief Returns a set containing the keys in the given associative
* container. */
template <typename C>
@@ -111,9 +111,9 @@ std::set<typename C::key_type> assoc_keys(const C &container) {
/**
* \brief Return the length in bytes of the given vector of (POD) objects.
*/
-template <typename T, typename Alloc>
-typename std::vector<T, Alloc>::size_type
-byte_length(const std::vector<T, Alloc> &vec) {
+template <typename T, typename Alloc>
+typename std::vector<T, Alloc>::size_type
+byte_length(const std::vector<T, Alloc> &vec) {
static_assert(std::is_pod<T>::value, "should be pod");
return vec.size() * sizeof(T);
}
@@ -122,8 +122,8 @@ byte_length(const std::vector<T, Alloc> &vec) {
* \brief Copy the given vector of POD objects to the given location in memory.
* It is safe to give this function an empty vector.
*/
-template<typename T, typename Alloc>
-void *copy_bytes(void *dest, const std::vector<T, Alloc> &vec) {
+template<typename T, typename Alloc>
+void *copy_bytes(void *dest, const std::vector<T, Alloc> &vec) {
static_assert(std::is_pod<T>::value, "should be pod");
assert(dest);
@@ -202,17 +202,17 @@ void erase_all(C *container, const D &donor) {
}
}
-
-template<typename C, typename Pred>
-bool any_of_in(const C &c, Pred p) {
- return std::any_of(c.begin(), c.end(), std::move(p));
-}
-
-template<typename C, typename Pred>
-bool all_of_in(const C &c, Pred p) {
- return std::all_of(c.begin(), c.end(), std::move(p));
-}
-
+
+template<typename C, typename Pred>
+bool any_of_in(const C &c, Pred p) {
+ return std::any_of(c.begin(), c.end(), std::move(p));
+}
+
+template<typename C, typename Pred>
+bool all_of_in(const C &c, Pred p) {
+ return std::all_of(c.begin(), c.end(), std::move(p));
+}
+
} // namespace ue2
#ifdef DUMP_SUPPORT
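
The sort_and_unique() helper restored in this hunk is the usual canonicalization idiom for sequence containers. The same idiom applied to a plain std::vector, as a self-contained sketch:

    #include <algorithm>
    #include <cassert>
    #include <vector>

    int main() {
        std::vector<int> v{3, 1, 3, 2, 1};
        std::sort(v.begin(), v.end());            // order the elements
        v.erase(std::unique(v.begin(), v.end()),  // squeeze out duplicates
                v.end());
        assert((v == std::vector<int>{1, 2, 3}));
    }
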
diff --git a/contrib/libs/hyperscan/src/util/copybytes.h b/contrib/libs/hyperscan/src/util/copybytes.h
index aa14615359..7f37d96bc5 100644
--- a/contrib/libs/hyperscan/src/util/copybytes.h
+++ b/contrib/libs/hyperscan/src/util/copybytes.h
@@ -1,77 +1,77 @@
-/*
+/*
* Copyright (c) 2016-2020, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef COPY_BYTES_H
-#define COPY_BYTES_H
-
-#include "unaligned.h"
-#include "simd_utils.h"
-
-static really_inline
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef COPY_BYTES_H
+#define COPY_BYTES_H
+
+#include "unaligned.h"
+#include "simd_utils.h"
+
+static really_inline
void copy_upto_64_bytes(u8 *dst, const u8 *src, unsigned int len) {
- switch (len) {
- case 0:
- break;
- case 1:
- *dst = *src;
- break;
- case 2:
- unaligned_store_u16(dst, unaligned_load_u16(src));
- break;
- case 3:
- unaligned_store_u16(dst, unaligned_load_u16(src));
- dst[2] = src[2];
- break;
- case 4:
- unaligned_store_u32(dst, unaligned_load_u32(src));
- break;
- case 5:
- case 6:
- case 7:
- unaligned_store_u32(dst + len - 4, unaligned_load_u32(src + len - 4));
- unaligned_store_u32(dst, unaligned_load_u32(src));
- break;
- case 8:
- unaligned_store_u64a(dst, unaligned_load_u64a(src));
- break;
- case 9:
- case 10:
- case 11:
- case 12:
- case 13:
- case 14:
- case 15:
- unaligned_store_u64a(dst + len - 8, unaligned_load_u64a(src + len - 8));
- unaligned_store_u64a(dst, unaligned_load_u64a(src));
- break;
- case 16:
- storeu128(dst, loadu128(src));
- break;
+ switch (len) {
+ case 0:
+ break;
+ case 1:
+ *dst = *src;
+ break;
+ case 2:
+ unaligned_store_u16(dst, unaligned_load_u16(src));
+ break;
+ case 3:
+ unaligned_store_u16(dst, unaligned_load_u16(src));
+ dst[2] = src[2];
+ break;
+ case 4:
+ unaligned_store_u32(dst, unaligned_load_u32(src));
+ break;
+ case 5:
+ case 6:
+ case 7:
+ unaligned_store_u32(dst + len - 4, unaligned_load_u32(src + len - 4));
+ unaligned_store_u32(dst, unaligned_load_u32(src));
+ break;
+ case 8:
+ unaligned_store_u64a(dst, unaligned_load_u64a(src));
+ break;
+ case 9:
+ case 10:
+ case 11:
+ case 12:
+ case 13:
+ case 14:
+ case 15:
+ unaligned_store_u64a(dst + len - 8, unaligned_load_u64a(src + len - 8));
+ unaligned_store_u64a(dst, unaligned_load_u64a(src));
+ break;
+ case 16:
+ storeu128(dst, loadu128(src));
+ break;
case 17:
case 18:
case 19:
@@ -90,24 +90,24 @@ void copy_upto_64_bytes(u8 *dst, const u8 *src, unsigned int len) {
storeu128(dst + len - 16, loadu128(src + len - 16));
storeu128(dst, loadu128(src));
break;
- case 32:
- storeu256(dst, loadu256(src));
- break;
+ case 32:
+ storeu256(dst, loadu256(src));
+ break;
#ifdef HAVE_AVX512
case 64:
storebytes512(dst, loadu512(src), 64);
break;
- default:
+ default:
assert(len < 64);
u64a k = (1ULL << len) - 1;
storeu_mask_m512(dst, k, loadu_maskz_m512(k, src));
- break;
+ break;
#else
default:
assert(0);
break;
#endif
- }
-}
-
-#endif
+ }
+}
+
+#endif
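
copy_upto_64_bytes above avoids byte loops for awkward lengths by issuing two loads/stores whose windows overlap to cover the span: lengths 5-7 use two u32 windows, 9-15 two u64 windows, 17-31 two 128-bit windows. A portable sketch of the 5-7 case, with memcpy standing in for the unaligned load/store helpers (an illustration, not the library code):

    #include <cstring>

    // Overlapping-window copy for len in [5, 7]: the two 4-byte windows
    // overlap by 8 - len bytes, so together they cover the whole span.
    static void copy5to7(unsigned char *dst, const unsigned char *src,
                         unsigned len) {
        std::memcpy(dst + len - 4, src + len - 4, 4); // tail window first
        std::memcpy(dst, src, 4);                     // head window
    }

As in the library, the tail window is written first; since src and dst are distinct buffers here, the order is a stylistic match rather than a correctness requirement.
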
diff --git a/contrib/libs/hyperscan/src/util/cpuid_flags.c b/contrib/libs/hyperscan/src/util/cpuid_flags.c
index c15aafc0ee..c00ce58e2d 100644
--- a/contrib/libs/hyperscan/src/util/cpuid_flags.c
+++ b/contrib/libs/hyperscan/src/util/cpuid_flags.c
@@ -27,43 +27,43 @@
*/
#include "cpuid_flags.h"
-#include "cpuid_inline.h"
+#include "cpuid_inline.h"
#include "ue2common.h"
#include "hs_compile.h" // for HS_MODE_ flags
#include "hs_internal.h"
-#include "util/arch.h"
+#include "util/arch.h"
-#if !defined(_WIN32) && !defined(CPUID_H_)
+#if !defined(_WIN32) && !defined(CPUID_H_)
#include <cpuid.h>
#endif
u64a cpuid_flags(void) {
u64a cap = 0;
- if (check_avx2()) {
- DEBUG_PRINTF("AVX2 enabled\n");
+ if (check_avx2()) {
+ DEBUG_PRINTF("AVX2 enabled\n");
cap |= HS_CPU_FEATURES_AVX2;
}
- if (check_avx512()) {
- DEBUG_PRINTF("AVX512 enabled\n");
- cap |= HS_CPU_FEATURES_AVX512;
- }
-
+ if (check_avx512()) {
+ DEBUG_PRINTF("AVX512 enabled\n");
+ cap |= HS_CPU_FEATURES_AVX512;
+ }
+
if (check_avx512vbmi()) {
DEBUG_PRINTF("AVX512VBMI enabled\n");
cap |= HS_CPU_FEATURES_AVX512VBMI;
}
-#if !defined(FAT_RUNTIME) && !defined(HAVE_AVX2)
+#if !defined(FAT_RUNTIME) && !defined(HAVE_AVX2)
cap &= ~HS_CPU_FEATURES_AVX2;
#endif
-#if (!defined(FAT_RUNTIME) && !defined(HAVE_AVX512)) || \
- (defined(FAT_RUNTIME) && !defined(BUILD_AVX512))
- cap &= ~HS_CPU_FEATURES_AVX512;
-#endif
-
+#if (!defined(FAT_RUNTIME) && !defined(HAVE_AVX512)) || \
+ (defined(FAT_RUNTIME) && !defined(BUILD_AVX512))
+ cap &= ~HS_CPU_FEATURES_AVX512;
+#endif
+
#if (!defined(FAT_RUNTIME) && !defined(HAVE_AVX512VBMI)) || \
(defined(FAT_RUNTIME) && !defined(BUILD_AVX512VBMI))
cap &= ~HS_CPU_FEATURES_AVX512VBMI;
@@ -83,37 +83,37 @@ struct family_id {
* Family Numbers" */
static const struct family_id known_microarch[] = {
{ 0x6, 0x37, HS_TUNE_FAMILY_SLM }, /* baytrail */
- { 0x6, 0x4A, HS_TUNE_FAMILY_SLM }, /* silvermont */
- { 0x6, 0x4C, HS_TUNE_FAMILY_SLM }, /* silvermont */
+ { 0x6, 0x4A, HS_TUNE_FAMILY_SLM }, /* silvermont */
+ { 0x6, 0x4C, HS_TUNE_FAMILY_SLM }, /* silvermont */
{ 0x6, 0x4D, HS_TUNE_FAMILY_SLM }, /* avoton, rangley */
- { 0x6, 0x5A, HS_TUNE_FAMILY_SLM }, /* silvermont */
- { 0x6, 0x5D, HS_TUNE_FAMILY_SLM }, /* silvermont */
+ { 0x6, 0x5A, HS_TUNE_FAMILY_SLM }, /* silvermont */
+ { 0x6, 0x5D, HS_TUNE_FAMILY_SLM }, /* silvermont */
- { 0x6, 0x5C, HS_TUNE_FAMILY_GLM }, /* goldmont */
- { 0x6, 0x5F, HS_TUNE_FAMILY_GLM }, /* denverton */
-
+
+ { 0x6, 0x5C, HS_TUNE_FAMILY_GLM }, /* goldmont */
+ { 0x6, 0x5F, HS_TUNE_FAMILY_GLM }, /* denverton */
{ 0x6, 0x3C, HS_TUNE_FAMILY_HSW }, /* haswell */
{ 0x6, 0x45, HS_TUNE_FAMILY_HSW }, /* haswell */
{ 0x6, 0x46, HS_TUNE_FAMILY_HSW }, /* haswell */
- { 0x6, 0x3F, HS_TUNE_FAMILY_HSW }, /* haswell Xeon */
+ { 0x6, 0x3F, HS_TUNE_FAMILY_HSW }, /* haswell Xeon */
- { 0x6, 0x3E, HS_TUNE_FAMILY_IVB }, /* ivybridge Xeon */
+ { 0x6, 0x3E, HS_TUNE_FAMILY_IVB }, /* ivybridge Xeon */
{ 0x6, 0x3A, HS_TUNE_FAMILY_IVB }, /* ivybridge */
{ 0x6, 0x2A, HS_TUNE_FAMILY_SNB }, /* sandybridge */
- { 0x6, 0x2D, HS_TUNE_FAMILY_SNB }, /* sandybridge Xeon */
+ { 0x6, 0x2D, HS_TUNE_FAMILY_SNB }, /* sandybridge Xeon */
{ 0x6, 0x3D, HS_TUNE_FAMILY_BDW }, /* broadwell Core-M */
- { 0x6, 0x47, HS_TUNE_FAMILY_BDW }, /* broadwell */
+ { 0x6, 0x47, HS_TUNE_FAMILY_BDW }, /* broadwell */
{ 0x6, 0x4F, HS_TUNE_FAMILY_BDW }, /* broadwell xeon */
{ 0x6, 0x56, HS_TUNE_FAMILY_BDW }, /* broadwell xeon-d */
- { 0x6, 0x4E, HS_TUNE_FAMILY_SKL }, /* Skylake Mobile */
- { 0x6, 0x5E, HS_TUNE_FAMILY_SKL }, /* Skylake Core/E3 Xeon */
- { 0x6, 0x55, HS_TUNE_FAMILY_SKX }, /* Skylake Xeon */
+ { 0x6, 0x4E, HS_TUNE_FAMILY_SKL }, /* Skylake Mobile */
+ { 0x6, 0x5E, HS_TUNE_FAMILY_SKL }, /* Skylake Core/E3 Xeon */
+ { 0x6, 0x55, HS_TUNE_FAMILY_SKX }, /* Skylake Xeon */
- { 0x6, 0x8E, HS_TUNE_FAMILY_SKL }, /* Kabylake Mobile */
- { 0x6, 0x9E, HS_TUNE_FAMILY_SKL }, /* Kabylake desktop */
+ { 0x6, 0x8E, HS_TUNE_FAMILY_SKL }, /* Kabylake Mobile */
+ { 0x6, 0x9E, HS_TUNE_FAMILY_SKL }, /* Kabylake desktop */
{ 0x6, 0x7D, HS_TUNE_FAMILY_ICL }, /* Icelake */
{ 0x6, 0x7E, HS_TUNE_FAMILY_ICL }, /* Icelake */
@@ -128,13 +128,13 @@ const char *dumpTune(u32 tune) {
#define T_CASE(x) case x: return #x;
switch (tune) {
T_CASE(HS_TUNE_FAMILY_SLM);
- T_CASE(HS_TUNE_FAMILY_GLM);
+ T_CASE(HS_TUNE_FAMILY_GLM);
T_CASE(HS_TUNE_FAMILY_HSW);
T_CASE(HS_TUNE_FAMILY_SNB);
T_CASE(HS_TUNE_FAMILY_IVB);
T_CASE(HS_TUNE_FAMILY_BDW);
- T_CASE(HS_TUNE_FAMILY_SKL);
- T_CASE(HS_TUNE_FAMILY_SKX);
+ T_CASE(HS_TUNE_FAMILY_SKL);
+ T_CASE(HS_TUNE_FAMILY_SKX);
T_CASE(HS_TUNE_FAMILY_ICL);
T_CASE(HS_TUNE_FAMILY_ICX);
}
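
The known_microarch table above maps CPUID family/model pairs to tuning targets, and cpuid_tune() is essentially a linear scan of it. A sketch of that lookup shape (table contents and names here are illustrative, not the real mapping):

    struct family_id_sketch { unsigned family, model, tune; };

    static const family_id_sketch table[] = {
        { 0x6, 0x3C, 1 }, // e.g. a Haswell tune id
        { 0x6, 0x55, 2 }, // e.g. a Skylake Xeon tune id
    };

    // Returns true and writes *tune on a hit; on a miss the caller falls
    // back to a generic tune.
    static bool lookup_tune(unsigned family, unsigned model, unsigned *tune) {
        for (const auto &e : table) {
            if (e.family == family && e.model == model) {
                *tune = e.tune;
                return true;
            }
        }
        return false;
    }
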
diff --git a/contrib/libs/hyperscan/src/util/cpuid_flags.h b/contrib/libs/hyperscan/src/util/cpuid_flags.h
index 3d5d63cee7..527c6d52f3 100644
--- a/contrib/libs/hyperscan/src/util/cpuid_flags.h
+++ b/contrib/libs/hyperscan/src/util/cpuid_flags.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -26,17 +26,17 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef UTIL_CPUID_H_
-#define UTIL_CPUID_H_
+#ifndef UTIL_CPUID_H_
+#define UTIL_CPUID_H_
#include "ue2common.h"
-#if !defined(_WIN32) && !defined(CPUID_H_)
-#include <cpuid.h>
- /* system header doesn't have a header guard */
-#define CPUID_H_
-#endif
-
+#if !defined(_WIN32) && !defined(CPUID_H_)
+#include <cpuid.h>
+ /* system header doesn't have a header guard */
+#define CPUID_H_
+#endif
+
#ifdef __cplusplus
extern "C"
{
@@ -51,5 +51,5 @@ u32 cpuid_tune(void);
} /* extern "C" */
#endif
-#endif /* UTIL_CPUID_H_ */
+#endif /* UTIL_CPUID_H_ */
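
This header's cpuid_flags()/cpuid_tune() feed Hyperscan's public platform detection. A hedged usage sketch through the public API, assuming hs_populate_platform() and hs_compile() as declared in hs.h:

    #include <hs/hs.h>

    // Compile a pattern tuned for the machine we are running on.
    hs_database_t *compile_for_current_cpu(const char *pattern) {
        hs_platform_info_t plat;
        if (hs_populate_platform(&plat) != HS_SUCCESS) {
            return nullptr;
        }
        hs_database_t *db = nullptr;
        hs_compile_error_t *err = nullptr;
        if (hs_compile(pattern, 0, HS_MODE_BLOCK, &plat, &db, &err)
                != HS_SUCCESS) {
            hs_free_compile_error(err);
            return nullptr;
        }
        return db;
    }
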
diff --git a/contrib/libs/hyperscan/src/util/cpuid_inline.h b/contrib/libs/hyperscan/src/util/cpuid_inline.h
index 49515d23b9..b7b4245289 100644
--- a/contrib/libs/hyperscan/src/util/cpuid_inline.h
+++ b/contrib/libs/hyperscan/src/util/cpuid_inline.h
@@ -1,193 +1,193 @@
-/*
+/*
* Copyright (c) 2017-2020, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef CPUID_INLINE_H_
-#define CPUID_INLINE_H_
-
-#include "ue2common.h"
-#include "cpuid_flags.h"
-
-#if !defined(_WIN32) && !defined(CPUID_H_)
-#include <cpuid.h>
-/* system header doesn't have a header guard */
-#define CPUID_H_
-#endif
-
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
-static inline
-void cpuid(unsigned int op, unsigned int leaf, unsigned int *eax,
- unsigned int *ebx, unsigned int *ecx, unsigned int *edx) {
-#ifndef _WIN32
- __cpuid_count(op, leaf, *eax, *ebx, *ecx, *edx);
-#else
- int a[4];
- __cpuidex(a, op, leaf);
- *eax = a[0];
- *ebx = a[1];
- *ecx = a[2];
- *edx = a[3];
-#endif
-}
-
-// ECX
-#define CPUID_SSE3 (1 << 0)
-#define CPUID_SSSE3 (1 << 9)
-#define CPUID_SSE4_1 (1 << 19)
-#define CPUID_SSE4_2 (1 << 20)
-#define CPUID_POPCNT (1 << 23)
-#define CPUID_XSAVE (1 << 27)
-#define CPUID_AVX (1 << 28)
-
-// EDX
-#define CPUID_FXSAVE (1 << 24)
-#define CPUID_SSE (1 << 25)
-#define CPUID_SSE2 (1 << 26)
-#define CPUID_HTT (1 << 28)
-
-// Structured Extended Feature Flags Enumeration Leaf ECX values
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef CPUID_INLINE_H_
+#define CPUID_INLINE_H_
+
+#include "ue2common.h"
+#include "cpuid_flags.h"
+
+#if !defined(_WIN32) && !defined(CPUID_H_)
+#include <cpuid.h>
+/* system header doesn't have a header guard */
+#define CPUID_H_
+#endif
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+static inline
+void cpuid(unsigned int op, unsigned int leaf, unsigned int *eax,
+ unsigned int *ebx, unsigned int *ecx, unsigned int *edx) {
+#ifndef _WIN32
+ __cpuid_count(op, leaf, *eax, *ebx, *ecx, *edx);
+#else
+ int a[4];
+ __cpuidex(a, op, leaf);
+ *eax = a[0];
+ *ebx = a[1];
+ *ecx = a[2];
+ *edx = a[3];
+#endif
+}
+
+// ECX
+#define CPUID_SSE3 (1 << 0)
+#define CPUID_SSSE3 (1 << 9)
+#define CPUID_SSE4_1 (1 << 19)
+#define CPUID_SSE4_2 (1 << 20)
+#define CPUID_POPCNT (1 << 23)
+#define CPUID_XSAVE (1 << 27)
+#define CPUID_AVX (1 << 28)
+
+// EDX
+#define CPUID_FXSAVE (1 << 24)
+#define CPUID_SSE (1 << 25)
+#define CPUID_SSE2 (1 << 26)
+#define CPUID_HTT (1 << 28)
+
+// Structured Extended Feature Flags Enumeration Leaf ECX values
#define CPUID_AVX512VBMI (1 << 1)
// Structured Extended Feature Flags Enumeration Leaf EBX values
-#define CPUID_BMI (1 << 3)
-#define CPUID_AVX2 (1 << 5)
-#define CPUID_BMI2 (1 << 8)
-#define CPUID_AVX512F (1 << 16)
-#define CPUID_AVX512BW (1 << 30)
-
-// Extended Control Register 0 (XCR0) values
-#define CPUID_XCR0_SSE (1 << 1)
-#define CPUID_XCR0_AVX (1 << 2)
-#define CPUID_XCR0_OPMASK (1 << 5) // k-regs
-#define CPUID_XCR0_ZMM_Hi256 (1 << 6) // upper 256 bits of ZMM0-ZMM15
-#define CPUID_XCR0_Hi16_ZMM (1 << 7) // ZMM16-ZMM31
-
-#define CPUID_XCR0_AVX512 \
- (CPUID_XCR0_OPMASK | CPUID_XCR0_ZMM_Hi256 | CPUID_XCR0_Hi16_ZMM)
-
-static inline
-u64a xgetbv(u32 op) {
-#if defined(_WIN32) || defined(__INTEL_COMPILER)
- return _xgetbv(op);
-#else
- u32 a, d;
- __asm__ volatile (
- "xgetbv\n"
- : "=a"(a),
- "=d"(d)
- : "c"(op));
- return ((u64a)d << 32) + a;
-#endif
-}
-
-static inline
-int check_avx2(void) {
-#if defined(__INTEL_COMPILER)
- return _may_i_use_cpu_feature(_FEATURE_AVX2);
-#else
- unsigned int eax, ebx, ecx, edx;
-
- cpuid(1, 0, &eax, &ebx, &ecx, &edx);
-
- /* check AVX is supported and XGETBV is enabled by OS */
- if ((ecx & (CPUID_AVX | CPUID_XSAVE)) != (CPUID_AVX | CPUID_XSAVE)) {
- DEBUG_PRINTF("AVX and XSAVE not supported\n");
- return 0;
- }
-
- /* check that SSE and AVX registers are enabled by OS */
- u64a xcr0 = xgetbv(0);
- if ((xcr0 & (CPUID_XCR0_SSE | CPUID_XCR0_AVX)) !=
- (CPUID_XCR0_SSE | CPUID_XCR0_AVX)) {
- DEBUG_PRINTF("SSE and AVX registers not enabled\n");
- return 0;
- }
-
- /* ECX and EDX contain capability flags */
- ecx = 0;
- cpuid(7, 0, &eax, &ebx, &ecx, &edx);
-
- if (ebx & CPUID_AVX2) {
- DEBUG_PRINTF("AVX2 enabled\n");
- return 1;
- }
-
- return 0;
-#endif
-}
-
-static inline
-int check_avx512(void) {
- /*
- * For our purposes, having avx512 really means "can we use AVX512BW?"
- */
-#if defined(__INTEL_COMPILER)
- return _may_i_use_cpu_feature(_FEATURE_AVX512BW | _FEATURE_AVX512VL);
-#else
- unsigned int eax, ebx, ecx, edx;
-
- cpuid(1, 0, &eax, &ebx, &ecx, &edx);
-
- /* check XSAVE is enabled by OS */
- if (!(ecx & CPUID_XSAVE)) {
- DEBUG_PRINTF("AVX and XSAVE not supported\n");
- return 0;
- }
-
- /* check that AVX 512 registers are enabled by OS */
- u64a xcr0 = xgetbv(0);
- if ((xcr0 & CPUID_XCR0_AVX512) != CPUID_XCR0_AVX512) {
- DEBUG_PRINTF("AVX512 registers not enabled\n");
- return 0;
- }
-
- /* ECX and EDX contain capability flags */
- ecx = 0;
- cpuid(7, 0, &eax, &ebx, &ecx, &edx);
-
- if (!(ebx & CPUID_AVX512F)) {
- DEBUG_PRINTF("AVX512F (AVX512 Foundation) instructions not enabled\n");
- return 0;
- }
-
- if (ebx & CPUID_AVX512BW) {
- DEBUG_PRINTF("AVX512BW instructions enabled\n");
- return 1;
- }
-
- return 0;
-#endif
-}
-
-static inline
+#define CPUID_BMI (1 << 3)
+#define CPUID_AVX2 (1 << 5)
+#define CPUID_BMI2 (1 << 8)
+#define CPUID_AVX512F (1 << 16)
+#define CPUID_AVX512BW (1 << 30)
+
+// Extended Control Register 0 (XCR0) values
+#define CPUID_XCR0_SSE (1 << 1)
+#define CPUID_XCR0_AVX (1 << 2)
+#define CPUID_XCR0_OPMASK (1 << 5) // k-regs
+#define CPUID_XCR0_ZMM_Hi256 (1 << 6) // upper 256 bits of ZMM0-ZMM15
+#define CPUID_XCR0_Hi16_ZMM (1 << 7) // ZMM16-ZMM31
+
+#define CPUID_XCR0_AVX512 \
+ (CPUID_XCR0_OPMASK | CPUID_XCR0_ZMM_Hi256 | CPUID_XCR0_Hi16_ZMM)
+
+static inline
+u64a xgetbv(u32 op) {
+#if defined(_WIN32) || defined(__INTEL_COMPILER)
+ return _xgetbv(op);
+#else
+ u32 a, d;
+ __asm__ volatile (
+ "xgetbv\n"
+ : "=a"(a),
+ "=d"(d)
+ : "c"(op));
+ return ((u64a)d << 32) + a;
+#endif
+}
+
+static inline
+int check_avx2(void) {
+#if defined(__INTEL_COMPILER)
+ return _may_i_use_cpu_feature(_FEATURE_AVX2);
+#else
+ unsigned int eax, ebx, ecx, edx;
+
+ cpuid(1, 0, &eax, &ebx, &ecx, &edx);
+
+ /* check AVX is supported and XGETBV is enabled by OS */
+ if ((ecx & (CPUID_AVX | CPUID_XSAVE)) != (CPUID_AVX | CPUID_XSAVE)) {
+ DEBUG_PRINTF("AVX and XSAVE not supported\n");
+ return 0;
+ }
+
+ /* check that SSE and AVX registers are enabled by OS */
+ u64a xcr0 = xgetbv(0);
+ if ((xcr0 & (CPUID_XCR0_SSE | CPUID_XCR0_AVX)) !=
+ (CPUID_XCR0_SSE | CPUID_XCR0_AVX)) {
+ DEBUG_PRINTF("SSE and AVX registers not enabled\n");
+ return 0;
+ }
+
+ /* ECX and EDX contain capability flags */
+ ecx = 0;
+ cpuid(7, 0, &eax, &ebx, &ecx, &edx);
+
+ if (ebx & CPUID_AVX2) {
+ DEBUG_PRINTF("AVX2 enabled\n");
+ return 1;
+ }
+
+ return 0;
+#endif
+}
+
+static inline
+int check_avx512(void) {
+ /*
+ * For our purposes, having avx512 really means "can we use AVX512BW?"
+ */
+#if defined(__INTEL_COMPILER)
+ return _may_i_use_cpu_feature(_FEATURE_AVX512BW | _FEATURE_AVX512VL);
+#else
+ unsigned int eax, ebx, ecx, edx;
+
+ cpuid(1, 0, &eax, &ebx, &ecx, &edx);
+
+ /* check XSAVE is enabled by OS */
+ if (!(ecx & CPUID_XSAVE)) {
+ DEBUG_PRINTF("AVX and XSAVE not supported\n");
+ return 0;
+ }
+
+ /* check that AVX 512 registers are enabled by OS */
+ u64a xcr0 = xgetbv(0);
+ if ((xcr0 & CPUID_XCR0_AVX512) != CPUID_XCR0_AVX512) {
+ DEBUG_PRINTF("AVX512 registers not enabled\n");
+ return 0;
+ }
+
+ /* ECX and EDX contain capability flags */
+ ecx = 0;
+ cpuid(7, 0, &eax, &ebx, &ecx, &edx);
+
+ if (!(ebx & CPUID_AVX512F)) {
+ DEBUG_PRINTF("AVX512F (AVX512 Foundation) instructions not enabled\n");
+ return 0;
+ }
+
+ if (ebx & CPUID_AVX512BW) {
+ DEBUG_PRINTF("AVX512BW instructions enabled\n");
+ return 1;
+ }
+
+ return 0;
+#endif
+}
+
+static inline
int check_avx512vbmi(void) {
#if defined(__INTEL_COMPILER)
return _may_i_use_cpu_feature(_FEATURE_AVX512VBMI);
@@ -233,28 +233,28 @@ int check_avx512vbmi(void) {
}
static inline
-int check_ssse3(void) {
- unsigned int eax, ebx, ecx, edx;
- cpuid(1, 0, &eax, &ebx, &ecx, &edx);
- return !!(ecx & CPUID_SSSE3);
-}
-
-static inline
-int check_sse42(void) {
- unsigned int eax, ebx, ecx, edx;
- cpuid(1, 0, &eax, &ebx, &ecx, &edx);
- return !!(ecx & CPUID_SSE4_2);
-}
-
-static inline
-int check_popcnt(void) {
- unsigned int eax, ebx, ecx, edx;
- cpuid(1, 0, &eax, &ebx, &ecx, &edx);
- return !!(ecx & CPUID_POPCNT);
-}
-
-#ifdef __cplusplus
-} /* extern "C" */
-#endif
-
-#endif /* CPUID_INLINE_H_ */
+int check_ssse3(void) {
+ unsigned int eax, ebx, ecx, edx;
+ cpuid(1, 0, &eax, &ebx, &ecx, &edx);
+ return !!(ecx & CPUID_SSSE3);
+}
+
+static inline
+int check_sse42(void) {
+ unsigned int eax, ebx, ecx, edx;
+ cpuid(1, 0, &eax, &ebx, &ecx, &edx);
+ return !!(ecx & CPUID_SSE4_2);
+}
+
+static inline
+int check_popcnt(void) {
+ unsigned int eax, ebx, ecx, edx;
+ cpuid(1, 0, &eax, &ebx, &ecx, &edx);
+ return !!(ecx & CPUID_POPCNT);
+}
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* CPUID_INLINE_H_ */
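
check_avx2() above is the canonical two-step test: CPUID reports the instruction set, and XGETBV confirms the OS actually saves the wider registers. On GCC/Clang for x86 a much shorter equivalent exists as a compiler builtin, which in current libgcc also gates on OS XSAVE state; a hedged sketch:

    // Alternative to the explicit CPUID + XGETBV dance; GCC/Clang x86 only.
    static int have_avx2_builtin(void) {
    #if defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__))
        __builtin_cpu_init();
        return __builtin_cpu_supports("avx2");
    #else
        return 0; // other toolchains: use the explicit path above
    #endif
    }
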
diff --git a/contrib/libs/hyperscan/src/util/depth.h b/contrib/libs/hyperscan/src/util/depth.h
index 652fe36fda..5305c6f1b3 100644
--- a/contrib/libs/hyperscan/src/util/depth.h
+++ b/contrib/libs/hyperscan/src/util/depth.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -34,8 +34,8 @@
#define DEPTH_H
#include "ue2common.h"
-#include "util/hash.h"
-#include "util/operators.h"
+#include "util/hash.h"
+#include "util/operators.h"
#ifdef DUMP_SUPPORT
#include <string>
@@ -52,12 +52,12 @@ struct DepthOverflowError {};
* \brief Type used to represent depth information; value is either a count,
* or the special values "infinity" and "unreachable".
*/
-class depth : totally_ordered<depth> {
+class depth : totally_ordered<depth> {
public:
- /** \brief The default depth is special value "unreachable". */
- depth() = default;
+ /** \brief The default depth is special value "unreachable". */
+ depth() = default;
- explicit depth(u32 v) : val(v) {
+ explicit depth(u32 v) : val(v) {
if (v > max_value()) {
DEBUG_PRINTF("depth %u too large to represent!\n", v);
throw DepthOverflowError();
@@ -193,53 +193,53 @@ public:
return *this;
}
- depth operator-(s32 d) const {
- if (is_unreachable()) {
- return unreachable();
- }
- if (is_infinite()) {
- return infinity();
- }
-
- s64a rv = val - d;
- if (rv < 0 || (u64a)rv >= val_infinity) {
- DEBUG_PRINTF("depth %lld too large to represent!\n", rv);
- throw DepthOverflowError();
- }
-
- return depth((u32)rv);
- }
-
- depth operator-=(s32 d) {
- depth rv = *this - d;
- *this = rv;
- return *this;
- }
-
+ depth operator-(s32 d) const {
+ if (is_unreachable()) {
+ return unreachable();
+ }
+ if (is_infinite()) {
+ return infinity();
+ }
+
+ s64a rv = val - d;
+ if (rv < 0 || (u64a)rv >= val_infinity) {
+ DEBUG_PRINTF("depth %lld too large to represent!\n", rv);
+ throw DepthOverflowError();
+ }
+
+ return depth((u32)rv);
+ }
+
+ depth operator-=(s32 d) {
+ depth rv = *this - d;
+ *this = rv;
+ return *this;
+ }
+
#ifdef DUMP_SUPPORT
/** \brief Render as a string, useful for debugging. */
std::string str() const;
#endif
- size_t hash() const {
- return val;
+ size_t hash() const {
+ return val;
}
private:
static constexpr u32 val_infinity = (1u << 31) - 1;
static constexpr u32 val_unreachable = 1u << 31;
- u32 val = val_unreachable;
+ u32 val = val_unreachable;
};
/**
* \brief Encapsulates a min/max pair.
*/
-struct DepthMinMax : totally_ordered<DepthMinMax> {
- depth min{depth::infinity()};
- depth max{0};
+struct DepthMinMax : totally_ordered<DepthMinMax> {
+ depth min{depth::infinity()};
+ depth max{0};
- DepthMinMax() = default;
+ DepthMinMax() = default;
DepthMinMax(const depth &mn, const depth &mx) : min(mn), max(mx) {}
bool operator<(const DepthMinMax &b) const {
@@ -257,7 +257,7 @@ struct DepthMinMax : totally_ordered<DepthMinMax> {
/** \brief Render as a string, useful for debugging. */
std::string str() const;
#endif
-
+
};
/**
@@ -267,22 +267,22 @@ DepthMinMax unionDepthMinMax(const DepthMinMax &a, const DepthMinMax &b);
} // namespace ue2
-namespace std {
-
-template<>
-struct hash<ue2::depth> {
- size_t operator()(const ue2::depth &d) const {
- return d.hash();
- }
-};
-
-template<>
-struct hash<ue2::DepthMinMax> {
- size_t operator()(const ue2::DepthMinMax &d) const {
- return hash_all(d.min, d.max);
- }
-};
-
-} // namespace
-
+namespace std {
+
+template<>
+struct hash<ue2::depth> {
+ size_t operator()(const ue2::depth &d) const {
+ return d.hash();
+ }
+};
+
+template<>
+struct hash<ue2::DepthMinMax> {
+ size_t operator()(const ue2::DepthMinMax &d) const {
+ return hash_all(d.min, d.max);
+ }
+};
+
+} // namespace
+
#endif // DEPTH_H
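
depth above reserves the top two u32 values for "infinity" and "unreachable" and hashes as its raw value; the std::hash specializations restored at the end of the hunk are what let it key unordered containers. A small usage sketch (assumes the ue2 headers):

    #include <unordered_set>
    #include "util/depth.h"

    // Returns true if this depth was already recorded; relies on
    // std::hash<ue2::depth> from the header above.
    static bool seen_before(std::unordered_set<ue2::depth> &seen,
                            const ue2::depth &d) {
        return !seen.insert(d).second;
    }
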
diff --git a/contrib/libs/hyperscan/src/util/determinise.h b/contrib/libs/hyperscan/src/util/determinise.h
index 8cb2d11930..102a197441 100644
--- a/contrib/libs/hyperscan/src/util/determinise.h
+++ b/contrib/libs/hyperscan/src/util/determinise.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -38,9 +38,9 @@
#include "container.h"
#include "ue2common.h"
-#include <algorithm>
+#include <algorithm>
#include <array>
-#include <queue>
+#include <queue>
#include <vector>
namespace ue2 {
@@ -72,44 +72,44 @@ namespace ue2 {
* \param state_limit limit on the number of dfa states to construct
* \param statesets_out a mapping from DFA state to the set of NFA states in
* the automaton
- * \return true on success, false if state limit exceeded
+ * \return true on success, false if state limit exceeded
*/
template<class Auto, class ds>
never_inline
-bool determinise(Auto &n, std::vector<ds> &dstates, size_t state_limit,
+bool determinise(Auto &n, std::vector<ds> &dstates, size_t state_limit,
std::vector<typename Auto::StateSet> *statesets_out = nullptr) {
DEBUG_PRINTF("the determinator\n");
- using StateSet = typename Auto::StateSet;
- typename Auto::StateMap dstate_ids;
+ using StateSet = typename Auto::StateSet;
+ typename Auto::StateMap dstate_ids;
const size_t alphabet_size = n.alphasize;
- dstates.clear();
- dstates.reserve(state_limit);
+ dstates.clear();
+ dstates.reserve(state_limit);
- dstate_ids.emplace(n.dead, DEAD_STATE);
+ dstate_ids.emplace(n.dead, DEAD_STATE);
dstates.push_back(ds(alphabet_size));
std::fill_n(dstates[0].next.begin(), alphabet_size, DEAD_STATE);
- std::queue<std::pair<StateSet, dstate_id_t>> q;
- q.emplace(n.dead, DEAD_STATE);
+ std::queue<std::pair<StateSet, dstate_id_t>> q;
+ q.emplace(n.dead, DEAD_STATE);
const std::vector<StateSet> &init = n.initial();
for (u32 i = 0; i < init.size(); i++) {
- q.emplace(init[i], dstates.size());
+ q.emplace(init[i], dstates.size());
assert(!contains(dstate_ids, init[i]));
- dstate_ids.emplace(init[i], dstates.size());
+ dstate_ids.emplace(init[i], dstates.size());
dstates.push_back(ds(alphabet_size));
}
std::vector<StateSet> succs(alphabet_size, n.dead);
- while (!q.empty()) {
- auto m = std::move(q.front());
- q.pop();
- StateSet &curr = m.first;
- dstate_id_t curr_id = m.second;
-
+ while (!q.empty()) {
+ auto m = std::move(q.front());
+ q.pop();
+ StateSet &curr = m.first;
+ dstate_id_t curr_id = m.second;
+
DEBUG_PRINTF("curr: %hu\n", curr_id);
/* fill in accepts */
@@ -139,48 +139,48 @@ bool determinise(Auto &n, std::vector<ds> &dstates, size_t state_limit,
if (s && succs[s] == succs[s - 1]) {
succ_id = dstates[curr_id].next[s - 1];
} else {
- auto p = dstate_ids.find(succs[s]);
- if (p != dstate_ids.end()) { // succ[s] is already present
- succ_id = p->second;
+ auto p = dstate_ids.find(succs[s]);
+ if (p != dstate_ids.end()) { // succ[s] is already present
+ succ_id = p->second;
if (succ_id > curr_id && !dstates[succ_id].daddy
&& n.unalpha[s] < N_CHARS) {
dstates[succ_id].daddy = curr_id;
}
} else {
- succ_id = dstate_ids.size();
- dstate_ids.emplace(succs[s], succ_id);
+ succ_id = dstate_ids.size();
+ dstate_ids.emplace(succs[s], succ_id);
dstates.push_back(ds(alphabet_size));
dstates.back().daddy = n.unalpha[s] < N_CHARS ? curr_id : 0;
- q.emplace(succs[s], succ_id);
+ q.emplace(succs[s], succ_id);
}
DEBUG_PRINTF("-->%hu on %02hx\n", succ_id, n.unalpha[s]);
}
if (succ_id >= state_limit) {
- DEBUG_PRINTF("succ_id %hu >= state_limit %zu\n",
+ DEBUG_PRINTF("succ_id %hu >= state_limit %zu\n",
succ_id, state_limit);
- dstates.clear();
- return false;
+ dstates.clear();
+ return false;
}
dstates[curr_id].next[s] = succ_id;
}
}
- // The dstates vector will persist in the raw_dfa.
- dstates.shrink_to_fit();
-
+ // The dstates vector will persist in the raw_dfa.
+ dstates.shrink_to_fit();
+
if (statesets_out) {
- auto &statesets = *statesets_out;
- statesets.resize(dstate_ids.size());
- for (auto &m : dstate_ids) {
- statesets[m.second] = std::move(m.first);
- }
+ auto &statesets = *statesets_out;
+ statesets.resize(dstate_ids.size());
+ for (auto &m : dstate_ids) {
+ statesets[m.second] = std::move(m.first);
+ }
}
-
+
DEBUG_PRINTF("ok\n");
- return true;
+ return true;
}
static inline
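
determinise() above is subset construction: a breadth-first search over sets of NFA states, interning each newly seen set as a DFA state id. A toy version of the same shape, with a bare transition table standing in for the Auto concept (names illustrative):

    #include <map>
    #include <queue>
    #include <set>
    #include <vector>

    using StateSet = std::set<int>;

    // nfa_next[state][symbol] is the set of NFA successors. Returns the
    // interning map from NFA state set to DFA id.
    std::map<StateSet, int> subset_construct(
            const std::vector<std::vector<StateSet>> &nfa_next,
            const StateSet &init, int nsyms) {
        std::map<StateSet, int> ids;
        std::queue<StateSet> q;
        ids.emplace(init, 0);
        q.push(init);
        while (!q.empty()) {
            StateSet curr = q.front();
            q.pop();
            for (int s = 0; s < nsyms; s++) {
                StateSet succ;
                for (int n : curr) {
                    succ.insert(nfa_next[n][s].begin(), nfa_next[n][s].end());
                }
                // size() is evaluated before the insertion, so ids are dense
                if (ids.emplace(succ, (int)ids.size()).second) {
                    q.push(succ); // first visit: expand it later
                }
            }
        }
        return ids;
    }
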
diff --git a/contrib/libs/hyperscan/src/util/dump_charclass.h b/contrib/libs/hyperscan/src/util/dump_charclass.h
index b10d1b162b..999641340a 100644
--- a/contrib/libs/hyperscan/src/util/dump_charclass.h
+++ b/contrib/libs/hyperscan/src/util/dump_charclass.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -37,9 +37,9 @@
#include <cstdio>
#include <ostream>
-#include <sstream>
+#include <sstream>
#include <string>
-#include <vector>
+#include <vector>
namespace ue2 {
@@ -50,23 +50,23 @@ enum cc_output_t {
class CharReach;
-void describeClass(std::ostream &os, const CharReach &cr, size_t maxLength = 16,
- enum cc_output_t out_type = CC_OUT_TEXT);
+void describeClass(std::ostream &os, const CharReach &cr, size_t maxLength = 16,
+ enum cc_output_t out_type = CC_OUT_TEXT);
std::string describeClass(const CharReach &cr, size_t maxLength = 16,
enum cc_output_t out_type = CC_OUT_TEXT);
-template<typename Container>
-std::string describeClasses(const Container &container,
- size_t maxClassLength = 16,
- enum cc_output_t out_type = CC_OUT_TEXT) {
- std::ostringstream oss;
- for (const CharReach &cr : container) {
- describeClass(oss, cr, maxClassLength, out_type);
- }
- return oss.str();
-}
-
+template<typename Container>
+std::string describeClasses(const Container &container,
+ size_t maxClassLength = 16,
+ enum cc_output_t out_type = CC_OUT_TEXT) {
+ std::ostringstream oss;
+ for (const CharReach &cr : container) {
+ describeClass(oss, cr, maxClassLength, out_type);
+ }
+ return oss.str();
+}
+
void describeClass(FILE *f, const CharReach &cr, size_t maxLength,
enum cc_output_t out_type);
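
describeClasses() above folds a container of character classes into one string via a single ostringstream. The same accumulation pattern, generalized (an illustrative stand-in, not the CharReach version):

    #include <sstream>
    #include <string>
    #include <vector>

    // Stream a per-element printer into one accumulated string.
    template <typename T, typename Fn>
    std::string describe_all(const std::vector<T> &xs, Fn describe_one) {
        std::ostringstream oss;
        for (const T &x : xs) {
            describe_one(oss, x);
        }
        return oss.str();
    }
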
diff --git a/contrib/libs/hyperscan/src/util/exhaust.h b/contrib/libs/hyperscan/src/util/exhaust.h
index 0d8f96f6ab..d6f2ac06d9 100644
--- a/contrib/libs/hyperscan/src/util/exhaust.h
+++ b/contrib/libs/hyperscan/src/util/exhaust.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2016, Intel Corporation
+ * Copyright (c) 2015-2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -35,7 +35,7 @@
#include "ue2common.h"
-/** Index meaning a given exhaustion key is invalid. */
-#define INVALID_EKEY (~(u32)0)
+/** Index meaning a given exhaustion key is invalid. */
+#define INVALID_EKEY (~(u32)0)
#endif
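
INVALID_EKEY above is simply the all-ones u32, a value no real exhaustion key can take. A guard in that style (a hypothetical helper, not from the source):

    #include <cstdint>

    static const uint32_t kInvalidEkey = ~(uint32_t)0; // 0xffffffff

    static bool ekey_is_valid(uint32_t ekey) {
        return ekey != kInvalidEkey;
    }
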
diff --git a/contrib/libs/hyperscan/src/util/fatbit.h b/contrib/libs/hyperscan/src/util/fatbit.h
index fa6f5c3793..3c65db1a59 100644
--- a/contrib/libs/hyperscan/src/util/fatbit.h
+++ b/contrib/libs/hyperscan/src/util/fatbit.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2016, Intel Corporation
+ * Copyright (c) 2015-2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -40,10 +40,10 @@
#include "multibit.h"
#include "ue2common.h"
-#ifdef __cplusplus
-extern "C" {
-#endif
-
+#ifdef __cplusplus
+extern "C" {
+#endif
+
#define MIN_FAT_SIZE 32
struct fatbit {
@@ -62,32 +62,32 @@ void fatbit_clear(struct fatbit *bits) {
static really_inline
char fatbit_set(struct fatbit *bits, u32 total_bits, u32 key) {
- assert(ISALIGNED(bits));
+ assert(ISALIGNED(bits));
return mmbit_set(bits->fb_int.raw, total_bits, key);
}
static really_inline
void fatbit_unset(struct fatbit *bits, u32 total_bits, u32 key) {
- assert(ISALIGNED(bits));
+ assert(ISALIGNED(bits));
mmbit_unset(bits->fb_int.raw, total_bits, key);
}
static really_inline
char fatbit_isset(const struct fatbit *bits, u32 total_bits, u32 key) {
- assert(ISALIGNED(bits));
+ assert(ISALIGNED(bits));
return mmbit_isset(bits->fb_int.raw, total_bits, key);
}
static really_inline
u32 fatbit_iterate(const struct fatbit *bits, u32 total_bits, u32 it_in) {
- assert(ISALIGNED(bits));
+ assert(ISALIGNED(bits));
/* TODO: iterate_flat could be specialised as we don't have to worry about
* partial blocks. */
return mmbit_iterate(bits->fb_int.raw, total_bits, it_in);
}
-#ifdef __cplusplus
-} // extern "C"
-#endif
+#ifdef __cplusplus
+} // extern "C"
+#endif
#endif
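
The fatbit API above mirrors multibit but assumes a fixed, aligned allocation; fatbit_size() (from fatbit_build.h) reports how much to allocate. A usage sketch under those assumptions, backing the structure with u64-aligned storage:

    #include <cstdint>
    #include <vector>
    #include "util/fatbit.h"
    #include "util/fatbit_build.h" // ue2::fatbit_size()

    static bool fatbit_demo(u32 total_bits, u32 key) {
        // round up to whole u64 words so mem.data() satisfies ISALIGNED
        std::vector<uint64_t> mem((ue2::fatbit_size(total_bits) + 7) / 8);
        struct fatbit *fb = (struct fatbit *)mem.data();
        fatbit_clear(fb);
        fatbit_set(fb, total_bits, key);
        return fatbit_isset(fb, total_bits, key); // true: key was just set
    }
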
diff --git a/contrib/libs/hyperscan/src/util/fatbit_build.cpp b/contrib/libs/hyperscan/src/util/fatbit_build.cpp
index 5a251b1cc0..77f4b55022 100644
--- a/contrib/libs/hyperscan/src/util/fatbit_build.cpp
+++ b/contrib/libs/hyperscan/src/util/fatbit_build.cpp
@@ -1,44 +1,44 @@
-/*
- * Copyright (c) 2016, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "fatbit_build.h"
-
-#include "fatbit.h"
-#include "multibit_build.h"
-
-#include <algorithm>
-
-using namespace std;
-
-namespace ue2 {
-
-u32 fatbit_size(u32 total_bits) {
- return max(u32{sizeof(struct fatbit)}, mmbit_size(total_bits));
-}
-
-} // namespace ue2
+/*
+ * Copyright (c) 2016, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "fatbit_build.h"
+
+#include "fatbit.h"
+#include "multibit_build.h"
+
+#include <algorithm>
+
+using namespace std;
+
+namespace ue2 {
+
+u32 fatbit_size(u32 total_bits) {
+ return max(u32{sizeof(struct fatbit)}, mmbit_size(total_bits));
+}
+
+} // namespace ue2
diff --git a/contrib/libs/hyperscan/src/util/fatbit_build.h b/contrib/libs/hyperscan/src/util/fatbit_build.h
index 0fed10c06a..d76116570c 100644
--- a/contrib/libs/hyperscan/src/util/fatbit_build.h
+++ b/contrib/libs/hyperscan/src/util/fatbit_build.h
@@ -1,48 +1,48 @@
-/*
- * Copyright (c) 2016, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \file
- * \brief Fatbit: build code
- */
-
-#ifndef FATBIT_BUILD_H
-#define FATBIT_BUILD_H
-
-#include "ue2common.h"
-
-namespace ue2 {
-
-/**
- * \brief Return the size in bytes of a fatbit that can store the given
- * number of bits.
- */
-u32 fatbit_size(u32 total_bits);
-
-} // namespace ue2
-
-#endif // FATBIT_BUILD_H
+/*
+ * Copyright (c) 2016, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * \brief Fatbit: build code
+ */
+
+#ifndef FATBIT_BUILD_H
+#define FATBIT_BUILD_H
+
+#include "ue2common.h"
+
+namespace ue2 {
+
+/**
+ * \brief Return the size in bytes of a fatbit that can store the given
+ * number of bits.
+ */
+u32 fatbit_size(u32 total_bits);
+
+} // namespace ue2
+
+#endif // FATBIT_BUILD_H
diff --git a/contrib/libs/hyperscan/src/util/flat_containers.h b/contrib/libs/hyperscan/src/util/flat_containers.h
index 822c1f8957..41452eb42a 100644
--- a/contrib/libs/hyperscan/src/util/flat_containers.h
+++ b/contrib/libs/hyperscan/src/util/flat_containers.h
@@ -1,664 +1,664 @@
-/*
- * Copyright (c) 2015-2017, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef UTIL_FLAT_CONTAINERS_H
-#define UTIL_FLAT_CONTAINERS_H
-
-#include "ue2common.h"
-#include "util/hash.h"
-#include "util/operators.h"
-#include "util/small_vector.h"
-
-#include <algorithm>
-#include <iterator>
-#include <type_traits>
-#include <utility>
-
-#include <boost/iterator/iterator_facade.hpp>
-
-namespace ue2 {
-
-namespace flat_detail {
-
-// Iterator facade that wraps an underlying iterator, so that we get our
-// own iterator types.
-template <class WrappedIter, class Value>
-class iter_wrapper
- : public boost::iterator_facade<iter_wrapper<WrappedIter, Value>, Value,
- boost::random_access_traversal_tag> {
-public:
- iter_wrapper() = default;
- explicit iter_wrapper(WrappedIter it_in) : it(std::move(it_in)) {}
-
- // Templated copy-constructor to allow for interoperable iterator and
- // const_iterator.
-private:
- template <class, class> friend class iter_wrapper;
-
-public:
- template <class OtherIter, class OtherValue>
- iter_wrapper(iter_wrapper<OtherIter, OtherValue> other,
- typename std::enable_if<std::is_convertible<
- OtherIter, WrappedIter>::value>::type * = nullptr)
- : it(std::move(other.it)) {}
-
- WrappedIter get() const { return it; }
-
-private:
- friend class boost::iterator_core_access;
-
- WrappedIter it;
-
- void increment() { ++it; }
- void decrement() { --it; }
- void advance(size_t n) { it += n; }
- typename std::iterator_traits<WrappedIter>::difference_type
- distance_to(const iter_wrapper &other) const {
- return other.it - it;
- }
- bool equal(const iter_wrapper &other) const { return it == other.it; }
- Value &dereference() const { return *it; }
-};
-
-template <class T, class Compare, class Allocator>
-class flat_base {
-protected:
- // Underlying storage is a small vector with local space for one element.
- using storage_type = small_vector<T, 1, Allocator>;
- using storage_alloc_type = typename storage_type::allocator_type;
-
- // Putting our storage and comparator in a tuple allows us to make use of
- // the empty base class optimization (if this STL implements it for
- // std::tuple).
- std::tuple<storage_type, Compare> storage;
-
- flat_base(const Compare &compare, const Allocator &alloc)
- : storage(storage_type(storage_alloc_type(alloc)), compare) {}
-
- storage_type &data() { return std::get<0>(this->storage); }
- const storage_type &data() const { return std::get<0>(this->storage); }
-
- Compare &comp() { return std::get<1>(this->storage); }
- const Compare &comp() const { return std::get<1>(this->storage); }
-
-public:
- // Common member types.
- using key_compare = Compare;
-
- Allocator get_allocator() const {
- return data().get_allocator();
- }
-
- key_compare key_comp() const {
- return comp();
- }
-
- // Capacity.
-
- bool empty() const { return data().empty(); }
- size_t size() const { return data().size(); }
- size_t max_size() const { return data().max_size(); }
-
- // Modifiers.
-
- void clear() {
- data().clear();
- }
-
- void swap(flat_base &a) {
- using std::swap;
- swap(comp(), a.comp());
- swap(data(), a.data());
- }
-};
-
-} // namespace flat_detail
-
-/**
- * \brief Set container implemented internally as a sorted vector. Use this
- * rather than std::set for small sets as it's faster, uses less memory and
- * incurs less malloc time.
- *
- * Note: we used to use boost::flat_set, but have run into problems with all
- * the extra machinery it instantiates.
- */
-template <class T, class Compare = std::less<T>,
- class Allocator = std::allocator<T>>
-class flat_set
- : public flat_detail::flat_base<T, Compare, Allocator>,
- public totally_ordered<flat_set<T, Compare, Allocator>> {
- using base_type = flat_detail::flat_base<T, Compare, Allocator>;
- using storage_type = typename base_type::storage_type;
- using storage_iterator = typename storage_type::iterator;
- using storage_const_iterator = typename storage_type::const_iterator;
- using base_type::data;
- using base_type::comp;
-
-#if defined(SMALL_VECTOR_IS_STL_VECTOR)
- // Construct a non-const iterator from a const iterator. Used in flat_map
- // and flat_set erase() calls to work around g++-4.8 compatibility issues.
- storage_iterator mutable_iterator(storage_const_iterator it) {
- return data().begin() + std::distance(data().cbegin(), it);
- }
-#endif
-
-public:
- // Member types.
- using key_type = T;
- using value_type = T;
- using size_type = typename storage_type::size_type;
- using difference_type = typename storage_type::difference_type;
- using key_compare = typename base_type::key_compare;
- using value_compare = Compare;
- using allocator_type = Allocator;
- using reference = value_type &;
- using const_reference = const value_type &;
- using allocator_traits_type = typename std::allocator_traits<Allocator>;
- using pointer = typename allocator_traits_type::pointer;
- using const_pointer = typename allocator_traits_type::const_pointer;
-
- // Iterator types.
-
- using iterator = flat_detail::iter_wrapper<typename storage_type::iterator,
- const value_type>;
- using const_iterator =
- flat_detail::iter_wrapper<typename storage_type::const_iterator,
- const value_type>;
-
- using reverse_iterator = std::reverse_iterator<iterator>;
- using const_reverse_iterator = std::reverse_iterator<const_iterator>;
-
- // Constructors.
-
- flat_set(const Compare &compare = Compare(),
- const Allocator &alloc = Allocator())
- : base_type(compare, alloc) {}
-
- template <class InputIt>
- flat_set(InputIt first, InputIt last, const Compare &compare = Compare(),
- const Allocator &alloc = Allocator())
- : flat_set(compare, alloc) {
- insert(first, last);
- }
-
- flat_set(std::initializer_list<value_type> init,
- const Compare &compare = Compare(),
- const Allocator &alloc = Allocator())
- : flat_set(compare, alloc) {
- insert(init.begin(), init.end());
- }
-
- flat_set(const flat_set &) = default;
- flat_set(flat_set &&) = default;
- flat_set &operator=(const flat_set &) = default;
- flat_set &operator=(flat_set &&) = default;
-
- // Iterators.
-
- iterator begin() { return iterator(data().begin()); }
- const_iterator cbegin() const { return const_iterator(data().cbegin()); }
- const_iterator begin() const { return cbegin(); }
-
- iterator end() { return iterator(data().end()); }
- const_iterator cend() const { return const_iterator(data().cend()); }
- const_iterator end() const { return cend(); }
-
- reverse_iterator rbegin() { return reverse_iterator(end()); }
- const_reverse_iterator crbegin() const {
- return const_reverse_iterator(cend());
- }
- const_reverse_iterator rbegin() const { return crbegin(); }
-
- reverse_iterator rend() { return reverse_iterator(begin()); }
- const_reverse_iterator crend() const {
- return const_reverse_iterator(cbegin());
- }
- const_reverse_iterator rend() const { return crend(); }
-
- // Modifiers.
-
- std::pair<iterator, bool> insert(const value_type &value) {
- auto it = std::lower_bound(data().begin(), data().end(), value, comp());
- if (it == data().end() || comp()(value, *it)) {
- return std::make_pair(iterator(data().insert(it, value)), true);
- }
- return std::make_pair(iterator(it), false);
- }
-
- iterator insert(UNUSED const_iterator hint, const value_type &value) {
- return insert(value).first;
- }
-
- std::pair<iterator, bool> insert(value_type &&value) {
- auto it = std::lower_bound(data().begin(), data().end(), value, comp());
- if (it == data().end() || comp()(value, *it)) {
- return std::make_pair(iterator(data().insert(it, std::move(value))),
- true);
- }
- return std::make_pair(iterator(it), false);
- }
-
- iterator insert(UNUSED const_iterator hint, value_type &&value) {
- return insert(value).first;
- }
-
- template <class InputIt>
- void insert(InputIt first, InputIt second) {
- for (; first != second; ++first) {
- insert(*first);
- }
- }
-
- void insert(std::initializer_list<value_type> ilist) {
- insert(ilist.begin(), ilist.end());
- }
-
- template<class...Args>
- std::pair<iterator, bool> emplace(Args&&... args) {
- return insert(value_type(std::forward<Args>(args)...));
- }
-
- void erase(const_iterator pos) {
-#if defined(SMALL_VECTOR_IS_STL_VECTOR)
- // Cope with libstdc++ 4.8's incomplete STL (it's missing C++11
- // vector::erase(const_iterator)) by explicitly using a non-const
- // iterator.
- auto pos_it = mutable_iterator(pos.get());
-#else
- auto pos_it = pos.get();
-#endif
- data().erase(pos_it);
- }
-
- void erase(const_iterator first, const_iterator last) {
-#if defined(SMALL_VECTOR_IS_STL_VECTOR)
- // As above, work around libstdc++ 4.8's incomplete C++11 support.
- auto first_it = mutable_iterator(first.get());
- auto last_it = mutable_iterator(last.get());
-#else
- auto first_it = first.get();
- auto last_it = last.get();
-#endif
- data().erase(first_it, last_it);
- }
-
- void erase(const key_type &key) {
- auto it = find(key);
- if (it != end()) {
- erase(it);
- }
- }
-
- // Lookup.
-
- size_type count(const value_type &value) const {
- return find(value) != end() ? 1 : 0;
- }
-
- iterator find(const value_type &value) {
- auto it = std::lower_bound(data().begin(), data().end(), value, comp());
- if (it != data().end() && comp()(value, *it)) {
- it = data().end();
- }
- return iterator(it);
- }
-
- const_iterator find(const value_type &value) const {
- auto it = std::lower_bound(data().begin(), data().end(), value, comp());
- if (it != data().end() && comp()(value, *it)) {
- it = data().end();
- }
- return const_iterator(it);
- }
-
- // Observers.
-
- value_compare value_comp() const {
- return comp();
- }
-
- // Operators. All others provided by ue2::totally_ordered.
-
- bool operator==(const flat_set &a) const {
- return data() == a.data();
- }
- bool operator<(const flat_set &a) const {
- return data() < a.data();
- }
-
- // Free swap function for ADL.
- friend void swap(flat_set &a, flat_set &b) {
- a.swap(b);
- }
-};
-
-/**
- * \brief Map container implemented internally as a sorted vector. Use this
- * rather than std::map for small maps as it's faster, uses less memory and
- * incurs less malloc time.
- *
- * Note: we used to use boost::flat_map, but have run into problems with all
- * the extra machinery it instantiates.
- *
- * Note: ue2::flat_map does NOT provide mutable iterators, as (given the way
- * the data is stored) it is difficult to provide a real mutable iterator that
- * wraps std::pair<const Key, T>. Instead, all iterators are const, and you
- * should use flat_map::at() or flat_map::operator[] to mutate the contents of
- * the container.
- */
-template <class Key, class T, class Compare = std::less<Key>,
- class Allocator = std::allocator<std::pair<Key, T>>>
-class flat_map
- : public flat_detail::flat_base<std::pair<Key, T>, Compare, Allocator>,
- public totally_ordered<flat_map<Key, T, Compare, Allocator>> {
-public:
- // Member types.
- using key_type = Key;
- using mapped_type = T;
- using value_type = std::pair<const Key, T>;
-
-private:
- using base_type =
- flat_detail::flat_base<std::pair<Key, T>, Compare, Allocator>;
- using keyval_storage_type = std::pair<key_type, mapped_type>;
- using storage_type = typename base_type::storage_type;
- using storage_iterator = typename storage_type::iterator;
- using storage_const_iterator = typename storage_type::const_iterator;
- using base_type::data;
- using base_type::comp;
-
-#if defined(SMALL_VECTOR_IS_STL_VECTOR)
- // Construct a non-const iterator from a const iterator. Used in flat_map
- // and flat_set erase() calls to work around g++-4.8 compatibility issues.
- storage_iterator mutable_iterator(storage_const_iterator it) {
- return data().begin() + std::distance(data().cbegin(), it);
- }
-#endif
-
-public:
-    // More member types.
- using size_type = typename storage_type::size_type;
- using difference_type = typename storage_type::difference_type;
- using key_compare = typename base_type::key_compare;
- using allocator_type = Allocator;
- using reference = value_type &;
- using const_reference = const value_type &;
- using allocator_traits_type = typename std::allocator_traits<Allocator>;
- using pointer = typename allocator_traits_type::pointer;
- using const_pointer = typename allocator_traits_type::const_pointer;
-
-public:
- using const_iterator =
- flat_detail::iter_wrapper<typename storage_type::const_iterator,
- const keyval_storage_type>;
-
- using const_reverse_iterator = std::reverse_iterator<const_iterator>;
-
- // All iterators are const for flat_map.
- using iterator = const_iterator;
- using reverse_iterator = const_reverse_iterator;
-
- // Constructors.
-
- flat_map(const Compare &compare = Compare(),
- const Allocator &alloc = Allocator())
- : base_type(compare, alloc) {}
-
- template <class InputIt>
- flat_map(InputIt first, InputIt last, const Compare &compare = Compare(),
- const Allocator &alloc = Allocator())
- : flat_map(compare, alloc) {
- insert(first, last);
- }
-
- flat_map(std::initializer_list<value_type> init,
- const Compare &compare = Compare(),
- const Allocator &alloc = Allocator())
- : flat_map(compare, alloc) {
- insert(init.begin(), init.end());
- }
-
- flat_map(const flat_map &) = default;
- flat_map(flat_map &&) = default;
- flat_map &operator=(const flat_map &) = default;
- flat_map &operator=(flat_map &&) = default;
-
- // Iterators.
-
- const_iterator cbegin() const { return const_iterator(data().cbegin()); }
- const_iterator begin() const { return cbegin(); }
-
- const_iterator cend() const { return const_iterator(data().cend()); }
- const_iterator end() const { return cend(); }
-
- const_reverse_iterator crbegin() const {
- return const_reverse_iterator(cend());
- }
- const_reverse_iterator rbegin() const { return crbegin(); }
-
- const_reverse_iterator crend() const {
- return const_reverse_iterator(cbegin());
- }
- const_reverse_iterator rend() const { return crend(); }
-
-private:
- storage_iterator data_lower_bound(const key_type &key) {
- return std::lower_bound(
- data().begin(), data().end(), key,
- [&](const keyval_storage_type &elem, const key_type &k) {
- return comp()(elem.first, k);
- });
- }
-
- storage_const_iterator
- data_lower_bound(const key_type &key) const {
- return std::lower_bound(
- data().begin(), data().end(), key,
- [&](const keyval_storage_type &elem, const key_type &k) {
- return comp()(elem.first, k);
- });
- }
-
- std::pair<storage_iterator, bool> data_insert(const value_type &value) {
- auto it = data_lower_bound(value.first);
- if (it == data().end() || comp()(value.first, it->first)) {
- return std::make_pair(data().insert(it, value), true);
- }
- return std::make_pair(it, false);
- }
-
- std::pair<storage_iterator, bool> data_insert(value_type &&value) {
- auto it = data_lower_bound(value.first);
- if (it == data().end() || comp()(value.first, it->first)) {
- return std::make_pair(data().insert(it, std::move(value)), true);
- }
- return std::make_pair(it, false);
- }
-
- storage_iterator data_find(const key_type &key) {
- auto it = data_lower_bound(key);
- if (it != data().end() && comp()(key, it->first)) {
- it = data().end();
- }
- return it;
- }
-
- storage_const_iterator data_find(const key_type &key) const {
- auto it = data_lower_bound(key);
- if (it != data().end() && comp()(key, it->first)) {
- it = data().end();
- }
- return it;
- }
-
-public:
- // Modifiers.
-
- std::pair<iterator, bool> insert(const value_type &value) {
- auto rv = data_insert(value);
- return std::make_pair(iterator(rv.first), rv.second);
- }
-
- std::pair<iterator, bool> insert(value_type &&value) {
- auto rv = data_insert(std::move(value));
- return std::make_pair(iterator(rv.first), rv.second);
- }
-
- template <class InputIt>
- void insert(InputIt first, InputIt second) {
- for (; first != second; ++first) {
- insert(*first);
- }
- }
-
- void insert(std::initializer_list<value_type> ilist) {
- insert(ilist.begin(), ilist.end());
- }
-
- template<class...Args>
- std::pair<iterator, bool> emplace(Args&&... args) {
- return insert(value_type(std::forward<Args>(args)...));
- }
-
- void erase(const_iterator pos) {
-#if defined(SMALL_VECTOR_IS_STL_VECTOR)
- // Cope with libstdc++ 4.8's incomplete STL (it's missing C++11
- // vector::erase(const_iterator)) by explicitly using a non-const
- // iterator.
- auto pos_it = mutable_iterator(pos.get());
-#else
- auto pos_it = pos.get();
-#endif
- data().erase(pos_it);
- }
-
- void erase(const_iterator first, const_iterator last) {
-#if defined(SMALL_VECTOR_IS_STL_VECTOR)
- // As above, work around libstdc++ 4.8's incomplete C++11 support.
- auto first_it = mutable_iterator(first.get());
- auto last_it = mutable_iterator(last.get());
-#else
- auto first_it = first.get();
- auto last_it = last.get();
-#endif
- data().erase(first_it, last_it);
- }
-
- void erase(const key_type &key) {
- auto it = find(key);
- if (it != end()) {
- erase(it);
- }
- }
-
- // Lookup.
-
- size_type count(const key_type &key) const {
- return find(key) != end() ? 1 : 0;
- }
-
- const_iterator find(const key_type &key) const {
- return const_iterator(data_find(key));
- }
-
- // Element access.
-
- mapped_type &at(const key_type &key) {
- auto it = data_find(key);
- if (it == data().end()) {
- throw std::out_of_range("element not found");
- }
- return it->second;
- }
-
- const mapped_type &at(const key_type &key) const {
- auto it = data_find(key);
- if (it == data().end()) {
- throw std::out_of_range("element not found");
- }
- return it->second;
- }
-
- mapped_type &operator[](const key_type &key) {
- auto p = data_insert(value_type(key, mapped_type()));
- return p.first->second;
- }
-
- // Observers.
-
- class value_compare {
- friend class flat_map;
- protected:
- Compare c;
- value_compare(Compare c_in) : c(c_in) {}
- public:
- bool operator()(const value_type &lhs, const value_type &rhs) {
- return c(lhs.first, rhs.first);
- }
- };
-
- value_compare value_comp() const {
- return value_compare(comp());
- }
-
- // Operators. All others provided by ue2::totally_ordered.
-
- bool operator==(const flat_map &a) const {
- return data() == a.data();
- }
- bool operator<(const flat_map &a) const {
- return data() < a.data();
- }
-
- // Free swap function for ADL.
- friend void swap(flat_map &a, flat_map &b) {
- a.swap(b);
- }
-};
-
-} // namespace ue2
-
-namespace std {
-
-template<typename T, typename Compare, typename Allocator>
-struct hash<ue2::flat_set<T, Compare, Allocator>> {
- size_t operator()(const ue2::flat_set<T, Compare, Allocator> &f) {
- return ue2::ue2_hasher()(f);
- }
-};
-
-template<typename Key, typename T, typename Compare, typename Allocator>
-struct hash<ue2::flat_map<Key, T, Compare, Allocator>> {
- size_t operator()(const ue2::flat_map<Key, T, Compare, Allocator> &f) {
- return ue2::ue2_hasher()(f);
- }
-};
-
-} // namespace std
-
-#endif // UTIL_FLAT_CONTAINERS_H
+/*
+ * Copyright (c) 2015-2017, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef UTIL_FLAT_CONTAINERS_H
+#define UTIL_FLAT_CONTAINERS_H
+
+#include "ue2common.h"
+#include "util/hash.h"
+#include "util/operators.h"
+#include "util/small_vector.h"
+
+#include <algorithm>
+#include <iterator>
+#include <type_traits>
+#include <utility>
+
+#include <boost/iterator/iterator_facade.hpp>
+
+namespace ue2 {
+
+namespace flat_detail {
+
+// Iterator facade that wraps an underlying iterator, so that we get our
+// own iterator types.
+template <class WrappedIter, class Value>
+class iter_wrapper
+ : public boost::iterator_facade<iter_wrapper<WrappedIter, Value>, Value,
+ boost::random_access_traversal_tag> {
+public:
+ iter_wrapper() = default;
+ explicit iter_wrapper(WrappedIter it_in) : it(std::move(it_in)) {}
+
+ // Templated copy-constructor to allow for interoperable iterator and
+ // const_iterator.
+private:
+ template <class, class> friend class iter_wrapper;
+
+public:
+ template <class OtherIter, class OtherValue>
+ iter_wrapper(iter_wrapper<OtherIter, OtherValue> other,
+ typename std::enable_if<std::is_convertible<
+ OtherIter, WrappedIter>::value>::type * = nullptr)
+ : it(std::move(other.it)) {}
+
+ WrappedIter get() const { return it; }
+
+private:
+ friend class boost::iterator_core_access;
+
+ WrappedIter it;
+
+ void increment() { ++it; }
+ void decrement() { --it; }
+ void advance(size_t n) { it += n; }
+ typename std::iterator_traits<WrappedIter>::difference_type
+ distance_to(const iter_wrapper &other) const {
+ return other.it - it;
+ }
+ bool equal(const iter_wrapper &other) const { return it == other.it; }
+ Value &dereference() const { return *it; }
+};
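
The templated converting constructor above is what makes iterator and const_iterator interoperable: a wrapper around a mutable vector iterator can convert implicitly to a wrapper around a const one, but not the reverse. A minimal sketch, assuming the ue2::flat_set defined later in this header (the demo function name is illustrative):

    #include "util/flat_containers.h"

    void iter_wrapper_demo() {
        ue2::flat_set<int> s = {1, 2, 3};

        // iterator wraps storage_type::iterator, const_iterator wraps
        // storage_type::const_iterator. The enable_if on the converting
        // constructor is satisfied because the wrapped mutable iterator
        // converts to the wrapped const iterator.
        ue2::flat_set<int>::iterator it = s.begin();
        ue2::flat_set<int>::const_iterator cit = it;
        (void)cit;

        // The reverse direction (const_iterator to iterator) does not
        // compile, since the underlying const iterator converts no further.
    }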
+
+template <class T, class Compare, class Allocator>
+class flat_base {
+protected:
+ // Underlying storage is a small vector with local space for one element.
+ using storage_type = small_vector<T, 1, Allocator>;
+ using storage_alloc_type = typename storage_type::allocator_type;
+
+ // Putting our storage and comparator in a tuple allows us to make use of
+ // the empty base class optimization (if this STL implements it for
+ // std::tuple).
+ std::tuple<storage_type, Compare> storage;
+
+ flat_base(const Compare &compare, const Allocator &alloc)
+ : storage(storage_type(storage_alloc_type(alloc)), compare) {}
+
+ storage_type &data() { return std::get<0>(this->storage); }
+ const storage_type &data() const { return std::get<0>(this->storage); }
+
+ Compare &comp() { return std::get<1>(this->storage); }
+ const Compare &comp() const { return std::get<1>(this->storage); }
+
+public:
+ // Common member types.
+ using key_compare = Compare;
+
+ Allocator get_allocator() const {
+ return data().get_allocator();
+ }
+
+ key_compare key_comp() const {
+ return comp();
+ }
+
+ // Capacity.
+
+ bool empty() const { return data().empty(); }
+ size_t size() const { return data().size(); }
+ size_t max_size() const { return data().max_size(); }
+
+ // Modifiers.
+
+ void clear() {
+ data().clear();
+ }
+
+ void swap(flat_base &a) {
+ using std::swap;
+ swap(comp(), a.comp());
+ swap(data(), a.data());
+ }
+};
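
Whether the tuple trick above actually saves space depends on the standard library, as the comment itself notes. A standalone sketch that makes the intent concrete (all names here are illustrative, not part of this header):

    #include <functional>
    #include <iostream>
    #include <tuple>
    #include <vector>

    // A separate comparator member always costs at least one byte plus
    // padding, even when Compare is an empty type such as std::less<int>.
    struct with_member {
        std::vector<int> data;
        std::less<int> comp;
    };

    // Packing storage and comparator into a std::tuple lets the library
    // apply the empty base class optimization to the empty comparator.
    struct with_tuple {
        std::tuple<std::vector<int>, std::less<int>> storage;
    };

    int main() {
        // Typically prints a smaller size for with_tuple on libstdc++ and
        // libc++; the standard does not guarantee the optimization.
        std::cout << sizeof(with_member) << " vs " << sizeof(with_tuple)
                  << '\n';
        return 0;
    }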
+
+} // namespace flat_detail
+
+/**
+ * \brief Set container implemented internally as a sorted vector. Use this
+ * rather than std::set for small sets as it's faster, uses less memory and
+ * incurs less malloc time.
+ *
+ * Note: we used to use boost::flat_set, but have run into problems with all
+ * the extra machinery it instantiates.
+ */
+template <class T, class Compare = std::less<T>,
+ class Allocator = std::allocator<T>>
+class flat_set
+ : public flat_detail::flat_base<T, Compare, Allocator>,
+ public totally_ordered<flat_set<T, Compare, Allocator>> {
+ using base_type = flat_detail::flat_base<T, Compare, Allocator>;
+ using storage_type = typename base_type::storage_type;
+ using storage_iterator = typename storage_type::iterator;
+ using storage_const_iterator = typename storage_type::const_iterator;
+ using base_type::data;
+ using base_type::comp;
+
+#if defined(SMALL_VECTOR_IS_STL_VECTOR)
+ // Construct a non-const iterator from a const iterator. Used in flat_map
+ // and flat_set erase() calls to work around g++-4.8 compatibility issues.
+ storage_iterator mutable_iterator(storage_const_iterator it) {
+ return data().begin() + std::distance(data().cbegin(), it);
+ }
+#endif
+
+public:
+ // Member types.
+ using key_type = T;
+ using value_type = T;
+ using size_type = typename storage_type::size_type;
+ using difference_type = typename storage_type::difference_type;
+ using key_compare = typename base_type::key_compare;
+ using value_compare = Compare;
+ using allocator_type = Allocator;
+ using reference = value_type &;
+ using const_reference = const value_type &;
+ using allocator_traits_type = typename std::allocator_traits<Allocator>;
+ using pointer = typename allocator_traits_type::pointer;
+ using const_pointer = typename allocator_traits_type::const_pointer;
+
+ // Iterator types.
+
+ using iterator = flat_detail::iter_wrapper<typename storage_type::iterator,
+ const value_type>;
+ using const_iterator =
+ flat_detail::iter_wrapper<typename storage_type::const_iterator,
+ const value_type>;
+
+ using reverse_iterator = std::reverse_iterator<iterator>;
+ using const_reverse_iterator = std::reverse_iterator<const_iterator>;
+
+ // Constructors.
+
+ flat_set(const Compare &compare = Compare(),
+ const Allocator &alloc = Allocator())
+ : base_type(compare, alloc) {}
+
+ template <class InputIt>
+ flat_set(InputIt first, InputIt last, const Compare &compare = Compare(),
+ const Allocator &alloc = Allocator())
+ : flat_set(compare, alloc) {
+ insert(first, last);
+ }
+
+ flat_set(std::initializer_list<value_type> init,
+ const Compare &compare = Compare(),
+ const Allocator &alloc = Allocator())
+ : flat_set(compare, alloc) {
+ insert(init.begin(), init.end());
+ }
+
+ flat_set(const flat_set &) = default;
+ flat_set(flat_set &&) = default;
+ flat_set &operator=(const flat_set &) = default;
+ flat_set &operator=(flat_set &&) = default;
+
+ // Iterators.
+
+ iterator begin() { return iterator(data().begin()); }
+ const_iterator cbegin() const { return const_iterator(data().cbegin()); }
+ const_iterator begin() const { return cbegin(); }
+
+ iterator end() { return iterator(data().end()); }
+ const_iterator cend() const { return const_iterator(data().cend()); }
+ const_iterator end() const { return cend(); }
+
+ reverse_iterator rbegin() { return reverse_iterator(end()); }
+ const_reverse_iterator crbegin() const {
+ return const_reverse_iterator(cend());
+ }
+ const_reverse_iterator rbegin() const { return crbegin(); }
+
+ reverse_iterator rend() { return reverse_iterator(begin()); }
+ const_reverse_iterator crend() const {
+ return const_reverse_iterator(cbegin());
+ }
+ const_reverse_iterator rend() const { return crend(); }
+
+ // Modifiers.
+
+ std::pair<iterator, bool> insert(const value_type &value) {
+ auto it = std::lower_bound(data().begin(), data().end(), value, comp());
+ if (it == data().end() || comp()(value, *it)) {
+ return std::make_pair(iterator(data().insert(it, value)), true);
+ }
+ return std::make_pair(iterator(it), false);
+ }
+
+ iterator insert(UNUSED const_iterator hint, const value_type &value) {
+ return insert(value).first;
+ }
+
+ std::pair<iterator, bool> insert(value_type &&value) {
+ auto it = std::lower_bound(data().begin(), data().end(), value, comp());
+ if (it == data().end() || comp()(value, *it)) {
+ return std::make_pair(iterator(data().insert(it, std::move(value))),
+ true);
+ }
+ return std::make_pair(iterator(it), false);
+ }
+
+ iterator insert(UNUSED const_iterator hint, value_type &&value) {
+ return insert(value).first;
+ }
+
+ template <class InputIt>
+ void insert(InputIt first, InputIt second) {
+ for (; first != second; ++first) {
+ insert(*first);
+ }
+ }
+
+ void insert(std::initializer_list<value_type> ilist) {
+ insert(ilist.begin(), ilist.end());
+ }
+
+ template<class...Args>
+ std::pair<iterator, bool> emplace(Args&&... args) {
+ return insert(value_type(std::forward<Args>(args)...));
+ }
+
+ void erase(const_iterator pos) {
+#if defined(SMALL_VECTOR_IS_STL_VECTOR)
+ // Cope with libstdc++ 4.8's incomplete STL (it's missing C++11
+ // vector::erase(const_iterator)) by explicitly using a non-const
+ // iterator.
+ auto pos_it = mutable_iterator(pos.get());
+#else
+ auto pos_it = pos.get();
+#endif
+ data().erase(pos_it);
+ }
+
+ void erase(const_iterator first, const_iterator last) {
+#if defined(SMALL_VECTOR_IS_STL_VECTOR)
+ // As above, work around libstdc++ 4.8's incomplete C++11 support.
+ auto first_it = mutable_iterator(first.get());
+ auto last_it = mutable_iterator(last.get());
+#else
+ auto first_it = first.get();
+ auto last_it = last.get();
+#endif
+ data().erase(first_it, last_it);
+ }
+
+ void erase(const key_type &key) {
+ auto it = find(key);
+ if (it != end()) {
+ erase(it);
+ }
+ }
+
+ // Lookup.
+
+ size_type count(const value_type &value) const {
+ return find(value) != end() ? 1 : 0;
+ }
+
+ iterator find(const value_type &value) {
+ auto it = std::lower_bound(data().begin(), data().end(), value, comp());
+ if (it != data().end() && comp()(value, *it)) {
+ it = data().end();
+ }
+ return iterator(it);
+ }
+
+ const_iterator find(const value_type &value) const {
+ auto it = std::lower_bound(data().begin(), data().end(), value, comp());
+ if (it != data().end() && comp()(value, *it)) {
+ it = data().end();
+ }
+ return const_iterator(it);
+ }
+
+ // Observers.
+
+ value_compare value_comp() const {
+ return comp();
+ }
+
+ // Operators. All others provided by ue2::totally_ordered.
+
+ bool operator==(const flat_set &a) const {
+ return data() == a.data();
+ }
+ bool operator<(const flat_set &a) const {
+ return data() < a.data();
+ }
+
+ // Free swap function for ADL.
+ friend void swap(flat_set &a, flat_set &b) {
+ a.swap(b);
+ }
+};
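
A short usage sketch for the class above (the include path assumes the Hyperscan source tree; the function name is illustrative). The underlying small_vector stays sorted, so each insert is a binary search plus a vector insert, and iteration yields elements in ascending order:

    #include "util/flat_containers.h"
    #include <cassert>

    void flat_set_demo() {
        ue2::flat_set<int> s;
        auto rv = s.insert(3);
        assert(rv.second);       // newly inserted
        s.insert({1, 2, 3});     // the duplicate 3 is ignored
        assert(s.size() == 3);
        assert(s.count(2) == 1);

        // Elements are stored sorted, so iteration is in ascending order.
        int expected = 1;
        for (int v : s) {
            assert(v == expected++);
        }

        s.erase(2);
        assert(s.find(2) == s.end());
    }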
+
+/**
+ * \brief Map container implemented internally as a sorted vector. Use this
+ * rather than std::map for small maps as it's faster, uses less memory and
+ * incurs less malloc time.
+ *
+ * Note: we used to use boost::flat_map, but have run into problems with all
+ * the extra machinery it instantiates.
+ *
+ * Note: ue2::flat_map does NOT provide mutable iterators, as (given the way
+ * the data is stored) it is difficult to provide a real mutable iterator that
+ * wraps std::pair<const Key, T>. Instead, all iterators are const, and you
+ * should use flat_map::at() or flat_map::operator[] to mutate the contents of
+ * the container.
+ */
+template <class Key, class T, class Compare = std::less<Key>,
+ class Allocator = std::allocator<std::pair<Key, T>>>
+class flat_map
+ : public flat_detail::flat_base<std::pair<Key, T>, Compare, Allocator>,
+ public totally_ordered<flat_map<Key, T, Compare, Allocator>> {
+public:
+ // Member types.
+ using key_type = Key;
+ using mapped_type = T;
+ using value_type = std::pair<const Key, T>;
+
+private:
+ using base_type =
+ flat_detail::flat_base<std::pair<Key, T>, Compare, Allocator>;
+ using keyval_storage_type = std::pair<key_type, mapped_type>;
+ using storage_type = typename base_type::storage_type;
+ using storage_iterator = typename storage_type::iterator;
+ using storage_const_iterator = typename storage_type::const_iterator;
+ using base_type::data;
+ using base_type::comp;
+
+#if defined(SMALL_VECTOR_IS_STL_VECTOR)
+ // Construct a non-const iterator from a const iterator. Used in flat_map
+ // and flat_set erase() calls to work around g++-4.8 compatibility issues.
+ storage_iterator mutable_iterator(storage_const_iterator it) {
+ return data().begin() + std::distance(data().cbegin(), it);
+ }
+#endif
+
+public:
+    // More member types.
+ using size_type = typename storage_type::size_type;
+ using difference_type = typename storage_type::difference_type;
+ using key_compare = typename base_type::key_compare;
+ using allocator_type = Allocator;
+ using reference = value_type &;
+ using const_reference = const value_type &;
+ using allocator_traits_type = typename std::allocator_traits<Allocator>;
+ using pointer = typename allocator_traits_type::pointer;
+ using const_pointer = typename allocator_traits_type::const_pointer;
+
+public:
+ using const_iterator =
+ flat_detail::iter_wrapper<typename storage_type::const_iterator,
+ const keyval_storage_type>;
+
+ using const_reverse_iterator = std::reverse_iterator<const_iterator>;
+
+ // All iterators are const for flat_map.
+ using iterator = const_iterator;
+ using reverse_iterator = const_reverse_iterator;
+
+ // Constructors.
+
+ flat_map(const Compare &compare = Compare(),
+ const Allocator &alloc = Allocator())
+ : base_type(compare, alloc) {}
+
+ template <class InputIt>
+ flat_map(InputIt first, InputIt last, const Compare &compare = Compare(),
+ const Allocator &alloc = Allocator())
+ : flat_map(compare, alloc) {
+ insert(first, last);
+ }
+
+ flat_map(std::initializer_list<value_type> init,
+ const Compare &compare = Compare(),
+ const Allocator &alloc = Allocator())
+ : flat_map(compare, alloc) {
+ insert(init.begin(), init.end());
+ }
+
+ flat_map(const flat_map &) = default;
+ flat_map(flat_map &&) = default;
+ flat_map &operator=(const flat_map &) = default;
+ flat_map &operator=(flat_map &&) = default;
+
+ // Iterators.
+
+ const_iterator cbegin() const { return const_iterator(data().cbegin()); }
+ const_iterator begin() const { return cbegin(); }
+
+ const_iterator cend() const { return const_iterator(data().cend()); }
+ const_iterator end() const { return cend(); }
+
+ const_reverse_iterator crbegin() const {
+ return const_reverse_iterator(cend());
+ }
+ const_reverse_iterator rbegin() const { return crbegin(); }
+
+ const_reverse_iterator crend() const {
+ return const_reverse_iterator(cbegin());
+ }
+ const_reverse_iterator rend() const { return crend(); }
+
+private:
+ storage_iterator data_lower_bound(const key_type &key) {
+ return std::lower_bound(
+ data().begin(), data().end(), key,
+ [&](const keyval_storage_type &elem, const key_type &k) {
+ return comp()(elem.first, k);
+ });
+ }
+
+ storage_const_iterator
+ data_lower_bound(const key_type &key) const {
+ return std::lower_bound(
+ data().begin(), data().end(), key,
+ [&](const keyval_storage_type &elem, const key_type &k) {
+ return comp()(elem.first, k);
+ });
+ }
+
+ std::pair<storage_iterator, bool> data_insert(const value_type &value) {
+ auto it = data_lower_bound(value.first);
+ if (it == data().end() || comp()(value.first, it->first)) {
+ return std::make_pair(data().insert(it, value), true);
+ }
+ return std::make_pair(it, false);
+ }
+
+ std::pair<storage_iterator, bool> data_insert(value_type &&value) {
+ auto it = data_lower_bound(value.first);
+ if (it == data().end() || comp()(value.first, it->first)) {
+ return std::make_pair(data().insert(it, std::move(value)), true);
+ }
+ return std::make_pair(it, false);
+ }
+
+ storage_iterator data_find(const key_type &key) {
+ auto it = data_lower_bound(key);
+ if (it != data().end() && comp()(key, it->first)) {
+ it = data().end();
+ }
+ return it;
+ }
+
+ storage_const_iterator data_find(const key_type &key) const {
+ auto it = data_lower_bound(key);
+ if (it != data().end() && comp()(key, it->first)) {
+ it = data().end();
+ }
+ return it;
+ }
+
+public:
+ // Modifiers.
+
+ std::pair<iterator, bool> insert(const value_type &value) {
+ auto rv = data_insert(value);
+ return std::make_pair(iterator(rv.first), rv.second);
+ }
+
+ std::pair<iterator, bool> insert(value_type &&value) {
+ auto rv = data_insert(std::move(value));
+ return std::make_pair(iterator(rv.first), rv.second);
+ }
+
+ template <class InputIt>
+ void insert(InputIt first, InputIt second) {
+ for (; first != second; ++first) {
+ insert(*first);
+ }
+ }
+
+ void insert(std::initializer_list<value_type> ilist) {
+ insert(ilist.begin(), ilist.end());
+ }
+
+ template<class...Args>
+ std::pair<iterator, bool> emplace(Args&&... args) {
+ return insert(value_type(std::forward<Args>(args)...));
+ }
+
+ void erase(const_iterator pos) {
+#if defined(SMALL_VECTOR_IS_STL_VECTOR)
+ // Cope with libstdc++ 4.8's incomplete STL (it's missing C++11
+ // vector::erase(const_iterator)) by explicitly using a non-const
+ // iterator.
+ auto pos_it = mutable_iterator(pos.get());
+#else
+ auto pos_it = pos.get();
+#endif
+ data().erase(pos_it);
+ }
+
+ void erase(const_iterator first, const_iterator last) {
+#if defined(SMALL_VECTOR_IS_STL_VECTOR)
+ // As above, work around libstdc++ 4.8's incomplete C++11 support.
+ auto first_it = mutable_iterator(first.get());
+ auto last_it = mutable_iterator(last.get());
+#else
+ auto first_it = first.get();
+ auto last_it = last.get();
+#endif
+ data().erase(first_it, last_it);
+ }
+
+ void erase(const key_type &key) {
+ auto it = find(key);
+ if (it != end()) {
+ erase(it);
+ }
+ }
+
+ // Lookup.
+
+ size_type count(const key_type &key) const {
+ return find(key) != end() ? 1 : 0;
+ }
+
+ const_iterator find(const key_type &key) const {
+ return const_iterator(data_find(key));
+ }
+
+ // Element access.
+
+ mapped_type &at(const key_type &key) {
+ auto it = data_find(key);
+ if (it == data().end()) {
+ throw std::out_of_range("element not found");
+ }
+ return it->second;
+ }
+
+ const mapped_type &at(const key_type &key) const {
+ auto it = data_find(key);
+ if (it == data().end()) {
+ throw std::out_of_range("element not found");
+ }
+ return it->second;
+ }
+
+ mapped_type &operator[](const key_type &key) {
+ auto p = data_insert(value_type(key, mapped_type()));
+ return p.first->second;
+ }
+
+ // Observers.
+
+ class value_compare {
+ friend class flat_map;
+ protected:
+ Compare c;
+ value_compare(Compare c_in) : c(c_in) {}
+ public:
+ bool operator()(const value_type &lhs, const value_type &rhs) {
+ return c(lhs.first, rhs.first);
+ }
+ };
+
+ value_compare value_comp() const {
+ return value_compare(comp());
+ }
+
+ // Operators. All others provided by ue2::totally_ordered.
+
+ bool operator==(const flat_map &a) const {
+ return data() == a.data();
+ }
+ bool operator<(const flat_map &a) const {
+ return data() < a.data();
+ }
+
+ // Free swap function for ADL.
+ friend void swap(flat_map &a, flat_map &b) {
+ a.swap(b);
+ }
+};
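
A usage sketch for the class above, mirroring the note in its doc comment: iterators are read-only, so mutation goes through at() or operator[] (include path assumes the source tree; names are illustrative):

    #include "util/flat_containers.h"
    #include <cassert>
    #include <string>

    void flat_map_demo() {
        ue2::flat_map<std::string, int> m;
        m["one"] = 1;          // operator[] default-constructs, then assigns
        m.insert({"two", 2});
        assert(m.at("one") == 1);

        // All iterators are const: the sorted key/value pairs can be read,
        // but writes must go through at() or operator[].
        for (const auto &kv : m) {
            assert(!kv.first.empty());
        }

        m.at("two") += 40;
        assert(m.at("two") == 42);
    }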
+
+} // namespace ue2
+
+namespace std {
+
+template<typename T, typename Compare, typename Allocator>
+struct hash<ue2::flat_set<T, Compare, Allocator>> {
+ size_t operator()(const ue2::flat_set<T, Compare, Allocator> &f) {
+ return ue2::ue2_hasher()(f);
+ }
+};
+
+template<typename Key, typename T, typename Compare, typename Allocator>
+struct hash<ue2::flat_map<Key, T, Compare, Allocator>> {
+ size_t operator()(const ue2::flat_map<Key, T, Compare, Allocator> &f) {
+ return ue2::ue2_hasher()(f);
+ }
+};
+
+} // namespace std
+
+#endif // UTIL_FLAT_CONTAINERS_H
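
The std::hash specializations above delegate to ue2_hasher, which hashes the sorted element sequence, so two containers with equal contents hash equally regardless of insertion order. A sketch (names illustrative):

    #include "util/flat_containers.h"
    #include <cassert>

    void flat_hash_demo() {
        ue2::flat_set<int> a = {1, 2, 3};
        ue2::flat_set<int> b = {3, 2, 1}; // same contents once sorted

        auto ha = std::hash<ue2::flat_set<int>>()(a);
        auto hb = std::hash<ue2::flat_set<int>>()(b);
        assert(ha == hb);
    }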
diff --git a/contrib/libs/hyperscan/src/util/graph.h b/contrib/libs/hyperscan/src/util/graph.h
index 054269a726..3e18dae552 100644
--- a/contrib/libs/hyperscan/src/util/graph.h
+++ b/contrib/libs/hyperscan/src/util/graph.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -35,26 +35,26 @@
#include "container.h"
#include "ue2common.h"
-#include "util/flat_containers.h"
+#include "util/flat_containers.h"
#include "util/graph_range.h"
-#include "util/unordered.h"
+#include "util/unordered.h"
#include <boost/graph/depth_first_search.hpp>
-#include <boost/graph/strong_components.hpp>
-#include <boost/range/adaptor/map.hpp>
-
-#include <algorithm>
-#include <map>
-#include <set>
-#include <utility>
-#include <vector>
-
+#include <boost/graph/strong_components.hpp>
+#include <boost/range/adaptor/map.hpp>
+
+#include <algorithm>
+#include <map>
+#include <set>
+#include <utility>
+#include <vector>
+
namespace ue2 {
/** \brief True if the given vertex has no out-edges. */
template<class Graph>
bool isLeafNode(const typename Graph::vertex_descriptor& v, const Graph& g) {
- return out_degree(v, g) == 0;
+ return out_degree(v, g) == 0;
}
/** \brief True if vertex \a v has an edge to itself. */
@@ -92,7 +92,7 @@ size_t proper_in_degree(const typename Graph::vertex_descriptor &v,
/** \brief True if vertex \a v has at least one successor. */
template<class Graph>
bool has_successor(const typename Graph::vertex_descriptor &v, const Graph &g) {
- return out_degree(v, g) > 0;
+ return out_degree(v, g) > 0;
}
/** \brief True if vertex \a v has at least one successor other than itself. */
@@ -116,7 +116,7 @@ bool has_proper_successor(const typename Graph::vertex_descriptor &v,
template<class Graph, class SourceCont, class OutCont>
void find_reachable(const Graph &g, const SourceCont &sources, OutCont *out) {
using vertex_descriptor = typename Graph::vertex_descriptor;
- std::unordered_map<vertex_descriptor, boost::default_color_type> colours;
+ std::unordered_map<vertex_descriptor, boost::default_color_type> colours;
for (auto v : sources) {
boost::depth_first_visit(g, v,
@@ -134,7 +134,7 @@ void find_reachable(const Graph &g, const SourceCont &sources, OutCont *out) {
template<class Graph, class SourceCont, class OutCont>
void find_unreachable(const Graph &g, const SourceCont &sources, OutCont *out) {
using vertex_descriptor = typename Graph::vertex_descriptor;
- std::unordered_set<vertex_descriptor> reachable;
+ std::unordered_set<vertex_descriptor> reachable;
find_reachable(g, sources, &reachable);
@@ -146,46 +146,46 @@ void find_unreachable(const Graph &g, const SourceCont &sources, OutCont *out) {
}
template <class Graph>
-flat_set<typename Graph::vertex_descriptor>
-find_vertices_in_cycles(const Graph &g) {
- using vertex_descriptor = typename Graph::vertex_descriptor;
-
- std::map<vertex_descriptor, size_t> comp_map;
-
- boost::strong_components(g, boost::make_assoc_property_map(comp_map));
-
- std::map<size_t, std::vector<vertex_descriptor>> comps;
-
- for (const auto &e : comp_map) {
- comps[e.second].push_back(e.first);
- }
-
- flat_set<vertex_descriptor> rv;
-
- for (const auto &comp : comps | boost::adaptors::map_values) {
-        /* Every vertex in a strongly connected component is reachable from
-         * every other vertex in the component. A vertex is therefore
-         * involved in a cycle if it is in a strongly connected component
-         * with more than one vertex, or if it is the only vertex and it has
-         * a self loop. */
- assert(!comp.empty());
- if (comp.size() > 1) {
- insert(&rv, comp);
+flat_set<typename Graph::vertex_descriptor>
+find_vertices_in_cycles(const Graph &g) {
+ using vertex_descriptor = typename Graph::vertex_descriptor;
+
+ std::map<vertex_descriptor, size_t> comp_map;
+
+ boost::strong_components(g, boost::make_assoc_property_map(comp_map));
+
+ std::map<size_t, std::vector<vertex_descriptor>> comps;
+
+ for (const auto &e : comp_map) {
+ comps[e.second].push_back(e.first);
+ }
+
+ flat_set<vertex_descriptor> rv;
+
+ for (const auto &comp : comps | boost::adaptors::map_values) {
+        /* Every vertex in a strongly connected component is reachable from
+         * every other vertex in the component. A vertex is therefore
+         * involved in a cycle if it is in a strongly connected component
+         * with more than one vertex, or if it is the only vertex and it has
+         * a self loop. */
+ assert(!comp.empty());
+ if (comp.size() > 1) {
+ insert(&rv, comp);
continue;
- }
- vertex_descriptor v = *comp.begin();
- if (hasSelfLoop(v, g)) {
- rv.insert(v);
- }
- }
-
- return rv;
-}
-
-template <class Graph>
+ }
+ vertex_descriptor v = *comp.begin();
+ if (hasSelfLoop(v, g)) {
+ rv.insert(v);
+ }
+ }
+
+ return rv;
+}
+
+template <class Graph>
bool has_parallel_edge(const Graph &g) {
using vertex_descriptor = typename Graph::vertex_descriptor;
- ue2_unordered_set<std::pair<vertex_descriptor, vertex_descriptor>> seen;
-
+ ue2_unordered_set<std::pair<vertex_descriptor, vertex_descriptor>> seen;
+
for (const auto &e : edges_range(g)) {
auto u = source(e, g);
auto v = target(e, g);
@@ -222,45 +222,45 @@ bool is_dag(const Graph &g, bool ignore_self_loops = false) {
return true;
}
-template<typename Cont>
-class vertex_recorder : public boost::default_dfs_visitor {
-public:
- explicit vertex_recorder(Cont &o) : out(o) {}
- template<class G>
- void discover_vertex(typename Cont::value_type v, const G &) {
- out.insert(v);
- }
- Cont &out;
-};
-
-template<typename Cont>
-vertex_recorder<Cont> make_vertex_recorder(Cont &o) {
- return vertex_recorder<Cont>(o);
-}
-
-/**
- * \brief A vertex recorder visitor that sets the bits in the given bitset
- * type (e.g. boost::dynamic_bitset) corresponding to the indices of the
- * vertices encountered.
- */
-template<typename Bitset>
-class vertex_index_bitset_recorder : public boost::default_dfs_visitor {
-public:
- explicit vertex_index_bitset_recorder(Bitset &o) : out(o) {}
- template<class Graph>
- void discover_vertex(typename Graph::vertex_descriptor v, const Graph &g) {
- assert(g[v].index < out.size());
- out.set(g[v].index);
- }
- Bitset &out;
-};
-
-template<typename Bitset>
-vertex_index_bitset_recorder<Bitset>
-make_vertex_index_bitset_recorder(Bitset &o) {
- return vertex_index_bitset_recorder<Bitset>(o);
-}
-
+template<typename Cont>
+class vertex_recorder : public boost::default_dfs_visitor {
+public:
+ explicit vertex_recorder(Cont &o) : out(o) {}
+ template<class G>
+ void discover_vertex(typename Cont::value_type v, const G &) {
+ out.insert(v);
+ }
+ Cont &out;
+};
+
+template<typename Cont>
+vertex_recorder<Cont> make_vertex_recorder(Cont &o) {
+ return vertex_recorder<Cont>(o);
+}
+
+/**
+ * \brief A vertex recorder visitor that sets the bits in the given bitset
+ * type (e.g. boost::dynamic_bitset) corresponding to the indices of the
+ * vertices encountered.
+ */
+template<typename Bitset>
+class vertex_index_bitset_recorder : public boost::default_dfs_visitor {
+public:
+ explicit vertex_index_bitset_recorder(Bitset &o) : out(o) {}
+ template<class Graph>
+ void discover_vertex(typename Graph::vertex_descriptor v, const Graph &g) {
+ assert(g[v].index < out.size());
+ out.set(g[v].index);
+ }
+ Bitset &out;
+};
+
+template<typename Bitset>
+vertex_index_bitset_recorder<Bitset>
+make_vertex_index_bitset_recorder(Bitset &o) {
+ return vertex_index_bitset_recorder<Bitset>(o);
+}
+
template <class Graph>
std::pair<typename Graph::edge_descriptor, bool>
add_edge_if_not_present(typename Graph::vertex_descriptor u,
@@ -283,40 +283,40 @@ std::pair<typename Graph::edge_descriptor, bool> add_edge_if_not_present(
return e;
}
-#ifndef NDEBUG
-
-template <class Graph>
-bool hasCorrectlyNumberedVertices(const Graph &g) {
- auto count = num_vertices(g);
- std::vector<bool> ids(count, false);
- for (auto v : vertices_range(g)) {
- auto id = g[v].index;
- if (id >= count || ids[id]) {
- return false; // duplicate
- }
- ids[id] = true;
- }
- return std::find(ids.begin(), ids.end(), false) == ids.end()
- && count == vertex_index_upper_bound(g);
-}
-
-template <class Graph>
-bool hasCorrectlyNumberedEdges(const Graph &g) {
- auto count = num_edges(g);
- std::vector<bool> ids(count, false);
- for (const auto &e : edges_range(g)) {
- auto id = g[e].index;
- if (id >= count || ids[id]) {
- return false; // duplicate
- }
- ids[id] = true;
- }
- return std::find(ids.begin(), ids.end(), false) == ids.end()
- && count == edge_index_upper_bound(g);
-}
-
-#endif
-
+#ifndef NDEBUG
+
+template <class Graph>
+bool hasCorrectlyNumberedVertices(const Graph &g) {
+ auto count = num_vertices(g);
+ std::vector<bool> ids(count, false);
+ for (auto v : vertices_range(g)) {
+ auto id = g[v].index;
+ if (id >= count || ids[id]) {
+ return false; // duplicate
+ }
+ ids[id] = true;
+ }
+ return std::find(ids.begin(), ids.end(), false) == ids.end()
+ && count == vertex_index_upper_bound(g);
+}
+
+template <class Graph>
+bool hasCorrectlyNumberedEdges(const Graph &g) {
+ auto count = num_edges(g);
+ std::vector<bool> ids(count, false);
+ for (const auto &e : edges_range(g)) {
+ auto id = g[e].index;
+ if (id >= count || ids[id]) {
+ return false; // duplicate
+ }
+ ids[id] = true;
+ }
+ return std::find(ids.begin(), ids.end(), false) == ids.end()
+ && count == edge_index_upper_bound(g);
+}
+
+#endif
+
} // namespace ue2
#endif // UTIL_GRAPH_H
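
A sketch of the cycle-detection helper above on a toy BGL graph, following the reasoning in its comment: any strongly connected component with more than one vertex, or a single vertex with a self loop, is in a cycle (assumes Boost and the Hyperscan tree; names illustrative):

    #include "util/graph.h"
    #include <boost/graph/adjacency_list.hpp>
    #include <cassert>

    void cycles_demo() {
        using Graph = boost::adjacency_list<boost::vecS, boost::vecS,
                                            boost::directedS>;
        Graph g(4);
        add_edge(0, 1, g);
        add_edge(1, 2, g);
        add_edge(2, 0, g); // 0 -> 1 -> 2 -> 0 is a cycle
        add_edge(2, 3, g); // 3 dangles off the cycle

        // Vertices 0, 1 and 2 form one strongly connected component of
        // size three; vertex 3 is a singleton component with no self loop.
        auto in_cycles = ue2::find_vertices_in_cycles(g);
        assert(in_cycles.size() == 3);
        assert(!in_cycles.count(3));
    }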
diff --git a/contrib/libs/hyperscan/src/util/graph_range.h b/contrib/libs/hyperscan/src/util/graph_range.h
index ae3bd9bf66..3df06911a7 100644
--- a/contrib/libs/hyperscan/src/util/graph_range.h
+++ b/contrib/libs/hyperscan/src/util/graph_range.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2016, Intel Corporation
+ * Copyright (c) 2015-2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/contrib/libs/hyperscan/src/util/graph_small_color_map.h b/contrib/libs/hyperscan/src/util/graph_small_color_map.h
index 036d4ca772..249b71531c 100644
--- a/contrib/libs/hyperscan/src/util/graph_small_color_map.h
+++ b/contrib/libs/hyperscan/src/util/graph_small_color_map.h
@@ -1,119 +1,119 @@
-/*
+/*
* Copyright (c) 2017-2018, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * \brief Small Color Map: implements a property map designed to represent
- * colors using minimal memory (two bits per index).
- *
- * This is based on the Boost BGL two_bit_color_map, but provides some extra
- * functionality (such as a fill operation).
- */
-
-#ifndef GRAPH_SMALL_COLOR_MAP_H
-#define GRAPH_SMALL_COLOR_MAP_H
-
-#include "ue2common.h"
-
-#include <cstring>
-#include <memory>
-#include <vector>
-
-namespace ue2 {
-
-enum class small_color : u8 {
- white = 0,
- gray = 1,
- black = 2
- // Note: we have room for one more colour.
-};
-
-} // namespace ue2
-
-namespace boost {
-
-/** \brief Specialisation of boost::color_traits for small_color. */
-template<>
-struct color_traits<ue2::small_color> {
- static ue2::small_color white() { return ue2::small_color::white; }
- static ue2::small_color gray() { return ue2::small_color::gray; }
- static ue2::small_color black() { return ue2::small_color::black; }
-};
-
-} // namespace boost
-
-namespace ue2 {
-
-static constexpr u8 fill_lut[] = {
- 0, // white
- 0x55, // gray
- 0xaa, // black
-};
-
-/**
- * \brief Small Color Map: implements a property map designed to represent
- * colors using minimal memory (two bits per index).
- *
- * If your graph type provides an index map in get(vertex_index, g), you can
- * use make_small_color_map() to construct this.
- */
-template<typename IndexMap>
-class small_color_map {
- size_t n;
- IndexMap index_map;
-
- // This class is passed by value into (potentially recursive) BGL
- // algorithms, so we use a shared_ptr to keep the copy lightweight and
- // ensure that data is correctly destroyed.
- std::shared_ptr<std::vector<u8>> data;
-
- static constexpr size_t bit_size = 2;
- static constexpr size_t entries_per_byte = (sizeof(u8) * 8) / bit_size;
- static constexpr u8 bit_mask = (1U << bit_size) - 1;
-
-public:
- using key_type = typename boost::property_traits<IndexMap>::key_type;
- using value_type = small_color;
- using reference = small_color;
- using category = boost::read_write_property_map_tag;
-
- small_color_map(size_t n_in, const IndexMap &index_map_in)
- : n(n_in), index_map(index_map_in) {
- size_t num_bytes = (n + entries_per_byte - 1) / entries_per_byte;
- data = std::make_shared<std::vector<unsigned char>>(num_bytes);
- fill(small_color::white);
- }
-
- void fill(small_color color) {
- assert(static_cast<u8>(color) < sizeof(fill_lut));
- u8 val = fill_lut[static_cast<u8>(color)];
- std::memset(data->data(), val, data->size());
- }
-
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * \brief Small Color Map: implements a property map designed to represent
+ * colors using minimal memory (two bits per index).
+ *
+ * This is based on the Boost BGL two_bit_color_map, but provides some extra
+ * functionality (such as a fill operation).
+ */
+
+#ifndef GRAPH_SMALL_COLOR_MAP_H
+#define GRAPH_SMALL_COLOR_MAP_H
+
+#include "ue2common.h"
+
+#include <cstring>
+#include <memory>
+#include <vector>
+
+namespace ue2 {
+
+enum class small_color : u8 {
+ white = 0,
+ gray = 1,
+ black = 2
+ // Note: we have room for one more colour.
+};
+
+} // namespace ue2
+
+namespace boost {
+
+/** \brief Specialisation of boost::color_traits for small_color. */
+template<>
+struct color_traits<ue2::small_color> {
+ static ue2::small_color white() { return ue2::small_color::white; }
+ static ue2::small_color gray() { return ue2::small_color::gray; }
+ static ue2::small_color black() { return ue2::small_color::black; }
+};
+
+} // namespace boost
+
+namespace ue2 {
+
+static constexpr u8 fill_lut[] = {
+ 0, // white
+ 0x55, // gray
+ 0xaa, // black
+};
+
+/**
+ * \brief Small Color Map: implements a property map designed to represent
+ * colors using minimal memory (two bits per index).
+ *
+ * If your graph type provides an index map in get(vertex_index, g), you can
+ * use make_small_color_map() to construct this.
+ */
+template<typename IndexMap>
+class small_color_map {
+ size_t n;
+ IndexMap index_map;
+
+ // This class is passed by value into (potentially recursive) BGL
+ // algorithms, so we use a shared_ptr to keep the copy lightweight and
+ // ensure that data is correctly destroyed.
+ std::shared_ptr<std::vector<u8>> data;
+
+ static constexpr size_t bit_size = 2;
+ static constexpr size_t entries_per_byte = (sizeof(u8) * 8) / bit_size;
+ static constexpr u8 bit_mask = (1U << bit_size) - 1;
+
+public:
+ using key_type = typename boost::property_traits<IndexMap>::key_type;
+ using value_type = small_color;
+ using reference = small_color;
+ using category = boost::read_write_property_map_tag;
+
+ small_color_map(size_t n_in, const IndexMap &index_map_in)
+ : n(n_in), index_map(index_map_in) {
+ size_t num_bytes = (n + entries_per_byte - 1) / entries_per_byte;
+ data = std::make_shared<std::vector<unsigned char>>(num_bytes);
+ fill(small_color::white);
+ }
+
+ void fill(small_color color) {
+ assert(static_cast<u8>(color) < sizeof(fill_lut));
+ u8 val = fill_lut[static_cast<u8>(color)];
+ std::memset(data->data(), val, data->size());
+ }
+
size_t count(small_color color) const {
assert(static_cast<u8>(color) < sizeof(fill_lut));
size_t num = 0;
@@ -129,48 +129,48 @@ public:
return num;
}
- small_color get_impl(key_type key) const {
- auto i = get(index_map, key);
- assert(i < n);
- size_t byte = i / entries_per_byte;
- assert(byte < data->size());
- size_t bit = (i % entries_per_byte) * bit_size;
- u8 val = ((*data)[byte] >> bit) & bit_mask;
- return static_cast<small_color>(val);
- }
-
- void put_impl(key_type key, small_color color) {
- auto i = get(index_map, key);
- assert(i < n);
- size_t byte = i / entries_per_byte;
- assert(byte < data->size());
- size_t bit = (i % entries_per_byte) * bit_size;
- auto &block = (*data)[byte];
- u8 val = static_cast<u8>(color);
- block = (block & ~(bit_mask << bit)) | (val << bit);
- }
-};
-
-template<typename IndexMap>
-small_color get(const small_color_map<IndexMap> &color_map,
- typename boost::property_traits<IndexMap>::key_type key) {
- return color_map.get_impl(key);
-}
-
-template<typename IndexMap>
-void put(small_color_map<IndexMap> &color_map,
- typename boost::property_traits<IndexMap>::key_type key,
- small_color val) {
- color_map.put_impl(key, val);
-}
-
-template<typename Graph>
-auto make_small_color_map(const Graph &g)
- -> small_color_map<decltype(get(vertex_index, g))> {
- return small_color_map<decltype(get(vertex_index, g))>(
- num_vertices(g), get(vertex_index, g));
-}
-
-} // namespace ue2
-
-#endif // GRAPH_SMALL_COLOR_MAP_H
+ small_color get_impl(key_type key) const {
+ auto i = get(index_map, key);
+ assert(i < n);
+ size_t byte = i / entries_per_byte;
+ assert(byte < data->size());
+ size_t bit = (i % entries_per_byte) * bit_size;
+ u8 val = ((*data)[byte] >> bit) & bit_mask;
+ return static_cast<small_color>(val);
+ }
+
+ void put_impl(key_type key, small_color color) {
+ auto i = get(index_map, key);
+ assert(i < n);
+ size_t byte = i / entries_per_byte;
+ assert(byte < data->size());
+ size_t bit = (i % entries_per_byte) * bit_size;
+ auto &block = (*data)[byte];
+ u8 val = static_cast<u8>(color);
+ block = (block & ~(bit_mask << bit)) | (val << bit);
+ }
+};
+
+template<typename IndexMap>
+small_color get(const small_color_map<IndexMap> &color_map,
+ typename boost::property_traits<IndexMap>::key_type key) {
+ return color_map.get_impl(key);
+}
+
+template<typename IndexMap>
+void put(small_color_map<IndexMap> &color_map,
+ typename boost::property_traits<IndexMap>::key_type key,
+ small_color val) {
+ color_map.put_impl(key, val);
+}
+
+template<typename Graph>
+auto make_small_color_map(const Graph &g)
+ -> small_color_map<decltype(get(vertex_index, g))> {
+ return small_color_map<decltype(get(vertex_index, g))>(
+ num_vertices(g), get(vertex_index, g));
+}
+
+} // namespace ue2
+
+#endif // GRAPH_SMALL_COLOR_MAP_H
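
A sketch of the map above in action (assumes Boost and the source tree; names illustrative). One hundred two-bit entries pack into 25 bytes, versus one enum (typically four bytes) per vertex for a std::vector of boost::default_color_type:

    #include "util/graph_small_color_map.h"
    #include <boost/graph/adjacency_list.hpp>
    #include <boost/graph/depth_first_search.hpp>
    #include <cassert>

    void color_map_demo() {
        using Graph = boost::adjacency_list<boost::vecS, boost::vecS,
                                            boost::directedS>;
        Graph g(100);

        auto colors = ue2::make_small_color_map(g);
        assert(colors.count(ue2::small_color::white) == 100);

        // put/get pack each color into two bits of the shared byte vector.
        put(colors, 42, ue2::small_color::gray);
        assert(get(colors, 42) == ue2::small_color::gray);

        // The boost::color_traits specialisation lets the map drop
        // straight into BGL algorithms as a ColorMap.
        boost::depth_first_visit(g, 0, boost::default_dfs_visitor(),
                                 colors);
        assert(get(colors, 0) == ue2::small_color::black);
    }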
diff --git a/contrib/libs/hyperscan/src/util/hash.h b/contrib/libs/hyperscan/src/util/hash.h
index 5cd4cbd29b..60bc670abb 100644
--- a/contrib/libs/hyperscan/src/util/hash.h
+++ b/contrib/libs/hyperscan/src/util/hash.h
@@ -1,207 +1,207 @@
-/*
- * Copyright (c) 2016-2017, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * \file
- * \brief Hashing utility functions.
- */
-
-#ifndef UTIL_HASH_H
-#define UTIL_HASH_H
-
-#include <functional>
-#include <string>
-#include <type_traits>
-#include <utility>
-
-namespace ue2 {
-
-namespace hash_detail {
-
-inline
-void hash_combine_impl(size_t &seed, size_t value) {
- // Note: constants explicitly truncated on 32-bit platforms.
- const size_t a = (size_t)0x0b4e0ef37bc32127ULL;
- const size_t b = (size_t)0x318f07b0c8eb9be9ULL;
- seed ^= value * a;
- seed += b;
-}
-
-/** \brief Helper that determines whether std::begin() exists for T. */
-template<typename T>
-struct is_container_check {
-private:
- template<typename C>
- static auto has_begin_function(const C &obj) -> decltype(std::begin(obj)) {
- return std::begin(obj);
- }
- static void has_begin_function(...) {
- return;
- }
- using has_begin_type = decltype(has_begin_function(std::declval<T>()));
-
-public:
- static const bool value = !std::is_void<has_begin_type>::value;
-};
-
-/** \brief Type trait to enable on whether T is a container. */
-template<typename T>
-struct is_container
- : public ::std::integral_constant<bool, is_container_check<T>::value> {};
-
-/** \brief Helper that determines whether T::hash() exists. */
-template<typename T>
-struct has_hash_member_check {
-private:
- template<typename C>
- static auto has_hash_member_function(const C &obj) -> decltype(obj.hash()) {
- return obj.hash();
- }
- static void has_hash_member_function(...) {
- return;
- }
- using has_hash = decltype(has_hash_member_function(std::declval<T>()));
-
-public:
- static const bool value = !std::is_void<has_hash>::value;
-};
-
-/** \brief Type trait to enable on whether T::hash() exists. */
-template<typename T>
-struct has_hash_member
- : public ::std::integral_constant<bool, has_hash_member_check<T>::value> {};
-
-/** \brief Default hash: falls back to std::hash. */
-template<typename T, typename Enable = void>
-struct ue2_hash {
- using decayed_type = typename std::decay<T>::type;
- size_t operator()(const T &obj) const {
- return std::hash<decayed_type>()(obj);
- }
-};
-
-/** \brief Hash for std::pair. */
-template<typename A, typename B>
-struct ue2_hash<std::pair<A, B>, void> {
- size_t operator()(const std::pair<A, B> &p) const {
- size_t v = 0;
- hash_combine_impl(v, ue2_hash<A>()(p.first));
- hash_combine_impl(v, ue2_hash<B>()(p.second));
- return v;
- }
-};
-
-/** \brief Hash for any type that has a hash() member function. */
-template<typename T>
-struct ue2_hash<T, typename std::enable_if<has_hash_member<T>::value>::type> {
- size_t operator()(const T &obj) const {
- return obj.hash();
- }
-};
-
-/**
- * \brief Hash for any container type that supports std::begin().
- *
- * We exempt std::string as std::hash<std::string> is provided and quicker.
- */
-template<typename T>
-struct ue2_hash<T, typename std::enable_if<
- is_container<T>::value &&
- !std::is_same<typename std::decay<T>::type, std::string>::value &&
- !has_hash_member<T>::value>::type> {
- size_t operator()(const T &obj) const {
- size_t v = 0;
- for (const auto &elem : obj) {
- using element_type = typename std::decay<decltype(elem)>::type;
- hash_combine_impl(v, ue2_hash<element_type>()(elem));
- }
- return v;
- }
-};
-
-/** \brief Hash for enum types. */
-template<typename T>
-struct ue2_hash<T, typename std::enable_if<std::is_enum<T>::value>::type> {
- size_t operator()(const T &obj) const {
- using utype = typename std::underlying_type<T>::type;
- return ue2_hash<utype>()(static_cast<utype>(obj));
- }
-};
-
-template<typename T>
-void hash_combine(size_t &seed, const T &obj) {
- hash_combine_impl(seed, ue2_hash<T>()(obj));
-}
-
-template<typename T>
-void hash_build(size_t &v, const T &obj) {
- hash_combine(v, obj);
-}
-
-template<typename T, typename... Args>
-void hash_build(size_t &v, const T &obj, Args&&... args) {
- hash_build(v, obj);
- hash_build(v, args...); // recursive
-}
-
-} // namespace hash_detail
-
-using hash_detail::hash_combine;
-
-/**
- * \brief Hasher for general use.
- *
- * Provides operators for most standard containers and falls back to
- * std::hash<T>.
- */
-struct ue2_hasher {
- template<typename T>
- size_t operator()(const T &obj) const {
- return hash_detail::ue2_hash<T>()(obj);
- }
-};
-
-/**
- * \brief Computes the combined hash of all its arguments.
- *
- * Simply use:
- *
- * size_t hash = hash_all(a, b, c, d);
- *
- * Where a, b, c and d are hashable.
- */
-template<typename... Args>
-size_t hash_all(Args&&... args) {
- size_t v = 0;
- hash_detail::hash_build(v, args...);
- return v;
-}
-
-} // namespace ue2
-
-#endif // UTIL_HASH_H
+/*
+ * Copyright (c) 2016-2017, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * \file
+ * \brief Hashing utility functions.
+ */
+
+#ifndef UTIL_HASH_H
+#define UTIL_HASH_H
+
+#include <functional>
+#include <string>
+#include <type_traits>
+#include <utility>
+
+namespace ue2 {
+
+namespace hash_detail {
+
+inline
+void hash_combine_impl(size_t &seed, size_t value) {
+ // Note: constants explicitly truncated on 32-bit platforms.
+ const size_t a = (size_t)0x0b4e0ef37bc32127ULL;
+ const size_t b = (size_t)0x318f07b0c8eb9be9ULL;
+ seed ^= value * a;
+ seed += b;
+}
+
+/** \brief Helper that determines whether std::begin() exists for T. */
+template<typename T>
+struct is_container_check {
+private:
+ template<typename C>
+ static auto has_begin_function(const C &obj) -> decltype(std::begin(obj)) {
+ return std::begin(obj);
+ }
+ static void has_begin_function(...) {
+ return;
+ }
+ using has_begin_type = decltype(has_begin_function(std::declval<T>()));
+
+public:
+ static const bool value = !std::is_void<has_begin_type>::value;
+};
+
+/** \brief Type trait to enable on whether T is a container. */
+template<typename T>
+struct is_container
+ : public ::std::integral_constant<bool, is_container_check<T>::value> {};
+
+/** \brief Helper that determines whether T::hash() exists. */
+template<typename T>
+struct has_hash_member_check {
+private:
+ template<typename C>
+ static auto has_hash_member_function(const C &obj) -> decltype(obj.hash()) {
+ return obj.hash();
+ }
+ static void has_hash_member_function(...) {
+ return;
+ }
+ using has_hash = decltype(has_hash_member_function(std::declval<T>()));
+
+public:
+ static const bool value = !std::is_void<has_hash>::value;
+};
+
+/** \brief Type trait to enable on whether T::hash() exists. */
+template<typename T>
+struct has_hash_member
+ : public ::std::integral_constant<bool, has_hash_member_check<T>::value> {};
+
+/** \brief Default hash: falls back to std::hash. */
+template<typename T, typename Enable = void>
+struct ue2_hash {
+ using decayed_type = typename std::decay<T>::type;
+ size_t operator()(const T &obj) const {
+ return std::hash<decayed_type>()(obj);
+ }
+};
+
+/** \brief Hash for std::pair. */
+template<typename A, typename B>
+struct ue2_hash<std::pair<A, B>, void> {
+ size_t operator()(const std::pair<A, B> &p) const {
+ size_t v = 0;
+ hash_combine_impl(v, ue2_hash<A>()(p.first));
+ hash_combine_impl(v, ue2_hash<B>()(p.second));
+ return v;
+ }
+};
+
+/** \brief Hash for any type that has a hash() member function. */
+template<typename T>
+struct ue2_hash<T, typename std::enable_if<has_hash_member<T>::value>::type> {
+ size_t operator()(const T &obj) const {
+ return obj.hash();
+ }
+};
+
+/**
+ * \brief Hash for any container type that supports std::begin().
+ *
+ * We exempt std::string as std::hash<std::string> is provided and quicker.
+ */
+template<typename T>
+struct ue2_hash<T, typename std::enable_if<
+ is_container<T>::value &&
+ !std::is_same<typename std::decay<T>::type, std::string>::value &&
+ !has_hash_member<T>::value>::type> {
+ size_t operator()(const T &obj) const {
+ size_t v = 0;
+ for (const auto &elem : obj) {
+ using element_type = typename std::decay<decltype(elem)>::type;
+ hash_combine_impl(v, ue2_hash<element_type>()(elem));
+ }
+ return v;
+ }
+};
+
+/** \brief Hash for enum types. */
+template<typename T>
+struct ue2_hash<T, typename std::enable_if<std::is_enum<T>::value>::type> {
+ size_t operator()(const T &obj) const {
+ using utype = typename std::underlying_type<T>::type;
+ return ue2_hash<utype>()(static_cast<utype>(obj));
+ }
+};
+
+template<typename T>
+void hash_combine(size_t &seed, const T &obj) {
+ hash_combine_impl(seed, ue2_hash<T>()(obj));
+}
+
+template<typename T>
+void hash_build(size_t &v, const T &obj) {
+ hash_combine(v, obj);
+}
+
+template<typename T, typename... Args>
+void hash_build(size_t &v, const T &obj, Args&&... args) {
+ hash_build(v, obj);
+ hash_build(v, args...); // recursive
+}
+
+} // namespace hash_detail
+
+using hash_detail::hash_combine;
+
+/**
+ * \brief Hasher for general use.
+ *
+ * Provides operators for most standard containers and falls back to
+ * std::hash<T>.
+ */
+struct ue2_hasher {
+ template<typename T>
+ size_t operator()(const T &obj) const {
+ return hash_detail::ue2_hash<T>()(obj);
+ }
+};
+
+/**
+ * \brief Computes the combined hash of all its arguments.
+ *
+ * Simply use:
+ *
+ * size_t hash = hash_all(a, b, c, d);
+ *
+ * Where a, b, c and d are hashable.
+ */
+template<typename... Args>
+size_t hash_all(Args&&... args) {
+ size_t v = 0;
+ hash_detail::hash_build(v, args...);
+ return v;
+}
+
+} // namespace ue2
+
+#endif // UTIL_HASH_H
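
Taken together, the traits above give ue2_hash a fixed dispatch order: a hash() member wins, then the container overload folds elements through hash_combine, and everything else lands on std::hash. A minimal sketch of how this composes in practice (the Point type and main() below are hypothetical illustrations, not part of the library):

    #include <unordered_set>
    #include <vector>

    // Hypothetical user type: hash_all() combines the members in order,
    // and the hash() member is picked up by has_hash_member.
    struct Point {
        int x, y;
        size_t hash() const { return ue2::hash_all(x, y); }
        bool operator==(const Point &o) const { return x == o.x && y == o.y; }
    };

    int main() {
        // Containers hash element-wise: each Point goes through hash().
        std::vector<Point> path{{0, 0}, {1, 2}};
        size_t h = ue2::ue2_hasher()(path);

        // ue2_hasher also slots in as the Hash parameter of std containers.
        std::unordered_set<Point, ue2::ue2_hasher> seen;
        seen.insert({1, 2});
        return h != 0 ? 0 : 1;
    }
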
diff --git a/contrib/libs/hyperscan/src/util/hash_dynamic_bitset.h b/contrib/libs/hyperscan/src/util/hash_dynamic_bitset.h
index 213d9237d5..65bc29c30b 100644
--- a/contrib/libs/hyperscan/src/util/hash_dynamic_bitset.h
+++ b/contrib/libs/hyperscan/src/util/hash_dynamic_bitset.h
@@ -1,96 +1,96 @@
-/*
- * Copyright (c) 2017, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * \file
- * \brief Hashing utility functions.
- */
-
-#ifndef UTIL_HASH_DYNAMIC_BITSET_H
-#define UTIL_HASH_DYNAMIC_BITSET_H
-
-#include "hash.h"
-
-#include <boost/dynamic_bitset.hpp>
-
-#include <iterator>
-
-namespace ue2 {
-
-/**
- * \brief An output iterator which calculates the combined hash of all elements
- * written to it.
- *
- * The location to output the hash is provided to the constructor and should
- * already be zero initialised.
- */
-struct hash_output_it {
- using value_type = void;
- using difference_type = ptrdiff_t;
- using pointer = void *;
- using reference = void;
- using iterator_category = std::output_iterator_tag;
-
- hash_output_it(size_t *hash_out = nullptr) : out(hash_out) {}
- hash_output_it &operator++() {
- return *this;
- }
- hash_output_it &operator++(int) {
- return *this;
- }
-
- struct deref_proxy {
- deref_proxy(size_t *hash_out) : out(hash_out) {}
-
- template<typename T>
- void operator=(const T &val) const {
- hash_combine(*out, val);
- }
-
- private:
- size_t *out; /* output location of the owning iterator */
- };
-
- deref_proxy operator*() { return {out}; }
-
-private:
- size_t *out; /* location to output the hashes to */
-};
-
-/* Function object for hashing a dynamic bitset */
-struct hash_dynamic_bitset {
- size_t operator()(const boost::dynamic_bitset<> &bs) const {
- size_t rv = 0;
- to_block_range(bs, hash_output_it(&rv));
- return rv;
- }
-};
-
-} // namespace ue2
-
-#endif
+/*
+ * Copyright (c) 2017, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * \file
+ * \brief Hashing utility functions.
+ */
+
+#ifndef UTIL_HASH_DYNAMIC_BITSET_H
+#define UTIL_HASH_DYNAMIC_BITSET_H
+
+#include "hash.h"
+
+#include <boost/dynamic_bitset.hpp>
+
+#include <iterator>
+
+namespace ue2 {
+
+/**
+ * \brief An output iterator which calculates the combined hash of all elements
+ * written to it.
+ *
+ * The location to output the hash is provided to the constructor and should
+ * already be zero initialised.
+ */
+struct hash_output_it {
+ using value_type = void;
+ using difference_type = ptrdiff_t;
+ using pointer = void *;
+ using reference = void;
+ using iterator_category = std::output_iterator_tag;
+
+ hash_output_it(size_t *hash_out = nullptr) : out(hash_out) {}
+ hash_output_it &operator++() {
+ return *this;
+ }
+ hash_output_it &operator++(int) {
+ return *this;
+ }
+
+ struct deref_proxy {
+ deref_proxy(size_t *hash_out) : out(hash_out) {}
+
+ template<typename T>
+ void operator=(const T &val) const {
+ hash_combine(*out, val);
+ }
+
+ private:
+ size_t *out; /* output location of the owning iterator */
+ };
+
+ deref_proxy operator*() { return {out}; }
+
+private:
+ size_t *out; /* location to output the hashes to */
+};
+
+/* Function object for hashing a dynamic bitset */
+struct hash_dynamic_bitset {
+ size_t operator()(const boost::dynamic_bitset<> &bs) const {
+ size_t rv = 0;
+ to_block_range(bs, hash_output_it(&rv));
+ return rv;
+ }
+};
+
+} // namespace ue2
+
+#endif
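
Because to_block_range() streams the bitset's underlying blocks into an output iterator, the proxy above folds each block into the seed as it is produced, so no temporary block buffer is ever allocated. A small usage sketch (the map and values are illustrative only):

    #include <unordered_map>

    int main() {
        boost::dynamic_bitset<> bs(128);
        bs.set(3);
        bs.set(64);

        // Hash the bitset directly...
        size_t h = ue2::hash_dynamic_bitset()(bs);

        // ...or key an unordered container on bitsets.
        std::unordered_map<boost::dynamic_bitset<>, int,
                           ue2::hash_dynamic_bitset> m;
        m[bs] = 1;
        return (int)(h & 1);
    }
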
diff --git a/contrib/libs/hyperscan/src/util/insertion_ordered.h b/contrib/libs/hyperscan/src/util/insertion_ordered.h
index c47b4d6473..2067d35079 100644
--- a/contrib/libs/hyperscan/src/util/insertion_ordered.h
+++ b/contrib/libs/hyperscan/src/util/insertion_ordered.h
@@ -1,368 +1,368 @@
-/*
- * Copyright (c) 2017, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef UTIL_INSERTION_ORDERED_H
-#define UTIL_INSERTION_ORDERED_H
-
-/**
- * \file
- * \brief Insertion-ordered associative containers (set, map).
- */
-
-#include "util/operators.h"
-#include "util/unordered.h"
-
-#include <cassert>
-#include <iterator>
-#include <type_traits>
-#include <utility>
-#include <vector>
-
-#include <boost/iterator/iterator_facade.hpp>
-
-namespace ue2 {
-
-namespace insertion_ordered_detail {
-
-// Iterator facade that wraps an underlying iterator, so that we get our
-// own iterator types.
-template<class WrappedIter, class Value>
-class iter_wrapper
- : public boost::iterator_facade<iter_wrapper<WrappedIter, Value>, Value,
- boost::random_access_traversal_tag> {
-public:
- iter_wrapper() = default;
- explicit iter_wrapper(WrappedIter it_in) : it(std::move(it_in)) {}
-
- // Templated copy-constructor to allow for interoperable iterator and
- // const_iterator.
- template<class, class> friend class iter_wrapper;
-
- template<class OtherIter, class OtherValue>
- iter_wrapper(iter_wrapper<OtherIter, OtherValue> other,
- typename std::enable_if<std::is_convertible<
- OtherIter, WrappedIter>::value>::type * = nullptr)
- : it(std::move(other.it)) {}
-
- WrappedIter get() const { return it; }
-
-private:
- friend class boost::iterator_core_access;
-
- WrappedIter it;
-
- void increment() { ++it; }
- void decrement() { --it; }
- void advance(size_t n) { it += n; }
- typename std::iterator_traits<WrappedIter>::difference_type
- distance_to(const iter_wrapper &other) const {
- return other.it - it;
- }
- bool equal(const iter_wrapper &other) const { return it == other.it; }
- Value &dereference() const { return *it; }
-};
-
-template<class Key, class Element>
-class element_store {
- std::vector<Element> data;
- ue2_unordered_map<Key, size_t> map;
-
-public:
- bool empty() const {
- return data.empty();
- }
-
- size_t size() const {
- assert(data.size() == map.size());
- return data.size();
- }
-
- void clear() {
- data.clear();
- map.clear();
- }
-
- void reserve(size_t n) {
- data.reserve(n);
- map.reserve(n);
- }
-
- // Iteration.
-
- using const_iterator =
- iter_wrapper<typename std::vector<Element>::const_iterator,
- const Element>;
- using iterator =
- iter_wrapper<typename std::vector<Element>::iterator, Element>;
-
- const_iterator begin() const {
- return const_iterator(data.begin());
- }
-
- const_iterator end() const {
- return const_iterator(data.end());
- }
-
- iterator begin() {
- return iterator(data.begin());
- }
-
- iterator end() {
- return iterator(data.end());
- }
-
- // Search.
-
- const_iterator find(const Key &key) const {
- auto map_it = map.find(key);
- if (map_it == map.end()) {
- return end();
- }
- auto idx = map_it->second;
- assert(idx < data.size());
- return begin() + idx;
- }
-
- iterator find(const Key &key) {
- auto map_it = map.find(key);
- if (map_it == map.end()) {
- return end();
- }
- auto idx = map_it->second;
- assert(idx < data.size());
- return begin() + idx;
- }
-
- // Insert.
-
- std::pair<iterator, bool> insert(const Key &key, const Element &element) {
- const auto idx = data.size();
- if (map.emplace(key, idx).second) {
- data.push_back(element);
- return {begin() + idx, true};
- }
- return {end(), false};
- }
-
- bool operator==(const element_store &a) const {
- return data == a.data;
- }
-
- bool operator<(const element_store &a) const {
- return data < a.data;
- }
-
- void swap(element_store &a) {
- using std::swap;
- swap(data, a.data);
- swap(map, a.map);
- }
-};
-
-} // namespace insertion_ordered_detail
-
-template<class Key, class Value>
-class insertion_ordered_map
- : public totally_ordered<insertion_ordered_map<Key, Value>> {
-public:
- using key_type = Key;
- using mapped_type = Value;
- using value_type = std::pair<const Key, Value>;
-
-private:
- using store_type = insertion_ordered_detail::element_store<Key, value_type>;
- store_type store;
-
-public:
- using const_iterator = typename store_type::const_iterator;
- using iterator = typename store_type::iterator;
-
- insertion_ordered_map() = default;
-
- template<class Iter>
- insertion_ordered_map(Iter it, Iter it_end) {
- insert(it, it_end);
- }
-
- explicit insertion_ordered_map(std::initializer_list<value_type> init) {
- insert(init.begin(), init.end());
- }
-
- const_iterator begin() const { return store.begin(); }
- const_iterator end() const { return store.end(); }
- iterator begin() { return store.begin(); }
- iterator end() { return store.end(); }
-
- const_iterator find(const Key &key) const {
- return store.find(key);
- }
-
- iterator find(const Key &key) {
- return store.find(key);
- }
-
- std::pair<iterator, bool> insert(const std::pair<const Key, Value> &p) {
- return store.insert(p.first, p);
- }
-
- template<class Iter>
- void insert(Iter it, Iter it_end) {
- for (; it != it_end; ++it) {
- insert(*it);
- }
- }
-
- Value &operator[](const Key &key) {
- auto it = find(key);
- if (it == end()) {
- it = insert({key, Value{}}).first;
- }
- return it->second;
- }
-
- const Value &at(const Key &key) const {
- return find(key)->second;
- }
-
- Value &at(const Key &key) {
- return find(key)->second;
- }
-
- bool empty() const {
- return store.empty();
- }
-
- size_t size() const {
- return store.size();
- }
-
- void clear() {
- store.clear();
- }
-
- void reserve(size_t n) {
- store.reserve(n);
- }
-
- bool operator==(const insertion_ordered_map &a) const {
- return store == a.store;
- }
-
- bool operator<(const insertion_ordered_map &a) const {
- return store < a.store;
- }
-
- void swap(insertion_ordered_map &a) {
- store.swap(a.store);
- }
-
- friend void swap(insertion_ordered_map &a, insertion_ordered_map &b) {
- a.swap(b);
- }
-};
-
-template<class Key>
-class insertion_ordered_set
- : public totally_ordered<insertion_ordered_set<Key>> {
-public:
- using key_type = Key;
- using value_type = Key;
-
-private:
- using store_type = insertion_ordered_detail::element_store<Key, value_type>;
- store_type store;
-
-public:
- using const_iterator = typename store_type::const_iterator;
- using iterator = typename store_type::iterator;
-
- insertion_ordered_set() = default;
-
- template<class Iter>
- insertion_ordered_set(Iter it, Iter it_end) {
- insert(it, it_end);
- }
-
- explicit insertion_ordered_set(std::initializer_list<value_type> init) {
- insert(init.begin(), init.end());
- }
-
- const_iterator begin() const { return store.begin(); }
- const_iterator end() const { return store.end(); }
-
- const_iterator find(const Key &key) const {
- return store.find(key);
- }
-
- std::pair<iterator, bool> insert(const Key &key) {
- return store.insert(key, key);
- }
-
- template<class Iter>
- void insert(Iter it, Iter it_end) {
- for (; it != it_end; ++it) {
- insert(*it);
- }
- }
-
- bool empty() const {
- return store.empty();
- }
-
- size_t size() const {
- return store.size();
- }
-
- void clear() {
- store.clear();
- }
-
- void reserve(size_t n) {
- store.reserve(n);
- }
-
- bool operator==(const insertion_ordered_set &a) const {
- return store == a.store;
- }
-
- bool operator<(const insertion_ordered_set &a) const {
- return store < a.store;
- }
-
- void swap(insertion_ordered_set &a) {
- store.swap(a.store);
- }
-
- friend void swap(insertion_ordered_set &a, insertion_ordered_set &b) {
- a.swap(b);
- }
-};
-
-} // namespace ue2
-
-#endif // UTIL_INSERTION_ORDERED_H
+/*
+ * Copyright (c) 2017, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef UTIL_INSERTION_ORDERED_H
+#define UTIL_INSERTION_ORDERED_H
+
+/**
+ * \file
+ * \brief Insertion-ordered associative containers (set, map).
+ */
+
+#include "util/operators.h"
+#include "util/unordered.h"
+
+#include <cassert>
+#include <iterator>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+#include <boost/iterator/iterator_facade.hpp>
+
+namespace ue2 {
+
+namespace insertion_ordered_detail {
+
+// Iterator facade that wraps an underlying iterator, so that we get our
+// own iterator types.
+template<class WrappedIter, class Value>
+class iter_wrapper
+ : public boost::iterator_facade<iter_wrapper<WrappedIter, Value>, Value,
+ boost::random_access_traversal_tag> {
+public:
+ iter_wrapper() = default;
+ explicit iter_wrapper(WrappedIter it_in) : it(std::move(it_in)) {}
+
+ // Templated copy-constructor to allow for interoperable iterator and
+ // const_iterator.
+ template<class, class> friend class iter_wrapper;
+
+ template<class OtherIter, class OtherValue>
+ iter_wrapper(iter_wrapper<OtherIter, OtherValue> other,
+ typename std::enable_if<std::is_convertible<
+ OtherIter, WrappedIter>::value>::type * = nullptr)
+ : it(std::move(other.it)) {}
+
+ WrappedIter get() const { return it; }
+
+private:
+ friend class boost::iterator_core_access;
+
+ WrappedIter it;
+
+ void increment() { ++it; }
+ void decrement() { --it; }
+ void advance(size_t n) { it += n; }
+ typename std::iterator_traits<WrappedIter>::difference_type
+ distance_to(const iter_wrapper &other) const {
+ return other.it - it;
+ }
+ bool equal(const iter_wrapper &other) const { return it == other.it; }
+ Value &dereference() const { return *it; }
+};
+
+template<class Key, class Element>
+class element_store {
+ std::vector<Element> data;
+ ue2_unordered_map<Key, size_t> map;
+
+public:
+ bool empty() const {
+ return data.empty();
+ }
+
+ size_t size() const {
+ assert(data.size() == map.size());
+ return data.size();
+ }
+
+ void clear() {
+ data.clear();
+ map.clear();
+ }
+
+ void reserve(size_t n) {
+ data.reserve(n);
+ map.reserve(n);
+ }
+
+ // Iteration.
+
+ using const_iterator =
+ iter_wrapper<typename std::vector<Element>::const_iterator,
+ const Element>;
+ using iterator =
+ iter_wrapper<typename std::vector<Element>::iterator, Element>;
+
+ const_iterator begin() const {
+ return const_iterator(data.begin());
+ }
+
+ const_iterator end() const {
+ return const_iterator(data.end());
+ }
+
+ iterator begin() {
+ return iterator(data.begin());
+ }
+
+ iterator end() {
+ return iterator(data.end());
+ }
+
+ // Search.
+
+ const_iterator find(const Key &key) const {
+ auto map_it = map.find(key);
+ if (map_it == map.end()) {
+ return end();
+ }
+ auto idx = map_it->second;
+ assert(idx < data.size());
+ return begin() + idx;
+ }
+
+ iterator find(const Key &key) {
+ auto map_it = map.find(key);
+ if (map_it == map.end()) {
+ return end();
+ }
+ auto idx = map_it->second;
+ assert(idx < data.size());
+ return begin() + idx;
+ }
+
+ // Insert.
+
+ std::pair<iterator, bool> insert(const Key &key, const Element &element) {
+ const auto idx = data.size();
+ if (map.emplace(key, idx).second) {
+ data.push_back(element);
+ return {begin() + idx, true};
+ }
+ return {end(), false};
+ }
+
+ bool operator==(const element_store &a) const {
+ return data == a.data;
+ }
+
+ bool operator<(const element_store &a) const {
+ return data < a.data;
+ }
+
+ void swap(element_store &a) {
+ using std::swap;
+ swap(data, a.data);
+ swap(map, a.map);
+ }
+};
+
+} // namespace insertion_ordered_detail
+
+template<class Key, class Value>
+class insertion_ordered_map
+ : public totally_ordered<insertion_ordered_map<Key, Value>> {
+public:
+ using key_type = Key;
+ using mapped_type = Value;
+ using value_type = std::pair<const Key, Value>;
+
+private:
+ using store_type = insertion_ordered_detail::element_store<Key, value_type>;
+ store_type store;
+
+public:
+ using const_iterator = typename store_type::const_iterator;
+ using iterator = typename store_type::iterator;
+
+ insertion_ordered_map() = default;
+
+ template<class Iter>
+ insertion_ordered_map(Iter it, Iter it_end) {
+ insert(it, it_end);
+ }
+
+ explicit insertion_ordered_map(std::initializer_list<value_type> init) {
+ insert(init.begin(), init.end());
+ }
+
+ const_iterator begin() const { return store.begin(); }
+ const_iterator end() const { return store.end(); }
+ iterator begin() { return store.begin(); }
+ iterator end() { return store.end(); }
+
+ const_iterator find(const Key &key) const {
+ return store.find(key);
+ }
+
+ iterator find(const Key &key) {
+ return store.find(key);
+ }
+
+ std::pair<iterator, bool> insert(const std::pair<const Key, Value> &p) {
+ return store.insert(p.first, p);
+ }
+
+ template<class Iter>
+ void insert(Iter it, Iter it_end) {
+ for (; it != it_end; ++it) {
+ insert(*it);
+ }
+ }
+
+ Value &operator[](const Key &key) {
+ auto it = find(key);
+ if (it == end()) {
+ it = insert({key, Value{}}).first;
+ }
+ return it->second;
+ }
+
+ const Value &at(const Key &key) const {
+ return find(key)->second;
+ }
+
+ Value &at(const Key &key) {
+ return find(key)->second;
+ }
+
+ bool empty() const {
+ return store.empty();
+ }
+
+ size_t size() const {
+ return store.size();
+ }
+
+ void clear() {
+ store.clear();
+ }
+
+ void reserve(size_t n) {
+ store.reserve(n);
+ }
+
+ bool operator==(const insertion_ordered_map &a) const {
+ return store == a.store;
+ }
+
+ bool operator<(const insertion_ordered_map &a) const {
+ return store < a.store;
+ }
+
+ void swap(insertion_ordered_map &a) {
+ store.swap(a.store);
+ }
+
+ friend void swap(insertion_ordered_map &a, insertion_ordered_map &b) {
+ a.swap(b);
+ }
+};
+
+template<class Key>
+class insertion_ordered_set
+ : public totally_ordered<insertion_ordered_set<Key>> {
+public:
+ using key_type = Key;
+ using value_type = Key;
+
+private:
+ using store_type = insertion_ordered_detail::element_store<Key, value_type>;
+ store_type store;
+
+public:
+ using const_iterator = typename store_type::const_iterator;
+ using iterator = typename store_type::iterator;
+
+ insertion_ordered_set() = default;
+
+ template<class Iter>
+ insertion_ordered_set(Iter it, Iter it_end) {
+ insert(it, it_end);
+ }
+
+ explicit insertion_ordered_set(std::initializer_list<value_type> init) {
+ insert(init.begin(), init.end());
+ }
+
+ const_iterator begin() const { return store.begin(); }
+ const_iterator end() const { return store.end(); }
+
+ const_iterator find(const Key &key) const {
+ return store.find(key);
+ }
+
+ std::pair<iterator, bool> insert(const Key &key) {
+ return store.insert(key, key);
+ }
+
+ template<class Iter>
+ void insert(Iter it, Iter it_end) {
+ for (; it != it_end; ++it) {
+ insert(*it);
+ }
+ }
+
+ bool empty() const {
+ return store.empty();
+ }
+
+ size_t size() const {
+ return store.size();
+ }
+
+ void clear() {
+ store.clear();
+ }
+
+ void reserve(size_t n) {
+ store.reserve(n);
+ }
+
+ bool operator==(const insertion_ordered_set &a) const {
+ return store == a.store;
+ }
+
+ bool operator<(const insertion_ordered_set &a) const {
+ return store < a.store;
+ }
+
+ void swap(insertion_ordered_set &a) {
+ store.swap(a.store);
+ }
+
+ friend void swap(insertion_ordered_set &a, insertion_ordered_set &b) {
+ a.swap(b);
+ }
+};
+
+} // namespace ue2
+
+#endif // UTIL_INSERTION_ORDERED_H
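
The split representation is the point of element_store: the vector preserves insertion order for iteration and comparison, while the key-to-index map keeps find() at hash-table speed. Note that at() does not check for a missing key beyond find()'s internal assertion, so callers must know the key is present. An illustrative sketch (keys and values are arbitrary):

    #include <string>

    int main() {
        ue2::insertion_ordered_map<std::string, int> m;
        m.insert({"b", 2});
        m.insert({"a", 1});
        m["c"] = 3; // default-constructs the value, then assigns

        // Iteration replays insertion order ({"b",2}, {"a",1}, {"c",3}),
        // not key order as std::map would give.
        for (const auto &kv : m) {
            (void)kv;
        }

        ue2::insertion_ordered_set<int> s;
        s.insert(10);
        s.insert(10); // duplicate: returns {end(), false}, size stays 1
        return (int)s.size();
    }
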
diff --git a/contrib/libs/hyperscan/src/util/intrinsics.h b/contrib/libs/hyperscan/src/util/intrinsics.h
index 6b3c865add..edc4f6efb3 100644
--- a/contrib/libs/hyperscan/src/util/intrinsics.h
+++ b/contrib/libs/hyperscan/src/util/intrinsics.h
@@ -1,66 +1,66 @@
-/*
- * Copyright (c) 2017, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \file
- * \brief Wrapper around the compiler supplied intrinsic header
- */
-
-#ifndef INTRINSICS_H
-#define INTRINSICS_H
-
-#include "config.h"
-
-#ifdef __cplusplus
-# if defined(HAVE_CXX_X86INTRIN_H)
-# define USE_X86INTRIN_H
-# endif
-#else // C
-# if defined(HAVE_C_X86INTRIN_H)
-# define USE_X86INTRIN_H
-# endif
-#endif
-
-#ifdef __cplusplus
-# if defined(HAVE_CXX_INTRIN_H)
-# define USE_INTRIN_H
-# endif
-#else // C
-# if defined(HAVE_C_INTRIN_H)
-# define USE_INTRIN_H
-# endif
-#endif
-
-#if defined(USE_X86INTRIN_H)
-#include <x86intrin.h>
-#elif defined(USE_INTRIN_H)
-#include <intrin.h>
-#else
-#error no intrinsics file
-#endif
-
-#endif // INTRINSICS_H
+/*
+ * Copyright (c) 2017, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * \brief Wrapper around the compiler supplied intrinsic header
+ */
+
+#ifndef INTRINSICS_H
+#define INTRINSICS_H
+
+#include "config.h"
+
+#ifdef __cplusplus
+# if defined(HAVE_CXX_X86INTRIN_H)
+# define USE_X86INTRIN_H
+# endif
+#else // C
+# if defined(HAVE_C_X86INTRIN_H)
+# define USE_X86INTRIN_H
+# endif
+#endif
+
+#ifdef __cplusplus
+# if defined(HAVE_CXX_INTRIN_H)
+# define USE_INTRIN_H
+# endif
+#else // C
+# if defined(HAVE_C_INTRIN_H)
+# define USE_INTRIN_H
+# endif
+#endif
+
+#if defined(USE_X86INTRIN_H)
+#include <x86intrin.h>
+#elif defined(USE_INTRIN_H)
+#include <intrin.h>
+#else
+#error no intrinsics file
+#endif
+
+#endif // INTRINSICS_H
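
The HAVE_* symbols are expected to come from build-system header probes recorded in config.h; the wrapper then includes whichever of <x86intrin.h> (GCC/Clang) or <intrin.h> (MSVC) was found, so consumers never name a platform header directly. A hedged sketch of a consumer (the helper below is illustrative, not library code):

    #include "intrinsics.h"

    // Returns a bitmask with bit i set where byte i of the 16 bytes
    // at p is zero; resolves the same way under GCC, Clang and MSVC
    // once intrinsics.h has picked the platform header.
    static int zero_byte_mask(const unsigned char *p) {
        __m128i v = _mm_loadu_si128((const __m128i *)p);
        __m128i z = _mm_cmpeq_epi8(v, _mm_setzero_si128());
        return _mm_movemask_epi8(z);
    }
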
diff --git a/contrib/libs/hyperscan/src/util/join.h b/contrib/libs/hyperscan/src/util/join.h
index a251c6e032..7d5a30c39a 100644
--- a/contrib/libs/hyperscan/src/util/join.h
+++ b/contrib/libs/hyperscan/src/util/join.h
@@ -31,10 +31,10 @@
#define JOIN(x, y) JOIN_AGAIN(x, y)
#define JOIN_AGAIN(x, y) x ## y
-#define JOIN3(x, y, z) JOIN_AGAIN3(x, y, z)
-#define JOIN_AGAIN3(x, y, z) x ## y ## z
-
-#define JOIN4(w, x, y, z) JOIN_AGAIN4(w, x, y, z)
-#define JOIN_AGAIN4(w, x, y, z) w ## x ## y ## z
-
+#define JOIN3(x, y, z) JOIN_AGAIN3(x, y, z)
+#define JOIN_AGAIN3(x, y, z) x ## y ## z
+
+#define JOIN4(w, x, y, z) JOIN_AGAIN4(w, x, y, z)
+#define JOIN_AGAIN4(w, x, y, z) w ## x ## y ## z
+
#endif
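
The two-level expansion is what makes these macros useful: the outer macro forces its arguments through one round of macro expansion before ## pastes them, so predefined macros like __LINE__ are replaced first. A small sketch:

    #include "join.h"

    #define MAKE_NAME(base) JOIN(base, __LINE__)

    // Pasting directly would produce the literal token counter___LINE__;
    // via JOIN, __LINE__ expands first, yielding e.g. counter_42.
    static int MAKE_NAME(counter_) = 0;

    // JOIN3 and JOIN4 paste three or four tokens the same way:
    static int JOIN3(a_, b_, c) = 0; // declares a_b_c
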
diff --git a/contrib/libs/hyperscan/src/util/make_unique.h b/contrib/libs/hyperscan/src/util/make_unique.h
index ba15a9dfbb..651e8c5cf9 100644
--- a/contrib/libs/hyperscan/src/util/make_unique.h
+++ b/contrib/libs/hyperscan/src/util/make_unique.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -39,9 +39,9 @@
namespace ue2 {
#if defined(USE_STD)
-using std::make_unique;
+using std::make_unique;
#else
-using boost::make_unique;
+using boost::make_unique;
#endif
}
diff --git a/contrib/libs/hyperscan/src/util/masked_move.c b/contrib/libs/hyperscan/src/util/masked_move.c
index fcbd24037a..001cd49f28 100644
--- a/contrib/libs/hyperscan/src/util/masked_move.c
+++ b/contrib/libs/hyperscan/src/util/masked_move.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -29,13 +29,13 @@
#include "ue2common.h"
#include "masked_move.h"
-#include "util/arch.h"
+#include "util/arch.h"
-#if defined(HAVE_AVX2)
+#if defined(HAVE_AVX2)
/* masks for masked moves */
/* magic mask for maskload (vmmaskmovq) - described in UE-2424 */
-const ALIGN_CL_DIRECTIVE u32 mm_mask_mask[16] = {
+const ALIGN_CL_DIRECTIVE u32 mm_mask_mask[16] = {
0x00000000U,
0x00000000U,
0x00000000U,
diff --git a/contrib/libs/hyperscan/src/util/masked_move.h b/contrib/libs/hyperscan/src/util/masked_move.h
index 9b8a6ebc3e..4c877ca9e5 100644
--- a/contrib/libs/hyperscan/src/util/masked_move.h
+++ b/contrib/libs/hyperscan/src/util/masked_move.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -29,21 +29,21 @@
#ifndef MASKED_MOVE_H
#define MASKED_MOVE_H
-#include "arch.h"
+#include "arch.h"
+
+#if defined(HAVE_AVX2)
-#if defined(HAVE_AVX2)
-
#include "unaligned.h"
#include "simd_utils.h"
-#ifdef __cplusplus
-extern "C" {
-#endif
+#ifdef __cplusplus
+extern "C" {
+#endif
extern const u32 mm_mask_mask[16];
extern const u32 mm_shuffle_end[32][8];
-#ifdef __cplusplus
-}
-#endif
+#ifdef __cplusplus
+}
+#endif
/* load mask for len bytes from start of buffer */
static really_inline m256
@@ -70,8 +70,8 @@ masked_move256_len(const u8 *buf, const u32 len) {
u32 end = unaligned_load_u32(buf + len - 4);
m256 preshufend = _mm256_broadcastq_epi64(_mm_cvtsi32_si128(end));
m256 v = _mm256_maskload_epi32((const int *)buf, lmask);
- m256 shufend = pshufb_m256(preshufend,
- loadu256(&mm_shuffle_end[len - 4]));
+ m256 shufend = pshufb_m256(preshufend,
+ loadu256(&mm_shuffle_end[len - 4]));
m256 target = or256(v, shufend);
return target;
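
The mask tables are what make a short load safe: mm_mask_mask yields a per-lane maskload mask covering len bytes (so the vector load never dereferences past the buffer), and mm_shuffle_end splices the final, unaligned four bytes into the correct lanes. A hedged usage sketch (assumes HAVE_AVX2 and the m256/u8/u32 types from the surrounding headers; the wrapper itself is illustrative):

    // Pull a short buffer (4 <= len < 32) into the low bytes of a
    // ymm register without reading past buf + len.
    static really_inline
    m256 load_short(const u8 *buf, u32 len) {
        assert(len >= 4 && len < 32);
        return masked_move256_len(buf, len);
    }
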
diff --git a/contrib/libs/hyperscan/src/util/math.h b/contrib/libs/hyperscan/src/util/math.h
index 99db81df8c..e18c502776 100644
--- a/contrib/libs/hyperscan/src/util/math.h
+++ b/contrib/libs/hyperscan/src/util/math.h
@@ -1,50 +1,50 @@
-/*
- * Copyright (c) 2017, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef UTIL_MATH_H_
-#define UTIL_MATH_H_
-
-#include "arch.h"
-#include "intrinsics.h"
-
-#include <math.h>
-
-static really_inline
-double our_pow(double x, double y) {
-#if defined(HAVE_AVX)
- /*
- * Clear the upper half of AVX registers before calling into the math lib.
- * On some versions of glibc this can save thousands of AVX-to-SSE
- * transitions.
- */
- _mm256_zeroupper();
-#endif
- return pow(x, y);
-}
-
-#endif // UTIL_MATH_H_
+/*
+ * Copyright (c) 2017, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef UTIL_MATH_H_
+#define UTIL_MATH_H_
+
+#include "arch.h"
+#include "intrinsics.h"
+
+#include <math.h>
+
+static really_inline
+double our_pow(double x, double y) {
+#if defined(HAVE_AVX)
+ /*
+ * Clear the upper half of AVX registers before calling into the math lib.
+ * On some versions of glibc this can save thousands of AVX-to-SSE
+ * transitions.
+ */
+ _mm256_zeroupper();
+#endif
+ return pow(x, y);
+}
+
+#endif // UTIL_MATH_H_
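
The guard matters because on some glibc versions libm's pow() is compiled as legacy SSE code, and calling it with dirty upper ymm state from surrounding VEX-encoded AVX code triggers costly state transitions; _mm256_zeroupper() makes the transition explicit and cheap. Usage is a drop-in for pow() (the caller below is illustrative):

    #include "util/math.h"

    // Safe to call from the middle of AVX-heavy compile-time code
    // without paying repeated AVX-to-SSE transition penalties.
    static double score(double base, double exponent) {
        return our_pow(base, exponent);
    }
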
diff --git a/contrib/libs/hyperscan/src/util/multibit.c b/contrib/libs/hyperscan/src/util/multibit.c
index b3afd24c42..de192d7dd7 100644
--- a/contrib/libs/hyperscan/src/util/multibit.c
+++ b/contrib/libs/hyperscan/src/util/multibit.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2016, Intel Corporation
+ * Copyright (c) 2015-2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/contrib/libs/hyperscan/src/util/multibit.h b/contrib/libs/hyperscan/src/util/multibit.h
index ffdbb8ebd0..c3a4ba461a 100644
--- a/contrib/libs/hyperscan/src/util/multibit.h
+++ b/contrib/libs/hyperscan/src/util/multibit.h
@@ -162,7 +162,7 @@ u32 mmb_popcount(MMB_TYPE val) {
}
#ifndef MMMB_DEBUG
-#define MDEBUG_PRINTF(x, ...) do { } while(0)
+#define MDEBUG_PRINTF(x, ...) do { } while(0)
#else
#define MDEBUG_PRINTF DEBUG_PRINTF
#endif
@@ -665,84 +665,84 @@ char mmbit_any_precise(const u8 *bits, u32 total_bits) {
}
static really_inline
-char mmbit_all_flat(const u8 *bits, u32 total_bits) {
- while (total_bits > MMB_KEY_BITS) {
- if (mmb_load(bits) != MMB_ALL_ONES) {
- return 0;
- }
- bits += sizeof(MMB_TYPE);
- total_bits -= MMB_KEY_BITS;
- }
- while (total_bits > 8) {
- if (*bits != 0xff) {
- return 0;
- }
- bits++;
- total_bits -= 8;
- }
- u8 mask = (u8)mmb_mask_zero_to_nocheck(total_bits);
- return (*bits & mask) == mask;
-}
-
-static really_inline
-char mmbit_all_big(const u8 *bits, u32 total_bits) {
- u32 ks = mmbit_keyshift(total_bits);
-
- u32 level = 0;
- for (;;) {
-        // Number of bits we expect to see switched on at this level.
- u32 level_bits;
- if (ks != 0) {
- u32 next_level_width = MMB_KEY_BITS << (ks - MMB_KEY_SHIFT);
- level_bits = ROUNDUP_N(total_bits, next_level_width) >> ks;
- } else {
- level_bits = total_bits;
- }
-
- const u8 *block_ptr = mmbit_get_level_root_const(bits, level);
-
- // All full-size blocks should be all-ones.
- while (level_bits >= MMB_KEY_BITS) {
- MMB_TYPE block = mmb_load(block_ptr);
- if (block != MMB_ALL_ONES) {
- return 0;
- }
- block_ptr += sizeof(MMB_TYPE);
- level_bits -= MMB_KEY_BITS;
- }
-
- // If we have bits remaining, we have a runt block on the end.
- if (level_bits > 0) {
- MMB_TYPE block = mmb_load(block_ptr);
- MMB_TYPE mask = mmb_mask_zero_to_nocheck(level_bits);
- if ((block & mask) != mask) {
- return 0;
- }
- }
-
- if (ks == 0) {
- break;
- }
-
- ks -= MMB_KEY_SHIFT;
- level++;
- }
-
- return 1;
-}
-
-/** \brief True if all keys are on. Guaranteed precise. */
-static really_inline
-char mmbit_all(const u8 *bits, u32 total_bits) {
- MDEBUG_PRINTF("%p total_bits %u\n", bits, total_bits);
-
- if (mmbit_is_flat_model(total_bits)) {
- return mmbit_all_flat(bits, total_bits);
- }
- return mmbit_all_big(bits, total_bits);
-}
-
-static really_inline
+char mmbit_all_flat(const u8 *bits, u32 total_bits) {
+ while (total_bits > MMB_KEY_BITS) {
+ if (mmb_load(bits) != MMB_ALL_ONES) {
+ return 0;
+ }
+ bits += sizeof(MMB_TYPE);
+ total_bits -= MMB_KEY_BITS;
+ }
+ while (total_bits > 8) {
+ if (*bits != 0xff) {
+ return 0;
+ }
+ bits++;
+ total_bits -= 8;
+ }
+ u8 mask = (u8)mmb_mask_zero_to_nocheck(total_bits);
+ return (*bits & mask) == mask;
+}
+
+static really_inline
+char mmbit_all_big(const u8 *bits, u32 total_bits) {
+ u32 ks = mmbit_keyshift(total_bits);
+
+ u32 level = 0;
+ for (;;) {
+        // Number of bits we expect to see switched on at this level.
+ u32 level_bits;
+ if (ks != 0) {
+ u32 next_level_width = MMB_KEY_BITS << (ks - MMB_KEY_SHIFT);
+ level_bits = ROUNDUP_N(total_bits, next_level_width) >> ks;
+ } else {
+ level_bits = total_bits;
+ }
+
+ const u8 *block_ptr = mmbit_get_level_root_const(bits, level);
+
+ // All full-size blocks should be all-ones.
+ while (level_bits >= MMB_KEY_BITS) {
+ MMB_TYPE block = mmb_load(block_ptr);
+ if (block != MMB_ALL_ONES) {
+ return 0;
+ }
+ block_ptr += sizeof(MMB_TYPE);
+ level_bits -= MMB_KEY_BITS;
+ }
+
+ // If we have bits remaining, we have a runt block on the end.
+ if (level_bits > 0) {
+ MMB_TYPE block = mmb_load(block_ptr);
+ MMB_TYPE mask = mmb_mask_zero_to_nocheck(level_bits);
+ if ((block & mask) != mask) {
+ return 0;
+ }
+ }
+
+ if (ks == 0) {
+ break;
+ }
+
+ ks -= MMB_KEY_SHIFT;
+ level++;
+ }
+
+ return 1;
+}
+
+/** \brief True if all keys are on. Guaranteed precise. */
+static really_inline
+char mmbit_all(const u8 *bits, u32 total_bits) {
+ MDEBUG_PRINTF("%p total_bits %u\n", bits, total_bits);
+
+ if (mmbit_is_flat_model(total_bits)) {
+ return mmbit_all_flat(bits, total_bits);
+ }
+ return mmbit_all_big(bits, total_bits);
+}
+
+static really_inline
MMB_TYPE get_flat_masks(u32 base, u32 it_start, u32 it_end) {
if (it_end <= base) {
return 0;
@@ -820,7 +820,7 @@ u32 mmbit_iterate_bounded_big(const u8 *bits, u32 total_bits, u32 it_start, u32
for (;;) {
assert(level <= max_level);
- u64a block_width = MMB_KEY_BITS << ks;
+ u64a block_width = MMB_KEY_BITS << ks;
u64a block_base = key * block_width;
u64a block_min = MAX(it_start, block_base);
u64a block_max = MIN(it_end, block_base + block_width - 1);
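
mmbit_all_flat above checks word-sized chunks, then whole bytes, then masks the runt byte; mmbit_all_big must additionally walk every summary level of the pyramid, sizing each level's runt block from the rounded-up total. A simplified flat-model sketch of the same shape (assuming a 64-bit MMB_TYPE; this is an illustration, not the library routine):

    #include <cstdint>
    #include <cstring>

    static bool all_flat(const uint8_t *bits, uint32_t total_bits) {
        while (total_bits > 64) {          // whole 64-bit words
            uint64_t w;
            std::memcpy(&w, bits, sizeof(w));
            if (w != ~0ULL) return false;
            bits += 8;
            total_bits -= 64;
        }
        while (total_bits > 8) {           // whole bytes
            if (*bits != 0xff) return false;
            bits++;
            total_bits -= 8;
        }
        // Runt: mask of the low total_bits bits (1 <= total_bits <= 8).
        uint8_t mask = (uint8_t)((1ULL << total_bits) - 1);
        return (*bits & mask) == mask;
    }
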
diff --git a/contrib/libs/hyperscan/src/util/multibit_build.cpp b/contrib/libs/hyperscan/src/util/multibit_build.cpp
index b1c10f5f67..67bb9ec702 100644
--- a/contrib/libs/hyperscan/src/util/multibit_build.cpp
+++ b/contrib/libs/hyperscan/src/util/multibit_build.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -34,7 +34,7 @@
#include "scatter.h"
#include "ue2common.h"
#include "rose/rose_build_scatter.h"
-#include "util/compile_error.h"
+#include "util/compile_error.h"
#include <cassert>
#include <cstring> // for memset
@@ -46,32 +46,32 @@ using namespace std;
namespace ue2 {
-u32 mmbit_size(u32 total_bits) {
- if (total_bits > MMB_MAX_BITS) {
- throw ResourceLimitError();
- }
-
- // Flat model multibit structures are just stored as a bit vector.
- if (total_bits <= MMB_FLAT_MAX_BITS) {
- return ROUNDUP_N(total_bits, 8) / 8;
- }
-
- u64a current_level = 1; // Number of blocks on current level.
- u64a total = 0; // Total number of blocks.
- while (current_level * MMB_KEY_BITS < total_bits) {
- total += current_level;
- current_level <<= MMB_KEY_SHIFT;
- }
-
- // Last level is a one-for-one bit vector. It needs room for total_bits
- // elements, rounded up to the nearest block.
- u64a last_level = ((u64a)total_bits + MMB_KEY_BITS - 1) / MMB_KEY_BITS;
- total += last_level;
-
- assert(total * sizeof(MMB_TYPE) <= UINT32_MAX);
- return (u32)(total * sizeof(MMB_TYPE));
-}
-
+u32 mmbit_size(u32 total_bits) {
+ if (total_bits > MMB_MAX_BITS) {
+ throw ResourceLimitError();
+ }
+
+ // Flat model multibit structures are just stored as a bit vector.
+ if (total_bits <= MMB_FLAT_MAX_BITS) {
+ return ROUNDUP_N(total_bits, 8) / 8;
+ }
+
+ u64a current_level = 1; // Number of blocks on current level.
+ u64a total = 0; // Total number of blocks.
+ while (current_level * MMB_KEY_BITS < total_bits) {
+ total += current_level;
+ current_level <<= MMB_KEY_SHIFT;
+ }
+
+ // Last level is a one-for-one bit vector. It needs room for total_bits
+ // elements, rounded up to the nearest block.
+ u64a last_level = ((u64a)total_bits + MMB_KEY_BITS - 1) / MMB_KEY_BITS;
+ total += last_level;
+
+ assert(total * sizeof(MMB_TYPE) <= UINT32_MAX);
+ return (u32)(total * sizeof(MMB_TYPE));
+}
+
namespace {
struct TreeNode {
MMB_TYPE mask = 0;
@@ -155,12 +155,12 @@ void bfs(vector<mmbit_sparse_iter> &out, const TreeNode &tree) {
/** \brief Construct a sparse iterator over the values in \a bits for a
* multibit of size \a total_bits. */
-vector<mmbit_sparse_iter> mmbBuildSparseIterator(const vector<u32> &bits,
- u32 total_bits) {
- vector<mmbit_sparse_iter> out;
+vector<mmbit_sparse_iter> mmbBuildSparseIterator(const vector<u32> &bits,
+ u32 total_bits) {
+ vector<mmbit_sparse_iter> out;
assert(!bits.empty());
assert(total_bits > 0);
- assert(total_bits <= MMB_MAX_BITS);
+ assert(total_bits <= MMB_MAX_BITS);
DEBUG_PRINTF("building sparse iter for %zu of %u bits\n",
bits.size(), total_bits);
@@ -186,7 +186,7 @@ vector<mmbit_sparse_iter> mmbBuildSparseIterator(const vector<u32> &bits,
#endif
DEBUG_PRINTF("iter has %zu records\n", out.size());
- return out;
+ return out;
}
template<typename T>
@@ -273,7 +273,7 @@ void mmbBuildInitRangePlan(u32 total_bits, u32 begin, u32 end,
}
// Partial block to deal with beginning.
- block_offset += (k1 / MMB_KEY_BITS) * sizeof(MMB_TYPE);
+ block_offset += (k1 / MMB_KEY_BITS) * sizeof(MMB_TYPE);
if (k1 % MMB_KEY_BITS) {
u32 idx = k1 / MMB_KEY_BITS;
u32 block_end = (idx + 1) * MMB_KEY_BITS;
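
The geometric loop in mmbit_size above is easy to sanity-check by hand. A sketch of the same computation, assuming the plausible constants MMB_KEY_BITS == 64, MMB_KEY_SHIFT == 6 and 8-byte blocks (these values are assumptions for illustration):

    #include <cstdint>

    // mmbit_size_sketch(100000): summary levels of 1 and 64 blocks
    // (the loop stops once 4096 * 64 >= 100000), then a leaf level of
    // (100000 + 63) / 64 == 1563 blocks, giving
    // (1 + 64 + 1563) * 8 == 13024 bytes.
    static uint32_t mmbit_size_sketch(uint32_t total_bits) {
        uint64_t level = 1, total = 0;
        while (level * 64 < total_bits) {
            total += level;
            level <<= 6;
        }
        total += (total_bits + 63ULL) / 64; // leaf bit vector
        return (uint32_t)(total * 8);
    }
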
diff --git a/contrib/libs/hyperscan/src/util/multibit_build.h b/contrib/libs/hyperscan/src/util/multibit_build.h
index 72820387fa..24f1bb55b0 100644
--- a/contrib/libs/hyperscan/src/util/multibit_build.h
+++ b/contrib/libs/hyperscan/src/util/multibit_build.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -35,41 +35,41 @@
#include "hs_common.h"
#include "multibit_internal.h"
-#include "hash.h"
+#include "hash.h"
#include <vector>
-inline
-bool operator==(const mmbit_sparse_iter &a, const mmbit_sparse_iter &b) {
- return a.mask == b.mask && a.val == b.val;
+inline
+bool operator==(const mmbit_sparse_iter &a, const mmbit_sparse_iter &b) {
+ return a.mask == b.mask && a.val == b.val;
}
-namespace std {
-
-template<>
-struct hash<mmbit_sparse_iter> {
- size_t operator()(const mmbit_sparse_iter &iter) const {
- return ue2::hash_all(iter.mask, iter.val);
- }
-};
-
-} // namespace std
-
+namespace std {
+
+template<>
+struct hash<mmbit_sparse_iter> {
+ size_t operator()(const mmbit_sparse_iter &iter) const {
+ return ue2::hash_all(iter.mask, iter.val);
+ }
+};
+
+} // namespace std
+
namespace ue2 {
-/**
- * \brief Return the size in bytes of a multibit that can store the given
- * number of bits.
- *
- * This will throw a resource limit assertion if the requested mmbit is too
- * large.
- */
-u32 mmbit_size(u32 total_bits);
-
+/**
+ * \brief Return the size in bytes of a multibit that can store the given
+ * number of bits.
+ *
+ * This will throw a resource limit assertion if the requested mmbit is too
+ * large.
+ */
+u32 mmbit_size(u32 total_bits);
+
/** \brief Construct a sparse iterator over the values in \a bits for a
* multibit of size \a total_bits. */
-std::vector<mmbit_sparse_iter>
-mmbBuildSparseIterator(const std::vector<u32> &bits, u32 total_bits);
+std::vector<mmbit_sparse_iter>
+mmbBuildSparseIterator(const std::vector<u32> &bits, u32 total_bits);
struct scatter_plan_raw;
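
Specialising std::hash for mmbit_sparse_iter lets iterator records participate in unordered containers and, via the container overload in util/hash.h, lets whole record vectors be hashed in one call. A hedged sketch (the helper is illustrative):

    #include <vector>

    // Signature of a sparse iterator: hash_all() folds the vector
    // element-wise, each record going through the std::hash above.
    static size_t iter_signature(const std::vector<mmbit_sparse_iter> &it) {
        return ue2::hash_all(it);
    }
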
diff --git a/contrib/libs/hyperscan/src/util/multibit_compress.h b/contrib/libs/hyperscan/src/util/multibit_compress.h
index 4844788f50..e7b4fd8e86 100644
--- a/contrib/libs/hyperscan/src/util/multibit_compress.h
+++ b/contrib/libs/hyperscan/src/util/multibit_compress.h
@@ -1,204 +1,204 @@
-/*
- * Copyright (c) 2017, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** file
- * \brief multibit compression API: compress / decompress / size
- */
-
-#ifndef MULTIBIT_COMPRESS_H
-#define MULTIBIT_COMPRESS_H
-
-#include "multibit.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/** \brief size API. */
-static really_inline
-size_t mmbit_compsize(const u8 *bits, u32 total_bits) {
- // Deal with flat model.
- if (total_bits <= MMB_FLAT_MAX_BITS) {
- return (ROUNDUP_N(total_bits, 8) / 8);
- }
- // Deal with all cleared mmb.
- if (mmb_load(bits) == 0) {
- return sizeof(MMB_TYPE);
- }
- // Deal with normal pyramid mmb.
- const u32 max_level = mmbit_maxlevel(total_bits);
- u32 level = 0;
- u32 key = 0;
- u32 key_rem = 0;
- u32 num_block = 0;
- // Iteration-version of DFS
- while (1) {
- if (key_rem < MMB_KEY_BITS) {
- const u8 *block_ptr = mmbit_get_level_root_const(bits, level) +
- key * sizeof(MMB_TYPE);
- MMB_TYPE block = mmb_load(block_ptr);
- MMB_TYPE block_1 = block & ~mmb_mask_zero_to_nocheck(key_rem);
- if (mmb_popcount(block) == mmb_popcount(block_1)) {
- num_block++;
- }
- if (level < max_level && block_1) {
- key = (key << MMB_KEY_SHIFT) + mmb_ctz(block_1);
- key_rem = 0;
- level++;
- continue;
- }
- }
- if (level-- == 0) {
- return sizeof(MMB_TYPE) * num_block;
- }
- key_rem = (key & MMB_KEY_MASK) + 1;
- key >>= MMB_KEY_SHIFT;
- }
-}
-
-/** \brief compress API. */
-static really_inline
-char mmbit_compress(const u8 *bits, u32 total_bits, u8 *comp,
- size_t *comp_space, size_t max_comp_space) {
- UNUSED u8 *comp_init = comp;
- // Compute comp_size first.
- size_t comp_size = mmbit_compsize(bits, total_bits);
- // Check whether out of writable range.
- if (comp_size > max_comp_space) {
- return 0;
- }
- *comp_space = comp_size; // Return comp_size outside.
- // Deal with flat model.
- if (total_bits <= MMB_FLAT_MAX_BITS) {
- memcpy(comp, bits, comp_size);
- return 1;
- }
- // Deal with all cleared mmb.
- if (mmb_load(bits) == 0) {
- memcpy(comp, bits, sizeof(MMB_TYPE));
- return 1;
- }
- // Deal with normal pyramid mmb.
- const u32 max_level = mmbit_maxlevel(total_bits);
- u32 level = 0;
- u32 key = 0;
- u32 key_rem = 0;
- // Iteration-version of DFS
- while (1) {
- if (key_rem < MMB_KEY_BITS) {
- const u8 *block_ptr = mmbit_get_level_root_const(bits, level) +
- key * sizeof(MMB_TYPE);
- MMB_TYPE block = mmb_load(block_ptr);
- MMB_TYPE block_1 = block & ~mmb_mask_zero_to_nocheck(key_rem);
- if (mmb_popcount(block) == mmb_popcount(block_1)) {
- memcpy(comp, &block, sizeof(MMB_TYPE));
- comp += sizeof(MMB_TYPE);
- }
- if (level < max_level && block_1) {
- key = (key << MMB_KEY_SHIFT) + mmb_ctz(block_1);
- key_rem = 0;
- level++;
- continue;
- }
- }
- if (level-- == 0) {
- break;
- }
- key_rem = (key & MMB_KEY_MASK) + 1;
- key >>= MMB_KEY_SHIFT;
- }
- assert((u32)(comp - comp_init) == comp_size);
- return 1;
-}
-
-/** \brief decompress API. */
-static really_inline
-char mmbit_decompress(u8 *bits, u32 total_bits, const u8 *comp,
- size_t *comp_space, size_t max_comp_space) {
- UNUSED const u8 *comp_init = comp;
- size_t comp_size;
- // Deal with flat model.
- if (total_bits <= MMB_FLAT_MAX_BITS) {
- comp_size = ROUNDUP_N(total_bits, 8) / 8;
- memcpy(bits, comp, comp_size);
- *comp_space = comp_size;
- return 1;
- }
- // Deal with all cleared mmb.
- if (mmb_load(comp) == 0) {
- comp_size = sizeof(MMB_TYPE);
- memcpy(bits, comp, comp_size);
- *comp_space = comp_size;
- return 1;
- }
- // Deal with normal mmb.
- u32 max_level = mmbit_maxlevel(total_bits);
- u32 level = 0;
- u32 key = 0;
- u32 key_rem = 0;
- UNUSED const u8 *comp_end = comp_init + max_comp_space;
- // Iteration-version of DFS
- memcpy(bits, comp, sizeof(MMB_TYPE)); // Copy root block first.
- comp += sizeof(MMB_TYPE);
- while (1) {
- if (key_rem < MMB_KEY_BITS) {
- u8 *block_ptr = mmbit_get_level_root(bits, level) +
- key * sizeof(MMB_TYPE);
- MMB_TYPE block = mmb_load(block_ptr);
- MMB_TYPE block_1 = block & ~mmb_mask_zero_to_nocheck(key_rem);
- if (level < max_level && block_1) {
- key = (key << MMB_KEY_SHIFT) + mmb_ctz(block_1);
- u8 *block_ptr_1 = mmbit_get_level_root(bits, level + 1) +
- key * sizeof(MMB_TYPE);
- memcpy(block_ptr_1, comp, sizeof(MMB_TYPE));
- comp += sizeof(MMB_TYPE);
- if (comp > comp_end) {
- return 0; // Out of buffer.
- }
- key_rem = 0;
- level++;
- continue;
- }
- }
- if (level-- == 0) {
- break;
- }
- key_rem = (key & MMB_KEY_MASK) + 1;
- key >>= MMB_KEY_SHIFT;
- }
- comp_size = (u32)(comp - comp_init);
- *comp_space = comp_size;
- return 1;
-}
-
-#ifdef __cplusplus
-} // extern "C"
-#endif
-
-#endif // MULTBIT_COMPRESS_H
-
+/*
+ * Copyright (c) 2017, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * \brief multibit compression API: compress / decompress / size
+ */
+
+#ifndef MULTIBIT_COMPRESS_H
+#define MULTIBIT_COMPRESS_H
+
+#include "multibit.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** \brief Size API: returns the compressed size of \a bits in bytes. */
+static really_inline
+size_t mmbit_compsize(const u8 *bits, u32 total_bits) {
+ // Deal with flat model.
+ if (total_bits <= MMB_FLAT_MAX_BITS) {
+ return (ROUNDUP_N(total_bits, 8) / 8);
+ }
+ // Deal with all cleared mmb.
+ if (mmb_load(bits) == 0) {
+ return sizeof(MMB_TYPE);
+ }
+ // Deal with normal pyramid mmb.
+ const u32 max_level = mmbit_maxlevel(total_bits);
+ u32 level = 0;
+ u32 key = 0;
+ u32 key_rem = 0;
+ u32 num_block = 0;
+ // Iterative version of DFS.
+ while (1) {
+ if (key_rem < MMB_KEY_BITS) {
+ const u8 *block_ptr = mmbit_get_level_root_const(bits, level) +
+ key * sizeof(MMB_TYPE);
+ MMB_TYPE block = mmb_load(block_ptr);
+ MMB_TYPE block_1 = block & ~mmb_mask_zero_to_nocheck(key_rem);
+ if (mmb_popcount(block) == mmb_popcount(block_1)) {
+ num_block++;
+ }
+ if (level < max_level && block_1) {
+ key = (key << MMB_KEY_SHIFT) + mmb_ctz(block_1);
+ key_rem = 0;
+ level++;
+ continue;
+ }
+ }
+ if (level-- == 0) {
+ return sizeof(MMB_TYPE) * num_block;
+ }
+ key_rem = (key & MMB_KEY_MASK) + 1;
+ key >>= MMB_KEY_SHIFT;
+ }
+}
+
+/** \brief Compress API: writes the compressed form of \a bits into \a comp. */
+static really_inline
+char mmbit_compress(const u8 *bits, u32 total_bits, u8 *comp,
+ size_t *comp_space, size_t max_comp_space) {
+ UNUSED u8 *comp_init = comp;
+ // Compute comp_size first.
+ size_t comp_size = mmbit_compsize(bits, total_bits);
+ // Check whether the compressed form fits in the writable range.
+ if (comp_size > max_comp_space) {
+ return 0;
+ }
+ *comp_space = comp_size; // Report comp_size to the caller.
+ // Deal with flat model.
+ if (total_bits <= MMB_FLAT_MAX_BITS) {
+ memcpy(comp, bits, comp_size);
+ return 1;
+ }
+ // Deal with all cleared mmb.
+ if (mmb_load(bits) == 0) {
+ memcpy(comp, bits, sizeof(MMB_TYPE));
+ return 1;
+ }
+ // Deal with normal pyramid mmb.
+ const u32 max_level = mmbit_maxlevel(total_bits);
+ u32 level = 0;
+ u32 key = 0;
+ u32 key_rem = 0;
+ // Iterative version of DFS.
+ while (1) {
+ if (key_rem < MMB_KEY_BITS) {
+ const u8 *block_ptr = mmbit_get_level_root_const(bits, level) +
+ key * sizeof(MMB_TYPE);
+ MMB_TYPE block = mmb_load(block_ptr);
+ MMB_TYPE block_1 = block & ~mmb_mask_zero_to_nocheck(key_rem);
+ if (mmb_popcount(block) == mmb_popcount(block_1)) {
+ memcpy(comp, &block, sizeof(MMB_TYPE));
+ comp += sizeof(MMB_TYPE);
+ }
+ if (level < max_level && block_1) {
+ key = (key << MMB_KEY_SHIFT) + mmb_ctz(block_1);
+ key_rem = 0;
+ level++;
+ continue;
+ }
+ }
+ if (level-- == 0) {
+ break;
+ }
+ key_rem = (key & MMB_KEY_MASK) + 1;
+ key >>= MMB_KEY_SHIFT;
+ }
+ assert((u32)(comp - comp_init) == comp_size);
+ return 1;
+}
+
+/** \brief Decompress API: reconstructs \a bits from \a comp. */
+static really_inline
+char mmbit_decompress(u8 *bits, u32 total_bits, const u8 *comp,
+ size_t *comp_space, size_t max_comp_space) {
+ UNUSED const u8 *comp_init = comp;
+ size_t comp_size;
+ // Deal with flat model.
+ if (total_bits <= MMB_FLAT_MAX_BITS) {
+ comp_size = ROUNDUP_N(total_bits, 8) / 8;
+ memcpy(bits, comp, comp_size);
+ *comp_space = comp_size;
+ return 1;
+ }
+ // Deal with all cleared mmb.
+ if (mmb_load(comp) == 0) {
+ comp_size = sizeof(MMB_TYPE);
+ memcpy(bits, comp, comp_size);
+ *comp_space = comp_size;
+ return 1;
+ }
+ // Deal with normal mmb.
+ u32 max_level = mmbit_maxlevel(total_bits);
+ u32 level = 0;
+ u32 key = 0;
+ u32 key_rem = 0;
+ UNUSED const u8 *comp_end = comp_init + max_comp_space;
+ // Iterative version of DFS.
+ memcpy(bits, comp, sizeof(MMB_TYPE)); // Copy root block first.
+ comp += sizeof(MMB_TYPE);
+ while (1) {
+ if (key_rem < MMB_KEY_BITS) {
+ u8 *block_ptr = mmbit_get_level_root(bits, level) +
+ key * sizeof(MMB_TYPE);
+ MMB_TYPE block = mmb_load(block_ptr);
+ MMB_TYPE block_1 = block & ~mmb_mask_zero_to_nocheck(key_rem);
+ if (level < max_level && block_1) {
+ key = (key << MMB_KEY_SHIFT) + mmb_ctz(block_1);
+ u8 *block_ptr_1 = mmbit_get_level_root(bits, level + 1) +
+ key * sizeof(MMB_TYPE);
+ memcpy(block_ptr_1, comp, sizeof(MMB_TYPE));
+ comp += sizeof(MMB_TYPE);
+ if (comp > comp_end) {
+ return 0; // Out of buffer.
+ }
+ key_rem = 0;
+ level++;
+ continue;
+ }
+ }
+ if (level-- == 0) {
+ break;
+ }
+ key_rem = (key & MMB_KEY_MASK) + 1;
+ key >>= MMB_KEY_SHIFT;
+ }
+ comp_size = (u32)(comp - comp_init);
+ *comp_space = comp_size;
+ return 1;
+}
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // MULTIBIT_COMPRESS_H
+
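
A sketch of the round trip this API is meant to support, assuming callers size the scratch buffer generously or via mmbit_compsize (names and error handling are illustrative). Both calls report the number of compressed bytes through their comp_space out-parameter:

    #include "util/multibit_compress.h"

    static char roundtrip(const u8 *bits, u32 total_bits, u8 *scratch,
                          size_t scratch_len, u8 *out_bits) {
        size_t used = 0;
        if (!mmbit_compress(bits, total_bits, scratch, &used, scratch_len)) {
            return 0; // compressed form would not fit in scratch
        }
        size_t consumed = 0;
        return mmbit_decompress(out_bits, total_bits, scratch, &consumed,
                                scratch_len); // consumed == used on success
    }
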
diff --git a/contrib/libs/hyperscan/src/util/multibit_internal.h b/contrib/libs/hyperscan/src/util/multibit_internal.h
index ebe53f958b..350f3bfd47 100644
--- a/contrib/libs/hyperscan/src/util/multibit_internal.h
+++ b/contrib/libs/hyperscan/src/util/multibit_internal.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2016, Intel Corporation
+ * Copyright (c) 2015-2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -47,9 +47,9 @@ extern "C" {
typedef u64a MMB_TYPE; /**< Basic block type for mmbit operations. */
#define MMB_MAX_LEVEL 6 /**< Maximum level in the mmbit pyramid. */
-/** \brief Maximum number of keys (bits) in a multibit. */
-#define MMB_MAX_BITS (1U << 31)
-
+/** \brief Maximum number of keys (bits) in a multibit. */
+#define MMB_MAX_BITS (1U << 31)
+
/** \brief Sparse iterator record type.
*
* A sparse iterator is a tree of these records, where val identifies the
diff --git a/contrib/libs/hyperscan/src/util/noncopyable.h b/contrib/libs/hyperscan/src/util/noncopyable.h
index 63128ef55e..cd4f2e0261 100644
--- a/contrib/libs/hyperscan/src/util/noncopyable.h
+++ b/contrib/libs/hyperscan/src/util/noncopyable.h
@@ -1,50 +1,50 @@
-/*
- * Copyright (c) 2017, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * \file
- * \brief Class that makes derived classes non-copyable.
- */
-
-#ifndef UTIL_NONCOPYABLE_H
-#define UTIL_NONCOPYABLE_H
-
-namespace ue2 {
-
-/** \brief Class that makes derived classes non-copyable. */
-struct noncopyable {
- noncopyable() = default;
- noncopyable(const noncopyable &) = delete;
- noncopyable(noncopyable &&) = default;
- noncopyable &operator=(const noncopyable &) = delete;
- noncopyable &operator=(noncopyable &&) = default;
-};
-
-} // namespace ue2
-
-#endif // UTIL_NONCOPYABLE_H
+/*
+ * Copyright (c) 2017, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * \file
+ * \brief Class that makes derived classes non-copyable.
+ */
+
+#ifndef UTIL_NONCOPYABLE_H
+#define UTIL_NONCOPYABLE_H
+
+namespace ue2 {
+
+/** \brief Class that makes derived classes non-copyable. */
+struct noncopyable {
+ noncopyable() = default;
+ noncopyable(const noncopyable &) = delete;
+ noncopyable(noncopyable &&) = default;
+ noncopyable &operator=(const noncopyable &) = delete;
+ noncopyable &operator=(noncopyable &&) = default;
+};
+
+} // namespace ue2
+
+#endif // UTIL_NONCOPYABLE_H
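
A brief usage sketch (Session is a hypothetical client type): deriving from noncopyable deletes the copy operations while leaving the defaulted moves intact, so such types still work with move-aware containers.

    #include "util/noncopyable.h"
    #include <utility>

    class Session : ue2::noncopyable {
    public:
        Session() = default;
    };

    // Session a; Session b(a);            // error: copy ctor is deleted
    // Session c; Session d(std::move(c)); // fine: moves remain defaulted
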
diff --git a/contrib/libs/hyperscan/src/util/operators.h b/contrib/libs/hyperscan/src/util/operators.h
index 2da8efea8a..b0a1c1cca2 100644
--- a/contrib/libs/hyperscan/src/util/operators.h
+++ b/contrib/libs/hyperscan/src/util/operators.h
@@ -1,60 +1,60 @@
-/*
- * Copyright (c) 2017, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * \brief Ordered operators: provides all the other compare operators for types
- * that provide equal and less-than.
- *
- * This is similar to Boost's totally_ordered class, but much simpler and
- * without injecting the boost namespace into ADL lookup.
- */
-
-#ifndef UTIL_OPERATORS_H
-#define UTIL_OPERATORS_H
-
-namespace ue2 {
-
-/**
- * \brief Ordered operators: provides all the other compare operators for types
- * that provide equal and less-than.
- *
- * Simply inherit from this class with your class name as its template
- * parameter.
- */
-template<typename T>
-class totally_ordered {
-public:
- friend bool operator!=(const T &a, const T &b) { return !(a == b); }
- friend bool operator<=(const T &a, const T &b) { return !(b < a); }
- friend bool operator>(const T &a, const T &b) { return b < a; }
- friend bool operator>=(const T &a, const T &b) { return !(a < b); }
-};
-
-} // namespace
-
-#endif // UTIL_OPERATORS_H
+/*
+ * Copyright (c) 2017, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * \brief Ordered operators: provides all the other compare operators for types
+ * that provide equal and less-than.
+ *
+ * This is similar to Boost's totally_ordered class, but much simpler and
+ * without injecting the boost namespace into ADL lookup.
+ */
+
+#ifndef UTIL_OPERATORS_H
+#define UTIL_OPERATORS_H
+
+namespace ue2 {
+
+/**
+ * \brief Ordered operators: provides all the other compare operators for types
+ * that provide equal and less-than.
+ *
+ * Simply inherit from this class with your class name as its template
+ * parameter.
+ */
+template<typename T>
+class totally_ordered {
+public:
+ friend bool operator!=(const T &a, const T &b) { return !(a == b); }
+ friend bool operator<=(const T &a, const T &b) { return !(b < a); }
+ friend bool operator>(const T &a, const T &b) { return b < a; }
+ friend bool operator>=(const T &a, const T &b) { return !(a < b); }
+};
+
+} // namespace ue2
+
+#endif // UTIL_OPERATORS_H
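
A usage sketch (Version is a hypothetical client type): supply == and < and the template injects the remaining four comparisons as ADL-found friends.

    #include "util/operators.h"

    struct Version : ue2::totally_ordered<Version> {
        unsigned series = 0, rev = 0;
        friend bool operator==(const Version &a, const Version &b) {
            return a.series == b.series && a.rev == b.rev;
        }
        friend bool operator<(const Version &a, const Version &b) {
            return a.series != b.series ? a.series < b.series
                                        : a.rev < b.rev;
        }
    };

    static bool at_least(const Version &have, const Version &need) {
        return have >= need; // synthesised by totally_ordered as !(have < need)
    }
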
diff --git a/contrib/libs/hyperscan/src/util/partitioned_set.h b/contrib/libs/hyperscan/src/util/partitioned_set.h
index 89e4a38ff6..8a4d3dd9e1 100644
--- a/contrib/libs/hyperscan/src/util/partitioned_set.h
+++ b/contrib/libs/hyperscan/src/util/partitioned_set.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -30,8 +30,8 @@
#define PARTITIONED_SET_H
#include "container.h"
-#include "noncopyable.h"
-#include "flat_containers.h"
+#include "noncopyable.h"
+#include "flat_containers.h"
#include "ue2common.h"
#include <algorithm>
@@ -53,7 +53,7 @@ static constexpr size_t INVALID_SUBSET = ~(size_t)0;
*/
template<typename T>
-class partitioned_set : noncopyable {
+class partitioned_set : noncopyable {
public:
class subset {
public:
@@ -98,7 +98,7 @@ public:
* If the set was not split (due to there being no overlap with splitter or
* being a complete subset), INVALID_SUBSET is returned.
*/
- size_t split(size_t subset_index, const flat_set<T> &splitter) {
+ size_t split(size_t subset_index, const flat_set<T> &splitter) {
assert(!splitter.empty());
if (splitter.empty()) {
return INVALID_SUBSET;
@@ -128,10 +128,10 @@ public:
}
for (auto it = orig.members.begin(); it != orig.members.end(); ++it) {
- const auto &member = *it;
+ const auto &member = *it;
assert(member < member_to_subset.size());
- sp_it = std::lower_bound(sp_it, sp_e, member);
+ sp_it = std::lower_bound(sp_it, sp_e, member);
if (sp_it == sp_e) {
split_temp_diff.insert(split_temp_diff.end(), it,
orig.members.end());
@@ -190,7 +190,7 @@ public:
/**
* Returns all subsets which have a member in keys.
*/
- void find_overlapping(const flat_set<T> &keys,
+ void find_overlapping(const flat_set<T> &keys,
std::vector<size_t> *containing) const {
boost::dynamic_bitset<> seen(subsets.size()); // all zero by default.
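
A sketch of the refinement loop that find_overlapping and split support; the set's construction happens outside these hunks, so the function below assumes an already-populated partition:

    #include "util/partitioned_set.h"
    #include <vector>

    // Refine every subset that overlaps the splitter. split() returns
    // INVALID_SUBSET when a subset has no overlap or is wholly contained,
    // and in that case the partition is left unchanged.
    static void refine(ue2::partitioned_set<u32> &parts,
                       const ue2::flat_set<u32> &splitter) {
        std::vector<size_t> touched;
        parts.find_overlapping(splitter, &touched);
        for (size_t idx : touched) {
            parts.split(idx, splitter);
        }
    }
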
diff --git a/contrib/libs/hyperscan/src/util/popcount.h b/contrib/libs/hyperscan/src/util/popcount.h
index 9ef0897c67..eb08f6b1b2 100644
--- a/contrib/libs/hyperscan/src/util/popcount.h
+++ b/contrib/libs/hyperscan/src/util/popcount.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -30,19 +30,19 @@
* \brief Platform specific popcount functions
*/
-#ifndef UTIL_POPCOUNT_H_
-#define UTIL_POPCOUNT_H_
+#ifndef UTIL_POPCOUNT_H_
+#define UTIL_POPCOUNT_H_
#include "ue2common.h"
-#include "util/arch.h"
+#include "util/arch.h"
static really_inline
u32 popcount32(u32 x) {
#if defined(HAVE_POPCOUNT_INSTR)
// Single-instruction builtin.
- return _mm_popcnt_u32(x);
+ return _mm_popcnt_u32(x);
#else
- // Fast branch-free version from bit-twiddling hacks as older Intel
+ // Fast branch-free version from bit-twiddling hacks as older Intel
// processors do not have a POPCNT instruction.
x -= (x >> 1) & 0x55555555;
x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
@@ -52,23 +52,23 @@ u32 popcount32(u32 x) {
static really_inline
u32 popcount64(u64a x) {
-#if defined(ARCH_X86_64)
-# if defined(HAVE_POPCOUNT_INSTR)
+#if defined(ARCH_X86_64)
+# if defined(HAVE_POPCOUNT_INSTR)
// Single-instruction builtin.
- return (u32)_mm_popcnt_u64(x);
-# else
- // Fast branch-free version from bit-twiddling hacks as older Intel
+ return (u32)_mm_popcnt_u64(x);
+# else
+ // Fast branch-free version from bit-twiddling hacks as older Intel
// processors do not have a POPCNT instruction.
x -= (x >> 1) & 0x5555555555555555;
x = (x & 0x3333333333333333) + ((x >> 2) & 0x3333333333333333);
x = (x + (x >> 4)) & 0x0f0f0f0f0f0f0f0f;
return (x * 0x0101010101010101) >> 56;
-# endif
+# endif
#else
// Synthesise from two 32-bit cases.
return popcount32(x >> 32) + popcount32(x);
#endif
}
-#endif /* UTIL_POPCOUNT_H_ */
+#endif /* UTIL_POPCOUNT_H_ */
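
A worked example of the two entry points, assuming the fallback (no-POPCNT) path: each step of the bit-twiddling version folds neighbouring 2-bit, 4-bit, and byte counts, so for 0xF0F0F0F0 the result is 16.

    #include "util/popcount.h"

    static u32 popcount_demo(void) {
        // 0xF0F0F0F0 has four set bits in each of its four bytes.
        u32 ones = popcount32(0xF0F0F0F0u);   // 16
        u64a wide = 0xF0F0F0F0F0F0F0F0ULL;
        return ones + popcount64(wide);       // 16 + 32
    }
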
diff --git a/contrib/libs/hyperscan/src/util/queue_index_factory.h b/contrib/libs/hyperscan/src/util/queue_index_factory.h
index a9bc828289..e8f7028ec5 100644
--- a/contrib/libs/hyperscan/src/util/queue_index_factory.h
+++ b/contrib/libs/hyperscan/src/util/queue_index_factory.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -33,11 +33,11 @@
#define UTIL_QUEUE_INDEX_FACTORY_H
#include "ue2common.h"
-#include "util/noncopyable.h"
+#include "util/noncopyable.h"
namespace ue2 {
-class QueueIndexFactory : noncopyable {
+class QueueIndexFactory : noncopyable {
public:
QueueIndexFactory() : val(0) {}
u32 get_queue() { return val++; }
diff --git a/contrib/libs/hyperscan/src/util/report.h b/contrib/libs/hyperscan/src/util/report.h
index 233f705b03..ee830d0f10 100644
--- a/contrib/libs/hyperscan/src/util/report.h
+++ b/contrib/libs/hyperscan/src/util/report.h
@@ -35,10 +35,10 @@
#define UTIL_REPORT_H
#include "ue2common.h"
-#include "util/exhaust.h" // for INVALID_EKEY
+#include "util/exhaust.h" // for INVALID_EKEY
#include "util/logical.h" // for INVALID_LKEY
-#include "util/hash.h"
-#include "util/order_check.h"
+#include "util/hash.h"
+#include "util/order_check.h"
#include <cassert>
@@ -46,39 +46,39 @@ namespace ue2 {
class ReportManager;
-enum ReportType {
- EXTERNAL_CALLBACK,
- EXTERNAL_CALLBACK_SOM_REL,
- INTERNAL_SOM_LOC_SET,
- INTERNAL_SOM_LOC_SET_IF_UNSET,
- INTERNAL_SOM_LOC_SET_IF_WRITABLE,
- INTERNAL_SOM_LOC_SET_SOM_REV_NFA,
- INTERNAL_SOM_LOC_SET_SOM_REV_NFA_IF_UNSET,
- INTERNAL_SOM_LOC_SET_SOM_REV_NFA_IF_WRITABLE,
- INTERNAL_SOM_LOC_COPY,
- INTERNAL_SOM_LOC_COPY_IF_WRITABLE,
- INTERNAL_SOM_LOC_MAKE_WRITABLE,
- EXTERNAL_CALLBACK_SOM_STORED,
- EXTERNAL_CALLBACK_SOM_ABS,
- EXTERNAL_CALLBACK_SOM_REV_NFA,
- INTERNAL_SOM_LOC_SET_FROM,
- INTERNAL_SOM_LOC_SET_FROM_IF_WRITABLE,
- INTERNAL_ROSE_CHAIN,
- EXTERNAL_CALLBACK_SOM_PASS
-};
-
+enum ReportType {
+ EXTERNAL_CALLBACK,
+ EXTERNAL_CALLBACK_SOM_REL,
+ INTERNAL_SOM_LOC_SET,
+ INTERNAL_SOM_LOC_SET_IF_UNSET,
+ INTERNAL_SOM_LOC_SET_IF_WRITABLE,
+ INTERNAL_SOM_LOC_SET_SOM_REV_NFA,
+ INTERNAL_SOM_LOC_SET_SOM_REV_NFA_IF_UNSET,
+ INTERNAL_SOM_LOC_SET_SOM_REV_NFA_IF_WRITABLE,
+ INTERNAL_SOM_LOC_COPY,
+ INTERNAL_SOM_LOC_COPY_IF_WRITABLE,
+ INTERNAL_SOM_LOC_MAKE_WRITABLE,
+ EXTERNAL_CALLBACK_SOM_STORED,
+ EXTERNAL_CALLBACK_SOM_ABS,
+ EXTERNAL_CALLBACK_SOM_REV_NFA,
+ INTERNAL_SOM_LOC_SET_FROM,
+ INTERNAL_SOM_LOC_SET_FROM_IF_WRITABLE,
+ INTERNAL_ROSE_CHAIN,
+ EXTERNAL_CALLBACK_SOM_PASS
+};
+
/**
* \brief All the data we use for handling a match.
*
* Includes extparam constraints and bounds, exhaustion/dedupe keys, offset
* adjustment and SOM information.
*
- * The data in this structure eventually becomes a list of Rose programs
- * instructions.
+ * The data in this structure eventually becomes a list of Rose program
+ * instructions.
*/
struct Report {
- Report(ReportType type_in, u32 onmatch_in)
- : type(type_in), onmatch(onmatch_in) {}
+ Report(ReportType type_in, u32 onmatch_in)
+ : type(type_in), onmatch(onmatch_in) {}
/** \brief True if this report has bounds from extended parameters, i.e.
* min offset, max offset, min length. */
@@ -86,8 +86,8 @@ struct Report {
return minOffset > 0 || maxOffset < MAX_OFFSET || minLength > 0;
}
- /** \brief Type of this report. */
- ReportType type;
+ /** \brief Type of this report. */
+ ReportType type;
/** \brief use SOM for minLength, but don't report it to user callback. */
bool quashSom = false;
@@ -177,7 +177,7 @@ bool isExternalReport(const Report &r) {
case EXTERNAL_CALLBACK_SOM_STORED:
case EXTERNAL_CALLBACK_SOM_ABS:
case EXTERNAL_CALLBACK_SOM_REV_NFA:
- case EXTERNAL_CALLBACK_SOM_PASS:
+ case EXTERNAL_CALLBACK_SOM_PASS:
return true;
default:
break; // fall through
@@ -187,11 +187,11 @@ bool isExternalReport(const Report &r) {
}
static inline
-bool isExternalSomReport(const Report &r) {
- return r.type != EXTERNAL_CALLBACK && isExternalReport(r);
-}
-
-static inline
+bool isExternalSomReport(const Report &r) {
+ return r.type != EXTERNAL_CALLBACK && isExternalReport(r);
+}
+
+static inline
bool operator<(const Report &a, const Report &b) {
ORDER_CHECK(type);
ORDER_CHECK(quashSom);
@@ -207,16 +207,16 @@ bool operator<(const Report &a, const Report &b) {
return false;
}
-inline
-bool operator==(const Report &a, const Report &b) {
- return a.type == b.type && a.quashSom == b.quashSom &&
- a.minOffset == b.minOffset && a.maxOffset == b.maxOffset &&
- a.minLength == b.minLength && a.ekey == b.ekey &&
- a.offsetAdjust == b.offsetAdjust && a.onmatch == b.onmatch &&
- a.revNfaIndex == b.revNfaIndex && a.somDistance == b.somDistance &&
- a.topSquashDistance == b.topSquashDistance;
-}
-
+inline
+bool operator==(const Report &a, const Report &b) {
+ return a.type == b.type && a.quashSom == b.quashSom &&
+ a.minOffset == b.minOffset && a.maxOffset == b.maxOffset &&
+ a.minLength == b.minLength && a.ekey == b.ekey &&
+ a.offsetAdjust == b.offsetAdjust && a.onmatch == b.onmatch &&
+ a.revNfaIndex == b.revNfaIndex && a.somDistance == b.somDistance &&
+ a.topSquashDistance == b.topSquashDistance;
+}
+
static inline
Report makeECallback(u32 report, s32 offsetAdjust, u32 ekey, bool quiet) {
Report ir(EXTERNAL_CALLBACK, report);
@@ -241,7 +241,7 @@ Report makeSomRelativeCallback(u32 report, s32 offsetAdjust, u64a distance) {
}
static inline
-Report makeMpvTrigger(u32 event, u64a squashDistance) {
+Report makeMpvTrigger(u32 event, u64a squashDistance) {
Report ir(INTERNAL_ROSE_CHAIN, event);
ir.ekey = INVALID_EKEY;
ir.topSquashDistance = squashDistance;
@@ -267,19 +267,19 @@ bool isSimpleExhaustible(const Report &ir) {
return true;
}
-} // namespace ue2
+} // namespace ue2
-namespace std {
+namespace std {
-template<>
-struct hash<ue2::Report> {
- std::size_t operator()(const ue2::Report &r) const {
- return ue2::hash_all(r.type, r.quashSom, r.minOffset, r.maxOffset,
- r.minLength, r.ekey, r.offsetAdjust, r.onmatch,
- r.revNfaIndex, r.somDistance, r.topSquashDistance);
- }
-};
+template<>
+struct hash<ue2::Report> {
+ std::size_t operator()(const ue2::Report &r) const {
+ return ue2::hash_all(r.type, r.quashSom, r.minOffset, r.maxOffset,
+ r.minLength, r.ekey, r.offsetAdjust, r.onmatch,
+ r.revNfaIndex, r.somDistance, r.topSquashDistance);
+ }
+};
-} // namespace std
+} // namespace std
#endif // UTIL_REPORT_H
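
A sketch of how the value semantics above are meant to be used: equality, ordering, and std::hash<ue2::Report> all fold the same field set, so Report can key ordered and unordered containers directly.

    #include "util/report.h"
    #include <unordered_map>

    static void remember(std::unordered_map<ue2::Report, size_t> &ids) {
        ue2::Report r(ue2::EXTERNAL_CALLBACK, 42); // 42: external match id
        ids.emplace(r, ids.size()); // uses the std::hash specialisation above
    }
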
diff --git a/contrib/libs/hyperscan/src/util/report_manager.cpp b/contrib/libs/hyperscan/src/util/report_manager.cpp
index c43c731775..78b9b73dfc 100644
--- a/contrib/libs/hyperscan/src/util/report_manager.cpp
+++ b/contrib/libs/hyperscan/src/util/report_manager.cpp
@@ -29,12 +29,12 @@
/** \file
* \brief ReportManager: tracks Report structures, exhaustion and dedupe keys.
*/
-
-#include "report_manager.h"
-
+
+#include "report_manager.h"
+
#include "grey.h"
#include "ue2common.h"
-#include "compiler/compiler.h"
+#include "compiler/compiler.h"
#include "nfagraph/ng.h"
#include "rose/rose_build.h"
#include "util/compile_error.h"
@@ -67,7 +67,7 @@ u32 ReportManager::getInternalId(const Report &ir) {
u32 size = reportIds.size();
reportIds.push_back(ir);
- reportIdToInternalMap.emplace(ir, size);
+ reportIdToInternalMap.emplace(ir, size);
DEBUG_PRINTF("new report %u\n", size);
return size;
}
@@ -170,7 +170,7 @@ vector<ReportID> ReportManager::getDkeyToReportTable() const {
void ReportManager::assignDkeys(const RoseBuild *rose) {
DEBUG_PRINTF("assigning...\n");
- map<u32, flat_set<ReportID>> ext_to_int;
+ map<u32, flat_set<ReportID>> ext_to_int;
for (u32 i = 0; i < reportIds.size(); i++) {
const Report &ir = reportIds[i];
@@ -211,9 +211,9 @@ u32 ReportManager::getDkey(const Report &r) const {
void ReportManager::registerExtReport(ReportID id,
const external_report_info &ext) {
- auto it = externalIdMap.find(id);
- if (it != externalIdMap.end()) {
- const external_report_info &eri = it->second;
+ auto it = externalIdMap.find(id);
+ if (it != externalIdMap.end()) {
+ const external_report_info &eri = it->second;
if (eri.highlander != ext.highlander) {
/* we have a problem */
ostringstream out;
@@ -242,35 +242,35 @@ void ReportManager::registerExtReport(ReportID id,
}
}
-Report ReportManager::getBasicInternalReport(const ExpressionInfo &expr,
- s32 adj) {
+Report ReportManager::getBasicInternalReport(const ExpressionInfo &expr,
+ s32 adj) {
/* validate that we are not violating highlander constraints, this will
* throw a CompileError if so. */
- registerExtReport(expr.report,
- external_report_info(expr.highlander, expr.index));
+ registerExtReport(expr.report,
+ external_report_info(expr.highlander, expr.index));
/* create the internal report */
u32 ekey = INVALID_EKEY;
- if (expr.highlander) {
+ if (expr.highlander) {
/* all patterns with the same report id share an ekey */
- ekey = getExhaustibleKey(expr.report);
+ ekey = getExhaustibleKey(expr.report);
}
return makeECallback(expr.report, adj, ekey, expr.quiet);
}
-void ReportManager::setProgramOffset(ReportID id, u32 programOffset) {
- assert(id < reportIds.size());
- assert(!contains(reportIdToProgramOffset, id));
- reportIdToProgramOffset.emplace(id, programOffset);
-}
-
-u32 ReportManager::getProgramOffset(ReportID id) const {
- assert(id < reportIds.size());
- assert(contains(reportIdToProgramOffset, id));
- return reportIdToProgramOffset.at(id);
-}
-
+void ReportManager::setProgramOffset(ReportID id, u32 programOffset) {
+ assert(id < reportIds.size());
+ assert(!contains(reportIdToProgramOffset, id));
+ reportIdToProgramOffset.emplace(id, programOffset);
+}
+
+u32 ReportManager::getProgramOffset(ReportID id) const {
+ assert(id < reportIds.size());
+ assert(contains(reportIdToProgramOffset, id));
+ return reportIdToProgramOffset.at(id);
+}
+
static
void ekeysUnion(std::set<u32> *ekeys, u32 more) {
if (!ekeys->empty()) {
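
A sketch of the dedupe behaviour getInternalId provides through reportIdToInternalMap (grey stands in for an already-configured ue2::Grey):

    #include "util/report_manager.h"
    #include <cassert>

    static void dedupe_demo(const ue2::Grey &grey) {
        ue2::ReportManager rm(grey);
        ue2::Report r(ue2::EXTERNAL_CALLBACK, 7);
        u32 first = rm.getInternalId(r);
        u32 second = rm.getInternalId(r); // == first; the map dedupes Reports
        assert(first == second);
    }
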
diff --git a/contrib/libs/hyperscan/src/util/report_manager.h b/contrib/libs/hyperscan/src/util/report_manager.h
index 0cbf4ac5d6..015dc9c855 100644
--- a/contrib/libs/hyperscan/src/util/report_manager.h
+++ b/contrib/libs/hyperscan/src/util/report_manager.h
@@ -36,20 +36,20 @@
#include "ue2common.h"
#include "util/compile_error.h"
-#include "util/noncopyable.h"
+#include "util/noncopyable.h"
#include "util/report.h"
#include "parser/logical_combination.h"
#include <map>
#include <set>
-#include <unordered_map>
+#include <unordered_map>
#include <vector>
namespace ue2 {
struct Grey;
class RoseBuild;
-class ExpressionInfo;
+class ExpressionInfo;
struct external_report_info {
external_report_info(bool h, u32 fpi)
@@ -59,7 +59,7 @@ struct external_report_info {
};
/** \brief Tracks Report structures, exhaustion and dedupe keys. */
-class ReportManager : noncopyable {
+class ReportManager : noncopyable {
public:
explicit ReportManager(const Grey &g);
@@ -103,13 +103,13 @@ public:
const std::vector<Report> &reports() const { return reportIds; }
/**
- * Get a simple internal report corresponding to the expression. An ekey
- * will be setup if required.
+ * Get a simple internal report corresponding to the expression. An ekey
+ * will be set up if required.
*
* Note: this function may throw a CompileError if constraints on external
* match id are violated (mixed highlander status for example).
*/
- Report getBasicInternalReport(const ExpressionInfo &expr, s32 adj = 0);
+ Report getBasicInternalReport(const ExpressionInfo &expr, s32 adj = 0);
/** \brief Register an external report and validate that we are not
* violating highlander constraints (which will cause an exception to be
@@ -137,14 +137,14 @@ public:
* ~0U if no dkey is needed. */
u32 getDkey(const Report &r) const;
- /** \brief Register a Rose program offset with the given report. */
- void setProgramOffset(ReportID id, u32 programOffset);
-
- /** \brief Fetch the program offset for a given report. It is a fatal error
- * for this to be called with a report for which no program offset has been
- * set. */
- u32 getProgramOffset(ReportID id) const;
-
+ /** \brief Register a Rose program offset with the given report. */
+ void setProgramOffset(ReportID id, u32 programOffset);
+
+ /** \brief Fetch the program offset for a given report. It is a fatal error
+ * for this to be called with a report for which no program offset has been
+ * set. */
+ u32 getProgramOffset(ReportID id) const;
+
/** \brief Parsed logical combination structure. */
ParsedLogical pl;
@@ -156,18 +156,18 @@ private:
std::vector<Report> reportIds;
/** \brief Mapping from Report to ID (inverse of \ref reportIds
- * vector). */
- std::unordered_map<Report, size_t> reportIdToInternalMap;
+ * vector). */
+ std::unordered_map<Report, size_t> reportIdToInternalMap;
/** \brief Mapping from ReportID to dedupe key. */
- std::unordered_map<ReportID, u32> reportIdToDedupeKey;
+ std::unordered_map<ReportID, u32> reportIdToDedupeKey;
+
+ /** \brief Mapping from ReportID to Rose program offset in bytecode. */
+ std::unordered_map<ReportID, u32> reportIdToProgramOffset;
- /** \brief Mapping from ReportID to Rose program offset in bytecode. */
- std::unordered_map<ReportID, u32> reportIdToProgramOffset;
-
/** \brief Mapping from external match ids to information about that
* id. */
- std::unordered_map<ReportID, external_report_info> externalIdMap;
+ std::unordered_map<ReportID, external_report_info> externalIdMap;
/** \brief Mapping from expression index to exhaustion key. */
std::map<s64a, u32> toExhaustibleKeyMap;
diff --git a/contrib/libs/hyperscan/src/util/simd_types.h b/contrib/libs/hyperscan/src/util/simd_types.h
index 4d15b25018..962cad6c97 100644
--- a/contrib/libs/hyperscan/src/util/simd_types.h
+++ b/contrib/libs/hyperscan/src/util/simd_types.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -30,28 +30,28 @@
#define SIMD_TYPES_H
#include "config.h"
-#include "util/arch.h"
-#include "util/intrinsics.h"
+#include "util/arch.h"
+#include "util/intrinsics.h"
#include "ue2common.h"
-#if defined(HAVE_SSE2)
-typedef __m128i m128;
+#if defined(HAVE_SSE2)
+typedef __m128i m128;
#else
-typedef struct ALIGN_DIRECTIVE {u64a hi; u64a lo;} m128;
+typedef struct ALIGN_DIRECTIVE {u64a hi; u64a lo;} m128;
#endif
-#if defined(HAVE_AVX2)
+#if defined(HAVE_AVX2)
typedef __m256i m256;
#else
typedef struct ALIGN_AVX_DIRECTIVE {m128 lo; m128 hi;} m256;
#endif
typedef struct {m128 lo; m128 mid; m128 hi;} m384;
-#if defined(HAVE_AVX512)
-typedef __m512i m512;
-#else
-typedef struct ALIGN_ATTR(64) {m256 lo; m256 hi;} m512;
-#endif
+#if defined(HAVE_AVX512)
+typedef __m512i m512;
+#else
+typedef struct ALIGN_ATTR(64) {m256 lo; m256 hi;} m512;
+#endif
#endif /* SIMD_TYPES_H */
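
Whichever branch is compiled, the widths are invariant: the fallback structs deliberately mirror the native vector sizes, which the following sketch checks.

    #include "util/simd_types.h"

    static_assert(sizeof(m128) == 16, "128-bit block");
    static_assert(sizeof(m256) == 32, "256-bit block");
    static_assert(sizeof(m384) == 48, "384-bit block");
    static_assert(sizeof(m512) == 64, "512-bit block");
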
diff --git a/contrib/libs/hyperscan/src/util/simd_utils.c b/contrib/libs/hyperscan/src/util/simd_utils.c
index 0b9371fac7..25a81412e1 100644
--- a/contrib/libs/hyperscan/src/util/simd_utils.c
+++ b/contrib/libs/hyperscan/src/util/simd_utils.c
@@ -1,62 +1,62 @@
-/*
- * Copyright (c) 2016-2017, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \file
- * \brief Lookup tables to support SIMD operations.
- */
-
-#include "simd_utils.h"
-
-ALIGN_CL_DIRECTIVE const char vbs_mask_data[] = {
- 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0,
- 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0,
-
- 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
- 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
-
- 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0,
- 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0,
-};
-
-#define ZEROES_8 0, 0, 0, 0, 0, 0, 0, 0
-#define ZEROES_31 ZEROES_8, ZEROES_8, ZEROES_8, 0, 0, 0, 0, 0, 0, 0
-#define ZEROES_32 ZEROES_8, ZEROES_8, ZEROES_8, ZEROES_8
-
-/** \brief LUT for the mask1bit functions. */
-ALIGN_CL_DIRECTIVE const u8 simd_onebit_masks[] = {
- ZEROES_32, ZEROES_32,
- ZEROES_31, 0x01, ZEROES_32,
- ZEROES_31, 0x02, ZEROES_32,
- ZEROES_31, 0x04, ZEROES_32,
- ZEROES_31, 0x08, ZEROES_32,
- ZEROES_31, 0x10, ZEROES_32,
- ZEROES_31, 0x20, ZEROES_32,
- ZEROES_31, 0x40, ZEROES_32,
- ZEROES_31, 0x80, ZEROES_32,
- ZEROES_32, ZEROES_32,
-};
+/*
+ * Copyright (c) 2016-2017, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * \brief Lookup tables to support SIMD operations.
+ */
+
+#include "simd_utils.h"
+
+ALIGN_CL_DIRECTIVE const char vbs_mask_data[] = {
+ 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0,
+ 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0,
+
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+
+ 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0,
+ 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0,
+};
+
+#define ZEROES_8 0, 0, 0, 0, 0, 0, 0, 0
+#define ZEROES_31 ZEROES_8, ZEROES_8, ZEROES_8, 0, 0, 0, 0, 0, 0, 0
+#define ZEROES_32 ZEROES_8, ZEROES_8, ZEROES_8, ZEROES_8
+
+/** \brief LUT for the mask1bit functions. */
+ALIGN_CL_DIRECTIVE const u8 simd_onebit_masks[] = {
+ ZEROES_32, ZEROES_32,
+ ZEROES_31, 0x01, ZEROES_32,
+ ZEROES_31, 0x02, ZEROES_32,
+ ZEROES_31, 0x04, ZEROES_32,
+ ZEROES_31, 0x08, ZEROES_32,
+ ZEROES_31, 0x10, ZEROES_32,
+ ZEROES_31, 0x20, ZEROES_32,
+ ZEROES_31, 0x40, ZEROES_32,
+ ZEROES_31, 0x80, ZEROES_32,
+ ZEROES_32, ZEROES_32,
+};
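
A sketch of why vbs_mask_data is laid out as 16 bytes of 0xf0, the identity permutation 0x00..0x0f, then 16 more bytes of 0xf0: variable_byte_shift_m128 (declared in simd_utils.h) loads a 16-byte window at offset 16 - amount, and PSHUFB zeroes any lane whose control byte has its top bit set (0xf0), so the window both moves and masks bytes in one instruction.

    #include "simd_utils.h"

    static m128 shift_demo(m128 v) {
        m128 up3 = variable_byte_shift_m128(v, 3);    // window f0 f0 f0 00..0c:
                                                      // byte i -> lane i + 3
        m128 down2 = variable_byte_shift_m128(v, -2); // window 02..0f f0 f0:
                                                      // byte i -> lane i - 2
        return or128(up3, down2);
    }
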
diff --git a/contrib/libs/hyperscan/src/util/simd_utils.h b/contrib/libs/hyperscan/src/util/simd_utils.h
index e21005c50f..d1f060b070 100644
--- a/contrib/libs/hyperscan/src/util/simd_utils.h
+++ b/contrib/libs/hyperscan/src/util/simd_utils.h
@@ -33,18 +33,18 @@
#ifndef SIMD_UTILS
#define SIMD_UTILS
-#if !defined(_WIN32) && !defined(__SSSE3__)
-#error SSSE3 instructions must be enabled
+#if !defined(_WIN32) && !defined(__SSSE3__)
+#error SSSE3 instructions must be enabled
#endif
-#include "config.h"
+#include "config.h"
#include "ue2common.h"
#include "simd_types.h"
-#include "unaligned.h"
-#include "util/arch.h"
-#include "util/intrinsics.h"
+#include "unaligned.h"
+#include "util/arch.h"
+#include "util/intrinsics.h"
-#include <string.h> // for memcpy
+#include <string.h> // for memcpy
// Define a common assume_aligned using an appropriate compiler built-in, if
// it's available. Note that we need to handle C or C++ compilation.
@@ -63,21 +63,21 @@
#define assume_aligned(x, y) (x)
#endif
-#ifdef __cplusplus
-extern "C" {
-#endif
-extern const char vbs_mask_data[];
-#ifdef __cplusplus
-}
-#endif
-
+#ifdef __cplusplus
+extern "C" {
+#endif
+extern const char vbs_mask_data[];
+#ifdef __cplusplus
+}
+#endif
+
static really_inline m128 ones128(void) {
-#if defined(__GNUC__) || defined(__INTEL_COMPILER)
- /* gcc gets this right */
- return _mm_set1_epi8(0xFF);
+#if defined(__GNUC__) || defined(__INTEL_COMPILER)
+ /* gcc gets this right */
+ return _mm_set1_epi8(0xFF);
#else
- /* trick from Intel's optimization guide to generate all-ones.
- * ICC converts this to the single cmpeq instruction */
+ /* trick from Intel's optimization guide to generate all-ones.
+ * ICC converts this to the single cmpeq instruction */
return _mm_cmpeq_epi8(_mm_setzero_si128(), _mm_setzero_si128());
#endif
}
@@ -114,7 +114,7 @@ static really_inline u32 diffrich128(m128 a, m128 b) {
* returns a 4-bit mask indicating which 64-bit words contain differences.
*/
static really_inline u32 diffrich64_128(m128 a, m128 b) {
-#if defined(HAVE_SSE41)
+#if defined(HAVE_SSE41)
a = _mm_cmpeq_epi64(a, b);
return ~(_mm_movemask_ps(_mm_castsi128_ps(a))) & 0x5;
#else
@@ -123,18 +123,18 @@ static really_inline u32 diffrich64_128(m128 a, m128 b) {
#endif
}
-static really_really_inline
-m128 lshift64_m128(m128 a, unsigned b) {
-#if defined(HAVE__BUILTIN_CONSTANT_P)
- if (__builtin_constant_p(b)) {
- return _mm_slli_epi64(a, b);
- }
-#endif
- m128 x = _mm_cvtsi32_si128(b);
- return _mm_sll_epi64(a, x);
+static really_really_inline
+m128 lshift64_m128(m128 a, unsigned b) {
+#if defined(HAVE__BUILTIN_CONSTANT_P)
+ if (__builtin_constant_p(b)) {
+ return _mm_slli_epi64(a, b);
+ }
+#endif
+ m128 x = _mm_cvtsi32_si128(b);
+ return _mm_sll_epi64(a, x);
}
-#define rshift64_m128(a, b) _mm_srli_epi64((a), (b))
+#define rshift64_m128(a, b) _mm_srli_epi64((a), (b))
#define eq128(a, b) _mm_cmpeq_epi8((a), (b))
#define movemask128(a) ((u32)_mm_movemask_epi8((a)))
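
A sketch of the dispatch in lshift64_m128 above: when the shift count is provably constant after inlining, the immediate-count instruction is used; otherwise the count is moved into a vector register first (n stands in for a runtime value).

    static m128 shl_demo(m128 a, unsigned n) {
        m128 by4 = lshift64_m128(a, 4); // constant: immediate-form psllq
        m128 byn = lshift64_m128(a, n); // runtime count: psllq with xmm count
        return or128(by4, byn);
    }
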
@@ -148,10 +148,10 @@ static really_inline m128 set16x8(u8 c) {
return _mm_set1_epi8(c);
}
-static really_inline m128 set4x32(u32 c) {
- return _mm_set1_epi32(c);
-}
-
+static really_inline m128 set4x32(u32 c) {
+ return _mm_set1_epi32(c);
+}
+
static really_inline u32 movd(const m128 in) {
return _mm_cvtsi128_si32(in);
}
@@ -180,33 +180,33 @@ static really_inline u64a movq(const m128 in) {
#endif
}
-/* another form of movq */
-static really_inline
-m128 load_m128_from_u64a(const u64a *p) {
- return _mm_set_epi64x(0LL, *p);
+/* another form of movq */
+static really_inline
+m128 load_m128_from_u64a(const u64a *p) {
+ return _mm_set_epi64x(0LL, *p);
}
-#define rshiftbyte_m128(a, count_immed) _mm_srli_si128(a, count_immed)
-#define lshiftbyte_m128(a, count_immed) _mm_slli_si128(a, count_immed)
+#define rshiftbyte_m128(a, count_immed) _mm_srli_si128(a, count_immed)
+#define lshiftbyte_m128(a, count_immed) _mm_slli_si128(a, count_immed)
-#if defined(HAVE_SSE41)
-#define extract32from128(a, imm) _mm_extract_epi32(a, imm)
-#define extract64from128(a, imm) _mm_extract_epi64(a, imm)
-#else
-#define extract32from128(a, imm) movd(_mm_srli_si128(a, imm << 2))
-#define extract64from128(a, imm) movq(_mm_srli_si128(a, imm << 3))
-#endif
+#if defined(HAVE_SSE41)
+#define extract32from128(a, imm) _mm_extract_epi32(a, imm)
+#define extract64from128(a, imm) _mm_extract_epi64(a, imm)
+#else
+#define extract32from128(a, imm) movd(_mm_srli_si128(a, imm << 2))
+#define extract64from128(a, imm) movq(_mm_srli_si128(a, imm << 3))
+#endif
-#if !defined(HAVE_AVX2)
+#if !defined(HAVE_AVX2)
// TODO: this entire file needs restructuring - this carveout is awful
#define extractlow64from256(a) movq(a.lo)
#define extractlow32from256(a) movd(a.lo)
-#if defined(HAVE_SSE41)
+#if defined(HAVE_SSE41)
#define extract32from256(a, imm) _mm_extract_epi32((imm >> 2) ? a.hi : a.lo, imm % 4)
-#define extract64from256(a, imm) _mm_extract_epi64((imm >> 1) ? a.hi : a.lo, imm % 2)
+#define extract64from256(a, imm) _mm_extract_epi64((imm >> 1) ? a.hi : a.lo, imm % 2)
#else
-#define extract32from256(a, imm) movd(_mm_srli_si128((imm >> 2) ? a.hi : a.lo, (imm % 4) * 4))
-#define extract64from256(a, imm) movq(_mm_srli_si128((imm >> 1) ? a.hi : a.lo, (imm % 2) * 8))
+#define extract32from256(a, imm) movd(_mm_srli_si128((imm >> 2) ? a.hi : a.lo, (imm % 4) * 4))
+#define extract64from256(a, imm) movq(_mm_srli_si128((imm >> 1) ? a.hi : a.lo, (imm % 2) * 8))
#endif
#endif // !AVX2
@@ -285,139 +285,139 @@ m128 loadbytes128(const void *ptr, unsigned int n) {
return a;
}
-#ifdef __cplusplus
-extern "C" {
-#endif
-extern const u8 simd_onebit_masks[];
-#ifdef __cplusplus
-}
-#endif
-
-static really_inline
-m128 mask1bit128(unsigned int n) {
- assert(n < sizeof(m128) * 8);
- u32 mask_idx = ((n % 8) * 64) + 95;
- mask_idx -= n / 8;
- return loadu128(&simd_onebit_masks[mask_idx]);
-}
-
+#ifdef __cplusplus
+extern "C" {
+#endif
+extern const u8 simd_onebit_masks[];
+#ifdef __cplusplus
+}
+#endif
+
+static really_inline
+m128 mask1bit128(unsigned int n) {
+ assert(n < sizeof(m128) * 8);
+ u32 mask_idx = ((n % 8) * 64) + 95;
+ mask_idx -= n / 8;
+ return loadu128(&simd_onebit_masks[mask_idx]);
+}
+
// switches on bit N in the given vector.
static really_inline
void setbit128(m128 *ptr, unsigned int n) {
- *ptr = or128(mask1bit128(n), *ptr);
+ *ptr = or128(mask1bit128(n), *ptr);
}
// switches off bit N in the given vector.
static really_inline
void clearbit128(m128 *ptr, unsigned int n) {
- *ptr = andnot128(mask1bit128(n), *ptr);
-}
-
-// tests bit N in the given vector.
-static really_inline
-char testbit128(m128 val, unsigned int n) {
- const m128 mask = mask1bit128(n);
-#if defined(HAVE_SSE41)
- return !_mm_testz_si128(mask, val);
-#else
- return isnonzero128(and128(mask, val));
-#endif
-}
-
-// offset must be an immediate
-#define palignr(r, l, offset) _mm_alignr_epi8(r, l, offset)
-
-static really_inline
-m128 pshufb_m128(m128 a, m128 b) {
- m128 result;
- result = _mm_shuffle_epi8(a, b);
- return result;
-}
-
-static really_inline
-m256 pshufb_m256(m256 a, m256 b) {
-#if defined(HAVE_AVX2)
- return _mm256_shuffle_epi8(a, b);
-#else
- m256 rv;
- rv.lo = pshufb_m128(a.lo, b.lo);
- rv.hi = pshufb_m128(a.hi, b.hi);
- return rv;
-#endif
-}
-
-#if defined(HAVE_AVX512)
-static really_inline
-m512 pshufb_m512(m512 a, m512 b) {
- return _mm512_shuffle_epi8(a, b);
-}
-
-static really_inline
-m512 maskz_pshufb_m512(__mmask64 k, m512 a, m512 b) {
- return _mm512_maskz_shuffle_epi8(k, a, b);
-}
+ *ptr = andnot128(mask1bit128(n), *ptr);
+}
+
+// tests bit N in the given vector.
+static really_inline
+char testbit128(m128 val, unsigned int n) {
+ const m128 mask = mask1bit128(n);
+#if defined(HAVE_SSE41)
+ return !_mm_testz_si128(mask, val);
+#else
+ return isnonzero128(and128(mask, val));
+#endif
+}
+
+// offset must be an immediate
+#define palignr(r, l, offset) _mm_alignr_epi8(r, l, offset)
+
+static really_inline
+m128 pshufb_m128(m128 a, m128 b) {
+ m128 result;
+ result = _mm_shuffle_epi8(a, b);
+ return result;
+}
+
+static really_inline
+m256 pshufb_m256(m256 a, m256 b) {
+#if defined(HAVE_AVX2)
+ return _mm256_shuffle_epi8(a, b);
+#else
+ m256 rv;
+ rv.lo = pshufb_m128(a.lo, b.lo);
+ rv.hi = pshufb_m128(a.hi, b.hi);
+ return rv;
+#endif
+}
+
+#if defined(HAVE_AVX512)
+static really_inline
+m512 pshufb_m512(m512 a, m512 b) {
+ return _mm512_shuffle_epi8(a, b);
+}
+
+static really_inline
+m512 maskz_pshufb_m512(__mmask64 k, m512 a, m512 b) {
+ return _mm512_maskz_shuffle_epi8(k, a, b);
+}
#if defined(HAVE_AVX512VBMI)
#define vpermb512(idx, a) _mm512_permutexvar_epi8(idx, a)
#define maskz_vpermb512(k, idx, a) _mm512_maskz_permutexvar_epi8(k, idx, a)
-#endif
-
-#endif
-
-static really_inline
-m128 variable_byte_shift_m128(m128 in, s32 amount) {
- assert(amount >= -16 && amount <= 16);
- m128 shift_mask = loadu128(vbs_mask_data + 16 - amount);
- return pshufb_m128(in, shift_mask);
-}
-
-static really_inline
-m128 max_u8_m128(m128 a, m128 b) {
- return _mm_max_epu8(a, b);
-}
-
-static really_inline
-m128 min_u8_m128(m128 a, m128 b) {
- return _mm_min_epu8(a, b);
-}
-
-static really_inline
-m128 sadd_u8_m128(m128 a, m128 b) {
- return _mm_adds_epu8(a, b);
-}
-
-static really_inline
-m128 sub_u8_m128(m128 a, m128 b) {
- return _mm_sub_epi8(a, b);
-}
-
-static really_inline
-m128 set64x2(u64a hi, u64a lo) {
- return _mm_set_epi64x(hi, lo);
-}
-
+#endif
+
+#endif
+
+static really_inline
+m128 variable_byte_shift_m128(m128 in, s32 amount) {
+ assert(amount >= -16 && amount <= 16);
+ m128 shift_mask = loadu128(vbs_mask_data + 16 - amount);
+ return pshufb_m128(in, shift_mask);
+}
+
+static really_inline
+m128 max_u8_m128(m128 a, m128 b) {
+ return _mm_max_epu8(a, b);
+}
+
+static really_inline
+m128 min_u8_m128(m128 a, m128 b) {
+ return _mm_min_epu8(a, b);
+}
+
+static really_inline
+m128 sadd_u8_m128(m128 a, m128 b) {
+ return _mm_adds_epu8(a, b);
+}
+
+static really_inline
+m128 sub_u8_m128(m128 a, m128 b) {
+ return _mm_sub_epi8(a, b);
+}
+
+static really_inline
+m128 set64x2(u64a hi, u64a lo) {
+ return _mm_set_epi64x(hi, lo);
+}
+
/****
**** 256-bit Primitives
****/
-#if defined(HAVE_AVX2)
-
-static really_really_inline
-m256 lshift64_m256(m256 a, unsigned b) {
-#if defined(HAVE__BUILTIN_CONSTANT_P)
- if (__builtin_constant_p(b)) {
- return _mm256_slli_epi64(a, b);
- }
-#endif
- m128 x = _mm_cvtsi32_si128(b);
- return _mm256_sll_epi64(a, x);
-}
-
-#define rshift64_m256(a, b) _mm256_srli_epi64((a), (b))
-
+#if defined(HAVE_AVX2)
+
+static really_really_inline
+m256 lshift64_m256(m256 a, unsigned b) {
+#if defined(HAVE__BUILTIN_CONSTANT_P)
+ if (__builtin_constant_p(b)) {
+ return _mm256_slli_epi64(a, b);
+ }
+#endif
+ m128 x = _mm_cvtsi32_si128(b);
+ return _mm256_sll_epi64(a, x);
+}
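
A note on the __builtin_constant_p dispatch above: when the shift count is a compile-time constant, the immediate form _mm256_slli_epi64 is emitted; otherwise the count travels through an XMM register via _mm_cvtsi32_si128 and the variable form _mm256_sll_epi64 is used. Both paths compute the same result. A brief sketch (get_count is a hypothetical runtime source, not part of the library):

    m256 x = ones256();
    m256 by3 = lshift64_m256(x, 3);  /* literal count: compiles to the slli path */
    unsigned k = get_count();        /* hypothetical value known only at runtime */
    m256 byk = lshift64_m256(x, k);  /* runtime count: takes the sll path */
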
+
+#define rshift64_m256(a, b) _mm256_srli_epi64((a), (b))
+
static really_inline
m256 set32x8(u32 in) {
- return _mm256_set1_epi8(in);
+ return _mm256_set1_epi8(in);
}
#define eq256(a, b) _mm256_cmpeq_epi8((a), (b))
@@ -430,19 +430,19 @@ m256 set2x128(m128 a) {
#else
-static really_really_inline
-m256 lshift64_m256(m256 a, int b) {
+static really_really_inline
+m256 lshift64_m256(m256 a, int b) {
m256 rv = a;
- rv.lo = lshift64_m128(rv.lo, b);
- rv.hi = lshift64_m128(rv.hi, b);
+ rv.lo = lshift64_m128(rv.lo, b);
+ rv.hi = lshift64_m128(rv.hi, b);
return rv;
}
static really_inline
-m256 rshift64_m256(m256 a, int b) {
+m256 rshift64_m256(m256 a, int b) {
m256 rv = a;
- rv.lo = rshift64_m128(rv.lo, b);
- rv.hi = rshift64_m128(rv.hi, b);
+ rv.lo = rshift64_m128(rv.lo, b);
+ rv.hi = rshift64_m128(rv.hi, b);
return rv;
}
static really_inline
@@ -453,30 +453,30 @@ m256 set32x8(u32 in) {
return rv;
}
-static really_inline
-m256 eq256(m256 a, m256 b) {
- m256 rv;
- rv.lo = eq128(a.lo, b.lo);
- rv.hi = eq128(a.hi, b.hi);
- return rv;
-}
-
-static really_inline
-u32 movemask256(m256 a) {
- u32 lo_mask = movemask128(a.lo);
- u32 hi_mask = movemask128(a.hi);
- return lo_mask | (hi_mask << 16);
-}
-
-static really_inline
-m256 set2x128(m128 a) {
- m256 rv = {a, a};
- return rv;
-}
+static really_inline
+m256 eq256(m256 a, m256 b) {
+ m256 rv;
+ rv.lo = eq128(a.lo, b.lo);
+ rv.hi = eq128(a.hi, b.hi);
+ return rv;
+}
+
+static really_inline
+u32 movemask256(m256 a) {
+ u32 lo_mask = movemask128(a.lo);
+ u32 hi_mask = movemask128(a.hi);
+ return lo_mask | (hi_mask << 16);
+}
+
+static really_inline
+m256 set2x128(m128 a) {
+ m256 rv = {a, a};
+ return rv;
+}
#endif
static really_inline m256 zeroes256(void) {
-#if defined(HAVE_AVX2)
+#if defined(HAVE_AVX2)
return _mm256_setzero_si256();
#else
m256 rv = {zeroes128(), zeroes128()};
@@ -485,15 +485,15 @@ static really_inline m256 zeroes256(void) {
}
static really_inline m256 ones256(void) {
-#if defined(HAVE_AVX2)
- m256 rv = _mm256_set1_epi8(0xFF);
+#if defined(HAVE_AVX2)
+ m256 rv = _mm256_set1_epi8(0xFF);
#else
m256 rv = {ones128(), ones128()};
#endif
return rv;
}
-#if defined(HAVE_AVX2)
+#if defined(HAVE_AVX2)
static really_inline m256 and256(m256 a, m256 b) {
return _mm256_and_si256(a, b);
}
@@ -506,7 +506,7 @@ static really_inline m256 and256(m256 a, m256 b) {
}
#endif
-#if defined(HAVE_AVX2)
+#if defined(HAVE_AVX2)
static really_inline m256 or256(m256 a, m256 b) {
return _mm256_or_si256(a, b);
}
@@ -519,7 +519,7 @@ static really_inline m256 or256(m256 a, m256 b) {
}
#endif
-#if defined(HAVE_AVX2)
+#if defined(HAVE_AVX2)
static really_inline m256 xor256(m256 a, m256 b) {
return _mm256_xor_si256(a, b);
}
@@ -532,7 +532,7 @@ static really_inline m256 xor256(m256 a, m256 b) {
}
#endif
-#if defined(HAVE_AVX2)
+#if defined(HAVE_AVX2)
static really_inline m256 not256(m256 a) {
return _mm256_xor_si256(a, ones256());
}
@@ -545,7 +545,7 @@ static really_inline m256 not256(m256 a) {
}
#endif
-#if defined(HAVE_AVX2)
+#if defined(HAVE_AVX2)
static really_inline m256 andnot256(m256 a, m256 b) {
return _mm256_andnot_si256(a, b);
}
@@ -559,7 +559,7 @@ static really_inline m256 andnot256(m256 a, m256 b) {
#endif
static really_inline int diff256(m256 a, m256 b) {
-#if defined(HAVE_AVX2)
+#if defined(HAVE_AVX2)
return !!(_mm256_movemask_epi8(_mm256_cmpeq_epi8(a, b)) ^ (int)-1);
#else
return diff128(a.lo, b.lo) || diff128(a.hi, b.hi);
@@ -567,7 +567,7 @@ static really_inline int diff256(m256 a, m256 b) {
}
static really_inline int isnonzero256(m256 a) {
-#if defined(HAVE_AVX2)
+#if defined(HAVE_AVX2)
return !!diff256(a, zeroes256());
#else
return isnonzero128(or128(a.lo, a.hi));
@@ -579,7 +579,7 @@ static really_inline int isnonzero256(m256 a) {
* mask indicating which 32-bit words contain differences.
*/
static really_inline u32 diffrich256(m256 a, m256 b) {
-#if defined(HAVE_AVX2)
+#if defined(HAVE_AVX2)
a = _mm256_cmpeq_epi32(a, b);
return ~(_mm256_movemask_ps(_mm256_castsi256_ps(a))) & 0xFF;
#else
@@ -603,7 +603,7 @@ static really_inline u32 diffrich64_256(m256 a, m256 b) {
// aligned load
static really_inline m256 load256(const void *ptr) {
assert(ISALIGNED_N(ptr, alignof(m256)));
-#if defined(HAVE_AVX2)
+#if defined(HAVE_AVX2)
return _mm256_load_si256((const m256 *)ptr);
#else
m256 rv = { load128(ptr), load128((const char *)ptr + 16) };
@@ -613,7 +613,7 @@ static really_inline m256 load256(const void *ptr) {
// aligned load of 128-bit value to low and high part of 256-bit value
static really_inline m256 load2x128(const void *ptr) {
-#if defined(HAVE_AVX2)
+#if defined(HAVE_AVX2)
return set2x128(load128(ptr));
#else
assert(ISALIGNED_N(ptr, alignof(m128)));
@@ -623,14 +623,14 @@ static really_inline m256 load2x128(const void *ptr) {
#endif
}
-static really_inline m256 loadu2x128(const void *ptr) {
- return set2x128(loadu128(ptr));
-}
-
+static really_inline m256 loadu2x128(const void *ptr) {
+ return set2x128(loadu128(ptr));
+}
+
// aligned store
static really_inline void store256(void *ptr, m256 a) {
assert(ISALIGNED_N(ptr, alignof(m256)));
-#if defined(HAVE_AVX2)
+#if defined(HAVE_AVX2)
_mm256_store_si256((m256 *)ptr, a);
#else
ptr = assume_aligned(ptr, 16);
@@ -640,7 +640,7 @@ static really_inline void store256(void *ptr, m256 a) {
// unaligned load
static really_inline m256 loadu256(const void *ptr) {
-#if defined(HAVE_AVX2)
+#if defined(HAVE_AVX2)
return _mm256_loadu_si256((const m256 *)ptr);
#else
m256 rv = { loadu128(ptr), loadu128((const char *)ptr + 16) };
@@ -648,13 +648,13 @@ static really_inline m256 loadu256(const void *ptr) {
#endif
}
-// unaligned store
-static really_inline void storeu256(void *ptr, m256 a) {
-#if defined(HAVE_AVX2)
- _mm256_storeu_si256((m256 *)ptr, a);
+// unaligned store
+static really_inline void storeu256(void *ptr, m256 a) {
+#if defined(HAVE_AVX2)
+ _mm256_storeu_si256((m256 *)ptr, a);
#else
- storeu128(ptr, a.lo);
- storeu128((char *)ptr + 16, a.hi);
+ storeu128(ptr, a.lo);
+ storeu128((char *)ptr + 16, a.hi);
#endif
}
@@ -674,27 +674,27 @@ m256 loadbytes256(const void *ptr, unsigned int n) {
return a;
}
-static really_inline
-m256 mask1bit256(unsigned int n) {
- assert(n < sizeof(m256) * 8);
- u32 mask_idx = ((n % 8) * 64) + 95;
- mask_idx -= n / 8;
- return loadu256(&simd_onebit_masks[mask_idx]);
-}
-
-static really_inline
-m256 set64x4(u64a hi_1, u64a hi_0, u64a lo_1, u64a lo_0) {
-#if defined(HAVE_AVX2)
- return _mm256_set_epi64x(hi_1, hi_0, lo_1, lo_0);
-#else
- m256 rv;
- rv.hi = set64x2(hi_1, hi_0);
- rv.lo = set64x2(lo_1, lo_0);
- return rv;
-#endif
-}
-
-#if !defined(HAVE_AVX2)
+static really_inline
+m256 mask1bit256(unsigned int n) {
+ assert(n < sizeof(m256) * 8);
+ u32 mask_idx = ((n % 8) * 64) + 95;
+ mask_idx -= n / 8;
+ return loadu256(&simd_onebit_masks[mask_idx]);
+}
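
A worked example of the index arithmetic, assuming simd_onebit_masks (defined elsewhere in the library) is laid out, as this formula suggests, so that table[64*k + 95] == (1 << k), with zeroes everywhere else in reach of the load:

    /* n = 67: the set bit belongs in lane 67 / 8 == 8, as value 1 << (67 % 8) == 0x08.
     * mask_idx = ((67 % 8) * 64) + 95 - 67 / 8 = 192 + 95 - 8 = 279
     * loadu256 reads table[279..310]; the lone nonzero byte in that window,
     * table[(3 * 64) + 95] == table[287] == 0x08, lands at lane 287 - 279 == 8. */
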
+
+static really_inline
+m256 set64x4(u64a hi_1, u64a hi_0, u64a lo_1, u64a lo_0) {
+#if defined(HAVE_AVX2)
+ return _mm256_set_epi64x(hi_1, hi_0, lo_1, lo_0);
+#else
+ m256 rv;
+ rv.hi = set64x2(hi_1, hi_0);
+ rv.lo = set64x2(lo_1, lo_0);
+ return rv;
+#endif
+}
+
+#if !defined(HAVE_AVX2)
// switches on bit N in the given vector.
static really_inline
void setbit256(m256 *ptr, unsigned int n) {
@@ -725,52 +725,52 @@ void clearbit256(m256 *ptr, unsigned int n) {
// tests bit N in the given vector.
static really_inline
-char testbit256(m256 val, unsigned int n) {
- assert(n < sizeof(val) * 8);
- m128 sub;
+char testbit256(m256 val, unsigned int n) {
+ assert(n < sizeof(val) * 8);
+ m128 sub;
if (n < 128) {
- sub = val.lo;
+ sub = val.lo;
} else {
- sub = val.hi;
+ sub = val.hi;
n -= 128;
}
return testbit128(sub, n);
}
-static really_really_inline
-m128 movdq_hi(m256 x) {
- return x.hi;
-}
-
-static really_really_inline
-m128 movdq_lo(m256 x) {
- return x.lo;
-}
-
-static really_inline
-m256 combine2x128(m128 hi, m128 lo) {
- m256 rv = {lo, hi};
- return rv;
-}
-
+static really_really_inline
+m128 movdq_hi(m256 x) {
+ return x.hi;
+}
+
+static really_really_inline
+m128 movdq_lo(m256 x) {
+ return x.lo;
+}
+
+static really_inline
+m256 combine2x128(m128 hi, m128 lo) {
+ m256 rv = {lo, hi};
+ return rv;
+}
+
#else // AVX2
// switches on bit N in the given vector.
static really_inline
void setbit256(m256 *ptr, unsigned int n) {
- *ptr = or256(mask1bit256(n), *ptr);
+ *ptr = or256(mask1bit256(n), *ptr);
}
static really_inline
void clearbit256(m256 *ptr, unsigned int n) {
- *ptr = andnot256(mask1bit256(n), *ptr);
+ *ptr = andnot256(mask1bit256(n), *ptr);
}
// tests bit N in the given vector.
static really_inline
-char testbit256(m256 val, unsigned int n) {
- const m256 mask = mask1bit256(n);
- return !_mm256_testz_si256(mask, val);
+char testbit256(m256 val, unsigned int n) {
+ const m256 mask = mask1bit256(n);
+ return !_mm256_testz_si256(mask, val);
}
static really_really_inline
@@ -787,35 +787,35 @@ m128 movdq_lo(m256 x) {
#define cast128to256(a) _mm256_castsi128_si256(a)
#define swap128in256(a) _mm256_permute4x64_epi64(a, 0x4E)
#define insert128to256(a, b, imm) _mm256_inserti128_si256(a, b, imm)
-#define rshift128_m256(a, count_immed) _mm256_srli_si256(a, count_immed)
-#define lshift128_m256(a, count_immed) _mm256_slli_si256(a, count_immed)
+#define rshift128_m256(a, count_immed) _mm256_srli_si256(a, count_immed)
+#define lshift128_m256(a, count_immed) _mm256_slli_si256(a, count_immed)
#define extract64from256(a, imm) _mm_extract_epi64(_mm256_extracti128_si256(a, imm >> 1), imm % 2)
#define extract32from256(a, imm) _mm_extract_epi32(_mm256_extracti128_si256(a, imm >> 2), imm % 4)
#define extractlow64from256(a) _mm_cvtsi128_si64(cast256to128(a))
#define extractlow32from256(a) movd(cast256to128(a))
-#define interleave256hi(a, b) _mm256_unpackhi_epi8(a, b)
-#define interleave256lo(a, b) _mm256_unpacklo_epi8(a, b)
-#define vpalignr(r, l, offset) _mm256_alignr_epi8(r, l, offset)
-
-static really_inline
-m256 combine2x128(m128 hi, m128 lo) {
-#if defined(_mm256_set_m128i)
- return _mm256_set_m128i(hi, lo);
-#else
- return insert128to256(cast128to256(lo), hi, 1);
-#endif
-}
+#define interleave256hi(a, b) _mm256_unpackhi_epi8(a, b)
+#define interleave256lo(a, b) _mm256_unpacklo_epi8(a, b)
+#define vpalignr(r, l, offset) _mm256_alignr_epi8(r, l, offset)
+
+static really_inline
+m256 combine2x128(m128 hi, m128 lo) {
+#if defined(_mm256_set_m128i)
+ return _mm256_set_m128i(hi, lo);
+#else
+ return insert128to256(cast128to256(lo), hi, 1);
+#endif
+}
#endif //AVX2
-#if defined(HAVE_AVX512)
-#define extract128from512(a, imm) _mm512_extracti32x4_epi32(a, imm)
-#define interleave512hi(a, b) _mm512_unpackhi_epi8(a, b)
-#define interleave512lo(a, b) _mm512_unpacklo_epi8(a, b)
-#define set2x256(a) _mm512_broadcast_i64x4(a)
-#define mask_set2x256(src, k, a) _mm512_mask_broadcast_i64x4(src, k, a)
-#define vpermq512(idx, a) _mm512_permutexvar_epi64(idx, a)
-#endif
-
+#if defined(HAVE_AVX512)
+#define extract128from512(a, imm) _mm512_extracti32x4_epi32(a, imm)
+#define interleave512hi(a, b) _mm512_unpackhi_epi8(a, b)
+#define interleave512lo(a, b) _mm512_unpacklo_epi8(a, b)
+#define set2x256(a) _mm512_broadcast_i64x4(a)
+#define mask_set2x256(src, k, a) _mm512_mask_broadcast_i64x4(src, k, a)
+#define vpermq512(idx, a) _mm512_permutexvar_epi64(idx, a)
+#endif
+
/****
**** 384-bit Primitives
****/
@@ -858,12 +858,12 @@ static really_inline m384 andnot384(m384 a, m384 b) {
return rv;
}
-static really_really_inline
-m384 lshift64_m384(m384 a, unsigned b) {
+static really_really_inline
+m384 lshift64_m384(m384 a, unsigned b) {
m384 rv;
- rv.lo = lshift64_m128(a.lo, b);
- rv.mid = lshift64_m128(a.mid, b);
- rv.hi = lshift64_m128(a.hi, b);
+ rv.lo = lshift64_m128(a.lo, b);
+ rv.mid = lshift64_m128(a.mid, b);
+ rv.hi = lshift64_m128(a.hi, b);
return rv;
}
@@ -978,15 +978,15 @@ void clearbit384(m384 *ptr, unsigned int n) {
// tests bit N in the given vector.
static really_inline
-char testbit384(m384 val, unsigned int n) {
- assert(n < sizeof(val) * 8);
- m128 sub;
+char testbit384(m384 val, unsigned int n) {
+ assert(n < sizeof(val) * 8);
+ m128 sub;
if (n < 128) {
- sub = val.lo;
+ sub = val.lo;
} else if (n < 256) {
- sub = val.mid;
+ sub = val.mid;
} else {
- sub = val.hi;
+ sub = val.hi;
}
return testbit128(sub, n % 128);
}
@@ -995,63 +995,63 @@ char testbit384(m384 val, unsigned int n) {
**** 512-bit Primitives
****/
-#define eq512mask(a, b) _mm512_cmpeq_epi8_mask((a), (b))
-#define masked_eq512mask(k, a, b) _mm512_mask_cmpeq_epi8_mask((k), (a), (b))
-
-static really_inline
-m512 zeroes512(void) {
-#if defined(HAVE_AVX512)
- return _mm512_setzero_si512();
+#define eq512mask(a, b) _mm512_cmpeq_epi8_mask((a), (b))
+#define masked_eq512mask(k, a, b) _mm512_mask_cmpeq_epi8_mask((k), (a), (b))
+
+static really_inline
+m512 zeroes512(void) {
+#if defined(HAVE_AVX512)
+ return _mm512_setzero_si512();
+#else
+ m512 rv = {zeroes256(), zeroes256()};
+ return rv;
+#endif
+}
+
+static really_inline
+m512 ones512(void) {
+#if defined(HAVE_AVX512)
+ return _mm512_set1_epi8(0xFF);
+ //return _mm512_xor_si512(_mm512_setzero_si512(), _mm512_setzero_si512());
#else
- m512 rv = {zeroes256(), zeroes256()};
- return rv;
-#endif
-}
-
-static really_inline
-m512 ones512(void) {
-#if defined(HAVE_AVX512)
- return _mm512_set1_epi8(0xFF);
- //return _mm512_xor_si512(_mm512_setzero_si512(), _mm512_setzero_si512());
-#else
- m512 rv = {ones256(), ones256()};
- return rv;
-#endif
-}
-
-#if defined(HAVE_AVX512)
-static really_inline
-m512 set64x8(u8 a) {
- return _mm512_set1_epi8(a);
-}
-
-static really_inline
-m512 set8x64(u64a a) {
- return _mm512_set1_epi64(a);
-}
-
-static really_inline
+ m512 rv = {ones256(), ones256()};
+ return rv;
+#endif
+}
+
+#if defined(HAVE_AVX512)
+static really_inline
+m512 set64x8(u8 a) {
+ return _mm512_set1_epi8(a);
+}
+
+static really_inline
+m512 set8x64(u64a a) {
+ return _mm512_set1_epi64(a);
+}
+
+static really_inline
m512 set16x32(u32 a) {
return _mm512_set1_epi32(a);
}
static really_inline
-m512 set512_64(u64a hi_3, u64a hi_2, u64a hi_1, u64a hi_0,
- u64a lo_3, u64a lo_2, u64a lo_1, u64a lo_0) {
- return _mm512_set_epi64(hi_3, hi_2, hi_1, hi_0,
- lo_3, lo_2, lo_1, lo_0);
-}
-
-static really_inline
-m512 swap256in512(m512 a) {
- m512 idx = set512_64(3ULL, 2ULL, 1ULL, 0ULL, 7ULL, 6ULL, 5ULL, 4ULL);
- return vpermq512(idx, a);
-}
-
-static really_inline
-m512 set4x128(m128 a) {
- return _mm512_broadcast_i32x4(a);
-}
+m512 set512_64(u64a hi_3, u64a hi_2, u64a hi_1, u64a hi_0,
+ u64a lo_3, u64a lo_2, u64a lo_1, u64a lo_0) {
+ return _mm512_set_epi64(hi_3, hi_2, hi_1, hi_0,
+ lo_3, lo_2, lo_1, lo_0);
+}
+
+static really_inline
+m512 swap256in512(m512 a) {
+ m512 idx = set512_64(3ULL, 2ULL, 1ULL, 0ULL, 7ULL, 6ULL, 5ULL, 4ULL);
+ return vpermq512(idx, a);
+}
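
Tracing the permutation: _mm512_set_epi64 lists its arguments from lane 7 down to lane 0, so the index vector holds {4, 5, 6, 7, 0, 1, 2, 3} in lanes 0..7, and VPERMQ writes a.qword[idx[i]] into lane i:

    /* idx lanes 0..7 = {4, 5, 6, 7, 0, 1, 2, 3}
     * result lane i  = a.qword[idx[i]]:
     *   lanes 0..3 <- qwords 4..7 (old high 256 bits)
     *   lanes 4..7 <- qwords 0..3 (old low 256 bits) */
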
+
+static really_inline
+m512 set4x128(m128 a) {
+ return _mm512_broadcast_i32x4(a);
+}
static really_inline
m512 sadd_u8_m512(m512 a, m512 b) {
@@ -1072,116 +1072,116 @@ static really_inline
m512 sub_u8_m512(m512 a, m512 b) {
return _mm512_sub_epi8(a, b);
}
-#endif
-
-static really_inline
-m512 and512(m512 a, m512 b) {
-#if defined(HAVE_AVX512)
- return _mm512_and_si512(a, b);
-#else
+#endif
+
+static really_inline
+m512 and512(m512 a, m512 b) {
+#if defined(HAVE_AVX512)
+ return _mm512_and_si512(a, b);
+#else
m512 rv;
rv.lo = and256(a.lo, b.lo);
rv.hi = and256(a.hi, b.hi);
return rv;
-#endif
+#endif
}
-static really_inline
-m512 or512(m512 a, m512 b) {
-#if defined(HAVE_AVX512)
- return _mm512_or_si512(a, b);
+static really_inline
+m512 or512(m512 a, m512 b) {
+#if defined(HAVE_AVX512)
+ return _mm512_or_si512(a, b);
#else
m512 rv;
rv.lo = or256(a.lo, b.lo);
rv.hi = or256(a.hi, b.hi);
return rv;
-#endif
+#endif
}
-static really_inline
-m512 xor512(m512 a, m512 b) {
-#if defined(HAVE_AVX512)
- return _mm512_xor_si512(a, b);
+static really_inline
+m512 xor512(m512 a, m512 b) {
+#if defined(HAVE_AVX512)
+ return _mm512_xor_si512(a, b);
#else
m512 rv;
rv.lo = xor256(a.lo, b.lo);
rv.hi = xor256(a.hi, b.hi);
return rv;
-#endif
+#endif
}
-static really_inline
-m512 not512(m512 a) {
-#if defined(HAVE_AVX512)
- return _mm512_xor_si512(a, ones512());
+static really_inline
+m512 not512(m512 a) {
+#if defined(HAVE_AVX512)
+ return _mm512_xor_si512(a, ones512());
#else
m512 rv;
rv.lo = not256(a.lo);
rv.hi = not256(a.hi);
return rv;
-#endif
+#endif
}
-static really_inline
-m512 andnot512(m512 a, m512 b) {
-#if defined(HAVE_AVX512)
- return _mm512_andnot_si512(a, b);
+static really_inline
+m512 andnot512(m512 a, m512 b) {
+#if defined(HAVE_AVX512)
+ return _mm512_andnot_si512(a, b);
#else
m512 rv;
rv.lo = andnot256(a.lo, b.lo);
rv.hi = andnot256(a.hi, b.hi);
return rv;
-#endif
-}
-
-#if defined(HAVE_AVX512)
-static really_really_inline
-m512 lshift64_m512(m512 a, unsigned b) {
-#if defined(HAVE__BUILTIN_CONSTANT_P)
- if (__builtin_constant_p(b)) {
- return _mm512_slli_epi64(a, b);
- }
-#endif
- m128 x = _mm_cvtsi32_si128(b);
- return _mm512_sll_epi64(a, x);
-}
+#endif
+}
+
+#if defined(HAVE_AVX512)
+static really_really_inline
+m512 lshift64_m512(m512 a, unsigned b) {
+#if defined(HAVE__BUILTIN_CONSTANT_P)
+ if (__builtin_constant_p(b)) {
+ return _mm512_slli_epi64(a, b);
+ }
+#endif
+ m128 x = _mm_cvtsi32_si128(b);
+ return _mm512_sll_epi64(a, x);
+}
#else
-static really_really_inline
-m512 lshift64_m512(m512 a, unsigned b) {
+static really_really_inline
+m512 lshift64_m512(m512 a, unsigned b) {
m512 rv;
- rv.lo = lshift64_m256(a.lo, b);
- rv.hi = lshift64_m256(a.hi, b);
+ rv.lo = lshift64_m256(a.lo, b);
+ rv.hi = lshift64_m256(a.hi, b);
return rv;
}
#endif
-#if defined(HAVE_AVX512)
-#define rshift64_m512(a, b) _mm512_srli_epi64((a), (b))
-#define rshift128_m512(a, count_immed) _mm512_bsrli_epi128(a, count_immed)
-#define lshift128_m512(a, count_immed) _mm512_bslli_epi128(a, count_immed)
-#endif
+#if defined(HAVE_AVX512)
+#define rshift64_m512(a, b) _mm512_srli_epi64((a), (b))
+#define rshift128_m512(a, count_immed) _mm512_bsrli_epi128(a, count_immed)
+#define lshift128_m512(a, count_immed) _mm512_bslli_epi128(a, count_immed)
+#endif
-#if !defined(_MM_CMPINT_NE)
-#define _MM_CMPINT_NE 0x4
-#endif
+#if !defined(_MM_CMPINT_NE)
+#define _MM_CMPINT_NE 0x4
+#endif
-static really_inline
-int diff512(m512 a, m512 b) {
-#if defined(HAVE_AVX512)
- return !!_mm512_cmp_epi8_mask(a, b, _MM_CMPINT_NE);
-#else
+static really_inline
+int diff512(m512 a, m512 b) {
+#if defined(HAVE_AVX512)
+ return !!_mm512_cmp_epi8_mask(a, b, _MM_CMPINT_NE);
+#else
return diff256(a.lo, b.lo) || diff256(a.hi, b.hi);
-#endif
+#endif
}
-static really_inline
-int isnonzero512(m512 a) {
-#if defined(HAVE_AVX512)
- return diff512(a, zeroes512());
-#elif defined(HAVE_AVX2)
- m256 x = or256(a.lo, a.hi);
- return !!diff256(x, zeroes256());
-#else
+static really_inline
+int isnonzero512(m512 a) {
+#if defined(HAVE_AVX512)
+ return diff512(a, zeroes512());
+#elif defined(HAVE_AVX2)
+ m256 x = or256(a.lo, a.hi);
+ return !!diff256(x, zeroes256());
+#else
m128 x = or128(a.lo.lo, a.lo.hi);
m128 y = or128(a.hi.lo, a.hi.hi);
return isnonzero128(or128(x, y));
@@ -1192,11 +1192,11 @@ int isnonzero512(m512 a) {
* "Rich" version of diff512(). Takes two vectors a and b and returns a 16-bit
* mask indicating which 32-bit words contain differences.
*/
-static really_inline
-u32 diffrich512(m512 a, m512 b) {
-#if defined(HAVE_AVX512)
- return _mm512_cmp_epi32_mask(a, b, _MM_CMPINT_NE);
-#elif defined(HAVE_AVX2)
+static really_inline
+u32 diffrich512(m512 a, m512 b) {
+#if defined(HAVE_AVX512)
+ return _mm512_cmp_epi32_mask(a, b, _MM_CMPINT_NE);
+#elif defined(HAVE_AVX2)
return diffrich256(a.lo, b.lo) | (diffrich256(a.hi, b.hi) << 8);
#else
a.lo.lo = _mm_cmpeq_epi32(a.lo.lo, b.lo.lo);
@@ -1213,32 +1213,32 @@ u32 diffrich512(m512 a, m512 b) {
* "Rich" version of diffrich(), 64-bit variant. Takes two vectors a and b and
* returns a 16-bit mask indicating which 64-bit words contain differences.
*/
-static really_inline
-u32 diffrich64_512(m512 a, m512 b) {
- //TODO: cmp_epi64?
+static really_inline
+u32 diffrich64_512(m512 a, m512 b) {
+ //TODO: cmp_epi64?
u32 d = diffrich512(a, b);
return (d | (d >> 1)) & 0x55555555;
}
// aligned load
-static really_inline
-m512 load512(const void *ptr) {
-#if defined(HAVE_AVX512)
- return _mm512_load_si512(ptr);
-#else
- assert(ISALIGNED_N(ptr, alignof(m256)));
+static really_inline
+m512 load512(const void *ptr) {
+#if defined(HAVE_AVX512)
+ return _mm512_load_si512(ptr);
+#else
+ assert(ISALIGNED_N(ptr, alignof(m256)));
m512 rv = { load256(ptr), load256((const char *)ptr + 32) };
return rv;
-#endif
+#endif
}
// aligned store
-static really_inline
-void store512(void *ptr, m512 a) {
- assert(ISALIGNED_N(ptr, alignof(m512)));
-#if defined(HAVE_AVX512)
- return _mm512_store_si512(ptr, a);
-#elif defined(HAVE_AVX2)
+static really_inline
+void store512(void *ptr, m512 a) {
+ assert(ISALIGNED_N(ptr, alignof(m512)));
+#if defined(HAVE_AVX512)
+ return _mm512_store_si512(ptr, a);
+#elif defined(HAVE_AVX2)
m512 *x = (m512 *)ptr;
store256(&x->lo, a.lo);
store256(&x->hi, a.hi);
@@ -1249,20 +1249,20 @@ void store512(void *ptr, m512 a) {
}
// unaligned load
-static really_inline
-m512 loadu512(const void *ptr) {
-#if defined(HAVE_AVX512)
- return _mm512_loadu_si512(ptr);
-#else
+static really_inline
+m512 loadu512(const void *ptr) {
+#if defined(HAVE_AVX512)
+ return _mm512_loadu_si512(ptr);
+#else
m512 rv = { loadu256(ptr), loadu256((const char *)ptr + 32) };
return rv;
-#endif
+#endif
}
// unaligned store
static really_inline
void storeu512(void *ptr, m512 a) {
-#if defined(HAVE_AVX512)
+#if defined(HAVE_AVX512)
_mm512_storeu_si512((m512 *)ptr, a);
#elif defined(HAVE_AVX2)
storeu256(ptr, a.lo);
@@ -1276,32 +1276,32 @@ void storeu512(void *ptr, m512 a) {
}
#if defined(HAVE_AVX512)
-static really_inline
-m512 loadu_maskz_m512(__mmask64 k, const void *ptr) {
- return _mm512_maskz_loadu_epi8(k, ptr);
-}
-
-static really_inline
-m512 loadu_mask_m512(m512 src, __mmask64 k, const void *ptr) {
- return _mm512_mask_loadu_epi8(src, k, ptr);
-}
-
-static really_inline
+static really_inline
+m512 loadu_maskz_m512(__mmask64 k, const void *ptr) {
+ return _mm512_maskz_loadu_epi8(k, ptr);
+}
+
+static really_inline
+m512 loadu_mask_m512(m512 src, __mmask64 k, const void *ptr) {
+ return _mm512_mask_loadu_epi8(src, k, ptr);
+}
+
+static really_inline
void storeu_mask_m512(void *ptr, __mmask64 k, m512 a) {
_mm512_mask_storeu_epi8(ptr, k, a);
}
static really_inline
-m512 set_mask_m512(__mmask64 k) {
- return _mm512_movm_epi8(k);
-}
+m512 set_mask_m512(__mmask64 k) {
+ return _mm512_movm_epi8(k);
+}
static really_inline
m256 loadu_maskz_m256(__mmask32 k, const void *ptr) {
return _mm256_maskz_loadu_epi8(k, ptr);
}
-#endif
-
+#endif
+
// packed unaligned store of first N bytes
static really_inline
void storebytes512(void *ptr, m512 a, unsigned int n) {
@@ -1318,19 +1318,19 @@ m512 loadbytes512(const void *ptr, unsigned int n) {
return a;
}
-static really_inline
-m512 mask1bit512(unsigned int n) {
- assert(n < sizeof(m512) * 8);
- u32 mask_idx = ((n % 8) * 64) + 95;
- mask_idx -= n / 8;
- return loadu512(&simd_onebit_masks[mask_idx]);
-}
-
+static really_inline
+m512 mask1bit512(unsigned int n) {
+ assert(n < sizeof(m512) * 8);
+ u32 mask_idx = ((n % 8) * 64) + 95;
+ mask_idx -= n / 8;
+ return loadu512(&simd_onebit_masks[mask_idx]);
+}
+
// switches on bit N in the given vector.
static really_inline
void setbit512(m512 *ptr, unsigned int n) {
assert(n < sizeof(*ptr) * 8);
-#if !defined(HAVE_AVX2)
+#if !defined(HAVE_AVX2)
m128 *sub;
if (n < 128) {
sub = &ptr->lo.lo;
@@ -1342,8 +1342,8 @@ void setbit512(m512 *ptr, unsigned int n) {
sub = &ptr->hi.hi;
}
setbit128(sub, n % 128);
-#elif defined(HAVE_AVX512)
- *ptr = or512(mask1bit512(n), *ptr);
+#elif defined(HAVE_AVX512)
+ *ptr = or512(mask1bit512(n), *ptr);
#else
m256 *sub;
if (n < 256) {
@@ -1360,7 +1360,7 @@ void setbit512(m512 *ptr, unsigned int n) {
static really_inline
void clearbit512(m512 *ptr, unsigned int n) {
assert(n < sizeof(*ptr) * 8);
-#if !defined(HAVE_AVX2)
+#if !defined(HAVE_AVX2)
m128 *sub;
if (n < 128) {
sub = &ptr->lo.lo;
@@ -1372,8 +1372,8 @@ void clearbit512(m512 *ptr, unsigned int n) {
sub = &ptr->hi.hi;
}
clearbit128(sub, n % 128);
-#elif defined(HAVE_AVX512)
- *ptr = andnot512(mask1bit512(n), *ptr);
+#elif defined(HAVE_AVX512)
+ *ptr = andnot512(mask1bit512(n), *ptr);
#else
m256 *sub;
if (n < 256) {
@@ -1388,29 +1388,29 @@ void clearbit512(m512 *ptr, unsigned int n) {
// tests bit N in the given vector.
static really_inline
-char testbit512(m512 val, unsigned int n) {
- assert(n < sizeof(val) * 8);
-#if !defined(HAVE_AVX2)
- m128 sub;
+char testbit512(m512 val, unsigned int n) {
+ assert(n < sizeof(val) * 8);
+#if !defined(HAVE_AVX2)
+ m128 sub;
if (n < 128) {
- sub = val.lo.lo;
+ sub = val.lo.lo;
} else if (n < 256) {
- sub = val.lo.hi;
+ sub = val.lo.hi;
} else if (n < 384) {
- sub = val.hi.lo;
+ sub = val.hi.lo;
} else {
- sub = val.hi.hi;
+ sub = val.hi.hi;
}
return testbit128(sub, n % 128);
-#elif defined(HAVE_AVX512)
- const m512 mask = mask1bit512(n);
- return !!_mm512_test_epi8_mask(mask, val);
+#elif defined(HAVE_AVX512)
+ const m512 mask = mask1bit512(n);
+ return !!_mm512_test_epi8_mask(mask, val);
#else
- m256 sub;
+ m256 sub;
if (n < 256) {
- sub = val.lo;
+ sub = val.lo;
} else {
- sub = val.hi;
+ sub = val.hi;
n -= 256;
}
return testbit256(sub, n);
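
Taken together, the mask1bit*/setbit*/clearbit*/testbit* families implement fixed-width bitsets over the SIMD types, with the widest available instruction set selected at compile time. A usage sketch, assuming the header is included as in the Hyperscan source tree:

    #include "util/simd_utils.h"

    static int bitset_demo(void) {
        m256 bits = zeroes256();
        setbit256(&bits, 5);
        setbit256(&bits, 200);
        clearbit256(&bits, 5);
        return testbit256(bits, 200) && !testbit256(bits, 5); /* evaluates to 1 */
    }
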
diff --git a/contrib/libs/hyperscan/src/util/small_vector.h b/contrib/libs/hyperscan/src/util/small_vector.h
index c5f11915d5..0f54bbf6bf 100644
--- a/contrib/libs/hyperscan/src/util/small_vector.h
+++ b/contrib/libs/hyperscan/src/util/small_vector.h
@@ -1,70 +1,70 @@
-/*
- * Copyright (c) 2017, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef UTIL_SMALL_VECTOR_H
-#define UTIL_SMALL_VECTOR_H
-
-#include <vector>
-
-#include <boost/version.hpp>
-
-/*
- * We use the small_vector constructors introduced in Boost 1.61 (trac bug
- * #11866, github commit b436c91). If the Boost version is too old, we fall
- * back to using std::vector.
- */
-#if BOOST_VERSION >= 106100
-# define HAVE_BOOST_CONTAINER_SMALL_VECTOR
-#endif
-
-#if defined(HAVE_BOOST_CONTAINER_SMALL_VECTOR)
-# include <boost/container/small_vector.hpp>
-#endif
-
-namespace ue2 {
-
-#if defined(HAVE_BOOST_CONTAINER_SMALL_VECTOR)
-
-template <class T, std::size_t N,
- typename Allocator = boost::container::new_allocator<T>>
-using small_vector = boost::container::small_vector<T, N, Allocator>;
-
-#else
-
-// Boost version isn't new enough, fall back to just using std::vector.
-template <class T, std::size_t N, typename Allocator = std::allocator<T>>
-using small_vector = std::vector<T, Allocator>;
-
-// Support workarounds for flat_set/flat_map and GCC 4.8.
-#define SMALL_VECTOR_IS_STL_VECTOR 1
-
-#endif // HAVE_BOOST_CONTAINER_SMALL_VECTOR
-
-} // namespace ue2
-
-#endif // UTIL_SMALL_VECTOR_H
+/*
+ * Copyright (c) 2017, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef UTIL_SMALL_VECTOR_H
+#define UTIL_SMALL_VECTOR_H
+
+#include <vector>
+
+#include <boost/version.hpp>
+
+/*
+ * We use the small_vector constructors introduced in Boost 1.61 (trac bug
+ * #11866, github commit b436c91). If the Boost version is too old, we fall
+ * back to using std::vector.
+ */
+#if BOOST_VERSION >= 106100
+# define HAVE_BOOST_CONTAINER_SMALL_VECTOR
+#endif
+
+#if defined(HAVE_BOOST_CONTAINER_SMALL_VECTOR)
+# include <boost/container/small_vector.hpp>
+#endif
+
+namespace ue2 {
+
+#if defined(HAVE_BOOST_CONTAINER_SMALL_VECTOR)
+
+template <class T, std::size_t N,
+ typename Allocator = boost::container::new_allocator<T>>
+using small_vector = boost::container::small_vector<T, N, Allocator>;
+
+#else
+
+// Boost version isn't new enough, fall back to just using std::vector.
+template <class T, std::size_t N, typename Allocator = std::allocator<T>>
+using small_vector = std::vector<T, Allocator>;
+
+// Support workarounds for flat_set/flat_map and GCC 4.8.
+#define SMALL_VECTOR_IS_STL_VECTOR 1
+
+#endif // HAVE_BOOST_CONTAINER_SMALL_VECTOR
+
+} // namespace ue2
+
+#endif // UTIL_SMALL_VECTOR_H
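
For context, a brief usage sketch of the alias defined above; with a new enough Boost, the first N elements are stored inline and only growth beyond N allocates:

    #include "util/small_vector.h"

    ue2::small_vector<int, 4> v;   // room for 4 elements inline (Boost path)
    for (int i = 0; i < 8; i++) {
        v.push_back(i);            // spills to the heap past N, like std::vector
    }
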
diff --git a/contrib/libs/hyperscan/src/util/state_compress.c b/contrib/libs/hyperscan/src/util/state_compress.c
index acc0a47667..7238849e7f 100644
--- a/contrib/libs/hyperscan/src/util/state_compress.c
+++ b/contrib/libs/hyperscan/src/util/state_compress.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -31,7 +31,7 @@
*/
#include "config.h"
#include "ue2common.h"
-#include "arch.h"
+#include "arch.h"
#include "bitutils.h"
#include "unaligned.h"
#include "pack_bits.h"
@@ -263,7 +263,7 @@ m256 loadcompressed256_32bit(const void *ptr, m256 mvec) {
expand32(v[4], m[4]), expand32(v[5], m[5]),
expand32(v[6], m[6]), expand32(v[7], m[7]) };
-#if !defined(HAVE_AVX2)
+#if !defined(HAVE_AVX2)
m256 xvec = { .lo = _mm_set_epi32(x[3], x[2], x[1], x[0]),
.hi = _mm_set_epi32(x[7], x[6], x[5], x[4]) };
#else
@@ -290,7 +290,7 @@ m256 loadcompressed256_64bit(const void *ptr, m256 mvec) {
u64a x[4] = { expand64(v[0], m[0]), expand64(v[1], m[1]),
expand64(v[2], m[2]), expand64(v[3], m[3]) };
-#if !defined(HAVE_AVX2)
+#if !defined(HAVE_AVX2)
m256 xvec = { .lo = _mm_set_epi64x(x[1], x[0]),
.hi = _mm_set_epi64x(x[3], x[2]) };
#else
@@ -547,17 +547,17 @@ m512 loadcompressed512_32bit(const void *ptr, m512 mvec) {
expand32(v[14], m[14]), expand32(v[15], m[15]) };
m512 xvec;
-#if defined(HAVE_AVX512)
- xvec = _mm512_set_epi32(x[15], x[14], x[13], x[12],
- x[11], x[10], x[9], x[8],
- x[7], x[6], x[5], x[4],
- x[3], x[2], x[1], x[0]);
-#elif defined(HAVE_AVX2)
- xvec.lo = _mm256_set_epi32(x[7], x[6], x[5], x[4],
- x[3], x[2], x[1], x[0]);
- xvec.hi = _mm256_set_epi32(x[15], x[14], x[13], x[12],
- x[11], x[10], x[9], x[8]);
-#else
+#if defined(HAVE_AVX512)
+ xvec = _mm512_set_epi32(x[15], x[14], x[13], x[12],
+ x[11], x[10], x[9], x[8],
+ x[7], x[6], x[5], x[4],
+ x[3], x[2], x[1], x[0]);
+#elif defined(HAVE_AVX2)
+ xvec.lo = _mm256_set_epi32(x[7], x[6], x[5], x[4],
+ x[3], x[2], x[1], x[0]);
+ xvec.hi = _mm256_set_epi32(x[15], x[14], x[13], x[12],
+ x[11], x[10], x[9], x[8]);
+#else
xvec.lo.lo = _mm_set_epi32(x[3], x[2], x[1], x[0]);
xvec.lo.hi = _mm_set_epi32(x[7], x[6], x[5], x[4]);
xvec.hi.lo = _mm_set_epi32(x[11], x[10], x[9], x[8]);
@@ -587,13 +587,13 @@ m512 loadcompressed512_64bit(const void *ptr, m512 mvec) {
expand64(v[4], m[4]), expand64(v[5], m[5]),
expand64(v[6], m[6]), expand64(v[7], m[7]) };
-#if defined(HAVE_AVX512)
- m512 xvec = _mm512_set_epi64(x[7], x[6], x[5], x[4],
- x[3], x[2], x[1], x[0]);
-#elif defined(HAVE_AVX2)
- m512 xvec = { .lo = _mm256_set_epi64x(x[3], x[2], x[1], x[0]),
- .hi = _mm256_set_epi64x(x[7], x[6], x[5], x[4])};
-#else
+#if defined(HAVE_AVX512)
+ m512 xvec = _mm512_set_epi64(x[7], x[6], x[5], x[4],
+ x[3], x[2], x[1], x[0]);
+#elif defined(HAVE_AVX2)
+ m512 xvec = { .lo = _mm256_set_epi64x(x[3], x[2], x[1], x[0]),
+ .hi = _mm256_set_epi64x(x[7], x[6], x[5], x[4])};
+#else
m512 xvec = { .lo = { _mm_set_epi64x(x[1], x[0]),
_mm_set_epi64x(x[3], x[2]) },
.hi = { _mm_set_epi64x(x[5], x[4]),
diff --git a/contrib/libs/hyperscan/src/util/target_info.cpp b/contrib/libs/hyperscan/src/util/target_info.cpp
index ccd8945d4c..66ba5f5acc 100644
--- a/contrib/libs/hyperscan/src/util/target_info.cpp
+++ b/contrib/libs/hyperscan/src/util/target_info.cpp
@@ -46,10 +46,10 @@ bool target_t::can_run_on_code_built_for(const target_t &code_target) const {
return false;
}
- if (!has_avx512() && code_target.has_avx512()) {
- return false;
- }
-
+ if (!has_avx512() && code_target.has_avx512()) {
+ return false;
+ }
+
if (!has_avx512vbmi() && code_target.has_avx512vbmi()) {
return false;
}
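
The compatibility rule restored above is monotone: a host can run code built for an equal or weaker feature set, and each clause rejects code requiring a feature the host lacks. An illustrative sketch using the public platform structure:

    #include "hs_compile.h"        /* hs_platform_info, HS_CPU_FEATURES_* */
    #include "util/target_info.h"

    bool demo(void) {
        hs_platform_info host_p = {HS_TUNE_FAMILY_GENERIC,
                                   HS_CPU_FEATURES_AVX2, 0, 0};
        hs_platform_info code_p = {HS_TUNE_FAMILY_GENERIC,
                                   HS_CPU_FEATURES_AVX2 | HS_CPU_FEATURES_AVX512,
                                   0, 0};
        ue2::target_t host(host_p), code(code_p);
        return host.can_run_on_code_built_for(code); /* false: host lacks AVX512 */
    }
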
@@ -61,19 +61,19 @@ target_t::target_t(const hs_platform_info &p)
: tune(p.tune), cpu_features(p.cpu_features) {}
bool target_t::has_avx2(void) const {
- return cpu_features & HS_CPU_FEATURES_AVX2;
+ return cpu_features & HS_CPU_FEATURES_AVX2;
+}
+
+bool target_t::has_avx512(void) const {
+ return cpu_features & HS_CPU_FEATURES_AVX512;
}
-bool target_t::has_avx512(void) const {
- return cpu_features & HS_CPU_FEATURES_AVX512;
-}
-
bool target_t::has_avx512vbmi(void) const {
return cpu_features & HS_CPU_FEATURES_AVX512VBMI;
}
bool target_t::is_atom_class(void) const {
- return tune == HS_TUNE_FAMILY_SLM || tune == HS_TUNE_FAMILY_GLM;
+ return tune == HS_TUNE_FAMILY_SLM || tune == HS_TUNE_FAMILY_GLM;
}
} // namespace ue2
diff --git a/contrib/libs/hyperscan/src/util/target_info.h b/contrib/libs/hyperscan/src/util/target_info.h
index 15aa4d61b8..f64573aeda 100644
--- a/contrib/libs/hyperscan/src/util/target_info.h
+++ b/contrib/libs/hyperscan/src/util/target_info.h
@@ -40,8 +40,8 @@ struct target_t {
bool has_avx2(void) const;
- bool has_avx512(void) const;
-
+ bool has_avx512(void) const;
+
bool has_avx512vbmi(void) const;
bool is_atom_class(void) const;
diff --git a/contrib/libs/hyperscan/src/util/ue2_graph.h b/contrib/libs/hyperscan/src/util/ue2_graph.h
index 72a525374b..aa9718d73a 100644
--- a/contrib/libs/hyperscan/src/util/ue2_graph.h
+++ b/contrib/libs/hyperscan/src/util/ue2_graph.h
@@ -1,1032 +1,1032 @@
-/*
+/*
* Copyright (c) 2016-2018, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef UE2_GRAPH_H
-#define UE2_GRAPH_H
-
-#include "ue2common.h"
-#include "util/graph_range.h"
-#include "util/noncopyable.h"
-#include "util/operators.h"
-
-#include <boost/graph/properties.hpp> /* vertex_index_t, ... */
-#include <boost/pending/property.hpp> /* no_property */
-#include <boost/property_map/property_map.hpp>
-#include <boost/intrusive/list.hpp>
-#include <boost/iterator/iterator_adaptor.hpp>
-#include <boost/iterator/iterator_facade.hpp>
-
-#include <functional> /* hash */
-#include <tuple> /* tie */
-#include <type_traits> /* is_same, etc */
-#include <utility> /* pair, declval */
-
-/*
- * Basic design of ue2_graph:
- *
- * Fairly standard adjacency list type graph structure. The main internal
- * structures are vertex_node and edge_node.
- *
- * Each vertex_node maintains lists of incoming and outgoing edge_nodes, a
- * serial number and the vertex properties.
- *
- * Each edge_node contains pointers to the source and target vertex as well as
- * the serial number and edge properties.
- *
- * Every time an edge_node or vertex_node is created in the graph, it is given a
- * unique serial number by increasing a private counter in the graph.
- *
- * The main thing to note is that the in and out edge lists are intrusive lists
- * with the edge_node containing the necessary hooks. This means that we can
- * easily convert the edge_node to iterators of the in_edge_list and
- * out_edge_list and remove them from the lists.
- *
- * vertex_descriptor and edge_descriptor structures both just wrap pointers to
- * the relevant node structure along with the serial number. operator<() for the
- * descriptors is overridden to look at the serial member of the node.
- * We do not use:
- * - the address of the node structure as this would lead to an unstable
- * ordering of vertices between runs.
- * - the index field as this would mean that the generation of new index
- * values (during say renumbering of vertex nodes after removing some
- * vertices) would potentially reorder vertices and corrupt containers
- * such as std::set<>.
- * The serial number is copied into the descriptors so that we can still have
- * descriptors in a container (such as set or unordered_set) after removing the
- * underlying node.
- *
- * Hashing of descriptors is based on the serial field for similar reasons.
- *
- *
- *
- * Main differences from boost::adjacency_list<> with listS:
- *
- * (1) Deterministic ordering for vertices and edges
- * boost::adjacency_list<> uses pointer ordering for vertex_descriptors. As
- * a result, ordering of vertices and edges between runs is
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef UE2_GRAPH_H
+#define UE2_GRAPH_H
+
+#include "ue2common.h"
+#include "util/graph_range.h"
+#include "util/noncopyable.h"
+#include "util/operators.h"
+
+#include <boost/graph/properties.hpp> /* vertex_index_t, ... */
+#include <boost/pending/property.hpp> /* no_property */
+#include <boost/property_map/property_map.hpp>
+#include <boost/intrusive/list.hpp>
+#include <boost/iterator/iterator_adaptor.hpp>
+#include <boost/iterator/iterator_facade.hpp>
+
+#include <functional> /* hash */
+#include <tuple> /* tie */
+#include <type_traits> /* is_same, etc */
+#include <utility> /* pair, declval */
+
+/*
+ * Basic design of ue2_graph:
+ *
+ * Fairly standard adjacency list type graph structure. The main internal
+ * structures are vertex_node and edge_node.
+ *
+ * Each vertex_node maintains lists of incoming and outgoing edge_nodes, a
+ * serial number and the vertex properties.
+ *
+ * Each edge_node contains pointers to the source and target vertex as well as
+ * the serial number and edge properties.
+ *
+ * Every time an edge_node or vertex_node is created in the graph, it is given a
+ * unique serial number by increasing a private counter in the graph.
+ *
+ * The main thing to note is that the in and out edge lists are intrusive lists
+ * with the edge_node containing the necessary hooks. This means that we can
+ * easily convert the edge_node to iterators of the in_edge_list and
+ * out_edge_list and remove them from the lists.
+ *
+ * vertex_descriptor and edge_descriptor structures both just wrap pointers to
+ * the relevant node structure along with the serial number. operator<() for the
+ * descriptors is overridden to look at the serial member of the node.
+ * We do not use:
+ * - the address of the node structure as this would lead to an unstable
+ * ordering of vertices between runs.
+ * - the index field as this would mean that the generation of new index
+ * values (during say renumbering of vertex nodes after removing some
+ * vertices) would potentially reorder vertices and corrupt containers
+ * such as std::set<>.
+ * The serial number is copied into the descriptors so that we can still have
+ * descriptors in a container (such as set or unordered_set) after removing the
+ * underlying node.
+ *
+ * Hashing of descriptors is based on the serial field for similar reasons.
+ *
+ *
+ *
+ * Main differences from boost::adjacency_list<> with listS:
+ *
+ * (1) Deterministic ordering for vertices and edges
+ * boost::adjacency_list<> uses pointer ordering for vertex_descriptors. As
+ * a result, ordering of vertices and edges between runs is
 * non-deterministic unless containers, etc. use custom comparators.
- *
- * (2) Proper types for descriptors, etc.
- * No more void * for vertex_descriptors and trying to use it for the wrong
- * graph type.
- *
- * (3) Constant time num_edges(), num_vertices(), degree(), in_degree() and
- * out_degree()
- * std::list::size() is meant to be constant time in C++11, but this is
- * not always implemented, as people want to keep ABI compatibility with
- * existing C++98 standard libraries (gcc 4.8). As ue2_graph uses
- * intrusive lists rather than std::list, this is not an issue for us.
- *
- * (4) Constant time remove_edge(e, g)
- * ue2_graph uses boost::intrusive_lists internally so we can easily unlink
- * an edge from the in and out edgelist of its source and target.
- *
- * (5) More efficient edge(u, v, g) and remove_edge(u, v, g)
- * ue2_graph will check which of u and v has the smallest relevant degree
- * and use that to search for the edge(s).
- *
- * (6) Automatically populate the index field of vertex and edge bundles.
- * Saves us from doing it manually. Naturally there is nothing to prevent
- * the user from stuffing up the index properties later.
- *
- * (7) Different edge iteration order
- * ue2_graph does not maintain an explicit global edge list, so the
- * edge_iterator is constructed out of vertex_iterator and
- * out_edge_iterators by iterating the out_edges of each vertex. This
- * means that edge iteration order is not insertion order like for
- * adjacency_list.
- *
- * (8) null_edge()
- * Because why not?
- *
- * (9) vertex and edge properties must have an index field.
- * We generally need them, so the effort has not been put into specialising
- * for when they are not present.
- *
- *
- *
- * Possible Future Work:
- *
- * (1) Improve edge(u, v, g) performance
- * This function sees a fair amount of use and is O(n) in the smallest of
- * the source out_degree or target in_degree. This could be improved by
- * changing one of the edge containers to something similar to a multiset.
- *
- * (2) 'Lie' about the number of edges / vertices
- *
- * One of the main uses of num_edges() and num_vertices() is to allocate a
- * vector, etc so that it can be indexed by edge or vertex index. If
- * num_edges() and num_vertices() returned the appropriate size for such a
- * vector (at least one more than the largest index), we would be able to
- * avoid some renumbering operations. Functions would have to be provided to
- * get the real number of vertices and edges. Having num_vertices() and
- * num_edges() return an over-estimate is not without precedent in the BGL
- * - the filtered_graph adaptor does the same thing and is compatible with
- * various (all?) BGL algorithms. It is not clear whether this was done
- * deliberately for the same reason or because it is difficult for
- * filtered_graph to get the true counts.
- *
- * (3) Investigate slab/pooled allocation schemes for nodes.
- */
-
-namespace ue2 {
-
-namespace graph_detail {
-
-class graph_base : noncopyable {
-};
-
-struct default_edge_property {
- size_t index;
-};
-
-struct default_vertex_property {
- size_t index;
-};
-
-template<typename Graph>
-class vertex_descriptor : totally_ordered<vertex_descriptor<Graph>> {
- using vertex_node = typename Graph::vertex_node;
-public:
- vertex_descriptor() : p(nullptr), serial(0) {}
- explicit vertex_descriptor(vertex_node *pp) : p(pp), serial(pp->serial) {}
-
+ *
+ * (2) Proper types for descriptors, etc.
+ * No more void * for vertex_descriptors and trying to use it for the wrong
+ * graph type.
+ *
+ * (3) Constant time num_edges(), num_vertices(), degree(), in_degree() and
+ * out_degree()
+ * std::list::size() is meant to be constant time in C++11, but this is
+ * not always implemented, as people want to keep ABI compatibility with
+ * existing C++98 standard libraries (gcc 4.8). As ue2_graph uses
+ * intrusive lists rather than std::list, this is not an issue for us.
+ *
+ * (4) Constant time remove_edge(e, g)
+ * ue2_graph uses boost::intrusive_lists internally so we can easily unlink
+ * an edge from the in and out edgelist of its source and target.
+ *
+ * (5) More efficient edge(u, v, g) and remove_edge(u, v, g)
+ * ue2_graph will check which of u and v has the smallest relevant degree
+ * and use that to search for the edge(s).
+ *
+ * (6) Automatically populate the index field of vertex and edge bundles.
+ * Saves us from doing it manually. Naturally there is nothing to prevent
+ * the user from stuffing up the index properties later.
+ *
+ * (7) Different edge iteration order
+ * ue2_graph does not maintain an explicit global edge list, so the
+ * edge_iterator is constructed out of vertex_iterator and
+ * out_edge_iterators by iterating the out_edges of each vertex. This
+ * means that edge iteration order is not insertion order like for
+ * adjacency_list.
+ *
+ * (8) null_edge()
+ * Because why not?
+ *
+ * (9) vertex and edge properties must have an index field.
+ * We generally need them, so the effort has not been put into specialising
+ * for when they are not present.
+ *
+ *
+ *
+ * Possible Future Work:
+ *
+ * (1) Improve edge(u, v, g) performance
+ * This function sees a fair amount of use and is O(n) in the smallest of
+ * the source out_degree or target in_degree. This could be improved by
+ * changing one of the edge containers to something similar to a multiset.
+ *
+ * (2) 'Lie' about the number of edges / vertices
+ *
+ * One of the main uses of num_edges() and num_vertices() is to allocate a
+ * vector, etc so that it can be indexed by edge or vertex index. If
+ * num_edges() and num_vertices() returned the appropriate size for such a
+ * vector (at least one more than the largest index), we would be able to
+ * avoid some renumbering operations. Functions would have to be provided to
+ * get the real number of vertices and edges. Having num_vertices() and
+ * num_edges() return an over-estimate is not without precedent in the BGL
+ * - the filtered_graph adaptor does the same thing and is compatible with
+ * various (all?) BGL algorithms. It is not clear whether this was done
+ * deliberately for the same reason or because it is difficult for
+ * filtered_graph to get the true counts.
+ *
+ * (3) Investigate slab/pooled allocation schemes for nodes.
+ */
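
To make the design above concrete, here is a hypothetical graph type built on this template. Per items (6) and (9), the property bundles carry an index field that the graph populates itself; the sketch assumes the BGL-style free functions and bundle operator[] that the rest of this header defines:

    #include "util/ue2_graph.h"

    struct my_vertex_props {
        size_t index;   /* required by ue2_graph, filled in automatically */
        int colour = 0;
    };

    struct my_edge_props {
        size_t index;   /* required by ue2_graph */
    };

    struct MyGraph : ue2::ue2_graph<MyGraph, my_vertex_props, my_edge_props> {};

    void demo(void) {
        MyGraph g;
        MyGraph::vertex_descriptor u = add_vertex(g);
        MyGraph::vertex_descriptor v = add_vertex(g);
        /* the convenience constructor absorbs the (descriptor, bool) pair */
        MyGraph::edge_descriptor e = add_edge(u, v, g);
        g[u].colour = 1;   /* bundled property access */
        (void)e;
    }
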
+
+namespace ue2 {
+
+namespace graph_detail {
+
+class graph_base : noncopyable {
+};
+
+struct default_edge_property {
+ size_t index;
+};
+
+struct default_vertex_property {
+ size_t index;
+};
+
+template<typename Graph>
+class vertex_descriptor : totally_ordered<vertex_descriptor<Graph>> {
+ using vertex_node = typename Graph::vertex_node;
+public:
+ vertex_descriptor() : p(nullptr), serial(0) {}
+ explicit vertex_descriptor(vertex_node *pp) : p(pp), serial(pp->serial) {}
+
explicit operator bool() const { return p; }
- bool operator<(const vertex_descriptor b) const {
- if (p && b.p) {
- /* no vertices in the same graph can have the same serial */
- assert(p == b.p || serial != b.serial);
- return serial < b.serial;
- } else {
- return p < b.p;
- }
- }
- bool operator==(const vertex_descriptor b) const { return p == b.p; }
-
- size_t hash() const {
- return std::hash<u64a>()(serial);
- }
-
-private:
- vertex_node *raw(void) { return p; }
- vertex_node *p;
- u64a serial;
- friend Graph;
-};
-
-template<typename Graph>
-class edge_descriptor : totally_ordered<edge_descriptor<Graph>> {
- using edge_node = typename Graph::edge_node;
-public:
- edge_descriptor() : p(nullptr), serial(0) {}
- explicit edge_descriptor(edge_node *pp) : p(pp), serial(pp->serial) {}
-
- /* Convenience ctor to allow us to directly get an edge_descriptor from
- * edge() and add_edge(). As we have null_edges and we always allow
- * parallel edges, the bool component of the return from these functions is
- * not required. */
- edge_descriptor(const std::pair<edge_descriptor, bool> &tup)
- : p(tup.first.p), serial(tup.first.serial) {
- assert(tup.second == (bool)tup.first);
- }
-
- operator bool() const { return p; }
- bool operator<(const edge_descriptor b) const {
- if (p && b.p) {
- /* no edges in the same graph can have the same serial */
- assert(p == b.p || serial != b.serial);
- return serial < b.serial;
- } else {
- return p < b.p;
- }
- }
- bool operator==(const edge_descriptor b) const { return p == b.p; }
-
- size_t hash() const {
- return std::hash<u64a>()(serial);
- }
-
-private:
- edge_node *raw(void) { return p; }
- edge_node *p;
- u64a serial;
- friend Graph;
-};
-
-} // namespace graph_detail
-
-template<typename Graph,
- typename VertexPropertyType = graph_detail::default_vertex_property,
- typename EdgePropertyType = graph_detail::default_edge_property>
-class ue2_graph : graph_detail::graph_base {
-private:
- struct in_edge_tag { };
- struct out_edge_tag { };
-
- struct vertex_node;
-
- using out_edge_hook
- = boost::intrusive::list_base_hook<boost::intrusive::tag<out_edge_tag> >;
-
- /* in_edge_hook does not use safe mode as during graph destruction we do not
- * maintain the in edge lists */
- using in_edge_hook
- = boost::intrusive::list_base_hook<boost::intrusive::tag<in_edge_tag>,
- boost::intrusive::link_mode<boost::intrusive::normal_link> >;
-
- struct edge_node : public out_edge_hook, public in_edge_hook {
- explicit edge_node(u64a serial_in) : serial(serial_in) { }
-
- vertex_node *source = nullptr;
- vertex_node *target = nullptr;
- const u64a serial; /*< used to order edges. We do not use props.index so
- * that there is no danger of invalidating sets or
- * other containers by changing the index due to
- * renumbering */
- EdgePropertyType props;
- };
-
- template<typename hook_type> using vertex_edge_list
- = boost::intrusive::list<edge_node,
- boost::intrusive::base_hook<hook_type> >;
-
- struct vertex_node : public boost::intrusive::list_base_hook<> {
- explicit vertex_node(u64a serial_in) : serial(serial_in) { }
-
- VertexPropertyType props;
- const u64a serial; /*< used to order vertices. We do not use props.index
- * so that there is no danger of invalidating sets or
- * other containers by changing the index due to
- * renumbering */
-
- /* The incoming edges are not considered owned by the vertex */
- vertex_edge_list<in_edge_hook> in_edge_list;
-
- /* The outgoing edges are considered owned by the vertex and
+ bool operator<(const vertex_descriptor b) const {
+ if (p && b.p) {
+ /* no vertices in the same graph can have the same serial */
+ assert(p == b.p || serial != b.serial);
+ return serial < b.serial;
+ } else {
+ return p < b.p;
+ }
+ }
+ bool operator==(const vertex_descriptor b) const { return p == b.p; }
+
+ size_t hash() const {
+ return std::hash<u64a>()(serial);
+ }
+
+private:
+ vertex_node *raw(void) { return p; }
+ vertex_node *p;
+ u64a serial;
+ friend Graph;
+};
+
+template<typename Graph>
+class edge_descriptor : totally_ordered<edge_descriptor<Graph>> {
+ using edge_node = typename Graph::edge_node;
+public:
+ edge_descriptor() : p(nullptr), serial(0) {}
+ explicit edge_descriptor(edge_node *pp) : p(pp), serial(pp->serial) {}
+
+ /* Convenience ctor to allow us to directly get an edge_descriptor from
+ * edge() and add_edge(). As we have null_edges and we always allow
+ * parallel edges, the bool component of the return from these functions is
+ * not required. */
+ edge_descriptor(const std::pair<edge_descriptor, bool> &tup)
+ : p(tup.first.p), serial(tup.first.serial) {
+ assert(tup.second == (bool)tup.first);
+ }
+
+ operator bool() const { return p; }
+ bool operator<(const edge_descriptor b) const {
+ if (p && b.p) {
+ /* no edges in the same graph can have the same serial */
+ assert(p == b.p || serial != b.serial);
+ return serial < b.serial;
+ } else {
+ return p < b.p;
+ }
+ }
+ bool operator==(const edge_descriptor b) const { return p == b.p; }
+
+ size_t hash() const {
+ return std::hash<u64a>()(serial);
+ }
+
+private:
+ edge_node *raw(void) { return p; }
+ edge_node *p;
+ u64a serial;
+ friend Graph;
+};
+
+} // namespace graph_detail
+
+template<typename Graph,
+ typename VertexPropertyType = graph_detail::default_vertex_property,
+ typename EdgePropertyType = graph_detail::default_edge_property>
+class ue2_graph : graph_detail::graph_base {
+private:
+ struct in_edge_tag { };
+ struct out_edge_tag { };
+
+ struct vertex_node;
+
+ using out_edge_hook
+ = boost::intrusive::list_base_hook<boost::intrusive::tag<out_edge_tag> >;
+
+    /* in_edge_hook does not use safe mode, as the in-edge lists are not
+     * maintained during graph destruction */
+ using in_edge_hook
+ = boost::intrusive::list_base_hook<boost::intrusive::tag<in_edge_tag>,
+ boost::intrusive::link_mode<boost::intrusive::normal_link> >;
+
+ struct edge_node : public out_edge_hook, public in_edge_hook {
+ explicit edge_node(u64a serial_in) : serial(serial_in) { }
+
+ vertex_node *source = nullptr;
+ vertex_node *target = nullptr;
+        const u64a serial; /**< used to order edges. We do not use props.index so
+ * that there is no danger of invalidating sets or
+ * other containers by changing the index due to
+ * renumbering */
+ EdgePropertyType props;
+ };
+
+ template<typename hook_type> using vertex_edge_list
+ = boost::intrusive::list<edge_node,
+ boost::intrusive::base_hook<hook_type> >;
+
+ struct vertex_node : public boost::intrusive::list_base_hook<> {
+ explicit vertex_node(u64a serial_in) : serial(serial_in) { }
+
+ VertexPropertyType props;
+        const u64a serial; /**< used to order vertices. We do not use props.index
+ * so that there is no danger of invalidating sets or
+ * other containers by changing the index due to
+ * renumbering */
+
+ /* The incoming edges are not considered owned by the vertex */
+ vertex_edge_list<in_edge_hook> in_edge_list;
+
+        /* The outgoing edges are considered owned by the vertex and
* need to be freed when the graph is being destroyed */
- vertex_edge_list<out_edge_hook> out_edge_list;
-
-        /* The destructor only frees memory owned by the vertex and will leave
-         * the neighbours' edges in a bad state. If a vertex is being removed
-         * (rather than the whole graph being destroyed), the gentler clean-up
-         * of clear_vertex() must be called first */
- ~vertex_node() {
- out_edge_list.clear_and_dispose(delete_disposer());
- }
- };
-
- struct delete_disposer {
- template<typename T> void operator()(const T *d) const { delete d; }
- };
-
- struct in_edge_disposer {
- void operator()(edge_node *e) const {
- /* remove from source's out edge list before deleting */
- vertex_node *u = e->source;
- u->out_edge_list.erase(u->out_edge_list.iterator_to(*e));
- delete e;
- }
- };
-
- struct out_edge_disposer {
- void operator()(edge_node *e) const {
- /* remove from target's in edge list before deleting */
- vertex_node *v = e->target;
- v->in_edge_list.erase(v->in_edge_list.iterator_to(*e));
- delete e;
- }
- };
-
- using vertices_list_type
- = boost::intrusive::list<vertex_node,
- boost::intrusive::base_hook<boost::intrusive::list_base_hook<> > >;
-
- vertices_list_type vertices_list;
-
-protected: /* to allow renumbering */
- static const size_t N_SPECIAL_VERTICES = 0; /* override in derived class */
- size_t next_vertex_index = 0;
- size_t next_edge_index = 0;
-
-private:
- size_t graph_edge_count = 0; /* maintained explicitly as we have no global
- edge list */
-
- u64a next_serial = 0;
- u64a new_serial() {
- u64a serial = next_serial++;
- if (!next_serial) {
-            /* if we have created enough graph edges/vertices to overflow a
-             * u64a, we must have spent close to an eternity adding to this
-             * graph; something has gone very wrong and we will not produce a
-             * final bytecode in a reasonable amount of time. Or, more likely,
-             * the next_serial value has become corrupt. */
- throw std::overflow_error("too many graph edges/vertices created");
- }
- return serial;
- }
-public:
- using vertex_descriptor = graph_detail::vertex_descriptor<ue2_graph>;
- using edge_descriptor = graph_detail::edge_descriptor<ue2_graph>;
- friend vertex_descriptor;
- friend edge_descriptor;
-
- using vertices_size_type = typename vertices_list_type::size_type;
- using degree_size_type
- = typename vertex_edge_list<out_edge_hook>::size_type;
- using edges_size_type = size_t;
-
- using vertex_property_type = VertexPropertyType;
- using edge_property_type = EdgePropertyType;
-
- using graph_bundled = boost::no_property;
- using vertex_bundled = VertexPropertyType;
- using edge_bundled = EdgePropertyType;
-
-private:
-    /* Note: apparently, nested class templates cannot be fully specialised,
-     * but they can be partially specialised. Sigh, ... */
- template<typename BundleType, typename dummy = void>
- struct bundle_key_type {
- };
-
- template<typename dummy>
- struct bundle_key_type<VertexPropertyType, dummy> {
- using type = vertex_descriptor;
- };
-
- template<typename dummy>
- struct bundle_key_type<EdgePropertyType, dummy> {
- using type = edge_descriptor;
- };
-
-public:
- class out_edge_iterator : public boost::iterator_adaptor<
- out_edge_iterator,
- typename vertex_edge_list<out_edge_hook>::const_iterator,
- edge_descriptor,
- boost::bidirectional_traversal_tag,
- edge_descriptor> {
- using super = typename out_edge_iterator::iterator_adaptor_;
- public:
- out_edge_iterator() : super() { }
- explicit out_edge_iterator(
- typename vertex_edge_list<out_edge_hook>::const_iterator it)
- : super(it) { }
- edge_descriptor dereference() const {
-            /* :( const_cast makes me sad, but constness is defined by the
-             * graph parameter of BGL API calls */
- return edge_descriptor(const_cast<edge_node *>(&*super::base()));
- }
- };
-
- class in_edge_iterator : public boost::iterator_adaptor<
- in_edge_iterator,
- typename vertex_edge_list<in_edge_hook>::const_iterator,
- edge_descriptor,
- boost::bidirectional_traversal_tag,
- edge_descriptor> {
- using super = typename in_edge_iterator::iterator_adaptor_;
- public:
- in_edge_iterator() : super() { }
- explicit in_edge_iterator(
- typename vertex_edge_list<in_edge_hook>::const_iterator it)
- : super(it) { }
- edge_descriptor dereference() const {
-            /* :( const_cast makes me sad, but constness is defined by the
-             * graph parameter of BGL API calls */
- return edge_descriptor(const_cast<edge_node *>(&*super::base()));
- }
- };
-
- class adjacency_iterator : public boost::iterator_adaptor<
- adjacency_iterator,
- out_edge_iterator,
- vertex_descriptor,
- boost::bidirectional_traversal_tag,
- vertex_descriptor> {
- using super = typename adjacency_iterator::iterator_adaptor_;
- public:
- adjacency_iterator(out_edge_iterator a) : super(std::move(a)) { }
- adjacency_iterator() { }
-
- vertex_descriptor dereference() const {
- return vertex_descriptor(super::base()->p->target);
- }
- };
-
- class inv_adjacency_iterator : public boost::iterator_adaptor<
- inv_adjacency_iterator,
- in_edge_iterator,
- vertex_descriptor,
- boost::bidirectional_traversal_tag,
- vertex_descriptor> {
- using super = typename inv_adjacency_iterator::iterator_adaptor_;
- public:
- inv_adjacency_iterator(in_edge_iterator a) : super(std::move(a)) { }
- inv_adjacency_iterator() { }
-
- vertex_descriptor dereference() const {
- return vertex_descriptor(super::base()->p->source);
- }
- };
-
- class vertex_iterator : public boost::iterator_adaptor<
- vertex_iterator,
- typename vertices_list_type::const_iterator,
- vertex_descriptor,
- boost::bidirectional_traversal_tag,
- vertex_descriptor> {
- using super = typename vertex_iterator::iterator_adaptor_;
- public:
- vertex_iterator() : super() { }
- explicit vertex_iterator(typename vertices_list_type::const_iterator it)
- : super(it) { }
- vertex_descriptor dereference() const {
-            /* :( const_cast makes me sad, but constness is defined by the
-             * graph parameter of BGL API calls */
- return vertex_descriptor(
- const_cast<vertex_node *>(&*super::base()));
- }
- };
-
- class edge_iterator : public boost::iterator_facade<
- edge_iterator,
- edge_descriptor,
- boost::forward_traversal_tag, /* TODO: make bidi */
- edge_descriptor> {
- public:
- using main_base_iter_type = vertex_iterator;
- using aux_base_iter_type = out_edge_iterator;
-
- edge_iterator(main_base_iter_type b, main_base_iter_type e)
- : main(std::move(b)), main_end(std::move(e)) {
- if (main == main_end) {
- return;
- }
- std::tie(aux, aux_end) = out_edges_impl(*main);
- while (aux == aux_end) {
- ++main;
- if (main == main_end) {
- break;
- }
- std::tie(aux, aux_end) = out_edges_impl(*main);
- }
- }
- edge_iterator() { }
-
- friend class boost::iterator_core_access;
- void increment() {
- ++aux;
- while (aux == aux_end) {
- ++main;
- if (main == main_end) {
- break;
- }
- std::tie(aux, aux_end) = out_edges_impl(*main);
- }
- }
- bool equal(const edge_iterator &other) const {
- return main == other.main && (main == main_end || aux == other.aux);
- }
- edge_descriptor dereference() const {
- return *aux;
- }
-
- main_base_iter_type main;
- main_base_iter_type main_end;
- aux_base_iter_type aux;
- aux_base_iter_type aux_end;
- };
-
-public:
- static
- vertex_descriptor null_vertex() { return vertex_descriptor(); }
-
- vertex_descriptor add_vertex_impl() {
- vertex_node *v = new vertex_node(new_serial());
- v->props.index = next_vertex_index++;
- vertices_list.push_back(*v);
- return vertex_descriptor(v);
- }
-
- void remove_vertex_impl(vertex_descriptor v) {
- vertex_node *vv = v.raw();
- assert(vv->in_edge_list.empty());
- assert(vv->out_edge_list.empty());
- vertices_list.erase_and_dispose(vertices_list.iterator_to(*vv),
- delete_disposer());
- }
-
- void clear_in_edges_impl(vertex_descriptor v) {
- graph_edge_count -= v.raw()->in_edge_list.size();
- v.raw()->in_edge_list.clear_and_dispose(in_edge_disposer());
- }
-
- void clear_out_edges_impl(vertex_descriptor v) {
- graph_edge_count -= v.raw()->out_edge_list.size();
- v.raw()->out_edge_list.clear_and_dispose(out_edge_disposer());
- }
-
- /* IncidenceGraph concept functions */
-
- static
- vertex_descriptor source_impl(edge_descriptor e) {
- return vertex_descriptor(e.raw()->source);
- }
-
- static
- vertex_descriptor target_impl(edge_descriptor e) {
- return vertex_descriptor(e.raw()->target);
- }
-
- static
- degree_size_type out_degree_impl(vertex_descriptor v) {
- return v.raw()->out_edge_list.size();
- }
-
- static
- std::pair<out_edge_iterator, out_edge_iterator>
- out_edges_impl(vertex_descriptor v) {
- return {out_edge_iterator(v.raw()->out_edge_list.begin()),
- out_edge_iterator(v.raw()->out_edge_list.end())};
- }
-
- /* BidirectionalGraph concept functions */
-
- static
- degree_size_type in_degree_impl(vertex_descriptor v) {
- return v.raw()->in_edge_list.size();
- }
-
- static
- std::pair<in_edge_iterator, in_edge_iterator>
- in_edges_impl(vertex_descriptor v) {
- return {in_edge_iterator(v.raw()->in_edge_list.begin()),
- in_edge_iterator(v.raw()->in_edge_list.end())};
- }
-
-    /* Note: this is defined so that self-loops are counted twice, which may
-     * or may not be what you want. Actually, you probably don't want this at
-     * all. */
- static
- degree_size_type degree_impl(vertex_descriptor v) {
- return in_degree_impl(v) + out_degree_impl(v);
- }
-
- /* AdjacencyList concept functions */
-
- static
- std::pair<adjacency_iterator, adjacency_iterator>
- adjacent_vertices_impl(vertex_descriptor v) {
- auto out_edge_its = out_edges_impl(v);
- return {adjacency_iterator(out_edge_its.first),
- adjacency_iterator(out_edge_its.second)};
- }
-
-    /* AdjacencyMatrix concept functions
-     * (Note: the constant-time complexity guarantee is not met) */
-
- std::pair<edge_descriptor, bool> edge_impl(vertex_descriptor u,
- vertex_descriptor v) const {
- if (in_degree_impl(v) < out_degree_impl(u)) {
- for (const edge_descriptor &e : in_edges_range(v, *this)) {
- if (source_impl(e) == u) {
- return {e, true};
- }
- }
- } else {
- for (const edge_descriptor &e : out_edges_range(u, *this)) {
- if (target_impl(e) == v) {
- return {e, true};
- }
- }
- }
-
- return {edge_descriptor(), false};
- }
-
- /* Misc functions that don't actually seem to belong to a formal BGL
- concept. */
- static
- edge_descriptor null_edge() { return edge_descriptor(); }
-
- static
- std::pair<inv_adjacency_iterator, inv_adjacency_iterator>
- inv_adjacent_vertices_impl(vertex_descriptor v) {
- auto in_edge_its = in_edges_impl(v);
- return {inv_adjacency_iterator(in_edge_its.first),
- inv_adjacency_iterator(in_edge_its.second)};
- }
-
- /* MutableGraph concept functions */
-
- std::pair<edge_descriptor, bool>
- add_edge_impl(vertex_descriptor u, vertex_descriptor v) {
- bool added = true; /* we always allow parallel edges */
- edge_node *e = new edge_node(new_serial());
- e->source = u.raw();
- e->target = v.raw();
- e->props.index = next_edge_index++;
-
- u.raw()->out_edge_list.push_back(*e);
- v.raw()->in_edge_list.push_back(*e);
-
- graph_edge_count++;
- return {edge_descriptor(e), added};
- }
-
- void remove_edge_impl(edge_descriptor e) {
- graph_edge_count--;
-
- vertex_node *u = e.raw()->source;
- vertex_node *v = e.raw()->target;
-
- v->in_edge_list.erase(v->in_edge_list.iterator_to(*e.raw()));
- u->out_edge_list.erase(u->out_edge_list.iterator_to(*e.raw()));
-
- delete e.raw();
- }
-
- template<class Predicate>
- void remove_out_edge_if_impl(vertex_descriptor v, Predicate pred) {
- out_edge_iterator it, ite;
- std::tie(it, ite) = out_edges_impl(v);
- while (it != ite) {
- auto jt = it;
- ++it;
- if (pred(*jt)) {
- this->remove_edge_impl(*jt);
- }
- }
- }
-
- template<class Predicate>
- void remove_in_edge_if_impl(vertex_descriptor v, Predicate pred) {
- in_edge_iterator it, ite;
- std::tie(it, ite) = in_edges_impl(v);
- while (it != ite) {
- auto jt = it;
- ++it;
- if (pred(*jt)) {
- remove_edge_impl(*jt);
- }
- }
- }
-
- template<class Predicate>
- void remove_edge_if_impl(Predicate pred) {
- edge_iterator it, ite;
- std::tie(it, ite) = edges_impl();
- while (it != ite) {
- auto jt = it;
- ++it;
- if (pred(*jt)) {
- remove_edge_impl(*jt);
- }
- }
- }
-
-private:
- /* GCC 4.8 has bugs with lambdas in templated friend functions, so: */
- struct source_match {
- explicit source_match(const vertex_descriptor &uu) : u(uu) { }
- bool operator()(edge_descriptor e) const { return source_impl(e) == u; }
- const vertex_descriptor &u;
- };
-
- struct target_match {
- explicit target_match(const vertex_descriptor &vv) : v(vv) { }
- bool operator()(edge_descriptor e) const { return target_impl(e) == v; }
- const vertex_descriptor &v;
- };
-public:
- /* Note: (u,v) variant needs to remove all (parallel) edges between (u,v).
- *
- * The edge_descriptor version should be strongly preferred if the
- * edge_descriptor is available.
- */
- void remove_edge_impl(const vertex_descriptor &u,
- const vertex_descriptor &v) {
- if (in_degree_impl(v) < out_degree_impl(u)) {
- remove_in_edge_if_impl(v, source_match(u));
- } else {
- remove_out_edge_if_impl(u, target_match(v));
- }
- }
-
- /* VertexListGraph concept functions */
- vertices_size_type num_vertices_impl() const {
- return vertices_list.size();
- }
-
- std::pair<vertex_iterator, vertex_iterator> vertices_impl() const {
- return {vertex_iterator(vertices_list.begin()),
- vertex_iterator(vertices_list.end())};
- }
-
- /* EdgeListGraph concept functions (aside from those in IncidenceGraph) */
-
- edges_size_type num_edges_impl() const {
- return graph_edge_count;
- }
-
- std::pair<edge_iterator, edge_iterator> edges_impl() const {
- vertex_iterator vi, ve;
- std::tie(vi, ve) = vertices_impl();
-
- return {edge_iterator(vi, ve), edge_iterator(ve, ve)};
- }
-
- /* bundled properties functions */
-
- vertex_property_type &operator[](vertex_descriptor v) {
- return v.raw()->props;
- }
-
- const vertex_property_type &operator[](vertex_descriptor v) const {
- return v.raw()->props;
- }
-
- edge_property_type &operator[](edge_descriptor e) {
- return e.raw()->props;
- }
-
- const edge_property_type &operator[](edge_descriptor e) const {
- return e.raw()->props;
- }
-
- /* PropertyGraph concept functions & helpers */
-
- template<typename R, typename P_of>
- struct prop_map : public boost::put_get_helper<R, prop_map<R, P_of> > {
- using value_type = typename std::decay<R>::type;
- using reference = R;
- using key_type = typename bundle_key_type<P_of>::type;
-
- typedef typename boost::lvalue_property_map_tag category;
-
- prop_map(value_type P_of::*m_in) : member(m_in) { }
-
- reference operator[](key_type k) const {
- return k.raw()->props.*member;
- }
- reference operator()(key_type k) const { return (*this)[k]; }
-
- private:
- value_type P_of::*member;
- };
-
- template<typename R>
- struct prop_map_all : public boost::put_get_helper<R, prop_map_all<R> > {
- using value_type = typename std::decay<R>::type;
- using reference = R;
- using key_type = typename bundle_key_type<value_type>::type;
-
- typedef typename boost::lvalue_property_map_tag category;
-
- reference operator[](key_type k) const {
- return k.raw()->props;
- }
- reference operator()(key_type k) const { return (*this)[k]; }
- };
-
- template<typename P_type, typename P_of>
- friend
- prop_map<P_type &, P_of> get(P_type P_of::*t, Graph &) {
- return prop_map<P_type &, P_of>(t);
- }
-
- template<typename P_type, typename P_of>
- friend
- prop_map<const P_type &, P_of> get(P_type P_of::*t, const Graph &) {
- return prop_map<const P_type &, P_of>(t);
- }
-
-    /* We can't use auto/decltype returns here, as the templated member
-     * functions are not yet visible when the compiler is evaluating the
-     * decltype for the return value. We could probably work around it by
-     * making this a dummy templated function. */
- friend
- prop_map<size_t &, VertexPropertyType>
- get(boost::vertex_index_t, Graph &g) {
- return get(&VertexPropertyType::index, g);
- }
-
- friend
- prop_map<const size_t &, VertexPropertyType>
- get(boost::vertex_index_t, const Graph &g) {
- return get(&VertexPropertyType::index, g);
- }
-
- friend
- prop_map<size_t &, EdgePropertyType>
- get(boost::edge_index_t, Graph &g) {
- return get(&EdgePropertyType::index, g);
- }
-
- friend
- prop_map<const size_t &, EdgePropertyType>
- get(boost::edge_index_t, const Graph &g) {
- return get(&EdgePropertyType::index, g);
- }
-
- friend
- prop_map_all<VertexPropertyType &> get(boost::vertex_all_t, Graph &) {
- return {};
- }
-
- friend
- prop_map_all<const VertexPropertyType &> get(boost::vertex_all_t,
- const Graph &) {
- return {};
- }
-
- friend
- prop_map_all<EdgePropertyType &> get(boost::edge_all_t, Graph &) {
- return {};
- }
-
- friend
- prop_map_all<const EdgePropertyType &> get(boost::edge_all_t,
- const Graph &) {
- return {};
- }
-
- friend
- prop_map_all<VertexPropertyType &> get(boost::vertex_bundle_t, Graph &) {
- return {};
- }
-
- friend
- prop_map_all<const VertexPropertyType &> get(boost::vertex_bundle_t,
- const Graph &) {
- return {};
- }
-
- friend
- prop_map_all<EdgePropertyType &> get(boost::edge_bundle_t, Graph &) {
- return {};
- }
-
- friend
- prop_map_all<const EdgePropertyType &> get(boost::edge_bundle_t,
- const Graph &) {
- return {};
- }
-
- template<typename Prop, typename K>
- friend
- auto get(Prop p, Graph &g, K key) -> decltype(get(p, g)[key]) {
- return get(p, g)[key];
- }
-
- template<typename Prop, typename K>
- friend
- auto get(Prop p, const Graph &g, K key) -> decltype(get(p, g)[key]) {
- return get(p, g)[key];
- }
-
- template<typename Prop, typename K, typename V>
- friend
- void put(Prop p, Graph &g, K key, const V &value) {
- get(p, g)[key] = value;
- }
-
- /* MutablePropertyGraph concept functions */
-
-    /* Note: add_vertex(vp, g) allocates the next index value for the vertex
-     * rather than using the index in vp; i.e., except in rare coincidences:
-     * g[add_vertex(vp, g)].index != vp.index
-     */
- vertex_descriptor add_vertex_impl(const VertexPropertyType &vp) {
- vertex_descriptor v = add_vertex_impl();
- auto i = (*this)[v].index;
- (*this)[v] = vp;
- (*this)[v].index = i;
-
- return v;
- }
-
-    /* Note: add_edge(u, v, ep, g) allocates the next index value for the edge
-     * rather than using the index in ep; i.e., except in rare coincidences:
-     * g[add_edge(u, v, ep, g)].index != ep.index
-     */
- std::pair<edge_descriptor, bool>
- add_edge_impl(vertex_descriptor u, vertex_descriptor v,
- const EdgePropertyType &ep) {
- auto e = add_edge_impl(u, v);
- auto i = (*this)[e.first].index;
- (*this)[e.first] = ep;
- (*this)[e.first].index = i;
-
- return e;
- }
-
- /* End MutablePropertyGraph */
-
-    /** Pack the edge indices into the contiguous range [0, num_edges(g)). */
- void renumber_edges_impl() {
- next_edge_index = 0;
- edge_iterator it;
- edge_iterator ite;
- for (std::tie(it, ite) = edges_impl(); it != ite; ++it) {
- (*this)[*it].index = next_edge_index++;
- }
- }
-
-    /** Pack the vertex indices into the contiguous range [0, num_vertices(g)).
-     * Vertices with indices less than N_SPECIAL_VERTICES are not renumbered.
-     */
- void renumber_vertices_impl() {
- DEBUG_PRINTF("renumbering above %zu\n", Graph::N_SPECIAL_VERTICES);
- next_vertex_index = Graph::N_SPECIAL_VERTICES;
- vertex_iterator it;
- vertex_iterator ite;
- for (std::tie(it, ite) = vertices_impl(); it != ite; ++it) {
- if ((*this)[*it].index < Graph::N_SPECIAL_VERTICES) {
- continue;
- }
-
- (*this)[*it].index = next_vertex_index++;
- }
- }
-
-    /** Returns what the next allocated vertex index will be. This is an upper
-     * bound on the index values of vertices (vertex removal means that there
-     * may be gaps). */
- vertices_size_type vertex_index_upper_bound_impl() const {
- return next_vertex_index;
- }
-
-    /** Returns what the next allocated edge index will be. This is an upper
-     * bound on the index values of edges (edge removal means that there may be
-     * gaps). */
-    edges_size_type edge_index_upper_bound_impl() const {
- return next_edge_index;
- }
-
- using directed_category = boost::directed_tag;
- using edge_parallel_category = boost::allow_parallel_edge_tag;
- struct traversal_category :
- public virtual boost::bidirectional_graph_tag,
- public virtual boost::adjacency_graph_tag,
- public virtual boost::vertex_list_graph_tag,
- public virtual boost::edge_list_graph_tag { };
-
- ue2_graph() = default;
-
- ue2_graph(ue2_graph &&old)
- : next_vertex_index(old.next_vertex_index),
- next_edge_index(old.next_edge_index),
- graph_edge_count(old.graph_edge_count),
- next_serial(old.next_serial) {
- using std::swap;
- swap(vertices_list, old.vertices_list);
- }
-
- ue2_graph &operator=(ue2_graph &&old) {
- next_vertex_index = old.next_vertex_index;
- next_edge_index = old.next_edge_index;
- graph_edge_count = old.graph_edge_count;
- next_serial = old.next_serial;
- using std::swap;
- swap(vertices_list, old.vertices_list);
- return *this;
- }
-
- ~ue2_graph() {
- vertices_list.clear_and_dispose(delete_disposer());
- }
-};
-
+ vertex_edge_list<out_edge_hook> out_edge_list;
+
+        /* The destructor only frees memory owned by the vertex and will leave
+         * the neighbours' edges in a bad state. If a vertex is being removed
+         * (rather than the whole graph being destroyed), the gentler clean-up
+         * of clear_vertex() must be called first */
+ ~vertex_node() {
+ out_edge_list.clear_and_dispose(delete_disposer());
+ }
+ };
+
+ struct delete_disposer {
+ template<typename T> void operator()(const T *d) const { delete d; }
+ };
+
+ struct in_edge_disposer {
+ void operator()(edge_node *e) const {
+ /* remove from source's out edge list before deleting */
+ vertex_node *u = e->source;
+ u->out_edge_list.erase(u->out_edge_list.iterator_to(*e));
+ delete e;
+ }
+ };
+
+ struct out_edge_disposer {
+ void operator()(edge_node *e) const {
+ /* remove from target's in edge list before deleting */
+ vertex_node *v = e->target;
+ v->in_edge_list.erase(v->in_edge_list.iterator_to(*e));
+ delete e;
+ }
+ };
+
+ using vertices_list_type
+ = boost::intrusive::list<vertex_node,
+ boost::intrusive::base_hook<boost::intrusive::list_base_hook<> > >;
+
+ vertices_list_type vertices_list;
+
+protected: /* to allow renumbering */
+ static const size_t N_SPECIAL_VERTICES = 0; /* override in derived class */
+ size_t next_vertex_index = 0;
+ size_t next_edge_index = 0;
+
+private:
+ size_t graph_edge_count = 0; /* maintained explicitly as we have no global
+ edge list */
+
+ u64a next_serial = 0;
+ u64a new_serial() {
+ u64a serial = next_serial++;
+ if (!next_serial) {
+            /* if we have created enough graph edges/vertices to overflow a
+             * u64a, we must have spent close to an eternity adding to this
+             * graph; something has gone very wrong and we will not produce a
+             * final bytecode in a reasonable amount of time. Or, more likely,
+             * the next_serial value has become corrupt. */
+ throw std::overflow_error("too many graph edges/vertices created");
+ }
+ return serial;
+ }
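+
+    /* Serial numbers give each vertex and edge a stable, monotonically
+     * increasing id that is independent of the renumberable index property.
+     * The descriptors compare and hash on this serial, so ordered containers
+     * of descriptors remain valid across renumber_vertices() and
+     * renumber_edges(). */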
+public:
+ using vertex_descriptor = graph_detail::vertex_descriptor<ue2_graph>;
+ using edge_descriptor = graph_detail::edge_descriptor<ue2_graph>;
+ friend vertex_descriptor;
+ friend edge_descriptor;
+
+ using vertices_size_type = typename vertices_list_type::size_type;
+ using degree_size_type
+ = typename vertex_edge_list<out_edge_hook>::size_type;
+ using edges_size_type = size_t;
+
+ using vertex_property_type = VertexPropertyType;
+ using edge_property_type = EdgePropertyType;
+
+ using graph_bundled = boost::no_property;
+ using vertex_bundled = VertexPropertyType;
+ using edge_bundled = EdgePropertyType;
+
+private:
+    /* Note: apparently, nested class templates cannot be fully specialised,
+     * but they can be partially specialised. Sigh, ... */
+ template<typename BundleType, typename dummy = void>
+ struct bundle_key_type {
+ };
+
+ template<typename dummy>
+ struct bundle_key_type<VertexPropertyType, dummy> {
+ using type = vertex_descriptor;
+ };
+
+ template<typename dummy>
+ struct bundle_key_type<EdgePropertyType, dummy> {
+ using type = edge_descriptor;
+ };
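+
+    /* bundle_key_type maps a bundled property type back to the descriptor
+     * type that keys it:
+     *   bundle_key_type<VertexPropertyType>::type == vertex_descriptor
+     *   bundle_key_type<EdgePropertyType>::type == edge_descriptor
+     * prop_map (below) uses this to deduce its key_type. */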
+
+public:
+ class out_edge_iterator : public boost::iterator_adaptor<
+ out_edge_iterator,
+ typename vertex_edge_list<out_edge_hook>::const_iterator,
+ edge_descriptor,
+ boost::bidirectional_traversal_tag,
+ edge_descriptor> {
+ using super = typename out_edge_iterator::iterator_adaptor_;
+ public:
+ out_edge_iterator() : super() { }
+ explicit out_edge_iterator(
+ typename vertex_edge_list<out_edge_hook>::const_iterator it)
+ : super(it) { }
+ edge_descriptor dereference() const {
+            /* :( const_cast makes me sad, but constness is defined by the
+             * graph parameter of BGL API calls */
+ return edge_descriptor(const_cast<edge_node *>(&*super::base()));
+ }
+ };
+
+ class in_edge_iterator : public boost::iterator_adaptor<
+ in_edge_iterator,
+ typename vertex_edge_list<in_edge_hook>::const_iterator,
+ edge_descriptor,
+ boost::bidirectional_traversal_tag,
+ edge_descriptor> {
+ using super = typename in_edge_iterator::iterator_adaptor_;
+ public:
+ in_edge_iterator() : super() { }
+ explicit in_edge_iterator(
+ typename vertex_edge_list<in_edge_hook>::const_iterator it)
+ : super(it) { }
+ edge_descriptor dereference() const {
+            /* :( const_cast makes me sad, but constness is defined by the
+             * graph parameter of BGL API calls */
+ return edge_descriptor(const_cast<edge_node *>(&*super::base()));
+ }
+ };
+
+ class adjacency_iterator : public boost::iterator_adaptor<
+ adjacency_iterator,
+ out_edge_iterator,
+ vertex_descriptor,
+ boost::bidirectional_traversal_tag,
+ vertex_descriptor> {
+ using super = typename adjacency_iterator::iterator_adaptor_;
+ public:
+ adjacency_iterator(out_edge_iterator a) : super(std::move(a)) { }
+ adjacency_iterator() { }
+
+ vertex_descriptor dereference() const {
+ return vertex_descriptor(super::base()->p->target);
+ }
+ };
+
+ class inv_adjacency_iterator : public boost::iterator_adaptor<
+ inv_adjacency_iterator,
+ in_edge_iterator,
+ vertex_descriptor,
+ boost::bidirectional_traversal_tag,
+ vertex_descriptor> {
+ using super = typename inv_adjacency_iterator::iterator_adaptor_;
+ public:
+ inv_adjacency_iterator(in_edge_iterator a) : super(std::move(a)) { }
+ inv_adjacency_iterator() { }
+
+ vertex_descriptor dereference() const {
+ return vertex_descriptor(super::base()->p->source);
+ }
+ };
+
+ class vertex_iterator : public boost::iterator_adaptor<
+ vertex_iterator,
+ typename vertices_list_type::const_iterator,
+ vertex_descriptor,
+ boost::bidirectional_traversal_tag,
+ vertex_descriptor> {
+ using super = typename vertex_iterator::iterator_adaptor_;
+ public:
+ vertex_iterator() : super() { }
+ explicit vertex_iterator(typename vertices_list_type::const_iterator it)
+ : super(it) { }
+ vertex_descriptor dereference() const {
+            /* :( const_cast makes me sad, but constness is defined by the
+             * graph parameter of BGL API calls */
+ return vertex_descriptor(
+ const_cast<vertex_node *>(&*super::base()));
+ }
+ };
+
+ class edge_iterator : public boost::iterator_facade<
+ edge_iterator,
+ edge_descriptor,
+ boost::forward_traversal_tag, /* TODO: make bidi */
+ edge_descriptor> {
+ public:
+ using main_base_iter_type = vertex_iterator;
+ using aux_base_iter_type = out_edge_iterator;
+
+ edge_iterator(main_base_iter_type b, main_base_iter_type e)
+ : main(std::move(b)), main_end(std::move(e)) {
+ if (main == main_end) {
+ return;
+ }
+ std::tie(aux, aux_end) = out_edges_impl(*main);
+ while (aux == aux_end) {
+ ++main;
+ if (main == main_end) {
+ break;
+ }
+ std::tie(aux, aux_end) = out_edges_impl(*main);
+ }
+ }
+ edge_iterator() { }
+
+ friend class boost::iterator_core_access;
+ void increment() {
+ ++aux;
+ while (aux == aux_end) {
+ ++main;
+ if (main == main_end) {
+ break;
+ }
+ std::tie(aux, aux_end) = out_edges_impl(*main);
+ }
+ }
+ bool equal(const edge_iterator &other) const {
+ return main == other.main && (main == main_end || aux == other.aux);
+ }
+ edge_descriptor dereference() const {
+ return *aux;
+ }
+
+ main_base_iter_type main;
+ main_base_iter_type main_end;
+ aux_base_iter_type aux;
+ aux_base_iter_type aux_end;
+ };
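+
+    /* There is no global edge list, so edge_iterator performs a two-level
+     * walk: 'main' ranges over the vertices and 'aux' over the current
+     * vertex's out-edge list, skipping vertices with no out-edges. equal()
+     * only compares 'aux' when 'main' is not at the end, as the 'aux'
+     * iterators of end-of-graph iterators are default constructed. */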
+
+public:
+ static
+ vertex_descriptor null_vertex() { return vertex_descriptor(); }
+
+ vertex_descriptor add_vertex_impl() {
+ vertex_node *v = new vertex_node(new_serial());
+ v->props.index = next_vertex_index++;
+ vertices_list.push_back(*v);
+ return vertex_descriptor(v);
+ }
+
+ void remove_vertex_impl(vertex_descriptor v) {
+ vertex_node *vv = v.raw();
+ assert(vv->in_edge_list.empty());
+ assert(vv->out_edge_list.empty());
+ vertices_list.erase_and_dispose(vertices_list.iterator_to(*vv),
+ delete_disposer());
+ }
+
+ void clear_in_edges_impl(vertex_descriptor v) {
+ graph_edge_count -= v.raw()->in_edge_list.size();
+ v.raw()->in_edge_list.clear_and_dispose(in_edge_disposer());
+ }
+
+ void clear_out_edges_impl(vertex_descriptor v) {
+ graph_edge_count -= v.raw()->out_edge_list.size();
+ v.raw()->out_edge_list.clear_and_dispose(out_edge_disposer());
+ }
+
+ /* IncidenceGraph concept functions */
+
+ static
+ vertex_descriptor source_impl(edge_descriptor e) {
+ return vertex_descriptor(e.raw()->source);
+ }
+
+ static
+ vertex_descriptor target_impl(edge_descriptor e) {
+ return vertex_descriptor(e.raw()->target);
+ }
+
+ static
+ degree_size_type out_degree_impl(vertex_descriptor v) {
+ return v.raw()->out_edge_list.size();
+ }
+
+ static
+ std::pair<out_edge_iterator, out_edge_iterator>
+ out_edges_impl(vertex_descriptor v) {
+ return {out_edge_iterator(v.raw()->out_edge_list.begin()),
+ out_edge_iterator(v.raw()->out_edge_list.end())};
+ }
+
+ /* BidirectionalGraph concept functions */
+
+ static
+ degree_size_type in_degree_impl(vertex_descriptor v) {
+ return v.raw()->in_edge_list.size();
+ }
+
+ static
+ std::pair<in_edge_iterator, in_edge_iterator>
+ in_edges_impl(vertex_descriptor v) {
+ return {in_edge_iterator(v.raw()->in_edge_list.begin()),
+ in_edge_iterator(v.raw()->in_edge_list.end())};
+ }
+
+    /* Note: this is defined so that self-loops are counted twice, which may
+     * or may not be what you want. Actually, you probably don't want this at
+     * all. */
+ static
+ degree_size_type degree_impl(vertex_descriptor v) {
+ return in_degree_impl(v) + out_degree_impl(v);
+ }
+
+ /* AdjacencyList concept functions */
+
+ static
+ std::pair<adjacency_iterator, adjacency_iterator>
+ adjacent_vertices_impl(vertex_descriptor v) {
+ auto out_edge_its = out_edges_impl(v);
+ return {adjacency_iterator(out_edge_its.first),
+ adjacency_iterator(out_edge_its.second)};
+ }
+
+    /* AdjacencyMatrix concept functions
+     * (Note: the constant-time complexity guarantee is not met) */
+
+ std::pair<edge_descriptor, bool> edge_impl(vertex_descriptor u,
+ vertex_descriptor v) const {
+ if (in_degree_impl(v) < out_degree_impl(u)) {
+ for (const edge_descriptor &e : in_edges_range(v, *this)) {
+ if (source_impl(e) == u) {
+ return {e, true};
+ }
+ }
+ } else {
+ for (const edge_descriptor &e : out_edges_range(u, *this)) {
+ if (target_impl(e) == v) {
+ return {e, true};
+ }
+ }
+ }
+
+ return {edge_descriptor(), false};
+ }
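+
+    /* edge(u, v, g) scans the shorter of v's in-edge list and u's out-edge
+     * list, so the cost is O(min(in_degree(v), out_degree(u))) rather than
+     * the constant time an AdjacencyMatrix would provide. */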
+
+ /* Misc functions that don't actually seem to belong to a formal BGL
+ concept. */
+ static
+ edge_descriptor null_edge() { return edge_descriptor(); }
+
+ static
+ std::pair<inv_adjacency_iterator, inv_adjacency_iterator>
+ inv_adjacent_vertices_impl(vertex_descriptor v) {
+ auto in_edge_its = in_edges_impl(v);
+ return {inv_adjacency_iterator(in_edge_its.first),
+ inv_adjacency_iterator(in_edge_its.second)};
+ }
+
+ /* MutableGraph concept functions */
+
+ std::pair<edge_descriptor, bool>
+ add_edge_impl(vertex_descriptor u, vertex_descriptor v) {
+ bool added = true; /* we always allow parallel edges */
+ edge_node *e = new edge_node(new_serial());
+ e->source = u.raw();
+ e->target = v.raw();
+ e->props.index = next_edge_index++;
+
+ u.raw()->out_edge_list.push_back(*e);
+ v.raw()->in_edge_list.push_back(*e);
+
+ graph_edge_count++;
+ return {edge_descriptor(e), added};
+ }
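+
+    /* Usage sketch (illustrative, via the free functions defined later in
+     * this header):
+     *   auto u = add_vertex(g);
+     *   auto v = add_vertex(g);
+     *   auto e = add_edge(u, v, g).first; // bool component is always true
+     *   add_edge(u, v, g);                // parallel edge, also permitted
+     */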
+
+ void remove_edge_impl(edge_descriptor e) {
+ graph_edge_count--;
+
+ vertex_node *u = e.raw()->source;
+ vertex_node *v = e.raw()->target;
+
+ v->in_edge_list.erase(v->in_edge_list.iterator_to(*e.raw()));
+ u->out_edge_list.erase(u->out_edge_list.iterator_to(*e.raw()));
+
+ delete e.raw();
+ }
+
+ template<class Predicate>
+ void remove_out_edge_if_impl(vertex_descriptor v, Predicate pred) {
+ out_edge_iterator it, ite;
+ std::tie(it, ite) = out_edges_impl(v);
+ while (it != ite) {
+ auto jt = it;
+ ++it;
+ if (pred(*jt)) {
+ this->remove_edge_impl(*jt);
+ }
+ }
+ }
+
+ template<class Predicate>
+ void remove_in_edge_if_impl(vertex_descriptor v, Predicate pred) {
+ in_edge_iterator it, ite;
+ std::tie(it, ite) = in_edges_impl(v);
+ while (it != ite) {
+ auto jt = it;
+ ++it;
+ if (pred(*jt)) {
+ remove_edge_impl(*jt);
+ }
+ }
+ }
+
+ template<class Predicate>
+ void remove_edge_if_impl(Predicate pred) {
+ edge_iterator it, ite;
+ std::tie(it, ite) = edges_impl();
+ while (it != ite) {
+ auto jt = it;
+ ++it;
+ if (pred(*jt)) {
+ remove_edge_impl(*jt);
+ }
+ }
+ }
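+
+    /* The three remove_*_if helpers above share the same erase-safe pattern:
+     * take a copy of the iterator (jt), advance the main iterator first, and
+     * only then remove through the copy, so removal never invalidates the
+     * iterator used to continue the walk. */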
+
+private:
+ /* GCC 4.8 has bugs with lambdas in templated friend functions, so: */
+ struct source_match {
+ explicit source_match(const vertex_descriptor &uu) : u(uu) { }
+ bool operator()(edge_descriptor e) const { return source_impl(e) == u; }
+ const vertex_descriptor &u;
+ };
+
+ struct target_match {
+ explicit target_match(const vertex_descriptor &vv) : v(vv) { }
+ bool operator()(edge_descriptor e) const { return target_impl(e) == v; }
+ const vertex_descriptor &v;
+ };
+public:
+ /* Note: (u,v) variant needs to remove all (parallel) edges between (u,v).
+ *
+ * The edge_descriptor version should be strongly preferred if the
+ * edge_descriptor is available.
+ */
+ void remove_edge_impl(const vertex_descriptor &u,
+ const vertex_descriptor &v) {
+ if (in_degree_impl(v) < out_degree_impl(u)) {
+ remove_in_edge_if_impl(v, source_match(u));
+ } else {
+ remove_out_edge_if_impl(u, target_match(v));
+ }
+ }
+
+ /* VertexListGraph concept functions */
+ vertices_size_type num_vertices_impl() const {
+ return vertices_list.size();
+ }
+
+ std::pair<vertex_iterator, vertex_iterator> vertices_impl() const {
+ return {vertex_iterator(vertices_list.begin()),
+ vertex_iterator(vertices_list.end())};
+ }
+
+ /* EdgeListGraph concept functions (aside from those in IncidenceGraph) */
+
+ edges_size_type num_edges_impl() const {
+ return graph_edge_count;
+ }
+
+ std::pair<edge_iterator, edge_iterator> edges_impl() const {
+ vertex_iterator vi, ve;
+ std::tie(vi, ve) = vertices_impl();
+
+ return {edge_iterator(vi, ve), edge_iterator(ve, ve)};
+ }
+
+ /* bundled properties functions */
+
+ vertex_property_type &operator[](vertex_descriptor v) {
+ return v.raw()->props;
+ }
+
+ const vertex_property_type &operator[](vertex_descriptor v) const {
+ return v.raw()->props;
+ }
+
+ edge_property_type &operator[](edge_descriptor e) {
+ return e.raw()->props;
+ }
+
+ const edge_property_type &operator[](edge_descriptor e) const {
+ return e.raw()->props;
+ }
+
+ /* PropertyGraph concept functions & helpers */
+
+ template<typename R, typename P_of>
+ struct prop_map : public boost::put_get_helper<R, prop_map<R, P_of> > {
+ using value_type = typename std::decay<R>::type;
+ using reference = R;
+ using key_type = typename bundle_key_type<P_of>::type;
+
+ typedef typename boost::lvalue_property_map_tag category;
+
+ prop_map(value_type P_of::*m_in) : member(m_in) { }
+
+ reference operator[](key_type k) const {
+ return k.raw()->props.*member;
+ }
+ reference operator()(key_type k) const { return (*this)[k]; }
+
+ private:
+ value_type P_of::*member;
+ };
+
+ template<typename R>
+ struct prop_map_all : public boost::put_get_helper<R, prop_map_all<R> > {
+ using value_type = typename std::decay<R>::type;
+ using reference = R;
+ using key_type = typename bundle_key_type<value_type>::type;
+
+ typedef typename boost::lvalue_property_map_tag category;
+
+ reference operator[](key_type k) const {
+ return k.raw()->props;
+ }
+ reference operator()(key_type k) const { return (*this)[k]; }
+ };
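+
+    /* Both prop_map and prop_map_all model lvalue property maps: prop_map
+     * projects a single bundled member, as in get(&VertexPropertyType::index,
+     * g)[v], while prop_map_all exposes the whole bundle as required by the
+     * vertex_all_t and edge_all_t selectors. */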
+
+ template<typename P_type, typename P_of>
+ friend
+ prop_map<P_type &, P_of> get(P_type P_of::*t, Graph &) {
+ return prop_map<P_type &, P_of>(t);
+ }
+
+ template<typename P_type, typename P_of>
+ friend
+ prop_map<const P_type &, P_of> get(P_type P_of::*t, const Graph &) {
+ return prop_map<const P_type &, P_of>(t);
+ }
+
+    /* We can't use auto/decltype returns here, as the templated member
+     * functions are not yet visible when the compiler is evaluating the
+     * decltype for the return value. We could probably work around it by
+     * making this a dummy templated function. */
+ friend
+ prop_map<size_t &, VertexPropertyType>
+ get(boost::vertex_index_t, Graph &g) {
+ return get(&VertexPropertyType::index, g);
+ }
+
+ friend
+ prop_map<const size_t &, VertexPropertyType>
+ get(boost::vertex_index_t, const Graph &g) {
+ return get(&VertexPropertyType::index, g);
+ }
+
+ friend
+ prop_map<size_t &, EdgePropertyType>
+ get(boost::edge_index_t, Graph &g) {
+ return get(&EdgePropertyType::index, g);
+ }
+
+ friend
+ prop_map<const size_t &, EdgePropertyType>
+ get(boost::edge_index_t, const Graph &g) {
+ return get(&EdgePropertyType::index, g);
+ }
+
+ friend
+ prop_map_all<VertexPropertyType &> get(boost::vertex_all_t, Graph &) {
+ return {};
+ }
+
+ friend
+ prop_map_all<const VertexPropertyType &> get(boost::vertex_all_t,
+ const Graph &) {
+ return {};
+ }
+
+ friend
+ prop_map_all<EdgePropertyType &> get(boost::edge_all_t, Graph &) {
+ return {};
+ }
+
+ friend
+ prop_map_all<const EdgePropertyType &> get(boost::edge_all_t,
+ const Graph &) {
+ return {};
+ }
+
+ friend
+ prop_map_all<VertexPropertyType &> get(boost::vertex_bundle_t, Graph &) {
+ return {};
+ }
+
+ friend
+ prop_map_all<const VertexPropertyType &> get(boost::vertex_bundle_t,
+ const Graph &) {
+ return {};
+ }
+
+ friend
+ prop_map_all<EdgePropertyType &> get(boost::edge_bundle_t, Graph &) {
+ return {};
+ }
+
+ friend
+ prop_map_all<const EdgePropertyType &> get(boost::edge_bundle_t,
+ const Graph &) {
+ return {};
+ }
+
+ template<typename Prop, typename K>
+ friend
+ auto get(Prop p, Graph &g, K key) -> decltype(get(p, g)[key]) {
+ return get(p, g)[key];
+ }
+
+ template<typename Prop, typename K>
+ friend
+ auto get(Prop p, const Graph &g, K key) -> decltype(get(p, g)[key]) {
+ return get(p, g)[key];
+ }
+
+ template<typename Prop, typename K, typename V>
+ friend
+ void put(Prop p, Graph &g, K key, const V &value) {
+ get(p, g)[key] = value;
+ }
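+
+    /* Example (illustrative): the three-argument get and put compose with
+     * the property-map overloads above, so
+     *   put(boost::vertex_index, g, v, 0);
+     * is equivalent to
+     *   get(boost::vertex_index, g)[v] = 0;
+     */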
+
+ /* MutablePropertyGraph concept functions */
+
+    /* Note: add_vertex(vp, g) allocates the next index value for the vertex
+     * rather than using the index in vp; i.e., except in rare coincidences:
+     * g[add_vertex(vp, g)].index != vp.index
+     */
+ vertex_descriptor add_vertex_impl(const VertexPropertyType &vp) {
+ vertex_descriptor v = add_vertex_impl();
+ auto i = (*this)[v].index;
+ (*this)[v] = vp;
+ (*this)[v].index = i;
+
+ return v;
+ }
+
+    /* Note: add_edge(u, v, ep, g) allocates the next index value for the edge
+     * rather than using the index in ep; i.e., except in rare coincidences:
+     * g[add_edge(u, v, ep, g)].index != ep.index
+     */
+ std::pair<edge_descriptor, bool>
+ add_edge_impl(vertex_descriptor u, vertex_descriptor v,
+ const EdgePropertyType &ep) {
+ auto e = add_edge_impl(u, v);
+ auto i = (*this)[e.first].index;
+ (*this)[e.first] = ep;
+ (*this)[e.first].index = i;
+
+ return e;
+ }
+
+ /* End MutablePropertyGraph */
+
+    /** Pack the edge indices into the contiguous range [0, num_edges(g)). */
+ void renumber_edges_impl() {
+ next_edge_index = 0;
+ edge_iterator it;
+ edge_iterator ite;
+ for (std::tie(it, ite) = edges_impl(); it != ite; ++it) {
+ (*this)[*it].index = next_edge_index++;
+ }
+ }
+
+    /** Pack the vertex indices into the contiguous range [0, num_vertices(g)).
+     * Vertices with indices less than N_SPECIAL_VERTICES are not renumbered.
+     */
+ void renumber_vertices_impl() {
+ DEBUG_PRINTF("renumbering above %zu\n", Graph::N_SPECIAL_VERTICES);
+ next_vertex_index = Graph::N_SPECIAL_VERTICES;
+ vertex_iterator it;
+ vertex_iterator ite;
+ for (std::tie(it, ite) = vertices_impl(); it != ite; ++it) {
+ if ((*this)[*it].index < Graph::N_SPECIAL_VERTICES) {
+ continue;
+ }
+
+ (*this)[*it].index = next_vertex_index++;
+ }
+ }
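+
+    /* After bulk removals, the renumber functions repack the index
+     * properties so that index-keyed arrays can again be sized by
+     * vertex_index_upper_bound() / edge_index_upper_bound() without gaps. */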
+
+    /** Returns what the next allocated vertex index will be. This is an upper
+     * bound on the index values of vertices (vertex removal means that there
+     * may be gaps). */
+ vertices_size_type vertex_index_upper_bound_impl() const {
+ return next_vertex_index;
+ }
+
+    /** Returns what the next allocated edge index will be. This is an upper
+     * bound on the index values of edges (edge removal means that there may be
+     * gaps). */
+    edges_size_type edge_index_upper_bound_impl() const {
+ return next_edge_index;
+ }
+
+ using directed_category = boost::directed_tag;
+ using edge_parallel_category = boost::allow_parallel_edge_tag;
+ struct traversal_category :
+ public virtual boost::bidirectional_graph_tag,
+ public virtual boost::adjacency_graph_tag,
+ public virtual boost::vertex_list_graph_tag,
+ public virtual boost::edge_list_graph_tag { };
+
+ ue2_graph() = default;
+
+ ue2_graph(ue2_graph &&old)
+ : next_vertex_index(old.next_vertex_index),
+ next_edge_index(old.next_edge_index),
+ graph_edge_count(old.graph_edge_count),
+ next_serial(old.next_serial) {
+ using std::swap;
+ swap(vertices_list, old.vertices_list);
+ }
+
+ ue2_graph &operator=(ue2_graph &&old) {
+ next_vertex_index = old.next_vertex_index;
+ next_edge_index = old.next_edge_index;
+ graph_edge_count = old.graph_edge_count;
+ next_serial = old.next_serial;
+ using std::swap;
+ swap(vertices_list, old.vertices_list);
+ return *this;
+ }
+
+ ~ue2_graph() {
+ vertices_list.clear_and_dispose(delete_disposer());
+ }
+};
+
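+/* Usage sketch (illustrative; MyGraph, my_vertex_props and my_edge_props are
+ * hypothetical names). A concrete graph derives from ue2_graph via CRTP,
+ * supplying property bundles that each carry a size_t index member:
+ *
+ *   struct my_vertex_props { size_t index = 0; };
+ *   struct my_edge_props { size_t index = 0; };
+ *   struct MyGraph : ue2_graph<MyGraph, my_vertex_props, my_edge_props> {};
+ *
+ *   MyGraph g;
+ *   auto u = add_vertex(g);
+ *   auto v = add_vertex(g);
+ *   auto e = add_edge(u, v, g).first;
+ *   g[u].index; // bundled property access
+ */
+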
/** \brief Type trait used to enable overloads only when Graph is a ue2_graph. */
-template<typename Graph>
+template<typename Graph>
struct is_ue2_graph
: public ::std::integral_constant<
bool, std::is_base_of<graph_detail::graph_base, Graph>::value> {};
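+
+/* is_ue2_graph gates each free function below via enable_if, so this
+ * BGL-style interface (add_vertex, out_edges, etc.) only participates in
+ * overload resolution for graphs derived from ue2_graph. */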
@@ -1034,231 +1034,231 @@ struct is_ue2_graph
template<typename Graph>
typename std::enable_if<is_ue2_graph<Graph>::value,
typename Graph::vertex_descriptor>::type
-add_vertex(Graph &g) {
- return g.add_vertex_impl();
-}
-
-template<typename Graph>
+add_vertex(Graph &g) {
+ return g.add_vertex_impl();
+}
+
+template<typename Graph>
typename std::enable_if<is_ue2_graph<Graph>::value>::type
-remove_vertex(typename Graph::vertex_descriptor v, Graph &g) {
- g.remove_vertex_impl(v);
-}
-
-template<typename Graph>
+remove_vertex(typename Graph::vertex_descriptor v, Graph &g) {
+ g.remove_vertex_impl(v);
+}
+
+template<typename Graph>
typename std::enable_if<is_ue2_graph<Graph>::value>::type
-clear_in_edges(typename Graph::vertex_descriptor v, Graph &g) {
- g.clear_in_edges_impl(v);
-}
-
-template<typename Graph>
+clear_in_edges(typename Graph::vertex_descriptor v, Graph &g) {
+ g.clear_in_edges_impl(v);
+}
+
+template<typename Graph>
typename std::enable_if<is_ue2_graph<Graph>::value>::type
-clear_out_edges(typename Graph::vertex_descriptor v, Graph &g) {
- g.clear_out_edges_impl(v);
-}
-
-template<typename Graph>
+clear_out_edges(typename Graph::vertex_descriptor v, Graph &g) {
+ g.clear_out_edges_impl(v);
+}
+
+template<typename Graph>
typename std::enable_if<is_ue2_graph<Graph>::value>::type
-clear_vertex(typename Graph::vertex_descriptor v, Graph &g) {
- g.clear_in_edges_impl(v);
- g.clear_out_edges_impl(v);
-}
-
-template<typename Graph>
+clear_vertex(typename Graph::vertex_descriptor v, Graph &g) {
+ g.clear_in_edges_impl(v);
+ g.clear_out_edges_impl(v);
+}
+
+template<typename Graph>
typename std::enable_if<is_ue2_graph<Graph>::value,
typename Graph::vertex_descriptor>::type
-source(typename Graph::edge_descriptor e, const Graph &) {
- return Graph::source_impl(e);
-}
-
-template<typename Graph>
+source(typename Graph::edge_descriptor e, const Graph &) {
+ return Graph::source_impl(e);
+}
+
+template<typename Graph>
typename std::enable_if<is_ue2_graph<Graph>::value,
typename Graph::vertex_descriptor>::type
-target(typename Graph::edge_descriptor e, const Graph &) {
- return Graph::target_impl(e);
-}
-
-template<typename Graph>
+target(typename Graph::edge_descriptor e, const Graph &) {
+ return Graph::target_impl(e);
+}
+
+template<typename Graph>
typename std::enable_if<is_ue2_graph<Graph>::value,
typename Graph::degree_size_type>::type
-out_degree(typename Graph::vertex_descriptor v, const Graph &) {
- return Graph::out_degree_impl(v);
-}
-
-template<typename Graph>
+out_degree(typename Graph::vertex_descriptor v, const Graph &) {
+ return Graph::out_degree_impl(v);
+}
+
+template<typename Graph>
typename std::enable_if<is_ue2_graph<Graph>::value,
std::pair<typename Graph::out_edge_iterator,
typename Graph::out_edge_iterator>>::type
-out_edges(typename Graph::vertex_descriptor v, const Graph &) {
- return Graph::out_edges_impl(v);
-}
-
-template<typename Graph>
+out_edges(typename Graph::vertex_descriptor v, const Graph &) {
+ return Graph::out_edges_impl(v);
+}
+
+template<typename Graph>
typename std::enable_if<is_ue2_graph<Graph>::value,
typename Graph::degree_size_type>::type
-in_degree(typename Graph::vertex_descriptor v, const Graph &) {
- return Graph::in_degree_impl(v);
-}
-
-template<typename Graph>
+in_degree(typename Graph::vertex_descriptor v, const Graph &) {
+ return Graph::in_degree_impl(v);
+}
+
+template<typename Graph>
typename std::enable_if<is_ue2_graph<Graph>::value,
std::pair<typename Graph::in_edge_iterator,
typename Graph::in_edge_iterator>>::type
-in_edges(typename Graph::vertex_descriptor v, const Graph &) {
- return Graph::in_edges_impl(v);
-}
-
-template<typename Graph>
+in_edges(typename Graph::vertex_descriptor v, const Graph &) {
+ return Graph::in_edges_impl(v);
+}
+
+template<typename Graph>
typename std::enable_if<is_ue2_graph<Graph>::value,
typename Graph::degree_size_type>::type
-degree(typename Graph::vertex_descriptor v, const Graph &) {
- return Graph::degree_impl(v);
-}
-
-template<typename Graph>
+degree(typename Graph::vertex_descriptor v, const Graph &) {
+ return Graph::degree_impl(v);
+}
+
+template<typename Graph>
typename std::enable_if<is_ue2_graph<Graph>::value,
std::pair<typename Graph::adjacency_iterator,
typename Graph::adjacency_iterator>>::type
-adjacent_vertices(typename Graph::vertex_descriptor v, const Graph &) {
- return Graph::adjacent_vertices_impl(v);
-}
-
-template<typename Graph>
+adjacent_vertices(typename Graph::vertex_descriptor v, const Graph &) {
+ return Graph::adjacent_vertices_impl(v);
+}
+
+template<typename Graph>
typename std::enable_if<is_ue2_graph<Graph>::value,
std::pair<typename Graph::edge_descriptor, bool>>::type
-edge(typename Graph::vertex_descriptor u, typename Graph::vertex_descriptor v,
- const Graph &g) {
- return g.edge_impl(u, v);
-}
-
-template<typename Graph>
+edge(typename Graph::vertex_descriptor u, typename Graph::vertex_descriptor v,
+ const Graph &g) {
+ return g.edge_impl(u, v);
+}
+
+template<typename Graph>
typename std::enable_if<is_ue2_graph<Graph>::value,
std::pair<typename Graph::inv_adjacency_iterator,
typename Graph::inv_adjacency_iterator>>::type
-inv_adjacent_vertices(typename Graph::vertex_descriptor v, const Graph &) {
- return Graph::inv_adjacent_vertices_impl(v);
-}
-
-template<typename Graph>
+inv_adjacent_vertices(typename Graph::vertex_descriptor v, const Graph &) {
+ return Graph::inv_adjacent_vertices_impl(v);
+}
+
+template<typename Graph>
typename std::enable_if<is_ue2_graph<Graph>::value,
std::pair<typename Graph::edge_descriptor, bool>>::type
-add_edge(typename Graph::vertex_descriptor u,
- typename Graph::vertex_descriptor v, Graph &g) {
- return g.add_edge_impl(u, v);
-}
-
-template<typename Graph>
+add_edge(typename Graph::vertex_descriptor u,
+ typename Graph::vertex_descriptor v, Graph &g) {
+ return g.add_edge_impl(u, v);
+}
+
+template<typename Graph>
typename std::enable_if<is_ue2_graph<Graph>::value>::type
-remove_edge(typename Graph::edge_descriptor e, Graph &g) {
- g.remove_edge_impl(e);
-}
-
-template<typename Graph, typename Iter>
-typename std::enable_if<
+remove_edge(typename Graph::edge_descriptor e, Graph &g) {
+ g.remove_edge_impl(e);
+}
+
+template<typename Graph, typename Iter>
+typename std::enable_if<
!std::is_convertible<Iter, typename Graph::edge_descriptor>::value &&
is_ue2_graph<Graph>::value>::type
-remove_edge(Iter it, Graph &g) {
- g.remove_edge_impl(*it);
-}
-
-template<typename Graph, typename Predicate>
+remove_edge(Iter it, Graph &g) {
+ g.remove_edge_impl(*it);
+}
+
+template<typename Graph, typename Predicate>
typename std::enable_if<is_ue2_graph<Graph>::value>::type
-remove_out_edge_if(typename Graph::vertex_descriptor v, Predicate pred,
- Graph &g) {
- g.remove_out_edge_if_impl(v, pred);
-}
-
-template<typename Graph, typename Predicate>
+remove_out_edge_if(typename Graph::vertex_descriptor v, Predicate pred,
+ Graph &g) {
+ g.remove_out_edge_if_impl(v, pred);
+}
+
+template<typename Graph, typename Predicate>
typename std::enable_if<is_ue2_graph<Graph>::value>::type
-remove_in_edge_if(typename Graph::vertex_descriptor v, Predicate pred,
- Graph &g) {
- g.remove_in_edge_if_impl(v, pred);
-}
-
-template<typename Graph, typename Predicate>
+remove_in_edge_if(typename Graph::vertex_descriptor v, Predicate pred,
+ Graph &g) {
+ g.remove_in_edge_if_impl(v, pred);
+}
+
+template<typename Graph, typename Predicate>
typename std::enable_if<is_ue2_graph<Graph>::value>::type
-remove_edge_if(Predicate pred, Graph &g) {
- g.remove_edge_if_impl(pred);
-}
-
-template<typename Graph>
+remove_edge_if(Predicate pred, Graph &g) {
+ g.remove_edge_if_impl(pred);
+}
+
+template<typename Graph>
typename std::enable_if<is_ue2_graph<Graph>::value>::type
-remove_edge(const typename Graph::vertex_descriptor &u,
- const typename Graph::vertex_descriptor &v, Graph &g) {
- g.remove_edge_impl(u, v);
-}
-
-template<typename Graph>
+remove_edge(const typename Graph::vertex_descriptor &u,
+ const typename Graph::vertex_descriptor &v, Graph &g) {
+ g.remove_edge_impl(u, v);
+}
+
+template<typename Graph>
typename std::enable_if<is_ue2_graph<Graph>::value,
typename Graph::vertices_size_type>::type
-num_vertices(const Graph &g) {
- return g.num_vertices_impl();
-}
-
-template<typename Graph>
+num_vertices(const Graph &g) {
+ return g.num_vertices_impl();
+}
+
+template<typename Graph>
typename std::enable_if<is_ue2_graph<Graph>::value,
std::pair<typename Graph::vertex_iterator,
typename Graph::vertex_iterator>>::type
-vertices(const Graph &g) {
- return g.vertices_impl();
-}
-
-template<typename Graph>
+vertices(const Graph &g) {
+ return g.vertices_impl();
+}
+
+template<typename Graph>
typename std::enable_if<is_ue2_graph<Graph>::value,
typename Graph::edges_size_type>::type
-num_edges(const Graph &g) {
- return g.num_edges_impl();
-}
-
-template<typename Graph>
+num_edges(const Graph &g) {
+ return g.num_edges_impl();
+}
+
+template<typename Graph>
typename std::enable_if<is_ue2_graph<Graph>::value,
std::pair<typename Graph::edge_iterator,
typename Graph::edge_iterator>>::type
-edges(const Graph &g) {
- return g.edges_impl();
-}
-
-template<typename Graph>
+edges(const Graph &g) {
+ return g.edges_impl();
+}
+
+template<typename Graph>
typename std::enable_if<is_ue2_graph<Graph>::value,
typename Graph::vertex_descriptor>::type
-add_vertex(const typename Graph::vertex_property_type &vp, Graph &g) {
- return g.add_vertex_impl(vp);
-}
-
-template<typename Graph>
+add_vertex(const typename Graph::vertex_property_type &vp, Graph &g) {
+ return g.add_vertex_impl(vp);
+}
+
+template<typename Graph>
typename std::enable_if<is_ue2_graph<Graph>::value,
std::pair<typename Graph::edge_descriptor, bool>>::type
-add_edge(typename Graph::vertex_descriptor u,
- typename Graph::vertex_descriptor v,
- const typename Graph::edge_property_type &ep, Graph &g) {
- return g.add_edge_impl(u, v, ep);
-}
-
-template<typename Graph>
+add_edge(typename Graph::vertex_descriptor u,
+ typename Graph::vertex_descriptor v,
+ const typename Graph::edge_property_type &ep, Graph &g) {
+ return g.add_edge_impl(u, v, ep);
+}
+
+template<typename Graph>
typename std::enable_if<is_ue2_graph<Graph>::value>::type
-renumber_edges(Graph &g) {
- g.renumber_edges_impl();
-}
-
-template<typename Graph>
+renumber_edges(Graph &g) {
+ g.renumber_edges_impl();
+}
+
+template<typename Graph>
typename std::enable_if<is_ue2_graph<Graph>::value>::type
-renumber_vertices(Graph &g) {
- g.renumber_vertices_impl();
-}
-
-template<typename Graph>
+renumber_vertices(Graph &g) {
+ g.renumber_vertices_impl();
+}
+
+template<typename Graph>
typename std::enable_if<is_ue2_graph<Graph>::value,
typename Graph::vertices_size_type>::type
-vertex_index_upper_bound(const Graph &g) {
- return g.vertex_index_upper_bound_impl();
-}
-
-template<typename Graph>
+vertex_index_upper_bound(const Graph &g) {
+ return g.vertex_index_upper_bound_impl();
+}
+
+template<typename Graph>
typename std::enable_if<is_ue2_graph<Graph>::value,
typename Graph::edges_size_type>::type
-edge_index_upper_bound(const Graph &g) {
- return g.edge_index_upper_bound_impl();
-}
-
+edge_index_upper_bound(const Graph &g) {
+ return g.edge_index_upper_bound_impl();
+}
+
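+/* Together these wrappers implement the BGL concept interface (Incidence,
+ * Bidirectional, VertexList, EdgeList and Mutable[Property]Graph), which is
+ * what allows generic boost algorithms and adaptors such as filtered_graph
+ * to operate on ue2_graph types. */
+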
template<typename T> struct pointer_to_member_traits {};
template<typename Return, typename Class>
@@ -1287,17 +1287,17 @@ public:
std::is_same<class_type, edge_type>::value;
};
-using boost::vertex_index;
-using boost::edge_index;
-
-} // namespace ue2
-
-namespace boost {
-
-/* Install partial specialisation of property_map - this is required for
- * adaptors (like filtered_graph) to know the type of the property maps */
-template<typename Graph, typename Prop>
-struct property_map<Graph, Prop,
+using boost::vertex_index;
+using boost::edge_index;
+
+} // namespace ue2
+
+namespace boost {
+
+/* Install partial specialisation of property_map - this is required for
+ * adaptors (like filtered_graph) to know the type of the property maps */
+template<typename Graph, typename Prop>
+struct property_map<Graph, Prop,
typename std::enable_if<ue2::is_ue2_graph<Graph>::value &&
ue2::is_ue2_vertex_or_edge_property<
Graph, Prop>::value>::type> {
@@ -1309,8 +1309,8 @@ public:
using type = typename Graph::template prop_map<member_type &, class_type>;
using const_type = typename Graph::template prop_map<const member_type &,
class_type>;
-};
-
+};
+
template<typename Graph>
struct property_map<Graph, vertex_index_t,
typename std::enable_if<ue2::is_ue2_graph<Graph>::value>::type> {
@@ -1347,29 +1347,29 @@ struct property_map<Graph, edge_all_t,
typename Graph::template prop_map_all<const e_prop_type &>;
};
-} // namespace boost
-
-namespace std {
-
-/* Specialization of std::hash so that vertex_descriptor can be used in
- * unordered containers. */
-template<typename Graph>
-struct hash<ue2::graph_detail::vertex_descriptor<Graph>> {
- using vertex_descriptor = ue2::graph_detail::vertex_descriptor<Graph>;
- std::size_t operator()(const vertex_descriptor &v) const {
- return v.hash();
- }
-};
-
-/* Specialization of std::hash so that edge_descriptor can be used in
- * unordered containers. */
-template<typename Graph>
-struct hash<ue2::graph_detail::edge_descriptor<Graph>> {
- using edge_descriptor = ue2::graph_detail::edge_descriptor<Graph>;
- std::size_t operator()(const edge_descriptor &e) const {
- return e.hash();
- }
-};
-
-} // namespace std
-#endif
+} // namespace boost
+
+namespace std {
+
+/* Specialization of std::hash so that vertex_descriptor can be used in
+ * unordered containers. */
+template<typename Graph>
+struct hash<ue2::graph_detail::vertex_descriptor<Graph>> {
+ using vertex_descriptor = ue2::graph_detail::vertex_descriptor<Graph>;
+ std::size_t operator()(const vertex_descriptor &v) const {
+ return v.hash();
+ }
+};
+
+/* Specialization of std::hash so that edge_descriptor can be used in
+ * unordered containers. */
+template<typename Graph>
+struct hash<ue2::graph_detail::edge_descriptor<Graph>> {
+ using edge_descriptor = ue2::graph_detail::edge_descriptor<Graph>;
+ std::size_t operator()(const edge_descriptor &e) const {
+ return e.hash();
+ }
+};
+
+} // namespace std
+#endif
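
The std::hash specialisations restored at the end of graph.h let ue2's vertex and edge descriptors serve as keys in standard unordered containers with no custom hasher at the use site. A minimal sketch under that reading (collect_vertices is illustrative, not part of the patch, and assumes the hyperscan util headers are on the include path):

// Hypothetical sketch only; vertices_range() comes from util/graph_range.h.
#include "util/graph_range.h"
#include <boost/graph/graph_traits.hpp>
#include <unordered_set>

template<typename Graph>
std::unordered_set<typename boost::graph_traits<Graph>::vertex_descriptor>
collect_vertices(const Graph &g) {
    std::unordered_set<typename boost::graph_traits<Graph>::vertex_descriptor>
        seen;
    for (auto v : ue2::vertices_range(g)) {
        seen.insert(v); // hashed via std::hash<vertex_descriptor>, i.e. v.hash()
    }
    return seen;
}
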
diff --git a/contrib/libs/hyperscan/src/util/ue2string.cpp b/contrib/libs/hyperscan/src/util/ue2string.cpp
index 213d3d104e..50b2bbcc89 100644
--- a/contrib/libs/hyperscan/src/util/ue2string.cpp
+++ b/contrib/libs/hyperscan/src/util/ue2string.cpp
@@ -29,15 +29,15 @@
/** \file
* \brief Tools for string manipulation, ue2_literal definition.
*/
-
-#include "ue2string.h"
-
+
+#include "ue2string.h"
+
#include "charreach.h"
#include "compare.h"
-#include "hash_dynamic_bitset.h"
+#include "hash_dynamic_bitset.h"
#include <algorithm>
-#include <cstring>
+#include <cstring>
#include <iomanip>
#include <sstream>
#include <string>
@@ -133,9 +133,9 @@ string dumpString(const ue2_literal &lit) {
#endif
void upperString(string &s) {
- for (auto &c : s) {
- c = mytoupper(c);
- }
+ for (auto &c : s) {
+ c = mytoupper(c);
+ }
}
size_t maxStringOverlap(const string &a, const string &b, bool nocase) {
@@ -179,16 +179,16 @@ size_t maxStringSelfOverlap(const string &a, bool nocase) {
}
u32 cmp(const char *a, const char *b, size_t len, bool nocase) {
- if (!nocase) {
- return memcmp(a, b, len);
- }
-
- for (const auto *a_end = a + len; a < a_end; a++, b++) {
- if (mytoupper(*a) != mytoupper(*b)) {
- return 1;
- }
- }
- return 0;
+ if (!nocase) {
+ return memcmp(a, b, len);
+ }
+
+ for (const auto *a_end = a + len; a < a_end; a++, b++) {
+ if (mytoupper(*a) != mytoupper(*b)) {
+ return 1;
+ }
+ }
+ return 0;
}
case_iter::case_iter(const ue2_literal &ss) : s(ss.get_string()),
@@ -237,15 +237,15 @@ ue2_literal::elem::operator CharReach () const {
}
}
-const ue2_literal::size_type ue2_literal::npos = std::string::npos;
-
+const ue2_literal::size_type ue2_literal::npos = std::string::npos;
+
ue2_literal::ue2_literal(const std::string &s_in, bool nc_in)
- : s(nc_in ? toUpperString(s_in) : s_in), nocase(s_in.size()) {
+ : s(nc_in ? toUpperString(s_in) : s_in), nocase(s_in.size()) {
if (nc_in) {
- // Switch on nocase bit for all alpha characters.
+ // Switch on nocase bit for all alpha characters.
for (size_t i = 0; i < s.length(); i++) {
- if (ourisalpha(s[i])) {
- nocase.set(i);
+ if (ourisalpha(s[i])) {
+ nocase.set(i);
}
}
}
@@ -258,27 +258,27 @@ ue2_literal ue2_literal::substr(size_type pos, size_type n) const {
ue2_literal rv;
rv.s = s.substr(pos, n);
size_type upper = nocase.size();
- if (n != npos && n + pos < nocase.size()) {
+ if (n != npos && n + pos < nocase.size()) {
upper = n + pos;
}
-
- rv.nocase.resize(upper - pos, false);
- for (size_t i = pos; i < upper; i++) {
- rv.nocase.set(i - pos, nocase.test(i));
- }
- assert(s.size() == nocase.size());
+
+ rv.nocase.resize(upper - pos, false);
+ for (size_t i = pos; i < upper; i++) {
+ rv.nocase.set(i - pos, nocase.test(i));
+ }
+ assert(s.size() == nocase.size());
return rv;
}
ue2_literal &ue2_literal::erase(size_type pos, size_type n) {
s.erase(pos, n);
-
- if (n != npos) {
- for (size_type i = pos + n; i < nocase.size(); i++) {
- nocase.set(i - n, nocase.test(i));
- }
+
+ if (n != npos) {
+ for (size_type i = pos + n; i < nocase.size(); i++) {
+ nocase.set(i - n, nocase.test(i));
+ }
}
- nocase.resize(s.size());
+ nocase.resize(s.size());
return *this;
}
@@ -290,26 +290,26 @@ void ue2_literal::push_back(char c, bool nc) {
s.push_back(c);
}
-void ue2_literal::reverse() {
- std::reverse(s.begin(), s.end());
-
- const size_t len = nocase.size();
- for (size_t i = 0; i < len / 2; i++) {
- size_t j = len - i - 1;
- bool a = nocase.test(i);
- bool b = nocase.test(j);
- nocase.set(i, b);
- nocase.set(j, a);
+void ue2_literal::reverse() {
+ std::reverse(s.begin(), s.end());
+
+ const size_t len = nocase.size();
+ for (size_t i = 0; i < len / 2; i++) {
+ size_t j = len - i - 1;
+ bool a = nocase.test(i);
+ bool b = nocase.test(j);
+ nocase.set(i, b);
+ nocase.set(j, a);
}
}
-// Return a copy of this literal in reverse order.
-ue2_literal reverse_literal(const ue2_literal &in) {
- auto out = in;
- out.reverse();
- return out;
-}
-
+// Return a copy of this literal in reverse order.
+ue2_literal reverse_literal(const ue2_literal &in) {
+ auto out = in;
+ out.reverse();
+ return out;
+}
+
bool ue2_literal::operator<(const ue2_literal &b) const {
if (s < b.s) {
return true;
@@ -322,26 +322,26 @@ bool ue2_literal::operator<(const ue2_literal &b) const {
void ue2_literal::operator+=(const ue2_literal &b) {
s += b.s;
- size_t prefix = nocase.size();
- nocase.resize(prefix + b.nocase.size());
- for (size_t i = 0; i < b.nocase.size(); i++) {
- nocase.set(prefix + i, b.nocase[i]);
- }
+ size_t prefix = nocase.size();
+ nocase.resize(prefix + b.nocase.size());
+ for (size_t i = 0; i < b.nocase.size(); i++) {
+ nocase.set(prefix + i, b.nocase[i]);
+ }
}
bool ue2_literal::any_nocase() const {
- return nocase.any();
+ return nocase.any();
}
-size_t ue2_literal::hash() const {
- return hash_all(s, hash_dynamic_bitset()(nocase));
+size_t ue2_literal::hash() const {
+ return hash_all(s, hash_dynamic_bitset()(nocase));
}
void make_nocase(ue2_literal *lit) {
ue2_literal rv;
- for (const auto &elem: *lit) {
- rv.push_back(elem.c, ourisalpha(elem.c));
+ for (const auto &elem: *lit) {
+ rv.push_back(elem.c, ourisalpha(elem.c));
}
lit->swap(rv);
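
Together these hunks preserve ue2_literal's core invariant: the nocase bitset tracks s character for character through construction, substr, erase, reverse and concatenation. A minimal usage sketch (not part of the patch; include path assumed):

#include "util/ue2string.h"
#include <cassert>

int main() {
    ue2::ue2_literal lit("Ab1", true);  // nocase ctor upper-cases to "AB1"
    assert(lit.any_nocase());           // bits set for 'A' and 'B', not '1'
    ue2::ue2_literal sub = lit.substr(1, 2);          // "B1", bits copied over
    ue2::ue2_literal rev = ue2::reverse_literal(lit); // "1BA", bits reversed
    assert(sub.length() == 2 && rev.length() == 3);
    return 0;
}
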
diff --git a/contrib/libs/hyperscan/src/util/ue2string.h b/contrib/libs/hyperscan/src/util/ue2string.h
index 08c3735ab9..0aa846896e 100644
--- a/contrib/libs/hyperscan/src/util/ue2string.h
+++ b/contrib/libs/hyperscan/src/util/ue2string.h
@@ -35,15 +35,15 @@
#include "ue2common.h"
#include "util/charreach.h"
-#include "util/compare.h"
-#include "util/hash.h"
-#include "util/operators.h"
+#include "util/compare.h"
+#include "util/hash.h"
+#include "util/operators.h"
#include <iterator>
#include <string>
#include <vector>
-#include <boost/dynamic_bitset.hpp>
+#include <boost/dynamic_bitset.hpp>
#include <boost/iterator/iterator_facade.hpp>
namespace ue2 {
@@ -59,30 +59,30 @@ size_t maxStringSelfOverlap(const std::string &a, bool nocase);
/// Compares two strings, returns non-zero if they're different.
u32 cmp(const char *a, const char *b, size_t len, bool nocase);
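
As the ue2string.cpp hunk above shows, cmp() falls back to a plain memcmp for caseful comparisons and only pays for mytoupper() in the nocase path; it returns zero on equality and non-zero otherwise, not a memcmp-style ordering. A hedged usage sketch (include path assumed):

#include "util/ue2string.h"
#include <cassert>

int main() {
    assert(ue2::cmp("abc", "ABC", 3, true) == 0);   // caseless: equal
    assert(ue2::cmp("abc", "ABC", 3, false) != 0);  // caseful: they differ
    return 0;
}
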
-/**
- * \brief String type that also records whether the whole string is caseful or
- * caseless.
- *
- * You should use \ref ue2_literal if you need to represent a mixed-case
- * literal.
- */
-struct ue2_case_string {
- ue2_case_string(std::string s_in, bool nocase_in)
- : s(std::move(s_in)), nocase(nocase_in) {
- if (nocase) {
- upperString(s);
- }
- }
-
- bool operator==(const ue2_case_string &other) const {
- return s == other.s && nocase == other.nocase;
- }
-
- std::string s;
- bool nocase;
-};
-
-struct ue2_literal : totally_ordered<ue2_literal> {
+/**
+ * \brief String type that also records whether the whole string is caseful or
+ * caseless.
+ *
+ * You should use \ref ue2_literal if you need to represent a mixed-case
+ * literal.
+ */
+struct ue2_case_string {
+ ue2_case_string(std::string s_in, bool nocase_in)
+ : s(std::move(s_in)), nocase(nocase_in) {
+ if (nocase) {
+ upperString(s);
+ }
+ }
+
+ bool operator==(const ue2_case_string &other) const {
+ return s == other.s && nocase == other.nocase;
+ }
+
+ std::string s;
+ bool nocase;
+};
+
+struct ue2_literal : totally_ordered<ue2_literal> {
public:
/// Single element proxy, pointed to by our const_iterator.
struct elem {
@@ -110,38 +110,38 @@ public:
private:
friend class boost::iterator_core_access;
void increment() {
- ++idx;
+ ++idx;
}
void decrement() {
- --idx;
+ --idx;
}
void advance(size_t n) {
- idx += n;
+ idx += n;
}
difference_type distance_to(const const_iterator &other) const {
- return other.idx - idx;
+ return other.idx - idx;
}
bool equal(const const_iterator &other) const {
- return idx == other.idx && lit == other.lit;
+ return idx == other.idx && lit == other.lit;
}
const elem dereference() const {
- return elem(lit->s[idx], lit->nocase[idx]);
+ return elem(lit->s[idx], lit->nocase[idx]);
}
friend struct ue2_literal;
- const_iterator(const ue2_literal &lit_in, size_t idx_in)
- : lit(&lit_in), idx(idx_in) {}
-
- const ue2_literal *lit = nullptr;
- size_t idx;
+ const_iterator(const ue2_literal &lit_in, size_t idx_in)
+ : lit(&lit_in), idx(idx_in) {}
+
+ const ue2_literal *lit = nullptr;
+ size_t idx;
};
using const_reverse_iterator = std::reverse_iterator<const_iterator>;
- using size_type = std::string::size_type;
+ using size_type = std::string::size_type;

- static const size_type npos;
+ static const size_type npos;

- ue2_literal() = default;
+ ue2_literal() = default;
ue2_literal(const std::string &s_in, bool nc_in);
ue2_literal(char c, bool nc_in);
ue2_literal(const ue2_literal &) = default;
@@ -149,25 +149,25 @@ public:
ue2_literal &operator=(const ue2_literal &) = default;
ue2_literal &operator=(ue2_literal &&) = default;
- template<typename InputIt>
- ue2_literal(InputIt b, InputIt e) {
- for (; b != e; ++b) {
- push_back(*b);
- }
- }
-
+ template<typename InputIt>
+ ue2_literal(InputIt b, InputIt e) {
+ for (; b != e; ++b) {
+ push_back(*b);
+ }
+ }
+
size_type length() const { return s.length(); }
bool empty() const { return s.empty(); }
- ue2_literal substr(size_type pos, size_type n = npos) const;
+ ue2_literal substr(size_type pos, size_type n = npos) const;
const char *c_str() const { return s.c_str(); }
bool any_nocase() const;
const_iterator begin() const {
- return const_iterator(*this, 0);
+ return const_iterator(*this, 0);
}
const_iterator end() const {
- return const_iterator(*this, s.size());
+ return const_iterator(*this, s.size());
}
const_reverse_iterator rbegin() const {
@@ -178,22 +178,22 @@ public:
return const_reverse_iterator(begin());
}
- ue2_literal &erase(size_type pos = 0, size_type n = npos);
+ ue2_literal &erase(size_type pos = 0, size_type n = npos);
void push_back(const elem &e) {
push_back(e.c, e.nocase);
}
void push_back(char c, bool nc);
- const elem back() const { return *rbegin(); }
-
- friend ue2_literal operator+(ue2_literal a, const ue2_literal &b) {
- a += b;
- return a;
- }
-
- /// Reverse this literal in-place.
- void reverse();
-
+ const elem back() const { return *rbegin(); }
+
+ friend ue2_literal operator+(ue2_literal a, const ue2_literal &b) {
+ a += b;
+ return a;
+ }
+
+ /// Reverse this literal in-place.
+ void reverse();
+
void operator+=(const ue2_literal &b);
bool operator==(const ue2_literal &b) const {
return s == b.s && nocase == b.nocase;
@@ -209,12 +209,12 @@ public:
nocase.swap(other.nocase);
}
- size_t hash() const;
-
+ size_t hash() const;
+
private:
- friend const_iterator;
+ friend const_iterator;
std::string s;
- boost::dynamic_bitset<> nocase;
+ boost::dynamic_bitset<> nocase;
};
/// Return a reversed copy of this literal.
@@ -228,37 +228,37 @@ size_t maxStringSelfOverlap(const ue2_literal &a);
size_t minStringPeriod(const ue2_literal &a);
size_t maxStringOverlap(const ue2_literal &a, const ue2_literal &b);
-/**
- * \brief True iff the range of a literal given cannot be considered entirely
- * case-sensitive nor entirely case-insensitive.
- */
-template<class Iter>
-bool mixed_sensitivity_in(Iter begin, Iter end) {
- bool cs = false;
- bool nc = false;
- for (auto it = begin; it != end; ++it) {
- if (!ourisalpha(it->c)) {
- continue;
- }
- if (it->nocase) {
- nc = true;
- } else {
- cs = true;
- }
- }
-
- return cs && nc;
-}
-
-/**
- * \brief True iff the literal cannot be considered entirely case-sensitive
- * nor entirely case-insensitive.
- */
-inline
-bool mixed_sensitivity(const ue2_literal &s) {
- return mixed_sensitivity_in(s.begin(), s.end());
-}
-
+/**
+ * \brief True iff the range of a literal given cannot be considered entirely
+ * case-sensitive nor entirely case-insensitive.
+ */
+template<class Iter>
+bool mixed_sensitivity_in(Iter begin, Iter end) {
+ bool cs = false;
+ bool nc = false;
+ for (auto it = begin; it != end; ++it) {
+ if (!ourisalpha(it->c)) {
+ continue;
+ }
+ if (it->nocase) {
+ nc = true;
+ } else {
+ cs = true;
+ }
+ }
+
+ return cs && nc;
+}
+
+/**
+ * \brief True iff the literal cannot be considered entirely case-sensitive
+ * nor entirely case-insensitive.
+ */
+inline
+bool mixed_sensitivity(const ue2_literal &s) {
+ return mixed_sensitivity_in(s.begin(), s.end());
+}
+
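+
+As restored above, mixed_sensitivity() answers true only when both flags get
+set, i.e. the literal holds at least one caseful and one caseless alpha
+character; non-alpha elements never count. A small sketch (illustrative, not
+from the patch; include path assumed):
+
+#include "util/ue2string.h"
+#include <cassert>
+
+int main() {
+    ue2::ue2_literal lit;
+    lit.push_back('a', true);              // caseless alpha
+    lit.push_back('1', false);             // non-alpha: ignored by the check
+    assert(!ue2::mixed_sensitivity(lit));  // no caseful alpha yet
+    lit.push_back('b', false);             // caseful alpha added
+    assert(ue2::mixed_sensitivity(lit));   // both kinds present -> mixed
+    return 0;
+}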
void make_nocase(ue2_literal *lit);
struct case_iter {
@@ -315,22 +315,22 @@ std::string escapeString(const ue2_literal &lit);
} // namespace ue2
-namespace std {
-
-template<>
-struct hash<ue2::ue2_literal::elem> {
- size_t operator()(const ue2::ue2_literal::elem &elem) const {
- return ue2::hash_all(elem.c, elem.nocase);
- }
-};
-
-template<>
-struct hash<ue2::ue2_literal> {
- size_t operator()(const ue2::ue2_literal &lit) const {
- return lit.hash();
- }
-};
-
-} // namespace std
-
+namespace std {
+
+template<>
+struct hash<ue2::ue2_literal::elem> {
+ size_t operator()(const ue2::ue2_literal::elem &elem) const {
+ return ue2::hash_all(elem.c, elem.nocase);
+ }
+};
+
+template<>
+struct hash<ue2::ue2_literal> {
+ size_t operator()(const ue2::ue2_literal &lit) const {
+ return lit.hash();
+ }
+};
+
+} // namespace std
+
#endif
diff --git a/contrib/libs/hyperscan/src/util/uniform_ops.h b/contrib/libs/hyperscan/src/util/uniform_ops.h
index 72a3c4b9e0..262104aca2 100644
--- a/contrib/libs/hyperscan/src/util/uniform_ops.h
+++ b/contrib/libs/hyperscan/src/util/uniform_ops.h
@@ -137,12 +137,12 @@
#define andnot_m384(a, b) (andnot384(a, b))
#define andnot_m512(a, b) (andnot512(a, b))
-#define lshift_u32(a, b) ((a) << (b))
-#define lshift_u64a(a, b) ((a) << (b))
-#define lshift_m128(a, b) (lshift64_m128(a, b))
-#define lshift_m256(a, b) (lshift64_m256(a, b))
-#define lshift_m384(a, b) (lshift64_m384(a, b))
-#define lshift_m512(a, b) (lshift64_m512(a, b))
+#define lshift_u32(a, b) ((a) << (b))
+#define lshift_u64a(a, b) ((a) << (b))
+#define lshift_m128(a, b) (lshift64_m128(a, b))
+#define lshift_m256(a, b) (lshift64_m256(a, b))
+#define lshift_m384(a, b) (lshift64_m384(a, b))
+#define lshift_m512(a, b) (lshift64_m512(a, b))
#define isZero_u8(a) ((a) == 0)
#define isZero_u32(a) ((a) == 0)
@@ -192,52 +192,52 @@
#define partial_load_m384(ptr, sz) loadbytes384(ptr, sz)
#define partial_load_m512(ptr, sz) loadbytes512(ptr, sz)
-#define store_compressed_u32(ptr, x, m, len) storecompressed32(ptr, x, m, len)
-#define store_compressed_u64a(ptr, x, m, len) storecompressed64(ptr, x, m, len)
-#define store_compressed_m128(ptr, x, m, len) storecompressed128(ptr, x, m, len)
-#define store_compressed_m256(ptr, x, m, len) storecompressed256(ptr, x, m, len)
-#define store_compressed_m384(ptr, x, m, len) storecompressed384(ptr, x, m, len)
-#define store_compressed_m512(ptr, x, m, len) storecompressed512(ptr, x, m, len)
-
-#define load_compressed_u32(x, ptr, m, len) loadcompressed32(x, ptr, m, len)
-#define load_compressed_u64a(x, ptr, m, len) loadcompressed64(x, ptr, m, len)
-#define load_compressed_m128(x, ptr, m, len) loadcompressed128(x, ptr, m, len)
-#define load_compressed_m256(x, ptr, m, len) loadcompressed256(x, ptr, m, len)
-#define load_compressed_m384(x, ptr, m, len) loadcompressed384(x, ptr, m, len)
-#define load_compressed_m512(x, ptr, m, len) loadcompressed512(x, ptr, m, len)
-
-static really_inline
-void clearbit_u32(u32 *p, u32 n) {
+#define store_compressed_u32(ptr, x, m, len) storecompressed32(ptr, x, m, len)
+#define store_compressed_u64a(ptr, x, m, len) storecompressed64(ptr, x, m, len)
+#define store_compressed_m128(ptr, x, m, len) storecompressed128(ptr, x, m, len)
+#define store_compressed_m256(ptr, x, m, len) storecompressed256(ptr, x, m, len)
+#define store_compressed_m384(ptr, x, m, len) storecompressed384(ptr, x, m, len)
+#define store_compressed_m512(ptr, x, m, len) storecompressed512(ptr, x, m, len)
+
+#define load_compressed_u32(x, ptr, m, len) loadcompressed32(x, ptr, m, len)
+#define load_compressed_u64a(x, ptr, m, len) loadcompressed64(x, ptr, m, len)
+#define load_compressed_m128(x, ptr, m, len) loadcompressed128(x, ptr, m, len)
+#define load_compressed_m256(x, ptr, m, len) loadcompressed256(x, ptr, m, len)
+#define load_compressed_m384(x, ptr, m, len) loadcompressed384(x, ptr, m, len)
+#define load_compressed_m512(x, ptr, m, len) loadcompressed512(x, ptr, m, len)
+
+static really_inline
+void clearbit_u32(u32 *p, u32 n) {
assert(n < sizeof(*p) * 8);
*p &= ~(1U << n);
}
-
-static really_inline
-void clearbit_u64a(u64a *p, u32 n) {
+
+static really_inline
+void clearbit_u64a(u64a *p, u32 n) {
assert(n < sizeof(*p) * 8);
*p &= ~(1ULL << n);
}
-
+
#define clearbit_m128(ptr, n) (clearbit128(ptr, n))
#define clearbit_m256(ptr, n) (clearbit256(ptr, n))
#define clearbit_m384(ptr, n) (clearbit384(ptr, n))
#define clearbit_m512(ptr, n) (clearbit512(ptr, n))
-static really_inline
-char testbit_u32(u32 val, u32 n) {
- assert(n < sizeof(val) * 8);
- return !!(val & (1U << n));
+static really_inline
+char testbit_u32(u32 val, u32 n) {
+ assert(n < sizeof(val) * 8);
+ return !!(val & (1U << n));
}
-
-static really_inline
-char testbit_u64a(u64a val, u32 n) {
- assert(n < sizeof(val) * 8);
- return !!(val & (1ULL << n));
+
+static really_inline
+char testbit_u64a(u64a val, u32 n) {
+ assert(n < sizeof(val) * 8);
+ return !!(val & (1ULL << n));
}
-#define testbit_m128(val, n) (testbit128(val, n))
-#define testbit_m256(val, n) (testbit256(val, n))
-#define testbit_m384(val, n) (testbit384(val, n))
-#define testbit_m512(val, n) (testbit512(val, n))
-
+#define testbit_m128(val, n) (testbit128(val, n))
+#define testbit_m256(val, n) (testbit256(val, n))
+#define testbit_m384(val, n) (testbit384(val, n))
+#define testbit_m512(val, n) (testbit512(val, n))
+
#endif
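
The uniform_ops scheme gives every state width the same verb set (testbit, clearbit, load/store, the compressed forms), so runtime code can be written once against a width-parameterised name and stamped out per width. A sketch of the scalar forms restored above (illustrative; include path and ue2 base types assumed):

#include "util/uniform_ops.h"  // pulls in u32/u64a via ue2common.h
#include <cassert>

int main() {
    u32 state = 1U << 5;
    assert(testbit_u32(state, 5));   // !!(val & (1U << n))
    clearbit_u32(&state, 5);         // *p &= ~(1U << n)
    assert(!testbit_u32(state, 5));

    u64a wide = 1ULL << 40;          // 64-bit forms use the 1ULL variants
    assert(testbit_u64a(wide, 40));
    return 0;
}
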
diff --git a/contrib/libs/hyperscan/src/util/unordered.h b/contrib/libs/hyperscan/src/util/unordered.h
index e4de5031dc..a8aa61cd04 100644
--- a/contrib/libs/hyperscan/src/util/unordered.h
+++ b/contrib/libs/hyperscan/src/util/unordered.h
@@ -1,53 +1,53 @@
-/*
- * Copyright (c) 2017, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef UTIL_UNORDERED_H
-#define UTIL_UNORDERED_H
-
-/**
- * \file
- * \brief Unordered set and map containers that default to using our own hasher.
- */
-
-#include "hash.h"
-
-#include <unordered_set>
-#include <unordered_map>
-
-namespace ue2 {
-
-template<class Key, class Hash = ue2_hasher>
-using ue2_unordered_set = std::unordered_set<Key, Hash>;
-
-template<class Key, class T, class Hash = ue2_hasher>
-using ue2_unordered_map = std::unordered_map<Key, T, Hash>;
-
-} // namespace ue2
-
-
-#endif // UTIL_UNORDERED_H
+/*
+ * Copyright (c) 2017, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef UTIL_UNORDERED_H
+#define UTIL_UNORDERED_H
+
+/**
+ * \file
+ * \brief Unordered set and map containers that default to using our own hasher.
+ */
+
+#include "hash.h"
+
+#include <unordered_set>
+#include <unordered_map>
+
+namespace ue2 {
+
+template<class Key, class Hash = ue2_hasher>
+using ue2_unordered_set = std::unordered_set<Key, Hash>;
+
+template<class Key, class T, class Hash = ue2_hasher>
+using ue2_unordered_map = std::unordered_map<Key, T, Hash>;
+
+} // namespace ue2
+
+
+#endif // UTIL_UNORDERED_H
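
The point of these aliases is the default ue2_hasher template argument: keys hash through the library's own hash machinery rather than needing a std::hash specialisation or an explicit hasher at each use site. A usage sketch, assuming ue2_hasher dispatches to the hash_all machinery in util/hash.h (illustrative, not part of the patch):

#include "util/ue2string.h"
#include "util/unordered.h"

int main() {
    ue2::ue2_unordered_set<ue2::ue2_literal> seen;
    seen.insert(ue2::ue2_literal("foo", false));
    seen.insert(ue2::ue2_literal("foo", false));  // duplicate is dropped
    return seen.size() == 1 ? 0 : 1;              // set holds one element
}
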
diff --git a/contrib/libs/hyperscan/src/util/verify_types.h b/contrib/libs/hyperscan/src/util/verify_types.h
index 2cde6f8779..5833d5ec62 100644
--- a/contrib/libs/hyperscan/src/util/verify_types.h
+++ b/contrib/libs/hyperscan/src/util/verify_types.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -30,61 +30,61 @@
#define UTIL_VERIFY_TYPES
#include "ue2common.h"
-#include "util/compile_error.h"
+#include "util/compile_error.h"
#include <cassert>
-#include <type_traits>
+#include <type_traits>
namespace ue2 {
-template<typename To_T, typename From_T>
-To_T verify_cast(From_T val) {
- static_assert(std::is_integral<To_T>::value,
- "Output type must be integral.");
- static_assert(std::is_integral<From_T>::value ||
- std::is_enum<From_T>::value ||
- std::is_convertible<From_T, To_T>::value,
- "Must be integral or enum type, or convertible to output.");
-
- To_T conv_val = static_cast<To_T>(val);
- if (static_cast<From_T>(conv_val) != val) {
- assert(0);
- throw ResourceLimitError();
- }
-
- return conv_val;
+template<typename To_T, typename From_T>
+To_T verify_cast(From_T val) {
+ static_assert(std::is_integral<To_T>::value,
+ "Output type must be integral.");
+ static_assert(std::is_integral<From_T>::value ||
+ std::is_enum<From_T>::value ||
+ std::is_convertible<From_T, To_T>::value,
+ "Must be integral or enum type, or convertible to output.");
+
+ To_T conv_val = static_cast<To_T>(val);
+ if (static_cast<From_T>(conv_val) != val) {
+ assert(0);
+ throw ResourceLimitError();
+ }
+
+ return conv_val;
+}
+
+template<typename T>
+s8 verify_s8(T val) {
+ return verify_cast<s8>(val);
}
-template<typename T>
-s8 verify_s8(T val) {
- return verify_cast<s8>(val);
+template<typename T>
+u8 verify_u8(T val) {
+ return verify_cast<u8>(val);
}
-template<typename T>
-u8 verify_u8(T val) {
- return verify_cast<u8>(val);
+template<typename T>
+s16 verify_s16(T val) {
+ return verify_cast<s16>(val);
}
-template<typename T>
-s16 verify_s16(T val) {
- return verify_cast<s16>(val);
+template<typename T>
+u16 verify_u16(T val) {
+ return verify_cast<u16>(val);
}
-template<typename T>
-u16 verify_u16(T val) {
- return verify_cast<u16>(val);
+template<typename T>
+s32 verify_s32(T val) {
+ return verify_cast<s32>(val);
}
-template<typename T>
-s32 verify_s32(T val) {
- return verify_cast<s32>(val);
+template<typename T>
+u32 verify_u32(T val) {
+ return verify_cast<u32>(val);
}
-template<typename T>
-u32 verify_u32(T val) {
- return verify_cast<u32>(val);
-}
-
} // namespace ue2
#endif // UTIL_VERIFY_TYPES
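
verify_cast proves the conversion lossless by casting back and comparing; on truncation it assert()s in debug builds and throws ResourceLimitError, and the verify_u8/u16/u32 wrappers are thin conveniences over it. A hedged sketch (include path assumed; NDEBUG assumed so the throw, not the assert, is what you observe):

#include "util/verify_types.h"
#include <cstdio>

int main() {
    u8 ok = ue2::verify_u8(200);            // fits in a u8: returned unchanged
    std::printf("ok=%u\n", (unsigned)ok);
    try {
        ue2::verify_u8(300);                // truncates to 44 != 300: rejected
    } catch (const ue2::ResourceLimitError &) {
        std::printf("overflow rejected\n");
    }
    return 0;
}
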