author    robot-piglet <robot-piglet@yandex-team.com>  2024-04-17 09:35:32 +0300
committer robot-piglet <robot-piglet@yandex-team.com>  2024-04-17 09:42:41 +0300
commit    535850f0ad768874a11d81e6494a29b6885565da (patch)
tree      52d41f79570abf024ef3003dd606524fd4c2348f
parent    3ddcd1575818302540e3eda0e9eda38ea93a74bf (diff)
download  ydb-535850f0ad768874a11d81e6494a29b6885565da.tar.gz
Intermediate changes
-rw-r--r--  contrib/libs/croaring/README.md | 62
-rw-r--r--  contrib/libs/croaring/include/roaring/array_util.h | 4
-rw-r--r--  contrib/libs/croaring/include/roaring/bitset/bitset.h | 56
-rw-r--r--  contrib/libs/croaring/include/roaring/bitset_util.h | 32
-rw-r--r--  contrib/libs/croaring/include/roaring/containers/run.h | 8
-rw-r--r--  contrib/libs/croaring/include/roaring/portability.h | 15
-rw-r--r--  contrib/libs/croaring/include/roaring/roaring_version.h | 13
-rw-r--r--  contrib/libs/croaring/src/art/art.c | 203
-rw-r--r--  contrib/libs/croaring/src/bitset.c | 46
-rw-r--r--  contrib/libs/croaring/src/containers/bitset.c | 128
-rw-r--r--  contrib/libs/croaring/src/containers/mixed_andnot.c | 7
-rw-r--r--  contrib/libs/croaring/src/containers/mixed_negation.c | 2
-rw-r--r--  contrib/libs/croaring/src/containers/run.c | 20
-rw-r--r--  contrib/libs/croaring/src/roaring64.c | 6
-rw-r--r--  contrib/libs/croaring/ya.make | 4
-rw-r--r--  contrib/python/clickhouse-connect/.dist-info/METADATA | 2
-rw-r--r--  contrib/python/clickhouse-connect/clickhouse_connect/__version__.py | 2
-rw-r--r--  contrib/python/clickhouse-connect/clickhouse_connect/cc_sqlalchemy/datatypes/sqltypes.py | 2
-rw-r--r--  contrib/python/clickhouse-connect/clickhouse_connect/cc_sqlalchemy/ddl/tableengine.py | 4
-rw-r--r--  contrib/python/clickhouse-connect/clickhouse_connect/datatypes/registry.py | 2
-rw-r--r--  contrib/python/clickhouse-connect/clickhouse_connect/driver/query.py | 2
-rw-r--r--  contrib/python/clickhouse-connect/ya.make | 2
-rw-r--r--  contrib/python/contourpy/.dist-info/METADATA | 8
-rw-r--r--  contrib/python/contourpy/LICENSE | 2
-rw-r--r--  contrib/python/contourpy/contourpy/__init__.py | 27
-rw-r--r--  contrib/python/contourpy/contourpy/_contourpy.pyi | 5
-rw-r--r--  contrib/python/contourpy/contourpy/_version.py | 2
-rw-r--r--  contrib/python/contourpy/contourpy/dechunk.py | 4
-rw-r--r--  contrib/python/contourpy/contourpy/typecheck.py | 2
-rw-r--r--  contrib/python/contourpy/contourpy/util/_build_config.pyi | 2
-rw-r--r--  contrib/python/contourpy/contourpy/util/bokeh_renderer.py | 18
-rw-r--r--  contrib/python/contourpy/contourpy/util/bokeh_util.py | 5
-rw-r--r--  contrib/python/contourpy/contourpy/util/mpl_renderer.py | 14
-rw-r--r--  contrib/python/contourpy/src/mpl2014.cpp | 6
-rw-r--r--  contrib/python/contourpy/ya.make | 2
35 files changed, 386 insertions, 333 deletions
diff --git a/contrib/libs/croaring/README.md b/contrib/libs/croaring/README.md
index 7f620eb6ca..954ff2d0d7 100644
--- a/contrib/libs/croaring/README.md
+++ b/contrib/libs/croaring/README.md
@@ -1,4 +1,4 @@
-# CRoaring
+# CRoaring
[![Ubuntu-CI](https://github.com/RoaringBitmap/CRoaring/actions/workflows/ubuntu-noexcept-ci.yml/badge.svg)](https://github.com/RoaringBitmap/CRoaring/actions/workflows/ubuntu-noexcept-ci.yml) [![VS17-CI](https://github.com/RoaringBitmap/CRoaring/actions/workflows/vs17-ci.yml/badge.svg)](https://github.com/RoaringBitmap/CRoaring/actions/workflows/vs17-ci.yml)
[![Fuzzing Status](https://oss-fuzz-build-logs.storage.googleapis.com/badges/croaring.svg)](https://bugs.chromium.org/p/oss-fuzz/issues/list?sort=-opened&can=1&q=proj:croaring)
@@ -41,7 +41,7 @@ Roaring bitmaps are found to work well in many important applications:
> Use Roaring for bitmap compression whenever possible. Do not use other bitmap compression methods ([Wang et al., SIGMOD 2017](http://db.ucsd.edu/wp-content/uploads/2017/03/sidm338-wangA.pdf))
-[There is a serialized format specification for interoperability between implementations](https://github.com/RoaringBitmap/RoaringFormatSpec/). Hence, it is possible to serialize a Roaring Bitmap from C++, read it in Java, modify it, serialize it back and read it in Go and Python.
+[There is a serialized format specification for interoperability between implementations](https://github.com/RoaringBitmap/RoaringFormatSpec/). Hence, it is possible to serialize a Roaring Bitmap from C++, read it in Java, modify it, serialize it back and read it in Go and Python.
# Objective
@@ -57,7 +57,7 @@ of the latest hardware. Roaring bitmaps are already available on a variety of pl
- Linux, macOS, FreeBSD, Windows (MSYS2 and Microsoft Visual studio).
- We test the library with ARM, x64/x86 and POWER processors. We only support little endian systems (big endian systems are vanishingly rare).
-- Recent C compiler supporting the C11 standard (GCC 7 or better, LLVM 7.0 or better, Xcode 11 or better, Microsoft Visual Studio 2022 or better, Intel oneAPI Compiler 2023.2 or better), there is also an optional C++ class that requires a C++ compiler supporting the C++11 standard.
+- Recent C compiler supporting the C11 standard (GCC 7 or better, LLVM 8 or better (clang), Xcode 11 or better, Microsoft Visual Studio 2022 or better, Intel oneAPI Compiler 2023.2 or better), there is also an optional C++ class that requires a C++ compiler supporting the C++11 standard.
- CMake (to contribute to the project, users can rely on amalgamation/unity builds if they do not wish to use CMake).
- The CMake system assumes that git is available.
- Under x64 systems, the library provides runtime dispatch so that optimized functions are called based on the detected CPU features. It works with GCC, clang (version 9 and up) and Visual Studio (2017 and up). Other systems (e.g., ARM) do not need runtime dispatch.
@@ -72,7 +72,7 @@ IO serialization which is only supported on little-endian systems (see [issue 42
The CRoaring library can be amalgamated into a single source file that makes it easier
for integration into other projects. Moreover, by making it possible to compile
all the critical code into one compilation unit, it can improve the performance. For
-the rationale, please see the [SQLite documentation](https://www.sqlite.org/amalgamation.html),
+the rationale, please see the [SQLite documentation](https://www.sqlite.org/amalgamation.html),
or the corresponding [Wikipedia entry](https://en.wikipedia.org/wiki/Single_Compilation_Unit).
Users who choose this route, do not need to rely on CRoaring's build system (based on CMake).
@@ -129,13 +129,13 @@ Linux or macOS users might follow the following instructions if they have a rece
```
2. Compile
```
- cc -o demo demo.c
+ cc -o demo demo.c
c++ -std=c++11 -o demopp demo.cpp
```
3. `./demo`
```
cardinality = 900
- 1000
+ 1000
```
4. `./demopp`
```
@@ -258,11 +258,11 @@ By default, the benchmark tools picks one data set (e.g., `CRoaring/benchmarks/r
We have several data sets and you may pick others:
```
-./build/microbenchmarks/bench benchmarks/realdata/wikileaks-noquotes
+./build/microbenchmarks/bench benchmarks/realdata/wikileaks-noquotes
```
You may disable some functionality for the purpose of benchmarking. For example, assuming you
-have an x64 processor, you could benchmark the code without AVX-512 even if both your processor
+have an x64 processor, you could benchmark the code without AVX-512 even if both your processor
and compiler supports it:
```
@@ -298,7 +298,7 @@ int main(){
# Example (C)
-This example assumes that CRoaring has been build and that you are linking against the corresponding library. By default, CRoaring will install its header files in a `roaring` directory. If you are working from the amalgamation script, you may add the line `#include "roaring.c"` if you are not linking against a prebuilt CRoaring library and replace `#include <roaring/roaring.h>` by `#include "roaring.h"`.
+This example assumes that CRoaring has been build and that you are linking against the corresponding library. By default, CRoaring will install its header files in a `roaring` directory. If you are working from the amalgamation script, you may add the line `#include "roaring.c"` if you are not linking against a prebuilt CRoaring library and replace `#include <roaring/roaring.h>` by `#include "roaring.h"`.
```c
#include <roaring/roaring.h>
@@ -551,7 +551,7 @@ whether the conversion to a bitset has performance benefits in your case.
# Example (C++)
-This example assumes that CRoaring has been build and that you are linking against the corresponding library. By default, CRoaring will install its header files in a `roaring` directory so you may need to replace `#include "roaring.hh"` by `#include <roaring/roaring.hh>`. If you are working from the amalgamation script, you may add the line `#include "roaring.c"` if you are not linking against a CRoaring prebuilt library.
+This example assumes that CRoaring has been build and that you are linking against the corresponding library. By default, CRoaring will install its header files in a `roaring` directory so you may need to replace `#include "roaring.hh"` by `#include <roaring/roaring.hh>`. If you are working from the amalgamation script, you may add the line `#include "roaring.c"` if you are not linking against a CRoaring prebuilt library.
```c++
#include <iostream>
@@ -769,7 +769,7 @@ On Windows (64-bit):
will build and install `roaring` as a shared library.
```
-.\vcpkg.exe install roaring:x64-windows-static
+.\vcpkg.exe install roaring:x64-windows-static
```
will build and install `roaring` as a static library.
@@ -780,24 +780,44 @@ If you find the version of `roaring` shipped with `vcpkg` is out-of-date, feel f
# SIMD-related throttling
-Our AVX2 code does not use floating-point numbers or multiplications, so it is not subject to turbo frequency throttling on many-core Intel processors.
+Our AVX2 code does not use floating-point numbers or multiplications, so it is not subject to turbo frequency throttling on many-core Intel processors.
Our AVX-512 code is only enabled on recent hardware (Intel Ice Lake or better and AMD Zen 4) where SIMD-specific frequency throttling is not observed.
# Thread safety
-Like, for example, STL containers or Java's default data structures, the CRoaring library has no built-in thread support. Thus whenever you modify a bitmap in one thread, it is unsafe to query it in others. It is safe however to query bitmaps (without modifying them) from several distinct threads, as long as you do not use the copy-on-write attribute. For example, you can safely copy a bitmap and use both copies in concurrently. One should probably avoid the use of the copy-on-write attribute in a threaded environment.
+Like, for example, STL containers, the CRoaring library has no built-in thread support. Thus whenever you modify a bitmap in one thread, it is unsafe to query it in others. However, you can safely copy a bitmap and use both copies concurrently.
-Some of our users rely on "copy-on-write" (default to disabled). A bitmap with the copy-on-write flag
-set to true might generate shared containers. A shared container is just a reference to a single
-container with reference counting (we keep track of the number of shallow copies). If you copy shared
-containers over several threads, this might be unsafe due to the need to update the counter concurrently.
-Thus for shared containers, we use reference counting with an atomic counter. If the library is compiled
-as a C library (the default), we use C11 atomics. Unfortunately, Visual Studio does not support C11
-atomics at this times (though this is subject to change). To compensate, we
-use Windows-specific code in such instances (`_InterlockedDecrement` `_InterlockedIncrement`).
+If you use "copy-on-write" (default to disabled), then you should pass copies to the different threads. They will create shared containers, and for shared containers, we use reference counting with an atomic counter.
+
+To summarize:
+- If you do not use copy-on-write, you can safely access the same bitmap concurrently as long as you do not modify it. If you plan on modifying it, you should pass different copies to the different threads.
+- If you use copy-on-write, you should always pass copies to the different threads. The copies are then lightweight (shared containers).
+
+Thus the following pattern where you copy bitmaps and pass them to different threads is safe with or without COW:
+
+```C
+ roaring_bitmap_set_copy_on_write(r1, true);
+ roaring_bitmap_set_copy_on_write(r2, true);
+ roaring_bitmap_set_copy_on_write(r3, true);
+
+ roaring_bitmap_t * r1a = roaring_bitmap_copy(r1);
+ roaring_bitmap_t * r1b = roaring_bitmap_copy(r1);
+
+ roaring_bitmap_t * r2a = roaring_bitmap_copy(r2);
+ roaring_bitmap_t * r2b = roaring_bitmap_copy(r2);
+
+ roaring_bitmap_t * r3a = roaring_bitmap_copy(r3);
+ roaring_bitmap_t * r3b = roaring_bitmap_copy(r3);
+
+ roaring_bitmap_t *rarray1[3] = {r1a, r2a, r3a};
+ roaring_bitmap_t *rarray2[3] = {r1b, r2b, r3b};
+ std::thread thread1(run, rarray1);
+ std::thread thread2(run, rarray2);
+```
+
# How to best aggregate bitmaps?
Suppose you want to compute the union (OR) of many bitmaps. How do you proceed? There are many
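The new thread-safety section above recommends handing each thread its own copy of a bitmap. Below is a minimal plain-C sketch of that pattern, assuming POSIX threads; the `worker` function is illustrative, while the `roaring_bitmap_*` calls are the public CRoaring API.

```c
/* Hedged sketch: copy-per-thread pattern from the README's thread-safety note. */
#include <pthread.h>
#include <roaring/roaring.h>

static void *worker(void *arg) {
    roaring_bitmap_t *local = (roaring_bitmap_t *)arg;
    /* Each thread owns its copy, so it may read and modify it freely. */
    roaring_bitmap_add(local, 42);
    roaring_bitmap_free(local);
    return NULL;
}

int main(void) {
    roaring_bitmap_t *r = roaring_bitmap_of(5, 1, 2, 3, 100, 1000);
    roaring_bitmap_set_copy_on_write(r, true); /* optional: copies stay lightweight */

    pthread_t t1, t2;
    /* Pass independent copies to each thread; the original stays untouched here. */
    pthread_create(&t1, NULL, worker, roaring_bitmap_copy(r));
    pthread_create(&t2, NULL, worker, roaring_bitmap_copy(r));
    pthread_join(t1, NULL);
    pthread_join(t2, NULL);

    roaring_bitmap_free(r);
    return 0;
}
```
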
diff --git a/contrib/libs/croaring/include/roaring/array_util.h b/contrib/libs/croaring/include/roaring/array_util.h
index f062791cb7..8ac875a762 100644
--- a/contrib/libs/croaring/include/roaring/array_util.h
+++ b/contrib/libs/croaring/include/roaring/array_util.h
@@ -1,5 +1,5 @@
-#ifndef ARRAY_UTIL_H
-#define ARRAY_UTIL_H
+#ifndef CROARING_ARRAY_UTIL_H
+#define CROARING_ARRAY_UTIL_H
#include <stddef.h> // for size_t
#include <stdint.h>
diff --git a/contrib/libs/croaring/include/roaring/bitset/bitset.h b/contrib/libs/croaring/include/roaring/bitset/bitset.h
index e79415f2e4..c57d73d108 100644
--- a/contrib/libs/croaring/include/roaring/bitset/bitset.h
+++ b/contrib/libs/croaring/include/roaring/bitset/bitset.h
@@ -1,12 +1,12 @@
-#ifndef CBITSET_BITSET_H
-#define CBITSET_BITSET_H
+#ifndef CROARING_CBITSET_BITSET_H
+#define CROARING_CBITSET_BITSET_H
// For compatibility with MSVC with the use of `restrict`
#if (__STDC_VERSION__ >= 199901L) || \
(defined(__GNUC__) && defined(__STDC_VERSION__))
-#define CBITSET_RESTRICT restrict
+#define CROARING_CBITSET_RESTRICT restrict
#else
-#define CBITSET_RESTRICT
+#define CROARING_CBITSET_RESTRICT
#endif // (__STDC_VERSION__ >= 199901L) || (defined(__GNUC__) &&
// defined(__STDC_VERSION__ ))
@@ -25,7 +25,7 @@ namespace api {
#endif
struct bitset_s {
- uint64_t *CBITSET_RESTRICT array;
+ uint64_t *CROARING_CBITSET_RESTRICT array;
/* For simplicity and performance, we prefer to have a size and a capacity
* that is a multiple of 64 bits. Thus we only track the size and the
* capacity in terms of 64-bit words allocated */
@@ -139,51 +139,53 @@ size_t bitset_maximum(const bitset_t *bitset);
/* compute the union in-place (to b1), returns true if successful, to generate a
* new bitset first call bitset_copy */
-bool bitset_inplace_union(bitset_t *CBITSET_RESTRICT b1,
- const bitset_t *CBITSET_RESTRICT b2);
+bool bitset_inplace_union(bitset_t *CROARING_CBITSET_RESTRICT b1,
+ const bitset_t *CROARING_CBITSET_RESTRICT b2);
/* report the size of the union (without materializing it) */
-size_t bitset_union_count(const bitset_t *CBITSET_RESTRICT b1,
- const bitset_t *CBITSET_RESTRICT b2);
+size_t bitset_union_count(const bitset_t *CROARING_CBITSET_RESTRICT b1,
+ const bitset_t *CROARING_CBITSET_RESTRICT b2);
/* compute the intersection in-place (to b1), to generate a new bitset first
* call bitset_copy */
-void bitset_inplace_intersection(bitset_t *CBITSET_RESTRICT b1,
- const bitset_t *CBITSET_RESTRICT b2);
+void bitset_inplace_intersection(bitset_t *CROARING_CBITSET_RESTRICT b1,
+ const bitset_t *CROARING_CBITSET_RESTRICT b2);
/* report the size of the intersection (without materializing it) */
-size_t bitset_intersection_count(const bitset_t *CBITSET_RESTRICT b1,
- const bitset_t *CBITSET_RESTRICT b2);
+size_t bitset_intersection_count(const bitset_t *CROARING_CBITSET_RESTRICT b1,
+ const bitset_t *CROARING_CBITSET_RESTRICT b2);
/* returns true if the bitsets contain no common elements */
-bool bitsets_disjoint(const bitset_t *CBITSET_RESTRICT b1,
- const bitset_t *CBITSET_RESTRICT b2);
+bool bitsets_disjoint(const bitset_t *CROARING_CBITSET_RESTRICT b1,
+ const bitset_t *CROARING_CBITSET_RESTRICT b2);
/* returns true if the bitsets contain any common elements */
-bool bitsets_intersect(const bitset_t *CBITSET_RESTRICT b1,
- const bitset_t *CBITSET_RESTRICT b2);
+bool bitsets_intersect(const bitset_t *CROARING_CBITSET_RESTRICT b1,
+ const bitset_t *CROARING_CBITSET_RESTRICT b2);
/* returns true if b1 contains all of the set bits of b2 */
-bool bitset_contains_all(const bitset_t *CBITSET_RESTRICT b1,
- const bitset_t *CBITSET_RESTRICT b2);
+bool bitset_contains_all(const bitset_t *CROARING_CBITSET_RESTRICT b1,
+ const bitset_t *CROARING_CBITSET_RESTRICT b2);
/* compute the difference in-place (to b1), to generate a new bitset first call
* bitset_copy */
-void bitset_inplace_difference(bitset_t *CBITSET_RESTRICT b1,
- const bitset_t *CBITSET_RESTRICT b2);
+void bitset_inplace_difference(bitset_t *CROARING_CBITSET_RESTRICT b1,
+ const bitset_t *CROARING_CBITSET_RESTRICT b2);
/* compute the size of the difference */
-size_t bitset_difference_count(const bitset_t *CBITSET_RESTRICT b1,
- const bitset_t *CBITSET_RESTRICT b2);
+size_t bitset_difference_count(const bitset_t *CROARING_CBITSET_RESTRICT b1,
+ const bitset_t *CROARING_CBITSET_RESTRICT b2);
/* compute the symmetric difference in-place (to b1), return true if successful,
* to generate a new bitset first call bitset_copy */
-bool bitset_inplace_symmetric_difference(bitset_t *CBITSET_RESTRICT b1,
- const bitset_t *CBITSET_RESTRICT b2);
+bool bitset_inplace_symmetric_difference(
+ bitset_t *CROARING_CBITSET_RESTRICT b1,
+ const bitset_t *CROARING_CBITSET_RESTRICT b2);
/* compute the size of the symmetric difference */
-size_t bitset_symmetric_difference_count(const bitset_t *CBITSET_RESTRICT b1,
- const bitset_t *CBITSET_RESTRICT b2);
+size_t bitset_symmetric_difference_count(
+ const bitset_t *CROARING_CBITSET_RESTRICT b1,
+ const bitset_t *CROARING_CBITSET_RESTRICT b2);
/* iterate over the set bits
like so :
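The hunk above is cut off inside the header comment that shows how to iterate over set bits. A short sketch of that loop, assuming `bitset_create`, `bitset_set` and `bitset_next_set_bit` keep their upstream signatures:

```c
/* Hedged sketch: bitset_next_set_bit advances i to the next set bit, if any. */
#include <stdio.h>
#include <roaring/bitset/bitset.h>

int main(void) {
    bitset_t *b = bitset_create();
    bitset_set(b, 3);
    bitset_set(b, 70);
    for (size_t i = 0; bitset_next_set_bit(b, &i); i++) {
        printf("bit %zu is set\n", i);
    }
    bitset_free(b);
    return 0;
}
```
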
diff --git a/contrib/libs/croaring/include/roaring/bitset_util.h b/contrib/libs/croaring/include/roaring/bitset_util.h
index 7a7da00b7d..7e027f1919 100644
--- a/contrib/libs/croaring/include/roaring/bitset_util.h
+++ b/contrib/libs/croaring/include/roaring/bitset_util.h
@@ -1,5 +1,5 @@
-#ifndef BITSET_UTIL_H
-#define BITSET_UTIL_H
+#ifndef CROARING_BITSET_UTIL_H
+#define CROARING_BITSET_UTIL_H
#include <stdint.h>
@@ -383,7 +383,7 @@ inline static uint64_t avx2_harley_seal_popcount256(const __m256i *data,
}
CROARING_UNTARGET_AVX2
-#define AVXPOPCNTFNC(opname, avx_intrinsic) \
+#define CROARING_AVXPOPCNTFNC(opname, avx_intrinsic) \
static inline uint64_t avx2_harley_seal_popcount256_##opname( \
const __m256i *data1, const __m256i *data2, const uint64_t size) { \
__m256i total = _mm256_setzero_si256(); \
@@ -564,27 +564,27 @@ CROARING_UNTARGET_AVX2
}
CROARING_TARGET_AVX2
-AVXPOPCNTFNC(or, _mm256_or_si256)
+CROARING_AVXPOPCNTFNC(or, _mm256_or_si256)
CROARING_UNTARGET_AVX2
CROARING_TARGET_AVX2
-AVXPOPCNTFNC(union, _mm256_or_si256)
+CROARING_AVXPOPCNTFNC(union, _mm256_or_si256)
CROARING_UNTARGET_AVX2
CROARING_TARGET_AVX2
-AVXPOPCNTFNC(and, _mm256_and_si256)
+CROARING_AVXPOPCNTFNC(and, _mm256_and_si256)
CROARING_UNTARGET_AVX2
CROARING_TARGET_AVX2
-AVXPOPCNTFNC(intersection, _mm256_and_si256)
+CROARING_AVXPOPCNTFNC(intersection, _mm256_and_si256)
CROARING_UNTARGET_AVX2
CROARING_TARGET_AVX2
-AVXPOPCNTFNC(xor, _mm256_xor_si256)
+CROARING_AVXPOPCNTFNC(xor, _mm256_xor_si256)
CROARING_UNTARGET_AVX2
CROARING_TARGET_AVX2
-AVXPOPCNTFNC(andnot, _mm256_andnot_si256)
+CROARING_AVXPOPCNTFNC(andnot, _mm256_andnot_si256)
CROARING_UNTARGET_AVX2
#define VPOPCNT_AND_ADD(ptr, i, accu) \
@@ -631,7 +631,7 @@ static inline uint64_t avx512_vpopcount(const __m512i *data,
CROARING_UNTARGET_AVX512
#endif
-#define AVXPOPCNTFNC512(opname, avx_intrinsic) \
+#define CROARING_AVXPOPCNTFNC512(opname, avx_intrinsic) \
static inline uint64_t avx512_harley_seal_popcount512_##opname( \
const __m512i *data1, const __m512i *data2, const uint64_t size) { \
__m512i total = _mm512_setzero_si512(); \
@@ -693,12 +693,12 @@ CROARING_UNTARGET_AVX512
#if CROARING_COMPILER_SUPPORTS_AVX512
CROARING_TARGET_AVX512
-AVXPOPCNTFNC512(or, _mm512_or_si512)
-AVXPOPCNTFNC512(union, _mm512_or_si512)
-AVXPOPCNTFNC512(and, _mm512_and_si512)
-AVXPOPCNTFNC512(intersection, _mm512_and_si512)
-AVXPOPCNTFNC512(xor, _mm512_xor_si512)
-AVXPOPCNTFNC512(andnot, _mm512_andnot_si512)
+CROARING_AVXPOPCNTFNC512(or, _mm512_or_si512)
+CROARING_AVXPOPCNTFNC512(union, _mm512_or_si512)
+CROARING_AVXPOPCNTFNC512(and, _mm512_and_si512)
+CROARING_AVXPOPCNTFNC512(intersection, _mm512_and_si512)
+CROARING_AVXPOPCNTFNC512(xor, _mm512_xor_si512)
+CROARING_AVXPOPCNTFNC512(andnot, _mm512_andnot_si512)
CROARING_UNTARGET_AVX512
#endif
/***
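The renamed `CROARING_AVXPOPCNTFNC` macros generate functions that count the bits of an element-wise operation (or, and, xor, ...) without materializing the result. A hedged scalar sketch of the same idea; the helper name and the use of `__builtin_popcountll` (GCC/Clang) are illustrative assumptions, not library API:

```c
/* Population count of the element-wise OR of two word arrays, no output array. */
#include <stddef.h>
#include <stdint.h>

static uint64_t scalar_popcount_or(const uint64_t *a, const uint64_t *b, size_t n) {
    uint64_t total = 0;
    for (size_t i = 0; i < n; i++) {
        total += (uint64_t)__builtin_popcountll(a[i] | b[i]);
    }
    return total;
}
```
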
diff --git a/contrib/libs/croaring/include/roaring/containers/run.h b/contrib/libs/croaring/include/roaring/containers/run.h
index 1c7e07b2d5..486c984434 100644
--- a/contrib/libs/croaring/include/roaring/containers/run.h
+++ b/contrib/libs/croaring/include/roaring/containers/run.h
@@ -45,10 +45,10 @@ struct rle16_s {
typedef struct rle16_s rle16_t;
#ifdef __cplusplus
-#define MAKE_RLE16(val, len) \
+#define CROARING_MAKE_RLE16(val, len) \
{ (uint16_t)(val), (uint16_t)(len) } // no tagged structs until c++20
#else
-#define MAKE_RLE16(val, len) \
+#define CROARING_MAKE_RLE16(val, len) \
(rle16_t) { .value = (uint16_t)(val), .length = (uint16_t)(len) }
#endif
@@ -360,7 +360,7 @@ static inline void run_container_append_value(run_container_t *run,
rle16_t *previousrl) {
const uint32_t previousend = previousrl->value + previousrl->length;
if (val > previousend + 1) { // we add a new one
- *previousrl = MAKE_RLE16(val, 0);
+ *previousrl = CROARING_MAKE_RLE16(val, 0);
run->runs[run->n_runs] = *previousrl;
run->n_runs++;
} else if (val == previousend + 1) { // we merge
@@ -375,7 +375,7 @@ static inline void run_container_append_value(run_container_t *run,
*/
static inline rle16_t run_container_append_value_first(run_container_t *run,
uint16_t val) {
- rle16_t newrle = MAKE_RLE16(val, 0);
+ rle16_t newrle = CROARING_MAKE_RLE16(val, 0);
run->runs[run->n_runs] = newrle;
run->n_runs++;
return newrle;
diff --git a/contrib/libs/croaring/include/roaring/portability.h b/contrib/libs/croaring/include/roaring/portability.h
index 9962aaf419..78081ec88e 100644
--- a/contrib/libs/croaring/include/roaring/portability.h
+++ b/contrib/libs/croaring/include/roaring/portability.h
@@ -13,8 +13,8 @@
* to ever interact with.
*/
-#ifndef INCLUDE_PORTABILITY_H_
-#define INCLUDE_PORTABILITY_H_
+#ifndef CROARING_INCLUDE_PORTABILITY_H_
+#define CROARING_INCLUDE_PORTABILITY_H_
#ifndef _GNU_SOURCE
#define _GNU_SOURCE 1
@@ -437,9 +437,16 @@ static inline int roaring_hamming(uint64_t x) {
#define croaring_htobe64(x) OSSwapInt64(x)
#elif defined(__has_include) && \
- __has_include(<byteswap.h>) // CROARING_IS_BIG_ENDIAN
+ __has_include( \
+ <byteswap.h>) && (defined(__linux__) || defined(__FreeBSD__)) // CROARING_IS_BIG_ENDIAN
#include <byteswap.h>
-#define croaring_htobe64(x) __bswap_64(x)
+#if defined(__linux__)
+#define croaring_htobe64(x) bswap_64(x)
+#elif defined(__FreeBSD__)
+#define croaring_htobe64(x) bswap64(x)
+#else
+#warning "Unknown platform, report as an error"
+#endif
#else // CROARING_IS_BIG_ENDIAN
// Gets compiled to bswap or equivalent on most compilers.
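The fallback branch referenced by this comment boils down to a shift-and-mask byte swap that compilers typically lower to a single `bswap` instruction. A portable sketch of such a swap, offered only as an illustration and not as the macro the patch selects on Linux/FreeBSD (where `bswap_64`/`bswap64` are used):

```c
/* Hedged sketch: portable 64-bit byte swap. */
#include <stdint.h>

static inline uint64_t portable_bswap64(uint64_t x) {
    return ((x & 0x00000000000000FFULL) << 56) |
           ((x & 0x000000000000FF00ULL) << 40) |
           ((x & 0x0000000000FF0000ULL) << 24) |
           ((x & 0x00000000FF000000ULL) << 8)  |
           ((x & 0x000000FF00000000ULL) >> 8)  |
           ((x & 0x0000FF0000000000ULL) >> 24) |
           ((x & 0x00FF000000000000ULL) >> 40) |
           ((x & 0xFF00000000000000ULL) >> 56);
}
```
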
diff --git a/contrib/libs/croaring/include/roaring/roaring_version.h b/contrib/libs/croaring/include/roaring/roaring_version.h
index b2efa33a07..6dcf54f477 100644
--- a/contrib/libs/croaring/include/roaring/roaring_version.h
+++ b/contrib/libs/croaring/include/roaring/roaring_version.h
@@ -1,10 +1,11 @@
-// /include/roaring/roaring_version.h automatically generated by release.py, do not change by hand
-#ifndef ROARING_INCLUDE_ROARING_VERSION
-#define ROARING_INCLUDE_ROARING_VERSION
+// /include/roaring/roaring_version.h automatically generated by release.py, do
+// not change by hand
+#ifndef ROARING_INCLUDE_ROARING_VERSION
+#define ROARING_INCLUDE_ROARING_VERSION
#define ROARING_VERSION "3.0.0"
-enum {
+enum {
ROARING_VERSION_MAJOR = 3,
ROARING_VERSION_MINOR = 0,
ROARING_VERSION_REVISION = 0
-};
-#endif // ROARING_INCLUDE_ROARING_VERSION
+};
+#endif // ROARING_INCLUDE_ROARING_VERSION
diff --git a/contrib/libs/croaring/src/art/art.c b/contrib/libs/croaring/src/art/art.c
index 35d45fdf5b..20a346f8d9 100644
--- a/contrib/libs/croaring/src/art/art.c
+++ b/contrib/libs/croaring/src/art/art.c
@@ -6,14 +6,14 @@
#include <roaring/memory.h>
#include <roaring/portability.h>
-#define ART_NODE4_TYPE 0
-#define ART_NODE16_TYPE 1
-#define ART_NODE48_TYPE 2
-#define ART_NODE256_TYPE 3
-#define ART_NUM_TYPES 4
+#define CROARING_ART_NODE4_TYPE 0
+#define CROARING_ART_NODE16_TYPE 1
+#define CROARING_ART_NODE48_TYPE 2
+#define CROARING_ART_NODE256_TYPE 3
+#define CROARING_ART_NUM_TYPES 4
// Node48 placeholder value to indicate no child is present at this key index.
-#define ART_NODE48_EMPTY_VAL 48
+#define CROARING_ART_NODE48_EMPTY_VAL 48
// We use the least significant bit of node pointers to indicate whether a node
// is a leaf or an inner node. This is never surfaced to the user.
@@ -24,15 +24,15 @@
// deallocation of the ART, we know not to free the leaves without having to
// dereference the leaf pointers.
//
-// All internal operations on leaves should use CAST_LEAF before using the leaf.
-// The only places that use SET_LEAF are locations where a field is directly
-// assigned to a leaf pointer. After using SET_LEAF, the leaf should be treated
-// as a node of unknown type.
-#define IS_LEAF(p) (((uintptr_t)(p) & 1))
-#define SET_LEAF(p) ((art_node_t *)((uintptr_t)(p) | 1))
-#define CAST_LEAF(p) ((art_leaf_t *)((void *)((uintptr_t)(p) & ~1)))
+// All internal operations on leaves should use CROARING_CAST_LEAF before using
+// the leaf. The only places that use CROARING_SET_LEAF are locations where a
+// field is directly assigned to a leaf pointer. After using CROARING_SET_LEAF,
+// the leaf should be treated as a node of unknown type.
+#define CROARING_IS_LEAF(p) (((uintptr_t)(p) & 1))
+#define CROARING_SET_LEAF(p) ((art_node_t *)((uintptr_t)(p) | 1))
+#define CROARING_CAST_LEAF(p) ((art_leaf_t *)((void *)((uintptr_t)(p) & ~1)))
-#define NODE48_AVAILABLE_CHILDREN_MASK ((UINT64_C(1) << 48) - 1)
+#define CROARING_NODE48_AVAILABLE_CHILDREN_MASK ((UINT64_C(1) << 48) - 1)
#ifdef __cplusplus
extern "C" {
@@ -89,7 +89,8 @@ typedef struct art_node16_s {
} art_node16_t;
// Node48: key[i] corresponds with children[key[i]] if key[i] !=
-// ART_NODE48_EMPTY_VAL. Keys are naturally sorted due to direct indexing.
+// CROARING_ART_NODE48_EMPTY_VAL. Keys are naturally sorted due to direct
+// indexing.
typedef struct art_node48_s {
art_inner_node_t base;
uint8_t count;
@@ -115,7 +116,9 @@ typedef struct art_indexed_child_s {
art_key_chunk_t key_chunk;
} art_indexed_child_t;
-static inline bool art_is_leaf(const art_node_t *node) { return IS_LEAF(node); }
+static inline bool art_is_leaf(const art_node_t *node) {
+ return CROARING_IS_LEAF(node);
+}
static void art_leaf_populate(art_leaf_t *leaf, const art_key_chunk_t key[]) {
memcpy(leaf->key, key, ART_KEY_BYTES);
@@ -159,7 +162,8 @@ static art_node_t *art_node256_insert(art_node256_t *node, art_node_t *child,
static art_node4_t *art_node4_create(const art_key_chunk_t prefix[],
uint8_t prefix_size) {
art_node4_t *node = (art_node4_t *)roaring_malloc(sizeof(art_node4_t));
- art_init_inner_node(&node->base, ART_NODE4_TYPE, prefix, prefix_size);
+ art_init_inner_node(&node->base, CROARING_ART_NODE4_TYPE, prefix,
+ prefix_size);
node->count = 0;
return node;
}
@@ -363,7 +367,8 @@ static bool art_node4_internal_validate(const art_node4_t *node,
static art_node16_t *art_node16_create(const art_key_chunk_t prefix[],
uint8_t prefix_size) {
art_node16_t *node = (art_node16_t *)roaring_malloc(sizeof(art_node16_t));
- art_init_inner_node(&node->base, ART_NODE16_TYPE, prefix, prefix_size);
+ art_init_inner_node(&node->base, CROARING_ART_NODE16_TYPE, prefix,
+ prefix_size);
node->count = 0;
return node;
}
@@ -546,18 +551,19 @@ static bool art_node16_internal_validate(const art_node16_t *node,
static art_node48_t *art_node48_create(const art_key_chunk_t prefix[],
uint8_t prefix_size) {
art_node48_t *node = (art_node48_t *)roaring_malloc(sizeof(art_node48_t));
- art_init_inner_node(&node->base, ART_NODE48_TYPE, prefix, prefix_size);
+ art_init_inner_node(&node->base, CROARING_ART_NODE48_TYPE, prefix,
+ prefix_size);
node->count = 0;
- node->available_children = NODE48_AVAILABLE_CHILDREN_MASK;
+ node->available_children = CROARING_NODE48_AVAILABLE_CHILDREN_MASK;
for (size_t i = 0; i < 256; ++i) {
- node->keys[i] = ART_NODE48_EMPTY_VAL;
+ node->keys[i] = CROARING_ART_NODE48_EMPTY_VAL;
}
return node;
}
static void art_free_node48(art_node48_t *node) {
uint64_t used_children =
- (node->available_children) ^ NODE48_AVAILABLE_CHILDREN_MASK;
+ (node->available_children) ^ CROARING_NODE48_AVAILABLE_CHILDREN_MASK;
while (used_children != 0) {
// We checked above that used_children is not zero
uint8_t child_idx = roaring_trailing_zeroes(used_children);
@@ -570,7 +576,7 @@ static void art_free_node48(art_node48_t *node) {
static inline art_node_t *art_node48_find_child(const art_node48_t *node,
art_key_chunk_t key) {
uint8_t val_idx = node->keys[key];
- if (val_idx != ART_NODE48_EMPTY_VAL) {
+ if (val_idx != CROARING_ART_NODE48_EMPTY_VAL) {
return node->children[val_idx];
}
return NULL;
@@ -592,7 +598,7 @@ static art_node_t *art_node48_insert(art_node48_t *node, art_node_t *child,
art_node256_create(node->base.prefix, node->base.prefix_size);
for (size_t i = 0; i < 256; ++i) {
uint8_t val_idx = node->keys[i];
- if (val_idx != ART_NODE48_EMPTY_VAL) {
+ if (val_idx != CROARING_ART_NODE48_EMPTY_VAL) {
art_node256_insert(new_node, node->children[val_idx], i);
}
}
@@ -603,10 +609,10 @@ static art_node_t *art_node48_insert(art_node48_t *node, art_node_t *child,
static inline art_node_t *art_node48_erase(art_node48_t *node,
uint8_t key_chunk) {
uint8_t val_idx = node->keys[key_chunk];
- if (val_idx == ART_NODE48_EMPTY_VAL) {
+ if (val_idx == CROARING_ART_NODE48_EMPTY_VAL) {
return (art_node_t *)node;
}
- node->keys[key_chunk] = ART_NODE48_EMPTY_VAL;
+ node->keys[key_chunk] = CROARING_ART_NODE48_EMPTY_VAL;
node->available_children |= UINT64_C(1) << val_idx;
node->count--;
if (node->count > 16) {
@@ -617,7 +623,7 @@ static inline art_node_t *art_node48_erase(art_node48_t *node,
art_node16_create(node->base.prefix, node->base.prefix_size);
for (size_t i = 0; i < 256; ++i) {
val_idx = node->keys[i];
- if (val_idx != ART_NODE48_EMPTY_VAL) {
+ if (val_idx != CROARING_ART_NODE48_EMPTY_VAL) {
art_node16_insert(new_node, node->children[val_idx], i);
}
}
@@ -629,7 +635,7 @@ static inline void art_node48_replace(art_node48_t *node,
art_key_chunk_t key_chunk,
art_node_t *new_child) {
uint8_t val_idx = node->keys[key_chunk];
- assert(val_idx != ART_NODE48_EMPTY_VAL);
+ assert(val_idx != CROARING_ART_NODE48_EMPTY_VAL);
node->children[val_idx] = new_child;
}
@@ -638,7 +644,7 @@ static inline art_indexed_child_t art_node48_next_child(
art_indexed_child_t indexed_child;
index++;
for (size_t i = index; i < 256; ++i) {
- if (node->keys[i] != ART_NODE48_EMPTY_VAL) {
+ if (node->keys[i] != CROARING_ART_NODE48_EMPTY_VAL) {
indexed_child.index = i;
indexed_child.child = node->children[node->keys[i]];
indexed_child.key_chunk = i;
@@ -657,7 +663,7 @@ static inline art_indexed_child_t art_node48_prev_child(
index--;
art_indexed_child_t indexed_child;
for (int i = index; i >= 0; --i) {
- if (node->keys[i] != ART_NODE48_EMPTY_VAL) {
+ if (node->keys[i] != CROARING_ART_NODE48_EMPTY_VAL) {
indexed_child.index = i;
indexed_child.child = node->children[node->keys[i]];
indexed_child.key_chunk = i;
@@ -685,7 +691,7 @@ static inline art_indexed_child_t art_node48_lower_bound(
art_node48_t *node, art_key_chunk_t key_chunk) {
art_indexed_child_t indexed_child;
for (size_t i = key_chunk; i < 256; ++i) {
- if (node->keys[i] != ART_NODE48_EMPTY_VAL) {
+ if (node->keys[i] != CROARING_ART_NODE48_EMPTY_VAL) {
indexed_child.index = i;
indexed_child.child = node->children[node->keys[i]];
indexed_child.key_chunk = i;
@@ -707,7 +713,7 @@ static bool art_node48_internal_validate(const art_node48_t *node,
uint64_t used_children = 0;
for (int i = 0; i < 256; ++i) {
uint8_t child_idx = node->keys[i];
- if (child_idx != ART_NODE48_EMPTY_VAL) {
+ if (child_idx != CROARING_ART_NODE48_EMPTY_VAL) {
if (used_children & (UINT64_C(1) << child_idx)) {
return art_validate_fail(
&validator, "Node48 keys point to the same child index");
@@ -721,7 +727,7 @@ static bool art_node48_internal_validate(const art_node48_t *node,
}
}
uint64_t expected_used_children =
- (node->available_children) ^ NODE48_AVAILABLE_CHILDREN_MASK;
+ (node->available_children) ^ CROARING_NODE48_AVAILABLE_CHILDREN_MASK;
if (used_children != expected_used_children) {
return art_validate_fail(
&validator,
@@ -744,7 +750,7 @@ static bool art_node48_internal_validate(const art_node48_t *node,
validator.depth++;
for (int i = 0; i < 256; ++i) {
- if (node->keys[i] != ART_NODE48_EMPTY_VAL) {
+ if (node->keys[i] != CROARING_ART_NODE48_EMPTY_VAL) {
validator.current_key[validator.depth - 1] = i;
if (!art_internal_validate_at(node->children[node->keys[i]],
validator)) {
@@ -759,7 +765,8 @@ static art_node256_t *art_node256_create(const art_key_chunk_t prefix[],
uint8_t prefix_size) {
art_node256_t *node =
(art_node256_t *)roaring_malloc(sizeof(art_node256_t));
- art_init_inner_node(&node->base, ART_NODE256_TYPE, prefix, prefix_size);
+ art_init_inner_node(&node->base, CROARING_ART_NODE256_TYPE, prefix,
+ prefix_size);
node->count = 0;
for (size_t i = 0; i < 256; ++i) {
node->children[i] = NULL;
@@ -915,13 +922,13 @@ static bool art_node256_internal_validate(const art_node256_t *node,
static art_node_t *art_find_child(const art_inner_node_t *node,
art_key_chunk_t key_chunk) {
switch (art_get_type(node)) {
- case ART_NODE4_TYPE:
+ case CROARING_ART_NODE4_TYPE:
return art_node4_find_child((art_node4_t *)node, key_chunk);
- case ART_NODE16_TYPE:
+ case CROARING_ART_NODE16_TYPE:
return art_node16_find_child((art_node16_t *)node, key_chunk);
- case ART_NODE48_TYPE:
+ case CROARING_ART_NODE48_TYPE:
return art_node48_find_child((art_node48_t *)node, key_chunk);
- case ART_NODE256_TYPE:
+ case CROARING_ART_NODE256_TYPE:
return art_node256_find_child((art_node256_t *)node, key_chunk);
default:
assert(false);
@@ -933,16 +940,16 @@ static art_node_t *art_find_child(const art_inner_node_t *node,
static void art_replace(art_inner_node_t *node, art_key_chunk_t key_chunk,
art_node_t *new_child) {
switch (art_get_type(node)) {
- case ART_NODE4_TYPE:
+ case CROARING_ART_NODE4_TYPE:
art_node4_replace((art_node4_t *)node, key_chunk, new_child);
break;
- case ART_NODE16_TYPE:
+ case CROARING_ART_NODE16_TYPE:
art_node16_replace((art_node16_t *)node, key_chunk, new_child);
break;
- case ART_NODE48_TYPE:
+ case CROARING_ART_NODE48_TYPE:
art_node48_replace((art_node48_t *)node, key_chunk, new_child);
break;
- case ART_NODE256_TYPE:
+ case CROARING_ART_NODE256_TYPE:
art_node256_replace((art_node256_t *)node, key_chunk, new_child);
break;
default:
@@ -955,13 +962,13 @@ static void art_replace(art_inner_node_t *node, art_key_chunk_t key_chunk,
static art_node_t *art_node_erase(art_inner_node_t *node,
art_key_chunk_t key_chunk) {
switch (art_get_type(node)) {
- case ART_NODE4_TYPE:
+ case CROARING_ART_NODE4_TYPE:
return art_node4_erase((art_node4_t *)node, key_chunk);
- case ART_NODE16_TYPE:
+ case CROARING_ART_NODE16_TYPE:
return art_node16_erase((art_node16_t *)node, key_chunk);
- case ART_NODE48_TYPE:
+ case CROARING_ART_NODE48_TYPE:
return art_node48_erase((art_node48_t *)node, key_chunk);
- case ART_NODE256_TYPE:
+ case CROARING_ART_NODE256_TYPE:
return art_node256_erase((art_node256_t *)node, key_chunk);
default:
assert(false);
@@ -974,15 +981,15 @@ static art_node_t *art_node_erase(art_inner_node_t *node,
static art_node_t *art_node_insert_leaf(art_inner_node_t *node,
art_key_chunk_t key_chunk,
art_leaf_t *leaf) {
- art_node_t *child = (art_node_t *)(SET_LEAF(leaf));
+ art_node_t *child = (art_node_t *)(CROARING_SET_LEAF(leaf));
switch (art_get_type(node)) {
- case ART_NODE4_TYPE:
+ case CROARING_ART_NODE4_TYPE:
return art_node4_insert((art_node4_t *)node, child, key_chunk);
- case ART_NODE16_TYPE:
+ case CROARING_ART_NODE16_TYPE:
return art_node16_insert((art_node16_t *)node, child, key_chunk);
- case ART_NODE48_TYPE:
+ case CROARING_ART_NODE48_TYPE:
return art_node48_insert((art_node48_t *)node, child, key_chunk);
- case ART_NODE256_TYPE:
+ case CROARING_ART_NODE256_TYPE:
return art_node256_insert((art_node256_t *)node, child, key_chunk);
default:
assert(false);
@@ -997,16 +1004,16 @@ static void art_free_node(art_node_t *node) {
return;
}
switch (art_get_type((art_inner_node_t *)node)) {
- case ART_NODE4_TYPE:
+ case CROARING_ART_NODE4_TYPE:
art_free_node4((art_node4_t *)node);
break;
- case ART_NODE16_TYPE:
+ case CROARING_ART_NODE16_TYPE:
art_free_node16((art_node16_t *)node);
break;
- case ART_NODE48_TYPE:
+ case CROARING_ART_NODE48_TYPE:
art_free_node48((art_node48_t *)node);
break;
- case ART_NODE256_TYPE:
+ case CROARING_ART_NODE256_TYPE:
art_free_node256((art_node256_t *)node);
break;
default:
@@ -1024,13 +1031,13 @@ static art_indexed_child_t art_node_next_child(const art_node_t *node,
return indexed_child;
}
switch (art_get_type((art_inner_node_t *)node)) {
- case ART_NODE4_TYPE:
+ case CROARING_ART_NODE4_TYPE:
return art_node4_next_child((art_node4_t *)node, index);
- case ART_NODE16_TYPE:
+ case CROARING_ART_NODE16_TYPE:
return art_node16_next_child((art_node16_t *)node, index);
- case ART_NODE48_TYPE:
+ case CROARING_ART_NODE48_TYPE:
return art_node48_next_child((art_node48_t *)node, index);
- case ART_NODE256_TYPE:
+ case CROARING_ART_NODE256_TYPE:
return art_node256_next_child((art_node256_t *)node, index);
default:
assert(false);
@@ -1048,13 +1055,13 @@ static art_indexed_child_t art_node_prev_child(const art_node_t *node,
return indexed_child;
}
switch (art_get_type((art_inner_node_t *)node)) {
- case ART_NODE4_TYPE:
+ case CROARING_ART_NODE4_TYPE:
return art_node4_prev_child((art_node4_t *)node, index);
- case ART_NODE16_TYPE:
+ case CROARING_ART_NODE16_TYPE:
return art_node16_prev_child((art_node16_t *)node, index);
- case ART_NODE48_TYPE:
+ case CROARING_ART_NODE48_TYPE:
return art_node48_prev_child((art_node48_t *)node, index);
- case ART_NODE256_TYPE:
+ case CROARING_ART_NODE256_TYPE:
return art_node256_prev_child((art_node256_t *)node, index);
default:
assert(false);
@@ -1072,13 +1079,13 @@ static art_indexed_child_t art_node_child_at(const art_node_t *node,
return indexed_child;
}
switch (art_get_type((art_inner_node_t *)node)) {
- case ART_NODE4_TYPE:
+ case CROARING_ART_NODE4_TYPE:
return art_node4_child_at((art_node4_t *)node, index);
- case ART_NODE16_TYPE:
+ case CROARING_ART_NODE16_TYPE:
return art_node16_child_at((art_node16_t *)node, index);
- case ART_NODE48_TYPE:
+ case CROARING_ART_NODE48_TYPE:
return art_node48_child_at((art_node48_t *)node, index);
- case ART_NODE256_TYPE:
+ case CROARING_ART_NODE256_TYPE:
return art_node256_child_at((art_node256_t *)node, index);
default:
assert(false);
@@ -1096,13 +1103,13 @@ static art_indexed_child_t art_node_lower_bound(const art_node_t *node,
return indexed_child;
}
switch (art_get_type((art_inner_node_t *)node)) {
- case ART_NODE4_TYPE:
+ case CROARING_ART_NODE4_TYPE:
return art_node4_lower_bound((art_node4_t *)node, key_chunk);
- case ART_NODE16_TYPE:
+ case CROARING_ART_NODE16_TYPE:
return art_node16_lower_bound((art_node16_t *)node, key_chunk);
- case ART_NODE48_TYPE:
+ case CROARING_ART_NODE48_TYPE:
return art_node48_lower_bound((art_node48_t *)node, key_chunk);
- case ART_NODE256_TYPE:
+ case CROARING_ART_NODE256_TYPE:
return art_node256_lower_bound((art_node256_t *)node, key_chunk);
default:
assert(false);
@@ -1153,7 +1160,7 @@ static uint8_t art_common_prefix(const art_key_chunk_t key1[],
static art_node_t *art_insert_at(art_node_t *node, const art_key_chunk_t key[],
uint8_t depth, art_leaf_t *new_leaf) {
if (art_is_leaf(node)) {
- art_leaf_t *leaf = CAST_LEAF(node);
+ art_leaf_t *leaf = CROARING_CAST_LEAF(node);
uint8_t common_prefix = art_common_prefix(
leaf->key, depth, ART_KEY_BYTES, key, depth, ART_KEY_BYTES);
@@ -1233,7 +1240,7 @@ static art_erase_result_t art_erase_at(art_node_t *node,
result.value_erased = NULL;
if (art_is_leaf(node)) {
- art_leaf_t *leaf = CAST_LEAF(node);
+ art_leaf_t *leaf = CROARING_CAST_LEAF(node);
uint8_t common_prefix = art_common_prefix(leaf->key, 0, ART_KEY_BYTES,
key, 0, ART_KEY_BYTES);
if (common_prefix != ART_KEY_BYTES) {
@@ -1298,7 +1305,7 @@ static art_val_t *art_find_at(const art_node_t *node,
// Include both the prefix and the child key chunk in the depth.
depth += inner_node->prefix_size + 1;
}
- art_leaf_t *leaf = CAST_LEAF(node);
+ art_leaf_t *leaf = CROARING_CAST_LEAF(node);
if (depth >= ART_KEY_BYTES) {
return (art_val_t *)leaf;
}
@@ -1317,16 +1324,16 @@ size_t art_size_in_bytes_at(const art_node_t *node) {
}
size_t size = 0;
switch (art_get_type((art_inner_node_t *)node)) {
- case ART_NODE4_TYPE: {
+ case CROARING_ART_NODE4_TYPE: {
size += sizeof(art_node4_t);
} break;
- case ART_NODE16_TYPE: {
+ case CROARING_ART_NODE16_TYPE: {
size += sizeof(art_node16_t);
} break;
- case ART_NODE48_TYPE: {
+ case CROARING_ART_NODE48_TYPE: {
size += sizeof(art_node48_t);
} break;
- case ART_NODE256_TYPE: {
+ case CROARING_ART_NODE256_TYPE: {
size += sizeof(art_node256_t);
} break;
default:
@@ -1347,16 +1354,16 @@ static void art_node_print_type(const art_node_t *node) {
return;
}
switch (art_get_type((art_inner_node_t *)node)) {
- case ART_NODE4_TYPE:
+ case CROARING_ART_NODE4_TYPE:
printf("Node4");
return;
- case ART_NODE16_TYPE:
+ case CROARING_ART_NODE16_TYPE:
printf("Node16");
return;
- case ART_NODE48_TYPE:
+ case CROARING_ART_NODE48_TYPE:
printf("Node48");
return;
- case ART_NODE256_TYPE:
+ case CROARING_ART_NODE256_TYPE:
printf("Node256");
return;
default:
@@ -1368,7 +1375,7 @@ static void art_node_print_type(const art_node_t *node) {
void art_node_printf(const art_node_t *node, uint8_t depth) {
if (art_is_leaf(node)) {
printf("{ type: Leaf, key: ");
- art_leaf_t *leaf = CAST_LEAF(node);
+ art_leaf_t *leaf = CROARING_CAST_LEAF(node);
for (size_t i = 0; i < ART_KEY_BYTES; ++i) {
printf("%02x", leaf->key[i]);
}
@@ -1395,7 +1402,7 @@ void art_node_printf(const art_node_t *node, uint8_t depth) {
printf("\n");
switch (art_get_type(inner_node)) {
- case ART_NODE4_TYPE: {
+ case CROARING_ART_NODE4_TYPE: {
art_node4_t *node4 = (art_node4_t *)node;
for (uint8_t i = 0; i < node4->count; ++i) {
printf("%*s", depth, "");
@@ -1403,7 +1410,7 @@ void art_node_printf(const art_node_t *node, uint8_t depth) {
art_node_printf(node4->children[i], depth);
}
} break;
- case ART_NODE16_TYPE: {
+ case CROARING_ART_NODE16_TYPE: {
art_node16_t *node16 = (art_node16_t *)node;
for (uint8_t i = 0; i < node16->count; ++i) {
printf("%*s", depth, "");
@@ -1411,10 +1418,10 @@ void art_node_printf(const art_node_t *node, uint8_t depth) {
art_node_printf(node16->children[i], depth);
}
} break;
- case ART_NODE48_TYPE: {
+ case CROARING_ART_NODE48_TYPE: {
art_node48_t *node48 = (art_node48_t *)node;
for (int i = 0; i < 256; ++i) {
- if (node48->keys[i] != ART_NODE48_EMPTY_VAL) {
+ if (node48->keys[i] != CROARING_ART_NODE48_EMPTY_VAL) {
printf("%*s", depth, "");
printf("key: %02x ", i);
printf("child: %02x ", node48->keys[i]);
@@ -1422,7 +1429,7 @@ void art_node_printf(const art_node_t *node, uint8_t depth) {
}
}
} break;
- case ART_NODE256_TYPE: {
+ case CROARING_ART_NODE256_TYPE: {
art_node256_t *node256 = (art_node256_t *)node;
for (int i = 0; i < 256; ++i) {
if (node256->children[i] != NULL) {
@@ -1445,7 +1452,7 @@ void art_insert(art_t *art, const art_key_chunk_t *key, art_val_t *val) {
art_leaf_t *leaf = (art_leaf_t *)val;
art_leaf_populate(leaf, key);
if (art->root == NULL) {
- art->root = (art_node_t *)SET_LEAF(leaf);
+ art->root = (art_node_t *)CROARING_SET_LEAF(leaf);
return;
}
art->root = art_insert_at(art->root, key, 0, leaf);
@@ -1503,7 +1510,7 @@ static inline art_node_t *art_iterator_node(art_iterator_t *iterator) {
// true for convenience.
static inline bool art_iterator_valid_loc(art_iterator_t *iterator,
art_leaf_t *leaf) {
- iterator->frames[iterator->frame].node = SET_LEAF(leaf);
+ iterator->frames[iterator->frame].node = CROARING_SET_LEAF(leaf);
iterator->frames[iterator->frame].index_in_node = 0;
memcpy(iterator->key, leaf->key, ART_KEY_BYTES);
iterator->value = (art_val_t *)leaf;
@@ -1592,7 +1599,7 @@ static bool art_node_init_iterator(const art_node_t *node,
// We're at a leaf.
iterator->frames[iterator->frame].node = (art_node_t *)node;
iterator->frames[iterator->frame].index_in_node = 0; // Should not matter.
- return art_iterator_valid_loc(iterator, CAST_LEAF(node));
+ return art_iterator_valid_loc(iterator, CROARING_CAST_LEAF(node));
}
bool art_iterator_move(art_iterator_t *iterator, bool forward) {
@@ -1652,7 +1659,7 @@ static bool art_node_iterator_lower_bound(const art_node_t *node,
art_iterator_down(iterator, inner_node, indexed_child.index);
node = indexed_child.child;
}
- art_leaf_t *leaf = CAST_LEAF(node);
+ art_leaf_t *leaf = CROARING_CAST_LEAF(node);
if (art_compare_keys(leaf->key, key) >= 0) {
// Leaf has an equal or larger key.
return art_iterator_valid_loc(iterator, leaf);
@@ -1806,7 +1813,7 @@ static bool art_internal_validate_at(const art_node_t *node,
return art_validate_fail(&validator, "node is null");
}
if (art_is_leaf(node)) {
- art_leaf_t *leaf = CAST_LEAF(node);
+ art_leaf_t *leaf = CROARING_CAST_LEAF(node);
if (art_compare_prefix(leaf->key, 0, validator.current_key, 0,
validator.depth) != 0) {
return art_validate_fail(
@@ -1832,25 +1839,25 @@ static bool art_internal_validate_at(const art_node_t *node,
validator.depth += inner_node->prefix_size;
switch (inner_node->typecode) {
- case ART_NODE4_TYPE:
+ case CROARING_ART_NODE4_TYPE:
if (!art_node4_internal_validate((art_node4_t *)inner_node,
validator)) {
return false;
}
break;
- case ART_NODE16_TYPE:
+ case CROARING_ART_NODE16_TYPE:
if (!art_node16_internal_validate((art_node16_t *)inner_node,
validator)) {
return false;
}
break;
- case ART_NODE48_TYPE:
+ case CROARING_ART_NODE48_TYPE:
if (!art_node48_internal_validate((art_node48_t *)inner_node,
validator)) {
return false;
}
break;
- case ART_NODE256_TYPE:
+ case CROARING_ART_NODE256_TYPE:
if (!art_node256_internal_validate((art_node256_t *)inner_node,
validator)) {
return false;
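The comment block renamed in this file describes tagging the lowest pointer bit to tell leaves and inner nodes apart (`CROARING_IS_LEAF` / `CROARING_SET_LEAF` / `CROARING_CAST_LEAF`). A self-contained sketch of that tagging trick, using illustrative names and types rather than the library's own:

```c
/* Hedged sketch: low-bit pointer tagging to distinguish leaves from inner nodes. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { int payload; } leaf_t;
typedef void node_t; /* either a tagged leaf or an inner node */

#define IS_LEAF(p)   (((uintptr_t)(p)) & 1)
#define SET_LEAF(p)  ((node_t *)(((uintptr_t)(p)) | 1))
#define CAST_LEAF(p) ((leaf_t *)(((uintptr_t)(p)) & ~(uintptr_t)1))

int main(void) {
    leaf_t leaf = {.payload = 7};
    node_t *tagged = SET_LEAF(&leaf);            /* store with the tag bit set */
    assert(IS_LEAF(tagged));                     /* cheap type test, no extra field */
    printf("%d\n", CAST_LEAF(tagged)->payload);  /* strip the tag before dereferencing */
    return 0;
}
```
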
diff --git a/contrib/libs/croaring/src/bitset.c b/contrib/libs/croaring/src/bitset.c
index a838073303..5d23af1e7b 100644
--- a/contrib/libs/croaring/src/bitset.c
+++ b/contrib/libs/croaring/src/bitset.c
@@ -200,8 +200,8 @@ size_t bitset_count(const bitset_t *bitset) {
return card;
}
-bool bitset_inplace_union(bitset_t *CBITSET_RESTRICT b1,
- const bitset_t *CBITSET_RESTRICT b2) {
+bool bitset_inplace_union(bitset_t *CROARING_CBITSET_RESTRICT b1,
+ const bitset_t *CROARING_CBITSET_RESTRICT b2) {
size_t minlength =
b1->arraysize < b2->arraysize ? b1->arraysize : b2->arraysize;
for (size_t k = 0; k < minlength; ++k) {
@@ -267,8 +267,8 @@ size_t bitset_maximum(const bitset_t *bitset) {
/* Returns true if bitsets share no common elements, false otherwise.
*
* Performs early-out if common element found. */
-bool bitsets_disjoint(const bitset_t *CBITSET_RESTRICT b1,
- const bitset_t *CBITSET_RESTRICT b2) {
+bool bitsets_disjoint(const bitset_t *CROARING_CBITSET_RESTRICT b1,
+ const bitset_t *CROARING_CBITSET_RESTRICT b2) {
size_t minlength =
b1->arraysize < b2->arraysize ? b1->arraysize : b2->arraysize;
@@ -282,8 +282,8 @@ bool bitsets_disjoint(const bitset_t *CBITSET_RESTRICT b1,
* disjoint.
*
* Performs early-out if common element found. */
-bool bitsets_intersect(const bitset_t *CBITSET_RESTRICT b1,
- const bitset_t *CBITSET_RESTRICT b2) {
+bool bitsets_intersect(const bitset_t *CROARING_CBITSET_RESTRICT b1,
+ const bitset_t *CROARING_CBITSET_RESTRICT b2) {
size_t minlength =
b1->arraysize < b2->arraysize ? b1->arraysize : b2->arraysize;
@@ -307,8 +307,8 @@ static bool any_bits_set(const bitset_t *b, size_t starting_loc) {
/* Returns true if b1 has all of b2's bits set.
*
* Performs early out if a bit is found in b2 that is not found in b1. */
-bool bitset_contains_all(const bitset_t *CBITSET_RESTRICT b1,
- const bitset_t *CBITSET_RESTRICT b2) {
+bool bitset_contains_all(const bitset_t *CROARING_CBITSET_RESTRICT b1,
+ const bitset_t *CROARING_CBITSET_RESTRICT b2) {
size_t min_size = b1->arraysize;
if (b1->arraysize > b2->arraysize) {
min_size = b2->arraysize;
@@ -325,8 +325,8 @@ bool bitset_contains_all(const bitset_t *CBITSET_RESTRICT b1,
return true;
}
-size_t bitset_union_count(const bitset_t *CBITSET_RESTRICT b1,
- const bitset_t *CBITSET_RESTRICT b2) {
+size_t bitset_union_count(const bitset_t *CROARING_CBITSET_RESTRICT b1,
+ const bitset_t *CROARING_CBITSET_RESTRICT b2) {
size_t answer = 0;
size_t minlength =
b1->arraysize < b2->arraysize ? b1->arraysize : b2->arraysize;
@@ -366,8 +366,8 @@ size_t bitset_union_count(const bitset_t *CBITSET_RESTRICT b1,
return answer;
}
-void bitset_inplace_intersection(bitset_t *CBITSET_RESTRICT b1,
- const bitset_t *CBITSET_RESTRICT b2) {
+void bitset_inplace_intersection(bitset_t *CROARING_CBITSET_RESTRICT b1,
+ const bitset_t *CROARING_CBITSET_RESTRICT b2) {
size_t minlength =
b1->arraysize < b2->arraysize ? b1->arraysize : b2->arraysize;
size_t k = 0;
@@ -379,8 +379,8 @@ void bitset_inplace_intersection(bitset_t *CBITSET_RESTRICT b1,
}
}
-size_t bitset_intersection_count(const bitset_t *CBITSET_RESTRICT b1,
- const bitset_t *CBITSET_RESTRICT b2) {
+size_t bitset_intersection_count(const bitset_t *CROARING_CBITSET_RESTRICT b1,
+ const bitset_t *CROARING_CBITSET_RESTRICT b2) {
size_t answer = 0;
size_t minlength =
b1->arraysize < b2->arraysize ? b1->arraysize : b2->arraysize;
@@ -390,8 +390,8 @@ size_t bitset_intersection_count(const bitset_t *CBITSET_RESTRICT b1,
return answer;
}
-void bitset_inplace_difference(bitset_t *CBITSET_RESTRICT b1,
- const bitset_t *CBITSET_RESTRICT b2) {
+void bitset_inplace_difference(bitset_t *CROARING_CBITSET_RESTRICT b1,
+ const bitset_t *CROARING_CBITSET_RESTRICT b2) {
size_t minlength =
b1->arraysize < b2->arraysize ? b1->arraysize : b2->arraysize;
size_t k = 0;
@@ -400,8 +400,8 @@ void bitset_inplace_difference(bitset_t *CBITSET_RESTRICT b1,
}
}
-size_t bitset_difference_count(const bitset_t *CBITSET_RESTRICT b1,
- const bitset_t *CBITSET_RESTRICT b2) {
+size_t bitset_difference_count(const bitset_t *CROARING_CBITSET_RESTRICT b1,
+ const bitset_t *CROARING_CBITSET_RESTRICT b2) {
size_t minlength =
b1->arraysize < b2->arraysize ? b1->arraysize : b2->arraysize;
size_t k = 0;
@@ -415,8 +415,9 @@ size_t bitset_difference_count(const bitset_t *CBITSET_RESTRICT b1,
return answer;
}
-bool bitset_inplace_symmetric_difference(bitset_t *CBITSET_RESTRICT b1,
- const bitset_t *CBITSET_RESTRICT b2) {
+bool bitset_inplace_symmetric_difference(
+ bitset_t *CROARING_CBITSET_RESTRICT b1,
+ const bitset_t *CROARING_CBITSET_RESTRICT b2) {
size_t minlength =
b1->arraysize < b2->arraysize ? b1->arraysize : b2->arraysize;
size_t k = 0;
@@ -432,8 +433,9 @@ bool bitset_inplace_symmetric_difference(bitset_t *CBITSET_RESTRICT b1,
return true;
}
-size_t bitset_symmetric_difference_count(const bitset_t *CBITSET_RESTRICT b1,
- const bitset_t *CBITSET_RESTRICT b2) {
+size_t bitset_symmetric_difference_count(
+ const bitset_t *CROARING_CBITSET_RESTRICT b1,
+ const bitset_t *CROARING_CBITSET_RESTRICT b2) {
size_t minlength =
b1->arraysize < b2->arraysize ? b1->arraysize : b2->arraysize;
size_t k = 0;
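The `bitset.c` functions touched above operate on the dynamic bitset type from `roaring/bitset/bitset.h`. A brief usage sketch, assuming the create/set/count helpers keep their upstream signatures:

```c
/* Hedged sketch: OR the second bitset into the first in place, then count. */
#include <stdio.h>
#include <roaring/bitset/bitset.h>

int main(void) {
    bitset_t *b1 = bitset_create();
    bitset_t *b2 = bitset_create();
    bitset_set(b1, 1);
    bitset_set(b1, 1000);
    bitset_set(b2, 1000);
    bitset_set(b2, 5);
    if (bitset_inplace_union(b1, b2)) {                  /* b1 |= b2, growing b1 if needed */
        printf("cardinality = %zu\n", bitset_count(b1)); /* expect 3 */
    }
    bitset_free(b1);
    bitset_free(b2);
    return 0;
}
```
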
diff --git a/contrib/libs/croaring/src/containers/bitset.c b/contrib/libs/croaring/src/containers/bitset.c
index 9be73ba21f..f49739594c 100644
--- a/contrib/libs/croaring/src/containers/bitset.c
+++ b/contrib/libs/croaring/src/containers/bitset.c
@@ -252,8 +252,8 @@ bool bitset_container_intersect(const bitset_container_t *src_1,
}
#if CROARING_IS_X64
-#ifndef WORDS_IN_AVX2_REG
-#define WORDS_IN_AVX2_REG sizeof(__m256i) / sizeof(uint64_t)
+#ifndef CROARING_WORDS_IN_AVX2_REG
+#define CROARING_WORDS_IN_AVX2_REG sizeof(__m256i) / sizeof(uint64_t)
#endif
#ifndef WORDS_IN_AVX512_REG
#define WORDS_IN_AVX512_REG sizeof(__m512i) / sizeof(uint64_t)
@@ -284,7 +284,7 @@ int bitset_container_compute_cardinality(const bitset_container_t *bitset) {
if (support & ROARING_SUPPORTS_AVX2) {
return (int)avx2_harley_seal_popcount256(
(const __m256i *)bitset->words,
- BITSET_CONTAINER_SIZE_IN_WORDS / (WORDS_IN_AVX2_REG));
+ BITSET_CONTAINER_SIZE_IN_WORDS / (CROARING_WORDS_IN_AVX2_REG));
} else {
return _scalar_bitset_container_compute_cardinality(bitset);
}
@@ -333,7 +333,7 @@ int bitset_container_compute_cardinality(const bitset_container_t *bitset) {
#if CROARING_IS_X64
-#define BITSET_CONTAINER_FN_REPEAT 8
+#define CROARING_BITSET_CONTAINER_FN_REPEAT 8
#ifndef WORDS_IN_AVX512_REG
#define WORDS_IN_AVX512_REG sizeof(__m512i) / sizeof(uint64_t)
#endif // WORDS_IN_AVX512_REG
@@ -341,7 +341,7 @@ int bitset_container_compute_cardinality(const bitset_container_t *bitset) {
/* Computes a binary operation (eg union) on bitset1 and bitset2 and write the
result to bitsetout */
// clang-format off
-#define AVX512_BITSET_CONTAINER_FN1(before, opname, opsymbol, avx_intrinsic, \
+#define CROARING_AVX512_BITSET_CONTAINER_FN1(before, opname, opsymbol, avx_intrinsic, \
neon_intrinsic, after) \
static inline int _avx512_bitset_container_##opname##_nocard( \
const bitset_container_t *src_1, const bitset_container_t *src_2, \
@@ -395,7 +395,7 @@ int bitset_container_compute_cardinality(const bitset_container_t *bitset) {
return dst->cardinality; \
}
-#define AVX512_BITSET_CONTAINER_FN2(before, opname, opsymbol, avx_intrinsic, \
+#define CROARING_AVX512_BITSET_CONTAINER_FN2(before, opname, opsymbol, avx_intrinsic, \
neon_intrinsic, after) \
/* next, a version that updates cardinality*/ \
static inline int _avx512_bitset_container_##opname(const bitset_container_t *src_1, \
@@ -409,7 +409,7 @@ int bitset_container_compute_cardinality(const bitset_container_t *bitset) {
return dst->cardinality; \
}
-#define AVX512_BITSET_CONTAINER_FN3(before, opname, opsymbol, avx_intrinsic, \
+#define CROARING_AVX512_BITSET_CONTAINER_FN3(before, opname, opsymbol, avx_intrinsic, \
neon_intrinsic, after) \
/* next, a version that just computes the cardinality*/ \
static inline int _avx512_bitset_container_##opname##_justcard( \
@@ -424,85 +424,85 @@ int bitset_container_compute_cardinality(const bitset_container_t *bitset) {
// we duplicate the function because other containers use the "or" term, makes API more consistent
#if CROARING_COMPILER_SUPPORTS_AVX512
CROARING_TARGET_AVX512
-AVX512_BITSET_CONTAINER_FN1(CROARING_TARGET_AVX512, or, |, _mm512_or_si512, vorrq_u64, CROARING_UNTARGET_AVX512)
+CROARING_AVX512_BITSET_CONTAINER_FN1(CROARING_TARGET_AVX512, or, |, _mm512_or_si512, vorrq_u64, CROARING_UNTARGET_AVX512)
CROARING_UNTARGET_AVX512
CROARING_TARGET_AVX512
-AVX512_BITSET_CONTAINER_FN1(CROARING_TARGET_AVX512, union, |, _mm512_or_si512, vorrq_u64, CROARING_UNTARGET_AVX512)
+CROARING_AVX512_BITSET_CONTAINER_FN1(CROARING_TARGET_AVX512, union, |, _mm512_or_si512, vorrq_u64, CROARING_UNTARGET_AVX512)
CROARING_UNTARGET_AVX512
// we duplicate the function because other containers use the "intersection" term, makes API more consistent
CROARING_TARGET_AVX512
-AVX512_BITSET_CONTAINER_FN1(CROARING_TARGET_AVX512, and, &, _mm512_and_si512, vandq_u64, CROARING_UNTARGET_AVX512)
+CROARING_AVX512_BITSET_CONTAINER_FN1(CROARING_TARGET_AVX512, and, &, _mm512_and_si512, vandq_u64, CROARING_UNTARGET_AVX512)
CROARING_UNTARGET_AVX512
CROARING_TARGET_AVX512
-AVX512_BITSET_CONTAINER_FN1(CROARING_TARGET_AVX512, intersection, &, _mm512_and_si512, vandq_u64, CROARING_UNTARGET_AVX512)
+CROARING_AVX512_BITSET_CONTAINER_FN1(CROARING_TARGET_AVX512, intersection, &, _mm512_and_si512, vandq_u64, CROARING_UNTARGET_AVX512)
CROARING_UNTARGET_AVX512
CROARING_TARGET_AVX512
-AVX512_BITSET_CONTAINER_FN1(CROARING_TARGET_AVX512, xor, ^, _mm512_xor_si512, veorq_u64, CROARING_UNTARGET_AVX512)
+CROARING_AVX512_BITSET_CONTAINER_FN1(CROARING_TARGET_AVX512, xor, ^, _mm512_xor_si512, veorq_u64, CROARING_UNTARGET_AVX512)
CROARING_UNTARGET_AVX512
CROARING_TARGET_AVX512
-AVX512_BITSET_CONTAINER_FN1(CROARING_TARGET_AVX512, andnot, &~, _mm512_andnot_si512, vbicq_u64, CROARING_UNTARGET_AVX512)
+CROARING_AVX512_BITSET_CONTAINER_FN1(CROARING_TARGET_AVX512, andnot, &~, _mm512_andnot_si512, vbicq_u64, CROARING_UNTARGET_AVX512)
CROARING_UNTARGET_AVX512
// we duplicate the function because other containers use the "or" term, makes API more consistent
CROARING_TARGET_AVX512
-AVX512_BITSET_CONTAINER_FN2(CROARING_TARGET_AVX512, or, |, _mm512_or_si512, vorrq_u64, CROARING_UNTARGET_AVX512)
+CROARING_AVX512_BITSET_CONTAINER_FN2(CROARING_TARGET_AVX512, or, |, _mm512_or_si512, vorrq_u64, CROARING_UNTARGET_AVX512)
CROARING_UNTARGET_AVX512
CROARING_TARGET_AVX512
-AVX512_BITSET_CONTAINER_FN2(CROARING_TARGET_AVX512, union, |, _mm512_or_si512, vorrq_u64, CROARING_UNTARGET_AVX512)
+CROARING_AVX512_BITSET_CONTAINER_FN2(CROARING_TARGET_AVX512, union, |, _mm512_or_si512, vorrq_u64, CROARING_UNTARGET_AVX512)
CROARING_UNTARGET_AVX512
// we duplicate the function because other containers use the "intersection" term, makes API more consistent
CROARING_TARGET_AVX512
-AVX512_BITSET_CONTAINER_FN2(CROARING_TARGET_AVX512, and, &, _mm512_and_si512, vandq_u64, CROARING_UNTARGET_AVX512)
+CROARING_AVX512_BITSET_CONTAINER_FN2(CROARING_TARGET_AVX512, and, &, _mm512_and_si512, vandq_u64, CROARING_UNTARGET_AVX512)
CROARING_UNTARGET_AVX512
CROARING_TARGET_AVX512
-AVX512_BITSET_CONTAINER_FN2(CROARING_TARGET_AVX512, intersection, &, _mm512_and_si512, vandq_u64, CROARING_UNTARGET_AVX512)
+CROARING_AVX512_BITSET_CONTAINER_FN2(CROARING_TARGET_AVX512, intersection, &, _mm512_and_si512, vandq_u64, CROARING_UNTARGET_AVX512)
CROARING_UNTARGET_AVX512
CROARING_TARGET_AVX512
-AVX512_BITSET_CONTAINER_FN2(CROARING_TARGET_AVX512, xor, ^, _mm512_xor_si512, veorq_u64, CROARING_UNTARGET_AVX512)
+CROARING_AVX512_BITSET_CONTAINER_FN2(CROARING_TARGET_AVX512, xor, ^, _mm512_xor_si512, veorq_u64, CROARING_UNTARGET_AVX512)
CROARING_UNTARGET_AVX512
CROARING_TARGET_AVX512
-AVX512_BITSET_CONTAINER_FN2(CROARING_TARGET_AVX512, andnot, &~, _mm512_andnot_si512, vbicq_u64, CROARING_UNTARGET_AVX512)
+CROARING_AVX512_BITSET_CONTAINER_FN2(CROARING_TARGET_AVX512, andnot, &~, _mm512_andnot_si512, vbicq_u64, CROARING_UNTARGET_AVX512)
CROARING_UNTARGET_AVX512
// we duplicate the function because other containers use the "or" term, makes API more consistent
CROARING_TARGET_AVX512
-AVX512_BITSET_CONTAINER_FN3(CROARING_TARGET_AVX512, or, |, _mm512_or_si512, vorrq_u64, CROARING_UNTARGET_AVX512)
+CROARING_AVX512_BITSET_CONTAINER_FN3(CROARING_TARGET_AVX512, or, |, _mm512_or_si512, vorrq_u64, CROARING_UNTARGET_AVX512)
CROARING_UNTARGET_AVX512
CROARING_TARGET_AVX512
-AVX512_BITSET_CONTAINER_FN3(CROARING_TARGET_AVX512, union, |, _mm512_or_si512, vorrq_u64, CROARING_UNTARGET_AVX512)
+CROARING_AVX512_BITSET_CONTAINER_FN3(CROARING_TARGET_AVX512, union, |, _mm512_or_si512, vorrq_u64, CROARING_UNTARGET_AVX512)
CROARING_UNTARGET_AVX512
// we duplicate the function because other containers use the "intersection" term, makes API more consistent
CROARING_TARGET_AVX512
-AVX512_BITSET_CONTAINER_FN3(CROARING_TARGET_AVX512, and, &, _mm512_and_si512, vandq_u64, CROARING_UNTARGET_AVX512)
+CROARING_AVX512_BITSET_CONTAINER_FN3(CROARING_TARGET_AVX512, and, &, _mm512_and_si512, vandq_u64, CROARING_UNTARGET_AVX512)
CROARING_UNTARGET_AVX512
CROARING_TARGET_AVX512
-AVX512_BITSET_CONTAINER_FN3(CROARING_TARGET_AVX512, intersection, &, _mm512_and_si512, vandq_u64, CROARING_UNTARGET_AVX512)
+CROARING_AVX512_BITSET_CONTAINER_FN3(CROARING_TARGET_AVX512, intersection, &, _mm512_and_si512, vandq_u64, CROARING_UNTARGET_AVX512)
CROARING_UNTARGET_AVX512
CROARING_TARGET_AVX512
-AVX512_BITSET_CONTAINER_FN3(CROARING_TARGET_AVX512, xor, ^, _mm512_xor_si512, veorq_u64, CROARING_UNTARGET_AVX512)
+CROARING_AVX512_BITSET_CONTAINER_FN3(CROARING_TARGET_AVX512, xor, ^, _mm512_xor_si512, veorq_u64, CROARING_UNTARGET_AVX512)
CROARING_UNTARGET_AVX512
CROARING_TARGET_AVX512
-AVX512_BITSET_CONTAINER_FN3(CROARING_TARGET_AVX512, andnot, &~, _mm512_andnot_si512, vbicq_u64, CROARING_UNTARGET_AVX512)
+CROARING_AVX512_BITSET_CONTAINER_FN3(CROARING_TARGET_AVX512, andnot, &~, _mm512_andnot_si512, vbicq_u64, CROARING_UNTARGET_AVX512)
CROARING_UNTARGET_AVX512
#endif // CROARING_COMPILER_SUPPORTS_AVX512
-#ifndef WORDS_IN_AVX2_REG
-#define WORDS_IN_AVX2_REG sizeof(__m256i) / sizeof(uint64_t)
-#endif // WORDS_IN_AVX2_REG
-#define LOOP_SIZE \
+#ifndef CROARING_WORDS_IN_AVX2_REG
+#define CROARING_WORDS_IN_AVX2_REG sizeof(__m256i) / sizeof(uint64_t)
+#endif // CROARING_WORDS_IN_AVX2_REG
+#define CROARING_LOOP_SIZE \
BITSET_CONTAINER_SIZE_IN_WORDS / \
- ((WORDS_IN_AVX2_REG)*BITSET_CONTAINER_FN_REPEAT)
+ ((CROARING_WORDS_IN_AVX2_REG)*CROARING_BITSET_CONTAINER_FN_REPEAT)
/* Computes a binary operation (eg union) on bitset1 and bitset2 and write the
result to bitsetout */
// clang-format off
-#define AVX_BITSET_CONTAINER_FN1(before, opname, opsymbol, avx_intrinsic, \
+#define CROARING_AVX_BITSET_CONTAINER_FN1(before, opname, opsymbol, avx_intrinsic, \
neon_intrinsic, after) \
static inline int _avx2_bitset_container_##opname##_nocard( \
const bitset_container_t *src_1, const bitset_container_t *src_2, \
@@ -513,7 +513,7 @@ CROARING_UNTARGET_AVX512
uint8_t *out = (uint8_t *)dst->words; \
const int innerloop = 8; \
for (size_t i = 0; \
- i < BITSET_CONTAINER_SIZE_IN_WORDS / (WORDS_IN_AVX2_REG); \
+ i < BITSET_CONTAINER_SIZE_IN_WORDS / (CROARING_WORDS_IN_AVX2_REG); \
i += innerloop) { \
__m256i A1, A2, AO; \
A1 = _mm256_lddqu_si256((const __m256i *)(words_1)); \
@@ -556,7 +556,7 @@ CROARING_UNTARGET_AVX512
return dst->cardinality; \
}
-#define AVX_BITSET_CONTAINER_FN2(before, opname, opsymbol, avx_intrinsic, \
+#define CROARING_AVX_BITSET_CONTAINER_FN2(before, opname, opsymbol, avx_intrinsic, \
neon_intrinsic, after) \
/* next, a version that updates cardinality*/ \
static inline int _avx2_bitset_container_##opname(const bitset_container_t *src_1, \
@@ -567,11 +567,11 @@ CROARING_UNTARGET_AVX512
__m256i *out = (__m256i *)dst->words; \
dst->cardinality = (int32_t)avx2_harley_seal_popcount256andstore_##opname( \
words_2, words_1, out, \
- BITSET_CONTAINER_SIZE_IN_WORDS / (WORDS_IN_AVX2_REG)); \
+ BITSET_CONTAINER_SIZE_IN_WORDS / (CROARING_WORDS_IN_AVX2_REG)); \
return dst->cardinality; \
} \
-#define AVX_BITSET_CONTAINER_FN3(before, opname, opsymbol, avx_intrinsic, \
+#define CROARING_AVX_BITSET_CONTAINER_FN3(before, opname, opsymbol, avx_intrinsic, \
neon_intrinsic, after) \
/* next, a version that just computes the cardinality*/ \
static inline int _avx2_bitset_container_##opname##_justcard( \
@@ -579,77 +579,77 @@ CROARING_UNTARGET_AVX512
const __m256i *__restrict__ data1 = (const __m256i *)src_1->words; \
const __m256i *__restrict__ data2 = (const __m256i *)src_2->words; \
return (int)avx2_harley_seal_popcount256_##opname( \
- data2, data1, BITSET_CONTAINER_SIZE_IN_WORDS / (WORDS_IN_AVX2_REG)); \
+ data2, data1, BITSET_CONTAINER_SIZE_IN_WORDS / (CROARING_WORDS_IN_AVX2_REG)); \
}
// we duplicate the function because other containers use the "or" term, makes API more consistent
CROARING_TARGET_AVX2
-AVX_BITSET_CONTAINER_FN1(CROARING_TARGET_AVX2, or, |, _mm256_or_si256, vorrq_u64, CROARING_UNTARGET_AVX2)
+CROARING_AVX_BITSET_CONTAINER_FN1(CROARING_TARGET_AVX2, or, |, _mm256_or_si256, vorrq_u64, CROARING_UNTARGET_AVX2)
CROARING_UNTARGET_AVX2
CROARING_TARGET_AVX2
-AVX_BITSET_CONTAINER_FN1(CROARING_TARGET_AVX2, union, |, _mm256_or_si256, vorrq_u64, CROARING_UNTARGET_AVX2)
+CROARING_AVX_BITSET_CONTAINER_FN1(CROARING_TARGET_AVX2, union, |, _mm256_or_si256, vorrq_u64, CROARING_UNTARGET_AVX2)
CROARING_UNTARGET_AVX2
// we duplicate the function because other containers use the "intersection" term, makes API more consistent
CROARING_TARGET_AVX2
-AVX_BITSET_CONTAINER_FN1(CROARING_TARGET_AVX2, and, &, _mm256_and_si256, vandq_u64, CROARING_UNTARGET_AVX2)
+CROARING_AVX_BITSET_CONTAINER_FN1(CROARING_TARGET_AVX2, and, &, _mm256_and_si256, vandq_u64, CROARING_UNTARGET_AVX2)
CROARING_UNTARGET_AVX2
CROARING_TARGET_AVX2
-AVX_BITSET_CONTAINER_FN1(CROARING_TARGET_AVX2, intersection, &, _mm256_and_si256, vandq_u64, CROARING_UNTARGET_AVX2)
+CROARING_AVX_BITSET_CONTAINER_FN1(CROARING_TARGET_AVX2, intersection, &, _mm256_and_si256, vandq_u64, CROARING_UNTARGET_AVX2)
CROARING_UNTARGET_AVX2
CROARING_TARGET_AVX2
-AVX_BITSET_CONTAINER_FN1(CROARING_TARGET_AVX2, xor, ^, _mm256_xor_si256, veorq_u64, CROARING_UNTARGET_AVX2)
+CROARING_AVX_BITSET_CONTAINER_FN1(CROARING_TARGET_AVX2, xor, ^, _mm256_xor_si256, veorq_u64, CROARING_UNTARGET_AVX2)
CROARING_UNTARGET_AVX2
CROARING_TARGET_AVX2
-AVX_BITSET_CONTAINER_FN1(CROARING_TARGET_AVX2, andnot, &~, _mm256_andnot_si256, vbicq_u64, CROARING_UNTARGET_AVX2)
+CROARING_AVX_BITSET_CONTAINER_FN1(CROARING_TARGET_AVX2, andnot, &~, _mm256_andnot_si256, vbicq_u64, CROARING_UNTARGET_AVX2)
CROARING_UNTARGET_AVX2
// we duplicate the function because other containers use the "or" term, makes API more consistent
CROARING_TARGET_AVX2
-AVX_BITSET_CONTAINER_FN2(CROARING_TARGET_AVX2, or, |, _mm256_or_si256, vorrq_u64, CROARING_UNTARGET_AVX2)
+CROARING_AVX_BITSET_CONTAINER_FN2(CROARING_TARGET_AVX2, or, |, _mm256_or_si256, vorrq_u64, CROARING_UNTARGET_AVX2)
CROARING_UNTARGET_AVX2
CROARING_TARGET_AVX2
-AVX_BITSET_CONTAINER_FN2(CROARING_TARGET_AVX2, union, |, _mm256_or_si256, vorrq_u64, CROARING_UNTARGET_AVX2)
+CROARING_AVX_BITSET_CONTAINER_FN2(CROARING_TARGET_AVX2, union, |, _mm256_or_si256, vorrq_u64, CROARING_UNTARGET_AVX2)
CROARING_UNTARGET_AVX2
// we duplicate the function because other containers use the "intersection" term, makes API more consistent
CROARING_TARGET_AVX2
-AVX_BITSET_CONTAINER_FN2(CROARING_TARGET_AVX2, and, &, _mm256_and_si256, vandq_u64, CROARING_UNTARGET_AVX2)
+CROARING_AVX_BITSET_CONTAINER_FN2(CROARING_TARGET_AVX2, and, &, _mm256_and_si256, vandq_u64, CROARING_UNTARGET_AVX2)
CROARING_UNTARGET_AVX2
CROARING_TARGET_AVX2
-AVX_BITSET_CONTAINER_FN2(CROARING_TARGET_AVX2, intersection, &, _mm256_and_si256, vandq_u64, CROARING_UNTARGET_AVX2)
+CROARING_AVX_BITSET_CONTAINER_FN2(CROARING_TARGET_AVX2, intersection, &, _mm256_and_si256, vandq_u64, CROARING_UNTARGET_AVX2)
CROARING_UNTARGET_AVX2
CROARING_TARGET_AVX2
-AVX_BITSET_CONTAINER_FN2(CROARING_TARGET_AVX2, xor, ^, _mm256_xor_si256, veorq_u64, CROARING_UNTARGET_AVX2)
+CROARING_AVX_BITSET_CONTAINER_FN2(CROARING_TARGET_AVX2, xor, ^, _mm256_xor_si256, veorq_u64, CROARING_UNTARGET_AVX2)
CROARING_UNTARGET_AVX2
CROARING_TARGET_AVX2
-AVX_BITSET_CONTAINER_FN2(CROARING_TARGET_AVX2, andnot, &~, _mm256_andnot_si256, vbicq_u64, CROARING_UNTARGET_AVX2)
+CROARING_AVX_BITSET_CONTAINER_FN2(CROARING_TARGET_AVX2, andnot, &~, _mm256_andnot_si256, vbicq_u64, CROARING_UNTARGET_AVX2)
CROARING_UNTARGET_AVX2
// we duplicate the function because other containers use the "or" term, makes API more consistent
CROARING_TARGET_AVX2
-AVX_BITSET_CONTAINER_FN3(CROARING_TARGET_AVX2, or, |, _mm256_or_si256, vorrq_u64, CROARING_UNTARGET_AVX2)
+CROARING_AVX_BITSET_CONTAINER_FN3(CROARING_TARGET_AVX2, or, |, _mm256_or_si256, vorrq_u64, CROARING_UNTARGET_AVX2)
CROARING_UNTARGET_AVX2
CROARING_TARGET_AVX2
-AVX_BITSET_CONTAINER_FN3(CROARING_TARGET_AVX2, union, |, _mm256_or_si256, vorrq_u64, CROARING_UNTARGET_AVX2)
+CROARING_AVX_BITSET_CONTAINER_FN3(CROARING_TARGET_AVX2, union, |, _mm256_or_si256, vorrq_u64, CROARING_UNTARGET_AVX2)
CROARING_UNTARGET_AVX2
// we duplicate the function because other containers use the "intersection" term, makes API more consistent
CROARING_TARGET_AVX2
-AVX_BITSET_CONTAINER_FN3(CROARING_TARGET_AVX2, and, &, _mm256_and_si256, vandq_u64, CROARING_UNTARGET_AVX2)
+CROARING_AVX_BITSET_CONTAINER_FN3(CROARING_TARGET_AVX2, and, &, _mm256_and_si256, vandq_u64, CROARING_UNTARGET_AVX2)
CROARING_UNTARGET_AVX2
CROARING_TARGET_AVX2
-AVX_BITSET_CONTAINER_FN3(CROARING_TARGET_AVX2, intersection, &, _mm256_and_si256, vandq_u64, CROARING_UNTARGET_AVX2)
+CROARING_AVX_BITSET_CONTAINER_FN3(CROARING_TARGET_AVX2, intersection, &, _mm256_and_si256, vandq_u64, CROARING_UNTARGET_AVX2)
CROARING_UNTARGET_AVX2
CROARING_TARGET_AVX2
-AVX_BITSET_CONTAINER_FN3(CROARING_TARGET_AVX2, xor, ^, _mm256_xor_si256, veorq_u64, CROARING_UNTARGET_AVX2)
+CROARING_AVX_BITSET_CONTAINER_FN3(CROARING_TARGET_AVX2, xor, ^, _mm256_xor_si256, veorq_u64, CROARING_UNTARGET_AVX2)
CROARING_UNTARGET_AVX2
CROARING_TARGET_AVX2
-AVX_BITSET_CONTAINER_FN3(CROARING_TARGET_AVX2, andnot, &~, _mm256_andnot_si256, vbicq_u64, CROARING_UNTARGET_AVX2)
+CROARING_AVX_BITSET_CONTAINER_FN3(CROARING_TARGET_AVX2, andnot, &~, _mm256_andnot_si256, vbicq_u64, CROARING_UNTARGET_AVX2)
CROARING_UNTARGET_AVX2
@@ -711,7 +711,7 @@ SCALAR_BITSET_CONTAINER_FN(xor, ^, _mm256_xor_si256, veorq_u64)
SCALAR_BITSET_CONTAINER_FN(andnot, &~, _mm256_andnot_si256, vbicq_u64)
#if CROARING_COMPILER_SUPPORTS_AVX512
-#define BITSET_CONTAINER_FN(opname, opsymbol, avx_intrinsic, neon_intrinsic) \
+#define CROARING_BITSET_CONTAINER_FN(opname, opsymbol, avx_intrinsic, neon_intrinsic) \
int bitset_container_##opname(const bitset_container_t *src_1, \
const bitset_container_t *src_2, \
bitset_container_t *dst) { \
@@ -754,7 +754,7 @@ SCALAR_BITSET_CONTAINER_FN(andnot, &~, _mm256_andnot_si256, vbicq_u64)
#else // CROARING_COMPILER_SUPPORTS_AVX512
-#define BITSET_CONTAINER_FN(opname, opsymbol, avx_intrinsic, neon_intrinsic) \
+#define CROARING_BITSET_CONTAINER_FN(opname, opsymbol, avx_intrinsic, neon_intrinsic) \
int bitset_container_##opname(const bitset_container_t *src_1, \
const bitset_container_t *src_2, \
bitset_container_t *dst) { \
@@ -786,7 +786,7 @@ SCALAR_BITSET_CONTAINER_FN(andnot, &~, _mm256_andnot_si256, vbicq_u64)
#elif defined(CROARING_USENEON)
-#define BITSET_CONTAINER_FN(opname, opsymbol, avx_intrinsic, neon_intrinsic) \
+#define CROARING_BITSET_CONTAINER_FN(opname, opsymbol, avx_intrinsic, neon_intrinsic) \
int bitset_container_##opname(const bitset_container_t *src_1, \
const bitset_container_t *src_2, \
bitset_container_t *dst) { \
@@ -874,7 +874,7 @@ int bitset_container_##opname##_justcard(const bitset_container_t *src_1, \
#else
-#define BITSET_CONTAINER_FN(opname, opsymbol, avx_intrinsic, neon_intrinsic) \
+#define CROARING_BITSET_CONTAINER_FN(opname, opsymbol, avx_intrinsic, neon_intrinsic) \
int bitset_container_##opname(const bitset_container_t *src_1, \
const bitset_container_t *src_2, \
bitset_container_t *dst) { \
@@ -922,15 +922,15 @@ int bitset_container_##opname##_justcard(const bitset_container_t *src_1, \
#endif // CROARING_IS_X64
// we duplicate the function because other containers use the "or" term, makes API more consistent
-BITSET_CONTAINER_FN(or, |, _mm256_or_si256, vorrq_u64)
-BITSET_CONTAINER_FN(union, |, _mm256_or_si256, vorrq_u64)
+CROARING_BITSET_CONTAINER_FN(or, |, _mm256_or_si256, vorrq_u64)
+CROARING_BITSET_CONTAINER_FN(union, |, _mm256_or_si256, vorrq_u64)
// we duplicate the function because other containers use the "intersection" term, makes API more consistent
-BITSET_CONTAINER_FN(and, &, _mm256_and_si256, vandq_u64)
-BITSET_CONTAINER_FN(intersection, &, _mm256_and_si256, vandq_u64)
+CROARING_BITSET_CONTAINER_FN(and, &, _mm256_and_si256, vandq_u64)
+CROARING_BITSET_CONTAINER_FN(intersection, &, _mm256_and_si256, vandq_u64)
-BITSET_CONTAINER_FN(xor, ^, _mm256_xor_si256, veorq_u64)
-BITSET_CONTAINER_FN(andnot, &~, _mm256_andnot_si256, vbicq_u64)
+CROARING_BITSET_CONTAINER_FN(xor, ^, _mm256_xor_si256, veorq_u64)
+CROARING_BITSET_CONTAINER_FN(andnot, &~, _mm256_andnot_si256, vbicq_u64)
// clang-format on
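The value of prefixing these helper macros (WORDS_IN_AVX2_REG, LOOP_SIZE, the *_BITSET_CONTAINER_FN* generators) with CROARING_ is macro hygiene: a name guarded by #ifndef silently defers to whatever definition the including translation unit already has, which matters when croaring is compiled together with other code. A minimal sketch of the hazard the rename removes, assuming a hypothetical host project that happens to reuse the same name (the host-side define below is illustrative, not from this diff):

    /* collision_sketch.c -- why library-internal macros gained a CROARING_ prefix */
    #include <stdio.h>

    /* imagine the host application already uses this name for its own tuning knob */
    #define WORDS_IN_AVX2_REG 8

    /* pre-3.0.1 croaring guarded its definition with #ifndef, so its intended value
     * (sizeof(__m256i) / sizeof(uint64_t), i.e. 4) would silently be replaced by the
     * host's 8 and every loop bound derived from it would be wrong. */
    #ifndef WORDS_IN_AVX2_REG
    #define WORDS_IN_AVX2_REG 4
    #endif

    int main(void) {
        printf("library sees WORDS_IN_AVX2_REG = %d\n", WORDS_IN_AVX2_REG); /* prints 8 */
        return 0;
    }

With the 3.0.1 names (CROARING_WORDS_IN_AVX2_REG, CROARING_LOOP_SIZE, CROARING_BITSET_CONTAINER_FN, ...) the library and the host project can no longer shadow each other by accident.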
diff --git a/contrib/libs/croaring/src/containers/mixed_andnot.c b/contrib/libs/croaring/src/containers/mixed_andnot.c
index 8744c84c7b..8641e16b95 100644
--- a/contrib/libs/croaring/src/containers/mixed_andnot.c
+++ b/contrib/libs/croaring/src/containers/mixed_andnot.c
@@ -302,7 +302,7 @@ int run_array_container_andnot(const run_container_t *src_1,
if (end <= xstart) {
// output the first run
answer->runs[answer->n_runs++] =
- MAKE_RLE16(start, end - start - 1);
+ CROARING_MAKE_RLE16(start, end - start - 1);
rlepos++;
if (rlepos < src_1->n_runs) {
start = src_1->runs[rlepos].value;
@@ -317,7 +317,7 @@ int run_array_container_andnot(const run_container_t *src_1,
} else {
if (start < xstart) {
answer->runs[answer->n_runs++] =
- MAKE_RLE16(start, xstart - start - 1);
+ CROARING_MAKE_RLE16(start, xstart - start - 1);
}
if (xstart + 1 < end) {
start = xstart + 1;
@@ -331,7 +331,8 @@ int run_array_container_andnot(const run_container_t *src_1,
}
}
if (rlepos < src_1->n_runs) {
- answer->runs[answer->n_runs++] = MAKE_RLE16(start, end - start - 1);
+ answer->runs[answer->n_runs++] =
+ CROARING_MAKE_RLE16(start, end - start - 1);
rlepos++;
if (rlepos < src_1->n_runs) {
memcpy(answer->runs + answer->n_runs, src_1->runs + rlepos,
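These call sites only swap the macro name, but the end - start - 1 arithmetic is worth spelling out: a run covering the half-open range [start, end) holds end - start values, and the length field of an rle16_t stores that count minus one. A small self-contained sketch of the same arithmetic; SKETCH_MAKE_RLE16 is a stand-in for illustration, not croaring's actual macro definition:

    #include <stdint.h>
    #include <stdio.h>

    typedef struct { uint16_t value; uint16_t length; } rle16_t;   /* run = [value, value + length] */
    #define SKETCH_MAKE_RLE16(val, len) ((rle16_t){(uint16_t)(val), (uint16_t)(len)})

    int main(void) {
        uint16_t start = 10, end = 14;                        /* run covers 10, 11, 12, 13 */
        rle16_t r = SKETCH_MAKE_RLE16(start, end - start - 1);
        printf("value=%u length=%u values=%u\n",
               (unsigned)r.value, (unsigned)r.length, (unsigned)r.length + 1u);
        return 0;
    }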
diff --git a/contrib/libs/croaring/src/containers/mixed_negation.c b/contrib/libs/croaring/src/containers/mixed_negation.c
index 93c6f2d374..0ed354a837 100644
--- a/contrib/libs/croaring/src/containers/mixed_negation.c
+++ b/contrib/libs/croaring/src/containers/mixed_negation.c
@@ -326,7 +326,7 @@ int run_container_negation_range_inplace(run_container_t *src,
}
// as with Java implementation, use locals to give self a buffer of depth 1
- rle16_t buffered = MAKE_RLE16(0, 0);
+ rle16_t buffered = CROARING_MAKE_RLE16(0, 0);
rle16_t next = buffered;
if (k < my_nbr_runs) buffered = src->runs[k];
diff --git a/contrib/libs/croaring/src/containers/run.c b/contrib/libs/croaring/src/containers/run.c
index 2a2787b245..0d64726434 100644
--- a/contrib/libs/croaring/src/containers/run.c
+++ b/contrib/libs/croaring/src/containers/run.c
@@ -595,7 +595,8 @@ void run_container_andnot(const run_container_t *src_1,
while ((rlepos1 < src_1->n_runs) && (rlepos2 < src_2->n_runs)) {
if (end <= start2) {
// output the first run
- dst->runs[dst->n_runs++] = MAKE_RLE16(start, end - start - 1);
+ dst->runs[dst->n_runs++] =
+ CROARING_MAKE_RLE16(start, end - start - 1);
rlepos1++;
if (rlepos1 < src_1->n_runs) {
start = src_1->runs[rlepos1].value;
@@ -611,7 +612,7 @@ void run_container_andnot(const run_container_t *src_1,
} else {
if (start < start2) {
dst->runs[dst->n_runs++] =
- MAKE_RLE16(start, start2 - start - 1);
+ CROARING_MAKE_RLE16(start, start2 - start - 1);
}
if (end2 < end) {
start = end2;
@@ -625,7 +626,7 @@ void run_container_andnot(const run_container_t *src_1,
}
}
if (rlepos1 < src_1->n_runs) {
- dst->runs[dst->n_runs++] = MAKE_RLE16(start, end - start - 1);
+ dst->runs[dst->n_runs++] = CROARING_MAKE_RLE16(start, end - start - 1);
rlepos1++;
if (rlepos1 < src_1->n_runs) {
memcpy(dst->runs + dst->n_runs, src_1->runs + rlepos1,
@@ -827,7 +828,7 @@ void run_container_smart_append_exclusive(run_container_t *src,
if (!src->n_runs ||
(start > (old_end = last_run->value + last_run->length + 1))) {
- *appended_last_run = MAKE_RLE16(start, length);
+ *appended_last_run = CROARING_MAKE_RLE16(start, length);
src->n_runs++;
return;
}
@@ -841,10 +842,10 @@ void run_container_smart_append_exclusive(run_container_t *src,
if (start == last_run->value) {
// wipe out previous
if (new_end < old_end) {
- *last_run = MAKE_RLE16(new_end, old_end - new_end - 1);
+ *last_run = CROARING_MAKE_RLE16(new_end, old_end - new_end - 1);
return;
} else if (new_end > old_end) {
- *last_run = MAKE_RLE16(old_end, new_end - old_end - 1);
+ *last_run = CROARING_MAKE_RLE16(old_end, new_end - old_end - 1);
return;
} else {
src->n_runs--;
@@ -853,10 +854,12 @@ void run_container_smart_append_exclusive(run_container_t *src,
}
last_run->length = start - last_run->value - 1;
if (new_end < old_end) {
- *appended_last_run = MAKE_RLE16(new_end, old_end - new_end - 1);
+ *appended_last_run =
+ CROARING_MAKE_RLE16(new_end, old_end - new_end - 1);
src->n_runs++;
} else if (new_end > old_end) {
- *appended_last_run = MAKE_RLE16(old_end, new_end - old_end - 1);
+ *appended_last_run =
+ CROARING_MAKE_RLE16(old_end, new_end - old_end - 1);
src->n_runs++;
}
}
@@ -1055,6 +1058,7 @@ int run_container_cardinality(const run_container_t *run) {
#else
/* Get the cardinality of `run'. Requires an actual computation. */
+ALLOW_UNALIGNED
int run_container_cardinality(const run_container_t *run) {
const int32_t n_runs = run->n_runs;
const rle16_t *runs = run->runs;
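The scalar fallback that gains the ALLOW_UNALIGNED annotation just sums run lengths: each rle16_t contributes length + 1 values, so the total is n_runs plus the sum of the length fields. (The annotation is presumably there because the runs array may be read through pointers that are not naturally aligned, e.g. views over serialized buffers; that reading is an inference, not stated in the diff.) A pared-down, runnable version of the computation:

    #include <stdint.h>
    #include <stdio.h>

    typedef struct { uint16_t value; uint16_t length; } rle16_t;

    static int run_cardinality_sketch(const rle16_t *runs, int32_t n_runs) {
        int sum = n_runs;                        /* each run holds at least one value */
        for (int32_t k = 0; k < n_runs; ++k) {
            sum += runs[k].length;               /* plus `length` additional values */
        }
        return sum;
    }

    int main(void) {
        rle16_t runs[] = {{0, 9}, {100, 0}, {200, 49}};       /* 10 + 1 + 50 values */
        printf("%d\n", run_cardinality_sketch(runs, 3));      /* prints 61 */
        return 0;
    }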
diff --git a/contrib/libs/croaring/src/roaring64.c b/contrib/libs/croaring/src/roaring64.c
index 5f9f941bdc..c49353ced5 100644
--- a/contrib/libs/croaring/src/roaring64.c
+++ b/contrib/libs/croaring/src/roaring64.c
@@ -1638,7 +1638,7 @@ void roaring64_bitmap_flip_closed_inplace(roaring64_bitmap_t *r, uint64_t min,
static inline uint64_t count_high32(const roaring64_bitmap_t *r) {
art_iterator_t it = art_init_iterator(&r->art, /*first=*/true);
uint64_t high32_count = 0;
- uint32_t prev_high32;
+ uint32_t prev_high32 = 0;
while (it.value != NULL) {
uint32_t current_high32 = (uint32_t)(combine_key(it.key, 0) >> 32);
if (high32_count == 0 || prev_high32 != current_high32) {
@@ -1666,7 +1666,7 @@ size_t roaring64_bitmap_portable_size_in_bytes(const roaring64_bitmap_t *r) {
size += sizeof(high32_count);
art_iterator_t it = art_init_iterator(&r->art, /*first=*/true);
- uint32_t prev_high32;
+ uint32_t prev_high32 = 0;
roaring_bitmap_t *bitmap32 = NULL;
// Iterate through buckets ordered by increasing keys.
@@ -1731,7 +1731,7 @@ size_t roaring64_bitmap_portable_serialize(const roaring64_bitmap_t *r,
buf += sizeof(high32_count);
art_iterator_t it = art_init_iterator(&r->art, /*first=*/true);
- uint32_t prev_high32;
+ uint32_t prev_high32 = 0;
roaring_bitmap_t *bitmap32 = NULL;
// Iterate through buckets ordered by increasing keys.
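The three prev_high32 initializations in roaring64.c are warning fixes rather than behaviour changes: the variable is only compared after the high32_count == 0 (or equivalent) guard has forced an assignment, so it is never read uninitialized, but compilers and analyzers cannot always prove that and flag it as maybe-uninitialized. A trimmed, standalone sketch of the counting pattern used by count_high32:

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    /* counts distinct high-32-bit prefixes over keys visited in ascending order */
    static uint64_t count_high32_sketch(const uint64_t *keys, size_t n) {
        uint64_t high32_count = 0;
        uint32_t prev_high32 = 0;                /* the initialization added by this diff */
        for (size_t i = 0; i < n; ++i) {
            uint32_t current_high32 = (uint32_t)(keys[i] >> 32);
            if (high32_count == 0 || prev_high32 != current_high32) {
                high32_count++;                  /* the short-circuit above guarantees prev_high32 */
                prev_high32 = current_high32;    /* is assigned before it is ever compared */
            }
        }
        return high32_count;
    }

    int main(void) {
        uint64_t keys[] = {1, 2, (1ULL << 32) + 7, (5ULL << 32)};
        printf("%llu\n", (unsigned long long)count_high32_sketch(keys, 4));   /* prints 3 */
        return 0;
    }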
diff --git a/contrib/libs/croaring/ya.make b/contrib/libs/croaring/ya.make
index 4ca24ffd5d..6e923d2394 100644
--- a/contrib/libs/croaring/ya.make
+++ b/contrib/libs/croaring/ya.make
@@ -10,9 +10,9 @@ LICENSE(
LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
-VERSION(3.0.0)
+VERSION(3.0.1)
-ORIGINAL_SOURCE(https://github.com/RoaringBitmap/CRoaring/archive/v3.0.0.tar.gz)
+ORIGINAL_SOURCE(https://github.com/RoaringBitmap/CRoaring/archive/v3.0.1.tar.gz)
ADDINCL(
GLOBAL contrib/libs/croaring/include
diff --git a/contrib/python/clickhouse-connect/.dist-info/METADATA b/contrib/python/clickhouse-connect/.dist-info/METADATA
index 093e1bf5ed..914107f75a 100644
--- a/contrib/python/clickhouse-connect/.dist-info/METADATA
+++ b/contrib/python/clickhouse-connect/.dist-info/METADATA
@@ -1,6 +1,6 @@
Metadata-Version: 2.1
Name: clickhouse-connect
-Version: 0.7.5
+Version: 0.7.6
Summary: ClickHouse Database Core Driver for Python, Pandas, and Superset
Home-page: https://github.com/ClickHouse/clickhouse-connect
Author: ClickHouse Inc.
diff --git a/contrib/python/clickhouse-connect/clickhouse_connect/__version__.py b/contrib/python/clickhouse-connect/clickhouse_connect/__version__.py
index c584cf2c45..4a603d9009 100644
--- a/contrib/python/clickhouse-connect/clickhouse_connect/__version__.py
+++ b/contrib/python/clickhouse-connect/clickhouse_connect/__version__.py
@@ -1 +1 @@
-version = '0.7.5'
+version = '0.7.6'
diff --git a/contrib/python/clickhouse-connect/clickhouse_connect/cc_sqlalchemy/datatypes/sqltypes.py b/contrib/python/clickhouse-connect/clickhouse_connect/cc_sqlalchemy/datatypes/sqltypes.py
index 403ec20ca5..099edde9a7 100644
--- a/contrib/python/clickhouse-connect/clickhouse_connect/cc_sqlalchemy/datatypes/sqltypes.py
+++ b/contrib/python/clickhouse-connect/clickhouse_connect/cc_sqlalchemy/datatypes/sqltypes.py
@@ -235,7 +235,7 @@ class DateTime64(ChSqlaType, SqlaDateTime):
def __init__(self, precision: int = None, tz: str = None, type_def: TypeDef = None):
"""
Date time constructor with precision and timezone parameters if not constructed with TypeDef
- :param precision: Usually 3/6/9 for mill/micro/nansecond precision on ClickHouse side
+ :param precision: Usually 3/6/9 for mill/micro/nanosecond precision on ClickHouse side
:param tz: Timezone string as defined in pytz
:param type_def: TypeDef from parse_name function
"""
diff --git a/contrib/python/clickhouse-connect/clickhouse_connect/cc_sqlalchemy/ddl/tableengine.py b/contrib/python/clickhouse-connect/clickhouse_connect/cc_sqlalchemy/ddl/tableengine.py
index 483beaabe2..2a9e452709 100644
--- a/contrib/python/clickhouse-connect/clickhouse_connect/cc_sqlalchemy/ddl/tableengine.py
+++ b/contrib/python/clickhouse-connect/clickhouse_connect/cc_sqlalchemy/ddl/tableengine.py
@@ -147,6 +147,10 @@ class MergeTree(TableEngine):
super().__init__(locals())
+class SharedMergeTree(MergeTree):
+ pass
+
+
class SummingMergeTree(MergeTree):
pass
diff --git a/contrib/python/clickhouse-connect/clickhouse_connect/datatypes/registry.py b/contrib/python/clickhouse-connect/clickhouse_connect/datatypes/registry.py
index 3544b3776c..52d1036787 100644
--- a/contrib/python/clickhouse-connect/clickhouse_connect/datatypes/registry.py
+++ b/contrib/python/clickhouse-connect/clickhouse_connect/datatypes/registry.py
@@ -36,7 +36,7 @@ def parse_name(name: str) -> Tuple[str, str, TypeDef]:
keys, values = parse_columns(base[5:])
base = 'Tuple'
elif base == 'Point':
- values = ['Float64', 'Float64']
+ values = ('Float64', 'Float64')
else:
try:
base, values, _ = parse_callable(base)
diff --git a/contrib/python/clickhouse-connect/clickhouse_connect/driver/query.py b/contrib/python/clickhouse-connect/clickhouse_connect/driver/query.py
index 235fe5e7de..549dfc3717 100644
--- a/contrib/python/clickhouse-connect/clickhouse_connect/driver/query.py
+++ b/contrib/python/clickhouse-connect/clickhouse_connect/driver/query.py
@@ -339,7 +339,7 @@ class QueryResult(Closable):
BS = '\\'
-must_escape = (BS, '\'', '`')
+must_escape = (BS, '\'', '`', '\t', '\n')
def quote_identifier(identifier: str):
diff --git a/contrib/python/clickhouse-connect/ya.make b/contrib/python/clickhouse-connect/ya.make
index 6bd170d5af..9c9ea8d7df 100644
--- a/contrib/python/clickhouse-connect/ya.make
+++ b/contrib/python/clickhouse-connect/ya.make
@@ -2,7 +2,7 @@
PY3_LIBRARY()
-VERSION(0.7.5)
+VERSION(0.7.6)
LICENSE(Apache-2.0)
diff --git a/contrib/python/contourpy/.dist-info/METADATA b/contrib/python/contourpy/.dist-info/METADATA
index ae131f08d6..7078d82456 100644
--- a/contrib/python/contourpy/.dist-info/METADATA
+++ b/contrib/python/contourpy/.dist-info/METADATA
@@ -1,11 +1,11 @@
Metadata-Version: 2.1
Name: contourpy
-Version: 1.2.0
+Version: 1.2.1
Summary: Python library for calculating contours of 2D quadrilateral grids
Author-Email: Ian Thomas <ianthomas23@gmail.com>
License: BSD 3-Clause License
- Copyright (c) 2021-2023, ContourPy Developers.
+ Copyright (c) 2021-2024, ContourPy Developers.
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -50,7 +50,7 @@ Project-URL: Changelog, https://contourpy.readthedocs.io/en/latest/changelog.htm
Project-URL: Documentation, https://contourpy.readthedocs.io
Project-URL: Repository, https://github.com/contourpy/contourpy
Requires-Python: >=3.9
-Requires-Dist: numpy<2.0,>=1.20
+Requires-Dist: numpy>=1.20
Requires-Dist: furo; extra == "docs"
Requires-Dist: sphinx>=7.2; extra == "docs"
Requires-Dist: sphinx-copybutton; extra == "docs"
@@ -58,7 +58,7 @@ Requires-Dist: bokeh; extra == "bokeh"
Requires-Dist: selenium; extra == "bokeh"
Requires-Dist: contourpy[bokeh,docs]; extra == "mypy"
Requires-Dist: docutils-stubs; extra == "mypy"
-Requires-Dist: mypy==1.6.1; extra == "mypy"
+Requires-Dist: mypy==1.8.0; extra == "mypy"
Requires-Dist: types-Pillow; extra == "mypy"
Requires-Dist: contourpy[test-no-images]; extra == "test"
Requires-Dist: matplotlib; extra == "test"
diff --git a/contrib/python/contourpy/LICENSE b/contrib/python/contourpy/LICENSE
index 90f83c97dd..96837bd9ed 100644
--- a/contrib/python/contourpy/LICENSE
+++ b/contrib/python/contourpy/LICENSE
@@ -1,6 +1,6 @@
BSD 3-Clause License
-Copyright (c) 2021-2023, ContourPy Developers.
+Copyright (c) 2021-2024, ContourPy Developers.
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/contrib/python/contourpy/contourpy/__init__.py b/contrib/python/contourpy/contourpy/__init__.py
index 7b85874c73..aa6baf93d0 100644
--- a/contrib/python/contourpy/contourpy/__init__.py
+++ b/contrib/python/contourpy/contourpy/__init__.py
@@ -5,8 +5,15 @@ from typing import TYPE_CHECKING
import numpy as np
from contourpy._contourpy import (
- ContourGenerator, FillType, LineType, Mpl2005ContourGenerator, Mpl2014ContourGenerator,
- SerialContourGenerator, ThreadedContourGenerator, ZInterp, max_threads,
+ ContourGenerator,
+ FillType,
+ LineType,
+ Mpl2005ContourGenerator,
+ Mpl2014ContourGenerator,
+ SerialContourGenerator,
+ ThreadedContourGenerator,
+ ZInterp,
+ max_threads,
)
from contourpy._version import __version__
from contourpy.chunk import calc_chunk_sizes
@@ -41,12 +48,12 @@ __all__ = [
# Simple mapping of algorithm name to class name.
-_class_lookup: dict[str, type[ContourGenerator]] = dict(
- mpl2005=Mpl2005ContourGenerator,
- mpl2014=Mpl2014ContourGenerator,
- serial=SerialContourGenerator,
- threaded=ThreadedContourGenerator,
-)
+_class_lookup: dict[str, type[ContourGenerator]] = {
+ "mpl2005": Mpl2005ContourGenerator,
+ "mpl2014": Mpl2014ContourGenerator,
+ "serial": SerialContourGenerator,
+ "threaded": ThreadedContourGenerator,
+}
def _remove_z_mask(
@@ -257,6 +264,4 @@ def contour_generator(
kwargs["thread_count"] = thread_count
# Create contour generator.
- cont_gen = cls(*args, **kwargs)
-
- return cont_gen
+ return cls(*args, **kwargs)
diff --git a/contrib/python/contourpy/contourpy/_contourpy.pyi b/contrib/python/contourpy/contourpy/_contourpy.pyi
index 7cbca59023..077f5f9b59 100644
--- a/contrib/python/contourpy/contourpy/_contourpy.pyi
+++ b/contrib/python/contourpy/contourpy/_contourpy.pyi
@@ -1,5 +1,3 @@
-from __future__ import annotations
-
from typing import ClassVar, NoReturn
import numpy as np
@@ -55,7 +53,6 @@ class FillType:
def __init__(self, value: int) -> None: ...
def __int__(self) -> int: ...
def __ne__(self, other: object) -> bool: ...
- def __repr__(self) -> str: ...
def __setstate__(self, state: int) -> NoReturn: ...
@property
def name(self) -> str: ...
@@ -76,7 +73,6 @@ class LineType:
def __init__(self, value: int) -> None: ...
def __int__(self) -> int: ...
def __ne__(self, other: object) -> bool: ...
- def __repr__(self) -> str: ...
def __setstate__(self, state: int) -> NoReturn: ...
@property
def name(self) -> str: ...
@@ -94,7 +90,6 @@ class ZInterp:
def __init__(self, value: int) -> None: ...
def __int__(self) -> int: ...
def __ne__(self, other: object) -> bool: ...
- def __repr__(self) -> str: ...
def __setstate__(self, state: int) -> NoReturn: ...
@property
def name(self) -> str: ...
diff --git a/contrib/python/contourpy/contourpy/_version.py b/contrib/python/contourpy/contourpy/_version.py
index c68196d1cb..a955fdae12 100644
--- a/contrib/python/contourpy/contourpy/_version.py
+++ b/contrib/python/contourpy/contourpy/_version.py
@@ -1 +1 @@
-__version__ = "1.2.0"
+__version__ = "1.2.1"
diff --git a/contrib/python/contourpy/contourpy/dechunk.py b/contrib/python/contourpy/contourpy/dechunk.py
index 92b61bba29..622a1ceb7d 100644
--- a/contrib/python/contourpy/contourpy/dechunk.py
+++ b/contrib/python/contourpy/contourpy/dechunk.py
@@ -4,7 +4,9 @@ from typing import TYPE_CHECKING, cast
from contourpy._contourpy import FillType, LineType
from contourpy.array import (
- concat_codes_or_none, concat_offsets_or_none, concat_points_or_none,
+ concat_codes_or_none,
+ concat_offsets_or_none,
+ concat_points_or_none,
concat_points_or_none_with_nan,
)
from contourpy.enum_util import as_fill_type, as_line_type
diff --git a/contrib/python/contourpy/contourpy/typecheck.py b/contrib/python/contourpy/contourpy/typecheck.py
index 06a18f6cb0..23fbd54856 100644
--- a/contrib/python/contourpy/contourpy/typecheck.py
+++ b/contrib/python/contourpy/contourpy/typecheck.py
@@ -196,7 +196,7 @@ def check_lines(lines: cpy.LineReturn, line_type: LineType | str) -> None:
if TYPE_CHECKING:
lines = cast(cpy.LineReturn_ChunkCombinedNan, lines)
_check_tuple_of_lists_with_same_length(lines, 1, allow_empty_lists=False)
- for chunk, points_or_none in enumerate(lines[0]):
+ for _chunk, points_or_none in enumerate(lines[0]):
if points_or_none is not None:
check_point_array(points_or_none)
else:
diff --git a/contrib/python/contourpy/contourpy/util/_build_config.pyi b/contrib/python/contourpy/contourpy/util/_build_config.pyi
index 7ca4b0eb8c..cedb032d27 100644
--- a/contrib/python/contourpy/contourpy/util/_build_config.pyi
+++ b/contrib/python/contourpy/contourpy/util/_build_config.pyi
@@ -1,5 +1,3 @@
# Explicit type hints for dynamically-generated python file that mypy may not be able to find.
-from __future__ import annotations
-
def build_config() -> dict[str, str]: ...
diff --git a/contrib/python/contourpy/contourpy/util/bokeh_renderer.py b/contrib/python/contourpy/contourpy/util/bokeh_renderer.py
index c467fe9f70..85b0f8c40e 100644
--- a/contrib/python/contourpy/contourpy/util/bokeh_renderer.py
+++ b/contrib/python/contourpy/contourpy/util/bokeh_renderer.py
@@ -11,7 +11,6 @@ from bokeh.palettes import Category10
from bokeh.plotting import figure
import numpy as np
-from contourpy import FillType, LineType
from contourpy.enum_util import as_fill_type, as_line_type
from contourpy.util.bokeh_util import filled_to_bokeh, lines_to_bokeh
from contourpy.util.renderer import Renderer
@@ -22,6 +21,7 @@ if TYPE_CHECKING:
from numpy.typing import ArrayLike
from selenium.webdriver.remote.webdriver import WebDriver
+ from contourpy import FillType, LineType
from contourpy._contourpy import FillReturn, LineReturn
@@ -146,21 +146,21 @@ class BokehRenderer(Renderer):
"""
fig = self._get_figure(ax)
x, y = self._grid_as_2d(x, y)
- xs = [row for row in x] + [row for row in x.T]
- ys = [row for row in y] + [row for row in y.T]
- kwargs = dict(line_color=color, alpha=alpha)
+ xs = list(x) + list(x.T)
+ ys = list(y) + list(y.T)
+ kwargs = {"line_color": color, "alpha": alpha}
fig.multi_line(xs, ys, **kwargs)
if quad_as_tri_alpha > 0:
# Assumes no quad mask.
xmid = (0.25*(x[:-1, :-1] + x[1:, :-1] + x[:-1, 1:] + x[1:, 1:])).ravel()
ymid = (0.25*(y[:-1, :-1] + y[1:, :-1] + y[:-1, 1:] + y[1:, 1:])).ravel()
fig.multi_line(
- [row for row in np.stack((x[:-1, :-1].ravel(), xmid, x[1:, 1:].ravel()), axis=1)],
- [row for row in np.stack((y[:-1, :-1].ravel(), ymid, y[1:, 1:].ravel()), axis=1)],
+ list(np.stack((x[:-1, :-1].ravel(), xmid, x[1:, 1:].ravel()), axis=1)),
+ list(np.stack((y[:-1, :-1].ravel(), ymid, y[1:, 1:].ravel()), axis=1)),
**kwargs)
fig.multi_line(
- [row for row in np.stack((x[:-1, 1:].ravel(), xmid, x[1:, :-1].ravel()), axis=1)],
- [row for row in np.stack((y[:-1, 1:].ravel(), ymid, y[1:, :-1].ravel()), axis=1)],
+ list(np.stack((x[:-1, 1:].ravel(), xmid, x[1:, :-1].ravel()), axis=1)),
+ list(np.stack((y[:-1, 1:].ravel(), ymid, y[1:, :-1].ravel()), axis=1)),
**kwargs)
if point_color is not None:
fig.circle(
@@ -323,7 +323,7 @@ class BokehRenderer(Renderer):
x, y = self._grid_as_2d(x, y)
z = np.asarray(z)
ny, nx = z.shape
- kwargs = dict(text_color=color, text_align="center", text_baseline="middle")
+ kwargs = {"text_color": color, "text_align": "center", "text_baseline": "middle"}
for j in range(ny):
for i in range(nx):
fig.add_layout(Label(x=x[j, i], y=y[j, i], text=f"{z[j, i]:{fmt}}", **kwargs))
diff --git a/contrib/python/contourpy/contourpy/util/bokeh_util.py b/contrib/python/contourpy/contourpy/util/bokeh_util.py
index 80b9396ea2..e75eb84453 100644
--- a/contrib/python/contourpy/contourpy/util/bokeh_util.py
+++ b/contrib/python/contourpy/contourpy/util/bokeh_util.py
@@ -9,7 +9,10 @@ from contourpy.dechunk import dechunk_lines
if TYPE_CHECKING:
from contourpy._contourpy import (
- CoordinateArray, FillReturn, LineReturn, LineReturn_ChunkCombinedNan,
+ CoordinateArray,
+ FillReturn,
+ LineReturn,
+ LineReturn_ChunkCombinedNan,
)
diff --git a/contrib/python/contourpy/contourpy/util/mpl_renderer.py b/contrib/python/contourpy/contourpy/util/mpl_renderer.py
index 2d8997f2ca..c459e02a92 100644
--- a/contrib/python/contourpy/contourpy/util/mpl_renderer.py
+++ b/contrib/python/contourpy/contourpy/util/mpl_renderer.py
@@ -1,6 +1,5 @@
from __future__ import annotations
-from collections.abc import Sequence
import io
from typing import TYPE_CHECKING, Any, cast
@@ -15,6 +14,8 @@ from contourpy.util.mpl_util import filled_to_mpl_paths, lines_to_mpl_paths
from contourpy.util.renderer import Renderer
if TYPE_CHECKING:
+ from collections.abc import Sequence
+
from matplotlib.axes import Axes
from matplotlib.figure import Figure
from numpy.typing import ArrayLike
@@ -49,14 +50,15 @@ class MplRenderer(Renderer):
gridspec_kw: dict[str, Any] | None = None,
) -> None:
if backend is not None:
- import matplotlib
- matplotlib.use(backend)
+ import matplotlib as mpl
+ mpl.use(backend)
- kwargs: dict[str, Any] = dict(figsize=figsize, squeeze=False, sharex=True, sharey=True)
+ kwargs: dict[str, Any] = {"figsize": figsize, "squeeze": False,
+ "sharex": True, "sharey": True}
if gridspec_kw is not None:
kwargs["gridspec_kw"] = gridspec_kw
else:
- kwargs["subplot_kw"] = dict(aspect="equal")
+ kwargs["subplot_kw"] = {"aspect": "equal"}
self._fig, axes = plt.subplots(nrows, ncols, **kwargs)
self._axes = axes.flatten()
@@ -145,7 +147,7 @@ class MplRenderer(Renderer):
"""
ax = self._get_ax(ax)
x, y = self._grid_as_2d(x, y)
- kwargs: dict[str, Any] = dict(color=color, alpha=alpha)
+ kwargs: dict[str, Any] = {"color": color, "alpha": alpha}
ax.plot(x, y, x.T, y.T, **kwargs)
if quad_as_tri_alpha > 0:
# Assumes no quad mask.
diff --git a/contrib/python/contourpy/src/mpl2014.cpp b/contrib/python/contourpy/src/mpl2014.cpp
index d5012de6fc..a363fa8768 100644
--- a/contrib/python/contourpy/src/mpl2014.cpp
+++ b/contrib/python/contourpy/src/mpl2014.cpp
@@ -399,7 +399,8 @@ void Mpl2014ContourGenerator::append_contour_to_vertices_and_codes(
const ContourLine::Children& children = line.get_children();
py::ssize_t npoints = static_cast<py::ssize_t>(line.size() + 1);
for (children_it = children.begin(); children_it != children.end(); ++children_it)
- npoints += static_cast<py::ssize_t>((*children_it)->size() + 1);
+ // cppcheck-suppress useStlAlgorithm
+ npoints += static_cast<py::ssize_t>((*children_it)->size() + 1);
py::ssize_t vertices_dims[2] = {npoints, 2};
PointArray vertices(vertices_dims);
@@ -642,8 +643,7 @@ unsigned int Mpl2014ContourGenerator::follow_boundary(
// Add point to contour.
get_point_xy(end_point, contour_line);
- if (first_edge)
- first_edge = false;
+ first_edge = false;
}
return level_index;
diff --git a/contrib/python/contourpy/ya.make b/contrib/python/contourpy/ya.make
index 1da5a3ee7c..5c6203a16b 100644
--- a/contrib/python/contourpy/ya.make
+++ b/contrib/python/contourpy/ya.make
@@ -2,7 +2,7 @@
PY3_LIBRARY()
-VERSION(1.2.0)
+VERSION(1.2.1)
LICENSE(BSD-3-Clause)