| author | robot-piglet <robot-piglet@yandex-team.com> | 2023-11-18 09:38:04 +0300 |
| --- | --- | --- |
| committer | robot-piglet <robot-piglet@yandex-team.com> | 2023-11-18 09:53:51 +0300 |
| commit | 45d2a1a9ad08d7bb7ecb28ea9fae9f59c67768b7 (patch) | |
| tree | aafec20a12fcca14e5309de5a557ac2be2eeee2f | |
| parent | dec45798cd231cb90036274023b155393b0ad489 (diff) | |
| download | ydb-45d2a1a9ad08d7bb7ecb28ea9fae9f59c67768b7.tar.gz | |
Intermediate changes
8 files changed, 151 insertions, 98 deletions
diff --git a/contrib/clickhouse/includes/configs/config_version.cpp b/contrib/clickhouse/includes/configs/config_version.cpp
index 28713d19c2..f59bf8089d 100644
--- a/contrib/clickhouse/includes/configs/config_version.cpp
+++ b/contrib/clickhouse/includes/configs/config_version.cpp
@@ -1,3 +1,3 @@
 /// This file was autogenerated by CMake
 
-const char * VERSION_GITHASH = "077df679bed122ad45c8b105d8916ccfec85ae64";
+const char * VERSION_GITHASH = "812b95e14ba8cf744bf1d70e6de607cf130a79fa";
diff --git a/contrib/clickhouse/includes/configs/config_version.h b/contrib/clickhouse/includes/configs/config_version.h
index c90a7959d3..7b2920046b 100644
--- a/contrib/clickhouse/includes/configs/config_version.h
+++ b/contrib/clickhouse/includes/configs/config_version.h
@@ -8,13 +8,13 @@
 #define VERSION_NAME "ClickHouse"
 #define VERSION_MAJOR 23
 #define VERSION_MINOR 8
-#define VERSION_PATCH 7
-#define VERSION_STRING "23.8.7.1"
+#define VERSION_PATCH 8
+#define VERSION_STRING "23.8.8.1"
 #define VERSION_STRING_SHORT "23.8"
 /* #undef VERSION_OFFICIAL */
-#define VERSION_FULL "ClickHouse 23.8.7.1"
-#define VERSION_DESCRIBE "v23.8.7.1-lts"
-#define VERSION_INTEGER 23008007
+#define VERSION_FULL "ClickHouse 23.8.8.1"
+#define VERSION_DESCRIBE "v23.8.8.1-lts"
+#define VERSION_INTEGER 23008008
 
 /// These fields are frequently changing and we don't want to have them in the header file to allow caching.
 extern const char * VERSION_GITHASH;
diff --git a/contrib/clickhouse/src/Access/KerberosInit.cpp b/contrib/clickhouse/src/Access/KerberosInit.cpp
index 58e4a46f2a..772938ad9b 100644
--- a/contrib/clickhouse/src/Access/KerberosInit.cpp
+++ b/contrib/clickhouse/src/Access/KerberosInit.cpp
@@ -44,7 +44,7 @@ private:
     krb5_ccache defcache = nullptr;
     krb5_get_init_creds_opt * options = nullptr;
     // Credentials structure including ticket, session key, and lifetime info.
-    krb5_creds my_creds;
+    krb5_creds my_creds {};
     krb5_keytab keytab = nullptr;
     krb5_principal defcache_princ = nullptr;
     String fmtError(krb5_error_code code) const;
diff --git a/contrib/clickhouse/src/Compression/CompressionCodecFPC.cpp b/contrib/clickhouse/src/Compression/CompressionCodecFPC.cpp
index 8c3e518ed6..ef31c88696 100644
--- a/contrib/clickhouse/src/Compression/CompressionCodecFPC.cpp
+++ b/contrib/clickhouse/src/Compression/CompressionCodecFPC.cpp
@@ -153,23 +153,23 @@ void registerCodecFPC(CompressionCodecFactory & factory)
 namespace
 {
 
-template <std::unsigned_integral TUint>
-requires (sizeof(TUint) >= 4)
+template <std::unsigned_integral TUInt>
+requires (sizeof(TUInt) >= 4)
 class DfcmPredictor
 {
 public:
-    explicit DfcmPredictor(std::size_t table_size)
+    explicit DfcmPredictor(size_t table_size)
         : table(table_size, 0), prev_value{0}, hash{0}
     {
     }
 
     [[nodiscard]]
-    TUint predict() const noexcept
+    TUInt predict() const noexcept
     {
         return table[hash] + prev_value;
     }
 
-    void add(TUint value) noexcept
+    void add(TUInt value) noexcept
     {
         table[hash] = value - prev_value;
         recalculateHash();
@@ -180,38 +180,38 @@ private:
     void recalculateHash() noexcept
     {
         auto value = table[hash];
-        if constexpr (sizeof(TUint) >= 8)
+        if constexpr (sizeof(TUInt) >= 8)
         {
-            hash = ((hash << 2) ^ static_cast<std::size_t>(value >> 40)) & (table.size() - 1);
+            hash = ((hash << 2) ^ static_cast<size_t>(value >> 40)) & (table.size() - 1);
         }
         else
         {
-            hash = ((hash << 4) ^ static_cast<std::size_t>(value >> 23)) & (table.size() - 1);
+            hash = ((hash << 4) ^ static_cast<size_t>(value >> 23)) & (table.size() - 1);
         }
     }
 
-    std::vector<TUint> table;
-    TUint prev_value;
-    std::size_t hash;
+    std::vector<TUInt> table;
+    TUInt prev_value;
+    size_t hash;
 };
 
-template <std::unsigned_integral TUint>
-requires (sizeof(TUint) >= 4)
+template <std::unsigned_integral TUInt>
+requires (sizeof(TUInt) >= 4)
 class FcmPredictor
 {
 public:
-    explicit FcmPredictor(std::size_t table_size)
+    explicit FcmPredictor(size_t table_size)
         : table(table_size, 0), hash{0}
     {
     }
 
     [[nodiscard]]
-    TUint predict() const noexcept
+    TUInt predict() const noexcept
     {
         return table[hash];
     }
 
-    void add(TUint value) noexcept
+    void add(TUInt value) noexcept
     {
         table[hash] = value;
         recalculateHash();
@@ -221,31 +221,31 @@ private:
     void recalculateHash() noexcept
     {
         auto value = table[hash];
-        if constexpr (sizeof(TUint) >= 8)
+        if constexpr (sizeof(TUInt) >= 8)
         {
-            hash = ((hash << 6) ^ static_cast<std::size_t>(value >> 48)) & (table.size() - 1);
+            hash = ((hash << 6) ^ static_cast<size_t>(value >> 48)) & (table.size() - 1);
         }
         else
         {
-            hash = ((hash << 1) ^ static_cast<std::size_t>(value >> 22)) & (table.size() - 1);
+            hash = ((hash << 1) ^ static_cast<size_t>(value >> 22)) & (table.size() - 1);
         }
     }
 
-    std::vector<TUint> table;
-    std::size_t hash;
+    std::vector<TUInt> table;
+    size_t hash;
 };
 
-template <std::unsigned_integral TUint>
+template <std::unsigned_integral TUInt>
 class FPCOperation
 {
-    static constexpr auto VALUE_SIZE = sizeof(TUint);
+    static constexpr size_t VALUE_SIZE = sizeof(TUInt);
     static constexpr std::byte FCM_BIT{0};
     static constexpr std::byte DFCM_BIT{1u << 3};
-    static constexpr auto DFCM_BIT_1 = DFCM_BIT << 4;
-    static constexpr auto DFCM_BIT_2 = DFCM_BIT;
-    static constexpr unsigned MAX_ZERO_BYTE_COUNT = 0b111u;
+    static constexpr std::byte DFCM_BIT_1 = DFCM_BIT << 4;
+    static constexpr std::byte DFCM_BIT_2 = DFCM_BIT;
+    static constexpr UInt32 MAX_ZERO_BYTE_COUNT = 0b111u;
     static constexpr std::endian ENDIAN = std::endian::little;
-    static constexpr std::size_t CHUNK_SIZE = 64;
+    static constexpr size_t CHUNK_SIZE = 64;
 
 public:
     FPCOperation(std::span<std::byte> destination, UInt8 compression_level)
@@ -253,12 +253,12 @@ public:
     {
     }
 
-    std::size_t encode(std::span<const std::byte> data) &&
+    size_t encode(std::span<const std::byte> data) &&
     {
         auto initial_size = result.size();
 
         std::span chunk_view(chunk);
-        for (std::size_t i = 0; i < data.size(); i += chunk_view.size_bytes())
+        for (size_t i = 0; i < data.size(); i += chunk_view.size_bytes())
         {
             auto written_values_count = importChunk(data.subspan(i), chunk_view);
             encodeChunk(chunk_view.subspan(0, written_values_count));
@@ -267,12 +267,12 @@ public:
         return initial_size - result.size();
     }
 
-    void decode(std::span<const std::byte> values, std::size_t decoded_size) &&
+    void decode(std::span<const std::byte> values, size_t decoded_size) &&
    {
-        std::size_t read_bytes = 0;
+        size_t read_bytes = 0;
 
-        std::span<TUint> chunk_view(chunk);
-        for (std::size_t i = 0; i < decoded_size; i += chunk_view.size_bytes())
+        std::span<TUInt> chunk_view(chunk);
+        for (size_t i = 0; i < decoded_size; i += chunk_view.size_bytes())
         {
             if (i + chunk_view.size_bytes() > decoded_size)
                 chunk_view = chunk_view.first(ceilBytesToEvenValues(decoded_size - i));
@@ -282,50 +282,50 @@ public:
     }
 
 private:
-    static std::size_t ceilBytesToEvenValues(std::size_t bytes_count)
+    static size_t ceilBytesToEvenValues(size_t bytes_count)
     {
-        auto values_count = (bytes_count + VALUE_SIZE - 1) / VALUE_SIZE;
+        size_t values_count = (bytes_count + VALUE_SIZE - 1) / VALUE_SIZE;
         return values_count % 2 == 0 ? values_count : values_count + 1;
     }
 
-    std::size_t importChunk(std::span<const std::byte> values, std::span<TUint> chnk)
+    size_t importChunk(std::span<const std::byte> values, std::span<TUInt> current_chunk)
     {
-        if (auto chunk_view = std::as_writable_bytes(chnk); chunk_view.size() <= values.size())
+        if (auto chunk_view = std::as_writable_bytes(current_chunk); chunk_view.size() <= values.size())
         {
-            std::memcpy(chunk_view.data(), values.data(), chunk_view.size());
+            memcpy(chunk_view.data(), values.data(), chunk_view.size());
             return chunk_view.size() / VALUE_SIZE;
         }
         else
         {
-            std::memset(chunk_view.data(), 0, chunk_view.size());
-            std::memcpy(chunk_view.data(), values.data(), values.size());
+            memset(chunk_view.data(), 0, chunk_view.size());
+            memcpy(chunk_view.data(), values.data(), values.size());
             return ceilBytesToEvenValues(values.size());
         }
     }
 
-    void exportChunk(std::span<const TUint> chnk)
+    void exportChunk(std::span<const TUInt> current_chunk)
     {
-        auto chunk_view = std::as_bytes(chnk).first(std::min(result.size(), chnk.size_bytes()));
-        std::memcpy(result.data(), chunk_view.data(), chunk_view.size());
+        auto chunk_view = std::as_bytes(current_chunk).first(std::min(result.size(), current_chunk.size_bytes()));
+        memcpy(result.data(), chunk_view.data(), chunk_view.size());
         result = result.subspan(chunk_view.size());
     }
 
-    void encodeChunk(std::span<const TUint> seq)
+    void encodeChunk(std::span<const TUInt> sequence)
     {
-        for (std::size_t i = 0; i < seq.size(); i += 2)
+        for (size_t i = 0; i < sequence.size(); i += 2)
         {
-            encodePair(seq[i], seq[i + 1]);
+            encodePair(sequence[i], sequence[i + 1]);
         }
     }
 
     struct CompressedValue
     {
-        TUint value;
-        unsigned compressed_size;
+        TUInt value;
+        UInt32 compressed_size;
         std::byte predictor;
     };
 
-    unsigned encodeCompressedZeroByteCount(unsigned compressed)
+    UInt32 encodeCompressedZeroByteCount(UInt32 compressed)
     {
         if constexpr (VALUE_SIZE == MAX_ZERO_BYTE_COUNT + 1)
         {
@@ -335,7 +335,7 @@ private:
         return std::min(compressed, MAX_ZERO_BYTE_COUNT);
     }
 
-    unsigned decodeCompressedZeroByteCount(unsigned encoded_size)
+    UInt32 decodeCompressedZeroByteCount(UInt32 encoded_size)
     {
         if constexpr (VALUE_SIZE == MAX_ZERO_BYTE_COUNT + 1)
         {
@@ -345,22 +345,22 @@ private:
         return encoded_size;
     }
 
-    CompressedValue compressValue(TUint value) noexcept
+    CompressedValue compressValue(TUInt value) noexcept
    {
         static constexpr auto BITS_PER_BYTE = std::numeric_limits<unsigned char>::digits;
 
-        TUint compressed_dfcm = dfcm_predictor.predict() ^ value;
-        TUint compressed_fcm = fcm_predictor.predict() ^ value;
+        TUInt compressed_dfcm = dfcm_predictor.predict() ^ value;
+        TUInt compressed_fcm = fcm_predictor.predict() ^ value;
         dfcm_predictor.add(value);
         fcm_predictor.add(value);
         auto zeroes_dfcm = std::countl_zero(compressed_dfcm);
         auto zeroes_fcm = std::countl_zero(compressed_fcm);
         if (zeroes_dfcm > zeroes_fcm)
-            return {compressed_dfcm, encodeCompressedZeroByteCount(static_cast<unsigned>(zeroes_dfcm) / BITS_PER_BYTE), DFCM_BIT};
-        return {compressed_fcm, encodeCompressedZeroByteCount(static_cast<unsigned>(zeroes_fcm) / BITS_PER_BYTE), FCM_BIT};
+            return {compressed_dfcm, encodeCompressedZeroByteCount(static_cast<UInt32>(zeroes_dfcm) / BITS_PER_BYTE), DFCM_BIT};
+        return {compressed_fcm, encodeCompressedZeroByteCount(static_cast<UInt32>(zeroes_fcm) / BITS_PER_BYTE), FCM_BIT};
     }
 
-    void encodePair(TUint first, TUint second)
+    void encodePair(TUInt first, TUInt second)
     {
         auto [compressed_value1, zero_byte_count1, predictor1] = compressValue(first);
         auto [compressed_value2, zero_byte_count2, predictor2] = compressValue(second);
@@ -374,24 +374,24 @@ private:
         auto tail_size1 = VALUE_SIZE - zero_byte_count1;
         auto tail_size2 = VALUE_SIZE - zero_byte_count2;
 
-        std::memcpy(result.data() + 1, valueTail(compressed_value1, zero_byte_count1), tail_size1);
-        std::memcpy(result.data() + 1 + tail_size1, valueTail(compressed_value2, zero_byte_count2), tail_size2);
+        memcpy(result.data() + 1, valueTail(compressed_value1, zero_byte_count1), tail_size1);
+        memcpy(result.data() + 1 + tail_size1, valueTail(compressed_value2, zero_byte_count2), tail_size2);
         result = result.subspan(1 + tail_size1 + tail_size2);
     }
 
-    std::size_t decodeChunk(std::span<const std::byte> values, std::span<TUint> seq)
+    size_t decodeChunk(std::span<const std::byte> values, std::span<TUInt> sequence)
     {
-        std::size_t read_bytes = 0;
-        for (std::size_t i = 0; i < seq.size(); i += 2)
+        size_t read_bytes = 0;
+        for (size_t i = 0; i < sequence.size(); i += 2)
         {
-            read_bytes += decodePair(values.subspan(read_bytes), seq[i], seq[i + 1]);
+            read_bytes += decodePair(values.subspan(read_bytes), sequence[i], sequence[i + 1]);
         }
         return read_bytes;
     }
 
-    TUint decompressValue(TUint value, bool isDfcmPredictor)
+    TUInt decompressValue(TUInt value, bool isDfcmPredictor)
     {
-        TUint decompressed;
+        TUInt decompressed;
         if (isDfcmPredictor)
         {
             decompressed = dfcm_predictor.predict() ^ value;
@@ -405,37 +405,45 @@ private:
         return decompressed;
     }
 
-    std::size_t decodePair(std::span<const std::byte> bytes, TUint& first, TUint& second)
+    size_t decodePair(std::span<const std::byte> bytes, TUInt & first, TUInt & second)
     {
         if (bytes.empty()) [[unlikely]]
             throw Exception(ErrorCodes::CANNOT_DECOMPRESS, "Unexpected end of encoded sequence");
 
-        auto zero_byte_count1 = decodeCompressedZeroByteCount(
-            std::to_integer<unsigned>(bytes.front() >> 4) & MAX_ZERO_BYTE_COUNT);
-        auto zero_byte_count2 = decodeCompressedZeroByteCount(
-            std::to_integer<unsigned>(bytes.front()) & MAX_ZERO_BYTE_COUNT);
+        UInt32 zero_byte_count1 = decodeCompressedZeroByteCount(
+            std::to_integer<UInt32>(bytes.front() >> 4) & MAX_ZERO_BYTE_COUNT);
+        UInt32 zero_byte_count2 = decodeCompressedZeroByteCount(
+            std::to_integer<UInt32>(bytes.front()) & MAX_ZERO_BYTE_COUNT);
 
-        auto tail_size1 = VALUE_SIZE - zero_byte_count1;
-        auto tail_size2 = VALUE_SIZE - zero_byte_count2;
+        if (zero_byte_count1 > VALUE_SIZE || zero_byte_count2 > VALUE_SIZE) [[unlikely]]
+            throw Exception(ErrorCodes::CANNOT_DECOMPRESS, "Invalid zero byte count(s): {} and {}", zero_byte_count1, zero_byte_count2);
+
+        size_t tail_size1 = VALUE_SIZE - zero_byte_count1;
+        size_t tail_size2 = VALUE_SIZE - zero_byte_count2;
+
+        size_t expected_size = 0;
+        if (__builtin_add_overflow(tail_size1, tail_size2, &expected_size)
+            || __builtin_add_overflow(expected_size, 1, &expected_size)) [[unlikely]]
+            throw Exception(ErrorCodes::CANNOT_DECOMPRESS, "Overflow occurred while calculating expected size");
 
-        if (bytes.size() < 1 + tail_size1 + tail_size2) [[unlikely]]
+        if (bytes.size() < expected_size) [[unlikely]]
             throw Exception(ErrorCodes::CANNOT_DECOMPRESS, "Unexpected end of encoded sequence");
 
-        TUint value1 = 0;
-        TUint value2 = 0;
+        TUInt value1 = 0;
+        TUInt value2 = 0;
 
-        std::memcpy(valueTail(value1, zero_byte_count1), bytes.data() + 1, tail_size1);
-        std::memcpy(valueTail(value2, zero_byte_count2), bytes.data() + 1 + tail_size1, tail_size2);
+        memcpy(valueTail(value1, zero_byte_count1), bytes.data() + 1, tail_size1);
+        memcpy(valueTail(value2, zero_byte_count2), bytes.data() + 1 + tail_size1, tail_size2);
 
         auto is_dfcm_predictor1 = std::to_integer<unsigned char>(bytes.front() & DFCM_BIT_1) != 0;
         auto is_dfcm_predictor2 = std::to_integer<unsigned char>(bytes.front() & DFCM_BIT_2) != 0;
         first = decompressValue(value1, is_dfcm_predictor1);
         second = decompressValue(value2, is_dfcm_predictor2);
 
-        return 1 + tail_size1 + tail_size2;
+        return expected_size;
     }
 
-    static void* valueTail(TUint& value, unsigned compressed_size)
+    static void* valueTail(TUInt& value, UInt32 compressed_size)
     {
         if constexpr (ENDIAN == std::endian::little)
         {
@@ -447,11 +455,11 @@ private:
         }
     }
 
-    DfcmPredictor<TUint> dfcm_predictor;
-    FcmPredictor<TUint> fcm_predictor;
+    DfcmPredictor<TUInt> dfcm_predictor;
+    FcmPredictor<TUInt> fcm_predictor;
 
     // memcpy the input into this buffer to align reads, this improves performance compared to unaligned reads (bit_cast) by ~10%
-    std::array<TUint, CHUNK_SIZE> chunk{};
+    std::array<TUInt, CHUNK_SIZE> chunk{};
     std::span<std::byte> result{};
 };
 
diff --git a/contrib/clickhouse/src/Interpreters/NormalizeSelectWithUnionQueryVisitor.cpp b/contrib/clickhouse/src/Interpreters/NormalizeSelectWithUnionQueryVisitor.cpp
index cbdd656fb8..e814f83c0f 100644
--- a/contrib/clickhouse/src/Interpreters/NormalizeSelectWithUnionQueryVisitor.cpp
+++ b/contrib/clickhouse/src/Interpreters/NormalizeSelectWithUnionQueryVisitor.cpp
@@ -28,8 +28,14 @@ void NormalizeSelectWithUnionQueryMatcher::getSelectsFromUnionListNode(ASTPtr as
 
 void NormalizeSelectWithUnionQueryMatcher::visit(ASTPtr & ast, Data & data)
 {
-    if (auto * select_union = ast->as<ASTSelectWithUnionQuery>())
+    if (auto * select_union = ast->as<ASTSelectWithUnionQuery>(); select_union && !select_union->is_normalized)
+    {
+        /// The rewrite of ASTSelectWithUnionQuery may strip the format info, so
+        /// we need to keep and restore it.
+        auto format = select_union->format;
         visit(*select_union, data);
+        select_union->format = format;
+    }
 }
 
 void NormalizeSelectWithUnionQueryMatcher::visit(ASTSelectWithUnionQuery & ast, Data & data)
diff --git a/contrib/clickhouse/src/Server/TCPHandler.cpp b/contrib/clickhouse/src/Server/TCPHandler.cpp
index 136f2dd953..bb3bee3897 100644
--- a/contrib/clickhouse/src/Server/TCPHandler.cpp
+++ b/contrib/clickhouse/src/Server/TCPHandler.cpp
@@ -585,6 +585,21 @@ void TCPHandler::runImpl()
         }
         catch (const Exception & e)
         {
+            /// Authentication failure with interserver secret
+            /// - early exit without trying to send the exception to the client.
+            /// Because the server should not try to skip (parse, decompress) the remaining packets sent by the client,
+            /// as it will lead to additional work and unneeded exposure to unauthenticated connections.
+
+            /// Note that the exception AUTHENTICATION_FAILED can be here in two cases:
+            /// 1. The authentication in receiveHello is skipped with "interserver secret",
+            ///    postponed to receiving the query, and then failed.
+            /// 2. Receiving exception from a query using a table function to authenticate with another server.
+            ///    In this case, the user is already authenticated with this server,
+            ///    is_interserver_mode is false, and we can send the exception to the client normally.
+
+            if (is_interserver_mode && e.code() == ErrorCodes::AUTHENTICATION_FAILED)
+                throw;
+
             state.io.onException();
 
             exception.reset(e.clone());
@@ -1586,7 +1601,18 @@ void TCPHandler::receiveQuery()
     {
         client_info.interface = ClientInfo::Interface::TCP_INTERSERVER;
 #if USE_SSL
-        String cluster_secret = server.context()->getCluster(cluster)->getSecret();
+
+        String cluster_secret;
+        try
+        {
+            cluster_secret = server.context()->getCluster(cluster)->getSecret();
+        }
+        catch (const Exception & e)
+        {
+            auto exception = Exception::createRuntime(ErrorCodes::AUTHENTICATION_FAILED, e.message());
+            session->onAuthenticationFailure(/* user_name= */ std::nullopt, socket().peerAddress(), exception);
+            throw exception; /// NOLINT
+        }
 
         if (salt.empty() || cluster_secret.empty())
         {
diff --git a/contrib/clickhouse/src/Storages/AlterCommands.cpp b/contrib/clickhouse/src/Storages/AlterCommands.cpp
index 58886c6aa4..c5d8986af2 100644
--- a/contrib/clickhouse/src/Storages/AlterCommands.cpp
+++ b/contrib/clickhouse/src/Storages/AlterCommands.cpp
@@ -43,6 +43,7 @@ namespace ErrorCodes
     extern const int LOGICAL_ERROR;
     extern const int DUPLICATE_COLUMN;
     extern const int NOT_IMPLEMENTED;
+    extern const int SUPPORT_IS_DISABLED;
 }
 
 namespace
@@ -1076,6 +1077,13 @@ void AlterCommands::validate(const StoragePtr & table, ContextPtr context) const
                 throw Exception(ErrorCodes::BAD_ARGUMENTS,
                                 "Data type have to be specified for column {} to add", backQuote(column_name));
 
+            /// FIXME: Adding a new column of type Object(JSON) is broken.
+            /// Looks like there is something around default expression for this column (method `getDefault` is not implemented for the data type Object).
+            /// But after ALTER TABLE ADD COLUMN we need to fill existing rows with something (exactly the default value).
+            /// So we don't allow to do it for now.
+            if (command.data_type->hasDynamicSubcolumns())
+                throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "Adding a new column of a type which has dynamic subcolumns to an existing table is not allowed. It has known bugs");
+
             if (column_name == LightweightDeleteDescription::FILTER_COLUMN.name && std::dynamic_pointer_cast<MergeTreeData>(table))
                 throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Cannot add column {}: "
                                 "this column name is reserved for lightweight delete feature", backQuote(column_name));
@@ -1134,17 +1142,22 @@ void AlterCommands::validate(const StoragePtr & table, ContextPtr context) const
                 }
             }
 
-            /// The change of data type to/from Object is broken, so disable it for now
+            /// FIXME: Modifying the column to/from Object(JSON) is broken.
+            /// Looks like there is something around default expression for this column (method `getDefault` is not implemented for the data type Object).
+            /// But after ALTER TABLE MODIFY COLUMN we need to fill existing rows with something (exactly the default value) or calculate the common type for it.
+            /// So we don't allow to do it for now.
             if (command.data_type)
             {
                 const GetColumnsOptions options(GetColumnsOptions::AllPhysical);
                 const auto old_data_type = all_columns.getColumn(options, column_name).type;
 
-                if (command.data_type->getName().contains("Object")
-                    || old_data_type->getName().contains("Object"))
+                bool new_type_has_object = command.data_type->hasDynamicSubcolumns();
+                bool old_type_has_object = old_data_type->hasDynamicSubcolumns();
+
+                if (new_type_has_object || old_type_has_object)
                     throw Exception(
                         ErrorCodes::BAD_ARGUMENTS,
-                        "The change of data type {} of column {} to {} is not allowed",
+                        "The change of data type {} of column {} to {} is not allowed. It has known bugs",
                         old_data_type->getName(), backQuote(column_name), command.data_type->getName());
             }
 
diff --git a/contrib/clickhouse/src/Storages/System/StorageSystemBuildOptions.generated.cpp b/contrib/clickhouse/src/Storages/System/StorageSystemBuildOptions.generated.cpp
index f304de7a33..2ca0313362 100644
--- a/contrib/clickhouse/src/Storages/System/StorageSystemBuildOptions.generated.cpp
+++ b/contrib/clickhouse/src/Storages/System/StorageSystemBuildOptions.generated.cpp
@@ -2,11 +2,11 @@
 
 const char * auto_config_build[]
 {
-    "VERSION_FULL", "ClickHouse 23.8.7.1",
-    "VERSION_DESCRIBE", "v23.8.7.1-lts",
-    "VERSION_INTEGER", "23008007",
+    "VERSION_FULL", "ClickHouse 23.8.8.1",
+    "VERSION_DESCRIBE", "v23.8.8.1-lts",
+    "VERSION_INTEGER", "23008008",
     "SYSTEM", "Linux",
-    "VERSION_GITHASH", "077df679bed122ad45c8b105d8916ccfec85ae64",
+    "VERSION_GITHASH", "812b95e14ba8cf744bf1d70e6de607cf130a79fa",
     "VERSION_REVISION", "54477",
     "BUILD_TYPE", "MinSizeRel",
     "SYSTEM_PROCESSOR", "x86_64",