diff options
author | stanly <[email protected]> | 2023-05-03 13:44:14 +0300 |
committer | stanly <[email protected]> | 2023-05-03 13:44:14 +0300 |
commit | 80284603b494e5f2fd78bd0ca005f3de80adc304 (patch) | |
tree | 1b21b7f640bdbe29847f9507e1cfbec19ea272f6 | |
parent | eb91f1a62f852fb3068571118aabb98909cbc606 (diff) |
add const to local variables
5 files changed, 38 insertions, 39 deletions
diff --git a/ydb/core/tx/columnshard/columnshard_private_events.h b/ydb/core/tx/columnshard/columnshard_private_events.h index c5dbd88e8b0..377f8782f56 100644 --- a/ydb/core/tx/columnshard/columnshard_private_events.h +++ b/ydb/core/tx/columnshard/columnshard_private_events.h @@ -72,7 +72,7 @@ struct TEvPrivate { GroupedBlobRanges = NOlap::TColumnEngineChanges::GroupedBlobRanges(TxEvent->IndexChanges->SwitchedPortions); if (blobManager.HasExternBlobs()) { - for (auto& [blobId, _] : GroupedBlobRanges) { + for (const auto& [blobId, _] : GroupedBlobRanges) { TEvictMetadata meta; if (blobManager.GetEvicted(blobId, meta).IsExternal()) { Externals.insert(blobId); diff --git a/ydb/core/tx/columnshard/engines/column_engine.h b/ydb/core/tx/columnshard/engines/column_engine.h index 58cbf391acf..4fc878fe033 100644 --- a/ydb/core/tx/columnshard/engines/column_engine.h +++ b/ydb/core/tx/columnshard/engines/column_engine.h @@ -147,10 +147,10 @@ public: THashMap<TUnifiedBlobId, std::shared_ptr<arrow::RecordBatch>> CachedBlobs; bool NeedRepeat{false}; - bool IsInsert() const { return Type == INSERT; } - bool IsCompaction() const { return Type == COMPACTION; } - bool IsCleanup() const { return Type == CLEANUP; } - bool IsTtl() const { return Type == TTL; } + bool IsInsert() const noexcept { return Type == INSERT; } + bool IsCompaction() const noexcept { return Type == COMPACTION; } + bool IsCleanup() const noexcept { return Type == CLEANUP; } + bool IsTtl() const noexcept { return Type == TTL; } const char * TypeString() const { switch (Type) { @@ -170,36 +170,38 @@ public: ui64 TotalBlobsSize() const { ui64 size = 0; - for (auto& [blobId, blob] : Blobs) { + for (const auto& [_, blob] : Blobs) { size += blob.size(); } return size; } + /// Returns blob-ranges grouped by blob-id. 
static THashMap<TUnifiedBlobId, std::vector<TBlobRange>> GroupedBlobRanges(const TVector<TPortionInfo>& portions) { Y_VERIFY(portions.size()); THashMap<TUnifiedBlobId, std::vector<TBlobRange>> sameBlobRanges; - for (auto& portionInfo : portions) { + for (const auto& portionInfo : portions) { Y_VERIFY(!portionInfo.Empty()); - for (auto& rec : portionInfo.Records) { + for (const auto& rec : portionInfo.Records) { sameBlobRanges[rec.BlobRange.BlobId].push_back(rec.BlobRange); } } return sameBlobRanges; } + /// Returns blob-ranges grouped by blob-id. static THashMap<TUnifiedBlobId, std::vector<TBlobRange>> GroupedBlobRanges(const TVector<std::pair<TPortionInfo, TPortionEvictionFeatures>>& portions) { Y_VERIFY(portions.size()); THashMap<TUnifiedBlobId, std::vector<TBlobRange>> sameBlobRanges; - for (auto& [portionInfo, _] : portions) { + for (const auto& [portionInfo, _] : portions) { Y_VERIFY(!portionInfo.Empty()); - for (auto& rec : portionInfo.Records) { + for (const auto& rec : portionInfo.Records) { sameBlobRanges[rec.BlobRange.BlobId].push_back(rec.BlobRange); } } diff --git a/ydb/core/tx/columnshard/engines/column_engine_logs.cpp b/ydb/core/tx/columnshard/engines/column_engine_logs.cpp index 8c0153d3a78..2c3a607e285 100644 --- a/ydb/core/tx/columnshard/engines/column_engine_logs.cpp +++ b/ydb/core/tx/columnshard/engines/column_engine_logs.cpp @@ -110,13 +110,13 @@ bool UpdateEvictedPortion(TPortionInfo& portionInfo, const TIndexInfo& indexInfo return true; } -TVector<TPortionInfo> MakeAppendedPortions(ui64 pathId, const TIndexInfo& indexInfo, - std::shared_ptr<arrow::RecordBatch> batch, - ui64 granule, +TVector<TPortionInfo> MakeAppendedPortions(const ui64 pathId, const TIndexInfo& indexInfo, + const std::shared_ptr<arrow::RecordBatch> batch, + const ui64 granule, const TSnapshot& minSnapshot, TVector<TString>& blobs) { Y_VERIFY(batch->num_rows()); - auto schema = indexInfo.ArrowSchemaWithSpecials(); + const auto schema = indexInfo.ArrowSchemaWithSpecials(); 
TVector<TPortionInfo> out; TString tierName; @@ -124,12 +124,12 @@ TVector<TPortionInfo> MakeAppendedPortions(ui64 pathId, const TIndexInfo& indexI if (pathId) { if (auto* tiering = indexInfo.GetTiering(pathId)) { tierName = tiering->GetHottestTierName(); - if (auto tierCompression = tiering->GetCompression(tierName)) { + if (const auto& tierCompression = tiering->GetCompression(tierName)) { compression = *tierCompression; } } } - auto writeOptions = WriteOptions(compression); + const auto writeOptions = WriteOptions(compression); std::shared_ptr<arrow::RecordBatch> portionBatch = batch; for (i32 pos = 0; pos < batch->num_rows();) { @@ -143,7 +143,7 @@ TVector<TPortionInfo> MakeAppendedPortions(ui64 pathId, const TIndexInfo& indexI // Serialize portion's columns into blobs bool ok = true; - for (auto& field : schema->fields()) { + for (const auto& field : schema->fields()) { const auto& name = field->name(); ui32 columnId = indexInfo.GetColumnId(TString(name.data(), name.size())); @@ -171,7 +171,7 @@ TVector<TPortionInfo> MakeAppendedPortions(ui64 pathId, const TIndexInfo& indexI portionBatch = batch->Slice(pos); } } else { - i64 halfLen = portionBatch->num_rows() / 2; + const i64 halfLen = portionBatch->num_rows() / 2; Y_VERIFY(halfLen); portionBatch = batch->Slice(pos, halfLen); } @@ -211,7 +211,7 @@ bool InitInGranuleMerge(const TMark& granuleMark, TVector<TPortionInfo>& portion { TMap<NArrow::TReplaceKey, TVector<const TPortionInfo*>> points; - for (auto& portionInfo : portions) { + for (const auto& portionInfo : portions) { if (portionInfo.IsInserted()) { ++insertedCount; if (portionInfo.Snapshot().PlanStep > oldTimePlanStep) { @@ -232,8 +232,8 @@ bool InitInGranuleMerge(const TMark& granuleMark, TVector<TPortionInfo>& portion ui64 bucketStartPortion = 0; bool isGood = false; int sum = 0; - for (auto& [key, vec] : points) { - for (auto& portionInfo : vec) { + for (const auto& [key, vec] : points) { + for (const auto* portionInfo : vec) { if (portionInfo) { 
++sum; ui64 currentPortion = portionInfo->Portion(); @@ -285,7 +285,7 @@ bool InitInGranuleMerge(const TMark& granuleMark, TVector<TPortionInfo>& portion TVector<TPortionInfo> tmp; tmp.reserve(portions.size()); - for (auto& portionInfo : portions) { + for (const auto& portionInfo : portions) { ui64 curPortion = portionInfo.Portion(); // Prevent merge of compacted portions with no intersections @@ -1698,7 +1698,7 @@ static std::shared_ptr<arrow::RecordBatch> CompactInOneGranule(const TIndexInfo& static TVector<TString> CompactInGranule(const TIndexInfo& indexInfo, std::shared_ptr<TColumnEngineForLogs::TChanges> changes) { - ui64 pathId = changes->SrcGranule->PathId; + const ui64 pathId = changes->SrcGranule->PathId; TVector<TString> blobs; auto& switchedProtions = changes->SwitchedPortions; Y_VERIFY(switchedProtions.size()); @@ -1738,14 +1738,14 @@ static TVector<TString> CompactInGranule(const TIndexInfo& indexInfo, static TVector<std::pair<TMark, std::shared_ptr<arrow::RecordBatch>>> SliceGranuleBatches(const TIndexInfo& indexInfo, const TColumnEngineForLogs::TChanges& changes, - std::vector<std::shared_ptr<arrow::RecordBatch>>&& batches, + const std::vector<std::shared_ptr<arrow::RecordBatch>>& batches, const TMark& ts0) { TVector<std::pair<TMark, std::shared_ptr<arrow::RecordBatch>>> out; // Extract unique effective keys and their counts i64 numRows = 0; TMap<NArrow::TReplaceKey, ui32> uniqKeyCount; - for (auto& batch : batches) { + for (const auto& batch : batches) { Y_VERIFY(batch); if (batch->num_rows() == 0) { continue; @@ -1804,7 +1804,7 @@ SliceGranuleBatches(const TIndexInfo& indexInfo, // Find offsets in source batches TVector<TVector<int>> offsets(batches.size()); // vec[batch][border] = offset for (size_t i = 0; i < batches.size(); ++i) { - auto& batch = batches[i]; + const auto& batch = batches[i]; auto& batchOffsets = offsets[i]; batchOffsets.reserve(borders.size() + 1); @@ -1838,7 +1838,7 @@ SliceGranuleBatches(const TIndexInfo& indexInfo, // 
Extract granule: slice source batches with offsets i64 granuleNumRows = 0; for (size_t i = 0; i < batches.size(); ++i) { - auto& batch = batches[i]; + const auto& batch = batches[i]; auto& batchOffsets = offsets[i]; int offset = batchOffsets[granuleNo]; @@ -1971,7 +1971,7 @@ static TVector<TString> CompactSplitGranule(const TIndexInfo& indexInfo, std::vector<std::pair<TMark, ui64>> tsIds; ui64 movedRows = TryMovePortions(ts0, portions, tsIds, changes->PortionsToMove); - auto srcBatches = PortionsToBatches(indexInfo, portions, changes->Blobs, movedRows != 0); + const auto& srcBatches = PortionsToBatches(indexInfo, portions, changes->Blobs, movedRows != 0); Y_VERIFY(srcBatches.size() == portions.size()); TVector<TString> blobs; @@ -1983,7 +1983,7 @@ static TVector<TString> CompactSplitGranule(const TIndexInfo& indexInfo, // Calculate total number of rows. ui64 numRows = movedRows; - for (auto& batch : srcBatches) { + for (const auto& batch : srcBatches) { numRows += batch->num_rows(); } @@ -2071,7 +2071,7 @@ static TVector<TString> CompactSplitGranule(const TIndexInfo& indexInfo, } } } else { - auto batches = SliceGranuleBatches(indexInfo, *changes, std::move(srcBatches), ts0); + auto batches = SliceGranuleBatches(indexInfo, *changes, srcBatches, ts0); changes->SetTmpGranule(pathId, ts0); for (auto& [ts, batch] : batches) { diff --git a/ydb/core/tx/columnshard/engines/column_engine_logs.h b/ydb/core/tx/columnshard/engines/column_engine_logs.h index 396daf9cab8..6608fe67569 100644 --- a/ydb/core/tx/columnshard/engines/column_engine_logs.h +++ b/ydb/core/tx/columnshard/engines/column_engine_logs.h @@ -118,7 +118,7 @@ public: THashMap<ui64, ui64> TmpToNewGranules(ui64 start) { THashMap<ui64, ui64> granuleRemap; - for (auto& [mark, counter] : TmpGranuleIds) { + for (const auto& [mark, counter] : TmpGranuleIds) { ui64 granule = start + counter; if (mark == SrcGranule->Mark) { Y_VERIFY(!counter); @@ -132,14 +132,11 @@ public: return granuleRemap; } - ui32 NumSplitInto(ui32 
srcRows) const { + ui32 NumSplitInto(const ui32 srcRows) const { Y_VERIFY(srcRows > 1); - ui64 totalBytes = TotalBlobsSize(); - ui32 numSplitInto = totalBytes / Limits.GranuleExpectedSize + 1; - if (numSplitInto < 2) { - numSplitInto = 2; - } - return numSplitInto; + const ui64 totalBytes = TotalBlobsSize(); + const ui32 numSplitInto = (totalBytes / Limits.GranuleExpectedSize) + 1; + return std::max<ui32>(2, numSplitInto); } TMark DefaultMark; diff --git a/ydb/core/tx/columnshard/engines/portion_info.cpp b/ydb/core/tx/columnshard/engines/portion_info.cpp index c0d34a36c55..f5174fd6068 100644 --- a/ydb/core/tx/columnshard/engines/portion_info.cpp +++ b/ydb/core/tx/columnshard/engines/portion_info.cpp @@ -18,7 +18,7 @@ TString TPortionInfo::AddOneChunkColumn(const std::shared_ptr<arrow::Array>& arr const std::shared_ptr<arrow::Field>& field, TColumnRecord&& record, const arrow::ipc::IpcWriteOptions& writeOptions, - ui32 limitBytes) { + const ui32 limitBytes) { auto blob = SerializeColumn(array, field, writeOptions); if (blob.size() >= limitBytes) { return {}; |