aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authormikailbag <mikailbag@yandex-team.com>2025-06-03 20:57:45 +0300
committermikailbag <mikailbag@yandex-team.com>2025-06-03 22:02:22 +0300
commit3de0a42adb64f435508e027547a2a90bf53ff74d (patch)
tree53762015eeae11ee1a26c076009139876fb973a0
parentbd6193dfbcc6fe9a1061b85e9f1427ad5419b2f1 (diff)
downloadydb-3de0a42adb64f435508e027547a2a90bf53ff74d.tar.gz
Fix user data lifetime bug
Profiles previously did not extend the lifetime of user data referenced by contained allocations. Such data could be destroyed too early, potentially during deallocation, rendering the user_data pointer invalid (and thus making the whole feature effectively unusable). As a fix, profiles now act as strong pointers for all sampled allocations. commit_hash:8f77b397661ca7eab30793aece6f2e655722e99c
-rw-r--r--contrib/libs/tcmalloc/patches/020-user-data.patch31
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/internal/logging.h11
-rw-r--r--contrib/libs/tcmalloc/tcmalloc/stack_trace_table.cc3
3 files changed, 36 insertions, 9 deletions
diff --git a/contrib/libs/tcmalloc/patches/020-user-data.patch b/contrib/libs/tcmalloc/patches/020-user-data.patch
index 811d4cacd2a..3ff3805f3a6 100644
--- a/contrib/libs/tcmalloc/patches/020-user-data.patch
+++ b/contrib/libs/tcmalloc/patches/020-user-data.patch
@@ -26,10 +26,10 @@ index 1cd8d18..a53c26a 100644
// may not be able to heap-allocate while crashing.
ABSL_CONST_INIT static absl::base_internal::SpinLock crash_lock(
diff --git a/tcmalloc/internal/logging.h b/tcmalloc/internal/logging.h
-index 2a5c761..f2ecc1d 100644
+index 2a5c761..43975dd 100644
--- a/tcmalloc/internal/logging.h
+++ b/tcmalloc/internal/logging.h
-@@ -51,6 +51,87 @@ GOOGLE_MALLOC_SECTION_BEGIN
+@@ -51,6 +51,96 @@ GOOGLE_MALLOC_SECTION_BEGIN
namespace tcmalloc {
namespace tcmalloc_internal {
@@ -44,6 +44,10 @@ index 2a5c761..f2ecc1d 100644
+ static UserData Make() {
+ return UserData{CreateSampleUserData()};
+ }
++ // must be matched with preceding Release
++ static void DestroyRaw(void* ptr) {
++ DestroySampleUserData(ptr);
++ }
+
+ constexpr UserData() noexcept : ptr_(nullptr) {}
+
@@ -75,7 +79,12 @@ index 2a5c761..f2ecc1d 100644
+ DestroySampleUserData(ptr_);
+ }
+
-+ void* Get() const { return ptr_; }
++ // should be paired with subsequent DestroyRaw
++ void* Release() && {
++ void* p = ptr_;
++ ptr_ = nullptr;
++ return p;
++ }
+ private:
+ UserData(void* ptr) noexcept : ptr_(ptr) {}
+ private:
@@ -117,7 +126,7 @@ index 2a5c761..f2ecc1d 100644
static constexpr int kMaxStackDepth = 64;
// An opaque handle type used to identify allocations.
-@@ -84,6 +165,8 @@ struct StackTrace {
+@@ -84,6 +174,8 @@ struct StackTrace {
// between the previous sample and this one
size_t weight;
@@ -212,14 +221,22 @@ index 36fd433..702baa8 100644
} // namespace tcmalloc
diff --git a/tcmalloc/stack_trace_table.cc b/tcmalloc/stack_trace_table.cc
-index cf57148..2de1a25 100644
+index cf57148..c6b6867 100644
--- a/tcmalloc/stack_trace_table.cc
+++ b/tcmalloc/stack_trace_table.cc
-@@ -88,6 +88,7 @@ void StackTraceTable::AddTrace(double sample_weight, const StackTrace& t) {
+@@ -39,6 +39,7 @@ StackTraceTable::StackTraceTable(ProfileType type)
+ StackTraceTable::~StackTraceTable() {
+ LinkedSample* cur = all_;
+ while (cur != nullptr) {
++ SampleUserDataSupport::UserData::DestroyRaw(cur->sample.user_data);
+ LinkedSample* next = cur->next;
+ tc_globals.linked_sample_allocator().Delete(cur);
+ cur = next;
+@@ -88,6 +89,7 @@ void StackTraceTable::AddTrace(double sample_weight, const StackTrace& t) {
s->sample.span_start_address = t.span_start_address;
s->sample.guarded_status = t.guarded_status;
s->sample.type = t.allocation_type;
-+ s->sample.user_data = t.user_data.Get();
++ s->sample.user_data = SampleUserDataSupport::UserData{t.user_data}.Release();
static_assert(kMaxStackDepth <= Profile::Sample::kMaxStackDepth,
"Profile stack size smaller than internal stack sizes");
diff --git a/contrib/libs/tcmalloc/tcmalloc/internal/logging.h b/contrib/libs/tcmalloc/tcmalloc/internal/logging.h
index e41732dc425..ee0fa6f45e9 100644
--- a/contrib/libs/tcmalloc/tcmalloc/internal/logging.h
+++ b/contrib/libs/tcmalloc/tcmalloc/internal/logging.h
@@ -63,6 +63,10 @@ public:
static UserData Make() {
return UserData{CreateSampleUserData()};
}
+ // must be matched with preceding Release
+ static void DestroyRaw(void* ptr) {
+ DestroySampleUserData(ptr);
+ }
constexpr UserData() noexcept : ptr_(nullptr) {}
@@ -94,7 +98,12 @@ public:
DestroySampleUserData(ptr_);
}
- void* Get() const { return ptr_; }
+ // should be paired with subsequent DestroyRaw
+ void* Release() && {
+ void* p = ptr_;
+ ptr_ = nullptr;
+ return p;
+ }
private:
UserData(void* ptr) noexcept : ptr_(ptr) {}
private:
diff --git a/contrib/libs/tcmalloc/tcmalloc/stack_trace_table.cc b/contrib/libs/tcmalloc/tcmalloc/stack_trace_table.cc
index 2de1a254821..c6b686753b7 100644
--- a/contrib/libs/tcmalloc/tcmalloc/stack_trace_table.cc
+++ b/contrib/libs/tcmalloc/tcmalloc/stack_trace_table.cc
@@ -39,6 +39,7 @@ StackTraceTable::StackTraceTable(ProfileType type)
StackTraceTable::~StackTraceTable() {
LinkedSample* cur = all_;
while (cur != nullptr) {
+ SampleUserDataSupport::UserData::DestroyRaw(cur->sample.user_data);
LinkedSample* next = cur->next;
tc_globals.linked_sample_allocator().Delete(cur);
cur = next;
@@ -88,7 +89,7 @@ void StackTraceTable::AddTrace(double sample_weight, const StackTrace& t) {
s->sample.span_start_address = t.span_start_address;
s->sample.guarded_status = t.guarded_status;
s->sample.type = t.allocation_type;
- s->sample.user_data = t.user_data.Get();
+ s->sample.user_data = SampleUserDataSupport::UserData{t.user_data}.Release();
static_assert(kMaxStackDepth <= Profile::Sample::kMaxStackDepth,
"Profile stack size smaller than internal stack sizes");