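tcmalloc: support attaching custom user data to allocation samples

Adds a user_data field to the internal StackTrace and to the public
Profile::Sample, plus MallocExtension::SetSampleUserDataCallbacks() for
registering create/copy/destroy callbacks that manage its lifetime:
create runs when an allocation is sampled (SampleifyAllocation), copy
runs whenever a sample is duplicated into a StackTraceTable bucket or
the peak-heap tracker, and destroy runs when the owning sample or
bucket is released. With no callbacks registered, user_data stays
nullptr and behavior is unchanged.

A minimal usage sketch follows (illustration only, not part of the
patch; kTag and the three functions are invented names). The callbacks
can run from inside the allocator, so the sketch deliberately avoids
allocating in them and hands out a pointer to process-lifetime storage:

    #include <cstdio>

    #include "tcmalloc/malloc_extension.h"

    namespace {
    const char kTag[] = "my-subsystem";  // process-lifetime tag storage

    // Invoked when an allocation is sampled; the result is stored in
    // the sample's user_data.
    void* CreateTag() { return const_cast<char*>(kTag); }
    // Invoked when a sample is copied into a profile table; a pointer
    // to static storage needs no deep copy.
    void* CopyTag(void* tag) { return tag; }
    // Invoked when a sample is released; nothing to free here.
    void DestroyTag(void* /*tag*/) {}
    }  // namespace

    int main() {
      tcmalloc::MallocExtension::SetSampleUserDataCallbacks(
          CreateTag, CopyTag, DestroyTag);
      // ... run the workload, then read tags back from a heap profile.
      const tcmalloc::Profile profile =
          tcmalloc::MallocExtension::SnapshotCurrent(
              tcmalloc::ProfileType::kHeap);
      profile.Iterate([](const tcmalloc::Profile::Sample& s) {
        if (s.user_data != nullptr)
          std::printf("tag: %s\n", static_cast<const char*>(s.user_data));
      });
      return 0;
    }
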
--- contrib/libs/tcmalloc/tcmalloc/internal/logging.h	(index) 
+++ contrib/libs/tcmalloc/tcmalloc/internal/logging.h	(working tree) 
@@ -67,6 +67,9 @@ struct StackTrace { 
   // between the previous sample and this one 
   size_t weight; 
  
+  // Opaque user data; lifetime is managed via SetSampleUserDataCallbacks(). 
+  void* user_data; 
+ 
   template <typename H> 
   friend H AbslHashValue(H h, const StackTrace& t) { 
     // As we use StackTrace as a key-value node in StackTraceTable, we only 
--- contrib/libs/tcmalloc/tcmalloc/internal_malloc_extension.h	(index) 
+++ contrib/libs/tcmalloc/tcmalloc/internal_malloc_extension.h	(working tree) 
@@ -120,6 +120,12 @@ ABSL_ATTRIBUTE_WEAK void MallocExtension_Internal_SetMaxTotalThreadCacheBytes( 
 ABSL_ATTRIBUTE_WEAK void 
 MallocExtension_EnableForkSupport(); 
  
+ABSL_ATTRIBUTE_WEAK void 
+MallocExtension_SetSampleUserDataCallbacks( 
+    tcmalloc::MallocExtension::CreateSampleUserDataCallback create, 
+    tcmalloc::MallocExtension::CopySampleUserDataCallback copy, 
+    tcmalloc::MallocExtension::DestroySampleUserDataCallback destroy); 
+ 
 } 
  
 #endif 
--- contrib/libs/tcmalloc/tcmalloc/malloc_extension.cc	(index) 
+++ contrib/libs/tcmalloc/tcmalloc/malloc_extension.cc	(working tree) 
@@ -468,6 +468,21 @@ void MallocExtension::EnableForkSupport() { 
 #endif 
 } 
  
+void MallocExtension::SetSampleUserDataCallbacks( 
+    CreateSampleUserDataCallback create, 
+    CopySampleUserDataCallback copy, 
+    DestroySampleUserDataCallback destroy) { 
+#if ABSL_INTERNAL_HAVE_WEAK_MALLOCEXTENSION_STUBS 
+  if (&MallocExtension_SetSampleUserDataCallbacks != nullptr) { 
+    MallocExtension_SetSampleUserDataCallbacks(create, copy, destroy); 
+  } 
+#else 
+  (void)create; 
+  (void)copy; 
+  (void)destroy; 
+#endif 
+} 
+ 
 }  // namespace tcmalloc 
  
 // Default implementation just returns size. The expectation is that 
--- contrib/libs/tcmalloc/tcmalloc/malloc_extension.h	(index) 
+++ contrib/libs/tcmalloc/tcmalloc/malloc_extension.h	(working tree) 
@@ -94,6 +94,9 @@ class Profile final { 
  
     int depth; 
     void* stack[kMaxStackDepth]; 
+ 
+    // Opaque user data carried over from the sampled allocation. 
+    void* user_data; 
   }; 
  
   void Iterate(absl::FunctionRef<void(const Sample&)> f) const; 
@@ -472,6 +475,16 @@ class MallocExtension final { 
   // Enables fork support. 
   // Allocator will continue to function correctly in the child, after calling fork(). 
   static void EnableForkSupport(); 
+ 
+  using CreateSampleUserDataCallback = void*(); 
+  using CopySampleUserDataCallback = void*(void*); 
+  using DestroySampleUserDataCallback = void(void*); 
+ 
+  // Sets callbacks that manage the lifetime of custom user data attached to allocation samples. 
+  static void SetSampleUserDataCallbacks( 
+    CreateSampleUserDataCallback create, 
+    CopySampleUserDataCallback copy, 
+    DestroySampleUserDataCallback destroy); 
 }; 
  
 }  // namespace tcmalloc 
--- contrib/libs/tcmalloc/tcmalloc/peak_heap_tracker.cc	(index) 
+++ contrib/libs/tcmalloc/tcmalloc/peak_heap_tracker.cc	(working tree) 
@@ -55,6 +55,7 @@ void PeakHeapTracker::MaybeSaveSample() { 
   StackTrace *t = peak_sampled_span_stacks_, *next = nullptr; 
   while (t != nullptr) { 
     next = reinterpret_cast<StackTrace*>(t->stack[kMaxStackDepth - 1]); 
+    Static::DestroySampleUserData(t->user_data); 
     Static::stacktrace_allocator().Delete(t); 
     t = next; 
   } 
@@ -63,7 +64,9 @@ void PeakHeapTracker::MaybeSaveSample() { 
   for (Span* s : Static::sampled_objects_) { 
     t = Static::stacktrace_allocator().New(); 
  
-    *t = *s->sampled_stack(); 
+    StackTrace* sampled_stack = s->sampled_stack(); 
+    *t = *sampled_stack; 
+    t->user_data = Static::CopySampleUserData(sampled_stack->user_data); 
     if (t->depth == kMaxStackDepth) { 
       t->depth = kMaxStackDepth - 1; 
     } 
--- contrib/libs/tcmalloc/tcmalloc/stack_trace_table.cc	(index) 
+++ contrib/libs/tcmalloc/tcmalloc/stack_trace_table.cc	(working tree) 
@@ -73,6 +73,7 @@ StackTraceTable::~StackTraceTable() { 
       Bucket* b = table_[i]; 
       while (b != nullptr) { 
         Bucket* next = b->next; 
+        Static::DestroySampleUserData(b->trace.user_data); 
         Static::bucket_allocator().Delete(b); 
         b = next; 
       } 
@@ -104,6 +105,7 @@ void StackTraceTable::AddTrace(double count, const StackTrace& t) { 
     b = Static::bucket_allocator().New(); 
     b->hash = h; 
     b->trace = t; 
+    b->trace.user_data = Static::CopySampleUserData(t.user_data); 
     b->count = count; 
     b->total_weight = t.weight * count; 
     b->next = table_[idx]; 
@@ -135,6 +137,8 @@ void StackTraceTable::Iterate( 
       e.requested_alignment = b->trace.requested_alignment; 
       e.allocated_size = allocated_size; 
  
+      e.user_data = b->trace.user_data; 
+ 
       e.depth = b->trace.depth; 
       static_assert(kMaxStackDepth <= Profile::Sample::kMaxStackDepth, 
                     "Profile stack size smaller than internal stack sizes"); 
--- contrib/libs/tcmalloc/tcmalloc/static_vars.cc	(index) 
+++ contrib/libs/tcmalloc/tcmalloc/static_vars.cc	(working tree) 
@@ -60,6 +60,12 @@ ABSL_CONST_INIT PageHeapAllocator<StackTraceTable::Bucket> 
 ABSL_CONST_INIT std::atomic<bool> Static::inited_{false}; 
 ABSL_CONST_INIT bool Static::cpu_cache_active_ = false; 
 ABSL_CONST_INIT bool Static::fork_support_enabled_ = false; 
+ABSL_CONST_INIT Static::CreateSampleUserDataCallback* 
+    Static::create_sample_user_data_callback_ = nullptr; 
+ABSL_CONST_INIT Static::CopySampleUserDataCallback* 
+    Static::copy_sample_user_data_callback_ = nullptr; 
+ABSL_CONST_INIT Static::DestroySampleUserDataCallback* 
+    Static::destroy_sample_user_data_callback_ = nullptr; 
 ABSL_CONST_INIT Static::PageAllocatorStorage Static::page_allocator_; 
 ABSL_CONST_INIT PageMap Static::pagemap_; 
 ABSL_CONST_INIT absl::base_internal::SpinLock guarded_page_lock( 
--- contrib/libs/tcmalloc/tcmalloc/static_vars.h	(index) 
+++ contrib/libs/tcmalloc/tcmalloc/static_vars.h	(working tree) 
@@ -130,6 +130,34 @@ class Static { 
   static bool ForkSupportEnabled() { return fork_support_enabled_; } 
   static void EnableForkSupport() { fork_support_enabled_ = true; } 
  
+  using CreateSampleUserDataCallback = void*(); 
+  using CopySampleUserDataCallback = void*(void*); 
+  using DestroySampleUserDataCallback = void(void*); 
+ 
+  static void SetSampleUserDataCallbacks( 
+      CreateSampleUserDataCallback create, 
+      CopySampleUserDataCallback copy, 
+      DestroySampleUserDataCallback destroy) { 
+    create_sample_user_data_callback_ = create; 
+    copy_sample_user_data_callback_ = copy; 
+    destroy_sample_user_data_callback_ = destroy; 
+  } 
+ 
+  static void* CreateSampleUserData() { 
+    if (create_sample_user_data_callback_) 
+      return create_sample_user_data_callback_(); 
+    return nullptr; 
+  } 
+  static void* CopySampleUserData(void* user_data) { 
+    if (copy_sample_user_data_callback_) 
+      return copy_sample_user_data_callback_(user_data); 
+    return nullptr; 
+  } 
+  static void DestroySampleUserData(void* user_data) { 
+    if (destroy_sample_user_data_callback_) 
+      destroy_sample_user_data_callback_(user_data); 
+  } 
+ 
   static bool ABSL_ATTRIBUTE_ALWAYS_INLINE IsOnFastPath() { 
     return 
 #ifndef TCMALLOC_DEPRECATED_PERTHREAD 
@@ -176,6 +204,9 @@ class Static { 
   ABSL_CONST_INIT static std::atomic<bool> inited_; 
   static bool cpu_cache_active_; 
   static bool fork_support_enabled_; 
+  static CreateSampleUserDataCallback* create_sample_user_data_callback_; 
+  static CopySampleUserDataCallback* copy_sample_user_data_callback_; 
+  static DestroySampleUserDataCallback* destroy_sample_user_data_callback_; 
   ABSL_CONST_INIT static PeakHeapTracker peak_heap_tracker_; 
   ABSL_CONST_INIT static NumaTopology<kNumaPartitions, kNumBaseClasses> 
       numa_topology_; 
--- contrib/libs/tcmalloc/tcmalloc/tcmalloc.cc	(index) 
+++ contrib/libs/tcmalloc/tcmalloc/tcmalloc.cc	(working tree) 
@@ -1151,6 +1151,13 @@ void TCMallocPostFork() { 
   } 
 } 
  
+extern "C" void MallocExtension_SetSampleUserDataCallbacks( 
+    MallocExtension::CreateSampleUserDataCallback create, 
+    MallocExtension::CopySampleUserDataCallback copy, 
+    MallocExtension::DestroySampleUserDataCallback destroy) { 
+  Static::SetSampleUserDataCallbacks(create, copy, destroy); 
+} 
+ 
 // nallocx slow path. 
 // Moved to a separate function because size_class_with_alignment is not inlined 
 // which would cause nallocx to become non-leaf function with stack frame and 
@@ -1500,6 +1507,7 @@ static void* SampleifyAllocation(size_t requested_size, size_t weight, 
   tmp.requested_alignment = requested_alignment; 
   tmp.allocated_size = allocated_size; 
   tmp.weight = weight; 
+  tmp.user_data = Static::CreateSampleUserData(); 
  
   { 
     absl::base_internal::SpinLockHolder h(&pageheap_lock); 
@@ -1629,6 +1637,7 @@ static void do_free_pages(void* ptr, const PageId p) { 
                          1); 
       } 
       notify_sampled_alloc = true; 
+      Static::DestroySampleUserData(st->user_data); 
       Static::stacktrace_allocator().Delete(st); 
     } 
     if (IsSampledMemory(ptr)) {
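
The MallocExtension::SetSampleUserDataCallbacks() wrapper added to
malloc_extension.cc above follows the file's existing weak-symbol
dispatch pattern, guarded by ABSL_INTERNAL_HAVE_WEAK_MALLOCEXTENSION_STUBS:
the extern "C" entry point is declared ABSL_ATTRIBUTE_WEAK, so when
TCMalloc is not the linked allocator the symbol's address is null and
the wrapper quietly does nothing. A reduced sketch of the pattern
(Provider_DoThing and DoThing are invented names):

    // Weak declaration: stays unresolved (null) if no provider is linked.
    extern "C" ABSL_ATTRIBUTE_WEAK void Provider_DoThing(int x);

    // Caller side: invoke only if the symbol was resolved at link time.
    void DoThing(int x) {
      if (&Provider_DoThing != nullptr) {
        Provider_DoThing(x);
      }
    }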