author     nkozlovskiy <nmk@ydb.tech> 2023-12-04 19:26:35 +0300
committer  nkozlovskiy <nmk@ydb.tech> 2023-12-05 05:25:43 +0300
commit     e62474f851635573f9f6631039e113a02fd50179 (patch)
tree       597d4bc8aad74ef42c55fd062398e93eceebfee3 /contrib/libs/clang16-rt/lib/lsan
parent     e7eddec34be4f360877b46ffa2b70fde8a3a5b8f (diff)
download   ydb-e62474f851635573f9f6631039e113a02fd50179.tar.gz
ydb-oss sync: add clang16-rt/ to additionalPathsToCopy
Diffstat (limited to 'contrib/libs/clang16-rt/lib/lsan')
-rw-r--r--  contrib/libs/clang16-rt/lib/lsan/.yandex_meta/licenses.list.txt | 377
-rw-r--r--  contrib/libs/clang16-rt/lib/lsan/lsan.cpp | 115
-rw-r--r--  contrib/libs/clang16-rt/lib/lsan/lsan.h | 56
-rw-r--r--  contrib/libs/clang16-rt/lib/lsan/lsan_allocator.cpp | 367
-rw-r--r--  contrib/libs/clang16-rt/lib/lsan/lsan_allocator.h | 117
-rw-r--r--  contrib/libs/clang16-rt/lib/lsan/lsan_common.cpp | 1089
-rw-r--r--  contrib/libs/clang16-rt/lib/lsan/lsan_common.h | 350
-rw-r--r--  contrib/libs/clang16-rt/lib/lsan/lsan_common_fuchsia.cpp | 167
-rw-r--r--  contrib/libs/clang16-rt/lib/lsan/lsan_common_linux.cpp | 147
-rw-r--r--  contrib/libs/clang16-rt/lib/lsan/lsan_common_mac.cpp | 238
-rw-r--r--  contrib/libs/clang16-rt/lib/lsan/lsan_flags.inc | 46
-rw-r--r--  contrib/libs/clang16-rt/lib/lsan/lsan_fuchsia.cpp | 129
-rw-r--r--  contrib/libs/clang16-rt/lib/lsan/lsan_fuchsia.h | 35
-rw-r--r--  contrib/libs/clang16-rt/lib/lsan/lsan_interceptors.cpp | 542
-rw-r--r--  contrib/libs/clang16-rt/lib/lsan/lsan_linux.cpp | 32
-rw-r--r--  contrib/libs/clang16-rt/lib/lsan/lsan_mac.cpp | 191
-rw-r--r--  contrib/libs/clang16-rt/lib/lsan/lsan_malloc_mac.cpp | 59
-rw-r--r--  contrib/libs/clang16-rt/lib/lsan/lsan_posix.cpp | 102
-rw-r--r--  contrib/libs/clang16-rt/lib/lsan/lsan_posix.h | 49
-rw-r--r--  contrib/libs/clang16-rt/lib/lsan/lsan_preinit.cpp | 21
-rw-r--r--  contrib/libs/clang16-rt/lib/lsan/lsan_thread.cpp | 102
-rw-r--r--  contrib/libs/clang16-rt/lib/lsan/lsan_thread.h | 60
-rw-r--r--  contrib/libs/clang16-rt/lib/lsan/ya.make | 131
23 files changed, 4522 insertions, 0 deletions
diff --git a/contrib/libs/clang16-rt/lib/lsan/.yandex_meta/licenses.list.txt b/contrib/libs/clang16-rt/lib/lsan/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..0dd5fc5a3c
--- /dev/null
+++ b/contrib/libs/clang16-rt/lib/lsan/.yandex_meta/licenses.list.txt
@@ -0,0 +1,377 @@
+====================Apache-2.0====================
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+---- LLVM Exceptions to the Apache 2.0 License ----
+
+As an exception, if, as a result of your compiling your source code, portions
+of this Software are embedded into an Object form of such source code, you
+may redistribute such embedded portions in such Object form without complying
+with the conditions of Sections 4(a), 4(b) and 4(d) of the License.
+
+In addition, if you combine or link compiled forms of this Software with
+software that is licensed under the GPLv2 ("Combined Software") and if a
+court of competent jurisdiction determines that the patent provision (Section
+3), the indemnity provision (Section 9) or other Section of the License
+conflicts with the conditions of the GPLv2, you may retroactively and
+prospectively choose to deem waived or otherwise exclude such Section(s) of
+the License, but only in their entirety and only with respect to the Combined
+Software.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+The LLVM Project is under the Apache License v2.0 with LLVM Exceptions:
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+|* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+|* See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+
+====================COPYRIGHT====================
+ InitCache(c);
+ TransferBatch *b = allocator->AllocateBatch(&stats_, this, class_id);
+ if (UNLIKELY(!b))
+
+
+====================COPYRIGHT====================
+ initCacheMaybe(C);
+ TransferBatch *B = Allocator->popBatch(this, ClassId);
+ if (UNLIKELY(!B))
+
+
+====================COPYRIGHT====================
+// Calling getenv should be fine (c)(tm) at any time.
+const char *getEnv(const char *Name) { return getenv(Name); }
+
+
+====================COPYRIGHT====================
+Copyright (c) 2009-2015 by the contributors listed in CREDITS.TXT
+
+
+====================COPYRIGHT====================
+Copyright (c) 2009-2019 by the contributors listed in CREDITS.TXT
+
+
+====================File: CREDITS.TXT====================
+This file is a partial list of people who have contributed to the LLVM/CompilerRT
+project. If you have contributed a patch or made some other contribution to
+LLVM/CompilerRT, please submit a patch to this file to add yourself, and it will be
+done!
+
+The list is sorted by surname and formatted to allow easy grepping and
+beautification by scripts. The fields are: name (N), email (E), web-address
+(W), PGP key ID and fingerprint (P), description (D), and snail-mail address
+(S).
+
+N: Craig van Vliet
+E: cvanvliet@auroraux.org
+W: http://www.auroraux.org
+D: Code style and Readability fixes.
+
+N: Edward O'Callaghan
+E: eocallaghan@auroraux.org
+W: http://www.auroraux.org
+D: CMake'ify Compiler-RT build system
+D: Maintain Solaris & AuroraUX ports of Compiler-RT
+
+N: Howard Hinnant
+E: hhinnant@apple.com
+D: Architect and primary author of compiler-rt
+
+N: Guan-Hong Liu
+E: koviankevin@hotmail.com
+D: IEEE Quad-precision functions
+
+N: Joerg Sonnenberger
+E: joerg@NetBSD.org
+D: Maintains NetBSD port.
+
+N: Matt Thomas
+E: matt@NetBSD.org
+D: ARM improvements.
+
+
+====================MIT====================
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+====================NCSA====================
+Compiler-RT is open source software. You may freely distribute it under the
+terms of the license agreement found in LICENSE.txt.
+
+
+====================NCSA====================
+Legacy LLVM License (https://llvm.org/docs/DeveloperPolicy.html#legacy):
+
+
+====================NCSA====================
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal with
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimers.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimers in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the names of the LLVM Team, University of Illinois at
+ Urbana-Champaign, nor the names of its contributors may be used to
+ endorse or promote products derived from this Software without specific
+ prior written permission.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+SOFTWARE.
+
+
+====================NCSA====================
+University of Illinois/NCSA
+Open Source License
+
+
+====================NCSA AND MIT====================
+The compiler_rt library is dual licensed under both the University of Illinois
+"BSD-Like" license and the MIT license. As a user of this code you may choose
+to use it under either license. As a contributor, you agree to allow your code
+to be used under both.
+
+Full text of the relevant licenses is included below.
diff --git a/contrib/libs/clang16-rt/lib/lsan/lsan.cpp b/contrib/libs/clang16-rt/lib/lsan/lsan.cpp
new file mode 100644
index 0000000000..489c5ca01f
--- /dev/null
+++ b/contrib/libs/clang16-rt/lib/lsan/lsan.cpp
@@ -0,0 +1,115 @@
+//=-- lsan.cpp ------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Standalone LSan RTL.
+//
+//===----------------------------------------------------------------------===//
+
+#include "lsan.h"
+
+#include "lsan_allocator.h"
+#include "lsan_common.h"
+#include "lsan_thread.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_interface_internal.h"
+
+bool lsan_inited;
+bool lsan_init_is_running;
+
+namespace __lsan {
+
+///// Interface to the common LSan module. /////
+bool WordIsPoisoned(uptr addr) {
+ return false;
+}
+
+} // namespace __lsan
+
+void __sanitizer::BufferedStackTrace::UnwindImpl(
+ uptr pc, uptr bp, void *context, bool request_fast, u32 max_depth) {
+ using namespace __lsan;
+ uptr stack_top = 0, stack_bottom = 0;
+ if (ThreadContext *t = CurrentThreadContext()) {
+ stack_top = t->stack_end();
+ stack_bottom = t->stack_begin();
+ }
+ if (SANITIZER_MIPS && !IsValidFrame(bp, stack_top, stack_bottom))
+ return;
+ bool fast = StackTrace::WillUseFastUnwind(request_fast);
+ Unwind(max_depth, pc, bp, context, stack_top, stack_bottom, fast);
+}
+
+using namespace __lsan;
+
+static void InitializeFlags() {
+ // Set all the default values.
+ SetCommonFlagsDefaults();
+ {
+ CommonFlags cf;
+ cf.CopyFrom(*common_flags());
+ cf.external_symbolizer_path = GetEnv("LSAN_SYMBOLIZER_PATH");
+ cf.malloc_context_size = 30;
+ cf.intercept_tls_get_addr = true;
+ cf.detect_leaks = true;
+ cf.exitcode = 23;
+ OverrideCommonFlags(cf);
+ }
+
+ Flags *f = flags();
+ f->SetDefaults();
+
+ FlagParser parser;
+ RegisterLsanFlags(&parser, f);
+ RegisterCommonFlags(&parser);
+
+ // Override from user-specified string.
+ const char *lsan_default_options = __lsan_default_options();
+ parser.ParseString(lsan_default_options);
+ parser.ParseStringFromEnv("LSAN_OPTIONS");
+
+ InitializeCommonFlags();
+
+ if (Verbosity()) ReportUnrecognizedFlags();
+
+ if (common_flags()->help) parser.PrintFlagDescriptions();
+
+ __sanitizer_set_report_path(common_flags()->log_path);
+}
+
+extern "C" void __lsan_init() {
+ CHECK(!lsan_init_is_running);
+ if (lsan_inited)
+ return;
+ lsan_init_is_running = true;
+ SanitizerToolName = "LeakSanitizer";
+ CacheBinaryName();
+ AvoidCVE_2016_2143();
+ InitializeFlags();
+ InitCommonLsan();
+ InitializeAllocator();
+ ReplaceSystemMalloc();
+ InitTlsSize();
+ InitializeInterceptors();
+ InitializeThreadRegistry();
+ InstallDeadlySignalHandlers(LsanOnDeadlySignal);
+ InitializeMainThread();
+ InstallAtExitCheckLeaks();
+
+ InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir);
+
+ lsan_inited = true;
+ lsan_init_is_running = false;
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_print_stack_trace() {
+ GET_STACK_TRACE_FATAL;
+ stack.Print();
+}
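
Note on the flag plumbing above: InitializeFlags() applies settings in
increasing priority, defaults first, then the weak __lsan_default_options()
hook, then the LSAN_OPTIONS environment variable. A minimal sketch of user
code driving that hook (the hook name is as declared by the RTL above; the
flag string itself is illustrative, using flags from lsan_flags.inc):

    // Defined in a user translation unit linked against standalone LSan.
    // InitializeFlags() parses this before LSAN_OPTIONS, so the environment
    // variable can still override anything set here.
    extern "C" const char *__lsan_default_options() {
      return "log_pointers=1:use_stacks=0";
    }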
diff --git a/contrib/libs/clang16-rt/lib/lsan/lsan.h b/contrib/libs/clang16-rt/lib/lsan/lsan.h
new file mode 100644
index 0000000000..757edec8e1
--- /dev/null
+++ b/contrib/libs/clang16-rt/lib/lsan/lsan.h
@@ -0,0 +1,56 @@
+//=-- lsan.h --------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Private header for standalone LSan RTL.
+//
+//===----------------------------------------------------------------------===//
+
+#include "lsan_thread.h"
+#if SANITIZER_POSIX
+# include "lsan_posix.h"
+#elif SANITIZER_FUCHSIA
+# include "lsan_fuchsia.h"
+#endif
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+
+#define GET_STACK_TRACE(max_size, fast) \
+ __sanitizer::BufferedStackTrace stack; \
+ stack.Unwind(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), nullptr, fast, \
+ max_size);
+
+#define GET_STACK_TRACE_FATAL \
+ GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_fatal)
+
+#define GET_STACK_TRACE_MALLOC \
+ GET_STACK_TRACE(__sanitizer::common_flags()->malloc_context_size, \
+ common_flags()->fast_unwind_on_malloc)
+
+#define GET_STACK_TRACE_THREAD GET_STACK_TRACE(kStackTraceMax, true)
+
+namespace __lsan {
+
+void InitializeInterceptors();
+void ReplaceSystemMalloc();
+void LsanOnDeadlySignal(int signo, void *siginfo, void *context);
+void InstallAtExitCheckLeaks();
+
+#define ENSURE_LSAN_INITED \
+ do { \
+ CHECK(!lsan_init_is_running); \
+ if (!lsan_inited) \
+ __lsan_init(); \
+ } while (0)
+
+} // namespace __lsan
+
+extern bool lsan_inited;
+extern bool lsan_init_is_running;
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __lsan_init();
diff --git a/contrib/libs/clang16-rt/lib/lsan/lsan_allocator.cpp b/contrib/libs/clang16-rt/lib/lsan/lsan_allocator.cpp
new file mode 100644
index 0000000000..b18d829a1a
--- /dev/null
+++ b/contrib/libs/clang16-rt/lib/lsan/lsan_allocator.cpp
@@ -0,0 +1,367 @@
+//=-- lsan_allocator.cpp --------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// See lsan_allocator.h for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "lsan_allocator.h"
+
+#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_checks.h"
+#include "sanitizer_common/sanitizer_allocator_interface.h"
+#include "sanitizer_common/sanitizer_allocator_report.h"
+#include "sanitizer_common/sanitizer_errno.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "lsan_common.h"
+
+extern "C" void *memset(void *ptr, int value, uptr num);
+
+namespace __lsan {
+#if defined(__i386__) || defined(__arm__)
+static const uptr kMaxAllowedMallocSize = 1ULL << 30;
+#elif defined(__mips64) || defined(__aarch64__)
+static const uptr kMaxAllowedMallocSize = 4ULL << 30;
+#else
+static const uptr kMaxAllowedMallocSize = 8ULL << 30;
+#endif
+
+static Allocator allocator;
+
+static uptr max_malloc_size;
+
+void InitializeAllocator() {
+ SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
+ allocator.InitLinkerInitialized(
+ common_flags()->allocator_release_to_os_interval_ms);
+ if (common_flags()->max_allocation_size_mb)
+ max_malloc_size = Min(common_flags()->max_allocation_size_mb << 20,
+ kMaxAllowedMallocSize);
+ else
+ max_malloc_size = kMaxAllowedMallocSize;
+}
+
+void AllocatorThreadFinish() {
+ allocator.SwallowCache(GetAllocatorCache());
+}
+
+static ChunkMetadata *Metadata(const void *p) {
+ return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
+}
+
+static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
+ if (!p) return;
+ ChunkMetadata *m = Metadata(p);
+ CHECK(m);
+ m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
+ m->stack_trace_id = StackDepotPut(stack);
+ m->requested_size = size;
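+  // The relaxed store below publishes ChunkMetadata::allocated; that
+  // bit-field must stay first, since the cast aliases the struct's first byte.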
+ atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
+}
+
+static void RegisterDeallocation(void *p) {
+ if (!p) return;
+ ChunkMetadata *m = Metadata(p);
+ CHECK(m);
+ atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
+}
+
+static void *ReportAllocationSizeTooBig(uptr size, const StackTrace &stack) {
+ if (AllocatorMayReturnNull()) {
+ Report("WARNING: LeakSanitizer failed to allocate 0x%zx bytes\n", size);
+ return nullptr;
+ }
+ ReportAllocationSizeTooBig(size, max_malloc_size, &stack);
+}
+
+void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
+ bool cleared) {
+ if (size == 0)
+ size = 1;
+ if (size > max_malloc_size)
+ return ReportAllocationSizeTooBig(size, stack);
+ if (UNLIKELY(IsRssLimitExceeded())) {
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportRssLimitExceeded(&stack);
+ }
+ void *p = allocator.Allocate(GetAllocatorCache(), size, alignment);
+ if (UNLIKELY(!p)) {
+ SetAllocatorOutOfMemory();
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportOutOfMemory(size, &stack);
+ }
+ // Do not rely on the allocator to clear the memory (it's slow).
+ if (cleared && allocator.FromPrimary(p))
+ memset(p, 0, size);
+ RegisterAllocation(stack, p, size);
+ RunMallocHooks(p, size);
+ return p;
+}
+
+static void *Calloc(uptr nmemb, uptr size, const StackTrace &stack) {
+ if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportCallocOverflow(nmemb, size, &stack);
+ }
+ size *= nmemb;
+ return Allocate(stack, size, 1, true);
+}
+
+void Deallocate(void *p) {
+ RunFreeHooks(p);
+ RegisterDeallocation(p);
+ allocator.Deallocate(GetAllocatorCache(), p);
+}
+
+void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
+ uptr alignment) {
+ if (new_size > max_malloc_size) {
+ ReportAllocationSizeTooBig(new_size, stack);
+ return nullptr;
+ }
+ RegisterDeallocation(p);
+ void *new_p =
+ allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment);
+ if (new_p)
+ RegisterAllocation(stack, new_p, new_size);
+ else if (new_size != 0)
+ RegisterAllocation(stack, p, new_size);
+ return new_p;
+}
+
+void GetAllocatorCacheRange(uptr *begin, uptr *end) {
+ *begin = (uptr)GetAllocatorCache();
+ *end = *begin + sizeof(AllocatorCache);
+}
+
+uptr GetMallocUsableSize(const void *p) {
+ if (!p)
+ return 0;
+ ChunkMetadata *m = Metadata(p);
+ if (!m) return 0;
+ return m->requested_size;
+}
+
+int lsan_posix_memalign(void **memptr, uptr alignment, uptr size,
+ const StackTrace &stack) {
+ if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
+ if (AllocatorMayReturnNull())
+ return errno_EINVAL;
+ ReportInvalidPosixMemalignAlignment(alignment, &stack);
+ }
+ void *ptr = Allocate(stack, size, alignment, kAlwaysClearMemory);
+ if (UNLIKELY(!ptr))
+ // OOM error is already taken care of by Allocate.
+ return errno_ENOMEM;
+ CHECK(IsAligned((uptr)ptr, alignment));
+ *memptr = ptr;
+ return 0;
+}
+
+void *lsan_aligned_alloc(uptr alignment, uptr size, const StackTrace &stack) {
+ if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
+ errno = errno_EINVAL;
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportInvalidAlignedAllocAlignment(size, alignment, &stack);
+ }
+ return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
+}
+
+void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack) {
+ if (UNLIKELY(!IsPowerOfTwo(alignment))) {
+ errno = errno_EINVAL;
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportInvalidAllocationAlignment(alignment, &stack);
+ }
+ return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
+}
+
+void *lsan_malloc(uptr size, const StackTrace &stack) {
+ return SetErrnoOnNull(Allocate(stack, size, 1, kAlwaysClearMemory));
+}
+
+void lsan_free(void *p) {
+ Deallocate(p);
+}
+
+void *lsan_realloc(void *p, uptr size, const StackTrace &stack) {
+ return SetErrnoOnNull(Reallocate(stack, p, size, 1));
+}
+
+void *lsan_reallocarray(void *ptr, uptr nmemb, uptr size,
+ const StackTrace &stack) {
+ if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
+ errno = errno_ENOMEM;
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportReallocArrayOverflow(nmemb, size, &stack);
+ }
+ return lsan_realloc(ptr, nmemb * size, stack);
+}
+
+void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack) {
+ return SetErrnoOnNull(Calloc(nmemb, size, stack));
+}
+
+void *lsan_valloc(uptr size, const StackTrace &stack) {
+ return SetErrnoOnNull(
+ Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory));
+}
+
+void *lsan_pvalloc(uptr size, const StackTrace &stack) {
+ uptr PageSize = GetPageSizeCached();
+ if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
+ errno = errno_ENOMEM;
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportPvallocOverflow(size, &stack);
+ }
+ // pvalloc(0) should allocate one page.
+ size = size ? RoundUpTo(size, PageSize) : PageSize;
+ return SetErrnoOnNull(Allocate(stack, size, PageSize, kAlwaysClearMemory));
+}
+
+uptr lsan_mz_size(const void *p) {
+ return GetMallocUsableSize(p);
+}
+
+///// Interface to the common LSan module. /////
+
+void LockAllocator() {
+ allocator.ForceLock();
+}
+
+void UnlockAllocator() {
+ allocator.ForceUnlock();
+}
+
+void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
+ *begin = (uptr)&allocator;
+ *end = *begin + sizeof(allocator);
+}
+
+uptr PointsIntoChunk(void* p) {
+ uptr addr = reinterpret_cast<uptr>(p);
+ uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
+ if (!chunk) return 0;
+ // LargeMmapAllocator considers pointers to the meta-region of a chunk to be
+ // valid, but we don't want that.
+ if (addr < chunk) return 0;
+ ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
+ CHECK(m);
+ if (!m->allocated)
+ return 0;
+ if (addr < chunk + m->requested_size)
+ return chunk;
+ if (IsSpecialCaseOfOperatorNew0(chunk, m->requested_size, addr))
+ return chunk;
+ return 0;
+}
+
+uptr GetUserBegin(uptr chunk) {
+ return chunk;
+}
+
+LsanMetadata::LsanMetadata(uptr chunk) {
+ metadata_ = Metadata(reinterpret_cast<void *>(chunk));
+ CHECK(metadata_);
+}
+
+bool LsanMetadata::allocated() const {
+ return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated;
+}
+
+ChunkTag LsanMetadata::tag() const {
+ return reinterpret_cast<ChunkMetadata *>(metadata_)->tag;
+}
+
+void LsanMetadata::set_tag(ChunkTag value) {
+ reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value;
+}
+
+uptr LsanMetadata::requested_size() const {
+ return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size;
+}
+
+u32 LsanMetadata::stack_trace_id() const {
+ return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
+}
+
+void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+ allocator.ForEachChunk(callback, arg);
+}
+
+IgnoreObjectResult IgnoreObjectLocked(const void *p) {
+ void *chunk = allocator.GetBlockBegin(p);
+ if (!chunk || p < chunk) return kIgnoreObjectInvalid;
+ ChunkMetadata *m = Metadata(chunk);
+ CHECK(m);
+ if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) {
+ if (m->tag == kIgnored)
+ return kIgnoreObjectAlreadyIgnored;
+ m->tag = kIgnored;
+ return kIgnoreObjectSuccess;
+ } else {
+ return kIgnoreObjectInvalid;
+ }
+}
+
+void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs) {
+ // This function can be used to treat memory reachable from `tctx` as live.
+ // This is useful for threads that have been created but not yet started.
+
+ // This is currently a no-op because the LSan `pthread_create()` interceptor
+ // blocks until the child thread starts which keeps the thread's `arg` pointer
+ // live.
+}
+
+} // namespace __lsan
+
+using namespace __lsan;
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_current_allocated_bytes() {
+ uptr stats[AllocatorStatCount];
+ allocator.GetStats(stats);
+ return stats[AllocatorStatAllocated];
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_heap_size() {
+ uptr stats[AllocatorStatCount];
+ allocator.GetStats(stats);
+ return stats[AllocatorStatMapped];
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_free_bytes() { return 0; }
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_unmapped_bytes() { return 0; }
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __sanitizer_get_ownership(const void *p) { return Metadata(p) != nullptr; }
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_allocated_size(const void *p) {
+ return GetMallocUsableSize(p);
+}
+
+} // extern "C"
diff --git a/contrib/libs/clang16-rt/lib/lsan/lsan_allocator.h b/contrib/libs/clang16-rt/lib/lsan/lsan_allocator.h
new file mode 100644
index 0000000000..b67d9d7750
--- /dev/null
+++ b/contrib/libs/clang16-rt/lib/lsan/lsan_allocator.h
@@ -0,0 +1,117 @@
+//=-- lsan_allocator.h ----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Allocator for standalone LSan.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LSAN_ALLOCATOR_H
+#define LSAN_ALLOCATOR_H
+
+#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "lsan_common.h"
+
+namespace __lsan {
+
+void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
+ bool cleared);
+void Deallocate(void *p);
+void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
+ uptr alignment);
+uptr GetMallocUsableSize(const void *p);
+
+template<typename Callable>
+void ForEachChunk(const Callable &callback);
+
+void GetAllocatorCacheRange(uptr *begin, uptr *end);
+void AllocatorThreadFinish();
+void InitializeAllocator();
+
+const bool kAlwaysClearMemory = true;
+
+struct ChunkMetadata {
+ u8 allocated : 8; // Must be first.
+ ChunkTag tag : 2;
+#if SANITIZER_WORDSIZE == 64
+ uptr requested_size : 54;
+#else
+ uptr requested_size : 32;
+ uptr padding : 22;
+#endif
+ u32 stack_trace_id;
+};
+
+#if !SANITIZER_CAN_USE_ALLOCATOR64
+template <typename AddressSpaceViewTy>
+struct AP32 {
+ static const uptr kSpaceBeg = 0;
+ static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
+ static const uptr kMetadataSize = sizeof(ChunkMetadata);
+ typedef __sanitizer::CompactSizeClassMap SizeClassMap;
+ static const uptr kRegionSizeLog = 20;
+ using AddressSpaceView = AddressSpaceViewTy;
+ typedef NoOpMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+};
+template <typename AddressSpaceView>
+using PrimaryAllocatorASVT = SizeClassAllocator32<AP32<AddressSpaceView>>;
+using PrimaryAllocator = PrimaryAllocatorASVT<LocalAddressSpaceView>;
+#else
+# if SANITIZER_FUCHSIA || defined(__powerpc64__)
+const uptr kAllocatorSpace = ~(uptr)0;
+const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
+#elif defined(__s390x__)
+const uptr kAllocatorSpace = 0x40000000000ULL;
+const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
+# else
+const uptr kAllocatorSpace = 0x600000000000ULL;
+const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
+# endif
+template <typename AddressSpaceViewTy>
+struct AP64 { // Allocator64 parameters. Deliberately using a short name.
+ static const uptr kSpaceBeg = kAllocatorSpace;
+ static const uptr kSpaceSize = kAllocatorSize;
+ static const uptr kMetadataSize = sizeof(ChunkMetadata);
+ typedef DefaultSizeClassMap SizeClassMap;
+ typedef NoOpMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+ using AddressSpaceView = AddressSpaceViewTy;
+};
+
+template <typename AddressSpaceView>
+using PrimaryAllocatorASVT = SizeClassAllocator64<AP64<AddressSpaceView>>;
+using PrimaryAllocator = PrimaryAllocatorASVT<LocalAddressSpaceView>;
+#endif
+
+template <typename AddressSpaceView>
+using AllocatorASVT = CombinedAllocator<PrimaryAllocatorASVT<AddressSpaceView>>;
+using Allocator = AllocatorASVT<LocalAddressSpaceView>;
+using AllocatorCache = Allocator::AllocatorCache;
+
+Allocator::AllocatorCache *GetAllocatorCache();
+
+int lsan_posix_memalign(void **memptr, uptr alignment, uptr size,
+ const StackTrace &stack);
+void *lsan_aligned_alloc(uptr alignment, uptr size, const StackTrace &stack);
+void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack);
+void *lsan_malloc(uptr size, const StackTrace &stack);
+void lsan_free(void *p);
+void *lsan_realloc(void *p, uptr size, const StackTrace &stack);
+void *lsan_reallocarray(void *p, uptr nmemb, uptr size,
+ const StackTrace &stack);
+void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack);
+void *lsan_valloc(uptr size, const StackTrace &stack);
+void *lsan_pvalloc(uptr size, const StackTrace &stack);
+uptr lsan_mz_size(const void *p);
+
+} // namespace __lsan
+
+#endif // LSAN_ALLOCATOR_H
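
On 64-bit targets the ChunkMetadata bit-fields above (8 + 2 + 54 bits) share
a single word, and requested_size:54 comfortably covers kMaxAllowedMallocSize
(at most 8 GB). A sanity-check sketch compiled alongside this header; the
16-byte bound is illustrative, matching typical 64-bit ABIs:

    #include "lsan_allocator.h"

    // One 64-bit word of bit-fields plus a u32 stack_trace_id, padded to
    // uptr alignment: 16 bytes on common 64-bit ABIs.
    static_assert(sizeof(__lsan::ChunkMetadata) <= 16,
                  "per-chunk metadata should stay compact");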
diff --git a/contrib/libs/clang16-rt/lib/lsan/lsan_common.cpp b/contrib/libs/clang16-rt/lib/lsan/lsan_common.cpp
new file mode 100644
index 0000000000..1b47e83a10
--- /dev/null
+++ b/contrib/libs/clang16-rt/lib/lsan/lsan_common.cpp
@@ -0,0 +1,1089 @@
+//=-- lsan_common.cpp -----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Implementation of common leak checking functionality.
+//
+//===----------------------------------------------------------------------===//
+
+#include "lsan_common.h"
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_procmaps.h"
+#include "sanitizer_common/sanitizer_report_decorator.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "sanitizer_common/sanitizer_suppressions.h"
+#include "sanitizer_common/sanitizer_thread_registry.h"
+#include "sanitizer_common/sanitizer_tls_get_addr.h"
+
+#if CAN_SANITIZE_LEAKS
+
+# if SANITIZER_APPLE
+// https://github.com/apple-oss-distributions/objc4/blob/8701d5672d3fd3cd817aeb84db1077aafe1a1604/runtime/objc-runtime-new.h#L127
+# if SANITIZER_IOS && !SANITIZER_IOSSIM
+# define OBJC_DATA_MASK 0x0000007ffffffff8UL
+# else
+# define OBJC_DATA_MASK 0x00007ffffffffff8UL
+# endif
+// https://github.com/apple-oss-distributions/objc4/blob/8701d5672d3fd3cd817aeb84db1077aafe1a1604/runtime/objc-runtime-new.h#L139
+# define OBJC_FAST_IS_RW 0x8000000000000000UL
+# endif
+
+namespace __lsan {
+
+// This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and
+// also to protect the global list of root regions.
+Mutex global_mutex;
+
+Flags lsan_flags;
+
+void DisableCounterUnderflow() {
+ if (common_flags()->detect_leaks) {
+ Report("Unmatched call to __lsan_enable().\n");
+ Die();
+ }
+}
+
+void Flags::SetDefaults() {
+# define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+# include "lsan_flags.inc"
+# undef LSAN_FLAG
+}
+
+void RegisterLsanFlags(FlagParser *parser, Flags *f) {
+# define LSAN_FLAG(Type, Name, DefaultValue, Description) \
+ RegisterFlag(parser, #Name, Description, &f->Name);
+# include "lsan_flags.inc"
+# undef LSAN_FLAG
+}
+
+# define LOG_POINTERS(...) \
+ do { \
+ if (flags()->log_pointers) \
+ Report(__VA_ARGS__); \
+ } while (0)
+
+# define LOG_THREADS(...) \
+ do { \
+ if (flags()->log_threads) \
+ Report(__VA_ARGS__); \
+ } while (0)
+
+class LeakSuppressionContext {
+ bool parsed = false;
+ SuppressionContext context;
+ bool suppressed_stacks_sorted = true;
+ InternalMmapVector<u32> suppressed_stacks;
+ const LoadedModule *suppress_module = nullptr;
+
+ void LazyInit();
+ Suppression *GetSuppressionForAddr(uptr addr);
+ bool SuppressInvalid(const StackTrace &stack);
+ bool SuppressByRule(const StackTrace &stack, uptr hit_count, uptr total_size);
+
+ public:
+  LeakSuppressionContext(const char *suppression_types[],
+                         int suppression_types_num)
+      : context(suppression_types, suppression_types_num) {}
+
+ bool Suppress(u32 stack_trace_id, uptr hit_count, uptr total_size);
+
+ const InternalMmapVector<u32> &GetSortedSuppressedStacks() {
+ if (!suppressed_stacks_sorted) {
+ suppressed_stacks_sorted = true;
+ SortAndDedup(suppressed_stacks);
+ }
+ return suppressed_stacks;
+ }
+ void PrintMatchedSuppressions();
+};
+
+ALIGNED(64) static char suppression_placeholder[sizeof(LeakSuppressionContext)];
+static LeakSuppressionContext *suppression_ctx = nullptr;
+static const char kSuppressionLeak[] = "leak";
+static const char *kSuppressionTypes[] = {kSuppressionLeak};
+static const char kStdSuppressions[] =
+# if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
+ // For more details refer to the SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
+ // definition.
+ "leak:*pthread_exit*\n"
+# endif // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
+# if SANITIZER_APPLE
+ // For Darwin and os_log/os_trace: https://reviews.llvm.org/D35173
+ "leak:*_os_trace*\n"
+# endif
+ // TLS leak in some glibc versions, described in
+ // https://sourceware.org/bugzilla/show_bug.cgi?id=12650.
+ "leak:*tls_get_addr*\n";
+
+void InitializeSuppressions() {
+ CHECK_EQ(nullptr, suppression_ctx);
+ suppression_ctx = new (suppression_placeholder)
+ LeakSuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
+}
+
+void LeakSuppressionContext::LazyInit() {
+ if (!parsed) {
+ parsed = true;
+ context.ParseFromFile(flags()->suppressions);
+ if (&__lsan_default_suppressions)
+ context.Parse(__lsan_default_suppressions());
+ context.Parse(kStdSuppressions);
+ if (flags()->use_tls && flags()->use_ld_allocations)
+ suppress_module = GetLinker();
+ }
+}
+
+Suppression *LeakSuppressionContext::GetSuppressionForAddr(uptr addr) {
+ Suppression *s = nullptr;
+
+ // Suppress by module name.
+ const char *module_name = Symbolizer::GetOrInit()->GetModuleNameForPc(addr);
+ if (!module_name)
+ module_name = "<unknown module>";
+ if (context.Match(module_name, kSuppressionLeak, &s))
+ return s;
+
+ // Suppress by file or function name.
+ SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
+ for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
+ if (context.Match(cur->info.function, kSuppressionLeak, &s) ||
+ context.Match(cur->info.file, kSuppressionLeak, &s)) {
+ break;
+ }
+ }
+ frames->ClearAll();
+ return s;
+}
+
+static uptr GetCallerPC(const StackTrace &stack) {
+ // The top frame is our malloc/calloc/etc. The next frame is the caller.
+ if (stack.size >= 2)
+ return stack.trace[1];
+ return 0;
+}
+
+# if SANITIZER_APPLE
+// Objective-C class data pointers are stored with flags in the low bits, so
+// they need to be transformed back into something that looks like a pointer.
+static inline void *MaybeTransformPointer(void *p) {
+ uptr ptr = reinterpret_cast<uptr>(p);
+ if ((ptr & OBJC_FAST_IS_RW) == OBJC_FAST_IS_RW)
+ ptr &= OBJC_DATA_MASK;
+ return reinterpret_cast<void *>(ptr);
+}
+# endif
+
+// On Linux, treats all chunks allocated from ld-linux.so as reachable, which
+// covers dynamically allocated TLS blocks, internal dynamic loader's loaded
+// modules accounting etc.
+// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
+// They are allocated with a __libc_memalign() call in allocate_and_init()
+// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
+// blocks, but we can make sure they come from our own allocator by intercepting
+// __libc_memalign(). On top of that, there is no easy way to reach them. Their
+// addresses are stored in a dynamically allocated array (the DTV) which is
+// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
+// being reachable from the static TLS, and the dynamic TLS being reachable from
+// the DTV. This is because the initial DTV is allocated before our interception
+// mechanism kicks in, and thus we don't recognize it as allocated memory. We
+// can't special-case it either, since we don't know its size.
+// Our solution is to include in the root set all allocations made from
+// ld-linux.so (which is where allocate_and_init() is implemented). This is
+// guaranteed to include all dynamic TLS blocks (and possibly other allocations
+// which we don't care about).
+// On all other platforms, this simply checks to ensure that the caller pc is
+// valid before reporting chunks as leaked.
+bool LeakSuppressionContext::SuppressInvalid(const StackTrace &stack) {
+ uptr caller_pc = GetCallerPC(stack);
+ // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
+ // it as reachable, as we can't properly report its allocation stack anyway.
+ return !caller_pc ||
+ (suppress_module && suppress_module->containsAddress(caller_pc));
+}
+
+bool LeakSuppressionContext::SuppressByRule(const StackTrace &stack,
+ uptr hit_count, uptr total_size) {
+ for (uptr i = 0; i < stack.size; i++) {
+ Suppression *s = GetSuppressionForAddr(
+ StackTrace::GetPreviousInstructionPc(stack.trace[i]));
+ if (s) {
+ s->weight += total_size;
+ atomic_fetch_add(&s->hit_count, hit_count, memory_order_relaxed);
+ return true;
+ }
+ }
+ return false;
+}
+
+bool LeakSuppressionContext::Suppress(u32 stack_trace_id, uptr hit_count,
+ uptr total_size) {
+ LazyInit();
+ StackTrace stack = StackDepotGet(stack_trace_id);
+ if (!SuppressInvalid(stack) && !SuppressByRule(stack, hit_count, total_size))
+ return false;
+ suppressed_stacks_sorted = false;
+ suppressed_stacks.push_back(stack_trace_id);
+ return true;
+}
+
+static LeakSuppressionContext *GetSuppressionContext() {
+ CHECK(suppression_ctx);
+ return suppression_ctx;
+}
+
+static InternalMmapVectorNoCtor<RootRegion> root_regions;
+
+InternalMmapVectorNoCtor<RootRegion> const *GetRootRegions() {
+ return &root_regions;
+}
+
+void InitCommonLsan() {
+ if (common_flags()->detect_leaks) {
+ // Initialization which can fail or print warnings should only be done if
+ // LSan is actually enabled.
+ InitializeSuppressions();
+ InitializePlatformSpecificModules();
+ }
+}
+
+class Decorator : public __sanitizer::SanitizerCommonDecorator {
+ public:
+ Decorator() : SanitizerCommonDecorator() {}
+ const char *Error() { return Red(); }
+ const char *Leak() { return Blue(); }
+};
+
+static inline bool MaybeUserPointer(uptr p) {
+ // Since our heap is located in mmap-ed memory, we can assume a sensible lower
+ // bound on heap addresses.
+ const uptr kMinAddress = 4 * 4096;
+ if (p < kMinAddress)
+ return false;
+# if defined(__x86_64__)
+ // Accept only canonical form user-space addresses.
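+  // (E.g. 0x00007fffffffe000 >> 47 == 0 passes, while a kernel-half address
+  // such as 0xffff800000000000 does not.)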
+ return ((p >> 47) == 0);
+# elif defined(__mips64)
+ return ((p >> 40) == 0);
+# elif defined(__aarch64__)
+ // Accept up to 48 bit VMA.
+ return ((p >> 48) == 0);
+# elif defined(__loongarch_lp64)
+ // Allow 47-bit user-space VMA at current.
+ return ((p >> 47) == 0);
+# else
+ return true;
+# endif
+}
+
+// Scans the memory range, looking for byte patterns that point into allocator
+// chunks. Marks those chunks with |tag| and adds them to |frontier|.
+// There are two usage modes for this function: finding reachable chunks
+// (|tag| = kReachable) and finding indirectly leaked chunks
+// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
+// so |frontier| = 0.
+void ScanRangeForPointers(uptr begin, uptr end, Frontier *frontier,
+ const char *region_type, ChunkTag tag) {
+ CHECK(tag == kReachable || tag == kIndirectlyLeaked);
+ const uptr alignment = flags()->pointer_alignment();
+ LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, (void *)begin,
+ (void *)end);
+ uptr pp = begin;
+ if (pp % alignment)
+ pp = pp + alignment - pp % alignment;
+ for (; pp + sizeof(void *) <= end; pp += alignment) {
+ void *p = *reinterpret_cast<void **>(pp);
+# if SANITIZER_APPLE
+ p = MaybeTransformPointer(p);
+# endif
+ if (!MaybeUserPointer(reinterpret_cast<uptr>(p)))
+ continue;
+ uptr chunk = PointsIntoChunk(p);
+ if (!chunk)
+ continue;
+ // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
+ if (chunk == begin)
+ continue;
+ LsanMetadata m(chunk);
+ if (m.tag() == kReachable || m.tag() == kIgnored)
+ continue;
+
+ // Do this check relatively late so we can log only the interesting cases.
+ if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
+ LOG_POINTERS(
+ "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
+ "%zu.\n",
+ (void *)pp, p, (void *)chunk, (void *)(chunk + m.requested_size()),
+ m.requested_size());
+ continue;
+ }
+
+ m.set_tag(tag);
+ LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n",
+ (void *)pp, p, (void *)chunk,
+ (void *)(chunk + m.requested_size()), m.requested_size());
+ if (frontier)
+ frontier->push_back(chunk);
+ }
+}
+
+// Scans a global range for pointers
+void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) {
+ uptr allocator_begin = 0, allocator_end = 0;
+ GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
+ if (begin <= allocator_begin && allocator_begin < end) {
+ CHECK_LE(allocator_begin, allocator_end);
+ CHECK_LE(allocator_end, end);
+ if (begin < allocator_begin)
+ ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
+ kReachable);
+ if (allocator_end < end)
+ ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL", kReachable);
+ } else {
+ ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
+ }
+}
+
+void ScanExtraStackRanges(const InternalMmapVector<Range> &ranges,
+ Frontier *frontier) {
+ for (uptr i = 0; i < ranges.size(); i++) {
+ ScanRangeForPointers(ranges[i].begin, ranges[i].end, frontier, "FAKE STACK",
+ kReachable);
+ }
+}
+
+# if SANITIZER_FUCHSIA
+
+// Fuchsia handles all threads together with its own callback.
+static void ProcessThreads(SuspendedThreadsList const &, Frontier *, tid_t,
+ uptr) {}
+
+# else
+
+# if SANITIZER_ANDROID
+// FIXME: Move this out into *libcdep.cpp
+extern "C" SANITIZER_WEAK_ATTRIBUTE void __libc_iterate_dynamic_tls(
+ pid_t, void (*cb)(void *, void *, uptr, void *), void *);
+# endif
+
+static void ProcessThreadRegistry(Frontier *frontier) {
+ InternalMmapVector<uptr> ptrs;
+ GetAdditionalThreadContextPtrsLocked(&ptrs);
+
+ for (uptr i = 0; i < ptrs.size(); ++i) {
+ void *ptr = reinterpret_cast<void *>(ptrs[i]);
+ uptr chunk = PointsIntoChunk(ptr);
+ if (!chunk)
+ continue;
+ LsanMetadata m(chunk);
+ if (!m.allocated())
+ continue;
+
+ // Mark as reachable and add to frontier.
+ LOG_POINTERS("Treating pointer %p from ThreadContext as reachable\n", ptr);
+ m.set_tag(kReachable);
+ frontier->push_back(chunk);
+ }
+}
+
+// Scans thread data (stacks and TLS) for heap pointers.
+static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
+ Frontier *frontier, tid_t caller_tid,
+ uptr caller_sp) {
+ InternalMmapVector<uptr> registers;
+ InternalMmapVector<Range> extra_ranges;
+ for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
+ tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i));
+ LOG_THREADS("Processing thread %llu.\n", os_id);
+ uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
+ DTLS *dtls;
+ bool thread_found =
+ GetThreadRangesLocked(os_id, &stack_begin, &stack_end, &tls_begin,
+ &tls_end, &cache_begin, &cache_end, &dtls);
+ if (!thread_found) {
+ // If a thread can't be found in the thread registry, it's probably in the
+ // process of destruction. Log this event and move on.
+ LOG_THREADS("Thread %llu not found in registry.\n", os_id);
+ continue;
+ }
+ uptr sp;
+ PtraceRegistersStatus have_registers =
+ suspended_threads.GetRegistersAndSP(i, &registers, &sp);
+ if (have_registers != REGISTERS_AVAILABLE) {
+ Report("Unable to get registers from thread %llu.\n", os_id);
+ // If unable to get SP, consider the entire stack to be reachable unless
+ // GetRegistersAndSP failed with ESRCH.
+ if (have_registers == REGISTERS_UNAVAILABLE_FATAL)
+ continue;
+ sp = stack_begin;
+ }
+ if (suspended_threads.GetThreadID(i) == caller_tid) {
+ sp = caller_sp;
+ }
+
+ if (flags()->use_registers && have_registers) {
+ uptr registers_begin = reinterpret_cast<uptr>(registers.data());
+ uptr registers_end =
+ reinterpret_cast<uptr>(registers.data() + registers.size());
+ ScanRangeForPointers(registers_begin, registers_end, frontier,
+ "REGISTERS", kReachable);
+ }
+
+ if (flags()->use_stacks) {
+ LOG_THREADS("Stack at %p-%p (SP = %p).\n", (void *)stack_begin,
+ (void *)stack_end, (void *)sp);
+ if (sp < stack_begin || sp >= stack_end) {
+ // SP is outside the recorded stack range (e.g. the thread is running a
+      // signal handler on an alternate stack, or swapcontext was used).
+ // Again, consider the entire stack range to be reachable.
+ LOG_THREADS("WARNING: stack pointer not in stack range.\n");
+ uptr page_size = GetPageSizeCached();
+ int skipped = 0;
+ while (stack_begin < stack_end &&
+ !IsAccessibleMemoryRange(stack_begin, 1)) {
+ skipped++;
+ stack_begin += page_size;
+ }
+ LOG_THREADS("Skipped %d guard page(s) to obtain stack %p-%p.\n",
+ skipped, (void *)stack_begin, (void *)stack_end);
+ } else {
+ // Shrink the stack range to ignore out-of-scope values.
+ stack_begin = sp;
+ }
+ ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
+ kReachable);
+ extra_ranges.clear();
+ GetThreadExtraStackRangesLocked(os_id, &extra_ranges);
+ ScanExtraStackRanges(extra_ranges, frontier);
+ }
+
+ if (flags()->use_tls) {
+ if (tls_begin) {
+ LOG_THREADS("TLS at %p-%p.\n", (void *)tls_begin, (void *)tls_end);
+      // If the TLS and cache ranges don't overlap, scan the full TLS range;
+      // otherwise, scan only the non-overlapping portions.
+ if (cache_begin == cache_end || tls_end < cache_begin ||
+ tls_begin > cache_end) {
+ ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
+ } else {
+ if (tls_begin < cache_begin)
+ ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
+ kReachable);
+ if (tls_end > cache_end)
+ ScanRangeForPointers(cache_end, tls_end, frontier, "TLS",
+ kReachable);
+ }
+ }
+# if SANITIZER_ANDROID
+    auto *cb = +[](void *dtls_begin, void *dtls_end, uptr /*dso_id*/,
+ void *arg) -> void {
+ ScanRangeForPointers(reinterpret_cast<uptr>(dtls_begin),
+ reinterpret_cast<uptr>(dtls_end),
+ reinterpret_cast<Frontier *>(arg), "DTLS",
+ kReachable);
+ };
+
+    // FIXME: There might be a race condition here (and in Bionic) if the
+    // thread is suspended in the middle of updating its DTLS. In other
+    // words, we could scan already-freed memory (probably fine for now).
+ __libc_iterate_dynamic_tls(os_id, cb, frontier);
+# else
+ if (dtls && !DTLSInDestruction(dtls)) {
+ ForEachDVT(dtls, [&](const DTLS::DTV &dtv, int id) {
+ uptr dtls_beg = dtv.beg;
+ uptr dtls_end = dtls_beg + dtv.size;
+ if (dtls_beg < dtls_end) {
+ LOG_THREADS("DTLS %d at %p-%p.\n", id, (void *)dtls_beg,
+ (void *)dtls_end);
+ ScanRangeForPointers(dtls_beg, dtls_end, frontier, "DTLS",
+ kReachable);
+ }
+ });
+ } else {
+        // We are handling a thread with DTLS under destruction. Log this
+        // and continue.
+ LOG_THREADS("Thread %llu has DTLS under destruction.\n", os_id);
+ }
+# endif
+ }
+ }
+
+ // Add pointers reachable from ThreadContexts
+ ProcessThreadRegistry(frontier);
+}
+
+# endif // SANITIZER_FUCHSIA
+
+void ScanRootRegion(Frontier *frontier, const RootRegion &root_region,
+ uptr region_begin, uptr region_end, bool is_readable) {
+ uptr intersection_begin = Max(root_region.begin, region_begin);
+ uptr intersection_end = Min(region_end, root_region.begin + root_region.size);
+ if (intersection_begin >= intersection_end)
+ return;
+ LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
+ (void *)root_region.begin,
+ (void *)(root_region.begin + root_region.size),
+ (void *)region_begin, (void *)region_end,
+ is_readable ? "readable" : "unreadable");
+ if (is_readable)
+ ScanRangeForPointers(intersection_begin, intersection_end, frontier, "ROOT",
+ kReachable);
+}
+
+static void ProcessRootRegion(Frontier *frontier,
+ const RootRegion &root_region) {
+ MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
+ MemoryMappedSegment segment;
+ while (proc_maps.Next(&segment)) {
+ ScanRootRegion(frontier, root_region, segment.start, segment.end,
+ segment.IsReadable());
+ }
+}
+
+// Scans root regions for heap pointers.
+static void ProcessRootRegions(Frontier *frontier) {
+ if (!flags()->use_root_regions)
+ return;
+ for (uptr i = 0; i < root_regions.size(); i++)
+ ProcessRootRegion(frontier, root_regions[i]);
+}
+
+static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
+ while (frontier->size()) {
+ uptr next_chunk = frontier->back();
+ frontier->pop_back();
+ LsanMetadata m(next_chunk);
+ ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
+ "HEAP", tag);
+ }
+}
+
+// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
+// which are reachable from it as indirectly leaked.
+static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
+ chunk = GetUserBegin(chunk);
+ LsanMetadata m(chunk);
+ if (m.allocated() && m.tag() != kReachable) {
+ ScanRangeForPointers(chunk, chunk + m.requested_size(),
+ /* frontier */ nullptr, "HEAP", kIndirectlyLeaked);
+ }
+}
+
+static void IgnoredSuppressedCb(uptr chunk, void *arg) {
+ CHECK(arg);
+ chunk = GetUserBegin(chunk);
+ LsanMetadata m(chunk);
+ if (!m.allocated() || m.tag() == kIgnored)
+ return;
+
+ const InternalMmapVector<u32> &suppressed =
+ *static_cast<const InternalMmapVector<u32> *>(arg);
+ uptr idx = InternalLowerBound(suppressed, m.stack_trace_id());
+ if (idx >= suppressed.size() || m.stack_trace_id() != suppressed[idx])
+ return;
+
+ LOG_POINTERS("Suppressed: chunk %p-%p of size %zu.\n", (void *)chunk,
+ (void *)(chunk + m.requested_size()), m.requested_size());
+ m.set_tag(kIgnored);
+}
+
+// ForEachChunk callback. If chunk is marked as ignored, adds its address to
+// frontier.
+static void CollectIgnoredCb(uptr chunk, void *arg) {
+ CHECK(arg);
+ chunk = GetUserBegin(chunk);
+ LsanMetadata m(chunk);
+ if (m.allocated() && m.tag() == kIgnored) {
+ LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n", (void *)chunk,
+ (void *)(chunk + m.requested_size()), m.requested_size());
+ reinterpret_cast<Frontier *>(arg)->push_back(chunk);
+ }
+}
+
+// Sets the appropriate tag on each chunk.
+static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
+ Frontier *frontier, tid_t caller_tid,
+ uptr caller_sp) {
+ const InternalMmapVector<u32> &suppressed_stacks =
+ GetSuppressionContext()->GetSortedSuppressedStacks();
+ if (!suppressed_stacks.empty()) {
+ ForEachChunk(IgnoredSuppressedCb,
+ const_cast<InternalMmapVector<u32> *>(&suppressed_stacks));
+ }
+ ForEachChunk(CollectIgnoredCb, frontier);
+ ProcessGlobalRegions(frontier);
+ ProcessThreads(suspended_threads, frontier, caller_tid, caller_sp);
+ ProcessRootRegions(frontier);
+ FloodFillTag(frontier, kReachable);
+
+ // The check here is relatively expensive, so we do this in a separate flood
+ // fill. That way we can skip the check for chunks that are reachable
+ // otherwise.
+ LOG_POINTERS("Processing platform-specific allocations.\n");
+ ProcessPlatformSpecificAllocations(frontier);
+ FloodFillTag(frontier, kReachable);
+
+ // Iterate over leaked chunks and mark those that are reachable from other
+ // leaked chunks.
+ LOG_POINTERS("Scanning leaked chunks.\n");
+ ForEachChunk(MarkIndirectlyLeakedCb, nullptr);
+}
+
+// ForEachChunk callback. Resets the tags to pre-leak-check state.
+static void ResetTagsCb(uptr chunk, void *arg) {
+ (void)arg;
+ chunk = GetUserBegin(chunk);
+ LsanMetadata m(chunk);
+ if (m.allocated() && m.tag() != kIgnored)
+ m.set_tag(kDirectlyLeaked);
+}
+
+// ForEachChunk callback. Aggregates information about unreachable chunks into
+// a LeakReport.
+static void CollectLeaksCb(uptr chunk, void *arg) {
+ CHECK(arg);
+ LeakedChunks *leaks = reinterpret_cast<LeakedChunks *>(arg);
+ chunk = GetUserBegin(chunk);
+ LsanMetadata m(chunk);
+ if (!m.allocated())
+ return;
+ if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked)
+ leaks->push_back({chunk, m.stack_trace_id(), m.requested_size(), m.tag()});
+}
+
+void LeakSuppressionContext::PrintMatchedSuppressions() {
+ InternalMmapVector<Suppression *> matched;
+ context.GetMatched(&matched);
+ if (!matched.size())
+ return;
+ const char *line = "-----------------------------------------------------";
+ Printf("%s\n", line);
+ Printf("Suppressions used:\n");
+ Printf(" count bytes template\n");
+ for (uptr i = 0; i < matched.size(); i++) {
+ Printf("%7zu %10zu %s\n",
+ static_cast<uptr>(atomic_load_relaxed(&matched[i]->hit_count)),
+ matched[i]->weight, matched[i]->templ);
+ }
+ Printf("%s\n\n", line);
+}
+
+# if SANITIZER_FUCHSIA
+
+// Fuchsia provides a libc interface that guarantees all threads are
+// covered, and SuspendedThreadsList is never really used.
+static void ReportUnsuspendedThreads(const SuspendedThreadsList &) {}
+
+# else // !SANITIZER_FUCHSIA
+
+static void ReportUnsuspendedThreads(
+ const SuspendedThreadsList &suspended_threads) {
+ InternalMmapVector<tid_t> threads(suspended_threads.ThreadCount());
+ for (uptr i = 0; i < suspended_threads.ThreadCount(); ++i)
+ threads[i] = suspended_threads.GetThreadID(i);
+
+ Sort(threads.data(), threads.size());
+
+ InternalMmapVector<tid_t> unsuspended;
+ GetRunningThreadsLocked(&unsuspended);
+
+ for (auto os_id : unsuspended) {
+ uptr i = InternalLowerBound(threads, os_id);
+ if (i >= threads.size() || threads[i] != os_id)
+ Report(
+ "Running thread %zu was not suspended. False leaks are possible.\n",
+ os_id);
+ }
+}
+
+# endif // !SANITIZER_FUCHSIA
+
+static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
+ void *arg) {
+ CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
+ CHECK(param);
+ CHECK(!param->success);
+ ReportUnsuspendedThreads(suspended_threads);
+ ClassifyAllChunks(suspended_threads, &param->frontier, param->caller_tid,
+ param->caller_sp);
+ ForEachChunk(CollectLeaksCb, &param->leaks);
+ // Clean up for subsequent leak checks. This assumes we did not overwrite any
+ // kIgnored tags.
+ ForEachChunk(ResetTagsCb, nullptr);
+ param->success = true;
+}
+
+static bool PrintResults(LeakReport &report) {
+ uptr unsuppressed_count = report.UnsuppressedLeakCount();
+ if (unsuppressed_count) {
+ Decorator d;
+ Printf(
+ "\n"
+ "================================================================="
+ "\n");
+ Printf("%s", d.Error());
+ Report("ERROR: LeakSanitizer: detected memory leaks\n");
+ Printf("%s", d.Default());
+ report.ReportTopLeaks(flags()->max_leaks);
+ }
+ if (common_flags()->print_suppressions)
+ GetSuppressionContext()->PrintMatchedSuppressions();
+ if (unsuppressed_count > 0) {
+ report.PrintSummary();
+ return true;
+ }
+ return false;
+}
+
+static bool CheckForLeaks() {
+ if (&__lsan_is_turned_off && __lsan_is_turned_off()) {
+ VReport(1, "LeakSanitizer is disabled");
+ return false;
+ }
+ VReport(1, "LeakSanitizer: checking for leaks");
+  // Inside LockStuffAndStopTheWorld we can't run the symbolizer, so we can't
+  // match suppressions. However, if a stack id was previously suppressed, it
+  // should be suppressed in future checks as well.
+ for (int i = 0;; ++i) {
+ EnsureMainThreadIDIsCorrect();
+ CheckForLeaksParam param;
+    // Capture the calling thread's stack pointer early to avoid false
+    // negatives: an old frame with dead pointers might be overlapped by a new
+    // frame inside CheckForLeaks that does not overwrite those pointer bytes
+    // before the threads are suspended and stack pointers are captured.
+ param.caller_tid = GetTid();
+ param.caller_sp = reinterpret_cast<uptr>(__builtin_frame_address(0));
+ LockStuffAndStopTheWorld(CheckForLeaksCallback, &param);
+ if (!param.success) {
+ Report("LeakSanitizer has encountered a fatal error.\n");
+ Report(
+ "HINT: For debugging, try setting environment variable "
+ "LSAN_OPTIONS=verbosity=1:log_threads=1\n");
+ Report(
+ "HINT: LeakSanitizer does not work under ptrace (strace, gdb, "
+ "etc)\n");
+ Die();
+ }
+ LeakReport leak_report;
+ leak_report.AddLeakedChunks(param.leaks);
+
+    // No new suppressed stacks, so a rerun will not help and we can report.
+ if (!leak_report.ApplySuppressions())
+ return PrintResults(leak_report);
+
+ // No indirect leaks to report, so we are done here.
+ if (!leak_report.IndirectUnsuppressedLeakCount())
+ return PrintResults(leak_report);
+
+ if (i >= 8) {
+ Report("WARNING: LeakSanitizer gave up on indirect leaks suppression.\n");
+ return PrintResults(leak_report);
+ }
+
+ // We found a new previously unseen suppressed call stack. Rerun to make
+ // sure it does not hold indirect leaks.
+ VReport(1, "Rerun with %zu suppressed stacks.",
+ GetSuppressionContext()->GetSortedSuppressedStacks().size());
+ }
+}
+
+static bool has_reported_leaks = false;
+bool HasReportedLeaks() { return has_reported_leaks; }
+
+void DoLeakCheck() {
+ Lock l(&global_mutex);
+ static bool already_done;
+ if (already_done)
+ return;
+ already_done = true;
+ has_reported_leaks = CheckForLeaks();
+ if (has_reported_leaks)
+ HandleLeaks();
+}
+
+static int DoRecoverableLeakCheck() {
+ Lock l(&global_mutex);
+ bool have_leaks = CheckForLeaks();
+ return have_leaks ? 1 : 0;
+}
+
+void DoRecoverableLeakCheckVoid() { DoRecoverableLeakCheck(); }
+
+///// LeakReport implementation. /////
+
+// A hard limit on the number of distinct leaks, to avoid quadratic complexity
+// in LeakReport::AddLeakedChunks(). We don't expect to ever see this many leaks
+// in real-world applications.
+// FIXME: Get rid of this limit by moving logic into DedupLeaks.
+const uptr kMaxLeaksConsidered = 5000;
+
+void LeakReport::AddLeakedChunks(const LeakedChunks &chunks) {
+ for (const LeakedChunk &leak : chunks) {
+ uptr chunk = leak.chunk;
+ u32 stack_trace_id = leak.stack_trace_id;
+ uptr leaked_size = leak.leaked_size;
+ ChunkTag tag = leak.tag;
+ CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
+
+ if (u32 resolution = flags()->resolution) {
+ StackTrace stack = StackDepotGet(stack_trace_id);
+ stack.size = Min(stack.size, resolution);
+ stack_trace_id = StackDepotPut(stack);
+ }
+
+ bool is_directly_leaked = (tag == kDirectlyLeaked);
+ uptr i;
+ for (i = 0; i < leaks_.size(); i++) {
+ if (leaks_[i].stack_trace_id == stack_trace_id &&
+ leaks_[i].is_directly_leaked == is_directly_leaked) {
+ leaks_[i].hit_count++;
+ leaks_[i].total_size += leaked_size;
+ break;
+ }
+ }
+ if (i == leaks_.size()) {
+ if (leaks_.size() == kMaxLeaksConsidered)
+ return;
+ Leak leak = {next_id_++, /* hit_count */ 1,
+ leaked_size, stack_trace_id,
+ is_directly_leaked, /* is_suppressed */ false};
+ leaks_.push_back(leak);
+ }
+ if (flags()->report_objects) {
+ LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
+ leaked_objects_.push_back(obj);
+ }
+ }
+}
+
+static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
+ if (leak1.is_directly_leaked == leak2.is_directly_leaked)
+ return leak1.total_size > leak2.total_size;
+ else
+ return leak1.is_directly_leaked;
+}
+
+void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
+ CHECK(leaks_.size() <= kMaxLeaksConsidered);
+ Printf("\n");
+ if (leaks_.size() == kMaxLeaksConsidered)
+ Printf(
+ "Too many leaks! Only the first %zu leaks encountered will be "
+ "reported.\n",
+ kMaxLeaksConsidered);
+
+ uptr unsuppressed_count = UnsuppressedLeakCount();
+ if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
+ Printf("The %zu top leak(s):\n", num_leaks_to_report);
+ Sort(leaks_.data(), leaks_.size(), &LeakComparator);
+ uptr leaks_reported = 0;
+ for (uptr i = 0; i < leaks_.size(); i++) {
+ if (leaks_[i].is_suppressed)
+ continue;
+ PrintReportForLeak(i);
+ leaks_reported++;
+ if (leaks_reported == num_leaks_to_report)
+ break;
+ }
+ if (leaks_reported < unsuppressed_count) {
+ uptr remaining = unsuppressed_count - leaks_reported;
+ Printf("Omitting %zu more leak(s).\n", remaining);
+ }
+}
+
+void LeakReport::PrintReportForLeak(uptr index) {
+ Decorator d;
+ Printf("%s", d.Leak());
+ Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
+ leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
+ leaks_[index].total_size, leaks_[index].hit_count);
+ Printf("%s", d.Default());
+
+ CHECK(leaks_[index].stack_trace_id);
+ StackDepotGet(leaks_[index].stack_trace_id).Print();
+
+ if (flags()->report_objects) {
+ Printf("Objects leaked above:\n");
+ PrintLeakedObjectsForLeak(index);
+ Printf("\n");
+ }
+}
+
+void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
+ u32 leak_id = leaks_[index].id;
+ for (uptr j = 0; j < leaked_objects_.size(); j++) {
+ if (leaked_objects_[j].leak_id == leak_id)
+ Printf("%p (%zu bytes)\n", (void *)leaked_objects_[j].addr,
+ leaked_objects_[j].size);
+ }
+}
+
+void LeakReport::PrintSummary() {
+ CHECK(leaks_.size() <= kMaxLeaksConsidered);
+ uptr bytes = 0, allocations = 0;
+ for (uptr i = 0; i < leaks_.size(); i++) {
+ if (leaks_[i].is_suppressed)
+ continue;
+ bytes += leaks_[i].total_size;
+ allocations += leaks_[i].hit_count;
+ }
+ InternalScopedString summary;
+ summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes,
+ allocations);
+ ReportErrorSummary(summary.data());
+}
+
+uptr LeakReport::ApplySuppressions() {
+ LeakSuppressionContext *suppressions = GetSuppressionContext();
+  uptr new_suppressions = 0;
+ for (uptr i = 0; i < leaks_.size(); i++) {
+ if (suppressions->Suppress(leaks_[i].stack_trace_id, leaks_[i].hit_count,
+ leaks_[i].total_size)) {
+ leaks_[i].is_suppressed = true;
+ ++new_suppressions;
+ }
+ }
+ return new_suppressions;
+}
+
+uptr LeakReport::UnsuppressedLeakCount() {
+ uptr result = 0;
+ for (uptr i = 0; i < leaks_.size(); i++)
+ if (!leaks_[i].is_suppressed)
+ result++;
+ return result;
+}
+
+uptr LeakReport::IndirectUnsuppressedLeakCount() {
+ uptr result = 0;
+ for (uptr i = 0; i < leaks_.size(); i++)
+ if (!leaks_[i].is_suppressed && !leaks_[i].is_directly_leaked)
+ result++;
+ return result;
+}
+
+} // namespace __lsan
+#else // CAN_SANITIZE_LEAKS
+namespace __lsan {
+void InitCommonLsan() {}
+void DoLeakCheck() {}
+void DoRecoverableLeakCheckVoid() {}
+void DisableInThisThread() {}
+void EnableInThisThread() {}
+} // namespace __lsan
+#endif // CAN_SANITIZE_LEAKS
+
+using namespace __lsan;
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+void __lsan_ignore_object(const void *p) {
+#if CAN_SANITIZE_LEAKS
+ if (!common_flags()->detect_leaks)
+ return;
+ // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not
+ // locked.
+ Lock l(&global_mutex);
+ IgnoreObjectResult res = IgnoreObjectLocked(p);
+ if (res == kIgnoreObjectInvalid)
+ VReport(1, "__lsan_ignore_object(): no heap object found at %p\n", p);
+ if (res == kIgnoreObjectAlreadyIgnored)
+ VReport(1,
+ "__lsan_ignore_object(): "
+ "heap object at %p is already being ignored\n",
+ p);
+ if (res == kIgnoreObjectSuccess)
+ VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
+#endif // CAN_SANITIZE_LEAKS
+}
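+
+// Usage sketch (illustrative, not part of the runtime): a client includes
+// <sanitizer/lsan_interface.h> and excludes an intentionally long-lived
+// object from reports. The variable name below is hypothetical.
+//
+//   void *cache = malloc(256);   // lives until process exit by design
+//   __lsan_ignore_object(cache); // tagged kIgnored, never reported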
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __lsan_register_root_region(const void *begin, uptr size) {
+#if CAN_SANITIZE_LEAKS
+ Lock l(&global_mutex);
+ RootRegion region = {reinterpret_cast<uptr>(begin), size};
+ root_regions.push_back(region);
+ VReport(1, "Registered root region at %p of size %zu\n", begin, size);
+#endif // CAN_SANITIZE_LEAKS
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __lsan_unregister_root_region(const void *begin, uptr size) {
+#if CAN_SANITIZE_LEAKS
+ Lock l(&global_mutex);
+ bool removed = false;
+ for (uptr i = 0; i < root_regions.size(); i++) {
+ RootRegion region = root_regions[i];
+ if (region.begin == reinterpret_cast<uptr>(begin) && region.size == size) {
+ removed = true;
+ uptr last_index = root_regions.size() - 1;
+ root_regions[i] = root_regions[last_index];
+ root_regions.pop_back();
+ VReport(1, "Unregistered root region at %p of size %zu\n", begin, size);
+ break;
+ }
+ }
+ if (!removed) {
+ Report(
+ "__lsan_unregister_root_region(): region at %p of size %zu has not "
+ "been registered.\n",
+ begin, size);
+ Die();
+ }
+#endif // CAN_SANITIZE_LEAKS
+}
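+
+// Usage sketch (illustrative, hypothetical names): memory mmap'd outside the
+// allocator can be added to the root set; it must later be unregistered with
+// the exact same (begin, size) pair, or the runtime calls Die().
+//
+//   void *arena = mmap(nullptr, kArenaSize, PROT_READ | PROT_WRITE,
+//                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+//   __lsan_register_root_region(arena, kArenaSize);
+//   // ... pointers stored in the arena now keep chunks reachable ...
+//   __lsan_unregister_root_region(arena, kArenaSize);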
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __lsan_disable() {
+#if CAN_SANITIZE_LEAKS
+ __lsan::DisableInThisThread();
+#endif
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __lsan_enable() {
+#if CAN_SANITIZE_LEAKS
+ __lsan::EnableInThisThread();
+#endif
+}
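+
+// Usage sketch (illustrative): the calls must be balanced per thread, and
+// allocations made in between are tagged kIgnored. The public C++ header
+// wraps the pair in an RAII type, __lsan::ScopedDisabler.
+//
+//   __lsan_disable();
+//   void *p = malloc(64);  // never reported, even if leaked
+//   __lsan_enable();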
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __lsan_do_leak_check() {
+#if CAN_SANITIZE_LEAKS
+ if (common_flags()->detect_leaks)
+ __lsan::DoLeakCheck();
+#endif // CAN_SANITIZE_LEAKS
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __lsan_do_recoverable_leak_check() {
+#if CAN_SANITIZE_LEAKS
+ if (common_flags()->detect_leaks)
+ return __lsan::DoRecoverableLeakCheck();
+#endif // CAN_SANITIZE_LEAKS
+ return 0;
+}
+
+SANITIZER_INTERFACE_WEAK_DEF(const char *, __lsan_default_options, void) {
+ return "";
+}
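+
+// Override sketch (illustrative): since the definition above is weak, a user
+// binary may provide a strong definition to change defaults without setting
+// LSAN_OPTIONS, e.g.:
+//
+//   extern "C" const char *__lsan_default_options() {
+//     return "report_objects=1:max_leaks=3";
+//   }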
+
+#if !SANITIZER_SUPPORTS_WEAK_HOOKS
+SANITIZER_INTERFACE_WEAK_DEF(int, __lsan_is_turned_off, void) {
+ return 0;
+}
+
+SANITIZER_INTERFACE_WEAK_DEF(const char *, __lsan_default_suppressions, void) {
+ return "";
+}
+#endif
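+
+// Suppressions can likewise be baked into the binary (illustrative sketch;
+// the "leak:" patterns are matched against the leak's stack trace):
+//
+//   extern "C" const char *__lsan_default_suppressions() {
+//     return "leak:libfoo.so\nleak:KnownLeakyFunction\n";
+//   }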
+} // extern "C"
diff --git a/contrib/libs/clang16-rt/lib/lsan/lsan_common.h b/contrib/libs/clang16-rt/lib/lsan/lsan_common.h
new file mode 100644
index 0000000000..0d5c003108
--- /dev/null
+++ b/contrib/libs/clang16-rt/lib/lsan/lsan_common.h
@@ -0,0 +1,350 @@
+//=-- lsan_common.h -------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Private LSan header.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LSAN_COMMON_H
+#define LSAN_COMMON_H
+
+#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_platform.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_stoptheworld.h"
+#include "sanitizer_common/sanitizer_symbolizer.h"
+#include "sanitizer_common/sanitizer_thread_registry.h"
+
+// LeakSanitizer relies on some glibc internals (e.g. the TLS machinery) on
+// Linux. Also, LSan doesn't work well on 32-bit architectures, because the
+// "small" (4-byte) pointer size leads to a high false-negative ratio on large
+// leaks. But we still want to have it for some 32-bit arches (e.g. x86), see
+// https://github.com/google/sanitizers/issues/403.
+// To enable LeakSanitizer on a new architecture, one needs to implement the
+// internal_clone function as well as (probably) adjust the TLS machinery for
+// the new architecture inside the sanitizer library.
+// Exclude leak detection on arm32 for Android because `__aeabi_read_tp` is
+// missing, which caused a link error.
+#if SANITIZER_ANDROID && (__ANDROID_API__ < 28 || defined(__arm__))
+# define CAN_SANITIZE_LEAKS 0
+#elif (SANITIZER_LINUX || SANITIZER_APPLE) && (SANITIZER_WORDSIZE == 64) && \
+ (defined(__x86_64__) || defined(__mips64) || defined(__aarch64__) || \
+ defined(__powerpc64__) || defined(__s390x__))
+# define CAN_SANITIZE_LEAKS 1
+#elif defined(__i386__) && (SANITIZER_LINUX || SANITIZER_APPLE)
+# define CAN_SANITIZE_LEAKS 1
+#elif defined(__arm__) && SANITIZER_LINUX
+# define CAN_SANITIZE_LEAKS 1
+#elif SANITIZER_LOONGARCH64 && SANITIZER_LINUX
+# define CAN_SANITIZE_LEAKS 1
+#elif SANITIZER_RISCV64 && SANITIZER_LINUX
+# define CAN_SANITIZE_LEAKS 1
+#elif SANITIZER_NETBSD || SANITIZER_FUCHSIA
+# define CAN_SANITIZE_LEAKS 1
+#else
+# define CAN_SANITIZE_LEAKS 0
+#endif
+
+namespace __sanitizer {
+class FlagParser;
+class ThreadRegistry;
+class ThreadContextBase;
+struct DTLS;
+}
+
+// This section defines function and class prototypes which must be implemented
+// by the parent tool linking in LSan. There are implementations provided by the
+// LSan library which will be linked in when LSan is used as a standalone tool.
+namespace __lsan {
+
+// Chunk tags.
+enum ChunkTag {
+ kDirectlyLeaked = 0, // default
+ kIndirectlyLeaked = 1,
+ kReachable = 2,
+ kIgnored = 3
+};
+
+enum IgnoreObjectResult {
+ kIgnoreObjectSuccess,
+ kIgnoreObjectAlreadyIgnored,
+ kIgnoreObjectInvalid
+};
+
+struct Range {
+ uptr begin;
+ uptr end;
+};
+
+//// --------------------------------------------------------------------------
+//// Poisoning prototypes.
+//// --------------------------------------------------------------------------
+
+// Returns true if [addr, addr + sizeof(void *)) is poisoned.
+bool WordIsPoisoned(uptr addr);
+
+//// --------------------------------------------------------------------------
+//// Thread prototypes.
+//// --------------------------------------------------------------------------
+
+// Wrappers for ThreadRegistry access.
+void LockThreadRegistry() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
+void UnlockThreadRegistry() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
+// If called from the main thread, updates the main thread's TID in the thread
+// registry. We need this to handle processes that fork() without a subsequent
+// exec(), which invalidates the recorded TID. To update it, we must call
+// gettid() from the main thread. Our solution is to call this function before
+// leak checking and also before every call to pthread_create() (to handle cases
+// where leak checking is initiated from a non-main thread).
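+// For example (illustrative), a child of fork() without exec() inherits a
+// stale main-thread TID, which the next leak check refreshes via this call:
+//
+//   if (fork() == 0) {        // child: the recorded main TID is now stale
+//     __lsan_do_leak_check(); // CheckForLeaks() re-syncs the TID first
+//   }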
+void EnsureMainThreadIDIsCorrect();
+
+bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
+ uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
+ uptr *cache_end, DTLS **dtls);
+void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches);
+void GetThreadExtraStackRangesLocked(InternalMmapVector<Range> *ranges);
+void GetThreadExtraStackRangesLocked(tid_t os_id,
+ InternalMmapVector<Range> *ranges);
+void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs);
+void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads);
+
+//// --------------------------------------------------------------------------
+//// Allocator prototypes.
+//// --------------------------------------------------------------------------
+
+// Wrappers for allocator's ForceLock()/ForceUnlock().
+void LockAllocator();
+void UnlockAllocator();
+
+// Returns the address range occupied by the global allocator object.
+void GetAllocatorGlobalRange(uptr *begin, uptr *end);
+// If p points into a chunk that has been allocated to the user, returns its
+// user-visible address. Otherwise, returns 0.
+uptr PointsIntoChunk(void *p);
+// Returns address of user-visible chunk contained in this allocator chunk.
+uptr GetUserBegin(uptr chunk);
+
+// Wrapper for chunk metadata operations.
+class LsanMetadata {
+ public:
+ // Constructor accepts address of user-visible chunk.
+ explicit LsanMetadata(uptr chunk);
+ bool allocated() const;
+ ChunkTag tag() const;
+ void set_tag(ChunkTag value);
+ uptr requested_size() const;
+ u32 stack_trace_id() const;
+
+ private:
+ void *metadata_;
+};
+
+// Iterate over all existing chunks. Allocator must be locked.
+void ForEachChunk(ForEachChunkCallback callback, void *arg);
+
+// Helper for __lsan_ignore_object().
+IgnoreObjectResult IgnoreObjectLocked(const void *p);
+
+// The rest of the LSan interface which is implemented by library.
+
+struct ScopedStopTheWorldLock {
+ ScopedStopTheWorldLock() {
+ LockThreadRegistry();
+ LockAllocator();
+ }
+
+ ~ScopedStopTheWorldLock() {
+ UnlockAllocator();
+ UnlockThreadRegistry();
+ }
+
+ ScopedStopTheWorldLock &operator=(const ScopedStopTheWorldLock &) = delete;
+ ScopedStopTheWorldLock(const ScopedStopTheWorldLock &) = delete;
+};
+
+struct Flags {
+#define LSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "lsan_flags.inc"
+#undef LSAN_FLAG
+
+ void SetDefaults();
+ uptr pointer_alignment() const {
+ return use_unaligned ? 1 : sizeof(uptr);
+ }
+};
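+
+// Expansion sketch (illustrative): each entry of lsan_flags.inc is an X-macro
+// invocation, so a line such as
+//
+//   LSAN_FLAG(bool, use_stacks, true, "Root set: include thread stacks")
+//
+// expands inside struct Flags to a plain member declaration:
+//
+//   bool use_stacks;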
+
+extern Flags lsan_flags;
+inline Flags *flags() { return &lsan_flags; }
+void RegisterLsanFlags(FlagParser *parser, Flags *f);
+
+struct LeakedChunk {
+ uptr chunk;
+ u32 stack_trace_id;
+ uptr leaked_size;
+ ChunkTag tag;
+};
+
+using LeakedChunks = InternalMmapVector<LeakedChunk>;
+
+struct Leak {
+ u32 id;
+ uptr hit_count;
+ uptr total_size;
+ u32 stack_trace_id;
+ bool is_directly_leaked;
+ bool is_suppressed;
+};
+
+struct LeakedObject {
+ u32 leak_id;
+ uptr addr;
+ uptr size;
+};
+
+// Aggregates leaks by stack trace prefix.
+class LeakReport {
+ public:
+ LeakReport() {}
+ void AddLeakedChunks(const LeakedChunks &chunks);
+ void ReportTopLeaks(uptr max_leaks);
+ void PrintSummary();
+ uptr ApplySuppressions();
+ uptr UnsuppressedLeakCount();
+ uptr IndirectUnsuppressedLeakCount();
+
+ private:
+ void PrintReportForLeak(uptr index);
+ void PrintLeakedObjectsForLeak(uptr index);
+
+ u32 next_id_ = 0;
+ InternalMmapVector<Leak> leaks_;
+ InternalMmapVector<LeakedObject> leaked_objects_;
+};
+
+typedef InternalMmapVector<uptr> Frontier;
+
+// Platform-specific functions.
+void InitializePlatformSpecificModules();
+void ProcessGlobalRegions(Frontier *frontier);
+void ProcessPlatformSpecificAllocations(Frontier *frontier);
+
+struct RootRegion {
+ uptr begin;
+ uptr size;
+};
+
+// LockStuffAndStopTheWorld can start to use Scan* calls to collect into
+// this Frontier vector before the StopTheWorldCallback actually runs.
+// This is used when the OS has a unified callback API for suspending
+// threads and enumerating roots.
+struct CheckForLeaksParam {
+ Frontier frontier;
+ LeakedChunks leaks;
+ tid_t caller_tid;
+ uptr caller_sp;
+ bool success = false;
+};
+
+InternalMmapVectorNoCtor<RootRegion> const *GetRootRegions();
+void ScanRootRegion(Frontier *frontier, RootRegion const &region,
+ uptr region_begin, uptr region_end, bool is_readable);
+// Run stoptheworld while holding any platform-specific locks, as well as the
+// allocator and thread registry locks.
+void LockStuffAndStopTheWorld(StopTheWorldCallback callback,
+ CheckForLeaksParam* argument);
+
+void ScanRangeForPointers(uptr begin, uptr end,
+ Frontier *frontier,
+ const char *region_type, ChunkTag tag);
+void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier);
+void ScanExtraStackRanges(const InternalMmapVector<Range> &ranges,
+ Frontier *frontier);
+
+// Functions called from the parent tool.
+const char *MaybeCallLsanDefaultOptions();
+void InitCommonLsan();
+void DoLeakCheck();
+void DoRecoverableLeakCheckVoid();
+void DisableCounterUnderflow();
+bool DisabledInThisThread();
+
+// Used to implement __lsan::ScopedDisabler.
+void DisableInThisThread();
+void EnableInThisThread();
+// Can be used to ignore memory allocated by an intercepted
+// function.
+struct ScopedInterceptorDisabler {
+ ScopedInterceptorDisabler() { DisableInThisThread(); }
+ ~ScopedInterceptorDisabler() { EnableInThisThread(); }
+};
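+
+// Usage sketch (illustrative; the intercepted function is made up, and
+// INTERCEPTOR/REAL follow the usual sanitizer interceptor convention):
+//
+//   INTERCEPTOR(int, some_libc_fn, void *arg) {
+//     // Allocations made by the real function are tagged kIgnored.
+//     ScopedInterceptorDisabler disabler;
+//     return REAL(some_libc_fn)(arg);
+//   }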
+
+// According to the Itanium C++ ABI, an array cookie is a single word
+// containing the size of the allocated array.
+static inline bool IsItaniumABIArrayCookie(uptr chunk_beg, uptr chunk_size,
+ uptr addr) {
+ return chunk_size == sizeof(uptr) && chunk_beg + chunk_size == addr &&
+ *reinterpret_cast<uptr *>(chunk_beg) == 0;
+}
+
+// According to the ARM C++ ABI, an array cookie consists of two words:
+// struct array_cookie {
+// std::size_t element_size; // element_size != 0
+// std::size_t element_count;
+// };
+static inline bool IsARMABIArrayCookie(uptr chunk_beg, uptr chunk_size,
+ uptr addr) {
+ return chunk_size == 2 * sizeof(uptr) && chunk_beg + chunk_size == addr &&
+ *reinterpret_cast<uptr *>(chunk_beg + sizeof(uptr)) == 0;
+}
+
+// Special case for "new T[0]" where T is a type with a destructor.
+// new T[0] allocates a cookie (one or two words) holding the array size (0)
+// and returns a pointer to the end of the allocated chunk. The actual cookie
+// layout varies between platforms according to their C++ ABI implementation.
+inline bool IsSpecialCaseOfOperatorNew0(uptr chunk_beg, uptr chunk_size,
+ uptr addr) {
+#if defined(__arm__)
+ return IsARMABIArrayCookie(chunk_beg, chunk_size, addr);
+#else
+ return IsItaniumABIArrayCookie(chunk_beg, chunk_size, addr);
+#endif
+}
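+
+// Worked example (Itanium ABI, 64-bit): "new T[0]" for a T with a destructor
+// allocates a one-word chunk holding the element count 0 and hands the caller
+// chunk_beg + 8, i.e. one past the end of the chunk, which is exactly the
+// pattern IsItaniumABIArrayCookie() recognizes:
+//
+//   chunk_beg                chunk_beg + 8  <- pointer held by the user
+//   +------------------+
+//   | element count: 0 |
+//   +------------------+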
+
+// Return the linker module, if valid for the platform.
+LoadedModule *GetLinker();
+
+// Return true if LSan has finished leak checking and reported leaks.
+bool HasReportedLeaks();
+
+// Run platform-specific leak handlers.
+void HandleLeaks();
+
+} // namespace __lsan
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+const char *__lsan_default_options();
+
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+int __lsan_is_turned_off();
+
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+const char *__lsan_default_suppressions();
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __lsan_register_root_region(const void *p, __lsan::uptr size);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __lsan_unregister_root_region(const void *p, __lsan::uptr size);
+
+} // extern "C"
+
+#endif // LSAN_COMMON_H
diff --git a/contrib/libs/clang16-rt/lib/lsan/lsan_common_fuchsia.cpp b/contrib/libs/clang16-rt/lib/lsan/lsan_common_fuchsia.cpp
new file mode 100644
index 0000000000..3e1bf8eac7
--- /dev/null
+++ b/contrib/libs/clang16-rt/lib/lsan/lsan_common_fuchsia.cpp
@@ -0,0 +1,167 @@
+//=-- lsan_common_fuchsia.cpp --------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===---------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Implementation of common leak checking functionality. Fuchsia-specific code.
+//
+//===---------------------------------------------------------------------===//
+
+#include "lsan_common.h"
+#include "lsan_thread.h"
+#include "sanitizer_common/sanitizer_platform.h"
+
+#if CAN_SANITIZE_LEAKS && SANITIZER_FUCHSIA
+#error #include <zircon/sanitizer.h>
+
+#include "lsan_allocator.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_stoptheworld_fuchsia.h"
+#include "sanitizer_common/sanitizer_thread_registry.h"
+
+// Ensure that the Zircon system ABI is linked in.
+#pragma comment(lib, "zircon")
+
+namespace __lsan {
+
+void InitializePlatformSpecificModules() {}
+
+LoadedModule *GetLinker() { return nullptr; }
+
+__attribute__((tls_model("initial-exec"))) THREADLOCAL int disable_counter;
+bool DisabledInThisThread() { return disable_counter > 0; }
+void DisableInThisThread() { disable_counter++; }
+void EnableInThisThread() {
+ if (disable_counter == 0) {
+ DisableCounterUnderflow();
+ }
+ disable_counter--;
+}
+
+// There is nothing left to do after the globals callbacks.
+void ProcessGlobalRegions(Frontier *frontier) {}
+
+// Nothing to do here.
+void ProcessPlatformSpecificAllocations(Frontier *frontier) {}
+
+// On Fuchsia, we can intercept _Exit gracefully, and return a failing exit
+// code if required at that point. Calling Die() here is undefined
+// behavior and causes rare race conditions.
+void HandleLeaks() {}
+
+// This is defined differently in asan_fuchsia.cpp and lsan_fuchsia.cpp.
+bool UseExitcodeOnLeak();
+
+int ExitHook(int status) {
+ if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) {
+ if (UseExitcodeOnLeak())
+ DoLeakCheck();
+ else
+ DoRecoverableLeakCheckVoid();
+ }
+ return status == 0 && HasReportedLeaks() ? common_flags()->exitcode : status;
+}
+
+void LockStuffAndStopTheWorld(StopTheWorldCallback callback,
+ CheckForLeaksParam *argument) {
+ ScopedStopTheWorldLock lock;
+
+ struct Params {
+ InternalMmapVector<uptr> allocator_caches;
+ StopTheWorldCallback callback;
+ CheckForLeaksParam *argument;
+ } params = {{}, callback, argument};
+
+ // Callback from libc for globals (data/bss modulo relro), when enabled.
+ auto globals = +[](void *chunk, size_t size, void *data) {
+ auto params = static_cast<const Params *>(data);
+ uptr begin = reinterpret_cast<uptr>(chunk);
+ uptr end = begin + size;
+ ScanGlobalRange(begin, end, &params->argument->frontier);
+ };
+
+ // Callback from libc for thread stacks.
+ auto stacks = +[](void *chunk, size_t size, void *data) {
+ auto params = static_cast<const Params *>(data);
+ uptr begin = reinterpret_cast<uptr>(chunk);
+ uptr end = begin + size;
+ ScanRangeForPointers(begin, end, &params->argument->frontier, "STACK",
+ kReachable);
+ };
+
+ // Callback from libc for thread registers.
+ auto registers = +[](void *chunk, size_t size, void *data) {
+ auto params = static_cast<const Params *>(data);
+ uptr begin = reinterpret_cast<uptr>(chunk);
+ uptr end = begin + size;
+ ScanRangeForPointers(begin, end, &params->argument->frontier, "REGISTERS",
+ kReachable);
+ };
+
+ if (flags()->use_tls) {
+ // Collect the allocator cache range from each thread so these
+ // can all be excluded from the reported TLS ranges.
+ GetAllThreadAllocatorCachesLocked(&params.allocator_caches);
+ __sanitizer::Sort(params.allocator_caches.data(),
+ params.allocator_caches.size());
+ }
+
+ // Callback from libc for TLS regions. This includes thread_local
+ // variables as well as C11 tss_set and POSIX pthread_setspecific.
+ auto tls = +[](void *chunk, size_t size, void *data) {
+ auto params = static_cast<const Params *>(data);
+ uptr begin = reinterpret_cast<uptr>(chunk);
+ uptr end = begin + size;
+ auto i = __sanitizer::InternalLowerBound(params->allocator_caches, begin);
+ if (i < params->allocator_caches.size() &&
+ params->allocator_caches[i] >= begin &&
+ end - params->allocator_caches[i] <= sizeof(AllocatorCache)) {
+ // Split the range in two and omit the allocator cache within.
+ ScanRangeForPointers(begin, params->allocator_caches[i],
+ &params->argument->frontier, "TLS", kReachable);
+ uptr begin2 = params->allocator_caches[i] + sizeof(AllocatorCache);
+ ScanRangeForPointers(begin2, end, &params->argument->frontier, "TLS",
+ kReachable);
+ } else {
+ ScanRangeForPointers(begin, end, &params->argument->frontier, "TLS",
+ kReachable);
+ }
+ };
+
+ // This stops the world and then makes callbacks for various memory regions.
+ // The final callback is the last thing before the world starts up again.
+ __sanitizer_memory_snapshot(
+ flags()->use_globals ? globals : nullptr,
+ flags()->use_stacks ? stacks : nullptr,
+ flags()->use_registers ? registers : nullptr,
+ flags()->use_tls ? tls : nullptr,
+ [](zx_status_t, void *data) {
+ auto params = static_cast<const Params *>(data);
+
+ // We don't use the thread registry at all for enumerating the threads
+ // and their stacks, registers, and TLS regions. So use it separately
+ // just for the allocator cache, and to call ScanExtraStackRanges,
+ // which ASan needs.
+ if (flags()->use_stacks) {
+ InternalMmapVector<Range> ranges;
+ GetThreadExtraStackRangesLocked(&ranges);
+ ScanExtraStackRanges(ranges, &params->argument->frontier);
+ }
+ params->callback(SuspendedThreadsListFuchsia(), params->argument);
+ },
+ &params);
+}
+
+} // namespace __lsan
+
+// This is declared (in extern "C") by <zircon/sanitizer.h>.
+// _Exit calls this directly to intercept and change the status value.
+int __sanitizer_process_exit_hook(int status) {
+ return __lsan::ExitHook(status);
+}
+
+#endif
diff --git a/contrib/libs/clang16-rt/lib/lsan/lsan_common_linux.cpp b/contrib/libs/clang16-rt/lib/lsan/lsan_common_linux.cpp
new file mode 100644
index 0000000000..692ad35169
--- /dev/null
+++ b/contrib/libs/clang16-rt/lib/lsan/lsan_common_linux.cpp
@@ -0,0 +1,147 @@
+//=-- lsan_common_linux.cpp -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Implementation of common leak checking functionality. Linux/NetBSD-specific
+// code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#include "lsan_common.h"
+
+#if CAN_SANITIZE_LEAKS && (SANITIZER_LINUX || SANITIZER_NETBSD)
+#include <link.h>
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_getauxval.h"
+#include "sanitizer_common/sanitizer_linux.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+
+namespace __lsan {
+
+static const char kLinkerName[] = "ld";
+
+static char linker_placeholder[sizeof(LoadedModule)] ALIGNED(64);
+static LoadedModule *linker = nullptr;
+
+static bool IsLinker(const LoadedModule& module) {
+#if SANITIZER_USE_GETAUXVAL
+ return module.base_address() == getauxval(AT_BASE);
+#else
+ return LibraryNameIs(module.full_name(), kLinkerName);
+#endif // SANITIZER_USE_GETAUXVAL
+}
+
+__attribute__((tls_model("initial-exec")))
+THREADLOCAL int disable_counter;
+bool DisabledInThisThread() { return disable_counter > 0; }
+void DisableInThisThread() { disable_counter++; }
+void EnableInThisThread() {
+ if (disable_counter == 0) {
+ DisableCounterUnderflow();
+ }
+ disable_counter--;
+}
+
+void InitializePlatformSpecificModules() {
+ ListOfModules modules;
+ modules.init();
+ for (LoadedModule &module : modules) {
+ if (!IsLinker(module))
+ continue;
+ if (linker == nullptr) {
+ linker = reinterpret_cast<LoadedModule *>(linker_placeholder);
+ *linker = module;
+ module = LoadedModule();
+ } else {
+ VReport(1, "LeakSanitizer: Multiple modules match \"%s\". "
+ "TLS and other allocations originating from linker might be "
+ "falsely reported as leaks.\n", kLinkerName);
+ linker->clear();
+ linker = nullptr;
+ return;
+ }
+ }
+ if (linker == nullptr) {
+ VReport(1, "LeakSanitizer: Dynamic linker not found. TLS and other "
+ "allocations originating from linker might be falsely reported "
+ "as leaks.\n");
+ }
+}
+
+static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size,
+ void *data) {
+ Frontier *frontier = reinterpret_cast<Frontier *>(data);
+ for (uptr j = 0; j < info->dlpi_phnum; j++) {
+ const ElfW(Phdr) *phdr = &(info->dlpi_phdr[j]);
+ // We're looking for .data and .bss sections, which reside in writeable,
+ // loadable segments.
+ if (!(phdr->p_flags & PF_W) || (phdr->p_type != PT_LOAD) ||
+ (phdr->p_memsz == 0))
+ continue;
+ uptr begin = info->dlpi_addr + phdr->p_vaddr;
+ uptr end = begin + phdr->p_memsz;
+ ScanGlobalRange(begin, end, frontier);
+ }
+ return 0;
+}
+
+#if SANITIZER_ANDROID && __ANDROID_API__ < 21
+extern "C" __attribute__((weak)) int dl_iterate_phdr(
+ int (*)(struct dl_phdr_info *, size_t, void *), void *);
+#endif
+
+// Scans global variables for heap pointers.
+void ProcessGlobalRegions(Frontier *frontier) {
+ if (!flags()->use_globals) return;
+ dl_iterate_phdr(ProcessGlobalRegionsCallback, frontier);
+}
+
+LoadedModule *GetLinker() { return linker; }
+
+void ProcessPlatformSpecificAllocations(Frontier *frontier) {}
+
+struct DoStopTheWorldParam {
+ StopTheWorldCallback callback;
+ void *argument;
+};
+
+// While calling Die() here is undefined behavior and can potentially
+// cause race conditions, it isn't possible to intercept exit on Linux,
+// so we have no choice but to call Die() from the atexit handler.
+void HandleLeaks() {
+ if (common_flags()->exitcode) Die();
+}
+
+static int LockStuffAndStopTheWorldCallback(struct dl_phdr_info *info,
+ size_t size, void *data) {
+ ScopedStopTheWorldLock lock;
+ DoStopTheWorldParam *param = reinterpret_cast<DoStopTheWorldParam *>(data);
+ StopTheWorld(param->callback, param->argument);
+ return 1;
+}
+
+// LSan calls dl_iterate_phdr() from the tracer task. This may deadlock: if one
+// of the threads is frozen while holding the libdl lock, the tracer will hang
+// in dl_iterate_phdr() forever.
+// Luckily, (a) the lock is reentrant and (b) libc can't distinguish between the
+// tracer task and the thread that spawned it. Thus, if we run the tracer task
+// while holding the libdl lock in the parent thread, we can safely reenter it
+// in the tracer. The solution is to run stoptheworld from a dl_iterate_phdr()
+// callback in the parent thread.
+void LockStuffAndStopTheWorld(StopTheWorldCallback callback,
+ CheckForLeaksParam *argument) {
+ DoStopTheWorldParam param = {callback, argument};
+ dl_iterate_phdr(LockStuffAndStopTheWorldCallback, &param);
+}
+
+} // namespace __lsan
+
+#endif
diff --git a/contrib/libs/clang16-rt/lib/lsan/lsan_common_mac.cpp b/contrib/libs/clang16-rt/lib/lsan/lsan_common_mac.cpp
new file mode 100644
index 0000000000..b6b1509574
--- /dev/null
+++ b/contrib/libs/clang16-rt/lib/lsan/lsan_common_mac.cpp
@@ -0,0 +1,238 @@
+//=-- lsan_common_mac.cpp -------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Implementation of common leak checking functionality. Darwin-specific code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "lsan_common.h"
+
+#if CAN_SANITIZE_LEAKS && SANITIZER_APPLE
+
+# include <mach/mach.h>
+# include <mach/vm_statistics.h>
+# include <pthread.h>
+
+# include "lsan_allocator.h"
+# include "sanitizer_common/sanitizer_allocator_internal.h"
+namespace __lsan {
+
+enum class SeenRegion {
+ None = 0,
+ AllocOnce = 1 << 0,
+ LibDispatch = 1 << 1,
+ Foundation = 1 << 2,
+ All = AllocOnce | LibDispatch | Foundation
+};
+
+inline SeenRegion operator|(SeenRegion left, SeenRegion right) {
+ return static_cast<SeenRegion>(static_cast<int>(left) |
+ static_cast<int>(right));
+}
+
+inline SeenRegion &operator|=(SeenRegion &left, const SeenRegion &right) {
+ left = left | right;
+ return left;
+}
+
+struct RegionScanState {
+ SeenRegion seen_regions = SeenRegion::None;
+ bool in_libdispatch = false;
+};
+
+typedef struct {
+ int disable_counter;
+ u32 current_thread_id;
+ AllocatorCache cache;
+} thread_local_data_t;
+
+static pthread_key_t key;
+static pthread_once_t key_once = PTHREAD_ONCE_INIT;
+
+// The main thread destructor requires the current thread id, so we can't
+// destroy the thread-local data until it's been used and reset to the
+// invalid tid.
+void restore_tid_data(void *ptr) {
+ thread_local_data_t *data = (thread_local_data_t *)ptr;
+ if (data->current_thread_id != kInvalidTid)
+ pthread_setspecific(key, data);
+}
+
+static void make_tls_key() {
+ CHECK_EQ(pthread_key_create(&key, restore_tid_data), 0);
+}
+
+static thread_local_data_t *get_tls_val(bool alloc) {
+ pthread_once(&key_once, make_tls_key);
+
+ thread_local_data_t *ptr = (thread_local_data_t *)pthread_getspecific(key);
+ if (ptr == NULL && alloc) {
+ ptr = (thread_local_data_t *)InternalAlloc(sizeof(*ptr));
+ ptr->disable_counter = 0;
+ ptr->current_thread_id = kInvalidTid;
+ ptr->cache = AllocatorCache();
+ pthread_setspecific(key, ptr);
+ }
+
+ return ptr;
+}
+
+bool DisabledInThisThread() {
+ thread_local_data_t *data = get_tls_val(false);
+ return data ? data->disable_counter > 0 : false;
+}
+
+void DisableInThisThread() { ++get_tls_val(true)->disable_counter; }
+
+void EnableInThisThread() {
+ int *disable_counter = &get_tls_val(true)->disable_counter;
+ if (*disable_counter == 0) {
+ DisableCounterUnderflow();
+ }
+ --*disable_counter;
+}
+
+u32 GetCurrentThread() {
+ thread_local_data_t *data = get_tls_val(false);
+ return data ? data->current_thread_id : kInvalidTid;
+}
+
+void SetCurrentThread(u32 tid) { get_tls_val(true)->current_thread_id = tid; }
+
+AllocatorCache *GetAllocatorCache() { return &get_tls_val(true)->cache; }
+
+LoadedModule *GetLinker() { return nullptr; }
+
+// Required on Linux for initialization of TLS behavior, but should not be
+// required on Darwin.
+void InitializePlatformSpecificModules() {}
+
+// Sections which can't contain global pointers. This list errs on the
+// side of caution to avoid false positives, at the expense of performance.
+//
+// Other potentially safe sections include:
+// __all_image_info, __crash_info, __const, __got, __interpose, __objc_msg_break
+//
+// Sections which definitely cannot be included here are:
+// __objc_data, __objc_const, __data, __bss, __common, __thread_data,
+// __thread_bss, __thread_vars, __objc_opt_rw, __objc_opt_ptrs
+static const char *kSkippedSecNames[] = {
+ "__cfstring", "__la_symbol_ptr", "__mod_init_func",
+ "__mod_term_func", "__nl_symbol_ptr", "__objc_classlist",
+ "__objc_classrefs", "__objc_imageinfo", "__objc_nlclslist",
+ "__objc_protolist", "__objc_selrefs", "__objc_superrefs"};
+
+// Scans global variables for heap pointers.
+void ProcessGlobalRegions(Frontier *frontier) {
+ for (auto name : kSkippedSecNames)
+ CHECK(internal_strnlen(name, kMaxSegName + 1) <= kMaxSegName);
+
+ MemoryMappingLayout memory_mapping(false);
+ InternalMmapVector<LoadedModule> modules;
+ modules.reserve(128);
+ memory_mapping.DumpListOfModules(&modules);
+ for (uptr i = 0; i < modules.size(); ++i) {
+ // Even when global scanning is disabled, we still need to scan
+ // system libraries for stashed pointers
+ if (!flags()->use_globals && modules[i].instrumented()) continue;
+
+ for (const __sanitizer::LoadedModule::AddressRange &range :
+ modules[i].ranges()) {
+ // Sections storing global variables are writable and non-executable
+ if (range.executable || !range.writable) continue;
+
+      bool is_skipped_sec = false;
+      for (auto name : kSkippedSecNames)
+        if (!internal_strcmp(range.name, name)) is_skipped_sec = true;
+      if (is_skipped_sec) continue;
+
+ ScanGlobalRange(range.beg, range.end, frontier);
+ }
+ }
+}
+
+void ProcessPlatformSpecificAllocations(Frontier *frontier) {
+ vm_address_t address = 0;
+ kern_return_t err = KERN_SUCCESS;
+
+ InternalMmapVectorNoCtor<RootRegion> const *root_regions = GetRootRegions();
+
+ RegionScanState scan_state;
+ while (err == KERN_SUCCESS) {
+ vm_size_t size = 0;
+ unsigned depth = 1;
+ struct vm_region_submap_info_64 info;
+ mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
+ err = vm_region_recurse_64(mach_task_self(), &address, &size, &depth,
+ (vm_region_info_t)&info, &count);
+
+ uptr end_address = address + size;
+ if (info.user_tag == VM_MEMORY_OS_ALLOC_ONCE) {
+      // libxpc stashes some pointers in the Kernel Alloc Once page;
+ // make sure not to report those as leaks.
+ scan_state.seen_regions |= SeenRegion::AllocOnce;
+ ScanRangeForPointers(address, end_address, frontier, "GLOBAL",
+ kReachable);
+ } else if (info.user_tag == VM_MEMORY_FOUNDATION) {
+ // Objective-C block trampolines use the Foundation region.
+ scan_state.seen_regions |= SeenRegion::Foundation;
+ ScanRangeForPointers(address, end_address, frontier, "GLOBAL",
+ kReachable);
+ } else if (info.user_tag == VM_MEMORY_LIBDISPATCH) {
+ // Dispatch continuations use the libdispatch region. Empirically, there
+ // can be more than one region with this tag, so we'll optimistically
+      // assume that they're contiguous. Otherwise, we would need to scan every
+ // region to ensure we find them all.
+ scan_state.in_libdispatch = true;
+ ScanRangeForPointers(address, end_address, frontier, "GLOBAL",
+ kReachable);
+ } else if (scan_state.in_libdispatch) {
+ scan_state.seen_regions |= SeenRegion::LibDispatch;
+ scan_state.in_libdispatch = false;
+ }
+
+    // Recursing over the full memory map is very slow; break out
+ // early if we don't need the full iteration.
+ if (scan_state.seen_regions == SeenRegion::All &&
+ !(flags()->use_root_regions && root_regions->size() > 0)) {
+ break;
+ }
+
+ // This additional root region scan is required on Darwin in order to
+ // detect root regions contained within mmap'd memory regions, because
+ // the Darwin implementation of sanitizer_procmaps traverses images
+ // as loaded by dyld, and not the complete set of all memory regions.
+ //
+ // TODO(fjricci) - remove this once sanitizer_procmaps_mac has the same
+ // behavior as sanitizer_procmaps_linux and traverses all memory regions
+ if (flags()->use_root_regions) {
+ for (uptr i = 0; i < root_regions->size(); i++) {
+ ScanRootRegion(frontier, (*root_regions)[i], address, end_address,
+ info.protection & kProtectionRead);
+ }
+ }
+
+ address = end_address;
+ }
+}
+
+// On Darwin, we can intercept _exit gracefully, and return a failing exit code
+// if required at that point. Calling Die() here is undefined behavior and
+// causes rare race conditions.
+void HandleLeaks() {}
+
+void LockStuffAndStopTheWorld(StopTheWorldCallback callback,
+ CheckForLeaksParam *argument) {
+ ScopedStopTheWorldLock lock;
+ StopTheWorld(callback, argument);
+}
+
+} // namespace __lsan
+
+#endif // CAN_SANITIZE_LEAKS && SANITIZER_APPLE
diff --git a/contrib/libs/clang16-rt/lib/lsan/lsan_flags.inc b/contrib/libs/clang16-rt/lib/lsan/lsan_flags.inc
new file mode 100644
index 0000000000..9350f4bcdc
--- /dev/null
+++ b/contrib/libs/clang16-rt/lib/lsan/lsan_flags.inc
@@ -0,0 +1,46 @@
+//===-- lsan_flags.inc ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// LSan runtime flags.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LSAN_FLAG
+# error "Define LSAN_FLAG prior to including this file!"
+#endif
+
+// LSAN_FLAG(Type, Name, DefaultValue, Description)
+// See COMMON_FLAG in sanitizer_flags.inc for more details.
+
+LSAN_FLAG(bool, report_objects, false,
+ "Print addresses of leaked objects after main leak report.")
+LSAN_FLAG(
+ int, resolution, 0,
+ "Aggregate two objects into one leak if this many stack frames match. If "
+ "zero, the entire stack trace must match.")
+LSAN_FLAG(int, max_leaks, 0,
+          "Maximum number of leaks to report. If 0, all leaks are reported.")
+
+// Flags controlling the root set of reachable memory.
+LSAN_FLAG(bool, use_globals, true,
+ "Root set: include global variables (.data and .bss)")
+LSAN_FLAG(bool, use_stacks, true, "Root set: include thread stacks")
+LSAN_FLAG(bool, use_registers, true, "Root set: include thread registers")
+LSAN_FLAG(bool, use_tls, true,
+ "Root set: include TLS and thread-specific storage")
+LSAN_FLAG(bool, use_root_regions, true,
+ "Root set: include regions added via __lsan_register_root_region().")
+LSAN_FLAG(bool, use_ld_allocations, true,
+ "Root set: mark as reachable all allocations made from dynamic "
+ "linker. This was the old way to handle dynamic TLS, and will "
+ "be removed soon. Do not use this flag.")
+
+LSAN_FLAG(bool, use_unaligned, false, "Consider unaligned pointers valid.")
+LSAN_FLAG(bool, use_poisoned, false,
+ "Consider pointers found in poisoned memory to be valid.")
+LSAN_FLAG(bool, log_pointers, false, "Debug logging of scanned pointers.")
+LSAN_FLAG(bool, log_threads, false, "Debug logging of thread handling.")
+LSAN_FLAG(const char *, suppressions, "", "Suppressions file name.")
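+
+// At run time these flags are set through the colon-separated LSAN_OPTIONS
+// environment variable (illustrative invocation):
+//
+//   LSAN_OPTIONS=use_unaligned=1:report_objects=1:suppressions=supp.txt ./app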
diff --git a/contrib/libs/clang16-rt/lib/lsan/lsan_fuchsia.cpp b/contrib/libs/clang16-rt/lib/lsan/lsan_fuchsia.cpp
new file mode 100644
index 0000000000..755024460d
--- /dev/null
+++ b/contrib/libs/clang16-rt/lib/lsan/lsan_fuchsia.cpp
@@ -0,0 +1,129 @@
+//=-- lsan_fuchsia.cpp ---------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===---------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Standalone LSan RTL code specific to Fuchsia.
+//
+//===---------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+
+#if SANITIZER_FUCHSIA
+#error #include <zircon/sanitizer.h>
+
+#include "lsan.h"
+#include "lsan_allocator.h"
+
+using namespace __lsan;
+
+namespace __lsan {
+
+void LsanOnDeadlySignal(int signo, void *siginfo, void *context) {}
+
+ThreadContext::ThreadContext(int tid) : ThreadContextLsanBase(tid) {}
+
+struct OnCreatedArgs {
+ uptr stack_begin, stack_end;
+};
+
+// On Fuchsia, the stack bounds of a new thread are available before
+// the thread itself has started running.
+void ThreadContext::OnCreated(void *arg) {
+ // Stack bounds passed through from __sanitizer_before_thread_create_hook
+ // or InitializeMainThread.
+ auto args = reinterpret_cast<const OnCreatedArgs *>(arg);
+ stack_begin_ = args->stack_begin;
+ stack_end_ = args->stack_end;
+}
+
+struct OnStartedArgs {
+ uptr cache_begin, cache_end;
+};
+
+void ThreadContext::OnStarted(void *arg) {
+ auto args = reinterpret_cast<const OnStartedArgs *>(arg);
+ cache_begin_ = args->cache_begin;
+ cache_end_ = args->cache_end;
+}
+
+void ThreadStart(u32 tid) {
+ OnStartedArgs args;
+ GetAllocatorCacheRange(&args.cache_begin, &args.cache_end);
+ CHECK_EQ(args.cache_end - args.cache_begin, sizeof(AllocatorCache));
+ ThreadContextLsanBase::ThreadStart(tid, GetTid(), ThreadType::Regular, &args);
+}
+
+void InitializeMainThread() {
+ OnCreatedArgs args;
+ __sanitizer::GetThreadStackTopAndBottom(true, &args.stack_end,
+ &args.stack_begin);
+ u32 tid = ThreadCreate(kMainTid, true, &args);
+ CHECK_EQ(tid, 0);
+ ThreadStart(tid);
+}
+
+void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches) {
+ GetLsanThreadRegistryLocked()->RunCallbackForEachThreadLocked(
+ [](ThreadContextBase *tctx, void *arg) {
+ auto ctx = static_cast<ThreadContext *>(tctx);
+ static_cast<decltype(caches)>(arg)->push_back(ctx->cache_begin());
+ },
+ caches);
+}
+
+// On Fuchsia, leak detection is done by a special hook after atexit hooks.
+// So this doesn't install any atexit hook like on other platforms.
+void InstallAtExitCheckLeaks() {}
+
+// ASan defines this to check its `halt_on_error` flag.
+bool UseExitcodeOnLeak() { return true; }
+
+} // namespace __lsan
+
+// These are declared (in extern "C") by <zircon/sanitizer.h>.
+// The system runtime will call our definitions directly.
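+//
+// Rough per-thread call order, per the comments on each hook below (a
+// sketch, not a contract):
+//   h = __sanitizer_before_thread_create_hook(...)   // creating thread
+//   __sanitizer_thread_create_hook(h, thread, err)   // creating thread
+//   __sanitizer_thread_start_hook(h, self)           // new thread
+//   __sanitizer_thread_exit_hook(h, self)            // new thread, at exit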
+
+// This is called before each thread creation is attempted. So, in
+// its first call, the calling thread is the initial and sole thread.
+void *__sanitizer_before_thread_create_hook(thrd_t thread, bool detached,
+ const char *name, void *stack_base,
+ size_t stack_size) {
+ ENSURE_LSAN_INITED;
+ EnsureMainThreadIDIsCorrect();
+ OnCreatedArgs args;
+ args.stack_begin = reinterpret_cast<uptr>(stack_base);
+ args.stack_end = args.stack_begin + stack_size;
+ u32 parent_tid = GetCurrentThread();
+ u32 tid = ThreadCreate(parent_tid, detached, &args);
+ return reinterpret_cast<void *>(static_cast<uptr>(tid));
+}
+
+// This is called after creating a new thread (in the creating thread),
+// with the pointer returned by __sanitizer_before_thread_create_hook (above).
+void __sanitizer_thread_create_hook(void *hook, thrd_t thread, int error) {
+ u32 tid = static_cast<u32>(reinterpret_cast<uptr>(hook));
+ // On success, there is nothing to do here.
+ if (error != thrd_success) {
+ // Clean up the thread registry for the thread creation that didn't happen.
+ GetLsanThreadRegistryLocked()->FinishThread(tid);
+ }
+}
+
+// This is called in the newly-created thread before it runs anything else,
+// with the pointer returned by __sanitizer_before_thread_create_hook (above).
+void __sanitizer_thread_start_hook(void *hook, thrd_t self) {
+ u32 tid = static_cast<u32>(reinterpret_cast<uptr>(hook));
+ ThreadStart(tid);
+}
+
+// Each thread runs this just before it exits,
+// with the pointer returned by __sanitizer_before_thread_create_hook (above).
+// All per-thread destructors have already been called.
+void __sanitizer_thread_exit_hook(void *hook, thrd_t self) { ThreadFinish(); }
+
+#endif // SANITIZER_FUCHSIA
diff --git a/contrib/libs/clang16-rt/lib/lsan/lsan_fuchsia.h b/contrib/libs/clang16-rt/lib/lsan/lsan_fuchsia.h
new file mode 100644
index 0000000000..e730d8f25f
--- /dev/null
+++ b/contrib/libs/clang16-rt/lib/lsan/lsan_fuchsia.h
@@ -0,0 +1,35 @@
+//=-- lsan_fuchsia.h ---------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===---------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Standalone LSan RTL code specific to Fuchsia.
+//
+//===---------------------------------------------------------------------===//
+
+#ifndef LSAN_FUCHSIA_H
+#define LSAN_FUCHSIA_H
+
+#include "lsan_thread.h"
+#include "sanitizer_common/sanitizer_platform.h"
+
+#if !SANITIZER_FUCHSIA
+#error "lsan_fuchsia.h is used only on Fuchsia systems (SANITIZER_FUCHSIA)"
+#endif
+
+namespace __lsan {
+
+class ThreadContext final : public ThreadContextLsanBase {
+ public:
+ explicit ThreadContext(int tid);
+ void OnCreated(void *arg) override;
+ void OnStarted(void *arg) override;
+};
+
+} // namespace __lsan
+
+#endif // LSAN_FUCHSIA_H
diff --git a/contrib/libs/clang16-rt/lib/lsan/lsan_interceptors.cpp b/contrib/libs/clang16-rt/lib/lsan/lsan_interceptors.cpp
new file mode 100644
index 0000000000..3a1b2afdbb
--- /dev/null
+++ b/contrib/libs/clang16-rt/lib/lsan/lsan_interceptors.cpp
@@ -0,0 +1,542 @@
+//=-- lsan_interceptors.cpp -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Interceptors for standalone LSan.
+//
+//===----------------------------------------------------------------------===//
+
+#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_dlsym.h"
+#include "sanitizer_common/sanitizer_allocator_report.h"
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_linux.h"
+#include "sanitizer_common/sanitizer_platform_interceptors.h"
+#include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
+#include "sanitizer_common/sanitizer_platform_limits_posix.h"
+#if SANITIZER_POSIX
+#include "sanitizer_common/sanitizer_posix.h"
+#endif
+#include "sanitizer_common/sanitizer_tls_get_addr.h"
+#include "lsan.h"
+#include "lsan_allocator.h"
+#include "lsan_common.h"
+#include "lsan_thread.h"
+
+#include <stddef.h>
+
+using namespace __lsan;
+
+extern "C" {
+int pthread_attr_init(void *attr);
+int pthread_attr_destroy(void *attr);
+int pthread_attr_getdetachstate(void *attr, int *v);
+int pthread_key_create(unsigned *key, void (*destructor)(void* v));
+int pthread_setspecific(unsigned key, const void *v);
+}
+
+struct DlsymAlloc : DlSymAllocator<DlsymAlloc> {
+ static bool UseImpl() { return lsan_init_is_running; }
+ static void OnAllocate(const void *ptr, uptr size) {
+#if CAN_SANITIZE_LEAKS
+    // Suppress leaks from dlerror(). Previously, a dlsym hack on a global
+    // array was used by the leak sanitizer as a root region.
+ __lsan_register_root_region(ptr, size);
+#endif
+ }
+ static void OnFree(const void *ptr, uptr size) {
+#if CAN_SANITIZE_LEAKS
+ __lsan_unregister_root_region(ptr, size);
+#endif
+ }
+};
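+
+// Note: __lsan_register_root_region() / __lsan_unregister_root_region() are
+// part of the public <sanitizer/lsan_interface.h> API; user code can make the
+// same calls to keep a custom memory region in the root set, e.g. (sketch;
+// `arena` and its size are hypothetical):
+//   __lsan_register_root_region(arena, arena_size);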
+
+///// Malloc/free interceptors. /////
+
+namespace std {
+ struct nothrow_t;
+ enum class align_val_t: size_t;
+}
+
+#if !SANITIZER_APPLE
+INTERCEPTOR(void*, malloc, uptr size) {
+ if (DlsymAlloc::Use())
+ return DlsymAlloc::Allocate(size);
+ ENSURE_LSAN_INITED;
+ GET_STACK_TRACE_MALLOC;
+ return lsan_malloc(size, stack);
+}
+
+INTERCEPTOR(void, free, void *p) {
+ if (DlsymAlloc::PointerIsMine(p))
+ return DlsymAlloc::Free(p);
+ ENSURE_LSAN_INITED;
+ lsan_free(p);
+}
+
+INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
+ if (DlsymAlloc::Use())
+ return DlsymAlloc::Callocate(nmemb, size);
+ ENSURE_LSAN_INITED;
+ GET_STACK_TRACE_MALLOC;
+ return lsan_calloc(nmemb, size, stack);
+}
+
+INTERCEPTOR(void *, realloc, void *ptr, uptr size) {
+ if (DlsymAlloc::Use() || DlsymAlloc::PointerIsMine(ptr))
+ return DlsymAlloc::Realloc(ptr, size);
+ ENSURE_LSAN_INITED;
+ GET_STACK_TRACE_MALLOC;
+ return lsan_realloc(ptr, size, stack);
+}
+
+INTERCEPTOR(void*, reallocarray, void *q, uptr nmemb, uptr size) {
+ ENSURE_LSAN_INITED;
+ GET_STACK_TRACE_MALLOC;
+ return lsan_reallocarray(q, nmemb, size, stack);
+}
+
+INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
+ ENSURE_LSAN_INITED;
+ GET_STACK_TRACE_MALLOC;
+ return lsan_posix_memalign(memptr, alignment, size, stack);
+}
+
+INTERCEPTOR(void*, valloc, uptr size) {
+ ENSURE_LSAN_INITED;
+ GET_STACK_TRACE_MALLOC;
+ return lsan_valloc(size, stack);
+}
+#endif // !SANITIZER_APPLE
+
+#if SANITIZER_INTERCEPT_MEMALIGN
+INTERCEPTOR(void*, memalign, uptr alignment, uptr size) {
+ ENSURE_LSAN_INITED;
+ GET_STACK_TRACE_MALLOC;
+ return lsan_memalign(alignment, size, stack);
+}
+#define LSAN_MAYBE_INTERCEPT_MEMALIGN INTERCEPT_FUNCTION(memalign)
+#else
+#define LSAN_MAYBE_INTERCEPT_MEMALIGN
+#endif // SANITIZER_INTERCEPT_MEMALIGN
+
+#if SANITIZER_INTERCEPT___LIBC_MEMALIGN
+INTERCEPTOR(void *, __libc_memalign, uptr alignment, uptr size) {
+ ENSURE_LSAN_INITED;
+ GET_STACK_TRACE_MALLOC;
+ void *res = lsan_memalign(alignment, size, stack);
+ DTLS_on_libc_memalign(res, size);
+ return res;
+}
+#define LSAN_MAYBE_INTERCEPT___LIBC_MEMALIGN INTERCEPT_FUNCTION(__libc_memalign)
+#else
+#define LSAN_MAYBE_INTERCEPT___LIBC_MEMALIGN
+#endif // SANITIZER_INTERCEPT___LIBC_MEMALIGN
+
+#if SANITIZER_INTERCEPT_ALIGNED_ALLOC
+INTERCEPTOR(void*, aligned_alloc, uptr alignment, uptr size) {
+ ENSURE_LSAN_INITED;
+ GET_STACK_TRACE_MALLOC;
+ return lsan_aligned_alloc(alignment, size, stack);
+}
+#define LSAN_MAYBE_INTERCEPT_ALIGNED_ALLOC INTERCEPT_FUNCTION(aligned_alloc)
+#else
+#define LSAN_MAYBE_INTERCEPT_ALIGNED_ALLOC
+#endif
+
+#if SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE
+INTERCEPTOR(uptr, malloc_usable_size, void *ptr) {
+ ENSURE_LSAN_INITED;
+ return GetMallocUsableSize(ptr);
+}
+#define LSAN_MAYBE_INTERCEPT_MALLOC_USABLE_SIZE \
+ INTERCEPT_FUNCTION(malloc_usable_size)
+#else
+#define LSAN_MAYBE_INTERCEPT_MALLOC_USABLE_SIZE
+#endif
+
+#if SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO
+struct fake_mallinfo {
+ int x[10];
+};
+
+INTERCEPTOR(struct fake_mallinfo, mallinfo, void) {
+ struct fake_mallinfo res;
+ internal_memset(&res, 0, sizeof(res));
+ return res;
+}
+#define LSAN_MAYBE_INTERCEPT_MALLINFO INTERCEPT_FUNCTION(mallinfo)
+
+INTERCEPTOR(int, mallopt, int cmd, int value) {
+ return 0;
+}
+#define LSAN_MAYBE_INTERCEPT_MALLOPT INTERCEPT_FUNCTION(mallopt)
+#else
+#define LSAN_MAYBE_INTERCEPT_MALLINFO
+#define LSAN_MAYBE_INTERCEPT_MALLOPT
+#endif // SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO
+
+#if SANITIZER_INTERCEPT_PVALLOC
+INTERCEPTOR(void*, pvalloc, uptr size) {
+ ENSURE_LSAN_INITED;
+ GET_STACK_TRACE_MALLOC;
+ return lsan_pvalloc(size, stack);
+}
+#define LSAN_MAYBE_INTERCEPT_PVALLOC INTERCEPT_FUNCTION(pvalloc)
+#else
+#define LSAN_MAYBE_INTERCEPT_PVALLOC
+#endif // SANITIZER_INTERCEPT_PVALLOC
+
+#if SANITIZER_INTERCEPT_CFREE
+INTERCEPTOR(void, cfree, void *p) ALIAS(WRAPPER_NAME(free));
+#define LSAN_MAYBE_INTERCEPT_CFREE INTERCEPT_FUNCTION(cfree)
+#else
+#define LSAN_MAYBE_INTERCEPT_CFREE
+#endif // SANITIZER_INTERCEPT_CFREE
+
+#if SANITIZER_INTERCEPT_MCHECK_MPROBE
+INTERCEPTOR(int, mcheck, void (*abortfunc)(int mstatus)) {
+ return 0;
+}
+
+INTERCEPTOR(int, mcheck_pedantic, void (*abortfunc)(int mstatus)) {
+ return 0;
+}
+
+INTERCEPTOR(int, mprobe, void *ptr) {
+ return 0;
+}
+#endif // SANITIZER_INTERCEPT_MCHECK_MPROBE
+
+
+// TODO(alekseys): throw std::bad_alloc instead of dying on OOM.
+#define OPERATOR_NEW_BODY(nothrow)\
+ ENSURE_LSAN_INITED;\
+ GET_STACK_TRACE_MALLOC;\
+ void *res = lsan_malloc(size, stack);\
+ if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\
+ return res;
+#define OPERATOR_NEW_BODY_ALIGN(nothrow)\
+ ENSURE_LSAN_INITED;\
+ GET_STACK_TRACE_MALLOC;\
+ void *res = lsan_memalign((uptr)align, size, stack);\
+ if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\
+ return res;
+
+#define OPERATOR_DELETE_BODY\
+ ENSURE_LSAN_INITED;\
+ lsan_free(ptr);
+
+// On OS X it's not enough to just provide our own 'operator new' and
+// 'operator delete' implementations, because they're going to be in the runtime
+// dylib, and the main executable will depend on both the runtime dylib and
+// libstdc++, each of which has its own implementation of new and delete.
+// To make sure that C++ allocation/deallocation operators are overridden on
+// OS X we need to intercept them using their mangled names.
+#if !SANITIZER_APPLE
+
+INTERCEPTOR_ATTRIBUTE
+void *operator new(size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); }
+INTERCEPTOR_ATTRIBUTE
+void *operator new[](size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); }
+INTERCEPTOR_ATTRIBUTE
+void *operator new(size_t size, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY(true /*nothrow*/); }
+INTERCEPTOR_ATTRIBUTE
+void *operator new[](size_t size, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY(true /*nothrow*/); }
+INTERCEPTOR_ATTRIBUTE
+void *operator new(size_t size, std::align_val_t align)
+{ OPERATOR_NEW_BODY_ALIGN(false /*nothrow*/); }
+INTERCEPTOR_ATTRIBUTE
+void *operator new[](size_t size, std::align_val_t align)
+{ OPERATOR_NEW_BODY_ALIGN(false /*nothrow*/); }
+INTERCEPTOR_ATTRIBUTE
+void *operator new(size_t size, std::align_val_t align, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY_ALIGN(true /*nothrow*/); }
+INTERCEPTOR_ATTRIBUTE
+void *operator new[](size_t size, std::align_val_t align, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY_ALIGN(true /*nothrow*/); }
+
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr, std::nothrow_t const&) { OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr, std::nothrow_t const &)
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr, size_t size) NOEXCEPT
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr, size_t size) NOEXCEPT
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr, std::align_val_t) NOEXCEPT
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr, std::align_val_t) NOEXCEPT
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr, std::align_val_t, std::nothrow_t const&)
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr, std::align_val_t, std::nothrow_t const&)
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr, size_t size, std::align_val_t) NOEXCEPT
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr, size_t size, std::align_val_t) NOEXCEPT
+{ OPERATOR_DELETE_BODY; }
+
+#else // SANITIZER_APPLE
+
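+// Itanium C++ ABI mangled names: _Znwm is operator new(size_t), _Znam is
+// operator new[](size_t), _ZdlPv / _ZdaPv are the matching deletes, and the
+// RKSt9nothrow_t suffix marks the std::nothrow_t const& overloads.
+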
+INTERCEPTOR(void *, _Znwm, size_t size)
+{ OPERATOR_NEW_BODY(false /*nothrow*/); }
+INTERCEPTOR(void *, _Znam, size_t size)
+{ OPERATOR_NEW_BODY(false /*nothrow*/); }
+INTERCEPTOR(void *, _ZnwmRKSt9nothrow_t, size_t size, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY(true /*nothrow*/); }
+INTERCEPTOR(void *, _ZnamRKSt9nothrow_t, size_t size, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY(true /*nothrow*/); }
+
+INTERCEPTOR(void, _ZdlPv, void *ptr)
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR(void, _ZdaPv, void *ptr)
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR(void, _ZdlPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&)
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR(void, _ZdaPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&)
+{ OPERATOR_DELETE_BODY; }
+
+#endif // !SANITIZER_APPLE
+
+
+///// Thread initialization and finalization. /////
+
+#if !SANITIZER_NETBSD && !SANITIZER_FREEBSD && !SANITIZER_FUCHSIA
+static unsigned g_thread_finalize_key;
+
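+// Note: thread_finalize below re-arms the key with (iter - 1) on each TSD
+// destructor pass, deferring ThreadFinish() to the final iteration so that it
+// likely runs after all other thread-specific destructors.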
+static void thread_finalize(void *v) {
+ uptr iter = (uptr)v;
+ if (iter > 1) {
+ if (pthread_setspecific(g_thread_finalize_key, (void*)(iter - 1))) {
+ Report("LeakSanitizer: failed to set thread key.\n");
+ Die();
+ }
+ return;
+ }
+ ThreadFinish();
+}
+#endif
+
+#if SANITIZER_NETBSD
+INTERCEPTOR(void, _lwp_exit) {
+ ENSURE_LSAN_INITED;
+ ThreadFinish();
+ REAL(_lwp_exit)();
+}
+#define LSAN_MAYBE_INTERCEPT__LWP_EXIT INTERCEPT_FUNCTION(_lwp_exit)
+#else
+#define LSAN_MAYBE_INTERCEPT__LWP_EXIT
+#endif
+
+#if SANITIZER_INTERCEPT_THR_EXIT
+INTERCEPTOR(void, thr_exit, tid_t *state) {
+ ENSURE_LSAN_INITED;
+ ThreadFinish();
+ REAL(thr_exit)(state);
+}
+#define LSAN_MAYBE_INTERCEPT_THR_EXIT INTERCEPT_FUNCTION(thr_exit)
+#else
+#define LSAN_MAYBE_INTERCEPT_THR_EXIT
+#endif
+
+#if SANITIZER_INTERCEPT___CXA_ATEXIT
+INTERCEPTOR(int, __cxa_atexit, void (*func)(void *), void *arg,
+ void *dso_handle) {
+ __lsan::ScopedInterceptorDisabler disabler;
+ return REAL(__cxa_atexit)(func, arg, dso_handle);
+}
+#define LSAN_MAYBE_INTERCEPT___CXA_ATEXIT INTERCEPT_FUNCTION(__cxa_atexit)
+#else
+#define LSAN_MAYBE_INTERCEPT___CXA_ATEXIT
+#endif
+
+#if SANITIZER_INTERCEPT_ATEXIT
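+// atexit() is forwarded to REAL(__cxa_atexit) with a null DSO handle rather
+// than to REAL(atexit); registration runs under ScopedInterceptorDisabler,
+// presumably so that memory libc allocates to record the handler is not
+// reported as leaked.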
+INTERCEPTOR(int, atexit, void (*f)()) {
+ __lsan::ScopedInterceptorDisabler disabler;
+ return REAL(__cxa_atexit)((void (*)(void *a))f, 0, 0);
+}
+#define LSAN_MAYBE_INTERCEPT_ATEXIT INTERCEPT_FUNCTION(atexit)
+#else
+#define LSAN_MAYBE_INTERCEPT_ATEXIT
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_ATFORK
+extern "C" {
+extern int _pthread_atfork(void (*prepare)(), void (*parent)(),
+ void (*child)());
+};
+
+INTERCEPTOR(int, pthread_atfork, void (*prepare)(), void (*parent)(),
+ void (*child)()) {
+ __lsan::ScopedInterceptorDisabler disabler;
+  // REAL(pthread_atfork) cannot be called due to symbol indirections, at
+  // least on NetBSD, so call the underlying _pthread_atfork directly.
+ return _pthread_atfork(prepare, parent, child);
+}
+#define LSAN_MAYBE_INTERCEPT_PTHREAD_ATFORK INTERCEPT_FUNCTION(pthread_atfork)
+#else
+#define LSAN_MAYBE_INTERCEPT_PTHREAD_ATFORK
+#endif
+
+#if SANITIZER_INTERCEPT_STRERROR
+INTERCEPTOR(char *, strerror, int errnum) {
+ __lsan::ScopedInterceptorDisabler disabler;
+ return REAL(strerror)(errnum);
+}
+#define LSAN_MAYBE_INTERCEPT_STRERROR INTERCEPT_FUNCTION(strerror)
+#else
+#define LSAN_MAYBE_INTERCEPT_STRERROR
+#endif
+
+#if SANITIZER_POSIX
+
+struct ThreadParam {
+ void *(*callback)(void *arg);
+ void *param;
+ atomic_uintptr_t tid;
+};
+
+extern "C" void *__lsan_thread_start_func(void *arg) {
+ ThreadParam *p = (ThreadParam*)arg;
+ void* (*callback)(void *arg) = p->callback;
+ void *param = p->param;
+ // Wait until the last iteration to maximize the chance that we are the last
+ // destructor to run.
+#if !SANITIZER_NETBSD && !SANITIZER_FREEBSD
+ if (pthread_setspecific(g_thread_finalize_key,
+ (void*)GetPthreadDestructorIterations())) {
+ Report("LeakSanitizer: failed to set thread key.\n");
+ Die();
+ }
+#endif
+ int tid = 0;
+ while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0)
+ internal_sched_yield();
+ ThreadStart(tid, GetTid());
+ atomic_store(&p->tid, 0, memory_order_release);
+ return callback(param);
+}
+
+INTERCEPTOR(int, pthread_create, void *th, void *attr,
+ void *(*callback)(void *), void *param) {
+ ENSURE_LSAN_INITED;
+ EnsureMainThreadIDIsCorrect();
+ __sanitizer_pthread_attr_t myattr;
+ if (!attr) {
+ pthread_attr_init(&myattr);
+ attr = &myattr;
+ }
+ AdjustStackSize(attr);
+ int detached = 0;
+ pthread_attr_getdetachstate(attr, &detached);
+ ThreadParam p;
+ p.callback = callback;
+ p.param = param;
+ atomic_store(&p.tid, 0, memory_order_relaxed);
+ int res;
+ {
+    // Ignore all allocations made by pthread_create: thread stack/TLS may be
+    // stored by pthread for future reuse even after thread destruction, and
+    // the linked list it's stored in doesn't even hold valid pointers to the
+    // objects; the latter are computed via obscure pointer arithmetic.
+ ScopedInterceptorDisabler disabler;
+ res = REAL(pthread_create)(th, attr, __lsan_thread_start_func, &p);
+ }
+ if (res == 0) {
+ int tid = ThreadCreate(GetCurrentThread(), IsStateDetached(detached));
+ CHECK_NE(tid, kMainTid);
+ atomic_store(&p.tid, tid, memory_order_release);
+ while (atomic_load(&p.tid, memory_order_acquire) != 0)
+ internal_sched_yield();
+ }
+ if (attr == &myattr)
+ pthread_attr_destroy(&myattr);
+ return res;
+}
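+
+// Handshake with __lsan_thread_start_func (above): the parent publishes the
+// registry tid through p.tid, the child spins until it sees a nonzero value,
+// calls ThreadStart(), then stores 0 back so the parent knows the
+// stack-allocated ThreadParam may safely go out of scope.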
+
+INTERCEPTOR(int, pthread_join, void *t, void **arg) {
+ return REAL(pthread_join)(t, arg);
+}
+
+DEFINE_REAL_PTHREAD_FUNCTIONS
+
+INTERCEPTOR(void, _exit, int status) {
+ if (status == 0 && HasReportedLeaks()) status = common_flags()->exitcode;
+ REAL(_exit)(status);
+}
+
+#define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name)
+#include "sanitizer_common/sanitizer_signal_interceptors.inc"
+
+#endif // SANITIZER_POSIX
+
+namespace __lsan {
+
+void InitializeInterceptors() {
+ // Fuchsia doesn't use interceptors that require any setup.
+#if !SANITIZER_FUCHSIA
+ InitializeSignalInterceptors();
+
+ INTERCEPT_FUNCTION(malloc);
+ INTERCEPT_FUNCTION(free);
+ LSAN_MAYBE_INTERCEPT_CFREE;
+ INTERCEPT_FUNCTION(calloc);
+ INTERCEPT_FUNCTION(realloc);
+ LSAN_MAYBE_INTERCEPT_MEMALIGN;
+ LSAN_MAYBE_INTERCEPT___LIBC_MEMALIGN;
+ LSAN_MAYBE_INTERCEPT_ALIGNED_ALLOC;
+ INTERCEPT_FUNCTION(posix_memalign);
+ INTERCEPT_FUNCTION(valloc);
+ LSAN_MAYBE_INTERCEPT_PVALLOC;
+ LSAN_MAYBE_INTERCEPT_MALLOC_USABLE_SIZE;
+ LSAN_MAYBE_INTERCEPT_MALLINFO;
+ LSAN_MAYBE_INTERCEPT_MALLOPT;
+ INTERCEPT_FUNCTION(pthread_create);
+ INTERCEPT_FUNCTION(pthread_join);
+ INTERCEPT_FUNCTION(_exit);
+
+ LSAN_MAYBE_INTERCEPT__LWP_EXIT;
+ LSAN_MAYBE_INTERCEPT_THR_EXIT;
+
+ LSAN_MAYBE_INTERCEPT___CXA_ATEXIT;
+ LSAN_MAYBE_INTERCEPT_ATEXIT;
+ LSAN_MAYBE_INTERCEPT_PTHREAD_ATFORK;
+
+ LSAN_MAYBE_INTERCEPT_STRERROR;
+
+#if !SANITIZER_NETBSD && !SANITIZER_FREEBSD
+ if (pthread_key_create(&g_thread_finalize_key, &thread_finalize)) {
+ Report("LeakSanitizer: failed to create thread key.\n");
+ Die();
+ }
+#endif
+
+#endif // !SANITIZER_FUCHSIA
+}
+
+} // namespace __lsan
diff --git a/contrib/libs/clang16-rt/lib/lsan/lsan_linux.cpp b/contrib/libs/clang16-rt/lib/lsan/lsan_linux.cpp
new file mode 100644
index 0000000000..47c2f21b5a
--- /dev/null
+++ b/contrib/libs/clang16-rt/lib/lsan/lsan_linux.cpp
@@ -0,0 +1,32 @@
+//=-- lsan_linux.cpp ------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer. Linux/NetBSD/Fuchsia-specific code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+
+#if SANITIZER_LINUX || SANITIZER_NETBSD || SANITIZER_FUCHSIA
+
+#include "lsan_allocator.h"
+
+namespace __lsan {
+
+static THREADLOCAL u32 current_thread_tid = kInvalidTid;
+u32 GetCurrentThread() { return current_thread_tid; }
+void SetCurrentThread(u32 tid) { current_thread_tid = tid; }
+
+static THREADLOCAL AllocatorCache allocator_cache;
+AllocatorCache *GetAllocatorCache() { return &allocator_cache; }
+
+void ReplaceSystemMalloc() {}
+
+} // namespace __lsan
+
+#endif // SANITIZER_LINUX || SANITIZER_NETBSD || SANITIZER_FUCHSIA
diff --git a/contrib/libs/clang16-rt/lib/lsan/lsan_mac.cpp b/contrib/libs/clang16-rt/lib/lsan/lsan_mac.cpp
new file mode 100644
index 0000000000..6964a9ba28
--- /dev/null
+++ b/contrib/libs/clang16-rt/lib/lsan/lsan_mac.cpp
@@ -0,0 +1,191 @@
+//===-- lsan_mac.cpp ------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer, a memory leak checker.
+//
+// Mac-specific details.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_APPLE
+
+#include "interception/interception.h"
+#include "lsan.h"
+#include "lsan_allocator.h"
+#include "lsan_thread.h"
+
+#include <pthread.h>
+
+namespace __lsan {
+// Support for the following functions from libdispatch on Mac OS:
+// dispatch_async_f()
+// dispatch_async()
+// dispatch_sync_f()
+// dispatch_sync()
+// dispatch_after_f()
+// dispatch_after()
+// dispatch_group_async_f()
+// dispatch_group_async()
+// TODO(glider): libdispatch API contains other functions that we don't support
+// yet.
+//
+// dispatch_sync() and dispatch_sync_f() are synchronous, although they may
+// still cause jobs to run on a thread different from the current one.
+// TODO(glider): if so, we need a test for this (otherwise we should remove
+// them).
+//
+// The following functions use dispatch_barrier_async_f() (which isn't a library
+// function but is exported) and are thus supported:
+// dispatch_source_set_cancel_handler_f()
+// dispatch_source_set_cancel_handler()
+// dispatch_source_set_event_handler_f()
+// dispatch_source_set_event_handler()
+//
+// The reference manual for Grand Central Dispatch is available at
+// http://developer.apple.com/library/mac/#documentation/Performance/Reference/GCD_libdispatch_Ref/Reference/reference.html
+// The implementation details are at
+// http://libdispatch.macosforge.org/trac/browser/trunk/src/queue.c
+
+typedef void *dispatch_group_t;
+typedef void *dispatch_queue_t;
+typedef void *dispatch_source_t;
+typedef u64 dispatch_time_t;
+typedef void (*dispatch_function_t)(void *block);
+typedef void *(*worker_t)(void *block);
+
+// A wrapper for the ObjC blocks used to support libdispatch.
+typedef struct {
+ void *block;
+ dispatch_function_t func;
+ u32 parent_tid;
+} lsan_block_context_t;
+
+ALWAYS_INLINE
+void lsan_register_worker_thread(int parent_tid) {
+ if (GetCurrentThread() == kInvalidTid) {
+ u32 tid = ThreadCreate(parent_tid, true);
+ ThreadStart(tid, GetTid());
+ SetCurrentThread(tid);
+ }
+}
+
+// For use only by those functions that allocated the context via
+// alloc_lsan_context().
+extern "C" void lsan_dispatch_call_block_and_release(void *block) {
+ lsan_block_context_t *context = (lsan_block_context_t *)block;
+ VReport(2,
+ "lsan_dispatch_call_block_and_release(): "
+ "context: %p, pthread_self: %p\n",
+ block, pthread_self());
+ lsan_register_worker_thread(context->parent_tid);
+ // Call the original dispatcher for the block.
+ context->func(context->block);
+ lsan_free(context);
+}
+
+} // namespace __lsan
+
+using namespace __lsan;
+
+// Wrap |ctxt| and |func| into an lsan_block_context_t.
+// The caller retains control of the allocated context.
+extern "C" lsan_block_context_t *alloc_lsan_context(void *ctxt,
+ dispatch_function_t func) {
+ GET_STACK_TRACE_THREAD;
+ lsan_block_context_t *lsan_ctxt =
+ (lsan_block_context_t *)lsan_malloc(sizeof(lsan_block_context_t), stack);
+ lsan_ctxt->block = ctxt;
+ lsan_ctxt->func = func;
+ lsan_ctxt->parent_tid = GetCurrentThread();
+ return lsan_ctxt;
+}
+
+// Define interceptor for dispatch_*_f function with the three most common
+// parameters: dispatch_queue_t, context, dispatch_function_t.
+#define INTERCEPT_DISPATCH_X_F_3(dispatch_x_f) \
+ INTERCEPTOR(void, dispatch_x_f, dispatch_queue_t dq, void *ctxt, \
+ dispatch_function_t func) { \
+ lsan_block_context_t *lsan_ctxt = alloc_lsan_context(ctxt, func); \
+ return REAL(dispatch_x_f)(dq, (void *)lsan_ctxt, \
+ lsan_dispatch_call_block_and_release); \
+ }
+
+INTERCEPT_DISPATCH_X_F_3(dispatch_async_f)
+INTERCEPT_DISPATCH_X_F_3(dispatch_sync_f)
+INTERCEPT_DISPATCH_X_F_3(dispatch_barrier_async_f)
+
+INTERCEPTOR(void, dispatch_after_f, dispatch_time_t when, dispatch_queue_t dq,
+ void *ctxt, dispatch_function_t func) {
+ lsan_block_context_t *lsan_ctxt = alloc_lsan_context(ctxt, func);
+ return REAL(dispatch_after_f)(when, dq, (void *)lsan_ctxt,
+ lsan_dispatch_call_block_and_release);
+}
+
+INTERCEPTOR(void, dispatch_group_async_f, dispatch_group_t group,
+ dispatch_queue_t dq, void *ctxt, dispatch_function_t func) {
+ lsan_block_context_t *lsan_ctxt = alloc_lsan_context(ctxt, func);
+ REAL(dispatch_group_async_f)
+ (group, dq, (void *)lsan_ctxt, lsan_dispatch_call_block_and_release);
+}
+
+#if !defined(MISSING_BLOCKS_SUPPORT)
+extern "C" {
+void dispatch_async(dispatch_queue_t dq, void (^work)(void));
+void dispatch_group_async(dispatch_group_t dg, dispatch_queue_t dq,
+ void (^work)(void));
+void dispatch_after(dispatch_time_t when, dispatch_queue_t queue,
+ void (^work)(void));
+void dispatch_source_set_cancel_handler(dispatch_source_t ds,
+ void (^work)(void));
+void dispatch_source_set_event_handler(dispatch_source_t ds,
+ void (^work)(void));
+}
+
+#define GET_LSAN_BLOCK(work) \
+ void (^lsan_block)(void); \
+ int parent_tid = GetCurrentThread(); \
+ lsan_block = ^(void) { \
+ lsan_register_worker_thread(parent_tid); \
+ work(); \
+ }
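+
+// The wrapper block captures parent_tid by value, so whichever worker thread
+// eventually runs it can register itself under the correct parent via
+// lsan_register_worker_thread() before invoking the user's work().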
+
+INTERCEPTOR(void, dispatch_async, dispatch_queue_t dq, void (^work)(void)) {
+ GET_LSAN_BLOCK(work);
+ REAL(dispatch_async)(dq, lsan_block);
+}
+
+INTERCEPTOR(void, dispatch_group_async, dispatch_group_t dg,
+ dispatch_queue_t dq, void (^work)(void)) {
+ GET_LSAN_BLOCK(work);
+ REAL(dispatch_group_async)(dg, dq, lsan_block);
+}
+
+INTERCEPTOR(void, dispatch_after, dispatch_time_t when, dispatch_queue_t queue,
+ void (^work)(void)) {
+ GET_LSAN_BLOCK(work);
+ REAL(dispatch_after)(when, queue, lsan_block);
+}
+
+INTERCEPTOR(void, dispatch_source_set_cancel_handler, dispatch_source_t ds,
+ void (^work)(void)) {
+ if (!work) {
+ REAL(dispatch_source_set_cancel_handler)(ds, work);
+ return;
+ }
+ GET_LSAN_BLOCK(work);
+ REAL(dispatch_source_set_cancel_handler)(ds, lsan_block);
+}
+
+INTERCEPTOR(void, dispatch_source_set_event_handler, dispatch_source_t ds,
+ void (^work)(void)) {
+ GET_LSAN_BLOCK(work);
+ REAL(dispatch_source_set_event_handler)(ds, lsan_block);
+}
+#endif
+
+#endif // SANITIZER_APPLE
diff --git a/contrib/libs/clang16-rt/lib/lsan/lsan_malloc_mac.cpp b/contrib/libs/clang16-rt/lib/lsan/lsan_malloc_mac.cpp
new file mode 100644
index 0000000000..525c30272c
--- /dev/null
+++ b/contrib/libs/clang16-rt/lib/lsan/lsan_malloc_mac.cpp
@@ -0,0 +1,59 @@
+//===-- lsan_malloc_mac.cpp -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer (LSan), a memory leak detector.
+//
+// Mac-specific malloc interception.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_APPLE
+
+#include "lsan.h"
+#include "lsan_allocator.h"
+#include "lsan_thread.h"
+
+using namespace __lsan;
+#define COMMON_MALLOC_ZONE_NAME "lsan"
+#define COMMON_MALLOC_ENTER() ENSURE_LSAN_INITED
+#define COMMON_MALLOC_SANITIZER_INITIALIZED lsan_inited
+#define COMMON_MALLOC_FORCE_LOCK()
+#define COMMON_MALLOC_FORCE_UNLOCK()
+#define COMMON_MALLOC_MEMALIGN(alignment, size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = lsan_memalign(alignment, size, stack)
+#define COMMON_MALLOC_MALLOC(size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = lsan_malloc(size, stack)
+#define COMMON_MALLOC_REALLOC(ptr, size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = lsan_realloc(ptr, size, stack)
+#define COMMON_MALLOC_CALLOC(count, size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = lsan_calloc(count, size, stack)
+#define COMMON_MALLOC_POSIX_MEMALIGN(memptr, alignment, size) \
+ GET_STACK_TRACE_MALLOC; \
+ int res = lsan_posix_memalign(memptr, alignment, size, stack)
+#define COMMON_MALLOC_VALLOC(size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = lsan_valloc(size, stack)
+#define COMMON_MALLOC_FREE(ptr) \
+ lsan_free(ptr)
+#define COMMON_MALLOC_SIZE(ptr) \
+ uptr size = lsan_mz_size(ptr)
+#define COMMON_MALLOC_FILL_STATS(zone, stats)
+#define COMMON_MALLOC_REPORT_UNKNOWN_REALLOC(ptr, zone_ptr, zone_name) \
+ (void)zone_name; \
+ Report("mz_realloc(%p) -- attempting to realloc unallocated memory.\n", ptr);
+#define COMMON_MALLOC_NAMESPACE __lsan
+#define COMMON_MALLOC_HAS_ZONE_ENUMERATOR 0
+#define COMMON_MALLOC_HAS_EXTRA_INTROSPECTION_INIT 0
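+
+// The COMMON_MALLOC_* macros above parameterize sanitizer_malloc_mac.inc,
+// which defines the malloc zone that replaces the default zone; each macro
+// supplies the LSan-specific body for one zone operation.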
+
+#include "sanitizer_common/sanitizer_malloc_mac.inc"
+
+#endif // SANITIZER_APPLE
diff --git a/contrib/libs/clang16-rt/lib/lsan/lsan_posix.cpp b/contrib/libs/clang16-rt/lib/lsan/lsan_posix.cpp
new file mode 100644
index 0000000000..3c7bc15a85
--- /dev/null
+++ b/contrib/libs/clang16-rt/lib/lsan/lsan_posix.cpp
@@ -0,0 +1,102 @@
+//=-- lsan_posix.cpp -----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===---------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Standalone LSan RTL code common to POSIX-like systems.
+//
+//===---------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+
+#if SANITIZER_POSIX
+#include "lsan.h"
+#include "lsan_allocator.h"
+#include "lsan_thread.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "sanitizer_common/sanitizer_tls_get_addr.h"
+
+namespace __lsan {
+
+ThreadContext::ThreadContext(int tid) : ThreadContextLsanBase(tid) {}
+
+struct OnStartedArgs {
+ uptr stack_begin;
+ uptr stack_end;
+ uptr cache_begin;
+ uptr cache_end;
+ uptr tls_begin;
+ uptr tls_end;
+ DTLS *dtls;
+};
+
+void ThreadContext::OnStarted(void *arg) {
+ auto args = reinterpret_cast<const OnStartedArgs *>(arg);
+ stack_begin_ = args->stack_begin;
+ stack_end_ = args->stack_end;
+ tls_begin_ = args->tls_begin;
+ tls_end_ = args->tls_end;
+ cache_begin_ = args->cache_begin;
+ cache_end_ = args->cache_end;
+ dtls_ = args->dtls;
+}
+
+void ThreadStart(u32 tid, tid_t os_id, ThreadType thread_type) {
+ OnStartedArgs args;
+ uptr stack_size = 0;
+ uptr tls_size = 0;
+ GetThreadStackAndTls(tid == kMainTid, &args.stack_begin, &stack_size,
+ &args.tls_begin, &tls_size);
+ args.stack_end = args.stack_begin + stack_size;
+ args.tls_end = args.tls_begin + tls_size;
+ GetAllocatorCacheRange(&args.cache_begin, &args.cache_end);
+ args.dtls = DTLS_Get();
+ ThreadContextLsanBase::ThreadStart(tid, os_id, thread_type, &args);
+}
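+
+// The ranges collected above (stack, TLS, allocator cache, DTLS) are what
+// GetThreadRangesLocked() below hands to the common leak-checking module,
+// which scans them as part of the root set.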
+
+bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
+ uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
+ uptr *cache_end, DTLS **dtls) {
+ ThreadContext *context = static_cast<ThreadContext *>(
+ GetLsanThreadRegistryLocked()->FindThreadContextByOsIDLocked(os_id));
+ if (!context)
+ return false;
+ *stack_begin = context->stack_begin();
+ *stack_end = context->stack_end();
+ *tls_begin = context->tls_begin();
+ *tls_end = context->tls_end();
+ *cache_begin = context->cache_begin();
+ *cache_end = context->cache_end();
+ *dtls = context->dtls();
+ return true;
+}
+
+void InitializeMainThread() {
+ u32 tid = ThreadCreate(kMainTid, true);
+ CHECK_EQ(tid, kMainTid);
+ ThreadStart(tid, GetTid());
+}
+
+static void OnStackUnwind(const SignalContext &sig, const void *,
+ BufferedStackTrace *stack) {
+ stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context,
+ common_flags()->fast_unwind_on_fatal);
+}
+
+void LsanOnDeadlySignal(int signo, void *siginfo, void *context) {
+ HandleDeadlySignal(siginfo, context, GetCurrentThread(), &OnStackUnwind,
+ nullptr);
+}
+
+void InstallAtExitCheckLeaks() {
+ if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit)
+ Atexit(DoLeakCheck);
+}
+
+} // namespace __lsan
+
+#endif // SANITIZER_POSIX
diff --git a/contrib/libs/clang16-rt/lib/lsan/lsan_posix.h b/contrib/libs/clang16-rt/lib/lsan/lsan_posix.h
new file mode 100644
index 0000000000..b1265f233f
--- /dev/null
+++ b/contrib/libs/clang16-rt/lib/lsan/lsan_posix.h
@@ -0,0 +1,49 @@
+//=-- lsan_posix.h -----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===---------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Standalone LSan RTL code common to POSIX-like systems.
+//
+//===---------------------------------------------------------------------===//
+
+#ifndef LSAN_POSIX_H
+#define LSAN_POSIX_H
+
+#include "lsan_thread.h"
+#include "sanitizer_common/sanitizer_platform.h"
+
+#if !SANITIZER_POSIX
+#error "lsan_posix.h is used only on POSIX-like systems (SANITIZER_POSIX)"
+#endif
+
+namespace __sanitizer {
+struct DTLS;
+}
+
+namespace __lsan {
+
+class ThreadContext final : public ThreadContextLsanBase {
+ public:
+ explicit ThreadContext(int tid);
+ void OnStarted(void *arg) override;
+ uptr tls_begin() { return tls_begin_; }
+ uptr tls_end() { return tls_end_; }
+ DTLS *dtls() { return dtls_; }
+
+ private:
+ uptr tls_begin_ = 0;
+ uptr tls_end_ = 0;
+ DTLS *dtls_ = nullptr;
+};
+
+void ThreadStart(u32 tid, tid_t os_id,
+ ThreadType thread_type = ThreadType::Regular);
+
+} // namespace __lsan
+
+#endif // LSAN_POSIX_H
diff --git a/contrib/libs/clang16-rt/lib/lsan/lsan_preinit.cpp b/contrib/libs/clang16-rt/lib/lsan/lsan_preinit.cpp
new file mode 100644
index 0000000000..cd94e1e871
--- /dev/null
+++ b/contrib/libs/clang16-rt/lib/lsan/lsan_preinit.cpp
@@ -0,0 +1,21 @@
+//===-- lsan_preinit.cpp --------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+//
+// Call __lsan_init at the very early stage of process startup.
+//===----------------------------------------------------------------------===//
+
+#include "lsan.h"
+
+#if SANITIZER_CAN_USE_PREINIT_ARRAY
+  // We force __lsan_init to run before any other initializers by placing it
+  // into the .preinit_array section.
+ __attribute__((section(".preinit_array"), used))
+ void (*__local_lsan_preinit)(void) = __lsan_init;
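+  // .preinit_array entries are run by the dynamic loader before any shared
+  // library initializers or C++ constructors, so __lsan_init wins any
+  // initialization race with instrumented code.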
+#endif
diff --git a/contrib/libs/clang16-rt/lib/lsan/lsan_thread.cpp b/contrib/libs/clang16-rt/lib/lsan/lsan_thread.cpp
new file mode 100644
index 0000000000..137c7e4e4f
--- /dev/null
+++ b/contrib/libs/clang16-rt/lib/lsan/lsan_thread.cpp
@@ -0,0 +1,102 @@
+//=-- lsan_thread.cpp -----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// See lsan_thread.h for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "lsan_thread.h"
+
+#include "lsan.h"
+#include "lsan_allocator.h"
+#include "lsan_common.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_thread_registry.h"
+#include "sanitizer_common/sanitizer_tls_get_addr.h"
+
+namespace __lsan {
+
+static ThreadRegistry *thread_registry;
+
+static ThreadContextBase *CreateThreadContext(u32 tid) {
+ void *mem = MmapOrDie(sizeof(ThreadContext), "ThreadContext");
+ return new (mem) ThreadContext(tid);
+}
+
+void InitializeThreadRegistry() {
+ static ALIGNED(64) char thread_registry_placeholder[sizeof(ThreadRegistry)];
+ thread_registry =
+ new (thread_registry_placeholder) ThreadRegistry(CreateThreadContext);
+}
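+
+// The registry is placement-new'ed into static storage (and thread contexts
+// are mmap'ed directly), so thread bookkeeping never goes through the
+// intercepted malloc.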
+
+ThreadContextLsanBase::ThreadContextLsanBase(int tid)
+ : ThreadContextBase(tid) {}
+
+void ThreadContextLsanBase::OnFinished() {
+ AllocatorThreadFinish();
+ DTLS_Destroy();
+}
+
+u32 ThreadCreate(u32 parent_tid, bool detached, void *arg) {
+ return thread_registry->CreateThread(0, detached, parent_tid, arg);
+}
+
+void ThreadContextLsanBase::ThreadStart(u32 tid, tid_t os_id,
+ ThreadType thread_type, void *arg) {
+ thread_registry->StartThread(tid, os_id, thread_type, arg);
+ SetCurrentThread(tid);
+}
+
+void ThreadFinish() {
+ thread_registry->FinishThread(GetCurrentThread());
+ SetCurrentThread(kInvalidTid);
+}
+
+ThreadContext *CurrentThreadContext() {
+ if (!thread_registry)
+ return nullptr;
+ if (GetCurrentThread() == kInvalidTid)
+ return nullptr;
+ // No lock needed when getting current thread.
+ return (ThreadContext *)thread_registry->GetThreadLocked(GetCurrentThread());
+}
+
+void EnsureMainThreadIDIsCorrect() {
+ if (GetCurrentThread() == kMainTid)
+ CurrentThreadContext()->os_id = GetTid();
+}
+
+///// Interface to the common LSan module. /////
+
+void GetThreadExtraStackRangesLocked(tid_t os_id,
+ InternalMmapVector<Range> *ranges) {}
+void GetThreadExtraStackRangesLocked(InternalMmapVector<Range> *ranges) {}
+
+void LockThreadRegistry() { thread_registry->Lock(); }
+
+void UnlockThreadRegistry() { thread_registry->Unlock(); }
+
+ThreadRegistry *GetLsanThreadRegistryLocked() {
+ thread_registry->CheckLocked();
+ return thread_registry;
+}
+
+void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads) {
+ GetLsanThreadRegistryLocked()->RunCallbackForEachThreadLocked(
+ [](ThreadContextBase *tctx, void *threads) {
+ if (tctx->status == ThreadStatusRunning) {
+ reinterpret_cast<InternalMmapVector<tid_t> *>(threads)->push_back(
+ tctx->os_id);
+ }
+ },
+ threads);
+}
+
+} // namespace __lsan
diff --git a/contrib/libs/clang16-rt/lib/lsan/lsan_thread.h b/contrib/libs/clang16-rt/lib/lsan/lsan_thread.h
new file mode 100644
index 0000000000..049c7e2038
--- /dev/null
+++ b/contrib/libs/clang16-rt/lib/lsan/lsan_thread.h
@@ -0,0 +1,60 @@
+//=-- lsan_thread.h -------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Thread registry for standalone LSan.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LSAN_THREAD_H
+#define LSAN_THREAD_H
+
+#include "sanitizer_common/sanitizer_thread_registry.h"
+
+namespace __lsan {
+
+class ThreadContextLsanBase : public ThreadContextBase {
+ public:
+ explicit ThreadContextLsanBase(int tid);
+ void OnFinished() override;
+ uptr stack_begin() { return stack_begin_; }
+ uptr stack_end() { return stack_end_; }
+ uptr cache_begin() { return cache_begin_; }
+ uptr cache_end() { return cache_end_; }
+
+ // The argument is passed on to the subclass's OnStarted member function.
+ static void ThreadStart(u32 tid, tid_t os_id, ThreadType thread_type,
+ void *onstarted_arg);
+
+ protected:
+ ~ThreadContextLsanBase() {}
+ uptr stack_begin_ = 0;
+ uptr stack_end_ = 0;
+ uptr cache_begin_ = 0;
+ uptr cache_end_ = 0;
+};
+
+// This subclass of ThreadContextLsanBase is declared in an OS-specific header.
+class ThreadContext;
+
+void InitializeThreadRegistry();
+void InitializeMainThread();
+
+ThreadRegistry *GetLsanThreadRegistryLocked();
+
+u32 ThreadCreate(u32 tid, bool detached, void *arg = nullptr);
+void ThreadFinish();
+
+u32 GetCurrentThread();
+void SetCurrentThread(u32 tid);
+ThreadContext *CurrentThreadContext();
+void EnsureMainThreadIDIsCorrect();
+
+} // namespace __lsan
+
+#endif // LSAN_THREAD_H
diff --git a/contrib/libs/clang16-rt/lib/lsan/ya.make b/contrib/libs/clang16-rt/lib/lsan/ya.make
new file mode 100644
index 0000000000..64677c9588
--- /dev/null
+++ b/contrib/libs/clang16-rt/lib/lsan/ya.make
@@ -0,0 +1,131 @@
+# Generated by devtools/yamaker.
+
+INCLUDE(${ARCADIA_ROOT}/build/platform/clang/arch.cmake)
+
+LIBRARY(clang_rt.lsan${CLANG_RT_SUFFIX})
+
+LICENSE(
+ Apache-2.0 AND
+ Apache-2.0 WITH LLVM-exception AND
+ MIT AND
+ NCSA
+)
+
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+OWNER(g:cpp-contrib)
+
+ADDINCL(
+ contrib/libs/clang16-rt/lib
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_UTIL()
+
+NO_SANITIZE()
+
+CFLAGS(
+ -DHAVE_RPC_XDR_H=0
+ -fno-builtin
+ -fno-exceptions
+ -fno-lto
+ -fno-rtti
+ -fno-stack-protector
+ -fomit-frame-pointer
+ -funwind-tables
+ -fvisibility=hidden
+)
+
+SRCDIR(contrib/libs/clang16-rt/lib)
+
+SRCS(
+ interception/interception_linux.cpp
+ interception/interception_mac.cpp
+ interception/interception_type_test.cpp
+ interception/interception_win.cpp
+ lsan/lsan.cpp
+ lsan/lsan_allocator.cpp
+ lsan/lsan_common.cpp
+ lsan/lsan_common_fuchsia.cpp
+ lsan/lsan_common_linux.cpp
+ lsan/lsan_common_mac.cpp
+ lsan/lsan_fuchsia.cpp
+ lsan/lsan_interceptors.cpp
+ lsan/lsan_linux.cpp
+ lsan/lsan_mac.cpp
+ lsan/lsan_malloc_mac.cpp
+ lsan/lsan_posix.cpp
+ lsan/lsan_preinit.cpp
+ lsan/lsan_thread.cpp
+ sanitizer_common/sancov_flags.cpp
+ sanitizer_common/sanitizer_allocator.cpp
+ sanitizer_common/sanitizer_allocator_checks.cpp
+ sanitizer_common/sanitizer_allocator_report.cpp
+ sanitizer_common/sanitizer_chained_origin_depot.cpp
+ sanitizer_common/sanitizer_common.cpp
+ sanitizer_common/sanitizer_common_libcdep.cpp
+ sanitizer_common/sanitizer_coverage_fuchsia.cpp
+ sanitizer_common/sanitizer_coverage_libcdep_new.cpp
+ sanitizer_common/sanitizer_coverage_win_sections.cpp
+ sanitizer_common/sanitizer_deadlock_detector1.cpp
+ sanitizer_common/sanitizer_deadlock_detector2.cpp
+ sanitizer_common/sanitizer_errno.cpp
+ sanitizer_common/sanitizer_file.cpp
+ sanitizer_common/sanitizer_flag_parser.cpp
+ sanitizer_common/sanitizer_flags.cpp
+ sanitizer_common/sanitizer_fuchsia.cpp
+ sanitizer_common/sanitizer_libc.cpp
+ sanitizer_common/sanitizer_libignore.cpp
+ sanitizer_common/sanitizer_linux.cpp
+ sanitizer_common/sanitizer_linux_libcdep.cpp
+ sanitizer_common/sanitizer_linux_s390.cpp
+ sanitizer_common/sanitizer_mac.cpp
+ sanitizer_common/sanitizer_mac_libcdep.cpp
+ sanitizer_common/sanitizer_mutex.cpp
+ sanitizer_common/sanitizer_netbsd.cpp
+ sanitizer_common/sanitizer_platform_limits_freebsd.cpp
+ sanitizer_common/sanitizer_platform_limits_linux.cpp
+ sanitizer_common/sanitizer_platform_limits_netbsd.cpp
+ sanitizer_common/sanitizer_platform_limits_posix.cpp
+ sanitizer_common/sanitizer_platform_limits_solaris.cpp
+ sanitizer_common/sanitizer_posix.cpp
+ sanitizer_common/sanitizer_posix_libcdep.cpp
+ sanitizer_common/sanitizer_printf.cpp
+ sanitizer_common/sanitizer_procmaps_bsd.cpp
+ sanitizer_common/sanitizer_procmaps_common.cpp
+ sanitizer_common/sanitizer_procmaps_fuchsia.cpp
+ sanitizer_common/sanitizer_procmaps_linux.cpp
+ sanitizer_common/sanitizer_procmaps_mac.cpp
+ sanitizer_common/sanitizer_procmaps_solaris.cpp
+ sanitizer_common/sanitizer_solaris.cpp
+ sanitizer_common/sanitizer_stack_store.cpp
+ sanitizer_common/sanitizer_stackdepot.cpp
+ sanitizer_common/sanitizer_stacktrace.cpp
+ sanitizer_common/sanitizer_stacktrace_libcdep.cpp
+ sanitizer_common/sanitizer_stacktrace_printer.cpp
+ sanitizer_common/sanitizer_stacktrace_sparc.cpp
+ sanitizer_common/sanitizer_stoptheworld_fuchsia.cpp
+ sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cpp
+ sanitizer_common/sanitizer_stoptheworld_mac.cpp
+ sanitizer_common/sanitizer_stoptheworld_netbsd_libcdep.cpp
+ sanitizer_common/sanitizer_stoptheworld_win.cpp
+ sanitizer_common/sanitizer_suppressions.cpp
+ sanitizer_common/sanitizer_symbolizer.cpp
+ sanitizer_common/sanitizer_symbolizer_libbacktrace.cpp
+ sanitizer_common/sanitizer_symbolizer_libcdep.cpp
+ sanitizer_common/sanitizer_symbolizer_mac.cpp
+ sanitizer_common/sanitizer_symbolizer_markup.cpp
+ sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp
+ sanitizer_common/sanitizer_symbolizer_report.cpp
+ sanitizer_common/sanitizer_symbolizer_win.cpp
+ sanitizer_common/sanitizer_termination.cpp
+ sanitizer_common/sanitizer_thread_registry.cpp
+ sanitizer_common/sanitizer_tls_get_addr.cpp
+ sanitizer_common/sanitizer_type_traits.cpp
+ sanitizer_common/sanitizer_unwind_linux_libcdep.cpp
+ sanitizer_common/sanitizer_unwind_win.cpp
+ sanitizer_common/sanitizer_win.cpp
+)
+
+END()