aboutsummaryrefslogtreecommitdiffstats
path: root/contrib/libs/llvm16
diff options
context:
space:
mode:
authormaxim-yurchuk <maxim-yurchuk@yandex-team.com>2024-10-09 12:29:46 +0300
committermaxim-yurchuk <maxim-yurchuk@yandex-team.com>2024-10-09 13:14:22 +0300
commit9731d8a4bb7ee2cc8554eaf133bb85498a4c7d80 (patch)
treea8fb3181d5947c0d78cf402aa56e686130179049 /contrib/libs/llvm16
parenta44b779cd359f06c3ebbef4ec98c6b38609d9d85 (diff)
downloadydb-9731d8a4bb7ee2cc8554eaf133bb85498a4c7d80.tar.gz
publishFullContrib: true for ydb
<HIDDEN_URL> commit_hash:c82a80ac4594723cebf2c7387dec9c60217f603e
Diffstat (limited to 'contrib/libs/llvm16')
-rw-r--r--contrib/libs/llvm16/.yandex_meta/devtools.copyrights.report1324
-rw-r--r--contrib/libs/llvm16/.yandex_meta/devtools.licenses.report21157
-rw-r--r--contrib/libs/llvm16/.yandex_meta/licenses.list.txt312
-rw-r--r--contrib/libs/llvm16/include/.yandex_meta/licenses.list.txt289
-rw-r--r--contrib/libs/llvm16/include/llvm-c/module.modulemap4
-rw-r--r--contrib/libs/llvm16/include/llvm/ADT/IntervalTree.h704
-rw-r--r--contrib/libs/llvm16/include/llvm/ADT/Optional.h38
-rw-r--r--contrib/libs/llvm16/include/llvm/ADT/TypeSwitch.h198
-rw-r--r--contrib/libs/llvm16/include/llvm/Bitcode/BitcodeConvenience.h498
-rw-r--r--contrib/libs/llvm16/include/llvm/CodeGen/MachORelocation.h66
-rw-r--r--contrib/libs/llvm16/include/llvm/Config/AsmParsers.def.in29
-rw-r--r--contrib/libs/llvm16/include/llvm/Config/AsmPrinters.def.in29
-rw-r--r--contrib/libs/llvm16/include/llvm/Config/Disassemblers.def.in29
-rw-r--r--contrib/libs/llvm16/include/llvm/Config/TargetExegesis.def.in29
-rw-r--r--contrib/libs/llvm16/include/llvm/Config/TargetMCAs.def.in29
-rw-r--r--contrib/libs/llvm16/include/llvm/Config/Targets.def.in28
-rw-r--r--contrib/libs/llvm16/include/llvm/Config/abi-breaking.h.cmake62
-rw-r--r--contrib/libs/llvm16/include/llvm/Config/config.h.cmake337
-rw-r--r--contrib/libs/llvm16/include/llvm/Config/llvm-config.h.cmake129
-rw-r--r--contrib/libs/llvm16/include/llvm/DWARFLinkerParallel/DWARFLinker.h27
-rw-r--r--contrib/libs/llvm16/include/llvm/DebugInfo/CodeView/FunctionId.h66
-rw-r--r--contrib/libs/llvm16/include/llvm/DebugInfo/CodeView/TypeSymbolEmitter.h46
-rw-r--r--contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVCompare.h100
-rw-r--r--contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVElement.h363
-rw-r--r--contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVLine.h175
-rw-r--r--contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVLocation.h208
-rw-r--r--contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVObject.h361
-rw-r--r--contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVOptions.h656
-rw-r--r--contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVRange.h109
-rw-r--r--contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVReader.h250
-rw-r--r--contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVScope.h838
-rw-r--r--contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVSort.h62
-rw-r--r--contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVStringPool.h102
-rw-r--r--contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVSupport.h295
-rw-r--r--contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVSymbol.h206
-rw-r--r--contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVType.h301
-rw-r--r--contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/LVReaderHandler.h111
-rw-r--r--contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Readers/LVBinaryReader.h191
-rw-r--r--contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Readers/LVELFReader.h165
-rw-r--r--contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIADataStream.h44
-rw-r--r--contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIAEnumDebugStreams.h47
-rw-r--r--contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIAEnumFrameData.h46
-rw-r--r--contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIAEnumInjectedSources.h46
-rw-r--r--contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIAEnumLineNumbers.h46
-rw-r--r--contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIAEnumSectionContribs.h49
-rw-r--r--contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIAEnumSourceFiles.h48
-rw-r--r--contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIAEnumSymbols.h48
-rw-r--r--contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIAEnumTables.h46
-rw-r--r--contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIAError.h61
-rw-r--r--contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIAFrameData.h49
-rw-r--r--contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIAInjectedSource.h48
-rw-r--r--contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIALineNumber.h50
-rw-r--r--contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIARawSymbol.h244
-rw-r--r--contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIASectionContrib.h65
-rw-r--r--contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIASession.h104
-rw-r--r--contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIASourceFile.h51
-rw-r--r--contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIASupport.h51
-rw-r--r--contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIATable.h42
-rw-r--r--contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIAUtils.h41
-rw-r--r--contrib/libs/llvm16/include/llvm/DebugInfo/PDB/Native/Formatters.h55
-rw-r--r--contrib/libs/llvm16/include/llvm/ExecutionEngine/OProfileWrapper.h134
-rw-r--r--contrib/libs/llvm16/include/llvm/Frontend/OpenMP/OMP.inc8441
-rw-r--r--contrib/libs/llvm16/include/llvm/FuzzMutate/FuzzerCLI.h68
-rw-r--r--contrib/libs/llvm16/include/llvm/Support/AMDHSAKernelDescriptor.h246
-rw-r--r--contrib/libs/llvm16/include/llvm/Support/Solaris/sys/regset.h49
-rw-r--r--contrib/libs/llvm16/include/llvm/Support/TaskQueue.h149
-rw-r--r--contrib/libs/llvm16/include/llvm/Support/X86TargetParser.def15
-rw-r--r--contrib/libs/llvm16/include/llvm/Support/raw_sha1_ostream.h57
-rw-r--r--contrib/libs/llvm16/include/llvm/TableGen/Automaton.td95
-rw-r--r--contrib/libs/llvm16/include/llvm/Testing/ADT/StringMap.h57
-rw-r--r--contrib/libs/llvm16/include/llvm/Testing/ADT/StringMapEntry.h140
-rw-r--r--contrib/libs/llvm16/include/llvm/Testing/Annotations/Annotations.h144
-rw-r--r--contrib/libs/llvm16/include/llvm/Testing/Support/Error.h238
-rw-r--r--contrib/libs/llvm16/include/llvm/Testing/Support/SupportHelpers.h265
-rw-r--r--contrib/libs/llvm16/include/llvm/WindowsResource/ResourceProcessor.h61
-rw-r--r--contrib/libs/llvm16/include/llvm/WindowsResource/ResourceScriptToken.h69
-rw-r--r--contrib/libs/llvm16/include/llvm/WindowsResource/ResourceScriptTokenList.h45
-rw-r--r--contrib/libs/llvm16/include/llvm/XRay/FDRLogBuilder.h51
-rw-r--r--contrib/libs/llvm16/include/llvm/module.extern.modulemap6
-rw-r--r--contrib/libs/llvm16/include/llvm/module.modulemap462
-rw-r--r--contrib/libs/llvm16/include/llvm/module.modulemap.build13
-rw-r--r--contrib/libs/llvm16/lib/Analysis/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/AsmParser/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/BinaryFormat/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Bitcode/Reader/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Bitcode/Writer/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Bitstream/Reader/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/CodeGen/.yandex_meta/licenses.list.txt16
-rw-r--r--contrib/libs/llvm16/lib/CodeGen/AsmPrinter/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/CodeGen/GlobalISel/.yandex_meta/licenses.list.txt13
-rw-r--r--contrib/libs/llvm16/lib/CodeGen/MIRParser/.yandex_meta/licenses.list.txt12
-rw-r--r--contrib/libs/llvm16/lib/CodeGen/SelectionDAG/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/DWARFLinker/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/DWARFLinkerParallel/DWARFLinker.cpp13
-rw-r--r--contrib/libs/llvm16/lib/DWP/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/DebugInfo/CodeView/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/DebugInfo/DWARF/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/DebugInfo/GSYM/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/DebugInfo/MSF/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/DebugInfo/PDB/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/DebugInfo/Symbolize/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Debuginfod/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Demangle/.yandex_meta/licenses.list.txt11
-rw-r--r--contrib/libs/llvm16/lib/ExecutionEngine/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/ExecutionEngine/Interpreter/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/ExecutionEngine/JITLink/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/ExecutionEngine/MCJIT/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/ExecutionEngine/Orc/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/ExecutionEngine/Orc/Shared/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/ExecutionEngine/Orc/TargetProcess/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/ExecutionEngine/PerfJITEvents/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/ExecutionEngine/RuntimeDyld/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/FileCheck/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Frontend/HLSL/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Frontend/OpenACC/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Frontend/OpenMP/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/FuzzMutate/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/IR/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/IRPrinter/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/IRReader/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/InterfaceStub/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/LTO/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/LineEditor/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Linker/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/MC/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/MC/MCDisassembler/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/MC/MCParser/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/MCA/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/ObjCopy/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Object/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/ObjectYAML/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Option/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Passes/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/ProfileData/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/ProfileData/Coverage/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Remarks/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Support/.yandex_meta/licenses.list.txt992
-rw-r--r--contrib/libs/llvm16/lib/Support/BLAKE3/blake3_avx2.c326
-rw-r--r--contrib/libs/llvm16/lib/Support/BLAKE3/blake3_avx2_x86-64_windows_msvc.asm1828
-rw-r--r--contrib/libs/llvm16/lib/Support/BLAKE3/blake3_avx512.c1207
-rw-r--r--contrib/libs/llvm16/lib/Support/BLAKE3/blake3_avx512_x86-64_windows_msvc.asm2634
-rw-r--r--contrib/libs/llvm16/lib/Support/BLAKE3/blake3_sse2.c566
-rw-r--r--contrib/libs/llvm16/lib/Support/BLAKE3/blake3_sse2_x86-64_windows_msvc.asm2350
-rw-r--r--contrib/libs/llvm16/lib/Support/BLAKE3/blake3_sse41.c560
-rw-r--r--contrib/libs/llvm16/lib/Support/BLAKE3/blake3_sse41_x86-64_windows_msvc.asm2089
-rw-r--r--contrib/libs/llvm16/lib/TableGen/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Target/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Target/AArch64/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Target/AArch64/AsmParser/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Target/AArch64/Disassembler/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Target/AArch64/MCTargetDesc/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Target/AArch64/TargetInfo/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Target/AArch64/Utils/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Target/ARM/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Target/ARM/AsmParser/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Target/ARM/Disassembler/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Target/ARM/MCTargetDesc/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Target/ARM/TargetInfo/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Target/ARM/Utils/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Target/BPF/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Target/BPF/AsmParser/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Target/BPF/Disassembler/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Target/BPF/MCTargetDesc/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Target/BPF/TargetInfo/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Target/LoongArch/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Target/LoongArch/AsmParser/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Target/LoongArch/Disassembler/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Target/LoongArch/MCTargetDesc/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Target/LoongArch/TargetInfo/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Target/NVPTX/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Target/NVPTX/MCTargetDesc/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Target/NVPTX/TargetInfo/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Target/PowerPC/.yandex_meta/licenses.list.txt588
-rw-r--r--contrib/libs/llvm16/lib/Target/PowerPC/AsmParser/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Target/PowerPC/Disassembler/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Target/PowerPC/MCTargetDesc/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Target/PowerPC/TargetInfo/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Target/RISCV/GISel/RISCVCallLowering.cpp51
-rw-r--r--contrib/libs/llvm16/lib/Target/RISCV/GISel/RISCVCallLowering.h44
-rw-r--r--contrib/libs/llvm16/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp103
-rw-r--r--contrib/libs/llvm16/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp23
-rw-r--r--contrib/libs/llvm16/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h28
-rw-r--r--contrib/libs/llvm16/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp25
-rw-r--r--contrib/libs/llvm16/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.h37
-rw-r--r--contrib/libs/llvm16/lib/Target/RISCV/RISCV.h80
-rw-r--r--contrib/libs/llvm16/lib/Target/RISCV/RISCVAsmPrinter.cpp467
-rw-r--r--contrib/libs/llvm16/lib/Target/RISCV/RISCVCodeGenPrepare.cpp178
-rw-r--r--contrib/libs/llvm16/lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp681
-rw-r--r--contrib/libs/llvm16/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp402
-rw-r--r--contrib/libs/llvm16/lib/Target/RISCV/RISCVFrameLowering.cpp1369
-rw-r--r--contrib/libs/llvm16/lib/Target/RISCV/RISCVFrameLowering.h94
-rw-r--r--contrib/libs/llvm16/lib/Target/RISCV/RISCVGatherScatterLowering.cpp543
-rw-r--r--contrib/libs/llvm16/lib/Target/RISCV/RISCVISelDAGToDAG.cpp2946
-rw-r--r--contrib/libs/llvm16/lib/Target/RISCV/RISCVISelDAGToDAG.h235
-rw-r--r--contrib/libs/llvm16/lib/Target/RISCV/RISCVISelLowering.cpp14257
-rw-r--r--contrib/libs/llvm16/lib/Target/RISCV/RISCVISelLowering.h783
-rw-r--r--contrib/libs/llvm16/lib/Target/RISCV/RISCVInsertVSETVLI.cpp1442
-rw-r--r--contrib/libs/llvm16/lib/Target/RISCV/RISCVInstrInfo.cpp2869
-rw-r--r--contrib/libs/llvm16/lib/Target/RISCV/RISCVInstrInfo.h279
-rw-r--r--contrib/libs/llvm16/lib/Target/RISCV/RISCVMCInstLower.cpp257
-rw-r--r--contrib/libs/llvm16/lib/Target/RISCV/RISCVMachineFunctionInfo.cpp45
-rw-r--r--contrib/libs/llvm16/lib/Target/RISCV/RISCVMachineFunctionInfo.h136
-rw-r--r--contrib/libs/llvm16/lib/Target/RISCV/RISCVMacroFusion.cpp73
-rw-r--r--contrib/libs/llvm16/lib/Target/RISCV/RISCVMacroFusion.h28
-rw-r--r--contrib/libs/llvm16/lib/Target/RISCV/RISCVMakeCompressible.cpp391
-rw-r--r--contrib/libs/llvm16/lib/Target/RISCV/RISCVMergeBaseOffset.cpp455
-rw-r--r--contrib/libs/llvm16/lib/Target/RISCV/RISCVRedundantCopyElimination.cpp181
-rw-r--r--contrib/libs/llvm16/lib/Target/RISCV/RISCVRegisterInfo.cpp786
-rw-r--r--contrib/libs/llvm16/lib/Target/RISCV/RISCVRegisterInfo.h105
-rw-r--r--contrib/libs/llvm16/lib/Target/RISCV/RISCVSExtWRemoval.cpp377
-rw-r--r--contrib/libs/llvm16/lib/Target/RISCV/RISCVStripWSuffix.cpp87
-rw-r--r--contrib/libs/llvm16/lib/Target/RISCV/RISCVSubtarget.cpp180
-rw-r--r--contrib/libs/llvm16/lib/Target/RISCV/RISCVSubtarget.h197
-rw-r--r--contrib/libs/llvm16/lib/Target/RISCV/RISCVTargetMachine.cpp382
-rw-r--r--contrib/libs/llvm16/lib/Target/RISCV/RISCVTargetMachine.h65
-rw-r--r--contrib/libs/llvm16/lib/Target/RISCV/RISCVTargetObjectFile.cpp115
-rw-r--r--contrib/libs/llvm16/lib/Target/RISCV/RISCVTargetObjectFile.h47
-rw-r--r--contrib/libs/llvm16/lib/Target/RISCV/RISCVTargetTransformInfo.cpp1486
-rw-r--r--contrib/libs/llvm16/lib/Target/RISCV/RISCVTargetTransformInfo.h346
-rw-r--r--contrib/libs/llvm16/lib/Target/WebAssembly/.yandex_meta/licenses.list.txt13
-rw-r--r--contrib/libs/llvm16/lib/Target/WebAssembly/AsmParser/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Target/WebAssembly/Disassembler/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Target/WebAssembly/MCTargetDesc/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Target/WebAssembly/TargetInfo/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Target/WebAssembly/Utils/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Target/X86/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Target/X86/AsmParser/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Target/X86/Disassembler/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Target/X86/MCA/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Target/X86/MCTargetDesc/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Target/X86/TargetInfo/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/TargetParser/.yandex_meta/licenses.list.txt579
-rw-r--r--contrib/libs/llvm16/lib/TextAPI/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/ToolDrivers/llvm-dlltool/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/ToolDrivers/llvm-lib/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Transforms/AggressiveInstCombine/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Transforms/CFGuard/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Transforms/Coroutines/.yandex_meta/licenses.list.txt13
-rw-r--r--contrib/libs/llvm16/lib/Transforms/IPO/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Transforms/InstCombine/.yandex_meta/licenses.list.txt13
-rw-r--r--contrib/libs/llvm16/lib/Transforms/Instrumentation/.yandex_meta/licenses.list.txt13
-rw-r--r--contrib/libs/llvm16/lib/Transforms/ObjCARC/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Transforms/Scalar/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/Transforms/Utils/.yandex_meta/licenses.list.txt35
-rw-r--r--contrib/libs/llvm16/lib/Transforms/Vectorize/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/WindowsDriver/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/WindowsManifest/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/lib/XRay/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/provides.pbtxt192
-rw-r--r--contrib/libs/llvm16/tools/bugpoint/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/dsymutil/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/gold/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llc/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/lli/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/lli/ChildTarget/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-ar/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-as/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-bcanalyzer/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-cat/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-cfi-verify/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-cfi-verify/lib/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-config/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-cov/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-cvtres/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-cxxdump/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-cxxfilt/.yandex_meta/licenses.list.txt13
-rw-r--r--contrib/libs/llvm16/tools/llvm-cxxmap/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-diff/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-diff/lib/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-dis/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-dwarfdump/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-dwp/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-exegesis/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-exegesis/lib/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-exegesis/lib/AArch64/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-exegesis/lib/PowerPC/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-exegesis/lib/X86/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-extract/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-gsymutil/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-ifs/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-jitlink/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-jitlink/llvm-jitlink-executor/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-libtool-darwin/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-link/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-lipo/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-lto/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-lto2/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-mc/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-mca/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-ml/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-modextract/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-mt/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-nm/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-objcopy/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-objdump/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-opt-report/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-pdbutil/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-profdata/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-profgen/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-rc/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-readobj/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-reduce/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-rtdyld/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-size/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-split/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-stress/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-strings/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-symbolizer/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-undname/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/llvm-xray/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/lto/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/lto/lto.exports77
-rw-r--r--contrib/libs/llvm16/tools/obj2yaml/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/opt/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/polly/include/polly/Support/ISLOperators.h184
-rw-r--r--contrib/libs/llvm16/tools/polly/lib/.yandex_meta/licenses.list.txt11
-rw-r--r--contrib/libs/llvm16/tools/polly/lib/External/isl/.yandex_meta/licenses.list.txt332
-rw-r--r--contrib/libs/llvm16/tools/polly/lib/External/isl/GIT_HEAD_ID1
-rw-r--r--contrib/libs/llvm16/tools/polly/lib/External/ppcg/.yandex_meta/licenses.list.txt45
-rw-r--r--contrib/libs/llvm16/tools/polly/lib/External/ppcg/GIT_HEAD_ID1
-rw-r--r--contrib/libs/llvm16/tools/remarks-shlib/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/sancov/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/sanstats/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/verify-uselistorder/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/tools/yaml2obj/.yandex_meta/licenses.list.txt7
-rwxr-xr-xcontrib/libs/llvm16/utils/DSAclean.py35
-rwxr-xr-xcontrib/libs/llvm16/utils/DSAextract.py113
-rw-r--r--contrib/libs/llvm16/utils/TableGen/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/utils/TableGen/GlobalISel/.yandex_meta/licenses.list.txt7
-rw-r--r--contrib/libs/llvm16/utils/gdb-scripts/prettyprinters.py499
330 files changed, 91276 insertions, 0 deletions
diff --git a/contrib/libs/llvm16/.yandex_meta/devtools.copyrights.report b/contrib/libs/llvm16/.yandex_meta/devtools.copyrights.report
new file mode 100644
index 0000000000..ce3596471e
--- /dev/null
+++ b/contrib/libs/llvm16/.yandex_meta/devtools.copyrights.report
@@ -0,0 +1,1324 @@
+# File format ($ symbol means the beginning of a line):
+#
+# $ # this message
+# $ # =======================
+# $ # comments (all commentaries should start with some number of spaces and a # symbol)
+# ${action} {license id} {license text hash}
+# $BELONGS ./ya/make/file/relative/path/1/ya.make ./ya/make/2/ya.make
+# ${all_file_action} filename
+# $ # user commentaries (many lines)
+# $ generated description - files with this license, license text... (some number of lines that start with some number of spaces, do not modify)
+# ${action} {license spdx} {license text hash}
+# $BELONGS ./ya/make/file/relative/path/3/ya.make
+# ${all_file_action} filename
+# $ # user commentaries
+# $ generated description
+# $ ...
+#
+# You can modify action, all_file_action and add commentaries
+# Available actions:
+# keep - keep license in contrib and use in credits
+# skip - skip license
+# remove - remove all files with this license
+# rename - save license text/links into the licenses texts file, but do not store SPDX into the LICENSE macro. You should store the correct license id into the devtools.license.spdx.txt file
+#
+# {all file action} records will be generated when license text contains filename that exists on filesystem (in contrib directory)
+# We suppose that these files can contain some license info
+# Available all file actions:
+# FILE_IGNORE - ignore file (do nothing)
+# FILE_INCLUDE - include all file data into licenses text file
+# =======================
+
+KEEP COPYRIGHT_SERVICE_LABEL 07668cb5215fea6ee1517de255c09e3e
+BELONGS lib/Support/ya.make
+ License text:
+ Copyright © 1991-2022 Unicode, Inc. All rights reserved.
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ lib/Support/UnicodeNameToCodepointGenerated.cpp [32:32]
+
+KEEP COPYRIGHT_SERVICE_LABEL 09d1731fda037a42651b22724e02e848
+BELONGS tools/polly/lib/External/isl/ya.make tools/polly/lib/External/ppcg/ya.make
+ License text:
+ * Copyright 2013 Ecole Normale Superieure
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ tools/polly/lib/External/isl/extract_key.c [2:2]
+ tools/polly/lib/External/isl/include/isl/hmap_templ.c [2:3]
+ tools/polly/lib/External/isl/isl_multi_coalesce.c [2:2]
+ tools/polly/lib/External/isl/isl_multi_dim_id_templ.c [2:3]
+ tools/polly/lib/External/isl/isl_multi_domain_templ.c [2:2]
+ tools/polly/lib/External/isl/isl_multi_move_dims_templ.c [2:2]
+ tools/polly/lib/External/isl/isl_multi_no_domain_templ.c [2:2]
+ tools/polly/lib/External/isl/isl_multi_read_no_explicit_domain_templ.c [2:2]
+ tools/polly/lib/External/isl/isl_point.c [2:5]
+ tools/polly/lib/External/isl/isl_pw_eval.c [2:3]
+ tools/polly/lib/External/isl/isl_tab.c [2:5]
+ tools/polly/lib/External/isl/isl_type_has_space_templ.c [2:2]
+ tools/polly/lib/External/isl/isl_union_multi.c [2:4]
+ tools/polly/lib/External/isl/isl_union_single.c [2:3]
+ tools/polly/lib/External/isl/isl_union_templ.c [2:3]
+ tools/polly/lib/External/isl/isl_val.c [2:2]
+ tools/polly/lib/External/isl/isl_vec.c [2:3]
+ tools/polly/lib/External/ppcg/gpu_hybrid.c [2:3]
+ tools/polly/lib/External/ppcg/gpu_tree.c [2:2]
+ tools/polly/lib/External/ppcg/hybrid.c [2:3]
+ tools/polly/lib/External/ppcg/ppcg.c [2:4]
+
+SKIP COPYRIGHT_SERVICE_LABEL 0b6aaa97daae741e03da41749ac1d155
+BELONGS lib/IR/ya.make
+ License text:
+ Type *I32Ty = Type::getInt32Ty(C);
+ Value *Elt0 = Builder.CreateExtractElement(CI->getArgOperand(0),
+ ConstantInt::get(I32Ty, 0));
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ lib/IR/AutoUpgrade.cpp [2335:2337]
+
+KEEP COPYRIGHT_SERVICE_LABEL 11038801a66a8aad80a421c2e7b10837
+BELONGS tools/polly/lib/External/isl/ya.make tools/polly/lib/External/ppcg/ya.make
+ License text:
+ * Copyright 2008-2009 Katholieke Universiteit Leuven
+ * Copyright 2010 INRIA Saclay
+ * Copyright 2012 Ecole Normale Superieure
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ tools/polly/lib/External/isl/isl_affine_hull.c [2:4]
+ tools/polly/lib/External/isl/isl_bernstein.c [2:4]
+ tools/polly/lib/External/isl/isl_bound.c [2:2]
+ tools/polly/lib/External/isl/isl_coalesce.c [2:7]
+ tools/polly/lib/External/isl/isl_constraint.c [2:3]
+ tools/polly/lib/External/isl/isl_equalities.c [2:3]
+ tools/polly/lib/External/isl/isl_factorization.c [2:4]
+ tools/polly/lib/External/isl/isl_farkas.c [2:2]
+ tools/polly/lib/External/isl/isl_flow.c [2:6]
+ tools/polly/lib/External/isl/isl_fold.c [2:2]
+ tools/polly/lib/External/isl/isl_input.c [2:5]
+ tools/polly/lib/External/isl/isl_map.c [2:8]
+ tools/polly/lib/External/isl/isl_map_lexopt_templ.c [2:3]
+ tools/polly/lib/External/isl/isl_mat.c [2:5]
+ tools/polly/lib/External/isl/isl_morph.h [2:2]
+ tools/polly/lib/External/isl/isl_obj.c [2:4]
+ tools/polly/lib/External/isl/isl_output.c [2:5]
+ tools/polly/lib/External/isl/isl_point.c [2:5]
+ tools/polly/lib/External/isl/isl_polynomial.c [2:2]
+ tools/polly/lib/External/isl/isl_pw_eval.c [2:3]
+ tools/polly/lib/External/isl/isl_pw_insert_dims_templ.c [2:2]
+ tools/polly/lib/External/isl/isl_pw_lift_templ.c [2:2]
+ tools/polly/lib/External/isl/isl_pw_morph_templ.c [2:2]
+ tools/polly/lib/External/isl/isl_pw_move_dims_templ.c [2:2]
+ tools/polly/lib/External/isl/isl_pw_neg_templ.c [2:2]
+ tools/polly/lib/External/isl/isl_pw_opt_templ.c [2:2]
+ tools/polly/lib/External/isl/isl_pw_sub_templ.c [2:2]
+ tools/polly/lib/External/isl/isl_reordering.c [2:2]
+ tools/polly/lib/External/isl/isl_space.c [2:5]
+ tools/polly/lib/External/isl/isl_tab_lexopt_templ.c [2:4]
+ tools/polly/lib/External/isl/isl_tab_pip.c [2:4]
+ tools/polly/lib/External/isl/isl_transitive_closure.c [2:2]
+ tools/polly/lib/External/isl/isl_type_has_equal_space_templ.c [2:2]
+ tools/polly/lib/External/isl/isl_union_eval.c [2:2]
+ tools/polly/lib/External/isl/isl_union_multi.c [2:4]
+ tools/polly/lib/External/isl/isl_union_neg.c [2:2]
+ tools/polly/lib/External/isl/isl_union_single.c [2:3]
+ tools/polly/lib/External/isl/isl_union_templ.c [2:3]
+ tools/polly/lib/External/isl/isl_vertices.c [2:2]
+ tools/polly/lib/External/ppcg/cuda_common.c [2:2]
+
+KEEP COPYRIGHT_SERVICE_LABEL 12c13673979d84c93fb3f9aed80709bd
+BELONGS lib/Support/ya.make
+ License text:
+ * Copyright (c) 1998 Todd C. Miller <Todd.Miller@courtesan.com>
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ lib/Support/regstrlcpy.c [4:4]
+
+KEEP COPYRIGHT_SERVICE_LABEL 144b4e824389b748144f4e60485d3c2f
+BELONGS tools/polly/lib/External/isl/ya.make tools/polly/lib/External/ppcg/ya.make
+ License text:
+ * Copyright 2011 INRIA Saclay
+ * Copyright 2012-2013 Ecole Normale Superieure
+ * Copyright 2016 Sven Verdoolaege
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ tools/polly/lib/External/isl/isl_aff_map.c [2:4]
+ tools/polly/lib/External/isl/isl_ast.c [2:2]
+ tools/polly/lib/External/isl/isl_ast_build.c [2:3]
+ tools/polly/lib/External/isl/isl_box.c [2:3]
+ tools/polly/lib/External/isl/isl_coalesce.c [2:7]
+ tools/polly/lib/External/isl/isl_input.c [2:5]
+ tools/polly/lib/External/isl/isl_list_templ.c [2:5]
+ tools/polly/lib/External/isl/isl_map_simplify.c [2:5]
+ tools/polly/lib/External/isl/isl_multi_apply_templ.c [2:3]
+ tools/polly/lib/External/isl/isl_multi_arith_templ.c [2:3]
+ tools/polly/lib/External/isl/isl_multi_dims.c [2:3]
+ tools/polly/lib/External/isl/isl_multi_gist.c [2:3]
+ tools/polly/lib/External/isl/isl_multi_intersect.c [2:3]
+ tools/polly/lib/External/isl/isl_multi_tuple_id_templ.c [2:3]
+ tools/polly/lib/External/isl/isl_output.c [2:5]
+ tools/polly/lib/External/isl/isl_stride.c [2:2]
+ tools/polly/lib/External/ppcg/gpu.c [2:4]
+ tools/polly/lib/External/ppcg/print.c [2:2]
+ tools/polly/lib/External/ppcg/util.c [2:2]
+
+KEEP COPYRIGHT_SERVICE_LABEL 18078dcc93cabe2b2081eb3e0ee92c6b
+BELONGS tools/polly/lib/External/isl/ya.make tools/polly/lib/External/ppcg/ya.make
+ License text:
+ * Copyright 2012 Ecole Normale Superieure
+ * Copyright 2015-2016 Sven Verdoolaege
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ tools/polly/lib/External/isl/isl_schedule_constraints.c [2:3]
+ tools/polly/lib/External/isl/isl_scheduler.c [2:6]
+ tools/polly/lib/External/ppcg/gpu.c [2:4]
+
+KEEP COPYRIGHT_SERVICE_LABEL 1e274833b7fb3d36a3be23a694f46aac
+BELONGS tools/polly/lib/External/isl/ya.make
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ tools/polly/lib/External/isl/isl_flow.c [2:6]
+
+KEEP COPYRIGHT_SERVICE_LABEL 1e686ce4ffe6d63b91a69c1b26712d33
+BELONGS lib/Support/ya.make
+ License text:
+ * Copyright (c) 1994
+ * The Regents of the University of California. All rights reserved.
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ lib/Support/COPYRIGHT.regex [26:27]
+
+KEEP COPYRIGHT_SERVICE_LABEL 23befcc9a22e8ccf6e5df3134d3e033e
+BELONGS tools/polly/lib/External/isl/ya.make
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ tools/polly/lib/External/isl/isl_coalesce.c [2:7]
+ tools/polly/lib/External/isl/isl_multi_zero_space_templ.c [2:2]
+ tools/polly/lib/External/isl/isl_pw_locals_templ.c [2:2]
+ tools/polly/lib/External/isl/isl_pw_union_opt.c [2:4]
+ tools/polly/lib/External/isl/isl_union_locals_templ.c [2:2]
+
+SKIP COPYRIGHT_SERVICE_LABEL 28368d51c5dcd8ee8080c84c4f8f192e
+BELONGS lib/Target/X86/ya.make
+ License text:
+ Trampoline->setComdat(C);
+ BasicBlock *EntryBB = BasicBlock::Create(Context, "entry", Trampoline);
+ IRBuilder<> Builder(EntryBB);
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ lib/Target/X86/X86WinEHState.cpp [403:405]
+
+KEEP COPYRIGHT_SERVICE_LABEL 2a22d1f4218625c4ce9e47576ddfe109
+BELONGS tools/polly/lib/External/isl/ya.make
+ License text:
+ * Copyright 2012,2014 Ecole Normale Superieure
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ tools/polly/lib/External/isl/isl_multi_from_base_templ.c [2:2]
+
+SKIP COPYRIGHT_SERVICE_LABEL 30978c73becf399db8f52c771e38e22d
+BELONGS lib/Transforms/InstCombine/ya.make
+ License text:
+ // The instruction has the form "(A op' B) op (C)". Try to factorize common
+ // term.
+ if (Op0)
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ lib/Transforms/InstCombine/InstructionCombining.cpp [754:756]
+
+KEEP COPYRIGHT_SERVICE_LABEL 3110a89ff9a82a822a3c1cadfce9ba6d
+BELONGS include/ya.make lib/Support/ya.make
+ License text:
+ * Copyright © 1991-2015 Unicode, Inc. All rights reserved.
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ include/llvm/Support/ConvertUTF.h [16:16]
+ lib/Support/ConvertUTF.cpp [9:9]
+
+KEEP COPYRIGHT_SERVICE_LABEL 3166dbaed102eed6d3653822d5176d42
+BELONGS tools/polly/lib/External/isl/ya.make
+ License text:
+ IMath is copyright &copy; 2002-2009 Michael J. Fromberger.
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ tools/polly/lib/External/isl/imath/README.md [9:9]
+
+SKIP COPYRIGHT_SERVICE_LABEL 318577927099d6e8a6d11b7f87ea7da3
+BELONGS lib/Analysis/ya.make
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ lib/Analysis/DependenceGraphBuilder.cpp [465:469]
+
+SKIP COPYRIGHT_SERVICE_LABEL 3680a79f7158604adc35f5aca3f57d93
+BELONGS lib/CodeGen/ya.make
+ License text:
+ IntegerType *WordType = DL.getIntPtrType(C);
+ PointerType *InitPtrType = InitValue ?
+ PointerType::getUnqual(InitValue->getType()) : VoidPtrType;
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ lib/CodeGen/LowerEmuTLS.cpp [116:118]
+
+KEEP COPYRIGHT_SERVICE_LABEL 3c354bb3a6485498273ede375ac114c2
+BELONGS tools/polly/lib/External/isl/ya.make
+ License text:
+ * Copyright 2008-2009 Katholieke Universiteit Leuven
+ * Copyright 2010 INRIA Saclay
+ * Copyright 2016-2017 Sven Verdoolaege
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ tools/polly/lib/External/isl/isl_tab_pip.c [2:4]
+ tools/polly/lib/External/isl/isl_union_map.c [2:5]
+
+SKIP COPYRIGHT_SERVICE_LABEL 3e3009e9cc74d820b2d85abf2790f180
+BELONGS lib/DebugInfo/DWARF/ya.make
+ License text:
+ Value0 = Data.getULEB128(C);
+ Value1 = Data.getULEB128(C);
+ break;
+ case dwarf::DW_RLE_startx_length: {
+ Value0 = Data.getULEB128(C);
+ Value1 = Data.getULEB128(C);
+ break;
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ lib/DebugInfo/DWARF/DWARFDebugRnglists.cpp [40:46]
+ lib/DebugInfo/DWARF/DWARFDebugRnglists.cpp [49:51]
+
+SKIP COPYRIGHT_SERVICE_LABEL 42ae7da0badf3b2f130af3a54173f220
+BELONGS lib/DebugInfo/DWARF/ya.make
+ License text:
+ : DebugLineData(Data), Context(C) {
+ LineToUnit = buildLineToUnitMap(Units);
+ if (!DebugLineData.isValidOffset(Offset))
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ lib/DebugInfo/DWARF/DWARFDebugLine.cpp [1459:1461]
+
+KEEP COPYRIGHT_SERVICE_LABEL 436fc5ecfdba3f630dca8faa57fe7939
+BELONGS tools/polly/lib/External/isl/ya.make
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ tools/polly/lib/External/isl/isl_aff.c [2:8]
+
+KEEP COPYRIGHT_SERVICE_LABEL 461e4cbb7bcd561964b60cfc5349b46f
+BELONGS tools/polly/lib/External/isl/ya.make tools/polly/lib/External/ppcg/ya.make
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ tools/polly/lib/External/isl/isl_aff.c [2:8]
+ tools/polly/lib/External/isl/isl_ast_build_expr.c [2:3]
+ tools/polly/lib/External/isl/isl_ast_codegen.c [2:3]
+ tools/polly/lib/External/isl/isl_local_space.c [2:3]
+ tools/polly/lib/External/isl/isl_map.c [2:8]
+ tools/polly/lib/External/isl/isl_multi_templ.c [2:3]
+ tools/polly/lib/External/isl/isl_pw_templ.c [2:4]
+ tools/polly/lib/External/isl/isl_schedule.c [2:4]
+ tools/polly/lib/External/isl/isl_scheduler.c [2:6]
+ tools/polly/lib/External/ppcg/gpu_group.c [2:4]
+
+KEEP COPYRIGHT_SERVICE_LABEL 48792ca6e267c7339e5b010e5dfcf7e8
+BELONGS lib/CodeGen/MIRParser/ya.make
+ License text:
+ return isalpha(C) || isdigit(C) || C == '_' || C == '-' || C == '.' ||
+ C == '$';
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ lib/CodeGen/MIRParser/MILexer.cpp [119:120]
+
+SKIP COPYRIGHT_SERVICE_LABEL 54e2525236fb6c3cc97e13b6fb250b63
+BELONGS lib/DebugInfo/DWARF/ya.make
+ License text:
+ uint64_t BlockLength = Data.getULEB128(C);
+ StringRef Expression = Data.getBytes(C, BlockLength);
+ DataExtractor Extractor(Expression, Data.isLittleEndian(),
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ lib/DebugInfo/DWARF/DWARFDebugFrame.cpp [375:377]
+
+KEEP COPYRIGHT_SERVICE_LABEL 5655c5febb1994aec6687062b2e02bd9
+BELONGS tools/polly/lib/External/isl/ya.make
+ License text:
+ * Copyright 2012 Ecole Normale Superieure
+ * Copyright 2014 INRIA Rocquencourt
+ * Copyright 2019 Cerebras Systems
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ tools/polly/lib/External/isl/isl_ast_graft.c [2:4]
+ tools/polly/lib/External/isl/isl_copy_tuple_id_templ.c [2:2]
+ tools/polly/lib/External/isl/isl_ilp_opt_multi_val_templ.c [2:2]
+ tools/polly/lib/External/isl/isl_input.c [2:5]
+ tools/polly/lib/External/isl/isl_insert_domain_templ.c [2:2]
+ tools/polly/lib/External/isl/isl_multi_add_constant_templ.c [2:2]
+ tools/polly/lib/External/isl/isl_multi_insert_domain_templ.c [2:2]
+ tools/polly/lib/External/isl/isl_multi_locals_templ.c [2:2]
+ tools/polly/lib/External/isl/isl_multi_min_max_templ.c [2:2]
+ tools/polly/lib/External/isl/isl_multi_unbind_params_templ.c [2:2]
+ tools/polly/lib/External/isl/isl_output.c [2:5]
+ tools/polly/lib/External/isl/isl_point.c [2:5]
+ tools/polly/lib/External/isl/isl_pw_add_constant_multi_val_templ.c [2:2]
+ tools/polly/lib/External/isl/isl_pw_add_constant_templ.c [2:2]
+ tools/polly/lib/External/isl/isl_pw_add_constant_val_templ.c [2:2]
+ tools/polly/lib/External/isl/isl_pw_insert_domain_templ.c [2:2]
+ tools/polly/lib/External/isl/isl_pw_range_tuple_id_templ.c [2:3]
+
+KEEP COPYRIGHT_SERVICE_LABEL 5c85ca831b8809876c0790cfad2d8956
+BELONGS lib/Support/ya.make
+ License text:
+ Copyright 1992, 1993, 1994 Henry Spencer. All rights reserved.
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ lib/Support/COPYRIGHT.regex [3:3]
+
+KEEP COPYRIGHT_SERVICE_LABEL 63572b23fe8322d08573c7ea9280b27f
+BELONGS lib/Transforms/Utils/ya.make
+ License text:
+ Type *DsoHandleTy = Type::getInt8Ty(C);
+ Constant *DsoHandle = M.getOrInsertGlobal("__dso_handle", DsoHandleTy, [&] {
+ auto *GV = new GlobalVariable(M, DsoHandleTy, /*isConstant=*/true,
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ lib/Transforms/Utils/LowerGlobalDtors.cpp [144:146]
+
+KEEP COPYRIGHT_SERVICE_LABEL 65ab4ffe80a963d1ba83b72e467c317b
+BELONGS tools/polly/lib/External/isl/ya.make
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ tools/polly/lib/External/isl/isl_map.c [2:8]
+ tools/polly/lib/External/isl/isl_space.c [2:5]
+
+KEEP COPYRIGHT_SERVICE_LABEL 6889c51be687d2aa2ca198228ffada0d
+BELONGS ya.make
+ License text:
+ Copyright (c) 2003-2019 University of Illinois at Urbana-Champaign.
+ All rights reserved.
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ LICENSE.TXT [242:243]
+
+KEEP COPYRIGHT_SERVICE_LABEL 698365507d95a4f75e3a8ada69384de8
+BELONGS tools/polly/lib/External/isl/ya.make
+ License text:
+ Copyright (C) 2002-2007 Michael J. Fromberger, All Rights Reserved.
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ tools/polly/lib/External/isl/imath/imath.c [6:6]
+ tools/polly/lib/External/isl/imath/imath.h [6:6]
+ tools/polly/lib/External/isl/imath/imrat.c [6:6]
+ tools/polly/lib/External/isl/imath/imrat.h [6:6]
+
+SKIP COPYRIGHT_SERVICE_LABEL 6c1074072381bc6362e1b37c9f1b2256
+BELONGS lib/DebugInfo/DWARF/ya.make
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ lib/DebugInfo/DWARF/DWARFAcceleratorTable.cpp [390:398]
+
+KEEP COPYRIGHT_SERVICE_LABEL 6c41413cfb5767906448d3945dee5b1e
+BELONGS lib/CodeGen/GlobalISel/ya.make
+ License text:
+ auto *CI = cast<ConstantInt>(C);
+ APInt Divisor = CI->getValue();
+ unsigned Shift = Divisor.countTrailingZeros();
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ lib/CodeGen/GlobalISel/CombinerHelper.cpp [5145:5147]
+
+KEEP COPYRIGHT_SERVICE_LABEL 71a5570b4ed12474c30d73d9c6d6af57
+BELONGS lib/Support/ya.make
+ License text:
+ * Copyright (c) 1992 Henry Spencer.
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ lib/Support/regex_impl.h [4:6]
+
+KEEP COPYRIGHT_SERVICE_LABEL 72954459d846489a9437b458790243b4
+BELONGS tools/polly/lib/External/isl/ya.make
+ License text:
+ * Copyright 2008-2009 Katholieke Universiteit Leuven
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ tools/polly/lib/External/isl/basis_reduction_tab.c [2:2]
+ tools/polly/lib/External/isl/basis_reduction_templ.c [2:3]
+ tools/polly/lib/External/isl/include/isl/arg.h [2:2]
+ tools/polly/lib/External/isl/include/isl/constraint.h [2:2]
+ tools/polly/lib/External/isl/include/isl/ctx.h [2:2]
+ tools/polly/lib/External/isl/include/isl/hash.h [2:2]
+ tools/polly/lib/External/isl/include/isl/ilp.h [2:2]
+ tools/polly/lib/External/isl/include/isl/list.h [2:2]
+ tools/polly/lib/External/isl/include/isl/lp.h [2:2]
+ tools/polly/lib/External/isl/include/isl/map.h [2:2]
+ tools/polly/lib/External/isl/include/isl/mat.h [2:2]
+ tools/polly/lib/External/isl/include/isl/options.h [2:2]
+ tools/polly/lib/External/isl/include/isl/set.h [2:2]
+ tools/polly/lib/External/isl/include/isl/space.h [2:2]
+ tools/polly/lib/External/isl/include/isl/stream.h [2:2]
+ tools/polly/lib/External/isl/include/isl/vec.h [2:2]
+ tools/polly/lib/External/isl/isl_affine_hull.c [2:4]
+ tools/polly/lib/External/isl/isl_arg.c [2:2]
+ tools/polly/lib/External/isl/isl_basis_reduction.h [2:2]
+ tools/polly/lib/External/isl/isl_bernstein.c [2:4]
+ tools/polly/lib/External/isl/isl_blk.c [2:2]
+ tools/polly/lib/External/isl/isl_blk.h [2:2]
+ tools/polly/lib/External/isl/isl_coalesce.c [2:7]
+ tools/polly/lib/External/isl/isl_constraint.c [2:3]
+ tools/polly/lib/External/isl/isl_convex_hull.c [2:3]
+ tools/polly/lib/External/isl/isl_ctx.c [2:2]
+ tools/polly/lib/External/isl/isl_dim_map.c [2:3]
+ tools/polly/lib/External/isl/isl_equalities.c [2:3]
+ tools/polly/lib/External/isl/isl_equalities.h [2:2]
+ tools/polly/lib/External/isl/isl_factorization.c [2:4]
+ tools/polly/lib/External/isl/isl_flow.c [2:6]
+ tools/polly/lib/External/isl/isl_hash.c [2:2]
+ tools/polly/lib/External/isl/isl_id.c [2:2]
+ tools/polly/lib/External/isl/isl_id_private.h [2:2]
+ tools/polly/lib/External/isl/isl_ilp.c [2:2]
+ tools/polly/lib/External/isl/isl_input.c [2:5]
+ tools/polly/lib/External/isl/isl_int.h [2:2]
+ tools/polly/lib/External/isl/isl_list_templ.c [2:5]
+ tools/polly/lib/External/isl/isl_lp.c [2:2]
+ tools/polly/lib/External/isl/isl_map.c [2:8]
+ tools/polly/lib/External/isl/isl_map_private.h [2:2]
+ tools/polly/lib/External/isl/isl_map_simplify.c [2:5]
+ tools/polly/lib/External/isl/isl_map_subtract.c [2:2]
+ tools/polly/lib/External/isl/isl_mat.c [2:5]
+ tools/polly/lib/External/isl/isl_options.c [2:2]
+ tools/polly/lib/External/isl/isl_output.c [2:5]
+ tools/polly/lib/External/isl/isl_sample.c [2:2]
+ tools/polly/lib/External/isl/isl_sample.h [2:2]
+ tools/polly/lib/External/isl/isl_scan.c [2:2]
+ tools/polly/lib/External/isl/isl_scan.h [2:2]
+ tools/polly/lib/External/isl/isl_seq.c [2:3]
+ tools/polly/lib/External/isl/isl_seq.h [2:2]
+ tools/polly/lib/External/isl/isl_space.c [2:5]
+ tools/polly/lib/External/isl/isl_stream.c [2:2]
+ tools/polly/lib/External/isl/isl_tab.c [2:5]
+ tools/polly/lib/External/isl/isl_tab.h [2:2]
+ tools/polly/lib/External/isl/isl_tab_lexopt_templ.c [2:4]
+ tools/polly/lib/External/isl/isl_tab_pip.c [2:4]
+ tools/polly/lib/External/isl/isl_vec.c [2:3]
+
+KEEP COPYRIGHT_SERVICE_LABEL 729b0fb03d9f51f2d741dd6f8e10d012
+BELONGS lib/Transforms/Instrumentation/ya.make
+ License text:
+ GV->setComdat(C);
+ // COFF doesn't allow the comdat group leader to have private linkage, so
+ // upgrade private linkage to internal linkage to produce a symbol table
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ lib/Transforms/Instrumentation/InstrProfiling.cpp [923:925]
+
+KEEP COPYRIGHT_SERVICE_LABEL 74cb8b1ee2bcf27ccbef2c7016611351
+BELONGS tools/polly/lib/External/isl/ya.make
+ License text:
+ * Copyright 2015 INRIA Paris-Rocquencourt
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ tools/polly/lib/External/isl/isl_int_sioimath.h [2:2]
+ tools/polly/lib/External/isl/isl_union_multi.c [2:4]
+
+SKIP COPYRIGHT_SERVICE_LABEL 7ab4b45edb7aa5542d2298596f7ac077
+BELONGS lib/Transforms/InstCombine/ya.make
+ License text:
+ Value *NotC = Builder.CreateNot(C);
+ Value *RHS = Builder.CreateAnd(B, NotC);
+ return BinaryOperator::CreateOr(LHS, RHS);
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ lib/Transforms/InstCombine/InstCombineAndOrXor.cpp [3622:3624]
+
+SKIP COPYRIGHT_SERVICE_LABEL 7ade3b3e64da0b5a8c95933670bd325e
+BELONGS lib/Analysis/ya.make
+ License text:
+ // |X| < |C| --> X > -abs(C) and X < abs(C)
+ Constant *PosDivisorC = ConstantInt::get(Ty, C->abs());
+ Constant *NegDivisorC = ConstantInt::get(Ty, -C->abs());
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ lib/Analysis/InstructionSimplify.cpp [1127:1129]
+
+KEEP COPYRIGHT_SERVICE_LABEL 7e53573e64541341a91e201e6f51f486
+BELONGS include/ya.make lib/Support/ya.make
+ License text:
+ Copyright (C) 2012-2016, Yann Collet.
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ include/llvm/Support/xxhash.h [11:11]
+ lib/Support/xxhash.cpp [3:3]
+
+KEEP COPYRIGHT_SERVICE_LABEL 8227611821c29c046c24e9d2d617e4d1
+BELONGS tools/polly/lib/External/isl/ya.make
+ License text:
+ * Copyright 2008-2009 Katholieke Universiteit Leuven
+ * Copyright 2012-2013 Ecole Normale Superieure
+ * Copyright 2014-2015 INRIA Rocquencourt
+ * Copyright 2016 Sven Verdoolaege
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ tools/polly/lib/External/isl/isl_map_simplify.c [2:5]
+
+SKIP COPYRIGHT_SERVICE_LABEL 86fd792ceb251fb3fcc341f66082f35f
+BELONGS lib/Analysis/ya.make
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ lib/Analysis/ValueTracking.cpp [7150:7154]
+
+KEEP COPYRIGHT_SERVICE_LABEL 8a97f85e3d546ee687a70cc1eddc6fb9
+BELONGS lib/Transforms/Coroutines/ya.make
+ License text:
+ IRBuilder<> Builder(C);
+ StructType *FrameTy = Shape.FrameTy;
+ Value *FramePtr = Shape.FramePtr;
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ lib/Transforms/Coroutines/CoroFrame.cpp [1589:1591]
+
+SKIP COPYRIGHT_SERVICE_LABEL 8b4064d8567439548a359b7b1a7a28a1
+BELONGS lib/Analysis/ya.make
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ lib/Analysis/InstructionSimplify.cpp [1108:1113]
+
+SKIP COPYRIGHT_SERVICE_LABEL 90576b7033efccaaffbad736f1a0d521
+BELONGS lib/IR/ya.make
+ License text:
+ LLVMContext &Context = *unwrap(C);
+ SmallVector<Metadata *, 8> MDs;
+ for (auto *OV : ArrayRef(Vals, Count)) {
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ lib/IR/Core.cpp [1155:1157]
+
+KEEP COPYRIGHT_SERVICE_LABEL 94704cb20c3a3607018b836f1a5d4355
+BELONGS tools/polly/lib/External/isl/ya.make
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ tools/polly/lib/External/isl/isl_aff.c [2:8]
+ tools/polly/lib/External/isl/isl_aff_lex_templ.c [2:2]
+ tools/polly/lib/External/isl/isl_ast_build.c [2:3]
+ tools/polly/lib/External/isl/isl_ast_build_expr.c [2:3]
+ tools/polly/lib/External/isl/isl_ast_codegen.c [2:3]
+ tools/polly/lib/External/isl/isl_ast_graft.c [2:4]
+ tools/polly/lib/External/isl/isl_coalesce.c [2:7]
+ tools/polly/lib/External/isl/isl_convex_hull.c [2:3]
+ tools/polly/lib/External/isl/isl_map.c [2:8]
+ tools/polly/lib/External/isl/isl_obj.c [2:4]
+ tools/polly/lib/External/isl/isl_schedule_band.c [2:3]
+ tools/polly/lib/External/isl/isl_schedule_node.c [2:4]
+ tools/polly/lib/External/isl/isl_schedule_tree.c [2:4]
+ tools/polly/lib/External/isl/isl_tab.c [2:5]
+ tools/polly/lib/External/isl/isl_union_map.c [2:5]
+ tools/polly/lib/External/isl/isl_union_map_lex_templ.c [2:2]
+
+SKIP COPYRIGHT_SERVICE_LABEL 9834d5fa1f0986d34086e1d17571116a
+BELONGS lib/CodeGen/SelectionDAG/ya.make
+ License text:
+ Type *Ty = MemOps[0].getTypeForEVT(C);
+ Align NewAlign = DL.getABITypeAlign(Ty);
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ lib/CodeGen/SelectionDAG/SelectionDAG.cpp [7022:7023]
+ lib/CodeGen/SelectionDAG/SelectionDAG.cpp [7217:7218]
+
+SKIP COPYRIGHT_SERVICE_LABEL 9ae7967f03e7e2a04a2e21f088823a55
+BELONGS lib/DebugInfo/DWARF/ya.make
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ lib/DebugInfo/DWARF/DWARFAcceleratorTable.cpp [390:398]
+
+SKIP COPYRIGHT_SERVICE_LABEL 9c879e80b33217edcdd9207481a44d94
+BELONGS lib/DebugInfo/DWARF/ya.make
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ lib/DebugInfo/DWARF/DWARFAcceleratorTable.cpp [390:398]
+
+KEEP COPYRIGHT_SERVICE_LABEL 9d4ec4453cbd8bf6cc9370c38dd298ed
+BELONGS lib/Support/ya.make
+ License text:
+ * Copyright (c) 1992, 1993, 1994 Henry Spencer.
+ * Copyright (c) 1992, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ lib/Support/regcomp.c [4:6]
+ lib/Support/regengine.inc [4:6]
+ lib/Support/regerror.c [4:6]
+ lib/Support/regex2.h [4:6]
+ lib/Support/regexec.c [4:6]
+ lib/Support/regfree.c [4:6]
+ lib/Support/regutils.h [4:6]
+
+KEEP COPYRIGHT_SERVICE_LABEL a1124e6dbfac6a8682f68b97ffa3c4b7
+BELONGS tools/polly/lib/External/isl/ya.make tools/polly/lib/External/ppcg/ya.make
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ tools/polly/lib/External/isl/isl_aff.c [2:8]
+ tools/polly/lib/External/isl/isl_aff_map.c [2:4]
+ tools/polly/lib/External/isl/isl_map.c [2:8]
+ tools/polly/lib/External/isl/isl_map_simplify.c [2:5]
+ tools/polly/lib/External/isl/isl_multi_cmp.c [2:2]
+ tools/polly/lib/External/isl/isl_multi_hash.c [2:2]
+ tools/polly/lib/External/isl/isl_pw_hash.c [2:2]
+ tools/polly/lib/External/isl/isl_schedule.c [2:4]
+ tools/polly/lib/External/isl/isl_schedule_node.c [2:4]
+ tools/polly/lib/External/isl/isl_tab.c [2:5]
+ tools/polly/lib/External/ppcg/grouping.c [2:2]
+
+KEEP COPYRIGHT_SERVICE_LABEL a4b3a3b7e276c28b916f8f737b8e582b
+BELONGS tools/polly/lib/External/isl/ya.make
+ License text:
+ * Copyright 2012 Ecole Normale Superieure
+ * Copyright 2017 Sven Verdoolaege
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ tools/polly/lib/External/isl/isl_domain_factor_templ.c [2:3]
+ tools/polly/lib/External/isl/isl_ilp_opt_val_templ.c [2:2]
+ tools/polly/lib/External/isl/isl_list_read_templ.c [2:2]
+ tools/polly/lib/External/isl/isl_list_templ.c [2:5]
+ tools/polly/lib/External/isl/isl_mat.c [2:5]
+ tools/polly/lib/External/isl/isl_multi_align_templ.c [2:2]
+ tools/polly/lib/External/isl/isl_multi_explicit_domain.c [2:2]
+ tools/polly/lib/External/isl/isl_multi_no_explicit_domain.c [2:2]
+ tools/polly/lib/External/isl/isl_multi_pw_aff_explicit_domain.c [2:2]
+ tools/polly/lib/External/isl/isl_multi_union_add_templ.c [2:2]
+ tools/polly/lib/External/isl/isl_multi_union_pw_aff_explicit_domain.c [2:2]
+ tools/polly/lib/External/isl/isl_scheduler.c [2:6]
+
+KEEP COPYRIGHT_SERVICE_LABEL a5abc66d4dc93961fd15d25d8a6e8d4f
+BELONGS tools/polly/lib/External/isl/ya.make
+ License text:
+ * Copyright 2006-2007 Universiteit Leiden
+ * Copyright 2008-2009 Katholieke Universiteit Leuven
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ tools/polly/lib/External/isl/basis_reduction_templ.c [2:3]
+ tools/polly/lib/External/isl/isl_bernstein.c [2:4]
+
+KEEP COPYRIGHT_SERVICE_LABEL aa55046ed38838026ba71159b2c09f09
+BELONGS lib/Transforms/InstCombine/ya.make
+ License text:
+ // icmp <pred> iK %E, trunc(C)
+ Value *Vec;
+ ArrayRef<int> Mask;
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ lib/Transforms/InstCombine/InstCombineCompares.cpp [3044:3046]
+
+KEEP COPYRIGHT_SERVICE_LABEL aca63d9e9935fcd3523aa703d86c95b1
+BELONGS tools/polly/lib/External/isl/ya.make
+ License text:
+ * Copyright 2018 Cerebras Systems
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ tools/polly/lib/External/isl/isl_bind_domain_templ.c [2:2]
+ tools/polly/lib/External/isl/isl_map_bound_templ.c [2:2]
+ tools/polly/lib/External/isl/isl_multi_bind_domain_templ.c [2:2]
+ tools/polly/lib/External/isl/isl_multi_bind_templ.c [2:2]
+ tools/polly/lib/External/isl/isl_opt_mpa_templ.c [2:2]
+ tools/polly/lib/External/isl/isl_pw_bind_domain_templ.c [2:2]
+
+KEEP COPYRIGHT_SERVICE_LABEL b1eeacd142c9ee1731757d12ccdeca48
+BELONGS tools/polly/lib/External/isl/ya.make
+ License text:
+ * Copyright 2005-2007 Universiteit Leiden
+ * Copyright 2008-2009 Katholieke Universiteit Leuven
+ * Copyright 2010 INRIA Saclay
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ tools/polly/lib/External/isl/isl_factorization.c [2:4]
+ tools/polly/lib/External/isl/isl_flow.c [2:6]
+
+KEEP COPYRIGHT_SERVICE_LABEL b3d9c252ff8f5a3a5d20c558a29e2e16
+BELONGS include/ya.make tools/llvm-cxxfilt/ya.make
+ License text:
+ static bool isChar6(char C) { return isAlnum(C) || C == '.' || C == '_'; }
+ static unsigned EncodeChar6(char C) {
+ if (C >= 'a' && C <= 'z') return C-'a';
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ include/llvm/Bitstream/BitCodes.h [89:91]
+ tools/llvm-cxxfilt/llvm-cxxfilt.cpp [128:129]
+
+KEEP COPYRIGHT_SERVICE_LABEL b7be40807046549a01a511a04d441db0
+BELONGS tools/polly/lib/External/isl/ya.make tools/polly/lib/External/ppcg/ya.make
+ License text:
+ * Copyright 2008-2009 Katholieke Universiteit Leuven
+ * Copyright 2010 INRIA Saclay
+ * Copyright 2012 Ecole Normale Superieure
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ tools/polly/lib/External/isl/isl_affine_hull.c [2:4]
+ tools/polly/lib/External/isl/isl_ast_graft.c [2:4]
+ tools/polly/lib/External/isl/isl_domain_factor_templ.c [2:3]
+ tools/polly/lib/External/isl/isl_map_lexopt_templ.c [2:3]
+ tools/polly/lib/External/isl/isl_multi_identity_templ.c [2:2]
+ tools/polly/lib/External/isl/isl_multi_product_templ.c [2:2]
+ tools/polly/lib/External/isl/isl_multi_splice_templ.c [2:2]
+ tools/polly/lib/External/isl/isl_multi_zero_templ.c [2:2]
+ tools/polly/lib/External/isl/isl_pw_pullback_templ.c [2:2]
+ tools/polly/lib/External/isl/isl_pw_union_opt.c [2:4]
+ tools/polly/lib/External/isl/isl_schedule_constraints.c [2:3]
+ tools/polly/lib/External/isl/isl_tarjan.c [2:3]
+ tools/polly/lib/External/ppcg/cuda.c [2:2]
+ tools/polly/lib/External/ppcg/gpu_print.c [2:2]
+
+KEEP COPYRIGHT_SERVICE_LABEL ba76fb602cdb427419e5298f61c13346
+BELONGS lib/Support/ya.make
+ License text:
+ * Copyright (c) 1992 Henry Spencer.
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ lib/Support/regex_impl.h [4:6]
+
+KEEP COPYRIGHT_SERVICE_LABEL bae2c8c0881abaf9a917b263650fe746
+BELONGS lib/Demangle/ya.make
+ License text:
+ return isDigit(C) || isLower(C) || isUpper(C) || C == '_';
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ lib/Demangle/RustDemangle.cpp [185:185]
+
+KEEP COPYRIGHT_SERVICE_LABEL bed102526902357f786febaf3d1184f2
+BELONGS ya.make
+ License text:
+ Copyright (c) 2009-2019 Polly Team
+ All rights reserved.
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ tools/polly/LICENSE.TXT [242:243]
+
+KEEP COPYRIGHT_SERVICE_LABEL c02ea0d68190938e6849401dd3849f04
+BELONGS tools/polly/lib/External/isl/ya.make
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ tools/polly/lib/External/isl/isl_flow.c [2:6]
+ tools/polly/lib/External/isl/isl_local.c [2:3]
+ tools/polly/lib/External/isl/isl_mat.c [2:5]
+ tools/polly/lib/External/isl/isl_morph.c [2:3]
+ tools/polly/lib/External/isl/isl_multi_floor.c [2:2]
+ tools/polly/lib/External/isl/isl_multi_nan_templ.c [2:2]
+ tools/polly/lib/External/isl/isl_obj.c [2:4]
+
+KEEP COPYRIGHT_SERVICE_LABEL c4511e6caf8a6a741fb6fda077fd3335
+BELONGS tools/polly/lib/External/isl/ya.make tools/polly/lib/External/ppcg/ya.make
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ tools/polly/lib/External/isl/include/isl/hmap_templ.c [2:3]
+ tools/polly/lib/External/isl/isl_aff.c [2:8]
+ tools/polly/lib/External/isl/isl_aff_map.c [2:4]
+ tools/polly/lib/External/isl/isl_list_templ.c [2:5]
+ tools/polly/lib/External/isl/isl_local.c [2:3]
+ tools/polly/lib/External/isl/isl_local_space.c [2:3]
+ tools/polly/lib/External/isl/isl_pw_union_opt.c [2:4]
+ tools/polly/lib/External/isl/isl_schedule.c [2:4]
+ tools/polly/lib/External/isl/isl_scheduler.c [2:6]
+ tools/polly/lib/External/isl/isl_seq.c [2:3]
+ tools/polly/lib/External/ppcg/ppcg.c [2:4]
+
+KEEP COPYRIGHT_SERVICE_LABEL c8069f9f5455fa4a970af50594daf852
+BELONGS lib/Support/ya.make
+ License text:
+ * Copyright (c) 1992, 1993, 1994 Henry Spencer.
+ * Copyright (c) 1992, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ lib/Support/regcomp.c [4:6]
+ lib/Support/regengine.inc [4:6]
+ lib/Support/regerror.c [4:6]
+ lib/Support/regex2.h [4:6]
+ lib/Support/regexec.c [4:6]
+ lib/Support/regfree.c [4:6]
+ lib/Support/regutils.h [4:6]
+
+KEEP COPYRIGHT_SERVICE_LABEL ca248aadf2ee691ae97d11398afe624b
+BELONGS lib/Transforms/Utils/ya.make
+ License text:
+ Type *SizeTy = M.getDataLayout().getIntPtrType(C);
+ Type *SizePtrTy = SizeTy->getPointerTo();
+ GlobalVariable *GV = new GlobalVariable(M, SizeTy, /*isConstant=*/false,
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ lib/Transforms/Utils/EntryExitInstrumenter.cpp [40:42]
+
+KEEP COPYRIGHT_SERVICE_LABEL cd92f11018b3e27ac7b0cc593c1c92c2
+BELONGS tools/polly/lib/External/isl/ya.make tools/polly/lib/External/ppcg/ya.make
+ License text:
+ * Copyright 2010-2011 INRIA Saclay
+ * Copyright 2012-2013 Ecole Normale Superieure
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ tools/polly/lib/External/isl/isl_box.c [2:3]
+ tools/polly/lib/External/isl/isl_dim_map.c [2:3]
+ tools/polly/lib/External/isl/isl_morph.c [2:3]
+ tools/polly/lib/External/isl/isl_pw_templ.c [2:4]
+ tools/polly/lib/External/isl/isl_tarjan.c [2:3]
+ tools/polly/lib/External/isl/isl_union_map.c [2:5]
+ tools/polly/lib/External/ppcg/gpu.c [2:4]
+ tools/polly/lib/External/ppcg/gpu_group.c [2:4]
+ tools/polly/lib/External/ppcg/ppcg_options.c [2:2]
+ tools/polly/lib/External/ppcg/schedule.c [2:2]
+
+KEEP COPYRIGHT_SERVICE_LABEL d0ef211acd80f8c1a5c894ade7f87538
+BELONGS lib/Transforms/Utils/ya.make
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ lib/Transforms/Utils/LowerGlobalDtors.cpp [131:135]
+
+KEEP COPYRIGHT_SERVICE_LABEL dbc9f80f01ad96a42bc3446397c989b0
+BELONGS tools/polly/lib/External/isl/ya.make
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ tools/polly/lib/External/isl/isl_aff.c [2:8]
+ tools/polly/lib/External/isl/isl_align_params_templ.c [2:2]
+ tools/polly/lib/External/isl/isl_multi_apply_templ.c [2:3]
+ tools/polly/lib/External/isl/isl_multi_arith_templ.c [2:3]
+ tools/polly/lib/External/isl/isl_multi_dim_id_templ.c [2:3]
+ tools/polly/lib/External/isl/isl_multi_dims.c [2:3]
+ tools/polly/lib/External/isl/isl_multi_gist.c [2:3]
+ tools/polly/lib/External/isl/isl_multi_intersect.c [2:3]
+ tools/polly/lib/External/isl/isl_multi_templ.c [2:3]
+ tools/polly/lib/External/isl/isl_multi_tuple_id_templ.c [2:3]
+ tools/polly/lib/External/isl/isl_pw_templ.c [2:4]
+ tools/polly/lib/External/isl/isl_tab_lexopt_templ.c [2:4]
+ tools/polly/lib/External/isl/isl_type_check_equal_space_templ.c [2:2]
+
+KEEP COPYRIGHT_SERVICE_LABEL ddd440bf10074cb20bfb5212ec27c689
+BELONGS tools/polly/lib/External/isl/ya.make
+ License text:
+ * Copyright 2018 Sven Verdoolaege
+ * Copyright 2019 Cerebras Systems
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ tools/polly/lib/External/isl/isl_pw_range_tuple_id_templ.c [2:3]
+ tools/polly/lib/External/isl/isl_unbind_params_templ.c [2:2]
+
+KEEP COPYRIGHT_SERVICE_LABEL deda618abf56343269e2c3dd9368f6b4
+BELONGS lib/Transforms/Utils/ya.make
+ License text:
+ if (C) {
+ ValueLatticeElement CV;
+ CV.markConstant(C);
+ mergeInValue(&I, CV);
+ return;
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ lib/Transforms/Utils/SCCPSolver.cpp [1332:1336]
+
+KEEP COPYRIGHT_SERVICE_LABEL e0df4d49b106bd82efbad864a93ffce1
+BELONGS include/ya.make
+ License text:
+ // <copyright file="Program.cpp" company="Microsoft Corporation">
+ // Copyright (C) Microsoft Corporation. All rights reserved.
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ include/llvm/WindowsDriver/MSVCSetupApi.h [8:9]
+ include/llvm/WindowsDriver/MSVCSetupApi.h [15:15]
+
+KEEP COPYRIGHT_SERVICE_LABEL e10011579f8ed393a2b2f8c4c64b386e
+BELONGS lib/Target/WebAssembly/ya.make
+ License text:
+ IRBuilder<> IRB(C);
+ SmallVector<Instruction *, 64> ToErase;
+ // Vector of %setjmpTable values
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp [1269:1271]
+ lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp [1467:1468]
+
+KEEP COPYRIGHT_SERVICE_LABEL e7bd6251e104540a6ebba2803dc81206
+BELONGS lib/Support/ya.make
+ License text:
+ Copyright 2019 Jack O'Connor and Samuel Neves
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ lib/Support/BLAKE3/LICENSE [318:318]
+
+SKIP COPYRIGHT_SERVICE_LABEL eb7be45e1211bdf61ad67a5266a4d362
+BELONGS lib/DebugInfo/DWARF/ya.make
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ lib/DebugInfo/DWARF/DWARFAcceleratorTable.cpp [390:398]
+
+KEEP COPYRIGHT_SERVICE_LABEL ec68b124b848dab3076211e5404e326f
+BELONGS tools/polly/lib/External/isl/ya.make tools/polly/lib/External/ppcg/ya.make
+ License text:
+ * Copyright 2010 INRIA Saclay
+ * Copyright 2013 Ecole Normale Superieure
+ * Copyright 2015 Sven Verdoolaege
+ * Copyright 2019 Cerebras Systems
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ tools/polly/lib/External/isl/isl_point.c [2:5]
+ tools/polly/lib/External/ppcg/gpu_group.c [2:4]
+ tools/polly/lib/External/ppcg/gpu_hybrid.c [2:3]
+ tools/polly/lib/External/ppcg/hybrid.c [2:3]
+ tools/polly/lib/External/ppcg/ppcg.c [2:4]
+
+KEEP COPYRIGHT_SERVICE_LABEL ec84ddedf1dc5016b4ff73d6f7a41324
+BELONGS tools/polly/lib/External/isl/ya.make
+ License text:
+ * Copyright 2013-2014 Ecole Normale Superieure
+ * Copyright 2014 INRIA Rocquencourt
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ tools/polly/lib/External/isl/isl_schedule_band.c [2:3]
+ tools/polly/lib/External/isl/isl_schedule_node.c [2:4]
+ tools/polly/lib/External/isl/isl_schedule_tree.c [2:4]
+ tools/polly/lib/External/isl/isl_space.c [2:5]
+ tools/polly/lib/External/isl/isl_union_map.c [2:5]
+
+SKIP COPYRIGHT_SERVICE_LABEL ecb0e3c869f22cc8f0b923cd3df84147
+BELONGS include/ya.make lib/Transforms/Utils/ya.make
+ License text:
+ // if (c) if (c)
+ // X1 = ... X1 = ...
+ // else else
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ include/llvm/Transforms/Utils/LCSSA.h [21:23]
+ lib/Transforms/Utils/LCSSA.cpp [14:16]
+
+SKIP COPYRIGHT_SERVICE_LABEL f2bff1d61453f018fe1397173f3dfa43
+BELONGS lib/Support/ya.make
+ License text:
+ : Code(C) {
+ ErrMsg = "Stream Error: ";
+ switch (C) {
+ case stream_error_code::unspecified:
+ ErrMsg += "An unspecified error has occurred.";
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ lib/Support/BinaryStreamError.cpp [23:27]
+
+KEEP COPYRIGHT_SERVICE_LABEL f38404a02e69976b94505df0c691bed0
+BELONGS include/ya.make lib/Support/ya.make
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ include/llvm/Support/MD5.h [20:25]
+ lib/Support/MD5.cpp [13:18]
+
+KEEP COPYRIGHT_SERVICE_LABEL f44570b515bc9a09cb026ea674529c30
+BELONGS tools/polly/lib/External/isl/ya.make
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ tools/polly/lib/External/isl/isl_coalesce.c [2:7]
+ tools/polly/lib/External/isl/isl_map.c [2:8]
+ tools/polly/lib/External/isl/isl_schedule_tree.c [2:4]
+ tools/polly/lib/External/isl/isl_scheduler.c [2:6]
+
+KEEP COPYRIGHT_SERVICE_LABEL f45dfdcd89f19aa627fe9a2953c87df6
+BELONGS tools/polly/lib/External/isl/ya.make
+ License text:
+ Copyright (c) 2012 Qualcomm Innovation Center, Inc. All rights reserved.
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ tools/polly/lib/External/isl/imath/gmp_compat.c [6:6]
+ tools/polly/lib/External/isl/imath/gmp_compat.h [6:6]
+
+SKIP COPYRIGHT_SERVICE_LABEL f894c606faef3610df8e9cfcc3a19c2e
+BELONGS lib/CodeGen/ya.make
+ License text:
+ // Fix (C).
+ if (Restore && (MLI->getLoopFor(Save) || MLI->getLoopFor(Restore))) {
+ if (MLI->getLoopDepth(Save) > MLI->getLoopDepth(Restore)) {
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ lib/CodeGen/ShrinkWrap.cpp [418:420]
+
+SKIP COPYRIGHT_SERVICE_LABEL fb5e17767e7a12f5b9913e726533b21e
+BELONGS lib/Transforms/Instrumentation/ya.make
+ License text:
+ Value *Sc = getShadow(C);
+ Value *Sd = getShadow(D);
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ lib/Transforms/Instrumentation/MemorySanitizer.cpp [4364:4365]
+
+KEEP COPYRIGHT_SERVICE_LABEL fd1d589caaee8f6dbff3390e3f4e472b
+BELONGS tools/polly/lib/External/isl/ya.make
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ tools/polly/lib/External/isl/isl_aff.c [2:8]
+
+KEEP COPYRIGHT_SERVICE_LABEL fd871c9566d755e525c0d6c201633a49
+BELONGS tools/polly/lib/External/isl/ya.make
+ License text:
+ IMath is Copyright © 2002-2009 Michael J. Fromberger
+ You may use it subject to the following Licensing Terms:
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ tools/polly/lib/External/isl/imath/LICENSE [1:2]
diff --git a/contrib/libs/llvm16/.yandex_meta/devtools.licenses.report b/contrib/libs/llvm16/.yandex_meta/devtools.licenses.report
new file mode 100644
index 0000000000..e8b0d1a09a
--- /dev/null
+++ b/contrib/libs/llvm16/.yandex_meta/devtools.licenses.report
@@ -0,0 +1,21157 @@
+# File format ($ symbol means the beginning of a line):
+#
+# $ # this message
+# $ # =======================
+# $ # comments (all commentaries should start with some number of spaces and a # symbol)
+# ${action} {license spdx} {license text hash}
+# $BELONGS ./ya/make/file/relative/path/1/ya.make ./ya/make/2/ya.make
+# ${all_file_action} filename
+# $ # user commentaries (many lines)
+# $ generated description - files with this license, license text... (some number of lines that starts with some number of spaces, do not modify)
+# ${action} {license spdx} {license text hash}
+# $BELONGS ./ya/make/file/relative/path/3/ya.make
+# ${all_file_action} filename
+# $ # user commentaries
+# $ generated description
+# $ ...
+#
+# You can modify action, all_file_action and add commentaries
+# Available actions:
+# keep - keep license in contrib and use in credits
+# skip - skip license
+# remove - remove all files with this license
+# rename - save license text/links into licenses texts file, but not store SPDX into LICENSE macro. You should store correct license id into devtools.license.spdx.txt file
+#
+# {all file action} records will be generated when license text contains filename that exists on filesystem (in contrib directory)
+# We suppose that those files can contain some license info
+# Available all file actions:
+# FILE_IGNORE - ignore file (do nothing)
+# FILE_INCLUDE - include all file data into licenses text file
+# =======================
+
+KEEP Public-Domain AND CC0-1.0 01d55fe4c7a8be726acb6db8b21ede04
+BELONGS lib/Support/ya.make
+ License text:
+ This work is released into the public domain with CC0 1.0. Alternatively, it is
+ Scancode info:
+ Original SPDX id: LicenseRef-scancode-public-domain
+ Score : 100.00
+ Match type : TEXT
+ Links : http://www.linfo.org/publicdomain.html, https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/public-domain.LICENSE
+ Files with this license:
+ lib/Support/BLAKE3/LICENSE [1:1]
+ Scancode info:
+ Original SPDX id: CC0-1.0
+ Score : 95.00
+ Match type : REFERENCE
+ Links : http://creativecommons.org/publicdomain/zero/1.0/, http://creativecommons.org/publicdomain/zero/1.0/legalcode, https://spdx.org/licenses/CC0-1.0
+ Files with this license:
+ lib/Support/BLAKE3/LICENSE [1:1]
+
+KEEP Apache-2.0 WITH LLVM-exception 03f8757f5610f97de538dcf0b0cd9238
+BELONGS lib/Target/PowerPC/ya.make
+ License text:
+ // SPDX-License-Identifier) Apache-2.0 WITH LLVM-exception
+ Scancode info:
+ Original SPDX id: Apache-2.0
+ Score : 100.00
+ Match type : TAG
+ Links : http://www.apache.org/licenses/, http://www.apache.org/licenses/LICENSE-2.0, https://spdx.org/licenses/Apache-2.0
+ Files with this license:
+ lib/Target/PowerPC/PPCMacroFusion.def [5:5]
+ Scancode info:
+ Original SPDX id: LLVM-exception
+ Score : 100.00
+ Match type : TAG
+ Links : http://llvm.org/foundation/relicensing/LICENSE.txt, https://spdx.org/licenses/LLVM-exception
+ Files with this license:
+ lib/Target/PowerPC/PPCMacroFusion.def [5:5]
+
+KEEP MIT 0775f17ed13c3609fd7c31449f8749d3
+BELONGS tools/polly/lib/External/isl/ya.make
+FILE_INCLUDE tools/polly/lib/External/isl/AUTHORS found in files: tools/polly/lib/External/isl/isl_sort.c at line 19
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: MIT
+ Score : 100.00
+ Match type : TEXT
+ Links : http://opensource.org/licenses/mit-license.php, https://spdx.org/licenses/MIT
+ Files with this license:
+ tools/polly/lib/External/isl/isl_sort.c [6:22]
+
+KEEP Apache-2.0 WITH LLVM-exception 13ccb04dbdb720576c825193b2c4014c
+BELONGS include/ya.make
+ License text:
+ |* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ |* See https://llvm.org/LICENSE.txt for license information.
+ Scancode info:
+ Original SPDX id: Apache-2.0
+ Score : 100.00
+ Match type : NOTICE
+ Links : http://www.apache.org/licenses/, http://www.apache.org/licenses/LICENSE-2.0, https://spdx.org/licenses/Apache-2.0
+ Files with this license:
+ include/llvm/IR/FixedMetadataKinds.def [3:4]
+ include/llvm/ProfileData/InstrProfData.inc [3:4]
+ include/llvm/ProfileData/MIBEntryDef.inc [3:4]
+ include/llvm/ProfileData/MemProfData.inc [5:6]
+ Scancode info:
+ Original SPDX id: LLVM-exception
+ Score : 100.00
+ Match type : NOTICE
+ Links : http://llvm.org/foundation/relicensing/LICENSE.txt, https://spdx.org/licenses/LLVM-exception
+ Files with this license:
+ include/llvm/IR/FixedMetadataKinds.def [3:4]
+ include/llvm/ProfileData/InstrProfData.inc [3:4]
+ include/llvm/ProfileData/MIBEntryDef.inc [3:4]
+ include/llvm/ProfileData/MemProfData.inc [5:6]
+
+KEEP Apache-2.0 WITH LLVM-exception 1b60f7f111a6969efddaa6e0b74a858e
+BELONGS include/ya.make lib/CodeGen/ya.make
+ License text:
+ /// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ Scancode info:
+ Original SPDX id: Apache-2.0
+ Score : 100.00
+ Match type : TAG
+ Links : http://www.apache.org/licenses/, http://www.apache.org/licenses/LICENSE-2.0, https://spdx.org/licenses/Apache-2.0
+ Files with this license:
+ include/llvm/CodeGen/LazyMachineBlockFrequencyInfo.h [12:12]
+ include/llvm/CodeGen/MachineOptimizationRemarkEmitter.h [12:12]
+ lib/CodeGen/LazyMachineBlockFrequencyInfo.cpp [5:5]
+ lib/CodeGen/MachineOptimizationRemarkEmitter.cpp [5:5]
+ Scancode info:
+ Original SPDX id: LLVM-exception
+ Score : 100.00
+ Match type : TAG
+ Links : http://llvm.org/foundation/relicensing/LICENSE.txt, https://spdx.org/licenses/LLVM-exception
+ Files with this license:
+ include/llvm/CodeGen/LazyMachineBlockFrequencyInfo.h [12:12]
+ include/llvm/CodeGen/MachineOptimizationRemarkEmitter.h [12:12]
+ lib/CodeGen/LazyMachineBlockFrequencyInfo.cpp [5:5]
+ lib/CodeGen/MachineOptimizationRemarkEmitter.cpp [5:5]
+
+KEEP Apache-2.0 1ba076899c705f8bab7d55c2bed4286c
+BELONGS lib/Support/ya.make
+ License text:
+ APPENDIX: How to apply the Apache License to your work.
+ Scancode info:
+ Original SPDX id: Apache-2.0
+ Score : 90.00
+ Match type : NOTICE
+ Links : http://www.apache.org/licenses/, http://www.apache.org/licenses/LICENSE-2.0, https://spdx.org/licenses/Apache-2.0
+ Files with this license:
+ lib/Support/BLAKE3/LICENSE [307:307]
+
+KEEP Unicode 27d484fecd9f547fb4a260aa9922daeb
+BELONGS lib/Support/ya.make
+FILE_INCLUDE lib/Support/BLAKE3/LICENSE found in files: lib/Support/UnicodeNameToCodepointGenerated.cpp at line 17
+FILE_INCLUDE tools/polly/lib/External/isl/LICENSE found in files: lib/Support/UnicodeNameToCodepointGenerated.cpp at line 17
+FILE_INCLUDE tools/polly/lib/External/isl/imath/LICENSE found in files: lib/Support/UnicodeNameToCodepointGenerated.cpp at line 17
+ License text:
+ UNICODE, INC. LICENSE AGREEMENT - DATA FILES AND SOFTWARE
+ Scancode info:
+ Original SPDX id: LicenseRef-scancode-unknown-license-reference
+ Score : 11.00
+ Match type : INTRO
+ Links : https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/unknown-license-reference.LICENSE
+ Files with this license:
+ lib/Support/UnicodeNameToCodepointGenerated.cpp [17:17]
+
+SKIP LicenseRef-scancode-proprietary-license 2c4dd50d63fb261233446bdde3c23089
+BELONGS tools/polly/lib/ya.make
    # parameter description
+ License text:
+ /// @param Relevant The map with mapping that may not be modified.
+ Scancode info:
+ Original SPDX id: LicenseRef-scancode-proprietary-license
+ Score : 100.00
+ Match type : REFERENCE
+ Links : https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/proprietary-license.LICENSE
+ Files with this license:
+ tools/polly/lib/Transform/DeLICM.cpp [137:137]
+
+KEEP Spencer-94 30eace72605d5577b021570d9aed7b2b
+BELONGS lib/Support/ya.make
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: Spencer-94
+ Score : 100.00
+ Match type : TEXT
+ Links : http://search.cpan.org/~knok/File-MMagic-1.12/MMagic.pm, https://github.com/garyhouston/regex/blob/master/COPYRIGHT, https://spdx.org/licenses/Spencer-94
+ Files with this license:
+ lib/Support/COPYRIGHT.regex [4:22]
+
+KEEP Apache-2.0 34f8c1142fd6208a8be89399cb521df9
+BELONGS lib/Support/ya.make
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: Apache-2.0
+ Score : 100.00
+ Match type : TEXT
+ Links : http://www.apache.org/licenses/, http://www.apache.org/licenses/LICENSE-2.0, https://spdx.org/licenses/Apache-2.0
+ Files with this license:
+ lib/Support/BLAKE3/LICENSE [130:305]
+
+KEEP Apache-2.0 357d2087501b0e25cfa2131afa741aff
+BELONGS lib/Support/ya.make
+ License text:
+ To apply the Apache License to your work, attach the following
+ Scancode info:
+ Original SPDX id: Apache-2.0
+ Score : 90.00
+ Match type : NOTICE
+ Links : http://www.apache.org/licenses/, http://www.apache.org/licenses/LICENSE-2.0, https://spdx.org/licenses/Apache-2.0
+ Files with this license:
+ lib/Support/BLAKE3/LICENSE [309:309]
+
+KEEP CC0-1.0 3853e2a78a247145b4aa16667736f6de
+BELONGS lib/Support/ya.make
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: CC0-1.0
+ Score : 100.00
+ Match type : TEXT
+ Links : http://creativecommons.org/publicdomain/zero/1.0/, http://creativecommons.org/publicdomain/zero/1.0/legalcode, https://spdx.org/licenses/CC0-1.0
+ Files with this license:
+ lib/Support/BLAKE3/LICENSE [6:126]
+
+KEEP MIT 49f5fbc2289b80cbefc6e7607c0538be
+BELONGS include/ya.make
+ License text:
+ // Licensed under the MIT license.
+ Scancode info:
+ Original SPDX id: MIT
+ Score : 100.00
+ Match type : NOTICE
+ Links : http://opensource.org/licenses/mit-license.php, https://spdx.org/licenses/MIT
+ Files with this license:
+ include/llvm/WindowsDriver/MSVCSetupApi.h [10:10]
+
+KEEP Public-Domain 5ab2e1de3ac199ad15211e88647b49aa
+BELONGS include/ya.make lib/Support/ya.make
+ License text:
+ // This code is taken from public domain
+ Scancode info:
+ Original SPDX id: LicenseRef-scancode-public-domain
+ Score : 70.00
+ Match type : REFERENCE
+ Links : http://www.linfo.org/publicdomain.html, https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/public-domain.LICENSE
+ Files with this license:
+ include/llvm/Support/SHA1.h [15:15]
+ lib/Support/SHA1.cpp [9:9]
+
+KEEP Apache-2.0 5b1f5708e6e10c9740fe8b501f9b7c9e
+BELONGS lib/Support/ya.make
+ License text:
+ licensed under the Apache License 2.0.
+ Scancode info:
+ Original SPDX id: Apache-2.0
+ Score : 100.00
+ Match type : NOTICE
+ Links : http://www.apache.org/licenses/, http://www.apache.org/licenses/LICENSE-2.0, https://spdx.org/licenses/Apache-2.0
+ Files with this license:
+ lib/Support/BLAKE3/LICENSE [2:2]
+
+SKIP LGPL-2.1-only 5c2944c74978c553735aedb7ca4e6258
+BELONGS tools/polly/lib/External/isl/ya.make
+ # changelog
+ License text:
+ - change license from LGPL 2.1 to MIT
+ Scancode info:
+ Original SPDX id: LGPL-2.1-only
+ Score : 100.00
+ Match type : REFERENCE
+ Links : http://www.gnu.org/licenses/lgpl-2.1.html, http://www.gnu.org/licenses/lgpl-2.1.txt, https://spdx.org/licenses/LGPL-2.1-only
+ Files with this license:
+ tools/polly/lib/External/isl/ChangeLog [166:166]
+
+KEEP Public-Domain 5c874ed316f5b266c37d6f43b0b9f411
+BELONGS include/ya.make
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: LicenseRef-scancode-public-domain
+ Score : 85.59
+ Match type : NOTICE
+ Links : http://www.linfo.org/publicdomain.html, https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/public-domain.LICENSE
+ Files with this license:
+ include/llvm/Support/MD5.h [14:30]
+ Scancode info:
+ Original SPDX id: LicenseRef-scancode-other-permissive
+ Score : 85.59
+ Match type : NOTICE
+ Links : https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/other-permissive.LICENSE
+ Files with this license:
+ include/llvm/Support/MD5.h [14:30]
+
+KEEP MIT 600d283dba45624ed5aa9b9f58550678
+BELONGS tools/polly/lib/External/isl/ya.make
+FILE_INCLUDE tools/polly/lib/External/isl/AUTHORS found in files: tools/polly/lib/External/isl/imath/gmp_compat.c at line 21, tools/polly/lib/External/isl/imath/gmp_compat.h at line 21, tools/polly/lib/External/isl/imath/imath.c at line 21, tools/polly/lib/External/isl/imath/imath.h at line 21, tools/polly/lib/External/isl/imath/imrat.c at line 21, tools/polly/lib/External/isl/imath/imrat.h at line 21
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: MIT
+ Score : 100.00
+ Match type : TEXT
+ Links : http://opensource.org/licenses/mit-license.php, https://spdx.org/licenses/MIT
+ Files with this license:
+ tools/polly/lib/External/isl/imath/gmp_compat.c [8:24]
+ tools/polly/lib/External/isl/imath/gmp_compat.h [8:24]
+ tools/polly/lib/External/isl/imath/imath.c [8:24]
+ tools/polly/lib/External/isl/imath/imath.h [8:24]
+ tools/polly/lib/External/isl/imath/imrat.c [8:24]
+ tools/polly/lib/External/isl/imath/imrat.h [8:24]
+
+KEEP NCSA 69458bad99c3c8aa775c05e5e859d2b8
+BELONGS ya.make
+ License text:
+ LLVM is open source software. You may freely distribute it under the terms of
+ the license agreement found in LICENSE.txt.
+ Scancode info:
+ Original SPDX id: NCSA
+ Score : 100.00
+ Match type : NOTICE
+ Links : http://www.otm.illinois.edu/faculty/forms/opensource.asp, https://spdx.org/licenses/NCSA
+ Files with this license:
+ README.txt [8:9]
+
+KEEP BSD-3-Clause 6daffccbe81cc81cf1b75435c1342138
+BELONGS lib/Support/ya.make
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: BSD-3-Clause
+ Score : 100.00
+ Match type : TEXT
+ Links : http://www.opensource.org/licenses/BSD-3-Clause, https://spdx.org/licenses/BSD-3-Clause
+ Files with this license:
+ lib/Support/COPYRIGHT.regex [29:51]
+ lib/Support/regcomp.c [11:33]
+ lib/Support/regengine.inc [11:33]
+ lib/Support/regerror.c [11:33]
+ lib/Support/regex2.h [11:33]
+ lib/Support/regex_impl.h [11:33]
+ lib/Support/regexec.c [11:33]
+ lib/Support/regfree.c [11:33]
+ lib/Support/regutils.h [11:33]
+
+KEEP BSD-2-Clause 6e29f89bee6b824e7670e0be0a109eb0
+BELONGS include/ya.make
+ License text:
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+ Scancode info:
+ Original SPDX id: BSD-2-Clause
+ Score : 100.00
+ Match type : REFERENCE
+ Links : http://opensource.org/licenses/bsd-license.php, http://www.opensource.org/licenses/BSD-2-Clause, https://spdx.org/licenses/BSD-2-Clause
+ Files with this license:
+ include/llvm/Support/xxhash.h [13:13]
+
+KEEP Apache-2.0 6f445b2044ede028105b9327341574cc
+BELONGS lib/Support/ya.make
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: Apache-2.0
+ Score : 100.00
+ Match type : NOTICE
+ Links : http://www.apache.org/licenses/, http://www.apache.org/licenses/LICENSE-2.0, https://spdx.org/licenses/Apache-2.0
+ Files with this license:
+ lib/Support/BLAKE3/LICENSE [320:330]
+
+KEEP MIT 6f7fa4f1e7e5bf53455ec555e87f09bd
+BELONGS tools/polly/lib/External/isl/ya.make
+FILE_INCLUDE tools/polly/lib/External/isl/AUTHORS found in files: tools/polly/lib/External/isl/imath/README.md at line 24
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: MIT
+ Score : 100.00
+ Match type : TEXT
+ Links : http://opensource.org/licenses/mit-license.php, https://spdx.org/licenses/MIT
+ Files with this license:
+ tools/polly/lib/External/isl/imath/README.md [11:27]
+
+KEEP Apache-2.0 WITH LLVM-exception 705a75a25bce12b12334e8fc577d8a46
+BELONGS include/ya.make lib/Support/ya.make
+ License text:
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ Scancode info:
+ Original SPDX id: Apache-2.0
+ Score : 100.00
+ Match type : NOTICE
+ Links : http://www.apache.org/licenses/, http://www.apache.org/licenses/LICENSE-2.0, https://spdx.org/licenses/Apache-2.0
+ Files with this license:
+ include/llvm/Support/ConvertUTF.h [10:11]
+ include/llvm/Support/Solaris/sys/regset.h [10:11]
+ lib/Support/ConvertUTF.cpp [3:4]
+ Scancode info:
+ Original SPDX id: LLVM-exception
+ Score : 100.00
+ Match type : NOTICE
+ Links : http://llvm.org/foundation/relicensing/LICENSE.txt, https://spdx.org/licenses/LLVM-exception
+ Files with this license:
+ include/llvm/Support/ConvertUTF.h [10:11]
+ include/llvm/Support/Solaris/sys/regset.h [10:11]
+ lib/Support/ConvertUTF.cpp [3:4]
+
+KEEP Apache-2.0 WITH LLVM-exception 72b6f4956d6b77a76dc1fd0eec36d594
+BELONGS include/ya.make
+ License text:
+ /* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception */
+ Scancode info:
+ Original SPDX id: Apache-2.0
+ Score : 100.00
+ Match type : TAG
+ Links : http://www.apache.org/licenses/, http://www.apache.org/licenses/LICENSE-2.0, https://spdx.org/licenses/Apache-2.0
+ Files with this license:
+ include/llvm-c/Object.h [13:13]
+ include/llvm-c/Target.h [13:13]
+ include/llvm/Config/abi-breaking.h [13:13]
+ include/llvm/Config/abi-breaking.h.cmake [6:6]
+ include/llvm/Config/llvm-config-linux.h [13:13]
+ include/llvm/Config/llvm-config-osx.h [6:6]
+ include/llvm/Config/llvm-config-win.h [6:6]
+ include/llvm/Config/llvm-config.h.cmake [6:6]
+ Scancode info:
+ Original SPDX id: LLVM-exception
+ Score : 100.00
+ Match type : TAG
+ Links : http://llvm.org/foundation/relicensing/LICENSE.txt, https://spdx.org/licenses/LLVM-exception
+ Files with this license:
+ include/llvm-c/Object.h [13:13]
+ include/llvm-c/Target.h [13:13]
+ include/llvm/Config/abi-breaking.h [13:13]
+ include/llvm/Config/abi-breaking.h.cmake [6:6]
+ include/llvm/Config/llvm-config-linux.h [13:13]
+ include/llvm/Config/llvm-config-osx.h [6:6]
+ include/llvm/Config/llvm-config-win.h [6:6]
+ include/llvm/Config/llvm-config.h.cmake [6:6]
+
+KEEP Apache-2.0 WITH LLVM-exception 755ab7da3ff8c5d6ae90bdbebd177e49
+BELONGS include/ya.make lib/Analysis/ya.make lib/AsmParser/ya.make lib/BinaryFormat/ya.make lib/Bitcode/Reader/ya.make lib/Bitcode/Writer/ya.make lib/Bitstream/Reader/ya.make lib/CodeGen/AsmPrinter/ya.make lib/CodeGen/GlobalISel/ya.make lib/CodeGen/MIRParser/ya.make lib/CodeGen/SelectionDAG/ya.make lib/CodeGen/ya.make lib/DWARFLinker/ya.make lib/DWP/ya.make lib/DebugInfo/CodeView/ya.make lib/DebugInfo/DWARF/ya.make lib/DebugInfo/GSYM/ya.make lib/DebugInfo/MSF/ya.make lib/DebugInfo/PDB/ya.make lib/DebugInfo/Symbolize/ya.make lib/Debuginfod/ya.make lib/Demangle/ya.make lib/ExecutionEngine/Interpreter/ya.make lib/ExecutionEngine/JITLink/ya.make lib/ExecutionEngine/MCJIT/ya.make lib/ExecutionEngine/Orc/Shared/ya.make lib/ExecutionEngine/Orc/TargetProcess/ya.make lib/ExecutionEngine/Orc/ya.make lib/ExecutionEngine/PerfJITEvents/ya.make lib/ExecutionEngine/RuntimeDyld/ya.make lib/ExecutionEngine/ya.make lib/FileCheck/ya.make lib/Frontend/HLSL/ya.make lib/Frontend/OpenACC/ya.make lib/Frontend/OpenMP/ya.make lib/FuzzMutate/ya.make lib/IR/ya.make lib/IRPrinter/ya.make lib/IRReader/ya.make lib/InterfaceStub/ya.make lib/LTO/ya.make lib/LineEditor/ya.make lib/Linker/ya.make lib/MC/MCDisassembler/ya.make lib/MC/MCParser/ya.make lib/MC/ya.make lib/MCA/ya.make lib/ObjCopy/ya.make lib/Object/ya.make lib/ObjectYAML/ya.make lib/Option/ya.make lib/Passes/ya.make lib/ProfileData/Coverage/ya.make lib/ProfileData/ya.make lib/Remarks/ya.make lib/Support/ya.make lib/TableGen/ya.make lib/Target/AArch64/AsmParser/ya.make lib/Target/AArch64/Disassembler/ya.make lib/Target/AArch64/MCTargetDesc/ya.make lib/Target/AArch64/TargetInfo/ya.make lib/Target/AArch64/Utils/ya.make lib/Target/AArch64/ya.make lib/Target/ARM/AsmParser/ya.make lib/Target/ARM/Disassembler/ya.make lib/Target/ARM/MCTargetDesc/ya.make lib/Target/ARM/TargetInfo/ya.make lib/Target/ARM/Utils/ya.make lib/Target/ARM/ya.make lib/Target/BPF/AsmParser/ya.make lib/Target/BPF/Disassembler/ya.make lib/Target/BPF/MCTargetDesc/ya.make 
lib/Target/BPF/TargetInfo/ya.make lib/Target/BPF/ya.make lib/Target/LoongArch/AsmParser/ya.make lib/Target/LoongArch/Disassembler/ya.make lib/Target/LoongArch/MCTargetDesc/ya.make lib/Target/LoongArch/TargetInfo/ya.make lib/Target/LoongArch/ya.make lib/Target/NVPTX/MCTargetDesc/ya.make lib/Target/NVPTX/TargetInfo/ya.make lib/Target/NVPTX/ya.make lib/Target/PowerPC/AsmParser/ya.make lib/Target/PowerPC/Disassembler/ya.make lib/Target/PowerPC/MCTargetDesc/ya.make lib/Target/PowerPC/TargetInfo/ya.make lib/Target/PowerPC/ya.make lib/Target/WebAssembly/AsmParser/ya.make lib/Target/WebAssembly/Disassembler/ya.make lib/Target/WebAssembly/MCTargetDesc/ya.make lib/Target/WebAssembly/TargetInfo/ya.make lib/Target/WebAssembly/Utils/ya.make lib/Target/WebAssembly/ya.make lib/Target/X86/AsmParser/ya.make lib/Target/X86/Disassembler/ya.make lib/Target/X86/MCA/ya.make lib/Target/X86/MCTargetDesc/ya.make lib/Target/X86/TargetInfo/ya.make lib/Target/X86/ya.make lib/Target/ya.make lib/TargetParser/ya.make lib/TextAPI/ya.make lib/ToolDrivers/llvm-dlltool/ya.make lib/ToolDrivers/llvm-lib/ya.make lib/Transforms/AggressiveInstCombine/ya.make lib/Transforms/CFGuard/ya.make lib/Transforms/Coroutines/ya.make lib/Transforms/IPO/ya.make lib/Transforms/InstCombine/ya.make lib/Transforms/Instrumentation/ya.make lib/Transforms/ObjCARC/ya.make lib/Transforms/Scalar/ya.make lib/Transforms/Utils/ya.make lib/Transforms/Vectorize/ya.make lib/WindowsDriver/ya.make lib/WindowsManifest/ya.make lib/XRay/ya.make tools/bugpoint/ya.make tools/dsymutil/ya.make tools/gold/ya.make tools/llc/ya.make tools/lli/ChildTarget/ya.make tools/lli/ya.make tools/llvm-ar/ya.make tools/llvm-as/ya.make tools/llvm-bcanalyzer/ya.make tools/llvm-cat/ya.make tools/llvm-cfi-verify/lib/ya.make tools/llvm-cfi-verify/ya.make tools/llvm-config/ya.make tools/llvm-cov/ya.make tools/llvm-cvtres/ya.make tools/llvm-cxxdump/ya.make tools/llvm-cxxfilt/ya.make tools/llvm-cxxmap/ya.make tools/llvm-diff/lib/ya.make tools/llvm-diff/ya.make 
tools/llvm-dis/ya.make tools/llvm-dwarfdump/ya.make tools/llvm-dwp/ya.make tools/llvm-exegesis/lib/AArch64/ya.make tools/llvm-exegesis/lib/PowerPC/ya.make tools/llvm-exegesis/lib/X86/ya.make tools/llvm-exegesis/lib/ya.make tools/llvm-exegesis/ya.make tools/llvm-extract/ya.make tools/llvm-gsymutil/ya.make tools/llvm-ifs/ya.make tools/llvm-jitlink/llvm-jitlink-executor/ya.make tools/llvm-jitlink/ya.make tools/llvm-libtool-darwin/ya.make tools/llvm-link/ya.make tools/llvm-lipo/ya.make tools/llvm-lto/ya.make tools/llvm-lto2/ya.make tools/llvm-mc/ya.make tools/llvm-mca/ya.make tools/llvm-ml/ya.make tools/llvm-modextract/ya.make tools/llvm-mt/ya.make tools/llvm-nm/ya.make tools/llvm-objcopy/ya.make tools/llvm-objdump/ya.make tools/llvm-opt-report/ya.make tools/llvm-pdbutil/ya.make tools/llvm-profdata/ya.make tools/llvm-profgen/ya.make tools/llvm-rc/ya.make tools/llvm-readobj/ya.make tools/llvm-reduce/ya.make tools/llvm-rtdyld/ya.make tools/llvm-size/ya.make tools/llvm-split/ya.make tools/llvm-stress/ya.make tools/llvm-strings/ya.make tools/llvm-symbolizer/ya.make tools/llvm-undname/ya.make tools/llvm-xray/ya.make tools/lto/ya.make tools/obj2yaml/ya.make tools/opt/ya.make tools/polly/lib/ya.make tools/remarks-shlib/ya.make tools/sancov/ya.make tools/sanstats/ya.make tools/verify-uselistorder/ya.make tools/yaml2obj/ya.make utils/TableGen/GlobalISel/ya.make utils/TableGen/ya.make ya.make
+ License text:
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ // See https://llvm.org/LICENSE.txt for license information.
+ Scancode info:
+ Original SPDX id: Apache-2.0
+ Score : 100.00
+ Match type : NOTICE
+ Links : http://www.apache.org/licenses/, http://www.apache.org/licenses/LICENSE-2.0, https://spdx.org/licenses/Apache-2.0
+ Files with this license:
+ include/llvm-c/DebugInfo.h [10:11]
+ include/llvm/ADT/APFixedPoint.h [10:11]
+ include/llvm/ADT/APFloat.h [10:11]
+ include/llvm/ADT/APInt.h [10:11]
+ include/llvm/ADT/APSInt.h [10:11]
+ include/llvm/ADT/AddressRanges.h [10:11]
+ include/llvm/ADT/AllocatorList.h [10:11]
+ include/llvm/ADT/Any.h [10:11]
+ include/llvm/ADT/ArrayRef.h [10:11]
+ include/llvm/ADT/BitVector.h [10:11]
+ include/llvm/ADT/Bitfields.h [10:11]
+ include/llvm/ADT/BitmaskEnum.h [10:11]
+ include/llvm/ADT/BreadthFirstIterator.h [10:11]
+ include/llvm/ADT/CachedHashString.h [10:11]
+ include/llvm/ADT/CoalescingBitVector.h [10:11]
+ include/llvm/ADT/CombinationGenerator.h [10:11]
+ include/llvm/ADT/DAGDeltaAlgorithm.h [10:11]
+ include/llvm/ADT/DeltaAlgorithm.h [10:11]
+ include/llvm/ADT/DenseMap.h [10:11]
+ include/llvm/ADT/DenseMapInfo.h [10:11]
+ include/llvm/ADT/DenseSet.h [10:11]
+ include/llvm/ADT/DepthFirstIterator.h [10:11]
+ include/llvm/ADT/DirectedGraph.h [10:11]
+ include/llvm/ADT/EnumeratedArray.h [10:11]
+ include/llvm/ADT/EpochTracker.h [10:11]
+ include/llvm/ADT/EquivalenceClasses.h [10:11]
+ include/llvm/ADT/FloatingPointMode.h [10:11]
+ include/llvm/ADT/FoldingSet.h [10:11]
+ include/llvm/ADT/FunctionExtras.h [10:11]
+ include/llvm/ADT/GenericCycleImpl.h [10:11]
+ include/llvm/ADT/GenericCycleInfo.h [10:11]
+ include/llvm/ADT/GenericSSAContext.h [10:11]
+ include/llvm/ADT/GenericUniformityImpl.h [10:11]
+ include/llvm/ADT/GenericUniformityInfo.h [10:11]
+ include/llvm/ADT/GraphTraits.h [10:11]
+ include/llvm/ADT/Hashing.h [10:11]
+ include/llvm/ADT/ImmutableList.h [10:11]
+ include/llvm/ADT/ImmutableMap.h [10:11]
+ include/llvm/ADT/ImmutableSet.h [10:11]
+ include/llvm/ADT/IndexedMap.h [10:11]
+ include/llvm/ADT/IntEqClasses.h [10:11]
+ include/llvm/ADT/IntervalMap.h [10:11]
+ include/llvm/ADT/IntervalTree.h [10:11]
+ include/llvm/ADT/IntrusiveRefCntPtr.h [10:11]
+ include/llvm/ADT/MapVector.h [10:11]
+ include/llvm/ADT/None.h [10:11]
+ include/llvm/ADT/Optional.h [10:11]
+ include/llvm/ADT/PackedVector.h [10:11]
+ include/llvm/ADT/PointerEmbeddedInt.h [10:11]
+ include/llvm/ADT/PointerIntPair.h [10:11]
+ include/llvm/ADT/PointerSumType.h [10:11]
+ include/llvm/ADT/PointerUnion.h [10:11]
+ include/llvm/ADT/PostOrderIterator.h [10:11]
+ include/llvm/ADT/PriorityQueue.h [10:11]
+ include/llvm/ADT/PriorityWorklist.h [10:11]
+ include/llvm/ADT/SCCIterator.h [10:11]
+ include/llvm/ADT/STLExtras.h [10:11]
+ include/llvm/ADT/STLForwardCompat.h [10:11]
+ include/llvm/ADT/STLFunctionalExtras.h [10:11]
+ include/llvm/ADT/ScopeExit.h [10:11]
+ include/llvm/ADT/ScopedHashTable.h [10:11]
+ include/llvm/ADT/Sequence.h [10:11]
+ include/llvm/ADT/SetOperations.h [10:11]
+ include/llvm/ADT/SetVector.h [10:11]
+ include/llvm/ADT/SmallBitVector.h [10:11]
+ include/llvm/ADT/SmallPtrSet.h [10:11]
+ include/llvm/ADT/SmallSet.h [10:11]
+ include/llvm/ADT/SmallString.h [10:11]
+ include/llvm/ADT/SmallVector.h [10:11]
+ include/llvm/ADT/SparseBitVector.h [10:11]
+ include/llvm/ADT/SparseMultiSet.h [10:11]
+ include/llvm/ADT/SparseSet.h [10:11]
+ include/llvm/ADT/Statistic.h [10:11]
+ include/llvm/ADT/StringExtras.h [10:11]
+ include/llvm/ADT/StringMap.h [10:11]
+ include/llvm/ADT/StringMapEntry.h [10:11]
+ include/llvm/ADT/StringRef.h [10:11]
+ include/llvm/ADT/StringSet.h [10:11]
+ include/llvm/ADT/StringSwitch.h [10:11]
+ include/llvm/ADT/TinyPtrVector.h [10:11]
+ include/llvm/ADT/Triple.h [10:11]
+ include/llvm/ADT/Twine.h [10:11]
+ include/llvm/ADT/TypeSwitch.h [10:11]
+ include/llvm/ADT/Uniformity.h [10:11]
+ include/llvm/ADT/UniqueVector.h [10:11]
+ include/llvm/ADT/bit.h [10:11]
+ include/llvm/ADT/edit_distance.h [10:11]
+ include/llvm/ADT/fallible_iterator.h [10:11]
+ include/llvm/ADT/identity.h [10:11]
+ include/llvm/ADT/ilist.h [10:11]
+ include/llvm/ADT/ilist_base.h [10:11]
+ include/llvm/ADT/ilist_iterator.h [10:11]
+ include/llvm/ADT/ilist_node.h [10:11]
+ include/llvm/ADT/ilist_node_base.h [10:11]
+ include/llvm/ADT/ilist_node_options.h [10:11]
+ include/llvm/ADT/iterator.h [10:11]
+ include/llvm/ADT/iterator_range.h [10:11]
+ include/llvm/ADT/simple_ilist.h [10:11]
+ include/llvm/Analysis/AliasAnalysis.h [10:11]
+ include/llvm/Analysis/AliasAnalysisEvaluator.h [10:11]
+ include/llvm/Analysis/AliasSetTracker.h [10:11]
+ include/llvm/Analysis/AssumeBundleQueries.h [10:11]
+ include/llvm/Analysis/AssumptionCache.h [10:11]
+ include/llvm/Analysis/BasicAliasAnalysis.h [10:11]
+ include/llvm/Analysis/BlockFrequencyInfo.h [10:11]
+ include/llvm/Analysis/BlockFrequencyInfoImpl.h [10:11]
+ include/llvm/Analysis/BranchProbabilityInfo.h [10:11]
+ include/llvm/Analysis/CFG.h [10:11]
+ include/llvm/Analysis/CFGPrinter.h [10:11]
+ include/llvm/Analysis/CFGSCCPrinter.h [10:11]
+ include/llvm/Analysis/CGSCCPassManager.h [10:11]
+ include/llvm/Analysis/CallGraph.h [10:11]
+ include/llvm/Analysis/CallGraphSCCPass.h [10:11]
+ include/llvm/Analysis/CallPrinter.h [10:11]
+ include/llvm/Analysis/CaptureTracking.h [10:11]
+ include/llvm/Analysis/CmpInstAnalysis.h [10:11]
+ include/llvm/Analysis/CodeMetrics.h [10:11]
+ include/llvm/Analysis/ConstantFolding.h [10:11]
+ include/llvm/Analysis/ConstraintSystem.h [10:11]
+ include/llvm/Analysis/CostModel.h [10:11]
+ include/llvm/Analysis/CycleAnalysis.h [10:11]
+ include/llvm/Analysis/DDG.h [10:11]
+ include/llvm/Analysis/DDGPrinter.h [10:11]
+ include/llvm/Analysis/DOTGraphTraitsPass.h [10:11]
+ include/llvm/Analysis/Delinearization.h [10:11]
+ include/llvm/Analysis/DemandedBits.h [10:11]
+ include/llvm/Analysis/DependenceAnalysis.h [10:11]
+ include/llvm/Analysis/DependenceGraphBuilder.h [10:11]
+ include/llvm/Analysis/DivergenceAnalysis.h [10:11]
+ include/llvm/Analysis/DomPrinter.h [10:11]
+ include/llvm/Analysis/DomTreeUpdater.h [10:11]
+ include/llvm/Analysis/DominanceFrontier.h [10:11]
+ include/llvm/Analysis/DominanceFrontierImpl.h [10:11]
+ include/llvm/Analysis/EHPersonalities.h [10:11]
+ include/llvm/Analysis/FunctionPropertiesAnalysis.h [10:11]
+ include/llvm/Analysis/GlobalsModRef.h [10:11]
+ include/llvm/Analysis/GuardUtils.h [10:11]
+ include/llvm/Analysis/HeatUtils.h [10:11]
+ include/llvm/Analysis/IRSimilarityIdentifier.h [10:11]
+ include/llvm/Analysis/IVDescriptors.h [10:11]
+ include/llvm/Analysis/IVUsers.h [10:11]
+ include/llvm/Analysis/IndirectCallPromotionAnalysis.h [10:11]
+ include/llvm/Analysis/IndirectCallVisitor.h [10:11]
+ include/llvm/Analysis/InlineAdvisor.h [10:11]
+ include/llvm/Analysis/InlineCost.h [10:11]
+ include/llvm/Analysis/InlineModelFeatureMaps.h [10:11]
+ include/llvm/Analysis/InlineOrder.h [10:11]
+ include/llvm/Analysis/InlineSizeEstimatorAnalysis.h [10:11]
+ include/llvm/Analysis/InstCount.h [10:11]
+ include/llvm/Analysis/InstSimplifyFolder.h [10:11]
+ include/llvm/Analysis/InstructionPrecedenceTracking.h [10:11]
+ include/llvm/Analysis/InstructionSimplify.h [10:11]
+ include/llvm/Analysis/Interval.h [10:11]
+ include/llvm/Analysis/IntervalIterator.h [10:11]
+ include/llvm/Analysis/IntervalPartition.h [10:11]
+ include/llvm/Analysis/IteratedDominanceFrontier.h [10:11]
+ include/llvm/Analysis/LazyBlockFrequencyInfo.h [10:11]
+ include/llvm/Analysis/LazyBranchProbabilityInfo.h [10:11]
+ include/llvm/Analysis/LazyCallGraph.h [10:11]
+ include/llvm/Analysis/LazyValueInfo.h [10:11]
+ include/llvm/Analysis/LegacyDivergenceAnalysis.h [10:11]
+ include/llvm/Analysis/Lint.h [10:11]
+ include/llvm/Analysis/Loads.h [10:11]
+ include/llvm/Analysis/LoopAccessAnalysis.h [10:11]
+ include/llvm/Analysis/LoopAnalysisManager.h [10:11]
+ include/llvm/Analysis/LoopCacheAnalysis.h [10:11]
+ include/llvm/Analysis/LoopInfo.h [10:11]
+ include/llvm/Analysis/LoopInfoImpl.h [10:11]
+ include/llvm/Analysis/LoopIterator.h [10:11]
+ include/llvm/Analysis/LoopNestAnalysis.h [10:11]
+ include/llvm/Analysis/LoopPass.h [10:11]
+ include/llvm/Analysis/LoopUnrollAnalyzer.h [10:11]
+ include/llvm/Analysis/MLInlineAdvisor.h [10:11]
+ include/llvm/Analysis/MLModelRunner.h [10:11]
+ include/llvm/Analysis/MemDerefPrinter.h [10:11]
+ include/llvm/Analysis/MemoryBuiltins.h [10:11]
+ include/llvm/Analysis/MemoryDependenceAnalysis.h [10:11]
+ include/llvm/Analysis/MemoryLocation.h [10:11]
+ include/llvm/Analysis/MemoryProfileInfo.h [10:11]
+ include/llvm/Analysis/MemorySSA.h [10:11]
+ include/llvm/Analysis/MemorySSAUpdater.h [10:11]
+ include/llvm/Analysis/ModelUnderTrainingRunner.h [10:11]
+ include/llvm/Analysis/ModuleDebugInfoPrinter.h [10:11]
+ include/llvm/Analysis/ModuleSummaryAnalysis.h [10:11]
+ include/llvm/Analysis/MustExecute.h [10:11]
+ include/llvm/Analysis/NoInferenceModelRunner.h [10:11]
+ include/llvm/Analysis/ObjCARCAliasAnalysis.h [10:11]
+ include/llvm/Analysis/ObjCARCAnalysisUtils.h [10:11]
+ include/llvm/Analysis/ObjCARCInstKind.h [10:11]
+ include/llvm/Analysis/ObjCARCUtil.h [10:11]
+ include/llvm/Analysis/OptimizationRemarkEmitter.h [10:11]
+ include/llvm/Analysis/OverflowInstAnalysis.h [10:11]
+ include/llvm/Analysis/PHITransAddr.h [10:11]
+ include/llvm/Analysis/Passes.h [10:11]
+ include/llvm/Analysis/PhiValues.h [10:11]
+ include/llvm/Analysis/PostDominators.h [10:11]
+ include/llvm/Analysis/ProfileSummaryInfo.h [10:11]
+ include/llvm/Analysis/PtrUseVisitor.h [10:11]
+ include/llvm/Analysis/RegionInfo.h [10:11]
+ include/llvm/Analysis/RegionInfoImpl.h [10:11]
+ include/llvm/Analysis/RegionIterator.h [10:11]
+ include/llvm/Analysis/RegionPass.h [10:11]
+ include/llvm/Analysis/RegionPrinter.h [10:11]
+ include/llvm/Analysis/ReleaseModeModelRunner.h [10:11]
+ include/llvm/Analysis/ReplayInlineAdvisor.h [10:11]
+ include/llvm/Analysis/ScalarEvolution.h [10:11]
+ include/llvm/Analysis/ScalarEvolutionAliasAnalysis.h [10:11]
+ include/llvm/Analysis/ScalarEvolutionDivision.h [10:11]
+ include/llvm/Analysis/ScalarEvolutionExpressions.h [10:11]
+ include/llvm/Analysis/ScalarEvolutionNormalization.h [10:11]
+ include/llvm/Analysis/ScalarFuncs.def [3:4]
+ include/llvm/Analysis/ScopedNoAliasAA.h [10:11]
+ include/llvm/Analysis/SparsePropagation.h [10:11]
+ include/llvm/Analysis/StackLifetime.h [10:11]
+ include/llvm/Analysis/StackSafetyAnalysis.h [10:11]
+ include/llvm/Analysis/SyncDependenceAnalysis.h [10:11]
+ include/llvm/Analysis/SyntheticCountsUtils.h [10:11]
+ include/llvm/Analysis/TargetFolder.h [10:11]
+ include/llvm/Analysis/TargetLibraryInfo.def [3:4]
+ include/llvm/Analysis/TargetLibraryInfo.h [10:11]
+ include/llvm/Analysis/TargetTransformInfo.h [10:11]
+ include/llvm/Analysis/TargetTransformInfoImpl.h [10:11]
+ include/llvm/Analysis/TensorSpec.h [10:11]
+ include/llvm/Analysis/Trace.h [10:11]
+ include/llvm/Analysis/TypeBasedAliasAnalysis.h [10:11]
+ include/llvm/Analysis/TypeMetadataUtils.h [10:11]
+ include/llvm/Analysis/UniformityAnalysis.h [10:11]
+ include/llvm/Analysis/Utils/ImportedFunctionsInliningStatistics.h [10:11]
+ include/llvm/Analysis/Utils/Local.h [10:11]
+ include/llvm/Analysis/Utils/TFUtils.h [10:11]
+ include/llvm/Analysis/Utils/TrainingLogger.h [10:11]
+ include/llvm/Analysis/ValueLattice.h [10:11]
+ include/llvm/Analysis/ValueLatticeUtils.h [10:11]
+ include/llvm/Analysis/ValueTracking.h [10:11]
+ include/llvm/Analysis/VecFuncs.def [3:4]
+ include/llvm/Analysis/VectorUtils.h [10:11]
+ include/llvm/AsmParser/LLLexer.h [10:11]
+ include/llvm/AsmParser/LLParser.h [10:11]
+ include/llvm/AsmParser/LLToken.h [10:11]
+ include/llvm/AsmParser/Parser.h [10:11]
+ include/llvm/AsmParser/SlotMapping.h [10:11]
+ include/llvm/BinaryFormat/AMDGPUMetadataVerifier.h [10:11]
+ include/llvm/BinaryFormat/COFF.h [10:11]
+ include/llvm/BinaryFormat/DXContainer.h [10:11]
+ include/llvm/BinaryFormat/Dwarf.def [3:4]
+ include/llvm/BinaryFormat/Dwarf.h [10:11]
+ include/llvm/BinaryFormat/ELF.h [10:11]
+ include/llvm/BinaryFormat/GOFF.h [10:11]
+ include/llvm/BinaryFormat/MachO.def [3:4]
+ include/llvm/BinaryFormat/MachO.h [10:11]
+ include/llvm/BinaryFormat/Magic.h [10:11]
+ include/llvm/BinaryFormat/Minidump.h [10:11]
+ include/llvm/BinaryFormat/MinidumpConstants.def [3:4]
+ include/llvm/BinaryFormat/MsgPack.def [3:4]
+ include/llvm/BinaryFormat/MsgPack.h [10:11]
+ include/llvm/BinaryFormat/MsgPackDocument.h [10:11]
+ include/llvm/BinaryFormat/MsgPackReader.h [10:11]
+ include/llvm/BinaryFormat/MsgPackWriter.h [10:11]
+ include/llvm/BinaryFormat/Swift.def [3:4]
+ include/llvm/BinaryFormat/Swift.h [10:11]
+ include/llvm/BinaryFormat/Wasm.h [10:11]
+ include/llvm/BinaryFormat/WasmTraits.h [10:11]
+ include/llvm/BinaryFormat/XCOFF.h [10:11]
+ include/llvm/Bitcode/BitcodeAnalyzer.h [10:11]
+ include/llvm/Bitcode/BitcodeCommon.h [10:11]
+ include/llvm/Bitcode/BitcodeConvenience.h [10:11]
+ include/llvm/Bitcode/BitcodeReader.h [10:11]
+ include/llvm/Bitcode/BitcodeWriter.h [10:11]
+ include/llvm/Bitcode/BitcodeWriterPass.h [10:11]
+ include/llvm/Bitcode/LLVMBitCodes.h [10:11]
+ include/llvm/Bitstream/BitCodeEnums.h [10:11]
+ include/llvm/Bitstream/BitCodes.h [10:11]
+ include/llvm/Bitstream/BitstreamReader.h [10:11]
+ include/llvm/Bitstream/BitstreamWriter.h [10:11]
+ include/llvm/CodeGen/AccelTable.h [10:11]
+ include/llvm/CodeGen/Analysis.h [10:11]
+ include/llvm/CodeGen/AntiDepBreaker.h [10:11]
+ include/llvm/CodeGen/AsmPrinter.h [10:11]
+ include/llvm/CodeGen/AsmPrinterHandler.h [10:11]
+ include/llvm/CodeGen/AtomicExpandUtils.h [10:11]
+ include/llvm/CodeGen/BasicBlockSectionUtils.h [10:11]
+ include/llvm/CodeGen/BasicBlockSectionsProfileReader.h [10:11]
+ include/llvm/CodeGen/BasicTTIImpl.h [10:11]
+ include/llvm/CodeGen/CFIFixup.h [10:11]
+ include/llvm/CodeGen/CSEConfigBase.h [10:11]
+ include/llvm/CodeGen/CalcSpillWeights.h [10:11]
+ include/llvm/CodeGen/CallingConvLower.h [10:11]
+ include/llvm/CodeGen/CodeGenCommonISel.h [10:11]
+ include/llvm/CodeGen/CodeGenPassBuilder.h [10:11]
+ include/llvm/CodeGen/CommandFlags.h [10:11]
+ include/llvm/CodeGen/ComplexDeinterleavingPass.h [10:11]
+ include/llvm/CodeGen/CostTable.h [10:11]
+ include/llvm/CodeGen/DAGCombine.h [10:11]
+ include/llvm/CodeGen/DFAPacketizer.h [10:11]
+ include/llvm/CodeGen/DIE.h [10:11]
+ include/llvm/CodeGen/DIEValue.def [3:4]
+ include/llvm/CodeGen/DbgEntityHistoryCalculator.h [10:11]
+ include/llvm/CodeGen/DebugHandlerBase.h [10:11]
+ include/llvm/CodeGen/DwarfStringPoolEntry.h [10:11]
+ include/llvm/CodeGen/EdgeBundles.h [10:11]
+ include/llvm/CodeGen/ExecutionDomainFix.h [10:11]
+ include/llvm/CodeGen/ExpandReductions.h [10:11]
+ include/llvm/CodeGen/ExpandVectorPredication.h [10:11]
+ include/llvm/CodeGen/FastISel.h [10:11]
+ include/llvm/CodeGen/FaultMaps.h [10:11]
+ include/llvm/CodeGen/FunctionLoweringInfo.h [10:11]
+ include/llvm/CodeGen/GCMetadata.h [10:11]
+ include/llvm/CodeGen/GCMetadataPrinter.h [10:11]
+ include/llvm/CodeGen/GlobalISel/CSEInfo.h [10:11]
+ include/llvm/CodeGen/GlobalISel/CSEMIRBuilder.h [10:11]
+ include/llvm/CodeGen/GlobalISel/CallLowering.h [10:11]
+ include/llvm/CodeGen/GlobalISel/Combiner.h [10:11]
+ include/llvm/CodeGen/GlobalISel/CombinerHelper.h [10:11]
+ include/llvm/CodeGen/GlobalISel/CombinerInfo.h [10:11]
+ include/llvm/CodeGen/GlobalISel/GISelChangeObserver.h [10:11]
+ include/llvm/CodeGen/GlobalISel/GISelKnownBits.h [10:11]
+ include/llvm/CodeGen/GlobalISel/GISelWorkList.h [10:11]
+ include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h [10:11]
+ include/llvm/CodeGen/GlobalISel/IRTranslator.h [10:11]
+ include/llvm/CodeGen/GlobalISel/InlineAsmLowering.h [10:11]
+ include/llvm/CodeGen/GlobalISel/InstructionSelect.h [10:11]
+ include/llvm/CodeGen/GlobalISel/InstructionSelector.h [10:11]
+ include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h [10:11]
+ include/llvm/CodeGen/GlobalISel/LegacyLegalizerInfo.h [10:11]
+ include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h [10:11]
+ include/llvm/CodeGen/GlobalISel/Legalizer.h [10:11]
+ include/llvm/CodeGen/GlobalISel/LegalizerHelper.h [10:11]
+ include/llvm/CodeGen/GlobalISel/LegalizerInfo.h [10:11]
+ include/llvm/CodeGen/GlobalISel/LoadStoreOpt.h [10:11]
+ include/llvm/CodeGen/GlobalISel/Localizer.h [10:11]
+ include/llvm/CodeGen/GlobalISel/LostDebugLocObserver.h [10:11]
+ include/llvm/CodeGen/GlobalISel/MIPatternMatch.h [10:11]
+ include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h [10:11]
+ include/llvm/CodeGen/GlobalISel/RegBankSelect.h [10:11]
+ include/llvm/CodeGen/GlobalISel/Utils.h [10:11]
+ include/llvm/CodeGen/ISDOpcodes.h [10:11]
+ include/llvm/CodeGen/IndirectThunks.h [10:11]
+ include/llvm/CodeGen/IntrinsicLowering.h [10:11]
+ include/llvm/CodeGen/LatencyPriorityQueue.h [10:11]
+ include/llvm/CodeGen/LexicalScopes.h [10:11]
+ include/llvm/CodeGen/LinkAllAsmWriterComponents.h [10:11]
+ include/llvm/CodeGen/LinkAllCodegenComponents.h [10:11]
+ include/llvm/CodeGen/LiveInterval.h [10:11]
+ include/llvm/CodeGen/LiveIntervalCalc.h [10:11]
+ include/llvm/CodeGen/LiveIntervalUnion.h [10:11]
+ include/llvm/CodeGen/LiveIntervals.h [10:11]
+ include/llvm/CodeGen/LivePhysRegs.h [10:11]
+ include/llvm/CodeGen/LiveRangeCalc.h [10:11]
+ include/llvm/CodeGen/LiveRangeEdit.h [10:11]
+ include/llvm/CodeGen/LiveRegMatrix.h [10:11]
+ include/llvm/CodeGen/LiveRegUnits.h [10:11]
+ include/llvm/CodeGen/LiveStacks.h [10:11]
+ include/llvm/CodeGen/LiveVariables.h [10:11]
+ include/llvm/CodeGen/LoopTraversal.h [10:11]
+ include/llvm/CodeGen/LowLevelType.h [10:11]
+ include/llvm/CodeGen/MBFIWrapper.h [10:11]
+ include/llvm/CodeGen/MIRFSDiscriminator.h [10:11]
+ include/llvm/CodeGen/MIRFormatter.h [10:11]
+ include/llvm/CodeGen/MIRParser/MIParser.h [10:11]
+ include/llvm/CodeGen/MIRParser/MIRParser.h [10:11]
+ include/llvm/CodeGen/MIRPrinter.h [10:11]
+ include/llvm/CodeGen/MIRSampleProfile.h [10:11]
+ include/llvm/CodeGen/MIRYamlMapping.h [10:11]
+ include/llvm/CodeGen/MachORelocation.h [10:11]
+ include/llvm/CodeGen/MachineBasicBlock.h [10:11]
+ include/llvm/CodeGen/MachineBlockFrequencyInfo.h [10:11]
+ include/llvm/CodeGen/MachineBranchProbabilityInfo.h [10:11]
+ include/llvm/CodeGen/MachineCFGPrinter.h [10:11]
+ include/llvm/CodeGen/MachineCombinerPattern.h [11:12]
+ include/llvm/CodeGen/MachineConstantPool.h [10:11]
+ include/llvm/CodeGen/MachineCycleAnalysis.h [10:11]
+ include/llvm/CodeGen/MachineDominanceFrontier.h [10:11]
+ include/llvm/CodeGen/MachineDominators.h [10:11]
+ include/llvm/CodeGen/MachineFrameInfo.h [10:11]
+ include/llvm/CodeGen/MachineFunction.h [10:11]
+ include/llvm/CodeGen/MachineFunctionPass.h [10:11]
+ include/llvm/CodeGen/MachineInstr.h [10:11]
+ include/llvm/CodeGen/MachineInstrBuilder.h [10:11]
+ include/llvm/CodeGen/MachineInstrBundle.h [10:11]
+ include/llvm/CodeGen/MachineInstrBundleIterator.h [10:11]
+ include/llvm/CodeGen/MachineJumpTableInfo.h [10:11]
+ include/llvm/CodeGen/MachineLoopInfo.h [10:11]
+ include/llvm/CodeGen/MachineLoopUtils.h [10:11]
+ include/llvm/CodeGen/MachineMemOperand.h [10:11]
+ include/llvm/CodeGen/MachineModuleInfo.h [10:11]
+ include/llvm/CodeGen/MachineModuleInfoImpls.h [10:11]
+ include/llvm/CodeGen/MachineModuleSlotTracker.h [10:11]
+ include/llvm/CodeGen/MachineOperand.h [10:11]
+ include/llvm/CodeGen/MachineOutliner.h [10:11]
+ include/llvm/CodeGen/MachinePassManager.h [10:11]
+ include/llvm/CodeGen/MachinePassRegistry.def [3:4]
+ include/llvm/CodeGen/MachinePassRegistry.h [10:11]
+ include/llvm/CodeGen/MachinePipeliner.h [10:11]
+ include/llvm/CodeGen/MachinePostDominators.h [10:11]
+ include/llvm/CodeGen/MachineRegionInfo.h [10:11]
+ include/llvm/CodeGen/MachineRegisterInfo.h [10:11]
+ include/llvm/CodeGen/MachineSSAContext.h [10:11]
+ include/llvm/CodeGen/MachineSSAUpdater.h [10:11]
+ include/llvm/CodeGen/MachineScheduler.h [10:11]
+ include/llvm/CodeGen/MachineSizeOpts.h [10:11]
+ include/llvm/CodeGen/MachineStableHash.h [10:11]
+ include/llvm/CodeGen/MachineTraceMetrics.h [10:11]
+ include/llvm/CodeGen/MachineUniformityAnalysis.h [10:11]
+ include/llvm/CodeGen/MacroFusion.h [10:11]
+ include/llvm/CodeGen/ModuloSchedule.h [10:11]
+ include/llvm/CodeGen/MultiHazardRecognizer.h [10:11]
+ include/llvm/CodeGen/NonRelocatableStringpool.h [10:11]
+ include/llvm/CodeGen/PBQP/CostAllocator.h [10:11]
+ include/llvm/CodeGen/PBQP/Graph.h [10:11]
+ include/llvm/CodeGen/PBQP/Math.h [10:11]
+ include/llvm/CodeGen/PBQP/ReductionRules.h [10:11]
+ include/llvm/CodeGen/PBQP/Solution.h [10:11]
+ include/llvm/CodeGen/PBQPRAConstraint.h [10:11]
+ include/llvm/CodeGen/ParallelCG.h [10:11]
+ include/llvm/CodeGen/Passes.h [10:11]
+ include/llvm/CodeGen/PreISelIntrinsicLowering.h [10:11]
+ include/llvm/CodeGen/PseudoSourceValue.h [10:11]
+ include/llvm/CodeGen/RDFGraph.h [10:11]
+ include/llvm/CodeGen/RDFLiveness.h [10:11]
+ include/llvm/CodeGen/RDFRegisters.h [10:11]
+ include/llvm/CodeGen/ReachingDefAnalysis.h [10:11]
+ include/llvm/CodeGen/RegAllocCommon.h [10:11]
+ include/llvm/CodeGen/RegAllocPBQP.h [10:11]
+ include/llvm/CodeGen/RegAllocRegistry.h [10:11]
+ include/llvm/CodeGen/Register.h [10:11]
+ include/llvm/CodeGen/RegisterBank.h [10:11]
+ include/llvm/CodeGen/RegisterBankInfo.h [10:11]
+ include/llvm/CodeGen/RegisterClassInfo.h [10:11]
+ include/llvm/CodeGen/RegisterPressure.h [10:11]
+ include/llvm/CodeGen/RegisterScavenging.h [10:11]
+ include/llvm/CodeGen/RegisterUsageInfo.h [10:11]
+ include/llvm/CodeGen/ReplaceWithVeclib.h [10:11]
+ include/llvm/CodeGen/ResourcePriorityQueue.h [10:11]
+ include/llvm/CodeGen/RuntimeLibcalls.h [10:11]
+ include/llvm/CodeGen/SDNodeProperties.td [3:4]
+ include/llvm/CodeGen/ScheduleDAG.h [10:11]
+ include/llvm/CodeGen/ScheduleDAGInstrs.h [10:11]
+ include/llvm/CodeGen/ScheduleDAGMutation.h [10:11]
+ include/llvm/CodeGen/ScheduleDFS.h [10:11]
+ include/llvm/CodeGen/ScheduleHazardRecognizer.h [10:11]
+ include/llvm/CodeGen/SchedulerRegistry.h [10:11]
+ include/llvm/CodeGen/ScoreboardHazardRecognizer.h [10:11]
+ include/llvm/CodeGen/SelectionDAG.h [10:11]
+ include/llvm/CodeGen/SelectionDAGAddressAnalysis.h [10:11]
+ include/llvm/CodeGen/SelectionDAGISel.h [10:11]
+ include/llvm/CodeGen/SelectionDAGNodes.h [10:11]
+ include/llvm/CodeGen/SelectionDAGTargetInfo.h [10:11]
+ include/llvm/CodeGen/SlotIndexes.h [10:11]
+ include/llvm/CodeGen/Spiller.h [10:11]
+ include/llvm/CodeGen/StableHashing.h [10:11]
+ include/llvm/CodeGen/StackMaps.h [10:11]
+ include/llvm/CodeGen/StackProtector.h [10:11]
+ include/llvm/CodeGen/SwiftErrorValueTracking.h [10:11]
+ include/llvm/CodeGen/SwitchLoweringUtils.h [10:11]
+ include/llvm/CodeGen/TailDuplicator.h [10:11]
+ include/llvm/CodeGen/TargetCallingConv.h [10:11]
+ include/llvm/CodeGen/TargetFrameLowering.h [10:11]
+ include/llvm/CodeGen/TargetInstrInfo.h [10:11]
+ include/llvm/CodeGen/TargetLowering.h [10:11]
+ include/llvm/CodeGen/TargetLoweringObjectFileImpl.h [10:11]
+ include/llvm/CodeGen/TargetOpcodes.h [10:11]
+ include/llvm/CodeGen/TargetPassConfig.h [10:11]
+ include/llvm/CodeGen/TargetRegisterInfo.h [10:11]
+ include/llvm/CodeGen/TargetSchedule.h [10:11]
+ include/llvm/CodeGen/TargetSubtargetInfo.h [10:11]
+ include/llvm/CodeGen/TileShapeInfo.h [10:11]
+ include/llvm/CodeGen/TypePromotion.h [10:11]
+ include/llvm/CodeGen/UnreachableBlockElim.h [10:11]
+ include/llvm/CodeGen/VLIWMachineScheduler.h [10:11]
+ include/llvm/CodeGen/ValueTypes.h [10:11]
+ include/llvm/CodeGen/ValueTypes.td [3:4]
+ include/llvm/CodeGen/VirtRegMap.h [10:11]
+ include/llvm/CodeGen/WasmEHFuncInfo.h [10:11]
+ include/llvm/CodeGen/WinEHFuncInfo.h [10:11]
+ include/llvm/DWARFLinker/DWARFLinker.h [10:11]
+ include/llvm/DWARFLinker/DWARFLinkerCompileUnit.h [10:11]
+ include/llvm/DWARFLinker/DWARFLinkerDeclContext.h [10:11]
+ include/llvm/DWARFLinker/DWARFStreamer.h [10:11]
+ include/llvm/DWARFLinkerParallel/DWARFLinker.h [10:11]
+ include/llvm/DebugInfo/CodeView/AppendingTypeTableBuilder.h [10:11]
+ include/llvm/DebugInfo/CodeView/CVRecord.h [10:11]
+ include/llvm/DebugInfo/CodeView/CVSymbolVisitor.h [10:11]
+ include/llvm/DebugInfo/CodeView/CVTypeVisitor.h [10:11]
+ include/llvm/DebugInfo/CodeView/CodeView.h [10:11]
+ include/llvm/DebugInfo/CodeView/CodeViewError.h [10:11]
+ include/llvm/DebugInfo/CodeView/CodeViewRecordIO.h [10:11]
+ include/llvm/DebugInfo/CodeView/CodeViewRegisters.def [3:4]
+ include/llvm/DebugInfo/CodeView/CodeViewSymbols.def [3:4]
+ include/llvm/DebugInfo/CodeView/CodeViewTypes.def [3:4]
+ include/llvm/DebugInfo/CodeView/ContinuationRecordBuilder.h [10:11]
+ include/llvm/DebugInfo/CodeView/DebugChecksumsSubsection.h [10:11]
+ include/llvm/DebugInfo/CodeView/DebugCrossExSubsection.h [10:11]
+ include/llvm/DebugInfo/CodeView/DebugCrossImpSubsection.h [10:11]
+ include/llvm/DebugInfo/CodeView/DebugFrameDataSubsection.h [10:11]
+ include/llvm/DebugInfo/CodeView/DebugInlineeLinesSubsection.h [10:11]
+ include/llvm/DebugInfo/CodeView/DebugLinesSubsection.h [10:11]
+ include/llvm/DebugInfo/CodeView/DebugStringTableSubsection.h [10:11]
+ include/llvm/DebugInfo/CodeView/DebugSubsection.h [10:11]
+ include/llvm/DebugInfo/CodeView/DebugSubsectionRecord.h [10:11]
+ include/llvm/DebugInfo/CodeView/DebugSubsectionVisitor.h [10:11]
+ include/llvm/DebugInfo/CodeView/DebugSymbolRVASubsection.h [10:11]
+ include/llvm/DebugInfo/CodeView/DebugSymbolsSubsection.h [10:11]
+ include/llvm/DebugInfo/CodeView/DebugUnknownSubsection.h [10:11]
+ include/llvm/DebugInfo/CodeView/EnumTables.h [10:11]
+ include/llvm/DebugInfo/CodeView/Formatters.h [10:11]
+ include/llvm/DebugInfo/CodeView/FunctionId.h [10:11]
+ include/llvm/DebugInfo/CodeView/GUID.h [10:11]
+ include/llvm/DebugInfo/CodeView/GlobalTypeTableBuilder.h [10:11]
+ include/llvm/DebugInfo/CodeView/LazyRandomTypeCollection.h [10:11]
+ include/llvm/DebugInfo/CodeView/Line.h [10:11]
+ include/llvm/DebugInfo/CodeView/MergingTypeTableBuilder.h [10:11]
+ include/llvm/DebugInfo/CodeView/RecordName.h [10:11]
+ include/llvm/DebugInfo/CodeView/RecordSerialization.h [10:11]
+ include/llvm/DebugInfo/CodeView/SimpleTypeSerializer.h [10:11]
+ include/llvm/DebugInfo/CodeView/StringsAndChecksums.h [10:11]
+ include/llvm/DebugInfo/CodeView/SymbolDeserializer.h [10:11]
+ include/llvm/DebugInfo/CodeView/SymbolDumpDelegate.h [10:11]
+ include/llvm/DebugInfo/CodeView/SymbolDumper.h [10:11]
+ include/llvm/DebugInfo/CodeView/SymbolRecord.h [10:11]
+ include/llvm/DebugInfo/CodeView/SymbolRecordHelpers.h [10:11]
+ include/llvm/DebugInfo/CodeView/SymbolRecordMapping.h [10:11]
+ include/llvm/DebugInfo/CodeView/SymbolSerializer.h [10:11]
+ include/llvm/DebugInfo/CodeView/SymbolVisitorCallbackPipeline.h [10:11]
+ include/llvm/DebugInfo/CodeView/SymbolVisitorCallbacks.h [10:11]
+ include/llvm/DebugInfo/CodeView/SymbolVisitorDelegate.h [10:11]
+ include/llvm/DebugInfo/CodeView/TypeCollection.h [10:11]
+ include/llvm/DebugInfo/CodeView/TypeDeserializer.h [10:11]
+ include/llvm/DebugInfo/CodeView/TypeDumpVisitor.h [10:11]
+ include/llvm/DebugInfo/CodeView/TypeHashing.h [10:11]
+ include/llvm/DebugInfo/CodeView/TypeIndex.h [10:11]
+ include/llvm/DebugInfo/CodeView/TypeIndexDiscovery.h [10:11]
+ include/llvm/DebugInfo/CodeView/TypeRecord.h [10:11]
+ include/llvm/DebugInfo/CodeView/TypeRecordHelpers.h [10:11]
+ include/llvm/DebugInfo/CodeView/TypeRecordMapping.h [10:11]
+ include/llvm/DebugInfo/CodeView/TypeStreamMerger.h [10:11]
+ include/llvm/DebugInfo/CodeView/TypeSymbolEmitter.h [10:11]
+ include/llvm/DebugInfo/CodeView/TypeTableCollection.h [10:11]
+ include/llvm/DebugInfo/CodeView/TypeVisitorCallbackPipeline.h [10:11]
+ include/llvm/DebugInfo/CodeView/TypeVisitorCallbacks.h [10:11]
+ include/llvm/DebugInfo/DIContext.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFAbbreviationDeclaration.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFAcceleratorTable.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFAddressRange.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFAttribute.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFCompileUnit.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFContext.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFDataExtractor.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFDebugAbbrev.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFDebugAddr.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFDebugArangeSet.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFDebugAranges.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFDebugFrame.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFDebugInfoEntry.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFDebugLine.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFDebugLoc.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFDebugMacro.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFDebugPubTable.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFDebugRangeList.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFDebugRnglists.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFDie.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFExpression.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFFormValue.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFGdbIndex.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFListTable.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFLocationExpression.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFObject.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFRelocMap.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFSection.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFTypePrinter.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFTypeUnit.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFUnit.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFUnitIndex.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFVerifier.h [10:11]
+ include/llvm/DebugInfo/GSYM/DwarfTransformer.h [10:11]
+ include/llvm/DebugInfo/GSYM/ExtractRanges.h [10:11]
+ include/llvm/DebugInfo/GSYM/FileEntry.h [10:11]
+ include/llvm/DebugInfo/GSYM/FileWriter.h [10:11]
+ include/llvm/DebugInfo/GSYM/FunctionInfo.h [10:11]
+ include/llvm/DebugInfo/GSYM/GsymCreator.h [10:11]
+ include/llvm/DebugInfo/GSYM/GsymReader.h [10:11]
+ include/llvm/DebugInfo/GSYM/Header.h [10:11]
+ include/llvm/DebugInfo/GSYM/InlineInfo.h [10:11]
+ include/llvm/DebugInfo/GSYM/LineEntry.h [10:11]
+ include/llvm/DebugInfo/GSYM/LineTable.h [10:11]
+ include/llvm/DebugInfo/GSYM/LookupResult.h [10:11]
+ include/llvm/DebugInfo/GSYM/ObjectFileTransformer.h [10:11]
+ include/llvm/DebugInfo/GSYM/StringTable.h [10:11]
+ include/llvm/DebugInfo/LogicalView/Core/LVCompare.h [10:11]
+ include/llvm/DebugInfo/LogicalView/Core/LVElement.h [10:11]
+ include/llvm/DebugInfo/LogicalView/Core/LVLine.h [10:11]
+ include/llvm/DebugInfo/LogicalView/Core/LVLocation.h [10:11]
+ include/llvm/DebugInfo/LogicalView/Core/LVObject.h [10:11]
+ include/llvm/DebugInfo/LogicalView/Core/LVOptions.h [10:11]
+ include/llvm/DebugInfo/LogicalView/Core/LVRange.h [10:11]
+ include/llvm/DebugInfo/LogicalView/Core/LVReader.h [10:11]
+ include/llvm/DebugInfo/LogicalView/Core/LVScope.h [10:11]
+ include/llvm/DebugInfo/LogicalView/Core/LVSort.h [10:11]
+ include/llvm/DebugInfo/LogicalView/Core/LVStringPool.h [10:11]
+ include/llvm/DebugInfo/LogicalView/Core/LVSupport.h [10:11]
+ include/llvm/DebugInfo/LogicalView/Core/LVSymbol.h [10:11]
+ include/llvm/DebugInfo/LogicalView/Core/LVType.h [10:11]
+ include/llvm/DebugInfo/LogicalView/LVReaderHandler.h [10:11]
+ include/llvm/DebugInfo/LogicalView/Readers/LVBinaryReader.h [10:11]
+ include/llvm/DebugInfo/LogicalView/Readers/LVELFReader.h [10:11]
+ include/llvm/DebugInfo/MSF/IMSFFile.h [10:11]
+ include/llvm/DebugInfo/MSF/MSFBuilder.h [10:11]
+ include/llvm/DebugInfo/MSF/MSFCommon.h [10:11]
+ include/llvm/DebugInfo/MSF/MSFError.h [10:11]
+ include/llvm/DebugInfo/MSF/MappedBlockStream.h [10:11]
+ include/llvm/DebugInfo/PDB/ConcreteSymbolEnumerator.h [10:11]
+ include/llvm/DebugInfo/PDB/DIA/DIADataStream.h [10:11]
+ include/llvm/DebugInfo/PDB/DIA/DIAEnumDebugStreams.h [10:11]
+ include/llvm/DebugInfo/PDB/DIA/DIAEnumFrameData.h [10:11]
+ include/llvm/DebugInfo/PDB/DIA/DIAEnumInjectedSources.h [10:11]
+ include/llvm/DebugInfo/PDB/DIA/DIAEnumLineNumbers.h [10:11]
+ include/llvm/DebugInfo/PDB/DIA/DIAEnumSectionContribs.h [10:11]
+ include/llvm/DebugInfo/PDB/DIA/DIAEnumSourceFiles.h [10:11]
+ include/llvm/DebugInfo/PDB/DIA/DIAEnumSymbols.h [10:11]
+ include/llvm/DebugInfo/PDB/DIA/DIAEnumTables.h [10:11]
+ include/llvm/DebugInfo/PDB/DIA/DIAError.h [10:11]
+ include/llvm/DebugInfo/PDB/DIA/DIAFrameData.h [10:11]
+ include/llvm/DebugInfo/PDB/DIA/DIAInjectedSource.h [10:11]
+ include/llvm/DebugInfo/PDB/DIA/DIALineNumber.h [10:11]
+ include/llvm/DebugInfo/PDB/DIA/DIARawSymbol.h [10:11]
+ include/llvm/DebugInfo/PDB/DIA/DIASectionContrib.h [10:11]
+ include/llvm/DebugInfo/PDB/DIA/DIASession.h [10:11]
+ include/llvm/DebugInfo/PDB/DIA/DIASourceFile.h [10:11]
+ include/llvm/DebugInfo/PDB/DIA/DIASupport.h [10:11]
+ include/llvm/DebugInfo/PDB/DIA/DIATable.h [10:11]
+ include/llvm/DebugInfo/PDB/DIA/DIAUtils.h [10:11]
+ include/llvm/DebugInfo/PDB/GenericError.h [10:11]
+ include/llvm/DebugInfo/PDB/IPDBDataStream.h [10:11]
+ include/llvm/DebugInfo/PDB/IPDBEnumChildren.h [10:11]
+ include/llvm/DebugInfo/PDB/IPDBFrameData.h [10:11]
+ include/llvm/DebugInfo/PDB/IPDBInjectedSource.h [10:11]
+ include/llvm/DebugInfo/PDB/IPDBLineNumber.h [10:11]
+ include/llvm/DebugInfo/PDB/IPDBRawSymbol.h [10:11]
+ include/llvm/DebugInfo/PDB/IPDBSectionContrib.h [10:11]
+ include/llvm/DebugInfo/PDB/IPDBSession.h [10:11]
+ include/llvm/DebugInfo/PDB/IPDBSourceFile.h [10:11]
+ include/llvm/DebugInfo/PDB/IPDBTable.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/DbiModuleDescriptor.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/DbiModuleDescriptorBuilder.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/DbiModuleList.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/DbiStream.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/DbiStreamBuilder.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/EnumTables.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/FormatUtil.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/Formatters.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/GSIStreamBuilder.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/GlobalsStream.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/Hash.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/HashTable.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/ISectionContribVisitor.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/InfoStream.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/InfoStreamBuilder.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/InjectedSourceStream.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/InputFile.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/LinePrinter.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/ModuleDebugStream.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/NamedStreamMap.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/NativeCompilandSymbol.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/NativeEnumGlobals.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/NativeEnumInjectedSources.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/NativeEnumLineNumbers.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/NativeEnumModules.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/NativeEnumSymbols.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/NativeEnumTypes.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/NativeExeSymbol.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/NativeFunctionSymbol.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/NativeInlineSiteSymbol.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/NativeLineNumber.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/NativePublicSymbol.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/NativeRawSymbol.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/NativeSession.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/NativeSourceFile.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/NativeSymbolEnumerator.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/NativeTypeArray.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/NativeTypeBuiltin.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/NativeTypeEnum.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/NativeTypeFunctionSig.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/NativeTypePointer.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/NativeTypeTypedef.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/NativeTypeUDT.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/NativeTypeVTShape.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/PDBFile.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/PDBFileBuilder.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/PDBStringTable.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/PDBStringTableBuilder.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/PublicsStream.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/RawConstants.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/RawError.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/RawTypes.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/SymbolCache.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/SymbolStream.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/TpiHashing.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/TpiStream.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/TpiStreamBuilder.h [10:11]
+ include/llvm/DebugInfo/PDB/PDB.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBContext.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBExtras.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymDumper.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbol.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolAnnotation.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolBlock.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolCompiland.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolCompilandDetails.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolCompilandEnv.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolCustom.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolData.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolExe.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolFunc.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolFuncDebugEnd.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolFuncDebugStart.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolLabel.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolPublicSymbol.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolThunk.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeArray.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeBaseClass.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeBuiltin.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeCustom.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeDimension.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeEnum.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeFriend.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeFunctionArg.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeFunctionSig.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeManaged.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypePointer.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeTypedef.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeUDT.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeVTable.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeVTableShape.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolUnknown.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolUsingNamespace.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBTypes.h [10:11]
+ include/llvm/DebugInfo/PDB/UDTLayout.h [10:11]
+ include/llvm/DebugInfo/Symbolize/DIPrinter.h [10:11]
+ include/llvm/DebugInfo/Symbolize/Markup.h [10:11]
+ include/llvm/DebugInfo/Symbolize/MarkupFilter.h [10:11]
+ include/llvm/DebugInfo/Symbolize/SymbolizableModule.h [10:11]
+ include/llvm/DebugInfo/Symbolize/SymbolizableObjectFile.h [10:11]
+ include/llvm/DebugInfo/Symbolize/Symbolize.h [10:11]
+ include/llvm/Debuginfod/BuildIDFetcher.h [10:11]
+ include/llvm/Debuginfod/Debuginfod.h [10:11]
+ include/llvm/Debuginfod/HTTPClient.h [10:11]
+ include/llvm/Debuginfod/HTTPServer.h [10:11]
+ include/llvm/Demangle/Demangle.h [10:11]
+ include/llvm/Demangle/DemangleConfig.h [10:11]
+ include/llvm/Demangle/ItaniumDemangle.h [10:11]
+ include/llvm/Demangle/ItaniumNodes.def [3:4]
+ include/llvm/Demangle/MicrosoftDemangle.h [10:11]
+ include/llvm/Demangle/MicrosoftDemangleNodes.h [10:11]
+ include/llvm/Demangle/StringView.h [10:11]
+ include/llvm/Demangle/Utility.h [10:11]
+ include/llvm/ExecutionEngine/ExecutionEngine.h [10:11]
+ include/llvm/ExecutionEngine/GenericValue.h [10:11]
+ include/llvm/ExecutionEngine/Interpreter.h [10:11]
+ include/llvm/ExecutionEngine/JITEventListener.h [10:11]
+ include/llvm/ExecutionEngine/JITLink/COFF.h [10:11]
+ include/llvm/ExecutionEngine/JITLink/COFF_x86_64.h [10:11]
+ include/llvm/ExecutionEngine/JITLink/DWARFRecordSectionSplitter.h [10:11]
+ include/llvm/ExecutionEngine/JITLink/EHFrameSupport.h [10:11]
+ include/llvm/ExecutionEngine/JITLink/ELF.h [10:11]
+ include/llvm/ExecutionEngine/JITLink/ELF_aarch64.h [10:11]
+ include/llvm/ExecutionEngine/JITLink/ELF_i386.h [10:11]
+ include/llvm/ExecutionEngine/JITLink/ELF_loongarch.h [10:11]
+ include/llvm/ExecutionEngine/JITLink/ELF_riscv.h [10:11]
+ include/llvm/ExecutionEngine/JITLink/ELF_x86_64.h [10:11]
+ include/llvm/ExecutionEngine/JITLink/JITLink.h [10:11]
+ include/llvm/ExecutionEngine/JITLink/JITLinkDylib.h [10:11]
+ include/llvm/ExecutionEngine/JITLink/JITLinkMemoryManager.h [10:11]
+ include/llvm/ExecutionEngine/JITLink/MachO.h [10:11]
+ include/llvm/ExecutionEngine/JITLink/MachO_arm64.h [10:11]
+ include/llvm/ExecutionEngine/JITLink/MachO_x86_64.h [10:11]
+ include/llvm/ExecutionEngine/JITLink/TableManager.h [10:11]
+ include/llvm/ExecutionEngine/JITLink/aarch64.h [10:11]
+ include/llvm/ExecutionEngine/JITLink/i386.h [10:11]
+ include/llvm/ExecutionEngine/JITLink/loongarch.h [10:11]
+ include/llvm/ExecutionEngine/JITLink/riscv.h [10:11]
+ include/llvm/ExecutionEngine/JITLink/x86_64.h [10:11]
+ include/llvm/ExecutionEngine/JITSymbol.h [10:11]
+ include/llvm/ExecutionEngine/MCJIT.h [10:11]
+ include/llvm/ExecutionEngine/OProfileWrapper.h [10:11]
+ include/llvm/ExecutionEngine/ObjectCache.h [10:11]
+ include/llvm/ExecutionEngine/Orc/COFFPlatform.h [10:11]
+ include/llvm/ExecutionEngine/Orc/COFFVCRuntimeSupport.h [10:11]
+ include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h [10:11]
+ include/llvm/ExecutionEngine/Orc/CompileUtils.h [10:11]
+ include/llvm/ExecutionEngine/Orc/Core.h [10:11]
+ include/llvm/ExecutionEngine/Orc/DebugObjectManagerPlugin.h [10:11]
+ include/llvm/ExecutionEngine/Orc/DebugUtils.h [10:11]
+ include/llvm/ExecutionEngine/Orc/DebuggerSupportPlugin.h [10:11]
+ include/llvm/ExecutionEngine/Orc/ELFNixPlatform.h [10:11]
+ include/llvm/ExecutionEngine/Orc/EPCDebugObjectRegistrar.h [10:11]
+ include/llvm/ExecutionEngine/Orc/EPCDynamicLibrarySearchGenerator.h [10:11]
+ include/llvm/ExecutionEngine/Orc/EPCEHFrameRegistrar.h [10:11]
+ include/llvm/ExecutionEngine/Orc/EPCGenericDylibManager.h [10:11]
+ include/llvm/ExecutionEngine/Orc/EPCGenericJITLinkMemoryManager.h [10:11]
+ include/llvm/ExecutionEngine/Orc/EPCGenericMemoryAccess.h [10:11]
+ include/llvm/ExecutionEngine/Orc/EPCGenericRTDyldMemoryManager.h [10:11]
+ include/llvm/ExecutionEngine/Orc/EPCIndirectionUtils.h [10:11]
+ include/llvm/ExecutionEngine/Orc/ExecutionUtils.h [10:11]
+ include/llvm/ExecutionEngine/Orc/ExecutorProcessControl.h [10:11]
+ include/llvm/ExecutionEngine/Orc/IRCompileLayer.h [10:11]
+ include/llvm/ExecutionEngine/Orc/IRTransformLayer.h [10:11]
+ include/llvm/ExecutionEngine/Orc/IndirectionUtils.h [10:11]
+ include/llvm/ExecutionEngine/Orc/JITTargetMachineBuilder.h [10:11]
+ include/llvm/ExecutionEngine/Orc/LLJIT.h [10:11]
+ include/llvm/ExecutionEngine/Orc/Layer.h [10:11]
+ include/llvm/ExecutionEngine/Orc/LazyReexports.h [10:11]
+ include/llvm/ExecutionEngine/Orc/LookupAndRecordAddrs.h [10:11]
+ include/llvm/ExecutionEngine/Orc/MachOPlatform.h [10:11]
+ include/llvm/ExecutionEngine/Orc/Mangling.h [10:11]
+ include/llvm/ExecutionEngine/Orc/MapperJITLinkMemoryManager.h [10:11]
+ include/llvm/ExecutionEngine/Orc/MemoryMapper.h [10:11]
+ include/llvm/ExecutionEngine/Orc/ObjectFileInterface.h [10:11]
+ include/llvm/ExecutionEngine/Orc/ObjectLinkingLayer.h [10:11]
+ include/llvm/ExecutionEngine/Orc/ObjectTransformLayer.h [10:11]
+ include/llvm/ExecutionEngine/Orc/OrcABISupport.h [10:11]
+ include/llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h [10:11]
+ include/llvm/ExecutionEngine/Orc/Shared/AllocationActions.h [10:11]
+ include/llvm/ExecutionEngine/Orc/Shared/ExecutorAddress.h [10:11]
+ include/llvm/ExecutionEngine/Orc/Shared/MemoryFlags.h [10:11]
+ include/llvm/ExecutionEngine/Orc/Shared/OrcError.h [10:11]
+ include/llvm/ExecutionEngine/Orc/Shared/OrcRTBridge.h [10:11]
+ include/llvm/ExecutionEngine/Orc/Shared/SimplePackedSerialization.h [10:11]
+ include/llvm/ExecutionEngine/Orc/Shared/SimpleRemoteEPCUtils.h [10:11]
+ include/llvm/ExecutionEngine/Orc/Shared/TargetProcessControlTypes.h [10:11]
+ include/llvm/ExecutionEngine/Orc/Shared/WrapperFunctionUtils.h [10:11]
+ include/llvm/ExecutionEngine/Orc/SimpleRemoteEPC.h [10:11]
+ include/llvm/ExecutionEngine/Orc/SpeculateAnalyses.h [10:11]
+ include/llvm/ExecutionEngine/Orc/Speculation.h [10:11]
+ include/llvm/ExecutionEngine/Orc/SymbolStringPool.h [10:11]
+ include/llvm/ExecutionEngine/Orc/TargetProcess/ExecutorBootstrapService.h [10:11]
+ include/llvm/ExecutionEngine/Orc/TargetProcess/ExecutorSharedMemoryMapperService.h [10:11]
+ include/llvm/ExecutionEngine/Orc/TargetProcess/JITLoaderGDB.h [10:11]
+ include/llvm/ExecutionEngine/Orc/TargetProcess/RegisterEHFrames.h [10:11]
+ include/llvm/ExecutionEngine/Orc/TargetProcess/SimpleExecutorDylibManager.h [10:11]
+ include/llvm/ExecutionEngine/Orc/TargetProcess/SimpleExecutorMemoryManager.h [10:11]
+ include/llvm/ExecutionEngine/Orc/TargetProcess/SimpleRemoteEPCServer.h [10:11]
+ include/llvm/ExecutionEngine/Orc/TargetProcess/TargetExecutionUtils.h [10:11]
+ include/llvm/ExecutionEngine/Orc/TaskDispatch.h [10:11]
+ include/llvm/ExecutionEngine/Orc/ThreadSafeModule.h [10:11]
+ include/llvm/ExecutionEngine/RTDyldMemoryManager.h [10:11]
+ include/llvm/ExecutionEngine/RuntimeDyld.h [10:11]
+ include/llvm/ExecutionEngine/RuntimeDyldChecker.h [10:11]
+ include/llvm/ExecutionEngine/SectionMemoryManager.h [10:11]
+ include/llvm/FileCheck/FileCheck.h [10:11]
+ include/llvm/Frontend/Directive/DirectiveBase.td [3:4]
+ include/llvm/Frontend/HLSL/HLSLResource.h [10:11]
+ include/llvm/Frontend/OpenACC/ACC.td [3:4]
+ include/llvm/Frontend/OpenMP/OMP.td [3:4]
+ include/llvm/Frontend/OpenMP/OMPAssume.h [10:11]
+ include/llvm/Frontend/OpenMP/OMPConstants.h [10:11]
+ include/llvm/Frontend/OpenMP/OMPContext.h [10:11]
+ include/llvm/Frontend/OpenMP/OMPDeviceConstants.h [10:11]
+ include/llvm/Frontend/OpenMP/OMPGridValues.h [10:11]
+ include/llvm/Frontend/OpenMP/OMPIRBuilder.h [10:11]
+ include/llvm/Frontend/OpenMP/OMPKinds.def [3:4]
+ include/llvm/FuzzMutate/FuzzerCLI.h [10:11]
+ include/llvm/FuzzMutate/IRMutator.h [10:11]
+ include/llvm/FuzzMutate/OpDescriptor.h [10:11]
+ include/llvm/FuzzMutate/Operations.h [10:11]
+ include/llvm/FuzzMutate/Random.h [10:11]
+ include/llvm/FuzzMutate/RandomIRBuilder.h [10:11]
+ include/llvm/IR/AbstractCallSite.h [10:11]
+ include/llvm/IR/Argument.h [10:11]
+ include/llvm/IR/AssemblyAnnotationWriter.h [10:11]
+ include/llvm/IR/Assumptions.h [10:11]
+ include/llvm/IR/Attributes.h [10:11]
+ include/llvm/IR/Attributes.td [3:4]
+ include/llvm/IR/AutoUpgrade.h [10:11]
+ include/llvm/IR/BasicBlock.h [10:11]
+ include/llvm/IR/BuiltinGCs.h [10:11]
+ include/llvm/IR/CFG.h [10:11]
+ include/llvm/IR/CallingConv.h [10:11]
+ include/llvm/IR/Comdat.h [10:11]
+ include/llvm/IR/Constant.h [10:11]
+ include/llvm/IR/ConstantFold.h [10:11]
+ include/llvm/IR/ConstantFolder.h [10:11]
+ include/llvm/IR/ConstantRange.h [10:11]
+ include/llvm/IR/Constants.h [10:11]
+ include/llvm/IR/ConstrainedOps.def [3:4]
+ include/llvm/IR/DIBuilder.h [10:11]
+ include/llvm/IR/DataLayout.h [10:11]
+ include/llvm/IR/DebugInfo.h [10:11]
+ include/llvm/IR/DebugInfoFlags.def [3:4]
+ include/llvm/IR/DebugInfoMetadata.h [10:11]
+ include/llvm/IR/DebugLoc.h [10:11]
+ include/llvm/IR/DerivedTypes.h [10:11]
+ include/llvm/IR/DerivedUser.h [10:11]
+ include/llvm/IR/DiagnosticHandler.h [10:11]
+ include/llvm/IR/DiagnosticInfo.h [10:11]
+ include/llvm/IR/DiagnosticPrinter.h [10:11]
+ include/llvm/IR/Dominators.h [10:11]
+ include/llvm/IR/FMF.h [10:11]
+ include/llvm/IR/FPEnv.h [10:11]
+ include/llvm/IR/FixedPointBuilder.h [10:11]
+ include/llvm/IR/Function.h [10:11]
+ include/llvm/IR/GCStrategy.h [10:11]
+ include/llvm/IR/GVMaterializer.h [10:11]
+ include/llvm/IR/GetElementPtrTypeIterator.h [10:11]
+ include/llvm/IR/GlobalAlias.h [10:11]
+ include/llvm/IR/GlobalIFunc.h [10:11]
+ include/llvm/IR/GlobalObject.h [10:11]
+ include/llvm/IR/GlobalValue.h [10:11]
+ include/llvm/IR/GlobalVariable.h [10:11]
+ include/llvm/IR/IRBuilder.h [10:11]
+ include/llvm/IR/IRBuilderFolder.h [10:11]
+ include/llvm/IR/IRPrintingPasses.h [10:11]
+ include/llvm/IR/InlineAsm.h [10:11]
+ include/llvm/IR/InstIterator.h [10:11]
+ include/llvm/IR/InstVisitor.h [10:11]
+ include/llvm/IR/InstrTypes.h [10:11]
+ include/llvm/IR/Instruction.def [3:4]
+ include/llvm/IR/Instruction.h [10:11]
+ include/llvm/IR/Instructions.h [10:11]
+ include/llvm/IR/IntrinsicInst.h [10:11]
+ include/llvm/IR/Intrinsics.h [10:11]
+ include/llvm/IR/Intrinsics.td [3:4]
+ include/llvm/IR/IntrinsicsAArch64.td [3:4]
+ include/llvm/IR/IntrinsicsAMDGPU.td [3:4]
+ include/llvm/IR/IntrinsicsARM.td [3:4]
+ include/llvm/IR/IntrinsicsBPF.td [3:4]
+ include/llvm/IR/IntrinsicsDirectX.td [3:4]
+ include/llvm/IR/IntrinsicsHexagon.td [2:3]
+ include/llvm/IR/IntrinsicsHexagonDep.td [3:4]
+ include/llvm/IR/IntrinsicsLoongArch.td [3:4]
+ include/llvm/IR/IntrinsicsMips.td [3:4]
+ include/llvm/IR/IntrinsicsNVVM.td [3:4]
+ include/llvm/IR/IntrinsicsPowerPC.td [3:4]
+ include/llvm/IR/IntrinsicsRISCV.td [3:4]
+ include/llvm/IR/IntrinsicsSPIRV.td [3:4]
+ include/llvm/IR/IntrinsicsSystemZ.td [3:4]
+ include/llvm/IR/IntrinsicsWebAssembly.td [3:4]
+ include/llvm/IR/IntrinsicsX86.td [3:4]
+ include/llvm/IR/IntrinsicsXCore.td [3:4]
+ include/llvm/IR/LLVMContext.h [10:11]
+ include/llvm/IR/LLVMRemarkStreamer.h [10:11]
+ include/llvm/IR/LegacyPassManager.h [10:11]
+ include/llvm/IR/LegacyPassManagers.h [10:11]
+ include/llvm/IR/LegacyPassNameParser.h [10:11]
+ include/llvm/IR/MDBuilder.h [10:11]
+ include/llvm/IR/Mangler.h [10:11]
+ include/llvm/IR/MatrixBuilder.h [10:11]
+ include/llvm/IR/Metadata.def [3:4]
+ include/llvm/IR/Metadata.h [10:11]
+ include/llvm/IR/Module.h [10:11]
+ include/llvm/IR/ModuleSlotTracker.h [10:11]
+ include/llvm/IR/ModuleSummaryIndex.h [10:11]
+ include/llvm/IR/ModuleSummaryIndexYAML.h [10:11]
+ include/llvm/IR/NoFolder.h [10:11]
+ include/llvm/IR/OperandTraits.h [10:11]
+ include/llvm/IR/Operator.h [10:11]
+ include/llvm/IR/OptBisect.h [10:11]
+ include/llvm/IR/PassInstrumentation.h [10:11]
+ include/llvm/IR/PassManager.h [10:11]
+ include/llvm/IR/PassManagerImpl.h [10:11]
+ include/llvm/IR/PassManagerInternal.h [10:11]
+ include/llvm/IR/PassTimingInfo.h [10:11]
+ include/llvm/IR/PatternMatch.h [10:11]
+ include/llvm/IR/PredIteratorCache.h [10:11]
+ include/llvm/IR/PrintPasses.h [10:11]
+ include/llvm/IR/ProfDataUtils.h [10:11]
+ include/llvm/IR/ProfileSummary.h [10:11]
+ include/llvm/IR/PseudoProbe.h [10:11]
+ include/llvm/IR/ReplaceConstant.h [10:11]
+ include/llvm/IR/RuntimeLibcalls.def [3:4]
+ include/llvm/IR/SSAContext.h [10:11]
+ include/llvm/IR/SafepointIRVerifier.h [10:11]
+ include/llvm/IR/Statepoint.h [10:11]
+ include/llvm/IR/StructuralHash.h [10:11]
+ include/llvm/IR/SymbolTableListTraits.h [10:11]
+ include/llvm/IR/TrackingMDRef.h [10:11]
+ include/llvm/IR/Type.h [10:11]
+ include/llvm/IR/TypeFinder.h [10:11]
+ include/llvm/IR/TypedPointerType.h [10:11]
+ include/llvm/IR/Use.h [10:11]
+ include/llvm/IR/UseListOrder.h [10:11]
+ include/llvm/IR/User.h [10:11]
+ include/llvm/IR/VPIntrinsics.def [3:4]
+ include/llvm/IR/Value.def [3:4]
+ include/llvm/IR/Value.h [10:11]
+ include/llvm/IR/ValueHandle.h [10:11]
+ include/llvm/IR/ValueMap.h [10:11]
+ include/llvm/IR/ValueSymbolTable.h [10:11]
+ include/llvm/IR/VectorBuilder.h [10:11]
+ include/llvm/IR/Verifier.h [10:11]
+ include/llvm/IRPrinter/IRPrintingPasses.h [10:11]
+ include/llvm/IRReader/IRReader.h [10:11]
+ include/llvm/InitializePasses.h [10:11]
+ include/llvm/InterfaceStub/ELFObjHandler.h [10:11]
+ include/llvm/InterfaceStub/IFSHandler.h [10:11]
+ include/llvm/InterfaceStub/IFSStub.h [10:11]
+ include/llvm/LTO/Config.h [10:11]
+ include/llvm/LTO/LTO.h [10:11]
+ include/llvm/LTO/LTOBackend.h [10:11]
+ include/llvm/LTO/SummaryBasedOptimizations.h [10:11]
+ include/llvm/LTO/legacy/LTOCodeGenerator.h [10:11]
+ include/llvm/LTO/legacy/LTOModule.h [10:11]
+ include/llvm/LTO/legacy/ThinLTOCodeGenerator.h [10:11]
+ include/llvm/LTO/legacy/UpdateCompilerUsed.h [10:11]
+ include/llvm/LineEditor/LineEditor.h [10:11]
+ include/llvm/LinkAllIR.h [10:11]
+ include/llvm/LinkAllPasses.h [10:11]
+ include/llvm/Linker/IRMover.h [10:11]
+ include/llvm/Linker/Linker.h [10:11]
+ include/llvm/MC/ConstantPools.h [10:11]
+ include/llvm/MC/LaneBitmask.h [10:11]
+ include/llvm/MC/MCAsmBackend.h [10:11]
+ include/llvm/MC/MCAsmInfo.h [10:11]
+ include/llvm/MC/MCAsmInfoCOFF.h [10:11]
+ include/llvm/MC/MCAsmInfoDarwin.h [10:11]
+ include/llvm/MC/MCAsmInfoELF.h [10:11]
+ include/llvm/MC/MCAsmInfoGOFF.h [10:11]
+ include/llvm/MC/MCAsmInfoWasm.h [10:11]
+ include/llvm/MC/MCAsmInfoXCOFF.h [10:11]
+ include/llvm/MC/MCAsmLayout.h [10:11]
+ include/llvm/MC/MCAsmMacro.h [10:11]
+ include/llvm/MC/MCAssembler.h [10:11]
+ include/llvm/MC/MCCodeEmitter.h [10:11]
+ include/llvm/MC/MCCodeView.h [10:11]
+ include/llvm/MC/MCContext.h [10:11]
+ include/llvm/MC/MCDXContainerStreamer.h [10:11]
+ include/llvm/MC/MCDXContainerWriter.h [10:11]
+ include/llvm/MC/MCDecoderOps.h [10:11]
+ include/llvm/MC/MCDirectives.h [10:11]
+ include/llvm/MC/MCDisassembler/MCDisassembler.h [10:11]
+ include/llvm/MC/MCDisassembler/MCExternalSymbolizer.h [10:11]
+ include/llvm/MC/MCDisassembler/MCRelocationInfo.h [10:11]
+ include/llvm/MC/MCDisassembler/MCSymbolizer.h [10:11]
+ include/llvm/MC/MCDwarf.h [10:11]
+ include/llvm/MC/MCELFObjectWriter.h [10:11]
+ include/llvm/MC/MCELFStreamer.h [10:11]
+ include/llvm/MC/MCExpr.h [10:11]
+ include/llvm/MC/MCFixup.h [10:11]
+ include/llvm/MC/MCFixupKindInfo.h [10:11]
+ include/llvm/MC/MCFragment.h [10:11]
+ include/llvm/MC/MCInst.h [10:11]
+ include/llvm/MC/MCInstBuilder.h [10:11]
+ include/llvm/MC/MCInstPrinter.h [10:11]
+ include/llvm/MC/MCInstrAnalysis.h [10:11]
+ include/llvm/MC/MCInstrDesc.h [10:11]
+ include/llvm/MC/MCInstrInfo.h [10:11]
+ include/llvm/MC/MCInstrItineraries.h [10:11]
+ include/llvm/MC/MCLabel.h [10:11]
+ include/llvm/MC/MCLinkerOptimizationHint.h [11:12]
+ include/llvm/MC/MCMachObjectWriter.h [10:11]
+ include/llvm/MC/MCObjectFileInfo.h [10:11]
+ include/llvm/MC/MCObjectStreamer.h [10:11]
+ include/llvm/MC/MCObjectWriter.h [10:11]
+ include/llvm/MC/MCParser/AsmCond.h [10:11]
+ include/llvm/MC/MCParser/AsmLexer.h [10:11]
+ include/llvm/MC/MCParser/MCAsmLexer.h [10:11]
+ include/llvm/MC/MCParser/MCAsmParser.h [10:11]
+ include/llvm/MC/MCParser/MCAsmParserExtension.h [10:11]
+ include/llvm/MC/MCParser/MCAsmParserUtils.h [10:11]
+ include/llvm/MC/MCParser/MCParsedAsmOperand.h [10:11]
+ include/llvm/MC/MCParser/MCTargetAsmParser.h [10:11]
+ include/llvm/MC/MCPseudoProbe.h [10:11]
+ include/llvm/MC/MCRegister.h [10:11]
+ include/llvm/MC/MCRegisterInfo.h [10:11]
+ include/llvm/MC/MCSPIRVObjectWriter.h [10:11]
+ include/llvm/MC/MCSPIRVStreamer.h [10:11]
+ include/llvm/MC/MCSchedule.h [10:11]
+ include/llvm/MC/MCSection.h [10:11]
+ include/llvm/MC/MCSectionCOFF.h [10:11]
+ include/llvm/MC/MCSectionDXContainer.h [10:11]
+ include/llvm/MC/MCSectionELF.h [10:11]
+ include/llvm/MC/MCSectionGOFF.h [10:11]
+ include/llvm/MC/MCSectionMachO.h [10:11]
+ include/llvm/MC/MCSectionSPIRV.h [10:11]
+ include/llvm/MC/MCSectionWasm.h [10:11]
+ include/llvm/MC/MCSectionXCOFF.h [10:11]
+ include/llvm/MC/MCStreamer.h [10:11]
+ include/llvm/MC/MCSubtargetInfo.h [10:11]
+ include/llvm/MC/MCSymbol.h [10:11]
+ include/llvm/MC/MCSymbolCOFF.h [10:11]
+ include/llvm/MC/MCSymbolELF.h [10:11]
+ include/llvm/MC/MCSymbolGOFF.h [10:11]
+ include/llvm/MC/MCSymbolMachO.h [10:11]
+ include/llvm/MC/MCSymbolWasm.h [10:11]
+ include/llvm/MC/MCSymbolXCOFF.h [10:11]
+ include/llvm/MC/MCTargetOptions.h [10:11]
+ include/llvm/MC/MCTargetOptionsCommandFlags.h [10:11]
+ include/llvm/MC/MCValue.h [10:11]
+ include/llvm/MC/MCWasmObjectWriter.h [10:11]
+ include/llvm/MC/MCWasmStreamer.h [10:11]
+ include/llvm/MC/MCWin64EH.h [10:11]
+ include/llvm/MC/MCWinCOFFObjectWriter.h [10:11]
+ include/llvm/MC/MCWinCOFFStreamer.h [10:11]
+ include/llvm/MC/MCWinEH.h [10:11]
+ include/llvm/MC/MCXCOFFObjectWriter.h [10:11]
+ include/llvm/MC/MCXCOFFStreamer.h [10:11]
+ include/llvm/MC/MachineLocation.h [10:11]
+ include/llvm/MC/SectionKind.h [10:11]
+ include/llvm/MC/StringTableBuilder.h [10:11]
+ include/llvm/MC/SubtargetFeature.h [10:11]
+ include/llvm/MC/TargetRegistry.h [10:11]
+ include/llvm/MCA/CodeEmitter.h [10:11]
+ include/llvm/MCA/Context.h [10:11]
+ include/llvm/MCA/CustomBehaviour.h [10:11]
+ include/llvm/MCA/HWEventListener.h [10:11]
+ include/llvm/MCA/HardwareUnits/HardwareUnit.h [10:11]
+ include/llvm/MCA/HardwareUnits/LSUnit.h [10:11]
+ include/llvm/MCA/HardwareUnits/RegisterFile.h [10:11]
+ include/llvm/MCA/HardwareUnits/ResourceManager.h [10:11]
+ include/llvm/MCA/HardwareUnits/RetireControlUnit.h [10:11]
+ include/llvm/MCA/HardwareUnits/Scheduler.h [10:11]
+ include/llvm/MCA/IncrementalSourceMgr.h [10:11]
+ include/llvm/MCA/InstrBuilder.h [10:11]
+ include/llvm/MCA/Instruction.h [10:11]
+ include/llvm/MCA/Pipeline.h [10:11]
+ include/llvm/MCA/SourceMgr.h [10:11]
+ include/llvm/MCA/Stages/DispatchStage.h [10:11]
+ include/llvm/MCA/Stages/EntryStage.h [10:11]
+ include/llvm/MCA/Stages/ExecuteStage.h [10:11]
+ include/llvm/MCA/Stages/InOrderIssueStage.h [10:11]
+ include/llvm/MCA/Stages/InstructionTables.h [10:11]
+ include/llvm/MCA/Stages/MicroOpQueueStage.h [10:11]
+ include/llvm/MCA/Stages/RetireStage.h [10:11]
+ include/llvm/MCA/Stages/Stage.h [10:11]
+ include/llvm/MCA/Support.h [10:11]
+ include/llvm/MCA/View.h [10:11]
+ include/llvm/ObjCopy/COFF/COFFConfig.h [10:11]
+ include/llvm/ObjCopy/COFF/COFFObjcopy.h [10:11]
+ include/llvm/ObjCopy/CommonConfig.h [10:11]
+ include/llvm/ObjCopy/ConfigManager.h [10:11]
+ include/llvm/ObjCopy/ELF/ELFConfig.h [10:11]
+ include/llvm/ObjCopy/ELF/ELFObjcopy.h [10:11]
+ include/llvm/ObjCopy/MachO/MachOConfig.h [10:11]
+ include/llvm/ObjCopy/MachO/MachOObjcopy.h [10:11]
+ include/llvm/ObjCopy/MultiFormatConfig.h [10:11]
+ include/llvm/ObjCopy/ObjCopy.h [10:11]
+ include/llvm/ObjCopy/XCOFF/XCOFFConfig.h [10:11]
+ include/llvm/ObjCopy/XCOFF/XCOFFObjcopy.h [10:11]
+ include/llvm/ObjCopy/wasm/WasmConfig.h [10:11]
+ include/llvm/ObjCopy/wasm/WasmObjcopy.h [10:11]
+ include/llvm/Object/Archive.h [10:11]
+ include/llvm/Object/ArchiveWriter.h [10:11]
+ include/llvm/Object/Binary.h [10:11]
+ include/llvm/Object/BuildID.h [10:11]
+ include/llvm/Object/COFF.h [10:11]
+ include/llvm/Object/COFFImportFile.h [10:11]
+ include/llvm/Object/COFFModuleDefinition.h [10:11]
+ include/llvm/Object/CVDebugRecord.h [10:11]
+ include/llvm/Object/DXContainer.h [10:11]
+ include/llvm/Object/Decompressor.h [10:11]
+ include/llvm/Object/ELF.h [10:11]
+ include/llvm/Object/ELFObjectFile.h [10:11]
+ include/llvm/Object/ELFTypes.h [10:11]
+ include/llvm/Object/Error.h [10:11]
+ include/llvm/Object/FaultMapParser.h [10:11]
+ include/llvm/Object/IRObjectFile.h [10:11]
+ include/llvm/Object/IRSymtab.h [10:11]
+ include/llvm/Object/MachO.h [10:11]
+ include/llvm/Object/MachOUniversal.h [10:11]
+ include/llvm/Object/MachOUniversalWriter.h [10:11]
+ include/llvm/Object/Minidump.h [10:11]
+ include/llvm/Object/ModuleSymbolTable.h [10:11]
+ include/llvm/Object/ObjectFile.h [10:11]
+ include/llvm/Object/OffloadBinary.h [10:11]
+ include/llvm/Object/RelocationResolver.h [10:11]
+ include/llvm/Object/StackMapParser.h [10:11]
+ include/llvm/Object/SymbolSize.h [10:11]
+ include/llvm/Object/SymbolicFile.h [10:11]
+ include/llvm/Object/TapiFile.h [10:11]
+ include/llvm/Object/TapiUniversal.h [10:11]
+ include/llvm/Object/Wasm.h [10:11]
+ include/llvm/Object/WindowsMachineFlag.h [10:11]
+ include/llvm/Object/WindowsResource.h [10:11]
+ include/llvm/Object/XCOFFObjectFile.h [10:11]
+ include/llvm/ObjectYAML/ArchiveYAML.h [10:11]
+ include/llvm/ObjectYAML/COFFYAML.h [10:11]
+ include/llvm/ObjectYAML/CodeViewYAMLDebugSections.h [10:11]
+ include/llvm/ObjectYAML/CodeViewYAMLSymbols.h [10:11]
+ include/llvm/ObjectYAML/CodeViewYAMLTypeHashing.h [10:11]
+ include/llvm/ObjectYAML/CodeViewYAMLTypes.h [10:11]
+ include/llvm/ObjectYAML/DWARFEmitter.h [10:11]
+ include/llvm/ObjectYAML/DWARFYAML.h [10:11]
+ include/llvm/ObjectYAML/DXContainerYAML.h [10:11]
+ include/llvm/ObjectYAML/ELFYAML.h [10:11]
+ include/llvm/ObjectYAML/MachOYAML.h [10:11]
+ include/llvm/ObjectYAML/MinidumpYAML.h [10:11]
+ include/llvm/ObjectYAML/ObjectYAML.h [10:11]
+ include/llvm/ObjectYAML/OffloadYAML.h [10:11]
+ include/llvm/ObjectYAML/WasmYAML.h [10:11]
+ include/llvm/ObjectYAML/XCOFFYAML.h [10:11]
+ include/llvm/ObjectYAML/YAML.h [10:11]
+ include/llvm/ObjectYAML/yaml2obj.h [10:11]
+ include/llvm/Option/Arg.h [10:11]
+ include/llvm/Option/ArgList.h [10:11]
+ include/llvm/Option/OptParser.td [3:4]
+ include/llvm/Option/OptSpecifier.h [10:11]
+ include/llvm/Option/OptTable.h [10:11]
+ include/llvm/Option/Option.h [10:11]
+ include/llvm/Pass.h [10:11]
+ include/llvm/PassAnalysisSupport.h [10:11]
+ include/llvm/PassInfo.h [10:11]
+ include/llvm/PassRegistry.h [10:11]
+ include/llvm/PassSupport.h [10:11]
+ include/llvm/Passes/OptimizationLevel.h [10:11]
+ include/llvm/Passes/PassBuilder.h [10:11]
+ include/llvm/Passes/PassPlugin.h [10:11]
+ include/llvm/Passes/StandardInstrumentations.h [10:11]
+ include/llvm/ProfileData/Coverage/CoverageMapping.h [10:11]
+ include/llvm/ProfileData/Coverage/CoverageMappingReader.h [10:11]
+ include/llvm/ProfileData/Coverage/CoverageMappingWriter.h [10:11]
+ include/llvm/ProfileData/GCOV.h [10:11]
+ include/llvm/ProfileData/InstrProf.h [10:11]
+ include/llvm/ProfileData/InstrProfCorrelator.h [10:11]
+ include/llvm/ProfileData/InstrProfReader.h [10:11]
+ include/llvm/ProfileData/InstrProfWriter.h [10:11]
+ include/llvm/ProfileData/ProfileCommon.h [10:11]
+ include/llvm/ProfileData/RawMemProfReader.h [12:13]
+ include/llvm/ProfileData/SampleProf.h [10:11]
+ include/llvm/ProfileData/SampleProfReader.h [10:11]
+ include/llvm/ProfileData/SampleProfWriter.h [10:11]
+ include/llvm/Remarks/BitstreamRemarkContainer.h [10:11]
+ include/llvm/Remarks/BitstreamRemarkParser.h [10:11]
+ include/llvm/Remarks/BitstreamRemarkSerializer.h [10:11]
+ include/llvm/Remarks/HotnessThresholdParser.h [10:11]
+ include/llvm/Remarks/Remark.h [10:11]
+ include/llvm/Remarks/RemarkFormat.h [10:11]
+ include/llvm/Remarks/RemarkLinker.h [10:11]
+ include/llvm/Remarks/RemarkParser.h [10:11]
+ include/llvm/Remarks/RemarkSerializer.h [10:11]
+ include/llvm/Remarks/RemarkStreamer.h [10:11]
+ include/llvm/Remarks/RemarkStringTable.h [10:11]
+ include/llvm/Remarks/YAMLRemarkSerializer.h [10:11]
+ include/llvm/Support/AArch64TargetParser.h [10:11]
+ include/llvm/Support/AMDGPUMetadata.h [10:11]
+ include/llvm/Support/AMDHSAKernelDescriptor.h [10:11]
+ include/llvm/Support/ARMAttributeParser.h [10:11]
+ include/llvm/Support/ARMBuildAttributes.h [10:11]
+ include/llvm/Support/ARMEHABI.h [10:11]
+ include/llvm/Support/ARMTargetParser.h [10:11]
+ include/llvm/Support/ARMTargetParserCommon.h [10:11]
+ include/llvm/Support/ARMWinEH.h [10:11]
+ include/llvm/Support/AlignOf.h [10:11]
+ include/llvm/Support/Alignment.h [10:11]
+ include/llvm/Support/Allocator.h [10:11]
+ include/llvm/Support/AllocatorBase.h [10:11]
+ include/llvm/Support/ArrayRecycler.h [10:11]
+ include/llvm/Support/Atomic.h [10:11]
+ include/llvm/Support/AtomicOrdering.h [10:11]
+ include/llvm/Support/AutoConvert.h [10:11]
+ include/llvm/Support/Automaton.h [10:11]
+ include/llvm/Support/BCD.h [10:11]
+ include/llvm/Support/BLAKE3.h [10:11]
+ include/llvm/Support/Base64.h [10:11]
+ include/llvm/Support/BinaryByteStream.h [10:11]
+ include/llvm/Support/BinaryItemStream.h [10:11]
+ include/llvm/Support/BinaryStream.h [10:11]
+ include/llvm/Support/BinaryStreamArray.h [10:11]
+ include/llvm/Support/BinaryStreamError.h [10:11]
+ include/llvm/Support/BinaryStreamReader.h [10:11]
+ include/llvm/Support/BinaryStreamRef.h [10:11]
+ include/llvm/Support/BinaryStreamWriter.h [10:11]
+ include/llvm/Support/BlockFrequency.h [10:11]
+ include/llvm/Support/BranchProbability.h [10:11]
+ include/llvm/Support/BuryPointer.h [10:11]
+ include/llvm/Support/CBindingWrapping.h [10:11]
+ include/llvm/Support/CFGDiff.h [10:11]
+ include/llvm/Support/CFGUpdate.h [10:11]
+ include/llvm/Support/COM.h [10:11]
+ include/llvm/Support/CRC.h [10:11]
+ include/llvm/Support/CSKYAttributeParser.h [10:11]
+ include/llvm/Support/CSKYAttributes.h [10:11]
+ include/llvm/Support/CSKYTargetParser.h [10:11]
+ include/llvm/Support/CachePruning.h [10:11]
+ include/llvm/Support/Caching.h [10:11]
+ include/llvm/Support/Capacity.h [10:11]
+ include/llvm/Support/Casting.h [10:11]
+ include/llvm/Support/CheckedArithmetic.h [10:11]
+ include/llvm/Support/Chrono.h [10:11]
+ include/llvm/Support/CodeGen.h [10:11]
+ include/llvm/Support/CodeGenCoverage.h [10:11]
+ include/llvm/Support/CommandLine.h [10:11]
+ include/llvm/Support/Compiler.h [10:11]
+ include/llvm/Support/Compression.h [10:11]
+ include/llvm/Support/CrashRecoveryContext.h [10:11]
+ include/llvm/Support/DJB.h [10:11]
+ include/llvm/Support/DOTGraphTraits.h [10:11]
+ include/llvm/Support/DXILOperationCommon.h [10:11]
+ include/llvm/Support/DataExtractor.h [10:11]
+ include/llvm/Support/DataTypes.h [10:11]
+ include/llvm/Support/Debug.h [10:11]
+ include/llvm/Support/DebugCounter.h [10:11]
+ include/llvm/Support/Discriminator.h [10:11]
+ include/llvm/Support/DivisionByConstantInfo.h [10:11]
+ include/llvm/Support/Duration.h [10:11]
+ include/llvm/Support/DynamicLibrary.h [10:11]
+ include/llvm/Support/ELFAttributeParser.h [10:11]
+ include/llvm/Support/ELFAttributes.h [10:11]
+ include/llvm/Support/Endian.h [10:11]
+ include/llvm/Support/EndianStream.h [10:11]
+ include/llvm/Support/Errc.h [10:11]
+ include/llvm/Support/Errno.h [10:11]
+ include/llvm/Support/Error.h [10:11]
+ include/llvm/Support/ErrorHandling.h [10:11]
+ include/llvm/Support/ErrorOr.h [10:11]
+ include/llvm/Support/ExitCodes.h [10:11]
+ include/llvm/Support/ExtensibleRTTI.h [10:11]
+ include/llvm/Support/FileCollector.h [10:11]
+ include/llvm/Support/FileOutputBuffer.h [10:11]
+ include/llvm/Support/FileSystem.h [10:11]
+ include/llvm/Support/FileSystem/UniqueID.h [10:11]
+ include/llvm/Support/FileUtilities.h [10:11]
+ include/llvm/Support/Format.h [10:11]
+ include/llvm/Support/FormatAdapters.h [10:11]
+ include/llvm/Support/FormatCommon.h [10:11]
+ include/llvm/Support/FormatProviders.h [10:11]
+ include/llvm/Support/FormatVariadic.h [10:11]
+ include/llvm/Support/FormatVariadicDetails.h [10:11]
+ include/llvm/Support/FormattedStream.h [10:11]
+ include/llvm/Support/GenericDomTree.h [10:11]
+ include/llvm/Support/GenericDomTreeConstruction.h [10:11]
+ include/llvm/Support/GenericIteratedDominanceFrontier.h [10:11]
+ include/llvm/Support/GlobPattern.h [10:11]
+ include/llvm/Support/GraphWriter.h [10:11]
+ include/llvm/Support/HashBuilder.h [10:11]
+ include/llvm/Support/Host.h [10:11]
+ include/llvm/Support/InitLLVM.h [10:11]
+ include/llvm/Support/InstructionCost.h [10:11]
+ include/llvm/Support/ItaniumManglingCanonicalizer.h [10:11]
+ include/llvm/Support/JSON.h [10:11]
+ include/llvm/Support/KnownBits.h [10:11]
+ include/llvm/Support/LEB128.h [10:11]
+ include/llvm/Support/LineIterator.h [10:11]
+ include/llvm/Support/LockFileManager.h [10:11]
+ include/llvm/Support/LoongArchTargetParser.h [10:11]
+ include/llvm/Support/LowLevelTypeImpl.h [10:11]
+ include/llvm/Support/MSP430AttributeParser.h [10:11]
+ include/llvm/Support/MSP430Attributes.h [10:11]
+ include/llvm/Support/MSVCErrorWorkarounds.h [10:11]
+ include/llvm/Support/MachineValueType.h [10:11]
+ include/llvm/Support/ManagedStatic.h [10:11]
+ include/llvm/Support/MathExtras.h [10:11]
+ include/llvm/Support/MemAlloc.h [10:11]
+ include/llvm/Support/Memory.h [10:11]
+ include/llvm/Support/MemoryBuffer.h [10:11]
+ include/llvm/Support/MemoryBufferRef.h [10:11]
+ include/llvm/Support/MipsABIFlags.h [10:11]
+ include/llvm/Support/ModRef.h [10:11]
+ include/llvm/Support/Mutex.h [10:11]
+ include/llvm/Support/NativeFormatting.h [10:11]
+ include/llvm/Support/OnDiskHashTable.h [10:11]
+ include/llvm/Support/OptimizedStructLayout.h [10:11]
+ include/llvm/Support/PGOOptions.h [10:11]
+ include/llvm/Support/Parallel.h [10:11]
+ include/llvm/Support/Path.h [10:11]
+ include/llvm/Support/PluginLoader.h [10:11]
+ include/llvm/Support/PointerLikeTypeTraits.h [10:11]
+ include/llvm/Support/PrettyStackTrace.h [10:11]
+ include/llvm/Support/Printable.h [10:11]
+ include/llvm/Support/Process.h [10:11]
+ include/llvm/Support/Program.h [10:11]
+ include/llvm/Support/RISCVAttributeParser.h [10:11]
+ include/llvm/Support/RISCVAttributes.h [10:11]
+ include/llvm/Support/RISCVISAInfo.h [10:11]
+ include/llvm/Support/RWMutex.h [10:11]
+ include/llvm/Support/RandomNumberGenerator.h [10:11]
+ include/llvm/Support/Recycler.h [10:11]
+ include/llvm/Support/RecyclingAllocator.h [10:11]
+ include/llvm/Support/Regex.h [10:11]
+ include/llvm/Support/Registry.h [10:11]
+ include/llvm/Support/SHA1.h [10:11]
+ include/llvm/Support/SHA256.h [10:11]
+ include/llvm/Support/SMLoc.h [10:11]
+ include/llvm/Support/SMTAPI.h [10:11]
+ include/llvm/Support/SaveAndRestore.h [10:11]
+ include/llvm/Support/ScaledNumber.h [10:11]
+ include/llvm/Support/ScopedPrinter.h [10:11]
+ include/llvm/Support/Signals.h [10:11]
+ include/llvm/Support/Signposts.h [10:11]
+ include/llvm/Support/SmallVectorMemoryBuffer.h [10:11]
+ include/llvm/Support/SourceMgr.h [10:11]
+ include/llvm/Support/SpecialCaseList.h [10:11]
+ include/llvm/Support/StringSaver.h [10:11]
+ include/llvm/Support/SuffixTree.h [10:11]
+ include/llvm/Support/SwapByteOrder.h [10:11]
+ include/llvm/Support/SymbolRemappingReader.h [10:11]
+ include/llvm/Support/SystemUtils.h [10:11]
+ include/llvm/Support/TarWriter.h [10:11]
+ include/llvm/Support/TargetOpcodes.def [3:4]
+ include/llvm/Support/TargetParser.h [10:11]
+ include/llvm/Support/TargetSelect.h [10:11]
+ include/llvm/Support/TaskQueue.h [10:11]
+ include/llvm/Support/ThreadPool.h [10:11]
+ include/llvm/Support/Threading.h [10:11]
+ include/llvm/Support/TimeProfiler.h [10:11]
+ include/llvm/Support/Timer.h [10:11]
+ include/llvm/Support/ToolOutputFile.h [10:11]
+ include/llvm/Support/TrailingObjects.h [10:11]
+ include/llvm/Support/TrigramIndex.h [10:11]
+ include/llvm/Support/TypeName.h [10:11]
+ include/llvm/Support/TypeSize.h [10:11]
+ include/llvm/Support/Unicode.h [10:11]
+ include/llvm/Support/UnicodeCharRanges.h [10:11]
+ include/llvm/Support/Valgrind.h [10:11]
+ include/llvm/Support/VersionTuple.h [10:11]
+ include/llvm/Support/VirtualFileSystem.h [10:11]
+ include/llvm/Support/Watchdog.h [10:11]
+ include/llvm/Support/Win64EH.h [10:11]
+ include/llvm/Support/Windows/WindowsSupport.h [10:11]
+ include/llvm/Support/WindowsError.h [10:11]
+ include/llvm/Support/WithColor.h [10:11]
+ include/llvm/Support/X86DisassemblerDecoderCommon.h [10:11]
+ include/llvm/Support/X86TargetParser.def [3:4]
+ include/llvm/Support/X86TargetParser.h [10:11]
+ include/llvm/Support/YAMLParser.h [10:11]
+ include/llvm/Support/YAMLTraits.h [10:11]
+ include/llvm/Support/circular_raw_ostream.h [10:11]
+ include/llvm/Support/raw_os_ostream.h [10:11]
+ include/llvm/Support/raw_ostream.h [10:11]
+ include/llvm/Support/raw_sha1_ostream.h [10:11]
+ include/llvm/Support/thread.h [10:11]
+ include/llvm/Support/type_traits.h [10:11]
+ include/llvm/TableGen/Automaton.td [3:4]
+ include/llvm/TableGen/Error.h [10:11]
+ include/llvm/TableGen/Main.h [10:11]
+ include/llvm/TableGen/Parser.h [10:11]
+ include/llvm/TableGen/Record.h [10:11]
+ include/llvm/TableGen/SearchableTable.td [3:4]
+ include/llvm/TableGen/SetTheory.h [10:11]
+ include/llvm/TableGen/StringMatcher.h [10:11]
+ include/llvm/TableGen/StringToOffsetTable.h [10:11]
+ include/llvm/TableGen/TableGenBackend.h [10:11]
+ include/llvm/Target/CGPassBuilderOption.h [10:11]
+ include/llvm/Target/CodeGenCWrappers.h [10:11]
+ include/llvm/Target/GenericOpcodes.td [3:4]
+ include/llvm/Target/GlobalISel/Combine.td [3:4]
+ include/llvm/Target/GlobalISel/RegisterBank.td [3:4]
+ include/llvm/Target/GlobalISel/SelectionDAGCompat.td [3:4]
+ include/llvm/Target/GlobalISel/Target.td [3:4]
+ include/llvm/Target/Target.td [3:4]
+ include/llvm/Target/TargetCallingConv.td [3:4]
+ include/llvm/Target/TargetInstrPredicate.td [3:4]
+ include/llvm/Target/TargetIntrinsicInfo.h [10:11]
+ include/llvm/Target/TargetItinerary.td [3:4]
+ include/llvm/Target/TargetLoweringObjectFile.h [10:11]
+ include/llvm/Target/TargetMachine.h [10:11]
+ include/llvm/Target/TargetOptions.h [10:11]
+ include/llvm/Target/TargetPfmCounters.td [3:4]
+ include/llvm/Target/TargetSchedule.td [3:4]
+ include/llvm/Target/TargetSelectionDAG.td [3:4]
+ include/llvm/TargetParser/AArch64TargetParser.h [10:11]
+ include/llvm/TargetParser/ARMTargetParser.def [3:4]
+ include/llvm/TargetParser/ARMTargetParser.h [10:11]
+ include/llvm/TargetParser/ARMTargetParserCommon.h [10:11]
+ include/llvm/TargetParser/CSKYTargetParser.def [3:4]
+ include/llvm/TargetParser/CSKYTargetParser.h [11:12]
+ include/llvm/TargetParser/Host.h [10:11]
+ include/llvm/TargetParser/LoongArchTargetParser.h [10:11]
+ include/llvm/TargetParser/RISCVTargetParser.h [10:11]
+ include/llvm/TargetParser/TargetParser.h [10:11]
+ include/llvm/TargetParser/Triple.h [10:11]
+ include/llvm/TargetParser/X86TargetParser.def [3:4]
+ include/llvm/TargetParser/X86TargetParser.h [10:11]
+ include/llvm/Testing/ADT/StringMap.h [10:11]
+ include/llvm/Testing/ADT/StringMapEntry.h [10:11]
+ include/llvm/Testing/Annotations/Annotations.h [10:11]
+ include/llvm/Testing/Support/Error.h [10:11]
+ include/llvm/Testing/Support/SupportHelpers.h [10:11]
+ include/llvm/TextAPI/Architecture.def [3:4]
+ include/llvm/TextAPI/Architecture.h [10:11]
+ include/llvm/TextAPI/ArchitectureSet.h [10:11]
+ include/llvm/TextAPI/InterfaceFile.h [10:11]
+ include/llvm/TextAPI/PackedVersion.h [10:11]
+ include/llvm/TextAPI/Platform.h [10:11]
+ include/llvm/TextAPI/Symbol.h [10:11]
+ include/llvm/TextAPI/Target.h [10:11]
+ include/llvm/TextAPI/TextAPIReader.h [10:11]
+ include/llvm/TextAPI/TextAPIWriter.h [10:11]
+ include/llvm/ToolDrivers/llvm-dlltool/DlltoolDriver.h [10:11]
+ include/llvm/ToolDrivers/llvm-lib/LibDriver.h [10:11]
+ include/llvm/Transforms/AggressiveInstCombine/AggressiveInstCombine.h [10:11]
+ include/llvm/Transforms/CFGuard.h [10:11]
+ include/llvm/Transforms/Coroutines/CoroCleanup.h [10:11]
+ include/llvm/Transforms/Coroutines/CoroConditionalWrapper.h [10:11]
+ include/llvm/Transforms/Coroutines/CoroEarly.h [10:11]
+ include/llvm/Transforms/Coroutines/CoroElide.h [10:11]
+ include/llvm/Transforms/Coroutines/CoroSplit.h [10:11]
+ include/llvm/Transforms/IPO.h [10:11]
+ include/llvm/Transforms/IPO/AlwaysInliner.h [10:11]
+ include/llvm/Transforms/IPO/Annotation2Metadata.h [10:11]
+ include/llvm/Transforms/IPO/ArgumentPromotion.h [10:11]
+ include/llvm/Transforms/IPO/Attributor.h [10:11]
+ include/llvm/Transforms/IPO/BlockExtractor.h [10:11]
+ include/llvm/Transforms/IPO/CalledValuePropagation.h [10:11]
+ include/llvm/Transforms/IPO/ConstantMerge.h [10:11]
+ include/llvm/Transforms/IPO/CrossDSOCFI.h [10:11]
+ include/llvm/Transforms/IPO/DeadArgumentElimination.h [10:11]
+ include/llvm/Transforms/IPO/ElimAvailExtern.h [10:11]
+ include/llvm/Transforms/IPO/ExtractGV.h [10:11]
+ include/llvm/Transforms/IPO/ForceFunctionAttrs.h [10:11]
+ include/llvm/Transforms/IPO/FunctionAttrs.h [10:11]
+ include/llvm/Transforms/IPO/FunctionImport.h [10:11]
+ include/llvm/Transforms/IPO/FunctionSpecialization.h [10:11]
+ include/llvm/Transforms/IPO/GlobalDCE.h [10:11]
+ include/llvm/Transforms/IPO/GlobalOpt.h [10:11]
+ include/llvm/Transforms/IPO/GlobalSplit.h [10:11]
+ include/llvm/Transforms/IPO/HotColdSplitting.h [10:11]
+ include/llvm/Transforms/IPO/IROutliner.h [10:11]
+ include/llvm/Transforms/IPO/InferFunctionAttrs.h [10:11]
+ include/llvm/Transforms/IPO/Inliner.h [10:11]
+ include/llvm/Transforms/IPO/Internalize.h [10:11]
+ include/llvm/Transforms/IPO/LoopExtractor.h [10:11]
+ include/llvm/Transforms/IPO/LowerTypeTests.h [10:11]
+ include/llvm/Transforms/IPO/MergeFunctions.h [10:11]
+ include/llvm/Transforms/IPO/ModuleInliner.h [10:11]
+ include/llvm/Transforms/IPO/OpenMPOpt.h [10:11]
+ include/llvm/Transforms/IPO/PartialInlining.h [10:11]
+ include/llvm/Transforms/IPO/PassManagerBuilder.h [10:11]
+ include/llvm/Transforms/IPO/ProfiledCallGraph.h [10:11]
+ include/llvm/Transforms/IPO/SCCP.h [10:11]
+ include/llvm/Transforms/IPO/SampleContextTracker.h [10:11]
+ include/llvm/Transforms/IPO/SampleProfile.h [10:11]
+ include/llvm/Transforms/IPO/SampleProfileProbe.h [10:11]
+ include/llvm/Transforms/IPO/StripDeadPrototypes.h [10:11]
+ include/llvm/Transforms/IPO/StripSymbols.h [10:11]
+ include/llvm/Transforms/IPO/SyntheticCountsPropagation.h [10:11]
+ include/llvm/Transforms/IPO/ThinLTOBitcodeWriter.h [10:11]
+ include/llvm/Transforms/IPO/WholeProgramDevirt.h [10:11]
+ include/llvm/Transforms/InstCombine/InstCombine.h [10:11]
+ include/llvm/Transforms/InstCombine/InstCombiner.h [10:11]
+ include/llvm/Transforms/Instrumentation.h [10:11]
+ include/llvm/Transforms/Instrumentation/AddressSanitizer.h [10:11]
+ include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h [10:11]
+ include/llvm/Transforms/Instrumentation/AddressSanitizerOptions.h [10:11]
+ include/llvm/Transforms/Instrumentation/BoundsChecking.h [10:11]
+ include/llvm/Transforms/Instrumentation/CGProfile.h [10:11]
+ include/llvm/Transforms/Instrumentation/ControlHeightReduction.h [10:11]
+ include/llvm/Transforms/Instrumentation/DataFlowSanitizer.h [10:11]
+ include/llvm/Transforms/Instrumentation/GCOVProfiler.h [10:11]
+ include/llvm/Transforms/Instrumentation/HWAddressSanitizer.h [10:11]
+ include/llvm/Transforms/Instrumentation/InstrOrderFile.h [10:11]
+ include/llvm/Transforms/Instrumentation/InstrProfiling.h [10:11]
+ include/llvm/Transforms/Instrumentation/KCFI.h [10:11]
+ include/llvm/Transforms/Instrumentation/MemProfiler.h [10:11]
+ include/llvm/Transforms/Instrumentation/MemorySanitizer.h [10:11]
+ include/llvm/Transforms/Instrumentation/PGOInstrumentation.h [10:11]
+ include/llvm/Transforms/Instrumentation/PoisonChecking.h [10:11]
+ include/llvm/Transforms/Instrumentation/SanitizerBinaryMetadata.h [10:11]
+ include/llvm/Transforms/Instrumentation/SanitizerCoverage.h [12:13]
+ include/llvm/Transforms/Instrumentation/ThreadSanitizer.h [10:11]
+ include/llvm/Transforms/ObjCARC.h [10:11]
+ include/llvm/Transforms/Scalar.h [10:11]
+ include/llvm/Transforms/Scalar/ADCE.h [10:11]
+ include/llvm/Transforms/Scalar/AlignmentFromAssumptions.h [10:11]
+ include/llvm/Transforms/Scalar/AnnotationRemarks.h [10:11]
+ include/llvm/Transforms/Scalar/BDCE.h [10:11]
+ include/llvm/Transforms/Scalar/CallSiteSplitting.h [10:11]
+ include/llvm/Transforms/Scalar/ConstantHoisting.h [10:11]
+ include/llvm/Transforms/Scalar/ConstraintElimination.h [10:11]
+ include/llvm/Transforms/Scalar/CorrelatedValuePropagation.h [10:11]
+ include/llvm/Transforms/Scalar/DCE.h [10:11]
+ include/llvm/Transforms/Scalar/DFAJumpThreading.h [10:11]
+ include/llvm/Transforms/Scalar/DeadStoreElimination.h [10:11]
+ include/llvm/Transforms/Scalar/DivRemPairs.h [10:11]
+ include/llvm/Transforms/Scalar/EarlyCSE.h [10:11]
+ include/llvm/Transforms/Scalar/FlattenCFG.h [10:11]
+ include/llvm/Transforms/Scalar/Float2Int.h [10:11]
+ include/llvm/Transforms/Scalar/GVN.h [10:11]
+ include/llvm/Transforms/Scalar/GVNExpression.h [10:11]
+ include/llvm/Transforms/Scalar/GuardWidening.h [10:11]
+ include/llvm/Transforms/Scalar/IVUsersPrinter.h [10:11]
+ include/llvm/Transforms/Scalar/IndVarSimplify.h [10:11]
+ include/llvm/Transforms/Scalar/InductiveRangeCheckElimination.h [10:11]
+ include/llvm/Transforms/Scalar/InferAddressSpaces.h [10:11]
+ include/llvm/Transforms/Scalar/InstSimplifyPass.h [10:11]
+ include/llvm/Transforms/Scalar/JumpThreading.h [10:11]
+ include/llvm/Transforms/Scalar/LICM.h [10:11]
+ include/llvm/Transforms/Scalar/LoopAccessAnalysisPrinter.h [10:11]
+ include/llvm/Transforms/Scalar/LoopBoundSplit.h [10:11]
+ include/llvm/Transforms/Scalar/LoopDataPrefetch.h [11:12]
+ include/llvm/Transforms/Scalar/LoopDeletion.h [10:11]
+ include/llvm/Transforms/Scalar/LoopDistribute.h [10:11]
+ include/llvm/Transforms/Scalar/LoopFlatten.h [10:11]
+ include/llvm/Transforms/Scalar/LoopFuse.h [10:11]
+ include/llvm/Transforms/Scalar/LoopIdiomRecognize.h [10:11]
+ include/llvm/Transforms/Scalar/LoopInstSimplify.h [10:11]
+ include/llvm/Transforms/Scalar/LoopInterchange.h [10:11]
+ include/llvm/Transforms/Scalar/LoopLoadElimination.h [10:11]
+ include/llvm/Transforms/Scalar/LoopPassManager.h [10:11]
+ include/llvm/Transforms/Scalar/LoopPredication.h [10:11]
+ include/llvm/Transforms/Scalar/LoopReroll.h [10:11]
+ include/llvm/Transforms/Scalar/LoopRotation.h [10:11]
+ include/llvm/Transforms/Scalar/LoopSimplifyCFG.h [10:11]
+ include/llvm/Transforms/Scalar/LoopSink.h [10:11]
+ include/llvm/Transforms/Scalar/LoopStrengthReduce.h [10:11]
+ include/llvm/Transforms/Scalar/LoopUnrollAndJamPass.h [10:11]
+ include/llvm/Transforms/Scalar/LoopUnrollPass.h [10:11]
+ include/llvm/Transforms/Scalar/LoopVersioningLICM.h [10:11]
+ include/llvm/Transforms/Scalar/LowerAtomicPass.h [10:11]
+ include/llvm/Transforms/Scalar/LowerConstantIntrinsics.h [10:11]
+ include/llvm/Transforms/Scalar/LowerExpectIntrinsic.h [10:11]
+ include/llvm/Transforms/Scalar/LowerGuardIntrinsic.h [10:11]
+ include/llvm/Transforms/Scalar/LowerMatrixIntrinsics.h [10:11]
+ include/llvm/Transforms/Scalar/LowerWidenableCondition.h [10:11]
+ include/llvm/Transforms/Scalar/MakeGuardsExplicit.h [10:11]
+ include/llvm/Transforms/Scalar/MemCpyOptimizer.h [10:11]
+ include/llvm/Transforms/Scalar/MergeICmps.h [10:11]
+ include/llvm/Transforms/Scalar/MergedLoadStoreMotion.h [10:11]
+ include/llvm/Transforms/Scalar/NaryReassociate.h [10:11]
+ include/llvm/Transforms/Scalar/NewGVN.h [10:11]
+ include/llvm/Transforms/Scalar/PartiallyInlineLibCalls.h [10:11]
+ include/llvm/Transforms/Scalar/Reassociate.h [10:11]
+ include/llvm/Transforms/Scalar/Reg2Mem.h [10:11]
+ include/llvm/Transforms/Scalar/RewriteStatepointsForGC.h [10:11]
+ include/llvm/Transforms/Scalar/SCCP.h [10:11]
+ include/llvm/Transforms/Scalar/SROA.h [10:11]
+ include/llvm/Transforms/Scalar/ScalarizeMaskedMemIntrin.h [11:12]
+ include/llvm/Transforms/Scalar/Scalarizer.h [10:11]
+ include/llvm/Transforms/Scalar/SeparateConstOffsetFromGEP.h [10:11]
+ include/llvm/Transforms/Scalar/SimpleLoopUnswitch.h [10:11]
+ include/llvm/Transforms/Scalar/SimplifyCFG.h [10:11]
+ include/llvm/Transforms/Scalar/Sink.h [10:11]
+ include/llvm/Transforms/Scalar/SpeculativeExecution.h [10:11]
+ include/llvm/Transforms/Scalar/StraightLineStrengthReduce.h [10:11]
+ include/llvm/Transforms/Scalar/StructurizeCFG.h [10:11]
+ include/llvm/Transforms/Scalar/TLSVariableHoist.h [10:11]
+ include/llvm/Transforms/Scalar/TailRecursionElimination.h [10:11]
+ include/llvm/Transforms/Scalar/WarnMissedTransforms.h [10:11]
+ include/llvm/Transforms/Utils.h [10:11]
+ include/llvm/Transforms/Utils/AMDGPUEmitPrintf.h [10:11]
+ include/llvm/Transforms/Utils/ASanStackFrameLayout.h [10:11]
+ include/llvm/Transforms/Utils/AddDiscriminators.h [10:11]
+ include/llvm/Transforms/Utils/AssumeBundleBuilder.h [10:11]
+ include/llvm/Transforms/Utils/BasicBlockUtils.h [10:11]
+ include/llvm/Transforms/Utils/BreakCriticalEdges.h [10:11]
+ include/llvm/Transforms/Utils/BuildLibCalls.h [10:11]
+ include/llvm/Transforms/Utils/BypassSlowDivision.h [10:11]
+ include/llvm/Transforms/Utils/CallGraphUpdater.h [10:11]
+ include/llvm/Transforms/Utils/CallPromotionUtils.h [10:11]
+ include/llvm/Transforms/Utils/CanonicalizeAliases.h [10:11]
+ include/llvm/Transforms/Utils/CanonicalizeFreezeInLoops.h [10:11]
+ include/llvm/Transforms/Utils/Cloning.h [10:11]
+ include/llvm/Transforms/Utils/CodeExtractor.h [10:11]
+ include/llvm/Transforms/Utils/CodeLayout.h [10:11]
+ include/llvm/Transforms/Utils/CodeMoverUtils.h [10:11]
+ include/llvm/Transforms/Utils/CtorUtils.h [10:11]
+ include/llvm/Transforms/Utils/Debugify.h [10:11]
+ include/llvm/Transforms/Utils/EntryExitInstrumenter.h [10:11]
+ include/llvm/Transforms/Utils/EscapeEnumerator.h [10:11]
+ include/llvm/Transforms/Utils/Evaluator.h [10:11]
+ include/llvm/Transforms/Utils/FixIrreducible.h [10:11]
+ include/llvm/Transforms/Utils/FunctionComparator.h [10:11]
+ include/llvm/Transforms/Utils/FunctionImportUtils.h [10:11]
+ include/llvm/Transforms/Utils/GlobalStatus.h [10:11]
+ include/llvm/Transforms/Utils/GuardUtils.h [10:11]
+ include/llvm/Transforms/Utils/HelloWorld.h [10:11]
+ include/llvm/Transforms/Utils/InjectTLIMappings.h [10:11]
+ include/llvm/Transforms/Utils/InstructionNamer.h [10:11]
+ include/llvm/Transforms/Utils/InstructionWorklist.h [10:11]
+ include/llvm/Transforms/Utils/IntegerDivision.h [10:11]
+ include/llvm/Transforms/Utils/LCSSA.h [10:11]
+ include/llvm/Transforms/Utils/LibCallsShrinkWrap.h [10:11]
+ include/llvm/Transforms/Utils/Local.h [10:11]
+ include/llvm/Transforms/Utils/LoopPeel.h [10:11]
+ include/llvm/Transforms/Utils/LoopRotationUtils.h [10:11]
+ include/llvm/Transforms/Utils/LoopSimplify.h [10:11]
+ include/llvm/Transforms/Utils/LoopUtils.h [10:11]
+ include/llvm/Transforms/Utils/LoopVersioning.h [10:11]
+ include/llvm/Transforms/Utils/LowerAtomic.h [10:11]
+ include/llvm/Transforms/Utils/LowerGlobalDtors.h [10:11]
+ include/llvm/Transforms/Utils/LowerIFunc.h [10:11]
+ include/llvm/Transforms/Utils/LowerInvoke.h [10:11]
+ include/llvm/Transforms/Utils/LowerMemIntrinsics.h [10:11]
+ include/llvm/Transforms/Utils/LowerSwitch.h [10:11]
+ include/llvm/Transforms/Utils/MatrixUtils.h [10:11]
+ include/llvm/Transforms/Utils/Mem2Reg.h [10:11]
+ include/llvm/Transforms/Utils/MemoryOpRemark.h [10:11]
+ include/llvm/Transforms/Utils/MemoryTaggingSupport.h [9:10]
+ include/llvm/Transforms/Utils/MetaRenamer.h [10:11]
+ include/llvm/Transforms/Utils/MisExpect.h [10:11]
+ include/llvm/Transforms/Utils/ModuleUtils.h [10:11]
+ include/llvm/Transforms/Utils/NameAnonGlobals.h [10:11]
+ include/llvm/Transforms/Utils/PredicateInfo.h [10:11]
+ include/llvm/Transforms/Utils/PromoteMemToReg.h [10:11]
+ include/llvm/Transforms/Utils/RelLookupTableConverter.h [10:11]
+ include/llvm/Transforms/Utils/SCCPSolver.h [10:11]
+ include/llvm/Transforms/Utils/SSAUpdater.h [10:11]
+ include/llvm/Transforms/Utils/SSAUpdaterBulk.h [10:11]
+ include/llvm/Transforms/Utils/SSAUpdaterImpl.h [10:11]
+ include/llvm/Transforms/Utils/SampleProfileInference.h [10:11]
+ include/llvm/Transforms/Utils/SampleProfileLoaderBaseImpl.h [10:11]
+ include/llvm/Transforms/Utils/SampleProfileLoaderBaseUtil.h [10:11]
+ include/llvm/Transforms/Utils/SanitizerStats.h [10:11]
+ include/llvm/Transforms/Utils/ScalarEvolutionExpander.h [10:11]
+ include/llvm/Transforms/Utils/SimplifyCFGOptions.h [10:11]
+ include/llvm/Transforms/Utils/SimplifyIndVar.h [10:11]
+ include/llvm/Transforms/Utils/SimplifyLibCalls.h [10:11]
+ include/llvm/Transforms/Utils/SizeOpts.h [10:11]
+ include/llvm/Transforms/Utils/SplitModule.h [10:11]
+ include/llvm/Transforms/Utils/StripGCRelocates.h [10:11]
+ include/llvm/Transforms/Utils/StripNonLineTableDebugInfo.h [10:11]
+ include/llvm/Transforms/Utils/SymbolRewriter.h [10:11]
+ include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h [10:11]
+ include/llvm/Transforms/Utils/UnifyLoopExits.h [10:11]
+ include/llvm/Transforms/Utils/UnrollLoop.h [10:11]
+ include/llvm/Transforms/Utils/VNCoercion.h [10:11]
+ include/llvm/Transforms/Utils/ValueMapper.h [10:11]
+ include/llvm/Transforms/Vectorize.h [10:11]
+ include/llvm/Transforms/Vectorize/LoadStoreVectorizer.h [10:11]
+ include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h [10:11]
+ include/llvm/Transforms/Vectorize/LoopVectorize.h [10:11]
+ include/llvm/Transforms/Vectorize/SLPVectorizer.h [10:11]
+ include/llvm/Transforms/Vectorize/VectorCombine.h [10:11]
+ include/llvm/WindowsDriver/MSVCPaths.h [10:11]
+ include/llvm/WindowsManifest/WindowsManifestMerger.h [10:11]
+ include/llvm/WindowsResource/ResourceProcessor.h [10:11]
+ include/llvm/WindowsResource/ResourceScriptToken.h [10:11]
+ include/llvm/WindowsResource/ResourceScriptTokenList.h [10:11]
+ include/llvm/XRay/BlockIndexer.h [10:11]
+ include/llvm/XRay/BlockPrinter.h [10:11]
+ include/llvm/XRay/BlockVerifier.h [10:11]
+ include/llvm/XRay/FDRLogBuilder.h [10:11]
+ include/llvm/XRay/FDRRecordConsumer.h [10:11]
+ include/llvm/XRay/FDRRecordProducer.h [10:11]
+ include/llvm/XRay/FDRRecords.h [10:11]
+ include/llvm/XRay/FDRTraceExpander.h [10:11]
+ include/llvm/XRay/FDRTraceWriter.h [10:11]
+ include/llvm/XRay/FileHeaderReader.h [10:11]
+ include/llvm/XRay/Graph.h [10:11]
+ include/llvm/XRay/InstrumentationMap.h [10:11]
+ include/llvm/XRay/Profile.h [10:11]
+ include/llvm/XRay/RecordPrinter.h [10:11]
+ include/llvm/XRay/Trace.h [10:11]
+ include/llvm/XRay/XRayRecord.h [10:11]
+ include/llvm/XRay/YAMLXRayRecord.h [10:11]
+ lib/Analysis/AliasAnalysis.cpp [3:4]
+ lib/Analysis/AliasAnalysisEvaluator.cpp [3:4]
+ lib/Analysis/AliasAnalysisSummary.h [3:4]
+ lib/Analysis/AliasSetTracker.cpp [3:4]
+ lib/Analysis/Analysis.cpp [3:4]
+ lib/Analysis/AssumeBundleQueries.cpp [3:4]
+ lib/Analysis/AssumptionCache.cpp [3:4]
+ lib/Analysis/BasicAliasAnalysis.cpp [3:4]
+ lib/Analysis/BlockFrequencyInfo.cpp [3:4]
+ lib/Analysis/BlockFrequencyInfoImpl.cpp [3:4]
+ lib/Analysis/BranchProbabilityInfo.cpp [3:4]
+ lib/Analysis/CFG.cpp [3:4]
+ lib/Analysis/CFGPrinter.cpp [3:4]
+ lib/Analysis/CFGSCCPrinter.cpp [3:4]
+ lib/Analysis/CGSCCPassManager.cpp [3:4]
+ lib/Analysis/CallGraph.cpp [3:4]
+ lib/Analysis/CallGraphSCCPass.cpp [3:4]
+ lib/Analysis/CallPrinter.cpp [3:4]
+ lib/Analysis/CaptureTracking.cpp [3:4]
+ lib/Analysis/CmpInstAnalysis.cpp [3:4]
+ lib/Analysis/CodeMetrics.cpp [3:4]
+ lib/Analysis/ConstantFolding.cpp [3:4]
+ lib/Analysis/ConstraintSystem.cpp [3:4]
+ lib/Analysis/CostModel.cpp [3:4]
+ lib/Analysis/CycleAnalysis.cpp [3:4]
+ lib/Analysis/DDG.cpp [3:4]
+ lib/Analysis/DDGPrinter.cpp [3:4]
+ lib/Analysis/Delinearization.cpp [3:4]
+ lib/Analysis/DemandedBits.cpp [3:4]
+ lib/Analysis/DependenceAnalysis.cpp [3:4]
+ lib/Analysis/DependenceGraphBuilder.cpp [3:4]
+ lib/Analysis/DevelopmentModeInlineAdvisor.cpp [3:4]
+ lib/Analysis/DivergenceAnalysis.cpp [3:4]
+ lib/Analysis/DomPrinter.cpp [3:4]
+ lib/Analysis/DomTreeUpdater.cpp [3:4]
+ lib/Analysis/DominanceFrontier.cpp [3:4]
+ lib/Analysis/EHPersonalities.cpp [3:4]
+ lib/Analysis/FunctionPropertiesAnalysis.cpp [3:4]
+ lib/Analysis/GlobalsModRef.cpp [3:4]
+ lib/Analysis/GuardUtils.cpp [3:4]
+ lib/Analysis/HeatUtils.cpp [3:4]
+ lib/Analysis/IRSimilarityIdentifier.cpp [3:4]
+ lib/Analysis/IVDescriptors.cpp [3:4]
+ lib/Analysis/IVUsers.cpp [3:4]
+ lib/Analysis/ImportedFunctionsInliningStatistics.cpp [3:4]
+ lib/Analysis/IndirectCallPromotionAnalysis.cpp [3:4]
+ lib/Analysis/InlineAdvisor.cpp [3:4]
+ lib/Analysis/InlineCost.cpp [3:4]
+ lib/Analysis/InlineOrder.cpp [3:4]
+ lib/Analysis/InlineSizeEstimatorAnalysis.cpp [3:4]
+ lib/Analysis/InstCount.cpp [3:4]
+ lib/Analysis/InstructionPrecedenceTracking.cpp [3:4]
+ lib/Analysis/InstructionSimplify.cpp [3:4]
+ lib/Analysis/Interval.cpp [3:4]
+ lib/Analysis/IntervalPartition.cpp [3:4]
+ lib/Analysis/LazyBlockFrequencyInfo.cpp [3:4]
+ lib/Analysis/LazyBranchProbabilityInfo.cpp [3:4]
+ lib/Analysis/LazyCallGraph.cpp [3:4]
+ lib/Analysis/LazyValueInfo.cpp [3:4]
+ lib/Analysis/LegacyDivergenceAnalysis.cpp [4:5]
+ lib/Analysis/Lint.cpp [3:4]
+ lib/Analysis/Loads.cpp [3:4]
+ lib/Analysis/Local.cpp [3:4]
+ lib/Analysis/LoopAccessAnalysis.cpp [3:4]
+ lib/Analysis/LoopAnalysisManager.cpp [3:4]
+ lib/Analysis/LoopCacheAnalysis.cpp [5:6]
+ lib/Analysis/LoopInfo.cpp [3:4]
+ lib/Analysis/LoopNestAnalysis.cpp [3:4]
+ lib/Analysis/LoopPass.cpp [3:4]
+ lib/Analysis/LoopUnrollAnalyzer.cpp [3:4]
+ lib/Analysis/MLInlineAdvisor.cpp [3:4]
+ lib/Analysis/MemDepPrinter.cpp [3:4]
+ lib/Analysis/MemDerefPrinter.cpp [3:4]
+ lib/Analysis/MemoryBuiltins.cpp [3:4]
+ lib/Analysis/MemoryDependenceAnalysis.cpp [3:4]
+ lib/Analysis/MemoryLocation.cpp [3:4]
+ lib/Analysis/MemoryProfileInfo.cpp [3:4]
+ lib/Analysis/MemorySSA.cpp [3:4]
+ lib/Analysis/MemorySSAUpdater.cpp [3:4]
+ lib/Analysis/ModelUnderTrainingRunner.cpp [3:4]
+ lib/Analysis/ModuleDebugInfoPrinter.cpp [3:4]
+ lib/Analysis/ModuleSummaryAnalysis.cpp [3:4]
+ lib/Analysis/MustExecute.cpp [3:4]
+ lib/Analysis/NoInferenceModelRunner.cpp [3:4]
+ lib/Analysis/ObjCARCAliasAnalysis.cpp [3:4]
+ lib/Analysis/ObjCARCAnalysisUtils.cpp [3:4]
+ lib/Analysis/ObjCARCInstKind.cpp [3:4]
+ lib/Analysis/OptimizationRemarkEmitter.cpp [3:4]
+ lib/Analysis/OverflowInstAnalysis.cpp [3:4]
+ lib/Analysis/PHITransAddr.cpp [3:4]
+ lib/Analysis/PhiValues.cpp [3:4]
+ lib/Analysis/PostDominators.cpp [3:4]
+ lib/Analysis/ProfileSummaryInfo.cpp [3:4]
+ lib/Analysis/PtrUseVisitor.cpp [3:4]
+ lib/Analysis/RegionInfo.cpp [3:4]
+ lib/Analysis/RegionPass.cpp [3:4]
+ lib/Analysis/RegionPrinter.cpp [3:4]
+ lib/Analysis/ReplayInlineAdvisor.cpp [3:4]
+ lib/Analysis/ScalarEvolution.cpp [3:4]
+ lib/Analysis/ScalarEvolutionAliasAnalysis.cpp [3:4]
+ lib/Analysis/ScalarEvolutionDivision.cpp [3:4]
+ lib/Analysis/ScalarEvolutionNormalization.cpp [3:4]
+ lib/Analysis/ScopedNoAliasAA.cpp [3:4]
+ lib/Analysis/StackLifetime.cpp [3:4]
+ lib/Analysis/StackSafetyAnalysis.cpp [3:4]
+ lib/Analysis/SyncDependenceAnalysis.cpp [3:4]
+ lib/Analysis/SyntheticCountsUtils.cpp [3:4]
+ lib/Analysis/TFLiteUtils.cpp [3:4]
+ lib/Analysis/TargetLibraryInfo.cpp [3:4]
+ lib/Analysis/TargetTransformInfo.cpp [3:4]
+ lib/Analysis/TensorSpec.cpp [3:4]
+ lib/Analysis/Trace.cpp [3:4]
+ lib/Analysis/TrainingLogger.cpp [3:4]
+ lib/Analysis/TypeBasedAliasAnalysis.cpp [3:4]
+ lib/Analysis/TypeMetadataUtils.cpp [3:4]
+ lib/Analysis/UniformityAnalysis.cpp [3:4]
+ lib/Analysis/VFABIDemangling.cpp [3:4]
+ lib/Analysis/ValueLattice.cpp [3:4]
+ lib/Analysis/ValueLatticeUtils.cpp [3:4]
+ lib/Analysis/ValueTracking.cpp [3:4]
+ lib/Analysis/VectorUtils.cpp [3:4]
+ lib/AsmParser/LLLexer.cpp [3:4]
+ lib/AsmParser/LLParser.cpp [3:4]
+ lib/AsmParser/Parser.cpp [3:4]
+ lib/BinaryFormat/AMDGPUMetadataVerifier.cpp [3:4]
+ lib/BinaryFormat/COFF.cpp [3:4]
+ lib/BinaryFormat/DXContainer.cpp [4:5]
+ lib/BinaryFormat/Dwarf.cpp [3:4]
+ lib/BinaryFormat/ELF.cpp [3:4]
+ lib/BinaryFormat/MachO.cpp [3:4]
+ lib/BinaryFormat/Magic.cpp [3:4]
+ lib/BinaryFormat/Minidump.cpp [3:4]
+ lib/BinaryFormat/MsgPackDocument.cpp [3:4]
+ lib/BinaryFormat/MsgPackDocumentYAML.cpp [3:4]
+ lib/BinaryFormat/MsgPackReader.cpp [3:4]
+ lib/BinaryFormat/MsgPackWriter.cpp [3:4]
+ lib/BinaryFormat/Wasm.cpp [3:4]
+ lib/BinaryFormat/XCOFF.cpp [3:4]
+ lib/Bitcode/Reader/BitReader.cpp [3:4]
+ lib/Bitcode/Reader/BitcodeAnalyzer.cpp [3:4]
+ lib/Bitcode/Reader/BitcodeReader.cpp [3:4]
+ lib/Bitcode/Reader/MetadataLoader.cpp [3:4]
+ lib/Bitcode/Reader/MetadataLoader.h [3:4]
+ lib/Bitcode/Reader/ValueList.cpp [3:4]
+ lib/Bitcode/Reader/ValueList.h [3:4]
+ lib/Bitcode/Writer/BitWriter.cpp [3:4]
+ lib/Bitcode/Writer/BitcodeWriter.cpp [3:4]
+ lib/Bitcode/Writer/BitcodeWriterPass.cpp [3:4]
+ lib/Bitcode/Writer/ValueEnumerator.cpp [3:4]
+ lib/Bitcode/Writer/ValueEnumerator.h [3:4]
+ lib/Bitstream/Reader/BitstreamReader.cpp [3:4]
+ lib/CodeGen/AggressiveAntiDepBreaker.cpp [3:4]
+ lib/CodeGen/AggressiveAntiDepBreaker.h [3:4]
+ lib/CodeGen/AllocationOrder.cpp [3:4]
+ lib/CodeGen/AllocationOrder.h [3:4]
+ lib/CodeGen/Analysis.cpp [3:4]
+ lib/CodeGen/AsmPrinter/AIXException.cpp [3:4]
+ lib/CodeGen/AsmPrinter/ARMException.cpp [3:4]
+ lib/CodeGen/AsmPrinter/AccelTable.cpp [3:4]
+ lib/CodeGen/AsmPrinter/AddressPool.cpp [3:4]
+ lib/CodeGen/AsmPrinter/AddressPool.h [3:4]
+ lib/CodeGen/AsmPrinter/AsmPrinter.cpp [3:4]
+ lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp [3:4]
+ lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp [3:4]
+ lib/CodeGen/AsmPrinter/ByteStreamer.h [3:4]
+ lib/CodeGen/AsmPrinter/CodeViewDebug.cpp [3:4]
+ lib/CodeGen/AsmPrinter/CodeViewDebug.h [3:4]
+ lib/CodeGen/AsmPrinter/DIE.cpp [3:4]
+ lib/CodeGen/AsmPrinter/DIEHash.cpp [3:4]
+ lib/CodeGen/AsmPrinter/DIEHash.h [3:4]
+ lib/CodeGen/AsmPrinter/DbgEntityHistoryCalculator.cpp [3:4]
+ lib/CodeGen/AsmPrinter/DebugHandlerBase.cpp [3:4]
+ lib/CodeGen/AsmPrinter/DebugLocEntry.h [3:4]
+ lib/CodeGen/AsmPrinter/DebugLocStream.cpp [3:4]
+ lib/CodeGen/AsmPrinter/DebugLocStream.h [3:4]
+ lib/CodeGen/AsmPrinter/DwarfCFIException.cpp [3:4]
+ lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp [3:4]
+ lib/CodeGen/AsmPrinter/DwarfCompileUnit.h [3:4]
+ lib/CodeGen/AsmPrinter/DwarfDebug.cpp [3:4]
+ lib/CodeGen/AsmPrinter/DwarfDebug.h [3:4]
+ lib/CodeGen/AsmPrinter/DwarfException.h [3:4]
+ lib/CodeGen/AsmPrinter/DwarfExpression.cpp [3:4]
+ lib/CodeGen/AsmPrinter/DwarfExpression.h [3:4]
+ lib/CodeGen/AsmPrinter/DwarfFile.cpp [3:4]
+ lib/CodeGen/AsmPrinter/DwarfFile.h [3:4]
+ lib/CodeGen/AsmPrinter/DwarfStringPool.cpp [3:4]
+ lib/CodeGen/AsmPrinter/DwarfStringPool.h [3:4]
+ lib/CodeGen/AsmPrinter/DwarfUnit.cpp [3:4]
+ lib/CodeGen/AsmPrinter/DwarfUnit.h [3:4]
+ lib/CodeGen/AsmPrinter/EHStreamer.cpp [3:4]
+ lib/CodeGen/AsmPrinter/EHStreamer.h [3:4]
+ lib/CodeGen/AsmPrinter/ErlangGCPrinter.cpp [3:4]
+ lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp [3:4]
+ lib/CodeGen/AsmPrinter/PseudoProbePrinter.cpp [3:4]
+ lib/CodeGen/AsmPrinter/PseudoProbePrinter.h [3:4]
+ lib/CodeGen/AsmPrinter/WasmException.cpp [3:4]
+ lib/CodeGen/AsmPrinter/WasmException.h [3:4]
+ lib/CodeGen/AsmPrinter/WinCFGuard.cpp [3:4]
+ lib/CodeGen/AsmPrinter/WinCFGuard.h [3:4]
+ lib/CodeGen/AsmPrinter/WinException.cpp [3:4]
+ lib/CodeGen/AsmPrinter/WinException.h [3:4]
+ lib/CodeGen/AtomicExpandPass.cpp [3:4]
+ lib/CodeGen/BasicBlockSections.cpp [3:4]
+ lib/CodeGen/BasicBlockSectionsProfileReader.cpp [3:4]
+ lib/CodeGen/BasicTargetTransformInfo.cpp [3:4]
+ lib/CodeGen/BranchFolding.cpp [3:4]
+ lib/CodeGen/BranchFolding.h [3:4]
+ lib/CodeGen/BranchRelaxation.cpp [3:4]
+ lib/CodeGen/BreakFalseDeps.cpp [3:4]
+ lib/CodeGen/CFGuardLongjmp.cpp [3:4]
+ lib/CodeGen/CFIFixup.cpp [3:4]
+ lib/CodeGen/CFIInstrInserter.cpp [3:4]
+ lib/CodeGen/CalcSpillWeights.cpp [3:4]
+ lib/CodeGen/CallingConvLower.cpp [3:4]
+ lib/CodeGen/CodeGen.cpp [3:4]
+ lib/CodeGen/CodeGenCommonISel.cpp [3:4]
+ lib/CodeGen/CodeGenPassBuilder.cpp [3:4]
+ lib/CodeGen/CodeGenPrepare.cpp [3:4]
+ lib/CodeGen/CommandFlags.cpp [3:4]
+ lib/CodeGen/ComplexDeinterleavingPass.cpp [3:4]
+ lib/CodeGen/CriticalAntiDepBreaker.cpp [3:4]
+ lib/CodeGen/CriticalAntiDepBreaker.h [3:4]
+ lib/CodeGen/DFAPacketizer.cpp [3:4]
+ lib/CodeGen/DeadMachineInstructionElim.cpp [3:4]
+ lib/CodeGen/DetectDeadLanes.cpp [3:4]
+ lib/CodeGen/DwarfEHPrepare.cpp [3:4]
+ lib/CodeGen/EHContGuardCatchret.cpp [3:4]
+ lib/CodeGen/EarlyIfConversion.cpp [3:4]
+ lib/CodeGen/EdgeBundles.cpp [3:4]
+ lib/CodeGen/ExecutionDomainFix.cpp [3:4]
+ lib/CodeGen/ExpandLargeDivRem.cpp [3:4]
+ lib/CodeGen/ExpandLargeFpConvert.cpp [3:4]
+ lib/CodeGen/ExpandMemCmp.cpp [3:4]
+ lib/CodeGen/ExpandPostRAPseudos.cpp [3:4]
+ lib/CodeGen/ExpandReductions.cpp [3:4]
+ lib/CodeGen/ExpandVectorPredication.cpp [3:4]
+ lib/CodeGen/FEntryInserter.cpp [3:4]
+ lib/CodeGen/FaultMaps.cpp [3:4]
+ lib/CodeGen/FinalizeISel.cpp [3:4]
+ lib/CodeGen/FixupStatepointCallerSaved.cpp [3:4]
+ lib/CodeGen/FuncletLayout.cpp [3:4]
+ lib/CodeGen/GCMetadata.cpp [3:4]
+ lib/CodeGen/GCMetadataPrinter.cpp [3:4]
+ lib/CodeGen/GCRootLowering.cpp [3:4]
+ lib/CodeGen/GlobalISel/CSEInfo.cpp [3:4]
+ lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp [3:4]
+ lib/CodeGen/GlobalISel/CallLowering.cpp [3:4]
+ lib/CodeGen/GlobalISel/Combiner.cpp [3:4]
+ lib/CodeGen/GlobalISel/CombinerHelper.cpp [3:4]
+ lib/CodeGen/GlobalISel/GISelChangeObserver.cpp [3:4]
+ lib/CodeGen/GlobalISel/GISelKnownBits.cpp [3:4]
+ lib/CodeGen/GlobalISel/GlobalISel.cpp [3:4]
+ lib/CodeGen/GlobalISel/IRTranslator.cpp [3:4]
+ lib/CodeGen/GlobalISel/InlineAsmLowering.cpp [3:4]
+ lib/CodeGen/GlobalISel/InstructionSelect.cpp [3:4]
+ lib/CodeGen/GlobalISel/InstructionSelector.cpp [3:4]
+ lib/CodeGen/GlobalISel/LegacyLegalizerInfo.cpp [3:4]
+ lib/CodeGen/GlobalISel/LegalityPredicates.cpp [3:4]
+ lib/CodeGen/GlobalISel/LegalizeMutations.cpp [3:4]
+ lib/CodeGen/GlobalISel/Legalizer.cpp [3:4]
+ lib/CodeGen/GlobalISel/LegalizerHelper.cpp [3:4]
+ lib/CodeGen/GlobalISel/LegalizerInfo.cpp [3:4]
+ lib/CodeGen/GlobalISel/LoadStoreOpt.cpp [3:4]
+ lib/CodeGen/GlobalISel/Localizer.cpp [3:4]
+ lib/CodeGen/GlobalISel/LostDebugLocObserver.cpp [3:4]
+ lib/CodeGen/GlobalISel/MachineIRBuilder.cpp [3:4]
+ lib/CodeGen/GlobalISel/RegBankSelect.cpp [3:4]
+ lib/CodeGen/GlobalISel/Utils.cpp [3:4]
+ lib/CodeGen/GlobalMerge.cpp [3:4]
+ lib/CodeGen/HardwareLoops.cpp [3:4]
+ lib/CodeGen/IfConversion.cpp [3:4]
+ lib/CodeGen/ImplicitNullChecks.cpp [3:4]
+ lib/CodeGen/IndirectBrExpandPass.cpp [3:4]
+ lib/CodeGen/InlineSpiller.cpp [3:4]
+ lib/CodeGen/InterferenceCache.cpp [3:4]
+ lib/CodeGen/InterferenceCache.h [3:4]
+ lib/CodeGen/InterleavedAccessPass.cpp [3:4]
+ lib/CodeGen/InterleavedLoadCombinePass.cpp [3:4]
+ lib/CodeGen/IntrinsicLowering.cpp [3:4]
+ lib/CodeGen/JMCInstrumenter.cpp [3:4]
+ lib/CodeGen/LLVMTargetMachine.cpp [3:4]
+ lib/CodeGen/LatencyPriorityQueue.cpp [3:4]
+ lib/CodeGen/LexicalScopes.cpp [3:4]
+ lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp [3:4]
+ lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.h [3:4]
+ lib/CodeGen/LiveDebugValues/LiveDebugValues.cpp [3:4]
+ lib/CodeGen/LiveDebugValues/LiveDebugValues.h [3:4]
+ lib/CodeGen/LiveDebugValues/VarLocBasedImpl.cpp [3:4]
+ lib/CodeGen/LiveDebugVariables.cpp [3:4]
+ lib/CodeGen/LiveDebugVariables.h [3:4]
+ lib/CodeGen/LiveInterval.cpp [3:4]
+ lib/CodeGen/LiveIntervalCalc.cpp [3:4]
+ lib/CodeGen/LiveIntervalUnion.cpp [3:4]
+ lib/CodeGen/LiveIntervals.cpp [3:4]
+ lib/CodeGen/LivePhysRegs.cpp [3:4]
+ lib/CodeGen/LiveRangeCalc.cpp [3:4]
+ lib/CodeGen/LiveRangeEdit.cpp [3:4]
+ lib/CodeGen/LiveRangeShrink.cpp [3:4]
+ lib/CodeGen/LiveRangeUtils.h [3:4]
+ lib/CodeGen/LiveRegMatrix.cpp [3:4]
+ lib/CodeGen/LiveRegUnits.cpp [3:4]
+ lib/CodeGen/LiveStacks.cpp [3:4]
+ lib/CodeGen/LiveVariables.cpp [3:4]
+ lib/CodeGen/LocalStackSlotAllocation.cpp [3:4]
+ lib/CodeGen/LoopTraversal.cpp [3:4]
+ lib/CodeGen/LowLevelType.cpp [3:4]
+ lib/CodeGen/LowerEmuTLS.cpp [3:4]
+ lib/CodeGen/MBFIWrapper.cpp [3:4]
+ lib/CodeGen/MIRCanonicalizerPass.cpp [3:4]
+ lib/CodeGen/MIRFSDiscriminator.cpp [3:4]
+ lib/CodeGen/MIRNamerPass.cpp [3:4]
+ lib/CodeGen/MIRParser/MILexer.cpp [3:4]
+ lib/CodeGen/MIRParser/MILexer.h [3:4]
+ lib/CodeGen/MIRParser/MIParser.cpp [3:4]
+ lib/CodeGen/MIRParser/MIRParser.cpp [3:4]
+ lib/CodeGen/MIRPrinter.cpp [3:4]
+ lib/CodeGen/MIRPrintingPass.cpp [3:4]
+ lib/CodeGen/MIRSampleProfile.cpp [3:4]
+ lib/CodeGen/MIRVRegNamerUtils.cpp [3:4]
+ lib/CodeGen/MIRVRegNamerUtils.h [4:5]
+ lib/CodeGen/MIRYamlMapping.cpp [3:4]
+ lib/CodeGen/MLRegallocEvictAdvisor.cpp [3:4]
+ lib/CodeGen/MLRegallocEvictAdvisor.h [3:4]
+ lib/CodeGen/MLRegallocPriorityAdvisor.cpp [3:4]
+ lib/CodeGen/MachineBasicBlock.cpp [3:4]
+ lib/CodeGen/MachineBlockFrequencyInfo.cpp [3:4]
+ lib/CodeGen/MachineBlockPlacement.cpp [3:4]
+ lib/CodeGen/MachineBranchProbabilityInfo.cpp [3:4]
+ lib/CodeGen/MachineCFGPrinter.cpp [3:4]
+ lib/CodeGen/MachineCSE.cpp [3:4]
+ lib/CodeGen/MachineCheckDebugify.cpp [3:4]
+ lib/CodeGen/MachineCombiner.cpp [3:4]
+ lib/CodeGen/MachineCopyPropagation.cpp [3:4]
+ lib/CodeGen/MachineCycleAnalysis.cpp [3:4]
+ lib/CodeGen/MachineDebugify.cpp [3:4]
+ lib/CodeGen/MachineDominanceFrontier.cpp [3:4]
+ lib/CodeGen/MachineDominators.cpp [3:4]
+ lib/CodeGen/MachineFrameInfo.cpp [3:4]
+ lib/CodeGen/MachineFunction.cpp [3:4]
+ lib/CodeGen/MachineFunctionPass.cpp [3:4]
+ lib/CodeGen/MachineFunctionPrinterPass.cpp [3:4]
+ lib/CodeGen/MachineFunctionSplitter.cpp [3:4]
+ lib/CodeGen/MachineInstr.cpp [3:4]
+ lib/CodeGen/MachineInstrBundle.cpp [3:4]
+ lib/CodeGen/MachineLICM.cpp [3:4]
+ lib/CodeGen/MachineLateInstrsCleanup.cpp [3:4]
+ lib/CodeGen/MachineLoopInfo.cpp [3:4]
+ lib/CodeGen/MachineLoopUtils.cpp [3:4]
+ lib/CodeGen/MachineModuleInfo.cpp [3:4]
+ lib/CodeGen/MachineModuleInfoImpls.cpp [3:4]
+ lib/CodeGen/MachineModuleSlotTracker.cpp [3:4]
+ lib/CodeGen/MachineOperand.cpp [3:4]
+ lib/CodeGen/MachineOutliner.cpp [3:4]
+ lib/CodeGen/MachinePassManager.cpp [3:4]
+ lib/CodeGen/MachinePipeliner.cpp [3:4]
+ lib/CodeGen/MachinePostDominators.cpp [3:4]
+ lib/CodeGen/MachineRegionInfo.cpp [3:4]
+ lib/CodeGen/MachineRegisterInfo.cpp [3:4]
+ lib/CodeGen/MachineSSAContext.cpp [3:4]
+ lib/CodeGen/MachineSSAUpdater.cpp [3:4]
+ lib/CodeGen/MachineScheduler.cpp [3:4]
+ lib/CodeGen/MachineSink.cpp [3:4]
+ lib/CodeGen/MachineSizeOpts.cpp [3:4]
+ lib/CodeGen/MachineStableHash.cpp [3:4]
+ lib/CodeGen/MachineStripDebug.cpp [3:4]
+ lib/CodeGen/MachineTraceMetrics.cpp [3:4]
+ lib/CodeGen/MachineUniformityAnalysis.cpp [3:4]
+ lib/CodeGen/MachineVerifier.cpp [3:4]
+ lib/CodeGen/MacroFusion.cpp [3:4]
+ lib/CodeGen/ModuloSchedule.cpp [3:4]
+ lib/CodeGen/MultiHazardRecognizer.cpp [3:4]
+ lib/CodeGen/NonRelocatableStringpool.cpp [3:4]
+ lib/CodeGen/OptimizePHIs.cpp [3:4]
+ lib/CodeGen/PHIElimination.cpp [3:4]
+ lib/CodeGen/PHIEliminationUtils.cpp [3:4]
+ lib/CodeGen/PHIEliminationUtils.h [3:4]
+ lib/CodeGen/ParallelCG.cpp [3:4]
+ lib/CodeGen/PatchableFunction.cpp [3:4]
+ lib/CodeGen/PeepholeOptimizer.cpp [3:4]
+ lib/CodeGen/PostRAHazardRecognizer.cpp [3:4]
+ lib/CodeGen/PostRASchedulerList.cpp [3:4]
+ lib/CodeGen/PreISelIntrinsicLowering.cpp [3:4]
+ lib/CodeGen/ProcessImplicitDefs.cpp [3:4]
+ lib/CodeGen/PrologEpilogInserter.cpp [3:4]
+ lib/CodeGen/PseudoProbeInserter.cpp [3:4]
+ lib/CodeGen/PseudoSourceValue.cpp [3:4]
+ lib/CodeGen/RDFGraph.cpp [3:4]
+ lib/CodeGen/RDFLiveness.cpp [3:4]
+ lib/CodeGen/RDFRegisters.cpp [3:4]
+ lib/CodeGen/ReachingDefAnalysis.cpp [3:4]
+ lib/CodeGen/RegAllocBase.cpp [3:4]
+ lib/CodeGen/RegAllocBase.h [3:4]
+ lib/CodeGen/RegAllocBasic.cpp [3:4]
+ lib/CodeGen/RegAllocEvictionAdvisor.cpp [3:4]
+ lib/CodeGen/RegAllocEvictionAdvisor.h [3:4]
+ lib/CodeGen/RegAllocFast.cpp [3:4]
+ lib/CodeGen/RegAllocGreedy.cpp [3:4]
+ lib/CodeGen/RegAllocGreedy.h [3:4]
+ lib/CodeGen/RegAllocPBQP.cpp [3:4]
+ lib/CodeGen/RegAllocPriorityAdvisor.cpp [3:4]
+ lib/CodeGen/RegAllocPriorityAdvisor.h [3:4]
+ lib/CodeGen/RegAllocScore.cpp [3:4]
+ lib/CodeGen/RegAllocScore.h [3:4]
+ lib/CodeGen/RegUsageInfoCollector.cpp [3:4]
+ lib/CodeGen/RegUsageInfoPropagate.cpp [3:4]
+ lib/CodeGen/RegisterBank.cpp [3:4]
+ lib/CodeGen/RegisterBankInfo.cpp [3:4]
+ lib/CodeGen/RegisterClassInfo.cpp [3:4]
+ lib/CodeGen/RegisterCoalescer.cpp [3:4]
+ lib/CodeGen/RegisterCoalescer.h [3:4]
+ lib/CodeGen/RegisterPressure.cpp [3:4]
+ lib/CodeGen/RegisterScavenging.cpp [3:4]
+ lib/CodeGen/RegisterUsageInfo.cpp [3:4]
+ lib/CodeGen/RemoveRedundantDebugValues.cpp [3:4]
+ lib/CodeGen/RenameIndependentSubregs.cpp [3:4]
+ lib/CodeGen/ReplaceWithVeclib.cpp [3:4]
+ lib/CodeGen/ResetMachineFunctionPass.cpp [3:4]
+ lib/CodeGen/SafeStack.cpp [3:4]
+ lib/CodeGen/SafeStackLayout.cpp [3:4]
+ lib/CodeGen/SafeStackLayout.h [3:4]
+ lib/CodeGen/SanitizerBinaryMetadata.cpp [4:5]
+ lib/CodeGen/ScheduleDAG.cpp [3:4]
+ lib/CodeGen/ScheduleDAGInstrs.cpp [3:4]
+ lib/CodeGen/ScheduleDAGPrinter.cpp [3:4]
+ lib/CodeGen/ScoreboardHazardRecognizer.cpp [3:4]
+ lib/CodeGen/SelectOptimize.cpp [3:4]
+ lib/CodeGen/SelectionDAG/DAGCombiner.cpp [3:4]
+ lib/CodeGen/SelectionDAG/FastISel.cpp [3:4]
+ lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp [3:4]
+ lib/CodeGen/SelectionDAG/InstrEmitter.cpp [3:4]
+ lib/CodeGen/SelectionDAG/InstrEmitter.h [3:4]
+ lib/CodeGen/SelectionDAG/LegalizeDAG.cpp [3:4]
+ lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp [3:4]
+ lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp [3:4]
+ lib/CodeGen/SelectionDAG/LegalizeTypes.cpp [3:4]
+ lib/CodeGen/SelectionDAG/LegalizeTypes.h [3:4]
+ lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp [3:4]
+ lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp [3:4]
+ lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp [3:4]
+ lib/CodeGen/SelectionDAG/ResourcePriorityQueue.cpp [3:4]
+ lib/CodeGen/SelectionDAG/SDNodeDbgValue.h [3:4]
+ lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp [3:4]
+ lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp [3:4]
+ lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp [3:4]
+ lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h [3:4]
+ lib/CodeGen/SelectionDAG/ScheduleDAGVLIW.cpp [3:4]
+ lib/CodeGen/SelectionDAG/SelectionDAG.cpp [3:4]
+ lib/CodeGen/SelectionDAG/SelectionDAGAddressAnalysis.cpp [3:4]
+ lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp [3:4]
+ lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h [3:4]
+ lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp [3:4]
+ lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp [3:4]
+ lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp [3:4]
+ lib/CodeGen/SelectionDAG/SelectionDAGTargetInfo.cpp [3:4]
+ lib/CodeGen/SelectionDAG/StatepointLowering.cpp [3:4]
+ lib/CodeGen/SelectionDAG/StatepointLowering.h [3:4]
+ lib/CodeGen/SelectionDAG/TargetLowering.cpp [3:4]
+ lib/CodeGen/ShadowStackGCLowering.cpp [3:4]
+ lib/CodeGen/ShrinkWrap.cpp [3:4]
+ lib/CodeGen/SjLjEHPrepare.cpp [3:4]
+ lib/CodeGen/SlotIndexes.cpp [3:4]
+ lib/CodeGen/SpillPlacement.cpp [3:4]
+ lib/CodeGen/SpillPlacement.h [3:4]
+ lib/CodeGen/SplitKit.cpp [3:4]
+ lib/CodeGen/SplitKit.h [3:4]
+ lib/CodeGen/StackColoring.cpp [3:4]
+ lib/CodeGen/StackFrameLayoutAnalysisPass.cpp [4:5]
+ lib/CodeGen/StackMapLivenessAnalysis.cpp [3:4]
+ lib/CodeGen/StackMaps.cpp [3:4]
+ lib/CodeGen/StackProtector.cpp [3:4]
+ lib/CodeGen/StackSlotColoring.cpp [3:4]
+ lib/CodeGen/SwiftErrorValueTracking.cpp [3:4]
+ lib/CodeGen/SwitchLoweringUtils.cpp [3:4]
+ lib/CodeGen/TailDuplication.cpp [3:4]
+ lib/CodeGen/TailDuplicator.cpp [3:4]
+ lib/CodeGen/TargetFrameLoweringImpl.cpp [3:4]
+ lib/CodeGen/TargetInstrInfo.cpp [3:4]
+ lib/CodeGen/TargetLoweringBase.cpp [3:4]
+ lib/CodeGen/TargetLoweringObjectFileImpl.cpp [3:4]
+ lib/CodeGen/TargetOptionsImpl.cpp [3:4]
+ lib/CodeGen/TargetPassConfig.cpp [3:4]
+ lib/CodeGen/TargetRegisterInfo.cpp [3:4]
+ lib/CodeGen/TargetSchedule.cpp [3:4]
+ lib/CodeGen/TargetSubtargetInfo.cpp [3:4]
+ lib/CodeGen/TwoAddressInstructionPass.cpp [3:4]
+ lib/CodeGen/TypePromotion.cpp [3:4]
+ lib/CodeGen/UnreachableBlockElim.cpp [3:4]
+ lib/CodeGen/VLIWMachineScheduler.cpp [3:4]
+ lib/CodeGen/ValueTypes.cpp [3:4]
+ lib/CodeGen/VirtRegMap.cpp [3:4]
+ lib/CodeGen/WasmEHPrepare.cpp [3:4]
+ lib/CodeGen/WinEHPrepare.cpp [3:4]
+ lib/CodeGen/XRayInstrumentation.cpp [3:4]
+ lib/DWARFLinker/DWARFLinker.cpp [3:4]
+ lib/DWARFLinker/DWARFLinkerCompileUnit.cpp [3:4]
+ lib/DWARFLinker/DWARFLinkerDeclContext.cpp [3:4]
+ lib/DWARFLinker/DWARFStreamer.cpp [3:4]
+ lib/DWARFLinkerParallel/DWARFLinker.cpp [3:4]
+ lib/DWP/DWP.cpp [3:4]
+ lib/DebugInfo/CodeView/AppendingTypeTableBuilder.cpp [3:4]
+ lib/DebugInfo/CodeView/CVSymbolVisitor.cpp [3:4]
+ lib/DebugInfo/CodeView/CVTypeVisitor.cpp [3:4]
+ lib/DebugInfo/CodeView/CodeViewError.cpp [3:4]
+ lib/DebugInfo/CodeView/CodeViewRecordIO.cpp [3:4]
+ lib/DebugInfo/CodeView/DebugChecksumsSubsection.cpp [3:4]
+ lib/DebugInfo/CodeView/DebugCrossExSubsection.cpp [3:4]
+ lib/DebugInfo/CodeView/DebugCrossImpSubsection.cpp [3:4]
+ lib/DebugInfo/CodeView/DebugFrameDataSubsection.cpp [3:4]
+ lib/DebugInfo/CodeView/DebugInlineeLinesSubsection.cpp [3:4]
+ lib/DebugInfo/CodeView/DebugLinesSubsection.cpp [3:4]
+ lib/DebugInfo/CodeView/DebugStringTableSubsection.cpp [3:4]
+ lib/DebugInfo/CodeView/DebugSubsection.cpp [3:4]
+ lib/DebugInfo/CodeView/DebugSubsectionRecord.cpp [3:4]
+ lib/DebugInfo/CodeView/DebugSubsectionVisitor.cpp [3:4]
+ lib/DebugInfo/CodeView/DebugSymbolRVASubsection.cpp [3:4]
+ lib/DebugInfo/CodeView/DebugSymbolsSubsection.cpp [3:4]
+ lib/DebugInfo/CodeView/EnumTables.cpp [3:4]
+ lib/DebugInfo/CodeView/Formatters.cpp [3:4]
+ lib/DebugInfo/CodeView/GlobalTypeTableBuilder.cpp [3:4]
+ lib/DebugInfo/CodeView/LazyRandomTypeCollection.cpp [3:4]
+ lib/DebugInfo/CodeView/Line.cpp [3:4]
+ lib/DebugInfo/CodeView/MergingTypeTableBuilder.cpp [3:4]
+ lib/DebugInfo/CodeView/RecordName.cpp [3:4]
+ lib/DebugInfo/CodeView/RecordSerialization.cpp [3:4]
+ lib/DebugInfo/CodeView/SimpleTypeSerializer.cpp [3:4]
+ lib/DebugInfo/CodeView/StringsAndChecksums.cpp [3:4]
+ lib/DebugInfo/CodeView/SymbolDumper.cpp [3:4]
+ lib/DebugInfo/CodeView/SymbolRecordHelpers.cpp [3:4]
+ lib/DebugInfo/CodeView/SymbolRecordMapping.cpp [3:4]
+ lib/DebugInfo/CodeView/SymbolSerializer.cpp [3:4]
+ lib/DebugInfo/CodeView/TypeDumpVisitor.cpp [3:4]
+ lib/DebugInfo/CodeView/TypeHashing.cpp [3:4]
+ lib/DebugInfo/CodeView/TypeIndex.cpp [3:4]
+ lib/DebugInfo/CodeView/TypeIndexDiscovery.cpp [3:4]
+ lib/DebugInfo/CodeView/TypeRecordHelpers.cpp [3:4]
+ lib/DebugInfo/CodeView/TypeRecordMapping.cpp [3:4]
+ lib/DebugInfo/CodeView/TypeStreamMerger.cpp [3:4]
+ lib/DebugInfo/CodeView/TypeTableCollection.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFAbbreviationDeclaration.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFAcceleratorTable.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFAddressRange.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFCompileUnit.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFContext.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFDataExtractor.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFDebugAbbrev.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFDebugAddr.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFDebugArangeSet.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFDebugAranges.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFDebugFrame.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFDebugInfoEntry.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFDebugLine.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFDebugLoc.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFDebugMacro.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFDebugPubTable.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFDebugRangeList.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFDebugRnglists.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFDie.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFExpression.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFFormValue.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFGdbIndex.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFListTable.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFLocationExpression.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFTypeUnit.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFUnit.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFUnitIndex.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFVerifier.cpp [3:4]
+ lib/DebugInfo/GSYM/DwarfTransformer.cpp [3:4]
+ lib/DebugInfo/GSYM/ExtractRanges.cpp [3:4]
+ lib/DebugInfo/GSYM/FileWriter.cpp [3:4]
+ lib/DebugInfo/GSYM/FunctionInfo.cpp [3:4]
+ lib/DebugInfo/GSYM/GsymCreator.cpp [3:4]
+ lib/DebugInfo/GSYM/GsymReader.cpp [3:4]
+ lib/DebugInfo/GSYM/Header.cpp [3:4]
+ lib/DebugInfo/GSYM/InlineInfo.cpp [3:4]
+ lib/DebugInfo/GSYM/LineTable.cpp [3:4]
+ lib/DebugInfo/GSYM/LookupResult.cpp [3:4]
+ lib/DebugInfo/GSYM/ObjectFileTransformer.cpp [3:4]
+ lib/DebugInfo/MSF/MSFBuilder.cpp [3:4]
+ lib/DebugInfo/MSF/MSFCommon.cpp [3:4]
+ lib/DebugInfo/MSF/MSFError.cpp [3:4]
+ lib/DebugInfo/MSF/MappedBlockStream.cpp [3:4]
+ lib/DebugInfo/PDB/GenericError.cpp [3:4]
+ lib/DebugInfo/PDB/IPDBSourceFile.cpp [3:4]
+ lib/DebugInfo/PDB/Native/DbiModuleDescriptor.cpp [3:4]
+ lib/DebugInfo/PDB/Native/DbiModuleDescriptorBuilder.cpp [3:4]
+ lib/DebugInfo/PDB/Native/DbiModuleList.cpp [3:4]
+ lib/DebugInfo/PDB/Native/DbiStream.cpp [3:4]
+ lib/DebugInfo/PDB/Native/DbiStreamBuilder.cpp [3:4]
+ lib/DebugInfo/PDB/Native/EnumTables.cpp [3:4]
+ lib/DebugInfo/PDB/Native/FormatUtil.cpp [3:4]
+ lib/DebugInfo/PDB/Native/GSIStreamBuilder.cpp [3:4]
+ lib/DebugInfo/PDB/Native/GlobalsStream.cpp [3:4]
+ lib/DebugInfo/PDB/Native/Hash.cpp [3:4]
+ lib/DebugInfo/PDB/Native/HashTable.cpp [3:4]
+ lib/DebugInfo/PDB/Native/InfoStream.cpp [3:4]
+ lib/DebugInfo/PDB/Native/InfoStreamBuilder.cpp [3:4]
+ lib/DebugInfo/PDB/Native/InjectedSourceStream.cpp [3:4]
+ lib/DebugInfo/PDB/Native/InputFile.cpp [3:4]
+ lib/DebugInfo/PDB/Native/LinePrinter.cpp [3:4]
+ lib/DebugInfo/PDB/Native/ModuleDebugStream.cpp [3:4]
+ lib/DebugInfo/PDB/Native/NamedStreamMap.cpp [3:4]
+ lib/DebugInfo/PDB/Native/NativeCompilandSymbol.cpp [3:4]
+ lib/DebugInfo/PDB/Native/NativeEnumGlobals.cpp [3:4]
+ lib/DebugInfo/PDB/Native/NativeEnumInjectedSources.cpp [3:4]
+ lib/DebugInfo/PDB/Native/NativeEnumLineNumbers.cpp [3:4]
+ lib/DebugInfo/PDB/Native/NativeEnumModules.cpp [3:4]
+ lib/DebugInfo/PDB/Native/NativeEnumSymbols.cpp [3:4]
+ lib/DebugInfo/PDB/Native/NativeEnumTypes.cpp [3:4]
+ lib/DebugInfo/PDB/Native/NativeExeSymbol.cpp [3:4]
+ lib/DebugInfo/PDB/Native/NativeFunctionSymbol.cpp [3:4]
+ lib/DebugInfo/PDB/Native/NativeInlineSiteSymbol.cpp [3:4]
+ lib/DebugInfo/PDB/Native/NativeLineNumber.cpp [3:4]
+ lib/DebugInfo/PDB/Native/NativePublicSymbol.cpp [3:4]
+ lib/DebugInfo/PDB/Native/NativeRawSymbol.cpp [3:4]
+ lib/DebugInfo/PDB/Native/NativeSession.cpp [3:4]
+ lib/DebugInfo/PDB/Native/NativeSourceFile.cpp [3:4]
+ lib/DebugInfo/PDB/Native/NativeSymbolEnumerator.cpp [3:4]
+ lib/DebugInfo/PDB/Native/NativeTypeArray.cpp [3:4]
+ lib/DebugInfo/PDB/Native/NativeTypeBuiltin.cpp [3:4]
+ lib/DebugInfo/PDB/Native/NativeTypeEnum.cpp [3:4]
+ lib/DebugInfo/PDB/Native/NativeTypeFunctionSig.cpp [3:4]
+ lib/DebugInfo/PDB/Native/NativeTypePointer.cpp [3:4]
+ lib/DebugInfo/PDB/Native/NativeTypeUDT.cpp [3:4]
+ lib/DebugInfo/PDB/Native/PDBFile.cpp [3:4]
+ lib/DebugInfo/PDB/Native/PDBFileBuilder.cpp [3:4]
+ lib/DebugInfo/PDB/Native/PDBStringTable.cpp [3:4]
+ lib/DebugInfo/PDB/Native/PDBStringTableBuilder.cpp [3:4]
+ lib/DebugInfo/PDB/Native/PublicsStream.cpp [3:4]
+ lib/DebugInfo/PDB/Native/SymbolStream.cpp [3:4]
+ lib/DebugInfo/PDB/Native/TpiHashing.cpp [3:4]
+ lib/DebugInfo/PDB/Native/TpiStream.cpp [3:4]
+ lib/DebugInfo/PDB/Native/TpiStreamBuilder.cpp [3:4]
+ lib/DebugInfo/PDB/PDB.cpp [3:4]
+ lib/DebugInfo/PDB/PDBContext.cpp [3:4]
+ lib/DebugInfo/PDB/PDBExtras.cpp [3:4]
+ lib/DebugInfo/PDB/PDBInterfaceAnchors.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymDumper.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbol.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolAnnotation.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolBlock.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolCompiland.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolCompilandDetails.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolCompilandEnv.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolCustom.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolData.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolExe.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolFunc.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolFuncDebugEnd.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolFuncDebugStart.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolLabel.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolPublicSymbol.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolThunk.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolTypeArray.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolTypeBaseClass.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolTypeBuiltin.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolTypeCustom.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolTypeDimension.cpp [4:5]
+ lib/DebugInfo/PDB/PDBSymbolTypeEnum.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolTypeFriend.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolTypeFunctionArg.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolTypeFunctionSig.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolTypeManaged.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolTypePointer.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolTypeTypedef.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolTypeUDT.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolTypeVTable.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolTypeVTableShape.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolUnknown.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolUsingNamespace.cpp [3:4]
+ lib/DebugInfo/PDB/UDTLayout.cpp [3:4]
+ lib/DebugInfo/Symbolize/DIPrinter.cpp [3:4]
+ lib/DebugInfo/Symbolize/Markup.cpp [3:4]
+ lib/DebugInfo/Symbolize/MarkupFilter.cpp [3:4]
+ lib/DebugInfo/Symbolize/SymbolizableObjectFile.cpp [3:4]
+ lib/DebugInfo/Symbolize/Symbolize.cpp [3:4]
+ lib/Debuginfod/BuildIDFetcher.cpp [3:4]
+ lib/Debuginfod/Debuginfod.cpp [3:4]
+ lib/Debuginfod/HTTPClient.cpp [3:4]
+ lib/Debuginfod/HTTPServer.cpp [3:4]
+ lib/Demangle/DLangDemangle.cpp [3:4]
+ lib/Demangle/Demangle.cpp [3:4]
+ lib/Demangle/ItaniumDemangle.cpp [3:4]
+ lib/Demangle/MicrosoftDemangle.cpp [3:4]
+ lib/Demangle/MicrosoftDemangleNodes.cpp [3:4]
+ lib/Demangle/RustDemangle.cpp [3:4]
+ lib/ExecutionEngine/ExecutionEngine.cpp [3:4]
+ lib/ExecutionEngine/ExecutionEngineBindings.cpp [3:4]
+ lib/ExecutionEngine/GDBRegistrationListener.cpp [3:4]
+ lib/ExecutionEngine/Interpreter/Execution.cpp [3:4]
+ lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp [3:4]
+ lib/ExecutionEngine/Interpreter/Interpreter.cpp [3:4]
+ lib/ExecutionEngine/Interpreter/Interpreter.h [3:4]
+ lib/ExecutionEngine/JITLink/COFF.cpp [3:4]
+ lib/ExecutionEngine/JITLink/COFFDirectiveParser.cpp [3:4]
+ lib/ExecutionEngine/JITLink/COFFDirectiveParser.h [3:4]
+ lib/ExecutionEngine/JITLink/COFFLinkGraphBuilder.cpp [3:4]
+ lib/ExecutionEngine/JITLink/COFFLinkGraphBuilder.h [3:4]
+ lib/ExecutionEngine/JITLink/COFF_x86_64.cpp [3:4]
+ lib/ExecutionEngine/JITLink/DWARFRecordSectionSplitter.cpp [3:4]
+ lib/ExecutionEngine/JITLink/DefineExternalSectionStartAndEndSymbols.h [3:4]
+ lib/ExecutionEngine/JITLink/EHFrameSupport.cpp [3:4]
+ lib/ExecutionEngine/JITLink/EHFrameSupportImpl.h [3:4]
+ lib/ExecutionEngine/JITLink/ELF.cpp [3:4]
+ lib/ExecutionEngine/JITLink/ELFLinkGraphBuilder.cpp [3:4]
+ lib/ExecutionEngine/JITLink/ELFLinkGraphBuilder.h [3:4]
+ lib/ExecutionEngine/JITLink/ELF_aarch64.cpp [3:4]
+ lib/ExecutionEngine/JITLink/ELF_i386.cpp [3:4]
+ lib/ExecutionEngine/JITLink/ELF_loongarch.cpp [3:4]
+ lib/ExecutionEngine/JITLink/ELF_riscv.cpp [3:4]
+ lib/ExecutionEngine/JITLink/ELF_x86_64.cpp [3:4]
+ lib/ExecutionEngine/JITLink/JITLink.cpp [3:4]
+ lib/ExecutionEngine/JITLink/JITLinkGeneric.cpp [3:4]
+ lib/ExecutionEngine/JITLink/JITLinkGeneric.h [3:4]
+ lib/ExecutionEngine/JITLink/JITLinkMemoryManager.cpp [3:4]
+ lib/ExecutionEngine/JITLink/MachO.cpp [3:4]
+ lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.cpp [3:4]
+ lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.h [3:4]
+ lib/ExecutionEngine/JITLink/MachO_arm64.cpp [3:4]
+ lib/ExecutionEngine/JITLink/MachO_x86_64.cpp [3:4]
+ lib/ExecutionEngine/JITLink/PerGraphGOTAndPLTStubsBuilder.h [3:4]
+ lib/ExecutionEngine/JITLink/SEHFrameSupport.h [3:4]
+ lib/ExecutionEngine/JITLink/aarch64.cpp [3:4]
+ lib/ExecutionEngine/JITLink/i386.cpp [3:4]
+ lib/ExecutionEngine/JITLink/loongarch.cpp [3:4]
+ lib/ExecutionEngine/JITLink/riscv.cpp [3:4]
+ lib/ExecutionEngine/JITLink/x86_64.cpp [3:4]
+ lib/ExecutionEngine/MCJIT/MCJIT.cpp [3:4]
+ lib/ExecutionEngine/MCJIT/MCJIT.h [3:4]
+ lib/ExecutionEngine/Orc/COFFPlatform.cpp [3:4]
+ lib/ExecutionEngine/Orc/COFFVCRuntimeSupport.cpp [3:4]
+ lib/ExecutionEngine/Orc/CompileOnDemandLayer.cpp [3:4]
+ lib/ExecutionEngine/Orc/CompileUtils.cpp [3:4]
+ lib/ExecutionEngine/Orc/Core.cpp [3:4]
+ lib/ExecutionEngine/Orc/DebugObjectManagerPlugin.cpp [3:4]
+ lib/ExecutionEngine/Orc/DebugUtils.cpp [3:4]
+ lib/ExecutionEngine/Orc/DebuggerSupportPlugin.cpp [3:4]
+ lib/ExecutionEngine/Orc/ELFNixPlatform.cpp [3:4]
+ lib/ExecutionEngine/Orc/EPCDebugObjectRegistrar.cpp [3:4]
+ lib/ExecutionEngine/Orc/EPCDynamicLibrarySearchGenerator.cpp [3:4]
+ lib/ExecutionEngine/Orc/EPCEHFrameRegistrar.cpp [3:4]
+ lib/ExecutionEngine/Orc/EPCGenericDylibManager.cpp [3:4]
+ lib/ExecutionEngine/Orc/EPCGenericJITLinkMemoryManager.cpp [3:4]
+ lib/ExecutionEngine/Orc/EPCGenericRTDyldMemoryManager.cpp [3:4]
+ lib/ExecutionEngine/Orc/EPCIndirectionUtils.cpp [3:4]
+ lib/ExecutionEngine/Orc/ExecutionUtils.cpp [3:4]
+ lib/ExecutionEngine/Orc/ExecutorProcessControl.cpp [3:4]
+ lib/ExecutionEngine/Orc/IRCompileLayer.cpp [3:4]
+ lib/ExecutionEngine/Orc/IRTransformLayer.cpp [3:4]
+ lib/ExecutionEngine/Orc/IndirectionUtils.cpp [3:4]
+ lib/ExecutionEngine/Orc/JITTargetMachineBuilder.cpp [3:4]
+ lib/ExecutionEngine/Orc/LLJIT.cpp [3:4]
+ lib/ExecutionEngine/Orc/Layer.cpp [3:4]
+ lib/ExecutionEngine/Orc/LazyReexports.cpp [3:4]
+ lib/ExecutionEngine/Orc/LookupAndRecordAddrs.cpp [3:4]
+ lib/ExecutionEngine/Orc/MachOPlatform.cpp [3:4]
+ lib/ExecutionEngine/Orc/Mangling.cpp [3:4]
+ lib/ExecutionEngine/Orc/MapperJITLinkMemoryManager.cpp [3:4]
+ lib/ExecutionEngine/Orc/MemoryMapper.cpp [3:4]
+ lib/ExecutionEngine/Orc/ObjectFileInterface.cpp [3:4]
+ lib/ExecutionEngine/Orc/ObjectLinkingLayer.cpp [3:4]
+ lib/ExecutionEngine/Orc/ObjectTransformLayer.cpp [3:4]
+ lib/ExecutionEngine/Orc/OrcABISupport.cpp [3:4]
+ lib/ExecutionEngine/Orc/OrcV2CBindings.cpp [3:4]
+ lib/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.cpp [3:4]
+ lib/ExecutionEngine/Orc/Shared/AllocationActions.cpp [3:4]
+ lib/ExecutionEngine/Orc/Shared/OrcError.cpp [3:4]
+ lib/ExecutionEngine/Orc/Shared/OrcRTBridge.cpp [3:4]
+ lib/ExecutionEngine/Orc/Shared/SimpleRemoteEPCUtils.cpp [3:4]
+ lib/ExecutionEngine/Orc/SimpleRemoteEPC.cpp [3:4]
+ lib/ExecutionEngine/Orc/SpeculateAnalyses.cpp [3:4]
+ lib/ExecutionEngine/Orc/Speculation.cpp [3:4]
+ lib/ExecutionEngine/Orc/TargetProcess/ExecutorSharedMemoryMapperService.cpp [3:4]
+ lib/ExecutionEngine/Orc/TargetProcess/JITLoaderGDB.cpp [3:4]
+ lib/ExecutionEngine/Orc/TargetProcess/OrcRTBootstrap.cpp [3:4]
+ lib/ExecutionEngine/Orc/TargetProcess/OrcRTBootstrap.h [3:4]
+ lib/ExecutionEngine/Orc/TargetProcess/RegisterEHFrames.cpp [3:4]
+ lib/ExecutionEngine/Orc/TargetProcess/SimpleExecutorDylibManager.cpp [3:4]
+ lib/ExecutionEngine/Orc/TargetProcess/SimpleExecutorMemoryManager.cpp [3:4]
+ lib/ExecutionEngine/Orc/TargetProcess/SimpleRemoteEPCServer.cpp [3:4]
+ lib/ExecutionEngine/Orc/TargetProcess/TargetExecutionUtils.cpp [3:4]
+ lib/ExecutionEngine/Orc/TaskDispatch.cpp [3:4]
+ lib/ExecutionEngine/Orc/ThreadSafeModule.cpp [4:5]
+ lib/ExecutionEngine/PerfJITEvents/PerfJITEventListener.cpp [3:4]
+ lib/ExecutionEngine/RuntimeDyld/JITSymbol.cpp [3:4]
+ lib/ExecutionEngine/RuntimeDyld/RTDyldMemoryManager.cpp [3:4]
+ lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp [3:4]
+ lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCOFF.cpp [3:4]
+ lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCOFF.h [3:4]
+ lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp [3:4]
+ lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCheckerImpl.h [3:4]
+ lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp [3:4]
+ lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h [3:4]
+ lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h [3:4]
+ lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp [3:4]
+ lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h [3:4]
+ lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFAArch64.h [4:5]
+ lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFI386.h [3:4]
+ lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFThumb.h [3:4]
+ lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFX86_64.h [3:4]
+ lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldELFMips.cpp [3:4]
+ lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldELFMips.h [3:4]
+ lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h [3:4]
+ lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOARM.h [3:4]
+ lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOI386.h [3:4]
+ lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOX86_64.h [3:4]
+ lib/ExecutionEngine/SectionMemoryManager.cpp [3:4]
+ lib/ExecutionEngine/TargetSelect.cpp [3:4]
+ lib/FileCheck/FileCheck.cpp [3:4]
+ lib/FileCheck/FileCheckImpl.h [3:4]
+ lib/Frontend/HLSL/HLSLResource.cpp [3:4]
+ lib/Frontend/OpenACC/ACC.cpp [3:4]
+ lib/Frontend/OpenMP/OMP.cpp [3:4]
+ lib/Frontend/OpenMP/OMPContext.cpp [3:4]
+ lib/Frontend/OpenMP/OMPIRBuilder.cpp [3:4]
+ lib/FuzzMutate/IRMutator.cpp [3:4]
+ lib/FuzzMutate/OpDescriptor.cpp [3:4]
+ lib/FuzzMutate/Operations.cpp [3:4]
+ lib/FuzzMutate/RandomIRBuilder.cpp [3:4]
+ lib/IR/AbstractCallSite.cpp [3:4]
+ lib/IR/AsmWriter.cpp [3:4]
+ lib/IR/Assumptions.cpp [3:4]
+ lib/IR/AttributeImpl.h [3:4]
+ lib/IR/Attributes.cpp [3:4]
+ lib/IR/AutoUpgrade.cpp [3:4]
+ lib/IR/BasicBlock.cpp [3:4]
+ lib/IR/BuiltinGCs.cpp [3:4]
+ lib/IR/Comdat.cpp [3:4]
+ lib/IR/ConstantFold.cpp [3:4]
+ lib/IR/ConstantRange.cpp [3:4]
+ lib/IR/Constants.cpp [3:4]
+ lib/IR/ConstantsContext.h [3:4]
+ lib/IR/Core.cpp [3:4]
+ lib/IR/DIBuilder.cpp [3:4]
+ lib/IR/DataLayout.cpp [3:4]
+ lib/IR/DebugInfo.cpp [3:4]
+ lib/IR/DebugInfoMetadata.cpp [3:4]
+ lib/IR/DebugLoc.cpp [3:4]
+ lib/IR/DiagnosticHandler.cpp [3:4]
+ lib/IR/DiagnosticInfo.cpp [3:4]
+ lib/IR/DiagnosticPrinter.cpp [3:4]
+ lib/IR/Dominators.cpp [3:4]
+ lib/IR/FPEnv.cpp [3:4]
+ lib/IR/Function.cpp [3:4]
+ lib/IR/GCStrategy.cpp [3:4]
+ lib/IR/GVMaterializer.cpp [3:4]
+ lib/IR/Globals.cpp [3:4]
+ lib/IR/IRBuilder.cpp [3:4]
+ lib/IR/IRPrintingPasses.cpp [3:4]
+ lib/IR/InlineAsm.cpp [3:4]
+ lib/IR/Instruction.cpp [3:4]
+ lib/IR/Instructions.cpp [3:4]
+ lib/IR/IntrinsicInst.cpp [3:4]
+ lib/IR/LLVMContext.cpp [3:4]
+ lib/IR/LLVMContextImpl.cpp [3:4]
+ lib/IR/LLVMContextImpl.h [3:4]
+ lib/IR/LLVMRemarkStreamer.cpp [3:4]
+ lib/IR/LegacyPassManager.cpp [3:4]
+ lib/IR/MDBuilder.cpp [3:4]
+ lib/IR/Mangler.cpp [3:4]
+ lib/IR/Metadata.cpp [3:4]
+ lib/IR/MetadataImpl.h [3:4]
+ lib/IR/Module.cpp [3:4]
+ lib/IR/ModuleSummaryIndex.cpp [3:4]
+ lib/IR/Operator.cpp [3:4]
+ lib/IR/OptBisect.cpp [3:4]
+ lib/IR/Pass.cpp [3:4]
+ lib/IR/PassInstrumentation.cpp [3:4]
+ lib/IR/PassManager.cpp [3:4]
+ lib/IR/PassRegistry.cpp [3:4]
+ lib/IR/PassTimingInfo.cpp [3:4]
+ lib/IR/PrintPasses.cpp [3:4]
+ lib/IR/ProfDataUtils.cpp [3:4]
+ lib/IR/ProfileSummary.cpp [3:4]
+ lib/IR/PseudoProbe.cpp [3:4]
+ lib/IR/ReplaceConstant.cpp [3:4]
+ lib/IR/SSAContext.cpp [3:4]
+ lib/IR/SafepointIRVerifier.cpp [3:4]
+ lib/IR/Statepoint.cpp [3:4]
+ lib/IR/StructuralHash.cpp [3:4]
+ lib/IR/SymbolTableListTraitsImpl.h [3:4]
+ lib/IR/Type.cpp [3:4]
+ lib/IR/TypeFinder.cpp [3:4]
+ lib/IR/TypedPointerType.cpp [3:4]
+ lib/IR/Use.cpp [3:4]
+ lib/IR/User.cpp [3:4]
+ lib/IR/Value.cpp [3:4]
+ lib/IR/ValueSymbolTable.cpp [3:4]
+ lib/IR/VectorBuilder.cpp [3:4]
+ lib/IR/Verifier.cpp [3:4]
+ lib/IRPrinter/IRPrintingPasses.cpp [3:4]
+ lib/IRReader/IRReader.cpp [3:4]
+ lib/InterfaceStub/ELFObjHandler.cpp [3:4]
+ lib/InterfaceStub/IFSHandler.cpp [3:4]
+ lib/InterfaceStub/IFSStub.cpp [3:4]
+ lib/LTO/LTO.cpp [3:4]
+ lib/LTO/LTOBackend.cpp [3:4]
+ lib/LTO/LTOCodeGenerator.cpp [3:4]
+ lib/LTO/LTOModule.cpp [3:4]
+ lib/LTO/SummaryBasedOptimizations.cpp [3:4]
+ lib/LTO/ThinLTOCodeGenerator.cpp [3:4]
+ lib/LTO/UpdateCompilerUsed.cpp [3:4]
+ lib/LineEditor/LineEditor.cpp [3:4]
+ lib/Linker/IRMover.cpp [3:4]
+ lib/Linker/LinkDiagnosticInfo.h [3:4]
+ lib/Linker/LinkModules.cpp [3:4]
+ lib/MC/ConstantPools.cpp [3:4]
+ lib/MC/ELFObjectWriter.cpp [3:4]
+ lib/MC/MCAsmBackend.cpp [3:4]
+ lib/MC/MCAsmInfo.cpp [3:4]
+ lib/MC/MCAsmInfoCOFF.cpp [3:4]
+ lib/MC/MCAsmInfoDarwin.cpp [3:4]
+ lib/MC/MCAsmInfoELF.cpp [3:4]
+ lib/MC/MCAsmInfoGOFF.cpp [3:4]
+ lib/MC/MCAsmInfoWasm.cpp [3:4]
+ lib/MC/MCAsmInfoXCOFF.cpp [3:4]
+ lib/MC/MCAsmMacro.cpp [3:4]
+ lib/MC/MCAsmStreamer.cpp [3:4]
+ lib/MC/MCAssembler.cpp [3:4]
+ lib/MC/MCCodeEmitter.cpp [3:4]
+ lib/MC/MCCodeView.cpp [3:4]
+ lib/MC/MCContext.cpp [3:4]
+ lib/MC/MCDXContainerStreamer.cpp [3:4]
+ lib/MC/MCDXContainerWriter.cpp [3:4]
+ lib/MC/MCDisassembler/Disassembler.cpp [3:4]
+ lib/MC/MCDisassembler/Disassembler.h [3:4]
+ lib/MC/MCDisassembler/MCDisassembler.cpp [3:4]
+ lib/MC/MCDisassembler/MCExternalSymbolizer.cpp [3:4]
+ lib/MC/MCDisassembler/MCRelocationInfo.cpp [3:4]
+ lib/MC/MCDisassembler/MCSymbolizer.cpp [3:4]
+ lib/MC/MCDwarf.cpp [3:4]
+ lib/MC/MCELFObjectTargetWriter.cpp [3:4]
+ lib/MC/MCELFStreamer.cpp [3:4]
+ lib/MC/MCExpr.cpp [3:4]
+ lib/MC/MCFragment.cpp [3:4]
+ lib/MC/MCInst.cpp [3:4]
+ lib/MC/MCInstPrinter.cpp [3:4]
+ lib/MC/MCInstrAnalysis.cpp [3:4]
+ lib/MC/MCInstrDesc.cpp [3:4]
+ lib/MC/MCInstrInfo.cpp [3:4]
+ lib/MC/MCLabel.cpp [3:4]
+ lib/MC/MCLinkerOptimizationHint.cpp [3:4]
+ lib/MC/MCMachOStreamer.cpp [3:4]
+ lib/MC/MCMachObjectTargetWriter.cpp [3:4]
+ lib/MC/MCNullStreamer.cpp [3:4]
+ lib/MC/MCObjectFileInfo.cpp [3:4]
+ lib/MC/MCObjectStreamer.cpp [3:4]
+ lib/MC/MCObjectWriter.cpp [3:4]
+ lib/MC/MCParser/AsmLexer.cpp [3:4]
+ lib/MC/MCParser/AsmParser.cpp [3:4]
+ lib/MC/MCParser/COFFAsmParser.cpp [3:4]
+ lib/MC/MCParser/COFFMasmParser.cpp [3:4]
+ lib/MC/MCParser/DarwinAsmParser.cpp [3:4]
+ lib/MC/MCParser/ELFAsmParser.cpp [3:4]
+ lib/MC/MCParser/GOFFAsmParser.cpp [3:4]
+ lib/MC/MCParser/MCAsmLexer.cpp [3:4]
+ lib/MC/MCParser/MCAsmParser.cpp [3:4]
+ lib/MC/MCParser/MCAsmParserExtension.cpp [3:4]
+ lib/MC/MCParser/MCTargetAsmParser.cpp [3:4]
+ lib/MC/MCParser/MasmParser.cpp [3:4]
+ lib/MC/MCParser/WasmAsmParser.cpp [3:4]
+ lib/MC/MCParser/XCOFFAsmParser.cpp [4:5]
+ lib/MC/MCPseudoProbe.cpp [3:4]
+ lib/MC/MCRegisterInfo.cpp [3:4]
+ lib/MC/MCSPIRVStreamer.cpp [3:4]
+ lib/MC/MCSchedule.cpp [3:4]
+ lib/MC/MCSection.cpp [3:4]
+ lib/MC/MCSectionCOFF.cpp [3:4]
+ lib/MC/MCSectionDXContainer.cpp [3:4]
+ lib/MC/MCSectionELF.cpp [3:4]
+ lib/MC/MCSectionMachO.cpp [3:4]
+ lib/MC/MCSectionWasm.cpp [3:4]
+ lib/MC/MCSectionXCOFF.cpp [3:4]
+ lib/MC/MCStreamer.cpp [3:4]
+ lib/MC/MCSubtargetInfo.cpp [3:4]
+ lib/MC/MCSymbol.cpp [3:4]
+ lib/MC/MCSymbolELF.cpp [3:4]
+ lib/MC/MCSymbolXCOFF.cpp [3:4]
+ lib/MC/MCTargetOptions.cpp [3:4]
+ lib/MC/MCTargetOptionsCommandFlags.cpp [3:4]
+ lib/MC/MCValue.cpp [3:4]
+ lib/MC/MCWasmObjectTargetWriter.cpp [3:4]
+ lib/MC/MCWasmStreamer.cpp [3:4]
+ lib/MC/MCWin64EH.cpp [3:4]
+ lib/MC/MCWinCOFFStreamer.cpp [3:4]
+ lib/MC/MCWinEH.cpp [3:4]
+ lib/MC/MCXCOFFObjectTargetWriter.cpp [3:4]
+ lib/MC/MCXCOFFStreamer.cpp [3:4]
+ lib/MC/MachObjectWriter.cpp [3:4]
+ lib/MC/SPIRVObjectWriter.cpp [3:4]
+ lib/MC/StringTableBuilder.cpp [3:4]
+ lib/MC/SubtargetFeature.cpp [3:4]
+ lib/MC/TargetRegistry.cpp [3:4]
+ lib/MC/WasmObjectWriter.cpp [3:4]
+ lib/MC/WinCOFFObjectWriter.cpp [3:4]
+ lib/MC/XCOFFObjectWriter.cpp [3:4]
+ lib/MCA/CodeEmitter.cpp [3:4]
+ lib/MCA/Context.cpp [3:4]
+ lib/MCA/CustomBehaviour.cpp [3:4]
+ lib/MCA/HWEventListener.cpp [3:4]
+ lib/MCA/HardwareUnits/HardwareUnit.cpp [3:4]
+ lib/MCA/HardwareUnits/LSUnit.cpp [3:4]
+ lib/MCA/HardwareUnits/RegisterFile.cpp [3:4]
+ lib/MCA/HardwareUnits/ResourceManager.cpp [3:4]
+ lib/MCA/HardwareUnits/RetireControlUnit.cpp [3:4]
+ lib/MCA/HardwareUnits/Scheduler.cpp [3:4]
+ lib/MCA/IncrementalSourceMgr.cpp [3:4]
+ lib/MCA/InstrBuilder.cpp [3:4]
+ lib/MCA/Instruction.cpp [3:4]
+ lib/MCA/Pipeline.cpp [3:4]
+ lib/MCA/Stages/DispatchStage.cpp [3:4]
+ lib/MCA/Stages/EntryStage.cpp [3:4]
+ lib/MCA/Stages/ExecuteStage.cpp [3:4]
+ lib/MCA/Stages/InOrderIssueStage.cpp [3:4]
+ lib/MCA/Stages/InstructionTables.cpp [3:4]
+ lib/MCA/Stages/MicroOpQueueStage.cpp [3:4]
+ lib/MCA/Stages/RetireStage.cpp [3:4]
+ lib/MCA/Stages/Stage.cpp [3:4]
+ lib/MCA/Support.cpp [3:4]
+ lib/MCA/View.cpp [3:4]
+ lib/ObjCopy/Archive.cpp [3:4]
+ lib/ObjCopy/Archive.h [3:4]
+ lib/ObjCopy/COFF/COFFObjcopy.cpp [3:4]
+ lib/ObjCopy/COFF/COFFObject.cpp [3:4]
+ lib/ObjCopy/COFF/COFFObject.h [3:4]
+ lib/ObjCopy/COFF/COFFReader.cpp [3:4]
+ lib/ObjCopy/COFF/COFFReader.h [3:4]
+ lib/ObjCopy/COFF/COFFWriter.cpp [3:4]
+ lib/ObjCopy/COFF/COFFWriter.h [3:4]
+ lib/ObjCopy/CommonConfig.cpp [3:4]
+ lib/ObjCopy/ConfigManager.cpp [3:4]
+ lib/ObjCopy/ELF/ELFObjcopy.cpp [3:4]
+ lib/ObjCopy/ELF/ELFObject.cpp [3:4]
+ lib/ObjCopy/ELF/ELFObject.h [3:4]
+ lib/ObjCopy/MachO/MachOLayoutBuilder.cpp [3:4]
+ lib/ObjCopy/MachO/MachOLayoutBuilder.h [3:4]
+ lib/ObjCopy/MachO/MachOObjcopy.cpp [3:4]
+ lib/ObjCopy/MachO/MachOObject.cpp [3:4]
+ lib/ObjCopy/MachO/MachOObject.h [3:4]
+ lib/ObjCopy/MachO/MachOReader.cpp [3:4]
+ lib/ObjCopy/MachO/MachOReader.h [3:4]
+ lib/ObjCopy/MachO/MachOWriter.cpp [3:4]
+ lib/ObjCopy/MachO/MachOWriter.h [3:4]
+ lib/ObjCopy/ObjCopy.cpp [3:4]
+ lib/ObjCopy/XCOFF/XCOFFObjcopy.cpp [3:4]
+ lib/ObjCopy/XCOFF/XCOFFObject.h [3:4]
+ lib/ObjCopy/XCOFF/XCOFFReader.cpp [3:4]
+ lib/ObjCopy/XCOFF/XCOFFReader.h [3:4]
+ lib/ObjCopy/XCOFF/XCOFFWriter.cpp [3:4]
+ lib/ObjCopy/XCOFF/XCOFFWriter.h [3:4]
+ lib/ObjCopy/wasm/WasmObjcopy.cpp [3:4]
+ lib/ObjCopy/wasm/WasmObject.cpp [3:4]
+ lib/ObjCopy/wasm/WasmObject.h [3:4]
+ lib/ObjCopy/wasm/WasmReader.cpp [3:4]
+ lib/ObjCopy/wasm/WasmReader.h [3:4]
+ lib/ObjCopy/wasm/WasmWriter.cpp [3:4]
+ lib/ObjCopy/wasm/WasmWriter.h [3:4]
+ lib/Object/Archive.cpp [3:4]
+ lib/Object/ArchiveWriter.cpp [3:4]
+ lib/Object/Binary.cpp [3:4]
+ lib/Object/BuildID.cpp [3:4]
+ lib/Object/COFFImportFile.cpp [3:4]
+ lib/Object/COFFModuleDefinition.cpp [3:4]
+ lib/Object/COFFObjectFile.cpp [3:4]
+ lib/Object/DXContainer.cpp [3:4]
+ lib/Object/Decompressor.cpp [3:4]
+ lib/Object/ELF.cpp [3:4]
+ lib/Object/ELFObjectFile.cpp [3:4]
+ lib/Object/Error.cpp [3:4]
+ lib/Object/FaultMapParser.cpp [3:4]
+ lib/Object/IRObjectFile.cpp [3:4]
+ lib/Object/IRSymtab.cpp [3:4]
+ lib/Object/MachOObjectFile.cpp [3:4]
+ lib/Object/MachOUniversal.cpp [3:4]
+ lib/Object/MachOUniversalWriter.cpp [3:4]
+ lib/Object/Minidump.cpp [3:4]
+ lib/Object/ModuleSymbolTable.cpp [3:4]
+ lib/Object/Object.cpp [3:4]
+ lib/Object/ObjectFile.cpp [3:4]
+ lib/Object/OffloadBinary.cpp [3:4]
+ lib/Object/RecordStreamer.cpp [3:4]
+ lib/Object/RecordStreamer.h [3:4]
+ lib/Object/RelocationResolver.cpp [3:4]
+ lib/Object/SymbolSize.cpp [3:4]
+ lib/Object/SymbolicFile.cpp [3:4]
+ lib/Object/TapiFile.cpp [3:4]
+ lib/Object/TapiUniversal.cpp [3:4]
+ lib/Object/WasmObjectFile.cpp [3:4]
+ lib/Object/WindowsMachineFlag.cpp [3:4]
+ lib/Object/WindowsResource.cpp [3:4]
+ lib/Object/XCOFFObjectFile.cpp [3:4]
+ lib/ObjectYAML/ArchiveEmitter.cpp [3:4]
+ lib/ObjectYAML/ArchiveYAML.cpp [3:4]
+ lib/ObjectYAML/COFFEmitter.cpp [3:4]
+ lib/ObjectYAML/COFFYAML.cpp [3:4]
+ lib/ObjectYAML/CodeViewYAMLDebugSections.cpp [3:4]
+ lib/ObjectYAML/CodeViewYAMLSymbols.cpp [3:4]
+ lib/ObjectYAML/CodeViewYAMLTypeHashing.cpp [3:4]
+ lib/ObjectYAML/CodeViewYAMLTypes.cpp [3:4]
+ lib/ObjectYAML/DWARFEmitter.cpp [3:4]
+ lib/ObjectYAML/DWARFYAML.cpp [3:4]
+ lib/ObjectYAML/DXContainerEmitter.cpp [3:4]
+ lib/ObjectYAML/DXContainerYAML.cpp [3:4]
+ lib/ObjectYAML/ELFEmitter.cpp [3:4]
+ lib/ObjectYAML/ELFYAML.cpp [3:4]
+ lib/ObjectYAML/MachOEmitter.cpp [3:4]
+ lib/ObjectYAML/MachOYAML.cpp [3:4]
+ lib/ObjectYAML/MinidumpEmitter.cpp [3:4]
+ lib/ObjectYAML/MinidumpYAML.cpp [3:4]
+ lib/ObjectYAML/ObjectYAML.cpp [3:4]
+ lib/ObjectYAML/OffloadEmitter.cpp [3:4]
+ lib/ObjectYAML/OffloadYAML.cpp [3:4]
+ lib/ObjectYAML/WasmEmitter.cpp [3:4]
+ lib/ObjectYAML/WasmYAML.cpp [3:4]
+ lib/ObjectYAML/XCOFFEmitter.cpp [3:4]
+ lib/ObjectYAML/XCOFFYAML.cpp [3:4]
+ lib/ObjectYAML/YAML.cpp [3:4]
+ lib/ObjectYAML/yaml2obj.cpp [3:4]
+ lib/Option/Arg.cpp [3:4]
+ lib/Option/ArgList.cpp [3:4]
+ lib/Option/OptTable.cpp [3:4]
+ lib/Option/Option.cpp [3:4]
+ lib/Passes/OptimizationLevel.cpp [3:4]
+ lib/Passes/PassBuilder.cpp [3:4]
+ lib/Passes/PassBuilderBindings.cpp [3:4]
+ lib/Passes/PassBuilderPipelines.cpp [3:4]
+ lib/Passes/PassPlugin.cpp [3:4]
+ lib/Passes/PassRegistry.def [3:4]
+ lib/Passes/StandardInstrumentations.cpp [3:4]
+ lib/ProfileData/Coverage/CoverageMapping.cpp [3:4]
+ lib/ProfileData/Coverage/CoverageMappingReader.cpp [3:4]
+ lib/ProfileData/Coverage/CoverageMappingWriter.cpp [3:4]
+ lib/ProfileData/GCOV.cpp [3:4]
+ lib/ProfileData/InstrProf.cpp [3:4]
+ lib/ProfileData/InstrProfCorrelator.cpp [3:4]
+ lib/ProfileData/InstrProfReader.cpp [3:4]
+ lib/ProfileData/InstrProfWriter.cpp [3:4]
+ lib/ProfileData/ProfileSummaryBuilder.cpp [3:4]
+ lib/ProfileData/RawMemProfReader.cpp [3:4]
+ lib/ProfileData/SampleProf.cpp [3:4]
+ lib/ProfileData/SampleProfReader.cpp [3:4]
+ lib/ProfileData/SampleProfWriter.cpp [3:4]
+ lib/Remarks/BitstreamRemarkParser.cpp [3:4]
+ lib/Remarks/BitstreamRemarkParser.h [3:4]
+ lib/Remarks/BitstreamRemarkSerializer.cpp [3:4]
+ lib/Remarks/Remark.cpp [3:4]
+ lib/Remarks/RemarkFormat.cpp [3:4]
+ lib/Remarks/RemarkLinker.cpp [3:4]
+ lib/Remarks/RemarkParser.cpp [3:4]
+ lib/Remarks/RemarkSerializer.cpp [3:4]
+ lib/Remarks/RemarkStreamer.cpp [3:4]
+ lib/Remarks/RemarkStringTable.cpp [3:4]
+ lib/Remarks/YAMLRemarkParser.cpp [3:4]
+ lib/Remarks/YAMLRemarkParser.h [3:4]
+ lib/Remarks/YAMLRemarkSerializer.cpp [3:4]
+ lib/Support/ABIBreak.cpp [3:4]
+ lib/Support/AMDGPUMetadata.cpp [3:4]
+ lib/Support/APFixedPoint.cpp [3:4]
+ lib/Support/APFloat.cpp [3:4]
+ lib/Support/APInt.cpp [3:4]
+ lib/Support/APSInt.cpp [3:4]
+ lib/Support/ARMAttributeParser.cpp [3:4]
+ lib/Support/ARMBuildAttrs.cpp [3:4]
+ lib/Support/ARMWinEH.cpp [3:4]
+ lib/Support/AddressRanges.cpp [3:4]
+ lib/Support/Allocator.cpp [3:4]
+ lib/Support/Atomic.cpp [3:4]
+ lib/Support/AutoConvert.cpp [3:4]
+ lib/Support/Base64.cpp [3:4]
+ lib/Support/BinaryStreamError.cpp [3:4]
+ lib/Support/BinaryStreamReader.cpp [3:4]
+ lib/Support/BinaryStreamRef.cpp [3:4]
+ lib/Support/BinaryStreamWriter.cpp [3:4]
+ lib/Support/BlockFrequency.cpp [3:4]
+ lib/Support/BranchProbability.cpp [3:4]
+ lib/Support/BuryPointer.cpp [3:4]
+ lib/Support/COM.cpp [3:4]
+ lib/Support/CRC.cpp [3:4]
+ lib/Support/CSKYAttributeParser.cpp [3:4]
+ lib/Support/CSKYAttributes.cpp [3:4]
+ lib/Support/CachePruning.cpp [3:4]
+ lib/Support/Caching.cpp [3:4]
+ lib/Support/Chrono.cpp [3:4]
+ lib/Support/CodeGenCoverage.cpp [3:4]
+ lib/Support/CommandLine.cpp [3:4]
+ lib/Support/Compression.cpp [3:4]
+ lib/Support/ConvertUTFWrapper.cpp [3:4]
+ lib/Support/CrashRecoveryContext.cpp [3:4]
+ lib/Support/DAGDeltaAlgorithm.cpp [3:4]
+ lib/Support/DJB.cpp [3:4]
+ lib/Support/DataExtractor.cpp [3:4]
+ lib/Support/Debug.cpp [3:4]
+ lib/Support/DebugOptions.h [3:4]
+ lib/Support/DeltaAlgorithm.cpp [3:4]
+ lib/Support/DivisionByConstantInfo.cpp [3:4]
+ lib/Support/DynamicLibrary.cpp [3:4]
+ lib/Support/ELFAttributeParser.cpp [3:4]
+ lib/Support/ELFAttributes.cpp [3:4]
+ lib/Support/Errno.cpp [3:4]
+ lib/Support/Error.cpp [3:4]
+ lib/Support/ErrorHandling.cpp [3:4]
+ lib/Support/ExtensibleRTTI.cpp [3:4]
+ lib/Support/FileCollector.cpp [3:4]
+ lib/Support/FileOutputBuffer.cpp [3:4]
+ lib/Support/FileUtilities.cpp [3:4]
+ lib/Support/FoldingSet.cpp [3:4]
+ lib/Support/FormatVariadic.cpp [3:4]
+ lib/Support/FormattedStream.cpp [3:4]
+ lib/Support/GlobPattern.cpp [3:4]
+ lib/Support/GraphWriter.cpp [3:4]
+ lib/Support/Hashing.cpp [3:4]
+ lib/Support/InitLLVM.cpp [3:4]
+ lib/Support/InstructionCost.cpp [3:4]
+ lib/Support/IntEqClasses.cpp [3:4]
+ lib/Support/IntervalMap.cpp [3:4]
+ lib/Support/ItaniumManglingCanonicalizer.cpp [3:4]
+ lib/Support/JSON.cpp [3:4]
+ lib/Support/KnownBits.cpp [3:4]
+ lib/Support/LEB128.cpp [3:4]
+ lib/Support/LineIterator.cpp [3:4]
+ lib/Support/LockFileManager.cpp [3:4]
+ lib/Support/LowLevelType.cpp [3:4]
+ lib/Support/MSP430AttributeParser.cpp [3:4]
+ lib/Support/MSP430Attributes.cpp [3:4]
+ lib/Support/ManagedStatic.cpp [3:4]
+ lib/Support/MathExtras.cpp [3:4]
+ lib/Support/MemAlloc.cpp [3:4]
+ lib/Support/Memory.cpp [3:4]
+ lib/Support/MemoryBuffer.cpp [3:4]
+ lib/Support/MemoryBufferRef.cpp [3:4]
+ lib/Support/NativeFormatting.cpp [3:4]
+ lib/Support/OptimizedStructLayout.cpp [3:4]
+ lib/Support/Optional.cpp [3:4]
+ lib/Support/Parallel.cpp [3:4]
+ lib/Support/Path.cpp [3:4]
+ lib/Support/PluginLoader.cpp [3:4]
+ lib/Support/PrettyStackTrace.cpp [3:4]
+ lib/Support/Process.cpp [3:4]
+ lib/Support/Program.cpp [3:4]
+ lib/Support/RISCVAttributeParser.cpp [3:4]
+ lib/Support/RISCVAttributes.cpp [3:4]
+ lib/Support/RISCVISAInfo.cpp [3:4]
+ lib/Support/RWMutex.cpp [3:4]
+ lib/Support/RandomNumberGenerator.cpp [3:4]
+ lib/Support/Regex.cpp [3:4]
+ lib/Support/SHA1.cpp [3:4]
+ lib/Support/SHA256.cpp [3:4]
+ lib/Support/ScaledNumber.cpp [3:4]
+ lib/Support/Signals.cpp [3:4]
+ lib/Support/Signposts.cpp [3:4]
+ lib/Support/SmallPtrSet.cpp [3:4]
+ lib/Support/SmallVector.cpp [3:4]
+ lib/Support/SourceMgr.cpp [3:4]
+ lib/Support/SpecialCaseList.cpp [3:4]
+ lib/Support/Statistic.cpp [3:4]
+ lib/Support/StringExtras.cpp [3:4]
+ lib/Support/StringMap.cpp [3:4]
+ lib/Support/StringRef.cpp [3:4]
+ lib/Support/StringSaver.cpp [3:4]
+ lib/Support/SuffixTree.cpp [3:4]
+ lib/Support/SymbolRemappingReader.cpp [3:4]
+ lib/Support/SystemUtils.cpp [3:4]
+ lib/Support/TarWriter.cpp [3:4]
+ lib/Support/ThreadPool.cpp [3:4]
+ lib/Support/Threading.cpp [3:4]
+ lib/Support/TimeProfiler.cpp [3:4]
+ lib/Support/Timer.cpp [3:4]
+ lib/Support/ToolOutputFile.cpp [3:4]
+ lib/Support/TrigramIndex.cpp [3:4]
+ lib/Support/Twine.cpp [3:4]
+ lib/Support/TypeSize.cpp [3:4]
+ lib/Support/Unicode.cpp [3:4]
+ lib/Support/UnicodeNameToCodepoint.cpp [4:5]
+ lib/Support/UnicodeNameToCodepointGenerated.cpp [3:4]
+ lib/Support/Unix/COM.inc [3:4]
+ lib/Support/Unix/DynamicLibrary.inc [3:4]
+ lib/Support/Unix/Memory.inc [3:4]
+ lib/Support/Unix/Path.inc [3:4]
+ lib/Support/Unix/Process.inc [3:4]
+ lib/Support/Unix/Program.inc [3:4]
+ lib/Support/Unix/Signals.inc [3:4]
+ lib/Support/Unix/Threading.inc [3:4]
+ lib/Support/Unix/Unix.h [3:4]
+ lib/Support/Unix/Watchdog.inc [3:4]
+ lib/Support/Valgrind.cpp [3:4]
+ lib/Support/VersionTuple.cpp [3:4]
+ lib/Support/VirtualFileSystem.cpp [3:4]
+ lib/Support/Watchdog.cpp [3:4]
+ lib/Support/Windows/COM.inc [3:4]
+ lib/Support/Windows/DynamicLibrary.inc [3:4]
+ lib/Support/Windows/Memory.inc [3:4]
+ lib/Support/Windows/Path.inc [3:4]
+ lib/Support/Windows/Process.inc [3:4]
+ lib/Support/Windows/Program.inc [3:4]
+ lib/Support/Windows/Signals.inc [3:4]
+ lib/Support/Windows/Threading.inc [3:4]
+ lib/Support/Windows/Watchdog.inc [3:4]
+ lib/Support/WithColor.cpp [3:4]
+ lib/Support/YAMLParser.cpp [3:4]
+ lib/Support/YAMLTraits.cpp [3:4]
+ lib/Support/Z3Solver.cpp [3:4]
+ lib/Support/circular_raw_ostream.cpp [3:4]
+ lib/Support/raw_os_ostream.cpp [3:4]
+ lib/Support/raw_ostream.cpp [3:4]
+ lib/TableGen/DetailedRecordsBackend.cpp [3:4]
+ lib/TableGen/Error.cpp [3:4]
+ lib/TableGen/JSONBackend.cpp [3:4]
+ lib/TableGen/Main.cpp [3:4]
+ lib/TableGen/Parser.cpp [3:4]
+ lib/TableGen/Record.cpp [3:4]
+ lib/TableGen/SetTheory.cpp [3:4]
+ lib/TableGen/StringMatcher.cpp [3:4]
+ lib/TableGen/TGLexer.cpp [3:4]
+ lib/TableGen/TGLexer.h [3:4]
+ lib/TableGen/TGParser.cpp [3:4]
+ lib/TableGen/TGParser.h [3:4]
+ lib/TableGen/TableGenBackend.cpp [3:4]
+ lib/TableGen/TableGenBackendSkeleton.cpp [3:4]
+ lib/Target/AArch64/AArch64.h [3:4]
+ lib/Target/AArch64/AArch64.td [3:4]
+ lib/Target/AArch64/AArch64A53Fix835769.cpp [3:4]
+ lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp [3:4]
+ lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp [3:4]
+ lib/Target/AArch64/AArch64AsmPrinter.cpp [3:4]
+ lib/Target/AArch64/AArch64BranchTargets.cpp [3:4]
+ lib/Target/AArch64/AArch64CallingConvention.cpp [3:4]
+ lib/Target/AArch64/AArch64CallingConvention.h [3:4]
+ lib/Target/AArch64/AArch64CallingConvention.td [3:4]
+ lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp [3:4]
+ lib/Target/AArch64/AArch64CollectLOH.cpp [3:4]
+ lib/Target/AArch64/AArch64Combine.td [3:4]
+ lib/Target/AArch64/AArch64CompressJumpTables.cpp [3:4]
+ lib/Target/AArch64/AArch64CondBrTuning.cpp [3:4]
+ lib/Target/AArch64/AArch64ConditionOptimizer.cpp [3:4]
+ lib/Target/AArch64/AArch64ConditionalCompares.cpp [3:4]
+ lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp [3:4]
+ lib/Target/AArch64/AArch64ExpandImm.cpp [3:4]
+ lib/Target/AArch64/AArch64ExpandImm.h [3:4]
+ lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp [3:4]
+ lib/Target/AArch64/AArch64FalkorHWPFFix.cpp [3:4]
+ lib/Target/AArch64/AArch64FastISel.cpp [3:4]
+ lib/Target/AArch64/AArch64FrameLowering.cpp [3:4]
+ lib/Target/AArch64/AArch64FrameLowering.h [3:4]
+ lib/Target/AArch64/AArch64GenRegisterBankInfo.def [3:4]
+ lib/Target/AArch64/AArch64ISelDAGToDAG.cpp [3:4]
+ lib/Target/AArch64/AArch64ISelLowering.cpp [3:4]
+ lib/Target/AArch64/AArch64ISelLowering.h [3:4]
+ lib/Target/AArch64/AArch64InstrAtomics.td [3:4]
+ lib/Target/AArch64/AArch64InstrFormats.td [3:4]
+ lib/Target/AArch64/AArch64InstrGISel.td [3:4]
+ lib/Target/AArch64/AArch64InstrInfo.cpp [3:4]
+ lib/Target/AArch64/AArch64InstrInfo.h [3:4]
+ lib/Target/AArch64/AArch64InstrInfo.td [3:4]
+ lib/Target/AArch64/AArch64KCFI.cpp [3:4]
+ lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp [3:4]
+ lib/Target/AArch64/AArch64LowerHomogeneousPrologEpilog.cpp [3:4]
+ lib/Target/AArch64/AArch64MCInstLower.cpp [3:4]
+ lib/Target/AArch64/AArch64MCInstLower.h [3:4]
+ lib/Target/AArch64/AArch64MIPeepholeOpt.cpp [3:4]
+ lib/Target/AArch64/AArch64MachineFunctionInfo.cpp [4:5]
+ lib/Target/AArch64/AArch64MachineFunctionInfo.h [3:4]
+ lib/Target/AArch64/AArch64MachineScheduler.cpp [3:4]
+ lib/Target/AArch64/AArch64MachineScheduler.h [3:4]
+ lib/Target/AArch64/AArch64MacroFusion.cpp [3:4]
+ lib/Target/AArch64/AArch64MacroFusion.h [3:4]
+ lib/Target/AArch64/AArch64PBQPRegAlloc.cpp [3:4]
+ lib/Target/AArch64/AArch64PBQPRegAlloc.h [3:4]
+ lib/Target/AArch64/AArch64PerfectShuffle.h [3:4]
+ lib/Target/AArch64/AArch64PfmCounters.td [3:4]
+ lib/Target/AArch64/AArch64PromoteConstant.cpp [3:4]
+ lib/Target/AArch64/AArch64RedundantCopyElimination.cpp [3:4]
+ lib/Target/AArch64/AArch64RegisterBanks.td [3:4]
+ lib/Target/AArch64/AArch64RegisterInfo.cpp [3:4]
+ lib/Target/AArch64/AArch64RegisterInfo.h [3:4]
+ lib/Target/AArch64/AArch64RegisterInfo.td [3:4]
+ lib/Target/AArch64/AArch64SIMDInstrOpt.cpp [2:3]
+ lib/Target/AArch64/AArch64SLSHardening.cpp [3:4]
+ lib/Target/AArch64/AArch64SMEInstrInfo.td [3:4]
+ lib/Target/AArch64/AArch64SVEInstrInfo.td [3:4]
+ lib/Target/AArch64/AArch64SchedA53.td [3:4]
+ lib/Target/AArch64/AArch64SchedA55.td [3:4]
+ lib/Target/AArch64/AArch64SchedA57.td [3:4]
+ lib/Target/AArch64/AArch64SchedA57WriteRes.td [3:4]
+ lib/Target/AArch64/AArch64SchedA64FX.td [3:4]
+ lib/Target/AArch64/AArch64SchedAmpere1.td [3:4]
+ lib/Target/AArch64/AArch64SchedCyclone.td [3:4]
+ lib/Target/AArch64/AArch64SchedExynosM3.td [3:4]
+ lib/Target/AArch64/AArch64SchedExynosM4.td [3:4]
+ lib/Target/AArch64/AArch64SchedExynosM5.td [3:4]
+ lib/Target/AArch64/AArch64SchedFalkor.td [3:4]
+ lib/Target/AArch64/AArch64SchedFalkorDetails.td [3:4]
+ lib/Target/AArch64/AArch64SchedKryo.td [3:4]
+ lib/Target/AArch64/AArch64SchedKryoDetails.td [3:4]
+ lib/Target/AArch64/AArch64SchedNeoverseN2.td [3:4]
+ lib/Target/AArch64/AArch64SchedPredAmpere.td [3:4]
+ lib/Target/AArch64/AArch64SchedPredExynos.td [3:4]
+ lib/Target/AArch64/AArch64SchedPredicates.td [3:4]
+ lib/Target/AArch64/AArch64SchedTSV110.td [3:4]
+ lib/Target/AArch64/AArch64SchedThunderX.td [3:4]
+ lib/Target/AArch64/AArch64SchedThunderX2T99.td [3:4]
+ lib/Target/AArch64/AArch64SchedThunderX3T110.td [3:4]
+ lib/Target/AArch64/AArch64Schedule.td [3:4]
+ lib/Target/AArch64/AArch64SelectionDAGInfo.cpp [3:4]
+ lib/Target/AArch64/AArch64SelectionDAGInfo.h [3:4]
+ lib/Target/AArch64/AArch64SpeculationHardening.cpp [3:4]
+ lib/Target/AArch64/AArch64StackTagging.cpp [3:4]
+ lib/Target/AArch64/AArch64StackTaggingPreRA.cpp [3:4]
+ lib/Target/AArch64/AArch64StorePairSuppress.cpp [3:4]
+ lib/Target/AArch64/AArch64Subtarget.cpp [3:4]
+ lib/Target/AArch64/AArch64Subtarget.h [3:4]
+ lib/Target/AArch64/AArch64SystemOperands.td [3:4]
+ lib/Target/AArch64/AArch64TargetMachine.cpp [3:4]
+ lib/Target/AArch64/AArch64TargetMachine.h [3:4]
+ lib/Target/AArch64/AArch64TargetObjectFile.cpp [3:4]
+ lib/Target/AArch64/AArch64TargetObjectFile.h [3:4]
+ lib/Target/AArch64/AArch64TargetTransformInfo.cpp [3:4]
+ lib/Target/AArch64/AArch64TargetTransformInfo.h [3:4]
+ lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp [3:4]
+ lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp [3:4]
+ lib/Target/AArch64/Disassembler/AArch64Disassembler.h [3:4]
+ lib/Target/AArch64/Disassembler/AArch64ExternalSymbolizer.cpp [3:4]
+ lib/Target/AArch64/Disassembler/AArch64ExternalSymbolizer.h [3:4]
+ lib/Target/AArch64/GISel/AArch64CallLowering.cpp [3:4]
+ lib/Target/AArch64/GISel/AArch64CallLowering.h [3:4]
+ lib/Target/AArch64/GISel/AArch64GlobalISelUtils.cpp [3:4]
+ lib/Target/AArch64/GISel/AArch64GlobalISelUtils.h [3:4]
+ lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp [3:4]
+ lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp [3:4]
+ lib/Target/AArch64/GISel/AArch64LegalizerInfo.h [3:4]
+ lib/Target/AArch64/GISel/AArch64O0PreLegalizerCombiner.cpp [3:4]
+ lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp [3:4]
+ lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp [3:4]
+ lib/Target/AArch64/GISel/AArch64PostSelectOptimize.cpp [3:4]
+ lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp [3:4]
+ lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp [3:4]
+ lib/Target/AArch64/GISel/AArch64RegisterBankInfo.h [3:4]
+ lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h [3:4]
+ lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp [3:4]
+ lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp [3:4]
+ lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp [3:4]
+ lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.h [3:4]
+ lib/Target/AArch64/MCTargetDesc/AArch64FixupKinds.h [3:4]
+ lib/Target/AArch64/MCTargetDesc/AArch64InstPrinter.cpp [3:4]
+ lib/Target/AArch64/MCTargetDesc/AArch64InstPrinter.h [3:4]
+ lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp [3:4]
+ lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.h [3:4]
+ lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp [3:4]
+ lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp [3:4]
+ lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.h [3:4]
+ lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp [3:4]
+ lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.h [3:4]
+ lib/Target/AArch64/MCTargetDesc/AArch64MachObjectWriter.cpp [3:4]
+ lib/Target/AArch64/MCTargetDesc/AArch64TargetStreamer.cpp [3:4]
+ lib/Target/AArch64/MCTargetDesc/AArch64TargetStreamer.h [3:4]
+ lib/Target/AArch64/MCTargetDesc/AArch64WinCOFFObjectWriter.cpp [3:4]
+ lib/Target/AArch64/MCTargetDesc/AArch64WinCOFFStreamer.cpp [3:4]
+ lib/Target/AArch64/MCTargetDesc/AArch64WinCOFFStreamer.h [3:4]
+ lib/Target/AArch64/SMEABIPass.cpp [3:4]
+ lib/Target/AArch64/SMEInstrFormats.td [3:4]
+ lib/Target/AArch64/SVEInstrFormats.td [3:4]
+ lib/Target/AArch64/SVEIntrinsicOpts.cpp [3:4]
+ lib/Target/AArch64/TargetInfo/AArch64TargetInfo.cpp [3:4]
+ lib/Target/AArch64/TargetInfo/AArch64TargetInfo.h [3:4]
+ lib/Target/AArch64/Utils/AArch64BaseInfo.cpp [3:4]
+ lib/Target/AArch64/Utils/AArch64BaseInfo.h [3:4]
+ lib/Target/AArch64/Utils/AArch64SMEAttributes.cpp [3:4]
+ lib/Target/AArch64/Utils/AArch64SMEAttributes.h [3:4]
+ lib/Target/ARM/A15SDOptimizer.cpp [3:4]
+ lib/Target/ARM/ARM.h [3:4]
+ lib/Target/ARM/ARM.td [3:4]
+ lib/Target/ARM/ARMAsmPrinter.cpp [3:4]
+ lib/Target/ARM/ARMAsmPrinter.h [3:4]
+ lib/Target/ARM/ARMBaseInstrInfo.cpp [3:4]
+ lib/Target/ARM/ARMBaseInstrInfo.h [3:4]
+ lib/Target/ARM/ARMBaseRegisterInfo.cpp [3:4]
+ lib/Target/ARM/ARMBaseRegisterInfo.h [3:4]
+ lib/Target/ARM/ARMBasicBlockInfo.cpp [3:4]
+ lib/Target/ARM/ARMBasicBlockInfo.h [3:4]
+ lib/Target/ARM/ARMBlockPlacement.cpp [3:4]
+ lib/Target/ARM/ARMBranchTargets.cpp [3:4]
+ lib/Target/ARM/ARMCallLowering.cpp [3:4]
+ lib/Target/ARM/ARMCallLowering.h [3:4]
+ lib/Target/ARM/ARMCallingConv.cpp [3:4]
+ lib/Target/ARM/ARMCallingConv.h [3:4]
+ lib/Target/ARM/ARMCallingConv.td [3:4]
+ lib/Target/ARM/ARMConstantIslandPass.cpp [3:4]
+ lib/Target/ARM/ARMConstantPoolValue.cpp [3:4]
+ lib/Target/ARM/ARMConstantPoolValue.h [3:4]
+ lib/Target/ARM/ARMExpandPseudoInsts.cpp [3:4]
+ lib/Target/ARM/ARMFastISel.cpp [3:4]
+ lib/Target/ARM/ARMFeatures.h [3:4]
+ lib/Target/ARM/ARMFixCortexA57AES1742098Pass.cpp [3:4]
+ lib/Target/ARM/ARMFrameLowering.cpp [3:4]
+ lib/Target/ARM/ARMFrameLowering.h [3:4]
+ lib/Target/ARM/ARMHazardRecognizer.cpp [3:4]
+ lib/Target/ARM/ARMHazardRecognizer.h [3:4]
+ lib/Target/ARM/ARMISelDAGToDAG.cpp [3:4]
+ lib/Target/ARM/ARMISelLowering.cpp [3:4]
+ lib/Target/ARM/ARMISelLowering.h [3:4]
+ lib/Target/ARM/ARMInstrCDE.td [3:4]
+ lib/Target/ARM/ARMInstrFormats.td [3:4]
+ lib/Target/ARM/ARMInstrInfo.cpp [3:4]
+ lib/Target/ARM/ARMInstrInfo.h [3:4]
+ lib/Target/ARM/ARMInstrInfo.td [3:4]
+ lib/Target/ARM/ARMInstrMVE.td [3:4]
+ lib/Target/ARM/ARMInstrNEON.td [3:4]
+ lib/Target/ARM/ARMInstrThumb.td [3:4]
+ lib/Target/ARM/ARMInstrThumb2.td [3:4]
+ lib/Target/ARM/ARMInstrVFP.td [3:4]
+ lib/Target/ARM/ARMInstructionSelector.cpp [3:4]
+ lib/Target/ARM/ARMLegalizerInfo.cpp [3:4]
+ lib/Target/ARM/ARMLegalizerInfo.h [3:4]
+ lib/Target/ARM/ARMLoadStoreOptimizer.cpp [3:4]
+ lib/Target/ARM/ARMLowOverheadLoops.cpp [3:4]
+ lib/Target/ARM/ARMMCInstLower.cpp [3:4]
+ lib/Target/ARM/ARMMachineFunctionInfo.cpp [3:4]
+ lib/Target/ARM/ARMMachineFunctionInfo.h [3:4]
+ lib/Target/ARM/ARMMacroFusion.cpp [3:4]
+ lib/Target/ARM/ARMMacroFusion.h [3:4]
+ lib/Target/ARM/ARMOptimizeBarriersPass.cpp [4:5]
+ lib/Target/ARM/ARMParallelDSP.cpp [3:4]
+ lib/Target/ARM/ARMPerfectShuffle.h [3:4]
+ lib/Target/ARM/ARMPredicates.td [3:4]
+ lib/Target/ARM/ARMRegisterBankInfo.cpp [3:4]
+ lib/Target/ARM/ARMRegisterBankInfo.h [3:4]
+ lib/Target/ARM/ARMRegisterBanks.td [3:4]
+ lib/Target/ARM/ARMRegisterInfo.cpp [3:4]
+ lib/Target/ARM/ARMRegisterInfo.h [3:4]
+ lib/Target/ARM/ARMRegisterInfo.td [3:4]
+ lib/Target/ARM/ARMSLSHardening.cpp [3:4]
+ lib/Target/ARM/ARMSchedule.td [3:4]
+ lib/Target/ARM/ARMScheduleA57.td [3:4]
+ lib/Target/ARM/ARMScheduleA57WriteRes.td [3:4]
+ lib/Target/ARM/ARMScheduleA8.td [3:4]
+ lib/Target/ARM/ARMScheduleA9.td [3:4]
+ lib/Target/ARM/ARMScheduleM4.td [3:4]
+ lib/Target/ARM/ARMScheduleM55.td [3:4]
+ lib/Target/ARM/ARMScheduleM7.td [3:4]
+ lib/Target/ARM/ARMScheduleR52.td [3:4]
+ lib/Target/ARM/ARMScheduleSwift.td [3:4]
+ lib/Target/ARM/ARMScheduleV6.td [3:4]
+ lib/Target/ARM/ARMSelectionDAGInfo.cpp [3:4]
+ lib/Target/ARM/ARMSelectionDAGInfo.h [3:4]
+ lib/Target/ARM/ARMSubtarget.cpp [3:4]
+ lib/Target/ARM/ARMSubtarget.h [3:4]
+ lib/Target/ARM/ARMSystemRegister.td [3:4]
+ lib/Target/ARM/ARMTargetMachine.cpp [3:4]
+ lib/Target/ARM/ARMTargetMachine.h [3:4]
+ lib/Target/ARM/ARMTargetObjectFile.cpp [3:4]
+ lib/Target/ARM/ARMTargetObjectFile.h [3:4]
+ lib/Target/ARM/ARMTargetTransformInfo.cpp [3:4]
+ lib/Target/ARM/ARMTargetTransformInfo.h [3:4]
+ lib/Target/ARM/AsmParser/ARMAsmParser.cpp [3:4]
+ lib/Target/ARM/Disassembler/ARMDisassembler.cpp [3:4]
+ lib/Target/ARM/MCTargetDesc/ARMAddressingModes.h [3:4]
+ lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp [3:4]
+ lib/Target/ARM/MCTargetDesc/ARMAsmBackend.h [3:4]
+ lib/Target/ARM/MCTargetDesc/ARMAsmBackendDarwin.h [3:4]
+ lib/Target/ARM/MCTargetDesc/ARMAsmBackendELF.h [3:4]
+ lib/Target/ARM/MCTargetDesc/ARMAsmBackendWinCOFF.h [3:4]
+ lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h [3:4]
+ lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp [3:4]
+ lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp [3:4]
+ lib/Target/ARM/MCTargetDesc/ARMFixupKinds.h [3:4]
+ lib/Target/ARM/MCTargetDesc/ARMInstPrinter.cpp [3:4]
+ lib/Target/ARM/MCTargetDesc/ARMInstPrinter.h [3:4]
+ lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp [3:4]
+ lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.h [3:4]
+ lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp [3:4]
+ lib/Target/ARM/MCTargetDesc/ARMMCExpr.cpp [3:4]
+ lib/Target/ARM/MCTargetDesc/ARMMCExpr.h [3:4]
+ lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp [3:4]
+ lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.h [3:4]
+ lib/Target/ARM/MCTargetDesc/ARMMachORelocationInfo.cpp [3:4]
+ lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp [3:4]
+ lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp [3:4]
+ lib/Target/ARM/MCTargetDesc/ARMUnwindOpAsm.cpp [3:4]
+ lib/Target/ARM/MCTargetDesc/ARMUnwindOpAsm.h [3:4]
+ lib/Target/ARM/MCTargetDesc/ARMWinCOFFObjectWriter.cpp [3:4]
+ lib/Target/ARM/MCTargetDesc/ARMWinCOFFStreamer.cpp [3:4]
+ lib/Target/ARM/MLxExpansionPass.cpp [3:4]
+ lib/Target/ARM/MVEGatherScatterLowering.cpp [3:4]
+ lib/Target/ARM/MVELaneInterleavingPass.cpp [3:4]
+ lib/Target/ARM/MVETPAndVPTOptimisationsPass.cpp [3:4]
+ lib/Target/ARM/MVETailPredUtils.h [3:4]
+ lib/Target/ARM/MVETailPredication.cpp [3:4]
+ lib/Target/ARM/MVEVPTBlockPass.cpp [3:4]
+ lib/Target/ARM/TargetInfo/ARMTargetInfo.cpp [3:4]
+ lib/Target/ARM/TargetInfo/ARMTargetInfo.h [3:4]
+ lib/Target/ARM/Thumb1FrameLowering.cpp [3:4]
+ lib/Target/ARM/Thumb1FrameLowering.h [3:4]
+ lib/Target/ARM/Thumb1InstrInfo.cpp [3:4]
+ lib/Target/ARM/Thumb1InstrInfo.h [3:4]
+ lib/Target/ARM/Thumb2ITBlockPass.cpp [3:4]
+ lib/Target/ARM/Thumb2InstrInfo.cpp [3:4]
+ lib/Target/ARM/Thumb2InstrInfo.h [3:4]
+ lib/Target/ARM/Thumb2SizeReduction.cpp [3:4]
+ lib/Target/ARM/ThumbRegisterInfo.cpp [3:4]
+ lib/Target/ARM/ThumbRegisterInfo.h [3:4]
+ lib/Target/ARM/Utils/ARMBaseInfo.cpp [3:4]
+ lib/Target/ARM/Utils/ARMBaseInfo.h [3:4]
+ lib/Target/BPF/AsmParser/BPFAsmParser.cpp [3:4]
+ lib/Target/BPF/BPF.h [3:4]
+ lib/Target/BPF/BPF.td [3:4]
+ lib/Target/BPF/BPFAbstractMemberAccess.cpp [3:4]
+ lib/Target/BPF/BPFAdjustOpt.cpp [3:4]
+ lib/Target/BPF/BPFAsmPrinter.cpp [3:4]
+ lib/Target/BPF/BPFCORE.h [3:4]
+ lib/Target/BPF/BPFCallingConv.td [3:4]
+ lib/Target/BPF/BPFCheckAndAdjustIR.cpp [3:4]
+ lib/Target/BPF/BPFFrameLowering.cpp [3:4]
+ lib/Target/BPF/BPFFrameLowering.h [3:4]
+ lib/Target/BPF/BPFIRPeephole.cpp [3:4]
+ lib/Target/BPF/BPFISelDAGToDAG.cpp [3:4]
+ lib/Target/BPF/BPFISelLowering.cpp [3:4]
+ lib/Target/BPF/BPFISelLowering.h [3:4]
+ lib/Target/BPF/BPFInstrFormats.td [3:4]
+ lib/Target/BPF/BPFInstrInfo.cpp [3:4]
+ lib/Target/BPF/BPFInstrInfo.h [3:4]
+ lib/Target/BPF/BPFInstrInfo.td [3:4]
+ lib/Target/BPF/BPFMCInstLower.cpp [3:4]
+ lib/Target/BPF/BPFMCInstLower.h [3:4]
+ lib/Target/BPF/BPFMIChecking.cpp [3:4]
+ lib/Target/BPF/BPFMIPeephole.cpp [3:4]
+ lib/Target/BPF/BPFMISimplifyPatchable.cpp [3:4]
+ lib/Target/BPF/BPFPreserveDIType.cpp [3:4]
+ lib/Target/BPF/BPFRegisterInfo.cpp [3:4]
+ lib/Target/BPF/BPFRegisterInfo.h [3:4]
+ lib/Target/BPF/BPFRegisterInfo.td [3:4]
+ lib/Target/BPF/BPFSelectionDAGInfo.cpp [3:4]
+ lib/Target/BPF/BPFSelectionDAGInfo.h [3:4]
+ lib/Target/BPF/BPFSubtarget.cpp [3:4]
+ lib/Target/BPF/BPFSubtarget.h [3:4]
+ lib/Target/BPF/BPFTargetMachine.cpp [3:4]
+ lib/Target/BPF/BPFTargetMachine.h [3:4]
+ lib/Target/BPF/BPFTargetTransformInfo.h [3:4]
+ lib/Target/BPF/BTF.def [3:4]
+ lib/Target/BPF/BTF.h [3:4]
+ lib/Target/BPF/BTFDebug.cpp [3:4]
+ lib/Target/BPF/BTFDebug.h [3:4]
+ lib/Target/BPF/Disassembler/BPFDisassembler.cpp [3:4]
+ lib/Target/BPF/MCTargetDesc/BPFAsmBackend.cpp [3:4]
+ lib/Target/BPF/MCTargetDesc/BPFELFObjectWriter.cpp [3:4]
+ lib/Target/BPF/MCTargetDesc/BPFInstPrinter.cpp [3:4]
+ lib/Target/BPF/MCTargetDesc/BPFInstPrinter.h [3:4]
+ lib/Target/BPF/MCTargetDesc/BPFMCAsmInfo.h [3:4]
+ lib/Target/BPF/MCTargetDesc/BPFMCCodeEmitter.cpp [3:4]
+ lib/Target/BPF/MCTargetDesc/BPFMCTargetDesc.cpp [3:4]
+ lib/Target/BPF/MCTargetDesc/BPFMCTargetDesc.h [3:4]
+ lib/Target/BPF/TargetInfo/BPFTargetInfo.cpp [3:4]
+ lib/Target/BPF/TargetInfo/BPFTargetInfo.h [3:4]
+ lib/Target/LoongArch/AsmParser/LoongArchAsmParser.cpp [3:4]
+ lib/Target/LoongArch/Disassembler/LoongArchDisassembler.cpp [3:4]
+ lib/Target/LoongArch/LoongArch.h [3:4]
+ lib/Target/LoongArch/LoongArch.td [3:4]
+ lib/Target/LoongArch/LoongArchAsmPrinter.cpp [3:4]
+ lib/Target/LoongArch/LoongArchAsmPrinter.h [3:4]
+ lib/Target/LoongArch/LoongArchCallingConv.td [3:4]
+ lib/Target/LoongArch/LoongArchExpandAtomicPseudoInsts.cpp [3:4]
+ lib/Target/LoongArch/LoongArchExpandPseudoInsts.cpp [3:4]
+ lib/Target/LoongArch/LoongArchFloat32InstrInfo.td [3:4]
+ lib/Target/LoongArch/LoongArchFloat64InstrInfo.td [3:4]
+ lib/Target/LoongArch/LoongArchFloatInstrFormats.td [3:4]
+ lib/Target/LoongArch/LoongArchFrameLowering.cpp [3:4]
+ lib/Target/LoongArch/LoongArchFrameLowering.h [3:4]
+ lib/Target/LoongArch/LoongArchISelDAGToDAG.cpp [3:4]
+ lib/Target/LoongArch/LoongArchISelDAGToDAG.h [3:4]
+ lib/Target/LoongArch/LoongArchISelLowering.cpp [3:4]
+ lib/Target/LoongArch/LoongArchISelLowering.h [3:4]
+ lib/Target/LoongArch/LoongArchInstrFormats.td [3:4]
+ lib/Target/LoongArch/LoongArchInstrInfo.cpp [3:4]
+ lib/Target/LoongArch/LoongArchInstrInfo.h [3:4]
+ lib/Target/LoongArch/LoongArchInstrInfo.td [3:4]
+ lib/Target/LoongArch/LoongArchMCInstLower.cpp [3:4]
+ lib/Target/LoongArch/LoongArchMachineFunctionInfo.h [3:4]
+ lib/Target/LoongArch/LoongArchRegisterInfo.cpp [3:4]
+ lib/Target/LoongArch/LoongArchRegisterInfo.h [3:4]
+ lib/Target/LoongArch/LoongArchRegisterInfo.td [3:4]
+ lib/Target/LoongArch/LoongArchSubtarget.cpp [3:4]
+ lib/Target/LoongArch/LoongArchSubtarget.h [3:4]
+ lib/Target/LoongArch/LoongArchTargetMachine.cpp [3:4]
+ lib/Target/LoongArch/LoongArchTargetMachine.h [3:4]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.cpp [3:4]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.h [3:4]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchBaseInfo.cpp [3:4]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchBaseInfo.h [3:4]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchELFObjectWriter.cpp [3:4]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchELFStreamer.cpp [3:4]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchELFStreamer.h [3:4]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchFixupKinds.h [3:4]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchInstPrinter.cpp [3:4]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchInstPrinter.h [3:4]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchMCAsmInfo.cpp [3:4]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchMCAsmInfo.h [3:4]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchMCCodeEmitter.cpp [3:4]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchMCExpr.cpp [3:4]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchMCExpr.h [3:4]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchMCTargetDesc.cpp [3:4]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchMCTargetDesc.h [3:4]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchMatInt.cpp [3:4]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchMatInt.h [3:4]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchTargetStreamer.cpp [3:4]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchTargetStreamer.h [3:4]
+ lib/Target/LoongArch/TargetInfo/LoongArchTargetInfo.cpp [3:4]
+ lib/Target/LoongArch/TargetInfo/LoongArchTargetInfo.h [3:4]
+ lib/Target/NVPTX/MCTargetDesc/NVPTXBaseInfo.h [3:4]
+ lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp [3:4]
+ lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.h [3:4]
+ lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.cpp [3:4]
+ lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.h [3:4]
+ lib/Target/NVPTX/MCTargetDesc/NVPTXMCTargetDesc.cpp [3:4]
+ lib/Target/NVPTX/MCTargetDesc/NVPTXMCTargetDesc.h [3:4]
+ lib/Target/NVPTX/MCTargetDesc/NVPTXTargetStreamer.cpp [3:4]
+ lib/Target/NVPTX/MCTargetDesc/NVPTXTargetStreamer.h [3:4]
+ lib/Target/NVPTX/NVPTX.h [3:4]
+ lib/Target/NVPTX/NVPTX.td [3:4]
+ lib/Target/NVPTX/NVPTXAllocaHoisting.cpp [3:4]
+ lib/Target/NVPTX/NVPTXAllocaHoisting.h [3:4]
+ lib/Target/NVPTX/NVPTXAsmPrinter.cpp [3:4]
+ lib/Target/NVPTX/NVPTXAsmPrinter.h [3:4]
+ lib/Target/NVPTX/NVPTXAssignValidGlobalNames.cpp [3:4]
+ lib/Target/NVPTX/NVPTXAtomicLower.cpp [3:4]
+ lib/Target/NVPTX/NVPTXAtomicLower.h [3:4]
+ lib/Target/NVPTX/NVPTXFrameLowering.cpp [3:4]
+ lib/Target/NVPTX/NVPTXFrameLowering.h [3:4]
+ lib/Target/NVPTX/NVPTXGenericToNVVM.cpp [3:4]
+ lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp [3:4]
+ lib/Target/NVPTX/NVPTXISelDAGToDAG.h [3:4]
+ lib/Target/NVPTX/NVPTXISelLowering.cpp [3:4]
+ lib/Target/NVPTX/NVPTXISelLowering.h [3:4]
+ lib/Target/NVPTX/NVPTXImageOptimizer.cpp [3:4]
+ lib/Target/NVPTX/NVPTXInstrFormats.td [3:4]
+ lib/Target/NVPTX/NVPTXInstrInfo.cpp [3:4]
+ lib/Target/NVPTX/NVPTXInstrInfo.h [3:4]
+ lib/Target/NVPTX/NVPTXInstrInfo.td [3:4]
+ lib/Target/NVPTX/NVPTXIntrinsics.td [3:4]
+ lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp [3:4]
+ lib/Target/NVPTX/NVPTXLowerAggrCopies.h [3:4]
+ lib/Target/NVPTX/NVPTXLowerAlloca.cpp [3:4]
+ lib/Target/NVPTX/NVPTXLowerArgs.cpp [3:4]
+ lib/Target/NVPTX/NVPTXMCExpr.cpp [3:4]
+ lib/Target/NVPTX/NVPTXMCExpr.h [3:4]
+ lib/Target/NVPTX/NVPTXMachineFunctionInfo.h [3:4]
+ lib/Target/NVPTX/NVPTXPeephole.cpp [3:4]
+ lib/Target/NVPTX/NVPTXPrologEpilogPass.cpp [3:4]
+ lib/Target/NVPTX/NVPTXProxyRegErasure.cpp [3:4]
+ lib/Target/NVPTX/NVPTXRegisterInfo.cpp [3:4]
+ lib/Target/NVPTX/NVPTXRegisterInfo.h [3:4]
+ lib/Target/NVPTX/NVPTXRegisterInfo.td [3:4]
+ lib/Target/NVPTX/NVPTXReplaceImageHandles.cpp [3:4]
+ lib/Target/NVPTX/NVPTXSubtarget.cpp [3:4]
+ lib/Target/NVPTX/NVPTXSubtarget.h [3:4]
+ lib/Target/NVPTX/NVPTXTargetMachine.cpp [3:4]
+ lib/Target/NVPTX/NVPTXTargetMachine.h [3:4]
+ lib/Target/NVPTX/NVPTXTargetObjectFile.h [3:4]
+ lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp [3:4]
+ lib/Target/NVPTX/NVPTXTargetTransformInfo.h [3:4]
+ lib/Target/NVPTX/NVPTXUtilities.cpp [3:4]
+ lib/Target/NVPTX/NVPTXUtilities.h [3:4]
+ lib/Target/NVPTX/NVVMIntrRange.cpp [3:4]
+ lib/Target/NVPTX/NVVMReflect.cpp [3:4]
+ lib/Target/NVPTX/TargetInfo/NVPTXTargetInfo.cpp [3:4]
+ lib/Target/NVPTX/TargetInfo/NVPTXTargetInfo.h [3:4]
+ lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp [3:4]
+ lib/Target/PowerPC/Disassembler/PPCDisassembler.cpp [3:4]
+ lib/Target/PowerPC/GISel/PPCCallLowering.cpp [3:4]
+ lib/Target/PowerPC/GISel/PPCCallLowering.h [3:4]
+ lib/Target/PowerPC/GISel/PPCInstructionSelector.cpp [3:4]
+ lib/Target/PowerPC/GISel/PPCLegalizerInfo.cpp [3:4]
+ lib/Target/PowerPC/GISel/PPCLegalizerInfo.h [3:4]
+ lib/Target/PowerPC/GISel/PPCRegisterBankInfo.cpp [3:4]
+ lib/Target/PowerPC/GISel/PPCRegisterBankInfo.h [3:4]
+ lib/Target/PowerPC/GISel/PPCRegisterBanks.td [3:4]
+ lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp [3:4]
+ lib/Target/PowerPC/MCTargetDesc/PPCELFObjectWriter.cpp [3:4]
+ lib/Target/PowerPC/MCTargetDesc/PPCELFStreamer.cpp [3:4]
+ lib/Target/PowerPC/MCTargetDesc/PPCELFStreamer.h [3:4]
+ lib/Target/PowerPC/MCTargetDesc/PPCFixupKinds.h [3:4]
+ lib/Target/PowerPC/MCTargetDesc/PPCInstPrinter.cpp [3:4]
+ lib/Target/PowerPC/MCTargetDesc/PPCInstPrinter.h [3:4]
+ lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp [3:4]
+ lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.h [3:4]
+ lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp [3:4]
+ lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.h [3:4]
+ lib/Target/PowerPC/MCTargetDesc/PPCMCExpr.cpp [3:4]
+ lib/Target/PowerPC/MCTargetDesc/PPCMCExpr.h [3:4]
+ lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp [3:4]
+ lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.h [3:4]
+ lib/Target/PowerPC/MCTargetDesc/PPCPredicates.cpp [3:4]
+ lib/Target/PowerPC/MCTargetDesc/PPCPredicates.h [3:4]
+ lib/Target/PowerPC/MCTargetDesc/PPCXCOFFObjectWriter.cpp [4:5]
+ lib/Target/PowerPC/MCTargetDesc/PPCXCOFFStreamer.cpp [3:4]
+ lib/Target/PowerPC/MCTargetDesc/PPCXCOFFStreamer.h [3:4]
+ lib/Target/PowerPC/P10InstrResources.td [3:4]
+ lib/Target/PowerPC/P9InstrResources.td [3:4]
+ lib/Target/PowerPC/PPC.h [3:4]
+ lib/Target/PowerPC/PPC.td [3:4]
+ lib/Target/PowerPC/PPCAsmPrinter.cpp [3:4]
+ lib/Target/PowerPC/PPCBoolRetToInt.cpp [3:4]
+ lib/Target/PowerPC/PPCBranchCoalescing.cpp [3:4]
+ lib/Target/PowerPC/PPCBranchSelector.cpp [3:4]
+ lib/Target/PowerPC/PPCCCState.cpp [3:4]
+ lib/Target/PowerPC/PPCCCState.h [3:4]
+ lib/Target/PowerPC/PPCCTRLoops.cpp [3:4]
+ lib/Target/PowerPC/PPCCTRLoopsVerify.cpp [3:4]
+ lib/Target/PowerPC/PPCCallingConv.cpp [3:4]
+ lib/Target/PowerPC/PPCCallingConv.h [3:4]
+ lib/Target/PowerPC/PPCCallingConv.td [3:4]
+ lib/Target/PowerPC/PPCEarlyReturn.cpp [3:4]
+ lib/Target/PowerPC/PPCExpandAtomicPseudoInsts.cpp [3:4]
+ lib/Target/PowerPC/PPCExpandISEL.cpp [3:4]
+ lib/Target/PowerPC/PPCFastISel.cpp [3:4]
+ lib/Target/PowerPC/PPCFrameLowering.cpp [3:4]
+ lib/Target/PowerPC/PPCFrameLowering.h [3:4]
+ lib/Target/PowerPC/PPCGenRegisterBankInfo.def [3:4]
+ lib/Target/PowerPC/PPCGenScalarMASSEntries.cpp [3:4]
+ lib/Target/PowerPC/PPCHazardRecognizers.cpp [3:4]
+ lib/Target/PowerPC/PPCHazardRecognizers.h [3:4]
+ lib/Target/PowerPC/PPCISelDAGToDAG.cpp [3:4]
+ lib/Target/PowerPC/PPCISelLowering.cpp [3:4]
+ lib/Target/PowerPC/PPCISelLowering.h [3:4]
+ lib/Target/PowerPC/PPCInstr64Bit.td [3:4]
+ lib/Target/PowerPC/PPCInstrAltivec.td [3:4]
+ lib/Target/PowerPC/PPCInstrBuilder.h [3:4]
+ lib/Target/PowerPC/PPCInstrFormats.td [3:4]
+ lib/Target/PowerPC/PPCInstrHTM.td [3:4]
+ lib/Target/PowerPC/PPCInstrInfo.cpp [3:4]
+ lib/Target/PowerPC/PPCInstrInfo.h [3:4]
+ lib/Target/PowerPC/PPCInstrInfo.td [3:4]
+ lib/Target/PowerPC/PPCInstrP10.td [5:6]
+ lib/Target/PowerPC/PPCInstrSPE.td [3:4]
+ lib/Target/PowerPC/PPCInstrVSX.td [3:4]
+ lib/Target/PowerPC/PPCLoopInstrFormPrep.cpp [3:4]
+ lib/Target/PowerPC/PPCLowerMASSVEntries.cpp [3:4]
+ lib/Target/PowerPC/PPCMCInstLower.cpp [3:4]
+ lib/Target/PowerPC/PPCMIPeephole.cpp [3:4]
+ lib/Target/PowerPC/PPCMachineFunctionInfo.cpp [3:4]
+ lib/Target/PowerPC/PPCMachineFunctionInfo.h [3:4]
+ lib/Target/PowerPC/PPCMachineScheduler.cpp [3:4]
+ lib/Target/PowerPC/PPCMachineScheduler.h [3:4]
+ lib/Target/PowerPC/PPCMacroFusion.cpp [3:4]
+ lib/Target/PowerPC/PPCMacroFusion.h [3:4]
+ lib/Target/PowerPC/PPCPerfectShuffle.h [3:4]
+ lib/Target/PowerPC/PPCPfmCounters.td [3:4]
+ lib/Target/PowerPC/PPCPreEmitPeephole.cpp [3:4]
+ lib/Target/PowerPC/PPCReduceCRLogicals.cpp [3:4]
+ lib/Target/PowerPC/PPCRegisterInfo.cpp [3:4]
+ lib/Target/PowerPC/PPCRegisterInfo.h [3:4]
+ lib/Target/PowerPC/PPCRegisterInfo.td [3:4]
+ lib/Target/PowerPC/PPCRegisterInfoDMR.td [3:4]
+ lib/Target/PowerPC/PPCRegisterInfoMMA.td [3:4]
+ lib/Target/PowerPC/PPCSchedPredicates.td [3:4]
+ lib/Target/PowerPC/PPCSchedule.td [3:4]
+ lib/Target/PowerPC/PPCSchedule440.td [3:4]
+ lib/Target/PowerPC/PPCScheduleA2.td [3:4]
+ lib/Target/PowerPC/PPCScheduleE500.td [3:4]
+ lib/Target/PowerPC/PPCScheduleE500mc.td [3:4]
+ lib/Target/PowerPC/PPCScheduleE5500.td [3:4]
+ lib/Target/PowerPC/PPCScheduleG3.td [3:4]
+ lib/Target/PowerPC/PPCScheduleG4.td [3:4]
+ lib/Target/PowerPC/PPCScheduleG4Plus.td [3:4]
+ lib/Target/PowerPC/PPCScheduleG5.td [3:4]
+ lib/Target/PowerPC/PPCScheduleP10.td [3:4]
+ lib/Target/PowerPC/PPCScheduleP7.td [3:4]
+ lib/Target/PowerPC/PPCScheduleP8.td [3:4]
+ lib/Target/PowerPC/PPCScheduleP9.td [3:4]
+ lib/Target/PowerPC/PPCSubtarget.cpp [3:4]
+ lib/Target/PowerPC/PPCSubtarget.h [3:4]
+ lib/Target/PowerPC/PPCTLSDynamicCall.cpp [3:4]
+ lib/Target/PowerPC/PPCTOCRegDeps.cpp [3:4]
+ lib/Target/PowerPC/PPCTargetMachine.cpp [3:4]
+ lib/Target/PowerPC/PPCTargetMachine.h [3:4]
+ lib/Target/PowerPC/PPCTargetObjectFile.cpp [3:4]
+ lib/Target/PowerPC/PPCTargetObjectFile.h [3:4]
+ lib/Target/PowerPC/PPCTargetStreamer.h [3:4]
+ lib/Target/PowerPC/PPCTargetTransformInfo.cpp [3:4]
+ lib/Target/PowerPC/PPCTargetTransformInfo.h [3:4]
+ lib/Target/PowerPC/PPCVSXCopy.cpp [3:4]
+ lib/Target/PowerPC/PPCVSXFMAMutate.cpp [3:4]
+ lib/Target/PowerPC/PPCVSXSwapRemoval.cpp [3:4]
+ lib/Target/PowerPC/TargetInfo/PowerPCTargetInfo.cpp [3:4]
+ lib/Target/PowerPC/TargetInfo/PowerPCTargetInfo.h [3:4]
+ lib/Target/RISCV/GISel/RISCVCallLowering.cpp [3:4]
+ lib/Target/RISCV/GISel/RISCVCallLowering.h [3:4]
+ lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp [3:4]
+ lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp [3:4]
+ lib/Target/RISCV/GISel/RISCVLegalizerInfo.h [3:4]
+ lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp [3:4]
+ lib/Target/RISCV/GISel/RISCVRegisterBankInfo.h [3:4]
+ lib/Target/RISCV/GISel/RISCVRegisterBanks.td [3:4]
+ lib/Target/RISCV/RISCV.h [3:4]
+ lib/Target/RISCV/RISCV.td [3:4]
+ lib/Target/RISCV/RISCVAsmPrinter.cpp [3:4]
+ lib/Target/RISCV/RISCVCallingConv.td [3:4]
+ lib/Target/RISCV/RISCVCodeGenPrepare.cpp [3:4]
+ lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp [3:4]
+ lib/Target/RISCV/RISCVExpandPseudoInsts.cpp [3:4]
+ lib/Target/RISCV/RISCVFeatures.td [3:4]
+ lib/Target/RISCV/RISCVFrameLowering.cpp [3:4]
+ lib/Target/RISCV/RISCVFrameLowering.h [3:4]
+ lib/Target/RISCV/RISCVGatherScatterLowering.cpp [3:4]
+ lib/Target/RISCV/RISCVISelDAGToDAG.cpp [3:4]
+ lib/Target/RISCV/RISCVISelDAGToDAG.h [3:4]
+ lib/Target/RISCV/RISCVISelLowering.cpp [3:4]
+ lib/Target/RISCV/RISCVISelLowering.h [3:4]
+ lib/Target/RISCV/RISCVInsertVSETVLI.cpp [3:4]
+ lib/Target/RISCV/RISCVInstrFormats.td [3:4]
+ lib/Target/RISCV/RISCVInstrFormatsC.td [3:4]
+ lib/Target/RISCV/RISCVInstrFormatsV.td [3:4]
+ lib/Target/RISCV/RISCVInstrInfo.cpp [3:4]
+ lib/Target/RISCV/RISCVInstrInfo.h [3:4]
+ lib/Target/RISCV/RISCVInstrInfo.td [3:4]
+ lib/Target/RISCV/RISCVInstrInfoA.td [3:4]
+ lib/Target/RISCV/RISCVInstrInfoC.td [3:4]
+ lib/Target/RISCV/RISCVInstrInfoD.td [3:4]
+ lib/Target/RISCV/RISCVInstrInfoF.td [3:4]
+ lib/Target/RISCV/RISCVInstrInfoM.td [3:4]
+ lib/Target/RISCV/RISCVInstrInfoV.td [3:4]
+ lib/Target/RISCV/RISCVInstrInfoVPseudos.td [3:4]
+ lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td [3:4]
+ lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td [3:4]
+ lib/Target/RISCV/RISCVInstrInfoXTHead.td [3:4]
+ lib/Target/RISCV/RISCVInstrInfoXVentana.td [3:4]
+ lib/Target/RISCV/RISCVInstrInfoZb.td [3:4]
+ lib/Target/RISCV/RISCVInstrInfoZfh.td [3:4]
+ lib/Target/RISCV/RISCVInstrInfoZicbo.td [3:4]
+ lib/Target/RISCV/RISCVInstrInfoZk.td [3:4]
+ lib/Target/RISCV/RISCVMCInstLower.cpp [3:4]
+ lib/Target/RISCV/RISCVMachineFunctionInfo.cpp [3:4]
+ lib/Target/RISCV/RISCVMachineFunctionInfo.h [3:4]
+ lib/Target/RISCV/RISCVMacroFusion.cpp [3:4]
+ lib/Target/RISCV/RISCVMacroFusion.h [3:4]
+ lib/Target/RISCV/RISCVMakeCompressible.cpp [3:4]
+ lib/Target/RISCV/RISCVMergeBaseOffset.cpp [3:4]
+ lib/Target/RISCV/RISCVProcessors.td [3:4]
+ lib/Target/RISCV/RISCVRedundantCopyElimination.cpp [3:4]
+ lib/Target/RISCV/RISCVRegisterInfo.cpp [3:4]
+ lib/Target/RISCV/RISCVRegisterInfo.h [3:4]
+ lib/Target/RISCV/RISCVRegisterInfo.td [3:4]
+ lib/Target/RISCV/RISCVSExtWRemoval.cpp [3:4]
+ lib/Target/RISCV/RISCVSchedRocket.td [3:4]
+ lib/Target/RISCV/RISCVSchedSiFive7.td [3:4]
+ lib/Target/RISCV/RISCVSchedSyntacoreSCR1.td [3:4]
+ lib/Target/RISCV/RISCVSchedule.td [3:4]
+ lib/Target/RISCV/RISCVScheduleV.td [3:4]
+ lib/Target/RISCV/RISCVScheduleZb.td [3:4]
+ lib/Target/RISCV/RISCVStripWSuffix.cpp [4:5]
+ lib/Target/RISCV/RISCVSubtarget.cpp [3:4]
+ lib/Target/RISCV/RISCVSubtarget.h [3:4]
+ lib/Target/RISCV/RISCVSystemOperands.td [3:4]
+ lib/Target/RISCV/RISCVTargetMachine.cpp [3:4]
+ lib/Target/RISCV/RISCVTargetMachine.h [3:4]
+ lib/Target/RISCV/RISCVTargetObjectFile.cpp [3:4]
+ lib/Target/RISCV/RISCVTargetObjectFile.h [3:4]
+ lib/Target/RISCV/RISCVTargetTransformInfo.cpp [3:4]
+ lib/Target/RISCV/RISCVTargetTransformInfo.h [3:4]
+ lib/Target/Target.cpp [3:4]
+ lib/Target/TargetIntrinsicInfo.cpp [3:4]
+ lib/Target/TargetLoweringObjectFile.cpp [3:4]
+ lib/Target/TargetMachine.cpp [3:4]
+ lib/Target/TargetMachineC.cpp [3:4]
+ lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp [3:4]
+ lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.cpp [3:4]
+ lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.h [3:4]
+ lib/Target/WebAssembly/Disassembler/WebAssemblyDisassembler.cpp [3:4]
+ lib/Target/WebAssembly/MCTargetDesc/WebAssemblyAsmBackend.cpp [3:4]
+ lib/Target/WebAssembly/MCTargetDesc/WebAssemblyFixupKinds.h [3:4]
+ lib/Target/WebAssembly/MCTargetDesc/WebAssemblyInstPrinter.cpp [3:4]
+ lib/Target/WebAssembly/MCTargetDesc/WebAssemblyInstPrinter.h [3:4]
+ lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.cpp [3:4]
+ lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.h [3:4]
+ lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp [3:4]
+ lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.cpp [3:4]
+ lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h [3:4]
+ lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.cpp [3:4]
+ lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.h [3:4]
+ lib/Target/WebAssembly/MCTargetDesc/WebAssemblyWasmObjectWriter.cpp [3:4]
+ lib/Target/WebAssembly/TargetInfo/WebAssemblyTargetInfo.cpp [3:4]
+ lib/Target/WebAssembly/TargetInfo/WebAssemblyTargetInfo.h [3:4]
+ lib/Target/WebAssembly/Utils/WebAssemblyTypeUtilities.cpp [3:4]
+ lib/Target/WebAssembly/Utils/WebAssemblyTypeUtilities.h [3:4]
+ lib/Target/WebAssembly/Utils/WebAssemblyUtilities.cpp [3:4]
+ lib/Target/WebAssembly/Utils/WebAssemblyUtilities.h [3:4]
+ lib/Target/WebAssembly/WebAssembly.h [3:4]
+ lib/Target/WebAssembly/WebAssembly.td [3:4]
+ lib/Target/WebAssembly/WebAssemblyAddMissingPrototypes.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyArgumentMove.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyAsmPrinter.h [3:4]
+ lib/Target/WebAssembly/WebAssemblyCFGSort.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyDebugFixup.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyDebugValueManager.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyDebugValueManager.h [3:4]
+ lib/Target/WebAssembly/WebAssemblyExceptionInfo.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyExceptionInfo.h [3:4]
+ lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyFastISel.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyFixBrTableDefaults.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyFixFunctionBitcasts.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyFixIrreducibleControlFlow.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyFrameLowering.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyFrameLowering.h [3:4]
+ lib/Target/WebAssembly/WebAssemblyISD.def [3:4]
+ lib/Target/WebAssembly/WebAssemblyISelDAGToDAG.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyISelLowering.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyISelLowering.h [3:4]
+ lib/Target/WebAssembly/WebAssemblyInstrAtomics.td [3:4]
+ lib/Target/WebAssembly/WebAssemblyInstrBulkMemory.td [3:4]
+ lib/Target/WebAssembly/WebAssemblyInstrCall.td [3:4]
+ lib/Target/WebAssembly/WebAssemblyInstrControl.td [3:4]
+ lib/Target/WebAssembly/WebAssemblyInstrConv.td [3:4]
+ lib/Target/WebAssembly/WebAssemblyInstrFloat.td [3:4]
+ lib/Target/WebAssembly/WebAssemblyInstrFormats.td [3:4]
+ lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyInstrInfo.h [3:4]
+ lib/Target/WebAssembly/WebAssemblyInstrInfo.td [3:4]
+ lib/Target/WebAssembly/WebAssemblyInstrInteger.td [3:4]
+ lib/Target/WebAssembly/WebAssemblyInstrMemory.td [3:4]
+ lib/Target/WebAssembly/WebAssemblyInstrRef.td [3:4]
+ lib/Target/WebAssembly/WebAssemblyInstrSIMD.td [3:4]
+ lib/Target/WebAssembly/WebAssemblyInstrTable.td [3:4]
+ lib/Target/WebAssembly/WebAssemblyLateEHPrepare.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyLowerBrUnless.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyLowerRefTypesIntPtrConv.cpp [4:5]
+ lib/Target/WebAssembly/WebAssemblyMCInstLower.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyMCInstLower.h [3:4]
+ lib/Target/WebAssembly/WebAssemblyMCLowerPrePass.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.h [3:4]
+ lib/Target/WebAssembly/WebAssemblyMemIntrinsicResults.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyNullifyDebugValueLists.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyOptimizeReturned.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyPeephole.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyRegColoring.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyRegNumbering.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyRegStackify.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyRegisterInfo.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyRegisterInfo.h [3:4]
+ lib/Target/WebAssembly/WebAssemblyRegisterInfo.td [3:4]
+ lib/Target/WebAssembly/WebAssemblyReplacePhysRegs.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.h [3:4]
+ lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.h [3:4]
+ lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblySortRegion.h [3:4]
+ lib/Target/WebAssembly/WebAssemblySubtarget.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblySubtarget.h [3:4]
+ lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyTargetMachine.h [3:4]
+ lib/Target/WebAssembly/WebAssemblyTargetObjectFile.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyTargetObjectFile.h [3:4]
+ lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h [3:4]
+ lib/Target/X86/AsmParser/X86AsmParser.cpp [3:4]
+ lib/Target/X86/AsmParser/X86AsmParserCommon.h [3:4]
+ lib/Target/X86/AsmParser/X86Operand.h [3:4]
+ lib/Target/X86/Disassembler/X86Disassembler.cpp [3:4]
+ lib/Target/X86/Disassembler/X86DisassemblerDecoder.h [3:4]
+ lib/Target/X86/ImmutableGraph.h [3:4]
+ lib/Target/X86/MCA/X86CustomBehaviour.cpp [3:4]
+ lib/Target/X86/MCA/X86CustomBehaviour.h [3:4]
+ lib/Target/X86/MCTargetDesc/X86ATTInstPrinter.cpp [3:4]
+ lib/Target/X86/MCTargetDesc/X86ATTInstPrinter.h [3:4]
+ lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp [3:4]
+ lib/Target/X86/MCTargetDesc/X86BaseInfo.h [3:4]
+ lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp [3:4]
+ lib/Target/X86/MCTargetDesc/X86FixupKinds.h [3:4]
+ lib/Target/X86/MCTargetDesc/X86InstComments.cpp [3:4]
+ lib/Target/X86/MCTargetDesc/X86InstComments.h [3:4]
+ lib/Target/X86/MCTargetDesc/X86InstPrinterCommon.cpp [3:4]
+ lib/Target/X86/MCTargetDesc/X86InstPrinterCommon.h [3:4]
+ lib/Target/X86/MCTargetDesc/X86InstrRelaxTables.cpp [3:4]
+ lib/Target/X86/MCTargetDesc/X86InstrRelaxTables.h [3:4]
+ lib/Target/X86/MCTargetDesc/X86IntelInstPrinter.cpp [3:4]
+ lib/Target/X86/MCTargetDesc/X86IntelInstPrinter.h [3:4]
+ lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp [3:4]
+ lib/Target/X86/MCTargetDesc/X86MCAsmInfo.h [3:4]
+ lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp [3:4]
+ lib/Target/X86/MCTargetDesc/X86MCExpr.h [3:4]
+ lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp [3:4]
+ lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h [3:4]
+ lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp [3:4]
+ lib/Target/X86/MCTargetDesc/X86MnemonicTables.cpp [3:4]
+ lib/Target/X86/MCTargetDesc/X86ShuffleDecode.cpp [3:4]
+ lib/Target/X86/MCTargetDesc/X86ShuffleDecode.h [3:4]
+ lib/Target/X86/MCTargetDesc/X86TargetStreamer.h [3:4]
+ lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp [3:4]
+ lib/Target/X86/MCTargetDesc/X86WinCOFFStreamer.cpp [3:4]
+ lib/Target/X86/MCTargetDesc/X86WinCOFFTargetStreamer.cpp [3:4]
+ lib/Target/X86/TargetInfo/X86TargetInfo.cpp [3:4]
+ lib/Target/X86/TargetInfo/X86TargetInfo.h [3:4]
+ lib/Target/X86/X86.h [3:4]
+ lib/Target/X86/X86.td [3:4]
+ lib/Target/X86/X86AsmPrinter.cpp [3:4]
+ lib/Target/X86/X86AsmPrinter.h [3:4]
+ lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp [3:4]
+ lib/Target/X86/X86AvoidTrailingCall.cpp [3:4]
+ lib/Target/X86/X86CallFrameOptimization.cpp [3:4]
+ lib/Target/X86/X86CallLowering.cpp [3:4]
+ lib/Target/X86/X86CallLowering.h [3:4]
+ lib/Target/X86/X86CallingConv.cpp [3:4]
+ lib/Target/X86/X86CallingConv.h [3:4]
+ lib/Target/X86/X86CallingConv.td [3:4]
+ lib/Target/X86/X86CmovConversion.cpp [3:4]
+ lib/Target/X86/X86DiscriminateMemOps.cpp [3:4]
+ lib/Target/X86/X86DomainReassignment.cpp [3:4]
+ lib/Target/X86/X86DynAllocaExpander.cpp [3:4]
+ lib/Target/X86/X86EvexToVex.cpp [4:5]
+ lib/Target/X86/X86ExpandPseudo.cpp [3:4]
+ lib/Target/X86/X86FastISel.cpp [3:4]
+ lib/Target/X86/X86FastPreTileConfig.cpp [3:4]
+ lib/Target/X86/X86FastTileConfig.cpp [3:4]
+ lib/Target/X86/X86FixupBWInsts.cpp [3:4]
+ lib/Target/X86/X86FixupLEAs.cpp [3:4]
+ lib/Target/X86/X86FixupSetCC.cpp [3:4]
+ lib/Target/X86/X86FlagsCopyLowering.cpp [3:4]
+ lib/Target/X86/X86FloatingPoint.cpp [3:4]
+ lib/Target/X86/X86FrameLowering.cpp [3:4]
+ lib/Target/X86/X86FrameLowering.h [3:4]
+ lib/Target/X86/X86GenRegisterBankInfo.def [3:4]
+ lib/Target/X86/X86ISelDAGToDAG.cpp [3:4]
+ lib/Target/X86/X86ISelLowering.cpp [3:4]
+ lib/Target/X86/X86ISelLowering.h [3:4]
+ lib/Target/X86/X86IndirectBranchTracking.cpp [3:4]
+ lib/Target/X86/X86IndirectThunks.cpp [3:4]
+ lib/Target/X86/X86InsertPrefetch.cpp [3:4]
+ lib/Target/X86/X86InsertWait.cpp [3:4]
+ lib/Target/X86/X86InstCombineIntrinsic.cpp [3:4]
+ lib/Target/X86/X86Instr3DNow.td [3:4]
+ lib/Target/X86/X86InstrAMX.td [3:4]
+ lib/Target/X86/X86InstrAVX512.td [3:4]
+ lib/Target/X86/X86InstrArithmetic.td [3:4]
+ lib/Target/X86/X86InstrBuilder.h [3:4]
+ lib/Target/X86/X86InstrCMovSetCC.td [3:4]
+ lib/Target/X86/X86InstrCompiler.td [3:4]
+ lib/Target/X86/X86InstrControl.td [3:4]
+ lib/Target/X86/X86InstrExtension.td [3:4]
+ lib/Target/X86/X86InstrFMA.td [3:4]
+ lib/Target/X86/X86InstrFMA3Info.cpp [3:4]
+ lib/Target/X86/X86InstrFMA3Info.h [3:4]
+ lib/Target/X86/X86InstrFPStack.td [3:4]
+ lib/Target/X86/X86InstrFoldTables.cpp [3:4]
+ lib/Target/X86/X86InstrFoldTables.h [3:4]
+ lib/Target/X86/X86InstrFormats.td [3:4]
+ lib/Target/X86/X86InstrFragmentsSIMD.td [3:4]
+ lib/Target/X86/X86InstrInfo.cpp [3:4]
+ lib/Target/X86/X86InstrInfo.h [3:4]
+ lib/Target/X86/X86InstrInfo.td [3:4]
+ lib/Target/X86/X86InstrKL.td [4:5]
+ lib/Target/X86/X86InstrMMX.td [3:4]
+ lib/Target/X86/X86InstrRAOINT.td [3:4]
+ lib/Target/X86/X86InstrSGX.td [3:4]
+ lib/Target/X86/X86InstrSNP.td [3:4]
+ lib/Target/X86/X86InstrSSE.td [3:4]
+ lib/Target/X86/X86InstrSVM.td [3:4]
+ lib/Target/X86/X86InstrShiftRotate.td [3:4]
+ lib/Target/X86/X86InstrSystem.td [3:4]
+ lib/Target/X86/X86InstrTDX.td [3:4]
+ lib/Target/X86/X86InstrTSX.td [3:4]
+ lib/Target/X86/X86InstrVMX.td [3:4]
+ lib/Target/X86/X86InstrVecCompiler.td [3:4]
+ lib/Target/X86/X86InstrXOP.td [3:4]
+ lib/Target/X86/X86InstructionSelector.cpp [3:4]
+ lib/Target/X86/X86InterleavedAccess.cpp [3:4]
+ lib/Target/X86/X86IntrinsicsInfo.h [3:4]
+ lib/Target/X86/X86KCFI.cpp [3:4]
+ lib/Target/X86/X86LegalizerInfo.cpp [3:4]
+ lib/Target/X86/X86LegalizerInfo.h [4:5]
+ lib/Target/X86/X86LoadValueInjectionLoadHardening.cpp [3:4]
+ lib/Target/X86/X86LoadValueInjectionRetHardening.cpp [3:4]
+ lib/Target/X86/X86LowerAMXIntrinsics.cpp [3:4]
+ lib/Target/X86/X86LowerAMXType.cpp [3:4]
+ lib/Target/X86/X86LowerTileCopy.cpp [3:4]
+ lib/Target/X86/X86MCInstLower.cpp [3:4]
+ lib/Target/X86/X86MachineFunctionInfo.cpp [3:4]
+ lib/Target/X86/X86MachineFunctionInfo.h [3:4]
+ lib/Target/X86/X86MacroFusion.cpp [3:4]
+ lib/Target/X86/X86MacroFusion.h [3:4]
+ lib/Target/X86/X86OptimizeLEAs.cpp [3:4]
+ lib/Target/X86/X86PadShortFunction.cpp [3:4]
+ lib/Target/X86/X86PartialReduction.cpp [3:4]
+ lib/Target/X86/X86PfmCounters.td [3:4]
+ lib/Target/X86/X86PreAMXConfig.cpp [3:4]
+ lib/Target/X86/X86PreTileConfig.cpp [3:4]
+ lib/Target/X86/X86RegisterBankInfo.cpp [3:4]
+ lib/Target/X86/X86RegisterBankInfo.h [3:4]
+ lib/Target/X86/X86RegisterBanks.td [3:4]
+ lib/Target/X86/X86RegisterInfo.cpp [3:4]
+ lib/Target/X86/X86RegisterInfo.h [3:4]
+ lib/Target/X86/X86RegisterInfo.td [3:4]
+ lib/Target/X86/X86ReturnThunks.cpp [3:4]
+ lib/Target/X86/X86SchedAlderlakeP.td [3:4]
+ lib/Target/X86/X86SchedBroadwell.td [3:4]
+ lib/Target/X86/X86SchedHaswell.td [3:4]
+ lib/Target/X86/X86SchedIceLake.td [3:4]
+ lib/Target/X86/X86SchedPredicates.td [3:4]
+ lib/Target/X86/X86SchedSandyBridge.td [3:4]
+ lib/Target/X86/X86SchedSkylakeClient.td [3:4]
+ lib/Target/X86/X86SchedSkylakeServer.td [3:4]
+ lib/Target/X86/X86Schedule.td [3:4]
+ lib/Target/X86/X86ScheduleAtom.td [3:4]
+ lib/Target/X86/X86ScheduleBdVer2.td [3:4]
+ lib/Target/X86/X86ScheduleBtVer2.td [3:4]
+ lib/Target/X86/X86ScheduleSLM.td [3:4]
+ lib/Target/X86/X86ScheduleZnver1.td [3:4]
+ lib/Target/X86/X86ScheduleZnver2.td [3:4]
+ lib/Target/X86/X86ScheduleZnver3.td [3:4]
+ lib/Target/X86/X86SelectionDAGInfo.cpp [3:4]
+ lib/Target/X86/X86SelectionDAGInfo.h [3:4]
+ lib/Target/X86/X86ShuffleDecodeConstantPool.cpp [3:4]
+ lib/Target/X86/X86ShuffleDecodeConstantPool.h [3:4]
+ lib/Target/X86/X86SpeculativeExecutionSideEffectSuppression.cpp [3:4]
+ lib/Target/X86/X86SpeculativeLoadHardening.cpp [3:4]
+ lib/Target/X86/X86Subtarget.cpp [3:4]
+ lib/Target/X86/X86Subtarget.h [3:4]
+ lib/Target/X86/X86TargetMachine.cpp [3:4]
+ lib/Target/X86/X86TargetMachine.h [3:4]
+ lib/Target/X86/X86TargetObjectFile.cpp [3:4]
+ lib/Target/X86/X86TargetObjectFile.h [3:4]
+ lib/Target/X86/X86TargetTransformInfo.cpp [3:4]
+ lib/Target/X86/X86TargetTransformInfo.h [3:4]
+ lib/Target/X86/X86TileConfig.cpp [3:4]
+ lib/Target/X86/X86VZeroUpper.cpp [3:4]
+ lib/Target/X86/X86WinEHState.cpp [3:4]
+ lib/TargetParser/AArch64TargetParser.cpp [3:4]
+ lib/TargetParser/ARMTargetParser.cpp [3:4]
+ lib/TargetParser/ARMTargetParserCommon.cpp [3:4]
+ lib/TargetParser/Host.cpp [3:4]
+ lib/TargetParser/LoongArchTargetParser.cpp [3:4]
+ lib/TargetParser/RISCVTargetParser.cpp [3:4]
+ lib/TargetParser/TargetParser.cpp [3:4]
+ lib/TargetParser/Triple.cpp [3:4]
+ lib/TargetParser/Unix/Host.inc [3:4]
+ lib/TargetParser/Windows/Host.inc [3:4]
+ lib/TargetParser/X86TargetParser.cpp [3:4]
+ lib/TextAPI/Architecture.cpp [3:4]
+ lib/TextAPI/ArchitectureSet.cpp [3:4]
+ lib/TextAPI/InterfaceFile.cpp [3:4]
+ lib/TextAPI/PackedVersion.cpp [3:4]
+ lib/TextAPI/Platform.cpp [3:4]
+ lib/TextAPI/Symbol.cpp [3:4]
+ lib/TextAPI/Target.cpp [3:4]
+ lib/TextAPI/TextAPIContext.h [3:4]
+ lib/TextAPI/TextStub.cpp [3:4]
+ lib/TextAPI/TextStubCommon.cpp [3:4]
+ lib/TextAPI/TextStubCommon.h [3:4]
+ lib/ToolDrivers/llvm-dlltool/DlltoolDriver.cpp [3:4]
+ lib/ToolDrivers/llvm-lib/LibDriver.cpp [3:4]
+ lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp [3:4]
+ lib/Transforms/AggressiveInstCombine/AggressiveInstCombineInternal.h [3:4]
+ lib/Transforms/AggressiveInstCombine/TruncInstCombine.cpp [3:4]
+ lib/Transforms/CFGuard/CFGuard.cpp [3:4]
+ lib/Transforms/Coroutines/CoroCleanup.cpp [3:4]
+ lib/Transforms/Coroutines/CoroConditionalWrapper.cpp [3:4]
+ lib/Transforms/Coroutines/CoroEarly.cpp [3:4]
+ lib/Transforms/Coroutines/CoroElide.cpp [3:4]
+ lib/Transforms/Coroutines/CoroFrame.cpp [3:4]
+ lib/Transforms/Coroutines/CoroInstr.h [3:4]
+ lib/Transforms/Coroutines/CoroInternal.h [3:4]
+ lib/Transforms/Coroutines/CoroSplit.cpp [3:4]
+ lib/Transforms/Coroutines/Coroutines.cpp [3:4]
+ lib/Transforms/IPO/AlwaysInliner.cpp [3:4]
+ lib/Transforms/IPO/Annotation2Metadata.cpp [3:4]
+ lib/Transforms/IPO/ArgumentPromotion.cpp [3:4]
+ lib/Transforms/IPO/Attributor.cpp [3:4]
+ lib/Transforms/IPO/AttributorAttributes.cpp [3:4]
+ lib/Transforms/IPO/BarrierNoopPass.cpp [3:4]
+ lib/Transforms/IPO/BlockExtractor.cpp [3:4]
+ lib/Transforms/IPO/CalledValuePropagation.cpp [3:4]
+ lib/Transforms/IPO/ConstantMerge.cpp [3:4]
+ lib/Transforms/IPO/CrossDSOCFI.cpp [3:4]
+ lib/Transforms/IPO/DeadArgumentElimination.cpp [3:4]
+ lib/Transforms/IPO/ElimAvailExtern.cpp [3:4]
+ lib/Transforms/IPO/ExtractGV.cpp [3:4]
+ lib/Transforms/IPO/ForceFunctionAttrs.cpp [3:4]
+ lib/Transforms/IPO/FunctionAttrs.cpp [3:4]
+ lib/Transforms/IPO/FunctionImport.cpp [3:4]
+ lib/Transforms/IPO/FunctionSpecialization.cpp [3:4]
+ lib/Transforms/IPO/GlobalDCE.cpp [3:4]
+ lib/Transforms/IPO/GlobalOpt.cpp [3:4]
+ lib/Transforms/IPO/GlobalSplit.cpp [3:4]
+ lib/Transforms/IPO/HotColdSplitting.cpp [3:4]
+ lib/Transforms/IPO/IPO.cpp [3:4]
+ lib/Transforms/IPO/IROutliner.cpp [3:4]
+ lib/Transforms/IPO/InferFunctionAttrs.cpp [3:4]
+ lib/Transforms/IPO/InlineSimple.cpp [3:4]
+ lib/Transforms/IPO/Inliner.cpp [3:4]
+ lib/Transforms/IPO/Internalize.cpp [3:4]
+ lib/Transforms/IPO/LoopExtractor.cpp [3:4]
+ lib/Transforms/IPO/LowerTypeTests.cpp [3:4]
+ lib/Transforms/IPO/MergeFunctions.cpp [3:4]
+ lib/Transforms/IPO/ModuleInliner.cpp [3:4]
+ lib/Transforms/IPO/OpenMPOpt.cpp [3:4]
+ lib/Transforms/IPO/PartialInlining.cpp [3:4]
+ lib/Transforms/IPO/PassManagerBuilder.cpp [3:4]
+ lib/Transforms/IPO/SCCP.cpp [3:4]
+ lib/Transforms/IPO/SampleContextTracker.cpp [3:4]
+ lib/Transforms/IPO/SampleProfile.cpp [3:4]
+ lib/Transforms/IPO/SampleProfileProbe.cpp [3:4]
+ lib/Transforms/IPO/StripDeadPrototypes.cpp [3:4]
+ lib/Transforms/IPO/StripSymbols.cpp [3:4]
+ lib/Transforms/IPO/SyntheticCountsPropagation.cpp [3:4]
+ lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp [3:4]
+ lib/Transforms/IPO/WholeProgramDevirt.cpp [3:4]
+ lib/Transforms/InstCombine/InstCombineAddSub.cpp [3:4]
+ lib/Transforms/InstCombine/InstCombineAndOrXor.cpp [3:4]
+ lib/Transforms/InstCombine/InstCombineAtomicRMW.cpp [3:4]
+ lib/Transforms/InstCombine/InstCombineCalls.cpp [3:4]
+ lib/Transforms/InstCombine/InstCombineCasts.cpp [3:4]
+ lib/Transforms/InstCombine/InstCombineCompares.cpp [3:4]
+ lib/Transforms/InstCombine/InstCombineInternal.h [3:4]
+ lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp [3:4]
+ lib/Transforms/InstCombine/InstCombineMulDivRem.cpp [3:4]
+ lib/Transforms/InstCombine/InstCombineNegator.cpp [3:4]
+ lib/Transforms/InstCombine/InstCombinePHI.cpp [3:4]
+ lib/Transforms/InstCombine/InstCombineSelect.cpp [3:4]
+ lib/Transforms/InstCombine/InstCombineShifts.cpp [3:4]
+ lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp [3:4]
+ lib/Transforms/InstCombine/InstCombineVectorOps.cpp [3:4]
+ lib/Transforms/InstCombine/InstructionCombining.cpp [3:4]
+ lib/Transforms/Instrumentation/AddressSanitizer.cpp [3:4]
+ lib/Transforms/Instrumentation/BoundsChecking.cpp [3:4]
+ lib/Transforms/Instrumentation/CFGMST.h [3:4]
+ lib/Transforms/Instrumentation/CGProfile.cpp [3:4]
+ lib/Transforms/Instrumentation/ControlHeightReduction.cpp [3:4]
+ lib/Transforms/Instrumentation/DataFlowSanitizer.cpp [3:4]
+ lib/Transforms/Instrumentation/GCOVProfiling.cpp [3:4]
+ lib/Transforms/Instrumentation/HWAddressSanitizer.cpp [3:4]
+ lib/Transforms/Instrumentation/IndirectCallPromotion.cpp [3:4]
+ lib/Transforms/Instrumentation/InstrOrderFile.cpp [3:4]
+ lib/Transforms/Instrumentation/InstrProfiling.cpp [3:4]
+ lib/Transforms/Instrumentation/Instrumentation.cpp [3:4]
+ lib/Transforms/Instrumentation/KCFI.cpp [3:4]
+ lib/Transforms/Instrumentation/MemProfiler.cpp [3:4]
+ lib/Transforms/Instrumentation/MemorySanitizer.cpp [3:4]
+ lib/Transforms/Instrumentation/PGOInstrumentation.cpp [3:4]
+ lib/Transforms/Instrumentation/PGOMemOPSizeOpt.cpp [3:4]
+ lib/Transforms/Instrumentation/PoisonChecking.cpp [3:4]
+ lib/Transforms/Instrumentation/SanitizerBinaryMetadata.cpp [3:4]
+ lib/Transforms/Instrumentation/SanitizerCoverage.cpp [3:4]
+ lib/Transforms/Instrumentation/ThreadSanitizer.cpp [3:4]
+ lib/Transforms/Instrumentation/ValueProfileCollector.cpp [3:4]
+ lib/Transforms/Instrumentation/ValueProfileCollector.h [3:4]
+ lib/Transforms/Instrumentation/ValueProfilePlugins.inc [3:4]
+ lib/Transforms/ObjCARC/ARCRuntimeEntryPoints.h [3:4]
+ lib/Transforms/ObjCARC/BlotMapVector.h [3:4]
+ lib/Transforms/ObjCARC/DependencyAnalysis.cpp [3:4]
+ lib/Transforms/ObjCARC/DependencyAnalysis.h [3:4]
+ lib/Transforms/ObjCARC/ObjCARC.cpp [3:4]
+ lib/Transforms/ObjCARC/ObjCARC.h [3:4]
+ lib/Transforms/ObjCARC/ObjCARCAPElim.cpp [3:4]
+ lib/Transforms/ObjCARC/ObjCARCContract.cpp [3:4]
+ lib/Transforms/ObjCARC/ObjCARCExpand.cpp [3:4]
+ lib/Transforms/ObjCARC/ObjCARCOpts.cpp [3:4]
+ lib/Transforms/ObjCARC/ProvenanceAnalysis.cpp [3:4]
+ lib/Transforms/ObjCARC/ProvenanceAnalysis.h [3:4]
+ lib/Transforms/ObjCARC/ProvenanceAnalysisEvaluator.cpp [3:4]
+ lib/Transforms/ObjCARC/PtrState.cpp [3:4]
+ lib/Transforms/ObjCARC/PtrState.h [3:4]
+ lib/Transforms/Scalar/ADCE.cpp [3:4]
+ lib/Transforms/Scalar/AlignmentFromAssumptions.cpp [4:5]
+ lib/Transforms/Scalar/AnnotationRemarks.cpp [3:4]
+ lib/Transforms/Scalar/BDCE.cpp [3:4]
+ lib/Transforms/Scalar/CallSiteSplitting.cpp [3:4]
+ lib/Transforms/Scalar/ConstantHoisting.cpp [3:4]
+ lib/Transforms/Scalar/ConstraintElimination.cpp [3:4]
+ lib/Transforms/Scalar/CorrelatedValuePropagation.cpp [3:4]
+ lib/Transforms/Scalar/DCE.cpp [3:4]
+ lib/Transforms/Scalar/DFAJumpThreading.cpp [3:4]
+ lib/Transforms/Scalar/DeadStoreElimination.cpp [3:4]
+ lib/Transforms/Scalar/DivRemPairs.cpp [3:4]
+ lib/Transforms/Scalar/EarlyCSE.cpp [3:4]
+ lib/Transforms/Scalar/FlattenCFGPass.cpp [3:4]
+ lib/Transforms/Scalar/Float2Int.cpp [3:4]
+ lib/Transforms/Scalar/GVN.cpp [3:4]
+ lib/Transforms/Scalar/GVNHoist.cpp [3:4]
+ lib/Transforms/Scalar/GVNSink.cpp [3:4]
+ lib/Transforms/Scalar/GuardWidening.cpp [3:4]
+ lib/Transforms/Scalar/IVUsersPrinter.cpp [3:4]
+ lib/Transforms/Scalar/IndVarSimplify.cpp [3:4]
+ lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp [3:4]
+ lib/Transforms/Scalar/InferAddressSpaces.cpp [3:4]
+ lib/Transforms/Scalar/InstSimplifyPass.cpp [3:4]
+ lib/Transforms/Scalar/JumpThreading.cpp [3:4]
+ lib/Transforms/Scalar/LICM.cpp [3:4]
+ lib/Transforms/Scalar/LoopAccessAnalysisPrinter.cpp [3:4]
+ lib/Transforms/Scalar/LoopBoundSplit.cpp [3:4]
+ lib/Transforms/Scalar/LoopDataPrefetch.cpp [3:4]
+ lib/Transforms/Scalar/LoopDeletion.cpp [3:4]
+ lib/Transforms/Scalar/LoopDistribute.cpp [3:4]
+ lib/Transforms/Scalar/LoopFlatten.cpp [3:4]
+ lib/Transforms/Scalar/LoopFuse.cpp [3:4]
+ lib/Transforms/Scalar/LoopIdiomRecognize.cpp [3:4]
+ lib/Transforms/Scalar/LoopInstSimplify.cpp [3:4]
+ lib/Transforms/Scalar/LoopInterchange.cpp [3:4]
+ lib/Transforms/Scalar/LoopLoadElimination.cpp [3:4]
+ lib/Transforms/Scalar/LoopPassManager.cpp [3:4]
+ lib/Transforms/Scalar/LoopPredication.cpp [3:4]
+ lib/Transforms/Scalar/LoopRerollPass.cpp [3:4]
+ lib/Transforms/Scalar/LoopRotation.cpp [3:4]
+ lib/Transforms/Scalar/LoopSimplifyCFG.cpp [3:4]
+ lib/Transforms/Scalar/LoopSink.cpp [3:4]
+ lib/Transforms/Scalar/LoopStrengthReduce.cpp [3:4]
+ lib/Transforms/Scalar/LoopUnrollAndJamPass.cpp [3:4]
+ lib/Transforms/Scalar/LoopUnrollPass.cpp [3:4]
+ lib/Transforms/Scalar/LoopVersioningLICM.cpp [3:4]
+ lib/Transforms/Scalar/LowerAtomicPass.cpp [3:4]
+ lib/Transforms/Scalar/LowerConstantIntrinsics.cpp [3:4]
+ lib/Transforms/Scalar/LowerExpectIntrinsic.cpp [3:4]
+ lib/Transforms/Scalar/LowerGuardIntrinsic.cpp [3:4]
+ lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp [3:4]
+ lib/Transforms/Scalar/LowerWidenableCondition.cpp [3:4]
+ lib/Transforms/Scalar/MakeGuardsExplicit.cpp [3:4]
+ lib/Transforms/Scalar/MemCpyOptimizer.cpp [3:4]
+ lib/Transforms/Scalar/MergeICmps.cpp [3:4]
+ lib/Transforms/Scalar/MergedLoadStoreMotion.cpp [3:4]
+ lib/Transforms/Scalar/NaryReassociate.cpp [3:4]
+ lib/Transforms/Scalar/NewGVN.cpp [3:4]
+ lib/Transforms/Scalar/PartiallyInlineLibCalls.cpp [3:4]
+ lib/Transforms/Scalar/PlaceSafepoints.cpp [3:4]
+ lib/Transforms/Scalar/Reassociate.cpp [3:4]
+ lib/Transforms/Scalar/Reg2Mem.cpp [3:4]
+ lib/Transforms/Scalar/RewriteStatepointsForGC.cpp [3:4]
+ lib/Transforms/Scalar/SCCP.cpp [3:4]
+ lib/Transforms/Scalar/SROA.cpp [3:4]
+ lib/Transforms/Scalar/Scalar.cpp [3:4]
+ lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp [4:5]
+ lib/Transforms/Scalar/Scalarizer.cpp [3:4]
+ lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp [3:4]
+ lib/Transforms/Scalar/SimpleLoopUnswitch.cpp [3:4]
+ lib/Transforms/Scalar/SimplifyCFGPass.cpp [3:4]
+ lib/Transforms/Scalar/Sink.cpp [3:4]
+ lib/Transforms/Scalar/SpeculativeExecution.cpp [3:4]
+ lib/Transforms/Scalar/StraightLineStrengthReduce.cpp [3:4]
+ lib/Transforms/Scalar/StructurizeCFG.cpp [3:4]
+ lib/Transforms/Scalar/TLSVariableHoist.cpp [3:4]
+ lib/Transforms/Scalar/TailRecursionElimination.cpp [3:4]
+ lib/Transforms/Scalar/WarnMissedTransforms.cpp [3:4]
+ lib/Transforms/Utils/AMDGPUEmitPrintf.cpp [3:4]
+ lib/Transforms/Utils/ASanStackFrameLayout.cpp [3:4]
+ lib/Transforms/Utils/AddDiscriminators.cpp [3:4]
+ lib/Transforms/Utils/AssumeBundleBuilder.cpp [3:4]
+ lib/Transforms/Utils/BasicBlockUtils.cpp [3:4]
+ lib/Transforms/Utils/BreakCriticalEdges.cpp [3:4]
+ lib/Transforms/Utils/BuildLibCalls.cpp [3:4]
+ lib/Transforms/Utils/BypassSlowDivision.cpp [3:4]
+ lib/Transforms/Utils/CallGraphUpdater.cpp [3:4]
+ lib/Transforms/Utils/CallPromotionUtils.cpp [3:4]
+ lib/Transforms/Utils/CanonicalizeAliases.cpp [3:4]
+ lib/Transforms/Utils/CanonicalizeFreezeInLoops.cpp [3:4]
+ lib/Transforms/Utils/CloneFunction.cpp [3:4]
+ lib/Transforms/Utils/CloneModule.cpp [3:4]
+ lib/Transforms/Utils/CodeExtractor.cpp [3:4]
+ lib/Transforms/Utils/CodeLayout.cpp [3:4]
+ lib/Transforms/Utils/CodeMoverUtils.cpp [3:4]
+ lib/Transforms/Utils/CtorUtils.cpp [3:4]
+ lib/Transforms/Utils/Debugify.cpp [3:4]
+ lib/Transforms/Utils/DemoteRegToStack.cpp [3:4]
+ lib/Transforms/Utils/EntryExitInstrumenter.cpp [3:4]
+ lib/Transforms/Utils/EscapeEnumerator.cpp [3:4]
+ lib/Transforms/Utils/Evaluator.cpp [3:4]
+ lib/Transforms/Utils/FixIrreducible.cpp [3:4]
+ lib/Transforms/Utils/FlattenCFG.cpp [3:4]
+ lib/Transforms/Utils/FunctionComparator.cpp [3:4]
+ lib/Transforms/Utils/FunctionImportUtils.cpp [3:4]
+ lib/Transforms/Utils/GlobalStatus.cpp [3:4]
+ lib/Transforms/Utils/GuardUtils.cpp [3:4]
+ lib/Transforms/Utils/HelloWorld.cpp [3:4]
+ lib/Transforms/Utils/InjectTLIMappings.cpp [3:4]
+ lib/Transforms/Utils/InlineFunction.cpp [3:4]
+ lib/Transforms/Utils/InstructionNamer.cpp [3:4]
+ lib/Transforms/Utils/IntegerDivision.cpp [3:4]
+ lib/Transforms/Utils/LCSSA.cpp [3:4]
+ lib/Transforms/Utils/LibCallsShrinkWrap.cpp [3:4]
+ lib/Transforms/Utils/Local.cpp [3:4]
+ lib/Transforms/Utils/LoopPeel.cpp [3:4]
+ lib/Transforms/Utils/LoopRotationUtils.cpp [3:4]
+ lib/Transforms/Utils/LoopSimplify.cpp [3:4]
+ lib/Transforms/Utils/LoopUnroll.cpp [3:4]
+ lib/Transforms/Utils/LoopUnrollAndJam.cpp [3:4]
+ lib/Transforms/Utils/LoopUnrollRuntime.cpp [3:4]
+ lib/Transforms/Utils/LoopUtils.cpp [3:4]
+ lib/Transforms/Utils/LoopVersioning.cpp [3:4]
+ lib/Transforms/Utils/LowerAtomic.cpp [3:4]
+ lib/Transforms/Utils/LowerGlobalDtors.cpp [3:4]
+ lib/Transforms/Utils/LowerIFunc.cpp [3:4]
+ lib/Transforms/Utils/LowerInvoke.cpp [3:4]
+ lib/Transforms/Utils/LowerMemIntrinsics.cpp [3:4]
+ lib/Transforms/Utils/LowerSwitch.cpp [3:4]
+ lib/Transforms/Utils/MatrixUtils.cpp [3:4]
+ lib/Transforms/Utils/Mem2Reg.cpp [3:4]
+ lib/Transforms/Utils/MemoryOpRemark.cpp [3:4]
+ lib/Transforms/Utils/MemoryTaggingSupport.cpp [2:3]
+ lib/Transforms/Utils/MetaRenamer.cpp [3:4]
+ lib/Transforms/Utils/MisExpect.cpp [3:4]
+ lib/Transforms/Utils/ModuleUtils.cpp [3:4]
+ lib/Transforms/Utils/NameAnonGlobals.cpp [3:4]
+ lib/Transforms/Utils/PredicateInfo.cpp [3:4]
+ lib/Transforms/Utils/PromoteMemoryToRegister.cpp [3:4]
+ lib/Transforms/Utils/RelLookupTableConverter.cpp [3:4]
+ lib/Transforms/Utils/SCCPSolver.cpp [3:4]
+ lib/Transforms/Utils/SSAUpdater.cpp [3:4]
+ lib/Transforms/Utils/SSAUpdaterBulk.cpp [3:4]
+ lib/Transforms/Utils/SampleProfileInference.cpp [3:4]
+ lib/Transforms/Utils/SampleProfileLoaderBaseUtil.cpp [3:4]
+ lib/Transforms/Utils/SanitizerStats.cpp [3:4]
+ lib/Transforms/Utils/ScalarEvolutionExpander.cpp [3:4]
+ lib/Transforms/Utils/SimplifyCFG.cpp [3:4]
+ lib/Transforms/Utils/SimplifyIndVar.cpp [3:4]
+ lib/Transforms/Utils/SimplifyLibCalls.cpp [3:4]
+ lib/Transforms/Utils/SizeOpts.cpp [3:4]
+ lib/Transforms/Utils/SplitModule.cpp [3:4]
+ lib/Transforms/Utils/StripGCRelocates.cpp [3:4]
+ lib/Transforms/Utils/StripNonLineTableDebugInfo.cpp [3:4]
+ lib/Transforms/Utils/SymbolRewriter.cpp [3:4]
+ lib/Transforms/Utils/UnifyFunctionExitNodes.cpp [3:4]
+ lib/Transforms/Utils/UnifyLoopExits.cpp [3:4]
+ lib/Transforms/Utils/Utils.cpp [3:4]
+ lib/Transforms/Utils/ValueMapper.cpp [3:4]
+ lib/Transforms/Vectorize/LoadStoreVectorizer.cpp [3:4]
+ lib/Transforms/Vectorize/LoopVectorizationLegality.cpp [3:4]
+ lib/Transforms/Vectorize/LoopVectorizationPlanner.h [3:4]
+ lib/Transforms/Vectorize/LoopVectorize.cpp [3:4]
+ lib/Transforms/Vectorize/SLPVectorizer.cpp [3:4]
+ lib/Transforms/Vectorize/VPRecipeBuilder.h [3:4]
+ lib/Transforms/Vectorize/VPlan.cpp [3:4]
+ lib/Transforms/Vectorize/VPlan.h [3:4]
+ lib/Transforms/Vectorize/VPlanCFG.h [3:4]
+ lib/Transforms/Vectorize/VPlanDominatorTree.h [3:4]
+ lib/Transforms/Vectorize/VPlanHCFGBuilder.cpp [3:4]
+ lib/Transforms/Vectorize/VPlanHCFGBuilder.h [3:4]
+ lib/Transforms/Vectorize/VPlanRecipes.cpp [3:4]
+ lib/Transforms/Vectorize/VPlanSLP.cpp [3:4]
+ lib/Transforms/Vectorize/VPlanTransforms.cpp [3:4]
+ lib/Transforms/Vectorize/VPlanTransforms.h [3:4]
+ lib/Transforms/Vectorize/VPlanValue.h [3:4]
+ lib/Transforms/Vectorize/VPlanVerifier.cpp [3:4]
+ lib/Transforms/Vectorize/VPlanVerifier.h [3:4]
+ lib/Transforms/Vectorize/VectorCombine.cpp [3:4]
+ lib/Transforms/Vectorize/Vectorize.cpp [3:4]
+ lib/WindowsDriver/MSVCPaths.cpp [3:4]
+ lib/WindowsManifest/WindowsManifestMerger.cpp [3:4]
+ lib/XRay/BlockIndexer.cpp [3:4]
+ lib/XRay/BlockPrinter.cpp [3:4]
+ lib/XRay/BlockVerifier.cpp [3:4]
+ lib/XRay/FDRRecordProducer.cpp [3:4]
+ lib/XRay/FDRRecords.cpp [3:4]
+ lib/XRay/FDRTraceExpander.cpp [3:4]
+ lib/XRay/FDRTraceWriter.cpp [3:4]
+ lib/XRay/FileHeaderReader.cpp [3:4]
+ lib/XRay/InstrumentationMap.cpp [3:4]
+ lib/XRay/LogBuilderConsumer.cpp [3:4]
+ lib/XRay/Profile.cpp [3:4]
+ lib/XRay/RecordInitializer.cpp [3:4]
+ lib/XRay/RecordPrinter.cpp [3:4]
+ lib/XRay/Trace.cpp [3:4]
+ tools/bugpoint/BugDriver.cpp [3:4]
+ tools/bugpoint/BugDriver.h [3:4]
+ tools/bugpoint/CrashDebugger.cpp [3:4]
+ tools/bugpoint/ExecutionDriver.cpp [3:4]
+ tools/bugpoint/ExtractFunction.cpp [3:4]
+ tools/bugpoint/FindBugs.cpp [3:4]
+ tools/bugpoint/ListReducer.h [3:4]
+ tools/bugpoint/Miscompilation.cpp [3:4]
+ tools/bugpoint/OptimizerDriver.cpp [3:4]
+ tools/bugpoint/ToolRunner.cpp [3:4]
+ tools/bugpoint/ToolRunner.h [3:4]
+ tools/bugpoint/bugpoint.cpp [3:4]
+ tools/dsymutil/BinaryHolder.cpp [3:4]
+ tools/dsymutil/BinaryHolder.h [3:4]
+ tools/dsymutil/CFBundle.cpp [3:4]
+ tools/dsymutil/CFBundle.h [3:4]
+ tools/dsymutil/DebugMap.cpp [3:4]
+ tools/dsymutil/DebugMap.h [3:4]
+ tools/dsymutil/DwarfLinkerForBinary.cpp [3:4]
+ tools/dsymutil/DwarfLinkerForBinary.h [3:4]
+ tools/dsymutil/LinkUtils.h [3:4]
+ tools/dsymutil/MachODebugMapParser.cpp [3:4]
+ tools/dsymutil/MachOUtils.cpp [3:4]
+ tools/dsymutil/MachOUtils.h [3:4]
+ tools/dsymutil/Reproducer.cpp [3:4]
+ tools/dsymutil/Reproducer.h [3:4]
+ tools/dsymutil/SymbolMap.cpp [3:4]
+ tools/dsymutil/SymbolMap.h [3:4]
+ tools/dsymutil/dsymutil.cpp [3:4]
+ tools/dsymutil/dsymutil.h [3:4]
+ tools/gold/gold-plugin.cpp [3:4]
+ tools/llc/llc.cpp [3:4]
+ tools/lli/ChildTarget/ChildTarget.cpp [3:4]
+ tools/lli/ExecutionUtils.cpp [3:4]
+ tools/lli/ExecutionUtils.h [3:4]
+ tools/lli/ForwardingMemoryManager.h [3:4]
+ tools/lli/lli.cpp [3:4]
+ tools/llvm-ar/llvm-ar-driver.cpp [3:4]
+ tools/llvm-ar/llvm-ar.cpp [3:4]
+ tools/llvm-as/llvm-as.cpp [3:4]
+ tools/llvm-bcanalyzer/llvm-bcanalyzer.cpp [3:4]
+ tools/llvm-cat/llvm-cat.cpp [3:4]
+ tools/llvm-cfi-verify/lib/FileAnalysis.cpp [3:4]
+ tools/llvm-cfi-verify/lib/FileAnalysis.h [3:4]
+ tools/llvm-cfi-verify/lib/GraphBuilder.cpp [3:4]
+ tools/llvm-cfi-verify/lib/GraphBuilder.h [3:4]
+ tools/llvm-cfi-verify/llvm-cfi-verify.cpp [3:4]
+ tools/llvm-config/BuildVariables.inc [3:4]
+ tools/llvm-config/llvm-config.cpp [3:4]
+ tools/llvm-cov/CodeCoverage.cpp [3:4]
+ tools/llvm-cov/CoverageExporter.h [3:4]
+ tools/llvm-cov/CoverageExporterJson.cpp [3:4]
+ tools/llvm-cov/CoverageExporterJson.h [3:4]
+ tools/llvm-cov/CoverageExporterLcov.cpp [3:4]
+ tools/llvm-cov/CoverageExporterLcov.h [3:4]
+ tools/llvm-cov/CoverageFilters.cpp [3:4]
+ tools/llvm-cov/CoverageFilters.h [3:4]
+ tools/llvm-cov/CoverageReport.cpp [3:4]
+ tools/llvm-cov/CoverageReport.h [3:4]
+ tools/llvm-cov/CoverageSummaryInfo.cpp [3:4]
+ tools/llvm-cov/CoverageSummaryInfo.h [3:4]
+ tools/llvm-cov/CoverageViewOptions.h [3:4]
+ tools/llvm-cov/RenderingSupport.h [3:4]
+ tools/llvm-cov/SourceCoverageView.cpp [3:4]
+ tools/llvm-cov/SourceCoverageView.h [3:4]
+ tools/llvm-cov/SourceCoverageViewHTML.cpp [3:4]
+ tools/llvm-cov/SourceCoverageViewHTML.h [3:4]
+ tools/llvm-cov/SourceCoverageViewText.cpp [3:4]
+ tools/llvm-cov/SourceCoverageViewText.h [3:4]
+ tools/llvm-cov/TestingSupport.cpp [3:4]
+ tools/llvm-cov/gcov.cpp [3:4]
+ tools/llvm-cov/llvm-cov.cpp [3:4]
+ tools/llvm-cvtres/llvm-cvtres.cpp [3:4]
+ tools/llvm-cxxdump/Error.cpp [3:4]
+ tools/llvm-cxxdump/Error.h [3:4]
+ tools/llvm-cxxdump/llvm-cxxdump.cpp [3:4]
+ tools/llvm-cxxdump/llvm-cxxdump.h [3:4]
+ tools/llvm-cxxfilt/llvm-cxxfilt-driver.cpp [3:4]
+ tools/llvm-cxxfilt/llvm-cxxfilt.cpp [3:4]
+ tools/llvm-cxxmap/llvm-cxxmap.cpp [3:4]
+ tools/llvm-diff/lib/DiffConsumer.cpp [3:4]
+ tools/llvm-diff/lib/DiffConsumer.h [3:4]
+ tools/llvm-diff/lib/DiffLog.cpp [3:4]
+ tools/llvm-diff/lib/DiffLog.h [3:4]
+ tools/llvm-diff/lib/DifferenceEngine.cpp [3:4]
+ tools/llvm-diff/lib/DifferenceEngine.h [3:4]
+ tools/llvm-diff/llvm-diff.cpp [3:4]
+ tools/llvm-dis/llvm-dis.cpp [3:4]
+ tools/llvm-dwarfdump/SectionSizes.cpp [3:4]
+ tools/llvm-dwarfdump/Statistics.cpp [3:4]
+ tools/llvm-dwarfdump/llvm-dwarfdump.cpp [3:4]
+ tools/llvm-dwarfdump/llvm-dwarfdump.h [3:4]
+ tools/llvm-dwp/llvm-dwp.cpp [3:4]
+ tools/llvm-exegesis/lib/AArch64/Target.cpp [3:4]
+ tools/llvm-exegesis/lib/Analysis.cpp [3:4]
+ tools/llvm-exegesis/lib/Analysis.h [3:4]
+ tools/llvm-exegesis/lib/Assembler.cpp [3:4]
+ tools/llvm-exegesis/lib/Assembler.h [3:4]
+ tools/llvm-exegesis/lib/BenchmarkCode.h [3:4]
+ tools/llvm-exegesis/lib/BenchmarkResult.cpp [3:4]
+ tools/llvm-exegesis/lib/BenchmarkResult.h [3:4]
+ tools/llvm-exegesis/lib/BenchmarkRunner.cpp [3:4]
+ tools/llvm-exegesis/lib/BenchmarkRunner.h [3:4]
+ tools/llvm-exegesis/lib/Clustering.cpp [3:4]
+ tools/llvm-exegesis/lib/Clustering.h [3:4]
+ tools/llvm-exegesis/lib/CodeTemplate.cpp [3:4]
+ tools/llvm-exegesis/lib/CodeTemplate.h [3:4]
+ tools/llvm-exegesis/lib/Error.cpp [3:4]
+ tools/llvm-exegesis/lib/Error.h [3:4]
+ tools/llvm-exegesis/lib/LatencyBenchmarkRunner.cpp [3:4]
+ tools/llvm-exegesis/lib/LatencyBenchmarkRunner.h [3:4]
+ tools/llvm-exegesis/lib/LlvmState.cpp [3:4]
+ tools/llvm-exegesis/lib/LlvmState.h [3:4]
+ tools/llvm-exegesis/lib/MCInstrDescView.cpp [3:4]
+ tools/llvm-exegesis/lib/MCInstrDescView.h [3:4]
+ tools/llvm-exegesis/lib/ParallelSnippetGenerator.cpp [3:4]
+ tools/llvm-exegesis/lib/ParallelSnippetGenerator.h [3:4]
+ tools/llvm-exegesis/lib/PerfHelper.cpp [3:4]
+ tools/llvm-exegesis/lib/PerfHelper.h [3:4]
+ tools/llvm-exegesis/lib/PowerPC/Target.cpp [3:4]
+ tools/llvm-exegesis/lib/ProgressMeter.h [3:4]
+ tools/llvm-exegesis/lib/RegisterAliasing.cpp [3:4]
+ tools/llvm-exegesis/lib/RegisterAliasing.h [3:4]
+ tools/llvm-exegesis/lib/RegisterValue.cpp [3:4]
+ tools/llvm-exegesis/lib/RegisterValue.h [3:4]
+ tools/llvm-exegesis/lib/SchedClassResolution.cpp [3:4]
+ tools/llvm-exegesis/lib/SchedClassResolution.h [3:4]
+ tools/llvm-exegesis/lib/SerialSnippetGenerator.cpp [3:4]
+ tools/llvm-exegesis/lib/SerialSnippetGenerator.h [3:4]
+ tools/llvm-exegesis/lib/SnippetFile.cpp [3:4]
+ tools/llvm-exegesis/lib/SnippetFile.h [3:4]
+ tools/llvm-exegesis/lib/SnippetGenerator.cpp [3:4]
+ tools/llvm-exegesis/lib/SnippetGenerator.h [3:4]
+ tools/llvm-exegesis/lib/SnippetRepetitor.cpp [3:4]
+ tools/llvm-exegesis/lib/SnippetRepetitor.h [3:4]
+ tools/llvm-exegesis/lib/Target.cpp [3:4]
+ tools/llvm-exegesis/lib/Target.h [3:4]
+ tools/llvm-exegesis/lib/TargetSelect.h [3:4]
+ tools/llvm-exegesis/lib/UopsBenchmarkRunner.cpp [3:4]
+ tools/llvm-exegesis/lib/UopsBenchmarkRunner.h [3:4]
+ tools/llvm-exegesis/lib/X86/Target.cpp [3:4]
+ tools/llvm-exegesis/lib/X86/X86Counter.cpp [3:4]
+ tools/llvm-exegesis/lib/X86/X86Counter.h [3:4]
+ tools/llvm-exegesis/llvm-exegesis.cpp [3:4]
+ tools/llvm-extract/llvm-extract.cpp [3:4]
+ tools/llvm-gsymutil/llvm-gsymutil.cpp [3:4]
+ tools/llvm-ifs/ErrorCollector.cpp [3:4]
+ tools/llvm-ifs/ErrorCollector.h [3:4]
+ tools/llvm-ifs/llvm-ifs-driver.cpp [3:4]
+ tools/llvm-ifs/llvm-ifs.cpp [3:4]
+ tools/llvm-jitlink/llvm-jitlink-coff.cpp [3:4]
+ tools/llvm-jitlink/llvm-jitlink-elf.cpp [3:4]
+ tools/llvm-jitlink/llvm-jitlink-executor/llvm-jitlink-executor.cpp [3:4]
+ tools/llvm-jitlink/llvm-jitlink-macho.cpp [3:4]
+ tools/llvm-jitlink/llvm-jitlink.cpp [3:4]
+ tools/llvm-jitlink/llvm-jitlink.h [3:4]
+ tools/llvm-libtool-darwin/DependencyInfo.h [3:4]
+ tools/llvm-libtool-darwin/llvm-libtool-darwin.cpp [3:4]
+ tools/llvm-link/llvm-link.cpp [3:4]
+ tools/llvm-lipo/llvm-lipo-driver.cpp [3:4]
+ tools/llvm-lipo/llvm-lipo.cpp [3:4]
+ tools/llvm-lto/llvm-lto.cpp [3:4]
+ tools/llvm-lto2/llvm-lto2.cpp [3:4]
+ tools/llvm-mc/Disassembler.cpp [3:4]
+ tools/llvm-mc/Disassembler.h [3:4]
+ tools/llvm-mc/llvm-mc.cpp [3:4]
+ tools/llvm-mca/CodeRegion.cpp [3:4]
+ tools/llvm-mca/CodeRegion.h [3:4]
+ tools/llvm-mca/CodeRegionGenerator.cpp [3:4]
+ tools/llvm-mca/CodeRegionGenerator.h [3:4]
+ tools/llvm-mca/PipelinePrinter.cpp [3:4]
+ tools/llvm-mca/PipelinePrinter.h [3:4]
+ tools/llvm-mca/Views/BottleneckAnalysis.cpp [3:4]
+ tools/llvm-mca/Views/BottleneckAnalysis.h [3:4]
+ tools/llvm-mca/Views/DispatchStatistics.cpp [3:4]
+ tools/llvm-mca/Views/DispatchStatistics.h [3:4]
+ tools/llvm-mca/Views/InstructionInfoView.cpp [3:4]
+ tools/llvm-mca/Views/InstructionInfoView.h [3:4]
+ tools/llvm-mca/Views/InstructionView.cpp [3:4]
+ tools/llvm-mca/Views/InstructionView.h [3:4]
+ tools/llvm-mca/Views/RegisterFileStatistics.cpp [3:4]
+ tools/llvm-mca/Views/RegisterFileStatistics.h [3:4]
+ tools/llvm-mca/Views/ResourcePressureView.cpp [3:4]
+ tools/llvm-mca/Views/ResourcePressureView.h [3:4]
+ tools/llvm-mca/Views/RetireControlUnitStatistics.cpp [3:4]
+ tools/llvm-mca/Views/RetireControlUnitStatistics.h [3:4]
+ tools/llvm-mca/Views/SchedulerStatistics.cpp [3:4]
+ tools/llvm-mca/Views/SchedulerStatistics.h [3:4]
+ tools/llvm-mca/Views/SummaryView.cpp [3:4]
+ tools/llvm-mca/Views/SummaryView.h [3:4]
+ tools/llvm-mca/Views/TimelineView.cpp [3:4]
+ tools/llvm-mca/Views/TimelineView.h [3:4]
+ tools/llvm-mca/llvm-mca.cpp [3:4]
+ tools/llvm-ml/Disassembler.cpp [3:4]
+ tools/llvm-ml/Disassembler.h [3:4]
+ tools/llvm-ml/llvm-ml.cpp [3:4]
+ tools/llvm-modextract/llvm-modextract.cpp [3:4]
+ tools/llvm-mt/llvm-mt-driver.cpp [3:4]
+ tools/llvm-mt/llvm-mt.cpp [3:4]
+ tools/llvm-nm/llvm-nm-driver.cpp [3:4]
+ tools/llvm-nm/llvm-nm.cpp [3:4]
+ tools/llvm-objcopy/BitcodeStripOpts.td [3:4]
+ tools/llvm-objcopy/InstallNameToolOpts.td [3:4]
+ tools/llvm-objcopy/ObjcopyOptions.cpp [3:4]
+ tools/llvm-objcopy/ObjcopyOptions.h [3:4]
+ tools/llvm-objcopy/llvm-objcopy-driver.cpp [3:4]
+ tools/llvm-objcopy/llvm-objcopy.cpp [3:4]
+ tools/llvm-objdump/COFFDump.cpp [3:4]
+ tools/llvm-objdump/COFFDump.h [3:4]
+ tools/llvm-objdump/ELFDump.cpp [3:4]
+ tools/llvm-objdump/ELFDump.h [3:4]
+ tools/llvm-objdump/MachODump.cpp [3:4]
+ tools/llvm-objdump/MachODump.h [3:4]
+ tools/llvm-objdump/OffloadDump.cpp [3:4]
+ tools/llvm-objdump/OffloadDump.h [3:4]
+ tools/llvm-objdump/SourcePrinter.cpp [3:4]
+ tools/llvm-objdump/SourcePrinter.h [3:4]
+ tools/llvm-objdump/WasmDump.cpp [3:4]
+ tools/llvm-objdump/WasmDump.h [3:4]
+ tools/llvm-objdump/XCOFFDump.cpp [3:4]
+ tools/llvm-objdump/XCOFFDump.h [3:4]
+ tools/llvm-objdump/llvm-objdump.cpp [3:4]
+ tools/llvm-objdump/llvm-objdump.h [3:4]
+ tools/llvm-opt-report/OptReport.cpp [3:4]
+ tools/llvm-pdbutil/BytesOutputStyle.cpp [3:4]
+ tools/llvm-pdbutil/BytesOutputStyle.h [3:4]
+ tools/llvm-pdbutil/DumpOutputStyle.cpp [3:4]
+ tools/llvm-pdbutil/DumpOutputStyle.h [3:4]
+ tools/llvm-pdbutil/ExplainOutputStyle.cpp [3:4]
+ tools/llvm-pdbutil/ExplainOutputStyle.h [3:4]
+ tools/llvm-pdbutil/MinimalSymbolDumper.cpp [3:4]
+ tools/llvm-pdbutil/MinimalSymbolDumper.h [3:4]
+ tools/llvm-pdbutil/MinimalTypeDumper.cpp [3:4]
+ tools/llvm-pdbutil/MinimalTypeDumper.h [3:4]
+ tools/llvm-pdbutil/OutputStyle.h [3:4]
+ tools/llvm-pdbutil/PdbYaml.cpp [3:4]
+ tools/llvm-pdbutil/PdbYaml.h [3:4]
+ tools/llvm-pdbutil/PrettyBuiltinDumper.cpp [3:4]
+ tools/llvm-pdbutil/PrettyBuiltinDumper.h [3:4]
+ tools/llvm-pdbutil/PrettyClassDefinitionDumper.cpp [3:4]
+ tools/llvm-pdbutil/PrettyClassDefinitionDumper.h [3:4]
+ tools/llvm-pdbutil/PrettyClassLayoutGraphicalDumper.cpp [3:4]
+ tools/llvm-pdbutil/PrettyClassLayoutGraphicalDumper.h [3:4]
+ tools/llvm-pdbutil/PrettyCompilandDumper.cpp [3:4]
+ tools/llvm-pdbutil/PrettyCompilandDumper.h [3:4]
+ tools/llvm-pdbutil/PrettyEnumDumper.cpp [3:4]
+ tools/llvm-pdbutil/PrettyEnumDumper.h [3:4]
+ tools/llvm-pdbutil/PrettyExternalSymbolDumper.cpp [3:4]
+ tools/llvm-pdbutil/PrettyExternalSymbolDumper.h [3:4]
+ tools/llvm-pdbutil/PrettyFunctionDumper.cpp [3:4]
+ tools/llvm-pdbutil/PrettyFunctionDumper.h [3:4]
+ tools/llvm-pdbutil/PrettyTypeDumper.cpp [3:4]
+ tools/llvm-pdbutil/PrettyTypeDumper.h [3:4]
+ tools/llvm-pdbutil/PrettyTypedefDumper.cpp [3:4]
+ tools/llvm-pdbutil/PrettyTypedefDumper.h [3:4]
+ tools/llvm-pdbutil/PrettyVariableDumper.cpp [3:4]
+ tools/llvm-pdbutil/PrettyVariableDumper.h [3:4]
+ tools/llvm-pdbutil/StreamUtil.cpp [3:4]
+ tools/llvm-pdbutil/StreamUtil.h [3:4]
+ tools/llvm-pdbutil/TypeReferenceTracker.cpp [3:4]
+ tools/llvm-pdbutil/TypeReferenceTracker.h [3:4]
+ tools/llvm-pdbutil/YAMLOutputStyle.cpp [3:4]
+ tools/llvm-pdbutil/YAMLOutputStyle.h [3:4]
+ tools/llvm-pdbutil/llvm-pdbutil.cpp [3:4]
+ tools/llvm-pdbutil/llvm-pdbutil.h [3:4]
+ tools/llvm-profdata/llvm-profdata-driver.cpp [3:4]
+ tools/llvm-profdata/llvm-profdata.cpp [3:4]
+ tools/llvm-profgen/CSPreInliner.cpp [3:4]
+ tools/llvm-profgen/CSPreInliner.h [3:4]
+ tools/llvm-profgen/CallContext.h [3:4]
+ tools/llvm-profgen/ErrorHandling.h [3:4]
+ tools/llvm-profgen/MissingFrameInferrer.cpp [3:4]
+ tools/llvm-profgen/MissingFrameInferrer.h [3:4]
+ tools/llvm-profgen/PerfReader.cpp [3:4]
+ tools/llvm-profgen/PerfReader.h [3:4]
+ tools/llvm-profgen/ProfileGenerator.cpp [3:4]
+ tools/llvm-profgen/ProfileGenerator.h [3:4]
+ tools/llvm-profgen/ProfiledBinary.cpp [3:4]
+ tools/llvm-profgen/ProfiledBinary.h [3:4]
+ tools/llvm-profgen/llvm-profgen.cpp [3:4]
+ tools/llvm-rc/ResourceFileWriter.cpp [3:4]
+ tools/llvm-rc/ResourceFileWriter.h [3:4]
+ tools/llvm-rc/ResourceScriptCppFilter.cpp [3:4]
+ tools/llvm-rc/ResourceScriptCppFilter.h [3:4]
+ tools/llvm-rc/ResourceScriptParser.cpp [3:4]
+ tools/llvm-rc/ResourceScriptParser.h [3:4]
+ tools/llvm-rc/ResourceScriptStmt.cpp [2:3]
+ tools/llvm-rc/ResourceScriptStmt.h [3:4]
+ tools/llvm-rc/ResourceScriptToken.cpp [3:4]
+ tools/llvm-rc/ResourceScriptToken.h [3:4]
+ tools/llvm-rc/ResourceScriptTokenList.def [3:4]
+ tools/llvm-rc/ResourceVisitor.h [3:4]
+ tools/llvm-rc/llvm-rc-driver.cpp [3:4]
+ tools/llvm-rc/llvm-rc.cpp [3:4]
+ tools/llvm-readobj/ARMEHABIPrinter.h [3:4]
+ tools/llvm-readobj/ARMWinEHPrinter.cpp [3:4]
+ tools/llvm-readobj/ARMWinEHPrinter.h [3:4]
+ tools/llvm-readobj/COFFDumper.cpp [3:4]
+ tools/llvm-readobj/COFFImportDumper.cpp [3:4]
+ tools/llvm-readobj/DwarfCFIEHPrinter.h [3:4]
+ tools/llvm-readobj/ELFDumper.cpp [3:4]
+ tools/llvm-readobj/MachODumper.cpp [3:4]
+ tools/llvm-readobj/ObjDumper.cpp [3:4]
+ tools/llvm-readobj/ObjDumper.h [3:4]
+ tools/llvm-readobj/StackMapPrinter.h [3:4]
+ tools/llvm-readobj/WasmDumper.cpp [3:4]
+ tools/llvm-readobj/Win64EHDumper.cpp [3:4]
+ tools/llvm-readobj/Win64EHDumper.h [3:4]
+ tools/llvm-readobj/WindowsResourceDumper.cpp [3:4]
+ tools/llvm-readobj/WindowsResourceDumper.h [3:4]
+ tools/llvm-readobj/XCOFFDumper.cpp [3:4]
+ tools/llvm-readobj/llvm-readobj-driver.cpp [3:4]
+ tools/llvm-readobj/llvm-readobj.cpp [3:4]
+ tools/llvm-readobj/llvm-readobj.h [3:4]
+ tools/llvm-reduce/DeltaManager.cpp [3:4]
+ tools/llvm-reduce/DeltaManager.h [3:4]
+ tools/llvm-reduce/ReducerWorkItem.cpp [3:4]
+ tools/llvm-reduce/ReducerWorkItem.h [3:4]
+ tools/llvm-reduce/TestRunner.cpp [3:4]
+ tools/llvm-reduce/TestRunner.h [3:4]
+ tools/llvm-reduce/deltas/Delta.cpp [3:4]
+ tools/llvm-reduce/deltas/Delta.h [3:4]
+ tools/llvm-reduce/deltas/ReduceAliases.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceAliases.h [3:4]
+ tools/llvm-reduce/deltas/ReduceArguments.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceArguments.h [3:4]
+ tools/llvm-reduce/deltas/ReduceAttributes.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceAttributes.h [3:4]
+ tools/llvm-reduce/deltas/ReduceBasicBlocks.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceBasicBlocks.h [3:4]
+ tools/llvm-reduce/deltas/ReduceDIMetadata.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceDIMetadata.h [3:4]
+ tools/llvm-reduce/deltas/ReduceFunctionBodies.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceFunctionBodies.h [3:4]
+ tools/llvm-reduce/deltas/ReduceFunctions.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceFunctions.h [3:4]
+ tools/llvm-reduce/deltas/ReduceGlobalObjects.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceGlobalObjects.h [3:4]
+ tools/llvm-reduce/deltas/ReduceGlobalValues.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceGlobalValues.h [3:4]
+ tools/llvm-reduce/deltas/ReduceGlobalVarInitializers.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceGlobalVarInitializers.h [3:4]
+ tools/llvm-reduce/deltas/ReduceGlobalVars.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceGlobalVars.h [3:4]
+ tools/llvm-reduce/deltas/ReduceIRReferences.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceIRReferences.h [3:4]
+ tools/llvm-reduce/deltas/ReduceInstructionFlags.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceInstructionFlags.h [3:4]
+ tools/llvm-reduce/deltas/ReduceInstructionFlagsMIR.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceInstructionFlagsMIR.h [3:4]
+ tools/llvm-reduce/deltas/ReduceInstructions.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceInstructions.h [3:4]
+ tools/llvm-reduce/deltas/ReduceInstructionsMIR.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceInstructionsMIR.h [3:4]
+ tools/llvm-reduce/deltas/ReduceInvokes.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceInvokes.h [3:4]
+ tools/llvm-reduce/deltas/ReduceMemoryOperations.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceMemoryOperations.h [3:4]
+ tools/llvm-reduce/deltas/ReduceMetadata.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceMetadata.h [3:4]
+ tools/llvm-reduce/deltas/ReduceModuleData.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceModuleData.h [3:4]
+ tools/llvm-reduce/deltas/ReduceOpcodes.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceOpcodes.h [3:4]
+ tools/llvm-reduce/deltas/ReduceOperandBundles.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceOperandBundles.h [3:4]
+ tools/llvm-reduce/deltas/ReduceOperands.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceOperands.h [3:4]
+ tools/llvm-reduce/deltas/ReduceOperandsSkip.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceOperandsSkip.h [3:4]
+ tools/llvm-reduce/deltas/ReduceOperandsToArgs.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceOperandsToArgs.h [3:4]
+ tools/llvm-reduce/deltas/ReduceRegisterDefs.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceRegisterDefs.h [3:4]
+ tools/llvm-reduce/deltas/ReduceRegisterMasks.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceRegisterMasks.h [3:4]
+ tools/llvm-reduce/deltas/ReduceRegisterUses.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceRegisterUses.h [3:4]
+ tools/llvm-reduce/deltas/ReduceSpecialGlobals.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceSpecialGlobals.h [3:4]
+ tools/llvm-reduce/deltas/ReduceUsingSimplifyCFG.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceUsingSimplifyCFG.h [3:4]
+ tools/llvm-reduce/deltas/ReduceVirtualRegisters.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceVirtualRegisters.h [3:4]
+ tools/llvm-reduce/deltas/RunIRPasses.cpp [3:4]
+ tools/llvm-reduce/deltas/RunIRPasses.h [3:4]
+ tools/llvm-reduce/deltas/SimplifyInstructions.cpp [3:4]
+ tools/llvm-reduce/deltas/SimplifyInstructions.h [3:4]
+ tools/llvm-reduce/deltas/StripDebugInfo.cpp [3:4]
+ tools/llvm-reduce/deltas/StripDebugInfo.h [3:4]
+ tools/llvm-reduce/deltas/Utils.cpp [3:4]
+ tools/llvm-reduce/deltas/Utils.h [3:4]
+ tools/llvm-reduce/llvm-reduce.cpp [3:4]
+ tools/llvm-rtdyld/llvm-rtdyld.cpp [3:4]
+ tools/llvm-size/llvm-size-driver.cpp [3:4]
+ tools/llvm-size/llvm-size.cpp [3:4]
+ tools/llvm-split/llvm-split.cpp [3:4]
+ tools/llvm-stress/llvm-stress.cpp [3:4]
+ tools/llvm-strings/llvm-strings.cpp [3:4]
+ tools/llvm-symbolizer/llvm-symbolizer.cpp [3:4]
+ tools/llvm-undname/llvm-undname.cpp [4:5]
+ tools/llvm-xray/func-id-helper.cpp [3:4]
+ tools/llvm-xray/func-id-helper.h [3:4]
+ tools/llvm-xray/llvm-xray.cpp [3:4]
+ tools/llvm-xray/trie-node.h [3:4]
+ tools/llvm-xray/xray-account.cpp [3:4]
+ tools/llvm-xray/xray-account.h [3:4]
+ tools/llvm-xray/xray-color-helper.cpp [3:4]
+ tools/llvm-xray/xray-color-helper.h [3:4]
+ tools/llvm-xray/xray-converter.cpp [3:4]
+ tools/llvm-xray/xray-converter.h [3:4]
+ tools/llvm-xray/xray-extract.cpp [3:4]
+ tools/llvm-xray/xray-fdr-dump.cpp [3:4]
+ tools/llvm-xray/xray-graph-diff.cpp [3:4]
+ tools/llvm-xray/xray-graph-diff.h [3:4]
+ tools/llvm-xray/xray-graph.cpp [3:4]
+ tools/llvm-xray/xray-graph.h [3:4]
+ tools/llvm-xray/xray-registry.cpp [3:4]
+ tools/llvm-xray/xray-registry.h [3:4]
+ tools/llvm-xray/xray-stacks.cpp [3:4]
+ tools/lto/LTODisassembler.cpp [3:4]
+ tools/lto/lto.cpp [3:4]
+ tools/obj2yaml/archive2yaml.cpp [3:4]
+ tools/obj2yaml/coff2yaml.cpp [3:4]
+ tools/obj2yaml/dwarf2yaml.cpp [3:4]
+ tools/obj2yaml/dxcontainer2yaml.cpp [3:4]
+ tools/obj2yaml/elf2yaml.cpp [3:4]
+ tools/obj2yaml/macho2yaml.cpp [3:4]
+ tools/obj2yaml/minidump2yaml.cpp [3:4]
+ tools/obj2yaml/obj2yaml.cpp [3:4]
+ tools/obj2yaml/obj2yaml.h [3:4]
+ tools/obj2yaml/offload2yaml.cpp [3:4]
+ tools/obj2yaml/wasm2yaml.cpp [3:4]
+ tools/obj2yaml/xcoff2yaml.cpp [3:4]
+ tools/opt/AnalysisWrappers.cpp [3:4]
+ tools/opt/BreakpointPrinter.cpp [3:4]
+ tools/opt/BreakpointPrinter.h [3:4]
+ tools/opt/NewPMDriver.cpp [3:4]
+ tools/opt/NewPMDriver.h [3:4]
+ tools/opt/opt.cpp [3:4]
+ tools/polly/include/polly/Canonicalization.h [3:4]
+ tools/polly/include/polly/CodeGen/BlockGenerators.h [3:4]
+ tools/polly/include/polly/CodeGen/CodeGeneration.h [3:4]
+ tools/polly/include/polly/CodeGen/IRBuilder.h [3:4]
+ tools/polly/include/polly/CodeGen/IslAst.h [3:4]
+ tools/polly/include/polly/CodeGen/IslExprBuilder.h [3:4]
+ tools/polly/include/polly/CodeGen/IslNodeBuilder.h [3:4]
+ tools/polly/include/polly/CodeGen/LoopGenerators.h [3:4]
+ tools/polly/include/polly/CodeGen/LoopGeneratorsGOMP.h [3:4]
+ tools/polly/include/polly/CodeGen/LoopGeneratorsKMP.h [3:4]
+ tools/polly/include/polly/CodeGen/PPCGCodeGeneration.h [3:4]
+ tools/polly/include/polly/CodeGen/PerfMonitor.h [3:4]
+ tools/polly/include/polly/CodeGen/RuntimeDebugBuilder.h [3:4]
+ tools/polly/include/polly/CodeGen/Utils.h [3:4]
+ tools/polly/include/polly/CodePreparation.h [3:4]
+ tools/polly/include/polly/Config/config.h [3:4]
+ tools/polly/include/polly/DeLICM.h [3:4]
+ tools/polly/include/polly/DeadCodeElimination.h [3:4]
+ tools/polly/include/polly/DependenceInfo.h [3:4]
+ tools/polly/include/polly/FlattenAlgo.h [3:4]
+ tools/polly/include/polly/FlattenSchedule.h [3:4]
+ tools/polly/include/polly/ForwardOpTree.h [3:4]
+ tools/polly/include/polly/JSONExporter.h [3:4]
+ tools/polly/include/polly/LinkAllPasses.h [3:4]
+ tools/polly/include/polly/ManualOptimizer.h [3:4]
+ tools/polly/include/polly/MatmulOptimizer.h [3:4]
+ tools/polly/include/polly/MaximalStaticExpansion.h [3:4]
+ tools/polly/include/polly/Options.h [3:4]
+ tools/polly/include/polly/PolyhedralInfo.h [3:4]
+ tools/polly/include/polly/PruneUnprofitable.h [3:4]
+ tools/polly/include/polly/RegisterPasses.h [3:4]
+ tools/polly/include/polly/ScheduleOptimizer.h [3:4]
+ tools/polly/include/polly/ScheduleTreeTransform.h [3:4]
+ tools/polly/include/polly/ScopBuilder.h [3:4]
+ tools/polly/include/polly/ScopDetection.h [3:4]
+ tools/polly/include/polly/ScopDetectionDiagnostic.h [3:4]
+ tools/polly/include/polly/ScopGraphPrinter.h [3:4]
+ tools/polly/include/polly/ScopInfo.h [3:4]
+ tools/polly/include/polly/ScopPass.h [3:4]
+ tools/polly/include/polly/Simplify.h [3:4]
+ tools/polly/include/polly/Support/DumpFunctionPass.h [3:4]
+ tools/polly/include/polly/Support/DumpModulePass.h [3:4]
+ tools/polly/include/polly/Support/GICHelper.h [3:4]
+ tools/polly/include/polly/Support/ISLOStream.h [3:4]
+ tools/polly/include/polly/Support/ISLOperators.h [3:4]
+ tools/polly/include/polly/Support/ISLTools.h [3:4]
+ tools/polly/include/polly/Support/SCEVAffinator.h [3:4]
+ tools/polly/include/polly/Support/SCEVValidator.h [3:4]
+ tools/polly/include/polly/Support/ScopHelper.h [3:4]
+ tools/polly/include/polly/Support/ScopLocation.h [3:4]
+ tools/polly/include/polly/Support/VirtualInstruction.h [3:4]
+ tools/polly/include/polly/ZoneAlgo.h [3:4]
+ tools/polly/lib/Analysis/DependenceInfo.cpp [3:4]
+ tools/polly/lib/Analysis/PolyhedralInfo.cpp [3:4]
+ tools/polly/lib/Analysis/PruneUnprofitable.cpp [3:4]
+ tools/polly/lib/Analysis/ScopBuilder.cpp [3:4]
+ tools/polly/lib/Analysis/ScopDetection.cpp [3:4]
+ tools/polly/lib/Analysis/ScopDetectionDiagnostic.cpp [3:4]
+ tools/polly/lib/Analysis/ScopGraphPrinter.cpp [3:4]
+ tools/polly/lib/Analysis/ScopInfo.cpp [3:4]
+ tools/polly/lib/Analysis/ScopPass.cpp [3:4]
+ tools/polly/lib/CodeGen/BlockGenerators.cpp [3:4]
+ tools/polly/lib/CodeGen/CodeGeneration.cpp [3:4]
+ tools/polly/lib/CodeGen/CodegenCleanup.cpp [3:4]
+ tools/polly/lib/CodeGen/IRBuilder.cpp [3:4]
+ tools/polly/lib/CodeGen/IslAst.cpp [3:4]
+ tools/polly/lib/CodeGen/IslExprBuilder.cpp [3:4]
+ tools/polly/lib/CodeGen/IslNodeBuilder.cpp [3:4]
+ tools/polly/lib/CodeGen/LoopGenerators.cpp [3:4]
+ tools/polly/lib/CodeGen/LoopGeneratorsGOMP.cpp [3:4]
+ tools/polly/lib/CodeGen/LoopGeneratorsKMP.cpp [3:4]
+ tools/polly/lib/CodeGen/ManagedMemoryRewrite.cpp [3:4]
+ tools/polly/lib/CodeGen/PPCGCodeGeneration.cpp [3:4]
+ tools/polly/lib/CodeGen/PerfMonitor.cpp [3:4]
+ tools/polly/lib/CodeGen/RuntimeDebugBuilder.cpp [3:4]
+ tools/polly/lib/CodeGen/Utils.cpp [3:4]
+ tools/polly/lib/Exchange/JSONExporter.cpp [3:4]
+ tools/polly/lib/Support/DumpFunctionPass.cpp [3:4]
+ tools/polly/lib/Support/DumpModulePass.cpp [3:4]
+ tools/polly/lib/Support/GICHelper.cpp [3:4]
+ tools/polly/lib/Support/ISLTools.cpp [3:4]
+ tools/polly/lib/Support/RegisterPasses.cpp [3:4]
+ tools/polly/lib/Support/SCEVAffinator.cpp [3:4]
+ tools/polly/lib/Support/ScopHelper.cpp [3:4]
+ tools/polly/lib/Support/ScopLocation.cpp [3:4]
+ tools/polly/lib/Support/VirtualInstruction.cpp [3:4]
+ tools/polly/lib/Transform/Canonicalization.cpp [3:4]
+ tools/polly/lib/Transform/CodePreparation.cpp [3:4]
+ tools/polly/lib/Transform/DeLICM.cpp [3:4]
+ tools/polly/lib/Transform/DeadCodeElimination.cpp [3:4]
+ tools/polly/lib/Transform/FlattenAlgo.cpp [3:4]
+ tools/polly/lib/Transform/FlattenSchedule.cpp [3:4]
+ tools/polly/lib/Transform/ForwardOpTree.cpp [3:4]
+ tools/polly/lib/Transform/ManualOptimizer.cpp [3:4]
+ tools/polly/lib/Transform/MatmulOptimizer.cpp [3:4]
+ tools/polly/lib/Transform/MaximalStaticExpansion.cpp [3:4]
+ tools/polly/lib/Transform/ScheduleOptimizer.cpp [3:4]
+ tools/polly/lib/Transform/ScheduleTreeTransform.cpp [3:4]
+ tools/polly/lib/Transform/ScopInliner.cpp [3:4]
+ tools/polly/lib/Transform/Simplify.cpp [3:4]
+ tools/polly/lib/Transform/ZoneAlgo.cpp [3:4]
+ tools/remarks-shlib/libremarks.cpp [3:4]
+ tools/sancov/sancov.cpp [3:4]
+ tools/sanstats/sanstats.cpp [3:4]
+ tools/verify-uselistorder/verify-uselistorder.cpp [3:4]
+ tools/yaml2obj/yaml2obj.cpp [3:4]
+ utils/TableGen/AsmMatcherEmitter.cpp [3:4]
+ utils/TableGen/AsmWriterEmitter.cpp [3:4]
+ utils/TableGen/AsmWriterInst.cpp [3:4]
+ utils/TableGen/AsmWriterInst.h [3:4]
+ utils/TableGen/Attributes.cpp [3:4]
+ utils/TableGen/CTagsEmitter.cpp [3:4]
+ utils/TableGen/CallingConvEmitter.cpp [3:4]
+ utils/TableGen/CodeEmitterGen.cpp [3:4]
+ utils/TableGen/CodeGenDAGPatterns.cpp [3:4]
+ utils/TableGen/CodeGenDAGPatterns.h [3:4]
+ utils/TableGen/CodeGenHwModes.cpp [3:4]
+ utils/TableGen/CodeGenHwModes.h [3:4]
+ utils/TableGen/CodeGenInstruction.cpp [3:4]
+ utils/TableGen/CodeGenInstruction.h [3:4]
+ utils/TableGen/CodeGenIntrinsics.h [3:4]
+ utils/TableGen/CodeGenMapTable.cpp [3:4]
+ utils/TableGen/CodeGenRegisters.cpp [3:4]
+ utils/TableGen/CodeGenRegisters.h [3:4]
+ utils/TableGen/CodeGenSchedule.cpp [3:4]
+ utils/TableGen/CodeGenSchedule.h [3:4]
+ utils/TableGen/CodeGenTarget.cpp [3:4]
+ utils/TableGen/CodeGenTarget.h [3:4]
+ utils/TableGen/CompressInstEmitter.cpp [3:4]
+ utils/TableGen/DAGISelEmitter.cpp [3:4]
+ utils/TableGen/DAGISelMatcher.cpp [3:4]
+ utils/TableGen/DAGISelMatcher.h [3:4]
+ utils/TableGen/DAGISelMatcherEmitter.cpp [3:4]
+ utils/TableGen/DAGISelMatcherGen.cpp [3:4]
+ utils/TableGen/DAGISelMatcherOpt.cpp [3:4]
+ utils/TableGen/DFAEmitter.cpp [3:4]
+ utils/TableGen/DFAEmitter.h [3:4]
+ utils/TableGen/DFAPacketizerEmitter.cpp [3:4]
+ utils/TableGen/DXILEmitter.cpp [3:4]
+ utils/TableGen/DecoderEmitter.cpp [3:4]
+ utils/TableGen/DirectiveEmitter.cpp [3:4]
+ utils/TableGen/DisassemblerEmitter.cpp [3:4]
+ utils/TableGen/ExegesisEmitter.cpp [3:4]
+ utils/TableGen/FastISelEmitter.cpp [3:4]
+ utils/TableGen/GICombinerEmitter.cpp [3:4]
+ utils/TableGen/GlobalISel/CodeExpander.cpp [3:4]
+ utils/TableGen/GlobalISel/CodeExpander.h [3:4]
+ utils/TableGen/GlobalISel/CodeExpansions.h [3:4]
+ utils/TableGen/GlobalISel/GIMatchDag.cpp [3:4]
+ utils/TableGen/GlobalISel/GIMatchDag.h [3:4]
+ utils/TableGen/GlobalISel/GIMatchDagEdge.cpp [3:4]
+ utils/TableGen/GlobalISel/GIMatchDagEdge.h [3:4]
+ utils/TableGen/GlobalISel/GIMatchDagInstr.cpp [3:4]
+ utils/TableGen/GlobalISel/GIMatchDagInstr.h [3:4]
+ utils/TableGen/GlobalISel/GIMatchDagOperands.cpp [3:4]
+ utils/TableGen/GlobalISel/GIMatchDagOperands.h [3:4]
+ utils/TableGen/GlobalISel/GIMatchDagPredicate.cpp [3:4]
+ utils/TableGen/GlobalISel/GIMatchDagPredicate.h [3:4]
+ utils/TableGen/GlobalISel/GIMatchDagPredicateDependencyEdge.cpp [3:4]
+ utils/TableGen/GlobalISel/GIMatchDagPredicateDependencyEdge.h [3:4]
+ utils/TableGen/GlobalISel/GIMatchTree.cpp [3:4]
+ utils/TableGen/GlobalISel/GIMatchTree.h [3:4]
+ utils/TableGen/GlobalISelEmitter.cpp [3:4]
+ utils/TableGen/InfoByHwMode.cpp [3:4]
+ utils/TableGen/InfoByHwMode.h [3:4]
+ utils/TableGen/InstrDocsEmitter.cpp [3:4]
+ utils/TableGen/InstrInfoEmitter.cpp [3:4]
+ utils/TableGen/IntrinsicEmitter.cpp [3:4]
+ utils/TableGen/OptEmitter.cpp [3:4]
+ utils/TableGen/OptEmitter.h [3:4]
+ utils/TableGen/OptParserEmitter.cpp [3:4]
+ utils/TableGen/OptRSTEmitter.cpp [3:4]
+ utils/TableGen/PredicateExpander.cpp [3:4]
+ utils/TableGen/PredicateExpander.h [3:4]
+ utils/TableGen/PseudoLoweringEmitter.cpp [3:4]
+ utils/TableGen/RISCVTargetDefEmitter.cpp [3:4]
+ utils/TableGen/RegisterBankEmitter.cpp [3:4]
+ utils/TableGen/RegisterInfoEmitter.cpp [3:4]
+ utils/TableGen/SDNodeProperties.cpp [3:4]
+ utils/TableGen/SDNodeProperties.h [3:4]
+ utils/TableGen/SearchableTableEmitter.cpp [3:4]
+ utils/TableGen/SequenceToOffsetTable.h [3:4]
+ utils/TableGen/SubtargetEmitter.cpp [3:4]
+ utils/TableGen/SubtargetFeatureInfo.cpp [3:4]
+ utils/TableGen/SubtargetFeatureInfo.h [3:4]
+ utils/TableGen/TableGen.cpp [3:4]
+ utils/TableGen/TableGenBackends.h [3:4]
+ utils/TableGen/Types.cpp [3:4]
+ utils/TableGen/Types.h [3:4]
+ utils/TableGen/VarLenCodeEmitterGen.cpp [3:4]
+ utils/TableGen/VarLenCodeEmitterGen.h [3:4]
+ utils/TableGen/WebAssemblyDisassemblerEmitter.cpp [3:4]
+ utils/TableGen/WebAssemblyDisassemblerEmitter.h [3:4]
+ utils/TableGen/X86DisassemblerShared.h [3:4]
+ utils/TableGen/X86DisassemblerTables.cpp [3:4]
+ utils/TableGen/X86DisassemblerTables.h [3:4]
+ utils/TableGen/X86EVEX2VEXTablesEmitter.cpp [3:4]
+ utils/TableGen/X86FoldTablesEmitter.cpp [3:4]
+ utils/TableGen/X86MnemonicTables.cpp [3:4]
+ utils/TableGen/X86ModRMFilters.cpp [3:4]
+ utils/TableGen/X86ModRMFilters.h [3:4]
+ utils/TableGen/X86RecognizableInstr.cpp [3:4]
+ utils/TableGen/X86RecognizableInstr.h [3:4]
+ Scancode info:
+ Original SPDX id: LLVM-exception
+ Score : 100.00
+ Match type : NOTICE
+ Links : http://llvm.org/foundation/relicensing/LICENSE.txt, https://spdx.org/licenses/LLVM-exception
+ Files with this license:
+ include/llvm-c/DebugInfo.h [10:11]
+ include/llvm/ADT/APFixedPoint.h [10:11]
+ include/llvm/ADT/APFloat.h [10:11]
+ include/llvm/ADT/APInt.h [10:11]
+ include/llvm/ADT/APSInt.h [10:11]
+ include/llvm/ADT/AddressRanges.h [10:11]
+ include/llvm/ADT/AllocatorList.h [10:11]
+ include/llvm/ADT/Any.h [10:11]
+ include/llvm/ADT/ArrayRef.h [10:11]
+ include/llvm/ADT/BitVector.h [10:11]
+ include/llvm/ADT/Bitfields.h [10:11]
+ include/llvm/ADT/BitmaskEnum.h [10:11]
+ include/llvm/ADT/BreadthFirstIterator.h [10:11]
+ include/llvm/ADT/CachedHashString.h [10:11]
+ include/llvm/ADT/CoalescingBitVector.h [10:11]
+ include/llvm/ADT/CombinationGenerator.h [10:11]
+ include/llvm/ADT/DAGDeltaAlgorithm.h [10:11]
+ include/llvm/ADT/DeltaAlgorithm.h [10:11]
+ include/llvm/ADT/DenseMap.h [10:11]
+ include/llvm/ADT/DenseMapInfo.h [10:11]
+ include/llvm/ADT/DenseSet.h [10:11]
+ include/llvm/ADT/DepthFirstIterator.h [10:11]
+ include/llvm/ADT/DirectedGraph.h [10:11]
+ include/llvm/ADT/EnumeratedArray.h [10:11]
+ include/llvm/ADT/EpochTracker.h [10:11]
+ include/llvm/ADT/EquivalenceClasses.h [10:11]
+ include/llvm/ADT/FloatingPointMode.h [10:11]
+ include/llvm/ADT/FoldingSet.h [10:11]
+ include/llvm/ADT/FunctionExtras.h [10:11]
+ include/llvm/ADT/GenericCycleImpl.h [10:11]
+ include/llvm/ADT/GenericCycleInfo.h [10:11]
+ include/llvm/ADT/GenericSSAContext.h [10:11]
+ include/llvm/ADT/GenericUniformityImpl.h [10:11]
+ include/llvm/ADT/GenericUniformityInfo.h [10:11]
+ include/llvm/ADT/GraphTraits.h [10:11]
+ include/llvm/ADT/Hashing.h [10:11]
+ include/llvm/ADT/ImmutableList.h [10:11]
+ include/llvm/ADT/ImmutableMap.h [10:11]
+ include/llvm/ADT/ImmutableSet.h [10:11]
+ include/llvm/ADT/IndexedMap.h [10:11]
+ include/llvm/ADT/IntEqClasses.h [10:11]
+ include/llvm/ADT/IntervalMap.h [10:11]
+ include/llvm/ADT/IntervalTree.h [10:11]
+ include/llvm/ADT/IntrusiveRefCntPtr.h [10:11]
+ include/llvm/ADT/MapVector.h [10:11]
+ include/llvm/ADT/None.h [10:11]
+ include/llvm/ADT/Optional.h [10:11]
+ include/llvm/ADT/PackedVector.h [10:11]
+ include/llvm/ADT/PointerEmbeddedInt.h [10:11]
+ include/llvm/ADT/PointerIntPair.h [10:11]
+ include/llvm/ADT/PointerSumType.h [10:11]
+ include/llvm/ADT/PointerUnion.h [10:11]
+ include/llvm/ADT/PostOrderIterator.h [10:11]
+ include/llvm/ADT/PriorityQueue.h [10:11]
+ include/llvm/ADT/PriorityWorklist.h [10:11]
+ include/llvm/ADT/SCCIterator.h [10:11]
+ include/llvm/ADT/STLExtras.h [10:11]
+ include/llvm/ADT/STLForwardCompat.h [10:11]
+ include/llvm/ADT/STLFunctionalExtras.h [10:11]
+ include/llvm/ADT/ScopeExit.h [10:11]
+ include/llvm/ADT/ScopedHashTable.h [10:11]
+ include/llvm/ADT/Sequence.h [10:11]
+ include/llvm/ADT/SetOperations.h [10:11]
+ include/llvm/ADT/SetVector.h [10:11]
+ include/llvm/ADT/SmallBitVector.h [10:11]
+ include/llvm/ADT/SmallPtrSet.h [10:11]
+ include/llvm/ADT/SmallSet.h [10:11]
+ include/llvm/ADT/SmallString.h [10:11]
+ include/llvm/ADT/SmallVector.h [10:11]
+ include/llvm/ADT/SparseBitVector.h [10:11]
+ include/llvm/ADT/SparseMultiSet.h [10:11]
+ include/llvm/ADT/SparseSet.h [10:11]
+ include/llvm/ADT/Statistic.h [10:11]
+ include/llvm/ADT/StringExtras.h [10:11]
+ include/llvm/ADT/StringMap.h [10:11]
+ include/llvm/ADT/StringMapEntry.h [10:11]
+ include/llvm/ADT/StringRef.h [10:11]
+ include/llvm/ADT/StringSet.h [10:11]
+ include/llvm/ADT/StringSwitch.h [10:11]
+ include/llvm/ADT/TinyPtrVector.h [10:11]
+ include/llvm/ADT/Triple.h [10:11]
+ include/llvm/ADT/Twine.h [10:11]
+ include/llvm/ADT/TypeSwitch.h [10:11]
+ include/llvm/ADT/Uniformity.h [10:11]
+ include/llvm/ADT/UniqueVector.h [10:11]
+ include/llvm/ADT/bit.h [10:11]
+ include/llvm/ADT/edit_distance.h [10:11]
+ include/llvm/ADT/fallible_iterator.h [10:11]
+ include/llvm/ADT/identity.h [10:11]
+ include/llvm/ADT/ilist.h [10:11]
+ include/llvm/ADT/ilist_base.h [10:11]
+ include/llvm/ADT/ilist_iterator.h [10:11]
+ include/llvm/ADT/ilist_node.h [10:11]
+ include/llvm/ADT/ilist_node_base.h [10:11]
+ include/llvm/ADT/ilist_node_options.h [10:11]
+ include/llvm/ADT/iterator.h [10:11]
+ include/llvm/ADT/iterator_range.h [10:11]
+ include/llvm/ADT/simple_ilist.h [10:11]
+ include/llvm/Analysis/AliasAnalysis.h [10:11]
+ include/llvm/Analysis/AliasAnalysisEvaluator.h [10:11]
+ include/llvm/Analysis/AliasSetTracker.h [10:11]
+ include/llvm/Analysis/AssumeBundleQueries.h [10:11]
+ include/llvm/Analysis/AssumptionCache.h [10:11]
+ include/llvm/Analysis/BasicAliasAnalysis.h [10:11]
+ include/llvm/Analysis/BlockFrequencyInfo.h [10:11]
+ include/llvm/Analysis/BlockFrequencyInfoImpl.h [10:11]
+ include/llvm/Analysis/BranchProbabilityInfo.h [10:11]
+ include/llvm/Analysis/CFG.h [10:11]
+ include/llvm/Analysis/CFGPrinter.h [10:11]
+ include/llvm/Analysis/CFGSCCPrinter.h [10:11]
+ include/llvm/Analysis/CGSCCPassManager.h [10:11]
+ include/llvm/Analysis/CallGraph.h [10:11]
+ include/llvm/Analysis/CallGraphSCCPass.h [10:11]
+ include/llvm/Analysis/CallPrinter.h [10:11]
+ include/llvm/Analysis/CaptureTracking.h [10:11]
+ include/llvm/Analysis/CmpInstAnalysis.h [10:11]
+ include/llvm/Analysis/CodeMetrics.h [10:11]
+ include/llvm/Analysis/ConstantFolding.h [10:11]
+ include/llvm/Analysis/ConstraintSystem.h [10:11]
+ include/llvm/Analysis/CostModel.h [10:11]
+ include/llvm/Analysis/CycleAnalysis.h [10:11]
+ include/llvm/Analysis/DDG.h [10:11]
+ include/llvm/Analysis/DDGPrinter.h [10:11]
+ include/llvm/Analysis/DOTGraphTraitsPass.h [10:11]
+ include/llvm/Analysis/Delinearization.h [10:11]
+ include/llvm/Analysis/DemandedBits.h [10:11]
+ include/llvm/Analysis/DependenceAnalysis.h [10:11]
+ include/llvm/Analysis/DependenceGraphBuilder.h [10:11]
+ include/llvm/Analysis/DivergenceAnalysis.h [10:11]
+ include/llvm/Analysis/DomPrinter.h [10:11]
+ include/llvm/Analysis/DomTreeUpdater.h [10:11]
+ include/llvm/Analysis/DominanceFrontier.h [10:11]
+ include/llvm/Analysis/DominanceFrontierImpl.h [10:11]
+ include/llvm/Analysis/EHPersonalities.h [10:11]
+ include/llvm/Analysis/FunctionPropertiesAnalysis.h [10:11]
+ include/llvm/Analysis/GlobalsModRef.h [10:11]
+ include/llvm/Analysis/GuardUtils.h [10:11]
+ include/llvm/Analysis/HeatUtils.h [10:11]
+ include/llvm/Analysis/IRSimilarityIdentifier.h [10:11]
+ include/llvm/Analysis/IVDescriptors.h [10:11]
+ include/llvm/Analysis/IVUsers.h [10:11]
+ include/llvm/Analysis/IndirectCallPromotionAnalysis.h [10:11]
+ include/llvm/Analysis/IndirectCallVisitor.h [10:11]
+ include/llvm/Analysis/InlineAdvisor.h [10:11]
+ include/llvm/Analysis/InlineCost.h [10:11]
+ include/llvm/Analysis/InlineModelFeatureMaps.h [10:11]
+ include/llvm/Analysis/InlineOrder.h [10:11]
+ include/llvm/Analysis/InlineSizeEstimatorAnalysis.h [10:11]
+ include/llvm/Analysis/InstCount.h [10:11]
+ include/llvm/Analysis/InstSimplifyFolder.h [10:11]
+ include/llvm/Analysis/InstructionPrecedenceTracking.h [10:11]
+ include/llvm/Analysis/InstructionSimplify.h [10:11]
+ include/llvm/Analysis/Interval.h [10:11]
+ include/llvm/Analysis/IntervalIterator.h [10:11]
+ include/llvm/Analysis/IntervalPartition.h [10:11]
+ include/llvm/Analysis/IteratedDominanceFrontier.h [10:11]
+ include/llvm/Analysis/LazyBlockFrequencyInfo.h [10:11]
+ include/llvm/Analysis/LazyBranchProbabilityInfo.h [10:11]
+ include/llvm/Analysis/LazyCallGraph.h [10:11]
+ include/llvm/Analysis/LazyValueInfo.h [10:11]
+ include/llvm/Analysis/LegacyDivergenceAnalysis.h [10:11]
+ include/llvm/Analysis/Lint.h [10:11]
+ include/llvm/Analysis/Loads.h [10:11]
+ include/llvm/Analysis/LoopAccessAnalysis.h [10:11]
+ include/llvm/Analysis/LoopAnalysisManager.h [10:11]
+ include/llvm/Analysis/LoopCacheAnalysis.h [10:11]
+ include/llvm/Analysis/LoopInfo.h [10:11]
+ include/llvm/Analysis/LoopInfoImpl.h [10:11]
+ include/llvm/Analysis/LoopIterator.h [10:11]
+ include/llvm/Analysis/LoopNestAnalysis.h [10:11]
+ include/llvm/Analysis/LoopPass.h [10:11]
+ include/llvm/Analysis/LoopUnrollAnalyzer.h [10:11]
+ include/llvm/Analysis/MLInlineAdvisor.h [10:11]
+ include/llvm/Analysis/MLModelRunner.h [10:11]
+ include/llvm/Analysis/MemDerefPrinter.h [10:11]
+ include/llvm/Analysis/MemoryBuiltins.h [10:11]
+ include/llvm/Analysis/MemoryDependenceAnalysis.h [10:11]
+ include/llvm/Analysis/MemoryLocation.h [10:11]
+ include/llvm/Analysis/MemoryProfileInfo.h [10:11]
+ include/llvm/Analysis/MemorySSA.h [10:11]
+ include/llvm/Analysis/MemorySSAUpdater.h [10:11]
+ include/llvm/Analysis/ModelUnderTrainingRunner.h [10:11]
+ include/llvm/Analysis/ModuleDebugInfoPrinter.h [10:11]
+ include/llvm/Analysis/ModuleSummaryAnalysis.h [10:11]
+ include/llvm/Analysis/MustExecute.h [10:11]
+ include/llvm/Analysis/NoInferenceModelRunner.h [10:11]
+ include/llvm/Analysis/ObjCARCAliasAnalysis.h [10:11]
+ include/llvm/Analysis/ObjCARCAnalysisUtils.h [10:11]
+ include/llvm/Analysis/ObjCARCInstKind.h [10:11]
+ include/llvm/Analysis/ObjCARCUtil.h [10:11]
+ include/llvm/Analysis/OptimizationRemarkEmitter.h [10:11]
+ include/llvm/Analysis/OverflowInstAnalysis.h [10:11]
+ include/llvm/Analysis/PHITransAddr.h [10:11]
+ include/llvm/Analysis/Passes.h [10:11]
+ include/llvm/Analysis/PhiValues.h [10:11]
+ include/llvm/Analysis/PostDominators.h [10:11]
+ include/llvm/Analysis/ProfileSummaryInfo.h [10:11]
+ include/llvm/Analysis/PtrUseVisitor.h [10:11]
+ include/llvm/Analysis/RegionInfo.h [10:11]
+ include/llvm/Analysis/RegionInfoImpl.h [10:11]
+ include/llvm/Analysis/RegionIterator.h [10:11]
+ include/llvm/Analysis/RegionPass.h [10:11]
+ include/llvm/Analysis/RegionPrinter.h [10:11]
+ include/llvm/Analysis/ReleaseModeModelRunner.h [10:11]
+ include/llvm/Analysis/ReplayInlineAdvisor.h [10:11]
+ include/llvm/Analysis/ScalarEvolution.h [10:11]
+ include/llvm/Analysis/ScalarEvolutionAliasAnalysis.h [10:11]
+ include/llvm/Analysis/ScalarEvolutionDivision.h [10:11]
+ include/llvm/Analysis/ScalarEvolutionExpressions.h [10:11]
+ include/llvm/Analysis/ScalarEvolutionNormalization.h [10:11]
+ include/llvm/Analysis/ScalarFuncs.def [3:4]
+ include/llvm/Analysis/ScopedNoAliasAA.h [10:11]
+ include/llvm/Analysis/SparsePropagation.h [10:11]
+ include/llvm/Analysis/StackLifetime.h [10:11]
+ include/llvm/Analysis/StackSafetyAnalysis.h [10:11]
+ include/llvm/Analysis/SyncDependenceAnalysis.h [10:11]
+ include/llvm/Analysis/SyntheticCountsUtils.h [10:11]
+ include/llvm/Analysis/TargetFolder.h [10:11]
+ include/llvm/Analysis/TargetLibraryInfo.def [3:4]
+ include/llvm/Analysis/TargetLibraryInfo.h [10:11]
+ include/llvm/Analysis/TargetTransformInfo.h [10:11]
+ include/llvm/Analysis/TargetTransformInfoImpl.h [10:11]
+ include/llvm/Analysis/TensorSpec.h [10:11]
+ include/llvm/Analysis/Trace.h [10:11]
+ include/llvm/Analysis/TypeBasedAliasAnalysis.h [10:11]
+ include/llvm/Analysis/TypeMetadataUtils.h [10:11]
+ include/llvm/Analysis/UniformityAnalysis.h [10:11]
+ include/llvm/Analysis/Utils/ImportedFunctionsInliningStatistics.h [10:11]
+ include/llvm/Analysis/Utils/Local.h [10:11]
+ include/llvm/Analysis/Utils/TFUtils.h [10:11]
+ include/llvm/Analysis/Utils/TrainingLogger.h [10:11]
+ include/llvm/Analysis/ValueLattice.h [10:11]
+ include/llvm/Analysis/ValueLatticeUtils.h [10:11]
+ include/llvm/Analysis/ValueTracking.h [10:11]
+ include/llvm/Analysis/VecFuncs.def [3:4]
+ include/llvm/Analysis/VectorUtils.h [10:11]
+ include/llvm/AsmParser/LLLexer.h [10:11]
+ include/llvm/AsmParser/LLParser.h [10:11]
+ include/llvm/AsmParser/LLToken.h [10:11]
+ include/llvm/AsmParser/Parser.h [10:11]
+ include/llvm/AsmParser/SlotMapping.h [10:11]
+ include/llvm/BinaryFormat/AMDGPUMetadataVerifier.h [10:11]
+ include/llvm/BinaryFormat/COFF.h [10:11]
+ include/llvm/BinaryFormat/DXContainer.h [10:11]
+ include/llvm/BinaryFormat/Dwarf.def [3:4]
+ include/llvm/BinaryFormat/Dwarf.h [10:11]
+ include/llvm/BinaryFormat/ELF.h [10:11]
+ include/llvm/BinaryFormat/GOFF.h [10:11]
+ include/llvm/BinaryFormat/MachO.def [3:4]
+ include/llvm/BinaryFormat/MachO.h [10:11]
+ include/llvm/BinaryFormat/Magic.h [10:11]
+ include/llvm/BinaryFormat/Minidump.h [10:11]
+ include/llvm/BinaryFormat/MinidumpConstants.def [3:4]
+ include/llvm/BinaryFormat/MsgPack.def [3:4]
+ include/llvm/BinaryFormat/MsgPack.h [10:11]
+ include/llvm/BinaryFormat/MsgPackDocument.h [10:11]
+ include/llvm/BinaryFormat/MsgPackReader.h [10:11]
+ include/llvm/BinaryFormat/MsgPackWriter.h [10:11]
+ include/llvm/BinaryFormat/Swift.def [3:4]
+ include/llvm/BinaryFormat/Swift.h [10:11]
+ include/llvm/BinaryFormat/Wasm.h [10:11]
+ include/llvm/BinaryFormat/WasmTraits.h [10:11]
+ include/llvm/BinaryFormat/XCOFF.h [10:11]
+ include/llvm/Bitcode/BitcodeAnalyzer.h [10:11]
+ include/llvm/Bitcode/BitcodeCommon.h [10:11]
+ include/llvm/Bitcode/BitcodeConvenience.h [10:11]
+ include/llvm/Bitcode/BitcodeReader.h [10:11]
+ include/llvm/Bitcode/BitcodeWriter.h [10:11]
+ include/llvm/Bitcode/BitcodeWriterPass.h [10:11]
+ include/llvm/Bitcode/LLVMBitCodes.h [10:11]
+ include/llvm/Bitstream/BitCodeEnums.h [10:11]
+ include/llvm/Bitstream/BitCodes.h [10:11]
+ include/llvm/Bitstream/BitstreamReader.h [10:11]
+ include/llvm/Bitstream/BitstreamWriter.h [10:11]
+ include/llvm/CodeGen/AccelTable.h [10:11]
+ include/llvm/CodeGen/Analysis.h [10:11]
+ include/llvm/CodeGen/AntiDepBreaker.h [10:11]
+ include/llvm/CodeGen/AsmPrinter.h [10:11]
+ include/llvm/CodeGen/AsmPrinterHandler.h [10:11]
+ include/llvm/CodeGen/AtomicExpandUtils.h [10:11]
+ include/llvm/CodeGen/BasicBlockSectionUtils.h [10:11]
+ include/llvm/CodeGen/BasicBlockSectionsProfileReader.h [10:11]
+ include/llvm/CodeGen/BasicTTIImpl.h [10:11]
+ include/llvm/CodeGen/CFIFixup.h [10:11]
+ include/llvm/CodeGen/CSEConfigBase.h [10:11]
+ include/llvm/CodeGen/CalcSpillWeights.h [10:11]
+ include/llvm/CodeGen/CallingConvLower.h [10:11]
+ include/llvm/CodeGen/CodeGenCommonISel.h [10:11]
+ include/llvm/CodeGen/CodeGenPassBuilder.h [10:11]
+ include/llvm/CodeGen/CommandFlags.h [10:11]
+ include/llvm/CodeGen/ComplexDeinterleavingPass.h [10:11]
+ include/llvm/CodeGen/CostTable.h [10:11]
+ include/llvm/CodeGen/DAGCombine.h [10:11]
+ include/llvm/CodeGen/DFAPacketizer.h [10:11]
+ include/llvm/CodeGen/DIE.h [10:11]
+ include/llvm/CodeGen/DIEValue.def [3:4]
+ include/llvm/CodeGen/DbgEntityHistoryCalculator.h [10:11]
+ include/llvm/CodeGen/DebugHandlerBase.h [10:11]
+ include/llvm/CodeGen/DwarfStringPoolEntry.h [10:11]
+ include/llvm/CodeGen/EdgeBundles.h [10:11]
+ include/llvm/CodeGen/ExecutionDomainFix.h [10:11]
+ include/llvm/CodeGen/ExpandReductions.h [10:11]
+ include/llvm/CodeGen/ExpandVectorPredication.h [10:11]
+ include/llvm/CodeGen/FastISel.h [10:11]
+ include/llvm/CodeGen/FaultMaps.h [10:11]
+ include/llvm/CodeGen/FunctionLoweringInfo.h [10:11]
+ include/llvm/CodeGen/GCMetadata.h [10:11]
+ include/llvm/CodeGen/GCMetadataPrinter.h [10:11]
+ include/llvm/CodeGen/GlobalISel/CSEInfo.h [10:11]
+ include/llvm/CodeGen/GlobalISel/CSEMIRBuilder.h [10:11]
+ include/llvm/CodeGen/GlobalISel/CallLowering.h [10:11]
+ include/llvm/CodeGen/GlobalISel/Combiner.h [10:11]
+ include/llvm/CodeGen/GlobalISel/CombinerHelper.h [10:11]
+ include/llvm/CodeGen/GlobalISel/CombinerInfo.h [10:11]
+ include/llvm/CodeGen/GlobalISel/GISelChangeObserver.h [10:11]
+ include/llvm/CodeGen/GlobalISel/GISelKnownBits.h [10:11]
+ include/llvm/CodeGen/GlobalISel/GISelWorkList.h [10:11]
+ include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h [10:11]
+ include/llvm/CodeGen/GlobalISel/IRTranslator.h [10:11]
+ include/llvm/CodeGen/GlobalISel/InlineAsmLowering.h [10:11]
+ include/llvm/CodeGen/GlobalISel/InstructionSelect.h [10:11]
+ include/llvm/CodeGen/GlobalISel/InstructionSelector.h [10:11]
+ include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h [10:11]
+ include/llvm/CodeGen/GlobalISel/LegacyLegalizerInfo.h [10:11]
+ include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h [10:11]
+ include/llvm/CodeGen/GlobalISel/Legalizer.h [10:11]
+ include/llvm/CodeGen/GlobalISel/LegalizerHelper.h [10:11]
+ include/llvm/CodeGen/GlobalISel/LegalizerInfo.h [10:11]
+ include/llvm/CodeGen/GlobalISel/LoadStoreOpt.h [10:11]
+ include/llvm/CodeGen/GlobalISel/Localizer.h [10:11]
+ include/llvm/CodeGen/GlobalISel/LostDebugLocObserver.h [10:11]
+ include/llvm/CodeGen/GlobalISel/MIPatternMatch.h [10:11]
+ include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h [10:11]
+ include/llvm/CodeGen/GlobalISel/RegBankSelect.h [10:11]
+ include/llvm/CodeGen/GlobalISel/Utils.h [10:11]
+ include/llvm/CodeGen/ISDOpcodes.h [10:11]
+ include/llvm/CodeGen/IndirectThunks.h [10:11]
+ include/llvm/CodeGen/IntrinsicLowering.h [10:11]
+ include/llvm/CodeGen/LatencyPriorityQueue.h [10:11]
+ include/llvm/CodeGen/LexicalScopes.h [10:11]
+ include/llvm/CodeGen/LinkAllAsmWriterComponents.h [10:11]
+ include/llvm/CodeGen/LinkAllCodegenComponents.h [10:11]
+ include/llvm/CodeGen/LiveInterval.h [10:11]
+ include/llvm/CodeGen/LiveIntervalCalc.h [10:11]
+ include/llvm/CodeGen/LiveIntervalUnion.h [10:11]
+ include/llvm/CodeGen/LiveIntervals.h [10:11]
+ include/llvm/CodeGen/LivePhysRegs.h [10:11]
+ include/llvm/CodeGen/LiveRangeCalc.h [10:11]
+ include/llvm/CodeGen/LiveRangeEdit.h [10:11]
+ include/llvm/CodeGen/LiveRegMatrix.h [10:11]
+ include/llvm/CodeGen/LiveRegUnits.h [10:11]
+ include/llvm/CodeGen/LiveStacks.h [10:11]
+ include/llvm/CodeGen/LiveVariables.h [10:11]
+ include/llvm/CodeGen/LoopTraversal.h [10:11]
+ include/llvm/CodeGen/LowLevelType.h [10:11]
+ include/llvm/CodeGen/MBFIWrapper.h [10:11]
+ include/llvm/CodeGen/MIRFSDiscriminator.h [10:11]
+ include/llvm/CodeGen/MIRFormatter.h [10:11]
+ include/llvm/CodeGen/MIRParser/MIParser.h [10:11]
+ include/llvm/CodeGen/MIRParser/MIRParser.h [10:11]
+ include/llvm/CodeGen/MIRPrinter.h [10:11]
+ include/llvm/CodeGen/MIRSampleProfile.h [10:11]
+ include/llvm/CodeGen/MIRYamlMapping.h [10:11]
+ include/llvm/CodeGen/MachORelocation.h [10:11]
+ include/llvm/CodeGen/MachineBasicBlock.h [10:11]
+ include/llvm/CodeGen/MachineBlockFrequencyInfo.h [10:11]
+ include/llvm/CodeGen/MachineBranchProbabilityInfo.h [10:11]
+ include/llvm/CodeGen/MachineCFGPrinter.h [10:11]
+ include/llvm/CodeGen/MachineCombinerPattern.h [11:12]
+ include/llvm/CodeGen/MachineConstantPool.h [10:11]
+ include/llvm/CodeGen/MachineCycleAnalysis.h [10:11]
+ include/llvm/CodeGen/MachineDominanceFrontier.h [10:11]
+ include/llvm/CodeGen/MachineDominators.h [10:11]
+ include/llvm/CodeGen/MachineFrameInfo.h [10:11]
+ include/llvm/CodeGen/MachineFunction.h [10:11]
+ include/llvm/CodeGen/MachineFunctionPass.h [10:11]
+ include/llvm/CodeGen/MachineInstr.h [10:11]
+ include/llvm/CodeGen/MachineInstrBuilder.h [10:11]
+ include/llvm/CodeGen/MachineInstrBundle.h [10:11]
+ include/llvm/CodeGen/MachineInstrBundleIterator.h [10:11]
+ include/llvm/CodeGen/MachineJumpTableInfo.h [10:11]
+ include/llvm/CodeGen/MachineLoopInfo.h [10:11]
+ include/llvm/CodeGen/MachineLoopUtils.h [10:11]
+ include/llvm/CodeGen/MachineMemOperand.h [10:11]
+ include/llvm/CodeGen/MachineModuleInfo.h [10:11]
+ include/llvm/CodeGen/MachineModuleInfoImpls.h [10:11]
+ include/llvm/CodeGen/MachineModuleSlotTracker.h [10:11]
+ include/llvm/CodeGen/MachineOperand.h [10:11]
+ include/llvm/CodeGen/MachineOutliner.h [10:11]
+ include/llvm/CodeGen/MachinePassManager.h [10:11]
+ include/llvm/CodeGen/MachinePassRegistry.def [3:4]
+ include/llvm/CodeGen/MachinePassRegistry.h [10:11]
+ include/llvm/CodeGen/MachinePipeliner.h [10:11]
+ include/llvm/CodeGen/MachinePostDominators.h [10:11]
+ include/llvm/CodeGen/MachineRegionInfo.h [10:11]
+ include/llvm/CodeGen/MachineRegisterInfo.h [10:11]
+ include/llvm/CodeGen/MachineSSAContext.h [10:11]
+ include/llvm/CodeGen/MachineSSAUpdater.h [10:11]
+ include/llvm/CodeGen/MachineScheduler.h [10:11]
+ include/llvm/CodeGen/MachineSizeOpts.h [10:11]
+ include/llvm/CodeGen/MachineStableHash.h [10:11]
+ include/llvm/CodeGen/MachineTraceMetrics.h [10:11]
+ include/llvm/CodeGen/MachineUniformityAnalysis.h [10:11]
+ include/llvm/CodeGen/MacroFusion.h [10:11]
+ include/llvm/CodeGen/ModuloSchedule.h [10:11]
+ include/llvm/CodeGen/MultiHazardRecognizer.h [10:11]
+ include/llvm/CodeGen/NonRelocatableStringpool.h [10:11]
+ include/llvm/CodeGen/PBQP/CostAllocator.h [10:11]
+ include/llvm/CodeGen/PBQP/Graph.h [10:11]
+ include/llvm/CodeGen/PBQP/Math.h [10:11]
+ include/llvm/CodeGen/PBQP/ReductionRules.h [10:11]
+ include/llvm/CodeGen/PBQP/Solution.h [10:11]
+ include/llvm/CodeGen/PBQPRAConstraint.h [10:11]
+ include/llvm/CodeGen/ParallelCG.h [10:11]
+ include/llvm/CodeGen/Passes.h [10:11]
+ include/llvm/CodeGen/PreISelIntrinsicLowering.h [10:11]
+ include/llvm/CodeGen/PseudoSourceValue.h [10:11]
+ include/llvm/CodeGen/RDFGraph.h [10:11]
+ include/llvm/CodeGen/RDFLiveness.h [10:11]
+ include/llvm/CodeGen/RDFRegisters.h [10:11]
+ include/llvm/CodeGen/ReachingDefAnalysis.h [10:11]
+ include/llvm/CodeGen/RegAllocCommon.h [10:11]
+ include/llvm/CodeGen/RegAllocPBQP.h [10:11]
+ include/llvm/CodeGen/RegAllocRegistry.h [10:11]
+ include/llvm/CodeGen/Register.h [10:11]
+ include/llvm/CodeGen/RegisterBank.h [10:11]
+ include/llvm/CodeGen/RegisterBankInfo.h [10:11]
+ include/llvm/CodeGen/RegisterClassInfo.h [10:11]
+ include/llvm/CodeGen/RegisterPressure.h [10:11]
+ include/llvm/CodeGen/RegisterScavenging.h [10:11]
+ include/llvm/CodeGen/RegisterUsageInfo.h [10:11]
+ include/llvm/CodeGen/ReplaceWithVeclib.h [10:11]
+ include/llvm/CodeGen/ResourcePriorityQueue.h [10:11]
+ include/llvm/CodeGen/RuntimeLibcalls.h [10:11]
+ include/llvm/CodeGen/SDNodeProperties.td [3:4]
+ include/llvm/CodeGen/ScheduleDAG.h [10:11]
+ include/llvm/CodeGen/ScheduleDAGInstrs.h [10:11]
+ include/llvm/CodeGen/ScheduleDAGMutation.h [10:11]
+ include/llvm/CodeGen/ScheduleDFS.h [10:11]
+ include/llvm/CodeGen/ScheduleHazardRecognizer.h [10:11]
+ include/llvm/CodeGen/SchedulerRegistry.h [10:11]
+ include/llvm/CodeGen/ScoreboardHazardRecognizer.h [10:11]
+ include/llvm/CodeGen/SelectionDAG.h [10:11]
+ include/llvm/CodeGen/SelectionDAGAddressAnalysis.h [10:11]
+ include/llvm/CodeGen/SelectionDAGISel.h [10:11]
+ include/llvm/CodeGen/SelectionDAGNodes.h [10:11]
+ include/llvm/CodeGen/SelectionDAGTargetInfo.h [10:11]
+ include/llvm/CodeGen/SlotIndexes.h [10:11]
+ include/llvm/CodeGen/Spiller.h [10:11]
+ include/llvm/CodeGen/StableHashing.h [10:11]
+ include/llvm/CodeGen/StackMaps.h [10:11]
+ include/llvm/CodeGen/StackProtector.h [10:11]
+ include/llvm/CodeGen/SwiftErrorValueTracking.h [10:11]
+ include/llvm/CodeGen/SwitchLoweringUtils.h [10:11]
+ include/llvm/CodeGen/TailDuplicator.h [10:11]
+ include/llvm/CodeGen/TargetCallingConv.h [10:11]
+ include/llvm/CodeGen/TargetFrameLowering.h [10:11]
+ include/llvm/CodeGen/TargetInstrInfo.h [10:11]
+ include/llvm/CodeGen/TargetLowering.h [10:11]
+ include/llvm/CodeGen/TargetLoweringObjectFileImpl.h [10:11]
+ include/llvm/CodeGen/TargetOpcodes.h [10:11]
+ include/llvm/CodeGen/TargetPassConfig.h [10:11]
+ include/llvm/CodeGen/TargetRegisterInfo.h [10:11]
+ include/llvm/CodeGen/TargetSchedule.h [10:11]
+ include/llvm/CodeGen/TargetSubtargetInfo.h [10:11]
+ include/llvm/CodeGen/TileShapeInfo.h [10:11]
+ include/llvm/CodeGen/TypePromotion.h [10:11]
+ include/llvm/CodeGen/UnreachableBlockElim.h [10:11]
+ include/llvm/CodeGen/VLIWMachineScheduler.h [10:11]
+ include/llvm/CodeGen/ValueTypes.h [10:11]
+ include/llvm/CodeGen/ValueTypes.td [3:4]
+ include/llvm/CodeGen/VirtRegMap.h [10:11]
+ include/llvm/CodeGen/WasmEHFuncInfo.h [10:11]
+ include/llvm/CodeGen/WinEHFuncInfo.h [10:11]
+ include/llvm/DWARFLinker/DWARFLinker.h [10:11]
+ include/llvm/DWARFLinker/DWARFLinkerCompileUnit.h [10:11]
+ include/llvm/DWARFLinker/DWARFLinkerDeclContext.h [10:11]
+ include/llvm/DWARFLinker/DWARFStreamer.h [10:11]
+ include/llvm/DWARFLinkerParallel/DWARFLinker.h [10:11]
+ include/llvm/DebugInfo/CodeView/AppendingTypeTableBuilder.h [10:11]
+ include/llvm/DebugInfo/CodeView/CVRecord.h [10:11]
+ include/llvm/DebugInfo/CodeView/CVSymbolVisitor.h [10:11]
+ include/llvm/DebugInfo/CodeView/CVTypeVisitor.h [10:11]
+ include/llvm/DebugInfo/CodeView/CodeView.h [10:11]
+ include/llvm/DebugInfo/CodeView/CodeViewError.h [10:11]
+ include/llvm/DebugInfo/CodeView/CodeViewRecordIO.h [10:11]
+ include/llvm/DebugInfo/CodeView/CodeViewRegisters.def [3:4]
+ include/llvm/DebugInfo/CodeView/CodeViewSymbols.def [3:4]
+ include/llvm/DebugInfo/CodeView/CodeViewTypes.def [3:4]
+ include/llvm/DebugInfo/CodeView/ContinuationRecordBuilder.h [10:11]
+ include/llvm/DebugInfo/CodeView/DebugChecksumsSubsection.h [10:11]
+ include/llvm/DebugInfo/CodeView/DebugCrossExSubsection.h [10:11]
+ include/llvm/DebugInfo/CodeView/DebugCrossImpSubsection.h [10:11]
+ include/llvm/DebugInfo/CodeView/DebugFrameDataSubsection.h [10:11]
+ include/llvm/DebugInfo/CodeView/DebugInlineeLinesSubsection.h [10:11]
+ include/llvm/DebugInfo/CodeView/DebugLinesSubsection.h [10:11]
+ include/llvm/DebugInfo/CodeView/DebugStringTableSubsection.h [10:11]
+ include/llvm/DebugInfo/CodeView/DebugSubsection.h [10:11]
+ include/llvm/DebugInfo/CodeView/DebugSubsectionRecord.h [10:11]
+ include/llvm/DebugInfo/CodeView/DebugSubsectionVisitor.h [10:11]
+ include/llvm/DebugInfo/CodeView/DebugSymbolRVASubsection.h [10:11]
+ include/llvm/DebugInfo/CodeView/DebugSymbolsSubsection.h [10:11]
+ include/llvm/DebugInfo/CodeView/DebugUnknownSubsection.h [10:11]
+ include/llvm/DebugInfo/CodeView/EnumTables.h [10:11]
+ include/llvm/DebugInfo/CodeView/Formatters.h [10:11]
+ include/llvm/DebugInfo/CodeView/FunctionId.h [10:11]
+ include/llvm/DebugInfo/CodeView/GUID.h [10:11]
+ include/llvm/DebugInfo/CodeView/GlobalTypeTableBuilder.h [10:11]
+ include/llvm/DebugInfo/CodeView/LazyRandomTypeCollection.h [10:11]
+ include/llvm/DebugInfo/CodeView/Line.h [10:11]
+ include/llvm/DebugInfo/CodeView/MergingTypeTableBuilder.h [10:11]
+ include/llvm/DebugInfo/CodeView/RecordName.h [10:11]
+ include/llvm/DebugInfo/CodeView/RecordSerialization.h [10:11]
+ include/llvm/DebugInfo/CodeView/SimpleTypeSerializer.h [10:11]
+ include/llvm/DebugInfo/CodeView/StringsAndChecksums.h [10:11]
+ include/llvm/DebugInfo/CodeView/SymbolDeserializer.h [10:11]
+ include/llvm/DebugInfo/CodeView/SymbolDumpDelegate.h [10:11]
+ include/llvm/DebugInfo/CodeView/SymbolDumper.h [10:11]
+ include/llvm/DebugInfo/CodeView/SymbolRecord.h [10:11]
+ include/llvm/DebugInfo/CodeView/SymbolRecordHelpers.h [10:11]
+ include/llvm/DebugInfo/CodeView/SymbolRecordMapping.h [10:11]
+ include/llvm/DebugInfo/CodeView/SymbolSerializer.h [10:11]
+ include/llvm/DebugInfo/CodeView/SymbolVisitorCallbackPipeline.h [10:11]
+ include/llvm/DebugInfo/CodeView/SymbolVisitorCallbacks.h [10:11]
+ include/llvm/DebugInfo/CodeView/SymbolVisitorDelegate.h [10:11]
+ include/llvm/DebugInfo/CodeView/TypeCollection.h [10:11]
+ include/llvm/DebugInfo/CodeView/TypeDeserializer.h [10:11]
+ include/llvm/DebugInfo/CodeView/TypeDumpVisitor.h [10:11]
+ include/llvm/DebugInfo/CodeView/TypeHashing.h [10:11]
+ include/llvm/DebugInfo/CodeView/TypeIndex.h [10:11]
+ include/llvm/DebugInfo/CodeView/TypeIndexDiscovery.h [10:11]
+ include/llvm/DebugInfo/CodeView/TypeRecord.h [10:11]
+ include/llvm/DebugInfo/CodeView/TypeRecordHelpers.h [10:11]
+ include/llvm/DebugInfo/CodeView/TypeRecordMapping.h [10:11]
+ include/llvm/DebugInfo/CodeView/TypeStreamMerger.h [10:11]
+ include/llvm/DebugInfo/CodeView/TypeSymbolEmitter.h [10:11]
+ include/llvm/DebugInfo/CodeView/TypeTableCollection.h [10:11]
+ include/llvm/DebugInfo/CodeView/TypeVisitorCallbackPipeline.h [10:11]
+ include/llvm/DebugInfo/CodeView/TypeVisitorCallbacks.h [10:11]
+ include/llvm/DebugInfo/DIContext.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFAbbreviationDeclaration.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFAcceleratorTable.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFAddressRange.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFAttribute.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFCompileUnit.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFContext.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFDataExtractor.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFDebugAbbrev.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFDebugAddr.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFDebugArangeSet.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFDebugAranges.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFDebugFrame.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFDebugInfoEntry.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFDebugLine.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFDebugLoc.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFDebugMacro.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFDebugPubTable.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFDebugRangeList.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFDebugRnglists.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFDie.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFExpression.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFFormValue.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFGdbIndex.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFListTable.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFLocationExpression.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFObject.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFRelocMap.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFSection.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFTypePrinter.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFTypeUnit.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFUnit.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFUnitIndex.h [10:11]
+ include/llvm/DebugInfo/DWARF/DWARFVerifier.h [10:11]
+ include/llvm/DebugInfo/GSYM/DwarfTransformer.h [10:11]
+ include/llvm/DebugInfo/GSYM/ExtractRanges.h [10:11]
+ include/llvm/DebugInfo/GSYM/FileEntry.h [10:11]
+ include/llvm/DebugInfo/GSYM/FileWriter.h [10:11]
+ include/llvm/DebugInfo/GSYM/FunctionInfo.h [10:11]
+ include/llvm/DebugInfo/GSYM/GsymCreator.h [10:11]
+ include/llvm/DebugInfo/GSYM/GsymReader.h [10:11]
+ include/llvm/DebugInfo/GSYM/Header.h [10:11]
+ include/llvm/DebugInfo/GSYM/InlineInfo.h [10:11]
+ include/llvm/DebugInfo/GSYM/LineEntry.h [10:11]
+ include/llvm/DebugInfo/GSYM/LineTable.h [10:11]
+ include/llvm/DebugInfo/GSYM/LookupResult.h [10:11]
+ include/llvm/DebugInfo/GSYM/ObjectFileTransformer.h [10:11]
+ include/llvm/DebugInfo/GSYM/StringTable.h [10:11]
+ include/llvm/DebugInfo/LogicalView/Core/LVCompare.h [10:11]
+ include/llvm/DebugInfo/LogicalView/Core/LVElement.h [10:11]
+ include/llvm/DebugInfo/LogicalView/Core/LVLine.h [10:11]
+ include/llvm/DebugInfo/LogicalView/Core/LVLocation.h [10:11]
+ include/llvm/DebugInfo/LogicalView/Core/LVObject.h [10:11]
+ include/llvm/DebugInfo/LogicalView/Core/LVOptions.h [10:11]
+ include/llvm/DebugInfo/LogicalView/Core/LVRange.h [10:11]
+ include/llvm/DebugInfo/LogicalView/Core/LVReader.h [10:11]
+ include/llvm/DebugInfo/LogicalView/Core/LVScope.h [10:11]
+ include/llvm/DebugInfo/LogicalView/Core/LVSort.h [10:11]
+ include/llvm/DebugInfo/LogicalView/Core/LVStringPool.h [10:11]
+ include/llvm/DebugInfo/LogicalView/Core/LVSupport.h [10:11]
+ include/llvm/DebugInfo/LogicalView/Core/LVSymbol.h [10:11]
+ include/llvm/DebugInfo/LogicalView/Core/LVType.h [10:11]
+ include/llvm/DebugInfo/LogicalView/LVReaderHandler.h [10:11]
+ include/llvm/DebugInfo/LogicalView/Readers/LVBinaryReader.h [10:11]
+ include/llvm/DebugInfo/LogicalView/Readers/LVELFReader.h [10:11]
+ include/llvm/DebugInfo/MSF/IMSFFile.h [10:11]
+ include/llvm/DebugInfo/MSF/MSFBuilder.h [10:11]
+ include/llvm/DebugInfo/MSF/MSFCommon.h [10:11]
+ include/llvm/DebugInfo/MSF/MSFError.h [10:11]
+ include/llvm/DebugInfo/MSF/MappedBlockStream.h [10:11]
+ include/llvm/DebugInfo/PDB/ConcreteSymbolEnumerator.h [10:11]
+ include/llvm/DebugInfo/PDB/DIA/DIADataStream.h [10:11]
+ include/llvm/DebugInfo/PDB/DIA/DIAEnumDebugStreams.h [10:11]
+ include/llvm/DebugInfo/PDB/DIA/DIAEnumFrameData.h [10:11]
+ include/llvm/DebugInfo/PDB/DIA/DIAEnumInjectedSources.h [10:11]
+ include/llvm/DebugInfo/PDB/DIA/DIAEnumLineNumbers.h [10:11]
+ include/llvm/DebugInfo/PDB/DIA/DIAEnumSectionContribs.h [10:11]
+ include/llvm/DebugInfo/PDB/DIA/DIAEnumSourceFiles.h [10:11]
+ include/llvm/DebugInfo/PDB/DIA/DIAEnumSymbols.h [10:11]
+ include/llvm/DebugInfo/PDB/DIA/DIAEnumTables.h [10:11]
+ include/llvm/DebugInfo/PDB/DIA/DIAError.h [10:11]
+ include/llvm/DebugInfo/PDB/DIA/DIAFrameData.h [10:11]
+ include/llvm/DebugInfo/PDB/DIA/DIAInjectedSource.h [10:11]
+ include/llvm/DebugInfo/PDB/DIA/DIALineNumber.h [10:11]
+ include/llvm/DebugInfo/PDB/DIA/DIARawSymbol.h [10:11]
+ include/llvm/DebugInfo/PDB/DIA/DIASectionContrib.h [10:11]
+ include/llvm/DebugInfo/PDB/DIA/DIASession.h [10:11]
+ include/llvm/DebugInfo/PDB/DIA/DIASourceFile.h [10:11]
+ include/llvm/DebugInfo/PDB/DIA/DIASupport.h [10:11]
+ include/llvm/DebugInfo/PDB/DIA/DIATable.h [10:11]
+ include/llvm/DebugInfo/PDB/DIA/DIAUtils.h [10:11]
+ include/llvm/DebugInfo/PDB/GenericError.h [10:11]
+ include/llvm/DebugInfo/PDB/IPDBDataStream.h [10:11]
+ include/llvm/DebugInfo/PDB/IPDBEnumChildren.h [10:11]
+ include/llvm/DebugInfo/PDB/IPDBFrameData.h [10:11]
+ include/llvm/DebugInfo/PDB/IPDBInjectedSource.h [10:11]
+ include/llvm/DebugInfo/PDB/IPDBLineNumber.h [10:11]
+ include/llvm/DebugInfo/PDB/IPDBRawSymbol.h [10:11]
+ include/llvm/DebugInfo/PDB/IPDBSectionContrib.h [10:11]
+ include/llvm/DebugInfo/PDB/IPDBSession.h [10:11]
+ include/llvm/DebugInfo/PDB/IPDBSourceFile.h [10:11]
+ include/llvm/DebugInfo/PDB/IPDBTable.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/DbiModuleDescriptor.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/DbiModuleDescriptorBuilder.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/DbiModuleList.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/DbiStream.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/DbiStreamBuilder.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/EnumTables.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/FormatUtil.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/Formatters.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/GSIStreamBuilder.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/GlobalsStream.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/Hash.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/HashTable.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/ISectionContribVisitor.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/InfoStream.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/InfoStreamBuilder.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/InjectedSourceStream.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/InputFile.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/LinePrinter.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/ModuleDebugStream.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/NamedStreamMap.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/NativeCompilandSymbol.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/NativeEnumGlobals.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/NativeEnumInjectedSources.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/NativeEnumLineNumbers.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/NativeEnumModules.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/NativeEnumSymbols.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/NativeEnumTypes.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/NativeExeSymbol.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/NativeFunctionSymbol.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/NativeInlineSiteSymbol.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/NativeLineNumber.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/NativePublicSymbol.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/NativeRawSymbol.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/NativeSession.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/NativeSourceFile.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/NativeSymbolEnumerator.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/NativeTypeArray.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/NativeTypeBuiltin.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/NativeTypeEnum.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/NativeTypeFunctionSig.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/NativeTypePointer.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/NativeTypeTypedef.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/NativeTypeUDT.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/NativeTypeVTShape.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/PDBFile.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/PDBFileBuilder.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/PDBStringTable.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/PDBStringTableBuilder.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/PublicsStream.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/RawConstants.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/RawError.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/RawTypes.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/SymbolCache.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/SymbolStream.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/TpiHashing.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/TpiStream.h [10:11]
+ include/llvm/DebugInfo/PDB/Native/TpiStreamBuilder.h [10:11]
+ include/llvm/DebugInfo/PDB/PDB.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBContext.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBExtras.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymDumper.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbol.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolAnnotation.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolBlock.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolCompiland.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolCompilandDetails.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolCompilandEnv.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolCustom.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolData.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolExe.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolFunc.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolFuncDebugEnd.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolFuncDebugStart.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolLabel.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolPublicSymbol.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolThunk.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeArray.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeBaseClass.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeBuiltin.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeCustom.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeDimension.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeEnum.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeFriend.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeFunctionArg.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeFunctionSig.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeManaged.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypePointer.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeTypedef.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeUDT.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeVTable.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeVTableShape.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolUnknown.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBSymbolUsingNamespace.h [10:11]
+ include/llvm/DebugInfo/PDB/PDBTypes.h [10:11]
+ include/llvm/DebugInfo/PDB/UDTLayout.h [10:11]
+ include/llvm/DebugInfo/Symbolize/DIPrinter.h [10:11]
+ include/llvm/DebugInfo/Symbolize/Markup.h [10:11]
+ include/llvm/DebugInfo/Symbolize/MarkupFilter.h [10:11]
+ include/llvm/DebugInfo/Symbolize/SymbolizableModule.h [10:11]
+ include/llvm/DebugInfo/Symbolize/SymbolizableObjectFile.h [10:11]
+ include/llvm/DebugInfo/Symbolize/Symbolize.h [10:11]
+ include/llvm/Debuginfod/BuildIDFetcher.h [10:11]
+ include/llvm/Debuginfod/Debuginfod.h [10:11]
+ include/llvm/Debuginfod/HTTPClient.h [10:11]
+ include/llvm/Debuginfod/HTTPServer.h [10:11]
+ include/llvm/Demangle/Demangle.h [10:11]
+ include/llvm/Demangle/DemangleConfig.h [10:11]
+ include/llvm/Demangle/ItaniumDemangle.h [10:11]
+ include/llvm/Demangle/ItaniumNodes.def [3:4]
+ include/llvm/Demangle/MicrosoftDemangle.h [10:11]
+ include/llvm/Demangle/MicrosoftDemangleNodes.h [10:11]
+ include/llvm/Demangle/StringView.h [10:11]
+ include/llvm/Demangle/Utility.h [10:11]
+ include/llvm/ExecutionEngine/ExecutionEngine.h [10:11]
+ include/llvm/ExecutionEngine/GenericValue.h [10:11]
+ include/llvm/ExecutionEngine/Interpreter.h [10:11]
+ include/llvm/ExecutionEngine/JITEventListener.h [10:11]
+ include/llvm/ExecutionEngine/JITLink/COFF.h [10:11]
+ include/llvm/ExecutionEngine/JITLink/COFF_x86_64.h [10:11]
+ include/llvm/ExecutionEngine/JITLink/DWARFRecordSectionSplitter.h [10:11]
+ include/llvm/ExecutionEngine/JITLink/EHFrameSupport.h [10:11]
+ include/llvm/ExecutionEngine/JITLink/ELF.h [10:11]
+ include/llvm/ExecutionEngine/JITLink/ELF_aarch64.h [10:11]
+ include/llvm/ExecutionEngine/JITLink/ELF_i386.h [10:11]
+ include/llvm/ExecutionEngine/JITLink/ELF_loongarch.h [10:11]
+ include/llvm/ExecutionEngine/JITLink/ELF_riscv.h [10:11]
+ include/llvm/ExecutionEngine/JITLink/ELF_x86_64.h [10:11]
+ include/llvm/ExecutionEngine/JITLink/JITLink.h [10:11]
+ include/llvm/ExecutionEngine/JITLink/JITLinkDylib.h [10:11]
+ include/llvm/ExecutionEngine/JITLink/JITLinkMemoryManager.h [10:11]
+ include/llvm/ExecutionEngine/JITLink/MachO.h [10:11]
+ include/llvm/ExecutionEngine/JITLink/MachO_arm64.h [10:11]
+ include/llvm/ExecutionEngine/JITLink/MachO_x86_64.h [10:11]
+ include/llvm/ExecutionEngine/JITLink/TableManager.h [10:11]
+ include/llvm/ExecutionEngine/JITLink/aarch64.h [10:11]
+ include/llvm/ExecutionEngine/JITLink/i386.h [10:11]
+ include/llvm/ExecutionEngine/JITLink/loongarch.h [10:11]
+ include/llvm/ExecutionEngine/JITLink/riscv.h [10:11]
+ include/llvm/ExecutionEngine/JITLink/x86_64.h [10:11]
+ include/llvm/ExecutionEngine/JITSymbol.h [10:11]
+ include/llvm/ExecutionEngine/MCJIT.h [10:11]
+ include/llvm/ExecutionEngine/OProfileWrapper.h [10:11]
+ include/llvm/ExecutionEngine/ObjectCache.h [10:11]
+ include/llvm/ExecutionEngine/Orc/COFFPlatform.h [10:11]
+ include/llvm/ExecutionEngine/Orc/COFFVCRuntimeSupport.h [10:11]
+ include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h [10:11]
+ include/llvm/ExecutionEngine/Orc/CompileUtils.h [10:11]
+ include/llvm/ExecutionEngine/Orc/Core.h [10:11]
+ include/llvm/ExecutionEngine/Orc/DebugObjectManagerPlugin.h [10:11]
+ include/llvm/ExecutionEngine/Orc/DebugUtils.h [10:11]
+ include/llvm/ExecutionEngine/Orc/DebuggerSupportPlugin.h [10:11]
+ include/llvm/ExecutionEngine/Orc/ELFNixPlatform.h [10:11]
+ include/llvm/ExecutionEngine/Orc/EPCDebugObjectRegistrar.h [10:11]
+ include/llvm/ExecutionEngine/Orc/EPCDynamicLibrarySearchGenerator.h [10:11]
+ include/llvm/ExecutionEngine/Orc/EPCEHFrameRegistrar.h [10:11]
+ include/llvm/ExecutionEngine/Orc/EPCGenericDylibManager.h [10:11]
+ include/llvm/ExecutionEngine/Orc/EPCGenericJITLinkMemoryManager.h [10:11]
+ include/llvm/ExecutionEngine/Orc/EPCGenericMemoryAccess.h [10:11]
+ include/llvm/ExecutionEngine/Orc/EPCGenericRTDyldMemoryManager.h [10:11]
+ include/llvm/ExecutionEngine/Orc/EPCIndirectionUtils.h [10:11]
+ include/llvm/ExecutionEngine/Orc/ExecutionUtils.h [10:11]
+ include/llvm/ExecutionEngine/Orc/ExecutorProcessControl.h [10:11]
+ include/llvm/ExecutionEngine/Orc/IRCompileLayer.h [10:11]
+ include/llvm/ExecutionEngine/Orc/IRTransformLayer.h [10:11]
+ include/llvm/ExecutionEngine/Orc/IndirectionUtils.h [10:11]
+ include/llvm/ExecutionEngine/Orc/JITTargetMachineBuilder.h [10:11]
+ include/llvm/ExecutionEngine/Orc/LLJIT.h [10:11]
+ include/llvm/ExecutionEngine/Orc/Layer.h [10:11]
+ include/llvm/ExecutionEngine/Orc/LazyReexports.h [10:11]
+ include/llvm/ExecutionEngine/Orc/LookupAndRecordAddrs.h [10:11]
+ include/llvm/ExecutionEngine/Orc/MachOPlatform.h [10:11]
+ include/llvm/ExecutionEngine/Orc/Mangling.h [10:11]
+ include/llvm/ExecutionEngine/Orc/MapperJITLinkMemoryManager.h [10:11]
+ include/llvm/ExecutionEngine/Orc/MemoryMapper.h [10:11]
+ include/llvm/ExecutionEngine/Orc/ObjectFileInterface.h [10:11]
+ include/llvm/ExecutionEngine/Orc/ObjectLinkingLayer.h [10:11]
+ include/llvm/ExecutionEngine/Orc/ObjectTransformLayer.h [10:11]
+ include/llvm/ExecutionEngine/Orc/OrcABISupport.h [10:11]
+ include/llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h [10:11]
+ include/llvm/ExecutionEngine/Orc/Shared/AllocationActions.h [10:11]
+ include/llvm/ExecutionEngine/Orc/Shared/ExecutorAddress.h [10:11]
+ include/llvm/ExecutionEngine/Orc/Shared/MemoryFlags.h [10:11]
+ include/llvm/ExecutionEngine/Orc/Shared/OrcError.h [10:11]
+ include/llvm/ExecutionEngine/Orc/Shared/OrcRTBridge.h [10:11]
+ include/llvm/ExecutionEngine/Orc/Shared/SimplePackedSerialization.h [10:11]
+ include/llvm/ExecutionEngine/Orc/Shared/SimpleRemoteEPCUtils.h [10:11]
+ include/llvm/ExecutionEngine/Orc/Shared/TargetProcessControlTypes.h [10:11]
+ include/llvm/ExecutionEngine/Orc/Shared/WrapperFunctionUtils.h [10:11]
+ include/llvm/ExecutionEngine/Orc/SimpleRemoteEPC.h [10:11]
+ include/llvm/ExecutionEngine/Orc/SpeculateAnalyses.h [10:11]
+ include/llvm/ExecutionEngine/Orc/Speculation.h [10:11]
+ include/llvm/ExecutionEngine/Orc/SymbolStringPool.h [10:11]
+ include/llvm/ExecutionEngine/Orc/TargetProcess/ExecutorBootstrapService.h [10:11]
+ include/llvm/ExecutionEngine/Orc/TargetProcess/ExecutorSharedMemoryMapperService.h [10:11]
+ include/llvm/ExecutionEngine/Orc/TargetProcess/JITLoaderGDB.h [10:11]
+ include/llvm/ExecutionEngine/Orc/TargetProcess/RegisterEHFrames.h [10:11]
+ include/llvm/ExecutionEngine/Orc/TargetProcess/SimpleExecutorDylibManager.h [10:11]
+ include/llvm/ExecutionEngine/Orc/TargetProcess/SimpleExecutorMemoryManager.h [10:11]
+ include/llvm/ExecutionEngine/Orc/TargetProcess/SimpleRemoteEPCServer.h [10:11]
+ include/llvm/ExecutionEngine/Orc/TargetProcess/TargetExecutionUtils.h [10:11]
+ include/llvm/ExecutionEngine/Orc/TaskDispatch.h [10:11]
+ include/llvm/ExecutionEngine/Orc/ThreadSafeModule.h [10:11]
+ include/llvm/ExecutionEngine/RTDyldMemoryManager.h [10:11]
+ include/llvm/ExecutionEngine/RuntimeDyld.h [10:11]
+ include/llvm/ExecutionEngine/RuntimeDyldChecker.h [10:11]
+ include/llvm/ExecutionEngine/SectionMemoryManager.h [10:11]
+ include/llvm/FileCheck/FileCheck.h [10:11]
+ include/llvm/Frontend/Directive/DirectiveBase.td [3:4]
+ include/llvm/Frontend/HLSL/HLSLResource.h [10:11]
+ include/llvm/Frontend/OpenACC/ACC.td [3:4]
+ include/llvm/Frontend/OpenMP/OMP.td [3:4]
+ include/llvm/Frontend/OpenMP/OMPAssume.h [10:11]
+ include/llvm/Frontend/OpenMP/OMPConstants.h [10:11]
+ include/llvm/Frontend/OpenMP/OMPContext.h [10:11]
+ include/llvm/Frontend/OpenMP/OMPDeviceConstants.h [10:11]
+ include/llvm/Frontend/OpenMP/OMPGridValues.h [10:11]
+ include/llvm/Frontend/OpenMP/OMPIRBuilder.h [10:11]
+ include/llvm/Frontend/OpenMP/OMPKinds.def [3:4]
+ include/llvm/FuzzMutate/FuzzerCLI.h [10:11]
+ include/llvm/FuzzMutate/IRMutator.h [10:11]
+ include/llvm/FuzzMutate/OpDescriptor.h [10:11]
+ include/llvm/FuzzMutate/Operations.h [10:11]
+ include/llvm/FuzzMutate/Random.h [10:11]
+ include/llvm/FuzzMutate/RandomIRBuilder.h [10:11]
+ include/llvm/IR/AbstractCallSite.h [10:11]
+ include/llvm/IR/Argument.h [10:11]
+ include/llvm/IR/AssemblyAnnotationWriter.h [10:11]
+ include/llvm/IR/Assumptions.h [10:11]
+ include/llvm/IR/Attributes.h [10:11]
+ include/llvm/IR/Attributes.td [3:4]
+ include/llvm/IR/AutoUpgrade.h [10:11]
+ include/llvm/IR/BasicBlock.h [10:11]
+ include/llvm/IR/BuiltinGCs.h [10:11]
+ include/llvm/IR/CFG.h [10:11]
+ include/llvm/IR/CallingConv.h [10:11]
+ include/llvm/IR/Comdat.h [10:11]
+ include/llvm/IR/Constant.h [10:11]
+ include/llvm/IR/ConstantFold.h [10:11]
+ include/llvm/IR/ConstantFolder.h [10:11]
+ include/llvm/IR/ConstantRange.h [10:11]
+ include/llvm/IR/Constants.h [10:11]
+ include/llvm/IR/ConstrainedOps.def [3:4]
+ include/llvm/IR/DIBuilder.h [10:11]
+ include/llvm/IR/DataLayout.h [10:11]
+ include/llvm/IR/DebugInfo.h [10:11]
+ include/llvm/IR/DebugInfoFlags.def [3:4]
+ include/llvm/IR/DebugInfoMetadata.h [10:11]
+ include/llvm/IR/DebugLoc.h [10:11]
+ include/llvm/IR/DerivedTypes.h [10:11]
+ include/llvm/IR/DerivedUser.h [10:11]
+ include/llvm/IR/DiagnosticHandler.h [10:11]
+ include/llvm/IR/DiagnosticInfo.h [10:11]
+ include/llvm/IR/DiagnosticPrinter.h [10:11]
+ include/llvm/IR/Dominators.h [10:11]
+ include/llvm/IR/FMF.h [10:11]
+ include/llvm/IR/FPEnv.h [10:11]
+ include/llvm/IR/FixedPointBuilder.h [10:11]
+ include/llvm/IR/Function.h [10:11]
+ include/llvm/IR/GCStrategy.h [10:11]
+ include/llvm/IR/GVMaterializer.h [10:11]
+ include/llvm/IR/GetElementPtrTypeIterator.h [10:11]
+ include/llvm/IR/GlobalAlias.h [10:11]
+ include/llvm/IR/GlobalIFunc.h [10:11]
+ include/llvm/IR/GlobalObject.h [10:11]
+ include/llvm/IR/GlobalValue.h [10:11]
+ include/llvm/IR/GlobalVariable.h [10:11]
+ include/llvm/IR/IRBuilder.h [10:11]
+ include/llvm/IR/IRBuilderFolder.h [10:11]
+ include/llvm/IR/IRPrintingPasses.h [10:11]
+ include/llvm/IR/InlineAsm.h [10:11]
+ include/llvm/IR/InstIterator.h [10:11]
+ include/llvm/IR/InstVisitor.h [10:11]
+ include/llvm/IR/InstrTypes.h [10:11]
+ include/llvm/IR/Instruction.def [3:4]
+ include/llvm/IR/Instruction.h [10:11]
+ include/llvm/IR/Instructions.h [10:11]
+ include/llvm/IR/IntrinsicInst.h [10:11]
+ include/llvm/IR/Intrinsics.h [10:11]
+ include/llvm/IR/Intrinsics.td [3:4]
+ include/llvm/IR/IntrinsicsAArch64.td [3:4]
+ include/llvm/IR/IntrinsicsAMDGPU.td [3:4]
+ include/llvm/IR/IntrinsicsARM.td [3:4]
+ include/llvm/IR/IntrinsicsBPF.td [3:4]
+ include/llvm/IR/IntrinsicsDirectX.td [3:4]
+ include/llvm/IR/IntrinsicsHexagon.td [2:3]
+ include/llvm/IR/IntrinsicsHexagonDep.td [3:4]
+ include/llvm/IR/IntrinsicsLoongArch.td [3:4]
+ include/llvm/IR/IntrinsicsMips.td [3:4]
+ include/llvm/IR/IntrinsicsNVVM.td [3:4]
+ include/llvm/IR/IntrinsicsPowerPC.td [3:4]
+ include/llvm/IR/IntrinsicsRISCV.td [3:4]
+ include/llvm/IR/IntrinsicsSPIRV.td [3:4]
+ include/llvm/IR/IntrinsicsSystemZ.td [3:4]
+ include/llvm/IR/IntrinsicsWebAssembly.td [3:4]
+ include/llvm/IR/IntrinsicsX86.td [3:4]
+ include/llvm/IR/IntrinsicsXCore.td [3:4]
+ include/llvm/IR/LLVMContext.h [10:11]
+ include/llvm/IR/LLVMRemarkStreamer.h [10:11]
+ include/llvm/IR/LegacyPassManager.h [10:11]
+ include/llvm/IR/LegacyPassManagers.h [10:11]
+ include/llvm/IR/LegacyPassNameParser.h [10:11]
+ include/llvm/IR/MDBuilder.h [10:11]
+ include/llvm/IR/Mangler.h [10:11]
+ include/llvm/IR/MatrixBuilder.h [10:11]
+ include/llvm/IR/Metadata.def [3:4]
+ include/llvm/IR/Metadata.h [10:11]
+ include/llvm/IR/Module.h [10:11]
+ include/llvm/IR/ModuleSlotTracker.h [10:11]
+ include/llvm/IR/ModuleSummaryIndex.h [10:11]
+ include/llvm/IR/ModuleSummaryIndexYAML.h [10:11]
+ include/llvm/IR/NoFolder.h [10:11]
+ include/llvm/IR/OperandTraits.h [10:11]
+ include/llvm/IR/Operator.h [10:11]
+ include/llvm/IR/OptBisect.h [10:11]
+ include/llvm/IR/PassInstrumentation.h [10:11]
+ include/llvm/IR/PassManager.h [10:11]
+ include/llvm/IR/PassManagerImpl.h [10:11]
+ include/llvm/IR/PassManagerInternal.h [10:11]
+ include/llvm/IR/PassTimingInfo.h [10:11]
+ include/llvm/IR/PatternMatch.h [10:11]
+ include/llvm/IR/PredIteratorCache.h [10:11]
+ include/llvm/IR/PrintPasses.h [10:11]
+ include/llvm/IR/ProfDataUtils.h [10:11]
+ include/llvm/IR/ProfileSummary.h [10:11]
+ include/llvm/IR/PseudoProbe.h [10:11]
+ include/llvm/IR/ReplaceConstant.h [10:11]
+ include/llvm/IR/RuntimeLibcalls.def [3:4]
+ include/llvm/IR/SSAContext.h [10:11]
+ include/llvm/IR/SafepointIRVerifier.h [10:11]
+ include/llvm/IR/Statepoint.h [10:11]
+ include/llvm/IR/StructuralHash.h [10:11]
+ include/llvm/IR/SymbolTableListTraits.h [10:11]
+ include/llvm/IR/TrackingMDRef.h [10:11]
+ include/llvm/IR/Type.h [10:11]
+ include/llvm/IR/TypeFinder.h [10:11]
+ include/llvm/IR/TypedPointerType.h [10:11]
+ include/llvm/IR/Use.h [10:11]
+ include/llvm/IR/UseListOrder.h [10:11]
+ include/llvm/IR/User.h [10:11]
+ include/llvm/IR/VPIntrinsics.def [3:4]
+ include/llvm/IR/Value.def [3:4]
+ include/llvm/IR/Value.h [10:11]
+ include/llvm/IR/ValueHandle.h [10:11]
+ include/llvm/IR/ValueMap.h [10:11]
+ include/llvm/IR/ValueSymbolTable.h [10:11]
+ include/llvm/IR/VectorBuilder.h [10:11]
+ include/llvm/IR/Verifier.h [10:11]
+ include/llvm/IRPrinter/IRPrintingPasses.h [10:11]
+ include/llvm/IRReader/IRReader.h [10:11]
+ include/llvm/InitializePasses.h [10:11]
+ include/llvm/InterfaceStub/ELFObjHandler.h [10:11]
+ include/llvm/InterfaceStub/IFSHandler.h [10:11]
+ include/llvm/InterfaceStub/IFSStub.h [10:11]
+ include/llvm/LTO/Config.h [10:11]
+ include/llvm/LTO/LTO.h [10:11]
+ include/llvm/LTO/LTOBackend.h [10:11]
+ include/llvm/LTO/SummaryBasedOptimizations.h [10:11]
+ include/llvm/LTO/legacy/LTOCodeGenerator.h [10:11]
+ include/llvm/LTO/legacy/LTOModule.h [10:11]
+ include/llvm/LTO/legacy/ThinLTOCodeGenerator.h [10:11]
+ include/llvm/LTO/legacy/UpdateCompilerUsed.h [10:11]
+ include/llvm/LineEditor/LineEditor.h [10:11]
+ include/llvm/LinkAllIR.h [10:11]
+ include/llvm/LinkAllPasses.h [10:11]
+ include/llvm/Linker/IRMover.h [10:11]
+ include/llvm/Linker/Linker.h [10:11]
+ include/llvm/MC/ConstantPools.h [10:11]
+ include/llvm/MC/LaneBitmask.h [10:11]
+ include/llvm/MC/MCAsmBackend.h [10:11]
+ include/llvm/MC/MCAsmInfo.h [10:11]
+ include/llvm/MC/MCAsmInfoCOFF.h [10:11]
+ include/llvm/MC/MCAsmInfoDarwin.h [10:11]
+ include/llvm/MC/MCAsmInfoELF.h [10:11]
+ include/llvm/MC/MCAsmInfoGOFF.h [10:11]
+ include/llvm/MC/MCAsmInfoWasm.h [10:11]
+ include/llvm/MC/MCAsmInfoXCOFF.h [10:11]
+ include/llvm/MC/MCAsmLayout.h [10:11]
+ include/llvm/MC/MCAsmMacro.h [10:11]
+ include/llvm/MC/MCAssembler.h [10:11]
+ include/llvm/MC/MCCodeEmitter.h [10:11]
+ include/llvm/MC/MCCodeView.h [10:11]
+ include/llvm/MC/MCContext.h [10:11]
+ include/llvm/MC/MCDXContainerStreamer.h [10:11]
+ include/llvm/MC/MCDXContainerWriter.h [10:11]
+ include/llvm/MC/MCDecoderOps.h [10:11]
+ include/llvm/MC/MCDirectives.h [10:11]
+ include/llvm/MC/MCDisassembler/MCDisassembler.h [10:11]
+ include/llvm/MC/MCDisassembler/MCExternalSymbolizer.h [10:11]
+ include/llvm/MC/MCDisassembler/MCRelocationInfo.h [10:11]
+ include/llvm/MC/MCDisassembler/MCSymbolizer.h [10:11]
+ include/llvm/MC/MCDwarf.h [10:11]
+ include/llvm/MC/MCELFObjectWriter.h [10:11]
+ include/llvm/MC/MCELFStreamer.h [10:11]
+ include/llvm/MC/MCExpr.h [10:11]
+ include/llvm/MC/MCFixup.h [10:11]
+ include/llvm/MC/MCFixupKindInfo.h [10:11]
+ include/llvm/MC/MCFragment.h [10:11]
+ include/llvm/MC/MCInst.h [10:11]
+ include/llvm/MC/MCInstBuilder.h [10:11]
+ include/llvm/MC/MCInstPrinter.h [10:11]
+ include/llvm/MC/MCInstrAnalysis.h [10:11]
+ include/llvm/MC/MCInstrDesc.h [10:11]
+ include/llvm/MC/MCInstrInfo.h [10:11]
+ include/llvm/MC/MCInstrItineraries.h [10:11]
+ include/llvm/MC/MCLabel.h [10:11]
+ include/llvm/MC/MCLinkerOptimizationHint.h [11:12]
+ include/llvm/MC/MCMachObjectWriter.h [10:11]
+ include/llvm/MC/MCObjectFileInfo.h [10:11]
+ include/llvm/MC/MCObjectStreamer.h [10:11]
+ include/llvm/MC/MCObjectWriter.h [10:11]
+ include/llvm/MC/MCParser/AsmCond.h [10:11]
+ include/llvm/MC/MCParser/AsmLexer.h [10:11]
+ include/llvm/MC/MCParser/MCAsmLexer.h [10:11]
+ include/llvm/MC/MCParser/MCAsmParser.h [10:11]
+ include/llvm/MC/MCParser/MCAsmParserExtension.h [10:11]
+ include/llvm/MC/MCParser/MCAsmParserUtils.h [10:11]
+ include/llvm/MC/MCParser/MCParsedAsmOperand.h [10:11]
+ include/llvm/MC/MCParser/MCTargetAsmParser.h [10:11]
+ include/llvm/MC/MCPseudoProbe.h [10:11]
+ include/llvm/MC/MCRegister.h [10:11]
+ include/llvm/MC/MCRegisterInfo.h [10:11]
+ include/llvm/MC/MCSPIRVObjectWriter.h [10:11]
+ include/llvm/MC/MCSPIRVStreamer.h [10:11]
+ include/llvm/MC/MCSchedule.h [10:11]
+ include/llvm/MC/MCSection.h [10:11]
+ include/llvm/MC/MCSectionCOFF.h [10:11]
+ include/llvm/MC/MCSectionDXContainer.h [10:11]
+ include/llvm/MC/MCSectionELF.h [10:11]
+ include/llvm/MC/MCSectionGOFF.h [10:11]
+ include/llvm/MC/MCSectionMachO.h [10:11]
+ include/llvm/MC/MCSectionSPIRV.h [10:11]
+ include/llvm/MC/MCSectionWasm.h [10:11]
+ include/llvm/MC/MCSectionXCOFF.h [10:11]
+ include/llvm/MC/MCStreamer.h [10:11]
+ include/llvm/MC/MCSubtargetInfo.h [10:11]
+ include/llvm/MC/MCSymbol.h [10:11]
+ include/llvm/MC/MCSymbolCOFF.h [10:11]
+ include/llvm/MC/MCSymbolELF.h [10:11]
+ include/llvm/MC/MCSymbolGOFF.h [10:11]
+ include/llvm/MC/MCSymbolMachO.h [10:11]
+ include/llvm/MC/MCSymbolWasm.h [10:11]
+ include/llvm/MC/MCSymbolXCOFF.h [10:11]
+ include/llvm/MC/MCTargetOptions.h [10:11]
+ include/llvm/MC/MCTargetOptionsCommandFlags.h [10:11]
+ include/llvm/MC/MCValue.h [10:11]
+ include/llvm/MC/MCWasmObjectWriter.h [10:11]
+ include/llvm/MC/MCWasmStreamer.h [10:11]
+ include/llvm/MC/MCWin64EH.h [10:11]
+ include/llvm/MC/MCWinCOFFObjectWriter.h [10:11]
+ include/llvm/MC/MCWinCOFFStreamer.h [10:11]
+ include/llvm/MC/MCWinEH.h [10:11]
+ include/llvm/MC/MCXCOFFObjectWriter.h [10:11]
+ include/llvm/MC/MCXCOFFStreamer.h [10:11]
+ include/llvm/MC/MachineLocation.h [10:11]
+ include/llvm/MC/SectionKind.h [10:11]
+ include/llvm/MC/StringTableBuilder.h [10:11]
+ include/llvm/MC/SubtargetFeature.h [10:11]
+ include/llvm/MC/TargetRegistry.h [10:11]
+ include/llvm/MCA/CodeEmitter.h [10:11]
+ include/llvm/MCA/Context.h [10:11]
+ include/llvm/MCA/CustomBehaviour.h [10:11]
+ include/llvm/MCA/HWEventListener.h [10:11]
+ include/llvm/MCA/HardwareUnits/HardwareUnit.h [10:11]
+ include/llvm/MCA/HardwareUnits/LSUnit.h [10:11]
+ include/llvm/MCA/HardwareUnits/RegisterFile.h [10:11]
+ include/llvm/MCA/HardwareUnits/ResourceManager.h [10:11]
+ include/llvm/MCA/HardwareUnits/RetireControlUnit.h [10:11]
+ include/llvm/MCA/HardwareUnits/Scheduler.h [10:11]
+ include/llvm/MCA/IncrementalSourceMgr.h [10:11]
+ include/llvm/MCA/InstrBuilder.h [10:11]
+ include/llvm/MCA/Instruction.h [10:11]
+ include/llvm/MCA/Pipeline.h [10:11]
+ include/llvm/MCA/SourceMgr.h [10:11]
+ include/llvm/MCA/Stages/DispatchStage.h [10:11]
+ include/llvm/MCA/Stages/EntryStage.h [10:11]
+ include/llvm/MCA/Stages/ExecuteStage.h [10:11]
+ include/llvm/MCA/Stages/InOrderIssueStage.h [10:11]
+ include/llvm/MCA/Stages/InstructionTables.h [10:11]
+ include/llvm/MCA/Stages/MicroOpQueueStage.h [10:11]
+ include/llvm/MCA/Stages/RetireStage.h [10:11]
+ include/llvm/MCA/Stages/Stage.h [10:11]
+ include/llvm/MCA/Support.h [10:11]
+ include/llvm/MCA/View.h [10:11]
+ include/llvm/ObjCopy/COFF/COFFConfig.h [10:11]
+ include/llvm/ObjCopy/COFF/COFFObjcopy.h [10:11]
+ include/llvm/ObjCopy/CommonConfig.h [10:11]
+ include/llvm/ObjCopy/ConfigManager.h [10:11]
+ include/llvm/ObjCopy/ELF/ELFConfig.h [10:11]
+ include/llvm/ObjCopy/ELF/ELFObjcopy.h [10:11]
+ include/llvm/ObjCopy/MachO/MachOConfig.h [10:11]
+ include/llvm/ObjCopy/MachO/MachOObjcopy.h [10:11]
+ include/llvm/ObjCopy/MultiFormatConfig.h [10:11]
+ include/llvm/ObjCopy/ObjCopy.h [10:11]
+ include/llvm/ObjCopy/XCOFF/XCOFFConfig.h [10:11]
+ include/llvm/ObjCopy/XCOFF/XCOFFObjcopy.h [10:11]
+ include/llvm/ObjCopy/wasm/WasmConfig.h [10:11]
+ include/llvm/ObjCopy/wasm/WasmObjcopy.h [10:11]
+ include/llvm/Object/Archive.h [10:11]
+ include/llvm/Object/ArchiveWriter.h [10:11]
+ include/llvm/Object/Binary.h [10:11]
+ include/llvm/Object/BuildID.h [10:11]
+ include/llvm/Object/COFF.h [10:11]
+ include/llvm/Object/COFFImportFile.h [10:11]
+ include/llvm/Object/COFFModuleDefinition.h [10:11]
+ include/llvm/Object/CVDebugRecord.h [10:11]
+ include/llvm/Object/DXContainer.h [10:11]
+ include/llvm/Object/Decompressor.h [10:11]
+ include/llvm/Object/ELF.h [10:11]
+ include/llvm/Object/ELFObjectFile.h [10:11]
+ include/llvm/Object/ELFTypes.h [10:11]
+ include/llvm/Object/Error.h [10:11]
+ include/llvm/Object/FaultMapParser.h [10:11]
+ include/llvm/Object/IRObjectFile.h [10:11]
+ include/llvm/Object/IRSymtab.h [10:11]
+ include/llvm/Object/MachO.h [10:11]
+ include/llvm/Object/MachOUniversal.h [10:11]
+ include/llvm/Object/MachOUniversalWriter.h [10:11]
+ include/llvm/Object/Minidump.h [10:11]
+ include/llvm/Object/ModuleSymbolTable.h [10:11]
+ include/llvm/Object/ObjectFile.h [10:11]
+ include/llvm/Object/OffloadBinary.h [10:11]
+ include/llvm/Object/RelocationResolver.h [10:11]
+ include/llvm/Object/StackMapParser.h [10:11]
+ include/llvm/Object/SymbolSize.h [10:11]
+ include/llvm/Object/SymbolicFile.h [10:11]
+ include/llvm/Object/TapiFile.h [10:11]
+ include/llvm/Object/TapiUniversal.h [10:11]
+ include/llvm/Object/Wasm.h [10:11]
+ include/llvm/Object/WindowsMachineFlag.h [10:11]
+ include/llvm/Object/WindowsResource.h [10:11]
+ include/llvm/Object/XCOFFObjectFile.h [10:11]
+ include/llvm/ObjectYAML/ArchiveYAML.h [10:11]
+ include/llvm/ObjectYAML/COFFYAML.h [10:11]
+ include/llvm/ObjectYAML/CodeViewYAMLDebugSections.h [10:11]
+ include/llvm/ObjectYAML/CodeViewYAMLSymbols.h [10:11]
+ include/llvm/ObjectYAML/CodeViewYAMLTypeHashing.h [10:11]
+ include/llvm/ObjectYAML/CodeViewYAMLTypes.h [10:11]
+ include/llvm/ObjectYAML/DWARFEmitter.h [10:11]
+ include/llvm/ObjectYAML/DWARFYAML.h [10:11]
+ include/llvm/ObjectYAML/DXContainerYAML.h [10:11]
+ include/llvm/ObjectYAML/ELFYAML.h [10:11]
+ include/llvm/ObjectYAML/MachOYAML.h [10:11]
+ include/llvm/ObjectYAML/MinidumpYAML.h [10:11]
+ include/llvm/ObjectYAML/ObjectYAML.h [10:11]
+ include/llvm/ObjectYAML/OffloadYAML.h [10:11]
+ include/llvm/ObjectYAML/WasmYAML.h [10:11]
+ include/llvm/ObjectYAML/XCOFFYAML.h [10:11]
+ include/llvm/ObjectYAML/YAML.h [10:11]
+ include/llvm/ObjectYAML/yaml2obj.h [10:11]
+ include/llvm/Option/Arg.h [10:11]
+ include/llvm/Option/ArgList.h [10:11]
+ include/llvm/Option/OptParser.td [3:4]
+ include/llvm/Option/OptSpecifier.h [10:11]
+ include/llvm/Option/OptTable.h [10:11]
+ include/llvm/Option/Option.h [10:11]
+ include/llvm/Pass.h [10:11]
+ include/llvm/PassAnalysisSupport.h [10:11]
+ include/llvm/PassInfo.h [10:11]
+ include/llvm/PassRegistry.h [10:11]
+ include/llvm/PassSupport.h [10:11]
+ include/llvm/Passes/OptimizationLevel.h [10:11]
+ include/llvm/Passes/PassBuilder.h [10:11]
+ include/llvm/Passes/PassPlugin.h [10:11]
+ include/llvm/Passes/StandardInstrumentations.h [10:11]
+ include/llvm/ProfileData/Coverage/CoverageMapping.h [10:11]
+ include/llvm/ProfileData/Coverage/CoverageMappingReader.h [10:11]
+ include/llvm/ProfileData/Coverage/CoverageMappingWriter.h [10:11]
+ include/llvm/ProfileData/GCOV.h [10:11]
+ include/llvm/ProfileData/InstrProf.h [10:11]
+ include/llvm/ProfileData/InstrProfCorrelator.h [10:11]
+ include/llvm/ProfileData/InstrProfReader.h [10:11]
+ include/llvm/ProfileData/InstrProfWriter.h [10:11]
+ include/llvm/ProfileData/ProfileCommon.h [10:11]
+ include/llvm/ProfileData/RawMemProfReader.h [12:13]
+ include/llvm/ProfileData/SampleProf.h [10:11]
+ include/llvm/ProfileData/SampleProfReader.h [10:11]
+ include/llvm/ProfileData/SampleProfWriter.h [10:11]
+ include/llvm/Remarks/BitstreamRemarkContainer.h [10:11]
+ include/llvm/Remarks/BitstreamRemarkParser.h [10:11]
+ include/llvm/Remarks/BitstreamRemarkSerializer.h [10:11]
+ include/llvm/Remarks/HotnessThresholdParser.h [10:11]
+ include/llvm/Remarks/Remark.h [10:11]
+ include/llvm/Remarks/RemarkFormat.h [10:11]
+ include/llvm/Remarks/RemarkLinker.h [10:11]
+ include/llvm/Remarks/RemarkParser.h [10:11]
+ include/llvm/Remarks/RemarkSerializer.h [10:11]
+ include/llvm/Remarks/RemarkStreamer.h [10:11]
+ include/llvm/Remarks/RemarkStringTable.h [10:11]
+ include/llvm/Remarks/YAMLRemarkSerializer.h [10:11]
+ include/llvm/Support/AArch64TargetParser.h [10:11]
+ include/llvm/Support/AMDGPUMetadata.h [10:11]
+ include/llvm/Support/AMDHSAKernelDescriptor.h [10:11]
+ include/llvm/Support/ARMAttributeParser.h [10:11]
+ include/llvm/Support/ARMBuildAttributes.h [10:11]
+ include/llvm/Support/ARMEHABI.h [10:11]
+ include/llvm/Support/ARMTargetParser.h [10:11]
+ include/llvm/Support/ARMTargetParserCommon.h [10:11]
+ include/llvm/Support/ARMWinEH.h [10:11]
+ include/llvm/Support/AlignOf.h [10:11]
+ include/llvm/Support/Alignment.h [10:11]
+ include/llvm/Support/Allocator.h [10:11]
+ include/llvm/Support/AllocatorBase.h [10:11]
+ include/llvm/Support/ArrayRecycler.h [10:11]
+ include/llvm/Support/Atomic.h [10:11]
+ include/llvm/Support/AtomicOrdering.h [10:11]
+ include/llvm/Support/AutoConvert.h [10:11]
+ include/llvm/Support/Automaton.h [10:11]
+ include/llvm/Support/BCD.h [10:11]
+ include/llvm/Support/BLAKE3.h [10:11]
+ include/llvm/Support/Base64.h [10:11]
+ include/llvm/Support/BinaryByteStream.h [10:11]
+ include/llvm/Support/BinaryItemStream.h [10:11]
+ include/llvm/Support/BinaryStream.h [10:11]
+ include/llvm/Support/BinaryStreamArray.h [10:11]
+ include/llvm/Support/BinaryStreamError.h [10:11]
+ include/llvm/Support/BinaryStreamReader.h [10:11]
+ include/llvm/Support/BinaryStreamRef.h [10:11]
+ include/llvm/Support/BinaryStreamWriter.h [10:11]
+ include/llvm/Support/BlockFrequency.h [10:11]
+ include/llvm/Support/BranchProbability.h [10:11]
+ include/llvm/Support/BuryPointer.h [10:11]
+ include/llvm/Support/CBindingWrapping.h [10:11]
+ include/llvm/Support/CFGDiff.h [10:11]
+ include/llvm/Support/CFGUpdate.h [10:11]
+ include/llvm/Support/COM.h [10:11]
+ include/llvm/Support/CRC.h [10:11]
+ include/llvm/Support/CSKYAttributeParser.h [10:11]
+ include/llvm/Support/CSKYAttributes.h [10:11]
+ include/llvm/Support/CSKYTargetParser.h [10:11]
+ include/llvm/Support/CachePruning.h [10:11]
+ include/llvm/Support/Caching.h [10:11]
+ include/llvm/Support/Capacity.h [10:11]
+ include/llvm/Support/Casting.h [10:11]
+ include/llvm/Support/CheckedArithmetic.h [10:11]
+ include/llvm/Support/Chrono.h [10:11]
+ include/llvm/Support/CodeGen.h [10:11]
+ include/llvm/Support/CodeGenCoverage.h [10:11]
+ include/llvm/Support/CommandLine.h [10:11]
+ include/llvm/Support/Compiler.h [10:11]
+ include/llvm/Support/Compression.h [10:11]
+ include/llvm/Support/CrashRecoveryContext.h [10:11]
+ include/llvm/Support/DJB.h [10:11]
+ include/llvm/Support/DOTGraphTraits.h [10:11]
+ include/llvm/Support/DXILOperationCommon.h [10:11]
+ include/llvm/Support/DataExtractor.h [10:11]
+ include/llvm/Support/DataTypes.h [10:11]
+ include/llvm/Support/Debug.h [10:11]
+ include/llvm/Support/DebugCounter.h [10:11]
+ include/llvm/Support/Discriminator.h [10:11]
+ include/llvm/Support/DivisionByConstantInfo.h [10:11]
+ include/llvm/Support/Duration.h [10:11]
+ include/llvm/Support/DynamicLibrary.h [10:11]
+ include/llvm/Support/ELFAttributeParser.h [10:11]
+ include/llvm/Support/ELFAttributes.h [10:11]
+ include/llvm/Support/Endian.h [10:11]
+ include/llvm/Support/EndianStream.h [10:11]
+ include/llvm/Support/Errc.h [10:11]
+ include/llvm/Support/Errno.h [10:11]
+ include/llvm/Support/Error.h [10:11]
+ include/llvm/Support/ErrorHandling.h [10:11]
+ include/llvm/Support/ErrorOr.h [10:11]
+ include/llvm/Support/ExitCodes.h [10:11]
+ include/llvm/Support/ExtensibleRTTI.h [10:11]
+ include/llvm/Support/FileCollector.h [10:11]
+ include/llvm/Support/FileOutputBuffer.h [10:11]
+ include/llvm/Support/FileSystem.h [10:11]
+ include/llvm/Support/FileSystem/UniqueID.h [10:11]
+ include/llvm/Support/FileUtilities.h [10:11]
+ include/llvm/Support/Format.h [10:11]
+ include/llvm/Support/FormatAdapters.h [10:11]
+ include/llvm/Support/FormatCommon.h [10:11]
+ include/llvm/Support/FormatProviders.h [10:11]
+ include/llvm/Support/FormatVariadic.h [10:11]
+ include/llvm/Support/FormatVariadicDetails.h [10:11]
+ include/llvm/Support/FormattedStream.h [10:11]
+ include/llvm/Support/GenericDomTree.h [10:11]
+ include/llvm/Support/GenericDomTreeConstruction.h [10:11]
+ include/llvm/Support/GenericIteratedDominanceFrontier.h [10:11]
+ include/llvm/Support/GlobPattern.h [10:11]
+ include/llvm/Support/GraphWriter.h [10:11]
+ include/llvm/Support/HashBuilder.h [10:11]
+ include/llvm/Support/Host.h [10:11]
+ include/llvm/Support/InitLLVM.h [10:11]
+ include/llvm/Support/InstructionCost.h [10:11]
+ include/llvm/Support/ItaniumManglingCanonicalizer.h [10:11]
+ include/llvm/Support/JSON.h [10:11]
+ include/llvm/Support/KnownBits.h [10:11]
+ include/llvm/Support/LEB128.h [10:11]
+ include/llvm/Support/LineIterator.h [10:11]
+ include/llvm/Support/LockFileManager.h [10:11]
+ include/llvm/Support/LoongArchTargetParser.h [10:11]
+ include/llvm/Support/LowLevelTypeImpl.h [10:11]
+ include/llvm/Support/MSP430AttributeParser.h [10:11]
+ include/llvm/Support/MSP430Attributes.h [10:11]
+ include/llvm/Support/MSVCErrorWorkarounds.h [10:11]
+ include/llvm/Support/MachineValueType.h [10:11]
+ include/llvm/Support/ManagedStatic.h [10:11]
+ include/llvm/Support/MathExtras.h [10:11]
+ include/llvm/Support/MemAlloc.h [10:11]
+ include/llvm/Support/Memory.h [10:11]
+ include/llvm/Support/MemoryBuffer.h [10:11]
+ include/llvm/Support/MemoryBufferRef.h [10:11]
+ include/llvm/Support/MipsABIFlags.h [10:11]
+ include/llvm/Support/ModRef.h [10:11]
+ include/llvm/Support/Mutex.h [10:11]
+ include/llvm/Support/NativeFormatting.h [10:11]
+ include/llvm/Support/OnDiskHashTable.h [10:11]
+ include/llvm/Support/OptimizedStructLayout.h [10:11]
+ include/llvm/Support/PGOOptions.h [10:11]
+ include/llvm/Support/Parallel.h [10:11]
+ include/llvm/Support/Path.h [10:11]
+ include/llvm/Support/PluginLoader.h [10:11]
+ include/llvm/Support/PointerLikeTypeTraits.h [10:11]
+ include/llvm/Support/PrettyStackTrace.h [10:11]
+ include/llvm/Support/Printable.h [10:11]
+ include/llvm/Support/Process.h [10:11]
+ include/llvm/Support/Program.h [10:11]
+ include/llvm/Support/RISCVAttributeParser.h [10:11]
+ include/llvm/Support/RISCVAttributes.h [10:11]
+ include/llvm/Support/RISCVISAInfo.h [10:11]
+ include/llvm/Support/RWMutex.h [10:11]
+ include/llvm/Support/RandomNumberGenerator.h [10:11]
+ include/llvm/Support/Recycler.h [10:11]
+ include/llvm/Support/RecyclingAllocator.h [10:11]
+ include/llvm/Support/Regex.h [10:11]
+ include/llvm/Support/Registry.h [10:11]
+ include/llvm/Support/SHA1.h [10:11]
+ include/llvm/Support/SHA256.h [10:11]
+ include/llvm/Support/SMLoc.h [10:11]
+ include/llvm/Support/SMTAPI.h [10:11]
+ include/llvm/Support/SaveAndRestore.h [10:11]
+ include/llvm/Support/ScaledNumber.h [10:11]
+ include/llvm/Support/ScopedPrinter.h [10:11]
+ include/llvm/Support/Signals.h [10:11]
+ include/llvm/Support/Signposts.h [10:11]
+ include/llvm/Support/SmallVectorMemoryBuffer.h [10:11]
+ include/llvm/Support/SourceMgr.h [10:11]
+ include/llvm/Support/SpecialCaseList.h [10:11]
+ include/llvm/Support/StringSaver.h [10:11]
+ include/llvm/Support/SuffixTree.h [10:11]
+ include/llvm/Support/SwapByteOrder.h [10:11]
+ include/llvm/Support/SymbolRemappingReader.h [10:11]
+ include/llvm/Support/SystemUtils.h [10:11]
+ include/llvm/Support/TarWriter.h [10:11]
+ include/llvm/Support/TargetOpcodes.def [3:4]
+ include/llvm/Support/TargetParser.h [10:11]
+ include/llvm/Support/TargetSelect.h [10:11]
+ include/llvm/Support/TaskQueue.h [10:11]
+ include/llvm/Support/ThreadPool.h [10:11]
+ include/llvm/Support/Threading.h [10:11]
+ include/llvm/Support/TimeProfiler.h [10:11]
+ include/llvm/Support/Timer.h [10:11]
+ include/llvm/Support/ToolOutputFile.h [10:11]
+ include/llvm/Support/TrailingObjects.h [10:11]
+ include/llvm/Support/TrigramIndex.h [10:11]
+ include/llvm/Support/TypeName.h [10:11]
+ include/llvm/Support/TypeSize.h [10:11]
+ include/llvm/Support/Unicode.h [10:11]
+ include/llvm/Support/UnicodeCharRanges.h [10:11]
+ include/llvm/Support/Valgrind.h [10:11]
+ include/llvm/Support/VersionTuple.h [10:11]
+ include/llvm/Support/VirtualFileSystem.h [10:11]
+ include/llvm/Support/Watchdog.h [10:11]
+ include/llvm/Support/Win64EH.h [10:11]
+ include/llvm/Support/Windows/WindowsSupport.h [10:11]
+ include/llvm/Support/WindowsError.h [10:11]
+ include/llvm/Support/WithColor.h [10:11]
+ include/llvm/Support/X86DisassemblerDecoderCommon.h [10:11]
+ include/llvm/Support/X86TargetParser.def [3:4]
+ include/llvm/Support/X86TargetParser.h [10:11]
+ include/llvm/Support/YAMLParser.h [10:11]
+ include/llvm/Support/YAMLTraits.h [10:11]
+ include/llvm/Support/circular_raw_ostream.h [10:11]
+ include/llvm/Support/raw_os_ostream.h [10:11]
+ include/llvm/Support/raw_ostream.h [10:11]
+ include/llvm/Support/raw_sha1_ostream.h [10:11]
+ include/llvm/Support/thread.h [10:11]
+ include/llvm/Support/type_traits.h [10:11]
+ include/llvm/TableGen/Automaton.td [3:4]
+ include/llvm/TableGen/Error.h [10:11]
+ include/llvm/TableGen/Main.h [10:11]
+ include/llvm/TableGen/Parser.h [10:11]
+ include/llvm/TableGen/Record.h [10:11]
+ include/llvm/TableGen/SearchableTable.td [3:4]
+ include/llvm/TableGen/SetTheory.h [10:11]
+ include/llvm/TableGen/StringMatcher.h [10:11]
+ include/llvm/TableGen/StringToOffsetTable.h [10:11]
+ include/llvm/TableGen/TableGenBackend.h [10:11]
+ include/llvm/Target/CGPassBuilderOption.h [10:11]
+ include/llvm/Target/CodeGenCWrappers.h [10:11]
+ include/llvm/Target/GenericOpcodes.td [3:4]
+ include/llvm/Target/GlobalISel/Combine.td [3:4]
+ include/llvm/Target/GlobalISel/RegisterBank.td [3:4]
+ include/llvm/Target/GlobalISel/SelectionDAGCompat.td [3:4]
+ include/llvm/Target/GlobalISel/Target.td [3:4]
+ include/llvm/Target/Target.td [3:4]
+ include/llvm/Target/TargetCallingConv.td [3:4]
+ include/llvm/Target/TargetInstrPredicate.td [3:4]
+ include/llvm/Target/TargetIntrinsicInfo.h [10:11]
+ include/llvm/Target/TargetItinerary.td [3:4]
+ include/llvm/Target/TargetLoweringObjectFile.h [10:11]
+ include/llvm/Target/TargetMachine.h [10:11]
+ include/llvm/Target/TargetOptions.h [10:11]
+ include/llvm/Target/TargetPfmCounters.td [3:4]
+ include/llvm/Target/TargetSchedule.td [3:4]
+ include/llvm/Target/TargetSelectionDAG.td [3:4]
+ include/llvm/TargetParser/AArch64TargetParser.h [10:11]
+ include/llvm/TargetParser/ARMTargetParser.def [3:4]
+ include/llvm/TargetParser/ARMTargetParser.h [10:11]
+ include/llvm/TargetParser/ARMTargetParserCommon.h [10:11]
+ include/llvm/TargetParser/CSKYTargetParser.def [3:4]
+ include/llvm/TargetParser/CSKYTargetParser.h [11:12]
+ include/llvm/TargetParser/Host.h [10:11]
+ include/llvm/TargetParser/LoongArchTargetParser.h [10:11]
+ include/llvm/TargetParser/RISCVTargetParser.h [10:11]
+ include/llvm/TargetParser/TargetParser.h [10:11]
+ include/llvm/TargetParser/Triple.h [10:11]
+ include/llvm/TargetParser/X86TargetParser.def [3:4]
+ include/llvm/TargetParser/X86TargetParser.h [10:11]
+ include/llvm/Testing/ADT/StringMap.h [10:11]
+ include/llvm/Testing/ADT/StringMapEntry.h [10:11]
+ include/llvm/Testing/Annotations/Annotations.h [10:11]
+ include/llvm/Testing/Support/Error.h [10:11]
+ include/llvm/Testing/Support/SupportHelpers.h [10:11]
+ include/llvm/TextAPI/Architecture.def [3:4]
+ include/llvm/TextAPI/Architecture.h [10:11]
+ include/llvm/TextAPI/ArchitectureSet.h [10:11]
+ include/llvm/TextAPI/InterfaceFile.h [10:11]
+ include/llvm/TextAPI/PackedVersion.h [10:11]
+ include/llvm/TextAPI/Platform.h [10:11]
+ include/llvm/TextAPI/Symbol.h [10:11]
+ include/llvm/TextAPI/Target.h [10:11]
+ include/llvm/TextAPI/TextAPIReader.h [10:11]
+ include/llvm/TextAPI/TextAPIWriter.h [10:11]
+ include/llvm/ToolDrivers/llvm-dlltool/DlltoolDriver.h [10:11]
+ include/llvm/ToolDrivers/llvm-lib/LibDriver.h [10:11]
+ include/llvm/Transforms/AggressiveInstCombine/AggressiveInstCombine.h [10:11]
+ include/llvm/Transforms/CFGuard.h [10:11]
+ include/llvm/Transforms/Coroutines/CoroCleanup.h [10:11]
+ include/llvm/Transforms/Coroutines/CoroConditionalWrapper.h [10:11]
+ include/llvm/Transforms/Coroutines/CoroEarly.h [10:11]
+ include/llvm/Transforms/Coroutines/CoroElide.h [10:11]
+ include/llvm/Transforms/Coroutines/CoroSplit.h [10:11]
+ include/llvm/Transforms/IPO.h [10:11]
+ include/llvm/Transforms/IPO/AlwaysInliner.h [10:11]
+ include/llvm/Transforms/IPO/Annotation2Metadata.h [10:11]
+ include/llvm/Transforms/IPO/ArgumentPromotion.h [10:11]
+ include/llvm/Transforms/IPO/Attributor.h [10:11]
+ include/llvm/Transforms/IPO/BlockExtractor.h [10:11]
+ include/llvm/Transforms/IPO/CalledValuePropagation.h [10:11]
+ include/llvm/Transforms/IPO/ConstantMerge.h [10:11]
+ include/llvm/Transforms/IPO/CrossDSOCFI.h [10:11]
+ include/llvm/Transforms/IPO/DeadArgumentElimination.h [10:11]
+ include/llvm/Transforms/IPO/ElimAvailExtern.h [10:11]
+ include/llvm/Transforms/IPO/ExtractGV.h [10:11]
+ include/llvm/Transforms/IPO/ForceFunctionAttrs.h [10:11]
+ include/llvm/Transforms/IPO/FunctionAttrs.h [10:11]
+ include/llvm/Transforms/IPO/FunctionImport.h [10:11]
+ include/llvm/Transforms/IPO/FunctionSpecialization.h [10:11]
+ include/llvm/Transforms/IPO/GlobalDCE.h [10:11]
+ include/llvm/Transforms/IPO/GlobalOpt.h [10:11]
+ include/llvm/Transforms/IPO/GlobalSplit.h [10:11]
+ include/llvm/Transforms/IPO/HotColdSplitting.h [10:11]
+ include/llvm/Transforms/IPO/IROutliner.h [10:11]
+ include/llvm/Transforms/IPO/InferFunctionAttrs.h [10:11]
+ include/llvm/Transforms/IPO/Inliner.h [10:11]
+ include/llvm/Transforms/IPO/Internalize.h [10:11]
+ include/llvm/Transforms/IPO/LoopExtractor.h [10:11]
+ include/llvm/Transforms/IPO/LowerTypeTests.h [10:11]
+ include/llvm/Transforms/IPO/MergeFunctions.h [10:11]
+ include/llvm/Transforms/IPO/ModuleInliner.h [10:11]
+ include/llvm/Transforms/IPO/OpenMPOpt.h [10:11]
+ include/llvm/Transforms/IPO/PartialInlining.h [10:11]
+ include/llvm/Transforms/IPO/PassManagerBuilder.h [10:11]
+ include/llvm/Transforms/IPO/ProfiledCallGraph.h [10:11]
+ include/llvm/Transforms/IPO/SCCP.h [10:11]
+ include/llvm/Transforms/IPO/SampleContextTracker.h [10:11]
+ include/llvm/Transforms/IPO/SampleProfile.h [10:11]
+ include/llvm/Transforms/IPO/SampleProfileProbe.h [10:11]
+ include/llvm/Transforms/IPO/StripDeadPrototypes.h [10:11]
+ include/llvm/Transforms/IPO/StripSymbols.h [10:11]
+ include/llvm/Transforms/IPO/SyntheticCountsPropagation.h [10:11]
+ include/llvm/Transforms/IPO/ThinLTOBitcodeWriter.h [10:11]
+ include/llvm/Transforms/IPO/WholeProgramDevirt.h [10:11]
+ include/llvm/Transforms/InstCombine/InstCombine.h [10:11]
+ include/llvm/Transforms/InstCombine/InstCombiner.h [10:11]
+ include/llvm/Transforms/Instrumentation.h [10:11]
+ include/llvm/Transforms/Instrumentation/AddressSanitizer.h [10:11]
+ include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h [10:11]
+ include/llvm/Transforms/Instrumentation/AddressSanitizerOptions.h [10:11]
+ include/llvm/Transforms/Instrumentation/BoundsChecking.h [10:11]
+ include/llvm/Transforms/Instrumentation/CGProfile.h [10:11]
+ include/llvm/Transforms/Instrumentation/ControlHeightReduction.h [10:11]
+ include/llvm/Transforms/Instrumentation/DataFlowSanitizer.h [10:11]
+ include/llvm/Transforms/Instrumentation/GCOVProfiler.h [10:11]
+ include/llvm/Transforms/Instrumentation/HWAddressSanitizer.h [10:11]
+ include/llvm/Transforms/Instrumentation/InstrOrderFile.h [10:11]
+ include/llvm/Transforms/Instrumentation/InstrProfiling.h [10:11]
+ include/llvm/Transforms/Instrumentation/KCFI.h [10:11]
+ include/llvm/Transforms/Instrumentation/MemProfiler.h [10:11]
+ include/llvm/Transforms/Instrumentation/MemorySanitizer.h [10:11]
+ include/llvm/Transforms/Instrumentation/PGOInstrumentation.h [10:11]
+ include/llvm/Transforms/Instrumentation/PoisonChecking.h [10:11]
+ include/llvm/Transforms/Instrumentation/SanitizerBinaryMetadata.h [10:11]
+ include/llvm/Transforms/Instrumentation/SanitizerCoverage.h [12:13]
+ include/llvm/Transforms/Instrumentation/ThreadSanitizer.h [10:11]
+ include/llvm/Transforms/ObjCARC.h [10:11]
+ include/llvm/Transforms/Scalar.h [10:11]
+ include/llvm/Transforms/Scalar/ADCE.h [10:11]
+ include/llvm/Transforms/Scalar/AlignmentFromAssumptions.h [10:11]
+ include/llvm/Transforms/Scalar/AnnotationRemarks.h [10:11]
+ include/llvm/Transforms/Scalar/BDCE.h [10:11]
+ include/llvm/Transforms/Scalar/CallSiteSplitting.h [10:11]
+ include/llvm/Transforms/Scalar/ConstantHoisting.h [10:11]
+ include/llvm/Transforms/Scalar/ConstraintElimination.h [10:11]
+ include/llvm/Transforms/Scalar/CorrelatedValuePropagation.h [10:11]
+ include/llvm/Transforms/Scalar/DCE.h [10:11]
+ include/llvm/Transforms/Scalar/DFAJumpThreading.h [10:11]
+ include/llvm/Transforms/Scalar/DeadStoreElimination.h [10:11]
+ include/llvm/Transforms/Scalar/DivRemPairs.h [10:11]
+ include/llvm/Transforms/Scalar/EarlyCSE.h [10:11]
+ include/llvm/Transforms/Scalar/FlattenCFG.h [10:11]
+ include/llvm/Transforms/Scalar/Float2Int.h [10:11]
+ include/llvm/Transforms/Scalar/GVN.h [10:11]
+ include/llvm/Transforms/Scalar/GVNExpression.h [10:11]
+ include/llvm/Transforms/Scalar/GuardWidening.h [10:11]
+ include/llvm/Transforms/Scalar/IVUsersPrinter.h [10:11]
+ include/llvm/Transforms/Scalar/IndVarSimplify.h [10:11]
+ include/llvm/Transforms/Scalar/InductiveRangeCheckElimination.h [10:11]
+ include/llvm/Transforms/Scalar/InferAddressSpaces.h [10:11]
+ include/llvm/Transforms/Scalar/InstSimplifyPass.h [10:11]
+ include/llvm/Transforms/Scalar/JumpThreading.h [10:11]
+ include/llvm/Transforms/Scalar/LICM.h [10:11]
+ include/llvm/Transforms/Scalar/LoopAccessAnalysisPrinter.h [10:11]
+ include/llvm/Transforms/Scalar/LoopBoundSplit.h [10:11]
+ include/llvm/Transforms/Scalar/LoopDataPrefetch.h [11:12]
+ include/llvm/Transforms/Scalar/LoopDeletion.h [10:11]
+ include/llvm/Transforms/Scalar/LoopDistribute.h [10:11]
+ include/llvm/Transforms/Scalar/LoopFlatten.h [10:11]
+ include/llvm/Transforms/Scalar/LoopFuse.h [10:11]
+ include/llvm/Transforms/Scalar/LoopIdiomRecognize.h [10:11]
+ include/llvm/Transforms/Scalar/LoopInstSimplify.h [10:11]
+ include/llvm/Transforms/Scalar/LoopInterchange.h [10:11]
+ include/llvm/Transforms/Scalar/LoopLoadElimination.h [10:11]
+ include/llvm/Transforms/Scalar/LoopPassManager.h [10:11]
+ include/llvm/Transforms/Scalar/LoopPredication.h [10:11]
+ include/llvm/Transforms/Scalar/LoopReroll.h [10:11]
+ include/llvm/Transforms/Scalar/LoopRotation.h [10:11]
+ include/llvm/Transforms/Scalar/LoopSimplifyCFG.h [10:11]
+ include/llvm/Transforms/Scalar/LoopSink.h [10:11]
+ include/llvm/Transforms/Scalar/LoopStrengthReduce.h [10:11]
+ include/llvm/Transforms/Scalar/LoopUnrollAndJamPass.h [10:11]
+ include/llvm/Transforms/Scalar/LoopUnrollPass.h [10:11]
+ include/llvm/Transforms/Scalar/LoopVersioningLICM.h [10:11]
+ include/llvm/Transforms/Scalar/LowerAtomicPass.h [10:11]
+ include/llvm/Transforms/Scalar/LowerConstantIntrinsics.h [10:11]
+ include/llvm/Transforms/Scalar/LowerExpectIntrinsic.h [10:11]
+ include/llvm/Transforms/Scalar/LowerGuardIntrinsic.h [10:11]
+ include/llvm/Transforms/Scalar/LowerMatrixIntrinsics.h [10:11]
+ include/llvm/Transforms/Scalar/LowerWidenableCondition.h [10:11]
+ include/llvm/Transforms/Scalar/MakeGuardsExplicit.h [10:11]
+ include/llvm/Transforms/Scalar/MemCpyOptimizer.h [10:11]
+ include/llvm/Transforms/Scalar/MergeICmps.h [10:11]
+ include/llvm/Transforms/Scalar/MergedLoadStoreMotion.h [10:11]
+ include/llvm/Transforms/Scalar/NaryReassociate.h [10:11]
+ include/llvm/Transforms/Scalar/NewGVN.h [10:11]
+ include/llvm/Transforms/Scalar/PartiallyInlineLibCalls.h [10:11]
+ include/llvm/Transforms/Scalar/Reassociate.h [10:11]
+ include/llvm/Transforms/Scalar/Reg2Mem.h [10:11]
+ include/llvm/Transforms/Scalar/RewriteStatepointsForGC.h [10:11]
+ include/llvm/Transforms/Scalar/SCCP.h [10:11]
+ include/llvm/Transforms/Scalar/SROA.h [10:11]
+ include/llvm/Transforms/Scalar/ScalarizeMaskedMemIntrin.h [11:12]
+ include/llvm/Transforms/Scalar/Scalarizer.h [10:11]
+ include/llvm/Transforms/Scalar/SeparateConstOffsetFromGEP.h [10:11]
+ include/llvm/Transforms/Scalar/SimpleLoopUnswitch.h [10:11]
+ include/llvm/Transforms/Scalar/SimplifyCFG.h [10:11]
+ include/llvm/Transforms/Scalar/Sink.h [10:11]
+ include/llvm/Transforms/Scalar/SpeculativeExecution.h [10:11]
+ include/llvm/Transforms/Scalar/StraightLineStrengthReduce.h [10:11]
+ include/llvm/Transforms/Scalar/StructurizeCFG.h [10:11]
+ include/llvm/Transforms/Scalar/TLSVariableHoist.h [10:11]
+ include/llvm/Transforms/Scalar/TailRecursionElimination.h [10:11]
+ include/llvm/Transforms/Scalar/WarnMissedTransforms.h [10:11]
+ include/llvm/Transforms/Utils.h [10:11]
+ include/llvm/Transforms/Utils/AMDGPUEmitPrintf.h [10:11]
+ include/llvm/Transforms/Utils/ASanStackFrameLayout.h [10:11]
+ include/llvm/Transforms/Utils/AddDiscriminators.h [10:11]
+ include/llvm/Transforms/Utils/AssumeBundleBuilder.h [10:11]
+ include/llvm/Transforms/Utils/BasicBlockUtils.h [10:11]
+ include/llvm/Transforms/Utils/BreakCriticalEdges.h [10:11]
+ include/llvm/Transforms/Utils/BuildLibCalls.h [10:11]
+ include/llvm/Transforms/Utils/BypassSlowDivision.h [10:11]
+ include/llvm/Transforms/Utils/CallGraphUpdater.h [10:11]
+ include/llvm/Transforms/Utils/CallPromotionUtils.h [10:11]
+ include/llvm/Transforms/Utils/CanonicalizeAliases.h [10:11]
+ include/llvm/Transforms/Utils/CanonicalizeFreezeInLoops.h [10:11]
+ include/llvm/Transforms/Utils/Cloning.h [10:11]
+ include/llvm/Transforms/Utils/CodeExtractor.h [10:11]
+ include/llvm/Transforms/Utils/CodeLayout.h [10:11]
+ include/llvm/Transforms/Utils/CodeMoverUtils.h [10:11]
+ include/llvm/Transforms/Utils/CtorUtils.h [10:11]
+ include/llvm/Transforms/Utils/Debugify.h [10:11]
+ include/llvm/Transforms/Utils/EntryExitInstrumenter.h [10:11]
+ include/llvm/Transforms/Utils/EscapeEnumerator.h [10:11]
+ include/llvm/Transforms/Utils/Evaluator.h [10:11]
+ include/llvm/Transforms/Utils/FixIrreducible.h [10:11]
+ include/llvm/Transforms/Utils/FunctionComparator.h [10:11]
+ include/llvm/Transforms/Utils/FunctionImportUtils.h [10:11]
+ include/llvm/Transforms/Utils/GlobalStatus.h [10:11]
+ include/llvm/Transforms/Utils/GuardUtils.h [10:11]
+ include/llvm/Transforms/Utils/HelloWorld.h [10:11]
+ include/llvm/Transforms/Utils/InjectTLIMappings.h [10:11]
+ include/llvm/Transforms/Utils/InstructionNamer.h [10:11]
+ include/llvm/Transforms/Utils/InstructionWorklist.h [10:11]
+ include/llvm/Transforms/Utils/IntegerDivision.h [10:11]
+ include/llvm/Transforms/Utils/LCSSA.h [10:11]
+ include/llvm/Transforms/Utils/LibCallsShrinkWrap.h [10:11]
+ include/llvm/Transforms/Utils/Local.h [10:11]
+ include/llvm/Transforms/Utils/LoopPeel.h [10:11]
+ include/llvm/Transforms/Utils/LoopRotationUtils.h [10:11]
+ include/llvm/Transforms/Utils/LoopSimplify.h [10:11]
+ include/llvm/Transforms/Utils/LoopUtils.h [10:11]
+ include/llvm/Transforms/Utils/LoopVersioning.h [10:11]
+ include/llvm/Transforms/Utils/LowerAtomic.h [10:11]
+ include/llvm/Transforms/Utils/LowerGlobalDtors.h [10:11]
+ include/llvm/Transforms/Utils/LowerIFunc.h [10:11]
+ include/llvm/Transforms/Utils/LowerInvoke.h [10:11]
+ include/llvm/Transforms/Utils/LowerMemIntrinsics.h [10:11]
+ include/llvm/Transforms/Utils/LowerSwitch.h [10:11]
+ include/llvm/Transforms/Utils/MatrixUtils.h [10:11]
+ include/llvm/Transforms/Utils/Mem2Reg.h [10:11]
+ include/llvm/Transforms/Utils/MemoryOpRemark.h [10:11]
+ include/llvm/Transforms/Utils/MemoryTaggingSupport.h [9:10]
+ include/llvm/Transforms/Utils/MetaRenamer.h [10:11]
+ include/llvm/Transforms/Utils/MisExpect.h [10:11]
+ include/llvm/Transforms/Utils/ModuleUtils.h [10:11]
+ include/llvm/Transforms/Utils/NameAnonGlobals.h [10:11]
+ include/llvm/Transforms/Utils/PredicateInfo.h [10:11]
+ include/llvm/Transforms/Utils/PromoteMemToReg.h [10:11]
+ include/llvm/Transforms/Utils/RelLookupTableConverter.h [10:11]
+ include/llvm/Transforms/Utils/SCCPSolver.h [10:11]
+ include/llvm/Transforms/Utils/SSAUpdater.h [10:11]
+ include/llvm/Transforms/Utils/SSAUpdaterBulk.h [10:11]
+ include/llvm/Transforms/Utils/SSAUpdaterImpl.h [10:11]
+ include/llvm/Transforms/Utils/SampleProfileInference.h [10:11]
+ include/llvm/Transforms/Utils/SampleProfileLoaderBaseImpl.h [10:11]
+ include/llvm/Transforms/Utils/SampleProfileLoaderBaseUtil.h [10:11]
+ include/llvm/Transforms/Utils/SanitizerStats.h [10:11]
+ include/llvm/Transforms/Utils/ScalarEvolutionExpander.h [10:11]
+ include/llvm/Transforms/Utils/SimplifyCFGOptions.h [10:11]
+ include/llvm/Transforms/Utils/SimplifyIndVar.h [10:11]
+ include/llvm/Transforms/Utils/SimplifyLibCalls.h [10:11]
+ include/llvm/Transforms/Utils/SizeOpts.h [10:11]
+ include/llvm/Transforms/Utils/SplitModule.h [10:11]
+ include/llvm/Transforms/Utils/StripGCRelocates.h [10:11]
+ include/llvm/Transforms/Utils/StripNonLineTableDebugInfo.h [10:11]
+ include/llvm/Transforms/Utils/SymbolRewriter.h [10:11]
+ include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h [10:11]
+ include/llvm/Transforms/Utils/UnifyLoopExits.h [10:11]
+ include/llvm/Transforms/Utils/UnrollLoop.h [10:11]
+ include/llvm/Transforms/Utils/VNCoercion.h [10:11]
+ include/llvm/Transforms/Utils/ValueMapper.h [10:11]
+ include/llvm/Transforms/Vectorize.h [10:11]
+ include/llvm/Transforms/Vectorize/LoadStoreVectorizer.h [10:11]
+ include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h [10:11]
+ include/llvm/Transforms/Vectorize/LoopVectorize.h [10:11]
+ include/llvm/Transforms/Vectorize/SLPVectorizer.h [10:11]
+ include/llvm/Transforms/Vectorize/VectorCombine.h [10:11]
+ include/llvm/WindowsDriver/MSVCPaths.h [10:11]
+ include/llvm/WindowsManifest/WindowsManifestMerger.h [10:11]
+ include/llvm/WindowsResource/ResourceProcessor.h [10:11]
+ include/llvm/WindowsResource/ResourceScriptToken.h [10:11]
+ include/llvm/WindowsResource/ResourceScriptTokenList.h [10:11]
+ include/llvm/XRay/BlockIndexer.h [10:11]
+ include/llvm/XRay/BlockPrinter.h [10:11]
+ include/llvm/XRay/BlockVerifier.h [10:11]
+ include/llvm/XRay/FDRLogBuilder.h [10:11]
+ include/llvm/XRay/FDRRecordConsumer.h [10:11]
+ include/llvm/XRay/FDRRecordProducer.h [10:11]
+ include/llvm/XRay/FDRRecords.h [10:11]
+ include/llvm/XRay/FDRTraceExpander.h [10:11]
+ include/llvm/XRay/FDRTraceWriter.h [10:11]
+ include/llvm/XRay/FileHeaderReader.h [10:11]
+ include/llvm/XRay/Graph.h [10:11]
+ include/llvm/XRay/InstrumentationMap.h [10:11]
+ include/llvm/XRay/Profile.h [10:11]
+ include/llvm/XRay/RecordPrinter.h [10:11]
+ include/llvm/XRay/Trace.h [10:11]
+ include/llvm/XRay/XRayRecord.h [10:11]
+ include/llvm/XRay/YAMLXRayRecord.h [10:11]
+ lib/Analysis/AliasAnalysis.cpp [3:4]
+ lib/Analysis/AliasAnalysisEvaluator.cpp [3:4]
+ lib/Analysis/AliasAnalysisSummary.h [3:4]
+ lib/Analysis/AliasSetTracker.cpp [3:4]
+ lib/Analysis/Analysis.cpp [3:4]
+ lib/Analysis/AssumeBundleQueries.cpp [3:4]
+ lib/Analysis/AssumptionCache.cpp [3:4]
+ lib/Analysis/BasicAliasAnalysis.cpp [3:4]
+ lib/Analysis/BlockFrequencyInfo.cpp [3:4]
+ lib/Analysis/BlockFrequencyInfoImpl.cpp [3:4]
+ lib/Analysis/BranchProbabilityInfo.cpp [3:4]
+ lib/Analysis/CFG.cpp [3:4]
+ lib/Analysis/CFGPrinter.cpp [3:4]
+ lib/Analysis/CFGSCCPrinter.cpp [3:4]
+ lib/Analysis/CGSCCPassManager.cpp [3:4]
+ lib/Analysis/CallGraph.cpp [3:4]
+ lib/Analysis/CallGraphSCCPass.cpp [3:4]
+ lib/Analysis/CallPrinter.cpp [3:4]
+ lib/Analysis/CaptureTracking.cpp [3:4]
+ lib/Analysis/CmpInstAnalysis.cpp [3:4]
+ lib/Analysis/CodeMetrics.cpp [3:4]
+ lib/Analysis/ConstantFolding.cpp [3:4]
+ lib/Analysis/ConstraintSystem.cpp [3:4]
+ lib/Analysis/CostModel.cpp [3:4]
+ lib/Analysis/CycleAnalysis.cpp [3:4]
+ lib/Analysis/DDG.cpp [3:4]
+ lib/Analysis/DDGPrinter.cpp [3:4]
+ lib/Analysis/Delinearization.cpp [3:4]
+ lib/Analysis/DemandedBits.cpp [3:4]
+ lib/Analysis/DependenceAnalysis.cpp [3:4]
+ lib/Analysis/DependenceGraphBuilder.cpp [3:4]
+ lib/Analysis/DevelopmentModeInlineAdvisor.cpp [3:4]
+ lib/Analysis/DivergenceAnalysis.cpp [3:4]
+ lib/Analysis/DomPrinter.cpp [3:4]
+ lib/Analysis/DomTreeUpdater.cpp [3:4]
+ lib/Analysis/DominanceFrontier.cpp [3:4]
+ lib/Analysis/EHPersonalities.cpp [3:4]
+ lib/Analysis/FunctionPropertiesAnalysis.cpp [3:4]
+ lib/Analysis/GlobalsModRef.cpp [3:4]
+ lib/Analysis/GuardUtils.cpp [3:4]
+ lib/Analysis/HeatUtils.cpp [3:4]
+ lib/Analysis/IRSimilarityIdentifier.cpp [3:4]
+ lib/Analysis/IVDescriptors.cpp [3:4]
+ lib/Analysis/IVUsers.cpp [3:4]
+ lib/Analysis/ImportedFunctionsInliningStatistics.cpp [3:4]
+ lib/Analysis/IndirectCallPromotionAnalysis.cpp [3:4]
+ lib/Analysis/InlineAdvisor.cpp [3:4]
+ lib/Analysis/InlineCost.cpp [3:4]
+ lib/Analysis/InlineOrder.cpp [3:4]
+ lib/Analysis/InlineSizeEstimatorAnalysis.cpp [3:4]
+ lib/Analysis/InstCount.cpp [3:4]
+ lib/Analysis/InstructionPrecedenceTracking.cpp [3:4]
+ lib/Analysis/InstructionSimplify.cpp [3:4]
+ lib/Analysis/Interval.cpp [3:4]
+ lib/Analysis/IntervalPartition.cpp [3:4]
+ lib/Analysis/LazyBlockFrequencyInfo.cpp [3:4]
+ lib/Analysis/LazyBranchProbabilityInfo.cpp [3:4]
+ lib/Analysis/LazyCallGraph.cpp [3:4]
+ lib/Analysis/LazyValueInfo.cpp [3:4]
+ lib/Analysis/LegacyDivergenceAnalysis.cpp [4:5]
+ lib/Analysis/Lint.cpp [3:4]
+ lib/Analysis/Loads.cpp [3:4]
+ lib/Analysis/Local.cpp [3:4]
+ lib/Analysis/LoopAccessAnalysis.cpp [3:4]
+ lib/Analysis/LoopAnalysisManager.cpp [3:4]
+ lib/Analysis/LoopCacheAnalysis.cpp [5:6]
+ lib/Analysis/LoopInfo.cpp [3:4]
+ lib/Analysis/LoopNestAnalysis.cpp [3:4]
+ lib/Analysis/LoopPass.cpp [3:4]
+ lib/Analysis/LoopUnrollAnalyzer.cpp [3:4]
+ lib/Analysis/MLInlineAdvisor.cpp [3:4]
+ lib/Analysis/MemDepPrinter.cpp [3:4]
+ lib/Analysis/MemDerefPrinter.cpp [3:4]
+ lib/Analysis/MemoryBuiltins.cpp [3:4]
+ lib/Analysis/MemoryDependenceAnalysis.cpp [3:4]
+ lib/Analysis/MemoryLocation.cpp [3:4]
+ lib/Analysis/MemoryProfileInfo.cpp [3:4]
+ lib/Analysis/MemorySSA.cpp [3:4]
+ lib/Analysis/MemorySSAUpdater.cpp [3:4]
+ lib/Analysis/ModelUnderTrainingRunner.cpp [3:4]
+ lib/Analysis/ModuleDebugInfoPrinter.cpp [3:4]
+ lib/Analysis/ModuleSummaryAnalysis.cpp [3:4]
+ lib/Analysis/MustExecute.cpp [3:4]
+ lib/Analysis/NoInferenceModelRunner.cpp [3:4]
+ lib/Analysis/ObjCARCAliasAnalysis.cpp [3:4]
+ lib/Analysis/ObjCARCAnalysisUtils.cpp [3:4]
+ lib/Analysis/ObjCARCInstKind.cpp [3:4]
+ lib/Analysis/OptimizationRemarkEmitter.cpp [3:4]
+ lib/Analysis/OverflowInstAnalysis.cpp [3:4]
+ lib/Analysis/PHITransAddr.cpp [3:4]
+ lib/Analysis/PhiValues.cpp [3:4]
+ lib/Analysis/PostDominators.cpp [3:4]
+ lib/Analysis/ProfileSummaryInfo.cpp [3:4]
+ lib/Analysis/PtrUseVisitor.cpp [3:4]
+ lib/Analysis/RegionInfo.cpp [3:4]
+ lib/Analysis/RegionPass.cpp [3:4]
+ lib/Analysis/RegionPrinter.cpp [3:4]
+ lib/Analysis/ReplayInlineAdvisor.cpp [3:4]
+ lib/Analysis/ScalarEvolution.cpp [3:4]
+ lib/Analysis/ScalarEvolutionAliasAnalysis.cpp [3:4]
+ lib/Analysis/ScalarEvolutionDivision.cpp [3:4]
+ lib/Analysis/ScalarEvolutionNormalization.cpp [3:4]
+ lib/Analysis/ScopedNoAliasAA.cpp [3:4]
+ lib/Analysis/StackLifetime.cpp [3:4]
+ lib/Analysis/StackSafetyAnalysis.cpp [3:4]
+ lib/Analysis/SyncDependenceAnalysis.cpp [3:4]
+ lib/Analysis/SyntheticCountsUtils.cpp [3:4]
+ lib/Analysis/TFLiteUtils.cpp [3:4]
+ lib/Analysis/TargetLibraryInfo.cpp [3:4]
+ lib/Analysis/TargetTransformInfo.cpp [3:4]
+ lib/Analysis/TensorSpec.cpp [3:4]
+ lib/Analysis/Trace.cpp [3:4]
+ lib/Analysis/TrainingLogger.cpp [3:4]
+ lib/Analysis/TypeBasedAliasAnalysis.cpp [3:4]
+ lib/Analysis/TypeMetadataUtils.cpp [3:4]
+ lib/Analysis/UniformityAnalysis.cpp [3:4]
+ lib/Analysis/VFABIDemangling.cpp [3:4]
+ lib/Analysis/ValueLattice.cpp [3:4]
+ lib/Analysis/ValueLatticeUtils.cpp [3:4]
+ lib/Analysis/ValueTracking.cpp [3:4]
+ lib/Analysis/VectorUtils.cpp [3:4]
+ lib/AsmParser/LLLexer.cpp [3:4]
+ lib/AsmParser/LLParser.cpp [3:4]
+ lib/AsmParser/Parser.cpp [3:4]
+ lib/BinaryFormat/AMDGPUMetadataVerifier.cpp [3:4]
+ lib/BinaryFormat/COFF.cpp [3:4]
+ lib/BinaryFormat/DXContainer.cpp [4:5]
+ lib/BinaryFormat/Dwarf.cpp [3:4]
+ lib/BinaryFormat/ELF.cpp [3:4]
+ lib/BinaryFormat/MachO.cpp [3:4]
+ lib/BinaryFormat/Magic.cpp [3:4]
+ lib/BinaryFormat/Minidump.cpp [3:4]
+ lib/BinaryFormat/MsgPackDocument.cpp [3:4]
+ lib/BinaryFormat/MsgPackDocumentYAML.cpp [3:4]
+ lib/BinaryFormat/MsgPackReader.cpp [3:4]
+ lib/BinaryFormat/MsgPackWriter.cpp [3:4]
+ lib/BinaryFormat/Wasm.cpp [3:4]
+ lib/BinaryFormat/XCOFF.cpp [3:4]
+ lib/Bitcode/Reader/BitReader.cpp [3:4]
+ lib/Bitcode/Reader/BitcodeAnalyzer.cpp [3:4]
+ lib/Bitcode/Reader/BitcodeReader.cpp [3:4]
+ lib/Bitcode/Reader/MetadataLoader.cpp [3:4]
+ lib/Bitcode/Reader/MetadataLoader.h [3:4]
+ lib/Bitcode/Reader/ValueList.cpp [3:4]
+ lib/Bitcode/Reader/ValueList.h [3:4]
+ lib/Bitcode/Writer/BitWriter.cpp [3:4]
+ lib/Bitcode/Writer/BitcodeWriter.cpp [3:4]
+ lib/Bitcode/Writer/BitcodeWriterPass.cpp [3:4]
+ lib/Bitcode/Writer/ValueEnumerator.cpp [3:4]
+ lib/Bitcode/Writer/ValueEnumerator.h [3:4]
+ lib/Bitstream/Reader/BitstreamReader.cpp [3:4]
+ lib/CodeGen/AggressiveAntiDepBreaker.cpp [3:4]
+ lib/CodeGen/AggressiveAntiDepBreaker.h [3:4]
+ lib/CodeGen/AllocationOrder.cpp [3:4]
+ lib/CodeGen/AllocationOrder.h [3:4]
+ lib/CodeGen/Analysis.cpp [3:4]
+ lib/CodeGen/AsmPrinter/AIXException.cpp [3:4]
+ lib/CodeGen/AsmPrinter/ARMException.cpp [3:4]
+ lib/CodeGen/AsmPrinter/AccelTable.cpp [3:4]
+ lib/CodeGen/AsmPrinter/AddressPool.cpp [3:4]
+ lib/CodeGen/AsmPrinter/AddressPool.h [3:4]
+ lib/CodeGen/AsmPrinter/AsmPrinter.cpp [3:4]
+ lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp [3:4]
+ lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp [3:4]
+ lib/CodeGen/AsmPrinter/ByteStreamer.h [3:4]
+ lib/CodeGen/AsmPrinter/CodeViewDebug.cpp [3:4]
+ lib/CodeGen/AsmPrinter/CodeViewDebug.h [3:4]
+ lib/CodeGen/AsmPrinter/DIE.cpp [3:4]
+ lib/CodeGen/AsmPrinter/DIEHash.cpp [3:4]
+ lib/CodeGen/AsmPrinter/DIEHash.h [3:4]
+ lib/CodeGen/AsmPrinter/DbgEntityHistoryCalculator.cpp [3:4]
+ lib/CodeGen/AsmPrinter/DebugHandlerBase.cpp [3:4]
+ lib/CodeGen/AsmPrinter/DebugLocEntry.h [3:4]
+ lib/CodeGen/AsmPrinter/DebugLocStream.cpp [3:4]
+ lib/CodeGen/AsmPrinter/DebugLocStream.h [3:4]
+ lib/CodeGen/AsmPrinter/DwarfCFIException.cpp [3:4]
+ lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp [3:4]
+ lib/CodeGen/AsmPrinter/DwarfCompileUnit.h [3:4]
+ lib/CodeGen/AsmPrinter/DwarfDebug.cpp [3:4]
+ lib/CodeGen/AsmPrinter/DwarfDebug.h [3:4]
+ lib/CodeGen/AsmPrinter/DwarfException.h [3:4]
+ lib/CodeGen/AsmPrinter/DwarfExpression.cpp [3:4]
+ lib/CodeGen/AsmPrinter/DwarfExpression.h [3:4]
+ lib/CodeGen/AsmPrinter/DwarfFile.cpp [3:4]
+ lib/CodeGen/AsmPrinter/DwarfFile.h [3:4]
+ lib/CodeGen/AsmPrinter/DwarfStringPool.cpp [3:4]
+ lib/CodeGen/AsmPrinter/DwarfStringPool.h [3:4]
+ lib/CodeGen/AsmPrinter/DwarfUnit.cpp [3:4]
+ lib/CodeGen/AsmPrinter/DwarfUnit.h [3:4]
+ lib/CodeGen/AsmPrinter/EHStreamer.cpp [3:4]
+ lib/CodeGen/AsmPrinter/EHStreamer.h [3:4]
+ lib/CodeGen/AsmPrinter/ErlangGCPrinter.cpp [3:4]
+ lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp [3:4]
+ lib/CodeGen/AsmPrinter/PseudoProbePrinter.cpp [3:4]
+ lib/CodeGen/AsmPrinter/PseudoProbePrinter.h [3:4]
+ lib/CodeGen/AsmPrinter/WasmException.cpp [3:4]
+ lib/CodeGen/AsmPrinter/WasmException.h [3:4]
+ lib/CodeGen/AsmPrinter/WinCFGuard.cpp [3:4]
+ lib/CodeGen/AsmPrinter/WinCFGuard.h [3:4]
+ lib/CodeGen/AsmPrinter/WinException.cpp [3:4]
+ lib/CodeGen/AsmPrinter/WinException.h [3:4]
+ lib/CodeGen/AtomicExpandPass.cpp [3:4]
+ lib/CodeGen/BasicBlockSections.cpp [3:4]
+ lib/CodeGen/BasicBlockSectionsProfileReader.cpp [3:4]
+ lib/CodeGen/BasicTargetTransformInfo.cpp [3:4]
+ lib/CodeGen/BranchFolding.cpp [3:4]
+ lib/CodeGen/BranchFolding.h [3:4]
+ lib/CodeGen/BranchRelaxation.cpp [3:4]
+ lib/CodeGen/BreakFalseDeps.cpp [3:4]
+ lib/CodeGen/CFGuardLongjmp.cpp [3:4]
+ lib/CodeGen/CFIFixup.cpp [3:4]
+ lib/CodeGen/CFIInstrInserter.cpp [3:4]
+ lib/CodeGen/CalcSpillWeights.cpp [3:4]
+ lib/CodeGen/CallingConvLower.cpp [3:4]
+ lib/CodeGen/CodeGen.cpp [3:4]
+ lib/CodeGen/CodeGenCommonISel.cpp [3:4]
+ lib/CodeGen/CodeGenPassBuilder.cpp [3:4]
+ lib/CodeGen/CodeGenPrepare.cpp [3:4]
+ lib/CodeGen/CommandFlags.cpp [3:4]
+ lib/CodeGen/ComplexDeinterleavingPass.cpp [3:4]
+ lib/CodeGen/CriticalAntiDepBreaker.cpp [3:4]
+ lib/CodeGen/CriticalAntiDepBreaker.h [3:4]
+ lib/CodeGen/DFAPacketizer.cpp [3:4]
+ lib/CodeGen/DeadMachineInstructionElim.cpp [3:4]
+ lib/CodeGen/DetectDeadLanes.cpp [3:4]
+ lib/CodeGen/DwarfEHPrepare.cpp [3:4]
+ lib/CodeGen/EHContGuardCatchret.cpp [3:4]
+ lib/CodeGen/EarlyIfConversion.cpp [3:4]
+ lib/CodeGen/EdgeBundles.cpp [3:4]
+ lib/CodeGen/ExecutionDomainFix.cpp [3:4]
+ lib/CodeGen/ExpandLargeDivRem.cpp [3:4]
+ lib/CodeGen/ExpandLargeFpConvert.cpp [3:4]
+ lib/CodeGen/ExpandMemCmp.cpp [3:4]
+ lib/CodeGen/ExpandPostRAPseudos.cpp [3:4]
+ lib/CodeGen/ExpandReductions.cpp [3:4]
+ lib/CodeGen/ExpandVectorPredication.cpp [3:4]
+ lib/CodeGen/FEntryInserter.cpp [3:4]
+ lib/CodeGen/FaultMaps.cpp [3:4]
+ lib/CodeGen/FinalizeISel.cpp [3:4]
+ lib/CodeGen/FixupStatepointCallerSaved.cpp [3:4]
+ lib/CodeGen/FuncletLayout.cpp [3:4]
+ lib/CodeGen/GCMetadata.cpp [3:4]
+ lib/CodeGen/GCMetadataPrinter.cpp [3:4]
+ lib/CodeGen/GCRootLowering.cpp [3:4]
+ lib/CodeGen/GlobalISel/CSEInfo.cpp [3:4]
+ lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp [3:4]
+ lib/CodeGen/GlobalISel/CallLowering.cpp [3:4]
+ lib/CodeGen/GlobalISel/Combiner.cpp [3:4]
+ lib/CodeGen/GlobalISel/CombinerHelper.cpp [3:4]
+ lib/CodeGen/GlobalISel/GISelChangeObserver.cpp [3:4]
+ lib/CodeGen/GlobalISel/GISelKnownBits.cpp [3:4]
+ lib/CodeGen/GlobalISel/GlobalISel.cpp [3:4]
+ lib/CodeGen/GlobalISel/IRTranslator.cpp [3:4]
+ lib/CodeGen/GlobalISel/InlineAsmLowering.cpp [3:4]
+ lib/CodeGen/GlobalISel/InstructionSelect.cpp [3:4]
+ lib/CodeGen/GlobalISel/InstructionSelector.cpp [3:4]
+ lib/CodeGen/GlobalISel/LegacyLegalizerInfo.cpp [3:4]
+ lib/CodeGen/GlobalISel/LegalityPredicates.cpp [3:4]
+ lib/CodeGen/GlobalISel/LegalizeMutations.cpp [3:4]
+ lib/CodeGen/GlobalISel/Legalizer.cpp [3:4]
+ lib/CodeGen/GlobalISel/LegalizerHelper.cpp [3:4]
+ lib/CodeGen/GlobalISel/LegalizerInfo.cpp [3:4]
+ lib/CodeGen/GlobalISel/LoadStoreOpt.cpp [3:4]
+ lib/CodeGen/GlobalISel/Localizer.cpp [3:4]
+ lib/CodeGen/GlobalISel/LostDebugLocObserver.cpp [3:4]
+ lib/CodeGen/GlobalISel/MachineIRBuilder.cpp [3:4]
+ lib/CodeGen/GlobalISel/RegBankSelect.cpp [3:4]
+ lib/CodeGen/GlobalISel/Utils.cpp [3:4]
+ lib/CodeGen/GlobalMerge.cpp [3:4]
+ lib/CodeGen/HardwareLoops.cpp [3:4]
+ lib/CodeGen/IfConversion.cpp [3:4]
+ lib/CodeGen/ImplicitNullChecks.cpp [3:4]
+ lib/CodeGen/IndirectBrExpandPass.cpp [3:4]
+ lib/CodeGen/InlineSpiller.cpp [3:4]
+ lib/CodeGen/InterferenceCache.cpp [3:4]
+ lib/CodeGen/InterferenceCache.h [3:4]
+ lib/CodeGen/InterleavedAccessPass.cpp [3:4]
+ lib/CodeGen/InterleavedLoadCombinePass.cpp [3:4]
+ lib/CodeGen/IntrinsicLowering.cpp [3:4]
+ lib/CodeGen/JMCInstrumenter.cpp [3:4]
+ lib/CodeGen/LLVMTargetMachine.cpp [3:4]
+ lib/CodeGen/LatencyPriorityQueue.cpp [3:4]
+ lib/CodeGen/LexicalScopes.cpp [3:4]
+ lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp [3:4]
+ lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.h [3:4]
+ lib/CodeGen/LiveDebugValues/LiveDebugValues.cpp [3:4]
+ lib/CodeGen/LiveDebugValues/LiveDebugValues.h [3:4]
+ lib/CodeGen/LiveDebugValues/VarLocBasedImpl.cpp [3:4]
+ lib/CodeGen/LiveDebugVariables.cpp [3:4]
+ lib/CodeGen/LiveDebugVariables.h [3:4]
+ lib/CodeGen/LiveInterval.cpp [3:4]
+ lib/CodeGen/LiveIntervalCalc.cpp [3:4]
+ lib/CodeGen/LiveIntervalUnion.cpp [3:4]
+ lib/CodeGen/LiveIntervals.cpp [3:4]
+ lib/CodeGen/LivePhysRegs.cpp [3:4]
+ lib/CodeGen/LiveRangeCalc.cpp [3:4]
+ lib/CodeGen/LiveRangeEdit.cpp [3:4]
+ lib/CodeGen/LiveRangeShrink.cpp [3:4]
+ lib/CodeGen/LiveRangeUtils.h [3:4]
+ lib/CodeGen/LiveRegMatrix.cpp [3:4]
+ lib/CodeGen/LiveRegUnits.cpp [3:4]
+ lib/CodeGen/LiveStacks.cpp [3:4]
+ lib/CodeGen/LiveVariables.cpp [3:4]
+ lib/CodeGen/LocalStackSlotAllocation.cpp [3:4]
+ lib/CodeGen/LoopTraversal.cpp [3:4]
+ lib/CodeGen/LowLevelType.cpp [3:4]
+ lib/CodeGen/LowerEmuTLS.cpp [3:4]
+ lib/CodeGen/MBFIWrapper.cpp [3:4]
+ lib/CodeGen/MIRCanonicalizerPass.cpp [3:4]
+ lib/CodeGen/MIRFSDiscriminator.cpp [3:4]
+ lib/CodeGen/MIRNamerPass.cpp [3:4]
+ lib/CodeGen/MIRParser/MILexer.cpp [3:4]
+ lib/CodeGen/MIRParser/MILexer.h [3:4]
+ lib/CodeGen/MIRParser/MIParser.cpp [3:4]
+ lib/CodeGen/MIRParser/MIRParser.cpp [3:4]
+ lib/CodeGen/MIRPrinter.cpp [3:4]
+ lib/CodeGen/MIRPrintingPass.cpp [3:4]
+ lib/CodeGen/MIRSampleProfile.cpp [3:4]
+ lib/CodeGen/MIRVRegNamerUtils.cpp [3:4]
+ lib/CodeGen/MIRVRegNamerUtils.h [4:5]
+ lib/CodeGen/MIRYamlMapping.cpp [3:4]
+ lib/CodeGen/MLRegallocEvictAdvisor.cpp [3:4]
+ lib/CodeGen/MLRegallocEvictAdvisor.h [3:4]
+ lib/CodeGen/MLRegallocPriorityAdvisor.cpp [3:4]
+ lib/CodeGen/MachineBasicBlock.cpp [3:4]
+ lib/CodeGen/MachineBlockFrequencyInfo.cpp [3:4]
+ lib/CodeGen/MachineBlockPlacement.cpp [3:4]
+ lib/CodeGen/MachineBranchProbabilityInfo.cpp [3:4]
+ lib/CodeGen/MachineCFGPrinter.cpp [3:4]
+ lib/CodeGen/MachineCSE.cpp [3:4]
+ lib/CodeGen/MachineCheckDebugify.cpp [3:4]
+ lib/CodeGen/MachineCombiner.cpp [3:4]
+ lib/CodeGen/MachineCopyPropagation.cpp [3:4]
+ lib/CodeGen/MachineCycleAnalysis.cpp [3:4]
+ lib/CodeGen/MachineDebugify.cpp [3:4]
+ lib/CodeGen/MachineDominanceFrontier.cpp [3:4]
+ lib/CodeGen/MachineDominators.cpp [3:4]
+ lib/CodeGen/MachineFrameInfo.cpp [3:4]
+ lib/CodeGen/MachineFunction.cpp [3:4]
+ lib/CodeGen/MachineFunctionPass.cpp [3:4]
+ lib/CodeGen/MachineFunctionPrinterPass.cpp [3:4]
+ lib/CodeGen/MachineFunctionSplitter.cpp [3:4]
+ lib/CodeGen/MachineInstr.cpp [3:4]
+ lib/CodeGen/MachineInstrBundle.cpp [3:4]
+ lib/CodeGen/MachineLICM.cpp [3:4]
+ lib/CodeGen/MachineLateInstrsCleanup.cpp [3:4]
+ lib/CodeGen/MachineLoopInfo.cpp [3:4]
+ lib/CodeGen/MachineLoopUtils.cpp [3:4]
+ lib/CodeGen/MachineModuleInfo.cpp [3:4]
+ lib/CodeGen/MachineModuleInfoImpls.cpp [3:4]
+ lib/CodeGen/MachineModuleSlotTracker.cpp [3:4]
+ lib/CodeGen/MachineOperand.cpp [3:4]
+ lib/CodeGen/MachineOutliner.cpp [3:4]
+ lib/CodeGen/MachinePassManager.cpp [3:4]
+ lib/CodeGen/MachinePipeliner.cpp [3:4]
+ lib/CodeGen/MachinePostDominators.cpp [3:4]
+ lib/CodeGen/MachineRegionInfo.cpp [3:4]
+ lib/CodeGen/MachineRegisterInfo.cpp [3:4]
+ lib/CodeGen/MachineSSAContext.cpp [3:4]
+ lib/CodeGen/MachineSSAUpdater.cpp [3:4]
+ lib/CodeGen/MachineScheduler.cpp [3:4]
+ lib/CodeGen/MachineSink.cpp [3:4]
+ lib/CodeGen/MachineSizeOpts.cpp [3:4]
+ lib/CodeGen/MachineStableHash.cpp [3:4]
+ lib/CodeGen/MachineStripDebug.cpp [3:4]
+ lib/CodeGen/MachineTraceMetrics.cpp [3:4]
+ lib/CodeGen/MachineUniformityAnalysis.cpp [3:4]
+ lib/CodeGen/MachineVerifier.cpp [3:4]
+ lib/CodeGen/MacroFusion.cpp [3:4]
+ lib/CodeGen/ModuloSchedule.cpp [3:4]
+ lib/CodeGen/MultiHazardRecognizer.cpp [3:4]
+ lib/CodeGen/NonRelocatableStringpool.cpp [3:4]
+ lib/CodeGen/OptimizePHIs.cpp [3:4]
+ lib/CodeGen/PHIElimination.cpp [3:4]
+ lib/CodeGen/PHIEliminationUtils.cpp [3:4]
+ lib/CodeGen/PHIEliminationUtils.h [3:4]
+ lib/CodeGen/ParallelCG.cpp [3:4]
+ lib/CodeGen/PatchableFunction.cpp [3:4]
+ lib/CodeGen/PeepholeOptimizer.cpp [3:4]
+ lib/CodeGen/PostRAHazardRecognizer.cpp [3:4]
+ lib/CodeGen/PostRASchedulerList.cpp [3:4]
+ lib/CodeGen/PreISelIntrinsicLowering.cpp [3:4]
+ lib/CodeGen/ProcessImplicitDefs.cpp [3:4]
+ lib/CodeGen/PrologEpilogInserter.cpp [3:4]
+ lib/CodeGen/PseudoProbeInserter.cpp [3:4]
+ lib/CodeGen/PseudoSourceValue.cpp [3:4]
+ lib/CodeGen/RDFGraph.cpp [3:4]
+ lib/CodeGen/RDFLiveness.cpp [3:4]
+ lib/CodeGen/RDFRegisters.cpp [3:4]
+ lib/CodeGen/ReachingDefAnalysis.cpp [3:4]
+ lib/CodeGen/RegAllocBase.cpp [3:4]
+ lib/CodeGen/RegAllocBase.h [3:4]
+ lib/CodeGen/RegAllocBasic.cpp [3:4]
+ lib/CodeGen/RegAllocEvictionAdvisor.cpp [3:4]
+ lib/CodeGen/RegAllocEvictionAdvisor.h [3:4]
+ lib/CodeGen/RegAllocFast.cpp [3:4]
+ lib/CodeGen/RegAllocGreedy.cpp [3:4]
+ lib/CodeGen/RegAllocGreedy.h [3:4]
+ lib/CodeGen/RegAllocPBQP.cpp [3:4]
+ lib/CodeGen/RegAllocPriorityAdvisor.cpp [3:4]
+ lib/CodeGen/RegAllocPriorityAdvisor.h [3:4]
+ lib/CodeGen/RegAllocScore.cpp [3:4]
+ lib/CodeGen/RegAllocScore.h [3:4]
+ lib/CodeGen/RegUsageInfoCollector.cpp [3:4]
+ lib/CodeGen/RegUsageInfoPropagate.cpp [3:4]
+ lib/CodeGen/RegisterBank.cpp [3:4]
+ lib/CodeGen/RegisterBankInfo.cpp [3:4]
+ lib/CodeGen/RegisterClassInfo.cpp [3:4]
+ lib/CodeGen/RegisterCoalescer.cpp [3:4]
+ lib/CodeGen/RegisterCoalescer.h [3:4]
+ lib/CodeGen/RegisterPressure.cpp [3:4]
+ lib/CodeGen/RegisterScavenging.cpp [3:4]
+ lib/CodeGen/RegisterUsageInfo.cpp [3:4]
+ lib/CodeGen/RemoveRedundantDebugValues.cpp [3:4]
+ lib/CodeGen/RenameIndependentSubregs.cpp [3:4]
+ lib/CodeGen/ReplaceWithVeclib.cpp [3:4]
+ lib/CodeGen/ResetMachineFunctionPass.cpp [3:4]
+ lib/CodeGen/SafeStack.cpp [3:4]
+ lib/CodeGen/SafeStackLayout.cpp [3:4]
+ lib/CodeGen/SafeStackLayout.h [3:4]
+ lib/CodeGen/SanitizerBinaryMetadata.cpp [4:5]
+ lib/CodeGen/ScheduleDAG.cpp [3:4]
+ lib/CodeGen/ScheduleDAGInstrs.cpp [3:4]
+ lib/CodeGen/ScheduleDAGPrinter.cpp [3:4]
+ lib/CodeGen/ScoreboardHazardRecognizer.cpp [3:4]
+ lib/CodeGen/SelectOptimize.cpp [3:4]
+ lib/CodeGen/SelectionDAG/DAGCombiner.cpp [3:4]
+ lib/CodeGen/SelectionDAG/FastISel.cpp [3:4]
+ lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp [3:4]
+ lib/CodeGen/SelectionDAG/InstrEmitter.cpp [3:4]
+ lib/CodeGen/SelectionDAG/InstrEmitter.h [3:4]
+ lib/CodeGen/SelectionDAG/LegalizeDAG.cpp [3:4]
+ lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp [3:4]
+ lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp [3:4]
+ lib/CodeGen/SelectionDAG/LegalizeTypes.cpp [3:4]
+ lib/CodeGen/SelectionDAG/LegalizeTypes.h [3:4]
+ lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp [3:4]
+ lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp [3:4]
+ lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp [3:4]
+ lib/CodeGen/SelectionDAG/ResourcePriorityQueue.cpp [3:4]
+ lib/CodeGen/SelectionDAG/SDNodeDbgValue.h [3:4]
+ lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp [3:4]
+ lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp [3:4]
+ lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp [3:4]
+ lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h [3:4]
+ lib/CodeGen/SelectionDAG/ScheduleDAGVLIW.cpp [3:4]
+ lib/CodeGen/SelectionDAG/SelectionDAG.cpp [3:4]
+ lib/CodeGen/SelectionDAG/SelectionDAGAddressAnalysis.cpp [3:4]
+ lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp [3:4]
+ lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h [3:4]
+ lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp [3:4]
+ lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp [3:4]
+ lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp [3:4]
+ lib/CodeGen/SelectionDAG/SelectionDAGTargetInfo.cpp [3:4]
+ lib/CodeGen/SelectionDAG/StatepointLowering.cpp [3:4]
+ lib/CodeGen/SelectionDAG/StatepointLowering.h [3:4]
+ lib/CodeGen/SelectionDAG/TargetLowering.cpp [3:4]
+ lib/CodeGen/ShadowStackGCLowering.cpp [3:4]
+ lib/CodeGen/ShrinkWrap.cpp [3:4]
+ lib/CodeGen/SjLjEHPrepare.cpp [3:4]
+ lib/CodeGen/SlotIndexes.cpp [3:4]
+ lib/CodeGen/SpillPlacement.cpp [3:4]
+ lib/CodeGen/SpillPlacement.h [3:4]
+ lib/CodeGen/SplitKit.cpp [3:4]
+ lib/CodeGen/SplitKit.h [3:4]
+ lib/CodeGen/StackColoring.cpp [3:4]
+ lib/CodeGen/StackFrameLayoutAnalysisPass.cpp [4:5]
+ lib/CodeGen/StackMapLivenessAnalysis.cpp [3:4]
+ lib/CodeGen/StackMaps.cpp [3:4]
+ lib/CodeGen/StackProtector.cpp [3:4]
+ lib/CodeGen/StackSlotColoring.cpp [3:4]
+ lib/CodeGen/SwiftErrorValueTracking.cpp [3:4]
+ lib/CodeGen/SwitchLoweringUtils.cpp [3:4]
+ lib/CodeGen/TailDuplication.cpp [3:4]
+ lib/CodeGen/TailDuplicator.cpp [3:4]
+ lib/CodeGen/TargetFrameLoweringImpl.cpp [3:4]
+ lib/CodeGen/TargetInstrInfo.cpp [3:4]
+ lib/CodeGen/TargetLoweringBase.cpp [3:4]
+ lib/CodeGen/TargetLoweringObjectFileImpl.cpp [3:4]
+ lib/CodeGen/TargetOptionsImpl.cpp [3:4]
+ lib/CodeGen/TargetPassConfig.cpp [3:4]
+ lib/CodeGen/TargetRegisterInfo.cpp [3:4]
+ lib/CodeGen/TargetSchedule.cpp [3:4]
+ lib/CodeGen/TargetSubtargetInfo.cpp [3:4]
+ lib/CodeGen/TwoAddressInstructionPass.cpp [3:4]
+ lib/CodeGen/TypePromotion.cpp [3:4]
+ lib/CodeGen/UnreachableBlockElim.cpp [3:4]
+ lib/CodeGen/VLIWMachineScheduler.cpp [3:4]
+ lib/CodeGen/ValueTypes.cpp [3:4]
+ lib/CodeGen/VirtRegMap.cpp [3:4]
+ lib/CodeGen/WasmEHPrepare.cpp [3:4]
+ lib/CodeGen/WinEHPrepare.cpp [3:4]
+ lib/CodeGen/XRayInstrumentation.cpp [3:4]
+ lib/DWARFLinker/DWARFLinker.cpp [3:4]
+ lib/DWARFLinker/DWARFLinkerCompileUnit.cpp [3:4]
+ lib/DWARFLinker/DWARFLinkerDeclContext.cpp [3:4]
+ lib/DWARFLinker/DWARFStreamer.cpp [3:4]
+ lib/DWARFLinkerParallel/DWARFLinker.cpp [3:4]
+ lib/DWP/DWP.cpp [3:4]
+ lib/DebugInfo/CodeView/AppendingTypeTableBuilder.cpp [3:4]
+ lib/DebugInfo/CodeView/CVSymbolVisitor.cpp [3:4]
+ lib/DebugInfo/CodeView/CVTypeVisitor.cpp [3:4]
+ lib/DebugInfo/CodeView/CodeViewError.cpp [3:4]
+ lib/DebugInfo/CodeView/CodeViewRecordIO.cpp [3:4]
+ lib/DebugInfo/CodeView/DebugChecksumsSubsection.cpp [3:4]
+ lib/DebugInfo/CodeView/DebugCrossExSubsection.cpp [3:4]
+ lib/DebugInfo/CodeView/DebugCrossImpSubsection.cpp [3:4]
+ lib/DebugInfo/CodeView/DebugFrameDataSubsection.cpp [3:4]
+ lib/DebugInfo/CodeView/DebugInlineeLinesSubsection.cpp [3:4]
+ lib/DebugInfo/CodeView/DebugLinesSubsection.cpp [3:4]
+ lib/DebugInfo/CodeView/DebugStringTableSubsection.cpp [3:4]
+ lib/DebugInfo/CodeView/DebugSubsection.cpp [3:4]
+ lib/DebugInfo/CodeView/DebugSubsectionRecord.cpp [3:4]
+ lib/DebugInfo/CodeView/DebugSubsectionVisitor.cpp [3:4]
+ lib/DebugInfo/CodeView/DebugSymbolRVASubsection.cpp [3:4]
+ lib/DebugInfo/CodeView/DebugSymbolsSubsection.cpp [3:4]
+ lib/DebugInfo/CodeView/EnumTables.cpp [3:4]
+ lib/DebugInfo/CodeView/Formatters.cpp [3:4]
+ lib/DebugInfo/CodeView/GlobalTypeTableBuilder.cpp [3:4]
+ lib/DebugInfo/CodeView/LazyRandomTypeCollection.cpp [3:4]
+ lib/DebugInfo/CodeView/Line.cpp [3:4]
+ lib/DebugInfo/CodeView/MergingTypeTableBuilder.cpp [3:4]
+ lib/DebugInfo/CodeView/RecordName.cpp [3:4]
+ lib/DebugInfo/CodeView/RecordSerialization.cpp [3:4]
+ lib/DebugInfo/CodeView/SimpleTypeSerializer.cpp [3:4]
+ lib/DebugInfo/CodeView/StringsAndChecksums.cpp [3:4]
+ lib/DebugInfo/CodeView/SymbolDumper.cpp [3:4]
+ lib/DebugInfo/CodeView/SymbolRecordHelpers.cpp [3:4]
+ lib/DebugInfo/CodeView/SymbolRecordMapping.cpp [3:4]
+ lib/DebugInfo/CodeView/SymbolSerializer.cpp [3:4]
+ lib/DebugInfo/CodeView/TypeDumpVisitor.cpp [3:4]
+ lib/DebugInfo/CodeView/TypeHashing.cpp [3:4]
+ lib/DebugInfo/CodeView/TypeIndex.cpp [3:4]
+ lib/DebugInfo/CodeView/TypeIndexDiscovery.cpp [3:4]
+ lib/DebugInfo/CodeView/TypeRecordHelpers.cpp [3:4]
+ lib/DebugInfo/CodeView/TypeRecordMapping.cpp [3:4]
+ lib/DebugInfo/CodeView/TypeStreamMerger.cpp [3:4]
+ lib/DebugInfo/CodeView/TypeTableCollection.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFAbbreviationDeclaration.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFAcceleratorTable.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFAddressRange.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFCompileUnit.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFContext.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFDataExtractor.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFDebugAbbrev.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFDebugAddr.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFDebugArangeSet.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFDebugAranges.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFDebugFrame.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFDebugInfoEntry.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFDebugLine.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFDebugLoc.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFDebugMacro.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFDebugPubTable.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFDebugRangeList.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFDebugRnglists.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFDie.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFExpression.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFFormValue.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFGdbIndex.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFListTable.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFLocationExpression.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFTypeUnit.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFUnit.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFUnitIndex.cpp [3:4]
+ lib/DebugInfo/DWARF/DWARFVerifier.cpp [3:4]
+ lib/DebugInfo/GSYM/DwarfTransformer.cpp [3:4]
+ lib/DebugInfo/GSYM/ExtractRanges.cpp [3:4]
+ lib/DebugInfo/GSYM/FileWriter.cpp [3:4]
+ lib/DebugInfo/GSYM/FunctionInfo.cpp [3:4]
+ lib/DebugInfo/GSYM/GsymCreator.cpp [3:4]
+ lib/DebugInfo/GSYM/GsymReader.cpp [3:4]
+ lib/DebugInfo/GSYM/Header.cpp [3:4]
+ lib/DebugInfo/GSYM/InlineInfo.cpp [3:4]
+ lib/DebugInfo/GSYM/LineTable.cpp [3:4]
+ lib/DebugInfo/GSYM/LookupResult.cpp [3:4]
+ lib/DebugInfo/GSYM/ObjectFileTransformer.cpp [3:4]
+ lib/DebugInfo/MSF/MSFBuilder.cpp [3:4]
+ lib/DebugInfo/MSF/MSFCommon.cpp [3:4]
+ lib/DebugInfo/MSF/MSFError.cpp [3:4]
+ lib/DebugInfo/MSF/MappedBlockStream.cpp [3:4]
+ lib/DebugInfo/PDB/GenericError.cpp [3:4]
+ lib/DebugInfo/PDB/IPDBSourceFile.cpp [3:4]
+ lib/DebugInfo/PDB/Native/DbiModuleDescriptor.cpp [3:4]
+ lib/DebugInfo/PDB/Native/DbiModuleDescriptorBuilder.cpp [3:4]
+ lib/DebugInfo/PDB/Native/DbiModuleList.cpp [3:4]
+ lib/DebugInfo/PDB/Native/DbiStream.cpp [3:4]
+ lib/DebugInfo/PDB/Native/DbiStreamBuilder.cpp [3:4]
+ lib/DebugInfo/PDB/Native/EnumTables.cpp [3:4]
+ lib/DebugInfo/PDB/Native/FormatUtil.cpp [3:4]
+ lib/DebugInfo/PDB/Native/GSIStreamBuilder.cpp [3:4]
+ lib/DebugInfo/PDB/Native/GlobalsStream.cpp [3:4]
+ lib/DebugInfo/PDB/Native/Hash.cpp [3:4]
+ lib/DebugInfo/PDB/Native/HashTable.cpp [3:4]
+ lib/DebugInfo/PDB/Native/InfoStream.cpp [3:4]
+ lib/DebugInfo/PDB/Native/InfoStreamBuilder.cpp [3:4]
+ lib/DebugInfo/PDB/Native/InjectedSourceStream.cpp [3:4]
+ lib/DebugInfo/PDB/Native/InputFile.cpp [3:4]
+ lib/DebugInfo/PDB/Native/LinePrinter.cpp [3:4]
+ lib/DebugInfo/PDB/Native/ModuleDebugStream.cpp [3:4]
+ lib/DebugInfo/PDB/Native/NamedStreamMap.cpp [3:4]
+ lib/DebugInfo/PDB/Native/NativeCompilandSymbol.cpp [3:4]
+ lib/DebugInfo/PDB/Native/NativeEnumGlobals.cpp [3:4]
+ lib/DebugInfo/PDB/Native/NativeEnumInjectedSources.cpp [3:4]
+ lib/DebugInfo/PDB/Native/NativeEnumLineNumbers.cpp [3:4]
+ lib/DebugInfo/PDB/Native/NativeEnumModules.cpp [3:4]
+ lib/DebugInfo/PDB/Native/NativeEnumSymbols.cpp [3:4]
+ lib/DebugInfo/PDB/Native/NativeEnumTypes.cpp [3:4]
+ lib/DebugInfo/PDB/Native/NativeExeSymbol.cpp [3:4]
+ lib/DebugInfo/PDB/Native/NativeFunctionSymbol.cpp [3:4]
+ lib/DebugInfo/PDB/Native/NativeInlineSiteSymbol.cpp [3:4]
+ lib/DebugInfo/PDB/Native/NativeLineNumber.cpp [3:4]
+ lib/DebugInfo/PDB/Native/NativePublicSymbol.cpp [3:4]
+ lib/DebugInfo/PDB/Native/NativeRawSymbol.cpp [3:4]
+ lib/DebugInfo/PDB/Native/NativeSession.cpp [3:4]
+ lib/DebugInfo/PDB/Native/NativeSourceFile.cpp [3:4]
+ lib/DebugInfo/PDB/Native/NativeSymbolEnumerator.cpp [3:4]
+ lib/DebugInfo/PDB/Native/NativeTypeArray.cpp [3:4]
+ lib/DebugInfo/PDB/Native/NativeTypeBuiltin.cpp [3:4]
+ lib/DebugInfo/PDB/Native/NativeTypeEnum.cpp [3:4]
+ lib/DebugInfo/PDB/Native/NativeTypeFunctionSig.cpp [3:4]
+ lib/DebugInfo/PDB/Native/NativeTypePointer.cpp [3:4]
+ lib/DebugInfo/PDB/Native/NativeTypeUDT.cpp [3:4]
+ lib/DebugInfo/PDB/Native/PDBFile.cpp [3:4]
+ lib/DebugInfo/PDB/Native/PDBFileBuilder.cpp [3:4]
+ lib/DebugInfo/PDB/Native/PDBStringTable.cpp [3:4]
+ lib/DebugInfo/PDB/Native/PDBStringTableBuilder.cpp [3:4]
+ lib/DebugInfo/PDB/Native/PublicsStream.cpp [3:4]
+ lib/DebugInfo/PDB/Native/SymbolStream.cpp [3:4]
+ lib/DebugInfo/PDB/Native/TpiHashing.cpp [3:4]
+ lib/DebugInfo/PDB/Native/TpiStream.cpp [3:4]
+ lib/DebugInfo/PDB/Native/TpiStreamBuilder.cpp [3:4]
+ lib/DebugInfo/PDB/PDB.cpp [3:4]
+ lib/DebugInfo/PDB/PDBContext.cpp [3:4]
+ lib/DebugInfo/PDB/PDBExtras.cpp [3:4]
+ lib/DebugInfo/PDB/PDBInterfaceAnchors.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymDumper.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbol.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolAnnotation.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolBlock.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolCompiland.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolCompilandDetails.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolCompilandEnv.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolCustom.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolData.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolExe.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolFunc.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolFuncDebugEnd.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolFuncDebugStart.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolLabel.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolPublicSymbol.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolThunk.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolTypeArray.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolTypeBaseClass.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolTypeBuiltin.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolTypeCustom.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolTypeDimension.cpp [4:5]
+ lib/DebugInfo/PDB/PDBSymbolTypeEnum.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolTypeFriend.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolTypeFunctionArg.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolTypeFunctionSig.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolTypeManaged.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolTypePointer.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolTypeTypedef.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolTypeUDT.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolTypeVTable.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolTypeVTableShape.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolUnknown.cpp [3:4]
+ lib/DebugInfo/PDB/PDBSymbolUsingNamespace.cpp [3:4]
+ lib/DebugInfo/PDB/UDTLayout.cpp [3:4]
+ lib/DebugInfo/Symbolize/DIPrinter.cpp [3:4]
+ lib/DebugInfo/Symbolize/Markup.cpp [3:4]
+ lib/DebugInfo/Symbolize/MarkupFilter.cpp [3:4]
+ lib/DebugInfo/Symbolize/SymbolizableObjectFile.cpp [3:4]
+ lib/DebugInfo/Symbolize/Symbolize.cpp [3:4]
+ lib/Debuginfod/BuildIDFetcher.cpp [3:4]
+ lib/Debuginfod/Debuginfod.cpp [3:4]
+ lib/Debuginfod/HTTPClient.cpp [3:4]
+ lib/Debuginfod/HTTPServer.cpp [3:4]
+ lib/Demangle/DLangDemangle.cpp [3:4]
+ lib/Demangle/Demangle.cpp [3:4]
+ lib/Demangle/ItaniumDemangle.cpp [3:4]
+ lib/Demangle/MicrosoftDemangle.cpp [3:4]
+ lib/Demangle/MicrosoftDemangleNodes.cpp [3:4]
+ lib/Demangle/RustDemangle.cpp [3:4]
+ lib/ExecutionEngine/ExecutionEngine.cpp [3:4]
+ lib/ExecutionEngine/ExecutionEngineBindings.cpp [3:4]
+ lib/ExecutionEngine/GDBRegistrationListener.cpp [3:4]
+ lib/ExecutionEngine/Interpreter/Execution.cpp [3:4]
+ lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp [3:4]
+ lib/ExecutionEngine/Interpreter/Interpreter.cpp [3:4]
+ lib/ExecutionEngine/Interpreter/Interpreter.h [3:4]
+ lib/ExecutionEngine/JITLink/COFF.cpp [3:4]
+ lib/ExecutionEngine/JITLink/COFFDirectiveParser.cpp [3:4]
+ lib/ExecutionEngine/JITLink/COFFDirectiveParser.h [3:4]
+ lib/ExecutionEngine/JITLink/COFFLinkGraphBuilder.cpp [3:4]
+ lib/ExecutionEngine/JITLink/COFFLinkGraphBuilder.h [3:4]
+ lib/ExecutionEngine/JITLink/COFF_x86_64.cpp [3:4]
+ lib/ExecutionEngine/JITLink/DWARFRecordSectionSplitter.cpp [3:4]
+ lib/ExecutionEngine/JITLink/DefineExternalSectionStartAndEndSymbols.h [3:4]
+ lib/ExecutionEngine/JITLink/EHFrameSupport.cpp [3:4]
+ lib/ExecutionEngine/JITLink/EHFrameSupportImpl.h [3:4]
+ lib/ExecutionEngine/JITLink/ELF.cpp [3:4]
+ lib/ExecutionEngine/JITLink/ELFLinkGraphBuilder.cpp [3:4]
+ lib/ExecutionEngine/JITLink/ELFLinkGraphBuilder.h [3:4]
+ lib/ExecutionEngine/JITLink/ELF_aarch64.cpp [3:4]
+ lib/ExecutionEngine/JITLink/ELF_i386.cpp [3:4]
+ lib/ExecutionEngine/JITLink/ELF_loongarch.cpp [3:4]
+ lib/ExecutionEngine/JITLink/ELF_riscv.cpp [3:4]
+ lib/ExecutionEngine/JITLink/ELF_x86_64.cpp [3:4]
+ lib/ExecutionEngine/JITLink/JITLink.cpp [3:4]
+ lib/ExecutionEngine/JITLink/JITLinkGeneric.cpp [3:4]
+ lib/ExecutionEngine/JITLink/JITLinkGeneric.h [3:4]
+ lib/ExecutionEngine/JITLink/JITLinkMemoryManager.cpp [3:4]
+ lib/ExecutionEngine/JITLink/MachO.cpp [3:4]
+ lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.cpp [3:4]
+ lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.h [3:4]
+ lib/ExecutionEngine/JITLink/MachO_arm64.cpp [3:4]
+ lib/ExecutionEngine/JITLink/MachO_x86_64.cpp [3:4]
+ lib/ExecutionEngine/JITLink/PerGraphGOTAndPLTStubsBuilder.h [3:4]
+ lib/ExecutionEngine/JITLink/SEHFrameSupport.h [3:4]
+ lib/ExecutionEngine/JITLink/aarch64.cpp [3:4]
+ lib/ExecutionEngine/JITLink/i386.cpp [3:4]
+ lib/ExecutionEngine/JITLink/loongarch.cpp [3:4]
+ lib/ExecutionEngine/JITLink/riscv.cpp [3:4]
+ lib/ExecutionEngine/JITLink/x86_64.cpp [3:4]
+ lib/ExecutionEngine/MCJIT/MCJIT.cpp [3:4]
+ lib/ExecutionEngine/MCJIT/MCJIT.h [3:4]
+ lib/ExecutionEngine/Orc/COFFPlatform.cpp [3:4]
+ lib/ExecutionEngine/Orc/COFFVCRuntimeSupport.cpp [3:4]
+ lib/ExecutionEngine/Orc/CompileOnDemandLayer.cpp [3:4]
+ lib/ExecutionEngine/Orc/CompileUtils.cpp [3:4]
+ lib/ExecutionEngine/Orc/Core.cpp [3:4]
+ lib/ExecutionEngine/Orc/DebugObjectManagerPlugin.cpp [3:4]
+ lib/ExecutionEngine/Orc/DebugUtils.cpp [3:4]
+ lib/ExecutionEngine/Orc/DebuggerSupportPlugin.cpp [3:4]
+ lib/ExecutionEngine/Orc/ELFNixPlatform.cpp [3:4]
+ lib/ExecutionEngine/Orc/EPCDebugObjectRegistrar.cpp [3:4]
+ lib/ExecutionEngine/Orc/EPCDynamicLibrarySearchGenerator.cpp [3:4]
+ lib/ExecutionEngine/Orc/EPCEHFrameRegistrar.cpp [3:4]
+ lib/ExecutionEngine/Orc/EPCGenericDylibManager.cpp [3:4]
+ lib/ExecutionEngine/Orc/EPCGenericJITLinkMemoryManager.cpp [3:4]
+ lib/ExecutionEngine/Orc/EPCGenericRTDyldMemoryManager.cpp [3:4]
+ lib/ExecutionEngine/Orc/EPCIndirectionUtils.cpp [3:4]
+ lib/ExecutionEngine/Orc/ExecutionUtils.cpp [3:4]
+ lib/ExecutionEngine/Orc/ExecutorProcessControl.cpp [3:4]
+ lib/ExecutionEngine/Orc/IRCompileLayer.cpp [3:4]
+ lib/ExecutionEngine/Orc/IRTransformLayer.cpp [3:4]
+ lib/ExecutionEngine/Orc/IndirectionUtils.cpp [3:4]
+ lib/ExecutionEngine/Orc/JITTargetMachineBuilder.cpp [3:4]
+ lib/ExecutionEngine/Orc/LLJIT.cpp [3:4]
+ lib/ExecutionEngine/Orc/Layer.cpp [3:4]
+ lib/ExecutionEngine/Orc/LazyReexports.cpp [3:4]
+ lib/ExecutionEngine/Orc/LookupAndRecordAddrs.cpp [3:4]
+ lib/ExecutionEngine/Orc/MachOPlatform.cpp [3:4]
+ lib/ExecutionEngine/Orc/Mangling.cpp [3:4]
+ lib/ExecutionEngine/Orc/MapperJITLinkMemoryManager.cpp [3:4]
+ lib/ExecutionEngine/Orc/MemoryMapper.cpp [3:4]
+ lib/ExecutionEngine/Orc/ObjectFileInterface.cpp [3:4]
+ lib/ExecutionEngine/Orc/ObjectLinkingLayer.cpp [3:4]
+ lib/ExecutionEngine/Orc/ObjectTransformLayer.cpp [3:4]
+ lib/ExecutionEngine/Orc/OrcABISupport.cpp [3:4]
+ lib/ExecutionEngine/Orc/OrcV2CBindings.cpp [3:4]
+ lib/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.cpp [3:4]
+ lib/ExecutionEngine/Orc/Shared/AllocationActions.cpp [3:4]
+ lib/ExecutionEngine/Orc/Shared/OrcError.cpp [3:4]
+ lib/ExecutionEngine/Orc/Shared/OrcRTBridge.cpp [3:4]
+ lib/ExecutionEngine/Orc/Shared/SimpleRemoteEPCUtils.cpp [3:4]
+ lib/ExecutionEngine/Orc/SimpleRemoteEPC.cpp [3:4]
+ lib/ExecutionEngine/Orc/SpeculateAnalyses.cpp [3:4]
+ lib/ExecutionEngine/Orc/Speculation.cpp [3:4]
+ lib/ExecutionEngine/Orc/TargetProcess/ExecutorSharedMemoryMapperService.cpp [3:4]
+ lib/ExecutionEngine/Orc/TargetProcess/JITLoaderGDB.cpp [3:4]
+ lib/ExecutionEngine/Orc/TargetProcess/OrcRTBootstrap.cpp [3:4]
+ lib/ExecutionEngine/Orc/TargetProcess/OrcRTBootstrap.h [3:4]
+ lib/ExecutionEngine/Orc/TargetProcess/RegisterEHFrames.cpp [3:4]
+ lib/ExecutionEngine/Orc/TargetProcess/SimpleExecutorDylibManager.cpp [3:4]
+ lib/ExecutionEngine/Orc/TargetProcess/SimpleExecutorMemoryManager.cpp [3:4]
+ lib/ExecutionEngine/Orc/TargetProcess/SimpleRemoteEPCServer.cpp [3:4]
+ lib/ExecutionEngine/Orc/TargetProcess/TargetExecutionUtils.cpp [3:4]
+ lib/ExecutionEngine/Orc/TaskDispatch.cpp [3:4]
+ lib/ExecutionEngine/Orc/ThreadSafeModule.cpp [4:5]
+ lib/ExecutionEngine/PerfJITEvents/PerfJITEventListener.cpp [3:4]
+ lib/ExecutionEngine/RuntimeDyld/JITSymbol.cpp [3:4]
+ lib/ExecutionEngine/RuntimeDyld/RTDyldMemoryManager.cpp [3:4]
+ lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp [3:4]
+ lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCOFF.cpp [3:4]
+ lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCOFF.h [3:4]
+ lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp [3:4]
+ lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCheckerImpl.h [3:4]
+ lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp [3:4]
+ lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h [3:4]
+ lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h [3:4]
+ lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp [3:4]
+ lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h [3:4]
+ lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFAArch64.h [4:5]
+ lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFI386.h [3:4]
+ lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFThumb.h [3:4]
+ lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFX86_64.h [3:4]
+ lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldELFMips.cpp [3:4]
+ lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldELFMips.h [3:4]
+ lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h [3:4]
+ lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOARM.h [3:4]
+ lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOI386.h [3:4]
+ lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOX86_64.h [3:4]
+ lib/ExecutionEngine/SectionMemoryManager.cpp [3:4]
+ lib/ExecutionEngine/TargetSelect.cpp [3:4]
+ lib/FileCheck/FileCheck.cpp [3:4]
+ lib/FileCheck/FileCheckImpl.h [3:4]
+ lib/Frontend/HLSL/HLSLResource.cpp [3:4]
+ lib/Frontend/OpenACC/ACC.cpp [3:4]
+ lib/Frontend/OpenMP/OMP.cpp [3:4]
+ lib/Frontend/OpenMP/OMPContext.cpp [3:4]
+ lib/Frontend/OpenMP/OMPIRBuilder.cpp [3:4]
+ lib/FuzzMutate/IRMutator.cpp [3:4]
+ lib/FuzzMutate/OpDescriptor.cpp [3:4]
+ lib/FuzzMutate/Operations.cpp [3:4]
+ lib/FuzzMutate/RandomIRBuilder.cpp [3:4]
+ lib/IR/AbstractCallSite.cpp [3:4]
+ lib/IR/AsmWriter.cpp [3:4]
+ lib/IR/Assumptions.cpp [3:4]
+ lib/IR/AttributeImpl.h [3:4]
+ lib/IR/Attributes.cpp [3:4]
+ lib/IR/AutoUpgrade.cpp [3:4]
+ lib/IR/BasicBlock.cpp [3:4]
+ lib/IR/BuiltinGCs.cpp [3:4]
+ lib/IR/Comdat.cpp [3:4]
+ lib/IR/ConstantFold.cpp [3:4]
+ lib/IR/ConstantRange.cpp [3:4]
+ lib/IR/Constants.cpp [3:4]
+ lib/IR/ConstantsContext.h [3:4]
+ lib/IR/Core.cpp [3:4]
+ lib/IR/DIBuilder.cpp [3:4]
+ lib/IR/DataLayout.cpp [3:4]
+ lib/IR/DebugInfo.cpp [3:4]
+ lib/IR/DebugInfoMetadata.cpp [3:4]
+ lib/IR/DebugLoc.cpp [3:4]
+ lib/IR/DiagnosticHandler.cpp [3:4]
+ lib/IR/DiagnosticInfo.cpp [3:4]
+ lib/IR/DiagnosticPrinter.cpp [3:4]
+ lib/IR/Dominators.cpp [3:4]
+ lib/IR/FPEnv.cpp [3:4]
+ lib/IR/Function.cpp [3:4]
+ lib/IR/GCStrategy.cpp [3:4]
+ lib/IR/GVMaterializer.cpp [3:4]
+ lib/IR/Globals.cpp [3:4]
+ lib/IR/IRBuilder.cpp [3:4]
+ lib/IR/IRPrintingPasses.cpp [3:4]
+ lib/IR/InlineAsm.cpp [3:4]
+ lib/IR/Instruction.cpp [3:4]
+ lib/IR/Instructions.cpp [3:4]
+ lib/IR/IntrinsicInst.cpp [3:4]
+ lib/IR/LLVMContext.cpp [3:4]
+ lib/IR/LLVMContextImpl.cpp [3:4]
+ lib/IR/LLVMContextImpl.h [3:4]
+ lib/IR/LLVMRemarkStreamer.cpp [3:4]
+ lib/IR/LegacyPassManager.cpp [3:4]
+ lib/IR/MDBuilder.cpp [3:4]
+ lib/IR/Mangler.cpp [3:4]
+ lib/IR/Metadata.cpp [3:4]
+ lib/IR/MetadataImpl.h [3:4]
+ lib/IR/Module.cpp [3:4]
+ lib/IR/ModuleSummaryIndex.cpp [3:4]
+ lib/IR/Operator.cpp [3:4]
+ lib/IR/OptBisect.cpp [3:4]
+ lib/IR/Pass.cpp [3:4]
+ lib/IR/PassInstrumentation.cpp [3:4]
+ lib/IR/PassManager.cpp [3:4]
+ lib/IR/PassRegistry.cpp [3:4]
+ lib/IR/PassTimingInfo.cpp [3:4]
+ lib/IR/PrintPasses.cpp [3:4]
+ lib/IR/ProfDataUtils.cpp [3:4]
+ lib/IR/ProfileSummary.cpp [3:4]
+ lib/IR/PseudoProbe.cpp [3:4]
+ lib/IR/ReplaceConstant.cpp [3:4]
+ lib/IR/SSAContext.cpp [3:4]
+ lib/IR/SafepointIRVerifier.cpp [3:4]
+ lib/IR/Statepoint.cpp [3:4]
+ lib/IR/StructuralHash.cpp [3:4]
+ lib/IR/SymbolTableListTraitsImpl.h [3:4]
+ lib/IR/Type.cpp [3:4]
+ lib/IR/TypeFinder.cpp [3:4]
+ lib/IR/TypedPointerType.cpp [3:4]
+ lib/IR/Use.cpp [3:4]
+ lib/IR/User.cpp [3:4]
+ lib/IR/Value.cpp [3:4]
+ lib/IR/ValueSymbolTable.cpp [3:4]
+ lib/IR/VectorBuilder.cpp [3:4]
+ lib/IR/Verifier.cpp [3:4]
+ lib/IRPrinter/IRPrintingPasses.cpp [3:4]
+ lib/IRReader/IRReader.cpp [3:4]
+ lib/InterfaceStub/ELFObjHandler.cpp [3:4]
+ lib/InterfaceStub/IFSHandler.cpp [3:4]
+ lib/InterfaceStub/IFSStub.cpp [3:4]
+ lib/LTO/LTO.cpp [3:4]
+ lib/LTO/LTOBackend.cpp [3:4]
+ lib/LTO/LTOCodeGenerator.cpp [3:4]
+ lib/LTO/LTOModule.cpp [3:4]
+ lib/LTO/SummaryBasedOptimizations.cpp [3:4]
+ lib/LTO/ThinLTOCodeGenerator.cpp [3:4]
+ lib/LTO/UpdateCompilerUsed.cpp [3:4]
+ lib/LineEditor/LineEditor.cpp [3:4]
+ lib/Linker/IRMover.cpp [3:4]
+ lib/Linker/LinkDiagnosticInfo.h [3:4]
+ lib/Linker/LinkModules.cpp [3:4]
+ lib/MC/ConstantPools.cpp [3:4]
+ lib/MC/ELFObjectWriter.cpp [3:4]
+ lib/MC/MCAsmBackend.cpp [3:4]
+ lib/MC/MCAsmInfo.cpp [3:4]
+ lib/MC/MCAsmInfoCOFF.cpp [3:4]
+ lib/MC/MCAsmInfoDarwin.cpp [3:4]
+ lib/MC/MCAsmInfoELF.cpp [3:4]
+ lib/MC/MCAsmInfoGOFF.cpp [3:4]
+ lib/MC/MCAsmInfoWasm.cpp [3:4]
+ lib/MC/MCAsmInfoXCOFF.cpp [3:4]
+ lib/MC/MCAsmMacro.cpp [3:4]
+ lib/MC/MCAsmStreamer.cpp [3:4]
+ lib/MC/MCAssembler.cpp [3:4]
+ lib/MC/MCCodeEmitter.cpp [3:4]
+ lib/MC/MCCodeView.cpp [3:4]
+ lib/MC/MCContext.cpp [3:4]
+ lib/MC/MCDXContainerStreamer.cpp [3:4]
+ lib/MC/MCDXContainerWriter.cpp [3:4]
+ lib/MC/MCDisassembler/Disassembler.cpp [3:4]
+ lib/MC/MCDisassembler/Disassembler.h [3:4]
+ lib/MC/MCDisassembler/MCDisassembler.cpp [3:4]
+ lib/MC/MCDisassembler/MCExternalSymbolizer.cpp [3:4]
+ lib/MC/MCDisassembler/MCRelocationInfo.cpp [3:4]
+ lib/MC/MCDisassembler/MCSymbolizer.cpp [3:4]
+ lib/MC/MCDwarf.cpp [3:4]
+ lib/MC/MCELFObjectTargetWriter.cpp [3:4]
+ lib/MC/MCELFStreamer.cpp [3:4]
+ lib/MC/MCExpr.cpp [3:4]
+ lib/MC/MCFragment.cpp [3:4]
+ lib/MC/MCInst.cpp [3:4]
+ lib/MC/MCInstPrinter.cpp [3:4]
+ lib/MC/MCInstrAnalysis.cpp [3:4]
+ lib/MC/MCInstrDesc.cpp [3:4]
+ lib/MC/MCInstrInfo.cpp [3:4]
+ lib/MC/MCLabel.cpp [3:4]
+ lib/MC/MCLinkerOptimizationHint.cpp [3:4]
+ lib/MC/MCMachOStreamer.cpp [3:4]
+ lib/MC/MCMachObjectTargetWriter.cpp [3:4]
+ lib/MC/MCNullStreamer.cpp [3:4]
+ lib/MC/MCObjectFileInfo.cpp [3:4]
+ lib/MC/MCObjectStreamer.cpp [3:4]
+ lib/MC/MCObjectWriter.cpp [3:4]
+ lib/MC/MCParser/AsmLexer.cpp [3:4]
+ lib/MC/MCParser/AsmParser.cpp [3:4]
+ lib/MC/MCParser/COFFAsmParser.cpp [3:4]
+ lib/MC/MCParser/COFFMasmParser.cpp [3:4]
+ lib/MC/MCParser/DarwinAsmParser.cpp [3:4]
+ lib/MC/MCParser/ELFAsmParser.cpp [3:4]
+ lib/MC/MCParser/GOFFAsmParser.cpp [3:4]
+ lib/MC/MCParser/MCAsmLexer.cpp [3:4]
+ lib/MC/MCParser/MCAsmParser.cpp [3:4]
+ lib/MC/MCParser/MCAsmParserExtension.cpp [3:4]
+ lib/MC/MCParser/MCTargetAsmParser.cpp [3:4]
+ lib/MC/MCParser/MasmParser.cpp [3:4]
+ lib/MC/MCParser/WasmAsmParser.cpp [3:4]
+ lib/MC/MCParser/XCOFFAsmParser.cpp [4:5]
+ lib/MC/MCPseudoProbe.cpp [3:4]
+ lib/MC/MCRegisterInfo.cpp [3:4]
+ lib/MC/MCSPIRVStreamer.cpp [3:4]
+ lib/MC/MCSchedule.cpp [3:4]
+ lib/MC/MCSection.cpp [3:4]
+ lib/MC/MCSectionCOFF.cpp [3:4]
+ lib/MC/MCSectionDXContainer.cpp [3:4]
+ lib/MC/MCSectionELF.cpp [3:4]
+ lib/MC/MCSectionMachO.cpp [3:4]
+ lib/MC/MCSectionWasm.cpp [3:4]
+ lib/MC/MCSectionXCOFF.cpp [3:4]
+ lib/MC/MCStreamer.cpp [3:4]
+ lib/MC/MCSubtargetInfo.cpp [3:4]
+ lib/MC/MCSymbol.cpp [3:4]
+ lib/MC/MCSymbolELF.cpp [3:4]
+ lib/MC/MCSymbolXCOFF.cpp [3:4]
+ lib/MC/MCTargetOptions.cpp [3:4]
+ lib/MC/MCTargetOptionsCommandFlags.cpp [3:4]
+ lib/MC/MCValue.cpp [3:4]
+ lib/MC/MCWasmObjectTargetWriter.cpp [3:4]
+ lib/MC/MCWasmStreamer.cpp [3:4]
+ lib/MC/MCWin64EH.cpp [3:4]
+ lib/MC/MCWinCOFFStreamer.cpp [3:4]
+ lib/MC/MCWinEH.cpp [3:4]
+ lib/MC/MCXCOFFObjectTargetWriter.cpp [3:4]
+ lib/MC/MCXCOFFStreamer.cpp [3:4]
+ lib/MC/MachObjectWriter.cpp [3:4]
+ lib/MC/SPIRVObjectWriter.cpp [3:4]
+ lib/MC/StringTableBuilder.cpp [3:4]
+ lib/MC/SubtargetFeature.cpp [3:4]
+ lib/MC/TargetRegistry.cpp [3:4]
+ lib/MC/WasmObjectWriter.cpp [3:4]
+ lib/MC/WinCOFFObjectWriter.cpp [3:4]
+ lib/MC/XCOFFObjectWriter.cpp [3:4]
+ lib/MCA/CodeEmitter.cpp [3:4]
+ lib/MCA/Context.cpp [3:4]
+ lib/MCA/CustomBehaviour.cpp [3:4]
+ lib/MCA/HWEventListener.cpp [3:4]
+ lib/MCA/HardwareUnits/HardwareUnit.cpp [3:4]
+ lib/MCA/HardwareUnits/LSUnit.cpp [3:4]
+ lib/MCA/HardwareUnits/RegisterFile.cpp [3:4]
+ lib/MCA/HardwareUnits/ResourceManager.cpp [3:4]
+ lib/MCA/HardwareUnits/RetireControlUnit.cpp [3:4]
+ lib/MCA/HardwareUnits/Scheduler.cpp [3:4]
+ lib/MCA/IncrementalSourceMgr.cpp [3:4]
+ lib/MCA/InstrBuilder.cpp [3:4]
+ lib/MCA/Instruction.cpp [3:4]
+ lib/MCA/Pipeline.cpp [3:4]
+ lib/MCA/Stages/DispatchStage.cpp [3:4]
+ lib/MCA/Stages/EntryStage.cpp [3:4]
+ lib/MCA/Stages/ExecuteStage.cpp [3:4]
+ lib/MCA/Stages/InOrderIssueStage.cpp [3:4]
+ lib/MCA/Stages/InstructionTables.cpp [3:4]
+ lib/MCA/Stages/MicroOpQueueStage.cpp [3:4]
+ lib/MCA/Stages/RetireStage.cpp [3:4]
+ lib/MCA/Stages/Stage.cpp [3:4]
+ lib/MCA/Support.cpp [3:4]
+ lib/MCA/View.cpp [3:4]
+ lib/ObjCopy/Archive.cpp [3:4]
+ lib/ObjCopy/Archive.h [3:4]
+ lib/ObjCopy/COFF/COFFObjcopy.cpp [3:4]
+ lib/ObjCopy/COFF/COFFObject.cpp [3:4]
+ lib/ObjCopy/COFF/COFFObject.h [3:4]
+ lib/ObjCopy/COFF/COFFReader.cpp [3:4]
+ lib/ObjCopy/COFF/COFFReader.h [3:4]
+ lib/ObjCopy/COFF/COFFWriter.cpp [3:4]
+ lib/ObjCopy/COFF/COFFWriter.h [3:4]
+ lib/ObjCopy/CommonConfig.cpp [3:4]
+ lib/ObjCopy/ConfigManager.cpp [3:4]
+ lib/ObjCopy/ELF/ELFObjcopy.cpp [3:4]
+ lib/ObjCopy/ELF/ELFObject.cpp [3:4]
+ lib/ObjCopy/ELF/ELFObject.h [3:4]
+ lib/ObjCopy/MachO/MachOLayoutBuilder.cpp [3:4]
+ lib/ObjCopy/MachO/MachOLayoutBuilder.h [3:4]
+ lib/ObjCopy/MachO/MachOObjcopy.cpp [3:4]
+ lib/ObjCopy/MachO/MachOObject.cpp [3:4]
+ lib/ObjCopy/MachO/MachOObject.h [3:4]
+ lib/ObjCopy/MachO/MachOReader.cpp [3:4]
+ lib/ObjCopy/MachO/MachOReader.h [3:4]
+ lib/ObjCopy/MachO/MachOWriter.cpp [3:4]
+ lib/ObjCopy/MachO/MachOWriter.h [3:4]
+ lib/ObjCopy/ObjCopy.cpp [3:4]
+ lib/ObjCopy/XCOFF/XCOFFObjcopy.cpp [3:4]
+ lib/ObjCopy/XCOFF/XCOFFObject.h [3:4]
+ lib/ObjCopy/XCOFF/XCOFFReader.cpp [3:4]
+ lib/ObjCopy/XCOFF/XCOFFReader.h [3:4]
+ lib/ObjCopy/XCOFF/XCOFFWriter.cpp [3:4]
+ lib/ObjCopy/XCOFF/XCOFFWriter.h [3:4]
+ lib/ObjCopy/wasm/WasmObjcopy.cpp [3:4]
+ lib/ObjCopy/wasm/WasmObject.cpp [3:4]
+ lib/ObjCopy/wasm/WasmObject.h [3:4]
+ lib/ObjCopy/wasm/WasmReader.cpp [3:4]
+ lib/ObjCopy/wasm/WasmReader.h [3:4]
+ lib/ObjCopy/wasm/WasmWriter.cpp [3:4]
+ lib/ObjCopy/wasm/WasmWriter.h [3:4]
+ lib/Object/Archive.cpp [3:4]
+ lib/Object/ArchiveWriter.cpp [3:4]
+ lib/Object/Binary.cpp [3:4]
+ lib/Object/BuildID.cpp [3:4]
+ lib/Object/COFFImportFile.cpp [3:4]
+ lib/Object/COFFModuleDefinition.cpp [3:4]
+ lib/Object/COFFObjectFile.cpp [3:4]
+ lib/Object/DXContainer.cpp [3:4]
+ lib/Object/Decompressor.cpp [3:4]
+ lib/Object/ELF.cpp [3:4]
+ lib/Object/ELFObjectFile.cpp [3:4]
+ lib/Object/Error.cpp [3:4]
+ lib/Object/FaultMapParser.cpp [3:4]
+ lib/Object/IRObjectFile.cpp [3:4]
+ lib/Object/IRSymtab.cpp [3:4]
+ lib/Object/MachOObjectFile.cpp [3:4]
+ lib/Object/MachOUniversal.cpp [3:4]
+ lib/Object/MachOUniversalWriter.cpp [3:4]
+ lib/Object/Minidump.cpp [3:4]
+ lib/Object/ModuleSymbolTable.cpp [3:4]
+ lib/Object/Object.cpp [3:4]
+ lib/Object/ObjectFile.cpp [3:4]
+ lib/Object/OffloadBinary.cpp [3:4]
+ lib/Object/RecordStreamer.cpp [3:4]
+ lib/Object/RecordStreamer.h [3:4]
+ lib/Object/RelocationResolver.cpp [3:4]
+ lib/Object/SymbolSize.cpp [3:4]
+ lib/Object/SymbolicFile.cpp [3:4]
+ lib/Object/TapiFile.cpp [3:4]
+ lib/Object/TapiUniversal.cpp [3:4]
+ lib/Object/WasmObjectFile.cpp [3:4]
+ lib/Object/WindowsMachineFlag.cpp [3:4]
+ lib/Object/WindowsResource.cpp [3:4]
+ lib/Object/XCOFFObjectFile.cpp [3:4]
+ lib/ObjectYAML/ArchiveEmitter.cpp [3:4]
+ lib/ObjectYAML/ArchiveYAML.cpp [3:4]
+ lib/ObjectYAML/COFFEmitter.cpp [3:4]
+ lib/ObjectYAML/COFFYAML.cpp [3:4]
+ lib/ObjectYAML/CodeViewYAMLDebugSections.cpp [3:4]
+ lib/ObjectYAML/CodeViewYAMLSymbols.cpp [3:4]
+ lib/ObjectYAML/CodeViewYAMLTypeHashing.cpp [3:4]
+ lib/ObjectYAML/CodeViewYAMLTypes.cpp [3:4]
+ lib/ObjectYAML/DWARFEmitter.cpp [3:4]
+ lib/ObjectYAML/DWARFYAML.cpp [3:4]
+ lib/ObjectYAML/DXContainerEmitter.cpp [3:4]
+ lib/ObjectYAML/DXContainerYAML.cpp [3:4]
+ lib/ObjectYAML/ELFEmitter.cpp [3:4]
+ lib/ObjectYAML/ELFYAML.cpp [3:4]
+ lib/ObjectYAML/MachOEmitter.cpp [3:4]
+ lib/ObjectYAML/MachOYAML.cpp [3:4]
+ lib/ObjectYAML/MinidumpEmitter.cpp [3:4]
+ lib/ObjectYAML/MinidumpYAML.cpp [3:4]
+ lib/ObjectYAML/ObjectYAML.cpp [3:4]
+ lib/ObjectYAML/OffloadEmitter.cpp [3:4]
+ lib/ObjectYAML/OffloadYAML.cpp [3:4]
+ lib/ObjectYAML/WasmEmitter.cpp [3:4]
+ lib/ObjectYAML/WasmYAML.cpp [3:4]
+ lib/ObjectYAML/XCOFFEmitter.cpp [3:4]
+ lib/ObjectYAML/XCOFFYAML.cpp [3:4]
+ lib/ObjectYAML/YAML.cpp [3:4]
+ lib/ObjectYAML/yaml2obj.cpp [3:4]
+ lib/Option/Arg.cpp [3:4]
+ lib/Option/ArgList.cpp [3:4]
+ lib/Option/OptTable.cpp [3:4]
+ lib/Option/Option.cpp [3:4]
+ lib/Passes/OptimizationLevel.cpp [3:4]
+ lib/Passes/PassBuilder.cpp [3:4]
+ lib/Passes/PassBuilderBindings.cpp [3:4]
+ lib/Passes/PassBuilderPipelines.cpp [3:4]
+ lib/Passes/PassPlugin.cpp [3:4]
+ lib/Passes/PassRegistry.def [3:4]
+ lib/Passes/StandardInstrumentations.cpp [3:4]
+ lib/ProfileData/Coverage/CoverageMapping.cpp [3:4]
+ lib/ProfileData/Coverage/CoverageMappingReader.cpp [3:4]
+ lib/ProfileData/Coverage/CoverageMappingWriter.cpp [3:4]
+ lib/ProfileData/GCOV.cpp [3:4]
+ lib/ProfileData/InstrProf.cpp [3:4]
+ lib/ProfileData/InstrProfCorrelator.cpp [3:4]
+ lib/ProfileData/InstrProfReader.cpp [3:4]
+ lib/ProfileData/InstrProfWriter.cpp [3:4]
+ lib/ProfileData/ProfileSummaryBuilder.cpp [3:4]
+ lib/ProfileData/RawMemProfReader.cpp [3:4]
+ lib/ProfileData/SampleProf.cpp [3:4]
+ lib/ProfileData/SampleProfReader.cpp [3:4]
+ lib/ProfileData/SampleProfWriter.cpp [3:4]
+ lib/Remarks/BitstreamRemarkParser.cpp [3:4]
+ lib/Remarks/BitstreamRemarkParser.h [3:4]
+ lib/Remarks/BitstreamRemarkSerializer.cpp [3:4]
+ lib/Remarks/Remark.cpp [3:4]
+ lib/Remarks/RemarkFormat.cpp [3:4]
+ lib/Remarks/RemarkLinker.cpp [3:4]
+ lib/Remarks/RemarkParser.cpp [3:4]
+ lib/Remarks/RemarkSerializer.cpp [3:4]
+ lib/Remarks/RemarkStreamer.cpp [3:4]
+ lib/Remarks/RemarkStringTable.cpp [3:4]
+ lib/Remarks/YAMLRemarkParser.cpp [3:4]
+ lib/Remarks/YAMLRemarkParser.h [3:4]
+ lib/Remarks/YAMLRemarkSerializer.cpp [3:4]
+ lib/Support/ABIBreak.cpp [3:4]
+ lib/Support/AMDGPUMetadata.cpp [3:4]
+ lib/Support/APFixedPoint.cpp [3:4]
+ lib/Support/APFloat.cpp [3:4]
+ lib/Support/APInt.cpp [3:4]
+ lib/Support/APSInt.cpp [3:4]
+ lib/Support/ARMAttributeParser.cpp [3:4]
+ lib/Support/ARMBuildAttrs.cpp [3:4]
+ lib/Support/ARMWinEH.cpp [3:4]
+ lib/Support/AddressRanges.cpp [3:4]
+ lib/Support/Allocator.cpp [3:4]
+ lib/Support/Atomic.cpp [3:4]
+ lib/Support/AutoConvert.cpp [3:4]
+ lib/Support/Base64.cpp [3:4]
+ lib/Support/BinaryStreamError.cpp [3:4]
+ lib/Support/BinaryStreamReader.cpp [3:4]
+ lib/Support/BinaryStreamRef.cpp [3:4]
+ lib/Support/BinaryStreamWriter.cpp [3:4]
+ lib/Support/BlockFrequency.cpp [3:4]
+ lib/Support/BranchProbability.cpp [3:4]
+ lib/Support/BuryPointer.cpp [3:4]
+ lib/Support/COM.cpp [3:4]
+ lib/Support/CRC.cpp [3:4]
+ lib/Support/CSKYAttributeParser.cpp [3:4]
+ lib/Support/CSKYAttributes.cpp [3:4]
+ lib/Support/CachePruning.cpp [3:4]
+ lib/Support/Caching.cpp [3:4]
+ lib/Support/Chrono.cpp [3:4]
+ lib/Support/CodeGenCoverage.cpp [3:4]
+ lib/Support/CommandLine.cpp [3:4]
+ lib/Support/Compression.cpp [3:4]
+ lib/Support/ConvertUTFWrapper.cpp [3:4]
+ lib/Support/CrashRecoveryContext.cpp [3:4]
+ lib/Support/DAGDeltaAlgorithm.cpp [3:4]
+ lib/Support/DJB.cpp [3:4]
+ lib/Support/DataExtractor.cpp [3:4]
+ lib/Support/Debug.cpp [3:4]
+ lib/Support/DebugOptions.h [3:4]
+ lib/Support/DeltaAlgorithm.cpp [3:4]
+ lib/Support/DivisionByConstantInfo.cpp [3:4]
+ lib/Support/DynamicLibrary.cpp [3:4]
+ lib/Support/ELFAttributeParser.cpp [3:4]
+ lib/Support/ELFAttributes.cpp [3:4]
+ lib/Support/Errno.cpp [3:4]
+ lib/Support/Error.cpp [3:4]
+ lib/Support/ErrorHandling.cpp [3:4]
+ lib/Support/ExtensibleRTTI.cpp [3:4]
+ lib/Support/FileCollector.cpp [3:4]
+ lib/Support/FileOutputBuffer.cpp [3:4]
+ lib/Support/FileUtilities.cpp [3:4]
+ lib/Support/FoldingSet.cpp [3:4]
+ lib/Support/FormatVariadic.cpp [3:4]
+ lib/Support/FormattedStream.cpp [3:4]
+ lib/Support/GlobPattern.cpp [3:4]
+ lib/Support/GraphWriter.cpp [3:4]
+ lib/Support/Hashing.cpp [3:4]
+ lib/Support/InitLLVM.cpp [3:4]
+ lib/Support/InstructionCost.cpp [3:4]
+ lib/Support/IntEqClasses.cpp [3:4]
+ lib/Support/IntervalMap.cpp [3:4]
+ lib/Support/ItaniumManglingCanonicalizer.cpp [3:4]
+ lib/Support/JSON.cpp [3:4]
+ lib/Support/KnownBits.cpp [3:4]
+ lib/Support/LEB128.cpp [3:4]
+ lib/Support/LineIterator.cpp [3:4]
+ lib/Support/LockFileManager.cpp [3:4]
+ lib/Support/LowLevelType.cpp [3:4]
+ lib/Support/MSP430AttributeParser.cpp [3:4]
+ lib/Support/MSP430Attributes.cpp [3:4]
+ lib/Support/ManagedStatic.cpp [3:4]
+ lib/Support/MathExtras.cpp [3:4]
+ lib/Support/MemAlloc.cpp [3:4]
+ lib/Support/Memory.cpp [3:4]
+ lib/Support/MemoryBuffer.cpp [3:4]
+ lib/Support/MemoryBufferRef.cpp [3:4]
+ lib/Support/NativeFormatting.cpp [3:4]
+ lib/Support/OptimizedStructLayout.cpp [3:4]
+ lib/Support/Optional.cpp [3:4]
+ lib/Support/Parallel.cpp [3:4]
+ lib/Support/Path.cpp [3:4]
+ lib/Support/PluginLoader.cpp [3:4]
+ lib/Support/PrettyStackTrace.cpp [3:4]
+ lib/Support/Process.cpp [3:4]
+ lib/Support/Program.cpp [3:4]
+ lib/Support/RISCVAttributeParser.cpp [3:4]
+ lib/Support/RISCVAttributes.cpp [3:4]
+ lib/Support/RISCVISAInfo.cpp [3:4]
+ lib/Support/RWMutex.cpp [3:4]
+ lib/Support/RandomNumberGenerator.cpp [3:4]
+ lib/Support/Regex.cpp [3:4]
+ lib/Support/SHA1.cpp [3:4]
+ lib/Support/SHA256.cpp [3:4]
+ lib/Support/ScaledNumber.cpp [3:4]
+ lib/Support/Signals.cpp [3:4]
+ lib/Support/Signposts.cpp [3:4]
+ lib/Support/SmallPtrSet.cpp [3:4]
+ lib/Support/SmallVector.cpp [3:4]
+ lib/Support/SourceMgr.cpp [3:4]
+ lib/Support/SpecialCaseList.cpp [3:4]
+ lib/Support/Statistic.cpp [3:4]
+ lib/Support/StringExtras.cpp [3:4]
+ lib/Support/StringMap.cpp [3:4]
+ lib/Support/StringRef.cpp [3:4]
+ lib/Support/StringSaver.cpp [3:4]
+ lib/Support/SuffixTree.cpp [3:4]
+ lib/Support/SymbolRemappingReader.cpp [3:4]
+ lib/Support/SystemUtils.cpp [3:4]
+ lib/Support/TarWriter.cpp [3:4]
+ lib/Support/ThreadPool.cpp [3:4]
+ lib/Support/Threading.cpp [3:4]
+ lib/Support/TimeProfiler.cpp [3:4]
+ lib/Support/Timer.cpp [3:4]
+ lib/Support/ToolOutputFile.cpp [3:4]
+ lib/Support/TrigramIndex.cpp [3:4]
+ lib/Support/Twine.cpp [3:4]
+ lib/Support/TypeSize.cpp [3:4]
+ lib/Support/Unicode.cpp [3:4]
+ lib/Support/UnicodeNameToCodepoint.cpp [4:5]
+ lib/Support/UnicodeNameToCodepointGenerated.cpp [3:4]
+ lib/Support/Unix/COM.inc [3:4]
+ lib/Support/Unix/DynamicLibrary.inc [3:4]
+ lib/Support/Unix/Memory.inc [3:4]
+ lib/Support/Unix/Path.inc [3:4]
+ lib/Support/Unix/Process.inc [3:4]
+ lib/Support/Unix/Program.inc [3:4]
+ lib/Support/Unix/Signals.inc [3:4]
+ lib/Support/Unix/Threading.inc [3:4]
+ lib/Support/Unix/Unix.h [3:4]
+ lib/Support/Unix/Watchdog.inc [3:4]
+ lib/Support/Valgrind.cpp [3:4]
+ lib/Support/VersionTuple.cpp [3:4]
+ lib/Support/VirtualFileSystem.cpp [3:4]
+ lib/Support/Watchdog.cpp [3:4]
+ lib/Support/Windows/COM.inc [3:4]
+ lib/Support/Windows/DynamicLibrary.inc [3:4]
+ lib/Support/Windows/Memory.inc [3:4]
+ lib/Support/Windows/Path.inc [3:4]
+ lib/Support/Windows/Process.inc [3:4]
+ lib/Support/Windows/Program.inc [3:4]
+ lib/Support/Windows/Signals.inc [3:4]
+ lib/Support/Windows/Threading.inc [3:4]
+ lib/Support/Windows/Watchdog.inc [3:4]
+ lib/Support/WithColor.cpp [3:4]
+ lib/Support/YAMLParser.cpp [3:4]
+ lib/Support/YAMLTraits.cpp [3:4]
+ lib/Support/Z3Solver.cpp [3:4]
+ lib/Support/circular_raw_ostream.cpp [3:4]
+ lib/Support/raw_os_ostream.cpp [3:4]
+ lib/Support/raw_ostream.cpp [3:4]
+ lib/TableGen/DetailedRecordsBackend.cpp [3:4]
+ lib/TableGen/Error.cpp [3:4]
+ lib/TableGen/JSONBackend.cpp [3:4]
+ lib/TableGen/Main.cpp [3:4]
+ lib/TableGen/Parser.cpp [3:4]
+ lib/TableGen/Record.cpp [3:4]
+ lib/TableGen/SetTheory.cpp [3:4]
+ lib/TableGen/StringMatcher.cpp [3:4]
+ lib/TableGen/TGLexer.cpp [3:4]
+ lib/TableGen/TGLexer.h [3:4]
+ lib/TableGen/TGParser.cpp [3:4]
+ lib/TableGen/TGParser.h [3:4]
+ lib/TableGen/TableGenBackend.cpp [3:4]
+ lib/TableGen/TableGenBackendSkeleton.cpp [3:4]
+ lib/Target/AArch64/AArch64.h [3:4]
+ lib/Target/AArch64/AArch64.td [3:4]
+ lib/Target/AArch64/AArch64A53Fix835769.cpp [3:4]
+ lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp [3:4]
+ lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp [3:4]
+ lib/Target/AArch64/AArch64AsmPrinter.cpp [3:4]
+ lib/Target/AArch64/AArch64BranchTargets.cpp [3:4]
+ lib/Target/AArch64/AArch64CallingConvention.cpp [3:4]
+ lib/Target/AArch64/AArch64CallingConvention.h [3:4]
+ lib/Target/AArch64/AArch64CallingConvention.td [3:4]
+ lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp [3:4]
+ lib/Target/AArch64/AArch64CollectLOH.cpp [3:4]
+ lib/Target/AArch64/AArch64Combine.td [3:4]
+ lib/Target/AArch64/AArch64CompressJumpTables.cpp [3:4]
+ lib/Target/AArch64/AArch64CondBrTuning.cpp [3:4]
+ lib/Target/AArch64/AArch64ConditionOptimizer.cpp [3:4]
+ lib/Target/AArch64/AArch64ConditionalCompares.cpp [3:4]
+ lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp [3:4]
+ lib/Target/AArch64/AArch64ExpandImm.cpp [3:4]
+ lib/Target/AArch64/AArch64ExpandImm.h [3:4]
+ lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp [3:4]
+ lib/Target/AArch64/AArch64FalkorHWPFFix.cpp [3:4]
+ lib/Target/AArch64/AArch64FastISel.cpp [3:4]
+ lib/Target/AArch64/AArch64FrameLowering.cpp [3:4]
+ lib/Target/AArch64/AArch64FrameLowering.h [3:4]
+ lib/Target/AArch64/AArch64GenRegisterBankInfo.def [3:4]
+ lib/Target/AArch64/AArch64ISelDAGToDAG.cpp [3:4]
+ lib/Target/AArch64/AArch64ISelLowering.cpp [3:4]
+ lib/Target/AArch64/AArch64ISelLowering.h [3:4]
+ lib/Target/AArch64/AArch64InstrAtomics.td [3:4]
+ lib/Target/AArch64/AArch64InstrFormats.td [3:4]
+ lib/Target/AArch64/AArch64InstrGISel.td [3:4]
+ lib/Target/AArch64/AArch64InstrInfo.cpp [3:4]
+ lib/Target/AArch64/AArch64InstrInfo.h [3:4]
+ lib/Target/AArch64/AArch64InstrInfo.td [3:4]
+ lib/Target/AArch64/AArch64KCFI.cpp [3:4]
+ lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp [3:4]
+ lib/Target/AArch64/AArch64LowerHomogeneousPrologEpilog.cpp [3:4]
+ lib/Target/AArch64/AArch64MCInstLower.cpp [3:4]
+ lib/Target/AArch64/AArch64MCInstLower.h [3:4]
+ lib/Target/AArch64/AArch64MIPeepholeOpt.cpp [3:4]
+ lib/Target/AArch64/AArch64MachineFunctionInfo.cpp [4:5]
+ lib/Target/AArch64/AArch64MachineFunctionInfo.h [3:4]
+ lib/Target/AArch64/AArch64MachineScheduler.cpp [3:4]
+ lib/Target/AArch64/AArch64MachineScheduler.h [3:4]
+ lib/Target/AArch64/AArch64MacroFusion.cpp [3:4]
+ lib/Target/AArch64/AArch64MacroFusion.h [3:4]
+ lib/Target/AArch64/AArch64PBQPRegAlloc.cpp [3:4]
+ lib/Target/AArch64/AArch64PBQPRegAlloc.h [3:4]
+ lib/Target/AArch64/AArch64PerfectShuffle.h [3:4]
+ lib/Target/AArch64/AArch64PfmCounters.td [3:4]
+ lib/Target/AArch64/AArch64PromoteConstant.cpp [3:4]
+ lib/Target/AArch64/AArch64RedundantCopyElimination.cpp [3:4]
+ lib/Target/AArch64/AArch64RegisterBanks.td [3:4]
+ lib/Target/AArch64/AArch64RegisterInfo.cpp [3:4]
+ lib/Target/AArch64/AArch64RegisterInfo.h [3:4]
+ lib/Target/AArch64/AArch64RegisterInfo.td [3:4]
+ lib/Target/AArch64/AArch64SIMDInstrOpt.cpp [2:3]
+ lib/Target/AArch64/AArch64SLSHardening.cpp [3:4]
+ lib/Target/AArch64/AArch64SMEInstrInfo.td [3:4]
+ lib/Target/AArch64/AArch64SVEInstrInfo.td [3:4]
+ lib/Target/AArch64/AArch64SchedA53.td [3:4]
+ lib/Target/AArch64/AArch64SchedA55.td [3:4]
+ lib/Target/AArch64/AArch64SchedA57.td [3:4]
+ lib/Target/AArch64/AArch64SchedA57WriteRes.td [3:4]
+ lib/Target/AArch64/AArch64SchedA64FX.td [3:4]
+ lib/Target/AArch64/AArch64SchedAmpere1.td [3:4]
+ lib/Target/AArch64/AArch64SchedCyclone.td [3:4]
+ lib/Target/AArch64/AArch64SchedExynosM3.td [3:4]
+ lib/Target/AArch64/AArch64SchedExynosM4.td [3:4]
+ lib/Target/AArch64/AArch64SchedExynosM5.td [3:4]
+ lib/Target/AArch64/AArch64SchedFalkor.td [3:4]
+ lib/Target/AArch64/AArch64SchedFalkorDetails.td [3:4]
+ lib/Target/AArch64/AArch64SchedKryo.td [3:4]
+ lib/Target/AArch64/AArch64SchedKryoDetails.td [3:4]
+ lib/Target/AArch64/AArch64SchedNeoverseN2.td [3:4]
+ lib/Target/AArch64/AArch64SchedPredAmpere.td [3:4]
+ lib/Target/AArch64/AArch64SchedPredExynos.td [3:4]
+ lib/Target/AArch64/AArch64SchedPredicates.td [3:4]
+ lib/Target/AArch64/AArch64SchedTSV110.td [3:4]
+ lib/Target/AArch64/AArch64SchedThunderX.td [3:4]
+ lib/Target/AArch64/AArch64SchedThunderX2T99.td [3:4]
+ lib/Target/AArch64/AArch64SchedThunderX3T110.td [3:4]
+ lib/Target/AArch64/AArch64Schedule.td [3:4]
+ lib/Target/AArch64/AArch64SelectionDAGInfo.cpp [3:4]
+ lib/Target/AArch64/AArch64SelectionDAGInfo.h [3:4]
+ lib/Target/AArch64/AArch64SpeculationHardening.cpp [3:4]
+ lib/Target/AArch64/AArch64StackTagging.cpp [3:4]
+ lib/Target/AArch64/AArch64StackTaggingPreRA.cpp [3:4]
+ lib/Target/AArch64/AArch64StorePairSuppress.cpp [3:4]
+ lib/Target/AArch64/AArch64Subtarget.cpp [3:4]
+ lib/Target/AArch64/AArch64Subtarget.h [3:4]
+ lib/Target/AArch64/AArch64SystemOperands.td [3:4]
+ lib/Target/AArch64/AArch64TargetMachine.cpp [3:4]
+ lib/Target/AArch64/AArch64TargetMachine.h [3:4]
+ lib/Target/AArch64/AArch64TargetObjectFile.cpp [3:4]
+ lib/Target/AArch64/AArch64TargetObjectFile.h [3:4]
+ lib/Target/AArch64/AArch64TargetTransformInfo.cpp [3:4]
+ lib/Target/AArch64/AArch64TargetTransformInfo.h [3:4]
+ lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp [3:4]
+ lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp [3:4]
+ lib/Target/AArch64/Disassembler/AArch64Disassembler.h [3:4]
+ lib/Target/AArch64/Disassembler/AArch64ExternalSymbolizer.cpp [3:4]
+ lib/Target/AArch64/Disassembler/AArch64ExternalSymbolizer.h [3:4]
+ lib/Target/AArch64/GISel/AArch64CallLowering.cpp [3:4]
+ lib/Target/AArch64/GISel/AArch64CallLowering.h [3:4]
+ lib/Target/AArch64/GISel/AArch64GlobalISelUtils.cpp [3:4]
+ lib/Target/AArch64/GISel/AArch64GlobalISelUtils.h [3:4]
+ lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp [3:4]
+ lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp [3:4]
+ lib/Target/AArch64/GISel/AArch64LegalizerInfo.h [3:4]
+ lib/Target/AArch64/GISel/AArch64O0PreLegalizerCombiner.cpp [3:4]
+ lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp [3:4]
+ lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp [3:4]
+ lib/Target/AArch64/GISel/AArch64PostSelectOptimize.cpp [3:4]
+ lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp [3:4]
+ lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp [3:4]
+ lib/Target/AArch64/GISel/AArch64RegisterBankInfo.h [3:4]
+ lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h [3:4]
+ lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp [3:4]
+ lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp [3:4]
+ lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp [3:4]
+ lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.h [3:4]
+ lib/Target/AArch64/MCTargetDesc/AArch64FixupKinds.h [3:4]
+ lib/Target/AArch64/MCTargetDesc/AArch64InstPrinter.cpp [3:4]
+ lib/Target/AArch64/MCTargetDesc/AArch64InstPrinter.h [3:4]
+ lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp [3:4]
+ lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.h [3:4]
+ lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp [3:4]
+ lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp [3:4]
+ lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.h [3:4]
+ lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp [3:4]
+ lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.h [3:4]
+ lib/Target/AArch64/MCTargetDesc/AArch64MachObjectWriter.cpp [3:4]
+ lib/Target/AArch64/MCTargetDesc/AArch64TargetStreamer.cpp [3:4]
+ lib/Target/AArch64/MCTargetDesc/AArch64TargetStreamer.h [3:4]
+ lib/Target/AArch64/MCTargetDesc/AArch64WinCOFFObjectWriter.cpp [3:4]
+ lib/Target/AArch64/MCTargetDesc/AArch64WinCOFFStreamer.cpp [3:4]
+ lib/Target/AArch64/MCTargetDesc/AArch64WinCOFFStreamer.h [3:4]
+ lib/Target/AArch64/SMEABIPass.cpp [3:4]
+ lib/Target/AArch64/SMEInstrFormats.td [3:4]
+ lib/Target/AArch64/SVEInstrFormats.td [3:4]
+ lib/Target/AArch64/SVEIntrinsicOpts.cpp [3:4]
+ lib/Target/AArch64/TargetInfo/AArch64TargetInfo.cpp [3:4]
+ lib/Target/AArch64/TargetInfo/AArch64TargetInfo.h [3:4]
+ lib/Target/AArch64/Utils/AArch64BaseInfo.cpp [3:4]
+ lib/Target/AArch64/Utils/AArch64BaseInfo.h [3:4]
+ lib/Target/AArch64/Utils/AArch64SMEAttributes.cpp [3:4]
+ lib/Target/AArch64/Utils/AArch64SMEAttributes.h [3:4]
+ lib/Target/ARM/A15SDOptimizer.cpp [3:4]
+ lib/Target/ARM/ARM.h [3:4]
+ lib/Target/ARM/ARM.td [3:4]
+ lib/Target/ARM/ARMAsmPrinter.cpp [3:4]
+ lib/Target/ARM/ARMAsmPrinter.h [3:4]
+ lib/Target/ARM/ARMBaseInstrInfo.cpp [3:4]
+ lib/Target/ARM/ARMBaseInstrInfo.h [3:4]
+ lib/Target/ARM/ARMBaseRegisterInfo.cpp [3:4]
+ lib/Target/ARM/ARMBaseRegisterInfo.h [3:4]
+ lib/Target/ARM/ARMBasicBlockInfo.cpp [3:4]
+ lib/Target/ARM/ARMBasicBlockInfo.h [3:4]
+ lib/Target/ARM/ARMBlockPlacement.cpp [3:4]
+ lib/Target/ARM/ARMBranchTargets.cpp [3:4]
+ lib/Target/ARM/ARMCallLowering.cpp [3:4]
+ lib/Target/ARM/ARMCallLowering.h [3:4]
+ lib/Target/ARM/ARMCallingConv.cpp [3:4]
+ lib/Target/ARM/ARMCallingConv.h [3:4]
+ lib/Target/ARM/ARMCallingConv.td [3:4]
+ lib/Target/ARM/ARMConstantIslandPass.cpp [3:4]
+ lib/Target/ARM/ARMConstantPoolValue.cpp [3:4]
+ lib/Target/ARM/ARMConstantPoolValue.h [3:4]
+ lib/Target/ARM/ARMExpandPseudoInsts.cpp [3:4]
+ lib/Target/ARM/ARMFastISel.cpp [3:4]
+ lib/Target/ARM/ARMFeatures.h [3:4]
+ lib/Target/ARM/ARMFixCortexA57AES1742098Pass.cpp [3:4]
+ lib/Target/ARM/ARMFrameLowering.cpp [3:4]
+ lib/Target/ARM/ARMFrameLowering.h [3:4]
+ lib/Target/ARM/ARMHazardRecognizer.cpp [3:4]
+ lib/Target/ARM/ARMHazardRecognizer.h [3:4]
+ lib/Target/ARM/ARMISelDAGToDAG.cpp [3:4]
+ lib/Target/ARM/ARMISelLowering.cpp [3:4]
+ lib/Target/ARM/ARMISelLowering.h [3:4]
+ lib/Target/ARM/ARMInstrCDE.td [3:4]
+ lib/Target/ARM/ARMInstrFormats.td [3:4]
+ lib/Target/ARM/ARMInstrInfo.cpp [3:4]
+ lib/Target/ARM/ARMInstrInfo.h [3:4]
+ lib/Target/ARM/ARMInstrInfo.td [3:4]
+ lib/Target/ARM/ARMInstrMVE.td [3:4]
+ lib/Target/ARM/ARMInstrNEON.td [3:4]
+ lib/Target/ARM/ARMInstrThumb.td [3:4]
+ lib/Target/ARM/ARMInstrThumb2.td [3:4]
+ lib/Target/ARM/ARMInstrVFP.td [3:4]
+ lib/Target/ARM/ARMInstructionSelector.cpp [3:4]
+ lib/Target/ARM/ARMLegalizerInfo.cpp [3:4]
+ lib/Target/ARM/ARMLegalizerInfo.h [3:4]
+ lib/Target/ARM/ARMLoadStoreOptimizer.cpp [3:4]
+ lib/Target/ARM/ARMLowOverheadLoops.cpp [3:4]
+ lib/Target/ARM/ARMMCInstLower.cpp [3:4]
+ lib/Target/ARM/ARMMachineFunctionInfo.cpp [3:4]
+ lib/Target/ARM/ARMMachineFunctionInfo.h [3:4]
+ lib/Target/ARM/ARMMacroFusion.cpp [3:4]
+ lib/Target/ARM/ARMMacroFusion.h [3:4]
+ lib/Target/ARM/ARMOptimizeBarriersPass.cpp [4:5]
+ lib/Target/ARM/ARMParallelDSP.cpp [3:4]
+ lib/Target/ARM/ARMPerfectShuffle.h [3:4]
+ lib/Target/ARM/ARMPredicates.td [3:4]
+ lib/Target/ARM/ARMRegisterBankInfo.cpp [3:4]
+ lib/Target/ARM/ARMRegisterBankInfo.h [3:4]
+ lib/Target/ARM/ARMRegisterBanks.td [3:4]
+ lib/Target/ARM/ARMRegisterInfo.cpp [3:4]
+ lib/Target/ARM/ARMRegisterInfo.h [3:4]
+ lib/Target/ARM/ARMRegisterInfo.td [3:4]
+ lib/Target/ARM/ARMSLSHardening.cpp [3:4]
+ lib/Target/ARM/ARMSchedule.td [3:4]
+ lib/Target/ARM/ARMScheduleA57.td [3:4]
+ lib/Target/ARM/ARMScheduleA57WriteRes.td [3:4]
+ lib/Target/ARM/ARMScheduleA8.td [3:4]
+ lib/Target/ARM/ARMScheduleA9.td [3:4]
+ lib/Target/ARM/ARMScheduleM4.td [3:4]
+ lib/Target/ARM/ARMScheduleM55.td [3:4]
+ lib/Target/ARM/ARMScheduleM7.td [3:4]
+ lib/Target/ARM/ARMScheduleR52.td [3:4]
+ lib/Target/ARM/ARMScheduleSwift.td [3:4]
+ lib/Target/ARM/ARMScheduleV6.td [3:4]
+ lib/Target/ARM/ARMSelectionDAGInfo.cpp [3:4]
+ lib/Target/ARM/ARMSelectionDAGInfo.h [3:4]
+ lib/Target/ARM/ARMSubtarget.cpp [3:4]
+ lib/Target/ARM/ARMSubtarget.h [3:4]
+ lib/Target/ARM/ARMSystemRegister.td [3:4]
+ lib/Target/ARM/ARMTargetMachine.cpp [3:4]
+ lib/Target/ARM/ARMTargetMachine.h [3:4]
+ lib/Target/ARM/ARMTargetObjectFile.cpp [3:4]
+ lib/Target/ARM/ARMTargetObjectFile.h [3:4]
+ lib/Target/ARM/ARMTargetTransformInfo.cpp [3:4]
+ lib/Target/ARM/ARMTargetTransformInfo.h [3:4]
+ lib/Target/ARM/AsmParser/ARMAsmParser.cpp [3:4]
+ lib/Target/ARM/Disassembler/ARMDisassembler.cpp [3:4]
+ lib/Target/ARM/MCTargetDesc/ARMAddressingModes.h [3:4]
+ lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp [3:4]
+ lib/Target/ARM/MCTargetDesc/ARMAsmBackend.h [3:4]
+ lib/Target/ARM/MCTargetDesc/ARMAsmBackendDarwin.h [3:4]
+ lib/Target/ARM/MCTargetDesc/ARMAsmBackendELF.h [3:4]
+ lib/Target/ARM/MCTargetDesc/ARMAsmBackendWinCOFF.h [3:4]
+ lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h [3:4]
+ lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp [3:4]
+ lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp [3:4]
+ lib/Target/ARM/MCTargetDesc/ARMFixupKinds.h [3:4]
+ lib/Target/ARM/MCTargetDesc/ARMInstPrinter.cpp [3:4]
+ lib/Target/ARM/MCTargetDesc/ARMInstPrinter.h [3:4]
+ lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp [3:4]
+ lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.h [3:4]
+ lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp [3:4]
+ lib/Target/ARM/MCTargetDesc/ARMMCExpr.cpp [3:4]
+ lib/Target/ARM/MCTargetDesc/ARMMCExpr.h [3:4]
+ lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp [3:4]
+ lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.h [3:4]
+ lib/Target/ARM/MCTargetDesc/ARMMachORelocationInfo.cpp [3:4]
+ lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp [3:4]
+ lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp [3:4]
+ lib/Target/ARM/MCTargetDesc/ARMUnwindOpAsm.cpp [3:4]
+ lib/Target/ARM/MCTargetDesc/ARMUnwindOpAsm.h [3:4]
+ lib/Target/ARM/MCTargetDesc/ARMWinCOFFObjectWriter.cpp [3:4]
+ lib/Target/ARM/MCTargetDesc/ARMWinCOFFStreamer.cpp [3:4]
+ lib/Target/ARM/MLxExpansionPass.cpp [3:4]
+ lib/Target/ARM/MVEGatherScatterLowering.cpp [3:4]
+ lib/Target/ARM/MVELaneInterleavingPass.cpp [3:4]
+ lib/Target/ARM/MVETPAndVPTOptimisationsPass.cpp [3:4]
+ lib/Target/ARM/MVETailPredUtils.h [3:4]
+ lib/Target/ARM/MVETailPredication.cpp [3:4]
+ lib/Target/ARM/MVEVPTBlockPass.cpp [3:4]
+ lib/Target/ARM/TargetInfo/ARMTargetInfo.cpp [3:4]
+ lib/Target/ARM/TargetInfo/ARMTargetInfo.h [3:4]
+ lib/Target/ARM/Thumb1FrameLowering.cpp [3:4]
+ lib/Target/ARM/Thumb1FrameLowering.h [3:4]
+ lib/Target/ARM/Thumb1InstrInfo.cpp [3:4]
+ lib/Target/ARM/Thumb1InstrInfo.h [3:4]
+ lib/Target/ARM/Thumb2ITBlockPass.cpp [3:4]
+ lib/Target/ARM/Thumb2InstrInfo.cpp [3:4]
+ lib/Target/ARM/Thumb2InstrInfo.h [3:4]
+ lib/Target/ARM/Thumb2SizeReduction.cpp [3:4]
+ lib/Target/ARM/ThumbRegisterInfo.cpp [3:4]
+ lib/Target/ARM/ThumbRegisterInfo.h [3:4]
+ lib/Target/ARM/Utils/ARMBaseInfo.cpp [3:4]
+ lib/Target/ARM/Utils/ARMBaseInfo.h [3:4]
+ lib/Target/BPF/AsmParser/BPFAsmParser.cpp [3:4]
+ lib/Target/BPF/BPF.h [3:4]
+ lib/Target/BPF/BPF.td [3:4]
+ lib/Target/BPF/BPFAbstractMemberAccess.cpp [3:4]
+ lib/Target/BPF/BPFAdjustOpt.cpp [3:4]
+ lib/Target/BPF/BPFAsmPrinter.cpp [3:4]
+ lib/Target/BPF/BPFCORE.h [3:4]
+ lib/Target/BPF/BPFCallingConv.td [3:4]
+ lib/Target/BPF/BPFCheckAndAdjustIR.cpp [3:4]
+ lib/Target/BPF/BPFFrameLowering.cpp [3:4]
+ lib/Target/BPF/BPFFrameLowering.h [3:4]
+ lib/Target/BPF/BPFIRPeephole.cpp [3:4]
+ lib/Target/BPF/BPFISelDAGToDAG.cpp [3:4]
+ lib/Target/BPF/BPFISelLowering.cpp [3:4]
+ lib/Target/BPF/BPFISelLowering.h [3:4]
+ lib/Target/BPF/BPFInstrFormats.td [3:4]
+ lib/Target/BPF/BPFInstrInfo.cpp [3:4]
+ lib/Target/BPF/BPFInstrInfo.h [3:4]
+ lib/Target/BPF/BPFInstrInfo.td [3:4]
+ lib/Target/BPF/BPFMCInstLower.cpp [3:4]
+ lib/Target/BPF/BPFMCInstLower.h [3:4]
+ lib/Target/BPF/BPFMIChecking.cpp [3:4]
+ lib/Target/BPF/BPFMIPeephole.cpp [3:4]
+ lib/Target/BPF/BPFMISimplifyPatchable.cpp [3:4]
+ lib/Target/BPF/BPFPreserveDIType.cpp [3:4]
+ lib/Target/BPF/BPFRegisterInfo.cpp [3:4]
+ lib/Target/BPF/BPFRegisterInfo.h [3:4]
+ lib/Target/BPF/BPFRegisterInfo.td [3:4]
+ lib/Target/BPF/BPFSelectionDAGInfo.cpp [3:4]
+ lib/Target/BPF/BPFSelectionDAGInfo.h [3:4]
+ lib/Target/BPF/BPFSubtarget.cpp [3:4]
+ lib/Target/BPF/BPFSubtarget.h [3:4]
+ lib/Target/BPF/BPFTargetMachine.cpp [3:4]
+ lib/Target/BPF/BPFTargetMachine.h [3:4]
+ lib/Target/BPF/BPFTargetTransformInfo.h [3:4]
+ lib/Target/BPF/BTF.def [3:4]
+ lib/Target/BPF/BTF.h [3:4]
+ lib/Target/BPF/BTFDebug.cpp [3:4]
+ lib/Target/BPF/BTFDebug.h [3:4]
+ lib/Target/BPF/Disassembler/BPFDisassembler.cpp [3:4]
+ lib/Target/BPF/MCTargetDesc/BPFAsmBackend.cpp [3:4]
+ lib/Target/BPF/MCTargetDesc/BPFELFObjectWriter.cpp [3:4]
+ lib/Target/BPF/MCTargetDesc/BPFInstPrinter.cpp [3:4]
+ lib/Target/BPF/MCTargetDesc/BPFInstPrinter.h [3:4]
+ lib/Target/BPF/MCTargetDesc/BPFMCAsmInfo.h [3:4]
+ lib/Target/BPF/MCTargetDesc/BPFMCCodeEmitter.cpp [3:4]
+ lib/Target/BPF/MCTargetDesc/BPFMCTargetDesc.cpp [3:4]
+ lib/Target/BPF/MCTargetDesc/BPFMCTargetDesc.h [3:4]
+ lib/Target/BPF/TargetInfo/BPFTargetInfo.cpp [3:4]
+ lib/Target/BPF/TargetInfo/BPFTargetInfo.h [3:4]
+ lib/Target/LoongArch/AsmParser/LoongArchAsmParser.cpp [3:4]
+ lib/Target/LoongArch/Disassembler/LoongArchDisassembler.cpp [3:4]
+ lib/Target/LoongArch/LoongArch.h [3:4]
+ lib/Target/LoongArch/LoongArch.td [3:4]
+ lib/Target/LoongArch/LoongArchAsmPrinter.cpp [3:4]
+ lib/Target/LoongArch/LoongArchAsmPrinter.h [3:4]
+ lib/Target/LoongArch/LoongArchCallingConv.td [3:4]
+ lib/Target/LoongArch/LoongArchExpandAtomicPseudoInsts.cpp [3:4]
+ lib/Target/LoongArch/LoongArchExpandPseudoInsts.cpp [3:4]
+ lib/Target/LoongArch/LoongArchFloat32InstrInfo.td [3:4]
+ lib/Target/LoongArch/LoongArchFloat64InstrInfo.td [3:4]
+ lib/Target/LoongArch/LoongArchFloatInstrFormats.td [3:4]
+ lib/Target/LoongArch/LoongArchFrameLowering.cpp [3:4]
+ lib/Target/LoongArch/LoongArchFrameLowering.h [3:4]
+ lib/Target/LoongArch/LoongArchISelDAGToDAG.cpp [3:4]
+ lib/Target/LoongArch/LoongArchISelDAGToDAG.h [3:4]
+ lib/Target/LoongArch/LoongArchISelLowering.cpp [3:4]
+ lib/Target/LoongArch/LoongArchISelLowering.h [3:4]
+ lib/Target/LoongArch/LoongArchInstrFormats.td [3:4]
+ lib/Target/LoongArch/LoongArchInstrInfo.cpp [3:4]
+ lib/Target/LoongArch/LoongArchInstrInfo.h [3:4]
+ lib/Target/LoongArch/LoongArchInstrInfo.td [3:4]
+ lib/Target/LoongArch/LoongArchMCInstLower.cpp [3:4]
+ lib/Target/LoongArch/LoongArchMachineFunctionInfo.h [3:4]
+ lib/Target/LoongArch/LoongArchRegisterInfo.cpp [3:4]
+ lib/Target/LoongArch/LoongArchRegisterInfo.h [3:4]
+ lib/Target/LoongArch/LoongArchRegisterInfo.td [3:4]
+ lib/Target/LoongArch/LoongArchSubtarget.cpp [3:4]
+ lib/Target/LoongArch/LoongArchSubtarget.h [3:4]
+ lib/Target/LoongArch/LoongArchTargetMachine.cpp [3:4]
+ lib/Target/LoongArch/LoongArchTargetMachine.h [3:4]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.cpp [3:4]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.h [3:4]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchBaseInfo.cpp [3:4]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchBaseInfo.h [3:4]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchELFObjectWriter.cpp [3:4]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchELFStreamer.cpp [3:4]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchELFStreamer.h [3:4]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchFixupKinds.h [3:4]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchInstPrinter.cpp [3:4]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchInstPrinter.h [3:4]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchMCAsmInfo.cpp [3:4]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchMCAsmInfo.h [3:4]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchMCCodeEmitter.cpp [3:4]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchMCExpr.cpp [3:4]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchMCExpr.h [3:4]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchMCTargetDesc.cpp [3:4]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchMCTargetDesc.h [3:4]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchMatInt.cpp [3:4]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchMatInt.h [3:4]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchTargetStreamer.cpp [3:4]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchTargetStreamer.h [3:4]
+ lib/Target/LoongArch/TargetInfo/LoongArchTargetInfo.cpp [3:4]
+ lib/Target/LoongArch/TargetInfo/LoongArchTargetInfo.h [3:4]
+ lib/Target/NVPTX/MCTargetDesc/NVPTXBaseInfo.h [3:4]
+ lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp [3:4]
+ lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.h [3:4]
+ lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.cpp [3:4]
+ lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.h [3:4]
+ lib/Target/NVPTX/MCTargetDesc/NVPTXMCTargetDesc.cpp [3:4]
+ lib/Target/NVPTX/MCTargetDesc/NVPTXMCTargetDesc.h [3:4]
+ lib/Target/NVPTX/MCTargetDesc/NVPTXTargetStreamer.cpp [3:4]
+ lib/Target/NVPTX/MCTargetDesc/NVPTXTargetStreamer.h [3:4]
+ lib/Target/NVPTX/NVPTX.h [3:4]
+ lib/Target/NVPTX/NVPTX.td [3:4]
+ lib/Target/NVPTX/NVPTXAllocaHoisting.cpp [3:4]
+ lib/Target/NVPTX/NVPTXAllocaHoisting.h [3:4]
+ lib/Target/NVPTX/NVPTXAsmPrinter.cpp [3:4]
+ lib/Target/NVPTX/NVPTXAsmPrinter.h [3:4]
+ lib/Target/NVPTX/NVPTXAssignValidGlobalNames.cpp [3:4]
+ lib/Target/NVPTX/NVPTXAtomicLower.cpp [3:4]
+ lib/Target/NVPTX/NVPTXAtomicLower.h [3:4]
+ lib/Target/NVPTX/NVPTXFrameLowering.cpp [3:4]
+ lib/Target/NVPTX/NVPTXFrameLowering.h [3:4]
+ lib/Target/NVPTX/NVPTXGenericToNVVM.cpp [3:4]
+ lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp [3:4]
+ lib/Target/NVPTX/NVPTXISelDAGToDAG.h [3:4]
+ lib/Target/NVPTX/NVPTXISelLowering.cpp [3:4]
+ lib/Target/NVPTX/NVPTXISelLowering.h [3:4]
+ lib/Target/NVPTX/NVPTXImageOptimizer.cpp [3:4]
+ lib/Target/NVPTX/NVPTXInstrFormats.td [3:4]
+ lib/Target/NVPTX/NVPTXInstrInfo.cpp [3:4]
+ lib/Target/NVPTX/NVPTXInstrInfo.h [3:4]
+ lib/Target/NVPTX/NVPTXInstrInfo.td [3:4]
+ lib/Target/NVPTX/NVPTXIntrinsics.td [3:4]
+ lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp [3:4]
+ lib/Target/NVPTX/NVPTXLowerAggrCopies.h [3:4]
+ lib/Target/NVPTX/NVPTXLowerAlloca.cpp [3:4]
+ lib/Target/NVPTX/NVPTXLowerArgs.cpp [3:4]
+ lib/Target/NVPTX/NVPTXMCExpr.cpp [3:4]
+ lib/Target/NVPTX/NVPTXMCExpr.h [3:4]
+ lib/Target/NVPTX/NVPTXMachineFunctionInfo.h [3:4]
+ lib/Target/NVPTX/NVPTXPeephole.cpp [3:4]
+ lib/Target/NVPTX/NVPTXPrologEpilogPass.cpp [3:4]
+ lib/Target/NVPTX/NVPTXProxyRegErasure.cpp [3:4]
+ lib/Target/NVPTX/NVPTXRegisterInfo.cpp [3:4]
+ lib/Target/NVPTX/NVPTXRegisterInfo.h [3:4]
+ lib/Target/NVPTX/NVPTXRegisterInfo.td [3:4]
+ lib/Target/NVPTX/NVPTXReplaceImageHandles.cpp [3:4]
+ lib/Target/NVPTX/NVPTXSubtarget.cpp [3:4]
+ lib/Target/NVPTX/NVPTXSubtarget.h [3:4]
+ lib/Target/NVPTX/NVPTXTargetMachine.cpp [3:4]
+ lib/Target/NVPTX/NVPTXTargetMachine.h [3:4]
+ lib/Target/NVPTX/NVPTXTargetObjectFile.h [3:4]
+ lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp [3:4]
+ lib/Target/NVPTX/NVPTXTargetTransformInfo.h [3:4]
+ lib/Target/NVPTX/NVPTXUtilities.cpp [3:4]
+ lib/Target/NVPTX/NVPTXUtilities.h [3:4]
+ lib/Target/NVPTX/NVVMIntrRange.cpp [3:4]
+ lib/Target/NVPTX/NVVMReflect.cpp [3:4]
+ lib/Target/NVPTX/TargetInfo/NVPTXTargetInfo.cpp [3:4]
+ lib/Target/NVPTX/TargetInfo/NVPTXTargetInfo.h [3:4]
+ lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp [3:4]
+ lib/Target/PowerPC/Disassembler/PPCDisassembler.cpp [3:4]
+ lib/Target/PowerPC/GISel/PPCCallLowering.cpp [3:4]
+ lib/Target/PowerPC/GISel/PPCCallLowering.h [3:4]
+ lib/Target/PowerPC/GISel/PPCInstructionSelector.cpp [3:4]
+ lib/Target/PowerPC/GISel/PPCLegalizerInfo.cpp [3:4]
+ lib/Target/PowerPC/GISel/PPCLegalizerInfo.h [3:4]
+ lib/Target/PowerPC/GISel/PPCRegisterBankInfo.cpp [3:4]
+ lib/Target/PowerPC/GISel/PPCRegisterBankInfo.h [3:4]
+ lib/Target/PowerPC/GISel/PPCRegisterBanks.td [3:4]
+ lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp [3:4]
+ lib/Target/PowerPC/MCTargetDesc/PPCELFObjectWriter.cpp [3:4]
+ lib/Target/PowerPC/MCTargetDesc/PPCELFStreamer.cpp [3:4]
+ lib/Target/PowerPC/MCTargetDesc/PPCELFStreamer.h [3:4]
+ lib/Target/PowerPC/MCTargetDesc/PPCFixupKinds.h [3:4]
+ lib/Target/PowerPC/MCTargetDesc/PPCInstPrinter.cpp [3:4]
+ lib/Target/PowerPC/MCTargetDesc/PPCInstPrinter.h [3:4]
+ lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp [3:4]
+ lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.h [3:4]
+ lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp [3:4]
+ lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.h [3:4]
+ lib/Target/PowerPC/MCTargetDesc/PPCMCExpr.cpp [3:4]
+ lib/Target/PowerPC/MCTargetDesc/PPCMCExpr.h [3:4]
+ lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp [3:4]
+ lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.h [3:4]
+ lib/Target/PowerPC/MCTargetDesc/PPCPredicates.cpp [3:4]
+ lib/Target/PowerPC/MCTargetDesc/PPCPredicates.h [3:4]
+ lib/Target/PowerPC/MCTargetDesc/PPCXCOFFObjectWriter.cpp [4:5]
+ lib/Target/PowerPC/MCTargetDesc/PPCXCOFFStreamer.cpp [3:4]
+ lib/Target/PowerPC/MCTargetDesc/PPCXCOFFStreamer.h [3:4]
+ lib/Target/PowerPC/P10InstrResources.td [3:4]
+ lib/Target/PowerPC/P9InstrResources.td [3:4]
+ lib/Target/PowerPC/PPC.h [3:4]
+ lib/Target/PowerPC/PPC.td [3:4]
+ lib/Target/PowerPC/PPCAsmPrinter.cpp [3:4]
+ lib/Target/PowerPC/PPCBoolRetToInt.cpp [3:4]
+ lib/Target/PowerPC/PPCBranchCoalescing.cpp [3:4]
+ lib/Target/PowerPC/PPCBranchSelector.cpp [3:4]
+ lib/Target/PowerPC/PPCCCState.cpp [3:4]
+ lib/Target/PowerPC/PPCCCState.h [3:4]
+ lib/Target/PowerPC/PPCCTRLoops.cpp [3:4]
+ lib/Target/PowerPC/PPCCTRLoopsVerify.cpp [3:4]
+ lib/Target/PowerPC/PPCCallingConv.cpp [3:4]
+ lib/Target/PowerPC/PPCCallingConv.h [3:4]
+ lib/Target/PowerPC/PPCCallingConv.td [3:4]
+ lib/Target/PowerPC/PPCEarlyReturn.cpp [3:4]
+ lib/Target/PowerPC/PPCExpandAtomicPseudoInsts.cpp [3:4]
+ lib/Target/PowerPC/PPCExpandISEL.cpp [3:4]
+ lib/Target/PowerPC/PPCFastISel.cpp [3:4]
+ lib/Target/PowerPC/PPCFrameLowering.cpp [3:4]
+ lib/Target/PowerPC/PPCFrameLowering.h [3:4]
+ lib/Target/PowerPC/PPCGenRegisterBankInfo.def [3:4]
+ lib/Target/PowerPC/PPCGenScalarMASSEntries.cpp [3:4]
+ lib/Target/PowerPC/PPCHazardRecognizers.cpp [3:4]
+ lib/Target/PowerPC/PPCHazardRecognizers.h [3:4]
+ lib/Target/PowerPC/PPCISelDAGToDAG.cpp [3:4]
+ lib/Target/PowerPC/PPCISelLowering.cpp [3:4]
+ lib/Target/PowerPC/PPCISelLowering.h [3:4]
+ lib/Target/PowerPC/PPCInstr64Bit.td [3:4]
+ lib/Target/PowerPC/PPCInstrAltivec.td [3:4]
+ lib/Target/PowerPC/PPCInstrBuilder.h [3:4]
+ lib/Target/PowerPC/PPCInstrFormats.td [3:4]
+ lib/Target/PowerPC/PPCInstrHTM.td [3:4]
+ lib/Target/PowerPC/PPCInstrInfo.cpp [3:4]
+ lib/Target/PowerPC/PPCInstrInfo.h [3:4]
+ lib/Target/PowerPC/PPCInstrInfo.td [3:4]
+ lib/Target/PowerPC/PPCInstrP10.td [5:6]
+ lib/Target/PowerPC/PPCInstrSPE.td [3:4]
+ lib/Target/PowerPC/PPCInstrVSX.td [3:4]
+ lib/Target/PowerPC/PPCLoopInstrFormPrep.cpp [3:4]
+ lib/Target/PowerPC/PPCLowerMASSVEntries.cpp [3:4]
+ lib/Target/PowerPC/PPCMCInstLower.cpp [3:4]
+ lib/Target/PowerPC/PPCMIPeephole.cpp [3:4]
+ lib/Target/PowerPC/PPCMachineFunctionInfo.cpp [3:4]
+ lib/Target/PowerPC/PPCMachineFunctionInfo.h [3:4]
+ lib/Target/PowerPC/PPCMachineScheduler.cpp [3:4]
+ lib/Target/PowerPC/PPCMachineScheduler.h [3:4]
+ lib/Target/PowerPC/PPCMacroFusion.cpp [3:4]
+ lib/Target/PowerPC/PPCMacroFusion.h [3:4]
+ lib/Target/PowerPC/PPCPerfectShuffle.h [3:4]
+ lib/Target/PowerPC/PPCPfmCounters.td [3:4]
+ lib/Target/PowerPC/PPCPreEmitPeephole.cpp [3:4]
+ lib/Target/PowerPC/PPCReduceCRLogicals.cpp [3:4]
+ lib/Target/PowerPC/PPCRegisterInfo.cpp [3:4]
+ lib/Target/PowerPC/PPCRegisterInfo.h [3:4]
+ lib/Target/PowerPC/PPCRegisterInfo.td [3:4]
+ lib/Target/PowerPC/PPCRegisterInfoDMR.td [3:4]
+ lib/Target/PowerPC/PPCRegisterInfoMMA.td [3:4]
+ lib/Target/PowerPC/PPCSchedPredicates.td [3:4]
+ lib/Target/PowerPC/PPCSchedule.td [3:4]
+ lib/Target/PowerPC/PPCSchedule440.td [3:4]
+ lib/Target/PowerPC/PPCScheduleA2.td [3:4]
+ lib/Target/PowerPC/PPCScheduleE500.td [3:4]
+ lib/Target/PowerPC/PPCScheduleE500mc.td [3:4]
+ lib/Target/PowerPC/PPCScheduleE5500.td [3:4]
+ lib/Target/PowerPC/PPCScheduleG3.td [3:4]
+ lib/Target/PowerPC/PPCScheduleG4.td [3:4]
+ lib/Target/PowerPC/PPCScheduleG4Plus.td [3:4]
+ lib/Target/PowerPC/PPCScheduleG5.td [3:4]
+ lib/Target/PowerPC/PPCScheduleP10.td [3:4]
+ lib/Target/PowerPC/PPCScheduleP7.td [3:4]
+ lib/Target/PowerPC/PPCScheduleP8.td [3:4]
+ lib/Target/PowerPC/PPCScheduleP9.td [3:4]
+ lib/Target/PowerPC/PPCSubtarget.cpp [3:4]
+ lib/Target/PowerPC/PPCSubtarget.h [3:4]
+ lib/Target/PowerPC/PPCTLSDynamicCall.cpp [3:4]
+ lib/Target/PowerPC/PPCTOCRegDeps.cpp [3:4]
+ lib/Target/PowerPC/PPCTargetMachine.cpp [3:4]
+ lib/Target/PowerPC/PPCTargetMachine.h [3:4]
+ lib/Target/PowerPC/PPCTargetObjectFile.cpp [3:4]
+ lib/Target/PowerPC/PPCTargetObjectFile.h [3:4]
+ lib/Target/PowerPC/PPCTargetStreamer.h [3:4]
+ lib/Target/PowerPC/PPCTargetTransformInfo.cpp [3:4]
+ lib/Target/PowerPC/PPCTargetTransformInfo.h [3:4]
+ lib/Target/PowerPC/PPCVSXCopy.cpp [3:4]
+ lib/Target/PowerPC/PPCVSXFMAMutate.cpp [3:4]
+ lib/Target/PowerPC/PPCVSXSwapRemoval.cpp [3:4]
+ lib/Target/PowerPC/TargetInfo/PowerPCTargetInfo.cpp [3:4]
+ lib/Target/PowerPC/TargetInfo/PowerPCTargetInfo.h [3:4]
+ lib/Target/RISCV/GISel/RISCVCallLowering.cpp [3:4]
+ lib/Target/RISCV/GISel/RISCVCallLowering.h [3:4]
+ lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp [3:4]
+ lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp [3:4]
+ lib/Target/RISCV/GISel/RISCVLegalizerInfo.h [3:4]
+ lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp [3:4]
+ lib/Target/RISCV/GISel/RISCVRegisterBankInfo.h [3:4]
+ lib/Target/RISCV/GISel/RISCVRegisterBanks.td [3:4]
+ lib/Target/RISCV/RISCV.h [3:4]
+ lib/Target/RISCV/RISCV.td [3:4]
+ lib/Target/RISCV/RISCVAsmPrinter.cpp [3:4]
+ lib/Target/RISCV/RISCVCallingConv.td [3:4]
+ lib/Target/RISCV/RISCVCodeGenPrepare.cpp [3:4]
+ lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp [3:4]
+ lib/Target/RISCV/RISCVExpandPseudoInsts.cpp [3:4]
+ lib/Target/RISCV/RISCVFeatures.td [3:4]
+ lib/Target/RISCV/RISCVFrameLowering.cpp [3:4]
+ lib/Target/RISCV/RISCVFrameLowering.h [3:4]
+ lib/Target/RISCV/RISCVGatherScatterLowering.cpp [3:4]
+ lib/Target/RISCV/RISCVISelDAGToDAG.cpp [3:4]
+ lib/Target/RISCV/RISCVISelDAGToDAG.h [3:4]
+ lib/Target/RISCV/RISCVISelLowering.cpp [3:4]
+ lib/Target/RISCV/RISCVISelLowering.h [3:4]
+ lib/Target/RISCV/RISCVInsertVSETVLI.cpp [3:4]
+ lib/Target/RISCV/RISCVInstrFormats.td [3:4]
+ lib/Target/RISCV/RISCVInstrFormatsC.td [3:4]
+ lib/Target/RISCV/RISCVInstrFormatsV.td [3:4]
+ lib/Target/RISCV/RISCVInstrInfo.cpp [3:4]
+ lib/Target/RISCV/RISCVInstrInfo.h [3:4]
+ lib/Target/RISCV/RISCVInstrInfo.td [3:4]
+ lib/Target/RISCV/RISCVInstrInfoA.td [3:4]
+ lib/Target/RISCV/RISCVInstrInfoC.td [3:4]
+ lib/Target/RISCV/RISCVInstrInfoD.td [3:4]
+ lib/Target/RISCV/RISCVInstrInfoF.td [3:4]
+ lib/Target/RISCV/RISCVInstrInfoM.td [3:4]
+ lib/Target/RISCV/RISCVInstrInfoV.td [3:4]
+ lib/Target/RISCV/RISCVInstrInfoVPseudos.td [3:4]
+ lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td [3:4]
+ lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td [3:4]
+ lib/Target/RISCV/RISCVInstrInfoXTHead.td [3:4]
+ lib/Target/RISCV/RISCVInstrInfoXVentana.td [3:4]
+ lib/Target/RISCV/RISCVInstrInfoZb.td [3:4]
+ lib/Target/RISCV/RISCVInstrInfoZfh.td [3:4]
+ lib/Target/RISCV/RISCVInstrInfoZicbo.td [3:4]
+ lib/Target/RISCV/RISCVInstrInfoZk.td [3:4]
+ lib/Target/RISCV/RISCVMCInstLower.cpp [3:4]
+ lib/Target/RISCV/RISCVMachineFunctionInfo.cpp [3:4]
+ lib/Target/RISCV/RISCVMachineFunctionInfo.h [3:4]
+ lib/Target/RISCV/RISCVMacroFusion.cpp [3:4]
+ lib/Target/RISCV/RISCVMacroFusion.h [3:4]
+ lib/Target/RISCV/RISCVMakeCompressible.cpp [3:4]
+ lib/Target/RISCV/RISCVMergeBaseOffset.cpp [3:4]
+ lib/Target/RISCV/RISCVProcessors.td [3:4]
+ lib/Target/RISCV/RISCVRedundantCopyElimination.cpp [3:4]
+ lib/Target/RISCV/RISCVRegisterInfo.cpp [3:4]
+ lib/Target/RISCV/RISCVRegisterInfo.h [3:4]
+ lib/Target/RISCV/RISCVRegisterInfo.td [3:4]
+ lib/Target/RISCV/RISCVSExtWRemoval.cpp [3:4]
+ lib/Target/RISCV/RISCVSchedRocket.td [3:4]
+ lib/Target/RISCV/RISCVSchedSiFive7.td [3:4]
+ lib/Target/RISCV/RISCVSchedSyntacoreSCR1.td [3:4]
+ lib/Target/RISCV/RISCVSchedule.td [3:4]
+ lib/Target/RISCV/RISCVScheduleV.td [3:4]
+ lib/Target/RISCV/RISCVScheduleZb.td [3:4]
+ lib/Target/RISCV/RISCVStripWSuffix.cpp [4:5]
+ lib/Target/RISCV/RISCVSubtarget.cpp [3:4]
+ lib/Target/RISCV/RISCVSubtarget.h [3:4]
+ lib/Target/RISCV/RISCVSystemOperands.td [3:4]
+ lib/Target/RISCV/RISCVTargetMachine.cpp [3:4]
+ lib/Target/RISCV/RISCVTargetMachine.h [3:4]
+ lib/Target/RISCV/RISCVTargetObjectFile.cpp [3:4]
+ lib/Target/RISCV/RISCVTargetObjectFile.h [3:4]
+ lib/Target/RISCV/RISCVTargetTransformInfo.cpp [3:4]
+ lib/Target/RISCV/RISCVTargetTransformInfo.h [3:4]
+ lib/Target/Target.cpp [3:4]
+ lib/Target/TargetIntrinsicInfo.cpp [3:4]
+ lib/Target/TargetLoweringObjectFile.cpp [3:4]
+ lib/Target/TargetMachine.cpp [3:4]
+ lib/Target/TargetMachineC.cpp [3:4]
+ lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp [3:4]
+ lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.cpp [3:4]
+ lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.h [3:4]
+ lib/Target/WebAssembly/Disassembler/WebAssemblyDisassembler.cpp [3:4]
+ lib/Target/WebAssembly/MCTargetDesc/WebAssemblyAsmBackend.cpp [3:4]
+ lib/Target/WebAssembly/MCTargetDesc/WebAssemblyFixupKinds.h [3:4]
+ lib/Target/WebAssembly/MCTargetDesc/WebAssemblyInstPrinter.cpp [3:4]
+ lib/Target/WebAssembly/MCTargetDesc/WebAssemblyInstPrinter.h [3:4]
+ lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.cpp [3:4]
+ lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.h [3:4]
+ lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp [3:4]
+ lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.cpp [3:4]
+ lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h [3:4]
+ lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.cpp [3:4]
+ lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.h [3:4]
+ lib/Target/WebAssembly/MCTargetDesc/WebAssemblyWasmObjectWriter.cpp [3:4]
+ lib/Target/WebAssembly/TargetInfo/WebAssemblyTargetInfo.cpp [3:4]
+ lib/Target/WebAssembly/TargetInfo/WebAssemblyTargetInfo.h [3:4]
+ lib/Target/WebAssembly/Utils/WebAssemblyTypeUtilities.cpp [3:4]
+ lib/Target/WebAssembly/Utils/WebAssemblyTypeUtilities.h [3:4]
+ lib/Target/WebAssembly/Utils/WebAssemblyUtilities.cpp [3:4]
+ lib/Target/WebAssembly/Utils/WebAssemblyUtilities.h [3:4]
+ lib/Target/WebAssembly/WebAssembly.h [3:4]
+ lib/Target/WebAssembly/WebAssembly.td [3:4]
+ lib/Target/WebAssembly/WebAssemblyAddMissingPrototypes.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyArgumentMove.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyAsmPrinter.h [3:4]
+ lib/Target/WebAssembly/WebAssemblyCFGSort.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyDebugFixup.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyDebugValueManager.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyDebugValueManager.h [3:4]
+ lib/Target/WebAssembly/WebAssemblyExceptionInfo.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyExceptionInfo.h [3:4]
+ lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyFastISel.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyFixBrTableDefaults.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyFixFunctionBitcasts.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyFixIrreducibleControlFlow.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyFrameLowering.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyFrameLowering.h [3:4]
+ lib/Target/WebAssembly/WebAssemblyISD.def [3:4]
+ lib/Target/WebAssembly/WebAssemblyISelDAGToDAG.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyISelLowering.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyISelLowering.h [3:4]
+ lib/Target/WebAssembly/WebAssemblyInstrAtomics.td [3:4]
+ lib/Target/WebAssembly/WebAssemblyInstrBulkMemory.td [3:4]
+ lib/Target/WebAssembly/WebAssemblyInstrCall.td [3:4]
+ lib/Target/WebAssembly/WebAssemblyInstrControl.td [3:4]
+ lib/Target/WebAssembly/WebAssemblyInstrConv.td [3:4]
+ lib/Target/WebAssembly/WebAssemblyInstrFloat.td [3:4]
+ lib/Target/WebAssembly/WebAssemblyInstrFormats.td [3:4]
+ lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyInstrInfo.h [3:4]
+ lib/Target/WebAssembly/WebAssemblyInstrInfo.td [3:4]
+ lib/Target/WebAssembly/WebAssemblyInstrInteger.td [3:4]
+ lib/Target/WebAssembly/WebAssemblyInstrMemory.td [3:4]
+ lib/Target/WebAssembly/WebAssemblyInstrRef.td [3:4]
+ lib/Target/WebAssembly/WebAssemblyInstrSIMD.td [3:4]
+ lib/Target/WebAssembly/WebAssemblyInstrTable.td [3:4]
+ lib/Target/WebAssembly/WebAssemblyLateEHPrepare.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyLowerBrUnless.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyLowerRefTypesIntPtrConv.cpp [4:5]
+ lib/Target/WebAssembly/WebAssemblyMCInstLower.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyMCInstLower.h [3:4]
+ lib/Target/WebAssembly/WebAssemblyMCLowerPrePass.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.h [3:4]
+ lib/Target/WebAssembly/WebAssemblyMemIntrinsicResults.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyNullifyDebugValueLists.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyOptimizeReturned.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyPeephole.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyRegColoring.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyRegNumbering.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyRegStackify.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyRegisterInfo.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyRegisterInfo.h [3:4]
+ lib/Target/WebAssembly/WebAssemblyRegisterInfo.td [3:4]
+ lib/Target/WebAssembly/WebAssemblyReplacePhysRegs.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.h [3:4]
+ lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.h [3:4]
+ lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblySortRegion.h [3:4]
+ lib/Target/WebAssembly/WebAssemblySubtarget.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblySubtarget.h [3:4]
+ lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyTargetMachine.h [3:4]
+ lib/Target/WebAssembly/WebAssemblyTargetObjectFile.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyTargetObjectFile.h [3:4]
+ lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp [3:4]
+ lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h [3:4]
+ lib/Target/X86/AsmParser/X86AsmParser.cpp [3:4]
+ lib/Target/X86/AsmParser/X86AsmParserCommon.h [3:4]
+ lib/Target/X86/AsmParser/X86Operand.h [3:4]
+ lib/Target/X86/Disassembler/X86Disassembler.cpp [3:4]
+ lib/Target/X86/Disassembler/X86DisassemblerDecoder.h [3:4]
+ lib/Target/X86/ImmutableGraph.h [3:4]
+ lib/Target/X86/MCA/X86CustomBehaviour.cpp [3:4]
+ lib/Target/X86/MCA/X86CustomBehaviour.h [3:4]
+ lib/Target/X86/MCTargetDesc/X86ATTInstPrinter.cpp [3:4]
+ lib/Target/X86/MCTargetDesc/X86ATTInstPrinter.h [3:4]
+ lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp [3:4]
+ lib/Target/X86/MCTargetDesc/X86BaseInfo.h [3:4]
+ lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp [3:4]
+ lib/Target/X86/MCTargetDesc/X86FixupKinds.h [3:4]
+ lib/Target/X86/MCTargetDesc/X86InstComments.cpp [3:4]
+ lib/Target/X86/MCTargetDesc/X86InstComments.h [3:4]
+ lib/Target/X86/MCTargetDesc/X86InstPrinterCommon.cpp [3:4]
+ lib/Target/X86/MCTargetDesc/X86InstPrinterCommon.h [3:4]
+ lib/Target/X86/MCTargetDesc/X86InstrRelaxTables.cpp [3:4]
+ lib/Target/X86/MCTargetDesc/X86InstrRelaxTables.h [3:4]
+ lib/Target/X86/MCTargetDesc/X86IntelInstPrinter.cpp [3:4]
+ lib/Target/X86/MCTargetDesc/X86IntelInstPrinter.h [3:4]
+ lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp [3:4]
+ lib/Target/X86/MCTargetDesc/X86MCAsmInfo.h [3:4]
+ lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp [3:4]
+ lib/Target/X86/MCTargetDesc/X86MCExpr.h [3:4]
+ lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp [3:4]
+ lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h [3:4]
+ lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp [3:4]
+ lib/Target/X86/MCTargetDesc/X86MnemonicTables.cpp [3:4]
+ lib/Target/X86/MCTargetDesc/X86ShuffleDecode.cpp [3:4]
+ lib/Target/X86/MCTargetDesc/X86ShuffleDecode.h [3:4]
+ lib/Target/X86/MCTargetDesc/X86TargetStreamer.h [3:4]
+ lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp [3:4]
+ lib/Target/X86/MCTargetDesc/X86WinCOFFStreamer.cpp [3:4]
+ lib/Target/X86/MCTargetDesc/X86WinCOFFTargetStreamer.cpp [3:4]
+ lib/Target/X86/TargetInfo/X86TargetInfo.cpp [3:4]
+ lib/Target/X86/TargetInfo/X86TargetInfo.h [3:4]
+ lib/Target/X86/X86.h [3:4]
+ lib/Target/X86/X86.td [3:4]
+ lib/Target/X86/X86AsmPrinter.cpp [3:4]
+ lib/Target/X86/X86AsmPrinter.h [3:4]
+ lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp [3:4]
+ lib/Target/X86/X86AvoidTrailingCall.cpp [3:4]
+ lib/Target/X86/X86CallFrameOptimization.cpp [3:4]
+ lib/Target/X86/X86CallLowering.cpp [3:4]
+ lib/Target/X86/X86CallLowering.h [3:4]
+ lib/Target/X86/X86CallingConv.cpp [3:4]
+ lib/Target/X86/X86CallingConv.h [3:4]
+ lib/Target/X86/X86CallingConv.td [3:4]
+ lib/Target/X86/X86CmovConversion.cpp [3:4]
+ lib/Target/X86/X86DiscriminateMemOps.cpp [3:4]
+ lib/Target/X86/X86DomainReassignment.cpp [3:4]
+ lib/Target/X86/X86DynAllocaExpander.cpp [3:4]
+ lib/Target/X86/X86EvexToVex.cpp [4:5]
+ lib/Target/X86/X86ExpandPseudo.cpp [3:4]
+ lib/Target/X86/X86FastISel.cpp [3:4]
+ lib/Target/X86/X86FastPreTileConfig.cpp [3:4]
+ lib/Target/X86/X86FastTileConfig.cpp [3:4]
+ lib/Target/X86/X86FixupBWInsts.cpp [3:4]
+ lib/Target/X86/X86FixupLEAs.cpp [3:4]
+ lib/Target/X86/X86FixupSetCC.cpp [3:4]
+ lib/Target/X86/X86FlagsCopyLowering.cpp [3:4]
+ lib/Target/X86/X86FloatingPoint.cpp [3:4]
+ lib/Target/X86/X86FrameLowering.cpp [3:4]
+ lib/Target/X86/X86FrameLowering.h [3:4]
+ lib/Target/X86/X86GenRegisterBankInfo.def [3:4]
+ lib/Target/X86/X86ISelDAGToDAG.cpp [3:4]
+ lib/Target/X86/X86ISelLowering.cpp [3:4]
+ lib/Target/X86/X86ISelLowering.h [3:4]
+ lib/Target/X86/X86IndirectBranchTracking.cpp [3:4]
+ lib/Target/X86/X86IndirectThunks.cpp [3:4]
+ lib/Target/X86/X86InsertPrefetch.cpp [3:4]
+ lib/Target/X86/X86InsertWait.cpp [3:4]
+ lib/Target/X86/X86InstCombineIntrinsic.cpp [3:4]
+ lib/Target/X86/X86Instr3DNow.td [3:4]
+ lib/Target/X86/X86InstrAMX.td [3:4]
+ lib/Target/X86/X86InstrAVX512.td [3:4]
+ lib/Target/X86/X86InstrArithmetic.td [3:4]
+ lib/Target/X86/X86InstrBuilder.h [3:4]
+ lib/Target/X86/X86InstrCMovSetCC.td [3:4]
+ lib/Target/X86/X86InstrCompiler.td [3:4]
+ lib/Target/X86/X86InstrControl.td [3:4]
+ lib/Target/X86/X86InstrExtension.td [3:4]
+ lib/Target/X86/X86InstrFMA.td [3:4]
+ lib/Target/X86/X86InstrFMA3Info.cpp [3:4]
+ lib/Target/X86/X86InstrFMA3Info.h [3:4]
+ lib/Target/X86/X86InstrFPStack.td [3:4]
+ lib/Target/X86/X86InstrFoldTables.cpp [3:4]
+ lib/Target/X86/X86InstrFoldTables.h [3:4]
+ lib/Target/X86/X86InstrFormats.td [3:4]
+ lib/Target/X86/X86InstrFragmentsSIMD.td [3:4]
+ lib/Target/X86/X86InstrInfo.cpp [3:4]
+ lib/Target/X86/X86InstrInfo.h [3:4]
+ lib/Target/X86/X86InstrInfo.td [3:4]
+ lib/Target/X86/X86InstrKL.td [4:5]
+ lib/Target/X86/X86InstrMMX.td [3:4]
+ lib/Target/X86/X86InstrRAOINT.td [3:4]
+ lib/Target/X86/X86InstrSGX.td [3:4]
+ lib/Target/X86/X86InstrSNP.td [3:4]
+ lib/Target/X86/X86InstrSSE.td [3:4]
+ lib/Target/X86/X86InstrSVM.td [3:4]
+ lib/Target/X86/X86InstrShiftRotate.td [3:4]
+ lib/Target/X86/X86InstrSystem.td [3:4]
+ lib/Target/X86/X86InstrTDX.td [3:4]
+ lib/Target/X86/X86InstrTSX.td [3:4]
+ lib/Target/X86/X86InstrVMX.td [3:4]
+ lib/Target/X86/X86InstrVecCompiler.td [3:4]
+ lib/Target/X86/X86InstrXOP.td [3:4]
+ lib/Target/X86/X86InstructionSelector.cpp [3:4]
+ lib/Target/X86/X86InterleavedAccess.cpp [3:4]
+ lib/Target/X86/X86IntrinsicsInfo.h [3:4]
+ lib/Target/X86/X86KCFI.cpp [3:4]
+ lib/Target/X86/X86LegalizerInfo.cpp [3:4]
+ lib/Target/X86/X86LegalizerInfo.h [4:5]
+ lib/Target/X86/X86LoadValueInjectionLoadHardening.cpp [3:4]
+ lib/Target/X86/X86LoadValueInjectionRetHardening.cpp [3:4]
+ lib/Target/X86/X86LowerAMXIntrinsics.cpp [3:4]
+ lib/Target/X86/X86LowerAMXType.cpp [3:4]
+ lib/Target/X86/X86LowerTileCopy.cpp [3:4]
+ lib/Target/X86/X86MCInstLower.cpp [3:4]
+ lib/Target/X86/X86MachineFunctionInfo.cpp [3:4]
+ lib/Target/X86/X86MachineFunctionInfo.h [3:4]
+ lib/Target/X86/X86MacroFusion.cpp [3:4]
+ lib/Target/X86/X86MacroFusion.h [3:4]
+ lib/Target/X86/X86OptimizeLEAs.cpp [3:4]
+ lib/Target/X86/X86PadShortFunction.cpp [3:4]
+ lib/Target/X86/X86PartialReduction.cpp [3:4]
+ lib/Target/X86/X86PfmCounters.td [3:4]
+ lib/Target/X86/X86PreAMXConfig.cpp [3:4]
+ lib/Target/X86/X86PreTileConfig.cpp [3:4]
+ lib/Target/X86/X86RegisterBankInfo.cpp [3:4]
+ lib/Target/X86/X86RegisterBankInfo.h [3:4]
+ lib/Target/X86/X86RegisterBanks.td [3:4]
+ lib/Target/X86/X86RegisterInfo.cpp [3:4]
+ lib/Target/X86/X86RegisterInfo.h [3:4]
+ lib/Target/X86/X86RegisterInfo.td [3:4]
+ lib/Target/X86/X86ReturnThunks.cpp [3:4]
+ lib/Target/X86/X86SchedAlderlakeP.td [3:4]
+ lib/Target/X86/X86SchedBroadwell.td [3:4]
+ lib/Target/X86/X86SchedHaswell.td [3:4]
+ lib/Target/X86/X86SchedIceLake.td [3:4]
+ lib/Target/X86/X86SchedPredicates.td [3:4]
+ lib/Target/X86/X86SchedSandyBridge.td [3:4]
+ lib/Target/X86/X86SchedSkylakeClient.td [3:4]
+ lib/Target/X86/X86SchedSkylakeServer.td [3:4]
+ lib/Target/X86/X86Schedule.td [3:4]
+ lib/Target/X86/X86ScheduleAtom.td [3:4]
+ lib/Target/X86/X86ScheduleBdVer2.td [3:4]
+ lib/Target/X86/X86ScheduleBtVer2.td [3:4]
+ lib/Target/X86/X86ScheduleSLM.td [3:4]
+ lib/Target/X86/X86ScheduleZnver1.td [3:4]
+ lib/Target/X86/X86ScheduleZnver2.td [3:4]
+ lib/Target/X86/X86ScheduleZnver3.td [3:4]
+ lib/Target/X86/X86SelectionDAGInfo.cpp [3:4]
+ lib/Target/X86/X86SelectionDAGInfo.h [3:4]
+ lib/Target/X86/X86ShuffleDecodeConstantPool.cpp [3:4]
+ lib/Target/X86/X86ShuffleDecodeConstantPool.h [3:4]
+ lib/Target/X86/X86SpeculativeExecutionSideEffectSuppression.cpp [3:4]
+ lib/Target/X86/X86SpeculativeLoadHardening.cpp [3:4]
+ lib/Target/X86/X86Subtarget.cpp [3:4]
+ lib/Target/X86/X86Subtarget.h [3:4]
+ lib/Target/X86/X86TargetMachine.cpp [3:4]
+ lib/Target/X86/X86TargetMachine.h [3:4]
+ lib/Target/X86/X86TargetObjectFile.cpp [3:4]
+ lib/Target/X86/X86TargetObjectFile.h [3:4]
+ lib/Target/X86/X86TargetTransformInfo.cpp [3:4]
+ lib/Target/X86/X86TargetTransformInfo.h [3:4]
+ lib/Target/X86/X86TileConfig.cpp [3:4]
+ lib/Target/X86/X86VZeroUpper.cpp [3:4]
+ lib/Target/X86/X86WinEHState.cpp [3:4]
+ lib/TargetParser/AArch64TargetParser.cpp [3:4]
+ lib/TargetParser/ARMTargetParser.cpp [3:4]
+ lib/TargetParser/ARMTargetParserCommon.cpp [3:4]
+ lib/TargetParser/Host.cpp [3:4]
+ lib/TargetParser/LoongArchTargetParser.cpp [3:4]
+ lib/TargetParser/RISCVTargetParser.cpp [3:4]
+ lib/TargetParser/TargetParser.cpp [3:4]
+ lib/TargetParser/Triple.cpp [3:4]
+ lib/TargetParser/Unix/Host.inc [3:4]
+ lib/TargetParser/Windows/Host.inc [3:4]
+ lib/TargetParser/X86TargetParser.cpp [3:4]
+ lib/TextAPI/Architecture.cpp [3:4]
+ lib/TextAPI/ArchitectureSet.cpp [3:4]
+ lib/TextAPI/InterfaceFile.cpp [3:4]
+ lib/TextAPI/PackedVersion.cpp [3:4]
+ lib/TextAPI/Platform.cpp [3:4]
+ lib/TextAPI/Symbol.cpp [3:4]
+ lib/TextAPI/Target.cpp [3:4]
+ lib/TextAPI/TextAPIContext.h [3:4]
+ lib/TextAPI/TextStub.cpp [3:4]
+ lib/TextAPI/TextStubCommon.cpp [3:4]
+ lib/TextAPI/TextStubCommon.h [3:4]
+ lib/ToolDrivers/llvm-dlltool/DlltoolDriver.cpp [3:4]
+ lib/ToolDrivers/llvm-lib/LibDriver.cpp [3:4]
+ lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp [3:4]
+ lib/Transforms/AggressiveInstCombine/AggressiveInstCombineInternal.h [3:4]
+ lib/Transforms/AggressiveInstCombine/TruncInstCombine.cpp [3:4]
+ lib/Transforms/CFGuard/CFGuard.cpp [3:4]
+ lib/Transforms/Coroutines/CoroCleanup.cpp [3:4]
+ lib/Transforms/Coroutines/CoroConditionalWrapper.cpp [3:4]
+ lib/Transforms/Coroutines/CoroEarly.cpp [3:4]
+ lib/Transforms/Coroutines/CoroElide.cpp [3:4]
+ lib/Transforms/Coroutines/CoroFrame.cpp [3:4]
+ lib/Transforms/Coroutines/CoroInstr.h [3:4]
+ lib/Transforms/Coroutines/CoroInternal.h [3:4]
+ lib/Transforms/Coroutines/CoroSplit.cpp [3:4]
+ lib/Transforms/Coroutines/Coroutines.cpp [3:4]
+ lib/Transforms/IPO/AlwaysInliner.cpp [3:4]
+ lib/Transforms/IPO/Annotation2Metadata.cpp [3:4]
+ lib/Transforms/IPO/ArgumentPromotion.cpp [3:4]
+ lib/Transforms/IPO/Attributor.cpp [3:4]
+ lib/Transforms/IPO/AttributorAttributes.cpp [3:4]
+ lib/Transforms/IPO/BarrierNoopPass.cpp [3:4]
+ lib/Transforms/IPO/BlockExtractor.cpp [3:4]
+ lib/Transforms/IPO/CalledValuePropagation.cpp [3:4]
+ lib/Transforms/IPO/ConstantMerge.cpp [3:4]
+ lib/Transforms/IPO/CrossDSOCFI.cpp [3:4]
+ lib/Transforms/IPO/DeadArgumentElimination.cpp [3:4]
+ lib/Transforms/IPO/ElimAvailExtern.cpp [3:4]
+ lib/Transforms/IPO/ExtractGV.cpp [3:4]
+ lib/Transforms/IPO/ForceFunctionAttrs.cpp [3:4]
+ lib/Transforms/IPO/FunctionAttrs.cpp [3:4]
+ lib/Transforms/IPO/FunctionImport.cpp [3:4]
+ lib/Transforms/IPO/FunctionSpecialization.cpp [3:4]
+ lib/Transforms/IPO/GlobalDCE.cpp [3:4]
+ lib/Transforms/IPO/GlobalOpt.cpp [3:4]
+ lib/Transforms/IPO/GlobalSplit.cpp [3:4]
+ lib/Transforms/IPO/HotColdSplitting.cpp [3:4]
+ lib/Transforms/IPO/IPO.cpp [3:4]
+ lib/Transforms/IPO/IROutliner.cpp [3:4]
+ lib/Transforms/IPO/InferFunctionAttrs.cpp [3:4]
+ lib/Transforms/IPO/InlineSimple.cpp [3:4]
+ lib/Transforms/IPO/Inliner.cpp [3:4]
+ lib/Transforms/IPO/Internalize.cpp [3:4]
+ lib/Transforms/IPO/LoopExtractor.cpp [3:4]
+ lib/Transforms/IPO/LowerTypeTests.cpp [3:4]
+ lib/Transforms/IPO/MergeFunctions.cpp [3:4]
+ lib/Transforms/IPO/ModuleInliner.cpp [3:4]
+ lib/Transforms/IPO/OpenMPOpt.cpp [3:4]
+ lib/Transforms/IPO/PartialInlining.cpp [3:4]
+ lib/Transforms/IPO/PassManagerBuilder.cpp [3:4]
+ lib/Transforms/IPO/SCCP.cpp [3:4]
+ lib/Transforms/IPO/SampleContextTracker.cpp [3:4]
+ lib/Transforms/IPO/SampleProfile.cpp [3:4]
+ lib/Transforms/IPO/SampleProfileProbe.cpp [3:4]
+ lib/Transforms/IPO/StripDeadPrototypes.cpp [3:4]
+ lib/Transforms/IPO/StripSymbols.cpp [3:4]
+ lib/Transforms/IPO/SyntheticCountsPropagation.cpp [3:4]
+ lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp [3:4]
+ lib/Transforms/IPO/WholeProgramDevirt.cpp [3:4]
+ lib/Transforms/InstCombine/InstCombineAddSub.cpp [3:4]
+ lib/Transforms/InstCombine/InstCombineAndOrXor.cpp [3:4]
+ lib/Transforms/InstCombine/InstCombineAtomicRMW.cpp [3:4]
+ lib/Transforms/InstCombine/InstCombineCalls.cpp [3:4]
+ lib/Transforms/InstCombine/InstCombineCasts.cpp [3:4]
+ lib/Transforms/InstCombine/InstCombineCompares.cpp [3:4]
+ lib/Transforms/InstCombine/InstCombineInternal.h [3:4]
+ lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp [3:4]
+ lib/Transforms/InstCombine/InstCombineMulDivRem.cpp [3:4]
+ lib/Transforms/InstCombine/InstCombineNegator.cpp [3:4]
+ lib/Transforms/InstCombine/InstCombinePHI.cpp [3:4]
+ lib/Transforms/InstCombine/InstCombineSelect.cpp [3:4]
+ lib/Transforms/InstCombine/InstCombineShifts.cpp [3:4]
+ lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp [3:4]
+ lib/Transforms/InstCombine/InstCombineVectorOps.cpp [3:4]
+ lib/Transforms/InstCombine/InstructionCombining.cpp [3:4]
+ lib/Transforms/Instrumentation/AddressSanitizer.cpp [3:4]
+ lib/Transforms/Instrumentation/BoundsChecking.cpp [3:4]
+ lib/Transforms/Instrumentation/CFGMST.h [3:4]
+ lib/Transforms/Instrumentation/CGProfile.cpp [3:4]
+ lib/Transforms/Instrumentation/ControlHeightReduction.cpp [3:4]
+ lib/Transforms/Instrumentation/DataFlowSanitizer.cpp [3:4]
+ lib/Transforms/Instrumentation/GCOVProfiling.cpp [3:4]
+ lib/Transforms/Instrumentation/HWAddressSanitizer.cpp [3:4]
+ lib/Transforms/Instrumentation/IndirectCallPromotion.cpp [3:4]
+ lib/Transforms/Instrumentation/InstrOrderFile.cpp [3:4]
+ lib/Transforms/Instrumentation/InstrProfiling.cpp [3:4]
+ lib/Transforms/Instrumentation/Instrumentation.cpp [3:4]
+ lib/Transforms/Instrumentation/KCFI.cpp [3:4]
+ lib/Transforms/Instrumentation/MemProfiler.cpp [3:4]
+ lib/Transforms/Instrumentation/MemorySanitizer.cpp [3:4]
+ lib/Transforms/Instrumentation/PGOInstrumentation.cpp [3:4]
+ lib/Transforms/Instrumentation/PGOMemOPSizeOpt.cpp [3:4]
+ lib/Transforms/Instrumentation/PoisonChecking.cpp [3:4]
+ lib/Transforms/Instrumentation/SanitizerBinaryMetadata.cpp [3:4]
+ lib/Transforms/Instrumentation/SanitizerCoverage.cpp [3:4]
+ lib/Transforms/Instrumentation/ThreadSanitizer.cpp [3:4]
+ lib/Transforms/Instrumentation/ValueProfileCollector.cpp [3:4]
+ lib/Transforms/Instrumentation/ValueProfileCollector.h [3:4]
+ lib/Transforms/Instrumentation/ValueProfilePlugins.inc [3:4]
+ lib/Transforms/ObjCARC/ARCRuntimeEntryPoints.h [3:4]
+ lib/Transforms/ObjCARC/BlotMapVector.h [3:4]
+ lib/Transforms/ObjCARC/DependencyAnalysis.cpp [3:4]
+ lib/Transforms/ObjCARC/DependencyAnalysis.h [3:4]
+ lib/Transforms/ObjCARC/ObjCARC.cpp [3:4]
+ lib/Transforms/ObjCARC/ObjCARC.h [3:4]
+ lib/Transforms/ObjCARC/ObjCARCAPElim.cpp [3:4]
+ lib/Transforms/ObjCARC/ObjCARCContract.cpp [3:4]
+ lib/Transforms/ObjCARC/ObjCARCExpand.cpp [3:4]
+ lib/Transforms/ObjCARC/ObjCARCOpts.cpp [3:4]
+ lib/Transforms/ObjCARC/ProvenanceAnalysis.cpp [3:4]
+ lib/Transforms/ObjCARC/ProvenanceAnalysis.h [3:4]
+ lib/Transforms/ObjCARC/ProvenanceAnalysisEvaluator.cpp [3:4]
+ lib/Transforms/ObjCARC/PtrState.cpp [3:4]
+ lib/Transforms/ObjCARC/PtrState.h [3:4]
+ lib/Transforms/Scalar/ADCE.cpp [3:4]
+ lib/Transforms/Scalar/AlignmentFromAssumptions.cpp [4:5]
+ lib/Transforms/Scalar/AnnotationRemarks.cpp [3:4]
+ lib/Transforms/Scalar/BDCE.cpp [3:4]
+ lib/Transforms/Scalar/CallSiteSplitting.cpp [3:4]
+ lib/Transforms/Scalar/ConstantHoisting.cpp [3:4]
+ lib/Transforms/Scalar/ConstraintElimination.cpp [3:4]
+ lib/Transforms/Scalar/CorrelatedValuePropagation.cpp [3:4]
+ lib/Transforms/Scalar/DCE.cpp [3:4]
+ lib/Transforms/Scalar/DFAJumpThreading.cpp [3:4]
+ lib/Transforms/Scalar/DeadStoreElimination.cpp [3:4]
+ lib/Transforms/Scalar/DivRemPairs.cpp [3:4]
+ lib/Transforms/Scalar/EarlyCSE.cpp [3:4]
+ lib/Transforms/Scalar/FlattenCFGPass.cpp [3:4]
+ lib/Transforms/Scalar/Float2Int.cpp [3:4]
+ lib/Transforms/Scalar/GVN.cpp [3:4]
+ lib/Transforms/Scalar/GVNHoist.cpp [3:4]
+ lib/Transforms/Scalar/GVNSink.cpp [3:4]
+ lib/Transforms/Scalar/GuardWidening.cpp [3:4]
+ lib/Transforms/Scalar/IVUsersPrinter.cpp [3:4]
+ lib/Transforms/Scalar/IndVarSimplify.cpp [3:4]
+ lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp [3:4]
+ lib/Transforms/Scalar/InferAddressSpaces.cpp [3:4]
+ lib/Transforms/Scalar/InstSimplifyPass.cpp [3:4]
+ lib/Transforms/Scalar/JumpThreading.cpp [3:4]
+ lib/Transforms/Scalar/LICM.cpp [3:4]
+ lib/Transforms/Scalar/LoopAccessAnalysisPrinter.cpp [3:4]
+ lib/Transforms/Scalar/LoopBoundSplit.cpp [3:4]
+ lib/Transforms/Scalar/LoopDataPrefetch.cpp [3:4]
+ lib/Transforms/Scalar/LoopDeletion.cpp [3:4]
+ lib/Transforms/Scalar/LoopDistribute.cpp [3:4]
+ lib/Transforms/Scalar/LoopFlatten.cpp [3:4]
+ lib/Transforms/Scalar/LoopFuse.cpp [3:4]
+ lib/Transforms/Scalar/LoopIdiomRecognize.cpp [3:4]
+ lib/Transforms/Scalar/LoopInstSimplify.cpp [3:4]
+ lib/Transforms/Scalar/LoopInterchange.cpp [3:4]
+ lib/Transforms/Scalar/LoopLoadElimination.cpp [3:4]
+ lib/Transforms/Scalar/LoopPassManager.cpp [3:4]
+ lib/Transforms/Scalar/LoopPredication.cpp [3:4]
+ lib/Transforms/Scalar/LoopRerollPass.cpp [3:4]
+ lib/Transforms/Scalar/LoopRotation.cpp [3:4]
+ lib/Transforms/Scalar/LoopSimplifyCFG.cpp [3:4]
+ lib/Transforms/Scalar/LoopSink.cpp [3:4]
+ lib/Transforms/Scalar/LoopStrengthReduce.cpp [3:4]
+ lib/Transforms/Scalar/LoopUnrollAndJamPass.cpp [3:4]
+ lib/Transforms/Scalar/LoopUnrollPass.cpp [3:4]
+ lib/Transforms/Scalar/LoopVersioningLICM.cpp [3:4]
+ lib/Transforms/Scalar/LowerAtomicPass.cpp [3:4]
+ lib/Transforms/Scalar/LowerConstantIntrinsics.cpp [3:4]
+ lib/Transforms/Scalar/LowerExpectIntrinsic.cpp [3:4]
+ lib/Transforms/Scalar/LowerGuardIntrinsic.cpp [3:4]
+ lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp [3:4]
+ lib/Transforms/Scalar/LowerWidenableCondition.cpp [3:4]
+ lib/Transforms/Scalar/MakeGuardsExplicit.cpp [3:4]
+ lib/Transforms/Scalar/MemCpyOptimizer.cpp [3:4]
+ lib/Transforms/Scalar/MergeICmps.cpp [3:4]
+ lib/Transforms/Scalar/MergedLoadStoreMotion.cpp [3:4]
+ lib/Transforms/Scalar/NaryReassociate.cpp [3:4]
+ lib/Transforms/Scalar/NewGVN.cpp [3:4]
+ lib/Transforms/Scalar/PartiallyInlineLibCalls.cpp [3:4]
+ lib/Transforms/Scalar/PlaceSafepoints.cpp [3:4]
+ lib/Transforms/Scalar/Reassociate.cpp [3:4]
+ lib/Transforms/Scalar/Reg2Mem.cpp [3:4]
+ lib/Transforms/Scalar/RewriteStatepointsForGC.cpp [3:4]
+ lib/Transforms/Scalar/SCCP.cpp [3:4]
+ lib/Transforms/Scalar/SROA.cpp [3:4]
+ lib/Transforms/Scalar/Scalar.cpp [3:4]
+ lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp [4:5]
+ lib/Transforms/Scalar/Scalarizer.cpp [3:4]
+ lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp [3:4]
+ lib/Transforms/Scalar/SimpleLoopUnswitch.cpp [3:4]
+ lib/Transforms/Scalar/SimplifyCFGPass.cpp [3:4]
+ lib/Transforms/Scalar/Sink.cpp [3:4]
+ lib/Transforms/Scalar/SpeculativeExecution.cpp [3:4]
+ lib/Transforms/Scalar/StraightLineStrengthReduce.cpp [3:4]
+ lib/Transforms/Scalar/StructurizeCFG.cpp [3:4]
+ lib/Transforms/Scalar/TLSVariableHoist.cpp [3:4]
+ lib/Transforms/Scalar/TailRecursionElimination.cpp [3:4]
+ lib/Transforms/Scalar/WarnMissedTransforms.cpp [3:4]
+ lib/Transforms/Utils/AMDGPUEmitPrintf.cpp [3:4]
+ lib/Transforms/Utils/ASanStackFrameLayout.cpp [3:4]
+ lib/Transforms/Utils/AddDiscriminators.cpp [3:4]
+ lib/Transforms/Utils/AssumeBundleBuilder.cpp [3:4]
+ lib/Transforms/Utils/BasicBlockUtils.cpp [3:4]
+ lib/Transforms/Utils/BreakCriticalEdges.cpp [3:4]
+ lib/Transforms/Utils/BuildLibCalls.cpp [3:4]
+ lib/Transforms/Utils/BypassSlowDivision.cpp [3:4]
+ lib/Transforms/Utils/CallGraphUpdater.cpp [3:4]
+ lib/Transforms/Utils/CallPromotionUtils.cpp [3:4]
+ lib/Transforms/Utils/CanonicalizeAliases.cpp [3:4]
+ lib/Transforms/Utils/CanonicalizeFreezeInLoops.cpp [3:4]
+ lib/Transforms/Utils/CloneFunction.cpp [3:4]
+ lib/Transforms/Utils/CloneModule.cpp [3:4]
+ lib/Transforms/Utils/CodeExtractor.cpp [3:4]
+ lib/Transforms/Utils/CodeLayout.cpp [3:4]
+ lib/Transforms/Utils/CodeMoverUtils.cpp [3:4]
+ lib/Transforms/Utils/CtorUtils.cpp [3:4]
+ lib/Transforms/Utils/Debugify.cpp [3:4]
+ lib/Transforms/Utils/DemoteRegToStack.cpp [3:4]
+ lib/Transforms/Utils/EntryExitInstrumenter.cpp [3:4]
+ lib/Transforms/Utils/EscapeEnumerator.cpp [3:4]
+ lib/Transforms/Utils/Evaluator.cpp [3:4]
+ lib/Transforms/Utils/FixIrreducible.cpp [3:4]
+ lib/Transforms/Utils/FlattenCFG.cpp [3:4]
+ lib/Transforms/Utils/FunctionComparator.cpp [3:4]
+ lib/Transforms/Utils/FunctionImportUtils.cpp [3:4]
+ lib/Transforms/Utils/GlobalStatus.cpp [3:4]
+ lib/Transforms/Utils/GuardUtils.cpp [3:4]
+ lib/Transforms/Utils/HelloWorld.cpp [3:4]
+ lib/Transforms/Utils/InjectTLIMappings.cpp [3:4]
+ lib/Transforms/Utils/InlineFunction.cpp [3:4]
+ lib/Transforms/Utils/InstructionNamer.cpp [3:4]
+ lib/Transforms/Utils/IntegerDivision.cpp [3:4]
+ lib/Transforms/Utils/LCSSA.cpp [3:4]
+ lib/Transforms/Utils/LibCallsShrinkWrap.cpp [3:4]
+ lib/Transforms/Utils/Local.cpp [3:4]
+ lib/Transforms/Utils/LoopPeel.cpp [3:4]
+ lib/Transforms/Utils/LoopRotationUtils.cpp [3:4]
+ lib/Transforms/Utils/LoopSimplify.cpp [3:4]
+ lib/Transforms/Utils/LoopUnroll.cpp [3:4]
+ lib/Transforms/Utils/LoopUnrollAndJam.cpp [3:4]
+ lib/Transforms/Utils/LoopUnrollRuntime.cpp [3:4]
+ lib/Transforms/Utils/LoopUtils.cpp [3:4]
+ lib/Transforms/Utils/LoopVersioning.cpp [3:4]
+ lib/Transforms/Utils/LowerAtomic.cpp [3:4]
+ lib/Transforms/Utils/LowerGlobalDtors.cpp [3:4]
+ lib/Transforms/Utils/LowerIFunc.cpp [3:4]
+ lib/Transforms/Utils/LowerInvoke.cpp [3:4]
+ lib/Transforms/Utils/LowerMemIntrinsics.cpp [3:4]
+ lib/Transforms/Utils/LowerSwitch.cpp [3:4]
+ lib/Transforms/Utils/MatrixUtils.cpp [3:4]
+ lib/Transforms/Utils/Mem2Reg.cpp [3:4]
+ lib/Transforms/Utils/MemoryOpRemark.cpp [3:4]
+ lib/Transforms/Utils/MemoryTaggingSupport.cpp [2:3]
+ lib/Transforms/Utils/MetaRenamer.cpp [3:4]
+ lib/Transforms/Utils/MisExpect.cpp [3:4]
+ lib/Transforms/Utils/ModuleUtils.cpp [3:4]
+ lib/Transforms/Utils/NameAnonGlobals.cpp [3:4]
+ lib/Transforms/Utils/PredicateInfo.cpp [3:4]
+ lib/Transforms/Utils/PromoteMemoryToRegister.cpp [3:4]
+ lib/Transforms/Utils/RelLookupTableConverter.cpp [3:4]
+ lib/Transforms/Utils/SCCPSolver.cpp [3:4]
+ lib/Transforms/Utils/SSAUpdater.cpp [3:4]
+ lib/Transforms/Utils/SSAUpdaterBulk.cpp [3:4]
+ lib/Transforms/Utils/SampleProfileInference.cpp [3:4]
+ lib/Transforms/Utils/SampleProfileLoaderBaseUtil.cpp [3:4]
+ lib/Transforms/Utils/SanitizerStats.cpp [3:4]
+ lib/Transforms/Utils/ScalarEvolutionExpander.cpp [3:4]
+ lib/Transforms/Utils/SimplifyCFG.cpp [3:4]
+ lib/Transforms/Utils/SimplifyIndVar.cpp [3:4]
+ lib/Transforms/Utils/SimplifyLibCalls.cpp [3:4]
+ lib/Transforms/Utils/SizeOpts.cpp [3:4]
+ lib/Transforms/Utils/SplitModule.cpp [3:4]
+ lib/Transforms/Utils/StripGCRelocates.cpp [3:4]
+ lib/Transforms/Utils/StripNonLineTableDebugInfo.cpp [3:4]
+ lib/Transforms/Utils/SymbolRewriter.cpp [3:4]
+ lib/Transforms/Utils/UnifyFunctionExitNodes.cpp [3:4]
+ lib/Transforms/Utils/UnifyLoopExits.cpp [3:4]
+ lib/Transforms/Utils/Utils.cpp [3:4]
+ lib/Transforms/Utils/ValueMapper.cpp [3:4]
+ lib/Transforms/Vectorize/LoadStoreVectorizer.cpp [3:4]
+ lib/Transforms/Vectorize/LoopVectorizationLegality.cpp [3:4]
+ lib/Transforms/Vectorize/LoopVectorizationPlanner.h [3:4]
+ lib/Transforms/Vectorize/LoopVectorize.cpp [3:4]
+ lib/Transforms/Vectorize/SLPVectorizer.cpp [3:4]
+ lib/Transforms/Vectorize/VPRecipeBuilder.h [3:4]
+ lib/Transforms/Vectorize/VPlan.cpp [3:4]
+ lib/Transforms/Vectorize/VPlan.h [3:4]
+ lib/Transforms/Vectorize/VPlanCFG.h [3:4]
+ lib/Transforms/Vectorize/VPlanDominatorTree.h [3:4]
+ lib/Transforms/Vectorize/VPlanHCFGBuilder.cpp [3:4]
+ lib/Transforms/Vectorize/VPlanHCFGBuilder.h [3:4]
+ lib/Transforms/Vectorize/VPlanRecipes.cpp [3:4]
+ lib/Transforms/Vectorize/VPlanSLP.cpp [3:4]
+ lib/Transforms/Vectorize/VPlanTransforms.cpp [3:4]
+ lib/Transforms/Vectorize/VPlanTransforms.h [3:4]
+ lib/Transforms/Vectorize/VPlanValue.h [3:4]
+ lib/Transforms/Vectorize/VPlanVerifier.cpp [3:4]
+ lib/Transforms/Vectorize/VPlanVerifier.h [3:4]
+ lib/Transforms/Vectorize/VectorCombine.cpp [3:4]
+ lib/Transforms/Vectorize/Vectorize.cpp [3:4]
+ lib/WindowsDriver/MSVCPaths.cpp [3:4]
+ lib/WindowsManifest/WindowsManifestMerger.cpp [3:4]
+ lib/XRay/BlockIndexer.cpp [3:4]
+ lib/XRay/BlockPrinter.cpp [3:4]
+ lib/XRay/BlockVerifier.cpp [3:4]
+ lib/XRay/FDRRecordProducer.cpp [3:4]
+ lib/XRay/FDRRecords.cpp [3:4]
+ lib/XRay/FDRTraceExpander.cpp [3:4]
+ lib/XRay/FDRTraceWriter.cpp [3:4]
+ lib/XRay/FileHeaderReader.cpp [3:4]
+ lib/XRay/InstrumentationMap.cpp [3:4]
+ lib/XRay/LogBuilderConsumer.cpp [3:4]
+ lib/XRay/Profile.cpp [3:4]
+ lib/XRay/RecordInitializer.cpp [3:4]
+ lib/XRay/RecordPrinter.cpp [3:4]
+ lib/XRay/Trace.cpp [3:4]
+ tools/bugpoint/BugDriver.cpp [3:4]
+ tools/bugpoint/BugDriver.h [3:4]
+ tools/bugpoint/CrashDebugger.cpp [3:4]
+ tools/bugpoint/ExecutionDriver.cpp [3:4]
+ tools/bugpoint/ExtractFunction.cpp [3:4]
+ tools/bugpoint/FindBugs.cpp [3:4]
+ tools/bugpoint/ListReducer.h [3:4]
+ tools/bugpoint/Miscompilation.cpp [3:4]
+ tools/bugpoint/OptimizerDriver.cpp [3:4]
+ tools/bugpoint/ToolRunner.cpp [3:4]
+ tools/bugpoint/ToolRunner.h [3:4]
+ tools/bugpoint/bugpoint.cpp [3:4]
+ tools/dsymutil/BinaryHolder.cpp [3:4]
+ tools/dsymutil/BinaryHolder.h [3:4]
+ tools/dsymutil/CFBundle.cpp [3:4]
+ tools/dsymutil/CFBundle.h [3:4]
+ tools/dsymutil/DebugMap.cpp [3:4]
+ tools/dsymutil/DebugMap.h [3:4]
+ tools/dsymutil/DwarfLinkerForBinary.cpp [3:4]
+ tools/dsymutil/DwarfLinkerForBinary.h [3:4]
+ tools/dsymutil/LinkUtils.h [3:4]
+ tools/dsymutil/MachODebugMapParser.cpp [3:4]
+ tools/dsymutil/MachOUtils.cpp [3:4]
+ tools/dsymutil/MachOUtils.h [3:4]
+ tools/dsymutil/Reproducer.cpp [3:4]
+ tools/dsymutil/Reproducer.h [3:4]
+ tools/dsymutil/SymbolMap.cpp [3:4]
+ tools/dsymutil/SymbolMap.h [3:4]
+ tools/dsymutil/dsymutil.cpp [3:4]
+ tools/dsymutil/dsymutil.h [3:4]
+ tools/gold/gold-plugin.cpp [3:4]
+ tools/llc/llc.cpp [3:4]
+ tools/lli/ChildTarget/ChildTarget.cpp [3:4]
+ tools/lli/ExecutionUtils.cpp [3:4]
+ tools/lli/ExecutionUtils.h [3:4]
+ tools/lli/ForwardingMemoryManager.h [3:4]
+ tools/lli/lli.cpp [3:4]
+ tools/llvm-ar/llvm-ar-driver.cpp [3:4]
+ tools/llvm-ar/llvm-ar.cpp [3:4]
+ tools/llvm-as/llvm-as.cpp [3:4]
+ tools/llvm-bcanalyzer/llvm-bcanalyzer.cpp [3:4]
+ tools/llvm-cat/llvm-cat.cpp [3:4]
+ tools/llvm-cfi-verify/lib/FileAnalysis.cpp [3:4]
+ tools/llvm-cfi-verify/lib/FileAnalysis.h [3:4]
+ tools/llvm-cfi-verify/lib/GraphBuilder.cpp [3:4]
+ tools/llvm-cfi-verify/lib/GraphBuilder.h [3:4]
+ tools/llvm-cfi-verify/llvm-cfi-verify.cpp [3:4]
+ tools/llvm-config/BuildVariables.inc [3:4]
+ tools/llvm-config/llvm-config.cpp [3:4]
+ tools/llvm-cov/CodeCoverage.cpp [3:4]
+ tools/llvm-cov/CoverageExporter.h [3:4]
+ tools/llvm-cov/CoverageExporterJson.cpp [3:4]
+ tools/llvm-cov/CoverageExporterJson.h [3:4]
+ tools/llvm-cov/CoverageExporterLcov.cpp [3:4]
+ tools/llvm-cov/CoverageExporterLcov.h [3:4]
+ tools/llvm-cov/CoverageFilters.cpp [3:4]
+ tools/llvm-cov/CoverageFilters.h [3:4]
+ tools/llvm-cov/CoverageReport.cpp [3:4]
+ tools/llvm-cov/CoverageReport.h [3:4]
+ tools/llvm-cov/CoverageSummaryInfo.cpp [3:4]
+ tools/llvm-cov/CoverageSummaryInfo.h [3:4]
+ tools/llvm-cov/CoverageViewOptions.h [3:4]
+ tools/llvm-cov/RenderingSupport.h [3:4]
+ tools/llvm-cov/SourceCoverageView.cpp [3:4]
+ tools/llvm-cov/SourceCoverageView.h [3:4]
+ tools/llvm-cov/SourceCoverageViewHTML.cpp [3:4]
+ tools/llvm-cov/SourceCoverageViewHTML.h [3:4]
+ tools/llvm-cov/SourceCoverageViewText.cpp [3:4]
+ tools/llvm-cov/SourceCoverageViewText.h [3:4]
+ tools/llvm-cov/TestingSupport.cpp [3:4]
+ tools/llvm-cov/gcov.cpp [3:4]
+ tools/llvm-cov/llvm-cov.cpp [3:4]
+ tools/llvm-cvtres/llvm-cvtres.cpp [3:4]
+ tools/llvm-cxxdump/Error.cpp [3:4]
+ tools/llvm-cxxdump/Error.h [3:4]
+ tools/llvm-cxxdump/llvm-cxxdump.cpp [3:4]
+ tools/llvm-cxxdump/llvm-cxxdump.h [3:4]
+ tools/llvm-cxxfilt/llvm-cxxfilt-driver.cpp [3:4]
+ tools/llvm-cxxfilt/llvm-cxxfilt.cpp [3:4]
+ tools/llvm-cxxmap/llvm-cxxmap.cpp [3:4]
+ tools/llvm-diff/lib/DiffConsumer.cpp [3:4]
+ tools/llvm-diff/lib/DiffConsumer.h [3:4]
+ tools/llvm-diff/lib/DiffLog.cpp [3:4]
+ tools/llvm-diff/lib/DiffLog.h [3:4]
+ tools/llvm-diff/lib/DifferenceEngine.cpp [3:4]
+ tools/llvm-diff/lib/DifferenceEngine.h [3:4]
+ tools/llvm-diff/llvm-diff.cpp [3:4]
+ tools/llvm-dis/llvm-dis.cpp [3:4]
+ tools/llvm-dwarfdump/SectionSizes.cpp [3:4]
+ tools/llvm-dwarfdump/Statistics.cpp [3:4]
+ tools/llvm-dwarfdump/llvm-dwarfdump.cpp [3:4]
+ tools/llvm-dwarfdump/llvm-dwarfdump.h [3:4]
+ tools/llvm-dwp/llvm-dwp.cpp [3:4]
+ tools/llvm-exegesis/lib/AArch64/Target.cpp [3:4]
+ tools/llvm-exegesis/lib/Analysis.cpp [3:4]
+ tools/llvm-exegesis/lib/Analysis.h [3:4]
+ tools/llvm-exegesis/lib/Assembler.cpp [3:4]
+ tools/llvm-exegesis/lib/Assembler.h [3:4]
+ tools/llvm-exegesis/lib/BenchmarkCode.h [3:4]
+ tools/llvm-exegesis/lib/BenchmarkResult.cpp [3:4]
+ tools/llvm-exegesis/lib/BenchmarkResult.h [3:4]
+ tools/llvm-exegesis/lib/BenchmarkRunner.cpp [3:4]
+ tools/llvm-exegesis/lib/BenchmarkRunner.h [3:4]
+ tools/llvm-exegesis/lib/Clustering.cpp [3:4]
+ tools/llvm-exegesis/lib/Clustering.h [3:4]
+ tools/llvm-exegesis/lib/CodeTemplate.cpp [3:4]
+ tools/llvm-exegesis/lib/CodeTemplate.h [3:4]
+ tools/llvm-exegesis/lib/Error.cpp [3:4]
+ tools/llvm-exegesis/lib/Error.h [3:4]
+ tools/llvm-exegesis/lib/LatencyBenchmarkRunner.cpp [3:4]
+ tools/llvm-exegesis/lib/LatencyBenchmarkRunner.h [3:4]
+ tools/llvm-exegesis/lib/LlvmState.cpp [3:4]
+ tools/llvm-exegesis/lib/LlvmState.h [3:4]
+ tools/llvm-exegesis/lib/MCInstrDescView.cpp [3:4]
+ tools/llvm-exegesis/lib/MCInstrDescView.h [3:4]
+ tools/llvm-exegesis/lib/ParallelSnippetGenerator.cpp [3:4]
+ tools/llvm-exegesis/lib/ParallelSnippetGenerator.h [3:4]
+ tools/llvm-exegesis/lib/PerfHelper.cpp [3:4]
+ tools/llvm-exegesis/lib/PerfHelper.h [3:4]
+ tools/llvm-exegesis/lib/PowerPC/Target.cpp [3:4]
+ tools/llvm-exegesis/lib/ProgressMeter.h [3:4]
+ tools/llvm-exegesis/lib/RegisterAliasing.cpp [3:4]
+ tools/llvm-exegesis/lib/RegisterAliasing.h [3:4]
+ tools/llvm-exegesis/lib/RegisterValue.cpp [3:4]
+ tools/llvm-exegesis/lib/RegisterValue.h [3:4]
+ tools/llvm-exegesis/lib/SchedClassResolution.cpp [3:4]
+ tools/llvm-exegesis/lib/SchedClassResolution.h [3:4]
+ tools/llvm-exegesis/lib/SerialSnippetGenerator.cpp [3:4]
+ tools/llvm-exegesis/lib/SerialSnippetGenerator.h [3:4]
+ tools/llvm-exegesis/lib/SnippetFile.cpp [3:4]
+ tools/llvm-exegesis/lib/SnippetFile.h [3:4]
+ tools/llvm-exegesis/lib/SnippetGenerator.cpp [3:4]
+ tools/llvm-exegesis/lib/SnippetGenerator.h [3:4]
+ tools/llvm-exegesis/lib/SnippetRepetitor.cpp [3:4]
+ tools/llvm-exegesis/lib/SnippetRepetitor.h [3:4]
+ tools/llvm-exegesis/lib/Target.cpp [3:4]
+ tools/llvm-exegesis/lib/Target.h [3:4]
+ tools/llvm-exegesis/lib/TargetSelect.h [3:4]
+ tools/llvm-exegesis/lib/UopsBenchmarkRunner.cpp [3:4]
+ tools/llvm-exegesis/lib/UopsBenchmarkRunner.h [3:4]
+ tools/llvm-exegesis/lib/X86/Target.cpp [3:4]
+ tools/llvm-exegesis/lib/X86/X86Counter.cpp [3:4]
+ tools/llvm-exegesis/lib/X86/X86Counter.h [3:4]
+ tools/llvm-exegesis/llvm-exegesis.cpp [3:4]
+ tools/llvm-extract/llvm-extract.cpp [3:4]
+ tools/llvm-gsymutil/llvm-gsymutil.cpp [3:4]
+ tools/llvm-ifs/ErrorCollector.cpp [3:4]
+ tools/llvm-ifs/ErrorCollector.h [3:4]
+ tools/llvm-ifs/llvm-ifs-driver.cpp [3:4]
+ tools/llvm-ifs/llvm-ifs.cpp [3:4]
+ tools/llvm-jitlink/llvm-jitlink-coff.cpp [3:4]
+ tools/llvm-jitlink/llvm-jitlink-elf.cpp [3:4]
+ tools/llvm-jitlink/llvm-jitlink-executor/llvm-jitlink-executor.cpp [3:4]
+ tools/llvm-jitlink/llvm-jitlink-macho.cpp [3:4]
+ tools/llvm-jitlink/llvm-jitlink.cpp [3:4]
+ tools/llvm-jitlink/llvm-jitlink.h [3:4]
+ tools/llvm-libtool-darwin/DependencyInfo.h [3:4]
+ tools/llvm-libtool-darwin/llvm-libtool-darwin.cpp [3:4]
+ tools/llvm-link/llvm-link.cpp [3:4]
+ tools/llvm-lipo/llvm-lipo-driver.cpp [3:4]
+ tools/llvm-lipo/llvm-lipo.cpp [3:4]
+ tools/llvm-lto/llvm-lto.cpp [3:4]
+ tools/llvm-lto2/llvm-lto2.cpp [3:4]
+ tools/llvm-mc/Disassembler.cpp [3:4]
+ tools/llvm-mc/Disassembler.h [3:4]
+ tools/llvm-mc/llvm-mc.cpp [3:4]
+ tools/llvm-mca/CodeRegion.cpp [3:4]
+ tools/llvm-mca/CodeRegion.h [3:4]
+ tools/llvm-mca/CodeRegionGenerator.cpp [3:4]
+ tools/llvm-mca/CodeRegionGenerator.h [3:4]
+ tools/llvm-mca/PipelinePrinter.cpp [3:4]
+ tools/llvm-mca/PipelinePrinter.h [3:4]
+ tools/llvm-mca/Views/BottleneckAnalysis.cpp [3:4]
+ tools/llvm-mca/Views/BottleneckAnalysis.h [3:4]
+ tools/llvm-mca/Views/DispatchStatistics.cpp [3:4]
+ tools/llvm-mca/Views/DispatchStatistics.h [3:4]
+ tools/llvm-mca/Views/InstructionInfoView.cpp [3:4]
+ tools/llvm-mca/Views/InstructionInfoView.h [3:4]
+ tools/llvm-mca/Views/InstructionView.cpp [3:4]
+ tools/llvm-mca/Views/InstructionView.h [3:4]
+ tools/llvm-mca/Views/RegisterFileStatistics.cpp [3:4]
+ tools/llvm-mca/Views/RegisterFileStatistics.h [3:4]
+ tools/llvm-mca/Views/ResourcePressureView.cpp [3:4]
+ tools/llvm-mca/Views/ResourcePressureView.h [3:4]
+ tools/llvm-mca/Views/RetireControlUnitStatistics.cpp [3:4]
+ tools/llvm-mca/Views/RetireControlUnitStatistics.h [3:4]
+ tools/llvm-mca/Views/SchedulerStatistics.cpp [3:4]
+ tools/llvm-mca/Views/SchedulerStatistics.h [3:4]
+ tools/llvm-mca/Views/SummaryView.cpp [3:4]
+ tools/llvm-mca/Views/SummaryView.h [3:4]
+ tools/llvm-mca/Views/TimelineView.cpp [3:4]
+ tools/llvm-mca/Views/TimelineView.h [3:4]
+ tools/llvm-mca/llvm-mca.cpp [3:4]
+ tools/llvm-ml/Disassembler.cpp [3:4]
+ tools/llvm-ml/Disassembler.h [3:4]
+ tools/llvm-ml/llvm-ml.cpp [3:4]
+ tools/llvm-modextract/llvm-modextract.cpp [3:4]
+ tools/llvm-mt/llvm-mt-driver.cpp [3:4]
+ tools/llvm-mt/llvm-mt.cpp [3:4]
+ tools/llvm-nm/llvm-nm-driver.cpp [3:4]
+ tools/llvm-nm/llvm-nm.cpp [3:4]
+ tools/llvm-objcopy/BitcodeStripOpts.td [3:4]
+ tools/llvm-objcopy/InstallNameToolOpts.td [3:4]
+ tools/llvm-objcopy/ObjcopyOptions.cpp [3:4]
+ tools/llvm-objcopy/ObjcopyOptions.h [3:4]
+ tools/llvm-objcopy/llvm-objcopy-driver.cpp [3:4]
+ tools/llvm-objcopy/llvm-objcopy.cpp [3:4]
+ tools/llvm-objdump/COFFDump.cpp [3:4]
+ tools/llvm-objdump/COFFDump.h [3:4]
+ tools/llvm-objdump/ELFDump.cpp [3:4]
+ tools/llvm-objdump/ELFDump.h [3:4]
+ tools/llvm-objdump/MachODump.cpp [3:4]
+ tools/llvm-objdump/MachODump.h [3:4]
+ tools/llvm-objdump/OffloadDump.cpp [3:4]
+ tools/llvm-objdump/OffloadDump.h [3:4]
+ tools/llvm-objdump/SourcePrinter.cpp [3:4]
+ tools/llvm-objdump/SourcePrinter.h [3:4]
+ tools/llvm-objdump/WasmDump.cpp [3:4]
+ tools/llvm-objdump/WasmDump.h [3:4]
+ tools/llvm-objdump/XCOFFDump.cpp [3:4]
+ tools/llvm-objdump/XCOFFDump.h [3:4]
+ tools/llvm-objdump/llvm-objdump.cpp [3:4]
+ tools/llvm-objdump/llvm-objdump.h [3:4]
+ tools/llvm-opt-report/OptReport.cpp [3:4]
+ tools/llvm-pdbutil/BytesOutputStyle.cpp [3:4]
+ tools/llvm-pdbutil/BytesOutputStyle.h [3:4]
+ tools/llvm-pdbutil/DumpOutputStyle.cpp [3:4]
+ tools/llvm-pdbutil/DumpOutputStyle.h [3:4]
+ tools/llvm-pdbutil/ExplainOutputStyle.cpp [3:4]
+ tools/llvm-pdbutil/ExplainOutputStyle.h [3:4]
+ tools/llvm-pdbutil/MinimalSymbolDumper.cpp [3:4]
+ tools/llvm-pdbutil/MinimalSymbolDumper.h [3:4]
+ tools/llvm-pdbutil/MinimalTypeDumper.cpp [3:4]
+ tools/llvm-pdbutil/MinimalTypeDumper.h [3:4]
+ tools/llvm-pdbutil/OutputStyle.h [3:4]
+ tools/llvm-pdbutil/PdbYaml.cpp [3:4]
+ tools/llvm-pdbutil/PdbYaml.h [3:4]
+ tools/llvm-pdbutil/PrettyBuiltinDumper.cpp [3:4]
+ tools/llvm-pdbutil/PrettyBuiltinDumper.h [3:4]
+ tools/llvm-pdbutil/PrettyClassDefinitionDumper.cpp [3:4]
+ tools/llvm-pdbutil/PrettyClassDefinitionDumper.h [3:4]
+ tools/llvm-pdbutil/PrettyClassLayoutGraphicalDumper.cpp [3:4]
+ tools/llvm-pdbutil/PrettyClassLayoutGraphicalDumper.h [3:4]
+ tools/llvm-pdbutil/PrettyCompilandDumper.cpp [3:4]
+ tools/llvm-pdbutil/PrettyCompilandDumper.h [3:4]
+ tools/llvm-pdbutil/PrettyEnumDumper.cpp [3:4]
+ tools/llvm-pdbutil/PrettyEnumDumper.h [3:4]
+ tools/llvm-pdbutil/PrettyExternalSymbolDumper.cpp [3:4]
+ tools/llvm-pdbutil/PrettyExternalSymbolDumper.h [3:4]
+ tools/llvm-pdbutil/PrettyFunctionDumper.cpp [3:4]
+ tools/llvm-pdbutil/PrettyFunctionDumper.h [3:4]
+ tools/llvm-pdbutil/PrettyTypeDumper.cpp [3:4]
+ tools/llvm-pdbutil/PrettyTypeDumper.h [3:4]
+ tools/llvm-pdbutil/PrettyTypedefDumper.cpp [3:4]
+ tools/llvm-pdbutil/PrettyTypedefDumper.h [3:4]
+ tools/llvm-pdbutil/PrettyVariableDumper.cpp [3:4]
+ tools/llvm-pdbutil/PrettyVariableDumper.h [3:4]
+ tools/llvm-pdbutil/StreamUtil.cpp [3:4]
+ tools/llvm-pdbutil/StreamUtil.h [3:4]
+ tools/llvm-pdbutil/TypeReferenceTracker.cpp [3:4]
+ tools/llvm-pdbutil/TypeReferenceTracker.h [3:4]
+ tools/llvm-pdbutil/YAMLOutputStyle.cpp [3:4]
+ tools/llvm-pdbutil/YAMLOutputStyle.h [3:4]
+ tools/llvm-pdbutil/llvm-pdbutil.cpp [3:4]
+ tools/llvm-pdbutil/llvm-pdbutil.h [3:4]
+ tools/llvm-profdata/llvm-profdata-driver.cpp [3:4]
+ tools/llvm-profdata/llvm-profdata.cpp [3:4]
+ tools/llvm-profgen/CSPreInliner.cpp [3:4]
+ tools/llvm-profgen/CSPreInliner.h [3:4]
+ tools/llvm-profgen/CallContext.h [3:4]
+ tools/llvm-profgen/ErrorHandling.h [3:4]
+ tools/llvm-profgen/MissingFrameInferrer.cpp [3:4]
+ tools/llvm-profgen/MissingFrameInferrer.h [3:4]
+ tools/llvm-profgen/PerfReader.cpp [3:4]
+ tools/llvm-profgen/PerfReader.h [3:4]
+ tools/llvm-profgen/ProfileGenerator.cpp [3:4]
+ tools/llvm-profgen/ProfileGenerator.h [3:4]
+ tools/llvm-profgen/ProfiledBinary.cpp [3:4]
+ tools/llvm-profgen/ProfiledBinary.h [3:4]
+ tools/llvm-profgen/llvm-profgen.cpp [3:4]
+ tools/llvm-rc/ResourceFileWriter.cpp [3:4]
+ tools/llvm-rc/ResourceFileWriter.h [3:4]
+ tools/llvm-rc/ResourceScriptCppFilter.cpp [3:4]
+ tools/llvm-rc/ResourceScriptCppFilter.h [3:4]
+ tools/llvm-rc/ResourceScriptParser.cpp [3:4]
+ tools/llvm-rc/ResourceScriptParser.h [3:4]
+ tools/llvm-rc/ResourceScriptStmt.cpp [2:3]
+ tools/llvm-rc/ResourceScriptStmt.h [3:4]
+ tools/llvm-rc/ResourceScriptToken.cpp [3:4]
+ tools/llvm-rc/ResourceScriptToken.h [3:4]
+ tools/llvm-rc/ResourceScriptTokenList.def [3:4]
+ tools/llvm-rc/ResourceVisitor.h [3:4]
+ tools/llvm-rc/llvm-rc-driver.cpp [3:4]
+ tools/llvm-rc/llvm-rc.cpp [3:4]
+ tools/llvm-readobj/ARMEHABIPrinter.h [3:4]
+ tools/llvm-readobj/ARMWinEHPrinter.cpp [3:4]
+ tools/llvm-readobj/ARMWinEHPrinter.h [3:4]
+ tools/llvm-readobj/COFFDumper.cpp [3:4]
+ tools/llvm-readobj/COFFImportDumper.cpp [3:4]
+ tools/llvm-readobj/DwarfCFIEHPrinter.h [3:4]
+ tools/llvm-readobj/ELFDumper.cpp [3:4]
+ tools/llvm-readobj/MachODumper.cpp [3:4]
+ tools/llvm-readobj/ObjDumper.cpp [3:4]
+ tools/llvm-readobj/ObjDumper.h [3:4]
+ tools/llvm-readobj/StackMapPrinter.h [3:4]
+ tools/llvm-readobj/WasmDumper.cpp [3:4]
+ tools/llvm-readobj/Win64EHDumper.cpp [3:4]
+ tools/llvm-readobj/Win64EHDumper.h [3:4]
+ tools/llvm-readobj/WindowsResourceDumper.cpp [3:4]
+ tools/llvm-readobj/WindowsResourceDumper.h [3:4]
+ tools/llvm-readobj/XCOFFDumper.cpp [3:4]
+ tools/llvm-readobj/llvm-readobj-driver.cpp [3:4]
+ tools/llvm-readobj/llvm-readobj.cpp [3:4]
+ tools/llvm-readobj/llvm-readobj.h [3:4]
+ tools/llvm-reduce/DeltaManager.cpp [3:4]
+ tools/llvm-reduce/DeltaManager.h [3:4]
+ tools/llvm-reduce/ReducerWorkItem.cpp [3:4]
+ tools/llvm-reduce/ReducerWorkItem.h [3:4]
+ tools/llvm-reduce/TestRunner.cpp [3:4]
+ tools/llvm-reduce/TestRunner.h [3:4]
+ tools/llvm-reduce/deltas/Delta.cpp [3:4]
+ tools/llvm-reduce/deltas/Delta.h [3:4]
+ tools/llvm-reduce/deltas/ReduceAliases.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceAliases.h [3:4]
+ tools/llvm-reduce/deltas/ReduceArguments.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceArguments.h [3:4]
+ tools/llvm-reduce/deltas/ReduceAttributes.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceAttributes.h [3:4]
+ tools/llvm-reduce/deltas/ReduceBasicBlocks.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceBasicBlocks.h [3:4]
+ tools/llvm-reduce/deltas/ReduceDIMetadata.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceDIMetadata.h [3:4]
+ tools/llvm-reduce/deltas/ReduceFunctionBodies.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceFunctionBodies.h [3:4]
+ tools/llvm-reduce/deltas/ReduceFunctions.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceFunctions.h [3:4]
+ tools/llvm-reduce/deltas/ReduceGlobalObjects.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceGlobalObjects.h [3:4]
+ tools/llvm-reduce/deltas/ReduceGlobalValues.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceGlobalValues.h [3:4]
+ tools/llvm-reduce/deltas/ReduceGlobalVarInitializers.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceGlobalVarInitializers.h [3:4]
+ tools/llvm-reduce/deltas/ReduceGlobalVars.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceGlobalVars.h [3:4]
+ tools/llvm-reduce/deltas/ReduceIRReferences.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceIRReferences.h [3:4]
+ tools/llvm-reduce/deltas/ReduceInstructionFlags.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceInstructionFlags.h [3:4]
+ tools/llvm-reduce/deltas/ReduceInstructionFlagsMIR.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceInstructionFlagsMIR.h [3:4]
+ tools/llvm-reduce/deltas/ReduceInstructions.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceInstructions.h [3:4]
+ tools/llvm-reduce/deltas/ReduceInstructionsMIR.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceInstructionsMIR.h [3:4]
+ tools/llvm-reduce/deltas/ReduceInvokes.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceInvokes.h [3:4]
+ tools/llvm-reduce/deltas/ReduceMemoryOperations.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceMemoryOperations.h [3:4]
+ tools/llvm-reduce/deltas/ReduceMetadata.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceMetadata.h [3:4]
+ tools/llvm-reduce/deltas/ReduceModuleData.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceModuleData.h [3:4]
+ tools/llvm-reduce/deltas/ReduceOpcodes.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceOpcodes.h [3:4]
+ tools/llvm-reduce/deltas/ReduceOperandBundles.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceOperandBundles.h [3:4]
+ tools/llvm-reduce/deltas/ReduceOperands.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceOperands.h [3:4]
+ tools/llvm-reduce/deltas/ReduceOperandsSkip.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceOperandsSkip.h [3:4]
+ tools/llvm-reduce/deltas/ReduceOperandsToArgs.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceOperandsToArgs.h [3:4]
+ tools/llvm-reduce/deltas/ReduceRegisterDefs.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceRegisterDefs.h [3:4]
+ tools/llvm-reduce/deltas/ReduceRegisterMasks.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceRegisterMasks.h [3:4]
+ tools/llvm-reduce/deltas/ReduceRegisterUses.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceRegisterUses.h [3:4]
+ tools/llvm-reduce/deltas/ReduceSpecialGlobals.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceSpecialGlobals.h [3:4]
+ tools/llvm-reduce/deltas/ReduceUsingSimplifyCFG.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceUsingSimplifyCFG.h [3:4]
+ tools/llvm-reduce/deltas/ReduceVirtualRegisters.cpp [3:4]
+ tools/llvm-reduce/deltas/ReduceVirtualRegisters.h [3:4]
+ tools/llvm-reduce/deltas/RunIRPasses.cpp [3:4]
+ tools/llvm-reduce/deltas/RunIRPasses.h [3:4]
+ tools/llvm-reduce/deltas/SimplifyInstructions.cpp [3:4]
+ tools/llvm-reduce/deltas/SimplifyInstructions.h [3:4]
+ tools/llvm-reduce/deltas/StripDebugInfo.cpp [3:4]
+ tools/llvm-reduce/deltas/StripDebugInfo.h [3:4]
+ tools/llvm-reduce/deltas/Utils.cpp [3:4]
+ tools/llvm-reduce/deltas/Utils.h [3:4]
+ tools/llvm-reduce/llvm-reduce.cpp [3:4]
+ tools/llvm-rtdyld/llvm-rtdyld.cpp [3:4]
+ tools/llvm-size/llvm-size-driver.cpp [3:4]
+ tools/llvm-size/llvm-size.cpp [3:4]
+ tools/llvm-split/llvm-split.cpp [3:4]
+ tools/llvm-stress/llvm-stress.cpp [3:4]
+ tools/llvm-strings/llvm-strings.cpp [3:4]
+ tools/llvm-symbolizer/llvm-symbolizer.cpp [3:4]
+ tools/llvm-undname/llvm-undname.cpp [4:5]
+ tools/llvm-xray/func-id-helper.cpp [3:4]
+ tools/llvm-xray/func-id-helper.h [3:4]
+ tools/llvm-xray/llvm-xray.cpp [3:4]
+ tools/llvm-xray/trie-node.h [3:4]
+ tools/llvm-xray/xray-account.cpp [3:4]
+ tools/llvm-xray/xray-account.h [3:4]
+ tools/llvm-xray/xray-color-helper.cpp [3:4]
+ tools/llvm-xray/xray-color-helper.h [3:4]
+ tools/llvm-xray/xray-converter.cpp [3:4]
+ tools/llvm-xray/xray-converter.h [3:4]
+ tools/llvm-xray/xray-extract.cpp [3:4]
+ tools/llvm-xray/xray-fdr-dump.cpp [3:4]
+ tools/llvm-xray/xray-graph-diff.cpp [3:4]
+ tools/llvm-xray/xray-graph-diff.h [3:4]
+ tools/llvm-xray/xray-graph.cpp [3:4]
+ tools/llvm-xray/xray-graph.h [3:4]
+ tools/llvm-xray/xray-registry.cpp [3:4]
+ tools/llvm-xray/xray-registry.h [3:4]
+ tools/llvm-xray/xray-stacks.cpp [3:4]
+ tools/lto/LTODisassembler.cpp [3:4]
+ tools/lto/lto.cpp [3:4]
+ tools/obj2yaml/archive2yaml.cpp [3:4]
+ tools/obj2yaml/coff2yaml.cpp [3:4]
+ tools/obj2yaml/dwarf2yaml.cpp [3:4]
+ tools/obj2yaml/dxcontainer2yaml.cpp [3:4]
+ tools/obj2yaml/elf2yaml.cpp [3:4]
+ tools/obj2yaml/macho2yaml.cpp [3:4]
+ tools/obj2yaml/minidump2yaml.cpp [3:4]
+ tools/obj2yaml/obj2yaml.cpp [3:4]
+ tools/obj2yaml/obj2yaml.h [3:4]
+ tools/obj2yaml/offload2yaml.cpp [3:4]
+ tools/obj2yaml/wasm2yaml.cpp [3:4]
+ tools/obj2yaml/xcoff2yaml.cpp [3:4]
+ tools/opt/AnalysisWrappers.cpp [3:4]
+ tools/opt/BreakpointPrinter.cpp [3:4]
+ tools/opt/BreakpointPrinter.h [3:4]
+ tools/opt/NewPMDriver.cpp [3:4]
+ tools/opt/NewPMDriver.h [3:4]
+ tools/opt/opt.cpp [3:4]
+ tools/polly/include/polly/Canonicalization.h [3:4]
+ tools/polly/include/polly/CodeGen/BlockGenerators.h [3:4]
+ tools/polly/include/polly/CodeGen/CodeGeneration.h [3:4]
+ tools/polly/include/polly/CodeGen/IRBuilder.h [3:4]
+ tools/polly/include/polly/CodeGen/IslAst.h [3:4]
+ tools/polly/include/polly/CodeGen/IslExprBuilder.h [3:4]
+ tools/polly/include/polly/CodeGen/IslNodeBuilder.h [3:4]
+ tools/polly/include/polly/CodeGen/LoopGenerators.h [3:4]
+ tools/polly/include/polly/CodeGen/LoopGeneratorsGOMP.h [3:4]
+ tools/polly/include/polly/CodeGen/LoopGeneratorsKMP.h [3:4]
+ tools/polly/include/polly/CodeGen/PPCGCodeGeneration.h [3:4]
+ tools/polly/include/polly/CodeGen/PerfMonitor.h [3:4]
+ tools/polly/include/polly/CodeGen/RuntimeDebugBuilder.h [3:4]
+ tools/polly/include/polly/CodeGen/Utils.h [3:4]
+ tools/polly/include/polly/CodePreparation.h [3:4]
+ tools/polly/include/polly/Config/config.h [3:4]
+ tools/polly/include/polly/DeLICM.h [3:4]
+ tools/polly/include/polly/DeadCodeElimination.h [3:4]
+ tools/polly/include/polly/DependenceInfo.h [3:4]
+ tools/polly/include/polly/FlattenAlgo.h [3:4]
+ tools/polly/include/polly/FlattenSchedule.h [3:4]
+ tools/polly/include/polly/ForwardOpTree.h [3:4]
+ tools/polly/include/polly/JSONExporter.h [3:4]
+ tools/polly/include/polly/LinkAllPasses.h [3:4]
+ tools/polly/include/polly/ManualOptimizer.h [3:4]
+ tools/polly/include/polly/MatmulOptimizer.h [3:4]
+ tools/polly/include/polly/MaximalStaticExpansion.h [3:4]
+ tools/polly/include/polly/Options.h [3:4]
+ tools/polly/include/polly/PolyhedralInfo.h [3:4]
+ tools/polly/include/polly/PruneUnprofitable.h [3:4]
+ tools/polly/include/polly/RegisterPasses.h [3:4]
+ tools/polly/include/polly/ScheduleOptimizer.h [3:4]
+ tools/polly/include/polly/ScheduleTreeTransform.h [3:4]
+ tools/polly/include/polly/ScopBuilder.h [3:4]
+ tools/polly/include/polly/ScopDetection.h [3:4]
+ tools/polly/include/polly/ScopDetectionDiagnostic.h [3:4]
+ tools/polly/include/polly/ScopGraphPrinter.h [3:4]
+ tools/polly/include/polly/ScopInfo.h [3:4]
+ tools/polly/include/polly/ScopPass.h [3:4]
+ tools/polly/include/polly/Simplify.h [3:4]
+ tools/polly/include/polly/Support/DumpFunctionPass.h [3:4]
+ tools/polly/include/polly/Support/DumpModulePass.h [3:4]
+ tools/polly/include/polly/Support/GICHelper.h [3:4]
+ tools/polly/include/polly/Support/ISLOStream.h [3:4]
+ tools/polly/include/polly/Support/ISLOperators.h [3:4]
+ tools/polly/include/polly/Support/ISLTools.h [3:4]
+ tools/polly/include/polly/Support/SCEVAffinator.h [3:4]
+ tools/polly/include/polly/Support/SCEVValidator.h [3:4]
+ tools/polly/include/polly/Support/ScopHelper.h [3:4]
+ tools/polly/include/polly/Support/ScopLocation.h [3:4]
+ tools/polly/include/polly/Support/VirtualInstruction.h [3:4]
+ tools/polly/include/polly/ZoneAlgo.h [3:4]
+ tools/polly/lib/Analysis/DependenceInfo.cpp [3:4]
+ tools/polly/lib/Analysis/PolyhedralInfo.cpp [3:4]
+ tools/polly/lib/Analysis/PruneUnprofitable.cpp [3:4]
+ tools/polly/lib/Analysis/ScopBuilder.cpp [3:4]
+ tools/polly/lib/Analysis/ScopDetection.cpp [3:4]
+ tools/polly/lib/Analysis/ScopDetectionDiagnostic.cpp [3:4]
+ tools/polly/lib/Analysis/ScopGraphPrinter.cpp [3:4]
+ tools/polly/lib/Analysis/ScopInfo.cpp [3:4]
+ tools/polly/lib/Analysis/ScopPass.cpp [3:4]
+ tools/polly/lib/CodeGen/BlockGenerators.cpp [3:4]
+ tools/polly/lib/CodeGen/CodeGeneration.cpp [3:4]
+ tools/polly/lib/CodeGen/CodegenCleanup.cpp [3:4]
+ tools/polly/lib/CodeGen/IRBuilder.cpp [3:4]
+ tools/polly/lib/CodeGen/IslAst.cpp [3:4]
+ tools/polly/lib/CodeGen/IslExprBuilder.cpp [3:4]
+ tools/polly/lib/CodeGen/IslNodeBuilder.cpp [3:4]
+ tools/polly/lib/CodeGen/LoopGenerators.cpp [3:4]
+ tools/polly/lib/CodeGen/LoopGeneratorsGOMP.cpp [3:4]
+ tools/polly/lib/CodeGen/LoopGeneratorsKMP.cpp [3:4]
+ tools/polly/lib/CodeGen/ManagedMemoryRewrite.cpp [3:4]
+ tools/polly/lib/CodeGen/PPCGCodeGeneration.cpp [3:4]
+ tools/polly/lib/CodeGen/PerfMonitor.cpp [3:4]
+ tools/polly/lib/CodeGen/RuntimeDebugBuilder.cpp [3:4]
+ tools/polly/lib/CodeGen/Utils.cpp [3:4]
+ tools/polly/lib/Exchange/JSONExporter.cpp [3:4]
+ tools/polly/lib/Support/DumpFunctionPass.cpp [3:4]
+ tools/polly/lib/Support/DumpModulePass.cpp [3:4]
+ tools/polly/lib/Support/GICHelper.cpp [3:4]
+ tools/polly/lib/Support/ISLTools.cpp [3:4]
+ tools/polly/lib/Support/RegisterPasses.cpp [3:4]
+ tools/polly/lib/Support/SCEVAffinator.cpp [3:4]
+ tools/polly/lib/Support/ScopHelper.cpp [3:4]
+ tools/polly/lib/Support/ScopLocation.cpp [3:4]
+ tools/polly/lib/Support/VirtualInstruction.cpp [3:4]
+ tools/polly/lib/Transform/Canonicalization.cpp [3:4]
+ tools/polly/lib/Transform/CodePreparation.cpp [3:4]
+ tools/polly/lib/Transform/DeLICM.cpp [3:4]
+ tools/polly/lib/Transform/DeadCodeElimination.cpp [3:4]
+ tools/polly/lib/Transform/FlattenAlgo.cpp [3:4]
+ tools/polly/lib/Transform/FlattenSchedule.cpp [3:4]
+ tools/polly/lib/Transform/ForwardOpTree.cpp [3:4]
+ tools/polly/lib/Transform/ManualOptimizer.cpp [3:4]
+ tools/polly/lib/Transform/MatmulOptimizer.cpp [3:4]
+ tools/polly/lib/Transform/MaximalStaticExpansion.cpp [3:4]
+ tools/polly/lib/Transform/ScheduleOptimizer.cpp [3:4]
+ tools/polly/lib/Transform/ScheduleTreeTransform.cpp [3:4]
+ tools/polly/lib/Transform/ScopInliner.cpp [3:4]
+ tools/polly/lib/Transform/Simplify.cpp [3:4]
+ tools/polly/lib/Transform/ZoneAlgo.cpp [3:4]
+ tools/remarks-shlib/libremarks.cpp [3:4]
+ tools/sancov/sancov.cpp [3:4]
+ tools/sanstats/sanstats.cpp [3:4]
+ tools/verify-uselistorder/verify-uselistorder.cpp [3:4]
+ tools/yaml2obj/yaml2obj.cpp [3:4]
+ utils/TableGen/AsmMatcherEmitter.cpp [3:4]
+ utils/TableGen/AsmWriterEmitter.cpp [3:4]
+ utils/TableGen/AsmWriterInst.cpp [3:4]
+ utils/TableGen/AsmWriterInst.h [3:4]
+ utils/TableGen/Attributes.cpp [3:4]
+ utils/TableGen/CTagsEmitter.cpp [3:4]
+ utils/TableGen/CallingConvEmitter.cpp [3:4]
+ utils/TableGen/CodeEmitterGen.cpp [3:4]
+ utils/TableGen/CodeGenDAGPatterns.cpp [3:4]
+ utils/TableGen/CodeGenDAGPatterns.h [3:4]
+ utils/TableGen/CodeGenHwModes.cpp [3:4]
+ utils/TableGen/CodeGenHwModes.h [3:4]
+ utils/TableGen/CodeGenInstruction.cpp [3:4]
+ utils/TableGen/CodeGenInstruction.h [3:4]
+ utils/TableGen/CodeGenIntrinsics.h [3:4]
+ utils/TableGen/CodeGenMapTable.cpp [3:4]
+ utils/TableGen/CodeGenRegisters.cpp [3:4]
+ utils/TableGen/CodeGenRegisters.h [3:4]
+ utils/TableGen/CodeGenSchedule.cpp [3:4]
+ utils/TableGen/CodeGenSchedule.h [3:4]
+ utils/TableGen/CodeGenTarget.cpp [3:4]
+ utils/TableGen/CodeGenTarget.h [3:4]
+ utils/TableGen/CompressInstEmitter.cpp [3:4]
+ utils/TableGen/DAGISelEmitter.cpp [3:4]
+ utils/TableGen/DAGISelMatcher.cpp [3:4]
+ utils/TableGen/DAGISelMatcher.h [3:4]
+ utils/TableGen/DAGISelMatcherEmitter.cpp [3:4]
+ utils/TableGen/DAGISelMatcherGen.cpp [3:4]
+ utils/TableGen/DAGISelMatcherOpt.cpp [3:4]
+ utils/TableGen/DFAEmitter.cpp [3:4]
+ utils/TableGen/DFAEmitter.h [3:4]
+ utils/TableGen/DFAPacketizerEmitter.cpp [3:4]
+ utils/TableGen/DXILEmitter.cpp [3:4]
+ utils/TableGen/DecoderEmitter.cpp [3:4]
+ utils/TableGen/DirectiveEmitter.cpp [3:4]
+ utils/TableGen/DisassemblerEmitter.cpp [3:4]
+ utils/TableGen/ExegesisEmitter.cpp [3:4]
+ utils/TableGen/FastISelEmitter.cpp [3:4]
+ utils/TableGen/GICombinerEmitter.cpp [3:4]
+ utils/TableGen/GlobalISel/CodeExpander.cpp [3:4]
+ utils/TableGen/GlobalISel/CodeExpander.h [3:4]
+ utils/TableGen/GlobalISel/CodeExpansions.h [3:4]
+ utils/TableGen/GlobalISel/GIMatchDag.cpp [3:4]
+ utils/TableGen/GlobalISel/GIMatchDag.h [3:4]
+ utils/TableGen/GlobalISel/GIMatchDagEdge.cpp [3:4]
+ utils/TableGen/GlobalISel/GIMatchDagEdge.h [3:4]
+ utils/TableGen/GlobalISel/GIMatchDagInstr.cpp [3:4]
+ utils/TableGen/GlobalISel/GIMatchDagInstr.h [3:4]
+ utils/TableGen/GlobalISel/GIMatchDagOperands.cpp [3:4]
+ utils/TableGen/GlobalISel/GIMatchDagOperands.h [3:4]
+ utils/TableGen/GlobalISel/GIMatchDagPredicate.cpp [3:4]
+ utils/TableGen/GlobalISel/GIMatchDagPredicate.h [3:4]
+ utils/TableGen/GlobalISel/GIMatchDagPredicateDependencyEdge.cpp [3:4]
+ utils/TableGen/GlobalISel/GIMatchDagPredicateDependencyEdge.h [3:4]
+ utils/TableGen/GlobalISel/GIMatchTree.cpp [3:4]
+ utils/TableGen/GlobalISel/GIMatchTree.h [3:4]
+ utils/TableGen/GlobalISelEmitter.cpp [3:4]
+ utils/TableGen/InfoByHwMode.cpp [3:4]
+ utils/TableGen/InfoByHwMode.h [3:4]
+ utils/TableGen/InstrDocsEmitter.cpp [3:4]
+ utils/TableGen/InstrInfoEmitter.cpp [3:4]
+ utils/TableGen/IntrinsicEmitter.cpp [3:4]
+ utils/TableGen/OptEmitter.cpp [3:4]
+ utils/TableGen/OptEmitter.h [3:4]
+ utils/TableGen/OptParserEmitter.cpp [3:4]
+ utils/TableGen/OptRSTEmitter.cpp [3:4]
+ utils/TableGen/PredicateExpander.cpp [3:4]
+ utils/TableGen/PredicateExpander.h [3:4]
+ utils/TableGen/PseudoLoweringEmitter.cpp [3:4]
+ utils/TableGen/RISCVTargetDefEmitter.cpp [3:4]
+ utils/TableGen/RegisterBankEmitter.cpp [3:4]
+ utils/TableGen/RegisterInfoEmitter.cpp [3:4]
+ utils/TableGen/SDNodeProperties.cpp [3:4]
+ utils/TableGen/SDNodeProperties.h [3:4]
+ utils/TableGen/SearchableTableEmitter.cpp [3:4]
+ utils/TableGen/SequenceToOffsetTable.h [3:4]
+ utils/TableGen/SubtargetEmitter.cpp [3:4]
+ utils/TableGen/SubtargetFeatureInfo.cpp [3:4]
+ utils/TableGen/SubtargetFeatureInfo.h [3:4]
+ utils/TableGen/TableGen.cpp [3:4]
+ utils/TableGen/TableGenBackends.h [3:4]
+ utils/TableGen/Types.cpp [3:4]
+ utils/TableGen/Types.h [3:4]
+ utils/TableGen/VarLenCodeEmitterGen.cpp [3:4]
+ utils/TableGen/VarLenCodeEmitterGen.h [3:4]
+ utils/TableGen/WebAssemblyDisassemblerEmitter.cpp [3:4]
+ utils/TableGen/WebAssemblyDisassemblerEmitter.h [3:4]
+ utils/TableGen/X86DisassemblerShared.h [3:4]
+ utils/TableGen/X86DisassemblerTables.cpp [3:4]
+ utils/TableGen/X86DisassemblerTables.h [3:4]
+ utils/TableGen/X86EVEX2VEXTablesEmitter.cpp [3:4]
+ utils/TableGen/X86FoldTablesEmitter.cpp [3:4]
+ utils/TableGen/X86MnemonicTables.cpp [3:4]
+ utils/TableGen/X86ModRMFilters.cpp [3:4]
+ utils/TableGen/X86ModRMFilters.h [3:4]
+ utils/TableGen/X86RecognizableInstr.cpp [3:4]
+ utils/TableGen/X86RecognizableInstr.h [3:4]
+
+KEEP Unicode 75bd25737b5e18085b2600ebf9c1e2a9
+BELONGS lib/Support/ya.make
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: LicenseRef-scancode-unicode
+ Score : 100.00
+ Match type : TEXT
+ Links : http://unicode.org/, http://unicode.org/copyright.html, https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/unicode.LICENSE
+ Files with this license:
+ lib/Support/UnicodeNameToCodepointGenerated.cpp [33:62]
+
+SKIP LicenseRef-scancode-proprietary-license 75e95f9c2534d4d3b0fbae0c7df4dd55
+BELONGS include/ya.make
+ # code comment
+ License text:
+ /// upon return. The contents of the array, however, may not be modified (i.e.
+ Scancode info:
+ Original SPDX id: LicenseRef-scancode-proprietary-license
+ Score : 100.00
+ Match type : REFERENCE
+ Links : https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/proprietary-license.LICENSE
+ Files with this license:
+ include/llvm/CodeGen/TargetFrameLowering.h [406:406]
+
+KEEP Unicode 78354066acdc4e2d90f26761671108da
+BELONGS lib/Support/ya.make
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: LicenseRef-scancode-unicode
+ Score : 49.62
+ Match type : NOTICE
+ Links : http://unicode.org/, http://unicode.org/copyright.html, https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/unicode.LICENSE
+ Files with this license:
+ lib/Support/UnicodeNameToCodepointGenerated.cpp [22:30]
+
+KEEP MIT 7c10665d9f336e1c76c4666aea23c0fc
+BELONGS include/ya.make
+ License text:
+ // <license>
+ // The MIT License (MIT)
+ Scancode info:
+ Original SPDX id: MIT
+ Score : 100.00
+ Match type : REFERENCE
+ Links : http://opensource.org/licenses/mit-license.php, https://spdx.org/licenses/MIT
+ Files with this license:
+ include/llvm/WindowsDriver/MSVCSetupApi.h [12:13]
+
+KEEP NCSA 7d16f63b1760712db326433bcd23e851
+BELONGS ya.make
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: NCSA
+ Score : 99.55
+ Match type : TEXT
+ Links : http://www.otm.illinois.edu/faculty/forms/opensource.asp, https://spdx.org/licenses/NCSA
+ Files with this license:
+ tools/polly/LICENSE.TXT [249:273]
+
+KEEP NCSA 7e1c6e0177b0a462d79445a1e2d85892
+BELONGS ya.make
+ License text:
+ Legacy LLVM License (https://llvm.org/docs/DeveloperPolicy.html#legacy):
+ ==============================================================================
+ University of Illinois/NCSA
+ Open Source License
+ Scancode info:
+ Original SPDX id: NCSA
+ Score : 100.00
+ Match type : REFERENCE
+ Links : http://www.otm.illinois.edu/faculty/forms/opensource.asp, https://spdx.org/licenses/NCSA
+ Files with this license:
+ LICENSE.TXT [237:240]
+ tools/polly/LICENSE.TXT [237:240]
+
+KEEP MIT 81221564b872d3941ff8a5c9b1ce9b1d
+BELONGS tools/polly/lib/External/isl/ya.make
+FILE_INCLUDE tools/polly/lib/External/isl/AUTHORS found in files: tools/polly/lib/External/isl/imath/LICENSE at line 17
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: MIT
+ Score : 100.00
+ Match type : TEXT
+ Links : http://opensource.org/licenses/mit-license.php, https://spdx.org/licenses/MIT
+ Files with this license:
+ tools/polly/lib/External/isl/imath/LICENSE [4:20]
+
+KEEP Apache-2.0 WITH LLVM-exception 8494a9caed330d9a4f40e19cce7dc770
+BELONGS ya.make
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: LLVM-exception
+ Score : 100.00
+ Match type : TEXT
+ Links : http://llvm.org/foundation/relicensing/LICENSE.txt, https://spdx.org/licenses/LLVM-exception
+ Files with this license:
+ LICENSE.TXT [208:222]
+ tools/polly/LICENSE.TXT [208:222]
+
+KEEP Apache-2.0 WITH LLVM-exception 86617d55e1d949711c543404d91f4cea
+BELONGS include/ya.make
+ License text:
+ SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ Scancode info:
+ Original SPDX id: Apache-2.0
+ Score : 100.00
+ Match type : TAG
+ Links : http://www.apache.org/licenses/, http://www.apache.org/licenses/LICENSE-2.0, https://spdx.org/licenses/Apache-2.0
+ Files with this license:
+ include/llvm/Support/LICENSE.TXT [6:6]
+ Scancode info:
+ Original SPDX id: LLVM-exception
+ Score : 100.00
+ Match type : TAG
+ Links : http://llvm.org/foundation/relicensing/LICENSE.txt, https://spdx.org/licenses/LLVM-exception
+ Files with this license:
+ include/llvm/Support/LICENSE.TXT [6:6]
+
+KEEP Apache-2.0 WITH LLVM-exception 8a58f39345a246b9ca05473930feef83
+BELONGS include/ya.make
+ License text:
+ |* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ Scancode info:
+ Original SPDX id: Apache-2.0
+ Score : 100.00
+ Match type : TAG
+ Links : http://www.apache.org/licenses/, http://www.apache.org/licenses/LICENSE-2.0, https://spdx.org/licenses/Apache-2.0
+ Files with this license:
+ include/llvm/IR/FixedMetadataKinds.def [5:5]
+ include/llvm/ProfileData/InstrProfData.inc [5:5]
+ include/llvm/ProfileData/MIBEntryDef.inc [5:5]
+ include/llvm/ProfileData/MemProfData.inc [7:7]
+ Scancode info:
+ Original SPDX id: LLVM-exception
+ Score : 100.00
+ Match type : TAG
+ Links : http://llvm.org/foundation/relicensing/LICENSE.txt, https://spdx.org/licenses/LLVM-exception
+ Files with this license:
+ include/llvm/IR/FixedMetadataKinds.def [5:5]
+ include/llvm/ProfileData/InstrProfData.inc [5:5]
+ include/llvm/ProfileData/MIBEntryDef.inc [5:5]
+ include/llvm/ProfileData/MemProfData.inc [7:7]
+
+KEEP BSD-2-Clause 8ec579f519e2f4faba98d46eba13d3b9
+BELONGS lib/Support/ya.make
+ License text:
+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+ Scancode info:
+ Original SPDX id: BSD-2-Clause
+ Score : 100.00
+ Match type : REFERENCE
+ Links : http://opensource.org/licenses/bsd-license.php, http://www.opensource.org/licenses/BSD-2-Clause, https://spdx.org/licenses/BSD-2-Clause
+ Files with this license:
+ lib/Support/xxhash.cpp [5:5]
+
+KEEP NCSA 9033de97f032dbea491e090d7921982d
+BELONGS lib/Target/PowerPC/ya.make lib/TargetParser/ya.make
+FILE_INCLUDE LICENSE.TXT found in files: lib/Target/PowerPC/PPCInstrFuture.td at line 6, lib/Target/PowerPC/PPCInstrFutureMMA.td at line 6, lib/TargetParser/CSKYTargetParser.cpp at line 6
+FILE_INCLUDE include/llvm/Support/LICENSE.TXT found in files: lib/Target/PowerPC/PPCInstrFuture.td at line 6, lib/Target/PowerPC/PPCInstrFutureMMA.td at line 6, lib/TargetParser/CSKYTargetParser.cpp at line 6
+FILE_INCLUDE tools/polly/LICENSE.TXT found in files: lib/Target/PowerPC/PPCInstrFuture.td at line 6, lib/Target/PowerPC/PPCInstrFutureMMA.td at line 6, lib/TargetParser/CSKYTargetParser.cpp at line 6
+ License text:
+ // This file is distributed under the University of Illinois Open Source
+ // License. See LICENSE.TXT for details.
+ Scancode info:
+ Original SPDX id: NCSA
+ Score : 94.00
+ Match type : NOTICE
+ Links : http://www.otm.illinois.edu/faculty/forms/opensource.asp, https://spdx.org/licenses/NCSA
+ Files with this license:
+ lib/Target/PowerPC/PPCInstrFuture.td [5:6]
+ lib/Target/PowerPC/PPCInstrFutureMMA.td [5:6]
+ lib/TargetParser/CSKYTargetParser.cpp [5:6]
+
+KEEP MIT 9757a4e0d96b0b419a33dd80fb8cb416
+BELONGS tools/polly/lib/External/isl/ya.make tools/polly/lib/External/ppcg/ya.make
+ License text:
+ * Use of this software is governed by the MIT license
+ Scancode info:
+ Original SPDX id: LicenseRef-scancode-unknown-license-reference
+ Score : 53.33
+ Match type : INTRO
+ Links : https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/unknown-license-reference.LICENSE
+ Files with this license:
+ tools/polly/lib/External/isl/basis_reduction_tab.c [4:4]
+ tools/polly/lib/External/isl/basis_reduction_templ.c [5:5]
+ tools/polly/lib/External/isl/extract_key.c [4:4]
+ tools/polly/lib/External/isl/include/isl/arg.h [4:4]
+ tools/polly/lib/External/isl/include/isl/constraint.h [4:4]
+ tools/polly/lib/External/isl/include/isl/ctx.h [4:4]
+ tools/polly/lib/External/isl/include/isl/fixed_box.h [2:2]
+ tools/polly/lib/External/isl/include/isl/hash.h [4:4]
+ tools/polly/lib/External/isl/include/isl/hmap_templ.c [5:5]
+ tools/polly/lib/External/isl/include/isl/ilp.h [4:4]
+ tools/polly/lib/External/isl/include/isl/list.h [4:4]
+ tools/polly/lib/External/isl/include/isl/lp.h [4:4]
+ tools/polly/lib/External/isl/include/isl/map.h [4:4]
+ tools/polly/lib/External/isl/include/isl/mat.h [4:4]
+ tools/polly/lib/External/isl/include/isl/options.h [4:4]
+ tools/polly/lib/External/isl/include/isl/set.h [4:4]
+ tools/polly/lib/External/isl/include/isl/space.h [4:4]
+ tools/polly/lib/External/isl/include/isl/stream.h [4:4]
+ tools/polly/lib/External/isl/include/isl/stride_info.h [2:2]
+ tools/polly/lib/External/isl/include/isl/vec.h [4:4]
+ tools/polly/lib/External/isl/isl_aff.c [10:10]
+ tools/polly/lib/External/isl/isl_aff_lex_templ.c [4:4]
+ tools/polly/lib/External/isl/isl_aff_map.c [6:6]
+ tools/polly/lib/External/isl/isl_affine_hull.c [6:6]
+ tools/polly/lib/External/isl/isl_align_params_templ.c [4:4]
+ tools/polly/lib/External/isl/isl_arg.c [4:4]
+ tools/polly/lib/External/isl/isl_ast.c [4:4]
+ tools/polly/lib/External/isl/isl_ast_build.c [5:5]
+ tools/polly/lib/External/isl/isl_ast_build_expr.c [5:5]
+ tools/polly/lib/External/isl/isl_ast_codegen.c [5:5]
+ tools/polly/lib/External/isl/isl_ast_graft.c [6:6]
+ tools/polly/lib/External/isl/isl_basis_reduction.h [4:4]
+ tools/polly/lib/External/isl/isl_bernstein.c [6:6]
+ tools/polly/lib/External/isl/isl_bind_domain_templ.c [4:4]
+ tools/polly/lib/External/isl/isl_blk.c [4:4]
+ tools/polly/lib/External/isl/isl_blk.h [4:4]
+ tools/polly/lib/External/isl/isl_bound.c [4:4]
+ tools/polly/lib/External/isl/isl_box.c [5:5]
+ tools/polly/lib/External/isl/isl_coalesce.c [9:9]
+ tools/polly/lib/External/isl/isl_constraint.c [5:5]
+ tools/polly/lib/External/isl/isl_convex_hull.c [5:5]
+ tools/polly/lib/External/isl/isl_copy_tuple_id_templ.c [4:4]
+ tools/polly/lib/External/isl/isl_ctx.c [4:4]
+ tools/polly/lib/External/isl/isl_dim_map.c [5:5]
+ tools/polly/lib/External/isl/isl_domain_factor_templ.c [5:5]
+ tools/polly/lib/External/isl/isl_equalities.c [5:5]
+ tools/polly/lib/External/isl/isl_equalities.h [4:4]
+ tools/polly/lib/External/isl/isl_factorization.c [6:6]
+ tools/polly/lib/External/isl/isl_farkas.c [4:4]
+ tools/polly/lib/External/isl/isl_flow.c [8:8]
+ tools/polly/lib/External/isl/isl_fold.c [4:4]
+ tools/polly/lib/External/isl/isl_hash.c [4:4]
+ tools/polly/lib/External/isl/isl_id.c [4:4]
+ tools/polly/lib/External/isl/isl_id_private.h [4:4]
+ tools/polly/lib/External/isl/isl_ilp.c [4:4]
+ tools/polly/lib/External/isl/isl_ilp_opt_multi_val_templ.c [4:4]
+ tools/polly/lib/External/isl/isl_ilp_opt_val_templ.c [4:4]
+ tools/polly/lib/External/isl/isl_input.c [7:7]
+ tools/polly/lib/External/isl/isl_insert_domain_templ.c [4:4]
+ tools/polly/lib/External/isl/isl_int.h [4:4]
+ tools/polly/lib/External/isl/isl_int_sioimath.h [4:4]
+ tools/polly/lib/External/isl/isl_list_read_templ.c [4:4]
+ tools/polly/lib/External/isl/isl_list_templ.c [7:7]
+ tools/polly/lib/External/isl/isl_local.c [5:5]
+ tools/polly/lib/External/isl/isl_local_space.c [5:5]
+ tools/polly/lib/External/isl/isl_lp.c [4:4]
+ tools/polly/lib/External/isl/isl_map.c [10:10]
+ tools/polly/lib/External/isl/isl_map_bound_templ.c [4:4]
+ tools/polly/lib/External/isl/isl_map_lexopt_templ.c [5:5]
+ tools/polly/lib/External/isl/isl_map_private.h [4:4]
+ tools/polly/lib/External/isl/isl_map_simplify.c [7:7]
+ tools/polly/lib/External/isl/isl_map_subtract.c [4:4]
+ tools/polly/lib/External/isl/isl_mat.c [7:7]
+ tools/polly/lib/External/isl/isl_morph.c [5:5]
+ tools/polly/lib/External/isl/isl_morph.h [4:4]
+ tools/polly/lib/External/isl/isl_multi_add_constant_templ.c [4:4]
+ tools/polly/lib/External/isl/isl_multi_align_templ.c [4:4]
+ tools/polly/lib/External/isl/isl_multi_apply_templ.c [5:5]
+ tools/polly/lib/External/isl/isl_multi_arith_templ.c [5:5]
+ tools/polly/lib/External/isl/isl_multi_bind_domain_templ.c [4:4]
+ tools/polly/lib/External/isl/isl_multi_bind_templ.c [4:4]
+ tools/polly/lib/External/isl/isl_multi_cmp.c [4:4]
+ tools/polly/lib/External/isl/isl_multi_coalesce.c [4:4]
+ tools/polly/lib/External/isl/isl_multi_dim_id_templ.c [5:5]
+ tools/polly/lib/External/isl/isl_multi_dims.c [5:5]
+ tools/polly/lib/External/isl/isl_multi_domain_templ.c [4:4]
+ tools/polly/lib/External/isl/isl_multi_explicit_domain.c [4:4]
+ tools/polly/lib/External/isl/isl_multi_floor.c [4:4]
+ tools/polly/lib/External/isl/isl_multi_from_base_templ.c [4:4]
+ tools/polly/lib/External/isl/isl_multi_gist.c [5:5]
+ tools/polly/lib/External/isl/isl_multi_hash.c [4:4]
+ tools/polly/lib/External/isl/isl_multi_identity_templ.c [4:4]
+ tools/polly/lib/External/isl/isl_multi_insert_domain_templ.c [4:4]
+ tools/polly/lib/External/isl/isl_multi_intersect.c [5:5]
+ tools/polly/lib/External/isl/isl_multi_locals_templ.c [4:4]
+ tools/polly/lib/External/isl/isl_multi_min_max_templ.c [4:4]
+ tools/polly/lib/External/isl/isl_multi_move_dims_templ.c [4:4]
+ tools/polly/lib/External/isl/isl_multi_nan_templ.c [4:4]
+ tools/polly/lib/External/isl/isl_multi_no_domain_templ.c [4:4]
+ tools/polly/lib/External/isl/isl_multi_no_explicit_domain.c [4:4]
+ tools/polly/lib/External/isl/isl_multi_param_templ.c [2:2]
+ tools/polly/lib/External/isl/isl_multi_product_templ.c [4:4]
+ tools/polly/lib/External/isl/isl_multi_pw_aff_explicit_domain.c [4:4]
+ tools/polly/lib/External/isl/isl_multi_read_no_explicit_domain_templ.c [4:4]
+ tools/polly/lib/External/isl/isl_multi_splice_templ.c [4:4]
+ tools/polly/lib/External/isl/isl_multi_templ.c [5:5]
+ tools/polly/lib/External/isl/isl_multi_tuple_id_templ.c [5:5]
+ tools/polly/lib/External/isl/isl_multi_unbind_params_templ.c [4:4]
+ tools/polly/lib/External/isl/isl_multi_union_add_templ.c [4:4]
+ tools/polly/lib/External/isl/isl_multi_union_pw_aff_explicit_domain.c [4:4]
+ tools/polly/lib/External/isl/isl_multi_zero_space_templ.c [4:4]
+ tools/polly/lib/External/isl/isl_multi_zero_templ.c [4:4]
+ tools/polly/lib/External/isl/isl_obj.c [6:6]
+ tools/polly/lib/External/isl/isl_opt_mpa_templ.c [4:4]
+ tools/polly/lib/External/isl/isl_options.c [4:4]
+ tools/polly/lib/External/isl/isl_output.c [7:7]
+ tools/polly/lib/External/isl/isl_point.c [7:7]
+ tools/polly/lib/External/isl/isl_polynomial.c [4:4]
+ tools/polly/lib/External/isl/isl_project_out_all_params_templ.c [2:2]
+ tools/polly/lib/External/isl/isl_pw_add_constant_multi_val_templ.c [4:4]
+ tools/polly/lib/External/isl/isl_pw_add_constant_templ.c [4:4]
+ tools/polly/lib/External/isl/isl_pw_add_constant_val_templ.c [4:4]
+ tools/polly/lib/External/isl/isl_pw_bind_domain_templ.c [4:4]
+ tools/polly/lib/External/isl/isl_pw_eval.c [5:5]
+ tools/polly/lib/External/isl/isl_pw_hash.c [4:4]
+ tools/polly/lib/External/isl/isl_pw_insert_dims_templ.c [4:4]
+ tools/polly/lib/External/isl/isl_pw_insert_domain_templ.c [4:4]
+ tools/polly/lib/External/isl/isl_pw_lift_templ.c [4:4]
+ tools/polly/lib/External/isl/isl_pw_locals_templ.c [4:4]
+ tools/polly/lib/External/isl/isl_pw_morph_templ.c [4:4]
+ tools/polly/lib/External/isl/isl_pw_move_dims_templ.c [4:4]
+ tools/polly/lib/External/isl/isl_pw_neg_templ.c [4:4]
+ tools/polly/lib/External/isl/isl_pw_opt_templ.c [4:4]
+ tools/polly/lib/External/isl/isl_pw_pullback_templ.c [4:4]
+ tools/polly/lib/External/isl/isl_pw_range_tuple_id_templ.c [5:5]
+ tools/polly/lib/External/isl/isl_pw_sub_templ.c [4:4]
+ tools/polly/lib/External/isl/isl_pw_templ.c [6:6]
+ tools/polly/lib/External/isl/isl_pw_union_opt.c [6:6]
+ tools/polly/lib/External/isl/isl_reordering.c [4:4]
+ tools/polly/lib/External/isl/isl_sample.c [4:4]
+ tools/polly/lib/External/isl/isl_sample.h [4:4]
+ tools/polly/lib/External/isl/isl_scan.c [4:4]
+ tools/polly/lib/External/isl/isl_scan.h [4:4]
+ tools/polly/lib/External/isl/isl_schedule.c [6:6]
+ tools/polly/lib/External/isl/isl_schedule_band.c [5:5]
+ tools/polly/lib/External/isl/isl_schedule_constraints.c [5:5]
+ tools/polly/lib/External/isl/isl_schedule_node.c [6:6]
+ tools/polly/lib/External/isl/isl_schedule_tree.c [6:6]
+ tools/polly/lib/External/isl/isl_scheduler.c [8:8]
+ tools/polly/lib/External/isl/isl_seq.c [5:5]
+ tools/polly/lib/External/isl/isl_seq.h [4:4]
+ tools/polly/lib/External/isl/isl_space.c [7:7]
+ tools/polly/lib/External/isl/isl_stream.c [4:4]
+ tools/polly/lib/External/isl/isl_stride.c [4:4]
+ tools/polly/lib/External/isl/isl_tab.c [7:7]
+ tools/polly/lib/External/isl/isl_tab.h [4:4]
+ tools/polly/lib/External/isl/isl_tab_lexopt_templ.c [6:6]
+ tools/polly/lib/External/isl/isl_tab_pip.c [6:6]
+ tools/polly/lib/External/isl/isl_tarjan.c [5:5]
+ tools/polly/lib/External/isl/isl_transitive_closure.c [4:4]
+ tools/polly/lib/External/isl/isl_type_check_equal_space_templ.c [4:4]
+ tools/polly/lib/External/isl/isl_type_has_equal_space_templ.c [4:4]
+ tools/polly/lib/External/isl/isl_type_has_space_templ.c [4:4]
+ tools/polly/lib/External/isl/isl_unbind_params_templ.c [4:4]
+ tools/polly/lib/External/isl/isl_union_eval.c [4:4]
+ tools/polly/lib/External/isl/isl_union_locals_templ.c [4:4]
+ tools/polly/lib/External/isl/isl_union_map.c [7:7]
+ tools/polly/lib/External/isl/isl_union_map_lex_templ.c [4:4]
+ tools/polly/lib/External/isl/isl_union_multi.c [6:6]
+ tools/polly/lib/External/isl/isl_union_neg.c [4:4]
+ tools/polly/lib/External/isl/isl_union_single.c [5:5]
+ tools/polly/lib/External/isl/isl_union_templ.c [5:5]
+ tools/polly/lib/External/isl/isl_val.c [4:4]
+ tools/polly/lib/External/isl/isl_vec.c [5:5]
+ tools/polly/lib/External/isl/isl_vertices.c [4:4]
+ tools/polly/lib/External/ppcg/cuda.c [4:4]
+ tools/polly/lib/External/ppcg/cuda_common.c [4:4]
+ tools/polly/lib/External/ppcg/gpu.c [6:6]
+ tools/polly/lib/External/ppcg/gpu_group.c [6:6]
+ tools/polly/lib/External/ppcg/gpu_hybrid.c [5:5]
+ tools/polly/lib/External/ppcg/gpu_print.c [4:4]
+ tools/polly/lib/External/ppcg/gpu_tree.c [4:4]
+ tools/polly/lib/External/ppcg/grouping.c [4:4]
+ tools/polly/lib/External/ppcg/hybrid.c [5:5]
+ tools/polly/lib/External/ppcg/ppcg.c [6:6]
+ tools/polly/lib/External/ppcg/ppcg_options.c [4:4]
+ tools/polly/lib/External/ppcg/print.c [4:4]
+ tools/polly/lib/External/ppcg/schedule.c [4:4]
+ tools/polly/lib/External/ppcg/util.c [4:4]
+
+KEEP Public-Domain AND CC0-1.0 99ddf8b4ab335efa05a3c8ec5da12233
+BELONGS include/ya.make lib/Support/ya.make
+ License text:
+ |* Released into the public domain with CC0 1.0 *|
+ Scancode info:
+ Original SPDX id: LicenseRef-scancode-public-domain
+ Score : 100.00
+ Match type : TEXT
+ Links : http://www.linfo.org/publicdomain.html, https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/public-domain.LICENSE
+ Files with this license:
+ include/llvm-c/blake3.h [10:10]
+ lib/Support/BLAKE3/blake3.c [3:3]
+ Scancode info:
+ Original SPDX id: CC0-1.0
+ Score : 95.00
+ Match type : REFERENCE
+ Links : http://creativecommons.org/publicdomain/zero/1.0/, http://creativecommons.org/publicdomain/zero/1.0/legalcode, https://spdx.org/licenses/CC0-1.0
+ Files with this license:
+ include/llvm-c/blake3.h [10:10]
+ lib/Support/BLAKE3/blake3.c [3:3]
+
+SKIP CC-BY-4.0 9a0d3bef1baf2b971738fdb4dd0ec740
+BELONGS lib/Target/AArch64/ya.make
+ # code comment
+ License text:
+ // We can commute the SELECT_CC by inverting the condition. This
+ Scancode info:
+ Original SPDX id: CC-BY-4.0
+ Score : 50.00
+ Match type : REFERENCE
+ Links : http://creativecommons.org/licenses/by/4.0/, http://creativecommons.org/licenses/by/4.0/legalcode, https://spdx.org/licenses/CC-BY-4.0
+ Files with this license:
+ lib/Target/AArch64/AArch64ISelLowering.cpp [3701:3701]
+
+KEEP Apache-2.0 WITH LLVM-exception 9ac77f65a898755c7eed97099caded94
+BELONGS ya.make
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: Apache-2.0
+ Score : 100.00
+ Match type : TEXT
+ Links : http://www.apache.org/licenses/, http://www.apache.org/licenses/LICENSE-2.0, https://spdx.org/licenses/Apache-2.0
+ Files with this license:
+ LICENSE.TXT [5:205]
+ tools/polly/LICENSE.TXT [5:205]
+
+KEEP Public-Domain aa9ddf39ee4e3b6b568d5b27c00ae8ec
+BELONGS lib/Support/ya.make
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: LicenseRef-scancode-public-domain
+ Score : 86.44
+ Match type : NOTICE
+ Links : http://www.linfo.org/publicdomain.html, https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/public-domain.LICENSE
+ Files with this license:
+ lib/Support/MD5.cpp [7:25]
+ Scancode info:
+ Original SPDX id: LicenseRef-scancode-other-permissive
+ Score : 86.44
+ Match type : NOTICE
+ Links : https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/other-permissive.LICENSE
+ Files with this license:
+ lib/Support/MD5.cpp [7:25]
+
+KEEP Apache-2.0 WITH LLVM-exception ab18d6e931e4c835f2a67a82fa8980df
+BELONGS lib/Target/PowerPC/ya.make
+ License text:
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ // See https)//llvm.org/LICENSE.txt for license information.
+ Scancode info:
+ Original SPDX id: Apache-2.0
+ Score : 100.00
+ Match type : NOTICE
+ Links : http://www.apache.org/licenses/, http://www.apache.org/licenses/LICENSE-2.0, https://spdx.org/licenses/Apache-2.0
+ Files with this license:
+ lib/Target/PowerPC/PPCMacroFusion.def [3:4]
+ Scancode info:
+ Original SPDX id: LLVM-exception
+ Score : 100.00
+ Match type : NOTICE
+ Links : http://llvm.org/foundation/relicensing/LICENSE.txt, https://spdx.org/licenses/LLVM-exception
+ Files with this license:
+ lib/Target/PowerPC/PPCMacroFusion.def [3:4]
+
+KEEP NCSA b160d8bd561da097b0edd40b062e1c84
+BELONGS ya.make
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: NCSA
+ Score : 100.00
+ Match type : TEXT
+ Links : http://www.otm.illinois.edu/faculty/forms/opensource.asp, https://spdx.org/licenses/NCSA
+ Files with this license:
+ LICENSE.TXT [253:278]
+
+KEEP CC0-1.0 b266180ea473c503100b353c4882c91e
+BELONGS include/ya.make lib/Support/ya.make
+ License text:
+ |* SPDX-License-Identifier: CC0-1.0 *|
+ Scancode info:
+ Original SPDX id: CC0-1.0
+ Score : 100.00
+ Match type : TAG
+ Links : http://creativecommons.org/publicdomain/zero/1.0/, http://creativecommons.org/publicdomain/zero/1.0/legalcode, https://spdx.org/licenses/CC0-1.0
+ Files with this license:
+ include/llvm-c/blake3.h [12:12]
+ lib/Support/BLAKE3/blake3.c [5:5]
+
+KEEP Apache-2.0 WITH LLVM-exception b7566a1930e050e1090162bf1d543650
+BELONGS include/ya.make lib/Analysis/ya.make lib/AsmParser/ya.make lib/BinaryFormat/ya.make lib/Bitcode/Reader/ya.make lib/Bitcode/Writer/ya.make lib/Bitstream/Reader/ya.make lib/CodeGen/AsmPrinter/ya.make lib/CodeGen/GlobalISel/ya.make lib/CodeGen/MIRParser/ya.make lib/CodeGen/SelectionDAG/ya.make lib/CodeGen/ya.make lib/DWARFLinker/ya.make lib/DWP/ya.make lib/DebugInfo/CodeView/ya.make lib/DebugInfo/DWARF/ya.make lib/DebugInfo/GSYM/ya.make lib/DebugInfo/MSF/ya.make lib/DebugInfo/PDB/ya.make lib/DebugInfo/Symbolize/ya.make lib/Debuginfod/ya.make lib/Demangle/ya.make lib/ExecutionEngine/Interpreter/ya.make lib/ExecutionEngine/JITLink/ya.make lib/ExecutionEngine/MCJIT/ya.make lib/ExecutionEngine/Orc/Shared/ya.make lib/ExecutionEngine/Orc/TargetProcess/ya.make lib/ExecutionEngine/Orc/ya.make lib/ExecutionEngine/PerfJITEvents/ya.make lib/ExecutionEngine/RuntimeDyld/ya.make lib/ExecutionEngine/ya.make lib/FileCheck/ya.make lib/Frontend/HLSL/ya.make lib/Frontend/OpenACC/ya.make lib/Frontend/OpenMP/ya.make lib/FuzzMutate/ya.make lib/IR/ya.make lib/IRPrinter/ya.make lib/IRReader/ya.make lib/InterfaceStub/ya.make lib/LTO/ya.make lib/LineEditor/ya.make lib/Linker/ya.make lib/MC/MCDisassembler/ya.make lib/MC/MCParser/ya.make lib/MC/ya.make lib/MCA/ya.make lib/ObjCopy/ya.make lib/Object/ya.make lib/ObjectYAML/ya.make lib/Option/ya.make lib/Passes/ya.make lib/ProfileData/Coverage/ya.make lib/ProfileData/ya.make lib/Remarks/ya.make lib/Support/ya.make lib/TableGen/ya.make lib/Target/AArch64/AsmParser/ya.make lib/Target/AArch64/Disassembler/ya.make lib/Target/AArch64/MCTargetDesc/ya.make lib/Target/AArch64/TargetInfo/ya.make lib/Target/AArch64/Utils/ya.make lib/Target/AArch64/ya.make lib/Target/ARM/AsmParser/ya.make lib/Target/ARM/Disassembler/ya.make lib/Target/ARM/MCTargetDesc/ya.make lib/Target/ARM/TargetInfo/ya.make lib/Target/ARM/Utils/ya.make lib/Target/ARM/ya.make lib/Target/BPF/AsmParser/ya.make lib/Target/BPF/Disassembler/ya.make lib/Target/BPF/MCTargetDesc/ya.make 
lib/Target/BPF/TargetInfo/ya.make lib/Target/BPF/ya.make lib/Target/LoongArch/AsmParser/ya.make lib/Target/LoongArch/Disassembler/ya.make lib/Target/LoongArch/MCTargetDesc/ya.make lib/Target/LoongArch/TargetInfo/ya.make lib/Target/LoongArch/ya.make lib/Target/NVPTX/MCTargetDesc/ya.make lib/Target/NVPTX/TargetInfo/ya.make lib/Target/NVPTX/ya.make lib/Target/PowerPC/AsmParser/ya.make lib/Target/PowerPC/Disassembler/ya.make lib/Target/PowerPC/MCTargetDesc/ya.make lib/Target/PowerPC/TargetInfo/ya.make lib/Target/PowerPC/ya.make lib/Target/WebAssembly/AsmParser/ya.make lib/Target/WebAssembly/Disassembler/ya.make lib/Target/WebAssembly/MCTargetDesc/ya.make lib/Target/WebAssembly/TargetInfo/ya.make lib/Target/WebAssembly/Utils/ya.make lib/Target/WebAssembly/ya.make lib/Target/X86/AsmParser/ya.make lib/Target/X86/Disassembler/ya.make lib/Target/X86/MCA/ya.make lib/Target/X86/MCTargetDesc/ya.make lib/Target/X86/TargetInfo/ya.make lib/Target/X86/ya.make lib/Target/ya.make lib/TargetParser/ya.make lib/TextAPI/ya.make lib/ToolDrivers/llvm-dlltool/ya.make lib/ToolDrivers/llvm-lib/ya.make lib/Transforms/AggressiveInstCombine/ya.make lib/Transforms/CFGuard/ya.make lib/Transforms/Coroutines/ya.make lib/Transforms/IPO/ya.make lib/Transforms/InstCombine/ya.make lib/Transforms/Instrumentation/ya.make lib/Transforms/ObjCARC/ya.make lib/Transforms/Scalar/ya.make lib/Transforms/Utils/ya.make lib/Transforms/Vectorize/ya.make lib/WindowsDriver/ya.make lib/WindowsManifest/ya.make lib/XRay/ya.make tools/bugpoint/ya.make tools/dsymutil/ya.make tools/gold/ya.make tools/llc/ya.make tools/lli/ChildTarget/ya.make tools/lli/ya.make tools/llvm-ar/ya.make tools/llvm-as/ya.make tools/llvm-bcanalyzer/ya.make tools/llvm-cat/ya.make tools/llvm-cfi-verify/lib/ya.make tools/llvm-cfi-verify/ya.make tools/llvm-config/ya.make tools/llvm-cov/ya.make tools/llvm-cvtres/ya.make tools/llvm-cxxdump/ya.make tools/llvm-cxxfilt/ya.make tools/llvm-cxxmap/ya.make tools/llvm-diff/lib/ya.make tools/llvm-diff/ya.make 
tools/llvm-dis/ya.make tools/llvm-dwarfdump/ya.make tools/llvm-dwp/ya.make tools/llvm-exegesis/lib/AArch64/ya.make tools/llvm-exegesis/lib/PowerPC/ya.make tools/llvm-exegesis/lib/X86/ya.make tools/llvm-exegesis/lib/ya.make tools/llvm-exegesis/ya.make tools/llvm-extract/ya.make tools/llvm-gsymutil/ya.make tools/llvm-ifs/ya.make tools/llvm-jitlink/llvm-jitlink-executor/ya.make tools/llvm-jitlink/ya.make tools/llvm-libtool-darwin/ya.make tools/llvm-link/ya.make tools/llvm-lipo/ya.make tools/llvm-lto/ya.make tools/llvm-lto2/ya.make tools/llvm-mc/ya.make tools/llvm-mca/ya.make tools/llvm-ml/ya.make tools/llvm-modextract/ya.make tools/llvm-mt/ya.make tools/llvm-nm/ya.make tools/llvm-objcopy/ya.make tools/llvm-objdump/ya.make tools/llvm-opt-report/ya.make tools/llvm-pdbutil/ya.make tools/llvm-profdata/ya.make tools/llvm-profgen/ya.make tools/llvm-rc/ya.make tools/llvm-readobj/ya.make tools/llvm-reduce/ya.make tools/llvm-rtdyld/ya.make tools/llvm-size/ya.make tools/llvm-split/ya.make tools/llvm-stress/ya.make tools/llvm-strings/ya.make tools/llvm-symbolizer/ya.make tools/llvm-undname/ya.make tools/llvm-xray/ya.make tools/lto/ya.make tools/obj2yaml/ya.make tools/opt/ya.make tools/polly/lib/ya.make tools/remarks-shlib/ya.make tools/sancov/ya.make tools/sanstats/ya.make tools/verify-uselistorder/ya.make tools/yaml2obj/ya.make utils/TableGen/GlobalISel/ya.make utils/TableGen/ya.make ya.make
+ License text:
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ Scancode info:
+ Original SPDX id: Apache-2.0
+ Score : 100.00
+ Match type : TAG
+ Links : http://www.apache.org/licenses/, http://www.apache.org/licenses/LICENSE-2.0, https://spdx.org/licenses/Apache-2.0
+ Files with this license:
+ include/llvm-c/DebugInfo.h [12:12]
+ include/llvm/ADT/APFixedPoint.h [12:12]
+ include/llvm/ADT/APFloat.h [12:12]
+ include/llvm/ADT/APInt.h [12:12]
+ include/llvm/ADT/APSInt.h [12:12]
+ include/llvm/ADT/AddressRanges.h [12:12]
+ include/llvm/ADT/AllocatorList.h [12:12]
+ include/llvm/ADT/Any.h [12:12]
+ include/llvm/ADT/ArrayRef.h [12:12]
+ include/llvm/ADT/BitVector.h [12:12]
+ include/llvm/ADT/Bitfields.h [12:12]
+ include/llvm/ADT/BitmaskEnum.h [12:12]
+ include/llvm/ADT/BreadthFirstIterator.h [12:12]
+ include/llvm/ADT/CachedHashString.h [12:12]
+ include/llvm/ADT/CoalescingBitVector.h [12:12]
+ include/llvm/ADT/CombinationGenerator.h [12:12]
+ include/llvm/ADT/DAGDeltaAlgorithm.h [12:12]
+ include/llvm/ADT/DeltaAlgorithm.h [12:12]
+ include/llvm/ADT/DenseMap.h [12:12]
+ include/llvm/ADT/DenseMapInfo.h [12:12]
+ include/llvm/ADT/DenseSet.h [12:12]
+ include/llvm/ADT/DepthFirstIterator.h [12:12]
+ include/llvm/ADT/DirectedGraph.h [12:12]
+ include/llvm/ADT/EnumeratedArray.h [12:12]
+ include/llvm/ADT/EpochTracker.h [12:12]
+ include/llvm/ADT/EquivalenceClasses.h [12:12]
+ include/llvm/ADT/FloatingPointMode.h [12:12]
+ include/llvm/ADT/FoldingSet.h [12:12]
+ include/llvm/ADT/FunctionExtras.h [12:12]
+ include/llvm/ADT/GenericCycleImpl.h [12:12]
+ include/llvm/ADT/GenericCycleInfo.h [12:12]
+ include/llvm/ADT/GenericSSAContext.h [12:12]
+ include/llvm/ADT/GenericUniformityImpl.h [12:12]
+ include/llvm/ADT/GenericUniformityInfo.h [12:12]
+ include/llvm/ADT/GraphTraits.h [12:12]
+ include/llvm/ADT/Hashing.h [12:12]
+ include/llvm/ADT/ImmutableList.h [12:12]
+ include/llvm/ADT/ImmutableMap.h [12:12]
+ include/llvm/ADT/ImmutableSet.h [12:12]
+ include/llvm/ADT/IndexedMap.h [12:12]
+ include/llvm/ADT/IntEqClasses.h [12:12]
+ include/llvm/ADT/IntervalMap.h [12:12]
+ include/llvm/ADT/IntervalTree.h [12:12]
+ include/llvm/ADT/IntrusiveRefCntPtr.h [12:12]
+ include/llvm/ADT/MapVector.h [12:12]
+ include/llvm/ADT/None.h [12:12]
+ include/llvm/ADT/Optional.h [12:12]
+ include/llvm/ADT/PackedVector.h [12:12]
+ include/llvm/ADT/PointerEmbeddedInt.h [12:12]
+ include/llvm/ADT/PointerIntPair.h [12:12]
+ include/llvm/ADT/PointerSumType.h [12:12]
+ include/llvm/ADT/PointerUnion.h [12:12]
+ include/llvm/ADT/PostOrderIterator.h [12:12]
+ include/llvm/ADT/PriorityQueue.h [12:12]
+ include/llvm/ADT/PriorityWorklist.h [12:12]
+ include/llvm/ADT/SCCIterator.h [12:12]
+ include/llvm/ADT/STLExtras.h [12:12]
+ include/llvm/ADT/STLForwardCompat.h [12:12]
+ include/llvm/ADT/STLFunctionalExtras.h [12:12]
+ include/llvm/ADT/ScopeExit.h [12:12]
+ include/llvm/ADT/ScopedHashTable.h [12:12]
+ include/llvm/ADT/Sequence.h [12:12]
+ include/llvm/ADT/SetOperations.h [12:12]
+ include/llvm/ADT/SetVector.h [12:12]
+ include/llvm/ADT/SmallBitVector.h [12:12]
+ include/llvm/ADT/SmallPtrSet.h [12:12]
+ include/llvm/ADT/SmallSet.h [12:12]
+ include/llvm/ADT/SmallString.h [12:12]
+ include/llvm/ADT/SmallVector.h [12:12]
+ include/llvm/ADT/SparseBitVector.h [12:12]
+ include/llvm/ADT/SparseMultiSet.h [12:12]
+ include/llvm/ADT/SparseSet.h [12:12]
+ include/llvm/ADT/Statistic.h [12:12]
+ include/llvm/ADT/StringExtras.h [12:12]
+ include/llvm/ADT/StringMap.h [12:12]
+ include/llvm/ADT/StringMapEntry.h [12:12]
+ include/llvm/ADT/StringRef.h [12:12]
+ include/llvm/ADT/StringSet.h [12:12]
+ include/llvm/ADT/StringSwitch.h [12:12]
+ include/llvm/ADT/TinyPtrVector.h [12:12]
+ include/llvm/ADT/Triple.h [12:12]
+ include/llvm/ADT/Twine.h [12:12]
+ include/llvm/ADT/TypeSwitch.h [12:12]
+ include/llvm/ADT/Uniformity.h [12:12]
+ include/llvm/ADT/UniqueVector.h [12:12]
+ include/llvm/ADT/bit.h [12:12]
+ include/llvm/ADT/edit_distance.h [12:12]
+ include/llvm/ADT/fallible_iterator.h [12:12]
+ include/llvm/ADT/identity.h [12:12]
+ include/llvm/ADT/ilist.h [12:12]
+ include/llvm/ADT/ilist_base.h [12:12]
+ include/llvm/ADT/ilist_iterator.h [12:12]
+ include/llvm/ADT/ilist_node.h [12:12]
+ include/llvm/ADT/ilist_node_base.h [12:12]
+ include/llvm/ADT/ilist_node_options.h [12:12]
+ include/llvm/ADT/iterator.h [12:12]
+ include/llvm/ADT/iterator_range.h [12:12]
+ include/llvm/ADT/simple_ilist.h [12:12]
+ include/llvm/Analysis/AliasAnalysis.h [12:12]
+ include/llvm/Analysis/AliasAnalysisEvaluator.h [12:12]
+ include/llvm/Analysis/AliasSetTracker.h [12:12]
+ include/llvm/Analysis/AssumeBundleQueries.h [12:12]
+ include/llvm/Analysis/AssumptionCache.h [12:12]
+ include/llvm/Analysis/BasicAliasAnalysis.h [12:12]
+ include/llvm/Analysis/BlockFrequencyInfo.h [12:12]
+ include/llvm/Analysis/BlockFrequencyInfoImpl.h [12:12]
+ include/llvm/Analysis/BranchProbabilityInfo.h [12:12]
+ include/llvm/Analysis/CFG.h [12:12]
+ include/llvm/Analysis/CFGPrinter.h [12:12]
+ include/llvm/Analysis/CFGSCCPrinter.h [12:12]
+ include/llvm/Analysis/CGSCCPassManager.h [12:12]
+ include/llvm/Analysis/CallGraph.h [12:12]
+ include/llvm/Analysis/CallGraphSCCPass.h [12:12]
+ include/llvm/Analysis/CallPrinter.h [12:12]
+ include/llvm/Analysis/CaptureTracking.h [12:12]
+ include/llvm/Analysis/CmpInstAnalysis.h [12:12]
+ include/llvm/Analysis/CodeMetrics.h [12:12]
+ include/llvm/Analysis/ConstantFolding.h [12:12]
+ include/llvm/Analysis/ConstraintSystem.h [12:12]
+ include/llvm/Analysis/CostModel.h [12:12]
+ include/llvm/Analysis/CycleAnalysis.h [12:12]
+ include/llvm/Analysis/DDG.h [12:12]
+ include/llvm/Analysis/DDGPrinter.h [12:12]
+ include/llvm/Analysis/DOTGraphTraitsPass.h [12:12]
+ include/llvm/Analysis/Delinearization.h [12:12]
+ include/llvm/Analysis/DemandedBits.h [12:12]
+ include/llvm/Analysis/DependenceAnalysis.h [12:12]
+ include/llvm/Analysis/DependenceGraphBuilder.h [12:12]
+ include/llvm/Analysis/DivergenceAnalysis.h [12:12]
+ include/llvm/Analysis/DomPrinter.h [12:12]
+ include/llvm/Analysis/DomTreeUpdater.h [12:12]
+ include/llvm/Analysis/DominanceFrontier.h [12:12]
+ include/llvm/Analysis/DominanceFrontierImpl.h [12:12]
+ include/llvm/Analysis/EHPersonalities.h [12:12]
+ include/llvm/Analysis/FunctionPropertiesAnalysis.h [12:12]
+ include/llvm/Analysis/GlobalsModRef.h [12:12]
+ include/llvm/Analysis/GuardUtils.h [12:12]
+ include/llvm/Analysis/HeatUtils.h [12:12]
+ include/llvm/Analysis/IRSimilarityIdentifier.h [12:12]
+ include/llvm/Analysis/IVDescriptors.h [12:12]
+ include/llvm/Analysis/IVUsers.h [12:12]
+ include/llvm/Analysis/IndirectCallPromotionAnalysis.h [12:12]
+ include/llvm/Analysis/IndirectCallVisitor.h [12:12]
+ include/llvm/Analysis/InlineAdvisor.h [12:12]
+ include/llvm/Analysis/InlineCost.h [12:12]
+ include/llvm/Analysis/InlineModelFeatureMaps.h [12:12]
+ include/llvm/Analysis/InlineOrder.h [12:12]
+ include/llvm/Analysis/InlineSizeEstimatorAnalysis.h [12:12]
+ include/llvm/Analysis/InstCount.h [12:12]
+ include/llvm/Analysis/InstSimplifyFolder.h [12:12]
+ include/llvm/Analysis/InstructionPrecedenceTracking.h [12:12]
+ include/llvm/Analysis/InstructionSimplify.h [12:12]
+ include/llvm/Analysis/Interval.h [12:12]
+ include/llvm/Analysis/IntervalIterator.h [12:12]
+ include/llvm/Analysis/IntervalPartition.h [12:12]
+ include/llvm/Analysis/IteratedDominanceFrontier.h [12:12]
+ include/llvm/Analysis/LazyBlockFrequencyInfo.h [12:12]
+ include/llvm/Analysis/LazyBranchProbabilityInfo.h [12:12]
+ include/llvm/Analysis/LazyCallGraph.h [12:12]
+ include/llvm/Analysis/LazyValueInfo.h [12:12]
+ include/llvm/Analysis/LegacyDivergenceAnalysis.h [12:12]
+ include/llvm/Analysis/Lint.h [12:12]
+ include/llvm/Analysis/Loads.h [12:12]
+ include/llvm/Analysis/LoopAccessAnalysis.h [12:12]
+ include/llvm/Analysis/LoopAnalysisManager.h [12:12]
+ include/llvm/Analysis/LoopCacheAnalysis.h [12:12]
+ include/llvm/Analysis/LoopInfo.h [12:12]
+ include/llvm/Analysis/LoopInfoImpl.h [12:12]
+ include/llvm/Analysis/LoopIterator.h [12:12]
+ include/llvm/Analysis/LoopNestAnalysis.h [12:12]
+ include/llvm/Analysis/LoopPass.h [12:12]
+ include/llvm/Analysis/LoopUnrollAnalyzer.h [12:12]
+ include/llvm/Analysis/MLInlineAdvisor.h [12:12]
+ include/llvm/Analysis/MLModelRunner.h [12:12]
+ include/llvm/Analysis/MemDerefPrinter.h [12:12]
+ include/llvm/Analysis/MemoryBuiltins.h [12:12]
+ include/llvm/Analysis/MemoryDependenceAnalysis.h [12:12]
+ include/llvm/Analysis/MemoryLocation.h [12:12]
+ include/llvm/Analysis/MemoryProfileInfo.h [12:12]
+ include/llvm/Analysis/MemorySSA.h [12:12]
+ include/llvm/Analysis/MemorySSAUpdater.h [12:12]
+ include/llvm/Analysis/ModelUnderTrainingRunner.h [12:12]
+ include/llvm/Analysis/ModuleDebugInfoPrinter.h [12:12]
+ include/llvm/Analysis/ModuleSummaryAnalysis.h [12:12]
+ include/llvm/Analysis/MustExecute.h [12:12]
+ include/llvm/Analysis/NoInferenceModelRunner.h [12:12]
+ include/llvm/Analysis/ObjCARCAliasAnalysis.h [12:12]
+ include/llvm/Analysis/ObjCARCAnalysisUtils.h [12:12]
+ include/llvm/Analysis/ObjCARCInstKind.h [12:12]
+ include/llvm/Analysis/ObjCARCUtil.h [12:12]
+ include/llvm/Analysis/OptimizationRemarkEmitter.h [12:12]
+ include/llvm/Analysis/OverflowInstAnalysis.h [12:12]
+ include/llvm/Analysis/PHITransAddr.h [12:12]
+ include/llvm/Analysis/Passes.h [12:12]
+ include/llvm/Analysis/PhiValues.h [12:12]
+ include/llvm/Analysis/PostDominators.h [12:12]
+ include/llvm/Analysis/ProfileSummaryInfo.h [12:12]
+ include/llvm/Analysis/PtrUseVisitor.h [12:12]
+ include/llvm/Analysis/RegionInfo.h [12:12]
+ include/llvm/Analysis/RegionInfoImpl.h [12:12]
+ include/llvm/Analysis/RegionIterator.h [12:12]
+ include/llvm/Analysis/RegionPass.h [12:12]
+ include/llvm/Analysis/RegionPrinter.h [12:12]
+ include/llvm/Analysis/ReleaseModeModelRunner.h [12:12]
+ include/llvm/Analysis/ReplayInlineAdvisor.h [12:12]
+ include/llvm/Analysis/ScalarEvolution.h [12:12]
+ include/llvm/Analysis/ScalarEvolutionAliasAnalysis.h [12:12]
+ include/llvm/Analysis/ScalarEvolutionDivision.h [12:12]
+ include/llvm/Analysis/ScalarEvolutionExpressions.h [12:12]
+ include/llvm/Analysis/ScalarEvolutionNormalization.h [12:12]
+ include/llvm/Analysis/ScalarFuncs.def [5:5]
+ include/llvm/Analysis/ScopedNoAliasAA.h [12:12]
+ include/llvm/Analysis/SparsePropagation.h [12:12]
+ include/llvm/Analysis/StackLifetime.h [12:12]
+ include/llvm/Analysis/StackSafetyAnalysis.h [12:12]
+ include/llvm/Analysis/SyncDependenceAnalysis.h [12:12]
+ include/llvm/Analysis/SyntheticCountsUtils.h [12:12]
+ include/llvm/Analysis/TargetFolder.h [12:12]
+ include/llvm/Analysis/TargetLibraryInfo.def [5:5]
+ include/llvm/Analysis/TargetLibraryInfo.h [12:12]
+ include/llvm/Analysis/TargetTransformInfo.h [12:12]
+ include/llvm/Analysis/TargetTransformInfoImpl.h [12:12]
+ include/llvm/Analysis/TensorSpec.h [12:12]
+ include/llvm/Analysis/Trace.h [12:12]
+ include/llvm/Analysis/TypeBasedAliasAnalysis.h [12:12]
+ include/llvm/Analysis/TypeMetadataUtils.h [12:12]
+ include/llvm/Analysis/UniformityAnalysis.h [12:12]
+ include/llvm/Analysis/Utils/ImportedFunctionsInliningStatistics.h [12:12]
+ include/llvm/Analysis/Utils/Local.h [12:12]
+ include/llvm/Analysis/Utils/TFUtils.h [12:12]
+ include/llvm/Analysis/Utils/TrainingLogger.h [12:12]
+ include/llvm/Analysis/ValueLattice.h [12:12]
+ include/llvm/Analysis/ValueLatticeUtils.h [12:12]
+ include/llvm/Analysis/ValueTracking.h [12:12]
+ include/llvm/Analysis/VecFuncs.def [5:5]
+ include/llvm/Analysis/VectorUtils.h [12:12]
+ include/llvm/AsmParser/LLLexer.h [12:12]
+ include/llvm/AsmParser/LLParser.h [12:12]
+ include/llvm/AsmParser/LLToken.h [12:12]
+ include/llvm/AsmParser/Parser.h [12:12]
+ include/llvm/AsmParser/SlotMapping.h [12:12]
+ include/llvm/BinaryFormat/AMDGPUMetadataVerifier.h [12:12]
+ include/llvm/BinaryFormat/COFF.h [12:12]
+ include/llvm/BinaryFormat/DXContainer.h [12:12]
+ include/llvm/BinaryFormat/Dwarf.def [5:5]
+ include/llvm/BinaryFormat/Dwarf.h [12:12]
+ include/llvm/BinaryFormat/ELF.h [12:12]
+ include/llvm/BinaryFormat/GOFF.h [12:12]
+ include/llvm/BinaryFormat/MachO.def [5:5]
+ include/llvm/BinaryFormat/MachO.h [12:12]
+ include/llvm/BinaryFormat/Magic.h [12:12]
+ include/llvm/BinaryFormat/Minidump.h [12:12]
+ include/llvm/BinaryFormat/MinidumpConstants.def [5:5]
+ include/llvm/BinaryFormat/MsgPack.def [5:5]
+ include/llvm/BinaryFormat/MsgPack.h [12:12]
+ include/llvm/BinaryFormat/MsgPackDocument.h [12:12]
+ include/llvm/BinaryFormat/MsgPackReader.h [12:12]
+ include/llvm/BinaryFormat/MsgPackWriter.h [12:12]
+ include/llvm/BinaryFormat/Swift.def [5:5]
+ include/llvm/BinaryFormat/Swift.h [12:12]
+ include/llvm/BinaryFormat/Wasm.h [12:12]
+ include/llvm/BinaryFormat/WasmTraits.h [12:12]
+ include/llvm/BinaryFormat/XCOFF.h [12:12]
+ include/llvm/Bitcode/BitcodeAnalyzer.h [12:12]
+ include/llvm/Bitcode/BitcodeCommon.h [12:12]
+ include/llvm/Bitcode/BitcodeConvenience.h [12:12]
+ include/llvm/Bitcode/BitcodeReader.h [12:12]
+ include/llvm/Bitcode/BitcodeWriter.h [12:12]
+ include/llvm/Bitcode/BitcodeWriterPass.h [12:12]
+ include/llvm/Bitcode/LLVMBitCodes.h [12:12]
+ include/llvm/Bitstream/BitCodeEnums.h [12:12]
+ include/llvm/Bitstream/BitCodes.h [12:12]
+ include/llvm/Bitstream/BitstreamReader.h [12:12]
+ include/llvm/Bitstream/BitstreamWriter.h [12:12]
+ include/llvm/CodeGen/AccelTable.h [12:12]
+ include/llvm/CodeGen/Analysis.h [12:12]
+ include/llvm/CodeGen/AntiDepBreaker.h [12:12]
+ include/llvm/CodeGen/AsmPrinter.h [12:12]
+ include/llvm/CodeGen/AsmPrinterHandler.h [12:12]
+ include/llvm/CodeGen/AtomicExpandUtils.h [12:12]
+ include/llvm/CodeGen/BasicBlockSectionUtils.h [12:12]
+ include/llvm/CodeGen/BasicBlockSectionsProfileReader.h [12:12]
+ include/llvm/CodeGen/BasicTTIImpl.h [12:12]
+ include/llvm/CodeGen/CFIFixup.h [12:12]
+ include/llvm/CodeGen/CSEConfigBase.h [12:12]
+ include/llvm/CodeGen/CalcSpillWeights.h [12:12]
+ include/llvm/CodeGen/CallingConvLower.h [12:12]
+ include/llvm/CodeGen/CodeGenCommonISel.h [12:12]
+ include/llvm/CodeGen/CodeGenPassBuilder.h [12:12]
+ include/llvm/CodeGen/CommandFlags.h [12:12]
+ include/llvm/CodeGen/ComplexDeinterleavingPass.h [12:12]
+ include/llvm/CodeGen/CostTable.h [12:12]
+ include/llvm/CodeGen/DAGCombine.h [12:12]
+ include/llvm/CodeGen/DFAPacketizer.h [12:12]
+ include/llvm/CodeGen/DIE.h [12:12]
+ include/llvm/CodeGen/DIEValue.def [5:5]
+ include/llvm/CodeGen/DbgEntityHistoryCalculator.h [12:12]
+ include/llvm/CodeGen/DebugHandlerBase.h [12:12]
+ include/llvm/CodeGen/DwarfStringPoolEntry.h [12:12]
+ include/llvm/CodeGen/EdgeBundles.h [12:12]
+ include/llvm/CodeGen/ExecutionDomainFix.h [12:12]
+ include/llvm/CodeGen/ExpandReductions.h [12:12]
+ include/llvm/CodeGen/ExpandVectorPredication.h [12:12]
+ include/llvm/CodeGen/FastISel.h [12:12]
+ include/llvm/CodeGen/FaultMaps.h [12:12]
+ include/llvm/CodeGen/FunctionLoweringInfo.h [12:12]
+ include/llvm/CodeGen/GCMetadata.h [12:12]
+ include/llvm/CodeGen/GCMetadataPrinter.h [12:12]
+ include/llvm/CodeGen/GlobalISel/CSEInfo.h [12:12]
+ include/llvm/CodeGen/GlobalISel/CSEMIRBuilder.h [12:12]
+ include/llvm/CodeGen/GlobalISel/CallLowering.h [12:12]
+ include/llvm/CodeGen/GlobalISel/Combiner.h [12:12]
+ include/llvm/CodeGen/GlobalISel/CombinerHelper.h [12:12]
+ include/llvm/CodeGen/GlobalISel/CombinerInfo.h [12:12]
+ include/llvm/CodeGen/GlobalISel/GISelChangeObserver.h [12:12]
+ include/llvm/CodeGen/GlobalISel/GISelKnownBits.h [12:12]
+ include/llvm/CodeGen/GlobalISel/GISelWorkList.h [12:12]
+ include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h [12:12]
+ include/llvm/CodeGen/GlobalISel/IRTranslator.h [12:12]
+ include/llvm/CodeGen/GlobalISel/InlineAsmLowering.h [12:12]
+ include/llvm/CodeGen/GlobalISel/InstructionSelect.h [12:12]
+ include/llvm/CodeGen/GlobalISel/InstructionSelector.h [12:12]
+ include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h [12:12]
+ include/llvm/CodeGen/GlobalISel/LegacyLegalizerInfo.h [12:12]
+ include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h [12:12]
+ include/llvm/CodeGen/GlobalISel/Legalizer.h [12:12]
+ include/llvm/CodeGen/GlobalISel/LegalizerHelper.h [12:12]
+ include/llvm/CodeGen/GlobalISel/LegalizerInfo.h [12:12]
+ include/llvm/CodeGen/GlobalISel/LoadStoreOpt.h [12:12]
+ include/llvm/CodeGen/GlobalISel/Localizer.h [12:12]
+ include/llvm/CodeGen/GlobalISel/LostDebugLocObserver.h [12:12]
+ include/llvm/CodeGen/GlobalISel/MIPatternMatch.h [12:12]
+ include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h [12:12]
+ include/llvm/CodeGen/GlobalISel/RegBankSelect.h [12:12]
+ include/llvm/CodeGen/GlobalISel/Utils.h [12:12]
+ include/llvm/CodeGen/ISDOpcodes.h [12:12]
+ include/llvm/CodeGen/IndirectThunks.h [12:12]
+ include/llvm/CodeGen/IntrinsicLowering.h [12:12]
+ include/llvm/CodeGen/LatencyPriorityQueue.h [12:12]
+ include/llvm/CodeGen/LexicalScopes.h [12:12]
+ include/llvm/CodeGen/LinkAllAsmWriterComponents.h [12:12]
+ include/llvm/CodeGen/LinkAllCodegenComponents.h [12:12]
+ include/llvm/CodeGen/LiveInterval.h [12:12]
+ include/llvm/CodeGen/LiveIntervalCalc.h [12:12]
+ include/llvm/CodeGen/LiveIntervalUnion.h [12:12]
+ include/llvm/CodeGen/LiveIntervals.h [12:12]
+ include/llvm/CodeGen/LivePhysRegs.h [12:12]
+ include/llvm/CodeGen/LiveRangeCalc.h [12:12]
+ include/llvm/CodeGen/LiveRangeEdit.h [12:12]
+ include/llvm/CodeGen/LiveRegMatrix.h [12:12]
+ include/llvm/CodeGen/LiveRegUnits.h [12:12]
+ include/llvm/CodeGen/LiveStacks.h [12:12]
+ include/llvm/CodeGen/LiveVariables.h [12:12]
+ include/llvm/CodeGen/LoopTraversal.h [12:12]
+ include/llvm/CodeGen/LowLevelType.h [12:12]
+ include/llvm/CodeGen/MBFIWrapper.h [12:12]
+ include/llvm/CodeGen/MIRFSDiscriminator.h [12:12]
+ include/llvm/CodeGen/MIRFormatter.h [12:12]
+ include/llvm/CodeGen/MIRParser/MIParser.h [12:12]
+ include/llvm/CodeGen/MIRParser/MIRParser.h [12:12]
+ include/llvm/CodeGen/MIRPrinter.h [12:12]
+ include/llvm/CodeGen/MIRSampleProfile.h [12:12]
+ include/llvm/CodeGen/MIRYamlMapping.h [12:12]
+ include/llvm/CodeGen/MachORelocation.h [12:12]
+ include/llvm/CodeGen/MachineBasicBlock.h [12:12]
+ include/llvm/CodeGen/MachineBlockFrequencyInfo.h [12:12]
+ include/llvm/CodeGen/MachineBranchProbabilityInfo.h [12:12]
+ include/llvm/CodeGen/MachineCFGPrinter.h [12:12]
+ include/llvm/CodeGen/MachineCombinerPattern.h [13:13]
+ include/llvm/CodeGen/MachineConstantPool.h [12:12]
+ include/llvm/CodeGen/MachineCycleAnalysis.h [12:12]
+ include/llvm/CodeGen/MachineDominanceFrontier.h [12:12]
+ include/llvm/CodeGen/MachineDominators.h [12:12]
+ include/llvm/CodeGen/MachineFrameInfo.h [12:12]
+ include/llvm/CodeGen/MachineFunction.h [12:12]
+ include/llvm/CodeGen/MachineFunctionPass.h [12:12]
+ include/llvm/CodeGen/MachineInstr.h [12:12]
+ include/llvm/CodeGen/MachineInstrBuilder.h [12:12]
+ include/llvm/CodeGen/MachineInstrBundle.h [12:12]
+ include/llvm/CodeGen/MachineInstrBundleIterator.h [12:12]
+ include/llvm/CodeGen/MachineJumpTableInfo.h [12:12]
+ include/llvm/CodeGen/MachineLoopInfo.h [12:12]
+ include/llvm/CodeGen/MachineLoopUtils.h [12:12]
+ include/llvm/CodeGen/MachineMemOperand.h [12:12]
+ include/llvm/CodeGen/MachineModuleInfo.h [12:12]
+ include/llvm/CodeGen/MachineModuleInfoImpls.h [12:12]
+ include/llvm/CodeGen/MachineModuleSlotTracker.h [12:12]
+ include/llvm/CodeGen/MachineOperand.h [12:12]
+ include/llvm/CodeGen/MachineOutliner.h [12:12]
+ include/llvm/CodeGen/MachinePassManager.h [12:12]
+ include/llvm/CodeGen/MachinePassRegistry.def [5:5]
+ include/llvm/CodeGen/MachinePassRegistry.h [12:12]
+ include/llvm/CodeGen/MachinePipeliner.h [12:12]
+ include/llvm/CodeGen/MachinePostDominators.h [12:12]
+ include/llvm/CodeGen/MachineRegionInfo.h [12:12]
+ include/llvm/CodeGen/MachineRegisterInfo.h [12:12]
+ include/llvm/CodeGen/MachineSSAContext.h [12:12]
+ include/llvm/CodeGen/MachineSSAUpdater.h [12:12]
+ include/llvm/CodeGen/MachineScheduler.h [12:12]
+ include/llvm/CodeGen/MachineSizeOpts.h [12:12]
+ include/llvm/CodeGen/MachineStableHash.h [12:12]
+ include/llvm/CodeGen/MachineTraceMetrics.h [12:12]
+ include/llvm/CodeGen/MachineUniformityAnalysis.h [12:12]
+ include/llvm/CodeGen/MacroFusion.h [12:12]
+ include/llvm/CodeGen/ModuloSchedule.h [12:12]
+ include/llvm/CodeGen/MultiHazardRecognizer.h [12:12]
+ include/llvm/CodeGen/NonRelocatableStringpool.h [12:12]
+ include/llvm/CodeGen/PBQP/CostAllocator.h [12:12]
+ include/llvm/CodeGen/PBQP/Graph.h [12:12]
+ include/llvm/CodeGen/PBQP/Math.h [12:12]
+ include/llvm/CodeGen/PBQP/ReductionRules.h [12:12]
+ include/llvm/CodeGen/PBQP/Solution.h [12:12]
+ include/llvm/CodeGen/PBQPRAConstraint.h [12:12]
+ include/llvm/CodeGen/ParallelCG.h [12:12]
+ include/llvm/CodeGen/Passes.h [12:12]
+ include/llvm/CodeGen/PreISelIntrinsicLowering.h [12:12]
+ include/llvm/CodeGen/PseudoSourceValue.h [12:12]
+ include/llvm/CodeGen/RDFGraph.h [12:12]
+ include/llvm/CodeGen/RDFLiveness.h [12:12]
+ include/llvm/CodeGen/RDFRegisters.h [12:12]
+ include/llvm/CodeGen/ReachingDefAnalysis.h [12:12]
+ include/llvm/CodeGen/RegAllocCommon.h [12:12]
+ include/llvm/CodeGen/RegAllocPBQP.h [12:12]
+ include/llvm/CodeGen/RegAllocRegistry.h [12:12]
+ include/llvm/CodeGen/Register.h [12:12]
+ include/llvm/CodeGen/RegisterBank.h [12:12]
+ include/llvm/CodeGen/RegisterBankInfo.h [12:12]
+ include/llvm/CodeGen/RegisterClassInfo.h [12:12]
+ include/llvm/CodeGen/RegisterPressure.h [12:12]
+ include/llvm/CodeGen/RegisterScavenging.h [12:12]
+ include/llvm/CodeGen/RegisterUsageInfo.h [12:12]
+ include/llvm/CodeGen/ReplaceWithVeclib.h [12:12]
+ include/llvm/CodeGen/ResourcePriorityQueue.h [12:12]
+ include/llvm/CodeGen/RuntimeLibcalls.h [12:12]
+ include/llvm/CodeGen/SDNodeProperties.td [5:5]
+ include/llvm/CodeGen/ScheduleDAG.h [12:12]
+ include/llvm/CodeGen/ScheduleDAGInstrs.h [12:12]
+ include/llvm/CodeGen/ScheduleDAGMutation.h [12:12]
+ include/llvm/CodeGen/ScheduleDFS.h [12:12]
+ include/llvm/CodeGen/ScheduleHazardRecognizer.h [12:12]
+ include/llvm/CodeGen/SchedulerRegistry.h [12:12]
+ include/llvm/CodeGen/ScoreboardHazardRecognizer.h [12:12]
+ include/llvm/CodeGen/SelectionDAG.h [12:12]
+ include/llvm/CodeGen/SelectionDAGAddressAnalysis.h [12:12]
+ include/llvm/CodeGen/SelectionDAGISel.h [12:12]
+ include/llvm/CodeGen/SelectionDAGNodes.h [12:12]
+ include/llvm/CodeGen/SelectionDAGTargetInfo.h [12:12]
+ include/llvm/CodeGen/SlotIndexes.h [12:12]
+ include/llvm/CodeGen/Spiller.h [12:12]
+ include/llvm/CodeGen/StableHashing.h [12:12]
+ include/llvm/CodeGen/StackMaps.h [12:12]
+ include/llvm/CodeGen/StackProtector.h [12:12]
+ include/llvm/CodeGen/SwiftErrorValueTracking.h [12:12]
+ include/llvm/CodeGen/SwitchLoweringUtils.h [12:12]
+ include/llvm/CodeGen/TailDuplicator.h [12:12]
+ include/llvm/CodeGen/TargetCallingConv.h [12:12]
+ include/llvm/CodeGen/TargetFrameLowering.h [12:12]
+ include/llvm/CodeGen/TargetInstrInfo.h [12:12]
+ include/llvm/CodeGen/TargetLowering.h [12:12]
+ include/llvm/CodeGen/TargetLoweringObjectFileImpl.h [12:12]
+ include/llvm/CodeGen/TargetOpcodes.h [12:12]
+ include/llvm/CodeGen/TargetPassConfig.h [12:12]
+ include/llvm/CodeGen/TargetRegisterInfo.h [12:12]
+ include/llvm/CodeGen/TargetSchedule.h [12:12]
+ include/llvm/CodeGen/TargetSubtargetInfo.h [12:12]
+ include/llvm/CodeGen/TileShapeInfo.h [12:12]
+ include/llvm/CodeGen/TypePromotion.h [12:12]
+ include/llvm/CodeGen/UnreachableBlockElim.h [12:12]
+ include/llvm/CodeGen/VLIWMachineScheduler.h [12:12]
+ include/llvm/CodeGen/ValueTypes.h [12:12]
+ include/llvm/CodeGen/ValueTypes.td [5:5]
+ include/llvm/CodeGen/VirtRegMap.h [12:12]
+ include/llvm/CodeGen/WasmEHFuncInfo.h [12:12]
+ include/llvm/CodeGen/WinEHFuncInfo.h [12:12]
+ include/llvm/DWARFLinker/DWARFLinker.h [12:12]
+ include/llvm/DWARFLinker/DWARFLinkerCompileUnit.h [12:12]
+ include/llvm/DWARFLinker/DWARFLinkerDeclContext.h [12:12]
+ include/llvm/DWARFLinker/DWARFStreamer.h [12:12]
+ include/llvm/DWARFLinkerParallel/DWARFLinker.h [12:12]
+ include/llvm/DebugInfo/CodeView/AppendingTypeTableBuilder.h [12:12]
+ include/llvm/DebugInfo/CodeView/CVRecord.h [12:12]
+ include/llvm/DebugInfo/CodeView/CVSymbolVisitor.h [12:12]
+ include/llvm/DebugInfo/CodeView/CVTypeVisitor.h [12:12]
+ include/llvm/DebugInfo/CodeView/CodeView.h [12:12]
+ include/llvm/DebugInfo/CodeView/CodeViewError.h [12:12]
+ include/llvm/DebugInfo/CodeView/CodeViewRecordIO.h [12:12]
+ include/llvm/DebugInfo/CodeView/CodeViewRegisters.def [5:5]
+ include/llvm/DebugInfo/CodeView/CodeViewSymbols.def [5:5]
+ include/llvm/DebugInfo/CodeView/CodeViewTypes.def [5:5]
+ include/llvm/DebugInfo/CodeView/ContinuationRecordBuilder.h [12:12]
+ include/llvm/DebugInfo/CodeView/DebugChecksumsSubsection.h [12:12]
+ include/llvm/DebugInfo/CodeView/DebugCrossExSubsection.h [12:12]
+ include/llvm/DebugInfo/CodeView/DebugCrossImpSubsection.h [12:12]
+ include/llvm/DebugInfo/CodeView/DebugFrameDataSubsection.h [12:12]
+ include/llvm/DebugInfo/CodeView/DebugInlineeLinesSubsection.h [12:12]
+ include/llvm/DebugInfo/CodeView/DebugLinesSubsection.h [12:12]
+ include/llvm/DebugInfo/CodeView/DebugStringTableSubsection.h [12:12]
+ include/llvm/DebugInfo/CodeView/DebugSubsection.h [12:12]
+ include/llvm/DebugInfo/CodeView/DebugSubsectionRecord.h [12:12]
+ include/llvm/DebugInfo/CodeView/DebugSubsectionVisitor.h [12:12]
+ include/llvm/DebugInfo/CodeView/DebugSymbolRVASubsection.h [12:12]
+ include/llvm/DebugInfo/CodeView/DebugSymbolsSubsection.h [12:12]
+ include/llvm/DebugInfo/CodeView/DebugUnknownSubsection.h [12:12]
+ include/llvm/DebugInfo/CodeView/EnumTables.h [12:12]
+ include/llvm/DebugInfo/CodeView/Formatters.h [12:12]
+ include/llvm/DebugInfo/CodeView/FunctionId.h [12:12]
+ include/llvm/DebugInfo/CodeView/GUID.h [12:12]
+ include/llvm/DebugInfo/CodeView/GlobalTypeTableBuilder.h [12:12]
+ include/llvm/DebugInfo/CodeView/LazyRandomTypeCollection.h [12:12]
+ include/llvm/DebugInfo/CodeView/Line.h [12:12]
+ include/llvm/DebugInfo/CodeView/MergingTypeTableBuilder.h [12:12]
+ include/llvm/DebugInfo/CodeView/RecordName.h [12:12]
+ include/llvm/DebugInfo/CodeView/RecordSerialization.h [12:12]
+ include/llvm/DebugInfo/CodeView/SimpleTypeSerializer.h [12:12]
+ include/llvm/DebugInfo/CodeView/StringsAndChecksums.h [12:12]
+ include/llvm/DebugInfo/CodeView/SymbolDeserializer.h [12:12]
+ include/llvm/DebugInfo/CodeView/SymbolDumpDelegate.h [12:12]
+ include/llvm/DebugInfo/CodeView/SymbolDumper.h [12:12]
+ include/llvm/DebugInfo/CodeView/SymbolRecord.h [12:12]
+ include/llvm/DebugInfo/CodeView/SymbolRecordHelpers.h [12:12]
+ include/llvm/DebugInfo/CodeView/SymbolRecordMapping.h [12:12]
+ include/llvm/DebugInfo/CodeView/SymbolSerializer.h [12:12]
+ include/llvm/DebugInfo/CodeView/SymbolVisitorCallbackPipeline.h [12:12]
+ include/llvm/DebugInfo/CodeView/SymbolVisitorCallbacks.h [12:12]
+ include/llvm/DebugInfo/CodeView/SymbolVisitorDelegate.h [12:12]
+ include/llvm/DebugInfo/CodeView/TypeCollection.h [12:12]
+ include/llvm/DebugInfo/CodeView/TypeDeserializer.h [12:12]
+ include/llvm/DebugInfo/CodeView/TypeDumpVisitor.h [12:12]
+ include/llvm/DebugInfo/CodeView/TypeHashing.h [12:12]
+ include/llvm/DebugInfo/CodeView/TypeIndex.h [12:12]
+ include/llvm/DebugInfo/CodeView/TypeIndexDiscovery.h [12:12]
+ include/llvm/DebugInfo/CodeView/TypeRecord.h [12:12]
+ include/llvm/DebugInfo/CodeView/TypeRecordHelpers.h [12:12]
+ include/llvm/DebugInfo/CodeView/TypeRecordMapping.h [12:12]
+ include/llvm/DebugInfo/CodeView/TypeStreamMerger.h [12:12]
+ include/llvm/DebugInfo/CodeView/TypeSymbolEmitter.h [12:12]
+ include/llvm/DebugInfo/CodeView/TypeTableCollection.h [12:12]
+ include/llvm/DebugInfo/CodeView/TypeVisitorCallbackPipeline.h [12:12]
+ include/llvm/DebugInfo/CodeView/TypeVisitorCallbacks.h [12:12]
+ include/llvm/DebugInfo/DIContext.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFAbbreviationDeclaration.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFAcceleratorTable.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFAddressRange.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFAttribute.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFCompileUnit.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFContext.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFDataExtractor.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFDebugAbbrev.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFDebugAddr.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFDebugArangeSet.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFDebugAranges.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFDebugFrame.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFDebugInfoEntry.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFDebugLine.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFDebugLoc.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFDebugMacro.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFDebugPubTable.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFDebugRangeList.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFDebugRnglists.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFDie.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFExpression.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFFormValue.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFGdbIndex.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFListTable.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFLocationExpression.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFObject.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFRelocMap.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFSection.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFTypePrinter.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFTypeUnit.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFUnit.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFUnitIndex.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFVerifier.h [12:12]
+ include/llvm/DebugInfo/GSYM/DwarfTransformer.h [12:12]
+ include/llvm/DebugInfo/GSYM/ExtractRanges.h [12:12]
+ include/llvm/DebugInfo/GSYM/FileEntry.h [12:12]
+ include/llvm/DebugInfo/GSYM/FileWriter.h [12:12]
+ include/llvm/DebugInfo/GSYM/FunctionInfo.h [12:12]
+ include/llvm/DebugInfo/GSYM/GsymCreator.h [12:12]
+ include/llvm/DebugInfo/GSYM/GsymReader.h [12:12]
+ include/llvm/DebugInfo/GSYM/Header.h [12:12]
+ include/llvm/DebugInfo/GSYM/InlineInfo.h [12:12]
+ include/llvm/DebugInfo/GSYM/LineEntry.h [12:12]
+ include/llvm/DebugInfo/GSYM/LineTable.h [12:12]
+ include/llvm/DebugInfo/GSYM/LookupResult.h [12:12]
+ include/llvm/DebugInfo/GSYM/ObjectFileTransformer.h [12:12]
+ include/llvm/DebugInfo/GSYM/StringTable.h [12:12]
+ include/llvm/DebugInfo/LogicalView/Core/LVCompare.h [12:12]
+ include/llvm/DebugInfo/LogicalView/Core/LVElement.h [12:12]
+ include/llvm/DebugInfo/LogicalView/Core/LVLine.h [12:12]
+ include/llvm/DebugInfo/LogicalView/Core/LVLocation.h [12:12]
+ include/llvm/DebugInfo/LogicalView/Core/LVObject.h [12:12]
+ include/llvm/DebugInfo/LogicalView/Core/LVOptions.h [12:12]
+ include/llvm/DebugInfo/LogicalView/Core/LVRange.h [12:12]
+ include/llvm/DebugInfo/LogicalView/Core/LVReader.h [12:12]
+ include/llvm/DebugInfo/LogicalView/Core/LVScope.h [12:12]
+ include/llvm/DebugInfo/LogicalView/Core/LVSort.h [12:12]
+ include/llvm/DebugInfo/LogicalView/Core/LVStringPool.h [12:12]
+ include/llvm/DebugInfo/LogicalView/Core/LVSupport.h [12:12]
+ include/llvm/DebugInfo/LogicalView/Core/LVSymbol.h [12:12]
+ include/llvm/DebugInfo/LogicalView/Core/LVType.h [12:12]
+ include/llvm/DebugInfo/LogicalView/LVReaderHandler.h [12:12]
+ include/llvm/DebugInfo/LogicalView/Readers/LVBinaryReader.h [12:12]
+ include/llvm/DebugInfo/LogicalView/Readers/LVELFReader.h [12:12]
+ include/llvm/DebugInfo/MSF/IMSFFile.h [12:12]
+ include/llvm/DebugInfo/MSF/MSFBuilder.h [12:12]
+ include/llvm/DebugInfo/MSF/MSFCommon.h [12:12]
+ include/llvm/DebugInfo/MSF/MSFError.h [12:12]
+ include/llvm/DebugInfo/MSF/MappedBlockStream.h [12:12]
+ include/llvm/DebugInfo/PDB/ConcreteSymbolEnumerator.h [12:12]
+ include/llvm/DebugInfo/PDB/DIA/DIADataStream.h [12:12]
+ include/llvm/DebugInfo/PDB/DIA/DIAEnumDebugStreams.h [12:12]
+ include/llvm/DebugInfo/PDB/DIA/DIAEnumFrameData.h [12:12]
+ include/llvm/DebugInfo/PDB/DIA/DIAEnumInjectedSources.h [12:12]
+ include/llvm/DebugInfo/PDB/DIA/DIAEnumLineNumbers.h [12:12]
+ include/llvm/DebugInfo/PDB/DIA/DIAEnumSectionContribs.h [12:12]
+ include/llvm/DebugInfo/PDB/DIA/DIAEnumSourceFiles.h [12:12]
+ include/llvm/DebugInfo/PDB/DIA/DIAEnumSymbols.h [12:12]
+ include/llvm/DebugInfo/PDB/DIA/DIAEnumTables.h [12:12]
+ include/llvm/DebugInfo/PDB/DIA/DIAError.h [12:12]
+ include/llvm/DebugInfo/PDB/DIA/DIAFrameData.h [12:12]
+ include/llvm/DebugInfo/PDB/DIA/DIAInjectedSource.h [12:12]
+ include/llvm/DebugInfo/PDB/DIA/DIALineNumber.h [12:12]
+ include/llvm/DebugInfo/PDB/DIA/DIARawSymbol.h [12:12]
+ include/llvm/DebugInfo/PDB/DIA/DIASectionContrib.h [12:12]
+ include/llvm/DebugInfo/PDB/DIA/DIASession.h [12:12]
+ include/llvm/DebugInfo/PDB/DIA/DIASourceFile.h [12:12]
+ include/llvm/DebugInfo/PDB/DIA/DIASupport.h [12:12]
+ include/llvm/DebugInfo/PDB/DIA/DIATable.h [12:12]
+ include/llvm/DebugInfo/PDB/DIA/DIAUtils.h [12:12]
+ include/llvm/DebugInfo/PDB/GenericError.h [12:12]
+ include/llvm/DebugInfo/PDB/IPDBDataStream.h [12:12]
+ include/llvm/DebugInfo/PDB/IPDBEnumChildren.h [12:12]
+ include/llvm/DebugInfo/PDB/IPDBFrameData.h [12:12]
+ include/llvm/DebugInfo/PDB/IPDBInjectedSource.h [12:12]
+ include/llvm/DebugInfo/PDB/IPDBLineNumber.h [12:12]
+ include/llvm/DebugInfo/PDB/IPDBRawSymbol.h [12:12]
+ include/llvm/DebugInfo/PDB/IPDBSectionContrib.h [12:12]
+ include/llvm/DebugInfo/PDB/IPDBSession.h [12:12]
+ include/llvm/DebugInfo/PDB/IPDBSourceFile.h [12:12]
+ include/llvm/DebugInfo/PDB/IPDBTable.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/DbiModuleDescriptor.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/DbiModuleDescriptorBuilder.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/DbiModuleList.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/DbiStream.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/DbiStreamBuilder.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/EnumTables.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/FormatUtil.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/Formatters.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/GSIStreamBuilder.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/GlobalsStream.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/Hash.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/HashTable.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/ISectionContribVisitor.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/InfoStream.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/InfoStreamBuilder.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/InjectedSourceStream.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/InputFile.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/LinePrinter.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/ModuleDebugStream.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/NamedStreamMap.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/NativeCompilandSymbol.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/NativeEnumGlobals.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/NativeEnumInjectedSources.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/NativeEnumLineNumbers.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/NativeEnumModules.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/NativeEnumSymbols.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/NativeEnumTypes.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/NativeExeSymbol.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/NativeFunctionSymbol.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/NativeInlineSiteSymbol.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/NativeLineNumber.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/NativePublicSymbol.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/NativeRawSymbol.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/NativeSession.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/NativeSourceFile.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/NativeSymbolEnumerator.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/NativeTypeArray.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/NativeTypeBuiltin.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/NativeTypeEnum.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/NativeTypeFunctionSig.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/NativeTypePointer.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/NativeTypeTypedef.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/NativeTypeUDT.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/NativeTypeVTShape.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/PDBFile.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/PDBFileBuilder.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/PDBStringTable.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/PDBStringTableBuilder.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/PublicsStream.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/RawConstants.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/RawError.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/RawTypes.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/SymbolCache.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/SymbolStream.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/TpiHashing.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/TpiStream.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/TpiStreamBuilder.h [12:12]
+ include/llvm/DebugInfo/PDB/PDB.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBContext.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBExtras.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymDumper.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbol.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolAnnotation.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolBlock.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolCompiland.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolCompilandDetails.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolCompilandEnv.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolCustom.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolData.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolExe.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolFunc.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolFuncDebugEnd.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolFuncDebugStart.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolLabel.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolPublicSymbol.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolThunk.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeArray.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeBaseClass.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeBuiltin.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeCustom.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeDimension.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeEnum.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeFriend.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeFunctionArg.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeFunctionSig.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeManaged.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypePointer.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeTypedef.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeUDT.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeVTable.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeVTableShape.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolUnknown.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolUsingNamespace.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBTypes.h [12:12]
+ include/llvm/DebugInfo/PDB/UDTLayout.h [12:12]
+ include/llvm/DebugInfo/Symbolize/DIPrinter.h [12:12]
+ include/llvm/DebugInfo/Symbolize/Markup.h [12:12]
+ include/llvm/DebugInfo/Symbolize/MarkupFilter.h [12:12]
+ include/llvm/DebugInfo/Symbolize/SymbolizableModule.h [12:12]
+ include/llvm/DebugInfo/Symbolize/SymbolizableObjectFile.h [12:12]
+ include/llvm/DebugInfo/Symbolize/Symbolize.h [12:12]
+ include/llvm/Debuginfod/BuildIDFetcher.h [12:12]
+ include/llvm/Debuginfod/Debuginfod.h [12:12]
+ include/llvm/Debuginfod/HTTPClient.h [12:12]
+ include/llvm/Debuginfod/HTTPServer.h [12:12]
+ include/llvm/Demangle/Demangle.h [12:12]
+ include/llvm/Demangle/DemangleConfig.h [12:12]
+ include/llvm/Demangle/ItaniumDemangle.h [12:12]
+ include/llvm/Demangle/ItaniumNodes.def [5:5]
+ include/llvm/Demangle/MicrosoftDemangle.h [12:12]
+ include/llvm/Demangle/MicrosoftDemangleNodes.h [12:12]
+ include/llvm/Demangle/StringView.h [12:12]
+ include/llvm/Demangle/Utility.h [12:12]
+ include/llvm/ExecutionEngine/ExecutionEngine.h [12:12]
+ include/llvm/ExecutionEngine/GenericValue.h [12:12]
+ include/llvm/ExecutionEngine/Interpreter.h [12:12]
+ include/llvm/ExecutionEngine/JITEventListener.h [12:12]
+ include/llvm/ExecutionEngine/JITLink/COFF.h [12:12]
+ include/llvm/ExecutionEngine/JITLink/COFF_x86_64.h [12:12]
+ include/llvm/ExecutionEngine/JITLink/DWARFRecordSectionSplitter.h [12:12]
+ include/llvm/ExecutionEngine/JITLink/EHFrameSupport.h [12:12]
+ include/llvm/ExecutionEngine/JITLink/ELF.h [12:12]
+ include/llvm/ExecutionEngine/JITLink/ELF_aarch64.h [12:12]
+ include/llvm/ExecutionEngine/JITLink/ELF_i386.h [12:12]
+ include/llvm/ExecutionEngine/JITLink/ELF_loongarch.h [12:12]
+ include/llvm/ExecutionEngine/JITLink/ELF_riscv.h [12:12]
+ include/llvm/ExecutionEngine/JITLink/ELF_x86_64.h [12:12]
+ include/llvm/ExecutionEngine/JITLink/JITLink.h [12:12]
+ include/llvm/ExecutionEngine/JITLink/JITLinkDylib.h [12:12]
+ include/llvm/ExecutionEngine/JITLink/JITLinkMemoryManager.h [12:12]
+ include/llvm/ExecutionEngine/JITLink/MachO.h [12:12]
+ include/llvm/ExecutionEngine/JITLink/MachO_arm64.h [12:12]
+ include/llvm/ExecutionEngine/JITLink/MachO_x86_64.h [12:12]
+ include/llvm/ExecutionEngine/JITLink/TableManager.h [12:12]
+ include/llvm/ExecutionEngine/JITLink/aarch64.h [12:12]
+ include/llvm/ExecutionEngine/JITLink/i386.h [12:12]
+ include/llvm/ExecutionEngine/JITLink/loongarch.h [12:12]
+ include/llvm/ExecutionEngine/JITLink/riscv.h [12:12]
+ include/llvm/ExecutionEngine/JITLink/x86_64.h [12:12]
+ include/llvm/ExecutionEngine/JITSymbol.h [12:12]
+ include/llvm/ExecutionEngine/MCJIT.h [12:12]
+ include/llvm/ExecutionEngine/OProfileWrapper.h [12:12]
+ include/llvm/ExecutionEngine/ObjectCache.h [12:12]
+ include/llvm/ExecutionEngine/Orc/COFFPlatform.h [12:12]
+ include/llvm/ExecutionEngine/Orc/COFFVCRuntimeSupport.h [12:12]
+ include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h [12:12]
+ include/llvm/ExecutionEngine/Orc/CompileUtils.h [12:12]
+ include/llvm/ExecutionEngine/Orc/Core.h [12:12]
+ include/llvm/ExecutionEngine/Orc/DebugObjectManagerPlugin.h [12:12]
+ include/llvm/ExecutionEngine/Orc/DebugUtils.h [12:12]
+ include/llvm/ExecutionEngine/Orc/DebuggerSupportPlugin.h [12:12]
+ include/llvm/ExecutionEngine/Orc/ELFNixPlatform.h [12:12]
+ include/llvm/ExecutionEngine/Orc/EPCDebugObjectRegistrar.h [12:12]
+ include/llvm/ExecutionEngine/Orc/EPCDynamicLibrarySearchGenerator.h [12:12]
+ include/llvm/ExecutionEngine/Orc/EPCEHFrameRegistrar.h [12:12]
+ include/llvm/ExecutionEngine/Orc/EPCGenericDylibManager.h [12:12]
+ include/llvm/ExecutionEngine/Orc/EPCGenericJITLinkMemoryManager.h [12:12]
+ include/llvm/ExecutionEngine/Orc/EPCGenericMemoryAccess.h [12:12]
+ include/llvm/ExecutionEngine/Orc/EPCGenericRTDyldMemoryManager.h [12:12]
+ include/llvm/ExecutionEngine/Orc/EPCIndirectionUtils.h [12:12]
+ include/llvm/ExecutionEngine/Orc/ExecutionUtils.h [12:12]
+ include/llvm/ExecutionEngine/Orc/ExecutorProcessControl.h [12:12]
+ include/llvm/ExecutionEngine/Orc/IRCompileLayer.h [12:12]
+ include/llvm/ExecutionEngine/Orc/IRTransformLayer.h [12:12]
+ include/llvm/ExecutionEngine/Orc/IndirectionUtils.h [12:12]
+ include/llvm/ExecutionEngine/Orc/JITTargetMachineBuilder.h [12:12]
+ include/llvm/ExecutionEngine/Orc/LLJIT.h [12:12]
+ include/llvm/ExecutionEngine/Orc/Layer.h [12:12]
+ include/llvm/ExecutionEngine/Orc/LazyReexports.h [12:12]
+ include/llvm/ExecutionEngine/Orc/LookupAndRecordAddrs.h [12:12]
+ include/llvm/ExecutionEngine/Orc/MachOPlatform.h [12:12]
+ include/llvm/ExecutionEngine/Orc/Mangling.h [12:12]
+ include/llvm/ExecutionEngine/Orc/MapperJITLinkMemoryManager.h [12:12]
+ include/llvm/ExecutionEngine/Orc/MemoryMapper.h [12:12]
+ include/llvm/ExecutionEngine/Orc/ObjectFileInterface.h [12:12]
+ include/llvm/ExecutionEngine/Orc/ObjectLinkingLayer.h [12:12]
+ include/llvm/ExecutionEngine/Orc/ObjectTransformLayer.h [12:12]
+ include/llvm/ExecutionEngine/Orc/OrcABISupport.h [12:12]
+ include/llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h [12:12]
+ include/llvm/ExecutionEngine/Orc/Shared/AllocationActions.h [12:12]
+ include/llvm/ExecutionEngine/Orc/Shared/ExecutorAddress.h [12:12]
+ include/llvm/ExecutionEngine/Orc/Shared/MemoryFlags.h [12:12]
+ include/llvm/ExecutionEngine/Orc/Shared/OrcError.h [12:12]
+ include/llvm/ExecutionEngine/Orc/Shared/OrcRTBridge.h [12:12]
+ include/llvm/ExecutionEngine/Orc/Shared/SimplePackedSerialization.h [12:12]
+ include/llvm/ExecutionEngine/Orc/Shared/SimpleRemoteEPCUtils.h [12:12]
+ include/llvm/ExecutionEngine/Orc/Shared/TargetProcessControlTypes.h [12:12]
+ include/llvm/ExecutionEngine/Orc/Shared/WrapperFunctionUtils.h [12:12]
+ include/llvm/ExecutionEngine/Orc/SimpleRemoteEPC.h [12:12]
+ include/llvm/ExecutionEngine/Orc/SpeculateAnalyses.h [12:12]
+ include/llvm/ExecutionEngine/Orc/Speculation.h [12:12]
+ include/llvm/ExecutionEngine/Orc/SymbolStringPool.h [12:12]
+ include/llvm/ExecutionEngine/Orc/TargetProcess/ExecutorBootstrapService.h [12:12]
+ include/llvm/ExecutionEngine/Orc/TargetProcess/ExecutorSharedMemoryMapperService.h [12:12]
+ include/llvm/ExecutionEngine/Orc/TargetProcess/JITLoaderGDB.h [12:12]
+ include/llvm/ExecutionEngine/Orc/TargetProcess/RegisterEHFrames.h [12:12]
+ include/llvm/ExecutionEngine/Orc/TargetProcess/SimpleExecutorDylibManager.h [12:12]
+ include/llvm/ExecutionEngine/Orc/TargetProcess/SimpleExecutorMemoryManager.h [12:12]
+ include/llvm/ExecutionEngine/Orc/TargetProcess/SimpleRemoteEPCServer.h [12:12]
+ include/llvm/ExecutionEngine/Orc/TargetProcess/TargetExecutionUtils.h [12:12]
+ include/llvm/ExecutionEngine/Orc/TaskDispatch.h [12:12]
+ include/llvm/ExecutionEngine/Orc/ThreadSafeModule.h [12:12]
+ include/llvm/ExecutionEngine/RTDyldMemoryManager.h [12:12]
+ include/llvm/ExecutionEngine/RuntimeDyld.h [12:12]
+ include/llvm/ExecutionEngine/RuntimeDyldChecker.h [12:12]
+ include/llvm/ExecutionEngine/SectionMemoryManager.h [12:12]
+ include/llvm/FileCheck/FileCheck.h [12:12]
+ include/llvm/Frontend/Directive/DirectiveBase.td [5:5]
+ include/llvm/Frontend/HLSL/HLSLResource.h [12:12]
+ include/llvm/Frontend/OpenACC/ACC.td [5:5]
+ include/llvm/Frontend/OpenMP/OMP.td [5:5]
+ include/llvm/Frontend/OpenMP/OMPAssume.h [12:12]
+ include/llvm/Frontend/OpenMP/OMPConstants.h [12:12]
+ include/llvm/Frontend/OpenMP/OMPContext.h [12:12]
+ include/llvm/Frontend/OpenMP/OMPDeviceConstants.h [12:12]
+ include/llvm/Frontend/OpenMP/OMPGridValues.h [12:12]
+ include/llvm/Frontend/OpenMP/OMPIRBuilder.h [12:12]
+ include/llvm/Frontend/OpenMP/OMPKinds.def [5:5]
+ include/llvm/FuzzMutate/FuzzerCLI.h [12:12]
+ include/llvm/FuzzMutate/IRMutator.h [12:12]
+ include/llvm/FuzzMutate/OpDescriptor.h [12:12]
+ include/llvm/FuzzMutate/Operations.h [12:12]
+ include/llvm/FuzzMutate/Random.h [12:12]
+ include/llvm/FuzzMutate/RandomIRBuilder.h [12:12]
+ include/llvm/IR/AbstractCallSite.h [12:12]
+ include/llvm/IR/Argument.h [12:12]
+ include/llvm/IR/AssemblyAnnotationWriter.h [12:12]
+ include/llvm/IR/Assumptions.h [12:12]
+ include/llvm/IR/Attributes.h [12:12]
+ include/llvm/IR/Attributes.td [5:5]
+ include/llvm/IR/AutoUpgrade.h [12:12]
+ include/llvm/IR/BasicBlock.h [12:12]
+ include/llvm/IR/BuiltinGCs.h [12:12]
+ include/llvm/IR/CFG.h [12:12]
+ include/llvm/IR/CallingConv.h [12:12]
+ include/llvm/IR/Comdat.h [12:12]
+ include/llvm/IR/Constant.h [12:12]
+ include/llvm/IR/ConstantFold.h [12:12]
+ include/llvm/IR/ConstantFolder.h [12:12]
+ include/llvm/IR/ConstantRange.h [12:12]
+ include/llvm/IR/Constants.h [12:12]
+ include/llvm/IR/ConstrainedOps.def [5:5]
+ include/llvm/IR/DIBuilder.h [12:12]
+ include/llvm/IR/DataLayout.h [12:12]
+ include/llvm/IR/DebugInfo.h [12:12]
+ include/llvm/IR/DebugInfoFlags.def [5:5]
+ include/llvm/IR/DebugInfoMetadata.h [12:12]
+ include/llvm/IR/DebugLoc.h [12:12]
+ include/llvm/IR/DerivedTypes.h [12:12]
+ include/llvm/IR/DerivedUser.h [12:12]
+ include/llvm/IR/DiagnosticHandler.h [12:12]
+ include/llvm/IR/DiagnosticInfo.h [12:12]
+ include/llvm/IR/DiagnosticPrinter.h [12:12]
+ include/llvm/IR/Dominators.h [12:12]
+ include/llvm/IR/FMF.h [12:12]
+ include/llvm/IR/FPEnv.h [12:12]
+ include/llvm/IR/FixedPointBuilder.h [12:12]
+ include/llvm/IR/Function.h [12:12]
+ include/llvm/IR/GCStrategy.h [12:12]
+ include/llvm/IR/GVMaterializer.h [12:12]
+ include/llvm/IR/GetElementPtrTypeIterator.h [12:12]
+ include/llvm/IR/GlobalAlias.h [12:12]
+ include/llvm/IR/GlobalIFunc.h [12:12]
+ include/llvm/IR/GlobalObject.h [12:12]
+ include/llvm/IR/GlobalValue.h [12:12]
+ include/llvm/IR/GlobalVariable.h [12:12]
+ include/llvm/IR/IRBuilder.h [12:12]
+ include/llvm/IR/IRBuilderFolder.h [12:12]
+ include/llvm/IR/IRPrintingPasses.h [12:12]
+ include/llvm/IR/InlineAsm.h [12:12]
+ include/llvm/IR/InstIterator.h [12:12]
+ include/llvm/IR/InstVisitor.h [12:12]
+ include/llvm/IR/InstrTypes.h [12:12]
+ include/llvm/IR/Instruction.def [5:5]
+ include/llvm/IR/Instruction.h [12:12]
+ include/llvm/IR/Instructions.h [12:12]
+ include/llvm/IR/IntrinsicInst.h [12:12]
+ include/llvm/IR/Intrinsics.h [12:12]
+ include/llvm/IR/Intrinsics.td [5:5]
+ include/llvm/IR/IntrinsicsAArch64.td [5:5]
+ include/llvm/IR/IntrinsicsAMDGPU.td [5:5]
+ include/llvm/IR/IntrinsicsARM.td [5:5]
+ include/llvm/IR/IntrinsicsBPF.td [5:5]
+ include/llvm/IR/IntrinsicsDirectX.td [5:5]
+ include/llvm/IR/IntrinsicsHexagon.td [4:4]
+ include/llvm/IR/IntrinsicsHexagonDep.td [5:5]
+ include/llvm/IR/IntrinsicsLoongArch.td [5:5]
+ include/llvm/IR/IntrinsicsMips.td [5:5]
+ include/llvm/IR/IntrinsicsNVVM.td [5:5]
+ include/llvm/IR/IntrinsicsPowerPC.td [5:5]
+ include/llvm/IR/IntrinsicsRISCV.td [5:5]
+ include/llvm/IR/IntrinsicsSPIRV.td [5:5]
+ include/llvm/IR/IntrinsicsSystemZ.td [5:5]
+ include/llvm/IR/IntrinsicsWebAssembly.td [5:5]
+ include/llvm/IR/IntrinsicsX86.td [5:5]
+ include/llvm/IR/IntrinsicsXCore.td [5:5]
+ include/llvm/IR/LLVMContext.h [12:12]
+ include/llvm/IR/LLVMRemarkStreamer.h [12:12]
+ include/llvm/IR/LegacyPassManager.h [12:12]
+ include/llvm/IR/LegacyPassManagers.h [12:12]
+ include/llvm/IR/LegacyPassNameParser.h [12:12]
+ include/llvm/IR/MDBuilder.h [12:12]
+ include/llvm/IR/Mangler.h [12:12]
+ include/llvm/IR/MatrixBuilder.h [12:12]
+ include/llvm/IR/Metadata.def [5:5]
+ include/llvm/IR/Metadata.h [12:12]
+ include/llvm/IR/Module.h [12:12]
+ include/llvm/IR/ModuleSlotTracker.h [12:12]
+ include/llvm/IR/ModuleSummaryIndex.h [12:12]
+ include/llvm/IR/ModuleSummaryIndexYAML.h [12:12]
+ include/llvm/IR/NoFolder.h [12:12]
+ include/llvm/IR/OperandTraits.h [12:12]
+ include/llvm/IR/Operator.h [12:12]
+ include/llvm/IR/OptBisect.h [12:12]
+ include/llvm/IR/PassInstrumentation.h [12:12]
+ include/llvm/IR/PassManager.h [12:12]
+ include/llvm/IR/PassManagerImpl.h [12:12]
+ include/llvm/IR/PassManagerInternal.h [12:12]
+ include/llvm/IR/PassTimingInfo.h [12:12]
+ include/llvm/IR/PatternMatch.h [12:12]
+ include/llvm/IR/PredIteratorCache.h [12:12]
+ include/llvm/IR/PrintPasses.h [12:12]
+ include/llvm/IR/ProfDataUtils.h [12:12]
+ include/llvm/IR/ProfileSummary.h [12:12]
+ include/llvm/IR/PseudoProbe.h [12:12]
+ include/llvm/IR/ReplaceConstant.h [12:12]
+ include/llvm/IR/RuntimeLibcalls.def [5:5]
+ include/llvm/IR/SSAContext.h [12:12]
+ include/llvm/IR/SafepointIRVerifier.h [12:12]
+ include/llvm/IR/Statepoint.h [12:12]
+ include/llvm/IR/StructuralHash.h [12:12]
+ include/llvm/IR/SymbolTableListTraits.h [12:12]
+ include/llvm/IR/TrackingMDRef.h [12:12]
+ include/llvm/IR/Type.h [12:12]
+ include/llvm/IR/TypeFinder.h [12:12]
+ include/llvm/IR/TypedPointerType.h [12:12]
+ include/llvm/IR/Use.h [12:12]
+ include/llvm/IR/UseListOrder.h [12:12]
+ include/llvm/IR/User.h [12:12]
+ include/llvm/IR/VPIntrinsics.def [5:5]
+ include/llvm/IR/Value.def [5:5]
+ include/llvm/IR/Value.h [12:12]
+ include/llvm/IR/ValueHandle.h [12:12]
+ include/llvm/IR/ValueMap.h [12:12]
+ include/llvm/IR/ValueSymbolTable.h [12:12]
+ include/llvm/IR/VectorBuilder.h [12:12]
+ include/llvm/IR/Verifier.h [12:12]
+ include/llvm/IRPrinter/IRPrintingPasses.h [12:12]
+ include/llvm/IRReader/IRReader.h [12:12]
+ include/llvm/InitializePasses.h [12:12]
+ include/llvm/InterfaceStub/ELFObjHandler.h [12:12]
+ include/llvm/InterfaceStub/IFSHandler.h [12:12]
+ include/llvm/InterfaceStub/IFSStub.h [12:12]
+ include/llvm/LTO/Config.h [12:12]
+ include/llvm/LTO/LTO.h [12:12]
+ include/llvm/LTO/LTOBackend.h [12:12]
+ include/llvm/LTO/SummaryBasedOptimizations.h [12:12]
+ include/llvm/LTO/legacy/LTOCodeGenerator.h [12:12]
+ include/llvm/LTO/legacy/LTOModule.h [12:12]
+ include/llvm/LTO/legacy/ThinLTOCodeGenerator.h [12:12]
+ include/llvm/LTO/legacy/UpdateCompilerUsed.h [12:12]
+ include/llvm/LineEditor/LineEditor.h [12:12]
+ include/llvm/LinkAllIR.h [12:12]
+ include/llvm/LinkAllPasses.h [12:12]
+ include/llvm/Linker/IRMover.h [12:12]
+ include/llvm/Linker/Linker.h [12:12]
+ include/llvm/MC/ConstantPools.h [12:12]
+ include/llvm/MC/LaneBitmask.h [12:12]
+ include/llvm/MC/MCAsmBackend.h [12:12]
+ include/llvm/MC/MCAsmInfo.h [12:12]
+ include/llvm/MC/MCAsmInfoCOFF.h [12:12]
+ include/llvm/MC/MCAsmInfoDarwin.h [12:12]
+ include/llvm/MC/MCAsmInfoELF.h [12:12]
+ include/llvm/MC/MCAsmInfoGOFF.h [12:12]
+ include/llvm/MC/MCAsmInfoWasm.h [12:12]
+ include/llvm/MC/MCAsmInfoXCOFF.h [12:12]
+ include/llvm/MC/MCAsmLayout.h [12:12]
+ include/llvm/MC/MCAsmMacro.h [12:12]
+ include/llvm/MC/MCAssembler.h [12:12]
+ include/llvm/MC/MCCodeEmitter.h [12:12]
+ include/llvm/MC/MCCodeView.h [12:12]
+ include/llvm/MC/MCContext.h [12:12]
+ include/llvm/MC/MCDXContainerStreamer.h [12:12]
+ include/llvm/MC/MCDXContainerWriter.h [12:12]
+ include/llvm/MC/MCDecoderOps.h [12:12]
+ include/llvm/MC/MCDirectives.h [12:12]
+ include/llvm/MC/MCDisassembler/MCDisassembler.h [12:12]
+ include/llvm/MC/MCDisassembler/MCExternalSymbolizer.h [12:12]
+ include/llvm/MC/MCDisassembler/MCRelocationInfo.h [12:12]
+ include/llvm/MC/MCDisassembler/MCSymbolizer.h [12:12]
+ include/llvm/MC/MCDwarf.h [12:12]
+ include/llvm/MC/MCELFObjectWriter.h [12:12]
+ include/llvm/MC/MCELFStreamer.h [12:12]
+ include/llvm/MC/MCExpr.h [12:12]
+ include/llvm/MC/MCFixup.h [12:12]
+ include/llvm/MC/MCFixupKindInfo.h [12:12]
+ include/llvm/MC/MCFragment.h [12:12]
+ include/llvm/MC/MCInst.h [12:12]
+ include/llvm/MC/MCInstBuilder.h [12:12]
+ include/llvm/MC/MCInstPrinter.h [12:12]
+ include/llvm/MC/MCInstrAnalysis.h [12:12]
+ include/llvm/MC/MCInstrDesc.h [12:12]
+ include/llvm/MC/MCInstrInfo.h [12:12]
+ include/llvm/MC/MCInstrItineraries.h [12:12]
+ include/llvm/MC/MCLabel.h [12:12]
+ include/llvm/MC/MCLinkerOptimizationHint.h [13:13]
+ include/llvm/MC/MCMachObjectWriter.h [12:12]
+ include/llvm/MC/MCObjectFileInfo.h [12:12]
+ include/llvm/MC/MCObjectStreamer.h [12:12]
+ include/llvm/MC/MCObjectWriter.h [12:12]
+ include/llvm/MC/MCParser/AsmCond.h [12:12]
+ include/llvm/MC/MCParser/AsmLexer.h [12:12]
+ include/llvm/MC/MCParser/MCAsmLexer.h [12:12]
+ include/llvm/MC/MCParser/MCAsmParser.h [12:12]
+ include/llvm/MC/MCParser/MCAsmParserExtension.h [12:12]
+ include/llvm/MC/MCParser/MCAsmParserUtils.h [12:12]
+ include/llvm/MC/MCParser/MCParsedAsmOperand.h [12:12]
+ include/llvm/MC/MCParser/MCTargetAsmParser.h [12:12]
+ include/llvm/MC/MCPseudoProbe.h [12:12]
+ include/llvm/MC/MCRegister.h [12:12]
+ include/llvm/MC/MCRegisterInfo.h [12:12]
+ include/llvm/MC/MCSPIRVObjectWriter.h [12:12]
+ include/llvm/MC/MCSPIRVStreamer.h [12:12]
+ include/llvm/MC/MCSchedule.h [12:12]
+ include/llvm/MC/MCSection.h [12:12]
+ include/llvm/MC/MCSectionCOFF.h [12:12]
+ include/llvm/MC/MCSectionDXContainer.h [12:12]
+ include/llvm/MC/MCSectionELF.h [12:12]
+ include/llvm/MC/MCSectionGOFF.h [12:12]
+ include/llvm/MC/MCSectionMachO.h [12:12]
+ include/llvm/MC/MCSectionSPIRV.h [12:12]
+ include/llvm/MC/MCSectionWasm.h [12:12]
+ include/llvm/MC/MCSectionXCOFF.h [12:12]
+ include/llvm/MC/MCStreamer.h [12:12]
+ include/llvm/MC/MCSubtargetInfo.h [12:12]
+ include/llvm/MC/MCSymbol.h [12:12]
+ include/llvm/MC/MCSymbolCOFF.h [12:12]
+ include/llvm/MC/MCSymbolELF.h [12:12]
+ include/llvm/MC/MCSymbolGOFF.h [12:12]
+ include/llvm/MC/MCSymbolMachO.h [12:12]
+ include/llvm/MC/MCSymbolWasm.h [12:12]
+ include/llvm/MC/MCSymbolXCOFF.h [12:12]
+ include/llvm/MC/MCTargetOptions.h [12:12]
+ include/llvm/MC/MCTargetOptionsCommandFlags.h [12:12]
+ include/llvm/MC/MCValue.h [12:12]
+ include/llvm/MC/MCWasmObjectWriter.h [12:12]
+ include/llvm/MC/MCWasmStreamer.h [12:12]
+ include/llvm/MC/MCWin64EH.h [12:12]
+ include/llvm/MC/MCWinCOFFObjectWriter.h [12:12]
+ include/llvm/MC/MCWinCOFFStreamer.h [12:12]
+ include/llvm/MC/MCWinEH.h [12:12]
+ include/llvm/MC/MCXCOFFObjectWriter.h [12:12]
+ include/llvm/MC/MCXCOFFStreamer.h [12:12]
+ include/llvm/MC/MachineLocation.h [12:12]
+ include/llvm/MC/SectionKind.h [12:12]
+ include/llvm/MC/StringTableBuilder.h [12:12]
+ include/llvm/MC/SubtargetFeature.h [12:12]
+ include/llvm/MC/TargetRegistry.h [12:12]
+ include/llvm/MCA/CodeEmitter.h [12:12]
+ include/llvm/MCA/Context.h [12:12]
+ include/llvm/MCA/CustomBehaviour.h [12:12]
+ include/llvm/MCA/HWEventListener.h [12:12]
+ include/llvm/MCA/HardwareUnits/HardwareUnit.h [12:12]
+ include/llvm/MCA/HardwareUnits/LSUnit.h [12:12]
+ include/llvm/MCA/HardwareUnits/RegisterFile.h [12:12]
+ include/llvm/MCA/HardwareUnits/ResourceManager.h [12:12]
+ include/llvm/MCA/HardwareUnits/RetireControlUnit.h [12:12]
+ include/llvm/MCA/HardwareUnits/Scheduler.h [12:12]
+ include/llvm/MCA/IncrementalSourceMgr.h [12:12]
+ include/llvm/MCA/InstrBuilder.h [12:12]
+ include/llvm/MCA/Instruction.h [12:12]
+ include/llvm/MCA/Pipeline.h [12:12]
+ include/llvm/MCA/SourceMgr.h [12:12]
+ include/llvm/MCA/Stages/DispatchStage.h [12:12]
+ include/llvm/MCA/Stages/EntryStage.h [12:12]
+ include/llvm/MCA/Stages/ExecuteStage.h [12:12]
+ include/llvm/MCA/Stages/InOrderIssueStage.h [12:12]
+ include/llvm/MCA/Stages/InstructionTables.h [12:12]
+ include/llvm/MCA/Stages/MicroOpQueueStage.h [12:12]
+ include/llvm/MCA/Stages/RetireStage.h [12:12]
+ include/llvm/MCA/Stages/Stage.h [12:12]
+ include/llvm/MCA/Support.h [12:12]
+ include/llvm/MCA/View.h [12:12]
+ include/llvm/ObjCopy/COFF/COFFConfig.h [12:12]
+ include/llvm/ObjCopy/COFF/COFFObjcopy.h [12:12]
+ include/llvm/ObjCopy/CommonConfig.h [12:12]
+ include/llvm/ObjCopy/ConfigManager.h [12:12]
+ include/llvm/ObjCopy/ELF/ELFConfig.h [12:12]
+ include/llvm/ObjCopy/ELF/ELFObjcopy.h [12:12]
+ include/llvm/ObjCopy/MachO/MachOConfig.h [12:12]
+ include/llvm/ObjCopy/MachO/MachOObjcopy.h [12:12]
+ include/llvm/ObjCopy/MultiFormatConfig.h [12:12]
+ include/llvm/ObjCopy/ObjCopy.h [12:12]
+ include/llvm/ObjCopy/XCOFF/XCOFFConfig.h [12:12]
+ include/llvm/ObjCopy/XCOFF/XCOFFObjcopy.h [12:12]
+ include/llvm/ObjCopy/wasm/WasmConfig.h [12:12]
+ include/llvm/ObjCopy/wasm/WasmObjcopy.h [12:12]
+ include/llvm/Object/Archive.h [12:12]
+ include/llvm/Object/ArchiveWriter.h [12:12]
+ include/llvm/Object/Binary.h [12:12]
+ include/llvm/Object/BuildID.h [12:12]
+ include/llvm/Object/COFF.h [12:12]
+ include/llvm/Object/COFFImportFile.h [12:12]
+ include/llvm/Object/COFFModuleDefinition.h [12:12]
+ include/llvm/Object/CVDebugRecord.h [12:12]
+ include/llvm/Object/DXContainer.h [12:12]
+ include/llvm/Object/Decompressor.h [12:12]
+ include/llvm/Object/ELF.h [12:12]
+ include/llvm/Object/ELFObjectFile.h [12:12]
+ include/llvm/Object/ELFTypes.h [12:12]
+ include/llvm/Object/Error.h [12:12]
+ include/llvm/Object/FaultMapParser.h [12:12]
+ include/llvm/Object/IRObjectFile.h [12:12]
+ include/llvm/Object/IRSymtab.h [12:12]
+ include/llvm/Object/MachO.h [12:12]
+ include/llvm/Object/MachOUniversal.h [12:12]
+ include/llvm/Object/MachOUniversalWriter.h [12:12]
+ include/llvm/Object/Minidump.h [12:12]
+ include/llvm/Object/ModuleSymbolTable.h [12:12]
+ include/llvm/Object/ObjectFile.h [12:12]
+ include/llvm/Object/OffloadBinary.h [12:12]
+ include/llvm/Object/RelocationResolver.h [12:12]
+ include/llvm/Object/StackMapParser.h [12:12]
+ include/llvm/Object/SymbolSize.h [12:12]
+ include/llvm/Object/SymbolicFile.h [12:12]
+ include/llvm/Object/TapiFile.h [12:12]
+ include/llvm/Object/TapiUniversal.h [12:12]
+ include/llvm/Object/Wasm.h [12:12]
+ include/llvm/Object/WindowsMachineFlag.h [12:12]
+ include/llvm/Object/WindowsResource.h [12:12]
+ include/llvm/Object/XCOFFObjectFile.h [12:12]
+ include/llvm/ObjectYAML/ArchiveYAML.h [12:12]
+ include/llvm/ObjectYAML/COFFYAML.h [12:12]
+ include/llvm/ObjectYAML/CodeViewYAMLDebugSections.h [12:12]
+ include/llvm/ObjectYAML/CodeViewYAMLSymbols.h [12:12]
+ include/llvm/ObjectYAML/CodeViewYAMLTypeHashing.h [12:12]
+ include/llvm/ObjectYAML/CodeViewYAMLTypes.h [12:12]
+ include/llvm/ObjectYAML/DWARFEmitter.h [12:12]
+ include/llvm/ObjectYAML/DWARFYAML.h [12:12]
+ include/llvm/ObjectYAML/DXContainerYAML.h [12:12]
+ include/llvm/ObjectYAML/ELFYAML.h [12:12]
+ include/llvm/ObjectYAML/MachOYAML.h [12:12]
+ include/llvm/ObjectYAML/MinidumpYAML.h [12:12]
+ include/llvm/ObjectYAML/ObjectYAML.h [12:12]
+ include/llvm/ObjectYAML/OffloadYAML.h [12:12]
+ include/llvm/ObjectYAML/WasmYAML.h [12:12]
+ include/llvm/ObjectYAML/XCOFFYAML.h [12:12]
+ include/llvm/ObjectYAML/YAML.h [12:12]
+ include/llvm/ObjectYAML/yaml2obj.h [12:12]
+ include/llvm/Option/Arg.h [12:12]
+ include/llvm/Option/ArgList.h [12:12]
+ include/llvm/Option/OptParser.td [5:5]
+ include/llvm/Option/OptSpecifier.h [12:12]
+ include/llvm/Option/OptTable.h [12:12]
+ include/llvm/Option/Option.h [12:12]
+ include/llvm/Pass.h [12:12]
+ include/llvm/PassAnalysisSupport.h [12:12]
+ include/llvm/PassInfo.h [12:12]
+ include/llvm/PassRegistry.h [12:12]
+ include/llvm/PassSupport.h [12:12]
+ include/llvm/Passes/OptimizationLevel.h [12:12]
+ include/llvm/Passes/PassBuilder.h [12:12]
+ include/llvm/Passes/PassPlugin.h [12:12]
+ include/llvm/Passes/StandardInstrumentations.h [12:12]
+ include/llvm/ProfileData/Coverage/CoverageMapping.h [12:12]
+ include/llvm/ProfileData/Coverage/CoverageMappingReader.h [12:12]
+ include/llvm/ProfileData/Coverage/CoverageMappingWriter.h [12:12]
+ include/llvm/ProfileData/GCOV.h [12:12]
+ include/llvm/ProfileData/InstrProf.h [12:12]
+ include/llvm/ProfileData/InstrProfCorrelator.h [12:12]
+ include/llvm/ProfileData/InstrProfReader.h [12:12]
+ include/llvm/ProfileData/InstrProfWriter.h [12:12]
+ include/llvm/ProfileData/ProfileCommon.h [12:12]
+ include/llvm/ProfileData/RawMemProfReader.h [14:14]
+ include/llvm/ProfileData/SampleProf.h [12:12]
+ include/llvm/ProfileData/SampleProfReader.h [12:12]
+ include/llvm/ProfileData/SampleProfWriter.h [12:12]
+ include/llvm/Remarks/BitstreamRemarkContainer.h [12:12]
+ include/llvm/Remarks/BitstreamRemarkParser.h [12:12]
+ include/llvm/Remarks/BitstreamRemarkSerializer.h [12:12]
+ include/llvm/Remarks/HotnessThresholdParser.h [12:12]
+ include/llvm/Remarks/Remark.h [12:12]
+ include/llvm/Remarks/RemarkFormat.h [12:12]
+ include/llvm/Remarks/RemarkLinker.h [12:12]
+ include/llvm/Remarks/RemarkParser.h [12:12]
+ include/llvm/Remarks/RemarkSerializer.h [12:12]
+ include/llvm/Remarks/RemarkStreamer.h [12:12]
+ include/llvm/Remarks/RemarkStringTable.h [12:12]
+ include/llvm/Remarks/YAMLRemarkSerializer.h [12:12]
+ include/llvm/Support/AArch64TargetParser.h [12:12]
+ include/llvm/Support/AMDGPUMetadata.h [12:12]
+ include/llvm/Support/AMDHSAKernelDescriptor.h [12:12]
+ include/llvm/Support/ARMAttributeParser.h [12:12]
+ include/llvm/Support/ARMBuildAttributes.h [12:12]
+ include/llvm/Support/ARMEHABI.h [12:12]
+ include/llvm/Support/ARMTargetParser.h [12:12]
+ include/llvm/Support/ARMTargetParserCommon.h [12:12]
+ include/llvm/Support/ARMWinEH.h [12:12]
+ include/llvm/Support/AlignOf.h [12:12]
+ include/llvm/Support/Alignment.h [12:12]
+ include/llvm/Support/Allocator.h [12:12]
+ include/llvm/Support/AllocatorBase.h [12:12]
+ include/llvm/Support/ArrayRecycler.h [12:12]
+ include/llvm/Support/Atomic.h [12:12]
+ include/llvm/Support/AtomicOrdering.h [12:12]
+ include/llvm/Support/AutoConvert.h [12:12]
+ include/llvm/Support/Automaton.h [12:12]
+ include/llvm/Support/BCD.h [12:12]
+ include/llvm/Support/BLAKE3.h [12:12]
+ include/llvm/Support/Base64.h [12:12]
+ include/llvm/Support/BinaryByteStream.h [12:12]
+ include/llvm/Support/BinaryItemStream.h [12:12]
+ include/llvm/Support/BinaryStream.h [12:12]
+ include/llvm/Support/BinaryStreamArray.h [12:12]
+ include/llvm/Support/BinaryStreamError.h [12:12]
+ include/llvm/Support/BinaryStreamReader.h [12:12]
+ include/llvm/Support/BinaryStreamRef.h [12:12]
+ include/llvm/Support/BinaryStreamWriter.h [12:12]
+ include/llvm/Support/BlockFrequency.h [12:12]
+ include/llvm/Support/BranchProbability.h [12:12]
+ include/llvm/Support/BuryPointer.h [12:12]
+ include/llvm/Support/CBindingWrapping.h [12:12]
+ include/llvm/Support/CFGDiff.h [12:12]
+ include/llvm/Support/CFGUpdate.h [12:12]
+ include/llvm/Support/COM.h [12:12]
+ include/llvm/Support/CRC.h [12:12]
+ include/llvm/Support/CSKYAttributeParser.h [12:12]
+ include/llvm/Support/CSKYAttributes.h [12:12]
+ include/llvm/Support/CSKYTargetParser.h [12:12]
+ include/llvm/Support/CachePruning.h [12:12]
+ include/llvm/Support/Caching.h [12:12]
+ include/llvm/Support/Capacity.h [12:12]
+ include/llvm/Support/Casting.h [12:12]
+ include/llvm/Support/CheckedArithmetic.h [12:12]
+ include/llvm/Support/Chrono.h [12:12]
+ include/llvm/Support/CodeGen.h [12:12]
+ include/llvm/Support/CodeGenCoverage.h [12:12]
+ include/llvm/Support/CommandLine.h [12:12]
+ include/llvm/Support/Compiler.h [12:12]
+ include/llvm/Support/Compression.h [12:12]
+ include/llvm/Support/CrashRecoveryContext.h [12:12]
+ include/llvm/Support/DJB.h [12:12]
+ include/llvm/Support/DOTGraphTraits.h [12:12]
+ include/llvm/Support/DXILOperationCommon.h [12:12]
+ include/llvm/Support/DataExtractor.h [12:12]
+ include/llvm/Support/DataTypes.h [12:12]
+ include/llvm/Support/Debug.h [12:12]
+ include/llvm/Support/DebugCounter.h [12:12]
+ include/llvm/Support/Discriminator.h [12:12]
+ include/llvm/Support/DivisionByConstantInfo.h [12:12]
+ include/llvm/Support/Duration.h [12:12]
+ include/llvm/Support/DynamicLibrary.h [12:12]
+ include/llvm/Support/ELFAttributeParser.h [12:12]
+ include/llvm/Support/ELFAttributes.h [12:12]
+ include/llvm/Support/Endian.h [12:12]
+ include/llvm/Support/EndianStream.h [12:12]
+ include/llvm/Support/Errc.h [12:12]
+ include/llvm/Support/Errno.h [12:12]
+ include/llvm/Support/Error.h [12:12]
+ include/llvm/Support/ErrorHandling.h [12:12]
+ include/llvm/Support/ErrorOr.h [12:12]
+ include/llvm/Support/ExitCodes.h [12:12]
+ include/llvm/Support/ExtensibleRTTI.h [12:12]
+ include/llvm/Support/FileCollector.h [12:12]
+ include/llvm/Support/FileOutputBuffer.h [12:12]
+ include/llvm/Support/FileSystem.h [12:12]
+ include/llvm/Support/FileSystem/UniqueID.h [12:12]
+ include/llvm/Support/FileUtilities.h [12:12]
+ include/llvm/Support/Format.h [12:12]
+ include/llvm/Support/FormatAdapters.h [12:12]
+ include/llvm/Support/FormatCommon.h [12:12]
+ include/llvm/Support/FormatProviders.h [12:12]
+ include/llvm/Support/FormatVariadic.h [12:12]
+ include/llvm/Support/FormatVariadicDetails.h [12:12]
+ include/llvm/Support/FormattedStream.h [12:12]
+ include/llvm/Support/GenericDomTree.h [12:12]
+ include/llvm/Support/GenericDomTreeConstruction.h [12:12]
+ include/llvm/Support/GenericIteratedDominanceFrontier.h [12:12]
+ include/llvm/Support/GlobPattern.h [12:12]
+ include/llvm/Support/GraphWriter.h [12:12]
+ include/llvm/Support/HashBuilder.h [12:12]
+ include/llvm/Support/Host.h [12:12]
+ include/llvm/Support/InitLLVM.h [12:12]
+ include/llvm/Support/InstructionCost.h [12:12]
+ include/llvm/Support/ItaniumManglingCanonicalizer.h [12:12]
+ include/llvm/Support/JSON.h [12:12]
+ include/llvm/Support/KnownBits.h [12:12]
+ include/llvm/Support/LEB128.h [12:12]
+ include/llvm/Support/LineIterator.h [12:12]
+ include/llvm/Support/LockFileManager.h [12:12]
+ include/llvm/Support/LoongArchTargetParser.h [12:12]
+ include/llvm/Support/LowLevelTypeImpl.h [12:12]
+ include/llvm/Support/MSP430AttributeParser.h [12:12]
+ include/llvm/Support/MSP430Attributes.h [12:12]
+ include/llvm/Support/MSVCErrorWorkarounds.h [12:12]
+ include/llvm/Support/MachineValueType.h [12:12]
+ include/llvm/Support/ManagedStatic.h [12:12]
+ include/llvm/Support/MathExtras.h [12:12]
+ include/llvm/Support/MemAlloc.h [12:12]
+ include/llvm/Support/Memory.h [12:12]
+ include/llvm/Support/MemoryBuffer.h [12:12]
+ include/llvm/Support/MemoryBufferRef.h [12:12]
+ include/llvm/Support/MipsABIFlags.h [12:12]
+ include/llvm/Support/ModRef.h [12:12]
+ include/llvm/Support/Mutex.h [12:12]
+ include/llvm/Support/NativeFormatting.h [12:12]
+ include/llvm/Support/OnDiskHashTable.h [12:12]
+ include/llvm/Support/OptimizedStructLayout.h [12:12]
+ include/llvm/Support/PGOOptions.h [12:12]
+ include/llvm/Support/Parallel.h [12:12]
+ include/llvm/Support/Path.h [12:12]
+ include/llvm/Support/PluginLoader.h [12:12]
+ include/llvm/Support/PointerLikeTypeTraits.h [12:12]
+ include/llvm/Support/PrettyStackTrace.h [12:12]
+ include/llvm/Support/Printable.h [12:12]
+ include/llvm/Support/Process.h [12:12]
+ include/llvm/Support/Program.h [12:12]
+ include/llvm/Support/RISCVAttributeParser.h [12:12]
+ include/llvm/Support/RISCVAttributes.h [12:12]
+ include/llvm/Support/RISCVISAInfo.h [12:12]
+ include/llvm/Support/RWMutex.h [12:12]
+ include/llvm/Support/RandomNumberGenerator.h [12:12]
+ include/llvm/Support/Recycler.h [12:12]
+ include/llvm/Support/RecyclingAllocator.h [12:12]
+ include/llvm/Support/Regex.h [12:12]
+ include/llvm/Support/Registry.h [12:12]
+ include/llvm/Support/SHA1.h [12:12]
+ include/llvm/Support/SHA256.h [12:12]
+ include/llvm/Support/SMLoc.h [12:12]
+ include/llvm/Support/SMTAPI.h [12:12]
+ include/llvm/Support/SaveAndRestore.h [12:12]
+ include/llvm/Support/ScaledNumber.h [12:12]
+ include/llvm/Support/ScopedPrinter.h [12:12]
+ include/llvm/Support/Signals.h [12:12]
+ include/llvm/Support/Signposts.h [12:12]
+ include/llvm/Support/SmallVectorMemoryBuffer.h [12:12]
+ include/llvm/Support/SourceMgr.h [12:12]
+ include/llvm/Support/SpecialCaseList.h [12:12]
+ include/llvm/Support/StringSaver.h [12:12]
+ include/llvm/Support/SuffixTree.h [12:12]
+ include/llvm/Support/SwapByteOrder.h [12:12]
+ include/llvm/Support/SymbolRemappingReader.h [12:12]
+ include/llvm/Support/SystemUtils.h [12:12]
+ include/llvm/Support/TarWriter.h [12:12]
+ include/llvm/Support/TargetOpcodes.def [5:5]
+ include/llvm/Support/TargetParser.h [12:12]
+ include/llvm/Support/TargetSelect.h [12:12]
+ include/llvm/Support/TaskQueue.h [12:12]
+ include/llvm/Support/ThreadPool.h [12:12]
+ include/llvm/Support/Threading.h [12:12]
+ include/llvm/Support/TimeProfiler.h [12:12]
+ include/llvm/Support/Timer.h [12:12]
+ include/llvm/Support/ToolOutputFile.h [12:12]
+ include/llvm/Support/TrailingObjects.h [12:12]
+ include/llvm/Support/TrigramIndex.h [12:12]
+ include/llvm/Support/TypeName.h [12:12]
+ include/llvm/Support/TypeSize.h [12:12]
+ include/llvm/Support/Unicode.h [12:12]
+ include/llvm/Support/UnicodeCharRanges.h [12:12]
+ include/llvm/Support/Valgrind.h [12:12]
+ include/llvm/Support/VersionTuple.h [12:12]
+ include/llvm/Support/VirtualFileSystem.h [12:12]
+ include/llvm/Support/Watchdog.h [12:12]
+ include/llvm/Support/Win64EH.h [12:12]
+ include/llvm/Support/Windows/WindowsSupport.h [12:12]
+ include/llvm/Support/WindowsError.h [12:12]
+ include/llvm/Support/WithColor.h [12:12]
+ include/llvm/Support/X86DisassemblerDecoderCommon.h [12:12]
+ include/llvm/Support/X86TargetParser.def [5:5]
+ include/llvm/Support/X86TargetParser.h [12:12]
+ include/llvm/Support/YAMLParser.h [12:12]
+ include/llvm/Support/YAMLTraits.h [12:12]
+ include/llvm/Support/circular_raw_ostream.h [12:12]
+ include/llvm/Support/raw_os_ostream.h [12:12]
+ include/llvm/Support/raw_ostream.h [12:12]
+ include/llvm/Support/raw_sha1_ostream.h [12:12]
+ include/llvm/Support/thread.h [12:12]
+ include/llvm/Support/type_traits.h [12:12]
+ include/llvm/TableGen/Automaton.td [5:5]
+ include/llvm/TableGen/Error.h [12:12]
+ include/llvm/TableGen/Main.h [12:12]
+ include/llvm/TableGen/Parser.h [12:12]
+ include/llvm/TableGen/Record.h [12:12]
+ include/llvm/TableGen/SearchableTable.td [5:5]
+ include/llvm/TableGen/SetTheory.h [12:12]
+ include/llvm/TableGen/StringMatcher.h [12:12]
+ include/llvm/TableGen/StringToOffsetTable.h [12:12]
+ include/llvm/TableGen/TableGenBackend.h [12:12]
+ include/llvm/Target/CGPassBuilderOption.h [12:12]
+ include/llvm/Target/CodeGenCWrappers.h [12:12]
+ include/llvm/Target/GenericOpcodes.td [5:5]
+ include/llvm/Target/GlobalISel/Combine.td [5:5]
+ include/llvm/Target/GlobalISel/RegisterBank.td [5:5]
+ include/llvm/Target/GlobalISel/SelectionDAGCompat.td [5:5]
+ include/llvm/Target/GlobalISel/Target.td [5:5]
+ include/llvm/Target/Target.td [5:5]
+ include/llvm/Target/TargetCallingConv.td [5:5]
+ include/llvm/Target/TargetInstrPredicate.td [5:5]
+ include/llvm/Target/TargetIntrinsicInfo.h [12:12]
+ include/llvm/Target/TargetItinerary.td [5:5]
+ include/llvm/Target/TargetLoweringObjectFile.h [12:12]
+ include/llvm/Target/TargetMachine.h [12:12]
+ include/llvm/Target/TargetOptions.h [12:12]
+ include/llvm/Target/TargetPfmCounters.td [5:5]
+ include/llvm/Target/TargetSchedule.td [5:5]
+ include/llvm/Target/TargetSelectionDAG.td [5:5]
+ include/llvm/TargetParser/AArch64TargetParser.h [12:12]
+ include/llvm/TargetParser/ARMTargetParser.def [5:5]
+ include/llvm/TargetParser/ARMTargetParser.h [12:12]
+ include/llvm/TargetParser/ARMTargetParserCommon.h [12:12]
+ include/llvm/TargetParser/CSKYTargetParser.def [5:5]
+ include/llvm/TargetParser/CSKYTargetParser.h [13:13]
+ include/llvm/TargetParser/Host.h [12:12]
+ include/llvm/TargetParser/LoongArchTargetParser.h [12:12]
+ include/llvm/TargetParser/RISCVTargetParser.h [12:12]
+ include/llvm/TargetParser/TargetParser.h [12:12]
+ include/llvm/TargetParser/Triple.h [12:12]
+ include/llvm/TargetParser/X86TargetParser.def [5:5]
+ include/llvm/TargetParser/X86TargetParser.h [12:12]
+ include/llvm/Testing/ADT/StringMap.h [12:12]
+ include/llvm/Testing/ADT/StringMapEntry.h [12:12]
+ include/llvm/Testing/Annotations/Annotations.h [12:12]
+ include/llvm/Testing/Support/Error.h [12:12]
+ include/llvm/Testing/Support/SupportHelpers.h [12:12]
+ include/llvm/TextAPI/Architecture.def [5:5]
+ include/llvm/TextAPI/Architecture.h [12:12]
+ include/llvm/TextAPI/ArchitectureSet.h [12:12]
+ include/llvm/TextAPI/InterfaceFile.h [12:12]
+ include/llvm/TextAPI/PackedVersion.h [12:12]
+ include/llvm/TextAPI/Platform.h [12:12]
+ include/llvm/TextAPI/Symbol.h [12:12]
+ include/llvm/TextAPI/Target.h [12:12]
+ include/llvm/TextAPI/TextAPIReader.h [12:12]
+ include/llvm/TextAPI/TextAPIWriter.h [12:12]
+ include/llvm/ToolDrivers/llvm-dlltool/DlltoolDriver.h [12:12]
+ include/llvm/ToolDrivers/llvm-lib/LibDriver.h [12:12]
+ include/llvm/Transforms/AggressiveInstCombine/AggressiveInstCombine.h [12:12]
+ include/llvm/Transforms/CFGuard.h [12:12]
+ include/llvm/Transforms/Coroutines/CoroCleanup.h [12:12]
+ include/llvm/Transforms/Coroutines/CoroConditionalWrapper.h [12:12]
+ include/llvm/Transforms/Coroutines/CoroEarly.h [12:12]
+ include/llvm/Transforms/Coroutines/CoroElide.h [12:12]
+ include/llvm/Transforms/Coroutines/CoroSplit.h [12:12]
+ include/llvm/Transforms/IPO.h [12:12]
+ include/llvm/Transforms/IPO/AlwaysInliner.h [12:12]
+ include/llvm/Transforms/IPO/Annotation2Metadata.h [12:12]
+ include/llvm/Transforms/IPO/ArgumentPromotion.h [12:12]
+ include/llvm/Transforms/IPO/Attributor.h [12:12]
+ include/llvm/Transforms/IPO/BlockExtractor.h [12:12]
+ include/llvm/Transforms/IPO/CalledValuePropagation.h [12:12]
+ include/llvm/Transforms/IPO/ConstantMerge.h [12:12]
+ include/llvm/Transforms/IPO/CrossDSOCFI.h [12:12]
+ include/llvm/Transforms/IPO/DeadArgumentElimination.h [12:12]
+ include/llvm/Transforms/IPO/ElimAvailExtern.h [12:12]
+ include/llvm/Transforms/IPO/ExtractGV.h [12:12]
+ include/llvm/Transforms/IPO/ForceFunctionAttrs.h [12:12]
+ include/llvm/Transforms/IPO/FunctionAttrs.h [12:12]
+ include/llvm/Transforms/IPO/FunctionImport.h [12:12]
+ include/llvm/Transforms/IPO/FunctionSpecialization.h [12:12]
+ include/llvm/Transforms/IPO/GlobalDCE.h [12:12]
+ include/llvm/Transforms/IPO/GlobalOpt.h [12:12]
+ include/llvm/Transforms/IPO/GlobalSplit.h [12:12]
+ include/llvm/Transforms/IPO/HotColdSplitting.h [12:12]
+ include/llvm/Transforms/IPO/IROutliner.h [12:12]
+ include/llvm/Transforms/IPO/InferFunctionAttrs.h [12:12]
+ include/llvm/Transforms/IPO/Inliner.h [12:12]
+ include/llvm/Transforms/IPO/Internalize.h [12:12]
+ include/llvm/Transforms/IPO/LoopExtractor.h [12:12]
+ include/llvm/Transforms/IPO/LowerTypeTests.h [12:12]
+ include/llvm/Transforms/IPO/MergeFunctions.h [12:12]
+ include/llvm/Transforms/IPO/ModuleInliner.h [12:12]
+ include/llvm/Transforms/IPO/OpenMPOpt.h [12:12]
+ include/llvm/Transforms/IPO/PartialInlining.h [12:12]
+ include/llvm/Transforms/IPO/PassManagerBuilder.h [12:12]
+ include/llvm/Transforms/IPO/ProfiledCallGraph.h [12:12]
+ include/llvm/Transforms/IPO/SCCP.h [12:12]
+ include/llvm/Transforms/IPO/SampleContextTracker.h [12:12]
+ include/llvm/Transforms/IPO/SampleProfile.h [12:12]
+ include/llvm/Transforms/IPO/SampleProfileProbe.h [12:12]
+ include/llvm/Transforms/IPO/StripDeadPrototypes.h [12:12]
+ include/llvm/Transforms/IPO/StripSymbols.h [12:12]
+ include/llvm/Transforms/IPO/SyntheticCountsPropagation.h [12:12]
+ include/llvm/Transforms/IPO/ThinLTOBitcodeWriter.h [12:12]
+ include/llvm/Transforms/IPO/WholeProgramDevirt.h [12:12]
+ include/llvm/Transforms/InstCombine/InstCombine.h [12:12]
+ include/llvm/Transforms/InstCombine/InstCombiner.h [12:12]
+ include/llvm/Transforms/Instrumentation.h [12:12]
+ include/llvm/Transforms/Instrumentation/AddressSanitizer.h [12:12]
+ include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h [12:12]
+ include/llvm/Transforms/Instrumentation/AddressSanitizerOptions.h [12:12]
+ include/llvm/Transforms/Instrumentation/BoundsChecking.h [12:12]
+ include/llvm/Transforms/Instrumentation/CGProfile.h [12:12]
+ include/llvm/Transforms/Instrumentation/ControlHeightReduction.h [12:12]
+ include/llvm/Transforms/Instrumentation/DataFlowSanitizer.h [12:12]
+ include/llvm/Transforms/Instrumentation/GCOVProfiler.h [12:12]
+ include/llvm/Transforms/Instrumentation/HWAddressSanitizer.h [12:12]
+ include/llvm/Transforms/Instrumentation/InstrOrderFile.h [12:12]
+ include/llvm/Transforms/Instrumentation/InstrProfiling.h [12:12]
+ include/llvm/Transforms/Instrumentation/KCFI.h [12:12]
+ include/llvm/Transforms/Instrumentation/MemProfiler.h [12:12]
+ include/llvm/Transforms/Instrumentation/MemorySanitizer.h [12:12]
+ include/llvm/Transforms/Instrumentation/PGOInstrumentation.h [12:12]
+ include/llvm/Transforms/Instrumentation/PoisonChecking.h [12:12]
+ include/llvm/Transforms/Instrumentation/SanitizerBinaryMetadata.h [12:12]
+ include/llvm/Transforms/Instrumentation/SanitizerCoverage.h [14:14]
+ include/llvm/Transforms/Instrumentation/ThreadSanitizer.h [12:12]
+ include/llvm/Transforms/ObjCARC.h [12:12]
+ include/llvm/Transforms/Scalar.h [12:12]
+ include/llvm/Transforms/Scalar/ADCE.h [12:12]
+ include/llvm/Transforms/Scalar/AlignmentFromAssumptions.h [12:12]
+ include/llvm/Transforms/Scalar/AnnotationRemarks.h [12:12]
+ include/llvm/Transforms/Scalar/BDCE.h [12:12]
+ include/llvm/Transforms/Scalar/CallSiteSplitting.h [12:12]
+ include/llvm/Transforms/Scalar/ConstantHoisting.h [12:12]
+ include/llvm/Transforms/Scalar/ConstraintElimination.h [12:12]
+ include/llvm/Transforms/Scalar/CorrelatedValuePropagation.h [12:12]
+ include/llvm/Transforms/Scalar/DCE.h [12:12]
+ include/llvm/Transforms/Scalar/DFAJumpThreading.h [12:12]
+ include/llvm/Transforms/Scalar/DeadStoreElimination.h [12:12]
+ include/llvm/Transforms/Scalar/DivRemPairs.h [12:12]
+ include/llvm/Transforms/Scalar/EarlyCSE.h [12:12]
+ include/llvm/Transforms/Scalar/FlattenCFG.h [12:12]
+ include/llvm/Transforms/Scalar/Float2Int.h [12:12]
+ include/llvm/Transforms/Scalar/GVN.h [12:12]
+ include/llvm/Transforms/Scalar/GVNExpression.h [12:12]
+ include/llvm/Transforms/Scalar/GuardWidening.h [12:12]
+ include/llvm/Transforms/Scalar/IVUsersPrinter.h [12:12]
+ include/llvm/Transforms/Scalar/IndVarSimplify.h [12:12]
+ include/llvm/Transforms/Scalar/InductiveRangeCheckElimination.h [12:12]
+ include/llvm/Transforms/Scalar/InferAddressSpaces.h [12:12]
+ include/llvm/Transforms/Scalar/InstSimplifyPass.h [12:12]
+ include/llvm/Transforms/Scalar/JumpThreading.h [12:12]
+ include/llvm/Transforms/Scalar/LICM.h [12:12]
+ include/llvm/Transforms/Scalar/LoopAccessAnalysisPrinter.h [12:12]
+ include/llvm/Transforms/Scalar/LoopBoundSplit.h [12:12]
+ include/llvm/Transforms/Scalar/LoopDataPrefetch.h [13:13]
+ include/llvm/Transforms/Scalar/LoopDeletion.h [12:12]
+ include/llvm/Transforms/Scalar/LoopDistribute.h [12:12]
+ include/llvm/Transforms/Scalar/LoopFlatten.h [12:12]
+ include/llvm/Transforms/Scalar/LoopFuse.h [12:12]
+ include/llvm/Transforms/Scalar/LoopIdiomRecognize.h [12:12]
+ include/llvm/Transforms/Scalar/LoopInstSimplify.h [12:12]
+ include/llvm/Transforms/Scalar/LoopInterchange.h [12:12]
+ include/llvm/Transforms/Scalar/LoopLoadElimination.h [12:12]
+ include/llvm/Transforms/Scalar/LoopPassManager.h [12:12]
+ include/llvm/Transforms/Scalar/LoopPredication.h [12:12]
+ include/llvm/Transforms/Scalar/LoopReroll.h [12:12]
+ include/llvm/Transforms/Scalar/LoopRotation.h [12:12]
+ include/llvm/Transforms/Scalar/LoopSimplifyCFG.h [12:12]
+ include/llvm/Transforms/Scalar/LoopSink.h [12:12]
+ include/llvm/Transforms/Scalar/LoopStrengthReduce.h [12:12]
+ include/llvm/Transforms/Scalar/LoopUnrollAndJamPass.h [12:12]
+ include/llvm/Transforms/Scalar/LoopUnrollPass.h [12:12]
+ include/llvm/Transforms/Scalar/LoopVersioningLICM.h [12:12]
+ include/llvm/Transforms/Scalar/LowerAtomicPass.h [12:12]
+ include/llvm/Transforms/Scalar/LowerConstantIntrinsics.h [12:12]
+ include/llvm/Transforms/Scalar/LowerExpectIntrinsic.h [12:12]
+ include/llvm/Transforms/Scalar/LowerGuardIntrinsic.h [12:12]
+ include/llvm/Transforms/Scalar/LowerMatrixIntrinsics.h [12:12]
+ include/llvm/Transforms/Scalar/LowerWidenableCondition.h [12:12]
+ include/llvm/Transforms/Scalar/MakeGuardsExplicit.h [12:12]
+ include/llvm/Transforms/Scalar/MemCpyOptimizer.h [12:12]
+ include/llvm/Transforms/Scalar/MergeICmps.h [12:12]
+ include/llvm/Transforms/Scalar/MergedLoadStoreMotion.h [12:12]
+ include/llvm/Transforms/Scalar/NaryReassociate.h [12:12]
+ include/llvm/Transforms/Scalar/NewGVN.h [12:12]
+ include/llvm/Transforms/Scalar/PartiallyInlineLibCalls.h [12:12]
+ include/llvm/Transforms/Scalar/Reassociate.h [12:12]
+ include/llvm/Transforms/Scalar/Reg2Mem.h [12:12]
+ include/llvm/Transforms/Scalar/RewriteStatepointsForGC.h [12:12]
+ include/llvm/Transforms/Scalar/SCCP.h [12:12]
+ include/llvm/Transforms/Scalar/SROA.h [12:12]
+ include/llvm/Transforms/Scalar/ScalarizeMaskedMemIntrin.h [13:13]
+ include/llvm/Transforms/Scalar/Scalarizer.h [12:12]
+ include/llvm/Transforms/Scalar/SeparateConstOffsetFromGEP.h [12:12]
+ include/llvm/Transforms/Scalar/SimpleLoopUnswitch.h [12:12]
+ include/llvm/Transforms/Scalar/SimplifyCFG.h [12:12]
+ include/llvm/Transforms/Scalar/Sink.h [12:12]
+ include/llvm/Transforms/Scalar/SpeculativeExecution.h [12:12]
+ include/llvm/Transforms/Scalar/StraightLineStrengthReduce.h [12:12]
+ include/llvm/Transforms/Scalar/StructurizeCFG.h [12:12]
+ include/llvm/Transforms/Scalar/TLSVariableHoist.h [12:12]
+ include/llvm/Transforms/Scalar/TailRecursionElimination.h [12:12]
+ include/llvm/Transforms/Scalar/WarnMissedTransforms.h [12:12]
+ include/llvm/Transforms/Utils.h [12:12]
+ include/llvm/Transforms/Utils/AMDGPUEmitPrintf.h [12:12]
+ include/llvm/Transforms/Utils/ASanStackFrameLayout.h [12:12]
+ include/llvm/Transforms/Utils/AddDiscriminators.h [12:12]
+ include/llvm/Transforms/Utils/AssumeBundleBuilder.h [12:12]
+ include/llvm/Transforms/Utils/BasicBlockUtils.h [12:12]
+ include/llvm/Transforms/Utils/BreakCriticalEdges.h [12:12]
+ include/llvm/Transforms/Utils/BuildLibCalls.h [12:12]
+ include/llvm/Transforms/Utils/BypassSlowDivision.h [12:12]
+ include/llvm/Transforms/Utils/CallGraphUpdater.h [12:12]
+ include/llvm/Transforms/Utils/CallPromotionUtils.h [12:12]
+ include/llvm/Transforms/Utils/CanonicalizeAliases.h [12:12]
+ include/llvm/Transforms/Utils/CanonicalizeFreezeInLoops.h [12:12]
+ include/llvm/Transforms/Utils/Cloning.h [12:12]
+ include/llvm/Transforms/Utils/CodeExtractor.h [12:12]
+ include/llvm/Transforms/Utils/CodeLayout.h [12:12]
+ include/llvm/Transforms/Utils/CodeMoverUtils.h [12:12]
+ include/llvm/Transforms/Utils/CtorUtils.h [12:12]
+ include/llvm/Transforms/Utils/Debugify.h [12:12]
+ include/llvm/Transforms/Utils/EntryExitInstrumenter.h [12:12]
+ include/llvm/Transforms/Utils/EscapeEnumerator.h [12:12]
+ include/llvm/Transforms/Utils/Evaluator.h [12:12]
+ include/llvm/Transforms/Utils/FixIrreducible.h [12:12]
+ include/llvm/Transforms/Utils/FunctionComparator.h [12:12]
+ include/llvm/Transforms/Utils/FunctionImportUtils.h [12:12]
+ include/llvm/Transforms/Utils/GlobalStatus.h [12:12]
+ include/llvm/Transforms/Utils/GuardUtils.h [12:12]
+ include/llvm/Transforms/Utils/HelloWorld.h [12:12]
+ include/llvm/Transforms/Utils/InjectTLIMappings.h [12:12]
+ include/llvm/Transforms/Utils/InstructionNamer.h [12:12]
+ include/llvm/Transforms/Utils/InstructionWorklist.h [12:12]
+ include/llvm/Transforms/Utils/IntegerDivision.h [12:12]
+ include/llvm/Transforms/Utils/LCSSA.h [12:12]
+ include/llvm/Transforms/Utils/LibCallsShrinkWrap.h [12:12]
+ include/llvm/Transforms/Utils/Local.h [12:12]
+ include/llvm/Transforms/Utils/LoopPeel.h [12:12]
+ include/llvm/Transforms/Utils/LoopRotationUtils.h [12:12]
+ include/llvm/Transforms/Utils/LoopSimplify.h [12:12]
+ include/llvm/Transforms/Utils/LoopUtils.h [12:12]
+ include/llvm/Transforms/Utils/LoopVersioning.h [12:12]
+ include/llvm/Transforms/Utils/LowerAtomic.h [12:12]
+ include/llvm/Transforms/Utils/LowerGlobalDtors.h [12:12]
+ include/llvm/Transforms/Utils/LowerIFunc.h [12:12]
+ include/llvm/Transforms/Utils/LowerInvoke.h [12:12]
+ include/llvm/Transforms/Utils/LowerMemIntrinsics.h [12:12]
+ include/llvm/Transforms/Utils/LowerSwitch.h [12:12]
+ include/llvm/Transforms/Utils/MatrixUtils.h [12:12]
+ include/llvm/Transforms/Utils/Mem2Reg.h [12:12]
+ include/llvm/Transforms/Utils/MemoryOpRemark.h [12:12]
+ include/llvm/Transforms/Utils/MemoryTaggingSupport.h [11:11]
+ include/llvm/Transforms/Utils/MetaRenamer.h [12:12]
+ include/llvm/Transforms/Utils/MisExpect.h [12:12]
+ include/llvm/Transforms/Utils/ModuleUtils.h [12:12]
+ include/llvm/Transforms/Utils/NameAnonGlobals.h [12:12]
+ include/llvm/Transforms/Utils/PredicateInfo.h [12:12]
+ include/llvm/Transforms/Utils/PromoteMemToReg.h [12:12]
+ include/llvm/Transforms/Utils/RelLookupTableConverter.h [12:12]
+ include/llvm/Transforms/Utils/SCCPSolver.h [12:12]
+ include/llvm/Transforms/Utils/SSAUpdater.h [12:12]
+ include/llvm/Transforms/Utils/SSAUpdaterBulk.h [12:12]
+ include/llvm/Transforms/Utils/SSAUpdaterImpl.h [12:12]
+ include/llvm/Transforms/Utils/SampleProfileInference.h [12:12]
+ include/llvm/Transforms/Utils/SampleProfileLoaderBaseImpl.h [12:12]
+ include/llvm/Transforms/Utils/SampleProfileLoaderBaseUtil.h [12:12]
+ include/llvm/Transforms/Utils/SanitizerStats.h [12:12]
+ include/llvm/Transforms/Utils/ScalarEvolutionExpander.h [12:12]
+ include/llvm/Transforms/Utils/SimplifyCFGOptions.h [12:12]
+ include/llvm/Transforms/Utils/SimplifyIndVar.h [12:12]
+ include/llvm/Transforms/Utils/SimplifyLibCalls.h [12:12]
+ include/llvm/Transforms/Utils/SizeOpts.h [12:12]
+ include/llvm/Transforms/Utils/SplitModule.h [12:12]
+ include/llvm/Transforms/Utils/StripGCRelocates.h [12:12]
+ include/llvm/Transforms/Utils/StripNonLineTableDebugInfo.h [12:12]
+ include/llvm/Transforms/Utils/SymbolRewriter.h [12:12]
+ include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h [12:12]
+ include/llvm/Transforms/Utils/UnifyLoopExits.h [12:12]
+ include/llvm/Transforms/Utils/UnrollLoop.h [12:12]
+ include/llvm/Transforms/Utils/VNCoercion.h [12:12]
+ include/llvm/Transforms/Utils/ValueMapper.h [12:12]
+ include/llvm/Transforms/Vectorize.h [12:12]
+ include/llvm/Transforms/Vectorize/LoadStoreVectorizer.h [12:12]
+ include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h [12:12]
+ include/llvm/Transforms/Vectorize/LoopVectorize.h [12:12]
+ include/llvm/Transforms/Vectorize/SLPVectorizer.h [12:12]
+ include/llvm/Transforms/Vectorize/VectorCombine.h [12:12]
+ include/llvm/WindowsDriver/MSVCPaths.h [12:12]
+ include/llvm/WindowsManifest/WindowsManifestMerger.h [12:12]
+ include/llvm/WindowsResource/ResourceProcessor.h [12:12]
+ include/llvm/WindowsResource/ResourceScriptToken.h [12:12]
+ include/llvm/WindowsResource/ResourceScriptTokenList.h [12:12]
+ include/llvm/XRay/BlockIndexer.h [12:12]
+ include/llvm/XRay/BlockPrinter.h [12:12]
+ include/llvm/XRay/BlockVerifier.h [12:12]
+ include/llvm/XRay/FDRLogBuilder.h [12:12]
+ include/llvm/XRay/FDRRecordConsumer.h [12:12]
+ include/llvm/XRay/FDRRecordProducer.h [12:12]
+ include/llvm/XRay/FDRRecords.h [12:12]
+ include/llvm/XRay/FDRTraceExpander.h [12:12]
+ include/llvm/XRay/FDRTraceWriter.h [12:12]
+ include/llvm/XRay/FileHeaderReader.h [12:12]
+ include/llvm/XRay/Graph.h [12:12]
+ include/llvm/XRay/InstrumentationMap.h [12:12]
+ include/llvm/XRay/Profile.h [12:12]
+ include/llvm/XRay/RecordPrinter.h [12:12]
+ include/llvm/XRay/Trace.h [12:12]
+ include/llvm/XRay/XRayRecord.h [12:12]
+ include/llvm/XRay/YAMLXRayRecord.h [12:12]
+ lib/Analysis/AliasAnalysis.cpp [5:5]
+ lib/Analysis/AliasAnalysisEvaluator.cpp [5:5]
+ lib/Analysis/AliasAnalysisSummary.h [5:5]
+ lib/Analysis/AliasSetTracker.cpp [5:5]
+ lib/Analysis/Analysis.cpp [5:5]
+ lib/Analysis/AssumeBundleQueries.cpp [5:5]
+ lib/Analysis/AssumptionCache.cpp [5:5]
+ lib/Analysis/BasicAliasAnalysis.cpp [5:5]
+ lib/Analysis/BlockFrequencyInfo.cpp [5:5]
+ lib/Analysis/BlockFrequencyInfoImpl.cpp [5:5]
+ lib/Analysis/BranchProbabilityInfo.cpp [5:5]
+ lib/Analysis/CFG.cpp [5:5]
+ lib/Analysis/CFGPrinter.cpp [5:5]
+ lib/Analysis/CFGSCCPrinter.cpp [5:5]
+ lib/Analysis/CGSCCPassManager.cpp [5:5]
+ lib/Analysis/CallGraph.cpp [5:5]
+ lib/Analysis/CallGraphSCCPass.cpp [5:5]
+ lib/Analysis/CallPrinter.cpp [5:5]
+ lib/Analysis/CaptureTracking.cpp [5:5]
+ lib/Analysis/CmpInstAnalysis.cpp [5:5]
+ lib/Analysis/CodeMetrics.cpp [5:5]
+ lib/Analysis/ConstantFolding.cpp [5:5]
+ lib/Analysis/ConstraintSystem.cpp [5:5]
+ lib/Analysis/CostModel.cpp [5:5]
+ lib/Analysis/CycleAnalysis.cpp [5:5]
+ lib/Analysis/DDG.cpp [5:5]
+ lib/Analysis/DDGPrinter.cpp [5:5]
+ lib/Analysis/Delinearization.cpp [5:5]
+ lib/Analysis/DemandedBits.cpp [5:5]
+ lib/Analysis/DependenceAnalysis.cpp [5:5]
+ lib/Analysis/DependenceGraphBuilder.cpp [5:5]
+ lib/Analysis/DevelopmentModeInlineAdvisor.cpp [5:5]
+ lib/Analysis/DivergenceAnalysis.cpp [5:5]
+ lib/Analysis/DomPrinter.cpp [5:5]
+ lib/Analysis/DomTreeUpdater.cpp [5:5]
+ lib/Analysis/DominanceFrontier.cpp [5:5]
+ lib/Analysis/EHPersonalities.cpp [5:5]
+ lib/Analysis/FunctionPropertiesAnalysis.cpp [5:5]
+ lib/Analysis/GlobalsModRef.cpp [5:5]
+ lib/Analysis/GuardUtils.cpp [5:5]
+ lib/Analysis/HeatUtils.cpp [5:5]
+ lib/Analysis/IRSimilarityIdentifier.cpp [5:5]
+ lib/Analysis/IVDescriptors.cpp [5:5]
+ lib/Analysis/IVUsers.cpp [5:5]
+ lib/Analysis/ImportedFunctionsInliningStatistics.cpp [5:5]
+ lib/Analysis/IndirectCallPromotionAnalysis.cpp [5:5]
+ lib/Analysis/InlineAdvisor.cpp [5:5]
+ lib/Analysis/InlineCost.cpp [5:5]
+ lib/Analysis/InlineOrder.cpp [5:5]
+ lib/Analysis/InlineSizeEstimatorAnalysis.cpp [5:5]
+ lib/Analysis/InstCount.cpp [5:5]
+ lib/Analysis/InstructionPrecedenceTracking.cpp [5:5]
+ lib/Analysis/InstructionSimplify.cpp [5:5]
+ lib/Analysis/Interval.cpp [5:5]
+ lib/Analysis/IntervalPartition.cpp [5:5]
+ lib/Analysis/LazyBlockFrequencyInfo.cpp [5:5]
+ lib/Analysis/LazyBranchProbabilityInfo.cpp [5:5]
+ lib/Analysis/LazyCallGraph.cpp [5:5]
+ lib/Analysis/LazyValueInfo.cpp [5:5]
+ lib/Analysis/LegacyDivergenceAnalysis.cpp [6:6]
+ lib/Analysis/Lint.cpp [5:5]
+ lib/Analysis/Loads.cpp [5:5]
+ lib/Analysis/Local.cpp [5:5]
+ lib/Analysis/LoopAccessAnalysis.cpp [5:5]
+ lib/Analysis/LoopAnalysisManager.cpp [5:5]
+ lib/Analysis/LoopCacheAnalysis.cpp [7:7]
+ lib/Analysis/LoopInfo.cpp [5:5]
+ lib/Analysis/LoopNestAnalysis.cpp [5:5]
+ lib/Analysis/LoopPass.cpp [5:5]
+ lib/Analysis/LoopUnrollAnalyzer.cpp [5:5]
+ lib/Analysis/MLInlineAdvisor.cpp [5:5]
+ lib/Analysis/MemDepPrinter.cpp [5:5]
+ lib/Analysis/MemDerefPrinter.cpp [5:5]
+ lib/Analysis/MemoryBuiltins.cpp [5:5]
+ lib/Analysis/MemoryDependenceAnalysis.cpp [5:5]
+ lib/Analysis/MemoryLocation.cpp [5:5]
+ lib/Analysis/MemoryProfileInfo.cpp [5:5]
+ lib/Analysis/MemorySSA.cpp [5:5]
+ lib/Analysis/MemorySSAUpdater.cpp [5:5]
+ lib/Analysis/ModelUnderTrainingRunner.cpp [5:5]
+ lib/Analysis/ModuleDebugInfoPrinter.cpp [5:5]
+ lib/Analysis/ModuleSummaryAnalysis.cpp [5:5]
+ lib/Analysis/MustExecute.cpp [5:5]
+ lib/Analysis/NoInferenceModelRunner.cpp [5:5]
+ lib/Analysis/ObjCARCAliasAnalysis.cpp [5:5]
+ lib/Analysis/ObjCARCAnalysisUtils.cpp [5:5]
+ lib/Analysis/ObjCARCInstKind.cpp [5:5]
+ lib/Analysis/OptimizationRemarkEmitter.cpp [5:5]
+ lib/Analysis/OverflowInstAnalysis.cpp [5:5]
+ lib/Analysis/PHITransAddr.cpp [5:5]
+ lib/Analysis/PhiValues.cpp [5:5]
+ lib/Analysis/PostDominators.cpp [5:5]
+ lib/Analysis/ProfileSummaryInfo.cpp [5:5]
+ lib/Analysis/PtrUseVisitor.cpp [5:5]
+ lib/Analysis/RegionInfo.cpp [5:5]
+ lib/Analysis/RegionPass.cpp [5:5]
+ lib/Analysis/RegionPrinter.cpp [5:5]
+ lib/Analysis/ReplayInlineAdvisor.cpp [5:5]
+ lib/Analysis/ScalarEvolution.cpp [5:5]
+ lib/Analysis/ScalarEvolutionAliasAnalysis.cpp [5:5]
+ lib/Analysis/ScalarEvolutionDivision.cpp [5:5]
+ lib/Analysis/ScalarEvolutionNormalization.cpp [5:5]
+ lib/Analysis/ScopedNoAliasAA.cpp [5:5]
+ lib/Analysis/StackLifetime.cpp [5:5]
+ lib/Analysis/StackSafetyAnalysis.cpp [5:5]
+ lib/Analysis/SyncDependenceAnalysis.cpp [5:5]
+ lib/Analysis/SyntheticCountsUtils.cpp [5:5]
+ lib/Analysis/TFLiteUtils.cpp [5:5]
+ lib/Analysis/TargetLibraryInfo.cpp [5:5]
+ lib/Analysis/TargetTransformInfo.cpp [5:5]
+ lib/Analysis/TensorSpec.cpp [5:5]
+ lib/Analysis/Trace.cpp [5:5]
+ lib/Analysis/TrainingLogger.cpp [5:5]
+ lib/Analysis/TypeBasedAliasAnalysis.cpp [5:5]
+ lib/Analysis/TypeMetadataUtils.cpp [5:5]
+ lib/Analysis/UniformityAnalysis.cpp [5:5]
+ lib/Analysis/VFABIDemangling.cpp [5:5]
+ lib/Analysis/ValueLattice.cpp [5:5]
+ lib/Analysis/ValueLatticeUtils.cpp [5:5]
+ lib/Analysis/ValueTracking.cpp [5:5]
+ lib/Analysis/VectorUtils.cpp [5:5]
+ lib/AsmParser/LLLexer.cpp [5:5]
+ lib/AsmParser/LLParser.cpp [5:5]
+ lib/AsmParser/Parser.cpp [5:5]
+ lib/BinaryFormat/AMDGPUMetadataVerifier.cpp [5:5]
+ lib/BinaryFormat/COFF.cpp [5:5]
+ lib/BinaryFormat/DXContainer.cpp [6:6]
+ lib/BinaryFormat/Dwarf.cpp [5:5]
+ lib/BinaryFormat/ELF.cpp [5:5]
+ lib/BinaryFormat/MachO.cpp [5:5]
+ lib/BinaryFormat/Magic.cpp [5:5]
+ lib/BinaryFormat/Minidump.cpp [5:5]
+ lib/BinaryFormat/MsgPackDocument.cpp [5:5]
+ lib/BinaryFormat/MsgPackDocumentYAML.cpp [5:5]
+ lib/BinaryFormat/MsgPackReader.cpp [5:5]
+ lib/BinaryFormat/MsgPackWriter.cpp [5:5]
+ lib/BinaryFormat/Wasm.cpp [5:5]
+ lib/BinaryFormat/XCOFF.cpp [5:5]
+ lib/Bitcode/Reader/BitReader.cpp [5:5]
+ lib/Bitcode/Reader/BitcodeAnalyzer.cpp [5:5]
+ lib/Bitcode/Reader/BitcodeReader.cpp [5:5]
+ lib/Bitcode/Reader/MetadataLoader.cpp [5:5]
+ lib/Bitcode/Reader/MetadataLoader.h [5:5]
+ lib/Bitcode/Reader/ValueList.cpp [5:5]
+ lib/Bitcode/Reader/ValueList.h [5:5]
+ lib/Bitcode/Writer/BitWriter.cpp [5:5]
+ lib/Bitcode/Writer/BitcodeWriter.cpp [5:5]
+ lib/Bitcode/Writer/BitcodeWriterPass.cpp [5:5]
+ lib/Bitcode/Writer/ValueEnumerator.cpp [5:5]
+ lib/Bitcode/Writer/ValueEnumerator.h [5:5]
+ lib/Bitstream/Reader/BitstreamReader.cpp [5:5]
+ lib/CodeGen/AggressiveAntiDepBreaker.cpp [5:5]
+ lib/CodeGen/AggressiveAntiDepBreaker.h [5:5]
+ lib/CodeGen/AllocationOrder.cpp [5:5]
+ lib/CodeGen/AllocationOrder.h [5:5]
+ lib/CodeGen/Analysis.cpp [5:5]
+ lib/CodeGen/AsmPrinter/AIXException.cpp [5:5]
+ lib/CodeGen/AsmPrinter/ARMException.cpp [5:5]
+ lib/CodeGen/AsmPrinter/AccelTable.cpp [5:5]
+ lib/CodeGen/AsmPrinter/AddressPool.cpp [5:5]
+ lib/CodeGen/AsmPrinter/AddressPool.h [5:5]
+ lib/CodeGen/AsmPrinter/AsmPrinter.cpp [5:5]
+ lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp [5:5]
+ lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp [5:5]
+ lib/CodeGen/AsmPrinter/ByteStreamer.h [5:5]
+ lib/CodeGen/AsmPrinter/CodeViewDebug.cpp [5:5]
+ lib/CodeGen/AsmPrinter/CodeViewDebug.h [5:5]
+ lib/CodeGen/AsmPrinter/DIE.cpp [5:5]
+ lib/CodeGen/AsmPrinter/DIEHash.cpp [5:5]
+ lib/CodeGen/AsmPrinter/DIEHash.h [5:5]
+ lib/CodeGen/AsmPrinter/DbgEntityHistoryCalculator.cpp [5:5]
+ lib/CodeGen/AsmPrinter/DebugHandlerBase.cpp [5:5]
+ lib/CodeGen/AsmPrinter/DebugLocEntry.h [5:5]
+ lib/CodeGen/AsmPrinter/DebugLocStream.cpp [5:5]
+ lib/CodeGen/AsmPrinter/DebugLocStream.h [5:5]
+ lib/CodeGen/AsmPrinter/DwarfCFIException.cpp [5:5]
+ lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp [5:5]
+ lib/CodeGen/AsmPrinter/DwarfCompileUnit.h [5:5]
+ lib/CodeGen/AsmPrinter/DwarfDebug.cpp [5:5]
+ lib/CodeGen/AsmPrinter/DwarfDebug.h [5:5]
+ lib/CodeGen/AsmPrinter/DwarfException.h [5:5]
+ lib/CodeGen/AsmPrinter/DwarfExpression.cpp [5:5]
+ lib/CodeGen/AsmPrinter/DwarfExpression.h [5:5]
+ lib/CodeGen/AsmPrinter/DwarfFile.cpp [5:5]
+ lib/CodeGen/AsmPrinter/DwarfFile.h [5:5]
+ lib/CodeGen/AsmPrinter/DwarfStringPool.cpp [5:5]
+ lib/CodeGen/AsmPrinter/DwarfStringPool.h [5:5]
+ lib/CodeGen/AsmPrinter/DwarfUnit.cpp [5:5]
+ lib/CodeGen/AsmPrinter/DwarfUnit.h [5:5]
+ lib/CodeGen/AsmPrinter/EHStreamer.cpp [5:5]
+ lib/CodeGen/AsmPrinter/EHStreamer.h [5:5]
+ lib/CodeGen/AsmPrinter/ErlangGCPrinter.cpp [5:5]
+ lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp [5:5]
+ lib/CodeGen/AsmPrinter/PseudoProbePrinter.cpp [5:5]
+ lib/CodeGen/AsmPrinter/PseudoProbePrinter.h [5:5]
+ lib/CodeGen/AsmPrinter/WasmException.cpp [5:5]
+ lib/CodeGen/AsmPrinter/WasmException.h [5:5]
+ lib/CodeGen/AsmPrinter/WinCFGuard.cpp [5:5]
+ lib/CodeGen/AsmPrinter/WinCFGuard.h [5:5]
+ lib/CodeGen/AsmPrinter/WinException.cpp [5:5]
+ lib/CodeGen/AsmPrinter/WinException.h [5:5]
+ lib/CodeGen/AtomicExpandPass.cpp [5:5]
+ lib/CodeGen/BasicBlockSections.cpp [5:5]
+ lib/CodeGen/BasicBlockSectionsProfileReader.cpp [5:5]
+ lib/CodeGen/BasicTargetTransformInfo.cpp [5:5]
+ lib/CodeGen/BranchFolding.cpp [5:5]
+ lib/CodeGen/BranchFolding.h [5:5]
+ lib/CodeGen/BranchRelaxation.cpp [5:5]
+ lib/CodeGen/BreakFalseDeps.cpp [5:5]
+ lib/CodeGen/CFGuardLongjmp.cpp [5:5]
+ lib/CodeGen/CFIFixup.cpp [5:5]
+ lib/CodeGen/CFIInstrInserter.cpp [5:5]
+ lib/CodeGen/CalcSpillWeights.cpp [5:5]
+ lib/CodeGen/CallingConvLower.cpp [5:5]
+ lib/CodeGen/CodeGen.cpp [5:5]
+ lib/CodeGen/CodeGenCommonISel.cpp [5:5]
+ lib/CodeGen/CodeGenPassBuilder.cpp [5:5]
+ lib/CodeGen/CodeGenPrepare.cpp [5:5]
+ lib/CodeGen/CommandFlags.cpp [5:5]
+ lib/CodeGen/ComplexDeinterleavingPass.cpp [5:5]
+ lib/CodeGen/CriticalAntiDepBreaker.cpp [5:5]
+ lib/CodeGen/CriticalAntiDepBreaker.h [5:5]
+ lib/CodeGen/DFAPacketizer.cpp [5:5]
+ lib/CodeGen/DeadMachineInstructionElim.cpp [5:5]
+ lib/CodeGen/DetectDeadLanes.cpp [5:5]
+ lib/CodeGen/DwarfEHPrepare.cpp [5:5]
+ lib/CodeGen/EHContGuardCatchret.cpp [5:5]
+ lib/CodeGen/EarlyIfConversion.cpp [5:5]
+ lib/CodeGen/EdgeBundles.cpp [5:5]
+ lib/CodeGen/ExecutionDomainFix.cpp [5:5]
+ lib/CodeGen/ExpandLargeDivRem.cpp [5:5]
+ lib/CodeGen/ExpandLargeFpConvert.cpp [5:5]
+ lib/CodeGen/ExpandMemCmp.cpp [5:5]
+ lib/CodeGen/ExpandPostRAPseudos.cpp [5:5]
+ lib/CodeGen/ExpandReductions.cpp [5:5]
+ lib/CodeGen/ExpandVectorPredication.cpp [5:5]
+ lib/CodeGen/FEntryInserter.cpp [5:5]
+ lib/CodeGen/FaultMaps.cpp [5:5]
+ lib/CodeGen/FinalizeISel.cpp [5:5]
+ lib/CodeGen/FixupStatepointCallerSaved.cpp [5:5]
+ lib/CodeGen/FuncletLayout.cpp [5:5]
+ lib/CodeGen/GCMetadata.cpp [5:5]
+ lib/CodeGen/GCMetadataPrinter.cpp [5:5]
+ lib/CodeGen/GCRootLowering.cpp [5:5]
+ lib/CodeGen/GlobalISel/CSEInfo.cpp [5:5]
+ lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp [5:5]
+ lib/CodeGen/GlobalISel/CallLowering.cpp [5:5]
+ lib/CodeGen/GlobalISel/Combiner.cpp [5:5]
+ lib/CodeGen/GlobalISel/CombinerHelper.cpp [5:5]
+ lib/CodeGen/GlobalISel/GISelChangeObserver.cpp [5:5]
+ lib/CodeGen/GlobalISel/GISelKnownBits.cpp [5:5]
+ lib/CodeGen/GlobalISel/GlobalISel.cpp [5:5]
+ lib/CodeGen/GlobalISel/IRTranslator.cpp [5:5]
+ lib/CodeGen/GlobalISel/InlineAsmLowering.cpp [5:5]
+ lib/CodeGen/GlobalISel/InstructionSelect.cpp [5:5]
+ lib/CodeGen/GlobalISel/InstructionSelector.cpp [5:5]
+ lib/CodeGen/GlobalISel/LegacyLegalizerInfo.cpp [5:5]
+ lib/CodeGen/GlobalISel/LegalityPredicates.cpp [5:5]
+ lib/CodeGen/GlobalISel/LegalizeMutations.cpp [5:5]
+ lib/CodeGen/GlobalISel/Legalizer.cpp [5:5]
+ lib/CodeGen/GlobalISel/LegalizerHelper.cpp [5:5]
+ lib/CodeGen/GlobalISel/LegalizerInfo.cpp [5:5]
+ lib/CodeGen/GlobalISel/LoadStoreOpt.cpp [5:5]
+ lib/CodeGen/GlobalISel/Localizer.cpp [5:5]
+ lib/CodeGen/GlobalISel/LostDebugLocObserver.cpp [5:5]
+ lib/CodeGen/GlobalISel/MachineIRBuilder.cpp [5:5]
+ lib/CodeGen/GlobalISel/RegBankSelect.cpp [5:5]
+ lib/CodeGen/GlobalISel/Utils.cpp [5:5]
+ lib/CodeGen/GlobalMerge.cpp [5:5]
+ lib/CodeGen/HardwareLoops.cpp [5:5]
+ lib/CodeGen/IfConversion.cpp [5:5]
+ lib/CodeGen/ImplicitNullChecks.cpp [5:5]
+ lib/CodeGen/IndirectBrExpandPass.cpp [5:5]
+ lib/CodeGen/InlineSpiller.cpp [5:5]
+ lib/CodeGen/InterferenceCache.cpp [5:5]
+ lib/CodeGen/InterferenceCache.h [5:5]
+ lib/CodeGen/InterleavedAccessPass.cpp [5:5]
+ lib/CodeGen/InterleavedLoadCombinePass.cpp [5:5]
+ lib/CodeGen/IntrinsicLowering.cpp [5:5]
+ lib/CodeGen/JMCInstrumenter.cpp [5:5]
+ lib/CodeGen/LLVMTargetMachine.cpp [5:5]
+ lib/CodeGen/LatencyPriorityQueue.cpp [5:5]
+ lib/CodeGen/LexicalScopes.cpp [5:5]
+ lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp [5:5]
+ lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.h [5:5]
+ lib/CodeGen/LiveDebugValues/LiveDebugValues.cpp [5:5]
+ lib/CodeGen/LiveDebugValues/LiveDebugValues.h [5:5]
+ lib/CodeGen/LiveDebugValues/VarLocBasedImpl.cpp [5:5]
+ lib/CodeGen/LiveDebugVariables.cpp [5:5]
+ lib/CodeGen/LiveDebugVariables.h [5:5]
+ lib/CodeGen/LiveInterval.cpp [5:5]
+ lib/CodeGen/LiveIntervalCalc.cpp [5:5]
+ lib/CodeGen/LiveIntervalUnion.cpp [5:5]
+ lib/CodeGen/LiveIntervals.cpp [5:5]
+ lib/CodeGen/LivePhysRegs.cpp [5:5]
+ lib/CodeGen/LiveRangeCalc.cpp [5:5]
+ lib/CodeGen/LiveRangeEdit.cpp [5:5]
+ lib/CodeGen/LiveRangeShrink.cpp [5:5]
+ lib/CodeGen/LiveRangeUtils.h [5:5]
+ lib/CodeGen/LiveRegMatrix.cpp [5:5]
+ lib/CodeGen/LiveRegUnits.cpp [5:5]
+ lib/CodeGen/LiveStacks.cpp [5:5]
+ lib/CodeGen/LiveVariables.cpp [5:5]
+ lib/CodeGen/LocalStackSlotAllocation.cpp [5:5]
+ lib/CodeGen/LoopTraversal.cpp [5:5]
+ lib/CodeGen/LowLevelType.cpp [5:5]
+ lib/CodeGen/LowerEmuTLS.cpp [5:5]
+ lib/CodeGen/MBFIWrapper.cpp [5:5]
+ lib/CodeGen/MIRCanonicalizerPass.cpp [5:5]
+ lib/CodeGen/MIRFSDiscriminator.cpp [5:5]
+ lib/CodeGen/MIRNamerPass.cpp [5:5]
+ lib/CodeGen/MIRParser/MILexer.cpp [5:5]
+ lib/CodeGen/MIRParser/MILexer.h [5:5]
+ lib/CodeGen/MIRParser/MIParser.cpp [5:5]
+ lib/CodeGen/MIRParser/MIRParser.cpp [5:5]
+ lib/CodeGen/MIRPrinter.cpp [5:5]
+ lib/CodeGen/MIRPrintingPass.cpp [5:5]
+ lib/CodeGen/MIRSampleProfile.cpp [5:5]
+ lib/CodeGen/MIRVRegNamerUtils.cpp [5:5]
+ lib/CodeGen/MIRVRegNamerUtils.h [6:6]
+ lib/CodeGen/MIRYamlMapping.cpp [5:5]
+ lib/CodeGen/MLRegallocEvictAdvisor.cpp [5:5]
+ lib/CodeGen/MLRegallocEvictAdvisor.h [5:5]
+ lib/CodeGen/MLRegallocPriorityAdvisor.cpp [5:5]
+ lib/CodeGen/MachineBasicBlock.cpp [5:5]
+ lib/CodeGen/MachineBlockFrequencyInfo.cpp [5:5]
+ lib/CodeGen/MachineBlockPlacement.cpp [5:5]
+ lib/CodeGen/MachineBranchProbabilityInfo.cpp [5:5]
+ lib/CodeGen/MachineCFGPrinter.cpp [5:5]
+ lib/CodeGen/MachineCSE.cpp [5:5]
+ lib/CodeGen/MachineCheckDebugify.cpp [5:5]
+ lib/CodeGen/MachineCombiner.cpp [5:5]
+ lib/CodeGen/MachineCopyPropagation.cpp [5:5]
+ lib/CodeGen/MachineCycleAnalysis.cpp [5:5]
+ lib/CodeGen/MachineDebugify.cpp [5:5]
+ lib/CodeGen/MachineDominanceFrontier.cpp [5:5]
+ lib/CodeGen/MachineDominators.cpp [5:5]
+ lib/CodeGen/MachineFrameInfo.cpp [5:5]
+ lib/CodeGen/MachineFunction.cpp [5:5]
+ lib/CodeGen/MachineFunctionPass.cpp [5:5]
+ lib/CodeGen/MachineFunctionPrinterPass.cpp [5:5]
+ lib/CodeGen/MachineFunctionSplitter.cpp [5:5]
+ lib/CodeGen/MachineInstr.cpp [5:5]
+ lib/CodeGen/MachineInstrBundle.cpp [5:5]
+ lib/CodeGen/MachineLICM.cpp [5:5]
+ lib/CodeGen/MachineLateInstrsCleanup.cpp [5:5]
+ lib/CodeGen/MachineLoopInfo.cpp [5:5]
+ lib/CodeGen/MachineLoopUtils.cpp [5:5]
+ lib/CodeGen/MachineModuleInfo.cpp [5:5]
+ lib/CodeGen/MachineModuleInfoImpls.cpp [5:5]
+ lib/CodeGen/MachineModuleSlotTracker.cpp [5:5]
+ lib/CodeGen/MachineOperand.cpp [5:5]
+ lib/CodeGen/MachineOutliner.cpp [5:5]
+ lib/CodeGen/MachinePassManager.cpp [5:5]
+ lib/CodeGen/MachinePipeliner.cpp [5:5]
+ lib/CodeGen/MachinePostDominators.cpp [5:5]
+ lib/CodeGen/MachineRegionInfo.cpp [5:5]
+ lib/CodeGen/MachineRegisterInfo.cpp [5:5]
+ lib/CodeGen/MachineSSAContext.cpp [5:5]
+ lib/CodeGen/MachineSSAUpdater.cpp [5:5]
+ lib/CodeGen/MachineScheduler.cpp [5:5]
+ lib/CodeGen/MachineSink.cpp [5:5]
+ lib/CodeGen/MachineSizeOpts.cpp [5:5]
+ lib/CodeGen/MachineStableHash.cpp [5:5]
+ lib/CodeGen/MachineStripDebug.cpp [5:5]
+ lib/CodeGen/MachineTraceMetrics.cpp [5:5]
+ lib/CodeGen/MachineUniformityAnalysis.cpp [5:5]
+ lib/CodeGen/MachineVerifier.cpp [5:5]
+ lib/CodeGen/MacroFusion.cpp [5:5]
+ lib/CodeGen/ModuloSchedule.cpp [5:5]
+ lib/CodeGen/MultiHazardRecognizer.cpp [5:5]
+ lib/CodeGen/NonRelocatableStringpool.cpp [5:5]
+ lib/CodeGen/OptimizePHIs.cpp [5:5]
+ lib/CodeGen/PHIElimination.cpp [5:5]
+ lib/CodeGen/PHIEliminationUtils.cpp [5:5]
+ lib/CodeGen/PHIEliminationUtils.h [5:5]
+ lib/CodeGen/ParallelCG.cpp [5:5]
+ lib/CodeGen/PatchableFunction.cpp [5:5]
+ lib/CodeGen/PeepholeOptimizer.cpp [5:5]
+ lib/CodeGen/PostRAHazardRecognizer.cpp [5:5]
+ lib/CodeGen/PostRASchedulerList.cpp [5:5]
+ lib/CodeGen/PreISelIntrinsicLowering.cpp [5:5]
+ lib/CodeGen/ProcessImplicitDefs.cpp [5:5]
+ lib/CodeGen/PrologEpilogInserter.cpp [5:5]
+ lib/CodeGen/PseudoProbeInserter.cpp [5:5]
+ lib/CodeGen/PseudoSourceValue.cpp [5:5]
+ lib/CodeGen/RDFGraph.cpp [5:5]
+ lib/CodeGen/RDFLiveness.cpp [5:5]
+ lib/CodeGen/RDFRegisters.cpp [5:5]
+ lib/CodeGen/ReachingDefAnalysis.cpp [5:5]
+ lib/CodeGen/RegAllocBase.cpp [5:5]
+ lib/CodeGen/RegAllocBase.h [5:5]
+ lib/CodeGen/RegAllocBasic.cpp [5:5]
+ lib/CodeGen/RegAllocEvictionAdvisor.cpp [5:5]
+ lib/CodeGen/RegAllocEvictionAdvisor.h [5:5]
+ lib/CodeGen/RegAllocFast.cpp [5:5]
+ lib/CodeGen/RegAllocGreedy.cpp [5:5]
+ lib/CodeGen/RegAllocGreedy.h [5:5]
+ lib/CodeGen/RegAllocPBQP.cpp [5:5]
+ lib/CodeGen/RegAllocPriorityAdvisor.cpp [5:5]
+ lib/CodeGen/RegAllocPriorityAdvisor.h [5:5]
+ lib/CodeGen/RegAllocScore.cpp [5:5]
+ lib/CodeGen/RegAllocScore.h [5:5]
+ lib/CodeGen/RegUsageInfoCollector.cpp [5:5]
+ lib/CodeGen/RegUsageInfoPropagate.cpp [5:5]
+ lib/CodeGen/RegisterBank.cpp [5:5]
+ lib/CodeGen/RegisterBankInfo.cpp [5:5]
+ lib/CodeGen/RegisterClassInfo.cpp [5:5]
+ lib/CodeGen/RegisterCoalescer.cpp [5:5]
+ lib/CodeGen/RegisterCoalescer.h [5:5]
+ lib/CodeGen/RegisterPressure.cpp [5:5]
+ lib/CodeGen/RegisterScavenging.cpp [5:5]
+ lib/CodeGen/RegisterUsageInfo.cpp [5:5]
+ lib/CodeGen/RemoveRedundantDebugValues.cpp [5:5]
+ lib/CodeGen/RenameIndependentSubregs.cpp [5:5]
+ lib/CodeGen/ReplaceWithVeclib.cpp [5:5]
+ lib/CodeGen/ResetMachineFunctionPass.cpp [5:5]
+ lib/CodeGen/SafeStack.cpp [5:5]
+ lib/CodeGen/SafeStackLayout.cpp [5:5]
+ lib/CodeGen/SafeStackLayout.h [5:5]
+ lib/CodeGen/SanitizerBinaryMetadata.cpp [6:6]
+ lib/CodeGen/ScheduleDAG.cpp [5:5]
+ lib/CodeGen/ScheduleDAGInstrs.cpp [5:5]
+ lib/CodeGen/ScheduleDAGPrinter.cpp [5:5]
+ lib/CodeGen/ScoreboardHazardRecognizer.cpp [5:5]
+ lib/CodeGen/SelectOptimize.cpp [5:5]
+ lib/CodeGen/SelectionDAG/DAGCombiner.cpp [5:5]
+ lib/CodeGen/SelectionDAG/FastISel.cpp [5:5]
+ lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp [5:5]
+ lib/CodeGen/SelectionDAG/InstrEmitter.cpp [5:5]
+ lib/CodeGen/SelectionDAG/InstrEmitter.h [5:5]
+ lib/CodeGen/SelectionDAG/LegalizeDAG.cpp [5:5]
+ lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp [5:5]
+ lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp [5:5]
+ lib/CodeGen/SelectionDAG/LegalizeTypes.cpp [5:5]
+ lib/CodeGen/SelectionDAG/LegalizeTypes.h [5:5]
+ lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp [5:5]
+ lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp [5:5]
+ lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp [5:5]
+ lib/CodeGen/SelectionDAG/ResourcePriorityQueue.cpp [5:5]
+ lib/CodeGen/SelectionDAG/SDNodeDbgValue.h [5:5]
+ lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp [5:5]
+ lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp [5:5]
+ lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp [5:5]
+ lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h [5:5]
+ lib/CodeGen/SelectionDAG/ScheduleDAGVLIW.cpp [5:5]
+ lib/CodeGen/SelectionDAG/SelectionDAG.cpp [5:5]
+ lib/CodeGen/SelectionDAG/SelectionDAGAddressAnalysis.cpp [5:5]
+ lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp [5:5]
+ lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h [5:5]
+ lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp [5:5]
+ lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp [5:5]
+ lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp [5:5]
+ lib/CodeGen/SelectionDAG/SelectionDAGTargetInfo.cpp [5:5]
+ lib/CodeGen/SelectionDAG/StatepointLowering.cpp [5:5]
+ lib/CodeGen/SelectionDAG/StatepointLowering.h [5:5]
+ lib/CodeGen/SelectionDAG/TargetLowering.cpp [5:5]
+ lib/CodeGen/ShadowStackGCLowering.cpp [5:5]
+ lib/CodeGen/ShrinkWrap.cpp [5:5]
+ lib/CodeGen/SjLjEHPrepare.cpp [5:5]
+ lib/CodeGen/SlotIndexes.cpp [5:5]
+ lib/CodeGen/SpillPlacement.cpp [5:5]
+ lib/CodeGen/SpillPlacement.h [5:5]
+ lib/CodeGen/SplitKit.cpp [5:5]
+ lib/CodeGen/SplitKit.h [5:5]
+ lib/CodeGen/StackColoring.cpp [5:5]
+ lib/CodeGen/StackFrameLayoutAnalysisPass.cpp [6:6]
+ lib/CodeGen/StackMapLivenessAnalysis.cpp [5:5]
+ lib/CodeGen/StackMaps.cpp [5:5]
+ lib/CodeGen/StackProtector.cpp [5:5]
+ lib/CodeGen/StackSlotColoring.cpp [5:5]
+ lib/CodeGen/SwiftErrorValueTracking.cpp [5:5]
+ lib/CodeGen/SwitchLoweringUtils.cpp [5:5]
+ lib/CodeGen/TailDuplication.cpp [5:5]
+ lib/CodeGen/TailDuplicator.cpp [5:5]
+ lib/CodeGen/TargetFrameLoweringImpl.cpp [5:5]
+ lib/CodeGen/TargetInstrInfo.cpp [5:5]
+ lib/CodeGen/TargetLoweringBase.cpp [5:5]
+ lib/CodeGen/TargetLoweringObjectFileImpl.cpp [5:5]
+ lib/CodeGen/TargetOptionsImpl.cpp [5:5]
+ lib/CodeGen/TargetPassConfig.cpp [5:5]
+ lib/CodeGen/TargetRegisterInfo.cpp [5:5]
+ lib/CodeGen/TargetSchedule.cpp [5:5]
+ lib/CodeGen/TargetSubtargetInfo.cpp [5:5]
+ lib/CodeGen/TwoAddressInstructionPass.cpp [5:5]
+ lib/CodeGen/TypePromotion.cpp [5:5]
+ lib/CodeGen/UnreachableBlockElim.cpp [5:5]
+ lib/CodeGen/VLIWMachineScheduler.cpp [5:5]
+ lib/CodeGen/ValueTypes.cpp [5:5]
+ lib/CodeGen/VirtRegMap.cpp [5:5]
+ lib/CodeGen/WasmEHPrepare.cpp [5:5]
+ lib/CodeGen/WinEHPrepare.cpp [5:5]
+ lib/CodeGen/XRayInstrumentation.cpp [5:5]
+ lib/DWARFLinker/DWARFLinker.cpp [5:5]
+ lib/DWARFLinker/DWARFLinkerCompileUnit.cpp [5:5]
+ lib/DWARFLinker/DWARFLinkerDeclContext.cpp [5:5]
+ lib/DWARFLinker/DWARFStreamer.cpp [5:5]
+ lib/DWARFLinkerParallel/DWARFLinker.cpp [5:5]
+ lib/DWP/DWP.cpp [5:5]
+ lib/DebugInfo/CodeView/AppendingTypeTableBuilder.cpp [5:5]
+ lib/DebugInfo/CodeView/CVSymbolVisitor.cpp [5:5]
+ lib/DebugInfo/CodeView/CVTypeVisitor.cpp [5:5]
+ lib/DebugInfo/CodeView/CodeViewError.cpp [5:5]
+ lib/DebugInfo/CodeView/CodeViewRecordIO.cpp [5:5]
+ lib/DebugInfo/CodeView/DebugChecksumsSubsection.cpp [5:5]
+ lib/DebugInfo/CodeView/DebugCrossExSubsection.cpp [5:5]
+ lib/DebugInfo/CodeView/DebugCrossImpSubsection.cpp [5:5]
+ lib/DebugInfo/CodeView/DebugFrameDataSubsection.cpp [5:5]
+ lib/DebugInfo/CodeView/DebugInlineeLinesSubsection.cpp [5:5]
+ lib/DebugInfo/CodeView/DebugLinesSubsection.cpp [5:5]
+ lib/DebugInfo/CodeView/DebugStringTableSubsection.cpp [5:5]
+ lib/DebugInfo/CodeView/DebugSubsection.cpp [5:5]
+ lib/DebugInfo/CodeView/DebugSubsectionRecord.cpp [5:5]
+ lib/DebugInfo/CodeView/DebugSubsectionVisitor.cpp [5:5]
+ lib/DebugInfo/CodeView/DebugSymbolRVASubsection.cpp [5:5]
+ lib/DebugInfo/CodeView/DebugSymbolsSubsection.cpp [5:5]
+ lib/DebugInfo/CodeView/EnumTables.cpp [5:5]
+ lib/DebugInfo/CodeView/Formatters.cpp [5:5]
+ lib/DebugInfo/CodeView/GlobalTypeTableBuilder.cpp [5:5]
+ lib/DebugInfo/CodeView/LazyRandomTypeCollection.cpp [5:5]
+ lib/DebugInfo/CodeView/Line.cpp [5:5]
+ lib/DebugInfo/CodeView/MergingTypeTableBuilder.cpp [5:5]
+ lib/DebugInfo/CodeView/RecordName.cpp [5:5]
+ lib/DebugInfo/CodeView/RecordSerialization.cpp [5:5]
+ lib/DebugInfo/CodeView/SimpleTypeSerializer.cpp [5:5]
+ lib/DebugInfo/CodeView/StringsAndChecksums.cpp [5:5]
+ lib/DebugInfo/CodeView/SymbolDumper.cpp [5:5]
+ lib/DebugInfo/CodeView/SymbolRecordHelpers.cpp [5:5]
+ lib/DebugInfo/CodeView/SymbolRecordMapping.cpp [5:5]
+ lib/DebugInfo/CodeView/SymbolSerializer.cpp [5:5]
+ lib/DebugInfo/CodeView/TypeDumpVisitor.cpp [5:5]
+ lib/DebugInfo/CodeView/TypeHashing.cpp [5:5]
+ lib/DebugInfo/CodeView/TypeIndex.cpp [5:5]
+ lib/DebugInfo/CodeView/TypeIndexDiscovery.cpp [5:5]
+ lib/DebugInfo/CodeView/TypeRecordHelpers.cpp [5:5]
+ lib/DebugInfo/CodeView/TypeRecordMapping.cpp [5:5]
+ lib/DebugInfo/CodeView/TypeStreamMerger.cpp [5:5]
+ lib/DebugInfo/CodeView/TypeTableCollection.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFAbbreviationDeclaration.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFAcceleratorTable.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFAddressRange.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFCompileUnit.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFContext.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFDataExtractor.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFDebugAbbrev.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFDebugAddr.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFDebugArangeSet.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFDebugAranges.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFDebugFrame.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFDebugInfoEntry.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFDebugLine.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFDebugLoc.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFDebugMacro.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFDebugPubTable.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFDebugRangeList.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFDebugRnglists.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFDie.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFExpression.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFFormValue.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFGdbIndex.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFListTable.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFLocationExpression.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFTypeUnit.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFUnit.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFUnitIndex.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFVerifier.cpp [5:5]
+ lib/DebugInfo/GSYM/DwarfTransformer.cpp [5:5]
+ lib/DebugInfo/GSYM/ExtractRanges.cpp [5:5]
+ lib/DebugInfo/GSYM/FileWriter.cpp [5:5]
+ lib/DebugInfo/GSYM/FunctionInfo.cpp [5:5]
+ lib/DebugInfo/GSYM/GsymCreator.cpp [5:5]
+ lib/DebugInfo/GSYM/GsymReader.cpp [5:5]
+ lib/DebugInfo/GSYM/Header.cpp [5:5]
+ lib/DebugInfo/GSYM/InlineInfo.cpp [5:5]
+ lib/DebugInfo/GSYM/LineTable.cpp [5:5]
+ lib/DebugInfo/GSYM/LookupResult.cpp [5:5]
+ lib/DebugInfo/GSYM/ObjectFileTransformer.cpp [5:5]
+ lib/DebugInfo/MSF/MSFBuilder.cpp [5:5]
+ lib/DebugInfo/MSF/MSFCommon.cpp [5:5]
+ lib/DebugInfo/MSF/MSFError.cpp [5:5]
+ lib/DebugInfo/MSF/MappedBlockStream.cpp [5:5]
+ lib/DebugInfo/PDB/GenericError.cpp [5:5]
+ lib/DebugInfo/PDB/IPDBSourceFile.cpp [5:5]
+ lib/DebugInfo/PDB/Native/DbiModuleDescriptor.cpp [5:5]
+ lib/DebugInfo/PDB/Native/DbiModuleDescriptorBuilder.cpp [5:5]
+ lib/DebugInfo/PDB/Native/DbiModuleList.cpp [5:5]
+ lib/DebugInfo/PDB/Native/DbiStream.cpp [5:5]
+ lib/DebugInfo/PDB/Native/DbiStreamBuilder.cpp [5:5]
+ lib/DebugInfo/PDB/Native/EnumTables.cpp [5:5]
+ lib/DebugInfo/PDB/Native/FormatUtil.cpp [5:5]
+ lib/DebugInfo/PDB/Native/GSIStreamBuilder.cpp [5:5]
+ lib/DebugInfo/PDB/Native/GlobalsStream.cpp [5:5]
+ lib/DebugInfo/PDB/Native/Hash.cpp [5:5]
+ lib/DebugInfo/PDB/Native/HashTable.cpp [5:5]
+ lib/DebugInfo/PDB/Native/InfoStream.cpp [5:5]
+ lib/DebugInfo/PDB/Native/InfoStreamBuilder.cpp [5:5]
+ lib/DebugInfo/PDB/Native/InjectedSourceStream.cpp [5:5]
+ lib/DebugInfo/PDB/Native/InputFile.cpp [5:5]
+ lib/DebugInfo/PDB/Native/LinePrinter.cpp [5:5]
+ lib/DebugInfo/PDB/Native/ModuleDebugStream.cpp [5:5]
+ lib/DebugInfo/PDB/Native/NamedStreamMap.cpp [5:5]
+ lib/DebugInfo/PDB/Native/NativeCompilandSymbol.cpp [5:5]
+ lib/DebugInfo/PDB/Native/NativeEnumGlobals.cpp [5:5]
+ lib/DebugInfo/PDB/Native/NativeEnumInjectedSources.cpp [5:5]
+ lib/DebugInfo/PDB/Native/NativeEnumLineNumbers.cpp [5:5]
+ lib/DebugInfo/PDB/Native/NativeEnumModules.cpp [5:5]
+ lib/DebugInfo/PDB/Native/NativeEnumSymbols.cpp [5:5]
+ lib/DebugInfo/PDB/Native/NativeEnumTypes.cpp [5:5]
+ lib/DebugInfo/PDB/Native/NativeExeSymbol.cpp [5:5]
+ lib/DebugInfo/PDB/Native/NativeFunctionSymbol.cpp [5:5]
+ lib/DebugInfo/PDB/Native/NativeInlineSiteSymbol.cpp [5:5]
+ lib/DebugInfo/PDB/Native/NativeLineNumber.cpp [5:5]
+ lib/DebugInfo/PDB/Native/NativePublicSymbol.cpp [5:5]
+ lib/DebugInfo/PDB/Native/NativeRawSymbol.cpp [5:5]
+ lib/DebugInfo/PDB/Native/NativeSession.cpp [5:5]
+ lib/DebugInfo/PDB/Native/NativeSourceFile.cpp [5:5]
+ lib/DebugInfo/PDB/Native/NativeSymbolEnumerator.cpp [5:5]
+ lib/DebugInfo/PDB/Native/NativeTypeArray.cpp [5:5]
+ lib/DebugInfo/PDB/Native/NativeTypeBuiltin.cpp [5:5]
+ lib/DebugInfo/PDB/Native/NativeTypeEnum.cpp [5:5]
+ lib/DebugInfo/PDB/Native/NativeTypeFunctionSig.cpp [5:5]
+ lib/DebugInfo/PDB/Native/NativeTypePointer.cpp [5:5]
+ lib/DebugInfo/PDB/Native/NativeTypeUDT.cpp [5:5]
+ lib/DebugInfo/PDB/Native/PDBFile.cpp [5:5]
+ lib/DebugInfo/PDB/Native/PDBFileBuilder.cpp [5:5]
+ lib/DebugInfo/PDB/Native/PDBStringTable.cpp [5:5]
+ lib/DebugInfo/PDB/Native/PDBStringTableBuilder.cpp [5:5]
+ lib/DebugInfo/PDB/Native/PublicsStream.cpp [5:5]
+ lib/DebugInfo/PDB/Native/SymbolStream.cpp [5:5]
+ lib/DebugInfo/PDB/Native/TpiHashing.cpp [5:5]
+ lib/DebugInfo/PDB/Native/TpiStream.cpp [5:5]
+ lib/DebugInfo/PDB/Native/TpiStreamBuilder.cpp [5:5]
+ lib/DebugInfo/PDB/PDB.cpp [5:5]
+ lib/DebugInfo/PDB/PDBContext.cpp [5:5]
+ lib/DebugInfo/PDB/PDBExtras.cpp [5:5]
+ lib/DebugInfo/PDB/PDBInterfaceAnchors.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymDumper.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbol.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolAnnotation.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolBlock.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolCompiland.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolCompilandDetails.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolCompilandEnv.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolCustom.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolData.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolExe.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolFunc.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolFuncDebugEnd.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolFuncDebugStart.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolLabel.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolPublicSymbol.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolThunk.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolTypeArray.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolTypeBaseClass.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolTypeBuiltin.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolTypeCustom.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolTypeDimension.cpp [6:6]
+ lib/DebugInfo/PDB/PDBSymbolTypeEnum.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolTypeFriend.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolTypeFunctionArg.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolTypeFunctionSig.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolTypeManaged.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolTypePointer.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolTypeTypedef.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolTypeUDT.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolTypeVTable.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolTypeVTableShape.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolUnknown.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolUsingNamespace.cpp [5:5]
+ lib/DebugInfo/PDB/UDTLayout.cpp [5:5]
+ lib/DebugInfo/Symbolize/DIPrinter.cpp [5:5]
+ lib/DebugInfo/Symbolize/Markup.cpp [5:5]
+ lib/DebugInfo/Symbolize/MarkupFilter.cpp [5:5]
+ lib/DebugInfo/Symbolize/SymbolizableObjectFile.cpp [5:5]
+ lib/DebugInfo/Symbolize/Symbolize.cpp [5:5]
+ lib/Debuginfod/BuildIDFetcher.cpp [5:5]
+ lib/Debuginfod/Debuginfod.cpp [5:5]
+ lib/Debuginfod/HTTPClient.cpp [5:5]
+ lib/Debuginfod/HTTPServer.cpp [5:5]
+ lib/Demangle/DLangDemangle.cpp [5:5]
+ lib/Demangle/Demangle.cpp [5:5]
+ lib/Demangle/ItaniumDemangle.cpp [5:5]
+ lib/Demangle/MicrosoftDemangle.cpp [5:5]
+ lib/Demangle/MicrosoftDemangleNodes.cpp [5:5]
+ lib/Demangle/RustDemangle.cpp [5:5]
+ lib/ExecutionEngine/ExecutionEngine.cpp [5:5]
+ lib/ExecutionEngine/ExecutionEngineBindings.cpp [5:5]
+ lib/ExecutionEngine/GDBRegistrationListener.cpp [5:5]
+ lib/ExecutionEngine/Interpreter/Execution.cpp [5:5]
+ lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp [5:5]
+ lib/ExecutionEngine/Interpreter/Interpreter.cpp [5:5]
+ lib/ExecutionEngine/Interpreter/Interpreter.h [5:5]
+ lib/ExecutionEngine/JITLink/COFF.cpp [5:5]
+ lib/ExecutionEngine/JITLink/COFFDirectiveParser.cpp [5:5]
+ lib/ExecutionEngine/JITLink/COFFDirectiveParser.h [5:5]
+ lib/ExecutionEngine/JITLink/COFFLinkGraphBuilder.cpp [5:5]
+ lib/ExecutionEngine/JITLink/COFFLinkGraphBuilder.h [5:5]
+ lib/ExecutionEngine/JITLink/COFF_x86_64.cpp [5:5]
+ lib/ExecutionEngine/JITLink/DWARFRecordSectionSplitter.cpp [5:5]
+ lib/ExecutionEngine/JITLink/DefineExternalSectionStartAndEndSymbols.h [5:5]
+ lib/ExecutionEngine/JITLink/EHFrameSupport.cpp [5:5]
+ lib/ExecutionEngine/JITLink/EHFrameSupportImpl.h [5:5]
+ lib/ExecutionEngine/JITLink/ELF.cpp [5:5]
+ lib/ExecutionEngine/JITLink/ELFLinkGraphBuilder.cpp [5:5]
+ lib/ExecutionEngine/JITLink/ELFLinkGraphBuilder.h [5:5]
+ lib/ExecutionEngine/JITLink/ELF_aarch64.cpp [5:5]
+ lib/ExecutionEngine/JITLink/ELF_i386.cpp [5:5]
+ lib/ExecutionEngine/JITLink/ELF_loongarch.cpp [5:5]
+ lib/ExecutionEngine/JITLink/ELF_riscv.cpp [5:5]
+ lib/ExecutionEngine/JITLink/ELF_x86_64.cpp [5:5]
+ lib/ExecutionEngine/JITLink/JITLink.cpp [5:5]
+ lib/ExecutionEngine/JITLink/JITLinkGeneric.cpp [5:5]
+ lib/ExecutionEngine/JITLink/JITLinkGeneric.h [5:5]
+ lib/ExecutionEngine/JITLink/JITLinkMemoryManager.cpp [5:5]
+ lib/ExecutionEngine/JITLink/MachO.cpp [5:5]
+ lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.cpp [5:5]
+ lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.h [5:5]
+ lib/ExecutionEngine/JITLink/MachO_arm64.cpp [5:5]
+ lib/ExecutionEngine/JITLink/MachO_x86_64.cpp [5:5]
+ lib/ExecutionEngine/JITLink/PerGraphGOTAndPLTStubsBuilder.h [5:5]
+ lib/ExecutionEngine/JITLink/SEHFrameSupport.h [5:5]
+ lib/ExecutionEngine/JITLink/aarch64.cpp [5:5]
+ lib/ExecutionEngine/JITLink/i386.cpp [5:5]
+ lib/ExecutionEngine/JITLink/loongarch.cpp [5:5]
+ lib/ExecutionEngine/JITLink/riscv.cpp [5:5]
+ lib/ExecutionEngine/JITLink/x86_64.cpp [5:5]
+ lib/ExecutionEngine/MCJIT/MCJIT.cpp [5:5]
+ lib/ExecutionEngine/MCJIT/MCJIT.h [5:5]
+ lib/ExecutionEngine/Orc/COFFPlatform.cpp [5:5]
+ lib/ExecutionEngine/Orc/COFFVCRuntimeSupport.cpp [5:5]
+ lib/ExecutionEngine/Orc/CompileOnDemandLayer.cpp [5:5]
+ lib/ExecutionEngine/Orc/CompileUtils.cpp [5:5]
+ lib/ExecutionEngine/Orc/Core.cpp [5:5]
+ lib/ExecutionEngine/Orc/DebugObjectManagerPlugin.cpp [5:5]
+ lib/ExecutionEngine/Orc/DebugUtils.cpp [5:5]
+ lib/ExecutionEngine/Orc/DebuggerSupportPlugin.cpp [5:5]
+ lib/ExecutionEngine/Orc/ELFNixPlatform.cpp [5:5]
+ lib/ExecutionEngine/Orc/EPCDebugObjectRegistrar.cpp [5:5]
+ lib/ExecutionEngine/Orc/EPCDynamicLibrarySearchGenerator.cpp [5:5]
+ lib/ExecutionEngine/Orc/EPCEHFrameRegistrar.cpp [5:5]
+ lib/ExecutionEngine/Orc/EPCGenericDylibManager.cpp [5:5]
+ lib/ExecutionEngine/Orc/EPCGenericJITLinkMemoryManager.cpp [5:5]
+ lib/ExecutionEngine/Orc/EPCGenericRTDyldMemoryManager.cpp [5:5]
+ lib/ExecutionEngine/Orc/EPCIndirectionUtils.cpp [5:5]
+ lib/ExecutionEngine/Orc/ExecutionUtils.cpp [5:5]
+ lib/ExecutionEngine/Orc/ExecutorProcessControl.cpp [5:5]
+ lib/ExecutionEngine/Orc/IRCompileLayer.cpp [5:5]
+ lib/ExecutionEngine/Orc/IRTransformLayer.cpp [5:5]
+ lib/ExecutionEngine/Orc/IndirectionUtils.cpp [5:5]
+ lib/ExecutionEngine/Orc/JITTargetMachineBuilder.cpp [5:5]
+ lib/ExecutionEngine/Orc/LLJIT.cpp [5:5]
+ lib/ExecutionEngine/Orc/Layer.cpp [5:5]
+ lib/ExecutionEngine/Orc/LazyReexports.cpp [5:5]
+ lib/ExecutionEngine/Orc/LookupAndRecordAddrs.cpp [5:5]
+ lib/ExecutionEngine/Orc/MachOPlatform.cpp [5:5]
+ lib/ExecutionEngine/Orc/Mangling.cpp [5:5]
+ lib/ExecutionEngine/Orc/MapperJITLinkMemoryManager.cpp [5:5]
+ lib/ExecutionEngine/Orc/MemoryMapper.cpp [5:5]
+ lib/ExecutionEngine/Orc/ObjectFileInterface.cpp [5:5]
+ lib/ExecutionEngine/Orc/ObjectLinkingLayer.cpp [5:5]
+ lib/ExecutionEngine/Orc/ObjectTransformLayer.cpp [5:5]
+ lib/ExecutionEngine/Orc/OrcABISupport.cpp [5:5]
+ lib/ExecutionEngine/Orc/OrcV2CBindings.cpp [5:5]
+ lib/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.cpp [5:5]
+ lib/ExecutionEngine/Orc/Shared/AllocationActions.cpp [5:5]
+ lib/ExecutionEngine/Orc/Shared/OrcError.cpp [5:5]
+ lib/ExecutionEngine/Orc/Shared/OrcRTBridge.cpp [5:5]
+ lib/ExecutionEngine/Orc/Shared/SimpleRemoteEPCUtils.cpp [5:5]
+ lib/ExecutionEngine/Orc/SimpleRemoteEPC.cpp [5:5]
+ lib/ExecutionEngine/Orc/SpeculateAnalyses.cpp [5:5]
+ lib/ExecutionEngine/Orc/Speculation.cpp [5:5]
+ lib/ExecutionEngine/Orc/TargetProcess/ExecutorSharedMemoryMapperService.cpp [5:5]
+ lib/ExecutionEngine/Orc/TargetProcess/JITLoaderGDB.cpp [5:5]
+ lib/ExecutionEngine/Orc/TargetProcess/OrcRTBootstrap.cpp [5:5]
+ lib/ExecutionEngine/Orc/TargetProcess/OrcRTBootstrap.h [5:5]
+ lib/ExecutionEngine/Orc/TargetProcess/RegisterEHFrames.cpp [5:5]
+ lib/ExecutionEngine/Orc/TargetProcess/SimpleExecutorDylibManager.cpp [5:5]
+ lib/ExecutionEngine/Orc/TargetProcess/SimpleExecutorMemoryManager.cpp [5:5]
+ lib/ExecutionEngine/Orc/TargetProcess/SimpleRemoteEPCServer.cpp [5:5]
+ lib/ExecutionEngine/Orc/TargetProcess/TargetExecutionUtils.cpp [5:5]
+ lib/ExecutionEngine/Orc/TaskDispatch.cpp [5:5]
+ lib/ExecutionEngine/Orc/ThreadSafeModule.cpp [6:6]
+ lib/ExecutionEngine/PerfJITEvents/PerfJITEventListener.cpp [5:5]
+ lib/ExecutionEngine/RuntimeDyld/JITSymbol.cpp [5:5]
+ lib/ExecutionEngine/RuntimeDyld/RTDyldMemoryManager.cpp [5:5]
+ lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp [5:5]
+ lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCOFF.cpp [5:5]
+ lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCOFF.h [5:5]
+ lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp [5:5]
+ lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCheckerImpl.h [5:5]
+ lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp [5:5]
+ lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h [5:5]
+ lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h [5:5]
+ lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp [5:5]
+ lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h [5:5]
+ lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFAArch64.h [6:6]
+ lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFI386.h [5:5]
+ lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFThumb.h [5:5]
+ lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFX86_64.h [5:5]
+ lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldELFMips.cpp [5:5]
+ lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldELFMips.h [5:5]
+ lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h [5:5]
+ lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOARM.h [5:5]
+ lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOI386.h [5:5]
+ lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOX86_64.h [5:5]
+ lib/ExecutionEngine/SectionMemoryManager.cpp [5:5]
+ lib/ExecutionEngine/TargetSelect.cpp [5:5]
+ lib/FileCheck/FileCheck.cpp [5:5]
+ lib/FileCheck/FileCheckImpl.h [5:5]
+ lib/Frontend/HLSL/HLSLResource.cpp [5:5]
+ lib/Frontend/OpenACC/ACC.cpp [5:5]
+ lib/Frontend/OpenMP/OMP.cpp [5:5]
+ lib/Frontend/OpenMP/OMPContext.cpp [5:5]
+ lib/Frontend/OpenMP/OMPIRBuilder.cpp [5:5]
+ lib/FuzzMutate/IRMutator.cpp [5:5]
+ lib/FuzzMutate/OpDescriptor.cpp [5:5]
+ lib/FuzzMutate/Operations.cpp [5:5]
+ lib/FuzzMutate/RandomIRBuilder.cpp [5:5]
+ lib/IR/AbstractCallSite.cpp [5:5]
+ lib/IR/AsmWriter.cpp [5:5]
+ lib/IR/Assumptions.cpp [5:5]
+ lib/IR/AttributeImpl.h [5:5]
+ lib/IR/Attributes.cpp [5:5]
+ lib/IR/AutoUpgrade.cpp [5:5]
+ lib/IR/BasicBlock.cpp [5:5]
+ lib/IR/BuiltinGCs.cpp [5:5]
+ lib/IR/Comdat.cpp [5:5]
+ lib/IR/ConstantFold.cpp [5:5]
+ lib/IR/ConstantRange.cpp [5:5]
+ lib/IR/Constants.cpp [5:5]
+ lib/IR/ConstantsContext.h [5:5]
+ lib/IR/Core.cpp [5:5]
+ lib/IR/DIBuilder.cpp [5:5]
+ lib/IR/DataLayout.cpp [5:5]
+ lib/IR/DebugInfo.cpp [5:5]
+ lib/IR/DebugInfoMetadata.cpp [5:5]
+ lib/IR/DebugLoc.cpp [5:5]
+ lib/IR/DiagnosticHandler.cpp [5:5]
+ lib/IR/DiagnosticInfo.cpp [5:5]
+ lib/IR/DiagnosticPrinter.cpp [5:5]
+ lib/IR/Dominators.cpp [5:5]
+ lib/IR/FPEnv.cpp [5:5]
+ lib/IR/Function.cpp [5:5]
+ lib/IR/GCStrategy.cpp [5:5]
+ lib/IR/GVMaterializer.cpp [5:5]
+ lib/IR/Globals.cpp [5:5]
+ lib/IR/IRBuilder.cpp [5:5]
+ lib/IR/IRPrintingPasses.cpp [5:5]
+ lib/IR/InlineAsm.cpp [5:5]
+ lib/IR/Instruction.cpp [5:5]
+ lib/IR/Instructions.cpp [5:5]
+ lib/IR/IntrinsicInst.cpp [5:5]
+ lib/IR/LLVMContext.cpp [5:5]
+ lib/IR/LLVMContextImpl.cpp [5:5]
+ lib/IR/LLVMContextImpl.h [5:5]
+ lib/IR/LLVMRemarkStreamer.cpp [5:5]
+ lib/IR/LegacyPassManager.cpp [5:5]
+ lib/IR/MDBuilder.cpp [5:5]
+ lib/IR/Mangler.cpp [5:5]
+ lib/IR/Metadata.cpp [5:5]
+ lib/IR/MetadataImpl.h [5:5]
+ lib/IR/Module.cpp [5:5]
+ lib/IR/ModuleSummaryIndex.cpp [5:5]
+ lib/IR/Operator.cpp [5:5]
+ lib/IR/OptBisect.cpp [5:5]
+ lib/IR/Pass.cpp [5:5]
+ lib/IR/PassInstrumentation.cpp [5:5]
+ lib/IR/PassManager.cpp [5:5]
+ lib/IR/PassRegistry.cpp [5:5]
+ lib/IR/PassTimingInfo.cpp [5:5]
+ lib/IR/PrintPasses.cpp [5:5]
+ lib/IR/ProfDataUtils.cpp [5:5]
+ lib/IR/ProfileSummary.cpp [5:5]
+ lib/IR/PseudoProbe.cpp [5:5]
+ lib/IR/ReplaceConstant.cpp [5:5]
+ lib/IR/SSAContext.cpp [5:5]
+ lib/IR/SafepointIRVerifier.cpp [5:5]
+ lib/IR/Statepoint.cpp [5:5]
+ lib/IR/StructuralHash.cpp [5:5]
+ lib/IR/SymbolTableListTraitsImpl.h [5:5]
+ lib/IR/Type.cpp [5:5]
+ lib/IR/TypeFinder.cpp [5:5]
+ lib/IR/TypedPointerType.cpp [5:5]
+ lib/IR/Use.cpp [5:5]
+ lib/IR/User.cpp [5:5]
+ lib/IR/Value.cpp [5:5]
+ lib/IR/ValueSymbolTable.cpp [5:5]
+ lib/IR/VectorBuilder.cpp [5:5]
+ lib/IR/Verifier.cpp [5:5]
+ lib/IRPrinter/IRPrintingPasses.cpp [5:5]
+ lib/IRReader/IRReader.cpp [5:5]
+ lib/InterfaceStub/ELFObjHandler.cpp [5:5]
+ lib/InterfaceStub/IFSHandler.cpp [5:5]
+ lib/InterfaceStub/IFSStub.cpp [5:5]
+ lib/LTO/LTO.cpp [5:5]
+ lib/LTO/LTOBackend.cpp [5:5]
+ lib/LTO/LTOCodeGenerator.cpp [5:5]
+ lib/LTO/LTOModule.cpp [5:5]
+ lib/LTO/SummaryBasedOptimizations.cpp [5:5]
+ lib/LTO/ThinLTOCodeGenerator.cpp [5:5]
+ lib/LTO/UpdateCompilerUsed.cpp [5:5]
+ lib/LineEditor/LineEditor.cpp [5:5]
+ lib/Linker/IRMover.cpp [5:5]
+ lib/Linker/LinkDiagnosticInfo.h [5:5]
+ lib/Linker/LinkModules.cpp [5:5]
+ lib/MC/ConstantPools.cpp [5:5]
+ lib/MC/ELFObjectWriter.cpp [5:5]
+ lib/MC/MCAsmBackend.cpp [5:5]
+ lib/MC/MCAsmInfo.cpp [5:5]
+ lib/MC/MCAsmInfoCOFF.cpp [5:5]
+ lib/MC/MCAsmInfoDarwin.cpp [5:5]
+ lib/MC/MCAsmInfoELF.cpp [5:5]
+ lib/MC/MCAsmInfoGOFF.cpp [5:5]
+ lib/MC/MCAsmInfoWasm.cpp [5:5]
+ lib/MC/MCAsmInfoXCOFF.cpp [5:5]
+ lib/MC/MCAsmMacro.cpp [5:5]
+ lib/MC/MCAsmStreamer.cpp [5:5]
+ lib/MC/MCAssembler.cpp [5:5]
+ lib/MC/MCCodeEmitter.cpp [5:5]
+ lib/MC/MCCodeView.cpp [5:5]
+ lib/MC/MCContext.cpp [5:5]
+ lib/MC/MCDXContainerStreamer.cpp [5:5]
+ lib/MC/MCDXContainerWriter.cpp [5:5]
+ lib/MC/MCDisassembler/Disassembler.cpp [5:5]
+ lib/MC/MCDisassembler/Disassembler.h [5:5]
+ lib/MC/MCDisassembler/MCDisassembler.cpp [5:5]
+ lib/MC/MCDisassembler/MCExternalSymbolizer.cpp [5:5]
+ lib/MC/MCDisassembler/MCRelocationInfo.cpp [5:5]
+ lib/MC/MCDisassembler/MCSymbolizer.cpp [5:5]
+ lib/MC/MCDwarf.cpp [5:5]
+ lib/MC/MCELFObjectTargetWriter.cpp [5:5]
+ lib/MC/MCELFStreamer.cpp [5:5]
+ lib/MC/MCExpr.cpp [5:5]
+ lib/MC/MCFragment.cpp [5:5]
+ lib/MC/MCInst.cpp [5:5]
+ lib/MC/MCInstPrinter.cpp [5:5]
+ lib/MC/MCInstrAnalysis.cpp [5:5]
+ lib/MC/MCInstrDesc.cpp [5:5]
+ lib/MC/MCInstrInfo.cpp [5:5]
+ lib/MC/MCLabel.cpp [5:5]
+ lib/MC/MCLinkerOptimizationHint.cpp [5:5]
+ lib/MC/MCMachOStreamer.cpp [5:5]
+ lib/MC/MCMachObjectTargetWriter.cpp [5:5]
+ lib/MC/MCNullStreamer.cpp [5:5]
+ lib/MC/MCObjectFileInfo.cpp [5:5]
+ lib/MC/MCObjectStreamer.cpp [5:5]
+ lib/MC/MCObjectWriter.cpp [5:5]
+ lib/MC/MCParser/AsmLexer.cpp [5:5]
+ lib/MC/MCParser/AsmParser.cpp [5:5]
+ lib/MC/MCParser/COFFAsmParser.cpp [5:5]
+ lib/MC/MCParser/COFFMasmParser.cpp [5:5]
+ lib/MC/MCParser/DarwinAsmParser.cpp [5:5]
+ lib/MC/MCParser/ELFAsmParser.cpp [5:5]
+ lib/MC/MCParser/GOFFAsmParser.cpp [5:5]
+ lib/MC/MCParser/MCAsmLexer.cpp [5:5]
+ lib/MC/MCParser/MCAsmParser.cpp [5:5]
+ lib/MC/MCParser/MCAsmParserExtension.cpp [5:5]
+ lib/MC/MCParser/MCTargetAsmParser.cpp [5:5]
+ lib/MC/MCParser/MasmParser.cpp [5:5]
+ lib/MC/MCParser/WasmAsmParser.cpp [5:5]
+ lib/MC/MCParser/XCOFFAsmParser.cpp [6:6]
+ lib/MC/MCPseudoProbe.cpp [5:5]
+ lib/MC/MCRegisterInfo.cpp [5:5]
+ lib/MC/MCSPIRVStreamer.cpp [5:5]
+ lib/MC/MCSchedule.cpp [5:5]
+ lib/MC/MCSection.cpp [5:5]
+ lib/MC/MCSectionCOFF.cpp [5:5]
+ lib/MC/MCSectionDXContainer.cpp [5:5]
+ lib/MC/MCSectionELF.cpp [5:5]
+ lib/MC/MCSectionMachO.cpp [5:5]
+ lib/MC/MCSectionWasm.cpp [5:5]
+ lib/MC/MCSectionXCOFF.cpp [5:5]
+ lib/MC/MCStreamer.cpp [5:5]
+ lib/MC/MCSubtargetInfo.cpp [5:5]
+ lib/MC/MCSymbol.cpp [5:5]
+ lib/MC/MCSymbolELF.cpp [5:5]
+ lib/MC/MCSymbolXCOFF.cpp [5:5]
+ lib/MC/MCTargetOptions.cpp [5:5]
+ lib/MC/MCTargetOptionsCommandFlags.cpp [5:5]
+ lib/MC/MCValue.cpp [5:5]
+ lib/MC/MCWasmObjectTargetWriter.cpp [5:5]
+ lib/MC/MCWasmStreamer.cpp [5:5]
+ lib/MC/MCWin64EH.cpp [5:5]
+ lib/MC/MCWinCOFFStreamer.cpp [5:5]
+ lib/MC/MCWinEH.cpp [5:5]
+ lib/MC/MCXCOFFObjectTargetWriter.cpp [5:5]
+ lib/MC/MCXCOFFStreamer.cpp [5:5]
+ lib/MC/MachObjectWriter.cpp [5:5]
+ lib/MC/SPIRVObjectWriter.cpp [5:5]
+ lib/MC/StringTableBuilder.cpp [5:5]
+ lib/MC/SubtargetFeature.cpp [5:5]
+ lib/MC/TargetRegistry.cpp [5:5]
+ lib/MC/WasmObjectWriter.cpp [5:5]
+ lib/MC/WinCOFFObjectWriter.cpp [5:5]
+ lib/MC/XCOFFObjectWriter.cpp [5:5]
+ lib/MCA/CodeEmitter.cpp [5:5]
+ lib/MCA/Context.cpp [5:5]
+ lib/MCA/CustomBehaviour.cpp [5:5]
+ lib/MCA/HWEventListener.cpp [5:5]
+ lib/MCA/HardwareUnits/HardwareUnit.cpp [5:5]
+ lib/MCA/HardwareUnits/LSUnit.cpp [5:5]
+ lib/MCA/HardwareUnits/RegisterFile.cpp [5:5]
+ lib/MCA/HardwareUnits/ResourceManager.cpp [5:5]
+ lib/MCA/HardwareUnits/RetireControlUnit.cpp [5:5]
+ lib/MCA/HardwareUnits/Scheduler.cpp [5:5]
+ lib/MCA/IncrementalSourceMgr.cpp [5:5]
+ lib/MCA/InstrBuilder.cpp [5:5]
+ lib/MCA/Instruction.cpp [5:5]
+ lib/MCA/Pipeline.cpp [5:5]
+ lib/MCA/Stages/DispatchStage.cpp [5:5]
+ lib/MCA/Stages/EntryStage.cpp [5:5]
+ lib/MCA/Stages/ExecuteStage.cpp [5:5]
+ lib/MCA/Stages/InOrderIssueStage.cpp [5:5]
+ lib/MCA/Stages/InstructionTables.cpp [5:5]
+ lib/MCA/Stages/MicroOpQueueStage.cpp [5:5]
+ lib/MCA/Stages/RetireStage.cpp [5:5]
+ lib/MCA/Stages/Stage.cpp [5:5]
+ lib/MCA/Support.cpp [5:5]
+ lib/MCA/View.cpp [5:5]
+ lib/ObjCopy/Archive.cpp [5:5]
+ lib/ObjCopy/Archive.h [5:5]
+ lib/ObjCopy/COFF/COFFObjcopy.cpp [5:5]
+ lib/ObjCopy/COFF/COFFObject.cpp [5:5]
+ lib/ObjCopy/COFF/COFFObject.h [5:5]
+ lib/ObjCopy/COFF/COFFReader.cpp [5:5]
+ lib/ObjCopy/COFF/COFFReader.h [5:5]
+ lib/ObjCopy/COFF/COFFWriter.cpp [5:5]
+ lib/ObjCopy/COFF/COFFWriter.h [5:5]
+ lib/ObjCopy/CommonConfig.cpp [5:5]
+ lib/ObjCopy/ConfigManager.cpp [5:5]
+ lib/ObjCopy/ELF/ELFObjcopy.cpp [5:5]
+ lib/ObjCopy/ELF/ELFObject.cpp [5:5]
+ lib/ObjCopy/ELF/ELFObject.h [5:5]
+ lib/ObjCopy/MachO/MachOLayoutBuilder.cpp [5:5]
+ lib/ObjCopy/MachO/MachOLayoutBuilder.h [5:5]
+ lib/ObjCopy/MachO/MachOObjcopy.cpp [5:5]
+ lib/ObjCopy/MachO/MachOObject.cpp [5:5]
+ lib/ObjCopy/MachO/MachOObject.h [5:5]
+ lib/ObjCopy/MachO/MachOReader.cpp [5:5]
+ lib/ObjCopy/MachO/MachOReader.h [5:5]
+ lib/ObjCopy/MachO/MachOWriter.cpp [5:5]
+ lib/ObjCopy/MachO/MachOWriter.h [5:5]
+ lib/ObjCopy/ObjCopy.cpp [5:5]
+ lib/ObjCopy/XCOFF/XCOFFObjcopy.cpp [5:5]
+ lib/ObjCopy/XCOFF/XCOFFObject.h [5:5]
+ lib/ObjCopy/XCOFF/XCOFFReader.cpp [5:5]
+ lib/ObjCopy/XCOFF/XCOFFReader.h [5:5]
+ lib/ObjCopy/XCOFF/XCOFFWriter.cpp [5:5]
+ lib/ObjCopy/XCOFF/XCOFFWriter.h [5:5]
+ lib/ObjCopy/wasm/WasmObjcopy.cpp [5:5]
+ lib/ObjCopy/wasm/WasmObject.cpp [5:5]
+ lib/ObjCopy/wasm/WasmObject.h [5:5]
+ lib/ObjCopy/wasm/WasmReader.cpp [5:5]
+ lib/ObjCopy/wasm/WasmReader.h [5:5]
+ lib/ObjCopy/wasm/WasmWriter.cpp [5:5]
+ lib/ObjCopy/wasm/WasmWriter.h [5:5]
+ lib/Object/Archive.cpp [5:5]
+ lib/Object/ArchiveWriter.cpp [5:5]
+ lib/Object/Binary.cpp [5:5]
+ lib/Object/BuildID.cpp [5:5]
+ lib/Object/COFFImportFile.cpp [5:5]
+ lib/Object/COFFModuleDefinition.cpp [5:5]
+ lib/Object/COFFObjectFile.cpp [5:5]
+ lib/Object/DXContainer.cpp [5:5]
+ lib/Object/Decompressor.cpp [5:5]
+ lib/Object/ELF.cpp [5:5]
+ lib/Object/ELFObjectFile.cpp [5:5]
+ lib/Object/Error.cpp [5:5]
+ lib/Object/FaultMapParser.cpp [5:5]
+ lib/Object/IRObjectFile.cpp [5:5]
+ lib/Object/IRSymtab.cpp [5:5]
+ lib/Object/MachOObjectFile.cpp [5:5]
+ lib/Object/MachOUniversal.cpp [5:5]
+ lib/Object/MachOUniversalWriter.cpp [5:5]
+ lib/Object/Minidump.cpp [5:5]
+ lib/Object/ModuleSymbolTable.cpp [5:5]
+ lib/Object/Object.cpp [5:5]
+ lib/Object/ObjectFile.cpp [5:5]
+ lib/Object/OffloadBinary.cpp [5:5]
+ lib/Object/RecordStreamer.cpp [5:5]
+ lib/Object/RecordStreamer.h [5:5]
+ lib/Object/RelocationResolver.cpp [5:5]
+ lib/Object/SymbolSize.cpp [5:5]
+ lib/Object/SymbolicFile.cpp [5:5]
+ lib/Object/TapiFile.cpp [5:5]
+ lib/Object/TapiUniversal.cpp [5:5]
+ lib/Object/WasmObjectFile.cpp [5:5]
+ lib/Object/WindowsMachineFlag.cpp [5:5]
+ lib/Object/WindowsResource.cpp [5:5]
+ lib/Object/XCOFFObjectFile.cpp [5:5]
+ lib/ObjectYAML/ArchiveEmitter.cpp [5:5]
+ lib/ObjectYAML/ArchiveYAML.cpp [5:5]
+ lib/ObjectYAML/COFFEmitter.cpp [5:5]
+ lib/ObjectYAML/COFFYAML.cpp [5:5]
+ lib/ObjectYAML/CodeViewYAMLDebugSections.cpp [5:5]
+ lib/ObjectYAML/CodeViewYAMLSymbols.cpp [5:5]
+ lib/ObjectYAML/CodeViewYAMLTypeHashing.cpp [5:5]
+ lib/ObjectYAML/CodeViewYAMLTypes.cpp [5:5]
+ lib/ObjectYAML/DWARFEmitter.cpp [5:5]
+ lib/ObjectYAML/DWARFYAML.cpp [5:5]
+ lib/ObjectYAML/DXContainerEmitter.cpp [5:5]
+ lib/ObjectYAML/DXContainerYAML.cpp [5:5]
+ lib/ObjectYAML/ELFEmitter.cpp [5:5]
+ lib/ObjectYAML/ELFYAML.cpp [5:5]
+ lib/ObjectYAML/MachOEmitter.cpp [5:5]
+ lib/ObjectYAML/MachOYAML.cpp [5:5]
+ lib/ObjectYAML/MinidumpEmitter.cpp [5:5]
+ lib/ObjectYAML/MinidumpYAML.cpp [5:5]
+ lib/ObjectYAML/ObjectYAML.cpp [5:5]
+ lib/ObjectYAML/OffloadEmitter.cpp [5:5]
+ lib/ObjectYAML/OffloadYAML.cpp [5:5]
+ lib/ObjectYAML/WasmEmitter.cpp [5:5]
+ lib/ObjectYAML/WasmYAML.cpp [5:5]
+ lib/ObjectYAML/XCOFFEmitter.cpp [5:5]
+ lib/ObjectYAML/XCOFFYAML.cpp [5:5]
+ lib/ObjectYAML/YAML.cpp [5:5]
+ lib/ObjectYAML/yaml2obj.cpp [5:5]
+ lib/Option/Arg.cpp [5:5]
+ lib/Option/ArgList.cpp [5:5]
+ lib/Option/OptTable.cpp [5:5]
+ lib/Option/Option.cpp [5:5]
+ lib/Passes/OptimizationLevel.cpp [5:5]
+ lib/Passes/PassBuilder.cpp [5:5]
+ lib/Passes/PassBuilderBindings.cpp [5:5]
+ lib/Passes/PassBuilderPipelines.cpp [5:5]
+ lib/Passes/PassPlugin.cpp [5:5]
+ lib/Passes/PassRegistry.def [5:5]
+ lib/Passes/StandardInstrumentations.cpp [5:5]
+ lib/ProfileData/Coverage/CoverageMapping.cpp [5:5]
+ lib/ProfileData/Coverage/CoverageMappingReader.cpp [5:5]
+ lib/ProfileData/Coverage/CoverageMappingWriter.cpp [5:5]
+ lib/ProfileData/GCOV.cpp [5:5]
+ lib/ProfileData/InstrProf.cpp [5:5]
+ lib/ProfileData/InstrProfCorrelator.cpp [5:5]
+ lib/ProfileData/InstrProfReader.cpp [5:5]
+ lib/ProfileData/InstrProfWriter.cpp [5:5]
+ lib/ProfileData/ProfileSummaryBuilder.cpp [5:5]
+ lib/ProfileData/RawMemProfReader.cpp [5:5]
+ lib/ProfileData/SampleProf.cpp [5:5]
+ lib/ProfileData/SampleProfReader.cpp [5:5]
+ lib/ProfileData/SampleProfWriter.cpp [5:5]
+ lib/Remarks/BitstreamRemarkParser.cpp [5:5]
+ lib/Remarks/BitstreamRemarkParser.h [5:5]
+ lib/Remarks/BitstreamRemarkSerializer.cpp [5:5]
+ lib/Remarks/Remark.cpp [5:5]
+ lib/Remarks/RemarkFormat.cpp [5:5]
+ lib/Remarks/RemarkLinker.cpp [5:5]
+ lib/Remarks/RemarkParser.cpp [5:5]
+ lib/Remarks/RemarkSerializer.cpp [5:5]
+ lib/Remarks/RemarkStreamer.cpp [5:5]
+ lib/Remarks/RemarkStringTable.cpp [5:5]
+ lib/Remarks/YAMLRemarkParser.cpp [5:5]
+ lib/Remarks/YAMLRemarkParser.h [5:5]
+ lib/Remarks/YAMLRemarkSerializer.cpp [5:5]
+ lib/Support/ABIBreak.cpp [5:5]
+ lib/Support/AMDGPUMetadata.cpp [5:5]
+ lib/Support/APFixedPoint.cpp [5:5]
+ lib/Support/APFloat.cpp [5:5]
+ lib/Support/APInt.cpp [5:5]
+ lib/Support/APSInt.cpp [5:5]
+ lib/Support/ARMAttributeParser.cpp [5:5]
+ lib/Support/ARMBuildAttrs.cpp [5:5]
+ lib/Support/ARMWinEH.cpp [5:5]
+ lib/Support/AddressRanges.cpp [5:5]
+ lib/Support/Allocator.cpp [5:5]
+ lib/Support/Atomic.cpp [5:5]
+ lib/Support/AutoConvert.cpp [5:5]
+ lib/Support/Base64.cpp [5:5]
+ lib/Support/BinaryStreamError.cpp [5:5]
+ lib/Support/BinaryStreamReader.cpp [5:5]
+ lib/Support/BinaryStreamRef.cpp [5:5]
+ lib/Support/BinaryStreamWriter.cpp [5:5]
+ lib/Support/BlockFrequency.cpp [5:5]
+ lib/Support/BranchProbability.cpp [5:5]
+ lib/Support/BuryPointer.cpp [5:5]
+ lib/Support/COM.cpp [5:5]
+ lib/Support/CRC.cpp [5:5]
+ lib/Support/CSKYAttributeParser.cpp [5:5]
+ lib/Support/CSKYAttributes.cpp [5:5]
+ lib/Support/CachePruning.cpp [5:5]
+ lib/Support/Caching.cpp [5:5]
+ lib/Support/Chrono.cpp [5:5]
+ lib/Support/CodeGenCoverage.cpp [5:5]
+ lib/Support/CommandLine.cpp [5:5]
+ lib/Support/Compression.cpp [5:5]
+ lib/Support/ConvertUTFWrapper.cpp [5:5]
+ lib/Support/CrashRecoveryContext.cpp [5:5]
+ lib/Support/DAGDeltaAlgorithm.cpp [5:5]
+ lib/Support/DJB.cpp [5:5]
+ lib/Support/DataExtractor.cpp [5:5]
+ lib/Support/Debug.cpp [5:5]
+ lib/Support/DebugOptions.h [5:5]
+ lib/Support/DeltaAlgorithm.cpp [5:5]
+ lib/Support/DivisionByConstantInfo.cpp [5:5]
+ lib/Support/DynamicLibrary.cpp [5:5]
+ lib/Support/ELFAttributeParser.cpp [5:5]
+ lib/Support/ELFAttributes.cpp [5:5]
+ lib/Support/Errno.cpp [5:5]
+ lib/Support/Error.cpp [5:5]
+ lib/Support/ErrorHandling.cpp [5:5]
+ lib/Support/ExtensibleRTTI.cpp [5:5]
+ lib/Support/FileCollector.cpp [5:5]
+ lib/Support/FileOutputBuffer.cpp [5:5]
+ lib/Support/FileUtilities.cpp [5:5]
+ lib/Support/FoldingSet.cpp [5:5]
+ lib/Support/FormatVariadic.cpp [5:5]
+ lib/Support/FormattedStream.cpp [5:5]
+ lib/Support/GlobPattern.cpp [5:5]
+ lib/Support/GraphWriter.cpp [5:5]
+ lib/Support/Hashing.cpp [5:5]
+ lib/Support/InitLLVM.cpp [5:5]
+ lib/Support/InstructionCost.cpp [5:5]
+ lib/Support/IntEqClasses.cpp [5:5]
+ lib/Support/IntervalMap.cpp [5:5]
+ lib/Support/ItaniumManglingCanonicalizer.cpp [5:5]
+ lib/Support/JSON.cpp [5:5]
+ lib/Support/KnownBits.cpp [5:5]
+ lib/Support/LEB128.cpp [5:5]
+ lib/Support/LineIterator.cpp [5:5]
+ lib/Support/LockFileManager.cpp [5:5]
+ lib/Support/LowLevelType.cpp [5:5]
+ lib/Support/MSP430AttributeParser.cpp [5:5]
+ lib/Support/MSP430Attributes.cpp [5:5]
+ lib/Support/ManagedStatic.cpp [5:5]
+ lib/Support/MathExtras.cpp [5:5]
+ lib/Support/MemAlloc.cpp [5:5]
+ lib/Support/Memory.cpp [5:5]
+ lib/Support/MemoryBuffer.cpp [5:5]
+ lib/Support/MemoryBufferRef.cpp [5:5]
+ lib/Support/NativeFormatting.cpp [5:5]
+ lib/Support/OptimizedStructLayout.cpp [5:5]
+ lib/Support/Optional.cpp [5:5]
+ lib/Support/Parallel.cpp [5:5]
+ lib/Support/Path.cpp [5:5]
+ lib/Support/PluginLoader.cpp [5:5]
+ lib/Support/PrettyStackTrace.cpp [5:5]
+ lib/Support/Process.cpp [5:5]
+ lib/Support/Program.cpp [5:5]
+ lib/Support/RISCVAttributeParser.cpp [5:5]
+ lib/Support/RISCVAttributes.cpp [5:5]
+ lib/Support/RISCVISAInfo.cpp [5:5]
+ lib/Support/RWMutex.cpp [5:5]
+ lib/Support/RandomNumberGenerator.cpp [5:5]
+ lib/Support/Regex.cpp [5:5]
+ lib/Support/SHA1.cpp [5:5]
+ lib/Support/SHA256.cpp [5:5]
+ lib/Support/ScaledNumber.cpp [5:5]
+ lib/Support/Signals.cpp [5:5]
+ lib/Support/Signposts.cpp [5:5]
+ lib/Support/SmallPtrSet.cpp [5:5]
+ lib/Support/SmallVector.cpp [5:5]
+ lib/Support/SourceMgr.cpp [5:5]
+ lib/Support/SpecialCaseList.cpp [5:5]
+ lib/Support/Statistic.cpp [5:5]
+ lib/Support/StringExtras.cpp [5:5]
+ lib/Support/StringMap.cpp [5:5]
+ lib/Support/StringRef.cpp [5:5]
+ lib/Support/StringSaver.cpp [5:5]
+ lib/Support/SuffixTree.cpp [5:5]
+ lib/Support/SymbolRemappingReader.cpp [5:5]
+ lib/Support/SystemUtils.cpp [5:5]
+ lib/Support/TarWriter.cpp [5:5]
+ lib/Support/ThreadPool.cpp [5:5]
+ lib/Support/Threading.cpp [5:5]
+ lib/Support/TimeProfiler.cpp [5:5]
+ lib/Support/Timer.cpp [5:5]
+ lib/Support/ToolOutputFile.cpp [5:5]
+ lib/Support/TrigramIndex.cpp [5:5]
+ lib/Support/Twine.cpp [5:5]
+ lib/Support/TypeSize.cpp [5:5]
+ lib/Support/Unicode.cpp [5:5]
+ lib/Support/UnicodeNameToCodepoint.cpp [6:6]
+ lib/Support/UnicodeNameToCodepointGenerated.cpp [5:5]
+ lib/Support/Unix/COM.inc [5:5]
+ lib/Support/Unix/DynamicLibrary.inc [5:5]
+ lib/Support/Unix/Memory.inc [5:5]
+ lib/Support/Unix/Path.inc [5:5]
+ lib/Support/Unix/Process.inc [5:5]
+ lib/Support/Unix/Program.inc [5:5]
+ lib/Support/Unix/Signals.inc [5:5]
+ lib/Support/Unix/Threading.inc [5:5]
+ lib/Support/Unix/Unix.h [5:5]
+ lib/Support/Unix/Watchdog.inc [5:5]
+ lib/Support/Valgrind.cpp [5:5]
+ lib/Support/VersionTuple.cpp [5:5]
+ lib/Support/VirtualFileSystem.cpp [5:5]
+ lib/Support/Watchdog.cpp [5:5]
+ lib/Support/Windows/COM.inc [5:5]
+ lib/Support/Windows/DynamicLibrary.inc [5:5]
+ lib/Support/Windows/Memory.inc [5:5]
+ lib/Support/Windows/Path.inc [5:5]
+ lib/Support/Windows/Process.inc [5:5]
+ lib/Support/Windows/Program.inc [5:5]
+ lib/Support/Windows/Signals.inc [5:5]
+ lib/Support/Windows/Threading.inc [5:5]
+ lib/Support/Windows/Watchdog.inc [5:5]
+ lib/Support/WithColor.cpp [5:5]
+ lib/Support/YAMLParser.cpp [5:5]
+ lib/Support/YAMLTraits.cpp [5:5]
+ lib/Support/Z3Solver.cpp [5:5]
+ lib/Support/circular_raw_ostream.cpp [5:5]
+ lib/Support/raw_os_ostream.cpp [5:5]
+ lib/Support/raw_ostream.cpp [5:5]
+ lib/TableGen/DetailedRecordsBackend.cpp [5:5]
+ lib/TableGen/Error.cpp [5:5]
+ lib/TableGen/JSONBackend.cpp [5:5]
+ lib/TableGen/Main.cpp [5:5]
+ lib/TableGen/Parser.cpp [5:5]
+ lib/TableGen/Record.cpp [5:5]
+ lib/TableGen/SetTheory.cpp [5:5]
+ lib/TableGen/StringMatcher.cpp [5:5]
+ lib/TableGen/TGLexer.cpp [5:5]
+ lib/TableGen/TGLexer.h [5:5]
+ lib/TableGen/TGParser.cpp [5:5]
+ lib/TableGen/TGParser.h [5:5]
+ lib/TableGen/TableGenBackend.cpp [5:5]
+ lib/TableGen/TableGenBackendSkeleton.cpp [5:5]
+ lib/Target/AArch64/AArch64.h [5:5]
+ lib/Target/AArch64/AArch64.td [5:5]
+ lib/Target/AArch64/AArch64A53Fix835769.cpp [5:5]
+ lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp [5:5]
+ lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp [5:5]
+ lib/Target/AArch64/AArch64AsmPrinter.cpp [5:5]
+ lib/Target/AArch64/AArch64BranchTargets.cpp [5:5]
+ lib/Target/AArch64/AArch64CallingConvention.cpp [5:5]
+ lib/Target/AArch64/AArch64CallingConvention.h [5:5]
+ lib/Target/AArch64/AArch64CallingConvention.td [5:5]
+ lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp [5:5]
+ lib/Target/AArch64/AArch64CollectLOH.cpp [5:5]
+ lib/Target/AArch64/AArch64Combine.td [5:5]
+ lib/Target/AArch64/AArch64CompressJumpTables.cpp [5:5]
+ lib/Target/AArch64/AArch64CondBrTuning.cpp [5:5]
+ lib/Target/AArch64/AArch64ConditionOptimizer.cpp [5:5]
+ lib/Target/AArch64/AArch64ConditionalCompares.cpp [5:5]
+ lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp [5:5]
+ lib/Target/AArch64/AArch64ExpandImm.cpp [5:5]
+ lib/Target/AArch64/AArch64ExpandImm.h [5:5]
+ lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp [5:5]
+ lib/Target/AArch64/AArch64FalkorHWPFFix.cpp [5:5]
+ lib/Target/AArch64/AArch64FastISel.cpp [5:5]
+ lib/Target/AArch64/AArch64FrameLowering.cpp [5:5]
+ lib/Target/AArch64/AArch64FrameLowering.h [5:5]
+ lib/Target/AArch64/AArch64GenRegisterBankInfo.def [5:5]
+ lib/Target/AArch64/AArch64ISelDAGToDAG.cpp [5:5]
+ lib/Target/AArch64/AArch64ISelLowering.cpp [5:5]
+ lib/Target/AArch64/AArch64ISelLowering.h [5:5]
+ lib/Target/AArch64/AArch64InstrAtomics.td [5:5]
+ lib/Target/AArch64/AArch64InstrFormats.td [5:5]
+ lib/Target/AArch64/AArch64InstrGISel.td [5:5]
+ lib/Target/AArch64/AArch64InstrInfo.cpp [5:5]
+ lib/Target/AArch64/AArch64InstrInfo.h [5:5]
+ lib/Target/AArch64/AArch64InstrInfo.td [5:5]
+ lib/Target/AArch64/AArch64KCFI.cpp [5:5]
+ lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp [5:5]
+ lib/Target/AArch64/AArch64LowerHomogeneousPrologEpilog.cpp [5:5]
+ lib/Target/AArch64/AArch64MCInstLower.cpp [5:5]
+ lib/Target/AArch64/AArch64MCInstLower.h [5:5]
+ lib/Target/AArch64/AArch64MIPeepholeOpt.cpp [5:5]
+ lib/Target/AArch64/AArch64MachineFunctionInfo.cpp [6:6]
+ lib/Target/AArch64/AArch64MachineFunctionInfo.h [5:5]
+ lib/Target/AArch64/AArch64MachineScheduler.cpp [5:5]
+ lib/Target/AArch64/AArch64MachineScheduler.h [5:5]
+ lib/Target/AArch64/AArch64MacroFusion.cpp [5:5]
+ lib/Target/AArch64/AArch64MacroFusion.h [5:5]
+ lib/Target/AArch64/AArch64PBQPRegAlloc.cpp [5:5]
+ lib/Target/AArch64/AArch64PBQPRegAlloc.h [5:5]
+ lib/Target/AArch64/AArch64PerfectShuffle.h [5:5]
+ lib/Target/AArch64/AArch64PfmCounters.td [5:5]
+ lib/Target/AArch64/AArch64PromoteConstant.cpp [5:5]
+ lib/Target/AArch64/AArch64RedundantCopyElimination.cpp [5:5]
+ lib/Target/AArch64/AArch64RegisterBanks.td [5:5]
+ lib/Target/AArch64/AArch64RegisterInfo.cpp [5:5]
+ lib/Target/AArch64/AArch64RegisterInfo.h [5:5]
+ lib/Target/AArch64/AArch64RegisterInfo.td [5:5]
+ lib/Target/AArch64/AArch64SIMDInstrOpt.cpp [4:4]
+ lib/Target/AArch64/AArch64SLSHardening.cpp [5:5]
+ lib/Target/AArch64/AArch64SMEInstrInfo.td [5:5]
+ lib/Target/AArch64/AArch64SVEInstrInfo.td [5:5]
+ lib/Target/AArch64/AArch64SchedA53.td [5:5]
+ lib/Target/AArch64/AArch64SchedA55.td [5:5]
+ lib/Target/AArch64/AArch64SchedA57.td [5:5]
+ lib/Target/AArch64/AArch64SchedA57WriteRes.td [5:5]
+ lib/Target/AArch64/AArch64SchedA64FX.td [5:5]
+ lib/Target/AArch64/AArch64SchedAmpere1.td [5:5]
+ lib/Target/AArch64/AArch64SchedCyclone.td [5:5]
+ lib/Target/AArch64/AArch64SchedExynosM3.td [5:5]
+ lib/Target/AArch64/AArch64SchedExynosM4.td [5:5]
+ lib/Target/AArch64/AArch64SchedExynosM5.td [5:5]
+ lib/Target/AArch64/AArch64SchedFalkor.td [5:5]
+ lib/Target/AArch64/AArch64SchedFalkorDetails.td [5:5]
+ lib/Target/AArch64/AArch64SchedKryo.td [5:5]
+ lib/Target/AArch64/AArch64SchedKryoDetails.td [5:5]
+ lib/Target/AArch64/AArch64SchedNeoverseN2.td [5:5]
+ lib/Target/AArch64/AArch64SchedPredAmpere.td [5:5]
+ lib/Target/AArch64/AArch64SchedPredExynos.td [5:5]
+ lib/Target/AArch64/AArch64SchedPredicates.td [5:5]
+ lib/Target/AArch64/AArch64SchedTSV110.td [5:5]
+ lib/Target/AArch64/AArch64SchedThunderX.td [5:5]
+ lib/Target/AArch64/AArch64SchedThunderX2T99.td [5:5]
+ lib/Target/AArch64/AArch64SchedThunderX3T110.td [5:5]
+ lib/Target/AArch64/AArch64Schedule.td [5:5]
+ lib/Target/AArch64/AArch64SelectionDAGInfo.cpp [5:5]
+ lib/Target/AArch64/AArch64SelectionDAGInfo.h [5:5]
+ lib/Target/AArch64/AArch64SpeculationHardening.cpp [5:5]
+ lib/Target/AArch64/AArch64StackTagging.cpp [5:5]
+ lib/Target/AArch64/AArch64StackTaggingPreRA.cpp [5:5]
+ lib/Target/AArch64/AArch64StorePairSuppress.cpp [5:5]
+ lib/Target/AArch64/AArch64Subtarget.cpp [5:5]
+ lib/Target/AArch64/AArch64Subtarget.h [5:5]
+ lib/Target/AArch64/AArch64SystemOperands.td [5:5]
+ lib/Target/AArch64/AArch64TargetMachine.cpp [5:5]
+ lib/Target/AArch64/AArch64TargetMachine.h [5:5]
+ lib/Target/AArch64/AArch64TargetObjectFile.cpp [5:5]
+ lib/Target/AArch64/AArch64TargetObjectFile.h [5:5]
+ lib/Target/AArch64/AArch64TargetTransformInfo.cpp [5:5]
+ lib/Target/AArch64/AArch64TargetTransformInfo.h [5:5]
+ lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp [5:5]
+ lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp [5:5]
+ lib/Target/AArch64/Disassembler/AArch64Disassembler.h [5:5]
+ lib/Target/AArch64/Disassembler/AArch64ExternalSymbolizer.cpp [5:5]
+ lib/Target/AArch64/Disassembler/AArch64ExternalSymbolizer.h [5:5]
+ lib/Target/AArch64/GISel/AArch64CallLowering.cpp [5:5]
+ lib/Target/AArch64/GISel/AArch64CallLowering.h [5:5]
+ lib/Target/AArch64/GISel/AArch64GlobalISelUtils.cpp [5:5]
+ lib/Target/AArch64/GISel/AArch64GlobalISelUtils.h [5:5]
+ lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp [5:5]
+ lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp [5:5]
+ lib/Target/AArch64/GISel/AArch64LegalizerInfo.h [5:5]
+ lib/Target/AArch64/GISel/AArch64O0PreLegalizerCombiner.cpp [5:5]
+ lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp [5:5]
+ lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp [5:5]
+ lib/Target/AArch64/GISel/AArch64PostSelectOptimize.cpp [5:5]
+ lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp [5:5]
+ lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp [5:5]
+ lib/Target/AArch64/GISel/AArch64RegisterBankInfo.h [5:5]
+ lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h [5:5]
+ lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp [5:5]
+ lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp [5:5]
+ lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp [5:5]
+ lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.h [5:5]
+ lib/Target/AArch64/MCTargetDesc/AArch64FixupKinds.h [5:5]
+ lib/Target/AArch64/MCTargetDesc/AArch64InstPrinter.cpp [5:5]
+ lib/Target/AArch64/MCTargetDesc/AArch64InstPrinter.h [5:5]
+ lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp [5:5]
+ lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.h [5:5]
+ lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp [5:5]
+ lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp [5:5]
+ lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.h [5:5]
+ lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp [5:5]
+ lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.h [5:5]
+ lib/Target/AArch64/MCTargetDesc/AArch64MachObjectWriter.cpp [5:5]
+ lib/Target/AArch64/MCTargetDesc/AArch64TargetStreamer.cpp [5:5]
+ lib/Target/AArch64/MCTargetDesc/AArch64TargetStreamer.h [5:5]
+ lib/Target/AArch64/MCTargetDesc/AArch64WinCOFFObjectWriter.cpp [5:5]
+ lib/Target/AArch64/MCTargetDesc/AArch64WinCOFFStreamer.cpp [5:5]
+ lib/Target/AArch64/MCTargetDesc/AArch64WinCOFFStreamer.h [5:5]
+ lib/Target/AArch64/SMEABIPass.cpp [5:5]
+ lib/Target/AArch64/SMEInstrFormats.td [5:5]
+ lib/Target/AArch64/SVEInstrFormats.td [5:5]
+ lib/Target/AArch64/SVEIntrinsicOpts.cpp [5:5]
+ lib/Target/AArch64/TargetInfo/AArch64TargetInfo.cpp [5:5]
+ lib/Target/AArch64/TargetInfo/AArch64TargetInfo.h [5:5]
+ lib/Target/AArch64/Utils/AArch64BaseInfo.cpp [5:5]
+ lib/Target/AArch64/Utils/AArch64BaseInfo.h [5:5]
+ lib/Target/AArch64/Utils/AArch64SMEAttributes.cpp [5:5]
+ lib/Target/AArch64/Utils/AArch64SMEAttributes.h [5:5]
+ lib/Target/ARM/A15SDOptimizer.cpp [5:5]
+ lib/Target/ARM/ARM.h [5:5]
+ lib/Target/ARM/ARM.td [5:5]
+ lib/Target/ARM/ARMAsmPrinter.cpp [5:5]
+ lib/Target/ARM/ARMAsmPrinter.h [5:5]
+ lib/Target/ARM/ARMBaseInstrInfo.cpp [5:5]
+ lib/Target/ARM/ARMBaseInstrInfo.h [5:5]
+ lib/Target/ARM/ARMBaseRegisterInfo.cpp [5:5]
+ lib/Target/ARM/ARMBaseRegisterInfo.h [5:5]
+ lib/Target/ARM/ARMBasicBlockInfo.cpp [5:5]
+ lib/Target/ARM/ARMBasicBlockInfo.h [5:5]
+ lib/Target/ARM/ARMBlockPlacement.cpp [5:5]
+ lib/Target/ARM/ARMBranchTargets.cpp [5:5]
+ lib/Target/ARM/ARMCallLowering.cpp [5:5]
+ lib/Target/ARM/ARMCallLowering.h [5:5]
+ lib/Target/ARM/ARMCallingConv.cpp [5:5]
+ lib/Target/ARM/ARMCallingConv.h [5:5]
+ lib/Target/ARM/ARMCallingConv.td [5:5]
+ lib/Target/ARM/ARMConstantIslandPass.cpp [5:5]
+ lib/Target/ARM/ARMConstantPoolValue.cpp [5:5]
+ lib/Target/ARM/ARMConstantPoolValue.h [5:5]
+ lib/Target/ARM/ARMExpandPseudoInsts.cpp [5:5]
+ lib/Target/ARM/ARMFastISel.cpp [5:5]
+ lib/Target/ARM/ARMFeatures.h [5:5]
+ lib/Target/ARM/ARMFixCortexA57AES1742098Pass.cpp [5:5]
+ lib/Target/ARM/ARMFrameLowering.cpp [5:5]
+ lib/Target/ARM/ARMFrameLowering.h [5:5]
+ lib/Target/ARM/ARMHazardRecognizer.cpp [5:5]
+ lib/Target/ARM/ARMHazardRecognizer.h [5:5]
+ lib/Target/ARM/ARMISelDAGToDAG.cpp [5:5]
+ lib/Target/ARM/ARMISelLowering.cpp [5:5]
+ lib/Target/ARM/ARMISelLowering.h [5:5]
+ lib/Target/ARM/ARMInstrCDE.td [5:5]
+ lib/Target/ARM/ARMInstrFormats.td [5:5]
+ lib/Target/ARM/ARMInstrInfo.cpp [5:5]
+ lib/Target/ARM/ARMInstrInfo.h [5:5]
+ lib/Target/ARM/ARMInstrInfo.td [5:5]
+ lib/Target/ARM/ARMInstrMVE.td [5:5]
+ lib/Target/ARM/ARMInstrNEON.td [5:5]
+ lib/Target/ARM/ARMInstrThumb.td [5:5]
+ lib/Target/ARM/ARMInstrThumb2.td [5:5]
+ lib/Target/ARM/ARMInstrVFP.td [5:5]
+ lib/Target/ARM/ARMInstructionSelector.cpp [5:5]
+ lib/Target/ARM/ARMLegalizerInfo.cpp [5:5]
+ lib/Target/ARM/ARMLegalizerInfo.h [5:5]
+ lib/Target/ARM/ARMLoadStoreOptimizer.cpp [5:5]
+ lib/Target/ARM/ARMLowOverheadLoops.cpp [5:5]
+ lib/Target/ARM/ARMMCInstLower.cpp [5:5]
+ lib/Target/ARM/ARMMachineFunctionInfo.cpp [5:5]
+ lib/Target/ARM/ARMMachineFunctionInfo.h [5:5]
+ lib/Target/ARM/ARMMacroFusion.cpp [5:5]
+ lib/Target/ARM/ARMMacroFusion.h [5:5]
+ lib/Target/ARM/ARMOptimizeBarriersPass.cpp [6:6]
+ lib/Target/ARM/ARMParallelDSP.cpp [5:5]
+ lib/Target/ARM/ARMPerfectShuffle.h [5:5]
+ lib/Target/ARM/ARMPredicates.td [5:5]
+ lib/Target/ARM/ARMRegisterBankInfo.cpp [5:5]
+ lib/Target/ARM/ARMRegisterBankInfo.h [5:5]
+ lib/Target/ARM/ARMRegisterBanks.td [5:5]
+ lib/Target/ARM/ARMRegisterInfo.cpp [5:5]
+ lib/Target/ARM/ARMRegisterInfo.h [5:5]
+ lib/Target/ARM/ARMRegisterInfo.td [5:5]
+ lib/Target/ARM/ARMSLSHardening.cpp [5:5]
+ lib/Target/ARM/ARMSchedule.td [5:5]
+ lib/Target/ARM/ARMScheduleA57.td [5:5]
+ lib/Target/ARM/ARMScheduleA57WriteRes.td [5:5]
+ lib/Target/ARM/ARMScheduleA8.td [5:5]
+ lib/Target/ARM/ARMScheduleA9.td [5:5]
+ lib/Target/ARM/ARMScheduleM4.td [5:5]
+ lib/Target/ARM/ARMScheduleM55.td [5:5]
+ lib/Target/ARM/ARMScheduleM7.td [5:5]
+ lib/Target/ARM/ARMScheduleR52.td [5:5]
+ lib/Target/ARM/ARMScheduleSwift.td [5:5]
+ lib/Target/ARM/ARMScheduleV6.td [5:5]
+ lib/Target/ARM/ARMSelectionDAGInfo.cpp [5:5]
+ lib/Target/ARM/ARMSelectionDAGInfo.h [5:5]
+ lib/Target/ARM/ARMSubtarget.cpp [5:5]
+ lib/Target/ARM/ARMSubtarget.h [5:5]
+ lib/Target/ARM/ARMSystemRegister.td [5:5]
+ lib/Target/ARM/ARMTargetMachine.cpp [5:5]
+ lib/Target/ARM/ARMTargetMachine.h [5:5]
+ lib/Target/ARM/ARMTargetObjectFile.cpp [5:5]
+ lib/Target/ARM/ARMTargetObjectFile.h [5:5]
+ lib/Target/ARM/ARMTargetTransformInfo.cpp [5:5]
+ lib/Target/ARM/ARMTargetTransformInfo.h [5:5]
+ lib/Target/ARM/AsmParser/ARMAsmParser.cpp [5:5]
+ lib/Target/ARM/Disassembler/ARMDisassembler.cpp [5:5]
+ lib/Target/ARM/MCTargetDesc/ARMAddressingModes.h [5:5]
+ lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp [5:5]
+ lib/Target/ARM/MCTargetDesc/ARMAsmBackend.h [5:5]
+ lib/Target/ARM/MCTargetDesc/ARMAsmBackendDarwin.h [5:5]
+ lib/Target/ARM/MCTargetDesc/ARMAsmBackendELF.h [5:5]
+ lib/Target/ARM/MCTargetDesc/ARMAsmBackendWinCOFF.h [5:5]
+ lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h [5:5]
+ lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp [5:5]
+ lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp [5:5]
+ lib/Target/ARM/MCTargetDesc/ARMFixupKinds.h [5:5]
+ lib/Target/ARM/MCTargetDesc/ARMInstPrinter.cpp [5:5]
+ lib/Target/ARM/MCTargetDesc/ARMInstPrinter.h [5:5]
+ lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp [5:5]
+ lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.h [5:5]
+ lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp [5:5]
+ lib/Target/ARM/MCTargetDesc/ARMMCExpr.cpp [5:5]
+ lib/Target/ARM/MCTargetDesc/ARMMCExpr.h [5:5]
+ lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp [5:5]
+ lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.h [5:5]
+ lib/Target/ARM/MCTargetDesc/ARMMachORelocationInfo.cpp [5:5]
+ lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp [5:5]
+ lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp [5:5]
+ lib/Target/ARM/MCTargetDesc/ARMUnwindOpAsm.cpp [5:5]
+ lib/Target/ARM/MCTargetDesc/ARMUnwindOpAsm.h [5:5]
+ lib/Target/ARM/MCTargetDesc/ARMWinCOFFObjectWriter.cpp [5:5]
+ lib/Target/ARM/MCTargetDesc/ARMWinCOFFStreamer.cpp [5:5]
+ lib/Target/ARM/MLxExpansionPass.cpp [5:5]
+ lib/Target/ARM/MVEGatherScatterLowering.cpp [5:5]
+ lib/Target/ARM/MVELaneInterleavingPass.cpp [5:5]
+ lib/Target/ARM/MVETPAndVPTOptimisationsPass.cpp [5:5]
+ lib/Target/ARM/MVETailPredUtils.h [5:5]
+ lib/Target/ARM/MVETailPredication.cpp [5:5]
+ lib/Target/ARM/MVEVPTBlockPass.cpp [5:5]
+ lib/Target/ARM/TargetInfo/ARMTargetInfo.cpp [5:5]
+ lib/Target/ARM/TargetInfo/ARMTargetInfo.h [5:5]
+ lib/Target/ARM/Thumb1FrameLowering.cpp [5:5]
+ lib/Target/ARM/Thumb1FrameLowering.h [5:5]
+ lib/Target/ARM/Thumb1InstrInfo.cpp [5:5]
+ lib/Target/ARM/Thumb1InstrInfo.h [5:5]
+ lib/Target/ARM/Thumb2ITBlockPass.cpp [5:5]
+ lib/Target/ARM/Thumb2InstrInfo.cpp [5:5]
+ lib/Target/ARM/Thumb2InstrInfo.h [5:5]
+ lib/Target/ARM/Thumb2SizeReduction.cpp [5:5]
+ lib/Target/ARM/ThumbRegisterInfo.cpp [5:5]
+ lib/Target/ARM/ThumbRegisterInfo.h [5:5]
+ lib/Target/ARM/Utils/ARMBaseInfo.cpp [5:5]
+ lib/Target/ARM/Utils/ARMBaseInfo.h [5:5]
+ lib/Target/BPF/AsmParser/BPFAsmParser.cpp [5:5]
+ lib/Target/BPF/BPF.h [5:5]
+ lib/Target/BPF/BPF.td [5:5]
+ lib/Target/BPF/BPFAbstractMemberAccess.cpp [5:5]
+ lib/Target/BPF/BPFAdjustOpt.cpp [5:5]
+ lib/Target/BPF/BPFAsmPrinter.cpp [5:5]
+ lib/Target/BPF/BPFCORE.h [5:5]
+ lib/Target/BPF/BPFCallingConv.td [5:5]
+ lib/Target/BPF/BPFCheckAndAdjustIR.cpp [5:5]
+ lib/Target/BPF/BPFFrameLowering.cpp [5:5]
+ lib/Target/BPF/BPFFrameLowering.h [5:5]
+ lib/Target/BPF/BPFIRPeephole.cpp [5:5]
+ lib/Target/BPF/BPFISelDAGToDAG.cpp [5:5]
+ lib/Target/BPF/BPFISelLowering.cpp [5:5]
+ lib/Target/BPF/BPFISelLowering.h [5:5]
+ lib/Target/BPF/BPFInstrFormats.td [5:5]
+ lib/Target/BPF/BPFInstrInfo.cpp [5:5]
+ lib/Target/BPF/BPFInstrInfo.h [5:5]
+ lib/Target/BPF/BPFInstrInfo.td [5:5]
+ lib/Target/BPF/BPFMCInstLower.cpp [5:5]
+ lib/Target/BPF/BPFMCInstLower.h [5:5]
+ lib/Target/BPF/BPFMIChecking.cpp [5:5]
+ lib/Target/BPF/BPFMIPeephole.cpp [5:5]
+ lib/Target/BPF/BPFMISimplifyPatchable.cpp [5:5]
+ lib/Target/BPF/BPFPreserveDIType.cpp [5:5]
+ lib/Target/BPF/BPFRegisterInfo.cpp [5:5]
+ lib/Target/BPF/BPFRegisterInfo.h [5:5]
+ lib/Target/BPF/BPFRegisterInfo.td [5:5]
+ lib/Target/BPF/BPFSelectionDAGInfo.cpp [5:5]
+ lib/Target/BPF/BPFSelectionDAGInfo.h [5:5]
+ lib/Target/BPF/BPFSubtarget.cpp [5:5]
+ lib/Target/BPF/BPFSubtarget.h [5:5]
+ lib/Target/BPF/BPFTargetMachine.cpp [5:5]
+ lib/Target/BPF/BPFTargetMachine.h [5:5]
+ lib/Target/BPF/BPFTargetTransformInfo.h [5:5]
+ lib/Target/BPF/BTF.def [5:5]
+ lib/Target/BPF/BTF.h [5:5]
+ lib/Target/BPF/BTFDebug.cpp [5:5]
+ lib/Target/BPF/BTFDebug.h [5:5]
+ lib/Target/BPF/Disassembler/BPFDisassembler.cpp [5:5]
+ lib/Target/BPF/MCTargetDesc/BPFAsmBackend.cpp [5:5]
+ lib/Target/BPF/MCTargetDesc/BPFELFObjectWriter.cpp [5:5]
+ lib/Target/BPF/MCTargetDesc/BPFInstPrinter.cpp [5:5]
+ lib/Target/BPF/MCTargetDesc/BPFInstPrinter.h [5:5]
+ lib/Target/BPF/MCTargetDesc/BPFMCAsmInfo.h [5:5]
+ lib/Target/BPF/MCTargetDesc/BPFMCCodeEmitter.cpp [5:5]
+ lib/Target/BPF/MCTargetDesc/BPFMCTargetDesc.cpp [5:5]
+ lib/Target/BPF/MCTargetDesc/BPFMCTargetDesc.h [5:5]
+ lib/Target/BPF/TargetInfo/BPFTargetInfo.cpp [5:5]
+ lib/Target/BPF/TargetInfo/BPFTargetInfo.h [5:5]
+ lib/Target/LoongArch/AsmParser/LoongArchAsmParser.cpp [5:5]
+ lib/Target/LoongArch/Disassembler/LoongArchDisassembler.cpp [5:5]
+ lib/Target/LoongArch/LoongArch.h [5:5]
+ lib/Target/LoongArch/LoongArch.td [5:5]
+ lib/Target/LoongArch/LoongArchAsmPrinter.cpp [5:5]
+ lib/Target/LoongArch/LoongArchAsmPrinter.h [5:5]
+ lib/Target/LoongArch/LoongArchCallingConv.td [5:5]
+ lib/Target/LoongArch/LoongArchExpandAtomicPseudoInsts.cpp [5:5]
+ lib/Target/LoongArch/LoongArchExpandPseudoInsts.cpp [5:5]
+ lib/Target/LoongArch/LoongArchFloat32InstrInfo.td [5:5]
+ lib/Target/LoongArch/LoongArchFloat64InstrInfo.td [5:5]
+ lib/Target/LoongArch/LoongArchFloatInstrFormats.td [5:5]
+ lib/Target/LoongArch/LoongArchFrameLowering.cpp [5:5]
+ lib/Target/LoongArch/LoongArchFrameLowering.h [5:5]
+ lib/Target/LoongArch/LoongArchISelDAGToDAG.cpp [5:5]
+ lib/Target/LoongArch/LoongArchISelDAGToDAG.h [5:5]
+ lib/Target/LoongArch/LoongArchISelLowering.cpp [5:5]
+ lib/Target/LoongArch/LoongArchISelLowering.h [5:5]
+ lib/Target/LoongArch/LoongArchInstrFormats.td [5:5]
+ lib/Target/LoongArch/LoongArchInstrInfo.cpp [5:5]
+ lib/Target/LoongArch/LoongArchInstrInfo.h [5:5]
+ lib/Target/LoongArch/LoongArchInstrInfo.td [5:5]
+ lib/Target/LoongArch/LoongArchMCInstLower.cpp [5:5]
+ lib/Target/LoongArch/LoongArchMachineFunctionInfo.h [5:5]
+ lib/Target/LoongArch/LoongArchRegisterInfo.cpp [5:5]
+ lib/Target/LoongArch/LoongArchRegisterInfo.h [5:5]
+ lib/Target/LoongArch/LoongArchRegisterInfo.td [5:5]
+ lib/Target/LoongArch/LoongArchSubtarget.cpp [5:5]
+ lib/Target/LoongArch/LoongArchSubtarget.h [5:5]
+ lib/Target/LoongArch/LoongArchTargetMachine.cpp [5:5]
+ lib/Target/LoongArch/LoongArchTargetMachine.h [5:5]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.cpp [5:5]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.h [5:5]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchBaseInfo.cpp [5:5]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchBaseInfo.h [5:5]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchELFObjectWriter.cpp [5:5]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchELFStreamer.cpp [5:5]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchELFStreamer.h [5:5]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchFixupKinds.h [5:5]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchInstPrinter.cpp [5:5]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchInstPrinter.h [5:5]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchMCAsmInfo.cpp [5:5]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchMCAsmInfo.h [5:5]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchMCCodeEmitter.cpp [5:5]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchMCExpr.cpp [5:5]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchMCExpr.h [5:5]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchMCTargetDesc.cpp [5:5]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchMCTargetDesc.h [5:5]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchMatInt.cpp [5:5]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchMatInt.h [5:5]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchTargetStreamer.cpp [5:5]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchTargetStreamer.h [5:5]
+ lib/Target/LoongArch/TargetInfo/LoongArchTargetInfo.cpp [5:5]
+ lib/Target/LoongArch/TargetInfo/LoongArchTargetInfo.h [5:5]
+ lib/Target/NVPTX/MCTargetDesc/NVPTXBaseInfo.h [5:5]
+ lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp [5:5]
+ lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.h [5:5]
+ lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.cpp [5:5]
+ lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.h [5:5]
+ lib/Target/NVPTX/MCTargetDesc/NVPTXMCTargetDesc.cpp [5:5]
+ lib/Target/NVPTX/MCTargetDesc/NVPTXMCTargetDesc.h [5:5]
+ lib/Target/NVPTX/MCTargetDesc/NVPTXTargetStreamer.cpp [5:5]
+ lib/Target/NVPTX/MCTargetDesc/NVPTXTargetStreamer.h [5:5]
+ lib/Target/NVPTX/NVPTX.h [5:5]
+ lib/Target/NVPTX/NVPTX.td [5:5]
+ lib/Target/NVPTX/NVPTXAllocaHoisting.cpp [5:5]
+ lib/Target/NVPTX/NVPTXAllocaHoisting.h [5:5]
+ lib/Target/NVPTX/NVPTXAsmPrinter.cpp [5:5]
+ lib/Target/NVPTX/NVPTXAsmPrinter.h [5:5]
+ lib/Target/NVPTX/NVPTXAssignValidGlobalNames.cpp [5:5]
+ lib/Target/NVPTX/NVPTXAtomicLower.cpp [5:5]
+ lib/Target/NVPTX/NVPTXAtomicLower.h [5:5]
+ lib/Target/NVPTX/NVPTXFrameLowering.cpp [5:5]
+ lib/Target/NVPTX/NVPTXFrameLowering.h [5:5]
+ lib/Target/NVPTX/NVPTXGenericToNVVM.cpp [5:5]
+ lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp [5:5]
+ lib/Target/NVPTX/NVPTXISelDAGToDAG.h [5:5]
+ lib/Target/NVPTX/NVPTXISelLowering.cpp [5:5]
+ lib/Target/NVPTX/NVPTXISelLowering.h [5:5]
+ lib/Target/NVPTX/NVPTXImageOptimizer.cpp [5:5]
+ lib/Target/NVPTX/NVPTXInstrFormats.td [5:5]
+ lib/Target/NVPTX/NVPTXInstrInfo.cpp [5:5]
+ lib/Target/NVPTX/NVPTXInstrInfo.h [5:5]
+ lib/Target/NVPTX/NVPTXInstrInfo.td [5:5]
+ lib/Target/NVPTX/NVPTXIntrinsics.td [5:5]
+ lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp [5:5]
+ lib/Target/NVPTX/NVPTXLowerAggrCopies.h [5:5]
+ lib/Target/NVPTX/NVPTXLowerAlloca.cpp [5:5]
+ lib/Target/NVPTX/NVPTXLowerArgs.cpp [5:5]
+ lib/Target/NVPTX/NVPTXMCExpr.cpp [5:5]
+ lib/Target/NVPTX/NVPTXMCExpr.h [5:5]
+ lib/Target/NVPTX/NVPTXMachineFunctionInfo.h [5:5]
+ lib/Target/NVPTX/NVPTXPeephole.cpp [5:5]
+ lib/Target/NVPTX/NVPTXPrologEpilogPass.cpp [5:5]
+ lib/Target/NVPTX/NVPTXProxyRegErasure.cpp [5:5]
+ lib/Target/NVPTX/NVPTXRegisterInfo.cpp [5:5]
+ lib/Target/NVPTX/NVPTXRegisterInfo.h [5:5]
+ lib/Target/NVPTX/NVPTXRegisterInfo.td [5:5]
+ lib/Target/NVPTX/NVPTXReplaceImageHandles.cpp [5:5]
+ lib/Target/NVPTX/NVPTXSubtarget.cpp [5:5]
+ lib/Target/NVPTX/NVPTXSubtarget.h [5:5]
+ lib/Target/NVPTX/NVPTXTargetMachine.cpp [5:5]
+ lib/Target/NVPTX/NVPTXTargetMachine.h [5:5]
+ lib/Target/NVPTX/NVPTXTargetObjectFile.h [5:5]
+ lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp [5:5]
+ lib/Target/NVPTX/NVPTXTargetTransformInfo.h [5:5]
+ lib/Target/NVPTX/NVPTXUtilities.cpp [5:5]
+ lib/Target/NVPTX/NVPTXUtilities.h [5:5]
+ lib/Target/NVPTX/NVVMIntrRange.cpp [5:5]
+ lib/Target/NVPTX/NVVMReflect.cpp [5:5]
+ lib/Target/NVPTX/TargetInfo/NVPTXTargetInfo.cpp [5:5]
+ lib/Target/NVPTX/TargetInfo/NVPTXTargetInfo.h [5:5]
+ lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp [5:5]
+ lib/Target/PowerPC/Disassembler/PPCDisassembler.cpp [5:5]
+ lib/Target/PowerPC/GISel/PPCCallLowering.cpp [5:5]
+ lib/Target/PowerPC/GISel/PPCCallLowering.h [5:5]
+ lib/Target/PowerPC/GISel/PPCInstructionSelector.cpp [5:5]
+ lib/Target/PowerPC/GISel/PPCLegalizerInfo.cpp [5:5]
+ lib/Target/PowerPC/GISel/PPCLegalizerInfo.h [5:5]
+ lib/Target/PowerPC/GISel/PPCRegisterBankInfo.cpp [5:5]
+ lib/Target/PowerPC/GISel/PPCRegisterBankInfo.h [5:5]
+ lib/Target/PowerPC/GISel/PPCRegisterBanks.td [5:5]
+ lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp [5:5]
+ lib/Target/PowerPC/MCTargetDesc/PPCELFObjectWriter.cpp [5:5]
+ lib/Target/PowerPC/MCTargetDesc/PPCELFStreamer.cpp [5:5]
+ lib/Target/PowerPC/MCTargetDesc/PPCELFStreamer.h [5:5]
+ lib/Target/PowerPC/MCTargetDesc/PPCFixupKinds.h [5:5]
+ lib/Target/PowerPC/MCTargetDesc/PPCInstPrinter.cpp [5:5]
+ lib/Target/PowerPC/MCTargetDesc/PPCInstPrinter.h [5:5]
+ lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp [5:5]
+ lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.h [5:5]
+ lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp [5:5]
+ lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.h [5:5]
+ lib/Target/PowerPC/MCTargetDesc/PPCMCExpr.cpp [5:5]
+ lib/Target/PowerPC/MCTargetDesc/PPCMCExpr.h [5:5]
+ lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp [5:5]
+ lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.h [5:5]
+ lib/Target/PowerPC/MCTargetDesc/PPCPredicates.cpp [5:5]
+ lib/Target/PowerPC/MCTargetDesc/PPCPredicates.h [5:5]
+ lib/Target/PowerPC/MCTargetDesc/PPCXCOFFObjectWriter.cpp [6:6]
+ lib/Target/PowerPC/MCTargetDesc/PPCXCOFFStreamer.cpp [5:5]
+ lib/Target/PowerPC/MCTargetDesc/PPCXCOFFStreamer.h [5:5]
+ lib/Target/PowerPC/P10InstrResources.td [5:5]
+ lib/Target/PowerPC/P9InstrResources.td [5:5]
+ lib/Target/PowerPC/PPC.h [5:5]
+ lib/Target/PowerPC/PPC.td [5:5]
+ lib/Target/PowerPC/PPCAsmPrinter.cpp [5:5]
+ lib/Target/PowerPC/PPCBoolRetToInt.cpp [5:5]
+ lib/Target/PowerPC/PPCBranchCoalescing.cpp [5:5]
+ lib/Target/PowerPC/PPCBranchSelector.cpp [5:5]
+ lib/Target/PowerPC/PPCCCState.cpp [5:5]
+ lib/Target/PowerPC/PPCCCState.h [5:5]
+ lib/Target/PowerPC/PPCCTRLoops.cpp [5:5]
+ lib/Target/PowerPC/PPCCTRLoopsVerify.cpp [5:5]
+ lib/Target/PowerPC/PPCCallingConv.cpp [5:5]
+ lib/Target/PowerPC/PPCCallingConv.h [5:5]
+ lib/Target/PowerPC/PPCCallingConv.td [5:5]
+ lib/Target/PowerPC/PPCEarlyReturn.cpp [5:5]
+ lib/Target/PowerPC/PPCExpandAtomicPseudoInsts.cpp [5:5]
+ lib/Target/PowerPC/PPCExpandISEL.cpp [5:5]
+ lib/Target/PowerPC/PPCFastISel.cpp [5:5]
+ lib/Target/PowerPC/PPCFrameLowering.cpp [5:5]
+ lib/Target/PowerPC/PPCFrameLowering.h [5:5]
+ lib/Target/PowerPC/PPCGenRegisterBankInfo.def [5:5]
+ lib/Target/PowerPC/PPCGenScalarMASSEntries.cpp [5:5]
+ lib/Target/PowerPC/PPCHazardRecognizers.cpp [5:5]
+ lib/Target/PowerPC/PPCHazardRecognizers.h [5:5]
+ lib/Target/PowerPC/PPCISelDAGToDAG.cpp [5:5]
+ lib/Target/PowerPC/PPCISelLowering.cpp [5:5]
+ lib/Target/PowerPC/PPCISelLowering.h [5:5]
+ lib/Target/PowerPC/PPCInstr64Bit.td [5:5]
+ lib/Target/PowerPC/PPCInstrAltivec.td [5:5]
+ lib/Target/PowerPC/PPCInstrBuilder.h [5:5]
+ lib/Target/PowerPC/PPCInstrFormats.td [5:5]
+ lib/Target/PowerPC/PPCInstrHTM.td [5:5]
+ lib/Target/PowerPC/PPCInstrInfo.cpp [5:5]
+ lib/Target/PowerPC/PPCInstrInfo.h [5:5]
+ lib/Target/PowerPC/PPCInstrInfo.td [5:5]
+ lib/Target/PowerPC/PPCInstrP10.td [7:7]
+ lib/Target/PowerPC/PPCInstrSPE.td [5:5]
+ lib/Target/PowerPC/PPCInstrVSX.td [5:5]
+ lib/Target/PowerPC/PPCLoopInstrFormPrep.cpp [5:5]
+ lib/Target/PowerPC/PPCLowerMASSVEntries.cpp [5:5]
+ lib/Target/PowerPC/PPCMCInstLower.cpp [5:5]
+ lib/Target/PowerPC/PPCMIPeephole.cpp [5:5]
+ lib/Target/PowerPC/PPCMachineFunctionInfo.cpp [5:5]
+ lib/Target/PowerPC/PPCMachineFunctionInfo.h [5:5]
+ lib/Target/PowerPC/PPCMachineScheduler.cpp [5:5]
+ lib/Target/PowerPC/PPCMachineScheduler.h [5:5]
+ lib/Target/PowerPC/PPCMacroFusion.cpp [5:5]
+ lib/Target/PowerPC/PPCMacroFusion.h [5:5]
+ lib/Target/PowerPC/PPCPerfectShuffle.h [5:5]
+ lib/Target/PowerPC/PPCPfmCounters.td [5:5]
+ lib/Target/PowerPC/PPCPreEmitPeephole.cpp [5:5]
+ lib/Target/PowerPC/PPCReduceCRLogicals.cpp [5:5]
+ lib/Target/PowerPC/PPCRegisterInfo.cpp [5:5]
+ lib/Target/PowerPC/PPCRegisterInfo.h [5:5]
+ lib/Target/PowerPC/PPCRegisterInfo.td [5:5]
+ lib/Target/PowerPC/PPCRegisterInfoDMR.td [5:5]
+ lib/Target/PowerPC/PPCRegisterInfoMMA.td [5:5]
+ lib/Target/PowerPC/PPCSchedPredicates.td [5:5]
+ lib/Target/PowerPC/PPCSchedule.td [5:5]
+ lib/Target/PowerPC/PPCSchedule440.td [5:5]
+ lib/Target/PowerPC/PPCScheduleA2.td [5:5]
+ lib/Target/PowerPC/PPCScheduleE500.td [5:5]
+ lib/Target/PowerPC/PPCScheduleE500mc.td [5:5]
+ lib/Target/PowerPC/PPCScheduleE5500.td [5:5]
+ lib/Target/PowerPC/PPCScheduleG3.td [5:5]
+ lib/Target/PowerPC/PPCScheduleG4.td [5:5]
+ lib/Target/PowerPC/PPCScheduleG4Plus.td [5:5]
+ lib/Target/PowerPC/PPCScheduleG5.td [5:5]
+ lib/Target/PowerPC/PPCScheduleP10.td [5:5]
+ lib/Target/PowerPC/PPCScheduleP7.td [5:5]
+ lib/Target/PowerPC/PPCScheduleP8.td [5:5]
+ lib/Target/PowerPC/PPCScheduleP9.td [5:5]
+ lib/Target/PowerPC/PPCSubtarget.cpp [5:5]
+ lib/Target/PowerPC/PPCSubtarget.h [5:5]
+ lib/Target/PowerPC/PPCTLSDynamicCall.cpp [5:5]
+ lib/Target/PowerPC/PPCTOCRegDeps.cpp [5:5]
+ lib/Target/PowerPC/PPCTargetMachine.cpp [5:5]
+ lib/Target/PowerPC/PPCTargetMachine.h [5:5]
+ lib/Target/PowerPC/PPCTargetObjectFile.cpp [5:5]
+ lib/Target/PowerPC/PPCTargetObjectFile.h [5:5]
+ lib/Target/PowerPC/PPCTargetStreamer.h [5:5]
+ lib/Target/PowerPC/PPCTargetTransformInfo.cpp [5:5]
+ lib/Target/PowerPC/PPCTargetTransformInfo.h [5:5]
+ lib/Target/PowerPC/PPCVSXCopy.cpp [5:5]
+ lib/Target/PowerPC/PPCVSXFMAMutate.cpp [5:5]
+ lib/Target/PowerPC/PPCVSXSwapRemoval.cpp [5:5]
+ lib/Target/PowerPC/TargetInfo/PowerPCTargetInfo.cpp [5:5]
+ lib/Target/PowerPC/TargetInfo/PowerPCTargetInfo.h [5:5]
+ lib/Target/RISCV/GISel/RISCVCallLowering.cpp [5:5]
+ lib/Target/RISCV/GISel/RISCVCallLowering.h [5:5]
+ lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp [5:5]
+ lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp [5:5]
+ lib/Target/RISCV/GISel/RISCVLegalizerInfo.h [5:5]
+ lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp [5:5]
+ lib/Target/RISCV/GISel/RISCVRegisterBankInfo.h [5:5]
+ lib/Target/RISCV/GISel/RISCVRegisterBanks.td [5:5]
+ lib/Target/RISCV/RISCV.h [5:5]
+ lib/Target/RISCV/RISCV.td [5:5]
+ lib/Target/RISCV/RISCVAsmPrinter.cpp [5:5]
+ lib/Target/RISCV/RISCVCallingConv.td [5:5]
+ lib/Target/RISCV/RISCVCodeGenPrepare.cpp [5:5]
+ lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp [5:5]
+ lib/Target/RISCV/RISCVExpandPseudoInsts.cpp [5:5]
+ lib/Target/RISCV/RISCVFeatures.td [5:5]
+ lib/Target/RISCV/RISCVFrameLowering.cpp [5:5]
+ lib/Target/RISCV/RISCVFrameLowering.h [5:5]
+ lib/Target/RISCV/RISCVGatherScatterLowering.cpp [5:5]
+ lib/Target/RISCV/RISCVISelDAGToDAG.cpp [5:5]
+ lib/Target/RISCV/RISCVISelDAGToDAG.h [5:5]
+ lib/Target/RISCV/RISCVISelLowering.cpp [5:5]
+ lib/Target/RISCV/RISCVISelLowering.h [5:5]
+ lib/Target/RISCV/RISCVInsertVSETVLI.cpp [5:5]
+ lib/Target/RISCV/RISCVInstrFormats.td [5:5]
+ lib/Target/RISCV/RISCVInstrFormatsC.td [5:5]
+ lib/Target/RISCV/RISCVInstrFormatsV.td [5:5]
+ lib/Target/RISCV/RISCVInstrInfo.cpp [5:5]
+ lib/Target/RISCV/RISCVInstrInfo.h [5:5]
+ lib/Target/RISCV/RISCVInstrInfo.td [5:5]
+ lib/Target/RISCV/RISCVInstrInfoA.td [5:5]
+ lib/Target/RISCV/RISCVInstrInfoC.td [5:5]
+ lib/Target/RISCV/RISCVInstrInfoD.td [5:5]
+ lib/Target/RISCV/RISCVInstrInfoF.td [5:5]
+ lib/Target/RISCV/RISCVInstrInfoM.td [5:5]
+ lib/Target/RISCV/RISCVInstrInfoV.td [5:5]
+ lib/Target/RISCV/RISCVInstrInfoVPseudos.td [5:5]
+ lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td [5:5]
+ lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td [5:5]
+ lib/Target/RISCV/RISCVInstrInfoXTHead.td [5:5]
+ lib/Target/RISCV/RISCVInstrInfoXVentana.td [5:5]
+ lib/Target/RISCV/RISCVInstrInfoZb.td [5:5]
+ lib/Target/RISCV/RISCVInstrInfoZfh.td [5:5]
+ lib/Target/RISCV/RISCVInstrInfoZicbo.td [5:5]
+ lib/Target/RISCV/RISCVInstrInfoZk.td [5:5]
+ lib/Target/RISCV/RISCVMCInstLower.cpp [5:5]
+ lib/Target/RISCV/RISCVMachineFunctionInfo.cpp [5:5]
+ lib/Target/RISCV/RISCVMachineFunctionInfo.h [5:5]
+ lib/Target/RISCV/RISCVMacroFusion.cpp [5:5]
+ lib/Target/RISCV/RISCVMacroFusion.h [5:5]
+ lib/Target/RISCV/RISCVMakeCompressible.cpp [5:5]
+ lib/Target/RISCV/RISCVMergeBaseOffset.cpp [5:5]
+ lib/Target/RISCV/RISCVProcessors.td [5:5]
+ lib/Target/RISCV/RISCVRedundantCopyElimination.cpp [5:5]
+ lib/Target/RISCV/RISCVRegisterInfo.cpp [5:5]
+ lib/Target/RISCV/RISCVRegisterInfo.h [5:5]
+ lib/Target/RISCV/RISCVRegisterInfo.td [5:5]
+ lib/Target/RISCV/RISCVSExtWRemoval.cpp [5:5]
+ lib/Target/RISCV/RISCVSchedRocket.td [5:5]
+ lib/Target/RISCV/RISCVSchedSiFive7.td [5:5]
+ lib/Target/RISCV/RISCVSchedSyntacoreSCR1.td [5:5]
+ lib/Target/RISCV/RISCVSchedule.td [5:5]
+ lib/Target/RISCV/RISCVScheduleV.td [5:5]
+ lib/Target/RISCV/RISCVScheduleZb.td [5:5]
+ lib/Target/RISCV/RISCVStripWSuffix.cpp [6:6]
+ lib/Target/RISCV/RISCVSubtarget.cpp [5:5]
+ lib/Target/RISCV/RISCVSubtarget.h [5:5]
+ lib/Target/RISCV/RISCVSystemOperands.td [5:5]
+ lib/Target/RISCV/RISCVTargetMachine.cpp [5:5]
+ lib/Target/RISCV/RISCVTargetMachine.h [5:5]
+ lib/Target/RISCV/RISCVTargetObjectFile.cpp [5:5]
+ lib/Target/RISCV/RISCVTargetObjectFile.h [5:5]
+ lib/Target/RISCV/RISCVTargetTransformInfo.cpp [5:5]
+ lib/Target/RISCV/RISCVTargetTransformInfo.h [5:5]
+ lib/Target/Target.cpp [5:5]
+ lib/Target/TargetIntrinsicInfo.cpp [5:5]
+ lib/Target/TargetLoweringObjectFile.cpp [5:5]
+ lib/Target/TargetMachine.cpp [5:5]
+ lib/Target/TargetMachineC.cpp [5:5]
+ lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp [5:5]
+ lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.cpp [5:5]
+ lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.h [5:5]
+ lib/Target/WebAssembly/Disassembler/WebAssemblyDisassembler.cpp [5:5]
+ lib/Target/WebAssembly/MCTargetDesc/WebAssemblyAsmBackend.cpp [5:5]
+ lib/Target/WebAssembly/MCTargetDesc/WebAssemblyFixupKinds.h [5:5]
+ lib/Target/WebAssembly/MCTargetDesc/WebAssemblyInstPrinter.cpp [5:5]
+ lib/Target/WebAssembly/MCTargetDesc/WebAssemblyInstPrinter.h [5:5]
+ lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.cpp [5:5]
+ lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.h [5:5]
+ lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp [5:5]
+ lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.cpp [5:5]
+ lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h [5:5]
+ lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.cpp [5:5]
+ lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.h [5:5]
+ lib/Target/WebAssembly/MCTargetDesc/WebAssemblyWasmObjectWriter.cpp [5:5]
+ lib/Target/WebAssembly/TargetInfo/WebAssemblyTargetInfo.cpp [5:5]
+ lib/Target/WebAssembly/TargetInfo/WebAssemblyTargetInfo.h [5:5]
+ lib/Target/WebAssembly/Utils/WebAssemblyTypeUtilities.cpp [5:5]
+ lib/Target/WebAssembly/Utils/WebAssemblyTypeUtilities.h [5:5]
+ lib/Target/WebAssembly/Utils/WebAssemblyUtilities.cpp [5:5]
+ lib/Target/WebAssembly/Utils/WebAssemblyUtilities.h [5:5]
+ lib/Target/WebAssembly/WebAssembly.h [5:5]
+ lib/Target/WebAssembly/WebAssembly.td [5:5]
+ lib/Target/WebAssembly/WebAssemblyAddMissingPrototypes.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyArgumentMove.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyAsmPrinter.h [5:5]
+ lib/Target/WebAssembly/WebAssemblyCFGSort.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyDebugFixup.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyDebugValueManager.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyDebugValueManager.h [5:5]
+ lib/Target/WebAssembly/WebAssemblyExceptionInfo.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyExceptionInfo.h [5:5]
+ lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyFastISel.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyFixBrTableDefaults.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyFixFunctionBitcasts.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyFixIrreducibleControlFlow.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyFrameLowering.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyFrameLowering.h [5:5]
+ lib/Target/WebAssembly/WebAssemblyISD.def [5:5]
+ lib/Target/WebAssembly/WebAssemblyISelDAGToDAG.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyISelLowering.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyISelLowering.h [5:5]
+ lib/Target/WebAssembly/WebAssemblyInstrAtomics.td [5:5]
+ lib/Target/WebAssembly/WebAssemblyInstrBulkMemory.td [5:5]
+ lib/Target/WebAssembly/WebAssemblyInstrCall.td [5:5]
+ lib/Target/WebAssembly/WebAssemblyInstrControl.td [5:5]
+ lib/Target/WebAssembly/WebAssemblyInstrConv.td [5:5]
+ lib/Target/WebAssembly/WebAssemblyInstrFloat.td [5:5]
+ lib/Target/WebAssembly/WebAssemblyInstrFormats.td [5:5]
+ lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyInstrInfo.h [5:5]
+ lib/Target/WebAssembly/WebAssemblyInstrInfo.td [5:5]
+ lib/Target/WebAssembly/WebAssemblyInstrInteger.td [5:5]
+ lib/Target/WebAssembly/WebAssemblyInstrMemory.td [5:5]
+ lib/Target/WebAssembly/WebAssemblyInstrRef.td [5:5]
+ lib/Target/WebAssembly/WebAssemblyInstrSIMD.td [5:5]
+ lib/Target/WebAssembly/WebAssemblyInstrTable.td [5:5]
+ lib/Target/WebAssembly/WebAssemblyLateEHPrepare.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyLowerBrUnless.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyLowerRefTypesIntPtrConv.cpp [6:6]
+ lib/Target/WebAssembly/WebAssemblyMCInstLower.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyMCInstLower.h [5:5]
+ lib/Target/WebAssembly/WebAssemblyMCLowerPrePass.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.h [5:5]
+ lib/Target/WebAssembly/WebAssemblyMemIntrinsicResults.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyNullifyDebugValueLists.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyOptimizeReturned.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyPeephole.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyRegColoring.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyRegNumbering.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyRegStackify.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyRegisterInfo.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyRegisterInfo.h [5:5]
+ lib/Target/WebAssembly/WebAssemblyRegisterInfo.td [5:5]
+ lib/Target/WebAssembly/WebAssemblyReplacePhysRegs.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.h [5:5]
+ lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.h [5:5]
+ lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblySortRegion.h [5:5]
+ lib/Target/WebAssembly/WebAssemblySubtarget.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblySubtarget.h [5:5]
+ lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyTargetMachine.h [5:5]
+ lib/Target/WebAssembly/WebAssemblyTargetObjectFile.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyTargetObjectFile.h [5:5]
+ lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h [5:5]
+ lib/Target/X86/AsmParser/X86AsmParser.cpp [5:5]
+ lib/Target/X86/AsmParser/X86AsmParserCommon.h [5:5]
+ lib/Target/X86/AsmParser/X86Operand.h [5:5]
+ lib/Target/X86/Disassembler/X86Disassembler.cpp [5:5]
+ lib/Target/X86/Disassembler/X86DisassemblerDecoder.h [5:5]
+ lib/Target/X86/ImmutableGraph.h [5:5]
+ lib/Target/X86/MCA/X86CustomBehaviour.cpp [5:5]
+ lib/Target/X86/MCA/X86CustomBehaviour.h [5:5]
+ lib/Target/X86/MCTargetDesc/X86ATTInstPrinter.cpp [5:5]
+ lib/Target/X86/MCTargetDesc/X86ATTInstPrinter.h [5:5]
+ lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp [5:5]
+ lib/Target/X86/MCTargetDesc/X86BaseInfo.h [5:5]
+ lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp [5:5]
+ lib/Target/X86/MCTargetDesc/X86FixupKinds.h [5:5]
+ lib/Target/X86/MCTargetDesc/X86InstComments.cpp [5:5]
+ lib/Target/X86/MCTargetDesc/X86InstComments.h [5:5]
+ lib/Target/X86/MCTargetDesc/X86InstPrinterCommon.cpp [5:5]
+ lib/Target/X86/MCTargetDesc/X86InstPrinterCommon.h [5:5]
+ lib/Target/X86/MCTargetDesc/X86InstrRelaxTables.cpp [5:5]
+ lib/Target/X86/MCTargetDesc/X86InstrRelaxTables.h [5:5]
+ lib/Target/X86/MCTargetDesc/X86IntelInstPrinter.cpp [5:5]
+ lib/Target/X86/MCTargetDesc/X86IntelInstPrinter.h [5:5]
+ lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp [5:5]
+ lib/Target/X86/MCTargetDesc/X86MCAsmInfo.h [5:5]
+ lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp [5:5]
+ lib/Target/X86/MCTargetDesc/X86MCExpr.h [5:5]
+ lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp [5:5]
+ lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h [5:5]
+ lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp [5:5]
+ lib/Target/X86/MCTargetDesc/X86MnemonicTables.cpp [5:5]
+ lib/Target/X86/MCTargetDesc/X86ShuffleDecode.cpp [5:5]
+ lib/Target/X86/MCTargetDesc/X86ShuffleDecode.h [5:5]
+ lib/Target/X86/MCTargetDesc/X86TargetStreamer.h [5:5]
+ lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp [5:5]
+ lib/Target/X86/MCTargetDesc/X86WinCOFFStreamer.cpp [5:5]
+ lib/Target/X86/MCTargetDesc/X86WinCOFFTargetStreamer.cpp [5:5]
+ lib/Target/X86/TargetInfo/X86TargetInfo.cpp [5:5]
+ lib/Target/X86/TargetInfo/X86TargetInfo.h [5:5]
+ lib/Target/X86/X86.h [5:5]
+ lib/Target/X86/X86.td [5:5]
+ lib/Target/X86/X86AsmPrinter.cpp [5:5]
+ lib/Target/X86/X86AsmPrinter.h [5:5]
+ lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp [5:5]
+ lib/Target/X86/X86AvoidTrailingCall.cpp [5:5]
+ lib/Target/X86/X86CallFrameOptimization.cpp [5:5]
+ lib/Target/X86/X86CallLowering.cpp [5:5]
+ lib/Target/X86/X86CallLowering.h [5:5]
+ lib/Target/X86/X86CallingConv.cpp [5:5]
+ lib/Target/X86/X86CallingConv.h [5:5]
+ lib/Target/X86/X86CallingConv.td [5:5]
+ lib/Target/X86/X86CmovConversion.cpp [5:5]
+ lib/Target/X86/X86DiscriminateMemOps.cpp [5:5]
+ lib/Target/X86/X86DomainReassignment.cpp [5:5]
+ lib/Target/X86/X86DynAllocaExpander.cpp [5:5]
+ lib/Target/X86/X86EvexToVex.cpp [6:6]
+ lib/Target/X86/X86ExpandPseudo.cpp [5:5]
+ lib/Target/X86/X86FastISel.cpp [5:5]
+ lib/Target/X86/X86FastPreTileConfig.cpp [5:5]
+ lib/Target/X86/X86FastTileConfig.cpp [5:5]
+ lib/Target/X86/X86FixupBWInsts.cpp [5:5]
+ lib/Target/X86/X86FixupLEAs.cpp [5:5]
+ lib/Target/X86/X86FixupSetCC.cpp [5:5]
+ lib/Target/X86/X86FlagsCopyLowering.cpp [5:5]
+ lib/Target/X86/X86FloatingPoint.cpp [5:5]
+ lib/Target/X86/X86FrameLowering.cpp [5:5]
+ lib/Target/X86/X86FrameLowering.h [5:5]
+ lib/Target/X86/X86GenRegisterBankInfo.def [5:5]
+ lib/Target/X86/X86ISelDAGToDAG.cpp [5:5]
+ lib/Target/X86/X86ISelLowering.cpp [5:5]
+ lib/Target/X86/X86ISelLowering.h [5:5]
+ lib/Target/X86/X86IndirectBranchTracking.cpp [5:5]
+ lib/Target/X86/X86IndirectThunks.cpp [5:5]
+ lib/Target/X86/X86InsertPrefetch.cpp [5:5]
+ lib/Target/X86/X86InsertWait.cpp [5:5]
+ lib/Target/X86/X86InstCombineIntrinsic.cpp [5:5]
+ lib/Target/X86/X86Instr3DNow.td [5:5]
+ lib/Target/X86/X86InstrAMX.td [5:5]
+ lib/Target/X86/X86InstrAVX512.td [5:5]
+ lib/Target/X86/X86InstrArithmetic.td [5:5]
+ lib/Target/X86/X86InstrBuilder.h [5:5]
+ lib/Target/X86/X86InstrCMovSetCC.td [5:5]
+ lib/Target/X86/X86InstrCompiler.td [5:5]
+ lib/Target/X86/X86InstrControl.td [5:5]
+ lib/Target/X86/X86InstrExtension.td [5:5]
+ lib/Target/X86/X86InstrFMA.td [5:5]
+ lib/Target/X86/X86InstrFMA3Info.cpp [5:5]
+ lib/Target/X86/X86InstrFMA3Info.h [5:5]
+ lib/Target/X86/X86InstrFPStack.td [5:5]
+ lib/Target/X86/X86InstrFoldTables.cpp [5:5]
+ lib/Target/X86/X86InstrFoldTables.h [5:5]
+ lib/Target/X86/X86InstrFormats.td [5:5]
+ lib/Target/X86/X86InstrFragmentsSIMD.td [5:5]
+ lib/Target/X86/X86InstrInfo.cpp [5:5]
+ lib/Target/X86/X86InstrInfo.h [5:5]
+ lib/Target/X86/X86InstrInfo.td [5:5]
+ lib/Target/X86/X86InstrKL.td [6:6]
+ lib/Target/X86/X86InstrMMX.td [5:5]
+ lib/Target/X86/X86InstrRAOINT.td [5:5]
+ lib/Target/X86/X86InstrSGX.td [5:5]
+ lib/Target/X86/X86InstrSNP.td [5:5]
+ lib/Target/X86/X86InstrSSE.td [5:5]
+ lib/Target/X86/X86InstrSVM.td [5:5]
+ lib/Target/X86/X86InstrShiftRotate.td [5:5]
+ lib/Target/X86/X86InstrSystem.td [5:5]
+ lib/Target/X86/X86InstrTDX.td [5:5]
+ lib/Target/X86/X86InstrTSX.td [5:5]
+ lib/Target/X86/X86InstrVMX.td [5:5]
+ lib/Target/X86/X86InstrVecCompiler.td [5:5]
+ lib/Target/X86/X86InstrXOP.td [5:5]
+ lib/Target/X86/X86InstructionSelector.cpp [5:5]
+ lib/Target/X86/X86InterleavedAccess.cpp [5:5]
+ lib/Target/X86/X86IntrinsicsInfo.h [5:5]
+ lib/Target/X86/X86KCFI.cpp [5:5]
+ lib/Target/X86/X86LegalizerInfo.cpp [5:5]
+ lib/Target/X86/X86LegalizerInfo.h [6:6]
+ lib/Target/X86/X86LoadValueInjectionLoadHardening.cpp [5:5]
+ lib/Target/X86/X86LoadValueInjectionRetHardening.cpp [5:5]
+ lib/Target/X86/X86LowerAMXIntrinsics.cpp [5:5]
+ lib/Target/X86/X86LowerAMXType.cpp [5:5]
+ lib/Target/X86/X86LowerTileCopy.cpp [5:5]
+ lib/Target/X86/X86MCInstLower.cpp [5:5]
+ lib/Target/X86/X86MachineFunctionInfo.cpp [5:5]
+ lib/Target/X86/X86MachineFunctionInfo.h [5:5]
+ lib/Target/X86/X86MacroFusion.cpp [5:5]
+ lib/Target/X86/X86MacroFusion.h [5:5]
+ lib/Target/X86/X86OptimizeLEAs.cpp [5:5]
+ lib/Target/X86/X86PadShortFunction.cpp [5:5]
+ lib/Target/X86/X86PartialReduction.cpp [5:5]
+ lib/Target/X86/X86PfmCounters.td [5:5]
+ lib/Target/X86/X86PreAMXConfig.cpp [5:5]
+ lib/Target/X86/X86PreTileConfig.cpp [5:5]
+ lib/Target/X86/X86RegisterBankInfo.cpp [5:5]
+ lib/Target/X86/X86RegisterBankInfo.h [5:5]
+ lib/Target/X86/X86RegisterBanks.td [5:5]
+ lib/Target/X86/X86RegisterInfo.cpp [5:5]
+ lib/Target/X86/X86RegisterInfo.h [5:5]
+ lib/Target/X86/X86RegisterInfo.td [5:5]
+ lib/Target/X86/X86ReturnThunks.cpp [5:5]
+ lib/Target/X86/X86SchedAlderlakeP.td [5:5]
+ lib/Target/X86/X86SchedBroadwell.td [5:5]
+ lib/Target/X86/X86SchedHaswell.td [5:5]
+ lib/Target/X86/X86SchedIceLake.td [5:5]
+ lib/Target/X86/X86SchedPredicates.td [5:5]
+ lib/Target/X86/X86SchedSandyBridge.td [5:5]
+ lib/Target/X86/X86SchedSkylakeClient.td [5:5]
+ lib/Target/X86/X86SchedSkylakeServer.td [5:5]
+ lib/Target/X86/X86Schedule.td [5:5]
+ lib/Target/X86/X86ScheduleAtom.td [5:5]
+ lib/Target/X86/X86ScheduleBdVer2.td [5:5]
+ lib/Target/X86/X86ScheduleBtVer2.td [5:5]
+ lib/Target/X86/X86ScheduleSLM.td [5:5]
+ lib/Target/X86/X86ScheduleZnver1.td [5:5]
+ lib/Target/X86/X86ScheduleZnver2.td [5:5]
+ lib/Target/X86/X86ScheduleZnver3.td [5:5]
+ lib/Target/X86/X86SelectionDAGInfo.cpp [5:5]
+ lib/Target/X86/X86SelectionDAGInfo.h [5:5]
+ lib/Target/X86/X86ShuffleDecodeConstantPool.cpp [5:5]
+ lib/Target/X86/X86ShuffleDecodeConstantPool.h [5:5]
+ lib/Target/X86/X86SpeculativeExecutionSideEffectSuppression.cpp [5:5]
+ lib/Target/X86/X86SpeculativeLoadHardening.cpp [5:5]
+ lib/Target/X86/X86Subtarget.cpp [5:5]
+ lib/Target/X86/X86Subtarget.h [5:5]
+ lib/Target/X86/X86TargetMachine.cpp [5:5]
+ lib/Target/X86/X86TargetMachine.h [5:5]
+ lib/Target/X86/X86TargetObjectFile.cpp [5:5]
+ lib/Target/X86/X86TargetObjectFile.h [5:5]
+ lib/Target/X86/X86TargetTransformInfo.cpp [5:5]
+ lib/Target/X86/X86TargetTransformInfo.h [5:5]
+ lib/Target/X86/X86TileConfig.cpp [5:5]
+ lib/Target/X86/X86VZeroUpper.cpp [5:5]
+ lib/Target/X86/X86WinEHState.cpp [5:5]
+ lib/TargetParser/AArch64TargetParser.cpp [5:5]
+ lib/TargetParser/ARMTargetParser.cpp [5:5]
+ lib/TargetParser/ARMTargetParserCommon.cpp [5:5]
+ lib/TargetParser/Host.cpp [5:5]
+ lib/TargetParser/LoongArchTargetParser.cpp [5:5]
+ lib/TargetParser/RISCVTargetParser.cpp [5:5]
+ lib/TargetParser/TargetParser.cpp [5:5]
+ lib/TargetParser/Triple.cpp [5:5]
+ lib/TargetParser/Unix/Host.inc [5:5]
+ lib/TargetParser/Windows/Host.inc [5:5]
+ lib/TargetParser/X86TargetParser.cpp [5:5]
+ lib/TextAPI/Architecture.cpp [5:5]
+ lib/TextAPI/ArchitectureSet.cpp [5:5]
+ lib/TextAPI/InterfaceFile.cpp [5:5]
+ lib/TextAPI/PackedVersion.cpp [5:5]
+ lib/TextAPI/Platform.cpp [5:5]
+ lib/TextAPI/Symbol.cpp [5:5]
+ lib/TextAPI/Target.cpp [5:5]
+ lib/TextAPI/TextAPIContext.h [5:5]
+ lib/TextAPI/TextStub.cpp [5:5]
+ lib/TextAPI/TextStubCommon.cpp [5:5]
+ lib/TextAPI/TextStubCommon.h [5:5]
+ lib/ToolDrivers/llvm-dlltool/DlltoolDriver.cpp [5:5]
+ lib/ToolDrivers/llvm-lib/LibDriver.cpp [5:5]
+ lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp [5:5]
+ lib/Transforms/AggressiveInstCombine/AggressiveInstCombineInternal.h [5:5]
+ lib/Transforms/AggressiveInstCombine/TruncInstCombine.cpp [5:5]
+ lib/Transforms/CFGuard/CFGuard.cpp [5:5]
+ lib/Transforms/Coroutines/CoroCleanup.cpp [5:5]
+ lib/Transforms/Coroutines/CoroConditionalWrapper.cpp [5:5]
+ lib/Transforms/Coroutines/CoroEarly.cpp [5:5]
+ lib/Transforms/Coroutines/CoroElide.cpp [5:5]
+ lib/Transforms/Coroutines/CoroFrame.cpp [5:5]
+ lib/Transforms/Coroutines/CoroInstr.h [5:5]
+ lib/Transforms/Coroutines/CoroInternal.h [5:5]
+ lib/Transforms/Coroutines/CoroSplit.cpp [5:5]
+ lib/Transforms/Coroutines/Coroutines.cpp [5:5]
+ lib/Transforms/IPO/AlwaysInliner.cpp [5:5]
+ lib/Transforms/IPO/Annotation2Metadata.cpp [5:5]
+ lib/Transforms/IPO/ArgumentPromotion.cpp [5:5]
+ lib/Transforms/IPO/Attributor.cpp [5:5]
+ lib/Transforms/IPO/AttributorAttributes.cpp [5:5]
+ lib/Transforms/IPO/BarrierNoopPass.cpp [5:5]
+ lib/Transforms/IPO/BlockExtractor.cpp [5:5]
+ lib/Transforms/IPO/CalledValuePropagation.cpp [5:5]
+ lib/Transforms/IPO/ConstantMerge.cpp [5:5]
+ lib/Transforms/IPO/CrossDSOCFI.cpp [5:5]
+ lib/Transforms/IPO/DeadArgumentElimination.cpp [5:5]
+ lib/Transforms/IPO/ElimAvailExtern.cpp [5:5]
+ lib/Transforms/IPO/ExtractGV.cpp [5:5]
+ lib/Transforms/IPO/ForceFunctionAttrs.cpp [5:5]
+ lib/Transforms/IPO/FunctionAttrs.cpp [5:5]
+ lib/Transforms/IPO/FunctionImport.cpp [5:5]
+ lib/Transforms/IPO/FunctionSpecialization.cpp [5:5]
+ lib/Transforms/IPO/GlobalDCE.cpp [5:5]
+ lib/Transforms/IPO/GlobalOpt.cpp [5:5]
+ lib/Transforms/IPO/GlobalSplit.cpp [5:5]
+ lib/Transforms/IPO/HotColdSplitting.cpp [5:5]
+ lib/Transforms/IPO/IPO.cpp [5:5]
+ lib/Transforms/IPO/IROutliner.cpp [5:5]
+ lib/Transforms/IPO/InferFunctionAttrs.cpp [5:5]
+ lib/Transforms/IPO/InlineSimple.cpp [5:5]
+ lib/Transforms/IPO/Inliner.cpp [5:5]
+ lib/Transforms/IPO/Internalize.cpp [5:5]
+ lib/Transforms/IPO/LoopExtractor.cpp [5:5]
+ lib/Transforms/IPO/LowerTypeTests.cpp [5:5]
+ lib/Transforms/IPO/MergeFunctions.cpp [5:5]
+ lib/Transforms/IPO/ModuleInliner.cpp [5:5]
+ lib/Transforms/IPO/OpenMPOpt.cpp [5:5]
+ lib/Transforms/IPO/PartialInlining.cpp [5:5]
+ lib/Transforms/IPO/PassManagerBuilder.cpp [5:5]
+ lib/Transforms/IPO/SCCP.cpp [5:5]
+ lib/Transforms/IPO/SampleContextTracker.cpp [5:5]
+ lib/Transforms/IPO/SampleProfile.cpp [5:5]
+ lib/Transforms/IPO/SampleProfileProbe.cpp [5:5]
+ lib/Transforms/IPO/StripDeadPrototypes.cpp [5:5]
+ lib/Transforms/IPO/StripSymbols.cpp [5:5]
+ lib/Transforms/IPO/SyntheticCountsPropagation.cpp [5:5]
+ lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp [5:5]
+ lib/Transforms/IPO/WholeProgramDevirt.cpp [5:5]
+ lib/Transforms/InstCombine/InstCombineAddSub.cpp [5:5]
+ lib/Transforms/InstCombine/InstCombineAndOrXor.cpp [5:5]
+ lib/Transforms/InstCombine/InstCombineAtomicRMW.cpp [5:5]
+ lib/Transforms/InstCombine/InstCombineCalls.cpp [5:5]
+ lib/Transforms/InstCombine/InstCombineCasts.cpp [5:5]
+ lib/Transforms/InstCombine/InstCombineCompares.cpp [5:5]
+ lib/Transforms/InstCombine/InstCombineInternal.h [5:5]
+ lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp [5:5]
+ lib/Transforms/InstCombine/InstCombineMulDivRem.cpp [5:5]
+ lib/Transforms/InstCombine/InstCombineNegator.cpp [5:5]
+ lib/Transforms/InstCombine/InstCombinePHI.cpp [5:5]
+ lib/Transforms/InstCombine/InstCombineSelect.cpp [5:5]
+ lib/Transforms/InstCombine/InstCombineShifts.cpp [5:5]
+ lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp [5:5]
+ lib/Transforms/InstCombine/InstCombineVectorOps.cpp [5:5]
+ lib/Transforms/InstCombine/InstructionCombining.cpp [5:5]
+ lib/Transforms/Instrumentation/AddressSanitizer.cpp [5:5]
+ lib/Transforms/Instrumentation/BoundsChecking.cpp [5:5]
+ lib/Transforms/Instrumentation/CFGMST.h [5:5]
+ lib/Transforms/Instrumentation/CGProfile.cpp [5:5]
+ lib/Transforms/Instrumentation/ControlHeightReduction.cpp [5:5]
+ lib/Transforms/Instrumentation/DataFlowSanitizer.cpp [5:5]
+ lib/Transforms/Instrumentation/GCOVProfiling.cpp [5:5]
+ lib/Transforms/Instrumentation/HWAddressSanitizer.cpp [5:5]
+ lib/Transforms/Instrumentation/IndirectCallPromotion.cpp [5:5]
+ lib/Transforms/Instrumentation/InstrOrderFile.cpp [5:5]
+ lib/Transforms/Instrumentation/InstrProfiling.cpp [5:5]
+ lib/Transforms/Instrumentation/Instrumentation.cpp [5:5]
+ lib/Transforms/Instrumentation/KCFI.cpp [5:5]
+ lib/Transforms/Instrumentation/MemProfiler.cpp [5:5]
+ lib/Transforms/Instrumentation/MemorySanitizer.cpp [5:5]
+ lib/Transforms/Instrumentation/PGOInstrumentation.cpp [5:5]
+ lib/Transforms/Instrumentation/PGOMemOPSizeOpt.cpp [5:5]
+ lib/Transforms/Instrumentation/PoisonChecking.cpp [5:5]
+ lib/Transforms/Instrumentation/SanitizerBinaryMetadata.cpp [5:5]
+ lib/Transforms/Instrumentation/SanitizerCoverage.cpp [5:5]
+ lib/Transforms/Instrumentation/ThreadSanitizer.cpp [5:5]
+ lib/Transforms/Instrumentation/ValueProfileCollector.cpp [5:5]
+ lib/Transforms/Instrumentation/ValueProfileCollector.h [5:5]
+ lib/Transforms/Instrumentation/ValueProfilePlugins.inc [5:5]
+ lib/Transforms/ObjCARC/ARCRuntimeEntryPoints.h [5:5]
+ lib/Transforms/ObjCARC/BlotMapVector.h [5:5]
+ lib/Transforms/ObjCARC/DependencyAnalysis.cpp [5:5]
+ lib/Transforms/ObjCARC/DependencyAnalysis.h [5:5]
+ lib/Transforms/ObjCARC/ObjCARC.cpp [5:5]
+ lib/Transforms/ObjCARC/ObjCARC.h [5:5]
+ lib/Transforms/ObjCARC/ObjCARCAPElim.cpp [5:5]
+ lib/Transforms/ObjCARC/ObjCARCContract.cpp [5:5]
+ lib/Transforms/ObjCARC/ObjCARCExpand.cpp [5:5]
+ lib/Transforms/ObjCARC/ObjCARCOpts.cpp [5:5]
+ lib/Transforms/ObjCARC/ProvenanceAnalysis.cpp [5:5]
+ lib/Transforms/ObjCARC/ProvenanceAnalysis.h [5:5]
+ lib/Transforms/ObjCARC/ProvenanceAnalysisEvaluator.cpp [5:5]
+ lib/Transforms/ObjCARC/PtrState.cpp [5:5]
+ lib/Transforms/ObjCARC/PtrState.h [5:5]
+ lib/Transforms/Scalar/ADCE.cpp [5:5]
+ lib/Transforms/Scalar/AlignmentFromAssumptions.cpp [6:6]
+ lib/Transforms/Scalar/AnnotationRemarks.cpp [5:5]
+ lib/Transforms/Scalar/BDCE.cpp [5:5]
+ lib/Transforms/Scalar/CallSiteSplitting.cpp [5:5]
+ lib/Transforms/Scalar/ConstantHoisting.cpp [5:5]
+ lib/Transforms/Scalar/ConstraintElimination.cpp [5:5]
+ lib/Transforms/Scalar/CorrelatedValuePropagation.cpp [5:5]
+ lib/Transforms/Scalar/DCE.cpp [5:5]
+ lib/Transforms/Scalar/DFAJumpThreading.cpp [5:5]
+ lib/Transforms/Scalar/DeadStoreElimination.cpp [5:5]
+ lib/Transforms/Scalar/DivRemPairs.cpp [5:5]
+ lib/Transforms/Scalar/EarlyCSE.cpp [5:5]
+ lib/Transforms/Scalar/FlattenCFGPass.cpp [5:5]
+ lib/Transforms/Scalar/Float2Int.cpp [5:5]
+ lib/Transforms/Scalar/GVN.cpp [5:5]
+ lib/Transforms/Scalar/GVNHoist.cpp [5:5]
+ lib/Transforms/Scalar/GVNSink.cpp [5:5]
+ lib/Transforms/Scalar/GuardWidening.cpp [5:5]
+ lib/Transforms/Scalar/IVUsersPrinter.cpp [5:5]
+ lib/Transforms/Scalar/IndVarSimplify.cpp [5:5]
+ lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp [5:5]
+ lib/Transforms/Scalar/InferAddressSpaces.cpp [5:5]
+ lib/Transforms/Scalar/InstSimplifyPass.cpp [5:5]
+ lib/Transforms/Scalar/JumpThreading.cpp [5:5]
+ lib/Transforms/Scalar/LICM.cpp [5:5]
+ lib/Transforms/Scalar/LoopAccessAnalysisPrinter.cpp [5:5]
+ lib/Transforms/Scalar/LoopBoundSplit.cpp [5:5]
+ lib/Transforms/Scalar/LoopDataPrefetch.cpp [5:5]
+ lib/Transforms/Scalar/LoopDeletion.cpp [5:5]
+ lib/Transforms/Scalar/LoopDistribute.cpp [5:5]
+ lib/Transforms/Scalar/LoopFlatten.cpp [5:5]
+ lib/Transforms/Scalar/LoopFuse.cpp [5:5]
+ lib/Transforms/Scalar/LoopIdiomRecognize.cpp [5:5]
+ lib/Transforms/Scalar/LoopInstSimplify.cpp [5:5]
+ lib/Transforms/Scalar/LoopInterchange.cpp [5:5]
+ lib/Transforms/Scalar/LoopLoadElimination.cpp [5:5]
+ lib/Transforms/Scalar/LoopPassManager.cpp [5:5]
+ lib/Transforms/Scalar/LoopPredication.cpp [5:5]
+ lib/Transforms/Scalar/LoopRerollPass.cpp [5:5]
+ lib/Transforms/Scalar/LoopRotation.cpp [5:5]
+ lib/Transforms/Scalar/LoopSimplifyCFG.cpp [5:5]
+ lib/Transforms/Scalar/LoopSink.cpp [5:5]
+ lib/Transforms/Scalar/LoopStrengthReduce.cpp [5:5]
+ lib/Transforms/Scalar/LoopUnrollAndJamPass.cpp [5:5]
+ lib/Transforms/Scalar/LoopUnrollPass.cpp [5:5]
+ lib/Transforms/Scalar/LoopVersioningLICM.cpp [5:5]
+ lib/Transforms/Scalar/LowerAtomicPass.cpp [5:5]
+ lib/Transforms/Scalar/LowerConstantIntrinsics.cpp [5:5]
+ lib/Transforms/Scalar/LowerExpectIntrinsic.cpp [5:5]
+ lib/Transforms/Scalar/LowerGuardIntrinsic.cpp [5:5]
+ lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp [5:5]
+ lib/Transforms/Scalar/LowerWidenableCondition.cpp [5:5]
+ lib/Transforms/Scalar/MakeGuardsExplicit.cpp [5:5]
+ lib/Transforms/Scalar/MemCpyOptimizer.cpp [5:5]
+ lib/Transforms/Scalar/MergeICmps.cpp [5:5]
+ lib/Transforms/Scalar/MergedLoadStoreMotion.cpp [5:5]
+ lib/Transforms/Scalar/NaryReassociate.cpp [5:5]
+ lib/Transforms/Scalar/NewGVN.cpp [5:5]
+ lib/Transforms/Scalar/PartiallyInlineLibCalls.cpp [5:5]
+ lib/Transforms/Scalar/PlaceSafepoints.cpp [5:5]
+ lib/Transforms/Scalar/Reassociate.cpp [5:5]
+ lib/Transforms/Scalar/Reg2Mem.cpp [5:5]
+ lib/Transforms/Scalar/RewriteStatepointsForGC.cpp [5:5]
+ lib/Transforms/Scalar/SCCP.cpp [5:5]
+ lib/Transforms/Scalar/SROA.cpp [5:5]
+ lib/Transforms/Scalar/Scalar.cpp [5:5]
+ lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp [6:6]
+ lib/Transforms/Scalar/Scalarizer.cpp [5:5]
+ lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp [5:5]
+ lib/Transforms/Scalar/SimpleLoopUnswitch.cpp [5:5]
+ lib/Transforms/Scalar/SimplifyCFGPass.cpp [5:5]
+ lib/Transforms/Scalar/Sink.cpp [5:5]
+ lib/Transforms/Scalar/SpeculativeExecution.cpp [5:5]
+ lib/Transforms/Scalar/StraightLineStrengthReduce.cpp [5:5]
+ lib/Transforms/Scalar/StructurizeCFG.cpp [5:5]
+ lib/Transforms/Scalar/TLSVariableHoist.cpp [5:5]
+ lib/Transforms/Scalar/TailRecursionElimination.cpp [5:5]
+ lib/Transforms/Scalar/WarnMissedTransforms.cpp [5:5]
+ lib/Transforms/Utils/AMDGPUEmitPrintf.cpp [5:5]
+ lib/Transforms/Utils/ASanStackFrameLayout.cpp [5:5]
+ lib/Transforms/Utils/AddDiscriminators.cpp [5:5]
+ lib/Transforms/Utils/AssumeBundleBuilder.cpp [5:5]
+ lib/Transforms/Utils/BasicBlockUtils.cpp [5:5]
+ lib/Transforms/Utils/BreakCriticalEdges.cpp [5:5]
+ lib/Transforms/Utils/BuildLibCalls.cpp [5:5]
+ lib/Transforms/Utils/BypassSlowDivision.cpp [5:5]
+ lib/Transforms/Utils/CallGraphUpdater.cpp [5:5]
+ lib/Transforms/Utils/CallPromotionUtils.cpp [5:5]
+ lib/Transforms/Utils/CanonicalizeAliases.cpp [5:5]
+ lib/Transforms/Utils/CanonicalizeFreezeInLoops.cpp [5:5]
+ lib/Transforms/Utils/CloneFunction.cpp [5:5]
+ lib/Transforms/Utils/CloneModule.cpp [5:5]
+ lib/Transforms/Utils/CodeExtractor.cpp [5:5]
+ lib/Transforms/Utils/CodeLayout.cpp [5:5]
+ lib/Transforms/Utils/CodeMoverUtils.cpp [5:5]
+ lib/Transforms/Utils/CtorUtils.cpp [5:5]
+ lib/Transforms/Utils/Debugify.cpp [5:5]
+ lib/Transforms/Utils/DemoteRegToStack.cpp [5:5]
+ lib/Transforms/Utils/EntryExitInstrumenter.cpp [5:5]
+ lib/Transforms/Utils/EscapeEnumerator.cpp [5:5]
+ lib/Transforms/Utils/Evaluator.cpp [5:5]
+ lib/Transforms/Utils/FixIrreducible.cpp [5:5]
+ lib/Transforms/Utils/FlattenCFG.cpp [5:5]
+ lib/Transforms/Utils/FunctionComparator.cpp [5:5]
+ lib/Transforms/Utils/FunctionImportUtils.cpp [5:5]
+ lib/Transforms/Utils/GlobalStatus.cpp [5:5]
+ lib/Transforms/Utils/GuardUtils.cpp [5:5]
+ lib/Transforms/Utils/HelloWorld.cpp [5:5]
+ lib/Transforms/Utils/InjectTLIMappings.cpp [5:5]
+ lib/Transforms/Utils/InlineFunction.cpp [5:5]
+ lib/Transforms/Utils/InstructionNamer.cpp [5:5]
+ lib/Transforms/Utils/IntegerDivision.cpp [5:5]
+ lib/Transforms/Utils/LCSSA.cpp [5:5]
+ lib/Transforms/Utils/LibCallsShrinkWrap.cpp [5:5]
+ lib/Transforms/Utils/Local.cpp [5:5]
+ lib/Transforms/Utils/LoopPeel.cpp [5:5]
+ lib/Transforms/Utils/LoopRotationUtils.cpp [5:5]
+ lib/Transforms/Utils/LoopSimplify.cpp [5:5]
+ lib/Transforms/Utils/LoopUnroll.cpp [5:5]
+ lib/Transforms/Utils/LoopUnrollAndJam.cpp [5:5]
+ lib/Transforms/Utils/LoopUnrollRuntime.cpp [5:5]
+ lib/Transforms/Utils/LoopUtils.cpp [5:5]
+ lib/Transforms/Utils/LoopVersioning.cpp [5:5]
+ lib/Transforms/Utils/LowerAtomic.cpp [5:5]
+ lib/Transforms/Utils/LowerGlobalDtors.cpp [5:5]
+ lib/Transforms/Utils/LowerIFunc.cpp [5:5]
+ lib/Transforms/Utils/LowerInvoke.cpp [5:5]
+ lib/Transforms/Utils/LowerMemIntrinsics.cpp [5:5]
+ lib/Transforms/Utils/LowerSwitch.cpp [5:5]
+ lib/Transforms/Utils/MatrixUtils.cpp [5:5]
+ lib/Transforms/Utils/Mem2Reg.cpp [5:5]
+ lib/Transforms/Utils/MemoryOpRemark.cpp [5:5]
+ lib/Transforms/Utils/MemoryTaggingSupport.cpp [4:4]
+ lib/Transforms/Utils/MetaRenamer.cpp [5:5]
+ lib/Transforms/Utils/MisExpect.cpp [5:5]
+ lib/Transforms/Utils/ModuleUtils.cpp [5:5]
+ lib/Transforms/Utils/NameAnonGlobals.cpp [5:5]
+ lib/Transforms/Utils/PredicateInfo.cpp [5:5]
+ lib/Transforms/Utils/PromoteMemoryToRegister.cpp [5:5]
+ lib/Transforms/Utils/RelLookupTableConverter.cpp [5:5]
+ lib/Transforms/Utils/SCCPSolver.cpp [5:5]
+ lib/Transforms/Utils/SSAUpdater.cpp [5:5]
+ lib/Transforms/Utils/SSAUpdaterBulk.cpp [5:5]
+ lib/Transforms/Utils/SampleProfileInference.cpp [5:5]
+ lib/Transforms/Utils/SampleProfileLoaderBaseUtil.cpp [5:5]
+ lib/Transforms/Utils/SanitizerStats.cpp [5:5]
+ lib/Transforms/Utils/ScalarEvolutionExpander.cpp [5:5]
+ lib/Transforms/Utils/SimplifyCFG.cpp [5:5]
+ lib/Transforms/Utils/SimplifyIndVar.cpp [5:5]
+ lib/Transforms/Utils/SimplifyLibCalls.cpp [5:5]
+ lib/Transforms/Utils/SizeOpts.cpp [5:5]
+ lib/Transforms/Utils/SplitModule.cpp [5:5]
+ lib/Transforms/Utils/StripGCRelocates.cpp [5:5]
+ lib/Transforms/Utils/StripNonLineTableDebugInfo.cpp [5:5]
+ lib/Transforms/Utils/SymbolRewriter.cpp [5:5]
+ lib/Transforms/Utils/UnifyFunctionExitNodes.cpp [5:5]
+ lib/Transforms/Utils/UnifyLoopExits.cpp [5:5]
+ lib/Transforms/Utils/Utils.cpp [5:5]
+ lib/Transforms/Utils/ValueMapper.cpp [5:5]
+ lib/Transforms/Vectorize/LoadStoreVectorizer.cpp [5:5]
+ lib/Transforms/Vectorize/LoopVectorizationLegality.cpp [5:5]
+ lib/Transforms/Vectorize/LoopVectorizationPlanner.h [5:5]
+ lib/Transforms/Vectorize/LoopVectorize.cpp [5:5]
+ lib/Transforms/Vectorize/SLPVectorizer.cpp [5:5]
+ lib/Transforms/Vectorize/VPRecipeBuilder.h [5:5]
+ lib/Transforms/Vectorize/VPlan.cpp [5:5]
+ lib/Transforms/Vectorize/VPlan.h [5:5]
+ lib/Transforms/Vectorize/VPlanCFG.h [5:5]
+ lib/Transforms/Vectorize/VPlanDominatorTree.h [5:5]
+ lib/Transforms/Vectorize/VPlanHCFGBuilder.cpp [5:5]
+ lib/Transforms/Vectorize/VPlanHCFGBuilder.h [5:5]
+ lib/Transforms/Vectorize/VPlanRecipes.cpp [5:5]
+ lib/Transforms/Vectorize/VPlanSLP.cpp [5:5]
+ lib/Transforms/Vectorize/VPlanTransforms.cpp [5:5]
+ lib/Transforms/Vectorize/VPlanTransforms.h [5:5]
+ lib/Transforms/Vectorize/VPlanValue.h [5:5]
+ lib/Transforms/Vectorize/VPlanVerifier.cpp [5:5]
+ lib/Transforms/Vectorize/VPlanVerifier.h [5:5]
+ lib/Transforms/Vectorize/VectorCombine.cpp [5:5]
+ lib/Transforms/Vectorize/Vectorize.cpp [5:5]
+ lib/WindowsDriver/MSVCPaths.cpp [5:5]
+ lib/WindowsManifest/WindowsManifestMerger.cpp [5:5]
+ lib/XRay/BlockIndexer.cpp [5:5]
+ lib/XRay/BlockPrinter.cpp [5:5]
+ lib/XRay/BlockVerifier.cpp [5:5]
+ lib/XRay/FDRRecordProducer.cpp [5:5]
+ lib/XRay/FDRRecords.cpp [5:5]
+ lib/XRay/FDRTraceExpander.cpp [5:5]
+ lib/XRay/FDRTraceWriter.cpp [5:5]
+ lib/XRay/FileHeaderReader.cpp [5:5]
+ lib/XRay/InstrumentationMap.cpp [5:5]
+ lib/XRay/LogBuilderConsumer.cpp [5:5]
+ lib/XRay/Profile.cpp [5:5]
+ lib/XRay/RecordInitializer.cpp [5:5]
+ lib/XRay/RecordPrinter.cpp [5:5]
+ lib/XRay/Trace.cpp [5:5]
+ tools/bugpoint/BugDriver.cpp [5:5]
+ tools/bugpoint/BugDriver.h [5:5]
+ tools/bugpoint/CrashDebugger.cpp [5:5]
+ tools/bugpoint/ExecutionDriver.cpp [5:5]
+ tools/bugpoint/ExtractFunction.cpp [5:5]
+ tools/bugpoint/FindBugs.cpp [5:5]
+ tools/bugpoint/ListReducer.h [5:5]
+ tools/bugpoint/Miscompilation.cpp [5:5]
+ tools/bugpoint/OptimizerDriver.cpp [5:5]
+ tools/bugpoint/ToolRunner.cpp [5:5]
+ tools/bugpoint/ToolRunner.h [5:5]
+ tools/bugpoint/bugpoint.cpp [5:5]
+ tools/dsymutil/BinaryHolder.cpp [5:5]
+ tools/dsymutil/BinaryHolder.h [5:5]
+ tools/dsymutil/CFBundle.cpp [5:5]
+ tools/dsymutil/CFBundle.h [5:5]
+ tools/dsymutil/DebugMap.cpp [5:5]
+ tools/dsymutil/DebugMap.h [5:5]
+ tools/dsymutil/DwarfLinkerForBinary.cpp [5:5]
+ tools/dsymutil/DwarfLinkerForBinary.h [5:5]
+ tools/dsymutil/LinkUtils.h [5:5]
+ tools/dsymutil/MachODebugMapParser.cpp [5:5]
+ tools/dsymutil/MachOUtils.cpp [5:5]
+ tools/dsymutil/MachOUtils.h [5:5]
+ tools/dsymutil/Reproducer.cpp [5:5]
+ tools/dsymutil/Reproducer.h [5:5]
+ tools/dsymutil/SymbolMap.cpp [5:5]
+ tools/dsymutil/SymbolMap.h [5:5]
+ tools/dsymutil/dsymutil.cpp [5:5]
+ tools/dsymutil/dsymutil.h [5:5]
+ tools/gold/gold-plugin.cpp [5:5]
+ tools/llc/llc.cpp [5:5]
+ tools/lli/ChildTarget/ChildTarget.cpp [5:5]
+ tools/lli/ExecutionUtils.cpp [5:5]
+ tools/lli/ExecutionUtils.h [5:5]
+ tools/lli/ForwardingMemoryManager.h [5:5]
+ tools/lli/lli.cpp [5:5]
+ tools/llvm-ar/llvm-ar-driver.cpp [5:5]
+ tools/llvm-ar/llvm-ar.cpp [5:5]
+ tools/llvm-as/llvm-as.cpp [5:5]
+ tools/llvm-bcanalyzer/llvm-bcanalyzer.cpp [5:5]
+ tools/llvm-cat/llvm-cat.cpp [5:5]
+ tools/llvm-cfi-verify/lib/FileAnalysis.cpp [5:5]
+ tools/llvm-cfi-verify/lib/FileAnalysis.h [5:5]
+ tools/llvm-cfi-verify/lib/GraphBuilder.cpp [5:5]
+ tools/llvm-cfi-verify/lib/GraphBuilder.h [5:5]
+ tools/llvm-cfi-verify/llvm-cfi-verify.cpp [5:5]
+ tools/llvm-config/BuildVariables.inc [5:5]
+ tools/llvm-config/llvm-config.cpp [5:5]
+ tools/llvm-cov/CodeCoverage.cpp [5:5]
+ tools/llvm-cov/CoverageExporter.h [5:5]
+ tools/llvm-cov/CoverageExporterJson.cpp [5:5]
+ tools/llvm-cov/CoverageExporterJson.h [5:5]
+ tools/llvm-cov/CoverageExporterLcov.cpp [5:5]
+ tools/llvm-cov/CoverageExporterLcov.h [5:5]
+ tools/llvm-cov/CoverageFilters.cpp [5:5]
+ tools/llvm-cov/CoverageFilters.h [5:5]
+ tools/llvm-cov/CoverageReport.cpp [5:5]
+ tools/llvm-cov/CoverageReport.h [5:5]
+ tools/llvm-cov/CoverageSummaryInfo.cpp [5:5]
+ tools/llvm-cov/CoverageSummaryInfo.h [5:5]
+ tools/llvm-cov/CoverageViewOptions.h [5:5]
+ tools/llvm-cov/RenderingSupport.h [5:5]
+ tools/llvm-cov/SourceCoverageView.cpp [5:5]
+ tools/llvm-cov/SourceCoverageView.h [5:5]
+ tools/llvm-cov/SourceCoverageViewHTML.cpp [5:5]
+ tools/llvm-cov/SourceCoverageViewHTML.h [5:5]
+ tools/llvm-cov/SourceCoverageViewText.cpp [5:5]
+ tools/llvm-cov/SourceCoverageViewText.h [5:5]
+ tools/llvm-cov/TestingSupport.cpp [5:5]
+ tools/llvm-cov/gcov.cpp [5:5]
+ tools/llvm-cov/llvm-cov.cpp [5:5]
+ tools/llvm-cvtres/llvm-cvtres.cpp [5:5]
+ tools/llvm-cxxdump/Error.cpp [5:5]
+ tools/llvm-cxxdump/Error.h [5:5]
+ tools/llvm-cxxdump/llvm-cxxdump.cpp [5:5]
+ tools/llvm-cxxdump/llvm-cxxdump.h [5:5]
+ tools/llvm-cxxfilt/llvm-cxxfilt-driver.cpp [5:5]
+ tools/llvm-cxxfilt/llvm-cxxfilt.cpp [5:5]
+ tools/llvm-cxxmap/llvm-cxxmap.cpp [5:5]
+ tools/llvm-diff/lib/DiffConsumer.cpp [5:5]
+ tools/llvm-diff/lib/DiffConsumer.h [5:5]
+ tools/llvm-diff/lib/DiffLog.cpp [5:5]
+ tools/llvm-diff/lib/DiffLog.h [5:5]
+ tools/llvm-diff/lib/DifferenceEngine.cpp [5:5]
+ tools/llvm-diff/lib/DifferenceEngine.h [5:5]
+ tools/llvm-diff/llvm-diff.cpp [5:5]
+ tools/llvm-dis/llvm-dis.cpp [5:5]
+ tools/llvm-dwarfdump/SectionSizes.cpp [5:5]
+ tools/llvm-dwarfdump/Statistics.cpp [5:5]
+ tools/llvm-dwarfdump/llvm-dwarfdump.cpp [5:5]
+ tools/llvm-dwarfdump/llvm-dwarfdump.h [5:5]
+ tools/llvm-dwp/llvm-dwp.cpp [5:5]
+ tools/llvm-exegesis/lib/AArch64/Target.cpp [5:5]
+ tools/llvm-exegesis/lib/Analysis.cpp [5:5]
+ tools/llvm-exegesis/lib/Analysis.h [5:5]
+ tools/llvm-exegesis/lib/Assembler.cpp [5:5]
+ tools/llvm-exegesis/lib/Assembler.h [5:5]
+ tools/llvm-exegesis/lib/BenchmarkCode.h [5:5]
+ tools/llvm-exegesis/lib/BenchmarkResult.cpp [5:5]
+ tools/llvm-exegesis/lib/BenchmarkResult.h [5:5]
+ tools/llvm-exegesis/lib/BenchmarkRunner.cpp [5:5]
+ tools/llvm-exegesis/lib/BenchmarkRunner.h [5:5]
+ tools/llvm-exegesis/lib/Clustering.cpp [5:5]
+ tools/llvm-exegesis/lib/Clustering.h [5:5]
+ tools/llvm-exegesis/lib/CodeTemplate.cpp [5:5]
+ tools/llvm-exegesis/lib/CodeTemplate.h [5:5]
+ tools/llvm-exegesis/lib/Error.cpp [5:5]
+ tools/llvm-exegesis/lib/Error.h [5:5]
+ tools/llvm-exegesis/lib/LatencyBenchmarkRunner.cpp [5:5]
+ tools/llvm-exegesis/lib/LatencyBenchmarkRunner.h [5:5]
+ tools/llvm-exegesis/lib/LlvmState.cpp [5:5]
+ tools/llvm-exegesis/lib/LlvmState.h [5:5]
+ tools/llvm-exegesis/lib/MCInstrDescView.cpp [5:5]
+ tools/llvm-exegesis/lib/MCInstrDescView.h [5:5]
+ tools/llvm-exegesis/lib/ParallelSnippetGenerator.cpp [5:5]
+ tools/llvm-exegesis/lib/ParallelSnippetGenerator.h [5:5]
+ tools/llvm-exegesis/lib/PerfHelper.cpp [5:5]
+ tools/llvm-exegesis/lib/PerfHelper.h [5:5]
+ tools/llvm-exegesis/lib/PowerPC/Target.cpp [5:5]
+ tools/llvm-exegesis/lib/ProgressMeter.h [5:5]
+ tools/llvm-exegesis/lib/RegisterAliasing.cpp [5:5]
+ tools/llvm-exegesis/lib/RegisterAliasing.h [5:5]
+ tools/llvm-exegesis/lib/RegisterValue.cpp [5:5]
+ tools/llvm-exegesis/lib/RegisterValue.h [5:5]
+ tools/llvm-exegesis/lib/SchedClassResolution.cpp [5:5]
+ tools/llvm-exegesis/lib/SchedClassResolution.h [5:5]
+ tools/llvm-exegesis/lib/SerialSnippetGenerator.cpp [5:5]
+ tools/llvm-exegesis/lib/SerialSnippetGenerator.h [5:5]
+ tools/llvm-exegesis/lib/SnippetFile.cpp [5:5]
+ tools/llvm-exegesis/lib/SnippetFile.h [5:5]
+ tools/llvm-exegesis/lib/SnippetGenerator.cpp [5:5]
+ tools/llvm-exegesis/lib/SnippetGenerator.h [5:5]
+ tools/llvm-exegesis/lib/SnippetRepetitor.cpp [5:5]
+ tools/llvm-exegesis/lib/SnippetRepetitor.h [5:5]
+ tools/llvm-exegesis/lib/Target.cpp [5:5]
+ tools/llvm-exegesis/lib/Target.h [5:5]
+ tools/llvm-exegesis/lib/TargetSelect.h [5:5]
+ tools/llvm-exegesis/lib/UopsBenchmarkRunner.cpp [5:5]
+ tools/llvm-exegesis/lib/UopsBenchmarkRunner.h [5:5]
+ tools/llvm-exegesis/lib/X86/Target.cpp [5:5]
+ tools/llvm-exegesis/lib/X86/X86Counter.cpp [5:5]
+ tools/llvm-exegesis/lib/X86/X86Counter.h [5:5]
+ tools/llvm-exegesis/llvm-exegesis.cpp [5:5]
+ tools/llvm-extract/llvm-extract.cpp [5:5]
+ tools/llvm-gsymutil/llvm-gsymutil.cpp [5:5]
+ tools/llvm-ifs/ErrorCollector.cpp [5:5]
+ tools/llvm-ifs/ErrorCollector.h [5:5]
+ tools/llvm-ifs/llvm-ifs-driver.cpp [5:5]
+ tools/llvm-ifs/llvm-ifs.cpp [5:5]
+ tools/llvm-jitlink/llvm-jitlink-coff.cpp [5:5]
+ tools/llvm-jitlink/llvm-jitlink-elf.cpp [5:5]
+ tools/llvm-jitlink/llvm-jitlink-executor/llvm-jitlink-executor.cpp [5:5]
+ tools/llvm-jitlink/llvm-jitlink-macho.cpp [5:5]
+ tools/llvm-jitlink/llvm-jitlink.cpp [5:5]
+ tools/llvm-jitlink/llvm-jitlink.h [5:5]
+ tools/llvm-libtool-darwin/DependencyInfo.h [5:5]
+ tools/llvm-libtool-darwin/llvm-libtool-darwin.cpp [5:5]
+ tools/llvm-link/llvm-link.cpp [5:5]
+ tools/llvm-lipo/llvm-lipo-driver.cpp [5:5]
+ tools/llvm-lipo/llvm-lipo.cpp [5:5]
+ tools/llvm-lto/llvm-lto.cpp [5:5]
+ tools/llvm-lto2/llvm-lto2.cpp [5:5]
+ tools/llvm-mc/Disassembler.cpp [5:5]
+ tools/llvm-mc/Disassembler.h [5:5]
+ tools/llvm-mc/llvm-mc.cpp [5:5]
+ tools/llvm-mca/CodeRegion.cpp [5:5]
+ tools/llvm-mca/CodeRegion.h [5:5]
+ tools/llvm-mca/CodeRegionGenerator.cpp [5:5]
+ tools/llvm-mca/CodeRegionGenerator.h [5:5]
+ tools/llvm-mca/PipelinePrinter.cpp [5:5]
+ tools/llvm-mca/PipelinePrinter.h [5:5]
+ tools/llvm-mca/Views/BottleneckAnalysis.cpp [5:5]
+ tools/llvm-mca/Views/BottleneckAnalysis.h [5:5]
+ tools/llvm-mca/Views/DispatchStatistics.cpp [5:5]
+ tools/llvm-mca/Views/DispatchStatistics.h [5:5]
+ tools/llvm-mca/Views/InstructionInfoView.cpp [5:5]
+ tools/llvm-mca/Views/InstructionInfoView.h [5:5]
+ tools/llvm-mca/Views/InstructionView.cpp [5:5]
+ tools/llvm-mca/Views/InstructionView.h [5:5]
+ tools/llvm-mca/Views/RegisterFileStatistics.cpp [5:5]
+ tools/llvm-mca/Views/RegisterFileStatistics.h [5:5]
+ tools/llvm-mca/Views/ResourcePressureView.cpp [5:5]
+ tools/llvm-mca/Views/ResourcePressureView.h [5:5]
+ tools/llvm-mca/Views/RetireControlUnitStatistics.cpp [5:5]
+ tools/llvm-mca/Views/RetireControlUnitStatistics.h [5:5]
+ tools/llvm-mca/Views/SchedulerStatistics.cpp [5:5]
+ tools/llvm-mca/Views/SchedulerStatistics.h [5:5]
+ tools/llvm-mca/Views/SummaryView.cpp [5:5]
+ tools/llvm-mca/Views/SummaryView.h [5:5]
+ tools/llvm-mca/Views/TimelineView.cpp [5:5]
+ tools/llvm-mca/Views/TimelineView.h [5:5]
+ tools/llvm-mca/llvm-mca.cpp [5:5]
+ tools/llvm-ml/Disassembler.cpp [5:5]
+ tools/llvm-ml/Disassembler.h [5:5]
+ tools/llvm-ml/llvm-ml.cpp [5:5]
+ tools/llvm-modextract/llvm-modextract.cpp [5:5]
+ tools/llvm-mt/llvm-mt-driver.cpp [5:5]
+ tools/llvm-mt/llvm-mt.cpp [5:5]
+ tools/llvm-nm/llvm-nm-driver.cpp [5:5]
+ tools/llvm-nm/llvm-nm.cpp [5:5]
+ tools/llvm-objcopy/BitcodeStripOpts.td [5:5]
+ tools/llvm-objcopy/InstallNameToolOpts.td [5:5]
+ tools/llvm-objcopy/ObjcopyOptions.cpp [5:5]
+ tools/llvm-objcopy/ObjcopyOptions.h [5:5]
+ tools/llvm-objcopy/llvm-objcopy-driver.cpp [5:5]
+ tools/llvm-objcopy/llvm-objcopy.cpp [5:5]
+ tools/llvm-objdump/COFFDump.cpp [5:5]
+ tools/llvm-objdump/COFFDump.h [5:5]
+ tools/llvm-objdump/ELFDump.cpp [5:5]
+ tools/llvm-objdump/ELFDump.h [5:5]
+ tools/llvm-objdump/MachODump.cpp [5:5]
+ tools/llvm-objdump/MachODump.h [5:5]
+ tools/llvm-objdump/OffloadDump.cpp [5:5]
+ tools/llvm-objdump/OffloadDump.h [5:5]
+ tools/llvm-objdump/SourcePrinter.cpp [5:5]
+ tools/llvm-objdump/SourcePrinter.h [5:5]
+ tools/llvm-objdump/WasmDump.cpp [5:5]
+ tools/llvm-objdump/WasmDump.h [5:5]
+ tools/llvm-objdump/XCOFFDump.cpp [5:5]
+ tools/llvm-objdump/XCOFFDump.h [5:5]
+ tools/llvm-objdump/llvm-objdump.cpp [5:5]
+ tools/llvm-objdump/llvm-objdump.h [5:5]
+ tools/llvm-opt-report/OptReport.cpp [5:5]
+ tools/llvm-pdbutil/BytesOutputStyle.cpp [5:5]
+ tools/llvm-pdbutil/BytesOutputStyle.h [5:5]
+ tools/llvm-pdbutil/DumpOutputStyle.cpp [5:5]
+ tools/llvm-pdbutil/DumpOutputStyle.h [5:5]
+ tools/llvm-pdbutil/ExplainOutputStyle.cpp [5:5]
+ tools/llvm-pdbutil/ExplainOutputStyle.h [5:5]
+ tools/llvm-pdbutil/MinimalSymbolDumper.cpp [5:5]
+ tools/llvm-pdbutil/MinimalSymbolDumper.h [5:5]
+ tools/llvm-pdbutil/MinimalTypeDumper.cpp [5:5]
+ tools/llvm-pdbutil/MinimalTypeDumper.h [5:5]
+ tools/llvm-pdbutil/OutputStyle.h [5:5]
+ tools/llvm-pdbutil/PdbYaml.cpp [5:5]
+ tools/llvm-pdbutil/PdbYaml.h [5:5]
+ tools/llvm-pdbutil/PrettyBuiltinDumper.cpp [5:5]
+ tools/llvm-pdbutil/PrettyBuiltinDumper.h [5:5]
+ tools/llvm-pdbutil/PrettyClassDefinitionDumper.cpp [5:5]
+ tools/llvm-pdbutil/PrettyClassDefinitionDumper.h [5:5]
+ tools/llvm-pdbutil/PrettyClassLayoutGraphicalDumper.cpp [5:5]
+ tools/llvm-pdbutil/PrettyClassLayoutGraphicalDumper.h [5:5]
+ tools/llvm-pdbutil/PrettyCompilandDumper.cpp [5:5]
+ tools/llvm-pdbutil/PrettyCompilandDumper.h [5:5]
+ tools/llvm-pdbutil/PrettyEnumDumper.cpp [5:5]
+ tools/llvm-pdbutil/PrettyEnumDumper.h [5:5]
+ tools/llvm-pdbutil/PrettyExternalSymbolDumper.cpp [5:5]
+ tools/llvm-pdbutil/PrettyExternalSymbolDumper.h [5:5]
+ tools/llvm-pdbutil/PrettyFunctionDumper.cpp [5:5]
+ tools/llvm-pdbutil/PrettyFunctionDumper.h [5:5]
+ tools/llvm-pdbutil/PrettyTypeDumper.cpp [5:5]
+ tools/llvm-pdbutil/PrettyTypeDumper.h [5:5]
+ tools/llvm-pdbutil/PrettyTypedefDumper.cpp [5:5]
+ tools/llvm-pdbutil/PrettyTypedefDumper.h [5:5]
+ tools/llvm-pdbutil/PrettyVariableDumper.cpp [5:5]
+ tools/llvm-pdbutil/PrettyVariableDumper.h [5:5]
+ tools/llvm-pdbutil/StreamUtil.cpp [5:5]
+ tools/llvm-pdbutil/StreamUtil.h [5:5]
+ tools/llvm-pdbutil/TypeReferenceTracker.cpp [5:5]
+ tools/llvm-pdbutil/TypeReferenceTracker.h [5:5]
+ tools/llvm-pdbutil/YAMLOutputStyle.cpp [5:5]
+ tools/llvm-pdbutil/YAMLOutputStyle.h [5:5]
+ tools/llvm-pdbutil/llvm-pdbutil.cpp [5:5]
+ tools/llvm-pdbutil/llvm-pdbutil.h [5:5]
+ tools/llvm-profdata/llvm-profdata-driver.cpp [5:5]
+ tools/llvm-profdata/llvm-profdata.cpp [5:5]
+ tools/llvm-profgen/CSPreInliner.cpp [5:5]
+ tools/llvm-profgen/CSPreInliner.h [5:5]
+ tools/llvm-profgen/CallContext.h [5:5]
+ tools/llvm-profgen/ErrorHandling.h [5:5]
+ tools/llvm-profgen/MissingFrameInferrer.cpp [5:5]
+ tools/llvm-profgen/MissingFrameInferrer.h [5:5]
+ tools/llvm-profgen/PerfReader.cpp [5:5]
+ tools/llvm-profgen/PerfReader.h [5:5]
+ tools/llvm-profgen/ProfileGenerator.cpp [5:5]
+ tools/llvm-profgen/ProfileGenerator.h [5:5]
+ tools/llvm-profgen/ProfiledBinary.cpp [5:5]
+ tools/llvm-profgen/ProfiledBinary.h [5:5]
+ tools/llvm-profgen/llvm-profgen.cpp [5:5]
+ tools/llvm-rc/ResourceFileWriter.cpp [5:5]
+ tools/llvm-rc/ResourceFileWriter.h [5:5]
+ tools/llvm-rc/ResourceScriptCppFilter.cpp [5:5]
+ tools/llvm-rc/ResourceScriptCppFilter.h [5:5]
+ tools/llvm-rc/ResourceScriptParser.cpp [5:5]
+ tools/llvm-rc/ResourceScriptParser.h [5:5]
+ tools/llvm-rc/ResourceScriptStmt.cpp [4:4]
+ tools/llvm-rc/ResourceScriptStmt.h [5:5]
+ tools/llvm-rc/ResourceScriptToken.cpp [5:5]
+ tools/llvm-rc/ResourceScriptToken.h [5:5]
+ tools/llvm-rc/ResourceScriptTokenList.def [5:5]
+ tools/llvm-rc/ResourceVisitor.h [5:5]
+ tools/llvm-rc/llvm-rc-driver.cpp [5:5]
+ tools/llvm-rc/llvm-rc.cpp [5:5]
+ tools/llvm-readobj/ARMEHABIPrinter.h [5:5]
+ tools/llvm-readobj/ARMWinEHPrinter.cpp [5:5]
+ tools/llvm-readobj/ARMWinEHPrinter.h [5:5]
+ tools/llvm-readobj/COFFDumper.cpp [5:5]
+ tools/llvm-readobj/COFFImportDumper.cpp [5:5]
+ tools/llvm-readobj/DwarfCFIEHPrinter.h [5:5]
+ tools/llvm-readobj/ELFDumper.cpp [5:5]
+ tools/llvm-readobj/MachODumper.cpp [5:5]
+ tools/llvm-readobj/ObjDumper.cpp [5:5]
+ tools/llvm-readobj/ObjDumper.h [5:5]
+ tools/llvm-readobj/StackMapPrinter.h [5:5]
+ tools/llvm-readobj/WasmDumper.cpp [5:5]
+ tools/llvm-readobj/Win64EHDumper.cpp [5:5]
+ tools/llvm-readobj/Win64EHDumper.h [5:5]
+ tools/llvm-readobj/WindowsResourceDumper.cpp [5:5]
+ tools/llvm-readobj/WindowsResourceDumper.h [5:5]
+ tools/llvm-readobj/XCOFFDumper.cpp [5:5]
+ tools/llvm-readobj/llvm-readobj-driver.cpp [5:5]
+ tools/llvm-readobj/llvm-readobj.cpp [5:5]
+ tools/llvm-readobj/llvm-readobj.h [5:5]
+ tools/llvm-reduce/DeltaManager.cpp [5:5]
+ tools/llvm-reduce/DeltaManager.h [5:5]
+ tools/llvm-reduce/ReducerWorkItem.cpp [5:5]
+ tools/llvm-reduce/ReducerWorkItem.h [5:5]
+ tools/llvm-reduce/TestRunner.cpp [5:5]
+ tools/llvm-reduce/TestRunner.h [5:5]
+ tools/llvm-reduce/deltas/Delta.cpp [5:5]
+ tools/llvm-reduce/deltas/Delta.h [5:5]
+ tools/llvm-reduce/deltas/ReduceAliases.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceAliases.h [5:5]
+ tools/llvm-reduce/deltas/ReduceArguments.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceArguments.h [5:5]
+ tools/llvm-reduce/deltas/ReduceAttributes.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceAttributes.h [5:5]
+ tools/llvm-reduce/deltas/ReduceBasicBlocks.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceBasicBlocks.h [5:5]
+ tools/llvm-reduce/deltas/ReduceDIMetadata.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceDIMetadata.h [5:5]
+ tools/llvm-reduce/deltas/ReduceFunctionBodies.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceFunctionBodies.h [5:5]
+ tools/llvm-reduce/deltas/ReduceFunctions.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceFunctions.h [5:5]
+ tools/llvm-reduce/deltas/ReduceGlobalObjects.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceGlobalObjects.h [5:5]
+ tools/llvm-reduce/deltas/ReduceGlobalValues.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceGlobalValues.h [5:5]
+ tools/llvm-reduce/deltas/ReduceGlobalVarInitializers.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceGlobalVarInitializers.h [5:5]
+ tools/llvm-reduce/deltas/ReduceGlobalVars.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceGlobalVars.h [5:5]
+ tools/llvm-reduce/deltas/ReduceIRReferences.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceIRReferences.h [5:5]
+ tools/llvm-reduce/deltas/ReduceInstructionFlags.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceInstructionFlags.h [5:5]
+ tools/llvm-reduce/deltas/ReduceInstructionFlagsMIR.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceInstructionFlagsMIR.h [5:5]
+ tools/llvm-reduce/deltas/ReduceInstructions.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceInstructions.h [5:5]
+ tools/llvm-reduce/deltas/ReduceInstructionsMIR.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceInstructionsMIR.h [5:5]
+ tools/llvm-reduce/deltas/ReduceInvokes.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceInvokes.h [5:5]
+ tools/llvm-reduce/deltas/ReduceMemoryOperations.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceMemoryOperations.h [5:5]
+ tools/llvm-reduce/deltas/ReduceMetadata.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceMetadata.h [5:5]
+ tools/llvm-reduce/deltas/ReduceModuleData.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceModuleData.h [5:5]
+ tools/llvm-reduce/deltas/ReduceOpcodes.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceOpcodes.h [5:5]
+ tools/llvm-reduce/deltas/ReduceOperandBundles.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceOperandBundles.h [5:5]
+ tools/llvm-reduce/deltas/ReduceOperands.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceOperands.h [5:5]
+ tools/llvm-reduce/deltas/ReduceOperandsSkip.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceOperandsSkip.h [5:5]
+ tools/llvm-reduce/deltas/ReduceOperandsToArgs.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceOperandsToArgs.h [5:5]
+ tools/llvm-reduce/deltas/ReduceRegisterDefs.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceRegisterDefs.h [5:5]
+ tools/llvm-reduce/deltas/ReduceRegisterMasks.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceRegisterMasks.h [5:5]
+ tools/llvm-reduce/deltas/ReduceRegisterUses.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceRegisterUses.h [5:5]
+ tools/llvm-reduce/deltas/ReduceSpecialGlobals.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceSpecialGlobals.h [5:5]
+ tools/llvm-reduce/deltas/ReduceUsingSimplifyCFG.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceUsingSimplifyCFG.h [5:5]
+ tools/llvm-reduce/deltas/ReduceVirtualRegisters.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceVirtualRegisters.h [5:5]
+ tools/llvm-reduce/deltas/RunIRPasses.cpp [5:5]
+ tools/llvm-reduce/deltas/RunIRPasses.h [5:5]
+ tools/llvm-reduce/deltas/SimplifyInstructions.cpp [5:5]
+ tools/llvm-reduce/deltas/SimplifyInstructions.h [5:5]
+ tools/llvm-reduce/deltas/StripDebugInfo.cpp [5:5]
+ tools/llvm-reduce/deltas/StripDebugInfo.h [5:5]
+ tools/llvm-reduce/deltas/Utils.cpp [5:5]
+ tools/llvm-reduce/deltas/Utils.h [5:5]
+ tools/llvm-reduce/llvm-reduce.cpp [5:5]
+ tools/llvm-rtdyld/llvm-rtdyld.cpp [5:5]
+ tools/llvm-size/llvm-size-driver.cpp [5:5]
+ tools/llvm-size/llvm-size.cpp [5:5]
+ tools/llvm-split/llvm-split.cpp [5:5]
+ tools/llvm-stress/llvm-stress.cpp [5:5]
+ tools/llvm-strings/llvm-strings.cpp [5:5]
+ tools/llvm-symbolizer/llvm-symbolizer.cpp [5:5]
+ tools/llvm-undname/llvm-undname.cpp [6:6]
+ tools/llvm-xray/func-id-helper.cpp [5:5]
+ tools/llvm-xray/func-id-helper.h [5:5]
+ tools/llvm-xray/llvm-xray.cpp [5:5]
+ tools/llvm-xray/trie-node.h [5:5]
+ tools/llvm-xray/xray-account.cpp [5:5]
+ tools/llvm-xray/xray-account.h [5:5]
+ tools/llvm-xray/xray-color-helper.cpp [5:5]
+ tools/llvm-xray/xray-color-helper.h [5:5]
+ tools/llvm-xray/xray-converter.cpp [5:5]
+ tools/llvm-xray/xray-converter.h [5:5]
+ tools/llvm-xray/xray-extract.cpp [5:5]
+ tools/llvm-xray/xray-fdr-dump.cpp [5:5]
+ tools/llvm-xray/xray-graph-diff.cpp [5:5]
+ tools/llvm-xray/xray-graph-diff.h [5:5]
+ tools/llvm-xray/xray-graph.cpp [5:5]
+ tools/llvm-xray/xray-graph.h [5:5]
+ tools/llvm-xray/xray-registry.cpp [5:5]
+ tools/llvm-xray/xray-registry.h [5:5]
+ tools/llvm-xray/xray-stacks.cpp [5:5]
+ tools/lto/LTODisassembler.cpp [5:5]
+ tools/lto/lto.cpp [5:5]
+ tools/obj2yaml/archive2yaml.cpp [5:5]
+ tools/obj2yaml/coff2yaml.cpp [5:5]
+ tools/obj2yaml/dwarf2yaml.cpp [5:5]
+ tools/obj2yaml/dxcontainer2yaml.cpp [5:5]
+ tools/obj2yaml/elf2yaml.cpp [5:5]
+ tools/obj2yaml/macho2yaml.cpp [5:5]
+ tools/obj2yaml/minidump2yaml.cpp [5:5]
+ tools/obj2yaml/obj2yaml.cpp [5:5]
+ tools/obj2yaml/obj2yaml.h [5:5]
+ tools/obj2yaml/offload2yaml.cpp [5:5]
+ tools/obj2yaml/wasm2yaml.cpp [5:5]
+ tools/obj2yaml/xcoff2yaml.cpp [5:5]
+ tools/opt/AnalysisWrappers.cpp [5:5]
+ tools/opt/BreakpointPrinter.cpp [5:5]
+ tools/opt/BreakpointPrinter.h [5:5]
+ tools/opt/NewPMDriver.cpp [5:5]
+ tools/opt/NewPMDriver.h [5:5]
+ tools/opt/opt.cpp [5:5]
+ tools/polly/include/polly/Canonicalization.h [5:5]
+ tools/polly/include/polly/CodeGen/BlockGenerators.h [5:5]
+ tools/polly/include/polly/CodeGen/CodeGeneration.h [5:5]
+ tools/polly/include/polly/CodeGen/IRBuilder.h [5:5]
+ tools/polly/include/polly/CodeGen/IslAst.h [5:5]
+ tools/polly/include/polly/CodeGen/IslExprBuilder.h [5:5]
+ tools/polly/include/polly/CodeGen/IslNodeBuilder.h [5:5]
+ tools/polly/include/polly/CodeGen/LoopGenerators.h [5:5]
+ tools/polly/include/polly/CodeGen/LoopGeneratorsGOMP.h [5:5]
+ tools/polly/include/polly/CodeGen/LoopGeneratorsKMP.h [5:5]
+ tools/polly/include/polly/CodeGen/PPCGCodeGeneration.h [5:5]
+ tools/polly/include/polly/CodeGen/PerfMonitor.h [5:5]
+ tools/polly/include/polly/CodeGen/RuntimeDebugBuilder.h [5:5]
+ tools/polly/include/polly/CodeGen/Utils.h [5:5]
+ tools/polly/include/polly/CodePreparation.h [5:5]
+ tools/polly/include/polly/Config/config.h [5:5]
+ tools/polly/include/polly/DeLICM.h [5:5]
+ tools/polly/include/polly/DeadCodeElimination.h [5:5]
+ tools/polly/include/polly/DependenceInfo.h [5:5]
+ tools/polly/include/polly/FlattenAlgo.h [5:5]
+ tools/polly/include/polly/FlattenSchedule.h [5:5]
+ tools/polly/include/polly/ForwardOpTree.h [5:5]
+ tools/polly/include/polly/JSONExporter.h [5:5]
+ tools/polly/include/polly/LinkAllPasses.h [5:5]
+ tools/polly/include/polly/ManualOptimizer.h [5:5]
+ tools/polly/include/polly/MatmulOptimizer.h [5:5]
+ tools/polly/include/polly/MaximalStaticExpansion.h [5:5]
+ tools/polly/include/polly/Options.h [5:5]
+ tools/polly/include/polly/PolyhedralInfo.h [5:5]
+ tools/polly/include/polly/PruneUnprofitable.h [5:5]
+ tools/polly/include/polly/RegisterPasses.h [5:5]
+ tools/polly/include/polly/ScheduleOptimizer.h [5:5]
+ tools/polly/include/polly/ScheduleTreeTransform.h [5:5]
+ tools/polly/include/polly/ScopBuilder.h [5:5]
+ tools/polly/include/polly/ScopDetection.h [5:5]
+ tools/polly/include/polly/ScopDetectionDiagnostic.h [5:5]
+ tools/polly/include/polly/ScopGraphPrinter.h [5:5]
+ tools/polly/include/polly/ScopInfo.h [5:5]
+ tools/polly/include/polly/ScopPass.h [5:5]
+ tools/polly/include/polly/Simplify.h [5:5]
+ tools/polly/include/polly/Support/DumpFunctionPass.h [5:5]
+ tools/polly/include/polly/Support/DumpModulePass.h [5:5]
+ tools/polly/include/polly/Support/GICHelper.h [5:5]
+ tools/polly/include/polly/Support/ISLOStream.h [5:5]
+ tools/polly/include/polly/Support/ISLOperators.h [5:5]
+ tools/polly/include/polly/Support/ISLTools.h [5:5]
+ tools/polly/include/polly/Support/SCEVAffinator.h [5:5]
+ tools/polly/include/polly/Support/SCEVValidator.h [5:5]
+ tools/polly/include/polly/Support/ScopHelper.h [5:5]
+ tools/polly/include/polly/Support/ScopLocation.h [5:5]
+ tools/polly/include/polly/Support/VirtualInstruction.h [5:5]
+ tools/polly/include/polly/ZoneAlgo.h [5:5]
+ tools/polly/lib/Analysis/DependenceInfo.cpp [5:5]
+ tools/polly/lib/Analysis/PolyhedralInfo.cpp [5:5]
+ tools/polly/lib/Analysis/PruneUnprofitable.cpp [5:5]
+ tools/polly/lib/Analysis/ScopBuilder.cpp [5:5]
+ tools/polly/lib/Analysis/ScopDetection.cpp [5:5]
+ tools/polly/lib/Analysis/ScopDetectionDiagnostic.cpp [5:5]
+ tools/polly/lib/Analysis/ScopGraphPrinter.cpp [5:5]
+ tools/polly/lib/Analysis/ScopInfo.cpp [5:5]
+ tools/polly/lib/Analysis/ScopPass.cpp [5:5]
+ tools/polly/lib/CodeGen/BlockGenerators.cpp [5:5]
+ tools/polly/lib/CodeGen/CodeGeneration.cpp [5:5]
+ tools/polly/lib/CodeGen/CodegenCleanup.cpp [5:5]
+ tools/polly/lib/CodeGen/IRBuilder.cpp [5:5]
+ tools/polly/lib/CodeGen/IslAst.cpp [5:5]
+ tools/polly/lib/CodeGen/IslExprBuilder.cpp [5:5]
+ tools/polly/lib/CodeGen/IslNodeBuilder.cpp [5:5]
+ tools/polly/lib/CodeGen/LoopGenerators.cpp [5:5]
+ tools/polly/lib/CodeGen/LoopGeneratorsGOMP.cpp [5:5]
+ tools/polly/lib/CodeGen/LoopGeneratorsKMP.cpp [5:5]
+ tools/polly/lib/CodeGen/ManagedMemoryRewrite.cpp [5:5]
+ tools/polly/lib/CodeGen/PPCGCodeGeneration.cpp [5:5]
+ tools/polly/lib/CodeGen/PerfMonitor.cpp [5:5]
+ tools/polly/lib/CodeGen/RuntimeDebugBuilder.cpp [5:5]
+ tools/polly/lib/CodeGen/Utils.cpp [5:5]
+ tools/polly/lib/Exchange/JSONExporter.cpp [5:5]
+ tools/polly/lib/Support/DumpFunctionPass.cpp [5:5]
+ tools/polly/lib/Support/DumpModulePass.cpp [5:5]
+ tools/polly/lib/Support/GICHelper.cpp [5:5]
+ tools/polly/lib/Support/ISLTools.cpp [5:5]
+ tools/polly/lib/Support/RegisterPasses.cpp [5:5]
+ tools/polly/lib/Support/SCEVAffinator.cpp [5:5]
+ tools/polly/lib/Support/ScopHelper.cpp [5:5]
+ tools/polly/lib/Support/ScopLocation.cpp [5:5]
+ tools/polly/lib/Support/VirtualInstruction.cpp [5:5]
+ tools/polly/lib/Transform/Canonicalization.cpp [5:5]
+ tools/polly/lib/Transform/CodePreparation.cpp [5:5]
+ tools/polly/lib/Transform/DeLICM.cpp [5:5]
+ tools/polly/lib/Transform/DeadCodeElimination.cpp [5:5]
+ tools/polly/lib/Transform/FlattenAlgo.cpp [5:5]
+ tools/polly/lib/Transform/FlattenSchedule.cpp [5:5]
+ tools/polly/lib/Transform/ForwardOpTree.cpp [5:5]
+ tools/polly/lib/Transform/ManualOptimizer.cpp [5:5]
+ tools/polly/lib/Transform/MatmulOptimizer.cpp [5:5]
+ tools/polly/lib/Transform/MaximalStaticExpansion.cpp [5:5]
+ tools/polly/lib/Transform/ScheduleOptimizer.cpp [5:5]
+ tools/polly/lib/Transform/ScheduleTreeTransform.cpp [5:5]
+ tools/polly/lib/Transform/ScopInliner.cpp [5:5]
+ tools/polly/lib/Transform/Simplify.cpp [5:5]
+ tools/polly/lib/Transform/ZoneAlgo.cpp [5:5]
+ tools/remarks-shlib/libremarks.cpp [5:5]
+ tools/sancov/sancov.cpp [5:5]
+ tools/sanstats/sanstats.cpp [5:5]
+ tools/verify-uselistorder/verify-uselistorder.cpp [5:5]
+ tools/yaml2obj/yaml2obj.cpp [5:5]
+ utils/TableGen/AsmMatcherEmitter.cpp [5:5]
+ utils/TableGen/AsmWriterEmitter.cpp [5:5]
+ utils/TableGen/AsmWriterInst.cpp [5:5]
+ utils/TableGen/AsmWriterInst.h [5:5]
+ utils/TableGen/Attributes.cpp [5:5]
+ utils/TableGen/CTagsEmitter.cpp [5:5]
+ utils/TableGen/CallingConvEmitter.cpp [5:5]
+ utils/TableGen/CodeEmitterGen.cpp [5:5]
+ utils/TableGen/CodeGenDAGPatterns.cpp [5:5]
+ utils/TableGen/CodeGenDAGPatterns.h [5:5]
+ utils/TableGen/CodeGenHwModes.cpp [5:5]
+ utils/TableGen/CodeGenHwModes.h [5:5]
+ utils/TableGen/CodeGenInstruction.cpp [5:5]
+ utils/TableGen/CodeGenInstruction.h [5:5]
+ utils/TableGen/CodeGenIntrinsics.h [5:5]
+ utils/TableGen/CodeGenMapTable.cpp [5:5]
+ utils/TableGen/CodeGenRegisters.cpp [5:5]
+ utils/TableGen/CodeGenRegisters.h [5:5]
+ utils/TableGen/CodeGenSchedule.cpp [5:5]
+ utils/TableGen/CodeGenSchedule.h [5:5]
+ utils/TableGen/CodeGenTarget.cpp [5:5]
+ utils/TableGen/CodeGenTarget.h [5:5]
+ utils/TableGen/CompressInstEmitter.cpp [5:5]
+ utils/TableGen/DAGISelEmitter.cpp [5:5]
+ utils/TableGen/DAGISelMatcher.cpp [5:5]
+ utils/TableGen/DAGISelMatcher.h [5:5]
+ utils/TableGen/DAGISelMatcherEmitter.cpp [5:5]
+ utils/TableGen/DAGISelMatcherGen.cpp [5:5]
+ utils/TableGen/DAGISelMatcherOpt.cpp [5:5]
+ utils/TableGen/DFAEmitter.cpp [5:5]
+ utils/TableGen/DFAEmitter.h [5:5]
+ utils/TableGen/DFAPacketizerEmitter.cpp [5:5]
+ utils/TableGen/DXILEmitter.cpp [5:5]
+ utils/TableGen/DecoderEmitter.cpp [5:5]
+ utils/TableGen/DirectiveEmitter.cpp [5:5]
+ utils/TableGen/DisassemblerEmitter.cpp [5:5]
+ utils/TableGen/ExegesisEmitter.cpp [5:5]
+ utils/TableGen/FastISelEmitter.cpp [5:5]
+ utils/TableGen/GICombinerEmitter.cpp [5:5]
+ utils/TableGen/GlobalISel/CodeExpander.cpp [5:5]
+ utils/TableGen/GlobalISel/CodeExpander.h [5:5]
+ utils/TableGen/GlobalISel/CodeExpansions.h [5:5]
+ utils/TableGen/GlobalISel/GIMatchDag.cpp [5:5]
+ utils/TableGen/GlobalISel/GIMatchDag.h [5:5]
+ utils/TableGen/GlobalISel/GIMatchDagEdge.cpp [5:5]
+ utils/TableGen/GlobalISel/GIMatchDagEdge.h [5:5]
+ utils/TableGen/GlobalISel/GIMatchDagInstr.cpp [5:5]
+ utils/TableGen/GlobalISel/GIMatchDagInstr.h [5:5]
+ utils/TableGen/GlobalISel/GIMatchDagOperands.cpp [5:5]
+ utils/TableGen/GlobalISel/GIMatchDagOperands.h [5:5]
+ utils/TableGen/GlobalISel/GIMatchDagPredicate.cpp [5:5]
+ utils/TableGen/GlobalISel/GIMatchDagPredicate.h [5:5]
+ utils/TableGen/GlobalISel/GIMatchDagPredicateDependencyEdge.cpp [5:5]
+ utils/TableGen/GlobalISel/GIMatchDagPredicateDependencyEdge.h [5:5]
+ utils/TableGen/GlobalISel/GIMatchTree.cpp [5:5]
+ utils/TableGen/GlobalISel/GIMatchTree.h [5:5]
+ utils/TableGen/GlobalISelEmitter.cpp [5:5]
+ utils/TableGen/InfoByHwMode.cpp [5:5]
+ utils/TableGen/InfoByHwMode.h [5:5]
+ utils/TableGen/InstrDocsEmitter.cpp [5:5]
+ utils/TableGen/InstrInfoEmitter.cpp [5:5]
+ utils/TableGen/IntrinsicEmitter.cpp [5:5]
+ utils/TableGen/OptEmitter.cpp [5:5]
+ utils/TableGen/OptEmitter.h [5:5]
+ utils/TableGen/OptParserEmitter.cpp [5:5]
+ utils/TableGen/OptRSTEmitter.cpp [5:5]
+ utils/TableGen/PredicateExpander.cpp [5:5]
+ utils/TableGen/PredicateExpander.h [5:5]
+ utils/TableGen/PseudoLoweringEmitter.cpp [5:5]
+ utils/TableGen/RISCVTargetDefEmitter.cpp [5:5]
+ utils/TableGen/RegisterBankEmitter.cpp [5:5]
+ utils/TableGen/RegisterInfoEmitter.cpp [5:5]
+ utils/TableGen/SDNodeProperties.cpp [5:5]
+ utils/TableGen/SDNodeProperties.h [5:5]
+ utils/TableGen/SearchableTableEmitter.cpp [5:5]
+ utils/TableGen/SequenceToOffsetTable.h [5:5]
+ utils/TableGen/SubtargetEmitter.cpp [5:5]
+ utils/TableGen/SubtargetFeatureInfo.cpp [5:5]
+ utils/TableGen/SubtargetFeatureInfo.h [5:5]
+ utils/TableGen/TableGen.cpp [5:5]
+ utils/TableGen/TableGenBackends.h [5:5]
+ utils/TableGen/Types.cpp [5:5]
+ utils/TableGen/Types.h [5:5]
+ utils/TableGen/VarLenCodeEmitterGen.cpp [5:5]
+ utils/TableGen/VarLenCodeEmitterGen.h [5:5]
+ utils/TableGen/WebAssemblyDisassemblerEmitter.cpp [5:5]
+ utils/TableGen/WebAssemblyDisassemblerEmitter.h [5:5]
+ utils/TableGen/X86DisassemblerShared.h [5:5]
+ utils/TableGen/X86DisassemblerTables.cpp [5:5]
+ utils/TableGen/X86DisassemblerTables.h [5:5]
+ utils/TableGen/X86EVEX2VEXTablesEmitter.cpp [5:5]
+ utils/TableGen/X86FoldTablesEmitter.cpp [5:5]
+ utils/TableGen/X86MnemonicTables.cpp [5:5]
+ utils/TableGen/X86ModRMFilters.cpp [5:5]
+ utils/TableGen/X86ModRMFilters.h [5:5]
+ utils/TableGen/X86RecognizableInstr.cpp [5:5]
+ utils/TableGen/X86RecognizableInstr.h [5:5]
+ Scancode info:
+ Original SPDX id: LLVM-exception
+ Score : 100.00
+ Match type : TAG
+ Links : http://llvm.org/foundation/relicensing/LICENSE.txt, https://spdx.org/licenses/LLVM-exception
+ Files with this license:
+ include/llvm-c/DebugInfo.h [12:12]
+ include/llvm/ADT/APFixedPoint.h [12:12]
+ include/llvm/ADT/APFloat.h [12:12]
+ include/llvm/ADT/APInt.h [12:12]
+ include/llvm/ADT/APSInt.h [12:12]
+ include/llvm/ADT/AddressRanges.h [12:12]
+ include/llvm/ADT/AllocatorList.h [12:12]
+ include/llvm/ADT/Any.h [12:12]
+ include/llvm/ADT/ArrayRef.h [12:12]
+ include/llvm/ADT/BitVector.h [12:12]
+ include/llvm/ADT/Bitfields.h [12:12]
+ include/llvm/ADT/BitmaskEnum.h [12:12]
+ include/llvm/ADT/BreadthFirstIterator.h [12:12]
+ include/llvm/ADT/CachedHashString.h [12:12]
+ include/llvm/ADT/CoalescingBitVector.h [12:12]
+ include/llvm/ADT/CombinationGenerator.h [12:12]
+ include/llvm/ADT/DAGDeltaAlgorithm.h [12:12]
+ include/llvm/ADT/DeltaAlgorithm.h [12:12]
+ include/llvm/ADT/DenseMap.h [12:12]
+ include/llvm/ADT/DenseMapInfo.h [12:12]
+ include/llvm/ADT/DenseSet.h [12:12]
+ include/llvm/ADT/DepthFirstIterator.h [12:12]
+ include/llvm/ADT/DirectedGraph.h [12:12]
+ include/llvm/ADT/EnumeratedArray.h [12:12]
+ include/llvm/ADT/EpochTracker.h [12:12]
+ include/llvm/ADT/EquivalenceClasses.h [12:12]
+ include/llvm/ADT/FloatingPointMode.h [12:12]
+ include/llvm/ADT/FoldingSet.h [12:12]
+ include/llvm/ADT/FunctionExtras.h [12:12]
+ include/llvm/ADT/GenericCycleImpl.h [12:12]
+ include/llvm/ADT/GenericCycleInfo.h [12:12]
+ include/llvm/ADT/GenericSSAContext.h [12:12]
+ include/llvm/ADT/GenericUniformityImpl.h [12:12]
+ include/llvm/ADT/GenericUniformityInfo.h [12:12]
+ include/llvm/ADT/GraphTraits.h [12:12]
+ include/llvm/ADT/Hashing.h [12:12]
+ include/llvm/ADT/ImmutableList.h [12:12]
+ include/llvm/ADT/ImmutableMap.h [12:12]
+ include/llvm/ADT/ImmutableSet.h [12:12]
+ include/llvm/ADT/IndexedMap.h [12:12]
+ include/llvm/ADT/IntEqClasses.h [12:12]
+ include/llvm/ADT/IntervalMap.h [12:12]
+ include/llvm/ADT/IntervalTree.h [12:12]
+ include/llvm/ADT/IntrusiveRefCntPtr.h [12:12]
+ include/llvm/ADT/MapVector.h [12:12]
+ include/llvm/ADT/None.h [12:12]
+ include/llvm/ADT/Optional.h [12:12]
+ include/llvm/ADT/PackedVector.h [12:12]
+ include/llvm/ADT/PointerEmbeddedInt.h [12:12]
+ include/llvm/ADT/PointerIntPair.h [12:12]
+ include/llvm/ADT/PointerSumType.h [12:12]
+ include/llvm/ADT/PointerUnion.h [12:12]
+ include/llvm/ADT/PostOrderIterator.h [12:12]
+ include/llvm/ADT/PriorityQueue.h [12:12]
+ include/llvm/ADT/PriorityWorklist.h [12:12]
+ include/llvm/ADT/SCCIterator.h [12:12]
+ include/llvm/ADT/STLExtras.h [12:12]
+ include/llvm/ADT/STLForwardCompat.h [12:12]
+ include/llvm/ADT/STLFunctionalExtras.h [12:12]
+ include/llvm/ADT/ScopeExit.h [12:12]
+ include/llvm/ADT/ScopedHashTable.h [12:12]
+ include/llvm/ADT/Sequence.h [12:12]
+ include/llvm/ADT/SetOperations.h [12:12]
+ include/llvm/ADT/SetVector.h [12:12]
+ include/llvm/ADT/SmallBitVector.h [12:12]
+ include/llvm/ADT/SmallPtrSet.h [12:12]
+ include/llvm/ADT/SmallSet.h [12:12]
+ include/llvm/ADT/SmallString.h [12:12]
+ include/llvm/ADT/SmallVector.h [12:12]
+ include/llvm/ADT/SparseBitVector.h [12:12]
+ include/llvm/ADT/SparseMultiSet.h [12:12]
+ include/llvm/ADT/SparseSet.h [12:12]
+ include/llvm/ADT/Statistic.h [12:12]
+ include/llvm/ADT/StringExtras.h [12:12]
+ include/llvm/ADT/StringMap.h [12:12]
+ include/llvm/ADT/StringMapEntry.h [12:12]
+ include/llvm/ADT/StringRef.h [12:12]
+ include/llvm/ADT/StringSet.h [12:12]
+ include/llvm/ADT/StringSwitch.h [12:12]
+ include/llvm/ADT/TinyPtrVector.h [12:12]
+ include/llvm/ADT/Triple.h [12:12]
+ include/llvm/ADT/Twine.h [12:12]
+ include/llvm/ADT/TypeSwitch.h [12:12]
+ include/llvm/ADT/Uniformity.h [12:12]
+ include/llvm/ADT/UniqueVector.h [12:12]
+ include/llvm/ADT/bit.h [12:12]
+ include/llvm/ADT/edit_distance.h [12:12]
+ include/llvm/ADT/fallible_iterator.h [12:12]
+ include/llvm/ADT/identity.h [12:12]
+ include/llvm/ADT/ilist.h [12:12]
+ include/llvm/ADT/ilist_base.h [12:12]
+ include/llvm/ADT/ilist_iterator.h [12:12]
+ include/llvm/ADT/ilist_node.h [12:12]
+ include/llvm/ADT/ilist_node_base.h [12:12]
+ include/llvm/ADT/ilist_node_options.h [12:12]
+ include/llvm/ADT/iterator.h [12:12]
+ include/llvm/ADT/iterator_range.h [12:12]
+ include/llvm/ADT/simple_ilist.h [12:12]
+ include/llvm/Analysis/AliasAnalysis.h [12:12]
+ include/llvm/Analysis/AliasAnalysisEvaluator.h [12:12]
+ include/llvm/Analysis/AliasSetTracker.h [12:12]
+ include/llvm/Analysis/AssumeBundleQueries.h [12:12]
+ include/llvm/Analysis/AssumptionCache.h [12:12]
+ include/llvm/Analysis/BasicAliasAnalysis.h [12:12]
+ include/llvm/Analysis/BlockFrequencyInfo.h [12:12]
+ include/llvm/Analysis/BlockFrequencyInfoImpl.h [12:12]
+ include/llvm/Analysis/BranchProbabilityInfo.h [12:12]
+ include/llvm/Analysis/CFG.h [12:12]
+ include/llvm/Analysis/CFGPrinter.h [12:12]
+ include/llvm/Analysis/CFGSCCPrinter.h [12:12]
+ include/llvm/Analysis/CGSCCPassManager.h [12:12]
+ include/llvm/Analysis/CallGraph.h [12:12]
+ include/llvm/Analysis/CallGraphSCCPass.h [12:12]
+ include/llvm/Analysis/CallPrinter.h [12:12]
+ include/llvm/Analysis/CaptureTracking.h [12:12]
+ include/llvm/Analysis/CmpInstAnalysis.h [12:12]
+ include/llvm/Analysis/CodeMetrics.h [12:12]
+ include/llvm/Analysis/ConstantFolding.h [12:12]
+ include/llvm/Analysis/ConstraintSystem.h [12:12]
+ include/llvm/Analysis/CostModel.h [12:12]
+ include/llvm/Analysis/CycleAnalysis.h [12:12]
+ include/llvm/Analysis/DDG.h [12:12]
+ include/llvm/Analysis/DDGPrinter.h [12:12]
+ include/llvm/Analysis/DOTGraphTraitsPass.h [12:12]
+ include/llvm/Analysis/Delinearization.h [12:12]
+ include/llvm/Analysis/DemandedBits.h [12:12]
+ include/llvm/Analysis/DependenceAnalysis.h [12:12]
+ include/llvm/Analysis/DependenceGraphBuilder.h [12:12]
+ include/llvm/Analysis/DivergenceAnalysis.h [12:12]
+ include/llvm/Analysis/DomPrinter.h [12:12]
+ include/llvm/Analysis/DomTreeUpdater.h [12:12]
+ include/llvm/Analysis/DominanceFrontier.h [12:12]
+ include/llvm/Analysis/DominanceFrontierImpl.h [12:12]
+ include/llvm/Analysis/EHPersonalities.h [12:12]
+ include/llvm/Analysis/FunctionPropertiesAnalysis.h [12:12]
+ include/llvm/Analysis/GlobalsModRef.h [12:12]
+ include/llvm/Analysis/GuardUtils.h [12:12]
+ include/llvm/Analysis/HeatUtils.h [12:12]
+ include/llvm/Analysis/IRSimilarityIdentifier.h [12:12]
+ include/llvm/Analysis/IVDescriptors.h [12:12]
+ include/llvm/Analysis/IVUsers.h [12:12]
+ include/llvm/Analysis/IndirectCallPromotionAnalysis.h [12:12]
+ include/llvm/Analysis/IndirectCallVisitor.h [12:12]
+ include/llvm/Analysis/InlineAdvisor.h [12:12]
+ include/llvm/Analysis/InlineCost.h [12:12]
+ include/llvm/Analysis/InlineModelFeatureMaps.h [12:12]
+ include/llvm/Analysis/InlineOrder.h [12:12]
+ include/llvm/Analysis/InlineSizeEstimatorAnalysis.h [12:12]
+ include/llvm/Analysis/InstCount.h [12:12]
+ include/llvm/Analysis/InstSimplifyFolder.h [12:12]
+ include/llvm/Analysis/InstructionPrecedenceTracking.h [12:12]
+ include/llvm/Analysis/InstructionSimplify.h [12:12]
+ include/llvm/Analysis/Interval.h [12:12]
+ include/llvm/Analysis/IntervalIterator.h [12:12]
+ include/llvm/Analysis/IntervalPartition.h [12:12]
+ include/llvm/Analysis/IteratedDominanceFrontier.h [12:12]
+ include/llvm/Analysis/LazyBlockFrequencyInfo.h [12:12]
+ include/llvm/Analysis/LazyBranchProbabilityInfo.h [12:12]
+ include/llvm/Analysis/LazyCallGraph.h [12:12]
+ include/llvm/Analysis/LazyValueInfo.h [12:12]
+ include/llvm/Analysis/LegacyDivergenceAnalysis.h [12:12]
+ include/llvm/Analysis/Lint.h [12:12]
+ include/llvm/Analysis/Loads.h [12:12]
+ include/llvm/Analysis/LoopAccessAnalysis.h [12:12]
+ include/llvm/Analysis/LoopAnalysisManager.h [12:12]
+ include/llvm/Analysis/LoopCacheAnalysis.h [12:12]
+ include/llvm/Analysis/LoopInfo.h [12:12]
+ include/llvm/Analysis/LoopInfoImpl.h [12:12]
+ include/llvm/Analysis/LoopIterator.h [12:12]
+ include/llvm/Analysis/LoopNestAnalysis.h [12:12]
+ include/llvm/Analysis/LoopPass.h [12:12]
+ include/llvm/Analysis/LoopUnrollAnalyzer.h [12:12]
+ include/llvm/Analysis/MLInlineAdvisor.h [12:12]
+ include/llvm/Analysis/MLModelRunner.h [12:12]
+ include/llvm/Analysis/MemDerefPrinter.h [12:12]
+ include/llvm/Analysis/MemoryBuiltins.h [12:12]
+ include/llvm/Analysis/MemoryDependenceAnalysis.h [12:12]
+ include/llvm/Analysis/MemoryLocation.h [12:12]
+ include/llvm/Analysis/MemoryProfileInfo.h [12:12]
+ include/llvm/Analysis/MemorySSA.h [12:12]
+ include/llvm/Analysis/MemorySSAUpdater.h [12:12]
+ include/llvm/Analysis/ModelUnderTrainingRunner.h [12:12]
+ include/llvm/Analysis/ModuleDebugInfoPrinter.h [12:12]
+ include/llvm/Analysis/ModuleSummaryAnalysis.h [12:12]
+ include/llvm/Analysis/MustExecute.h [12:12]
+ include/llvm/Analysis/NoInferenceModelRunner.h [12:12]
+ include/llvm/Analysis/ObjCARCAliasAnalysis.h [12:12]
+ include/llvm/Analysis/ObjCARCAnalysisUtils.h [12:12]
+ include/llvm/Analysis/ObjCARCInstKind.h [12:12]
+ include/llvm/Analysis/ObjCARCUtil.h [12:12]
+ include/llvm/Analysis/OptimizationRemarkEmitter.h [12:12]
+ include/llvm/Analysis/OverflowInstAnalysis.h [12:12]
+ include/llvm/Analysis/PHITransAddr.h [12:12]
+ include/llvm/Analysis/Passes.h [12:12]
+ include/llvm/Analysis/PhiValues.h [12:12]
+ include/llvm/Analysis/PostDominators.h [12:12]
+ include/llvm/Analysis/ProfileSummaryInfo.h [12:12]
+ include/llvm/Analysis/PtrUseVisitor.h [12:12]
+ include/llvm/Analysis/RegionInfo.h [12:12]
+ include/llvm/Analysis/RegionInfoImpl.h [12:12]
+ include/llvm/Analysis/RegionIterator.h [12:12]
+ include/llvm/Analysis/RegionPass.h [12:12]
+ include/llvm/Analysis/RegionPrinter.h [12:12]
+ include/llvm/Analysis/ReleaseModeModelRunner.h [12:12]
+ include/llvm/Analysis/ReplayInlineAdvisor.h [12:12]
+ include/llvm/Analysis/ScalarEvolution.h [12:12]
+ include/llvm/Analysis/ScalarEvolutionAliasAnalysis.h [12:12]
+ include/llvm/Analysis/ScalarEvolutionDivision.h [12:12]
+ include/llvm/Analysis/ScalarEvolutionExpressions.h [12:12]
+ include/llvm/Analysis/ScalarEvolutionNormalization.h [12:12]
+ include/llvm/Analysis/ScalarFuncs.def [5:5]
+ include/llvm/Analysis/ScopedNoAliasAA.h [12:12]
+ include/llvm/Analysis/SparsePropagation.h [12:12]
+ include/llvm/Analysis/StackLifetime.h [12:12]
+ include/llvm/Analysis/StackSafetyAnalysis.h [12:12]
+ include/llvm/Analysis/SyncDependenceAnalysis.h [12:12]
+ include/llvm/Analysis/SyntheticCountsUtils.h [12:12]
+ include/llvm/Analysis/TargetFolder.h [12:12]
+ include/llvm/Analysis/TargetLibraryInfo.def [5:5]
+ include/llvm/Analysis/TargetLibraryInfo.h [12:12]
+ include/llvm/Analysis/TargetTransformInfo.h [12:12]
+ include/llvm/Analysis/TargetTransformInfoImpl.h [12:12]
+ include/llvm/Analysis/TensorSpec.h [12:12]
+ include/llvm/Analysis/Trace.h [12:12]
+ include/llvm/Analysis/TypeBasedAliasAnalysis.h [12:12]
+ include/llvm/Analysis/TypeMetadataUtils.h [12:12]
+ include/llvm/Analysis/UniformityAnalysis.h [12:12]
+ include/llvm/Analysis/Utils/ImportedFunctionsInliningStatistics.h [12:12]
+ include/llvm/Analysis/Utils/Local.h [12:12]
+ include/llvm/Analysis/Utils/TFUtils.h [12:12]
+ include/llvm/Analysis/Utils/TrainingLogger.h [12:12]
+ include/llvm/Analysis/ValueLattice.h [12:12]
+ include/llvm/Analysis/ValueLatticeUtils.h [12:12]
+ include/llvm/Analysis/ValueTracking.h [12:12]
+ include/llvm/Analysis/VecFuncs.def [5:5]
+ include/llvm/Analysis/VectorUtils.h [12:12]
+ include/llvm/AsmParser/LLLexer.h [12:12]
+ include/llvm/AsmParser/LLParser.h [12:12]
+ include/llvm/AsmParser/LLToken.h [12:12]
+ include/llvm/AsmParser/Parser.h [12:12]
+ include/llvm/AsmParser/SlotMapping.h [12:12]
+ include/llvm/BinaryFormat/AMDGPUMetadataVerifier.h [12:12]
+ include/llvm/BinaryFormat/COFF.h [12:12]
+ include/llvm/BinaryFormat/DXContainer.h [12:12]
+ include/llvm/BinaryFormat/Dwarf.def [5:5]
+ include/llvm/BinaryFormat/Dwarf.h [12:12]
+ include/llvm/BinaryFormat/ELF.h [12:12]
+ include/llvm/BinaryFormat/GOFF.h [12:12]
+ include/llvm/BinaryFormat/MachO.def [5:5]
+ include/llvm/BinaryFormat/MachO.h [12:12]
+ include/llvm/BinaryFormat/Magic.h [12:12]
+ include/llvm/BinaryFormat/Minidump.h [12:12]
+ include/llvm/BinaryFormat/MinidumpConstants.def [5:5]
+ include/llvm/BinaryFormat/MsgPack.def [5:5]
+ include/llvm/BinaryFormat/MsgPack.h [12:12]
+ include/llvm/BinaryFormat/MsgPackDocument.h [12:12]
+ include/llvm/BinaryFormat/MsgPackReader.h [12:12]
+ include/llvm/BinaryFormat/MsgPackWriter.h [12:12]
+ include/llvm/BinaryFormat/Swift.def [5:5]
+ include/llvm/BinaryFormat/Swift.h [12:12]
+ include/llvm/BinaryFormat/Wasm.h [12:12]
+ include/llvm/BinaryFormat/WasmTraits.h [12:12]
+ include/llvm/BinaryFormat/XCOFF.h [12:12]
+ include/llvm/Bitcode/BitcodeAnalyzer.h [12:12]
+ include/llvm/Bitcode/BitcodeCommon.h [12:12]
+ include/llvm/Bitcode/BitcodeConvenience.h [12:12]
+ include/llvm/Bitcode/BitcodeReader.h [12:12]
+ include/llvm/Bitcode/BitcodeWriter.h [12:12]
+ include/llvm/Bitcode/BitcodeWriterPass.h [12:12]
+ include/llvm/Bitcode/LLVMBitCodes.h [12:12]
+ include/llvm/Bitstream/BitCodeEnums.h [12:12]
+ include/llvm/Bitstream/BitCodes.h [12:12]
+ include/llvm/Bitstream/BitstreamReader.h [12:12]
+ include/llvm/Bitstream/BitstreamWriter.h [12:12]
+ include/llvm/CodeGen/AccelTable.h [12:12]
+ include/llvm/CodeGen/Analysis.h [12:12]
+ include/llvm/CodeGen/AntiDepBreaker.h [12:12]
+ include/llvm/CodeGen/AsmPrinter.h [12:12]
+ include/llvm/CodeGen/AsmPrinterHandler.h [12:12]
+ include/llvm/CodeGen/AtomicExpandUtils.h [12:12]
+ include/llvm/CodeGen/BasicBlockSectionUtils.h [12:12]
+ include/llvm/CodeGen/BasicBlockSectionsProfileReader.h [12:12]
+ include/llvm/CodeGen/BasicTTIImpl.h [12:12]
+ include/llvm/CodeGen/CFIFixup.h [12:12]
+ include/llvm/CodeGen/CSEConfigBase.h [12:12]
+ include/llvm/CodeGen/CalcSpillWeights.h [12:12]
+ include/llvm/CodeGen/CallingConvLower.h [12:12]
+ include/llvm/CodeGen/CodeGenCommonISel.h [12:12]
+ include/llvm/CodeGen/CodeGenPassBuilder.h [12:12]
+ include/llvm/CodeGen/CommandFlags.h [12:12]
+ include/llvm/CodeGen/ComplexDeinterleavingPass.h [12:12]
+ include/llvm/CodeGen/CostTable.h [12:12]
+ include/llvm/CodeGen/DAGCombine.h [12:12]
+ include/llvm/CodeGen/DFAPacketizer.h [12:12]
+ include/llvm/CodeGen/DIE.h [12:12]
+ include/llvm/CodeGen/DIEValue.def [5:5]
+ include/llvm/CodeGen/DbgEntityHistoryCalculator.h [12:12]
+ include/llvm/CodeGen/DebugHandlerBase.h [12:12]
+ include/llvm/CodeGen/DwarfStringPoolEntry.h [12:12]
+ include/llvm/CodeGen/EdgeBundles.h [12:12]
+ include/llvm/CodeGen/ExecutionDomainFix.h [12:12]
+ include/llvm/CodeGen/ExpandReductions.h [12:12]
+ include/llvm/CodeGen/ExpandVectorPredication.h [12:12]
+ include/llvm/CodeGen/FastISel.h [12:12]
+ include/llvm/CodeGen/FaultMaps.h [12:12]
+ include/llvm/CodeGen/FunctionLoweringInfo.h [12:12]
+ include/llvm/CodeGen/GCMetadata.h [12:12]
+ include/llvm/CodeGen/GCMetadataPrinter.h [12:12]
+ include/llvm/CodeGen/GlobalISel/CSEInfo.h [12:12]
+ include/llvm/CodeGen/GlobalISel/CSEMIRBuilder.h [12:12]
+ include/llvm/CodeGen/GlobalISel/CallLowering.h [12:12]
+ include/llvm/CodeGen/GlobalISel/Combiner.h [12:12]
+ include/llvm/CodeGen/GlobalISel/CombinerHelper.h [12:12]
+ include/llvm/CodeGen/GlobalISel/CombinerInfo.h [12:12]
+ include/llvm/CodeGen/GlobalISel/GISelChangeObserver.h [12:12]
+ include/llvm/CodeGen/GlobalISel/GISelKnownBits.h [12:12]
+ include/llvm/CodeGen/GlobalISel/GISelWorkList.h [12:12]
+ include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h [12:12]
+ include/llvm/CodeGen/GlobalISel/IRTranslator.h [12:12]
+ include/llvm/CodeGen/GlobalISel/InlineAsmLowering.h [12:12]
+ include/llvm/CodeGen/GlobalISel/InstructionSelect.h [12:12]
+ include/llvm/CodeGen/GlobalISel/InstructionSelector.h [12:12]
+ include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h [12:12]
+ include/llvm/CodeGen/GlobalISel/LegacyLegalizerInfo.h [12:12]
+ include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h [12:12]
+ include/llvm/CodeGen/GlobalISel/Legalizer.h [12:12]
+ include/llvm/CodeGen/GlobalISel/LegalizerHelper.h [12:12]
+ include/llvm/CodeGen/GlobalISel/LegalizerInfo.h [12:12]
+ include/llvm/CodeGen/GlobalISel/LoadStoreOpt.h [12:12]
+ include/llvm/CodeGen/GlobalISel/Localizer.h [12:12]
+ include/llvm/CodeGen/GlobalISel/LostDebugLocObserver.h [12:12]
+ include/llvm/CodeGen/GlobalISel/MIPatternMatch.h [12:12]
+ include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h [12:12]
+ include/llvm/CodeGen/GlobalISel/RegBankSelect.h [12:12]
+ include/llvm/CodeGen/GlobalISel/Utils.h [12:12]
+ include/llvm/CodeGen/ISDOpcodes.h [12:12]
+ include/llvm/CodeGen/IndirectThunks.h [12:12]
+ include/llvm/CodeGen/IntrinsicLowering.h [12:12]
+ include/llvm/CodeGen/LatencyPriorityQueue.h [12:12]
+ include/llvm/CodeGen/LexicalScopes.h [12:12]
+ include/llvm/CodeGen/LinkAllAsmWriterComponents.h [12:12]
+ include/llvm/CodeGen/LinkAllCodegenComponents.h [12:12]
+ include/llvm/CodeGen/LiveInterval.h [12:12]
+ include/llvm/CodeGen/LiveIntervalCalc.h [12:12]
+ include/llvm/CodeGen/LiveIntervalUnion.h [12:12]
+ include/llvm/CodeGen/LiveIntervals.h [12:12]
+ include/llvm/CodeGen/LivePhysRegs.h [12:12]
+ include/llvm/CodeGen/LiveRangeCalc.h [12:12]
+ include/llvm/CodeGen/LiveRangeEdit.h [12:12]
+ include/llvm/CodeGen/LiveRegMatrix.h [12:12]
+ include/llvm/CodeGen/LiveRegUnits.h [12:12]
+ include/llvm/CodeGen/LiveStacks.h [12:12]
+ include/llvm/CodeGen/LiveVariables.h [12:12]
+ include/llvm/CodeGen/LoopTraversal.h [12:12]
+ include/llvm/CodeGen/LowLevelType.h [12:12]
+ include/llvm/CodeGen/MBFIWrapper.h [12:12]
+ include/llvm/CodeGen/MIRFSDiscriminator.h [12:12]
+ include/llvm/CodeGen/MIRFormatter.h [12:12]
+ include/llvm/CodeGen/MIRParser/MIParser.h [12:12]
+ include/llvm/CodeGen/MIRParser/MIRParser.h [12:12]
+ include/llvm/CodeGen/MIRPrinter.h [12:12]
+ include/llvm/CodeGen/MIRSampleProfile.h [12:12]
+ include/llvm/CodeGen/MIRYamlMapping.h [12:12]
+ include/llvm/CodeGen/MachORelocation.h [12:12]
+ include/llvm/CodeGen/MachineBasicBlock.h [12:12]
+ include/llvm/CodeGen/MachineBlockFrequencyInfo.h [12:12]
+ include/llvm/CodeGen/MachineBranchProbabilityInfo.h [12:12]
+ include/llvm/CodeGen/MachineCFGPrinter.h [12:12]
+ include/llvm/CodeGen/MachineCombinerPattern.h [13:13]
+ include/llvm/CodeGen/MachineConstantPool.h [12:12]
+ include/llvm/CodeGen/MachineCycleAnalysis.h [12:12]
+ include/llvm/CodeGen/MachineDominanceFrontier.h [12:12]
+ include/llvm/CodeGen/MachineDominators.h [12:12]
+ include/llvm/CodeGen/MachineFrameInfo.h [12:12]
+ include/llvm/CodeGen/MachineFunction.h [12:12]
+ include/llvm/CodeGen/MachineFunctionPass.h [12:12]
+ include/llvm/CodeGen/MachineInstr.h [12:12]
+ include/llvm/CodeGen/MachineInstrBuilder.h [12:12]
+ include/llvm/CodeGen/MachineInstrBundle.h [12:12]
+ include/llvm/CodeGen/MachineInstrBundleIterator.h [12:12]
+ include/llvm/CodeGen/MachineJumpTableInfo.h [12:12]
+ include/llvm/CodeGen/MachineLoopInfo.h [12:12]
+ include/llvm/CodeGen/MachineLoopUtils.h [12:12]
+ include/llvm/CodeGen/MachineMemOperand.h [12:12]
+ include/llvm/CodeGen/MachineModuleInfo.h [12:12]
+ include/llvm/CodeGen/MachineModuleInfoImpls.h [12:12]
+ include/llvm/CodeGen/MachineModuleSlotTracker.h [12:12]
+ include/llvm/CodeGen/MachineOperand.h [12:12]
+ include/llvm/CodeGen/MachineOutliner.h [12:12]
+ include/llvm/CodeGen/MachinePassManager.h [12:12]
+ include/llvm/CodeGen/MachinePassRegistry.def [5:5]
+ include/llvm/CodeGen/MachinePassRegistry.h [12:12]
+ include/llvm/CodeGen/MachinePipeliner.h [12:12]
+ include/llvm/CodeGen/MachinePostDominators.h [12:12]
+ include/llvm/CodeGen/MachineRegionInfo.h [12:12]
+ include/llvm/CodeGen/MachineRegisterInfo.h [12:12]
+ include/llvm/CodeGen/MachineSSAContext.h [12:12]
+ include/llvm/CodeGen/MachineSSAUpdater.h [12:12]
+ include/llvm/CodeGen/MachineScheduler.h [12:12]
+ include/llvm/CodeGen/MachineSizeOpts.h [12:12]
+ include/llvm/CodeGen/MachineStableHash.h [12:12]
+ include/llvm/CodeGen/MachineTraceMetrics.h [12:12]
+ include/llvm/CodeGen/MachineUniformityAnalysis.h [12:12]
+ include/llvm/CodeGen/MacroFusion.h [12:12]
+ include/llvm/CodeGen/ModuloSchedule.h [12:12]
+ include/llvm/CodeGen/MultiHazardRecognizer.h [12:12]
+ include/llvm/CodeGen/NonRelocatableStringpool.h [12:12]
+ include/llvm/CodeGen/PBQP/CostAllocator.h [12:12]
+ include/llvm/CodeGen/PBQP/Graph.h [12:12]
+ include/llvm/CodeGen/PBQP/Math.h [12:12]
+ include/llvm/CodeGen/PBQP/ReductionRules.h [12:12]
+ include/llvm/CodeGen/PBQP/Solution.h [12:12]
+ include/llvm/CodeGen/PBQPRAConstraint.h [12:12]
+ include/llvm/CodeGen/ParallelCG.h [12:12]
+ include/llvm/CodeGen/Passes.h [12:12]
+ include/llvm/CodeGen/PreISelIntrinsicLowering.h [12:12]
+ include/llvm/CodeGen/PseudoSourceValue.h [12:12]
+ include/llvm/CodeGen/RDFGraph.h [12:12]
+ include/llvm/CodeGen/RDFLiveness.h [12:12]
+ include/llvm/CodeGen/RDFRegisters.h [12:12]
+ include/llvm/CodeGen/ReachingDefAnalysis.h [12:12]
+ include/llvm/CodeGen/RegAllocCommon.h [12:12]
+ include/llvm/CodeGen/RegAllocPBQP.h [12:12]
+ include/llvm/CodeGen/RegAllocRegistry.h [12:12]
+ include/llvm/CodeGen/Register.h [12:12]
+ include/llvm/CodeGen/RegisterBank.h [12:12]
+ include/llvm/CodeGen/RegisterBankInfo.h [12:12]
+ include/llvm/CodeGen/RegisterClassInfo.h [12:12]
+ include/llvm/CodeGen/RegisterPressure.h [12:12]
+ include/llvm/CodeGen/RegisterScavenging.h [12:12]
+ include/llvm/CodeGen/RegisterUsageInfo.h [12:12]
+ include/llvm/CodeGen/ReplaceWithVeclib.h [12:12]
+ include/llvm/CodeGen/ResourcePriorityQueue.h [12:12]
+ include/llvm/CodeGen/RuntimeLibcalls.h [12:12]
+ include/llvm/CodeGen/SDNodeProperties.td [5:5]
+ include/llvm/CodeGen/ScheduleDAG.h [12:12]
+ include/llvm/CodeGen/ScheduleDAGInstrs.h [12:12]
+ include/llvm/CodeGen/ScheduleDAGMutation.h [12:12]
+ include/llvm/CodeGen/ScheduleDFS.h [12:12]
+ include/llvm/CodeGen/ScheduleHazardRecognizer.h [12:12]
+ include/llvm/CodeGen/SchedulerRegistry.h [12:12]
+ include/llvm/CodeGen/ScoreboardHazardRecognizer.h [12:12]
+ include/llvm/CodeGen/SelectionDAG.h [12:12]
+ include/llvm/CodeGen/SelectionDAGAddressAnalysis.h [12:12]
+ include/llvm/CodeGen/SelectionDAGISel.h [12:12]
+ include/llvm/CodeGen/SelectionDAGNodes.h [12:12]
+ include/llvm/CodeGen/SelectionDAGTargetInfo.h [12:12]
+ include/llvm/CodeGen/SlotIndexes.h [12:12]
+ include/llvm/CodeGen/Spiller.h [12:12]
+ include/llvm/CodeGen/StableHashing.h [12:12]
+ include/llvm/CodeGen/StackMaps.h [12:12]
+ include/llvm/CodeGen/StackProtector.h [12:12]
+ include/llvm/CodeGen/SwiftErrorValueTracking.h [12:12]
+ include/llvm/CodeGen/SwitchLoweringUtils.h [12:12]
+ include/llvm/CodeGen/TailDuplicator.h [12:12]
+ include/llvm/CodeGen/TargetCallingConv.h [12:12]
+ include/llvm/CodeGen/TargetFrameLowering.h [12:12]
+ include/llvm/CodeGen/TargetInstrInfo.h [12:12]
+ include/llvm/CodeGen/TargetLowering.h [12:12]
+ include/llvm/CodeGen/TargetLoweringObjectFileImpl.h [12:12]
+ include/llvm/CodeGen/TargetOpcodes.h [12:12]
+ include/llvm/CodeGen/TargetPassConfig.h [12:12]
+ include/llvm/CodeGen/TargetRegisterInfo.h [12:12]
+ include/llvm/CodeGen/TargetSchedule.h [12:12]
+ include/llvm/CodeGen/TargetSubtargetInfo.h [12:12]
+ include/llvm/CodeGen/TileShapeInfo.h [12:12]
+ include/llvm/CodeGen/TypePromotion.h [12:12]
+ include/llvm/CodeGen/UnreachableBlockElim.h [12:12]
+ include/llvm/CodeGen/VLIWMachineScheduler.h [12:12]
+ include/llvm/CodeGen/ValueTypes.h [12:12]
+ include/llvm/CodeGen/ValueTypes.td [5:5]
+ include/llvm/CodeGen/VirtRegMap.h [12:12]
+ include/llvm/CodeGen/WasmEHFuncInfo.h [12:12]
+ include/llvm/CodeGen/WinEHFuncInfo.h [12:12]
+ include/llvm/DWARFLinker/DWARFLinker.h [12:12]
+ include/llvm/DWARFLinker/DWARFLinkerCompileUnit.h [12:12]
+ include/llvm/DWARFLinker/DWARFLinkerDeclContext.h [12:12]
+ include/llvm/DWARFLinker/DWARFStreamer.h [12:12]
+ include/llvm/DWARFLinkerParallel/DWARFLinker.h [12:12]
+ include/llvm/DebugInfo/CodeView/AppendingTypeTableBuilder.h [12:12]
+ include/llvm/DebugInfo/CodeView/CVRecord.h [12:12]
+ include/llvm/DebugInfo/CodeView/CVSymbolVisitor.h [12:12]
+ include/llvm/DebugInfo/CodeView/CVTypeVisitor.h [12:12]
+ include/llvm/DebugInfo/CodeView/CodeView.h [12:12]
+ include/llvm/DebugInfo/CodeView/CodeViewError.h [12:12]
+ include/llvm/DebugInfo/CodeView/CodeViewRecordIO.h [12:12]
+ include/llvm/DebugInfo/CodeView/CodeViewRegisters.def [5:5]
+ include/llvm/DebugInfo/CodeView/CodeViewSymbols.def [5:5]
+ include/llvm/DebugInfo/CodeView/CodeViewTypes.def [5:5]
+ include/llvm/DebugInfo/CodeView/ContinuationRecordBuilder.h [12:12]
+ include/llvm/DebugInfo/CodeView/DebugChecksumsSubsection.h [12:12]
+ include/llvm/DebugInfo/CodeView/DebugCrossExSubsection.h [12:12]
+ include/llvm/DebugInfo/CodeView/DebugCrossImpSubsection.h [12:12]
+ include/llvm/DebugInfo/CodeView/DebugFrameDataSubsection.h [12:12]
+ include/llvm/DebugInfo/CodeView/DebugInlineeLinesSubsection.h [12:12]
+ include/llvm/DebugInfo/CodeView/DebugLinesSubsection.h [12:12]
+ include/llvm/DebugInfo/CodeView/DebugStringTableSubsection.h [12:12]
+ include/llvm/DebugInfo/CodeView/DebugSubsection.h [12:12]
+ include/llvm/DebugInfo/CodeView/DebugSubsectionRecord.h [12:12]
+ include/llvm/DebugInfo/CodeView/DebugSubsectionVisitor.h [12:12]
+ include/llvm/DebugInfo/CodeView/DebugSymbolRVASubsection.h [12:12]
+ include/llvm/DebugInfo/CodeView/DebugSymbolsSubsection.h [12:12]
+ include/llvm/DebugInfo/CodeView/DebugUnknownSubsection.h [12:12]
+ include/llvm/DebugInfo/CodeView/EnumTables.h [12:12]
+ include/llvm/DebugInfo/CodeView/Formatters.h [12:12]
+ include/llvm/DebugInfo/CodeView/FunctionId.h [12:12]
+ include/llvm/DebugInfo/CodeView/GUID.h [12:12]
+ include/llvm/DebugInfo/CodeView/GlobalTypeTableBuilder.h [12:12]
+ include/llvm/DebugInfo/CodeView/LazyRandomTypeCollection.h [12:12]
+ include/llvm/DebugInfo/CodeView/Line.h [12:12]
+ include/llvm/DebugInfo/CodeView/MergingTypeTableBuilder.h [12:12]
+ include/llvm/DebugInfo/CodeView/RecordName.h [12:12]
+ include/llvm/DebugInfo/CodeView/RecordSerialization.h [12:12]
+ include/llvm/DebugInfo/CodeView/SimpleTypeSerializer.h [12:12]
+ include/llvm/DebugInfo/CodeView/StringsAndChecksums.h [12:12]
+ include/llvm/DebugInfo/CodeView/SymbolDeserializer.h [12:12]
+ include/llvm/DebugInfo/CodeView/SymbolDumpDelegate.h [12:12]
+ include/llvm/DebugInfo/CodeView/SymbolDumper.h [12:12]
+ include/llvm/DebugInfo/CodeView/SymbolRecord.h [12:12]
+ include/llvm/DebugInfo/CodeView/SymbolRecordHelpers.h [12:12]
+ include/llvm/DebugInfo/CodeView/SymbolRecordMapping.h [12:12]
+ include/llvm/DebugInfo/CodeView/SymbolSerializer.h [12:12]
+ include/llvm/DebugInfo/CodeView/SymbolVisitorCallbackPipeline.h [12:12]
+ include/llvm/DebugInfo/CodeView/SymbolVisitorCallbacks.h [12:12]
+ include/llvm/DebugInfo/CodeView/SymbolVisitorDelegate.h [12:12]
+ include/llvm/DebugInfo/CodeView/TypeCollection.h [12:12]
+ include/llvm/DebugInfo/CodeView/TypeDeserializer.h [12:12]
+ include/llvm/DebugInfo/CodeView/TypeDumpVisitor.h [12:12]
+ include/llvm/DebugInfo/CodeView/TypeHashing.h [12:12]
+ include/llvm/DebugInfo/CodeView/TypeIndex.h [12:12]
+ include/llvm/DebugInfo/CodeView/TypeIndexDiscovery.h [12:12]
+ include/llvm/DebugInfo/CodeView/TypeRecord.h [12:12]
+ include/llvm/DebugInfo/CodeView/TypeRecordHelpers.h [12:12]
+ include/llvm/DebugInfo/CodeView/TypeRecordMapping.h [12:12]
+ include/llvm/DebugInfo/CodeView/TypeStreamMerger.h [12:12]
+ include/llvm/DebugInfo/CodeView/TypeSymbolEmitter.h [12:12]
+ include/llvm/DebugInfo/CodeView/TypeTableCollection.h [12:12]
+ include/llvm/DebugInfo/CodeView/TypeVisitorCallbackPipeline.h [12:12]
+ include/llvm/DebugInfo/CodeView/TypeVisitorCallbacks.h [12:12]
+ include/llvm/DebugInfo/DIContext.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFAbbreviationDeclaration.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFAcceleratorTable.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFAddressRange.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFAttribute.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFCompileUnit.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFContext.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFDataExtractor.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFDebugAbbrev.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFDebugAddr.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFDebugArangeSet.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFDebugAranges.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFDebugFrame.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFDebugInfoEntry.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFDebugLine.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFDebugLoc.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFDebugMacro.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFDebugPubTable.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFDebugRangeList.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFDebugRnglists.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFDie.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFExpression.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFFormValue.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFGdbIndex.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFListTable.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFLocationExpression.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFObject.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFRelocMap.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFSection.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFTypePrinter.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFTypeUnit.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFUnit.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFUnitIndex.h [12:12]
+ include/llvm/DebugInfo/DWARF/DWARFVerifier.h [12:12]
+ include/llvm/DebugInfo/GSYM/DwarfTransformer.h [12:12]
+ include/llvm/DebugInfo/GSYM/ExtractRanges.h [12:12]
+ include/llvm/DebugInfo/GSYM/FileEntry.h [12:12]
+ include/llvm/DebugInfo/GSYM/FileWriter.h [12:12]
+ include/llvm/DebugInfo/GSYM/FunctionInfo.h [12:12]
+ include/llvm/DebugInfo/GSYM/GsymCreator.h [12:12]
+ include/llvm/DebugInfo/GSYM/GsymReader.h [12:12]
+ include/llvm/DebugInfo/GSYM/Header.h [12:12]
+ include/llvm/DebugInfo/GSYM/InlineInfo.h [12:12]
+ include/llvm/DebugInfo/GSYM/LineEntry.h [12:12]
+ include/llvm/DebugInfo/GSYM/LineTable.h [12:12]
+ include/llvm/DebugInfo/GSYM/LookupResult.h [12:12]
+ include/llvm/DebugInfo/GSYM/ObjectFileTransformer.h [12:12]
+ include/llvm/DebugInfo/GSYM/StringTable.h [12:12]
+ include/llvm/DebugInfo/LogicalView/Core/LVCompare.h [12:12]
+ include/llvm/DebugInfo/LogicalView/Core/LVElement.h [12:12]
+ include/llvm/DebugInfo/LogicalView/Core/LVLine.h [12:12]
+ include/llvm/DebugInfo/LogicalView/Core/LVLocation.h [12:12]
+ include/llvm/DebugInfo/LogicalView/Core/LVObject.h [12:12]
+ include/llvm/DebugInfo/LogicalView/Core/LVOptions.h [12:12]
+ include/llvm/DebugInfo/LogicalView/Core/LVRange.h [12:12]
+ include/llvm/DebugInfo/LogicalView/Core/LVReader.h [12:12]
+ include/llvm/DebugInfo/LogicalView/Core/LVScope.h [12:12]
+ include/llvm/DebugInfo/LogicalView/Core/LVSort.h [12:12]
+ include/llvm/DebugInfo/LogicalView/Core/LVStringPool.h [12:12]
+ include/llvm/DebugInfo/LogicalView/Core/LVSupport.h [12:12]
+ include/llvm/DebugInfo/LogicalView/Core/LVSymbol.h [12:12]
+ include/llvm/DebugInfo/LogicalView/Core/LVType.h [12:12]
+ include/llvm/DebugInfo/LogicalView/LVReaderHandler.h [12:12]
+ include/llvm/DebugInfo/LogicalView/Readers/LVBinaryReader.h [12:12]
+ include/llvm/DebugInfo/LogicalView/Readers/LVELFReader.h [12:12]
+ include/llvm/DebugInfo/MSF/IMSFFile.h [12:12]
+ include/llvm/DebugInfo/MSF/MSFBuilder.h [12:12]
+ include/llvm/DebugInfo/MSF/MSFCommon.h [12:12]
+ include/llvm/DebugInfo/MSF/MSFError.h [12:12]
+ include/llvm/DebugInfo/MSF/MappedBlockStream.h [12:12]
+ include/llvm/DebugInfo/PDB/ConcreteSymbolEnumerator.h [12:12]
+ include/llvm/DebugInfo/PDB/DIA/DIADataStream.h [12:12]
+ include/llvm/DebugInfo/PDB/DIA/DIAEnumDebugStreams.h [12:12]
+ include/llvm/DebugInfo/PDB/DIA/DIAEnumFrameData.h [12:12]
+ include/llvm/DebugInfo/PDB/DIA/DIAEnumInjectedSources.h [12:12]
+ include/llvm/DebugInfo/PDB/DIA/DIAEnumLineNumbers.h [12:12]
+ include/llvm/DebugInfo/PDB/DIA/DIAEnumSectionContribs.h [12:12]
+ include/llvm/DebugInfo/PDB/DIA/DIAEnumSourceFiles.h [12:12]
+ include/llvm/DebugInfo/PDB/DIA/DIAEnumSymbols.h [12:12]
+ include/llvm/DebugInfo/PDB/DIA/DIAEnumTables.h [12:12]
+ include/llvm/DebugInfo/PDB/DIA/DIAError.h [12:12]
+ include/llvm/DebugInfo/PDB/DIA/DIAFrameData.h [12:12]
+ include/llvm/DebugInfo/PDB/DIA/DIAInjectedSource.h [12:12]
+ include/llvm/DebugInfo/PDB/DIA/DIALineNumber.h [12:12]
+ include/llvm/DebugInfo/PDB/DIA/DIARawSymbol.h [12:12]
+ include/llvm/DebugInfo/PDB/DIA/DIASectionContrib.h [12:12]
+ include/llvm/DebugInfo/PDB/DIA/DIASession.h [12:12]
+ include/llvm/DebugInfo/PDB/DIA/DIASourceFile.h [12:12]
+ include/llvm/DebugInfo/PDB/DIA/DIASupport.h [12:12]
+ include/llvm/DebugInfo/PDB/DIA/DIATable.h [12:12]
+ include/llvm/DebugInfo/PDB/DIA/DIAUtils.h [12:12]
+ include/llvm/DebugInfo/PDB/GenericError.h [12:12]
+ include/llvm/DebugInfo/PDB/IPDBDataStream.h [12:12]
+ include/llvm/DebugInfo/PDB/IPDBEnumChildren.h [12:12]
+ include/llvm/DebugInfo/PDB/IPDBFrameData.h [12:12]
+ include/llvm/DebugInfo/PDB/IPDBInjectedSource.h [12:12]
+ include/llvm/DebugInfo/PDB/IPDBLineNumber.h [12:12]
+ include/llvm/DebugInfo/PDB/IPDBRawSymbol.h [12:12]
+ include/llvm/DebugInfo/PDB/IPDBSectionContrib.h [12:12]
+ include/llvm/DebugInfo/PDB/IPDBSession.h [12:12]
+ include/llvm/DebugInfo/PDB/IPDBSourceFile.h [12:12]
+ include/llvm/DebugInfo/PDB/IPDBTable.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/DbiModuleDescriptor.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/DbiModuleDescriptorBuilder.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/DbiModuleList.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/DbiStream.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/DbiStreamBuilder.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/EnumTables.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/FormatUtil.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/Formatters.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/GSIStreamBuilder.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/GlobalsStream.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/Hash.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/HashTable.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/ISectionContribVisitor.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/InfoStream.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/InfoStreamBuilder.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/InjectedSourceStream.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/InputFile.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/LinePrinter.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/ModuleDebugStream.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/NamedStreamMap.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/NativeCompilandSymbol.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/NativeEnumGlobals.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/NativeEnumInjectedSources.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/NativeEnumLineNumbers.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/NativeEnumModules.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/NativeEnumSymbols.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/NativeEnumTypes.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/NativeExeSymbol.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/NativeFunctionSymbol.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/NativeInlineSiteSymbol.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/NativeLineNumber.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/NativePublicSymbol.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/NativeRawSymbol.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/NativeSession.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/NativeSourceFile.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/NativeSymbolEnumerator.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/NativeTypeArray.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/NativeTypeBuiltin.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/NativeTypeEnum.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/NativeTypeFunctionSig.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/NativeTypePointer.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/NativeTypeTypedef.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/NativeTypeUDT.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/NativeTypeVTShape.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/PDBFile.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/PDBFileBuilder.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/PDBStringTable.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/PDBStringTableBuilder.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/PublicsStream.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/RawConstants.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/RawError.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/RawTypes.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/SymbolCache.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/SymbolStream.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/TpiHashing.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/TpiStream.h [12:12]
+ include/llvm/DebugInfo/PDB/Native/TpiStreamBuilder.h [12:12]
+ include/llvm/DebugInfo/PDB/PDB.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBContext.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBExtras.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymDumper.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbol.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolAnnotation.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolBlock.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolCompiland.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolCompilandDetails.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolCompilandEnv.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolCustom.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolData.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolExe.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolFunc.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolFuncDebugEnd.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolFuncDebugStart.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolLabel.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolPublicSymbol.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolThunk.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeArray.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeBaseClass.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeBuiltin.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeCustom.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeDimension.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeEnum.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeFriend.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeFunctionArg.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeFunctionSig.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeManaged.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypePointer.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeTypedef.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeUDT.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeVTable.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolTypeVTableShape.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolUnknown.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBSymbolUsingNamespace.h [12:12]
+ include/llvm/DebugInfo/PDB/PDBTypes.h [12:12]
+ include/llvm/DebugInfo/PDB/UDTLayout.h [12:12]
+ include/llvm/DebugInfo/Symbolize/DIPrinter.h [12:12]
+ include/llvm/DebugInfo/Symbolize/Markup.h [12:12]
+ include/llvm/DebugInfo/Symbolize/MarkupFilter.h [12:12]
+ include/llvm/DebugInfo/Symbolize/SymbolizableModule.h [12:12]
+ include/llvm/DebugInfo/Symbolize/SymbolizableObjectFile.h [12:12]
+ include/llvm/DebugInfo/Symbolize/Symbolize.h [12:12]
+ include/llvm/Debuginfod/BuildIDFetcher.h [12:12]
+ include/llvm/Debuginfod/Debuginfod.h [12:12]
+ include/llvm/Debuginfod/HTTPClient.h [12:12]
+ include/llvm/Debuginfod/HTTPServer.h [12:12]
+ include/llvm/Demangle/Demangle.h [12:12]
+ include/llvm/Demangle/DemangleConfig.h [12:12]
+ include/llvm/Demangle/ItaniumDemangle.h [12:12]
+ include/llvm/Demangle/ItaniumNodes.def [5:5]
+ include/llvm/Demangle/MicrosoftDemangle.h [12:12]
+ include/llvm/Demangle/MicrosoftDemangleNodes.h [12:12]
+ include/llvm/Demangle/StringView.h [12:12]
+ include/llvm/Demangle/Utility.h [12:12]
+ include/llvm/ExecutionEngine/ExecutionEngine.h [12:12]
+ include/llvm/ExecutionEngine/GenericValue.h [12:12]
+ include/llvm/ExecutionEngine/Interpreter.h [12:12]
+ include/llvm/ExecutionEngine/JITEventListener.h [12:12]
+ include/llvm/ExecutionEngine/JITLink/COFF.h [12:12]
+ include/llvm/ExecutionEngine/JITLink/COFF_x86_64.h [12:12]
+ include/llvm/ExecutionEngine/JITLink/DWARFRecordSectionSplitter.h [12:12]
+ include/llvm/ExecutionEngine/JITLink/EHFrameSupport.h [12:12]
+ include/llvm/ExecutionEngine/JITLink/ELF.h [12:12]
+ include/llvm/ExecutionEngine/JITLink/ELF_aarch64.h [12:12]
+ include/llvm/ExecutionEngine/JITLink/ELF_i386.h [12:12]
+ include/llvm/ExecutionEngine/JITLink/ELF_loongarch.h [12:12]
+ include/llvm/ExecutionEngine/JITLink/ELF_riscv.h [12:12]
+ include/llvm/ExecutionEngine/JITLink/ELF_x86_64.h [12:12]
+ include/llvm/ExecutionEngine/JITLink/JITLink.h [12:12]
+ include/llvm/ExecutionEngine/JITLink/JITLinkDylib.h [12:12]
+ include/llvm/ExecutionEngine/JITLink/JITLinkMemoryManager.h [12:12]
+ include/llvm/ExecutionEngine/JITLink/MachO.h [12:12]
+ include/llvm/ExecutionEngine/JITLink/MachO_arm64.h [12:12]
+ include/llvm/ExecutionEngine/JITLink/MachO_x86_64.h [12:12]
+ include/llvm/ExecutionEngine/JITLink/TableManager.h [12:12]
+ include/llvm/ExecutionEngine/JITLink/aarch64.h [12:12]
+ include/llvm/ExecutionEngine/JITLink/i386.h [12:12]
+ include/llvm/ExecutionEngine/JITLink/loongarch.h [12:12]
+ include/llvm/ExecutionEngine/JITLink/riscv.h [12:12]
+ include/llvm/ExecutionEngine/JITLink/x86_64.h [12:12]
+ include/llvm/ExecutionEngine/JITSymbol.h [12:12]
+ include/llvm/ExecutionEngine/MCJIT.h [12:12]
+ include/llvm/ExecutionEngine/OProfileWrapper.h [12:12]
+ include/llvm/ExecutionEngine/ObjectCache.h [12:12]
+ include/llvm/ExecutionEngine/Orc/COFFPlatform.h [12:12]
+ include/llvm/ExecutionEngine/Orc/COFFVCRuntimeSupport.h [12:12]
+ include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h [12:12]
+ include/llvm/ExecutionEngine/Orc/CompileUtils.h [12:12]
+ include/llvm/ExecutionEngine/Orc/Core.h [12:12]
+ include/llvm/ExecutionEngine/Orc/DebugObjectManagerPlugin.h [12:12]
+ include/llvm/ExecutionEngine/Orc/DebugUtils.h [12:12]
+ include/llvm/ExecutionEngine/Orc/DebuggerSupportPlugin.h [12:12]
+ include/llvm/ExecutionEngine/Orc/ELFNixPlatform.h [12:12]
+ include/llvm/ExecutionEngine/Orc/EPCDebugObjectRegistrar.h [12:12]
+ include/llvm/ExecutionEngine/Orc/EPCDynamicLibrarySearchGenerator.h [12:12]
+ include/llvm/ExecutionEngine/Orc/EPCEHFrameRegistrar.h [12:12]
+ include/llvm/ExecutionEngine/Orc/EPCGenericDylibManager.h [12:12]
+ include/llvm/ExecutionEngine/Orc/EPCGenericJITLinkMemoryManager.h [12:12]
+ include/llvm/ExecutionEngine/Orc/EPCGenericMemoryAccess.h [12:12]
+ include/llvm/ExecutionEngine/Orc/EPCGenericRTDyldMemoryManager.h [12:12]
+ include/llvm/ExecutionEngine/Orc/EPCIndirectionUtils.h [12:12]
+ include/llvm/ExecutionEngine/Orc/ExecutionUtils.h [12:12]
+ include/llvm/ExecutionEngine/Orc/ExecutorProcessControl.h [12:12]
+ include/llvm/ExecutionEngine/Orc/IRCompileLayer.h [12:12]
+ include/llvm/ExecutionEngine/Orc/IRTransformLayer.h [12:12]
+ include/llvm/ExecutionEngine/Orc/IndirectionUtils.h [12:12]
+ include/llvm/ExecutionEngine/Orc/JITTargetMachineBuilder.h [12:12]
+ include/llvm/ExecutionEngine/Orc/LLJIT.h [12:12]
+ include/llvm/ExecutionEngine/Orc/Layer.h [12:12]
+ include/llvm/ExecutionEngine/Orc/LazyReexports.h [12:12]
+ include/llvm/ExecutionEngine/Orc/LookupAndRecordAddrs.h [12:12]
+ include/llvm/ExecutionEngine/Orc/MachOPlatform.h [12:12]
+ include/llvm/ExecutionEngine/Orc/Mangling.h [12:12]
+ include/llvm/ExecutionEngine/Orc/MapperJITLinkMemoryManager.h [12:12]
+ include/llvm/ExecutionEngine/Orc/MemoryMapper.h [12:12]
+ include/llvm/ExecutionEngine/Orc/ObjectFileInterface.h [12:12]
+ include/llvm/ExecutionEngine/Orc/ObjectLinkingLayer.h [12:12]
+ include/llvm/ExecutionEngine/Orc/ObjectTransformLayer.h [12:12]
+ include/llvm/ExecutionEngine/Orc/OrcABISupport.h [12:12]
+ include/llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h [12:12]
+ include/llvm/ExecutionEngine/Orc/Shared/AllocationActions.h [12:12]
+ include/llvm/ExecutionEngine/Orc/Shared/ExecutorAddress.h [12:12]
+ include/llvm/ExecutionEngine/Orc/Shared/MemoryFlags.h [12:12]
+ include/llvm/ExecutionEngine/Orc/Shared/OrcError.h [12:12]
+ include/llvm/ExecutionEngine/Orc/Shared/OrcRTBridge.h [12:12]
+ include/llvm/ExecutionEngine/Orc/Shared/SimplePackedSerialization.h [12:12]
+ include/llvm/ExecutionEngine/Orc/Shared/SimpleRemoteEPCUtils.h [12:12]
+ include/llvm/ExecutionEngine/Orc/Shared/TargetProcessControlTypes.h [12:12]
+ include/llvm/ExecutionEngine/Orc/Shared/WrapperFunctionUtils.h [12:12]
+ include/llvm/ExecutionEngine/Orc/SimpleRemoteEPC.h [12:12]
+ include/llvm/ExecutionEngine/Orc/SpeculateAnalyses.h [12:12]
+ include/llvm/ExecutionEngine/Orc/Speculation.h [12:12]
+ include/llvm/ExecutionEngine/Orc/SymbolStringPool.h [12:12]
+ include/llvm/ExecutionEngine/Orc/TargetProcess/ExecutorBootstrapService.h [12:12]
+ include/llvm/ExecutionEngine/Orc/TargetProcess/ExecutorSharedMemoryMapperService.h [12:12]
+ include/llvm/ExecutionEngine/Orc/TargetProcess/JITLoaderGDB.h [12:12]
+ include/llvm/ExecutionEngine/Orc/TargetProcess/RegisterEHFrames.h [12:12]
+ include/llvm/ExecutionEngine/Orc/TargetProcess/SimpleExecutorDylibManager.h [12:12]
+ include/llvm/ExecutionEngine/Orc/TargetProcess/SimpleExecutorMemoryManager.h [12:12]
+ include/llvm/ExecutionEngine/Orc/TargetProcess/SimpleRemoteEPCServer.h [12:12]
+ include/llvm/ExecutionEngine/Orc/TargetProcess/TargetExecutionUtils.h [12:12]
+ include/llvm/ExecutionEngine/Orc/TaskDispatch.h [12:12]
+ include/llvm/ExecutionEngine/Orc/ThreadSafeModule.h [12:12]
+ include/llvm/ExecutionEngine/RTDyldMemoryManager.h [12:12]
+ include/llvm/ExecutionEngine/RuntimeDyld.h [12:12]
+ include/llvm/ExecutionEngine/RuntimeDyldChecker.h [12:12]
+ include/llvm/ExecutionEngine/SectionMemoryManager.h [12:12]
+ include/llvm/FileCheck/FileCheck.h [12:12]
+ include/llvm/Frontend/Directive/DirectiveBase.td [5:5]
+ include/llvm/Frontend/HLSL/HLSLResource.h [12:12]
+ include/llvm/Frontend/OpenACC/ACC.td [5:5]
+ include/llvm/Frontend/OpenMP/OMP.td [5:5]
+ include/llvm/Frontend/OpenMP/OMPAssume.h [12:12]
+ include/llvm/Frontend/OpenMP/OMPConstants.h [12:12]
+ include/llvm/Frontend/OpenMP/OMPContext.h [12:12]
+ include/llvm/Frontend/OpenMP/OMPDeviceConstants.h [12:12]
+ include/llvm/Frontend/OpenMP/OMPGridValues.h [12:12]
+ include/llvm/Frontend/OpenMP/OMPIRBuilder.h [12:12]
+ include/llvm/Frontend/OpenMP/OMPKinds.def [5:5]
+ include/llvm/FuzzMutate/FuzzerCLI.h [12:12]
+ include/llvm/FuzzMutate/IRMutator.h [12:12]
+ include/llvm/FuzzMutate/OpDescriptor.h [12:12]
+ include/llvm/FuzzMutate/Operations.h [12:12]
+ include/llvm/FuzzMutate/Random.h [12:12]
+ include/llvm/FuzzMutate/RandomIRBuilder.h [12:12]
+ include/llvm/IR/AbstractCallSite.h [12:12]
+ include/llvm/IR/Argument.h [12:12]
+ include/llvm/IR/AssemblyAnnotationWriter.h [12:12]
+ include/llvm/IR/Assumptions.h [12:12]
+ include/llvm/IR/Attributes.h [12:12]
+ include/llvm/IR/Attributes.td [5:5]
+ include/llvm/IR/AutoUpgrade.h [12:12]
+ include/llvm/IR/BasicBlock.h [12:12]
+ include/llvm/IR/BuiltinGCs.h [12:12]
+ include/llvm/IR/CFG.h [12:12]
+ include/llvm/IR/CallingConv.h [12:12]
+ include/llvm/IR/Comdat.h [12:12]
+ include/llvm/IR/Constant.h [12:12]
+ include/llvm/IR/ConstantFold.h [12:12]
+ include/llvm/IR/ConstantFolder.h [12:12]
+ include/llvm/IR/ConstantRange.h [12:12]
+ include/llvm/IR/Constants.h [12:12]
+ include/llvm/IR/ConstrainedOps.def [5:5]
+ include/llvm/IR/DIBuilder.h [12:12]
+ include/llvm/IR/DataLayout.h [12:12]
+ include/llvm/IR/DebugInfo.h [12:12]
+ include/llvm/IR/DebugInfoFlags.def [5:5]
+ include/llvm/IR/DebugInfoMetadata.h [12:12]
+ include/llvm/IR/DebugLoc.h [12:12]
+ include/llvm/IR/DerivedTypes.h [12:12]
+ include/llvm/IR/DerivedUser.h [12:12]
+ include/llvm/IR/DiagnosticHandler.h [12:12]
+ include/llvm/IR/DiagnosticInfo.h [12:12]
+ include/llvm/IR/DiagnosticPrinter.h [12:12]
+ include/llvm/IR/Dominators.h [12:12]
+ include/llvm/IR/FMF.h [12:12]
+ include/llvm/IR/FPEnv.h [12:12]
+ include/llvm/IR/FixedPointBuilder.h [12:12]
+ include/llvm/IR/Function.h [12:12]
+ include/llvm/IR/GCStrategy.h [12:12]
+ include/llvm/IR/GVMaterializer.h [12:12]
+ include/llvm/IR/GetElementPtrTypeIterator.h [12:12]
+ include/llvm/IR/GlobalAlias.h [12:12]
+ include/llvm/IR/GlobalIFunc.h [12:12]
+ include/llvm/IR/GlobalObject.h [12:12]
+ include/llvm/IR/GlobalValue.h [12:12]
+ include/llvm/IR/GlobalVariable.h [12:12]
+ include/llvm/IR/IRBuilder.h [12:12]
+ include/llvm/IR/IRBuilderFolder.h [12:12]
+ include/llvm/IR/IRPrintingPasses.h [12:12]
+ include/llvm/IR/InlineAsm.h [12:12]
+ include/llvm/IR/InstIterator.h [12:12]
+ include/llvm/IR/InstVisitor.h [12:12]
+ include/llvm/IR/InstrTypes.h [12:12]
+ include/llvm/IR/Instruction.def [5:5]
+ include/llvm/IR/Instruction.h [12:12]
+ include/llvm/IR/Instructions.h [12:12]
+ include/llvm/IR/IntrinsicInst.h [12:12]
+ include/llvm/IR/Intrinsics.h [12:12]
+ include/llvm/IR/Intrinsics.td [5:5]
+ include/llvm/IR/IntrinsicsAArch64.td [5:5]
+ include/llvm/IR/IntrinsicsAMDGPU.td [5:5]
+ include/llvm/IR/IntrinsicsARM.td [5:5]
+ include/llvm/IR/IntrinsicsBPF.td [5:5]
+ include/llvm/IR/IntrinsicsDirectX.td [5:5]
+ include/llvm/IR/IntrinsicsHexagon.td [4:4]
+ include/llvm/IR/IntrinsicsHexagonDep.td [5:5]
+ include/llvm/IR/IntrinsicsLoongArch.td [5:5]
+ include/llvm/IR/IntrinsicsMips.td [5:5]
+ include/llvm/IR/IntrinsicsNVVM.td [5:5]
+ include/llvm/IR/IntrinsicsPowerPC.td [5:5]
+ include/llvm/IR/IntrinsicsRISCV.td [5:5]
+ include/llvm/IR/IntrinsicsSPIRV.td [5:5]
+ include/llvm/IR/IntrinsicsSystemZ.td [5:5]
+ include/llvm/IR/IntrinsicsWebAssembly.td [5:5]
+ include/llvm/IR/IntrinsicsX86.td [5:5]
+ include/llvm/IR/IntrinsicsXCore.td [5:5]
+ include/llvm/IR/LLVMContext.h [12:12]
+ include/llvm/IR/LLVMRemarkStreamer.h [12:12]
+ include/llvm/IR/LegacyPassManager.h [12:12]
+ include/llvm/IR/LegacyPassManagers.h [12:12]
+ include/llvm/IR/LegacyPassNameParser.h [12:12]
+ include/llvm/IR/MDBuilder.h [12:12]
+ include/llvm/IR/Mangler.h [12:12]
+ include/llvm/IR/MatrixBuilder.h [12:12]
+ include/llvm/IR/Metadata.def [5:5]
+ include/llvm/IR/Metadata.h [12:12]
+ include/llvm/IR/Module.h [12:12]
+ include/llvm/IR/ModuleSlotTracker.h [12:12]
+ include/llvm/IR/ModuleSummaryIndex.h [12:12]
+ include/llvm/IR/ModuleSummaryIndexYAML.h [12:12]
+ include/llvm/IR/NoFolder.h [12:12]
+ include/llvm/IR/OperandTraits.h [12:12]
+ include/llvm/IR/Operator.h [12:12]
+ include/llvm/IR/OptBisect.h [12:12]
+ include/llvm/IR/PassInstrumentation.h [12:12]
+ include/llvm/IR/PassManager.h [12:12]
+ include/llvm/IR/PassManagerImpl.h [12:12]
+ include/llvm/IR/PassManagerInternal.h [12:12]
+ include/llvm/IR/PassTimingInfo.h [12:12]
+ include/llvm/IR/PatternMatch.h [12:12]
+ include/llvm/IR/PredIteratorCache.h [12:12]
+ include/llvm/IR/PrintPasses.h [12:12]
+ include/llvm/IR/ProfDataUtils.h [12:12]
+ include/llvm/IR/ProfileSummary.h [12:12]
+ include/llvm/IR/PseudoProbe.h [12:12]
+ include/llvm/IR/ReplaceConstant.h [12:12]
+ include/llvm/IR/RuntimeLibcalls.def [5:5]
+ include/llvm/IR/SSAContext.h [12:12]
+ include/llvm/IR/SafepointIRVerifier.h [12:12]
+ include/llvm/IR/Statepoint.h [12:12]
+ include/llvm/IR/StructuralHash.h [12:12]
+ include/llvm/IR/SymbolTableListTraits.h [12:12]
+ include/llvm/IR/TrackingMDRef.h [12:12]
+ include/llvm/IR/Type.h [12:12]
+ include/llvm/IR/TypeFinder.h [12:12]
+ include/llvm/IR/TypedPointerType.h [12:12]
+ include/llvm/IR/Use.h [12:12]
+ include/llvm/IR/UseListOrder.h [12:12]
+ include/llvm/IR/User.h [12:12]
+ include/llvm/IR/VPIntrinsics.def [5:5]
+ include/llvm/IR/Value.def [5:5]
+ include/llvm/IR/Value.h [12:12]
+ include/llvm/IR/ValueHandle.h [12:12]
+ include/llvm/IR/ValueMap.h [12:12]
+ include/llvm/IR/ValueSymbolTable.h [12:12]
+ include/llvm/IR/VectorBuilder.h [12:12]
+ include/llvm/IR/Verifier.h [12:12]
+ include/llvm/IRPrinter/IRPrintingPasses.h [12:12]
+ include/llvm/IRReader/IRReader.h [12:12]
+ include/llvm/InitializePasses.h [12:12]
+ include/llvm/InterfaceStub/ELFObjHandler.h [12:12]
+ include/llvm/InterfaceStub/IFSHandler.h [12:12]
+ include/llvm/InterfaceStub/IFSStub.h [12:12]
+ include/llvm/LTO/Config.h [12:12]
+ include/llvm/LTO/LTO.h [12:12]
+ include/llvm/LTO/LTOBackend.h [12:12]
+ include/llvm/LTO/SummaryBasedOptimizations.h [12:12]
+ include/llvm/LTO/legacy/LTOCodeGenerator.h [12:12]
+ include/llvm/LTO/legacy/LTOModule.h [12:12]
+ include/llvm/LTO/legacy/ThinLTOCodeGenerator.h [12:12]
+ include/llvm/LTO/legacy/UpdateCompilerUsed.h [12:12]
+ include/llvm/LineEditor/LineEditor.h [12:12]
+ include/llvm/LinkAllIR.h [12:12]
+ include/llvm/LinkAllPasses.h [12:12]
+ include/llvm/Linker/IRMover.h [12:12]
+ include/llvm/Linker/Linker.h [12:12]
+ include/llvm/MC/ConstantPools.h [12:12]
+ include/llvm/MC/LaneBitmask.h [12:12]
+ include/llvm/MC/MCAsmBackend.h [12:12]
+ include/llvm/MC/MCAsmInfo.h [12:12]
+ include/llvm/MC/MCAsmInfoCOFF.h [12:12]
+ include/llvm/MC/MCAsmInfoDarwin.h [12:12]
+ include/llvm/MC/MCAsmInfoELF.h [12:12]
+ include/llvm/MC/MCAsmInfoGOFF.h [12:12]
+ include/llvm/MC/MCAsmInfoWasm.h [12:12]
+ include/llvm/MC/MCAsmInfoXCOFF.h [12:12]
+ include/llvm/MC/MCAsmLayout.h [12:12]
+ include/llvm/MC/MCAsmMacro.h [12:12]
+ include/llvm/MC/MCAssembler.h [12:12]
+ include/llvm/MC/MCCodeEmitter.h [12:12]
+ include/llvm/MC/MCCodeView.h [12:12]
+ include/llvm/MC/MCContext.h [12:12]
+ include/llvm/MC/MCDXContainerStreamer.h [12:12]
+ include/llvm/MC/MCDXContainerWriter.h [12:12]
+ include/llvm/MC/MCDecoderOps.h [12:12]
+ include/llvm/MC/MCDirectives.h [12:12]
+ include/llvm/MC/MCDisassembler/MCDisassembler.h [12:12]
+ include/llvm/MC/MCDisassembler/MCExternalSymbolizer.h [12:12]
+ include/llvm/MC/MCDisassembler/MCRelocationInfo.h [12:12]
+ include/llvm/MC/MCDisassembler/MCSymbolizer.h [12:12]
+ include/llvm/MC/MCDwarf.h [12:12]
+ include/llvm/MC/MCELFObjectWriter.h [12:12]
+ include/llvm/MC/MCELFStreamer.h [12:12]
+ include/llvm/MC/MCExpr.h [12:12]
+ include/llvm/MC/MCFixup.h [12:12]
+ include/llvm/MC/MCFixupKindInfo.h [12:12]
+ include/llvm/MC/MCFragment.h [12:12]
+ include/llvm/MC/MCInst.h [12:12]
+ include/llvm/MC/MCInstBuilder.h [12:12]
+ include/llvm/MC/MCInstPrinter.h [12:12]
+ include/llvm/MC/MCInstrAnalysis.h [12:12]
+ include/llvm/MC/MCInstrDesc.h [12:12]
+ include/llvm/MC/MCInstrInfo.h [12:12]
+ include/llvm/MC/MCInstrItineraries.h [12:12]
+ include/llvm/MC/MCLabel.h [12:12]
+ include/llvm/MC/MCLinkerOptimizationHint.h [13:13]
+ include/llvm/MC/MCMachObjectWriter.h [12:12]
+ include/llvm/MC/MCObjectFileInfo.h [12:12]
+ include/llvm/MC/MCObjectStreamer.h [12:12]
+ include/llvm/MC/MCObjectWriter.h [12:12]
+ include/llvm/MC/MCParser/AsmCond.h [12:12]
+ include/llvm/MC/MCParser/AsmLexer.h [12:12]
+ include/llvm/MC/MCParser/MCAsmLexer.h [12:12]
+ include/llvm/MC/MCParser/MCAsmParser.h [12:12]
+ include/llvm/MC/MCParser/MCAsmParserExtension.h [12:12]
+ include/llvm/MC/MCParser/MCAsmParserUtils.h [12:12]
+ include/llvm/MC/MCParser/MCParsedAsmOperand.h [12:12]
+ include/llvm/MC/MCParser/MCTargetAsmParser.h [12:12]
+ include/llvm/MC/MCPseudoProbe.h [12:12]
+ include/llvm/MC/MCRegister.h [12:12]
+ include/llvm/MC/MCRegisterInfo.h [12:12]
+ include/llvm/MC/MCSPIRVObjectWriter.h [12:12]
+ include/llvm/MC/MCSPIRVStreamer.h [12:12]
+ include/llvm/MC/MCSchedule.h [12:12]
+ include/llvm/MC/MCSection.h [12:12]
+ include/llvm/MC/MCSectionCOFF.h [12:12]
+ include/llvm/MC/MCSectionDXContainer.h [12:12]
+ include/llvm/MC/MCSectionELF.h [12:12]
+ include/llvm/MC/MCSectionGOFF.h [12:12]
+ include/llvm/MC/MCSectionMachO.h [12:12]
+ include/llvm/MC/MCSectionSPIRV.h [12:12]
+ include/llvm/MC/MCSectionWasm.h [12:12]
+ include/llvm/MC/MCSectionXCOFF.h [12:12]
+ include/llvm/MC/MCStreamer.h [12:12]
+ include/llvm/MC/MCSubtargetInfo.h [12:12]
+ include/llvm/MC/MCSymbol.h [12:12]
+ include/llvm/MC/MCSymbolCOFF.h [12:12]
+ include/llvm/MC/MCSymbolELF.h [12:12]
+ include/llvm/MC/MCSymbolGOFF.h [12:12]
+ include/llvm/MC/MCSymbolMachO.h [12:12]
+ include/llvm/MC/MCSymbolWasm.h [12:12]
+ include/llvm/MC/MCSymbolXCOFF.h [12:12]
+ include/llvm/MC/MCTargetOptions.h [12:12]
+ include/llvm/MC/MCTargetOptionsCommandFlags.h [12:12]
+ include/llvm/MC/MCValue.h [12:12]
+ include/llvm/MC/MCWasmObjectWriter.h [12:12]
+ include/llvm/MC/MCWasmStreamer.h [12:12]
+ include/llvm/MC/MCWin64EH.h [12:12]
+ include/llvm/MC/MCWinCOFFObjectWriter.h [12:12]
+ include/llvm/MC/MCWinCOFFStreamer.h [12:12]
+ include/llvm/MC/MCWinEH.h [12:12]
+ include/llvm/MC/MCXCOFFObjectWriter.h [12:12]
+ include/llvm/MC/MCXCOFFStreamer.h [12:12]
+ include/llvm/MC/MachineLocation.h [12:12]
+ include/llvm/MC/SectionKind.h [12:12]
+ include/llvm/MC/StringTableBuilder.h [12:12]
+ include/llvm/MC/SubtargetFeature.h [12:12]
+ include/llvm/MC/TargetRegistry.h [12:12]
+ include/llvm/MCA/CodeEmitter.h [12:12]
+ include/llvm/MCA/Context.h [12:12]
+ include/llvm/MCA/CustomBehaviour.h [12:12]
+ include/llvm/MCA/HWEventListener.h [12:12]
+ include/llvm/MCA/HardwareUnits/HardwareUnit.h [12:12]
+ include/llvm/MCA/HardwareUnits/LSUnit.h [12:12]
+ include/llvm/MCA/HardwareUnits/RegisterFile.h [12:12]
+ include/llvm/MCA/HardwareUnits/ResourceManager.h [12:12]
+ include/llvm/MCA/HardwareUnits/RetireControlUnit.h [12:12]
+ include/llvm/MCA/HardwareUnits/Scheduler.h [12:12]
+ include/llvm/MCA/IncrementalSourceMgr.h [12:12]
+ include/llvm/MCA/InstrBuilder.h [12:12]
+ include/llvm/MCA/Instruction.h [12:12]
+ include/llvm/MCA/Pipeline.h [12:12]
+ include/llvm/MCA/SourceMgr.h [12:12]
+ include/llvm/MCA/Stages/DispatchStage.h [12:12]
+ include/llvm/MCA/Stages/EntryStage.h [12:12]
+ include/llvm/MCA/Stages/ExecuteStage.h [12:12]
+ include/llvm/MCA/Stages/InOrderIssueStage.h [12:12]
+ include/llvm/MCA/Stages/InstructionTables.h [12:12]
+ include/llvm/MCA/Stages/MicroOpQueueStage.h [12:12]
+ include/llvm/MCA/Stages/RetireStage.h [12:12]
+ include/llvm/MCA/Stages/Stage.h [12:12]
+ include/llvm/MCA/Support.h [12:12]
+ include/llvm/MCA/View.h [12:12]
+ include/llvm/ObjCopy/COFF/COFFConfig.h [12:12]
+ include/llvm/ObjCopy/COFF/COFFObjcopy.h [12:12]
+ include/llvm/ObjCopy/CommonConfig.h [12:12]
+ include/llvm/ObjCopy/ConfigManager.h [12:12]
+ include/llvm/ObjCopy/ELF/ELFConfig.h [12:12]
+ include/llvm/ObjCopy/ELF/ELFObjcopy.h [12:12]
+ include/llvm/ObjCopy/MachO/MachOConfig.h [12:12]
+ include/llvm/ObjCopy/MachO/MachOObjcopy.h [12:12]
+ include/llvm/ObjCopy/MultiFormatConfig.h [12:12]
+ include/llvm/ObjCopy/ObjCopy.h [12:12]
+ include/llvm/ObjCopy/XCOFF/XCOFFConfig.h [12:12]
+ include/llvm/ObjCopy/XCOFF/XCOFFObjcopy.h [12:12]
+ include/llvm/ObjCopy/wasm/WasmConfig.h [12:12]
+ include/llvm/ObjCopy/wasm/WasmObjcopy.h [12:12]
+ include/llvm/Object/Archive.h [12:12]
+ include/llvm/Object/ArchiveWriter.h [12:12]
+ include/llvm/Object/Binary.h [12:12]
+ include/llvm/Object/BuildID.h [12:12]
+ include/llvm/Object/COFF.h [12:12]
+ include/llvm/Object/COFFImportFile.h [12:12]
+ include/llvm/Object/COFFModuleDefinition.h [12:12]
+ include/llvm/Object/CVDebugRecord.h [12:12]
+ include/llvm/Object/DXContainer.h [12:12]
+ include/llvm/Object/Decompressor.h [12:12]
+ include/llvm/Object/ELF.h [12:12]
+ include/llvm/Object/ELFObjectFile.h [12:12]
+ include/llvm/Object/ELFTypes.h [12:12]
+ include/llvm/Object/Error.h [12:12]
+ include/llvm/Object/FaultMapParser.h [12:12]
+ include/llvm/Object/IRObjectFile.h [12:12]
+ include/llvm/Object/IRSymtab.h [12:12]
+ include/llvm/Object/MachO.h [12:12]
+ include/llvm/Object/MachOUniversal.h [12:12]
+ include/llvm/Object/MachOUniversalWriter.h [12:12]
+ include/llvm/Object/Minidump.h [12:12]
+ include/llvm/Object/ModuleSymbolTable.h [12:12]
+ include/llvm/Object/ObjectFile.h [12:12]
+ include/llvm/Object/OffloadBinary.h [12:12]
+ include/llvm/Object/RelocationResolver.h [12:12]
+ include/llvm/Object/StackMapParser.h [12:12]
+ include/llvm/Object/SymbolSize.h [12:12]
+ include/llvm/Object/SymbolicFile.h [12:12]
+ include/llvm/Object/TapiFile.h [12:12]
+ include/llvm/Object/TapiUniversal.h [12:12]
+ include/llvm/Object/Wasm.h [12:12]
+ include/llvm/Object/WindowsMachineFlag.h [12:12]
+ include/llvm/Object/WindowsResource.h [12:12]
+ include/llvm/Object/XCOFFObjectFile.h [12:12]
+ include/llvm/ObjectYAML/ArchiveYAML.h [12:12]
+ include/llvm/ObjectYAML/COFFYAML.h [12:12]
+ include/llvm/ObjectYAML/CodeViewYAMLDebugSections.h [12:12]
+ include/llvm/ObjectYAML/CodeViewYAMLSymbols.h [12:12]
+ include/llvm/ObjectYAML/CodeViewYAMLTypeHashing.h [12:12]
+ include/llvm/ObjectYAML/CodeViewYAMLTypes.h [12:12]
+ include/llvm/ObjectYAML/DWARFEmitter.h [12:12]
+ include/llvm/ObjectYAML/DWARFYAML.h [12:12]
+ include/llvm/ObjectYAML/DXContainerYAML.h [12:12]
+ include/llvm/ObjectYAML/ELFYAML.h [12:12]
+ include/llvm/ObjectYAML/MachOYAML.h [12:12]
+ include/llvm/ObjectYAML/MinidumpYAML.h [12:12]
+ include/llvm/ObjectYAML/ObjectYAML.h [12:12]
+ include/llvm/ObjectYAML/OffloadYAML.h [12:12]
+ include/llvm/ObjectYAML/WasmYAML.h [12:12]
+ include/llvm/ObjectYAML/XCOFFYAML.h [12:12]
+ include/llvm/ObjectYAML/YAML.h [12:12]
+ include/llvm/ObjectYAML/yaml2obj.h [12:12]
+ include/llvm/Option/Arg.h [12:12]
+ include/llvm/Option/ArgList.h [12:12]
+ include/llvm/Option/OptParser.td [5:5]
+ include/llvm/Option/OptSpecifier.h [12:12]
+ include/llvm/Option/OptTable.h [12:12]
+ include/llvm/Option/Option.h [12:12]
+ include/llvm/Pass.h [12:12]
+ include/llvm/PassAnalysisSupport.h [12:12]
+ include/llvm/PassInfo.h [12:12]
+ include/llvm/PassRegistry.h [12:12]
+ include/llvm/PassSupport.h [12:12]
+ include/llvm/Passes/OptimizationLevel.h [12:12]
+ include/llvm/Passes/PassBuilder.h [12:12]
+ include/llvm/Passes/PassPlugin.h [12:12]
+ include/llvm/Passes/StandardInstrumentations.h [12:12]
+ include/llvm/ProfileData/Coverage/CoverageMapping.h [12:12]
+ include/llvm/ProfileData/Coverage/CoverageMappingReader.h [12:12]
+ include/llvm/ProfileData/Coverage/CoverageMappingWriter.h [12:12]
+ include/llvm/ProfileData/GCOV.h [12:12]
+ include/llvm/ProfileData/InstrProf.h [12:12]
+ include/llvm/ProfileData/InstrProfCorrelator.h [12:12]
+ include/llvm/ProfileData/InstrProfReader.h [12:12]
+ include/llvm/ProfileData/InstrProfWriter.h [12:12]
+ include/llvm/ProfileData/ProfileCommon.h [12:12]
+ include/llvm/ProfileData/RawMemProfReader.h [14:14]
+ include/llvm/ProfileData/SampleProf.h [12:12]
+ include/llvm/ProfileData/SampleProfReader.h [12:12]
+ include/llvm/ProfileData/SampleProfWriter.h [12:12]
+ include/llvm/Remarks/BitstreamRemarkContainer.h [12:12]
+ include/llvm/Remarks/BitstreamRemarkParser.h [12:12]
+ include/llvm/Remarks/BitstreamRemarkSerializer.h [12:12]
+ include/llvm/Remarks/HotnessThresholdParser.h [12:12]
+ include/llvm/Remarks/Remark.h [12:12]
+ include/llvm/Remarks/RemarkFormat.h [12:12]
+ include/llvm/Remarks/RemarkLinker.h [12:12]
+ include/llvm/Remarks/RemarkParser.h [12:12]
+ include/llvm/Remarks/RemarkSerializer.h [12:12]
+ include/llvm/Remarks/RemarkStreamer.h [12:12]
+ include/llvm/Remarks/RemarkStringTable.h [12:12]
+ include/llvm/Remarks/YAMLRemarkSerializer.h [12:12]
+ include/llvm/Support/AArch64TargetParser.h [12:12]
+ include/llvm/Support/AMDGPUMetadata.h [12:12]
+ include/llvm/Support/AMDHSAKernelDescriptor.h [12:12]
+ include/llvm/Support/ARMAttributeParser.h [12:12]
+ include/llvm/Support/ARMBuildAttributes.h [12:12]
+ include/llvm/Support/ARMEHABI.h [12:12]
+ include/llvm/Support/ARMTargetParser.h [12:12]
+ include/llvm/Support/ARMTargetParserCommon.h [12:12]
+ include/llvm/Support/ARMWinEH.h [12:12]
+ include/llvm/Support/AlignOf.h [12:12]
+ include/llvm/Support/Alignment.h [12:12]
+ include/llvm/Support/Allocator.h [12:12]
+ include/llvm/Support/AllocatorBase.h [12:12]
+ include/llvm/Support/ArrayRecycler.h [12:12]
+ include/llvm/Support/Atomic.h [12:12]
+ include/llvm/Support/AtomicOrdering.h [12:12]
+ include/llvm/Support/AutoConvert.h [12:12]
+ include/llvm/Support/Automaton.h [12:12]
+ include/llvm/Support/BCD.h [12:12]
+ include/llvm/Support/BLAKE3.h [12:12]
+ include/llvm/Support/Base64.h [12:12]
+ include/llvm/Support/BinaryByteStream.h [12:12]
+ include/llvm/Support/BinaryItemStream.h [12:12]
+ include/llvm/Support/BinaryStream.h [12:12]
+ include/llvm/Support/BinaryStreamArray.h [12:12]
+ include/llvm/Support/BinaryStreamError.h [12:12]
+ include/llvm/Support/BinaryStreamReader.h [12:12]
+ include/llvm/Support/BinaryStreamRef.h [12:12]
+ include/llvm/Support/BinaryStreamWriter.h [12:12]
+ include/llvm/Support/BlockFrequency.h [12:12]
+ include/llvm/Support/BranchProbability.h [12:12]
+ include/llvm/Support/BuryPointer.h [12:12]
+ include/llvm/Support/CBindingWrapping.h [12:12]
+ include/llvm/Support/CFGDiff.h [12:12]
+ include/llvm/Support/CFGUpdate.h [12:12]
+ include/llvm/Support/COM.h [12:12]
+ include/llvm/Support/CRC.h [12:12]
+ include/llvm/Support/CSKYAttributeParser.h [12:12]
+ include/llvm/Support/CSKYAttributes.h [12:12]
+ include/llvm/Support/CSKYTargetParser.h [12:12]
+ include/llvm/Support/CachePruning.h [12:12]
+ include/llvm/Support/Caching.h [12:12]
+ include/llvm/Support/Capacity.h [12:12]
+ include/llvm/Support/Casting.h [12:12]
+ include/llvm/Support/CheckedArithmetic.h [12:12]
+ include/llvm/Support/Chrono.h [12:12]
+ include/llvm/Support/CodeGen.h [12:12]
+ include/llvm/Support/CodeGenCoverage.h [12:12]
+ include/llvm/Support/CommandLine.h [12:12]
+ include/llvm/Support/Compiler.h [12:12]
+ include/llvm/Support/Compression.h [12:12]
+ include/llvm/Support/CrashRecoveryContext.h [12:12]
+ include/llvm/Support/DJB.h [12:12]
+ include/llvm/Support/DOTGraphTraits.h [12:12]
+ include/llvm/Support/DXILOperationCommon.h [12:12]
+ include/llvm/Support/DataExtractor.h [12:12]
+ include/llvm/Support/DataTypes.h [12:12]
+ include/llvm/Support/Debug.h [12:12]
+ include/llvm/Support/DebugCounter.h [12:12]
+ include/llvm/Support/Discriminator.h [12:12]
+ include/llvm/Support/DivisionByConstantInfo.h [12:12]
+ include/llvm/Support/Duration.h [12:12]
+ include/llvm/Support/DynamicLibrary.h [12:12]
+ include/llvm/Support/ELFAttributeParser.h [12:12]
+ include/llvm/Support/ELFAttributes.h [12:12]
+ include/llvm/Support/Endian.h [12:12]
+ include/llvm/Support/EndianStream.h [12:12]
+ include/llvm/Support/Errc.h [12:12]
+ include/llvm/Support/Errno.h [12:12]
+ include/llvm/Support/Error.h [12:12]
+ include/llvm/Support/ErrorHandling.h [12:12]
+ include/llvm/Support/ErrorOr.h [12:12]
+ include/llvm/Support/ExitCodes.h [12:12]
+ include/llvm/Support/ExtensibleRTTI.h [12:12]
+ include/llvm/Support/FileCollector.h [12:12]
+ include/llvm/Support/FileOutputBuffer.h [12:12]
+ include/llvm/Support/FileSystem.h [12:12]
+ include/llvm/Support/FileSystem/UniqueID.h [12:12]
+ include/llvm/Support/FileUtilities.h [12:12]
+ include/llvm/Support/Format.h [12:12]
+ include/llvm/Support/FormatAdapters.h [12:12]
+ include/llvm/Support/FormatCommon.h [12:12]
+ include/llvm/Support/FormatProviders.h [12:12]
+ include/llvm/Support/FormatVariadic.h [12:12]
+ include/llvm/Support/FormatVariadicDetails.h [12:12]
+ include/llvm/Support/FormattedStream.h [12:12]
+ include/llvm/Support/GenericDomTree.h [12:12]
+ include/llvm/Support/GenericDomTreeConstruction.h [12:12]
+ include/llvm/Support/GenericIteratedDominanceFrontier.h [12:12]
+ include/llvm/Support/GlobPattern.h [12:12]
+ include/llvm/Support/GraphWriter.h [12:12]
+ include/llvm/Support/HashBuilder.h [12:12]
+ include/llvm/Support/Host.h [12:12]
+ include/llvm/Support/InitLLVM.h [12:12]
+ include/llvm/Support/InstructionCost.h [12:12]
+ include/llvm/Support/ItaniumManglingCanonicalizer.h [12:12]
+ include/llvm/Support/JSON.h [12:12]
+ include/llvm/Support/KnownBits.h [12:12]
+ include/llvm/Support/LEB128.h [12:12]
+ include/llvm/Support/LineIterator.h [12:12]
+ include/llvm/Support/LockFileManager.h [12:12]
+ include/llvm/Support/LoongArchTargetParser.h [12:12]
+ include/llvm/Support/LowLevelTypeImpl.h [12:12]
+ include/llvm/Support/MSP430AttributeParser.h [12:12]
+ include/llvm/Support/MSP430Attributes.h [12:12]
+ include/llvm/Support/MSVCErrorWorkarounds.h [12:12]
+ include/llvm/Support/MachineValueType.h [12:12]
+ include/llvm/Support/ManagedStatic.h [12:12]
+ include/llvm/Support/MathExtras.h [12:12]
+ include/llvm/Support/MemAlloc.h [12:12]
+ include/llvm/Support/Memory.h [12:12]
+ include/llvm/Support/MemoryBuffer.h [12:12]
+ include/llvm/Support/MemoryBufferRef.h [12:12]
+ include/llvm/Support/MipsABIFlags.h [12:12]
+ include/llvm/Support/ModRef.h [12:12]
+ include/llvm/Support/Mutex.h [12:12]
+ include/llvm/Support/NativeFormatting.h [12:12]
+ include/llvm/Support/OnDiskHashTable.h [12:12]
+ include/llvm/Support/OptimizedStructLayout.h [12:12]
+ include/llvm/Support/PGOOptions.h [12:12]
+ include/llvm/Support/Parallel.h [12:12]
+ include/llvm/Support/Path.h [12:12]
+ include/llvm/Support/PluginLoader.h [12:12]
+ include/llvm/Support/PointerLikeTypeTraits.h [12:12]
+ include/llvm/Support/PrettyStackTrace.h [12:12]
+ include/llvm/Support/Printable.h [12:12]
+ include/llvm/Support/Process.h [12:12]
+ include/llvm/Support/Program.h [12:12]
+ include/llvm/Support/RISCVAttributeParser.h [12:12]
+ include/llvm/Support/RISCVAttributes.h [12:12]
+ include/llvm/Support/RISCVISAInfo.h [12:12]
+ include/llvm/Support/RWMutex.h [12:12]
+ include/llvm/Support/RandomNumberGenerator.h [12:12]
+ include/llvm/Support/Recycler.h [12:12]
+ include/llvm/Support/RecyclingAllocator.h [12:12]
+ include/llvm/Support/Regex.h [12:12]
+ include/llvm/Support/Registry.h [12:12]
+ include/llvm/Support/SHA1.h [12:12]
+ include/llvm/Support/SHA256.h [12:12]
+ include/llvm/Support/SMLoc.h [12:12]
+ include/llvm/Support/SMTAPI.h [12:12]
+ include/llvm/Support/SaveAndRestore.h [12:12]
+ include/llvm/Support/ScaledNumber.h [12:12]
+ include/llvm/Support/ScopedPrinter.h [12:12]
+ include/llvm/Support/Signals.h [12:12]
+ include/llvm/Support/Signposts.h [12:12]
+ include/llvm/Support/SmallVectorMemoryBuffer.h [12:12]
+ include/llvm/Support/SourceMgr.h [12:12]
+ include/llvm/Support/SpecialCaseList.h [12:12]
+ include/llvm/Support/StringSaver.h [12:12]
+ include/llvm/Support/SuffixTree.h [12:12]
+ include/llvm/Support/SwapByteOrder.h [12:12]
+ include/llvm/Support/SymbolRemappingReader.h [12:12]
+ include/llvm/Support/SystemUtils.h [12:12]
+ include/llvm/Support/TarWriter.h [12:12]
+ include/llvm/Support/TargetOpcodes.def [5:5]
+ include/llvm/Support/TargetParser.h [12:12]
+ include/llvm/Support/TargetSelect.h [12:12]
+ include/llvm/Support/TaskQueue.h [12:12]
+ include/llvm/Support/ThreadPool.h [12:12]
+ include/llvm/Support/Threading.h [12:12]
+ include/llvm/Support/TimeProfiler.h [12:12]
+ include/llvm/Support/Timer.h [12:12]
+ include/llvm/Support/ToolOutputFile.h [12:12]
+ include/llvm/Support/TrailingObjects.h [12:12]
+ include/llvm/Support/TrigramIndex.h [12:12]
+ include/llvm/Support/TypeName.h [12:12]
+ include/llvm/Support/TypeSize.h [12:12]
+ include/llvm/Support/Unicode.h [12:12]
+ include/llvm/Support/UnicodeCharRanges.h [12:12]
+ include/llvm/Support/Valgrind.h [12:12]
+ include/llvm/Support/VersionTuple.h [12:12]
+ include/llvm/Support/VirtualFileSystem.h [12:12]
+ include/llvm/Support/Watchdog.h [12:12]
+ include/llvm/Support/Win64EH.h [12:12]
+ include/llvm/Support/Windows/WindowsSupport.h [12:12]
+ include/llvm/Support/WindowsError.h [12:12]
+ include/llvm/Support/WithColor.h [12:12]
+ include/llvm/Support/X86DisassemblerDecoderCommon.h [12:12]
+ include/llvm/Support/X86TargetParser.def [5:5]
+ include/llvm/Support/X86TargetParser.h [12:12]
+ include/llvm/Support/YAMLParser.h [12:12]
+ include/llvm/Support/YAMLTraits.h [12:12]
+ include/llvm/Support/circular_raw_ostream.h [12:12]
+ include/llvm/Support/raw_os_ostream.h [12:12]
+ include/llvm/Support/raw_ostream.h [12:12]
+ include/llvm/Support/raw_sha1_ostream.h [12:12]
+ include/llvm/Support/thread.h [12:12]
+ include/llvm/Support/type_traits.h [12:12]
+ include/llvm/TableGen/Automaton.td [5:5]
+ include/llvm/TableGen/Error.h [12:12]
+ include/llvm/TableGen/Main.h [12:12]
+ include/llvm/TableGen/Parser.h [12:12]
+ include/llvm/TableGen/Record.h [12:12]
+ include/llvm/TableGen/SearchableTable.td [5:5]
+ include/llvm/TableGen/SetTheory.h [12:12]
+ include/llvm/TableGen/StringMatcher.h [12:12]
+ include/llvm/TableGen/StringToOffsetTable.h [12:12]
+ include/llvm/TableGen/TableGenBackend.h [12:12]
+ include/llvm/Target/CGPassBuilderOption.h [12:12]
+ include/llvm/Target/CodeGenCWrappers.h [12:12]
+ include/llvm/Target/GenericOpcodes.td [5:5]
+ include/llvm/Target/GlobalISel/Combine.td [5:5]
+ include/llvm/Target/GlobalISel/RegisterBank.td [5:5]
+ include/llvm/Target/GlobalISel/SelectionDAGCompat.td [5:5]
+ include/llvm/Target/GlobalISel/Target.td [5:5]
+ include/llvm/Target/Target.td [5:5]
+ include/llvm/Target/TargetCallingConv.td [5:5]
+ include/llvm/Target/TargetInstrPredicate.td [5:5]
+ include/llvm/Target/TargetIntrinsicInfo.h [12:12]
+ include/llvm/Target/TargetItinerary.td [5:5]
+ include/llvm/Target/TargetLoweringObjectFile.h [12:12]
+ include/llvm/Target/TargetMachine.h [12:12]
+ include/llvm/Target/TargetOptions.h [12:12]
+ include/llvm/Target/TargetPfmCounters.td [5:5]
+ include/llvm/Target/TargetSchedule.td [5:5]
+ include/llvm/Target/TargetSelectionDAG.td [5:5]
+ include/llvm/TargetParser/AArch64TargetParser.h [12:12]
+ include/llvm/TargetParser/ARMTargetParser.def [5:5]
+ include/llvm/TargetParser/ARMTargetParser.h [12:12]
+ include/llvm/TargetParser/ARMTargetParserCommon.h [12:12]
+ include/llvm/TargetParser/CSKYTargetParser.def [5:5]
+ include/llvm/TargetParser/CSKYTargetParser.h [13:13]
+ include/llvm/TargetParser/Host.h [12:12]
+ include/llvm/TargetParser/LoongArchTargetParser.h [12:12]
+ include/llvm/TargetParser/RISCVTargetParser.h [12:12]
+ include/llvm/TargetParser/TargetParser.h [12:12]
+ include/llvm/TargetParser/Triple.h [12:12]
+ include/llvm/TargetParser/X86TargetParser.def [5:5]
+ include/llvm/TargetParser/X86TargetParser.h [12:12]
+ include/llvm/Testing/ADT/StringMap.h [12:12]
+ include/llvm/Testing/ADT/StringMapEntry.h [12:12]
+ include/llvm/Testing/Annotations/Annotations.h [12:12]
+ include/llvm/Testing/Support/Error.h [12:12]
+ include/llvm/Testing/Support/SupportHelpers.h [12:12]
+ include/llvm/TextAPI/Architecture.def [5:5]
+ include/llvm/TextAPI/Architecture.h [12:12]
+ include/llvm/TextAPI/ArchitectureSet.h [12:12]
+ include/llvm/TextAPI/InterfaceFile.h [12:12]
+ include/llvm/TextAPI/PackedVersion.h [12:12]
+ include/llvm/TextAPI/Platform.h [12:12]
+ include/llvm/TextAPI/Symbol.h [12:12]
+ include/llvm/TextAPI/Target.h [12:12]
+ include/llvm/TextAPI/TextAPIReader.h [12:12]
+ include/llvm/TextAPI/TextAPIWriter.h [12:12]
+ include/llvm/ToolDrivers/llvm-dlltool/DlltoolDriver.h [12:12]
+ include/llvm/ToolDrivers/llvm-lib/LibDriver.h [12:12]
+ include/llvm/Transforms/AggressiveInstCombine/AggressiveInstCombine.h [12:12]
+ include/llvm/Transforms/CFGuard.h [12:12]
+ include/llvm/Transforms/Coroutines/CoroCleanup.h [12:12]
+ include/llvm/Transforms/Coroutines/CoroConditionalWrapper.h [12:12]
+ include/llvm/Transforms/Coroutines/CoroEarly.h [12:12]
+ include/llvm/Transforms/Coroutines/CoroElide.h [12:12]
+ include/llvm/Transforms/Coroutines/CoroSplit.h [12:12]
+ include/llvm/Transforms/IPO.h [12:12]
+ include/llvm/Transforms/IPO/AlwaysInliner.h [12:12]
+ include/llvm/Transforms/IPO/Annotation2Metadata.h [12:12]
+ include/llvm/Transforms/IPO/ArgumentPromotion.h [12:12]
+ include/llvm/Transforms/IPO/Attributor.h [12:12]
+ include/llvm/Transforms/IPO/BlockExtractor.h [12:12]
+ include/llvm/Transforms/IPO/CalledValuePropagation.h [12:12]
+ include/llvm/Transforms/IPO/ConstantMerge.h [12:12]
+ include/llvm/Transforms/IPO/CrossDSOCFI.h [12:12]
+ include/llvm/Transforms/IPO/DeadArgumentElimination.h [12:12]
+ include/llvm/Transforms/IPO/ElimAvailExtern.h [12:12]
+ include/llvm/Transforms/IPO/ExtractGV.h [12:12]
+ include/llvm/Transforms/IPO/ForceFunctionAttrs.h [12:12]
+ include/llvm/Transforms/IPO/FunctionAttrs.h [12:12]
+ include/llvm/Transforms/IPO/FunctionImport.h [12:12]
+ include/llvm/Transforms/IPO/FunctionSpecialization.h [12:12]
+ include/llvm/Transforms/IPO/GlobalDCE.h [12:12]
+ include/llvm/Transforms/IPO/GlobalOpt.h [12:12]
+ include/llvm/Transforms/IPO/GlobalSplit.h [12:12]
+ include/llvm/Transforms/IPO/HotColdSplitting.h [12:12]
+ include/llvm/Transforms/IPO/IROutliner.h [12:12]
+ include/llvm/Transforms/IPO/InferFunctionAttrs.h [12:12]
+ include/llvm/Transforms/IPO/Inliner.h [12:12]
+ include/llvm/Transforms/IPO/Internalize.h [12:12]
+ include/llvm/Transforms/IPO/LoopExtractor.h [12:12]
+ include/llvm/Transforms/IPO/LowerTypeTests.h [12:12]
+ include/llvm/Transforms/IPO/MergeFunctions.h [12:12]
+ include/llvm/Transforms/IPO/ModuleInliner.h [12:12]
+ include/llvm/Transforms/IPO/OpenMPOpt.h [12:12]
+ include/llvm/Transforms/IPO/PartialInlining.h [12:12]
+ include/llvm/Transforms/IPO/PassManagerBuilder.h [12:12]
+ include/llvm/Transforms/IPO/ProfiledCallGraph.h [12:12]
+ include/llvm/Transforms/IPO/SCCP.h [12:12]
+ include/llvm/Transforms/IPO/SampleContextTracker.h [12:12]
+ include/llvm/Transforms/IPO/SampleProfile.h [12:12]
+ include/llvm/Transforms/IPO/SampleProfileProbe.h [12:12]
+ include/llvm/Transforms/IPO/StripDeadPrototypes.h [12:12]
+ include/llvm/Transforms/IPO/StripSymbols.h [12:12]
+ include/llvm/Transforms/IPO/SyntheticCountsPropagation.h [12:12]
+ include/llvm/Transforms/IPO/ThinLTOBitcodeWriter.h [12:12]
+ include/llvm/Transforms/IPO/WholeProgramDevirt.h [12:12]
+ include/llvm/Transforms/InstCombine/InstCombine.h [12:12]
+ include/llvm/Transforms/InstCombine/InstCombiner.h [12:12]
+ include/llvm/Transforms/Instrumentation.h [12:12]
+ include/llvm/Transforms/Instrumentation/AddressSanitizer.h [12:12]
+ include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h [12:12]
+ include/llvm/Transforms/Instrumentation/AddressSanitizerOptions.h [12:12]
+ include/llvm/Transforms/Instrumentation/BoundsChecking.h [12:12]
+ include/llvm/Transforms/Instrumentation/CGProfile.h [12:12]
+ include/llvm/Transforms/Instrumentation/ControlHeightReduction.h [12:12]
+ include/llvm/Transforms/Instrumentation/DataFlowSanitizer.h [12:12]
+ include/llvm/Transforms/Instrumentation/GCOVProfiler.h [12:12]
+ include/llvm/Transforms/Instrumentation/HWAddressSanitizer.h [12:12]
+ include/llvm/Transforms/Instrumentation/InstrOrderFile.h [12:12]
+ include/llvm/Transforms/Instrumentation/InstrProfiling.h [12:12]
+ include/llvm/Transforms/Instrumentation/KCFI.h [12:12]
+ include/llvm/Transforms/Instrumentation/MemProfiler.h [12:12]
+ include/llvm/Transforms/Instrumentation/MemorySanitizer.h [12:12]
+ include/llvm/Transforms/Instrumentation/PGOInstrumentation.h [12:12]
+ include/llvm/Transforms/Instrumentation/PoisonChecking.h [12:12]
+ include/llvm/Transforms/Instrumentation/SanitizerBinaryMetadata.h [12:12]
+ include/llvm/Transforms/Instrumentation/SanitizerCoverage.h [14:14]
+ include/llvm/Transforms/Instrumentation/ThreadSanitizer.h [12:12]
+ include/llvm/Transforms/ObjCARC.h [12:12]
+ include/llvm/Transforms/Scalar.h [12:12]
+ include/llvm/Transforms/Scalar/ADCE.h [12:12]
+ include/llvm/Transforms/Scalar/AlignmentFromAssumptions.h [12:12]
+ include/llvm/Transforms/Scalar/AnnotationRemarks.h [12:12]
+ include/llvm/Transforms/Scalar/BDCE.h [12:12]
+ include/llvm/Transforms/Scalar/CallSiteSplitting.h [12:12]
+ include/llvm/Transforms/Scalar/ConstantHoisting.h [12:12]
+ include/llvm/Transforms/Scalar/ConstraintElimination.h [12:12]
+ include/llvm/Transforms/Scalar/CorrelatedValuePropagation.h [12:12]
+ include/llvm/Transforms/Scalar/DCE.h [12:12]
+ include/llvm/Transforms/Scalar/DFAJumpThreading.h [12:12]
+ include/llvm/Transforms/Scalar/DeadStoreElimination.h [12:12]
+ include/llvm/Transforms/Scalar/DivRemPairs.h [12:12]
+ include/llvm/Transforms/Scalar/EarlyCSE.h [12:12]
+ include/llvm/Transforms/Scalar/FlattenCFG.h [12:12]
+ include/llvm/Transforms/Scalar/Float2Int.h [12:12]
+ include/llvm/Transforms/Scalar/GVN.h [12:12]
+ include/llvm/Transforms/Scalar/GVNExpression.h [12:12]
+ include/llvm/Transforms/Scalar/GuardWidening.h [12:12]
+ include/llvm/Transforms/Scalar/IVUsersPrinter.h [12:12]
+ include/llvm/Transforms/Scalar/IndVarSimplify.h [12:12]
+ include/llvm/Transforms/Scalar/InductiveRangeCheckElimination.h [12:12]
+ include/llvm/Transforms/Scalar/InferAddressSpaces.h [12:12]
+ include/llvm/Transforms/Scalar/InstSimplifyPass.h [12:12]
+ include/llvm/Transforms/Scalar/JumpThreading.h [12:12]
+ include/llvm/Transforms/Scalar/LICM.h [12:12]
+ include/llvm/Transforms/Scalar/LoopAccessAnalysisPrinter.h [12:12]
+ include/llvm/Transforms/Scalar/LoopBoundSplit.h [12:12]
+ include/llvm/Transforms/Scalar/LoopDataPrefetch.h [13:13]
+ include/llvm/Transforms/Scalar/LoopDeletion.h [12:12]
+ include/llvm/Transforms/Scalar/LoopDistribute.h [12:12]
+ include/llvm/Transforms/Scalar/LoopFlatten.h [12:12]
+ include/llvm/Transforms/Scalar/LoopFuse.h [12:12]
+ include/llvm/Transforms/Scalar/LoopIdiomRecognize.h [12:12]
+ include/llvm/Transforms/Scalar/LoopInstSimplify.h [12:12]
+ include/llvm/Transforms/Scalar/LoopInterchange.h [12:12]
+ include/llvm/Transforms/Scalar/LoopLoadElimination.h [12:12]
+ include/llvm/Transforms/Scalar/LoopPassManager.h [12:12]
+ include/llvm/Transforms/Scalar/LoopPredication.h [12:12]
+ include/llvm/Transforms/Scalar/LoopReroll.h [12:12]
+ include/llvm/Transforms/Scalar/LoopRotation.h [12:12]
+ include/llvm/Transforms/Scalar/LoopSimplifyCFG.h [12:12]
+ include/llvm/Transforms/Scalar/LoopSink.h [12:12]
+ include/llvm/Transforms/Scalar/LoopStrengthReduce.h [12:12]
+ include/llvm/Transforms/Scalar/LoopUnrollAndJamPass.h [12:12]
+ include/llvm/Transforms/Scalar/LoopUnrollPass.h [12:12]
+ include/llvm/Transforms/Scalar/LoopVersioningLICM.h [12:12]
+ include/llvm/Transforms/Scalar/LowerAtomicPass.h [12:12]
+ include/llvm/Transforms/Scalar/LowerConstantIntrinsics.h [12:12]
+ include/llvm/Transforms/Scalar/LowerExpectIntrinsic.h [12:12]
+ include/llvm/Transforms/Scalar/LowerGuardIntrinsic.h [12:12]
+ include/llvm/Transforms/Scalar/LowerMatrixIntrinsics.h [12:12]
+ include/llvm/Transforms/Scalar/LowerWidenableCondition.h [12:12]
+ include/llvm/Transforms/Scalar/MakeGuardsExplicit.h [12:12]
+ include/llvm/Transforms/Scalar/MemCpyOptimizer.h [12:12]
+ include/llvm/Transforms/Scalar/MergeICmps.h [12:12]
+ include/llvm/Transforms/Scalar/MergedLoadStoreMotion.h [12:12]
+ include/llvm/Transforms/Scalar/NaryReassociate.h [12:12]
+ include/llvm/Transforms/Scalar/NewGVN.h [12:12]
+ include/llvm/Transforms/Scalar/PartiallyInlineLibCalls.h [12:12]
+ include/llvm/Transforms/Scalar/Reassociate.h [12:12]
+ include/llvm/Transforms/Scalar/Reg2Mem.h [12:12]
+ include/llvm/Transforms/Scalar/RewriteStatepointsForGC.h [12:12]
+ include/llvm/Transforms/Scalar/SCCP.h [12:12]
+ include/llvm/Transforms/Scalar/SROA.h [12:12]
+ include/llvm/Transforms/Scalar/ScalarizeMaskedMemIntrin.h [13:13]
+ include/llvm/Transforms/Scalar/Scalarizer.h [12:12]
+ include/llvm/Transforms/Scalar/SeparateConstOffsetFromGEP.h [12:12]
+ include/llvm/Transforms/Scalar/SimpleLoopUnswitch.h [12:12]
+ include/llvm/Transforms/Scalar/SimplifyCFG.h [12:12]
+ include/llvm/Transforms/Scalar/Sink.h [12:12]
+ include/llvm/Transforms/Scalar/SpeculativeExecution.h [12:12]
+ include/llvm/Transforms/Scalar/StraightLineStrengthReduce.h [12:12]
+ include/llvm/Transforms/Scalar/StructurizeCFG.h [12:12]
+ include/llvm/Transforms/Scalar/TLSVariableHoist.h [12:12]
+ include/llvm/Transforms/Scalar/TailRecursionElimination.h [12:12]
+ include/llvm/Transforms/Scalar/WarnMissedTransforms.h [12:12]
+ include/llvm/Transforms/Utils.h [12:12]
+ include/llvm/Transforms/Utils/AMDGPUEmitPrintf.h [12:12]
+ include/llvm/Transforms/Utils/ASanStackFrameLayout.h [12:12]
+ include/llvm/Transforms/Utils/AddDiscriminators.h [12:12]
+ include/llvm/Transforms/Utils/AssumeBundleBuilder.h [12:12]
+ include/llvm/Transforms/Utils/BasicBlockUtils.h [12:12]
+ include/llvm/Transforms/Utils/BreakCriticalEdges.h [12:12]
+ include/llvm/Transforms/Utils/BuildLibCalls.h [12:12]
+ include/llvm/Transforms/Utils/BypassSlowDivision.h [12:12]
+ include/llvm/Transforms/Utils/CallGraphUpdater.h [12:12]
+ include/llvm/Transforms/Utils/CallPromotionUtils.h [12:12]
+ include/llvm/Transforms/Utils/CanonicalizeAliases.h [12:12]
+ include/llvm/Transforms/Utils/CanonicalizeFreezeInLoops.h [12:12]
+ include/llvm/Transforms/Utils/Cloning.h [12:12]
+ include/llvm/Transforms/Utils/CodeExtractor.h [12:12]
+ include/llvm/Transforms/Utils/CodeLayout.h [12:12]
+ include/llvm/Transforms/Utils/CodeMoverUtils.h [12:12]
+ include/llvm/Transforms/Utils/CtorUtils.h [12:12]
+ include/llvm/Transforms/Utils/Debugify.h [12:12]
+ include/llvm/Transforms/Utils/EntryExitInstrumenter.h [12:12]
+ include/llvm/Transforms/Utils/EscapeEnumerator.h [12:12]
+ include/llvm/Transforms/Utils/Evaluator.h [12:12]
+ include/llvm/Transforms/Utils/FixIrreducible.h [12:12]
+ include/llvm/Transforms/Utils/FunctionComparator.h [12:12]
+ include/llvm/Transforms/Utils/FunctionImportUtils.h [12:12]
+ include/llvm/Transforms/Utils/GlobalStatus.h [12:12]
+ include/llvm/Transforms/Utils/GuardUtils.h [12:12]
+ include/llvm/Transforms/Utils/HelloWorld.h [12:12]
+ include/llvm/Transforms/Utils/InjectTLIMappings.h [12:12]
+ include/llvm/Transforms/Utils/InstructionNamer.h [12:12]
+ include/llvm/Transforms/Utils/InstructionWorklist.h [12:12]
+ include/llvm/Transforms/Utils/IntegerDivision.h [12:12]
+ include/llvm/Transforms/Utils/LCSSA.h [12:12]
+ include/llvm/Transforms/Utils/LibCallsShrinkWrap.h [12:12]
+ include/llvm/Transforms/Utils/Local.h [12:12]
+ include/llvm/Transforms/Utils/LoopPeel.h [12:12]
+ include/llvm/Transforms/Utils/LoopRotationUtils.h [12:12]
+ include/llvm/Transforms/Utils/LoopSimplify.h [12:12]
+ include/llvm/Transforms/Utils/LoopUtils.h [12:12]
+ include/llvm/Transforms/Utils/LoopVersioning.h [12:12]
+ include/llvm/Transforms/Utils/LowerAtomic.h [12:12]
+ include/llvm/Transforms/Utils/LowerGlobalDtors.h [12:12]
+ include/llvm/Transforms/Utils/LowerIFunc.h [12:12]
+ include/llvm/Transforms/Utils/LowerInvoke.h [12:12]
+ include/llvm/Transforms/Utils/LowerMemIntrinsics.h [12:12]
+ include/llvm/Transforms/Utils/LowerSwitch.h [12:12]
+ include/llvm/Transforms/Utils/MatrixUtils.h [12:12]
+ include/llvm/Transforms/Utils/Mem2Reg.h [12:12]
+ include/llvm/Transforms/Utils/MemoryOpRemark.h [12:12]
+ include/llvm/Transforms/Utils/MemoryTaggingSupport.h [11:11]
+ include/llvm/Transforms/Utils/MetaRenamer.h [12:12]
+ include/llvm/Transforms/Utils/MisExpect.h [12:12]
+ include/llvm/Transforms/Utils/ModuleUtils.h [12:12]
+ include/llvm/Transforms/Utils/NameAnonGlobals.h [12:12]
+ include/llvm/Transforms/Utils/PredicateInfo.h [12:12]
+ include/llvm/Transforms/Utils/PromoteMemToReg.h [12:12]
+ include/llvm/Transforms/Utils/RelLookupTableConverter.h [12:12]
+ include/llvm/Transforms/Utils/SCCPSolver.h [12:12]
+ include/llvm/Transforms/Utils/SSAUpdater.h [12:12]
+ include/llvm/Transforms/Utils/SSAUpdaterBulk.h [12:12]
+ include/llvm/Transforms/Utils/SSAUpdaterImpl.h [12:12]
+ include/llvm/Transforms/Utils/SampleProfileInference.h [12:12]
+ include/llvm/Transforms/Utils/SampleProfileLoaderBaseImpl.h [12:12]
+ include/llvm/Transforms/Utils/SampleProfileLoaderBaseUtil.h [12:12]
+ include/llvm/Transforms/Utils/SanitizerStats.h [12:12]
+ include/llvm/Transforms/Utils/ScalarEvolutionExpander.h [12:12]
+ include/llvm/Transforms/Utils/SimplifyCFGOptions.h [12:12]
+ include/llvm/Transforms/Utils/SimplifyIndVar.h [12:12]
+ include/llvm/Transforms/Utils/SimplifyLibCalls.h [12:12]
+ include/llvm/Transforms/Utils/SizeOpts.h [12:12]
+ include/llvm/Transforms/Utils/SplitModule.h [12:12]
+ include/llvm/Transforms/Utils/StripGCRelocates.h [12:12]
+ include/llvm/Transforms/Utils/StripNonLineTableDebugInfo.h [12:12]
+ include/llvm/Transforms/Utils/SymbolRewriter.h [12:12]
+ include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h [12:12]
+ include/llvm/Transforms/Utils/UnifyLoopExits.h [12:12]
+ include/llvm/Transforms/Utils/UnrollLoop.h [12:12]
+ include/llvm/Transforms/Utils/VNCoercion.h [12:12]
+ include/llvm/Transforms/Utils/ValueMapper.h [12:12]
+ include/llvm/Transforms/Vectorize.h [12:12]
+ include/llvm/Transforms/Vectorize/LoadStoreVectorizer.h [12:12]
+ include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h [12:12]
+ include/llvm/Transforms/Vectorize/LoopVectorize.h [12:12]
+ include/llvm/Transforms/Vectorize/SLPVectorizer.h [12:12]
+ include/llvm/Transforms/Vectorize/VectorCombine.h [12:12]
+ include/llvm/WindowsDriver/MSVCPaths.h [12:12]
+ include/llvm/WindowsManifest/WindowsManifestMerger.h [12:12]
+ include/llvm/WindowsResource/ResourceProcessor.h [12:12]
+ include/llvm/WindowsResource/ResourceScriptToken.h [12:12]
+ include/llvm/WindowsResource/ResourceScriptTokenList.h [12:12]
+ include/llvm/XRay/BlockIndexer.h [12:12]
+ include/llvm/XRay/BlockPrinter.h [12:12]
+ include/llvm/XRay/BlockVerifier.h [12:12]
+ include/llvm/XRay/FDRLogBuilder.h [12:12]
+ include/llvm/XRay/FDRRecordConsumer.h [12:12]
+ include/llvm/XRay/FDRRecordProducer.h [12:12]
+ include/llvm/XRay/FDRRecords.h [12:12]
+ include/llvm/XRay/FDRTraceExpander.h [12:12]
+ include/llvm/XRay/FDRTraceWriter.h [12:12]
+ include/llvm/XRay/FileHeaderReader.h [12:12]
+ include/llvm/XRay/Graph.h [12:12]
+ include/llvm/XRay/InstrumentationMap.h [12:12]
+ include/llvm/XRay/Profile.h [12:12]
+ include/llvm/XRay/RecordPrinter.h [12:12]
+ include/llvm/XRay/Trace.h [12:12]
+ include/llvm/XRay/XRayRecord.h [12:12]
+ include/llvm/XRay/YAMLXRayRecord.h [12:12]
+ lib/Analysis/AliasAnalysis.cpp [5:5]
+ lib/Analysis/AliasAnalysisEvaluator.cpp [5:5]
+ lib/Analysis/AliasAnalysisSummary.h [5:5]
+ lib/Analysis/AliasSetTracker.cpp [5:5]
+ lib/Analysis/Analysis.cpp [5:5]
+ lib/Analysis/AssumeBundleQueries.cpp [5:5]
+ lib/Analysis/AssumptionCache.cpp [5:5]
+ lib/Analysis/BasicAliasAnalysis.cpp [5:5]
+ lib/Analysis/BlockFrequencyInfo.cpp [5:5]
+ lib/Analysis/BlockFrequencyInfoImpl.cpp [5:5]
+ lib/Analysis/BranchProbabilityInfo.cpp [5:5]
+ lib/Analysis/CFG.cpp [5:5]
+ lib/Analysis/CFGPrinter.cpp [5:5]
+ lib/Analysis/CFGSCCPrinter.cpp [5:5]
+ lib/Analysis/CGSCCPassManager.cpp [5:5]
+ lib/Analysis/CallGraph.cpp [5:5]
+ lib/Analysis/CallGraphSCCPass.cpp [5:5]
+ lib/Analysis/CallPrinter.cpp [5:5]
+ lib/Analysis/CaptureTracking.cpp [5:5]
+ lib/Analysis/CmpInstAnalysis.cpp [5:5]
+ lib/Analysis/CodeMetrics.cpp [5:5]
+ lib/Analysis/ConstantFolding.cpp [5:5]
+ lib/Analysis/ConstraintSystem.cpp [5:5]
+ lib/Analysis/CostModel.cpp [5:5]
+ lib/Analysis/CycleAnalysis.cpp [5:5]
+ lib/Analysis/DDG.cpp [5:5]
+ lib/Analysis/DDGPrinter.cpp [5:5]
+ lib/Analysis/Delinearization.cpp [5:5]
+ lib/Analysis/DemandedBits.cpp [5:5]
+ lib/Analysis/DependenceAnalysis.cpp [5:5]
+ lib/Analysis/DependenceGraphBuilder.cpp [5:5]
+ lib/Analysis/DevelopmentModeInlineAdvisor.cpp [5:5]
+ lib/Analysis/DivergenceAnalysis.cpp [5:5]
+ lib/Analysis/DomPrinter.cpp [5:5]
+ lib/Analysis/DomTreeUpdater.cpp [5:5]
+ lib/Analysis/DominanceFrontier.cpp [5:5]
+ lib/Analysis/EHPersonalities.cpp [5:5]
+ lib/Analysis/FunctionPropertiesAnalysis.cpp [5:5]
+ lib/Analysis/GlobalsModRef.cpp [5:5]
+ lib/Analysis/GuardUtils.cpp [5:5]
+ lib/Analysis/HeatUtils.cpp [5:5]
+ lib/Analysis/IRSimilarityIdentifier.cpp [5:5]
+ lib/Analysis/IVDescriptors.cpp [5:5]
+ lib/Analysis/IVUsers.cpp [5:5]
+ lib/Analysis/ImportedFunctionsInliningStatistics.cpp [5:5]
+ lib/Analysis/IndirectCallPromotionAnalysis.cpp [5:5]
+ lib/Analysis/InlineAdvisor.cpp [5:5]
+ lib/Analysis/InlineCost.cpp [5:5]
+ lib/Analysis/InlineOrder.cpp [5:5]
+ lib/Analysis/InlineSizeEstimatorAnalysis.cpp [5:5]
+ lib/Analysis/InstCount.cpp [5:5]
+ lib/Analysis/InstructionPrecedenceTracking.cpp [5:5]
+ lib/Analysis/InstructionSimplify.cpp [5:5]
+ lib/Analysis/Interval.cpp [5:5]
+ lib/Analysis/IntervalPartition.cpp [5:5]
+ lib/Analysis/LazyBlockFrequencyInfo.cpp [5:5]
+ lib/Analysis/LazyBranchProbabilityInfo.cpp [5:5]
+ lib/Analysis/LazyCallGraph.cpp [5:5]
+ lib/Analysis/LazyValueInfo.cpp [5:5]
+ lib/Analysis/LegacyDivergenceAnalysis.cpp [6:6]
+ lib/Analysis/Lint.cpp [5:5]
+ lib/Analysis/Loads.cpp [5:5]
+ lib/Analysis/Local.cpp [5:5]
+ lib/Analysis/LoopAccessAnalysis.cpp [5:5]
+ lib/Analysis/LoopAnalysisManager.cpp [5:5]
+ lib/Analysis/LoopCacheAnalysis.cpp [7:7]
+ lib/Analysis/LoopInfo.cpp [5:5]
+ lib/Analysis/LoopNestAnalysis.cpp [5:5]
+ lib/Analysis/LoopPass.cpp [5:5]
+ lib/Analysis/LoopUnrollAnalyzer.cpp [5:5]
+ lib/Analysis/MLInlineAdvisor.cpp [5:5]
+ lib/Analysis/MemDepPrinter.cpp [5:5]
+ lib/Analysis/MemDerefPrinter.cpp [5:5]
+ lib/Analysis/MemoryBuiltins.cpp [5:5]
+ lib/Analysis/MemoryDependenceAnalysis.cpp [5:5]
+ lib/Analysis/MemoryLocation.cpp [5:5]
+ lib/Analysis/MemoryProfileInfo.cpp [5:5]
+ lib/Analysis/MemorySSA.cpp [5:5]
+ lib/Analysis/MemorySSAUpdater.cpp [5:5]
+ lib/Analysis/ModelUnderTrainingRunner.cpp [5:5]
+ lib/Analysis/ModuleDebugInfoPrinter.cpp [5:5]
+ lib/Analysis/ModuleSummaryAnalysis.cpp [5:5]
+ lib/Analysis/MustExecute.cpp [5:5]
+ lib/Analysis/NoInferenceModelRunner.cpp [5:5]
+ lib/Analysis/ObjCARCAliasAnalysis.cpp [5:5]
+ lib/Analysis/ObjCARCAnalysisUtils.cpp [5:5]
+ lib/Analysis/ObjCARCInstKind.cpp [5:5]
+ lib/Analysis/OptimizationRemarkEmitter.cpp [5:5]
+ lib/Analysis/OverflowInstAnalysis.cpp [5:5]
+ lib/Analysis/PHITransAddr.cpp [5:5]
+ lib/Analysis/PhiValues.cpp [5:5]
+ lib/Analysis/PostDominators.cpp [5:5]
+ lib/Analysis/ProfileSummaryInfo.cpp [5:5]
+ lib/Analysis/PtrUseVisitor.cpp [5:5]
+ lib/Analysis/RegionInfo.cpp [5:5]
+ lib/Analysis/RegionPass.cpp [5:5]
+ lib/Analysis/RegionPrinter.cpp [5:5]
+ lib/Analysis/ReplayInlineAdvisor.cpp [5:5]
+ lib/Analysis/ScalarEvolution.cpp [5:5]
+ lib/Analysis/ScalarEvolutionAliasAnalysis.cpp [5:5]
+ lib/Analysis/ScalarEvolutionDivision.cpp [5:5]
+ lib/Analysis/ScalarEvolutionNormalization.cpp [5:5]
+ lib/Analysis/ScopedNoAliasAA.cpp [5:5]
+ lib/Analysis/StackLifetime.cpp [5:5]
+ lib/Analysis/StackSafetyAnalysis.cpp [5:5]
+ lib/Analysis/SyncDependenceAnalysis.cpp [5:5]
+ lib/Analysis/SyntheticCountsUtils.cpp [5:5]
+ lib/Analysis/TFLiteUtils.cpp [5:5]
+ lib/Analysis/TargetLibraryInfo.cpp [5:5]
+ lib/Analysis/TargetTransformInfo.cpp [5:5]
+ lib/Analysis/TensorSpec.cpp [5:5]
+ lib/Analysis/Trace.cpp [5:5]
+ lib/Analysis/TrainingLogger.cpp [5:5]
+ lib/Analysis/TypeBasedAliasAnalysis.cpp [5:5]
+ lib/Analysis/TypeMetadataUtils.cpp [5:5]
+ lib/Analysis/UniformityAnalysis.cpp [5:5]
+ lib/Analysis/VFABIDemangling.cpp [5:5]
+ lib/Analysis/ValueLattice.cpp [5:5]
+ lib/Analysis/ValueLatticeUtils.cpp [5:5]
+ lib/Analysis/ValueTracking.cpp [5:5]
+ lib/Analysis/VectorUtils.cpp [5:5]
+ lib/AsmParser/LLLexer.cpp [5:5]
+ lib/AsmParser/LLParser.cpp [5:5]
+ lib/AsmParser/Parser.cpp [5:5]
+ lib/BinaryFormat/AMDGPUMetadataVerifier.cpp [5:5]
+ lib/BinaryFormat/COFF.cpp [5:5]
+ lib/BinaryFormat/DXContainer.cpp [6:6]
+ lib/BinaryFormat/Dwarf.cpp [5:5]
+ lib/BinaryFormat/ELF.cpp [5:5]
+ lib/BinaryFormat/MachO.cpp [5:5]
+ lib/BinaryFormat/Magic.cpp [5:5]
+ lib/BinaryFormat/Minidump.cpp [5:5]
+ lib/BinaryFormat/MsgPackDocument.cpp [5:5]
+ lib/BinaryFormat/MsgPackDocumentYAML.cpp [5:5]
+ lib/BinaryFormat/MsgPackReader.cpp [5:5]
+ lib/BinaryFormat/MsgPackWriter.cpp [5:5]
+ lib/BinaryFormat/Wasm.cpp [5:5]
+ lib/BinaryFormat/XCOFF.cpp [5:5]
+ lib/Bitcode/Reader/BitReader.cpp [5:5]
+ lib/Bitcode/Reader/BitcodeAnalyzer.cpp [5:5]
+ lib/Bitcode/Reader/BitcodeReader.cpp [5:5]
+ lib/Bitcode/Reader/MetadataLoader.cpp [5:5]
+ lib/Bitcode/Reader/MetadataLoader.h [5:5]
+ lib/Bitcode/Reader/ValueList.cpp [5:5]
+ lib/Bitcode/Reader/ValueList.h [5:5]
+ lib/Bitcode/Writer/BitWriter.cpp [5:5]
+ lib/Bitcode/Writer/BitcodeWriter.cpp [5:5]
+ lib/Bitcode/Writer/BitcodeWriterPass.cpp [5:5]
+ lib/Bitcode/Writer/ValueEnumerator.cpp [5:5]
+ lib/Bitcode/Writer/ValueEnumerator.h [5:5]
+ lib/Bitstream/Reader/BitstreamReader.cpp [5:5]
+ lib/CodeGen/AggressiveAntiDepBreaker.cpp [5:5]
+ lib/CodeGen/AggressiveAntiDepBreaker.h [5:5]
+ lib/CodeGen/AllocationOrder.cpp [5:5]
+ lib/CodeGen/AllocationOrder.h [5:5]
+ lib/CodeGen/Analysis.cpp [5:5]
+ lib/CodeGen/AsmPrinter/AIXException.cpp [5:5]
+ lib/CodeGen/AsmPrinter/ARMException.cpp [5:5]
+ lib/CodeGen/AsmPrinter/AccelTable.cpp [5:5]
+ lib/CodeGen/AsmPrinter/AddressPool.cpp [5:5]
+ lib/CodeGen/AsmPrinter/AddressPool.h [5:5]
+ lib/CodeGen/AsmPrinter/AsmPrinter.cpp [5:5]
+ lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp [5:5]
+ lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp [5:5]
+ lib/CodeGen/AsmPrinter/ByteStreamer.h [5:5]
+ lib/CodeGen/AsmPrinter/CodeViewDebug.cpp [5:5]
+ lib/CodeGen/AsmPrinter/CodeViewDebug.h [5:5]
+ lib/CodeGen/AsmPrinter/DIE.cpp [5:5]
+ lib/CodeGen/AsmPrinter/DIEHash.cpp [5:5]
+ lib/CodeGen/AsmPrinter/DIEHash.h [5:5]
+ lib/CodeGen/AsmPrinter/DbgEntityHistoryCalculator.cpp [5:5]
+ lib/CodeGen/AsmPrinter/DebugHandlerBase.cpp [5:5]
+ lib/CodeGen/AsmPrinter/DebugLocEntry.h [5:5]
+ lib/CodeGen/AsmPrinter/DebugLocStream.cpp [5:5]
+ lib/CodeGen/AsmPrinter/DebugLocStream.h [5:5]
+ lib/CodeGen/AsmPrinter/DwarfCFIException.cpp [5:5]
+ lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp [5:5]
+ lib/CodeGen/AsmPrinter/DwarfCompileUnit.h [5:5]
+ lib/CodeGen/AsmPrinter/DwarfDebug.cpp [5:5]
+ lib/CodeGen/AsmPrinter/DwarfDebug.h [5:5]
+ lib/CodeGen/AsmPrinter/DwarfException.h [5:5]
+ lib/CodeGen/AsmPrinter/DwarfExpression.cpp [5:5]
+ lib/CodeGen/AsmPrinter/DwarfExpression.h [5:5]
+ lib/CodeGen/AsmPrinter/DwarfFile.cpp [5:5]
+ lib/CodeGen/AsmPrinter/DwarfFile.h [5:5]
+ lib/CodeGen/AsmPrinter/DwarfStringPool.cpp [5:5]
+ lib/CodeGen/AsmPrinter/DwarfStringPool.h [5:5]
+ lib/CodeGen/AsmPrinter/DwarfUnit.cpp [5:5]
+ lib/CodeGen/AsmPrinter/DwarfUnit.h [5:5]
+ lib/CodeGen/AsmPrinter/EHStreamer.cpp [5:5]
+ lib/CodeGen/AsmPrinter/EHStreamer.h [5:5]
+ lib/CodeGen/AsmPrinter/ErlangGCPrinter.cpp [5:5]
+ lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp [5:5]
+ lib/CodeGen/AsmPrinter/PseudoProbePrinter.cpp [5:5]
+ lib/CodeGen/AsmPrinter/PseudoProbePrinter.h [5:5]
+ lib/CodeGen/AsmPrinter/WasmException.cpp [5:5]
+ lib/CodeGen/AsmPrinter/WasmException.h [5:5]
+ lib/CodeGen/AsmPrinter/WinCFGuard.cpp [5:5]
+ lib/CodeGen/AsmPrinter/WinCFGuard.h [5:5]
+ lib/CodeGen/AsmPrinter/WinException.cpp [5:5]
+ lib/CodeGen/AsmPrinter/WinException.h [5:5]
+ lib/CodeGen/AtomicExpandPass.cpp [5:5]
+ lib/CodeGen/BasicBlockSections.cpp [5:5]
+ lib/CodeGen/BasicBlockSectionsProfileReader.cpp [5:5]
+ lib/CodeGen/BasicTargetTransformInfo.cpp [5:5]
+ lib/CodeGen/BranchFolding.cpp [5:5]
+ lib/CodeGen/BranchFolding.h [5:5]
+ lib/CodeGen/BranchRelaxation.cpp [5:5]
+ lib/CodeGen/BreakFalseDeps.cpp [5:5]
+ lib/CodeGen/CFGuardLongjmp.cpp [5:5]
+ lib/CodeGen/CFIFixup.cpp [5:5]
+ lib/CodeGen/CFIInstrInserter.cpp [5:5]
+ lib/CodeGen/CalcSpillWeights.cpp [5:5]
+ lib/CodeGen/CallingConvLower.cpp [5:5]
+ lib/CodeGen/CodeGen.cpp [5:5]
+ lib/CodeGen/CodeGenCommonISel.cpp [5:5]
+ lib/CodeGen/CodeGenPassBuilder.cpp [5:5]
+ lib/CodeGen/CodeGenPrepare.cpp [5:5]
+ lib/CodeGen/CommandFlags.cpp [5:5]
+ lib/CodeGen/ComplexDeinterleavingPass.cpp [5:5]
+ lib/CodeGen/CriticalAntiDepBreaker.cpp [5:5]
+ lib/CodeGen/CriticalAntiDepBreaker.h [5:5]
+ lib/CodeGen/DFAPacketizer.cpp [5:5]
+ lib/CodeGen/DeadMachineInstructionElim.cpp [5:5]
+ lib/CodeGen/DetectDeadLanes.cpp [5:5]
+ lib/CodeGen/DwarfEHPrepare.cpp [5:5]
+ lib/CodeGen/EHContGuardCatchret.cpp [5:5]
+ lib/CodeGen/EarlyIfConversion.cpp [5:5]
+ lib/CodeGen/EdgeBundles.cpp [5:5]
+ lib/CodeGen/ExecutionDomainFix.cpp [5:5]
+ lib/CodeGen/ExpandLargeDivRem.cpp [5:5]
+ lib/CodeGen/ExpandLargeFpConvert.cpp [5:5]
+ lib/CodeGen/ExpandMemCmp.cpp [5:5]
+ lib/CodeGen/ExpandPostRAPseudos.cpp [5:5]
+ lib/CodeGen/ExpandReductions.cpp [5:5]
+ lib/CodeGen/ExpandVectorPredication.cpp [5:5]
+ lib/CodeGen/FEntryInserter.cpp [5:5]
+ lib/CodeGen/FaultMaps.cpp [5:5]
+ lib/CodeGen/FinalizeISel.cpp [5:5]
+ lib/CodeGen/FixupStatepointCallerSaved.cpp [5:5]
+ lib/CodeGen/FuncletLayout.cpp [5:5]
+ lib/CodeGen/GCMetadata.cpp [5:5]
+ lib/CodeGen/GCMetadataPrinter.cpp [5:5]
+ lib/CodeGen/GCRootLowering.cpp [5:5]
+ lib/CodeGen/GlobalISel/CSEInfo.cpp [5:5]
+ lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp [5:5]
+ lib/CodeGen/GlobalISel/CallLowering.cpp [5:5]
+ lib/CodeGen/GlobalISel/Combiner.cpp [5:5]
+ lib/CodeGen/GlobalISel/CombinerHelper.cpp [5:5]
+ lib/CodeGen/GlobalISel/GISelChangeObserver.cpp [5:5]
+ lib/CodeGen/GlobalISel/GISelKnownBits.cpp [5:5]
+ lib/CodeGen/GlobalISel/GlobalISel.cpp [5:5]
+ lib/CodeGen/GlobalISel/IRTranslator.cpp [5:5]
+ lib/CodeGen/GlobalISel/InlineAsmLowering.cpp [5:5]
+ lib/CodeGen/GlobalISel/InstructionSelect.cpp [5:5]
+ lib/CodeGen/GlobalISel/InstructionSelector.cpp [5:5]
+ lib/CodeGen/GlobalISel/LegacyLegalizerInfo.cpp [5:5]
+ lib/CodeGen/GlobalISel/LegalityPredicates.cpp [5:5]
+ lib/CodeGen/GlobalISel/LegalizeMutations.cpp [5:5]
+ lib/CodeGen/GlobalISel/Legalizer.cpp [5:5]
+ lib/CodeGen/GlobalISel/LegalizerHelper.cpp [5:5]
+ lib/CodeGen/GlobalISel/LegalizerInfo.cpp [5:5]
+ lib/CodeGen/GlobalISel/LoadStoreOpt.cpp [5:5]
+ lib/CodeGen/GlobalISel/Localizer.cpp [5:5]
+ lib/CodeGen/GlobalISel/LostDebugLocObserver.cpp [5:5]
+ lib/CodeGen/GlobalISel/MachineIRBuilder.cpp [5:5]
+ lib/CodeGen/GlobalISel/RegBankSelect.cpp [5:5]
+ lib/CodeGen/GlobalISel/Utils.cpp [5:5]
+ lib/CodeGen/GlobalMerge.cpp [5:5]
+ lib/CodeGen/HardwareLoops.cpp [5:5]
+ lib/CodeGen/IfConversion.cpp [5:5]
+ lib/CodeGen/ImplicitNullChecks.cpp [5:5]
+ lib/CodeGen/IndirectBrExpandPass.cpp [5:5]
+ lib/CodeGen/InlineSpiller.cpp [5:5]
+ lib/CodeGen/InterferenceCache.cpp [5:5]
+ lib/CodeGen/InterferenceCache.h [5:5]
+ lib/CodeGen/InterleavedAccessPass.cpp [5:5]
+ lib/CodeGen/InterleavedLoadCombinePass.cpp [5:5]
+ lib/CodeGen/IntrinsicLowering.cpp [5:5]
+ lib/CodeGen/JMCInstrumenter.cpp [5:5]
+ lib/CodeGen/LLVMTargetMachine.cpp [5:5]
+ lib/CodeGen/LatencyPriorityQueue.cpp [5:5]
+ lib/CodeGen/LexicalScopes.cpp [5:5]
+ lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp [5:5]
+ lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.h [5:5]
+ lib/CodeGen/LiveDebugValues/LiveDebugValues.cpp [5:5]
+ lib/CodeGen/LiveDebugValues/LiveDebugValues.h [5:5]
+ lib/CodeGen/LiveDebugValues/VarLocBasedImpl.cpp [5:5]
+ lib/CodeGen/LiveDebugVariables.cpp [5:5]
+ lib/CodeGen/LiveDebugVariables.h [5:5]
+ lib/CodeGen/LiveInterval.cpp [5:5]
+ lib/CodeGen/LiveIntervalCalc.cpp [5:5]
+ lib/CodeGen/LiveIntervalUnion.cpp [5:5]
+ lib/CodeGen/LiveIntervals.cpp [5:5]
+ lib/CodeGen/LivePhysRegs.cpp [5:5]
+ lib/CodeGen/LiveRangeCalc.cpp [5:5]
+ lib/CodeGen/LiveRangeEdit.cpp [5:5]
+ lib/CodeGen/LiveRangeShrink.cpp [5:5]
+ lib/CodeGen/LiveRangeUtils.h [5:5]
+ lib/CodeGen/LiveRegMatrix.cpp [5:5]
+ lib/CodeGen/LiveRegUnits.cpp [5:5]
+ lib/CodeGen/LiveStacks.cpp [5:5]
+ lib/CodeGen/LiveVariables.cpp [5:5]
+ lib/CodeGen/LocalStackSlotAllocation.cpp [5:5]
+ lib/CodeGen/LoopTraversal.cpp [5:5]
+ lib/CodeGen/LowLevelType.cpp [5:5]
+ lib/CodeGen/LowerEmuTLS.cpp [5:5]
+ lib/CodeGen/MBFIWrapper.cpp [5:5]
+ lib/CodeGen/MIRCanonicalizerPass.cpp [5:5]
+ lib/CodeGen/MIRFSDiscriminator.cpp [5:5]
+ lib/CodeGen/MIRNamerPass.cpp [5:5]
+ lib/CodeGen/MIRParser/MILexer.cpp [5:5]
+ lib/CodeGen/MIRParser/MILexer.h [5:5]
+ lib/CodeGen/MIRParser/MIParser.cpp [5:5]
+ lib/CodeGen/MIRParser/MIRParser.cpp [5:5]
+ lib/CodeGen/MIRPrinter.cpp [5:5]
+ lib/CodeGen/MIRPrintingPass.cpp [5:5]
+ lib/CodeGen/MIRSampleProfile.cpp [5:5]
+ lib/CodeGen/MIRVRegNamerUtils.cpp [5:5]
+ lib/CodeGen/MIRVRegNamerUtils.h [6:6]
+ lib/CodeGen/MIRYamlMapping.cpp [5:5]
+ lib/CodeGen/MLRegallocEvictAdvisor.cpp [5:5]
+ lib/CodeGen/MLRegallocEvictAdvisor.h [5:5]
+ lib/CodeGen/MLRegallocPriorityAdvisor.cpp [5:5]
+ lib/CodeGen/MachineBasicBlock.cpp [5:5]
+ lib/CodeGen/MachineBlockFrequencyInfo.cpp [5:5]
+ lib/CodeGen/MachineBlockPlacement.cpp [5:5]
+ lib/CodeGen/MachineBranchProbabilityInfo.cpp [5:5]
+ lib/CodeGen/MachineCFGPrinter.cpp [5:5]
+ lib/CodeGen/MachineCSE.cpp [5:5]
+ lib/CodeGen/MachineCheckDebugify.cpp [5:5]
+ lib/CodeGen/MachineCombiner.cpp [5:5]
+ lib/CodeGen/MachineCopyPropagation.cpp [5:5]
+ lib/CodeGen/MachineCycleAnalysis.cpp [5:5]
+ lib/CodeGen/MachineDebugify.cpp [5:5]
+ lib/CodeGen/MachineDominanceFrontier.cpp [5:5]
+ lib/CodeGen/MachineDominators.cpp [5:5]
+ lib/CodeGen/MachineFrameInfo.cpp [5:5]
+ lib/CodeGen/MachineFunction.cpp [5:5]
+ lib/CodeGen/MachineFunctionPass.cpp [5:5]
+ lib/CodeGen/MachineFunctionPrinterPass.cpp [5:5]
+ lib/CodeGen/MachineFunctionSplitter.cpp [5:5]
+ lib/CodeGen/MachineInstr.cpp [5:5]
+ lib/CodeGen/MachineInstrBundle.cpp [5:5]
+ lib/CodeGen/MachineLICM.cpp [5:5]
+ lib/CodeGen/MachineLateInstrsCleanup.cpp [5:5]
+ lib/CodeGen/MachineLoopInfo.cpp [5:5]
+ lib/CodeGen/MachineLoopUtils.cpp [5:5]
+ lib/CodeGen/MachineModuleInfo.cpp [5:5]
+ lib/CodeGen/MachineModuleInfoImpls.cpp [5:5]
+ lib/CodeGen/MachineModuleSlotTracker.cpp [5:5]
+ lib/CodeGen/MachineOperand.cpp [5:5]
+ lib/CodeGen/MachineOutliner.cpp [5:5]
+ lib/CodeGen/MachinePassManager.cpp [5:5]
+ lib/CodeGen/MachinePipeliner.cpp [5:5]
+ lib/CodeGen/MachinePostDominators.cpp [5:5]
+ lib/CodeGen/MachineRegionInfo.cpp [5:5]
+ lib/CodeGen/MachineRegisterInfo.cpp [5:5]
+ lib/CodeGen/MachineSSAContext.cpp [5:5]
+ lib/CodeGen/MachineSSAUpdater.cpp [5:5]
+ lib/CodeGen/MachineScheduler.cpp [5:5]
+ lib/CodeGen/MachineSink.cpp [5:5]
+ lib/CodeGen/MachineSizeOpts.cpp [5:5]
+ lib/CodeGen/MachineStableHash.cpp [5:5]
+ lib/CodeGen/MachineStripDebug.cpp [5:5]
+ lib/CodeGen/MachineTraceMetrics.cpp [5:5]
+ lib/CodeGen/MachineUniformityAnalysis.cpp [5:5]
+ lib/CodeGen/MachineVerifier.cpp [5:5]
+ lib/CodeGen/MacroFusion.cpp [5:5]
+ lib/CodeGen/ModuloSchedule.cpp [5:5]
+ lib/CodeGen/MultiHazardRecognizer.cpp [5:5]
+ lib/CodeGen/NonRelocatableStringpool.cpp [5:5]
+ lib/CodeGen/OptimizePHIs.cpp [5:5]
+ lib/CodeGen/PHIElimination.cpp [5:5]
+ lib/CodeGen/PHIEliminationUtils.cpp [5:5]
+ lib/CodeGen/PHIEliminationUtils.h [5:5]
+ lib/CodeGen/ParallelCG.cpp [5:5]
+ lib/CodeGen/PatchableFunction.cpp [5:5]
+ lib/CodeGen/PeepholeOptimizer.cpp [5:5]
+ lib/CodeGen/PostRAHazardRecognizer.cpp [5:5]
+ lib/CodeGen/PostRASchedulerList.cpp [5:5]
+ lib/CodeGen/PreISelIntrinsicLowering.cpp [5:5]
+ lib/CodeGen/ProcessImplicitDefs.cpp [5:5]
+ lib/CodeGen/PrologEpilogInserter.cpp [5:5]
+ lib/CodeGen/PseudoProbeInserter.cpp [5:5]
+ lib/CodeGen/PseudoSourceValue.cpp [5:5]
+ lib/CodeGen/RDFGraph.cpp [5:5]
+ lib/CodeGen/RDFLiveness.cpp [5:5]
+ lib/CodeGen/RDFRegisters.cpp [5:5]
+ lib/CodeGen/ReachingDefAnalysis.cpp [5:5]
+ lib/CodeGen/RegAllocBase.cpp [5:5]
+ lib/CodeGen/RegAllocBase.h [5:5]
+ lib/CodeGen/RegAllocBasic.cpp [5:5]
+ lib/CodeGen/RegAllocEvictionAdvisor.cpp [5:5]
+ lib/CodeGen/RegAllocEvictionAdvisor.h [5:5]
+ lib/CodeGen/RegAllocFast.cpp [5:5]
+ lib/CodeGen/RegAllocGreedy.cpp [5:5]
+ lib/CodeGen/RegAllocGreedy.h [5:5]
+ lib/CodeGen/RegAllocPBQP.cpp [5:5]
+ lib/CodeGen/RegAllocPriorityAdvisor.cpp [5:5]
+ lib/CodeGen/RegAllocPriorityAdvisor.h [5:5]
+ lib/CodeGen/RegAllocScore.cpp [5:5]
+ lib/CodeGen/RegAllocScore.h [5:5]
+ lib/CodeGen/RegUsageInfoCollector.cpp [5:5]
+ lib/CodeGen/RegUsageInfoPropagate.cpp [5:5]
+ lib/CodeGen/RegisterBank.cpp [5:5]
+ lib/CodeGen/RegisterBankInfo.cpp [5:5]
+ lib/CodeGen/RegisterClassInfo.cpp [5:5]
+ lib/CodeGen/RegisterCoalescer.cpp [5:5]
+ lib/CodeGen/RegisterCoalescer.h [5:5]
+ lib/CodeGen/RegisterPressure.cpp [5:5]
+ lib/CodeGen/RegisterScavenging.cpp [5:5]
+ lib/CodeGen/RegisterUsageInfo.cpp [5:5]
+ lib/CodeGen/RemoveRedundantDebugValues.cpp [5:5]
+ lib/CodeGen/RenameIndependentSubregs.cpp [5:5]
+ lib/CodeGen/ReplaceWithVeclib.cpp [5:5]
+ lib/CodeGen/ResetMachineFunctionPass.cpp [5:5]
+ lib/CodeGen/SafeStack.cpp [5:5]
+ lib/CodeGen/SafeStackLayout.cpp [5:5]
+ lib/CodeGen/SafeStackLayout.h [5:5]
+ lib/CodeGen/SanitizerBinaryMetadata.cpp [6:6]
+ lib/CodeGen/ScheduleDAG.cpp [5:5]
+ lib/CodeGen/ScheduleDAGInstrs.cpp [5:5]
+ lib/CodeGen/ScheduleDAGPrinter.cpp [5:5]
+ lib/CodeGen/ScoreboardHazardRecognizer.cpp [5:5]
+ lib/CodeGen/SelectOptimize.cpp [5:5]
+ lib/CodeGen/SelectionDAG/DAGCombiner.cpp [5:5]
+ lib/CodeGen/SelectionDAG/FastISel.cpp [5:5]
+ lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp [5:5]
+ lib/CodeGen/SelectionDAG/InstrEmitter.cpp [5:5]
+ lib/CodeGen/SelectionDAG/InstrEmitter.h [5:5]
+ lib/CodeGen/SelectionDAG/LegalizeDAG.cpp [5:5]
+ lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp [5:5]
+ lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp [5:5]
+ lib/CodeGen/SelectionDAG/LegalizeTypes.cpp [5:5]
+ lib/CodeGen/SelectionDAG/LegalizeTypes.h [5:5]
+ lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp [5:5]
+ lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp [5:5]
+ lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp [5:5]
+ lib/CodeGen/SelectionDAG/ResourcePriorityQueue.cpp [5:5]
+ lib/CodeGen/SelectionDAG/SDNodeDbgValue.h [5:5]
+ lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp [5:5]
+ lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp [5:5]
+ lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp [5:5]
+ lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h [5:5]
+ lib/CodeGen/SelectionDAG/ScheduleDAGVLIW.cpp [5:5]
+ lib/CodeGen/SelectionDAG/SelectionDAG.cpp [5:5]
+ lib/CodeGen/SelectionDAG/SelectionDAGAddressAnalysis.cpp [5:5]
+ lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp [5:5]
+ lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h [5:5]
+ lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp [5:5]
+ lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp [5:5]
+ lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp [5:5]
+ lib/CodeGen/SelectionDAG/SelectionDAGTargetInfo.cpp [5:5]
+ lib/CodeGen/SelectionDAG/StatepointLowering.cpp [5:5]
+ lib/CodeGen/SelectionDAG/StatepointLowering.h [5:5]
+ lib/CodeGen/SelectionDAG/TargetLowering.cpp [5:5]
+ lib/CodeGen/ShadowStackGCLowering.cpp [5:5]
+ lib/CodeGen/ShrinkWrap.cpp [5:5]
+ lib/CodeGen/SjLjEHPrepare.cpp [5:5]
+ lib/CodeGen/SlotIndexes.cpp [5:5]
+ lib/CodeGen/SpillPlacement.cpp [5:5]
+ lib/CodeGen/SpillPlacement.h [5:5]
+ lib/CodeGen/SplitKit.cpp [5:5]
+ lib/CodeGen/SplitKit.h [5:5]
+ lib/CodeGen/StackColoring.cpp [5:5]
+ lib/CodeGen/StackFrameLayoutAnalysisPass.cpp [6:6]
+ lib/CodeGen/StackMapLivenessAnalysis.cpp [5:5]
+ lib/CodeGen/StackMaps.cpp [5:5]
+ lib/CodeGen/StackProtector.cpp [5:5]
+ lib/CodeGen/StackSlotColoring.cpp [5:5]
+ lib/CodeGen/SwiftErrorValueTracking.cpp [5:5]
+ lib/CodeGen/SwitchLoweringUtils.cpp [5:5]
+ lib/CodeGen/TailDuplication.cpp [5:5]
+ lib/CodeGen/TailDuplicator.cpp [5:5]
+ lib/CodeGen/TargetFrameLoweringImpl.cpp [5:5]
+ lib/CodeGen/TargetInstrInfo.cpp [5:5]
+ lib/CodeGen/TargetLoweringBase.cpp [5:5]
+ lib/CodeGen/TargetLoweringObjectFileImpl.cpp [5:5]
+ lib/CodeGen/TargetOptionsImpl.cpp [5:5]
+ lib/CodeGen/TargetPassConfig.cpp [5:5]
+ lib/CodeGen/TargetRegisterInfo.cpp [5:5]
+ lib/CodeGen/TargetSchedule.cpp [5:5]
+ lib/CodeGen/TargetSubtargetInfo.cpp [5:5]
+ lib/CodeGen/TwoAddressInstructionPass.cpp [5:5]
+ lib/CodeGen/TypePromotion.cpp [5:5]
+ lib/CodeGen/UnreachableBlockElim.cpp [5:5]
+ lib/CodeGen/VLIWMachineScheduler.cpp [5:5]
+ lib/CodeGen/ValueTypes.cpp [5:5]
+ lib/CodeGen/VirtRegMap.cpp [5:5]
+ lib/CodeGen/WasmEHPrepare.cpp [5:5]
+ lib/CodeGen/WinEHPrepare.cpp [5:5]
+ lib/CodeGen/XRayInstrumentation.cpp [5:5]
+ lib/DWARFLinker/DWARFLinker.cpp [5:5]
+ lib/DWARFLinker/DWARFLinkerCompileUnit.cpp [5:5]
+ lib/DWARFLinker/DWARFLinkerDeclContext.cpp [5:5]
+ lib/DWARFLinker/DWARFStreamer.cpp [5:5]
+ lib/DWARFLinkerParallel/DWARFLinker.cpp [5:5]
+ lib/DWP/DWP.cpp [5:5]
+ lib/DebugInfo/CodeView/AppendingTypeTableBuilder.cpp [5:5]
+ lib/DebugInfo/CodeView/CVSymbolVisitor.cpp [5:5]
+ lib/DebugInfo/CodeView/CVTypeVisitor.cpp [5:5]
+ lib/DebugInfo/CodeView/CodeViewError.cpp [5:5]
+ lib/DebugInfo/CodeView/CodeViewRecordIO.cpp [5:5]
+ lib/DebugInfo/CodeView/DebugChecksumsSubsection.cpp [5:5]
+ lib/DebugInfo/CodeView/DebugCrossExSubsection.cpp [5:5]
+ lib/DebugInfo/CodeView/DebugCrossImpSubsection.cpp [5:5]
+ lib/DebugInfo/CodeView/DebugFrameDataSubsection.cpp [5:5]
+ lib/DebugInfo/CodeView/DebugInlineeLinesSubsection.cpp [5:5]
+ lib/DebugInfo/CodeView/DebugLinesSubsection.cpp [5:5]
+ lib/DebugInfo/CodeView/DebugStringTableSubsection.cpp [5:5]
+ lib/DebugInfo/CodeView/DebugSubsection.cpp [5:5]
+ lib/DebugInfo/CodeView/DebugSubsectionRecord.cpp [5:5]
+ lib/DebugInfo/CodeView/DebugSubsectionVisitor.cpp [5:5]
+ lib/DebugInfo/CodeView/DebugSymbolRVASubsection.cpp [5:5]
+ lib/DebugInfo/CodeView/DebugSymbolsSubsection.cpp [5:5]
+ lib/DebugInfo/CodeView/EnumTables.cpp [5:5]
+ lib/DebugInfo/CodeView/Formatters.cpp [5:5]
+ lib/DebugInfo/CodeView/GlobalTypeTableBuilder.cpp [5:5]
+ lib/DebugInfo/CodeView/LazyRandomTypeCollection.cpp [5:5]
+ lib/DebugInfo/CodeView/Line.cpp [5:5]
+ lib/DebugInfo/CodeView/MergingTypeTableBuilder.cpp [5:5]
+ lib/DebugInfo/CodeView/RecordName.cpp [5:5]
+ lib/DebugInfo/CodeView/RecordSerialization.cpp [5:5]
+ lib/DebugInfo/CodeView/SimpleTypeSerializer.cpp [5:5]
+ lib/DebugInfo/CodeView/StringsAndChecksums.cpp [5:5]
+ lib/DebugInfo/CodeView/SymbolDumper.cpp [5:5]
+ lib/DebugInfo/CodeView/SymbolRecordHelpers.cpp [5:5]
+ lib/DebugInfo/CodeView/SymbolRecordMapping.cpp [5:5]
+ lib/DebugInfo/CodeView/SymbolSerializer.cpp [5:5]
+ lib/DebugInfo/CodeView/TypeDumpVisitor.cpp [5:5]
+ lib/DebugInfo/CodeView/TypeHashing.cpp [5:5]
+ lib/DebugInfo/CodeView/TypeIndex.cpp [5:5]
+ lib/DebugInfo/CodeView/TypeIndexDiscovery.cpp [5:5]
+ lib/DebugInfo/CodeView/TypeRecordHelpers.cpp [5:5]
+ lib/DebugInfo/CodeView/TypeRecordMapping.cpp [5:5]
+ lib/DebugInfo/CodeView/TypeStreamMerger.cpp [5:5]
+ lib/DebugInfo/CodeView/TypeTableCollection.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFAbbreviationDeclaration.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFAcceleratorTable.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFAddressRange.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFCompileUnit.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFContext.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFDataExtractor.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFDebugAbbrev.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFDebugAddr.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFDebugArangeSet.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFDebugAranges.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFDebugFrame.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFDebugInfoEntry.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFDebugLine.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFDebugLoc.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFDebugMacro.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFDebugPubTable.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFDebugRangeList.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFDebugRnglists.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFDie.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFExpression.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFFormValue.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFGdbIndex.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFListTable.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFLocationExpression.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFTypeUnit.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFUnit.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFUnitIndex.cpp [5:5]
+ lib/DebugInfo/DWARF/DWARFVerifier.cpp [5:5]
+ lib/DebugInfo/GSYM/DwarfTransformer.cpp [5:5]
+ lib/DebugInfo/GSYM/ExtractRanges.cpp [5:5]
+ lib/DebugInfo/GSYM/FileWriter.cpp [5:5]
+ lib/DebugInfo/GSYM/FunctionInfo.cpp [5:5]
+ lib/DebugInfo/GSYM/GsymCreator.cpp [5:5]
+ lib/DebugInfo/GSYM/GsymReader.cpp [5:5]
+ lib/DebugInfo/GSYM/Header.cpp [5:5]
+ lib/DebugInfo/GSYM/InlineInfo.cpp [5:5]
+ lib/DebugInfo/GSYM/LineTable.cpp [5:5]
+ lib/DebugInfo/GSYM/LookupResult.cpp [5:5]
+ lib/DebugInfo/GSYM/ObjectFileTransformer.cpp [5:5]
+ lib/DebugInfo/MSF/MSFBuilder.cpp [5:5]
+ lib/DebugInfo/MSF/MSFCommon.cpp [5:5]
+ lib/DebugInfo/MSF/MSFError.cpp [5:5]
+ lib/DebugInfo/MSF/MappedBlockStream.cpp [5:5]
+ lib/DebugInfo/PDB/GenericError.cpp [5:5]
+ lib/DebugInfo/PDB/IPDBSourceFile.cpp [5:5]
+ lib/DebugInfo/PDB/Native/DbiModuleDescriptor.cpp [5:5]
+ lib/DebugInfo/PDB/Native/DbiModuleDescriptorBuilder.cpp [5:5]
+ lib/DebugInfo/PDB/Native/DbiModuleList.cpp [5:5]
+ lib/DebugInfo/PDB/Native/DbiStream.cpp [5:5]
+ lib/DebugInfo/PDB/Native/DbiStreamBuilder.cpp [5:5]
+ lib/DebugInfo/PDB/Native/EnumTables.cpp [5:5]
+ lib/DebugInfo/PDB/Native/FormatUtil.cpp [5:5]
+ lib/DebugInfo/PDB/Native/GSIStreamBuilder.cpp [5:5]
+ lib/DebugInfo/PDB/Native/GlobalsStream.cpp [5:5]
+ lib/DebugInfo/PDB/Native/Hash.cpp [5:5]
+ lib/DebugInfo/PDB/Native/HashTable.cpp [5:5]
+ lib/DebugInfo/PDB/Native/InfoStream.cpp [5:5]
+ lib/DebugInfo/PDB/Native/InfoStreamBuilder.cpp [5:5]
+ lib/DebugInfo/PDB/Native/InjectedSourceStream.cpp [5:5]
+ lib/DebugInfo/PDB/Native/InputFile.cpp [5:5]
+ lib/DebugInfo/PDB/Native/LinePrinter.cpp [5:5]
+ lib/DebugInfo/PDB/Native/ModuleDebugStream.cpp [5:5]
+ lib/DebugInfo/PDB/Native/NamedStreamMap.cpp [5:5]
+ lib/DebugInfo/PDB/Native/NativeCompilandSymbol.cpp [5:5]
+ lib/DebugInfo/PDB/Native/NativeEnumGlobals.cpp [5:5]
+ lib/DebugInfo/PDB/Native/NativeEnumInjectedSources.cpp [5:5]
+ lib/DebugInfo/PDB/Native/NativeEnumLineNumbers.cpp [5:5]
+ lib/DebugInfo/PDB/Native/NativeEnumModules.cpp [5:5]
+ lib/DebugInfo/PDB/Native/NativeEnumSymbols.cpp [5:5]
+ lib/DebugInfo/PDB/Native/NativeEnumTypes.cpp [5:5]
+ lib/DebugInfo/PDB/Native/NativeExeSymbol.cpp [5:5]
+ lib/DebugInfo/PDB/Native/NativeFunctionSymbol.cpp [5:5]
+ lib/DebugInfo/PDB/Native/NativeInlineSiteSymbol.cpp [5:5]
+ lib/DebugInfo/PDB/Native/NativeLineNumber.cpp [5:5]
+ lib/DebugInfo/PDB/Native/NativePublicSymbol.cpp [5:5]
+ lib/DebugInfo/PDB/Native/NativeRawSymbol.cpp [5:5]
+ lib/DebugInfo/PDB/Native/NativeSession.cpp [5:5]
+ lib/DebugInfo/PDB/Native/NativeSourceFile.cpp [5:5]
+ lib/DebugInfo/PDB/Native/NativeSymbolEnumerator.cpp [5:5]
+ lib/DebugInfo/PDB/Native/NativeTypeArray.cpp [5:5]
+ lib/DebugInfo/PDB/Native/NativeTypeBuiltin.cpp [5:5]
+ lib/DebugInfo/PDB/Native/NativeTypeEnum.cpp [5:5]
+ lib/DebugInfo/PDB/Native/NativeTypeFunctionSig.cpp [5:5]
+ lib/DebugInfo/PDB/Native/NativeTypePointer.cpp [5:5]
+ lib/DebugInfo/PDB/Native/NativeTypeUDT.cpp [5:5]
+ lib/DebugInfo/PDB/Native/PDBFile.cpp [5:5]
+ lib/DebugInfo/PDB/Native/PDBFileBuilder.cpp [5:5]
+ lib/DebugInfo/PDB/Native/PDBStringTable.cpp [5:5]
+ lib/DebugInfo/PDB/Native/PDBStringTableBuilder.cpp [5:5]
+ lib/DebugInfo/PDB/Native/PublicsStream.cpp [5:5]
+ lib/DebugInfo/PDB/Native/SymbolStream.cpp [5:5]
+ lib/DebugInfo/PDB/Native/TpiHashing.cpp [5:5]
+ lib/DebugInfo/PDB/Native/TpiStream.cpp [5:5]
+ lib/DebugInfo/PDB/Native/TpiStreamBuilder.cpp [5:5]
+ lib/DebugInfo/PDB/PDB.cpp [5:5]
+ lib/DebugInfo/PDB/PDBContext.cpp [5:5]
+ lib/DebugInfo/PDB/PDBExtras.cpp [5:5]
+ lib/DebugInfo/PDB/PDBInterfaceAnchors.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymDumper.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbol.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolAnnotation.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolBlock.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolCompiland.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolCompilandDetails.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolCompilandEnv.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolCustom.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolData.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolExe.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolFunc.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolFuncDebugEnd.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolFuncDebugStart.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolLabel.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolPublicSymbol.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolThunk.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolTypeArray.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolTypeBaseClass.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolTypeBuiltin.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolTypeCustom.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolTypeDimension.cpp [6:6]
+ lib/DebugInfo/PDB/PDBSymbolTypeEnum.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolTypeFriend.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolTypeFunctionArg.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolTypeFunctionSig.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolTypeManaged.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolTypePointer.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolTypeTypedef.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolTypeUDT.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolTypeVTable.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolTypeVTableShape.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolUnknown.cpp [5:5]
+ lib/DebugInfo/PDB/PDBSymbolUsingNamespace.cpp [5:5]
+ lib/DebugInfo/PDB/UDTLayout.cpp [5:5]
+ lib/DebugInfo/Symbolize/DIPrinter.cpp [5:5]
+ lib/DebugInfo/Symbolize/Markup.cpp [5:5]
+ lib/DebugInfo/Symbolize/MarkupFilter.cpp [5:5]
+ lib/DebugInfo/Symbolize/SymbolizableObjectFile.cpp [5:5]
+ lib/DebugInfo/Symbolize/Symbolize.cpp [5:5]
+ lib/Debuginfod/BuildIDFetcher.cpp [5:5]
+ lib/Debuginfod/Debuginfod.cpp [5:5]
+ lib/Debuginfod/HTTPClient.cpp [5:5]
+ lib/Debuginfod/HTTPServer.cpp [5:5]
+ lib/Demangle/DLangDemangle.cpp [5:5]
+ lib/Demangle/Demangle.cpp [5:5]
+ lib/Demangle/ItaniumDemangle.cpp [5:5]
+ lib/Demangle/MicrosoftDemangle.cpp [5:5]
+ lib/Demangle/MicrosoftDemangleNodes.cpp [5:5]
+ lib/Demangle/RustDemangle.cpp [5:5]
+ lib/ExecutionEngine/ExecutionEngine.cpp [5:5]
+ lib/ExecutionEngine/ExecutionEngineBindings.cpp [5:5]
+ lib/ExecutionEngine/GDBRegistrationListener.cpp [5:5]
+ lib/ExecutionEngine/Interpreter/Execution.cpp [5:5]
+ lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp [5:5]
+ lib/ExecutionEngine/Interpreter/Interpreter.cpp [5:5]
+ lib/ExecutionEngine/Interpreter/Interpreter.h [5:5]
+ lib/ExecutionEngine/JITLink/COFF.cpp [5:5]
+ lib/ExecutionEngine/JITLink/COFFDirectiveParser.cpp [5:5]
+ lib/ExecutionEngine/JITLink/COFFDirectiveParser.h [5:5]
+ lib/ExecutionEngine/JITLink/COFFLinkGraphBuilder.cpp [5:5]
+ lib/ExecutionEngine/JITLink/COFFLinkGraphBuilder.h [5:5]
+ lib/ExecutionEngine/JITLink/COFF_x86_64.cpp [5:5]
+ lib/ExecutionEngine/JITLink/DWARFRecordSectionSplitter.cpp [5:5]
+ lib/ExecutionEngine/JITLink/DefineExternalSectionStartAndEndSymbols.h [5:5]
+ lib/ExecutionEngine/JITLink/EHFrameSupport.cpp [5:5]
+ lib/ExecutionEngine/JITLink/EHFrameSupportImpl.h [5:5]
+ lib/ExecutionEngine/JITLink/ELF.cpp [5:5]
+ lib/ExecutionEngine/JITLink/ELFLinkGraphBuilder.cpp [5:5]
+ lib/ExecutionEngine/JITLink/ELFLinkGraphBuilder.h [5:5]
+ lib/ExecutionEngine/JITLink/ELF_aarch64.cpp [5:5]
+ lib/ExecutionEngine/JITLink/ELF_i386.cpp [5:5]
+ lib/ExecutionEngine/JITLink/ELF_loongarch.cpp [5:5]
+ lib/ExecutionEngine/JITLink/ELF_riscv.cpp [5:5]
+ lib/ExecutionEngine/JITLink/ELF_x86_64.cpp [5:5]
+ lib/ExecutionEngine/JITLink/JITLink.cpp [5:5]
+ lib/ExecutionEngine/JITLink/JITLinkGeneric.cpp [5:5]
+ lib/ExecutionEngine/JITLink/JITLinkGeneric.h [5:5]
+ lib/ExecutionEngine/JITLink/JITLinkMemoryManager.cpp [5:5]
+ lib/ExecutionEngine/JITLink/MachO.cpp [5:5]
+ lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.cpp [5:5]
+ lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.h [5:5]
+ lib/ExecutionEngine/JITLink/MachO_arm64.cpp [5:5]
+ lib/ExecutionEngine/JITLink/MachO_x86_64.cpp [5:5]
+ lib/ExecutionEngine/JITLink/PerGraphGOTAndPLTStubsBuilder.h [5:5]
+ lib/ExecutionEngine/JITLink/SEHFrameSupport.h [5:5]
+ lib/ExecutionEngine/JITLink/aarch64.cpp [5:5]
+ lib/ExecutionEngine/JITLink/i386.cpp [5:5]
+ lib/ExecutionEngine/JITLink/loongarch.cpp [5:5]
+ lib/ExecutionEngine/JITLink/riscv.cpp [5:5]
+ lib/ExecutionEngine/JITLink/x86_64.cpp [5:5]
+ lib/ExecutionEngine/MCJIT/MCJIT.cpp [5:5]
+ lib/ExecutionEngine/MCJIT/MCJIT.h [5:5]
+ lib/ExecutionEngine/Orc/COFFPlatform.cpp [5:5]
+ lib/ExecutionEngine/Orc/COFFVCRuntimeSupport.cpp [5:5]
+ lib/ExecutionEngine/Orc/CompileOnDemandLayer.cpp [5:5]
+ lib/ExecutionEngine/Orc/CompileUtils.cpp [5:5]
+ lib/ExecutionEngine/Orc/Core.cpp [5:5]
+ lib/ExecutionEngine/Orc/DebugObjectManagerPlugin.cpp [5:5]
+ lib/ExecutionEngine/Orc/DebugUtils.cpp [5:5]
+ lib/ExecutionEngine/Orc/DebuggerSupportPlugin.cpp [5:5]
+ lib/ExecutionEngine/Orc/ELFNixPlatform.cpp [5:5]
+ lib/ExecutionEngine/Orc/EPCDebugObjectRegistrar.cpp [5:5]
+ lib/ExecutionEngine/Orc/EPCDynamicLibrarySearchGenerator.cpp [5:5]
+ lib/ExecutionEngine/Orc/EPCEHFrameRegistrar.cpp [5:5]
+ lib/ExecutionEngine/Orc/EPCGenericDylibManager.cpp [5:5]
+ lib/ExecutionEngine/Orc/EPCGenericJITLinkMemoryManager.cpp [5:5]
+ lib/ExecutionEngine/Orc/EPCGenericRTDyldMemoryManager.cpp [5:5]
+ lib/ExecutionEngine/Orc/EPCIndirectionUtils.cpp [5:5]
+ lib/ExecutionEngine/Orc/ExecutionUtils.cpp [5:5]
+ lib/ExecutionEngine/Orc/ExecutorProcessControl.cpp [5:5]
+ lib/ExecutionEngine/Orc/IRCompileLayer.cpp [5:5]
+ lib/ExecutionEngine/Orc/IRTransformLayer.cpp [5:5]
+ lib/ExecutionEngine/Orc/IndirectionUtils.cpp [5:5]
+ lib/ExecutionEngine/Orc/JITTargetMachineBuilder.cpp [5:5]
+ lib/ExecutionEngine/Orc/LLJIT.cpp [5:5]
+ lib/ExecutionEngine/Orc/Layer.cpp [5:5]
+ lib/ExecutionEngine/Orc/LazyReexports.cpp [5:5]
+ lib/ExecutionEngine/Orc/LookupAndRecordAddrs.cpp [5:5]
+ lib/ExecutionEngine/Orc/MachOPlatform.cpp [5:5]
+ lib/ExecutionEngine/Orc/Mangling.cpp [5:5]
+ lib/ExecutionEngine/Orc/MapperJITLinkMemoryManager.cpp [5:5]
+ lib/ExecutionEngine/Orc/MemoryMapper.cpp [5:5]
+ lib/ExecutionEngine/Orc/ObjectFileInterface.cpp [5:5]
+ lib/ExecutionEngine/Orc/ObjectLinkingLayer.cpp [5:5]
+ lib/ExecutionEngine/Orc/ObjectTransformLayer.cpp [5:5]
+ lib/ExecutionEngine/Orc/OrcABISupport.cpp [5:5]
+ lib/ExecutionEngine/Orc/OrcV2CBindings.cpp [5:5]
+ lib/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.cpp [5:5]
+ lib/ExecutionEngine/Orc/Shared/AllocationActions.cpp [5:5]
+ lib/ExecutionEngine/Orc/Shared/OrcError.cpp [5:5]
+ lib/ExecutionEngine/Orc/Shared/OrcRTBridge.cpp [5:5]
+ lib/ExecutionEngine/Orc/Shared/SimpleRemoteEPCUtils.cpp [5:5]
+ lib/ExecutionEngine/Orc/SimpleRemoteEPC.cpp [5:5]
+ lib/ExecutionEngine/Orc/SpeculateAnalyses.cpp [5:5]
+ lib/ExecutionEngine/Orc/Speculation.cpp [5:5]
+ lib/ExecutionEngine/Orc/TargetProcess/ExecutorSharedMemoryMapperService.cpp [5:5]
+ lib/ExecutionEngine/Orc/TargetProcess/JITLoaderGDB.cpp [5:5]
+ lib/ExecutionEngine/Orc/TargetProcess/OrcRTBootstrap.cpp [5:5]
+ lib/ExecutionEngine/Orc/TargetProcess/OrcRTBootstrap.h [5:5]
+ lib/ExecutionEngine/Orc/TargetProcess/RegisterEHFrames.cpp [5:5]
+ lib/ExecutionEngine/Orc/TargetProcess/SimpleExecutorDylibManager.cpp [5:5]
+ lib/ExecutionEngine/Orc/TargetProcess/SimpleExecutorMemoryManager.cpp [5:5]
+ lib/ExecutionEngine/Orc/TargetProcess/SimpleRemoteEPCServer.cpp [5:5]
+ lib/ExecutionEngine/Orc/TargetProcess/TargetExecutionUtils.cpp [5:5]
+ lib/ExecutionEngine/Orc/TaskDispatch.cpp [5:5]
+ lib/ExecutionEngine/Orc/ThreadSafeModule.cpp [6:6]
+ lib/ExecutionEngine/PerfJITEvents/PerfJITEventListener.cpp [5:5]
+ lib/ExecutionEngine/RuntimeDyld/JITSymbol.cpp [5:5]
+ lib/ExecutionEngine/RuntimeDyld/RTDyldMemoryManager.cpp [5:5]
+ lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp [5:5]
+ lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCOFF.cpp [5:5]
+ lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCOFF.h [5:5]
+ lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp [5:5]
+ lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCheckerImpl.h [5:5]
+ lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp [5:5]
+ lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h [5:5]
+ lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h [5:5]
+ lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp [5:5]
+ lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h [5:5]
+ lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFAArch64.h [6:6]
+ lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFI386.h [5:5]
+ lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFThumb.h [5:5]
+ lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFX86_64.h [5:5]
+ lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldELFMips.cpp [5:5]
+ lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldELFMips.h [5:5]
+ lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h [5:5]
+ lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOARM.h [5:5]
+ lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOI386.h [5:5]
+ lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOX86_64.h [5:5]
+ lib/ExecutionEngine/SectionMemoryManager.cpp [5:5]
+ lib/ExecutionEngine/TargetSelect.cpp [5:5]
+ lib/FileCheck/FileCheck.cpp [5:5]
+ lib/FileCheck/FileCheckImpl.h [5:5]
+ lib/Frontend/HLSL/HLSLResource.cpp [5:5]
+ lib/Frontend/OpenACC/ACC.cpp [5:5]
+ lib/Frontend/OpenMP/OMP.cpp [5:5]
+ lib/Frontend/OpenMP/OMPContext.cpp [5:5]
+ lib/Frontend/OpenMP/OMPIRBuilder.cpp [5:5]
+ lib/FuzzMutate/IRMutator.cpp [5:5]
+ lib/FuzzMutate/OpDescriptor.cpp [5:5]
+ lib/FuzzMutate/Operations.cpp [5:5]
+ lib/FuzzMutate/RandomIRBuilder.cpp [5:5]
+ lib/IR/AbstractCallSite.cpp [5:5]
+ lib/IR/AsmWriter.cpp [5:5]
+ lib/IR/Assumptions.cpp [5:5]
+ lib/IR/AttributeImpl.h [5:5]
+ lib/IR/Attributes.cpp [5:5]
+ lib/IR/AutoUpgrade.cpp [5:5]
+ lib/IR/BasicBlock.cpp [5:5]
+ lib/IR/BuiltinGCs.cpp [5:5]
+ lib/IR/Comdat.cpp [5:5]
+ lib/IR/ConstantFold.cpp [5:5]
+ lib/IR/ConstantRange.cpp [5:5]
+ lib/IR/Constants.cpp [5:5]
+ lib/IR/ConstantsContext.h [5:5]
+ lib/IR/Core.cpp [5:5]
+ lib/IR/DIBuilder.cpp [5:5]
+ lib/IR/DataLayout.cpp [5:5]
+ lib/IR/DebugInfo.cpp [5:5]
+ lib/IR/DebugInfoMetadata.cpp [5:5]
+ lib/IR/DebugLoc.cpp [5:5]
+ lib/IR/DiagnosticHandler.cpp [5:5]
+ lib/IR/DiagnosticInfo.cpp [5:5]
+ lib/IR/DiagnosticPrinter.cpp [5:5]
+ lib/IR/Dominators.cpp [5:5]
+ lib/IR/FPEnv.cpp [5:5]
+ lib/IR/Function.cpp [5:5]
+ lib/IR/GCStrategy.cpp [5:5]
+ lib/IR/GVMaterializer.cpp [5:5]
+ lib/IR/Globals.cpp [5:5]
+ lib/IR/IRBuilder.cpp [5:5]
+ lib/IR/IRPrintingPasses.cpp [5:5]
+ lib/IR/InlineAsm.cpp [5:5]
+ lib/IR/Instruction.cpp [5:5]
+ lib/IR/Instructions.cpp [5:5]
+ lib/IR/IntrinsicInst.cpp [5:5]
+ lib/IR/LLVMContext.cpp [5:5]
+ lib/IR/LLVMContextImpl.cpp [5:5]
+ lib/IR/LLVMContextImpl.h [5:5]
+ lib/IR/LLVMRemarkStreamer.cpp [5:5]
+ lib/IR/LegacyPassManager.cpp [5:5]
+ lib/IR/MDBuilder.cpp [5:5]
+ lib/IR/Mangler.cpp [5:5]
+ lib/IR/Metadata.cpp [5:5]
+ lib/IR/MetadataImpl.h [5:5]
+ lib/IR/Module.cpp [5:5]
+ lib/IR/ModuleSummaryIndex.cpp [5:5]
+ lib/IR/Operator.cpp [5:5]
+ lib/IR/OptBisect.cpp [5:5]
+ lib/IR/Pass.cpp [5:5]
+ lib/IR/PassInstrumentation.cpp [5:5]
+ lib/IR/PassManager.cpp [5:5]
+ lib/IR/PassRegistry.cpp [5:5]
+ lib/IR/PassTimingInfo.cpp [5:5]
+ lib/IR/PrintPasses.cpp [5:5]
+ lib/IR/ProfDataUtils.cpp [5:5]
+ lib/IR/ProfileSummary.cpp [5:5]
+ lib/IR/PseudoProbe.cpp [5:5]
+ lib/IR/ReplaceConstant.cpp [5:5]
+ lib/IR/SSAContext.cpp [5:5]
+ lib/IR/SafepointIRVerifier.cpp [5:5]
+ lib/IR/Statepoint.cpp [5:5]
+ lib/IR/StructuralHash.cpp [5:5]
+ lib/IR/SymbolTableListTraitsImpl.h [5:5]
+ lib/IR/Type.cpp [5:5]
+ lib/IR/TypeFinder.cpp [5:5]
+ lib/IR/TypedPointerType.cpp [5:5]
+ lib/IR/Use.cpp [5:5]
+ lib/IR/User.cpp [5:5]
+ lib/IR/Value.cpp [5:5]
+ lib/IR/ValueSymbolTable.cpp [5:5]
+ lib/IR/VectorBuilder.cpp [5:5]
+ lib/IR/Verifier.cpp [5:5]
+ lib/IRPrinter/IRPrintingPasses.cpp [5:5]
+ lib/IRReader/IRReader.cpp [5:5]
+ lib/InterfaceStub/ELFObjHandler.cpp [5:5]
+ lib/InterfaceStub/IFSHandler.cpp [5:5]
+ lib/InterfaceStub/IFSStub.cpp [5:5]
+ lib/LTO/LTO.cpp [5:5]
+ lib/LTO/LTOBackend.cpp [5:5]
+ lib/LTO/LTOCodeGenerator.cpp [5:5]
+ lib/LTO/LTOModule.cpp [5:5]
+ lib/LTO/SummaryBasedOptimizations.cpp [5:5]
+ lib/LTO/ThinLTOCodeGenerator.cpp [5:5]
+ lib/LTO/UpdateCompilerUsed.cpp [5:5]
+ lib/LineEditor/LineEditor.cpp [5:5]
+ lib/Linker/IRMover.cpp [5:5]
+ lib/Linker/LinkDiagnosticInfo.h [5:5]
+ lib/Linker/LinkModules.cpp [5:5]
+ lib/MC/ConstantPools.cpp [5:5]
+ lib/MC/ELFObjectWriter.cpp [5:5]
+ lib/MC/MCAsmBackend.cpp [5:5]
+ lib/MC/MCAsmInfo.cpp [5:5]
+ lib/MC/MCAsmInfoCOFF.cpp [5:5]
+ lib/MC/MCAsmInfoDarwin.cpp [5:5]
+ lib/MC/MCAsmInfoELF.cpp [5:5]
+ lib/MC/MCAsmInfoGOFF.cpp [5:5]
+ lib/MC/MCAsmInfoWasm.cpp [5:5]
+ lib/MC/MCAsmInfoXCOFF.cpp [5:5]
+ lib/MC/MCAsmMacro.cpp [5:5]
+ lib/MC/MCAsmStreamer.cpp [5:5]
+ lib/MC/MCAssembler.cpp [5:5]
+ lib/MC/MCCodeEmitter.cpp [5:5]
+ lib/MC/MCCodeView.cpp [5:5]
+ lib/MC/MCContext.cpp [5:5]
+ lib/MC/MCDXContainerStreamer.cpp [5:5]
+ lib/MC/MCDXContainerWriter.cpp [5:5]
+ lib/MC/MCDisassembler/Disassembler.cpp [5:5]
+ lib/MC/MCDisassembler/Disassembler.h [5:5]
+ lib/MC/MCDisassembler/MCDisassembler.cpp [5:5]
+ lib/MC/MCDisassembler/MCExternalSymbolizer.cpp [5:5]
+ lib/MC/MCDisassembler/MCRelocationInfo.cpp [5:5]
+ lib/MC/MCDisassembler/MCSymbolizer.cpp [5:5]
+ lib/MC/MCDwarf.cpp [5:5]
+ lib/MC/MCELFObjectTargetWriter.cpp [5:5]
+ lib/MC/MCELFStreamer.cpp [5:5]
+ lib/MC/MCExpr.cpp [5:5]
+ lib/MC/MCFragment.cpp [5:5]
+ lib/MC/MCInst.cpp [5:5]
+ lib/MC/MCInstPrinter.cpp [5:5]
+ lib/MC/MCInstrAnalysis.cpp [5:5]
+ lib/MC/MCInstrDesc.cpp [5:5]
+ lib/MC/MCInstrInfo.cpp [5:5]
+ lib/MC/MCLabel.cpp [5:5]
+ lib/MC/MCLinkerOptimizationHint.cpp [5:5]
+ lib/MC/MCMachOStreamer.cpp [5:5]
+ lib/MC/MCMachObjectTargetWriter.cpp [5:5]
+ lib/MC/MCNullStreamer.cpp [5:5]
+ lib/MC/MCObjectFileInfo.cpp [5:5]
+ lib/MC/MCObjectStreamer.cpp [5:5]
+ lib/MC/MCObjectWriter.cpp [5:5]
+ lib/MC/MCParser/AsmLexer.cpp [5:5]
+ lib/MC/MCParser/AsmParser.cpp [5:5]
+ lib/MC/MCParser/COFFAsmParser.cpp [5:5]
+ lib/MC/MCParser/COFFMasmParser.cpp [5:5]
+ lib/MC/MCParser/DarwinAsmParser.cpp [5:5]
+ lib/MC/MCParser/ELFAsmParser.cpp [5:5]
+ lib/MC/MCParser/GOFFAsmParser.cpp [5:5]
+ lib/MC/MCParser/MCAsmLexer.cpp [5:5]
+ lib/MC/MCParser/MCAsmParser.cpp [5:5]
+ lib/MC/MCParser/MCAsmParserExtension.cpp [5:5]
+ lib/MC/MCParser/MCTargetAsmParser.cpp [5:5]
+ lib/MC/MCParser/MasmParser.cpp [5:5]
+ lib/MC/MCParser/WasmAsmParser.cpp [5:5]
+ lib/MC/MCParser/XCOFFAsmParser.cpp [6:6]
+ lib/MC/MCPseudoProbe.cpp [5:5]
+ lib/MC/MCRegisterInfo.cpp [5:5]
+ lib/MC/MCSPIRVStreamer.cpp [5:5]
+ lib/MC/MCSchedule.cpp [5:5]
+ lib/MC/MCSection.cpp [5:5]
+ lib/MC/MCSectionCOFF.cpp [5:5]
+ lib/MC/MCSectionDXContainer.cpp [5:5]
+ lib/MC/MCSectionELF.cpp [5:5]
+ lib/MC/MCSectionMachO.cpp [5:5]
+ lib/MC/MCSectionWasm.cpp [5:5]
+ lib/MC/MCSectionXCOFF.cpp [5:5]
+ lib/MC/MCStreamer.cpp [5:5]
+ lib/MC/MCSubtargetInfo.cpp [5:5]
+ lib/MC/MCSymbol.cpp [5:5]
+ lib/MC/MCSymbolELF.cpp [5:5]
+ lib/MC/MCSymbolXCOFF.cpp [5:5]
+ lib/MC/MCTargetOptions.cpp [5:5]
+ lib/MC/MCTargetOptionsCommandFlags.cpp [5:5]
+ lib/MC/MCValue.cpp [5:5]
+ lib/MC/MCWasmObjectTargetWriter.cpp [5:5]
+ lib/MC/MCWasmStreamer.cpp [5:5]
+ lib/MC/MCWin64EH.cpp [5:5]
+ lib/MC/MCWinCOFFStreamer.cpp [5:5]
+ lib/MC/MCWinEH.cpp [5:5]
+ lib/MC/MCXCOFFObjectTargetWriter.cpp [5:5]
+ lib/MC/MCXCOFFStreamer.cpp [5:5]
+ lib/MC/MachObjectWriter.cpp [5:5]
+ lib/MC/SPIRVObjectWriter.cpp [5:5]
+ lib/MC/StringTableBuilder.cpp [5:5]
+ lib/MC/SubtargetFeature.cpp [5:5]
+ lib/MC/TargetRegistry.cpp [5:5]
+ lib/MC/WasmObjectWriter.cpp [5:5]
+ lib/MC/WinCOFFObjectWriter.cpp [5:5]
+ lib/MC/XCOFFObjectWriter.cpp [5:5]
+ lib/MCA/CodeEmitter.cpp [5:5]
+ lib/MCA/Context.cpp [5:5]
+ lib/MCA/CustomBehaviour.cpp [5:5]
+ lib/MCA/HWEventListener.cpp [5:5]
+ lib/MCA/HardwareUnits/HardwareUnit.cpp [5:5]
+ lib/MCA/HardwareUnits/LSUnit.cpp [5:5]
+ lib/MCA/HardwareUnits/RegisterFile.cpp [5:5]
+ lib/MCA/HardwareUnits/ResourceManager.cpp [5:5]
+ lib/MCA/HardwareUnits/RetireControlUnit.cpp [5:5]
+ lib/MCA/HardwareUnits/Scheduler.cpp [5:5]
+ lib/MCA/IncrementalSourceMgr.cpp [5:5]
+ lib/MCA/InstrBuilder.cpp [5:5]
+ lib/MCA/Instruction.cpp [5:5]
+ lib/MCA/Pipeline.cpp [5:5]
+ lib/MCA/Stages/DispatchStage.cpp [5:5]
+ lib/MCA/Stages/EntryStage.cpp [5:5]
+ lib/MCA/Stages/ExecuteStage.cpp [5:5]
+ lib/MCA/Stages/InOrderIssueStage.cpp [5:5]
+ lib/MCA/Stages/InstructionTables.cpp [5:5]
+ lib/MCA/Stages/MicroOpQueueStage.cpp [5:5]
+ lib/MCA/Stages/RetireStage.cpp [5:5]
+ lib/MCA/Stages/Stage.cpp [5:5]
+ lib/MCA/Support.cpp [5:5]
+ lib/MCA/View.cpp [5:5]
+ lib/ObjCopy/Archive.cpp [5:5]
+ lib/ObjCopy/Archive.h [5:5]
+ lib/ObjCopy/COFF/COFFObjcopy.cpp [5:5]
+ lib/ObjCopy/COFF/COFFObject.cpp [5:5]
+ lib/ObjCopy/COFF/COFFObject.h [5:5]
+ lib/ObjCopy/COFF/COFFReader.cpp [5:5]
+ lib/ObjCopy/COFF/COFFReader.h [5:5]
+ lib/ObjCopy/COFF/COFFWriter.cpp [5:5]
+ lib/ObjCopy/COFF/COFFWriter.h [5:5]
+ lib/ObjCopy/CommonConfig.cpp [5:5]
+ lib/ObjCopy/ConfigManager.cpp [5:5]
+ lib/ObjCopy/ELF/ELFObjcopy.cpp [5:5]
+ lib/ObjCopy/ELF/ELFObject.cpp [5:5]
+ lib/ObjCopy/ELF/ELFObject.h [5:5]
+ lib/ObjCopy/MachO/MachOLayoutBuilder.cpp [5:5]
+ lib/ObjCopy/MachO/MachOLayoutBuilder.h [5:5]
+ lib/ObjCopy/MachO/MachOObjcopy.cpp [5:5]
+ lib/ObjCopy/MachO/MachOObject.cpp [5:5]
+ lib/ObjCopy/MachO/MachOObject.h [5:5]
+ lib/ObjCopy/MachO/MachOReader.cpp [5:5]
+ lib/ObjCopy/MachO/MachOReader.h [5:5]
+ lib/ObjCopy/MachO/MachOWriter.cpp [5:5]
+ lib/ObjCopy/MachO/MachOWriter.h [5:5]
+ lib/ObjCopy/ObjCopy.cpp [5:5]
+ lib/ObjCopy/XCOFF/XCOFFObjcopy.cpp [5:5]
+ lib/ObjCopy/XCOFF/XCOFFObject.h [5:5]
+ lib/ObjCopy/XCOFF/XCOFFReader.cpp [5:5]
+ lib/ObjCopy/XCOFF/XCOFFReader.h [5:5]
+ lib/ObjCopy/XCOFF/XCOFFWriter.cpp [5:5]
+ lib/ObjCopy/XCOFF/XCOFFWriter.h [5:5]
+ lib/ObjCopy/wasm/WasmObjcopy.cpp [5:5]
+ lib/ObjCopy/wasm/WasmObject.cpp [5:5]
+ lib/ObjCopy/wasm/WasmObject.h [5:5]
+ lib/ObjCopy/wasm/WasmReader.cpp [5:5]
+ lib/ObjCopy/wasm/WasmReader.h [5:5]
+ lib/ObjCopy/wasm/WasmWriter.cpp [5:5]
+ lib/ObjCopy/wasm/WasmWriter.h [5:5]
+ lib/Object/Archive.cpp [5:5]
+ lib/Object/ArchiveWriter.cpp [5:5]
+ lib/Object/Binary.cpp [5:5]
+ lib/Object/BuildID.cpp [5:5]
+ lib/Object/COFFImportFile.cpp [5:5]
+ lib/Object/COFFModuleDefinition.cpp [5:5]
+ lib/Object/COFFObjectFile.cpp [5:5]
+ lib/Object/DXContainer.cpp [5:5]
+ lib/Object/Decompressor.cpp [5:5]
+ lib/Object/ELF.cpp [5:5]
+ lib/Object/ELFObjectFile.cpp [5:5]
+ lib/Object/Error.cpp [5:5]
+ lib/Object/FaultMapParser.cpp [5:5]
+ lib/Object/IRObjectFile.cpp [5:5]
+ lib/Object/IRSymtab.cpp [5:5]
+ lib/Object/MachOObjectFile.cpp [5:5]
+ lib/Object/MachOUniversal.cpp [5:5]
+ lib/Object/MachOUniversalWriter.cpp [5:5]
+ lib/Object/Minidump.cpp [5:5]
+ lib/Object/ModuleSymbolTable.cpp [5:5]
+ lib/Object/Object.cpp [5:5]
+ lib/Object/ObjectFile.cpp [5:5]
+ lib/Object/OffloadBinary.cpp [5:5]
+ lib/Object/RecordStreamer.cpp [5:5]
+ lib/Object/RecordStreamer.h [5:5]
+ lib/Object/RelocationResolver.cpp [5:5]
+ lib/Object/SymbolSize.cpp [5:5]
+ lib/Object/SymbolicFile.cpp [5:5]
+ lib/Object/TapiFile.cpp [5:5]
+ lib/Object/TapiUniversal.cpp [5:5]
+ lib/Object/WasmObjectFile.cpp [5:5]
+ lib/Object/WindowsMachineFlag.cpp [5:5]
+ lib/Object/WindowsResource.cpp [5:5]
+ lib/Object/XCOFFObjectFile.cpp [5:5]
+ lib/ObjectYAML/ArchiveEmitter.cpp [5:5]
+ lib/ObjectYAML/ArchiveYAML.cpp [5:5]
+ lib/ObjectYAML/COFFEmitter.cpp [5:5]
+ lib/ObjectYAML/COFFYAML.cpp [5:5]
+ lib/ObjectYAML/CodeViewYAMLDebugSections.cpp [5:5]
+ lib/ObjectYAML/CodeViewYAMLSymbols.cpp [5:5]
+ lib/ObjectYAML/CodeViewYAMLTypeHashing.cpp [5:5]
+ lib/ObjectYAML/CodeViewYAMLTypes.cpp [5:5]
+ lib/ObjectYAML/DWARFEmitter.cpp [5:5]
+ lib/ObjectYAML/DWARFYAML.cpp [5:5]
+ lib/ObjectYAML/DXContainerEmitter.cpp [5:5]
+ lib/ObjectYAML/DXContainerYAML.cpp [5:5]
+ lib/ObjectYAML/ELFEmitter.cpp [5:5]
+ lib/ObjectYAML/ELFYAML.cpp [5:5]
+ lib/ObjectYAML/MachOEmitter.cpp [5:5]
+ lib/ObjectYAML/MachOYAML.cpp [5:5]
+ lib/ObjectYAML/MinidumpEmitter.cpp [5:5]
+ lib/ObjectYAML/MinidumpYAML.cpp [5:5]
+ lib/ObjectYAML/ObjectYAML.cpp [5:5]
+ lib/ObjectYAML/OffloadEmitter.cpp [5:5]
+ lib/ObjectYAML/OffloadYAML.cpp [5:5]
+ lib/ObjectYAML/WasmEmitter.cpp [5:5]
+ lib/ObjectYAML/WasmYAML.cpp [5:5]
+ lib/ObjectYAML/XCOFFEmitter.cpp [5:5]
+ lib/ObjectYAML/XCOFFYAML.cpp [5:5]
+ lib/ObjectYAML/YAML.cpp [5:5]
+ lib/ObjectYAML/yaml2obj.cpp [5:5]
+ lib/Option/Arg.cpp [5:5]
+ lib/Option/ArgList.cpp [5:5]
+ lib/Option/OptTable.cpp [5:5]
+ lib/Option/Option.cpp [5:5]
+ lib/Passes/OptimizationLevel.cpp [5:5]
+ lib/Passes/PassBuilder.cpp [5:5]
+ lib/Passes/PassBuilderBindings.cpp [5:5]
+ lib/Passes/PassBuilderPipelines.cpp [5:5]
+ lib/Passes/PassPlugin.cpp [5:5]
+ lib/Passes/PassRegistry.def [5:5]
+ lib/Passes/StandardInstrumentations.cpp [5:5]
+ lib/ProfileData/Coverage/CoverageMapping.cpp [5:5]
+ lib/ProfileData/Coverage/CoverageMappingReader.cpp [5:5]
+ lib/ProfileData/Coverage/CoverageMappingWriter.cpp [5:5]
+ lib/ProfileData/GCOV.cpp [5:5]
+ lib/ProfileData/InstrProf.cpp [5:5]
+ lib/ProfileData/InstrProfCorrelator.cpp [5:5]
+ lib/ProfileData/InstrProfReader.cpp [5:5]
+ lib/ProfileData/InstrProfWriter.cpp [5:5]
+ lib/ProfileData/ProfileSummaryBuilder.cpp [5:5]
+ lib/ProfileData/RawMemProfReader.cpp [5:5]
+ lib/ProfileData/SampleProf.cpp [5:5]
+ lib/ProfileData/SampleProfReader.cpp [5:5]
+ lib/ProfileData/SampleProfWriter.cpp [5:5]
+ lib/Remarks/BitstreamRemarkParser.cpp [5:5]
+ lib/Remarks/BitstreamRemarkParser.h [5:5]
+ lib/Remarks/BitstreamRemarkSerializer.cpp [5:5]
+ lib/Remarks/Remark.cpp [5:5]
+ lib/Remarks/RemarkFormat.cpp [5:5]
+ lib/Remarks/RemarkLinker.cpp [5:5]
+ lib/Remarks/RemarkParser.cpp [5:5]
+ lib/Remarks/RemarkSerializer.cpp [5:5]
+ lib/Remarks/RemarkStreamer.cpp [5:5]
+ lib/Remarks/RemarkStringTable.cpp [5:5]
+ lib/Remarks/YAMLRemarkParser.cpp [5:5]
+ lib/Remarks/YAMLRemarkParser.h [5:5]
+ lib/Remarks/YAMLRemarkSerializer.cpp [5:5]
+ lib/Support/ABIBreak.cpp [5:5]
+ lib/Support/AMDGPUMetadata.cpp [5:5]
+ lib/Support/APFixedPoint.cpp [5:5]
+ lib/Support/APFloat.cpp [5:5]
+ lib/Support/APInt.cpp [5:5]
+ lib/Support/APSInt.cpp [5:5]
+ lib/Support/ARMAttributeParser.cpp [5:5]
+ lib/Support/ARMBuildAttrs.cpp [5:5]
+ lib/Support/ARMWinEH.cpp [5:5]
+ lib/Support/AddressRanges.cpp [5:5]
+ lib/Support/Allocator.cpp [5:5]
+ lib/Support/Atomic.cpp [5:5]
+ lib/Support/AutoConvert.cpp [5:5]
+ lib/Support/Base64.cpp [5:5]
+ lib/Support/BinaryStreamError.cpp [5:5]
+ lib/Support/BinaryStreamReader.cpp [5:5]
+ lib/Support/BinaryStreamRef.cpp [5:5]
+ lib/Support/BinaryStreamWriter.cpp [5:5]
+ lib/Support/BlockFrequency.cpp [5:5]
+ lib/Support/BranchProbability.cpp [5:5]
+ lib/Support/BuryPointer.cpp [5:5]
+ lib/Support/COM.cpp [5:5]
+ lib/Support/CRC.cpp [5:5]
+ lib/Support/CSKYAttributeParser.cpp [5:5]
+ lib/Support/CSKYAttributes.cpp [5:5]
+ lib/Support/CachePruning.cpp [5:5]
+ lib/Support/Caching.cpp [5:5]
+ lib/Support/Chrono.cpp [5:5]
+ lib/Support/CodeGenCoverage.cpp [5:5]
+ lib/Support/CommandLine.cpp [5:5]
+ lib/Support/Compression.cpp [5:5]
+ lib/Support/ConvertUTFWrapper.cpp [5:5]
+ lib/Support/CrashRecoveryContext.cpp [5:5]
+ lib/Support/DAGDeltaAlgorithm.cpp [5:5]
+ lib/Support/DJB.cpp [5:5]
+ lib/Support/DataExtractor.cpp [5:5]
+ lib/Support/Debug.cpp [5:5]
+ lib/Support/DebugOptions.h [5:5]
+ lib/Support/DeltaAlgorithm.cpp [5:5]
+ lib/Support/DivisionByConstantInfo.cpp [5:5]
+ lib/Support/DynamicLibrary.cpp [5:5]
+ lib/Support/ELFAttributeParser.cpp [5:5]
+ lib/Support/ELFAttributes.cpp [5:5]
+ lib/Support/Errno.cpp [5:5]
+ lib/Support/Error.cpp [5:5]
+ lib/Support/ErrorHandling.cpp [5:5]
+ lib/Support/ExtensibleRTTI.cpp [5:5]
+ lib/Support/FileCollector.cpp [5:5]
+ lib/Support/FileOutputBuffer.cpp [5:5]
+ lib/Support/FileUtilities.cpp [5:5]
+ lib/Support/FoldingSet.cpp [5:5]
+ lib/Support/FormatVariadic.cpp [5:5]
+ lib/Support/FormattedStream.cpp [5:5]
+ lib/Support/GlobPattern.cpp [5:5]
+ lib/Support/GraphWriter.cpp [5:5]
+ lib/Support/Hashing.cpp [5:5]
+ lib/Support/InitLLVM.cpp [5:5]
+ lib/Support/InstructionCost.cpp [5:5]
+ lib/Support/IntEqClasses.cpp [5:5]
+ lib/Support/IntervalMap.cpp [5:5]
+ lib/Support/ItaniumManglingCanonicalizer.cpp [5:5]
+ lib/Support/JSON.cpp [5:5]
+ lib/Support/KnownBits.cpp [5:5]
+ lib/Support/LEB128.cpp [5:5]
+ lib/Support/LineIterator.cpp [5:5]
+ lib/Support/LockFileManager.cpp [5:5]
+ lib/Support/LowLevelType.cpp [5:5]
+ lib/Support/MSP430AttributeParser.cpp [5:5]
+ lib/Support/MSP430Attributes.cpp [5:5]
+ lib/Support/ManagedStatic.cpp [5:5]
+ lib/Support/MathExtras.cpp [5:5]
+ lib/Support/MemAlloc.cpp [5:5]
+ lib/Support/Memory.cpp [5:5]
+ lib/Support/MemoryBuffer.cpp [5:5]
+ lib/Support/MemoryBufferRef.cpp [5:5]
+ lib/Support/NativeFormatting.cpp [5:5]
+ lib/Support/OptimizedStructLayout.cpp [5:5]
+ lib/Support/Optional.cpp [5:5]
+ lib/Support/Parallel.cpp [5:5]
+ lib/Support/Path.cpp [5:5]
+ lib/Support/PluginLoader.cpp [5:5]
+ lib/Support/PrettyStackTrace.cpp [5:5]
+ lib/Support/Process.cpp [5:5]
+ lib/Support/Program.cpp [5:5]
+ lib/Support/RISCVAttributeParser.cpp [5:5]
+ lib/Support/RISCVAttributes.cpp [5:5]
+ lib/Support/RISCVISAInfo.cpp [5:5]
+ lib/Support/RWMutex.cpp [5:5]
+ lib/Support/RandomNumberGenerator.cpp [5:5]
+ lib/Support/Regex.cpp [5:5]
+ lib/Support/SHA1.cpp [5:5]
+ lib/Support/SHA256.cpp [5:5]
+ lib/Support/ScaledNumber.cpp [5:5]
+ lib/Support/Signals.cpp [5:5]
+ lib/Support/Signposts.cpp [5:5]
+ lib/Support/SmallPtrSet.cpp [5:5]
+ lib/Support/SmallVector.cpp [5:5]
+ lib/Support/SourceMgr.cpp [5:5]
+ lib/Support/SpecialCaseList.cpp [5:5]
+ lib/Support/Statistic.cpp [5:5]
+ lib/Support/StringExtras.cpp [5:5]
+ lib/Support/StringMap.cpp [5:5]
+ lib/Support/StringRef.cpp [5:5]
+ lib/Support/StringSaver.cpp [5:5]
+ lib/Support/SuffixTree.cpp [5:5]
+ lib/Support/SymbolRemappingReader.cpp [5:5]
+ lib/Support/SystemUtils.cpp [5:5]
+ lib/Support/TarWriter.cpp [5:5]
+ lib/Support/ThreadPool.cpp [5:5]
+ lib/Support/Threading.cpp [5:5]
+ lib/Support/TimeProfiler.cpp [5:5]
+ lib/Support/Timer.cpp [5:5]
+ lib/Support/ToolOutputFile.cpp [5:5]
+ lib/Support/TrigramIndex.cpp [5:5]
+ lib/Support/Twine.cpp [5:5]
+ lib/Support/TypeSize.cpp [5:5]
+ lib/Support/Unicode.cpp [5:5]
+ lib/Support/UnicodeNameToCodepoint.cpp [6:6]
+ lib/Support/UnicodeNameToCodepointGenerated.cpp [5:5]
+ lib/Support/Unix/COM.inc [5:5]
+ lib/Support/Unix/DynamicLibrary.inc [5:5]
+ lib/Support/Unix/Memory.inc [5:5]
+ lib/Support/Unix/Path.inc [5:5]
+ lib/Support/Unix/Process.inc [5:5]
+ lib/Support/Unix/Program.inc [5:5]
+ lib/Support/Unix/Signals.inc [5:5]
+ lib/Support/Unix/Threading.inc [5:5]
+ lib/Support/Unix/Unix.h [5:5]
+ lib/Support/Unix/Watchdog.inc [5:5]
+ lib/Support/Valgrind.cpp [5:5]
+ lib/Support/VersionTuple.cpp [5:5]
+ lib/Support/VirtualFileSystem.cpp [5:5]
+ lib/Support/Watchdog.cpp [5:5]
+ lib/Support/Windows/COM.inc [5:5]
+ lib/Support/Windows/DynamicLibrary.inc [5:5]
+ lib/Support/Windows/Memory.inc [5:5]
+ lib/Support/Windows/Path.inc [5:5]
+ lib/Support/Windows/Process.inc [5:5]
+ lib/Support/Windows/Program.inc [5:5]
+ lib/Support/Windows/Signals.inc [5:5]
+ lib/Support/Windows/Threading.inc [5:5]
+ lib/Support/Windows/Watchdog.inc [5:5]
+ lib/Support/WithColor.cpp [5:5]
+ lib/Support/YAMLParser.cpp [5:5]
+ lib/Support/YAMLTraits.cpp [5:5]
+ lib/Support/Z3Solver.cpp [5:5]
+ lib/Support/circular_raw_ostream.cpp [5:5]
+ lib/Support/raw_os_ostream.cpp [5:5]
+ lib/Support/raw_ostream.cpp [5:5]
+ lib/TableGen/DetailedRecordsBackend.cpp [5:5]
+ lib/TableGen/Error.cpp [5:5]
+ lib/TableGen/JSONBackend.cpp [5:5]
+ lib/TableGen/Main.cpp [5:5]
+ lib/TableGen/Parser.cpp [5:5]
+ lib/TableGen/Record.cpp [5:5]
+ lib/TableGen/SetTheory.cpp [5:5]
+ lib/TableGen/StringMatcher.cpp [5:5]
+ lib/TableGen/TGLexer.cpp [5:5]
+ lib/TableGen/TGLexer.h [5:5]
+ lib/TableGen/TGParser.cpp [5:5]
+ lib/TableGen/TGParser.h [5:5]
+ lib/TableGen/TableGenBackend.cpp [5:5]
+ lib/TableGen/TableGenBackendSkeleton.cpp [5:5]
+ lib/Target/AArch64/AArch64.h [5:5]
+ lib/Target/AArch64/AArch64.td [5:5]
+ lib/Target/AArch64/AArch64A53Fix835769.cpp [5:5]
+ lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp [5:5]
+ lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp [5:5]
+ lib/Target/AArch64/AArch64AsmPrinter.cpp [5:5]
+ lib/Target/AArch64/AArch64BranchTargets.cpp [5:5]
+ lib/Target/AArch64/AArch64CallingConvention.cpp [5:5]
+ lib/Target/AArch64/AArch64CallingConvention.h [5:5]
+ lib/Target/AArch64/AArch64CallingConvention.td [5:5]
+ lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp [5:5]
+ lib/Target/AArch64/AArch64CollectLOH.cpp [5:5]
+ lib/Target/AArch64/AArch64Combine.td [5:5]
+ lib/Target/AArch64/AArch64CompressJumpTables.cpp [5:5]
+ lib/Target/AArch64/AArch64CondBrTuning.cpp [5:5]
+ lib/Target/AArch64/AArch64ConditionOptimizer.cpp [5:5]
+ lib/Target/AArch64/AArch64ConditionalCompares.cpp [5:5]
+ lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp [5:5]
+ lib/Target/AArch64/AArch64ExpandImm.cpp [5:5]
+ lib/Target/AArch64/AArch64ExpandImm.h [5:5]
+ lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp [5:5]
+ lib/Target/AArch64/AArch64FalkorHWPFFix.cpp [5:5]
+ lib/Target/AArch64/AArch64FastISel.cpp [5:5]
+ lib/Target/AArch64/AArch64FrameLowering.cpp [5:5]
+ lib/Target/AArch64/AArch64FrameLowering.h [5:5]
+ lib/Target/AArch64/AArch64GenRegisterBankInfo.def [5:5]
+ lib/Target/AArch64/AArch64ISelDAGToDAG.cpp [5:5]
+ lib/Target/AArch64/AArch64ISelLowering.cpp [5:5]
+ lib/Target/AArch64/AArch64ISelLowering.h [5:5]
+ lib/Target/AArch64/AArch64InstrAtomics.td [5:5]
+ lib/Target/AArch64/AArch64InstrFormats.td [5:5]
+ lib/Target/AArch64/AArch64InstrGISel.td [5:5]
+ lib/Target/AArch64/AArch64InstrInfo.cpp [5:5]
+ lib/Target/AArch64/AArch64InstrInfo.h [5:5]
+ lib/Target/AArch64/AArch64InstrInfo.td [5:5]
+ lib/Target/AArch64/AArch64KCFI.cpp [5:5]
+ lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp [5:5]
+ lib/Target/AArch64/AArch64LowerHomogeneousPrologEpilog.cpp [5:5]
+ lib/Target/AArch64/AArch64MCInstLower.cpp [5:5]
+ lib/Target/AArch64/AArch64MCInstLower.h [5:5]
+ lib/Target/AArch64/AArch64MIPeepholeOpt.cpp [5:5]
+ lib/Target/AArch64/AArch64MachineFunctionInfo.cpp [6:6]
+ lib/Target/AArch64/AArch64MachineFunctionInfo.h [5:5]
+ lib/Target/AArch64/AArch64MachineScheduler.cpp [5:5]
+ lib/Target/AArch64/AArch64MachineScheduler.h [5:5]
+ lib/Target/AArch64/AArch64MacroFusion.cpp [5:5]
+ lib/Target/AArch64/AArch64MacroFusion.h [5:5]
+ lib/Target/AArch64/AArch64PBQPRegAlloc.cpp [5:5]
+ lib/Target/AArch64/AArch64PBQPRegAlloc.h [5:5]
+ lib/Target/AArch64/AArch64PerfectShuffle.h [5:5]
+ lib/Target/AArch64/AArch64PfmCounters.td [5:5]
+ lib/Target/AArch64/AArch64PromoteConstant.cpp [5:5]
+ lib/Target/AArch64/AArch64RedundantCopyElimination.cpp [5:5]
+ lib/Target/AArch64/AArch64RegisterBanks.td [5:5]
+ lib/Target/AArch64/AArch64RegisterInfo.cpp [5:5]
+ lib/Target/AArch64/AArch64RegisterInfo.h [5:5]
+ lib/Target/AArch64/AArch64RegisterInfo.td [5:5]
+ lib/Target/AArch64/AArch64SIMDInstrOpt.cpp [4:4]
+ lib/Target/AArch64/AArch64SLSHardening.cpp [5:5]
+ lib/Target/AArch64/AArch64SMEInstrInfo.td [5:5]
+ lib/Target/AArch64/AArch64SVEInstrInfo.td [5:5]
+ lib/Target/AArch64/AArch64SchedA53.td [5:5]
+ lib/Target/AArch64/AArch64SchedA55.td [5:5]
+ lib/Target/AArch64/AArch64SchedA57.td [5:5]
+ lib/Target/AArch64/AArch64SchedA57WriteRes.td [5:5]
+ lib/Target/AArch64/AArch64SchedA64FX.td [5:5]
+ lib/Target/AArch64/AArch64SchedAmpere1.td [5:5]
+ lib/Target/AArch64/AArch64SchedCyclone.td [5:5]
+ lib/Target/AArch64/AArch64SchedExynosM3.td [5:5]
+ lib/Target/AArch64/AArch64SchedExynosM4.td [5:5]
+ lib/Target/AArch64/AArch64SchedExynosM5.td [5:5]
+ lib/Target/AArch64/AArch64SchedFalkor.td [5:5]
+ lib/Target/AArch64/AArch64SchedFalkorDetails.td [5:5]
+ lib/Target/AArch64/AArch64SchedKryo.td [5:5]
+ lib/Target/AArch64/AArch64SchedKryoDetails.td [5:5]
+ lib/Target/AArch64/AArch64SchedNeoverseN2.td [5:5]
+ lib/Target/AArch64/AArch64SchedPredAmpere.td [5:5]
+ lib/Target/AArch64/AArch64SchedPredExynos.td [5:5]
+ lib/Target/AArch64/AArch64SchedPredicates.td [5:5]
+ lib/Target/AArch64/AArch64SchedTSV110.td [5:5]
+ lib/Target/AArch64/AArch64SchedThunderX.td [5:5]
+ lib/Target/AArch64/AArch64SchedThunderX2T99.td [5:5]
+ lib/Target/AArch64/AArch64SchedThunderX3T110.td [5:5]
+ lib/Target/AArch64/AArch64Schedule.td [5:5]
+ lib/Target/AArch64/AArch64SelectionDAGInfo.cpp [5:5]
+ lib/Target/AArch64/AArch64SelectionDAGInfo.h [5:5]
+ lib/Target/AArch64/AArch64SpeculationHardening.cpp [5:5]
+ lib/Target/AArch64/AArch64StackTagging.cpp [5:5]
+ lib/Target/AArch64/AArch64StackTaggingPreRA.cpp [5:5]
+ lib/Target/AArch64/AArch64StorePairSuppress.cpp [5:5]
+ lib/Target/AArch64/AArch64Subtarget.cpp [5:5]
+ lib/Target/AArch64/AArch64Subtarget.h [5:5]
+ lib/Target/AArch64/AArch64SystemOperands.td [5:5]
+ lib/Target/AArch64/AArch64TargetMachine.cpp [5:5]
+ lib/Target/AArch64/AArch64TargetMachine.h [5:5]
+ lib/Target/AArch64/AArch64TargetObjectFile.cpp [5:5]
+ lib/Target/AArch64/AArch64TargetObjectFile.h [5:5]
+ lib/Target/AArch64/AArch64TargetTransformInfo.cpp [5:5]
+ lib/Target/AArch64/AArch64TargetTransformInfo.h [5:5]
+ lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp [5:5]
+ lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp [5:5]
+ lib/Target/AArch64/Disassembler/AArch64Disassembler.h [5:5]
+ lib/Target/AArch64/Disassembler/AArch64ExternalSymbolizer.cpp [5:5]
+ lib/Target/AArch64/Disassembler/AArch64ExternalSymbolizer.h [5:5]
+ lib/Target/AArch64/GISel/AArch64CallLowering.cpp [5:5]
+ lib/Target/AArch64/GISel/AArch64CallLowering.h [5:5]
+ lib/Target/AArch64/GISel/AArch64GlobalISelUtils.cpp [5:5]
+ lib/Target/AArch64/GISel/AArch64GlobalISelUtils.h [5:5]
+ lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp [5:5]
+ lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp [5:5]
+ lib/Target/AArch64/GISel/AArch64LegalizerInfo.h [5:5]
+ lib/Target/AArch64/GISel/AArch64O0PreLegalizerCombiner.cpp [5:5]
+ lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp [5:5]
+ lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp [5:5]
+ lib/Target/AArch64/GISel/AArch64PostSelectOptimize.cpp [5:5]
+ lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp [5:5]
+ lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp [5:5]
+ lib/Target/AArch64/GISel/AArch64RegisterBankInfo.h [5:5]
+ lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h [5:5]
+ lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp [5:5]
+ lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp [5:5]
+ lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp [5:5]
+ lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.h [5:5]
+ lib/Target/AArch64/MCTargetDesc/AArch64FixupKinds.h [5:5]
+ lib/Target/AArch64/MCTargetDesc/AArch64InstPrinter.cpp [5:5]
+ lib/Target/AArch64/MCTargetDesc/AArch64InstPrinter.h [5:5]
+ lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp [5:5]
+ lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.h [5:5]
+ lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp [5:5]
+ lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp [5:5]
+ lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.h [5:5]
+ lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp [5:5]
+ lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.h [5:5]
+ lib/Target/AArch64/MCTargetDesc/AArch64MachObjectWriter.cpp [5:5]
+ lib/Target/AArch64/MCTargetDesc/AArch64TargetStreamer.cpp [5:5]
+ lib/Target/AArch64/MCTargetDesc/AArch64TargetStreamer.h [5:5]
+ lib/Target/AArch64/MCTargetDesc/AArch64WinCOFFObjectWriter.cpp [5:5]
+ lib/Target/AArch64/MCTargetDesc/AArch64WinCOFFStreamer.cpp [5:5]
+ lib/Target/AArch64/MCTargetDesc/AArch64WinCOFFStreamer.h [5:5]
+ lib/Target/AArch64/SMEABIPass.cpp [5:5]
+ lib/Target/AArch64/SMEInstrFormats.td [5:5]
+ lib/Target/AArch64/SVEInstrFormats.td [5:5]
+ lib/Target/AArch64/SVEIntrinsicOpts.cpp [5:5]
+ lib/Target/AArch64/TargetInfo/AArch64TargetInfo.cpp [5:5]
+ lib/Target/AArch64/TargetInfo/AArch64TargetInfo.h [5:5]
+ lib/Target/AArch64/Utils/AArch64BaseInfo.cpp [5:5]
+ lib/Target/AArch64/Utils/AArch64BaseInfo.h [5:5]
+ lib/Target/AArch64/Utils/AArch64SMEAttributes.cpp [5:5]
+ lib/Target/AArch64/Utils/AArch64SMEAttributes.h [5:5]
+ lib/Target/ARM/A15SDOptimizer.cpp [5:5]
+ lib/Target/ARM/ARM.h [5:5]
+ lib/Target/ARM/ARM.td [5:5]
+ lib/Target/ARM/ARMAsmPrinter.cpp [5:5]
+ lib/Target/ARM/ARMAsmPrinter.h [5:5]
+ lib/Target/ARM/ARMBaseInstrInfo.cpp [5:5]
+ lib/Target/ARM/ARMBaseInstrInfo.h [5:5]
+ lib/Target/ARM/ARMBaseRegisterInfo.cpp [5:5]
+ lib/Target/ARM/ARMBaseRegisterInfo.h [5:5]
+ lib/Target/ARM/ARMBasicBlockInfo.cpp [5:5]
+ lib/Target/ARM/ARMBasicBlockInfo.h [5:5]
+ lib/Target/ARM/ARMBlockPlacement.cpp [5:5]
+ lib/Target/ARM/ARMBranchTargets.cpp [5:5]
+ lib/Target/ARM/ARMCallLowering.cpp [5:5]
+ lib/Target/ARM/ARMCallLowering.h [5:5]
+ lib/Target/ARM/ARMCallingConv.cpp [5:5]
+ lib/Target/ARM/ARMCallingConv.h [5:5]
+ lib/Target/ARM/ARMCallingConv.td [5:5]
+ lib/Target/ARM/ARMConstantIslandPass.cpp [5:5]
+ lib/Target/ARM/ARMConstantPoolValue.cpp [5:5]
+ lib/Target/ARM/ARMConstantPoolValue.h [5:5]
+ lib/Target/ARM/ARMExpandPseudoInsts.cpp [5:5]
+ lib/Target/ARM/ARMFastISel.cpp [5:5]
+ lib/Target/ARM/ARMFeatures.h [5:5]
+ lib/Target/ARM/ARMFixCortexA57AES1742098Pass.cpp [5:5]
+ lib/Target/ARM/ARMFrameLowering.cpp [5:5]
+ lib/Target/ARM/ARMFrameLowering.h [5:5]
+ lib/Target/ARM/ARMHazardRecognizer.cpp [5:5]
+ lib/Target/ARM/ARMHazardRecognizer.h [5:5]
+ lib/Target/ARM/ARMISelDAGToDAG.cpp [5:5]
+ lib/Target/ARM/ARMISelLowering.cpp [5:5]
+ lib/Target/ARM/ARMISelLowering.h [5:5]
+ lib/Target/ARM/ARMInstrCDE.td [5:5]
+ lib/Target/ARM/ARMInstrFormats.td [5:5]
+ lib/Target/ARM/ARMInstrInfo.cpp [5:5]
+ lib/Target/ARM/ARMInstrInfo.h [5:5]
+ lib/Target/ARM/ARMInstrInfo.td [5:5]
+ lib/Target/ARM/ARMInstrMVE.td [5:5]
+ lib/Target/ARM/ARMInstrNEON.td [5:5]
+ lib/Target/ARM/ARMInstrThumb.td [5:5]
+ lib/Target/ARM/ARMInstrThumb2.td [5:5]
+ lib/Target/ARM/ARMInstrVFP.td [5:5]
+ lib/Target/ARM/ARMInstructionSelector.cpp [5:5]
+ lib/Target/ARM/ARMLegalizerInfo.cpp [5:5]
+ lib/Target/ARM/ARMLegalizerInfo.h [5:5]
+ lib/Target/ARM/ARMLoadStoreOptimizer.cpp [5:5]
+ lib/Target/ARM/ARMLowOverheadLoops.cpp [5:5]
+ lib/Target/ARM/ARMMCInstLower.cpp [5:5]
+ lib/Target/ARM/ARMMachineFunctionInfo.cpp [5:5]
+ lib/Target/ARM/ARMMachineFunctionInfo.h [5:5]
+ lib/Target/ARM/ARMMacroFusion.cpp [5:5]
+ lib/Target/ARM/ARMMacroFusion.h [5:5]
+ lib/Target/ARM/ARMOptimizeBarriersPass.cpp [6:6]
+ lib/Target/ARM/ARMParallelDSP.cpp [5:5]
+ lib/Target/ARM/ARMPerfectShuffle.h [5:5]
+ lib/Target/ARM/ARMPredicates.td [5:5]
+ lib/Target/ARM/ARMRegisterBankInfo.cpp [5:5]
+ lib/Target/ARM/ARMRegisterBankInfo.h [5:5]
+ lib/Target/ARM/ARMRegisterBanks.td [5:5]
+ lib/Target/ARM/ARMRegisterInfo.cpp [5:5]
+ lib/Target/ARM/ARMRegisterInfo.h [5:5]
+ lib/Target/ARM/ARMRegisterInfo.td [5:5]
+ lib/Target/ARM/ARMSLSHardening.cpp [5:5]
+ lib/Target/ARM/ARMSchedule.td [5:5]
+ lib/Target/ARM/ARMScheduleA57.td [5:5]
+ lib/Target/ARM/ARMScheduleA57WriteRes.td [5:5]
+ lib/Target/ARM/ARMScheduleA8.td [5:5]
+ lib/Target/ARM/ARMScheduleA9.td [5:5]
+ lib/Target/ARM/ARMScheduleM4.td [5:5]
+ lib/Target/ARM/ARMScheduleM55.td [5:5]
+ lib/Target/ARM/ARMScheduleM7.td [5:5]
+ lib/Target/ARM/ARMScheduleR52.td [5:5]
+ lib/Target/ARM/ARMScheduleSwift.td [5:5]
+ lib/Target/ARM/ARMScheduleV6.td [5:5]
+ lib/Target/ARM/ARMSelectionDAGInfo.cpp [5:5]
+ lib/Target/ARM/ARMSelectionDAGInfo.h [5:5]
+ lib/Target/ARM/ARMSubtarget.cpp [5:5]
+ lib/Target/ARM/ARMSubtarget.h [5:5]
+ lib/Target/ARM/ARMSystemRegister.td [5:5]
+ lib/Target/ARM/ARMTargetMachine.cpp [5:5]
+ lib/Target/ARM/ARMTargetMachine.h [5:5]
+ lib/Target/ARM/ARMTargetObjectFile.cpp [5:5]
+ lib/Target/ARM/ARMTargetObjectFile.h [5:5]
+ lib/Target/ARM/ARMTargetTransformInfo.cpp [5:5]
+ lib/Target/ARM/ARMTargetTransformInfo.h [5:5]
+ lib/Target/ARM/AsmParser/ARMAsmParser.cpp [5:5]
+ lib/Target/ARM/Disassembler/ARMDisassembler.cpp [5:5]
+ lib/Target/ARM/MCTargetDesc/ARMAddressingModes.h [5:5]
+ lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp [5:5]
+ lib/Target/ARM/MCTargetDesc/ARMAsmBackend.h [5:5]
+ lib/Target/ARM/MCTargetDesc/ARMAsmBackendDarwin.h [5:5]
+ lib/Target/ARM/MCTargetDesc/ARMAsmBackendELF.h [5:5]
+ lib/Target/ARM/MCTargetDesc/ARMAsmBackendWinCOFF.h [5:5]
+ lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h [5:5]
+ lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp [5:5]
+ lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp [5:5]
+ lib/Target/ARM/MCTargetDesc/ARMFixupKinds.h [5:5]
+ lib/Target/ARM/MCTargetDesc/ARMInstPrinter.cpp [5:5]
+ lib/Target/ARM/MCTargetDesc/ARMInstPrinter.h [5:5]
+ lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp [5:5]
+ lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.h [5:5]
+ lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp [5:5]
+ lib/Target/ARM/MCTargetDesc/ARMMCExpr.cpp [5:5]
+ lib/Target/ARM/MCTargetDesc/ARMMCExpr.h [5:5]
+ lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp [5:5]
+ lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.h [5:5]
+ lib/Target/ARM/MCTargetDesc/ARMMachORelocationInfo.cpp [5:5]
+ lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp [5:5]
+ lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp [5:5]
+ lib/Target/ARM/MCTargetDesc/ARMUnwindOpAsm.cpp [5:5]
+ lib/Target/ARM/MCTargetDesc/ARMUnwindOpAsm.h [5:5]
+ lib/Target/ARM/MCTargetDesc/ARMWinCOFFObjectWriter.cpp [5:5]
+ lib/Target/ARM/MCTargetDesc/ARMWinCOFFStreamer.cpp [5:5]
+ lib/Target/ARM/MLxExpansionPass.cpp [5:5]
+ lib/Target/ARM/MVEGatherScatterLowering.cpp [5:5]
+ lib/Target/ARM/MVELaneInterleavingPass.cpp [5:5]
+ lib/Target/ARM/MVETPAndVPTOptimisationsPass.cpp [5:5]
+ lib/Target/ARM/MVETailPredUtils.h [5:5]
+ lib/Target/ARM/MVETailPredication.cpp [5:5]
+ lib/Target/ARM/MVEVPTBlockPass.cpp [5:5]
+ lib/Target/ARM/TargetInfo/ARMTargetInfo.cpp [5:5]
+ lib/Target/ARM/TargetInfo/ARMTargetInfo.h [5:5]
+ lib/Target/ARM/Thumb1FrameLowering.cpp [5:5]
+ lib/Target/ARM/Thumb1FrameLowering.h [5:5]
+ lib/Target/ARM/Thumb1InstrInfo.cpp [5:5]
+ lib/Target/ARM/Thumb1InstrInfo.h [5:5]
+ lib/Target/ARM/Thumb2ITBlockPass.cpp [5:5]
+ lib/Target/ARM/Thumb2InstrInfo.cpp [5:5]
+ lib/Target/ARM/Thumb2InstrInfo.h [5:5]
+ lib/Target/ARM/Thumb2SizeReduction.cpp [5:5]
+ lib/Target/ARM/ThumbRegisterInfo.cpp [5:5]
+ lib/Target/ARM/ThumbRegisterInfo.h [5:5]
+ lib/Target/ARM/Utils/ARMBaseInfo.cpp [5:5]
+ lib/Target/ARM/Utils/ARMBaseInfo.h [5:5]
+ lib/Target/BPF/AsmParser/BPFAsmParser.cpp [5:5]
+ lib/Target/BPF/BPF.h [5:5]
+ lib/Target/BPF/BPF.td [5:5]
+ lib/Target/BPF/BPFAbstractMemberAccess.cpp [5:5]
+ lib/Target/BPF/BPFAdjustOpt.cpp [5:5]
+ lib/Target/BPF/BPFAsmPrinter.cpp [5:5]
+ lib/Target/BPF/BPFCORE.h [5:5]
+ lib/Target/BPF/BPFCallingConv.td [5:5]
+ lib/Target/BPF/BPFCheckAndAdjustIR.cpp [5:5]
+ lib/Target/BPF/BPFFrameLowering.cpp [5:5]
+ lib/Target/BPF/BPFFrameLowering.h [5:5]
+ lib/Target/BPF/BPFIRPeephole.cpp [5:5]
+ lib/Target/BPF/BPFISelDAGToDAG.cpp [5:5]
+ lib/Target/BPF/BPFISelLowering.cpp [5:5]
+ lib/Target/BPF/BPFISelLowering.h [5:5]
+ lib/Target/BPF/BPFInstrFormats.td [5:5]
+ lib/Target/BPF/BPFInstrInfo.cpp [5:5]
+ lib/Target/BPF/BPFInstrInfo.h [5:5]
+ lib/Target/BPF/BPFInstrInfo.td [5:5]
+ lib/Target/BPF/BPFMCInstLower.cpp [5:5]
+ lib/Target/BPF/BPFMCInstLower.h [5:5]
+ lib/Target/BPF/BPFMIChecking.cpp [5:5]
+ lib/Target/BPF/BPFMIPeephole.cpp [5:5]
+ lib/Target/BPF/BPFMISimplifyPatchable.cpp [5:5]
+ lib/Target/BPF/BPFPreserveDIType.cpp [5:5]
+ lib/Target/BPF/BPFRegisterInfo.cpp [5:5]
+ lib/Target/BPF/BPFRegisterInfo.h [5:5]
+ lib/Target/BPF/BPFRegisterInfo.td [5:5]
+ lib/Target/BPF/BPFSelectionDAGInfo.cpp [5:5]
+ lib/Target/BPF/BPFSelectionDAGInfo.h [5:5]
+ lib/Target/BPF/BPFSubtarget.cpp [5:5]
+ lib/Target/BPF/BPFSubtarget.h [5:5]
+ lib/Target/BPF/BPFTargetMachine.cpp [5:5]
+ lib/Target/BPF/BPFTargetMachine.h [5:5]
+ lib/Target/BPF/BPFTargetTransformInfo.h [5:5]
+ lib/Target/BPF/BTF.def [5:5]
+ lib/Target/BPF/BTF.h [5:5]
+ lib/Target/BPF/BTFDebug.cpp [5:5]
+ lib/Target/BPF/BTFDebug.h [5:5]
+ lib/Target/BPF/Disassembler/BPFDisassembler.cpp [5:5]
+ lib/Target/BPF/MCTargetDesc/BPFAsmBackend.cpp [5:5]
+ lib/Target/BPF/MCTargetDesc/BPFELFObjectWriter.cpp [5:5]
+ lib/Target/BPF/MCTargetDesc/BPFInstPrinter.cpp [5:5]
+ lib/Target/BPF/MCTargetDesc/BPFInstPrinter.h [5:5]
+ lib/Target/BPF/MCTargetDesc/BPFMCAsmInfo.h [5:5]
+ lib/Target/BPF/MCTargetDesc/BPFMCCodeEmitter.cpp [5:5]
+ lib/Target/BPF/MCTargetDesc/BPFMCTargetDesc.cpp [5:5]
+ lib/Target/BPF/MCTargetDesc/BPFMCTargetDesc.h [5:5]
+ lib/Target/BPF/TargetInfo/BPFTargetInfo.cpp [5:5]
+ lib/Target/BPF/TargetInfo/BPFTargetInfo.h [5:5]
+ lib/Target/LoongArch/AsmParser/LoongArchAsmParser.cpp [5:5]
+ lib/Target/LoongArch/Disassembler/LoongArchDisassembler.cpp [5:5]
+ lib/Target/LoongArch/LoongArch.h [5:5]
+ lib/Target/LoongArch/LoongArch.td [5:5]
+ lib/Target/LoongArch/LoongArchAsmPrinter.cpp [5:5]
+ lib/Target/LoongArch/LoongArchAsmPrinter.h [5:5]
+ lib/Target/LoongArch/LoongArchCallingConv.td [5:5]
+ lib/Target/LoongArch/LoongArchExpandAtomicPseudoInsts.cpp [5:5]
+ lib/Target/LoongArch/LoongArchExpandPseudoInsts.cpp [5:5]
+ lib/Target/LoongArch/LoongArchFloat32InstrInfo.td [5:5]
+ lib/Target/LoongArch/LoongArchFloat64InstrInfo.td [5:5]
+ lib/Target/LoongArch/LoongArchFloatInstrFormats.td [5:5]
+ lib/Target/LoongArch/LoongArchFrameLowering.cpp [5:5]
+ lib/Target/LoongArch/LoongArchFrameLowering.h [5:5]
+ lib/Target/LoongArch/LoongArchISelDAGToDAG.cpp [5:5]
+ lib/Target/LoongArch/LoongArchISelDAGToDAG.h [5:5]
+ lib/Target/LoongArch/LoongArchISelLowering.cpp [5:5]
+ lib/Target/LoongArch/LoongArchISelLowering.h [5:5]
+ lib/Target/LoongArch/LoongArchInstrFormats.td [5:5]
+ lib/Target/LoongArch/LoongArchInstrInfo.cpp [5:5]
+ lib/Target/LoongArch/LoongArchInstrInfo.h [5:5]
+ lib/Target/LoongArch/LoongArchInstrInfo.td [5:5]
+ lib/Target/LoongArch/LoongArchMCInstLower.cpp [5:5]
+ lib/Target/LoongArch/LoongArchMachineFunctionInfo.h [5:5]
+ lib/Target/LoongArch/LoongArchRegisterInfo.cpp [5:5]
+ lib/Target/LoongArch/LoongArchRegisterInfo.h [5:5]
+ lib/Target/LoongArch/LoongArchRegisterInfo.td [5:5]
+ lib/Target/LoongArch/LoongArchSubtarget.cpp [5:5]
+ lib/Target/LoongArch/LoongArchSubtarget.h [5:5]
+ lib/Target/LoongArch/LoongArchTargetMachine.cpp [5:5]
+ lib/Target/LoongArch/LoongArchTargetMachine.h [5:5]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.cpp [5:5]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.h [5:5]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchBaseInfo.cpp [5:5]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchBaseInfo.h [5:5]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchELFObjectWriter.cpp [5:5]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchELFStreamer.cpp [5:5]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchELFStreamer.h [5:5]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchFixupKinds.h [5:5]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchInstPrinter.cpp [5:5]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchInstPrinter.h [5:5]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchMCAsmInfo.cpp [5:5]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchMCAsmInfo.h [5:5]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchMCCodeEmitter.cpp [5:5]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchMCExpr.cpp [5:5]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchMCExpr.h [5:5]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchMCTargetDesc.cpp [5:5]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchMCTargetDesc.h [5:5]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchMatInt.cpp [5:5]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchMatInt.h [5:5]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchTargetStreamer.cpp [5:5]
+ lib/Target/LoongArch/MCTargetDesc/LoongArchTargetStreamer.h [5:5]
+ lib/Target/LoongArch/TargetInfo/LoongArchTargetInfo.cpp [5:5]
+ lib/Target/LoongArch/TargetInfo/LoongArchTargetInfo.h [5:5]
+ lib/Target/NVPTX/MCTargetDesc/NVPTXBaseInfo.h [5:5]
+ lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp [5:5]
+ lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.h [5:5]
+ lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.cpp [5:5]
+ lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.h [5:5]
+ lib/Target/NVPTX/MCTargetDesc/NVPTXMCTargetDesc.cpp [5:5]
+ lib/Target/NVPTX/MCTargetDesc/NVPTXMCTargetDesc.h [5:5]
+ lib/Target/NVPTX/MCTargetDesc/NVPTXTargetStreamer.cpp [5:5]
+ lib/Target/NVPTX/MCTargetDesc/NVPTXTargetStreamer.h [5:5]
+ lib/Target/NVPTX/NVPTX.h [5:5]
+ lib/Target/NVPTX/NVPTX.td [5:5]
+ lib/Target/NVPTX/NVPTXAllocaHoisting.cpp [5:5]
+ lib/Target/NVPTX/NVPTXAllocaHoisting.h [5:5]
+ lib/Target/NVPTX/NVPTXAsmPrinter.cpp [5:5]
+ lib/Target/NVPTX/NVPTXAsmPrinter.h [5:5]
+ lib/Target/NVPTX/NVPTXAssignValidGlobalNames.cpp [5:5]
+ lib/Target/NVPTX/NVPTXAtomicLower.cpp [5:5]
+ lib/Target/NVPTX/NVPTXAtomicLower.h [5:5]
+ lib/Target/NVPTX/NVPTXFrameLowering.cpp [5:5]
+ lib/Target/NVPTX/NVPTXFrameLowering.h [5:5]
+ lib/Target/NVPTX/NVPTXGenericToNVVM.cpp [5:5]
+ lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp [5:5]
+ lib/Target/NVPTX/NVPTXISelDAGToDAG.h [5:5]
+ lib/Target/NVPTX/NVPTXISelLowering.cpp [5:5]
+ lib/Target/NVPTX/NVPTXISelLowering.h [5:5]
+ lib/Target/NVPTX/NVPTXImageOptimizer.cpp [5:5]
+ lib/Target/NVPTX/NVPTXInstrFormats.td [5:5]
+ lib/Target/NVPTX/NVPTXInstrInfo.cpp [5:5]
+ lib/Target/NVPTX/NVPTXInstrInfo.h [5:5]
+ lib/Target/NVPTX/NVPTXInstrInfo.td [5:5]
+ lib/Target/NVPTX/NVPTXIntrinsics.td [5:5]
+ lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp [5:5]
+ lib/Target/NVPTX/NVPTXLowerAggrCopies.h [5:5]
+ lib/Target/NVPTX/NVPTXLowerAlloca.cpp [5:5]
+ lib/Target/NVPTX/NVPTXLowerArgs.cpp [5:5]
+ lib/Target/NVPTX/NVPTXMCExpr.cpp [5:5]
+ lib/Target/NVPTX/NVPTXMCExpr.h [5:5]
+ lib/Target/NVPTX/NVPTXMachineFunctionInfo.h [5:5]
+ lib/Target/NVPTX/NVPTXPeephole.cpp [5:5]
+ lib/Target/NVPTX/NVPTXPrologEpilogPass.cpp [5:5]
+ lib/Target/NVPTX/NVPTXProxyRegErasure.cpp [5:5]
+ lib/Target/NVPTX/NVPTXRegisterInfo.cpp [5:5]
+ lib/Target/NVPTX/NVPTXRegisterInfo.h [5:5]
+ lib/Target/NVPTX/NVPTXRegisterInfo.td [5:5]
+ lib/Target/NVPTX/NVPTXReplaceImageHandles.cpp [5:5]
+ lib/Target/NVPTX/NVPTXSubtarget.cpp [5:5]
+ lib/Target/NVPTX/NVPTXSubtarget.h [5:5]
+ lib/Target/NVPTX/NVPTXTargetMachine.cpp [5:5]
+ lib/Target/NVPTX/NVPTXTargetMachine.h [5:5]
+ lib/Target/NVPTX/NVPTXTargetObjectFile.h [5:5]
+ lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp [5:5]
+ lib/Target/NVPTX/NVPTXTargetTransformInfo.h [5:5]
+ lib/Target/NVPTX/NVPTXUtilities.cpp [5:5]
+ lib/Target/NVPTX/NVPTXUtilities.h [5:5]
+ lib/Target/NVPTX/NVVMIntrRange.cpp [5:5]
+ lib/Target/NVPTX/NVVMReflect.cpp [5:5]
+ lib/Target/NVPTX/TargetInfo/NVPTXTargetInfo.cpp [5:5]
+ lib/Target/NVPTX/TargetInfo/NVPTXTargetInfo.h [5:5]
+ lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp [5:5]
+ lib/Target/PowerPC/Disassembler/PPCDisassembler.cpp [5:5]
+ lib/Target/PowerPC/GISel/PPCCallLowering.cpp [5:5]
+ lib/Target/PowerPC/GISel/PPCCallLowering.h [5:5]
+ lib/Target/PowerPC/GISel/PPCInstructionSelector.cpp [5:5]
+ lib/Target/PowerPC/GISel/PPCLegalizerInfo.cpp [5:5]
+ lib/Target/PowerPC/GISel/PPCLegalizerInfo.h [5:5]
+ lib/Target/PowerPC/GISel/PPCRegisterBankInfo.cpp [5:5]
+ lib/Target/PowerPC/GISel/PPCRegisterBankInfo.h [5:5]
+ lib/Target/PowerPC/GISel/PPCRegisterBanks.td [5:5]
+ lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp [5:5]
+ lib/Target/PowerPC/MCTargetDesc/PPCELFObjectWriter.cpp [5:5]
+ lib/Target/PowerPC/MCTargetDesc/PPCELFStreamer.cpp [5:5]
+ lib/Target/PowerPC/MCTargetDesc/PPCELFStreamer.h [5:5]
+ lib/Target/PowerPC/MCTargetDesc/PPCFixupKinds.h [5:5]
+ lib/Target/PowerPC/MCTargetDesc/PPCInstPrinter.cpp [5:5]
+ lib/Target/PowerPC/MCTargetDesc/PPCInstPrinter.h [5:5]
+ lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp [5:5]
+ lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.h [5:5]
+ lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp [5:5]
+ lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.h [5:5]
+ lib/Target/PowerPC/MCTargetDesc/PPCMCExpr.cpp [5:5]
+ lib/Target/PowerPC/MCTargetDesc/PPCMCExpr.h [5:5]
+ lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp [5:5]
+ lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.h [5:5]
+ lib/Target/PowerPC/MCTargetDesc/PPCPredicates.cpp [5:5]
+ lib/Target/PowerPC/MCTargetDesc/PPCPredicates.h [5:5]
+ lib/Target/PowerPC/MCTargetDesc/PPCXCOFFObjectWriter.cpp [6:6]
+ lib/Target/PowerPC/MCTargetDesc/PPCXCOFFStreamer.cpp [5:5]
+ lib/Target/PowerPC/MCTargetDesc/PPCXCOFFStreamer.h [5:5]
+ lib/Target/PowerPC/P10InstrResources.td [5:5]
+ lib/Target/PowerPC/P9InstrResources.td [5:5]
+ lib/Target/PowerPC/PPC.h [5:5]
+ lib/Target/PowerPC/PPC.td [5:5]
+ lib/Target/PowerPC/PPCAsmPrinter.cpp [5:5]
+ lib/Target/PowerPC/PPCBoolRetToInt.cpp [5:5]
+ lib/Target/PowerPC/PPCBranchCoalescing.cpp [5:5]
+ lib/Target/PowerPC/PPCBranchSelector.cpp [5:5]
+ lib/Target/PowerPC/PPCCCState.cpp [5:5]
+ lib/Target/PowerPC/PPCCCState.h [5:5]
+ lib/Target/PowerPC/PPCCTRLoops.cpp [5:5]
+ lib/Target/PowerPC/PPCCTRLoopsVerify.cpp [5:5]
+ lib/Target/PowerPC/PPCCallingConv.cpp [5:5]
+ lib/Target/PowerPC/PPCCallingConv.h [5:5]
+ lib/Target/PowerPC/PPCCallingConv.td [5:5]
+ lib/Target/PowerPC/PPCEarlyReturn.cpp [5:5]
+ lib/Target/PowerPC/PPCExpandAtomicPseudoInsts.cpp [5:5]
+ lib/Target/PowerPC/PPCExpandISEL.cpp [5:5]
+ lib/Target/PowerPC/PPCFastISel.cpp [5:5]
+ lib/Target/PowerPC/PPCFrameLowering.cpp [5:5]
+ lib/Target/PowerPC/PPCFrameLowering.h [5:5]
+ lib/Target/PowerPC/PPCGenRegisterBankInfo.def [5:5]
+ lib/Target/PowerPC/PPCGenScalarMASSEntries.cpp [5:5]
+ lib/Target/PowerPC/PPCHazardRecognizers.cpp [5:5]
+ lib/Target/PowerPC/PPCHazardRecognizers.h [5:5]
+ lib/Target/PowerPC/PPCISelDAGToDAG.cpp [5:5]
+ lib/Target/PowerPC/PPCISelLowering.cpp [5:5]
+ lib/Target/PowerPC/PPCISelLowering.h [5:5]
+ lib/Target/PowerPC/PPCInstr64Bit.td [5:5]
+ lib/Target/PowerPC/PPCInstrAltivec.td [5:5]
+ lib/Target/PowerPC/PPCInstrBuilder.h [5:5]
+ lib/Target/PowerPC/PPCInstrFormats.td [5:5]
+ lib/Target/PowerPC/PPCInstrHTM.td [5:5]
+ lib/Target/PowerPC/PPCInstrInfo.cpp [5:5]
+ lib/Target/PowerPC/PPCInstrInfo.h [5:5]
+ lib/Target/PowerPC/PPCInstrInfo.td [5:5]
+ lib/Target/PowerPC/PPCInstrP10.td [7:7]
+ lib/Target/PowerPC/PPCInstrSPE.td [5:5]
+ lib/Target/PowerPC/PPCInstrVSX.td [5:5]
+ lib/Target/PowerPC/PPCLoopInstrFormPrep.cpp [5:5]
+ lib/Target/PowerPC/PPCLowerMASSVEntries.cpp [5:5]
+ lib/Target/PowerPC/PPCMCInstLower.cpp [5:5]
+ lib/Target/PowerPC/PPCMIPeephole.cpp [5:5]
+ lib/Target/PowerPC/PPCMachineFunctionInfo.cpp [5:5]
+ lib/Target/PowerPC/PPCMachineFunctionInfo.h [5:5]
+ lib/Target/PowerPC/PPCMachineScheduler.cpp [5:5]
+ lib/Target/PowerPC/PPCMachineScheduler.h [5:5]
+ lib/Target/PowerPC/PPCMacroFusion.cpp [5:5]
+ lib/Target/PowerPC/PPCMacroFusion.h [5:5]
+ lib/Target/PowerPC/PPCPerfectShuffle.h [5:5]
+ lib/Target/PowerPC/PPCPfmCounters.td [5:5]
+ lib/Target/PowerPC/PPCPreEmitPeephole.cpp [5:5]
+ lib/Target/PowerPC/PPCReduceCRLogicals.cpp [5:5]
+ lib/Target/PowerPC/PPCRegisterInfo.cpp [5:5]
+ lib/Target/PowerPC/PPCRegisterInfo.h [5:5]
+ lib/Target/PowerPC/PPCRegisterInfo.td [5:5]
+ lib/Target/PowerPC/PPCRegisterInfoDMR.td [5:5]
+ lib/Target/PowerPC/PPCRegisterInfoMMA.td [5:5]
+ lib/Target/PowerPC/PPCSchedPredicates.td [5:5]
+ lib/Target/PowerPC/PPCSchedule.td [5:5]
+ lib/Target/PowerPC/PPCSchedule440.td [5:5]
+ lib/Target/PowerPC/PPCScheduleA2.td [5:5]
+ lib/Target/PowerPC/PPCScheduleE500.td [5:5]
+ lib/Target/PowerPC/PPCScheduleE500mc.td [5:5]
+ lib/Target/PowerPC/PPCScheduleE5500.td [5:5]
+ lib/Target/PowerPC/PPCScheduleG3.td [5:5]
+ lib/Target/PowerPC/PPCScheduleG4.td [5:5]
+ lib/Target/PowerPC/PPCScheduleG4Plus.td [5:5]
+ lib/Target/PowerPC/PPCScheduleG5.td [5:5]
+ lib/Target/PowerPC/PPCScheduleP10.td [5:5]
+ lib/Target/PowerPC/PPCScheduleP7.td [5:5]
+ lib/Target/PowerPC/PPCScheduleP8.td [5:5]
+ lib/Target/PowerPC/PPCScheduleP9.td [5:5]
+ lib/Target/PowerPC/PPCSubtarget.cpp [5:5]
+ lib/Target/PowerPC/PPCSubtarget.h [5:5]
+ lib/Target/PowerPC/PPCTLSDynamicCall.cpp [5:5]
+ lib/Target/PowerPC/PPCTOCRegDeps.cpp [5:5]
+ lib/Target/PowerPC/PPCTargetMachine.cpp [5:5]
+ lib/Target/PowerPC/PPCTargetMachine.h [5:5]
+ lib/Target/PowerPC/PPCTargetObjectFile.cpp [5:5]
+ lib/Target/PowerPC/PPCTargetObjectFile.h [5:5]
+ lib/Target/PowerPC/PPCTargetStreamer.h [5:5]
+ lib/Target/PowerPC/PPCTargetTransformInfo.cpp [5:5]
+ lib/Target/PowerPC/PPCTargetTransformInfo.h [5:5]
+ lib/Target/PowerPC/PPCVSXCopy.cpp [5:5]
+ lib/Target/PowerPC/PPCVSXFMAMutate.cpp [5:5]
+ lib/Target/PowerPC/PPCVSXSwapRemoval.cpp [5:5]
+ lib/Target/PowerPC/TargetInfo/PowerPCTargetInfo.cpp [5:5]
+ lib/Target/PowerPC/TargetInfo/PowerPCTargetInfo.h [5:5]
+ lib/Target/RISCV/GISel/RISCVCallLowering.cpp [5:5]
+ lib/Target/RISCV/GISel/RISCVCallLowering.h [5:5]
+ lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp [5:5]
+ lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp [5:5]
+ lib/Target/RISCV/GISel/RISCVLegalizerInfo.h [5:5]
+ lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp [5:5]
+ lib/Target/RISCV/GISel/RISCVRegisterBankInfo.h [5:5]
+ lib/Target/RISCV/GISel/RISCVRegisterBanks.td [5:5]
+ lib/Target/RISCV/RISCV.h [5:5]
+ lib/Target/RISCV/RISCV.td [5:5]
+ lib/Target/RISCV/RISCVAsmPrinter.cpp [5:5]
+ lib/Target/RISCV/RISCVCallingConv.td [5:5]
+ lib/Target/RISCV/RISCVCodeGenPrepare.cpp [5:5]
+ lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp [5:5]
+ lib/Target/RISCV/RISCVExpandPseudoInsts.cpp [5:5]
+ lib/Target/RISCV/RISCVFeatures.td [5:5]
+ lib/Target/RISCV/RISCVFrameLowering.cpp [5:5]
+ lib/Target/RISCV/RISCVFrameLowering.h [5:5]
+ lib/Target/RISCV/RISCVGatherScatterLowering.cpp [5:5]
+ lib/Target/RISCV/RISCVISelDAGToDAG.cpp [5:5]
+ lib/Target/RISCV/RISCVISelDAGToDAG.h [5:5]
+ lib/Target/RISCV/RISCVISelLowering.cpp [5:5]
+ lib/Target/RISCV/RISCVISelLowering.h [5:5]
+ lib/Target/RISCV/RISCVInsertVSETVLI.cpp [5:5]
+ lib/Target/RISCV/RISCVInstrFormats.td [5:5]
+ lib/Target/RISCV/RISCVInstrFormatsC.td [5:5]
+ lib/Target/RISCV/RISCVInstrFormatsV.td [5:5]
+ lib/Target/RISCV/RISCVInstrInfo.cpp [5:5]
+ lib/Target/RISCV/RISCVInstrInfo.h [5:5]
+ lib/Target/RISCV/RISCVInstrInfo.td [5:5]
+ lib/Target/RISCV/RISCVInstrInfoA.td [5:5]
+ lib/Target/RISCV/RISCVInstrInfoC.td [5:5]
+ lib/Target/RISCV/RISCVInstrInfoD.td [5:5]
+ lib/Target/RISCV/RISCVInstrInfoF.td [5:5]
+ lib/Target/RISCV/RISCVInstrInfoM.td [5:5]
+ lib/Target/RISCV/RISCVInstrInfoV.td [5:5]
+ lib/Target/RISCV/RISCVInstrInfoVPseudos.td [5:5]
+ lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td [5:5]
+ lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td [5:5]
+ lib/Target/RISCV/RISCVInstrInfoXTHead.td [5:5]
+ lib/Target/RISCV/RISCVInstrInfoXVentana.td [5:5]
+ lib/Target/RISCV/RISCVInstrInfoZb.td [5:5]
+ lib/Target/RISCV/RISCVInstrInfoZfh.td [5:5]
+ lib/Target/RISCV/RISCVInstrInfoZicbo.td [5:5]
+ lib/Target/RISCV/RISCVInstrInfoZk.td [5:5]
+ lib/Target/RISCV/RISCVMCInstLower.cpp [5:5]
+ lib/Target/RISCV/RISCVMachineFunctionInfo.cpp [5:5]
+ lib/Target/RISCV/RISCVMachineFunctionInfo.h [5:5]
+ lib/Target/RISCV/RISCVMacroFusion.cpp [5:5]
+ lib/Target/RISCV/RISCVMacroFusion.h [5:5]
+ lib/Target/RISCV/RISCVMakeCompressible.cpp [5:5]
+ lib/Target/RISCV/RISCVMergeBaseOffset.cpp [5:5]
+ lib/Target/RISCV/RISCVProcessors.td [5:5]
+ lib/Target/RISCV/RISCVRedundantCopyElimination.cpp [5:5]
+ lib/Target/RISCV/RISCVRegisterInfo.cpp [5:5]
+ lib/Target/RISCV/RISCVRegisterInfo.h [5:5]
+ lib/Target/RISCV/RISCVRegisterInfo.td [5:5]
+ lib/Target/RISCV/RISCVSExtWRemoval.cpp [5:5]
+ lib/Target/RISCV/RISCVSchedRocket.td [5:5]
+ lib/Target/RISCV/RISCVSchedSiFive7.td [5:5]
+ lib/Target/RISCV/RISCVSchedSyntacoreSCR1.td [5:5]
+ lib/Target/RISCV/RISCVSchedule.td [5:5]
+ lib/Target/RISCV/RISCVScheduleV.td [5:5]
+ lib/Target/RISCV/RISCVScheduleZb.td [5:5]
+ lib/Target/RISCV/RISCVStripWSuffix.cpp [6:6]
+ lib/Target/RISCV/RISCVSubtarget.cpp [5:5]
+ lib/Target/RISCV/RISCVSubtarget.h [5:5]
+ lib/Target/RISCV/RISCVSystemOperands.td [5:5]
+ lib/Target/RISCV/RISCVTargetMachine.cpp [5:5]
+ lib/Target/RISCV/RISCVTargetMachine.h [5:5]
+ lib/Target/RISCV/RISCVTargetObjectFile.cpp [5:5]
+ lib/Target/RISCV/RISCVTargetObjectFile.h [5:5]
+ lib/Target/RISCV/RISCVTargetTransformInfo.cpp [5:5]
+ lib/Target/RISCV/RISCVTargetTransformInfo.h [5:5]
+ lib/Target/Target.cpp [5:5]
+ lib/Target/TargetIntrinsicInfo.cpp [5:5]
+ lib/Target/TargetLoweringObjectFile.cpp [5:5]
+ lib/Target/TargetMachine.cpp [5:5]
+ lib/Target/TargetMachineC.cpp [5:5]
+ lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp [5:5]
+ lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.cpp [5:5]
+ lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.h [5:5]
+ lib/Target/WebAssembly/Disassembler/WebAssemblyDisassembler.cpp [5:5]
+ lib/Target/WebAssembly/MCTargetDesc/WebAssemblyAsmBackend.cpp [5:5]
+ lib/Target/WebAssembly/MCTargetDesc/WebAssemblyFixupKinds.h [5:5]
+ lib/Target/WebAssembly/MCTargetDesc/WebAssemblyInstPrinter.cpp [5:5]
+ lib/Target/WebAssembly/MCTargetDesc/WebAssemblyInstPrinter.h [5:5]
+ lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.cpp [5:5]
+ lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.h [5:5]
+ lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp [5:5]
+ lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.cpp [5:5]
+ lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h [5:5]
+ lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.cpp [5:5]
+ lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.h [5:5]
+ lib/Target/WebAssembly/MCTargetDesc/WebAssemblyWasmObjectWriter.cpp [5:5]
+ lib/Target/WebAssembly/TargetInfo/WebAssemblyTargetInfo.cpp [5:5]
+ lib/Target/WebAssembly/TargetInfo/WebAssemblyTargetInfo.h [5:5]
+ lib/Target/WebAssembly/Utils/WebAssemblyTypeUtilities.cpp [5:5]
+ lib/Target/WebAssembly/Utils/WebAssemblyTypeUtilities.h [5:5]
+ lib/Target/WebAssembly/Utils/WebAssemblyUtilities.cpp [5:5]
+ lib/Target/WebAssembly/Utils/WebAssemblyUtilities.h [5:5]
+ lib/Target/WebAssembly/WebAssembly.h [5:5]
+ lib/Target/WebAssembly/WebAssembly.td [5:5]
+ lib/Target/WebAssembly/WebAssemblyAddMissingPrototypes.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyArgumentMove.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyAsmPrinter.h [5:5]
+ lib/Target/WebAssembly/WebAssemblyCFGSort.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyDebugFixup.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyDebugValueManager.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyDebugValueManager.h [5:5]
+ lib/Target/WebAssembly/WebAssemblyExceptionInfo.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyExceptionInfo.h [5:5]
+ lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyFastISel.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyFixBrTableDefaults.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyFixFunctionBitcasts.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyFixIrreducibleControlFlow.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyFrameLowering.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyFrameLowering.h [5:5]
+ lib/Target/WebAssembly/WebAssemblyISD.def [5:5]
+ lib/Target/WebAssembly/WebAssemblyISelDAGToDAG.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyISelLowering.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyISelLowering.h [5:5]
+ lib/Target/WebAssembly/WebAssemblyInstrAtomics.td [5:5]
+ lib/Target/WebAssembly/WebAssemblyInstrBulkMemory.td [5:5]
+ lib/Target/WebAssembly/WebAssemblyInstrCall.td [5:5]
+ lib/Target/WebAssembly/WebAssemblyInstrControl.td [5:5]
+ lib/Target/WebAssembly/WebAssemblyInstrConv.td [5:5]
+ lib/Target/WebAssembly/WebAssemblyInstrFloat.td [5:5]
+ lib/Target/WebAssembly/WebAssemblyInstrFormats.td [5:5]
+ lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyInstrInfo.h [5:5]
+ lib/Target/WebAssembly/WebAssemblyInstrInfo.td [5:5]
+ lib/Target/WebAssembly/WebAssemblyInstrInteger.td [5:5]
+ lib/Target/WebAssembly/WebAssemblyInstrMemory.td [5:5]
+ lib/Target/WebAssembly/WebAssemblyInstrRef.td [5:5]
+ lib/Target/WebAssembly/WebAssemblyInstrSIMD.td [5:5]
+ lib/Target/WebAssembly/WebAssemblyInstrTable.td [5:5]
+ lib/Target/WebAssembly/WebAssemblyLateEHPrepare.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyLowerBrUnless.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyLowerRefTypesIntPtrConv.cpp [6:6]
+ lib/Target/WebAssembly/WebAssemblyMCInstLower.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyMCInstLower.h [5:5]
+ lib/Target/WebAssembly/WebAssemblyMCLowerPrePass.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.h [5:5]
+ lib/Target/WebAssembly/WebAssemblyMemIntrinsicResults.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyNullifyDebugValueLists.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyOptimizeReturned.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyPeephole.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyRegColoring.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyRegNumbering.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyRegStackify.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyRegisterInfo.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyRegisterInfo.h [5:5]
+ lib/Target/WebAssembly/WebAssemblyRegisterInfo.td [5:5]
+ lib/Target/WebAssembly/WebAssemblyReplacePhysRegs.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.h [5:5]
+ lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.h [5:5]
+ lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblySortRegion.h [5:5]
+ lib/Target/WebAssembly/WebAssemblySubtarget.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblySubtarget.h [5:5]
+ lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyTargetMachine.h [5:5]
+ lib/Target/WebAssembly/WebAssemblyTargetObjectFile.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyTargetObjectFile.h [5:5]
+ lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp [5:5]
+ lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h [5:5]
+ lib/Target/X86/AsmParser/X86AsmParser.cpp [5:5]
+ lib/Target/X86/AsmParser/X86AsmParserCommon.h [5:5]
+ lib/Target/X86/AsmParser/X86Operand.h [5:5]
+ lib/Target/X86/Disassembler/X86Disassembler.cpp [5:5]
+ lib/Target/X86/Disassembler/X86DisassemblerDecoder.h [5:5]
+ lib/Target/X86/ImmutableGraph.h [5:5]
+ lib/Target/X86/MCA/X86CustomBehaviour.cpp [5:5]
+ lib/Target/X86/MCA/X86CustomBehaviour.h [5:5]
+ lib/Target/X86/MCTargetDesc/X86ATTInstPrinter.cpp [5:5]
+ lib/Target/X86/MCTargetDesc/X86ATTInstPrinter.h [5:5]
+ lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp [5:5]
+ lib/Target/X86/MCTargetDesc/X86BaseInfo.h [5:5]
+ lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp [5:5]
+ lib/Target/X86/MCTargetDesc/X86FixupKinds.h [5:5]
+ lib/Target/X86/MCTargetDesc/X86InstComments.cpp [5:5]
+ lib/Target/X86/MCTargetDesc/X86InstComments.h [5:5]
+ lib/Target/X86/MCTargetDesc/X86InstPrinterCommon.cpp [5:5]
+ lib/Target/X86/MCTargetDesc/X86InstPrinterCommon.h [5:5]
+ lib/Target/X86/MCTargetDesc/X86InstrRelaxTables.cpp [5:5]
+ lib/Target/X86/MCTargetDesc/X86InstrRelaxTables.h [5:5]
+ lib/Target/X86/MCTargetDesc/X86IntelInstPrinter.cpp [5:5]
+ lib/Target/X86/MCTargetDesc/X86IntelInstPrinter.h [5:5]
+ lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp [5:5]
+ lib/Target/X86/MCTargetDesc/X86MCAsmInfo.h [5:5]
+ lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp [5:5]
+ lib/Target/X86/MCTargetDesc/X86MCExpr.h [5:5]
+ lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp [5:5]
+ lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h [5:5]
+ lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp [5:5]
+ lib/Target/X86/MCTargetDesc/X86MnemonicTables.cpp [5:5]
+ lib/Target/X86/MCTargetDesc/X86ShuffleDecode.cpp [5:5]
+ lib/Target/X86/MCTargetDesc/X86ShuffleDecode.h [5:5]
+ lib/Target/X86/MCTargetDesc/X86TargetStreamer.h [5:5]
+ lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp [5:5]
+ lib/Target/X86/MCTargetDesc/X86WinCOFFStreamer.cpp [5:5]
+ lib/Target/X86/MCTargetDesc/X86WinCOFFTargetStreamer.cpp [5:5]
+ lib/Target/X86/TargetInfo/X86TargetInfo.cpp [5:5]
+ lib/Target/X86/TargetInfo/X86TargetInfo.h [5:5]
+ lib/Target/X86/X86.h [5:5]
+ lib/Target/X86/X86.td [5:5]
+ lib/Target/X86/X86AsmPrinter.cpp [5:5]
+ lib/Target/X86/X86AsmPrinter.h [5:5]
+ lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp [5:5]
+ lib/Target/X86/X86AvoidTrailingCall.cpp [5:5]
+ lib/Target/X86/X86CallFrameOptimization.cpp [5:5]
+ lib/Target/X86/X86CallLowering.cpp [5:5]
+ lib/Target/X86/X86CallLowering.h [5:5]
+ lib/Target/X86/X86CallingConv.cpp [5:5]
+ lib/Target/X86/X86CallingConv.h [5:5]
+ lib/Target/X86/X86CallingConv.td [5:5]
+ lib/Target/X86/X86CmovConversion.cpp [5:5]
+ lib/Target/X86/X86DiscriminateMemOps.cpp [5:5]
+ lib/Target/X86/X86DomainReassignment.cpp [5:5]
+ lib/Target/X86/X86DynAllocaExpander.cpp [5:5]
+ lib/Target/X86/X86EvexToVex.cpp [6:6]
+ lib/Target/X86/X86ExpandPseudo.cpp [5:5]
+ lib/Target/X86/X86FastISel.cpp [5:5]
+ lib/Target/X86/X86FastPreTileConfig.cpp [5:5]
+ lib/Target/X86/X86FastTileConfig.cpp [5:5]
+ lib/Target/X86/X86FixupBWInsts.cpp [5:5]
+ lib/Target/X86/X86FixupLEAs.cpp [5:5]
+ lib/Target/X86/X86FixupSetCC.cpp [5:5]
+ lib/Target/X86/X86FlagsCopyLowering.cpp [5:5]
+ lib/Target/X86/X86FloatingPoint.cpp [5:5]
+ lib/Target/X86/X86FrameLowering.cpp [5:5]
+ lib/Target/X86/X86FrameLowering.h [5:5]
+ lib/Target/X86/X86GenRegisterBankInfo.def [5:5]
+ lib/Target/X86/X86ISelDAGToDAG.cpp [5:5]
+ lib/Target/X86/X86ISelLowering.cpp [5:5]
+ lib/Target/X86/X86ISelLowering.h [5:5]
+ lib/Target/X86/X86IndirectBranchTracking.cpp [5:5]
+ lib/Target/X86/X86IndirectThunks.cpp [5:5]
+ lib/Target/X86/X86InsertPrefetch.cpp [5:5]
+ lib/Target/X86/X86InsertWait.cpp [5:5]
+ lib/Target/X86/X86InstCombineIntrinsic.cpp [5:5]
+ lib/Target/X86/X86Instr3DNow.td [5:5]
+ lib/Target/X86/X86InstrAMX.td [5:5]
+ lib/Target/X86/X86InstrAVX512.td [5:5]
+ lib/Target/X86/X86InstrArithmetic.td [5:5]
+ lib/Target/X86/X86InstrBuilder.h [5:5]
+ lib/Target/X86/X86InstrCMovSetCC.td [5:5]
+ lib/Target/X86/X86InstrCompiler.td [5:5]
+ lib/Target/X86/X86InstrControl.td [5:5]
+ lib/Target/X86/X86InstrExtension.td [5:5]
+ lib/Target/X86/X86InstrFMA.td [5:5]
+ lib/Target/X86/X86InstrFMA3Info.cpp [5:5]
+ lib/Target/X86/X86InstrFMA3Info.h [5:5]
+ lib/Target/X86/X86InstrFPStack.td [5:5]
+ lib/Target/X86/X86InstrFoldTables.cpp [5:5]
+ lib/Target/X86/X86InstrFoldTables.h [5:5]
+ lib/Target/X86/X86InstrFormats.td [5:5]
+ lib/Target/X86/X86InstrFragmentsSIMD.td [5:5]
+ lib/Target/X86/X86InstrInfo.cpp [5:5]
+ lib/Target/X86/X86InstrInfo.h [5:5]
+ lib/Target/X86/X86InstrInfo.td [5:5]
+ lib/Target/X86/X86InstrKL.td [6:6]
+ lib/Target/X86/X86InstrMMX.td [5:5]
+ lib/Target/X86/X86InstrRAOINT.td [5:5]
+ lib/Target/X86/X86InstrSGX.td [5:5]
+ lib/Target/X86/X86InstrSNP.td [5:5]
+ lib/Target/X86/X86InstrSSE.td [5:5]
+ lib/Target/X86/X86InstrSVM.td [5:5]
+ lib/Target/X86/X86InstrShiftRotate.td [5:5]
+ lib/Target/X86/X86InstrSystem.td [5:5]
+ lib/Target/X86/X86InstrTDX.td [5:5]
+ lib/Target/X86/X86InstrTSX.td [5:5]
+ lib/Target/X86/X86InstrVMX.td [5:5]
+ lib/Target/X86/X86InstrVecCompiler.td [5:5]
+ lib/Target/X86/X86InstrXOP.td [5:5]
+ lib/Target/X86/X86InstructionSelector.cpp [5:5]
+ lib/Target/X86/X86InterleavedAccess.cpp [5:5]
+ lib/Target/X86/X86IntrinsicsInfo.h [5:5]
+ lib/Target/X86/X86KCFI.cpp [5:5]
+ lib/Target/X86/X86LegalizerInfo.cpp [5:5]
+ lib/Target/X86/X86LegalizerInfo.h [6:6]
+ lib/Target/X86/X86LoadValueInjectionLoadHardening.cpp [5:5]
+ lib/Target/X86/X86LoadValueInjectionRetHardening.cpp [5:5]
+ lib/Target/X86/X86LowerAMXIntrinsics.cpp [5:5]
+ lib/Target/X86/X86LowerAMXType.cpp [5:5]
+ lib/Target/X86/X86LowerTileCopy.cpp [5:5]
+ lib/Target/X86/X86MCInstLower.cpp [5:5]
+ lib/Target/X86/X86MachineFunctionInfo.cpp [5:5]
+ lib/Target/X86/X86MachineFunctionInfo.h [5:5]
+ lib/Target/X86/X86MacroFusion.cpp [5:5]
+ lib/Target/X86/X86MacroFusion.h [5:5]
+ lib/Target/X86/X86OptimizeLEAs.cpp [5:5]
+ lib/Target/X86/X86PadShortFunction.cpp [5:5]
+ lib/Target/X86/X86PartialReduction.cpp [5:5]
+ lib/Target/X86/X86PfmCounters.td [5:5]
+ lib/Target/X86/X86PreAMXConfig.cpp [5:5]
+ lib/Target/X86/X86PreTileConfig.cpp [5:5]
+ lib/Target/X86/X86RegisterBankInfo.cpp [5:5]
+ lib/Target/X86/X86RegisterBankInfo.h [5:5]
+ lib/Target/X86/X86RegisterBanks.td [5:5]
+ lib/Target/X86/X86RegisterInfo.cpp [5:5]
+ lib/Target/X86/X86RegisterInfo.h [5:5]
+ lib/Target/X86/X86RegisterInfo.td [5:5]
+ lib/Target/X86/X86ReturnThunks.cpp [5:5]
+ lib/Target/X86/X86SchedAlderlakeP.td [5:5]
+ lib/Target/X86/X86SchedBroadwell.td [5:5]
+ lib/Target/X86/X86SchedHaswell.td [5:5]
+ lib/Target/X86/X86SchedIceLake.td [5:5]
+ lib/Target/X86/X86SchedPredicates.td [5:5]
+ lib/Target/X86/X86SchedSandyBridge.td [5:5]
+ lib/Target/X86/X86SchedSkylakeClient.td [5:5]
+ lib/Target/X86/X86SchedSkylakeServer.td [5:5]
+ lib/Target/X86/X86Schedule.td [5:5]
+ lib/Target/X86/X86ScheduleAtom.td [5:5]
+ lib/Target/X86/X86ScheduleBdVer2.td [5:5]
+ lib/Target/X86/X86ScheduleBtVer2.td [5:5]
+ lib/Target/X86/X86ScheduleSLM.td [5:5]
+ lib/Target/X86/X86ScheduleZnver1.td [5:5]
+ lib/Target/X86/X86ScheduleZnver2.td [5:5]
+ lib/Target/X86/X86ScheduleZnver3.td [5:5]
+ lib/Target/X86/X86SelectionDAGInfo.cpp [5:5]
+ lib/Target/X86/X86SelectionDAGInfo.h [5:5]
+ lib/Target/X86/X86ShuffleDecodeConstantPool.cpp [5:5]
+ lib/Target/X86/X86ShuffleDecodeConstantPool.h [5:5]
+ lib/Target/X86/X86SpeculativeExecutionSideEffectSuppression.cpp [5:5]
+ lib/Target/X86/X86SpeculativeLoadHardening.cpp [5:5]
+ lib/Target/X86/X86Subtarget.cpp [5:5]
+ lib/Target/X86/X86Subtarget.h [5:5]
+ lib/Target/X86/X86TargetMachine.cpp [5:5]
+ lib/Target/X86/X86TargetMachine.h [5:5]
+ lib/Target/X86/X86TargetObjectFile.cpp [5:5]
+ lib/Target/X86/X86TargetObjectFile.h [5:5]
+ lib/Target/X86/X86TargetTransformInfo.cpp [5:5]
+ lib/Target/X86/X86TargetTransformInfo.h [5:5]
+ lib/Target/X86/X86TileConfig.cpp [5:5]
+ lib/Target/X86/X86VZeroUpper.cpp [5:5]
+ lib/Target/X86/X86WinEHState.cpp [5:5]
+ lib/TargetParser/AArch64TargetParser.cpp [5:5]
+ lib/TargetParser/ARMTargetParser.cpp [5:5]
+ lib/TargetParser/ARMTargetParserCommon.cpp [5:5]
+ lib/TargetParser/Host.cpp [5:5]
+ lib/TargetParser/LoongArchTargetParser.cpp [5:5]
+ lib/TargetParser/RISCVTargetParser.cpp [5:5]
+ lib/TargetParser/TargetParser.cpp [5:5]
+ lib/TargetParser/Triple.cpp [5:5]
+ lib/TargetParser/Unix/Host.inc [5:5]
+ lib/TargetParser/Windows/Host.inc [5:5]
+ lib/TargetParser/X86TargetParser.cpp [5:5]
+ lib/TextAPI/Architecture.cpp [5:5]
+ lib/TextAPI/ArchitectureSet.cpp [5:5]
+ lib/TextAPI/InterfaceFile.cpp [5:5]
+ lib/TextAPI/PackedVersion.cpp [5:5]
+ lib/TextAPI/Platform.cpp [5:5]
+ lib/TextAPI/Symbol.cpp [5:5]
+ lib/TextAPI/Target.cpp [5:5]
+ lib/TextAPI/TextAPIContext.h [5:5]
+ lib/TextAPI/TextStub.cpp [5:5]
+ lib/TextAPI/TextStubCommon.cpp [5:5]
+ lib/TextAPI/TextStubCommon.h [5:5]
+ lib/ToolDrivers/llvm-dlltool/DlltoolDriver.cpp [5:5]
+ lib/ToolDrivers/llvm-lib/LibDriver.cpp [5:5]
+ lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp [5:5]
+ lib/Transforms/AggressiveInstCombine/AggressiveInstCombineInternal.h [5:5]
+ lib/Transforms/AggressiveInstCombine/TruncInstCombine.cpp [5:5]
+ lib/Transforms/CFGuard/CFGuard.cpp [5:5]
+ lib/Transforms/Coroutines/CoroCleanup.cpp [5:5]
+ lib/Transforms/Coroutines/CoroConditionalWrapper.cpp [5:5]
+ lib/Transforms/Coroutines/CoroEarly.cpp [5:5]
+ lib/Transforms/Coroutines/CoroElide.cpp [5:5]
+ lib/Transforms/Coroutines/CoroFrame.cpp [5:5]
+ lib/Transforms/Coroutines/CoroInstr.h [5:5]
+ lib/Transforms/Coroutines/CoroInternal.h [5:5]
+ lib/Transforms/Coroutines/CoroSplit.cpp [5:5]
+ lib/Transforms/Coroutines/Coroutines.cpp [5:5]
+ lib/Transforms/IPO/AlwaysInliner.cpp [5:5]
+ lib/Transforms/IPO/Annotation2Metadata.cpp [5:5]
+ lib/Transforms/IPO/ArgumentPromotion.cpp [5:5]
+ lib/Transforms/IPO/Attributor.cpp [5:5]
+ lib/Transforms/IPO/AttributorAttributes.cpp [5:5]
+ lib/Transforms/IPO/BarrierNoopPass.cpp [5:5]
+ lib/Transforms/IPO/BlockExtractor.cpp [5:5]
+ lib/Transforms/IPO/CalledValuePropagation.cpp [5:5]
+ lib/Transforms/IPO/ConstantMerge.cpp [5:5]
+ lib/Transforms/IPO/CrossDSOCFI.cpp [5:5]
+ lib/Transforms/IPO/DeadArgumentElimination.cpp [5:5]
+ lib/Transforms/IPO/ElimAvailExtern.cpp [5:5]
+ lib/Transforms/IPO/ExtractGV.cpp [5:5]
+ lib/Transforms/IPO/ForceFunctionAttrs.cpp [5:5]
+ lib/Transforms/IPO/FunctionAttrs.cpp [5:5]
+ lib/Transforms/IPO/FunctionImport.cpp [5:5]
+ lib/Transforms/IPO/FunctionSpecialization.cpp [5:5]
+ lib/Transforms/IPO/GlobalDCE.cpp [5:5]
+ lib/Transforms/IPO/GlobalOpt.cpp [5:5]
+ lib/Transforms/IPO/GlobalSplit.cpp [5:5]
+ lib/Transforms/IPO/HotColdSplitting.cpp [5:5]
+ lib/Transforms/IPO/IPO.cpp [5:5]
+ lib/Transforms/IPO/IROutliner.cpp [5:5]
+ lib/Transforms/IPO/InferFunctionAttrs.cpp [5:5]
+ lib/Transforms/IPO/InlineSimple.cpp [5:5]
+ lib/Transforms/IPO/Inliner.cpp [5:5]
+ lib/Transforms/IPO/Internalize.cpp [5:5]
+ lib/Transforms/IPO/LoopExtractor.cpp [5:5]
+ lib/Transforms/IPO/LowerTypeTests.cpp [5:5]
+ lib/Transforms/IPO/MergeFunctions.cpp [5:5]
+ lib/Transforms/IPO/ModuleInliner.cpp [5:5]
+ lib/Transforms/IPO/OpenMPOpt.cpp [5:5]
+ lib/Transforms/IPO/PartialInlining.cpp [5:5]
+ lib/Transforms/IPO/PassManagerBuilder.cpp [5:5]
+ lib/Transforms/IPO/SCCP.cpp [5:5]
+ lib/Transforms/IPO/SampleContextTracker.cpp [5:5]
+ lib/Transforms/IPO/SampleProfile.cpp [5:5]
+ lib/Transforms/IPO/SampleProfileProbe.cpp [5:5]
+ lib/Transforms/IPO/StripDeadPrototypes.cpp [5:5]
+ lib/Transforms/IPO/StripSymbols.cpp [5:5]
+ lib/Transforms/IPO/SyntheticCountsPropagation.cpp [5:5]
+ lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp [5:5]
+ lib/Transforms/IPO/WholeProgramDevirt.cpp [5:5]
+ lib/Transforms/InstCombine/InstCombineAddSub.cpp [5:5]
+ lib/Transforms/InstCombine/InstCombineAndOrXor.cpp [5:5]
+ lib/Transforms/InstCombine/InstCombineAtomicRMW.cpp [5:5]
+ lib/Transforms/InstCombine/InstCombineCalls.cpp [5:5]
+ lib/Transforms/InstCombine/InstCombineCasts.cpp [5:5]
+ lib/Transforms/InstCombine/InstCombineCompares.cpp [5:5]
+ lib/Transforms/InstCombine/InstCombineInternal.h [5:5]
+ lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp [5:5]
+ lib/Transforms/InstCombine/InstCombineMulDivRem.cpp [5:5]
+ lib/Transforms/InstCombine/InstCombineNegator.cpp [5:5]
+ lib/Transforms/InstCombine/InstCombinePHI.cpp [5:5]
+ lib/Transforms/InstCombine/InstCombineSelect.cpp [5:5]
+ lib/Transforms/InstCombine/InstCombineShifts.cpp [5:5]
+ lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp [5:5]
+ lib/Transforms/InstCombine/InstCombineVectorOps.cpp [5:5]
+ lib/Transforms/InstCombine/InstructionCombining.cpp [5:5]
+ lib/Transforms/Instrumentation/AddressSanitizer.cpp [5:5]
+ lib/Transforms/Instrumentation/BoundsChecking.cpp [5:5]
+ lib/Transforms/Instrumentation/CFGMST.h [5:5]
+ lib/Transforms/Instrumentation/CGProfile.cpp [5:5]
+ lib/Transforms/Instrumentation/ControlHeightReduction.cpp [5:5]
+ lib/Transforms/Instrumentation/DataFlowSanitizer.cpp [5:5]
+ lib/Transforms/Instrumentation/GCOVProfiling.cpp [5:5]
+ lib/Transforms/Instrumentation/HWAddressSanitizer.cpp [5:5]
+ lib/Transforms/Instrumentation/IndirectCallPromotion.cpp [5:5]
+ lib/Transforms/Instrumentation/InstrOrderFile.cpp [5:5]
+ lib/Transforms/Instrumentation/InstrProfiling.cpp [5:5]
+ lib/Transforms/Instrumentation/Instrumentation.cpp [5:5]
+ lib/Transforms/Instrumentation/KCFI.cpp [5:5]
+ lib/Transforms/Instrumentation/MemProfiler.cpp [5:5]
+ lib/Transforms/Instrumentation/MemorySanitizer.cpp [5:5]
+ lib/Transforms/Instrumentation/PGOInstrumentation.cpp [5:5]
+ lib/Transforms/Instrumentation/PGOMemOPSizeOpt.cpp [5:5]
+ lib/Transforms/Instrumentation/PoisonChecking.cpp [5:5]
+ lib/Transforms/Instrumentation/SanitizerBinaryMetadata.cpp [5:5]
+ lib/Transforms/Instrumentation/SanitizerCoverage.cpp [5:5]
+ lib/Transforms/Instrumentation/ThreadSanitizer.cpp [5:5]
+ lib/Transforms/Instrumentation/ValueProfileCollector.cpp [5:5]
+ lib/Transforms/Instrumentation/ValueProfileCollector.h [5:5]
+ lib/Transforms/Instrumentation/ValueProfilePlugins.inc [5:5]
+ lib/Transforms/ObjCARC/ARCRuntimeEntryPoints.h [5:5]
+ lib/Transforms/ObjCARC/BlotMapVector.h [5:5]
+ lib/Transforms/ObjCARC/DependencyAnalysis.cpp [5:5]
+ lib/Transforms/ObjCARC/DependencyAnalysis.h [5:5]
+ lib/Transforms/ObjCARC/ObjCARC.cpp [5:5]
+ lib/Transforms/ObjCARC/ObjCARC.h [5:5]
+ lib/Transforms/ObjCARC/ObjCARCAPElim.cpp [5:5]
+ lib/Transforms/ObjCARC/ObjCARCContract.cpp [5:5]
+ lib/Transforms/ObjCARC/ObjCARCExpand.cpp [5:5]
+ lib/Transforms/ObjCARC/ObjCARCOpts.cpp [5:5]
+ lib/Transforms/ObjCARC/ProvenanceAnalysis.cpp [5:5]
+ lib/Transforms/ObjCARC/ProvenanceAnalysis.h [5:5]
+ lib/Transforms/ObjCARC/ProvenanceAnalysisEvaluator.cpp [5:5]
+ lib/Transforms/ObjCARC/PtrState.cpp [5:5]
+ lib/Transforms/ObjCARC/PtrState.h [5:5]
+ lib/Transforms/Scalar/ADCE.cpp [5:5]
+ lib/Transforms/Scalar/AlignmentFromAssumptions.cpp [6:6]
+ lib/Transforms/Scalar/AnnotationRemarks.cpp [5:5]
+ lib/Transforms/Scalar/BDCE.cpp [5:5]
+ lib/Transforms/Scalar/CallSiteSplitting.cpp [5:5]
+ lib/Transforms/Scalar/ConstantHoisting.cpp [5:5]
+ lib/Transforms/Scalar/ConstraintElimination.cpp [5:5]
+ lib/Transforms/Scalar/CorrelatedValuePropagation.cpp [5:5]
+ lib/Transforms/Scalar/DCE.cpp [5:5]
+ lib/Transforms/Scalar/DFAJumpThreading.cpp [5:5]
+ lib/Transforms/Scalar/DeadStoreElimination.cpp [5:5]
+ lib/Transforms/Scalar/DivRemPairs.cpp [5:5]
+ lib/Transforms/Scalar/EarlyCSE.cpp [5:5]
+ lib/Transforms/Scalar/FlattenCFGPass.cpp [5:5]
+ lib/Transforms/Scalar/Float2Int.cpp [5:5]
+ lib/Transforms/Scalar/GVN.cpp [5:5]
+ lib/Transforms/Scalar/GVNHoist.cpp [5:5]
+ lib/Transforms/Scalar/GVNSink.cpp [5:5]
+ lib/Transforms/Scalar/GuardWidening.cpp [5:5]
+ lib/Transforms/Scalar/IVUsersPrinter.cpp [5:5]
+ lib/Transforms/Scalar/IndVarSimplify.cpp [5:5]
+ lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp [5:5]
+ lib/Transforms/Scalar/InferAddressSpaces.cpp [5:5]
+ lib/Transforms/Scalar/InstSimplifyPass.cpp [5:5]
+ lib/Transforms/Scalar/JumpThreading.cpp [5:5]
+ lib/Transforms/Scalar/LICM.cpp [5:5]
+ lib/Transforms/Scalar/LoopAccessAnalysisPrinter.cpp [5:5]
+ lib/Transforms/Scalar/LoopBoundSplit.cpp [5:5]
+ lib/Transforms/Scalar/LoopDataPrefetch.cpp [5:5]
+ lib/Transforms/Scalar/LoopDeletion.cpp [5:5]
+ lib/Transforms/Scalar/LoopDistribute.cpp [5:5]
+ lib/Transforms/Scalar/LoopFlatten.cpp [5:5]
+ lib/Transforms/Scalar/LoopFuse.cpp [5:5]
+ lib/Transforms/Scalar/LoopIdiomRecognize.cpp [5:5]
+ lib/Transforms/Scalar/LoopInstSimplify.cpp [5:5]
+ lib/Transforms/Scalar/LoopInterchange.cpp [5:5]
+ lib/Transforms/Scalar/LoopLoadElimination.cpp [5:5]
+ lib/Transforms/Scalar/LoopPassManager.cpp [5:5]
+ lib/Transforms/Scalar/LoopPredication.cpp [5:5]
+ lib/Transforms/Scalar/LoopRerollPass.cpp [5:5]
+ lib/Transforms/Scalar/LoopRotation.cpp [5:5]
+ lib/Transforms/Scalar/LoopSimplifyCFG.cpp [5:5]
+ lib/Transforms/Scalar/LoopSink.cpp [5:5]
+ lib/Transforms/Scalar/LoopStrengthReduce.cpp [5:5]
+ lib/Transforms/Scalar/LoopUnrollAndJamPass.cpp [5:5]
+ lib/Transforms/Scalar/LoopUnrollPass.cpp [5:5]
+ lib/Transforms/Scalar/LoopVersioningLICM.cpp [5:5]
+ lib/Transforms/Scalar/LowerAtomicPass.cpp [5:5]
+ lib/Transforms/Scalar/LowerConstantIntrinsics.cpp [5:5]
+ lib/Transforms/Scalar/LowerExpectIntrinsic.cpp [5:5]
+ lib/Transforms/Scalar/LowerGuardIntrinsic.cpp [5:5]
+ lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp [5:5]
+ lib/Transforms/Scalar/LowerWidenableCondition.cpp [5:5]
+ lib/Transforms/Scalar/MakeGuardsExplicit.cpp [5:5]
+ lib/Transforms/Scalar/MemCpyOptimizer.cpp [5:5]
+ lib/Transforms/Scalar/MergeICmps.cpp [5:5]
+ lib/Transforms/Scalar/MergedLoadStoreMotion.cpp [5:5]
+ lib/Transforms/Scalar/NaryReassociate.cpp [5:5]
+ lib/Transforms/Scalar/NewGVN.cpp [5:5]
+ lib/Transforms/Scalar/PartiallyInlineLibCalls.cpp [5:5]
+ lib/Transforms/Scalar/PlaceSafepoints.cpp [5:5]
+ lib/Transforms/Scalar/Reassociate.cpp [5:5]
+ lib/Transforms/Scalar/Reg2Mem.cpp [5:5]
+ lib/Transforms/Scalar/RewriteStatepointsForGC.cpp [5:5]
+ lib/Transforms/Scalar/SCCP.cpp [5:5]
+ lib/Transforms/Scalar/SROA.cpp [5:5]
+ lib/Transforms/Scalar/Scalar.cpp [5:5]
+ lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp [6:6]
+ lib/Transforms/Scalar/Scalarizer.cpp [5:5]
+ lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp [5:5]
+ lib/Transforms/Scalar/SimpleLoopUnswitch.cpp [5:5]
+ lib/Transforms/Scalar/SimplifyCFGPass.cpp [5:5]
+ lib/Transforms/Scalar/Sink.cpp [5:5]
+ lib/Transforms/Scalar/SpeculativeExecution.cpp [5:5]
+ lib/Transforms/Scalar/StraightLineStrengthReduce.cpp [5:5]
+ lib/Transforms/Scalar/StructurizeCFG.cpp [5:5]
+ lib/Transforms/Scalar/TLSVariableHoist.cpp [5:5]
+ lib/Transforms/Scalar/TailRecursionElimination.cpp [5:5]
+ lib/Transforms/Scalar/WarnMissedTransforms.cpp [5:5]
+ lib/Transforms/Utils/AMDGPUEmitPrintf.cpp [5:5]
+ lib/Transforms/Utils/ASanStackFrameLayout.cpp [5:5]
+ lib/Transforms/Utils/AddDiscriminators.cpp [5:5]
+ lib/Transforms/Utils/AssumeBundleBuilder.cpp [5:5]
+ lib/Transforms/Utils/BasicBlockUtils.cpp [5:5]
+ lib/Transforms/Utils/BreakCriticalEdges.cpp [5:5]
+ lib/Transforms/Utils/BuildLibCalls.cpp [5:5]
+ lib/Transforms/Utils/BypassSlowDivision.cpp [5:5]
+ lib/Transforms/Utils/CallGraphUpdater.cpp [5:5]
+ lib/Transforms/Utils/CallPromotionUtils.cpp [5:5]
+ lib/Transforms/Utils/CanonicalizeAliases.cpp [5:5]
+ lib/Transforms/Utils/CanonicalizeFreezeInLoops.cpp [5:5]
+ lib/Transforms/Utils/CloneFunction.cpp [5:5]
+ lib/Transforms/Utils/CloneModule.cpp [5:5]
+ lib/Transforms/Utils/CodeExtractor.cpp [5:5]
+ lib/Transforms/Utils/CodeLayout.cpp [5:5]
+ lib/Transforms/Utils/CodeMoverUtils.cpp [5:5]
+ lib/Transforms/Utils/CtorUtils.cpp [5:5]
+ lib/Transforms/Utils/Debugify.cpp [5:5]
+ lib/Transforms/Utils/DemoteRegToStack.cpp [5:5]
+ lib/Transforms/Utils/EntryExitInstrumenter.cpp [5:5]
+ lib/Transforms/Utils/EscapeEnumerator.cpp [5:5]
+ lib/Transforms/Utils/Evaluator.cpp [5:5]
+ lib/Transforms/Utils/FixIrreducible.cpp [5:5]
+ lib/Transforms/Utils/FlattenCFG.cpp [5:5]
+ lib/Transforms/Utils/FunctionComparator.cpp [5:5]
+ lib/Transforms/Utils/FunctionImportUtils.cpp [5:5]
+ lib/Transforms/Utils/GlobalStatus.cpp [5:5]
+ lib/Transforms/Utils/GuardUtils.cpp [5:5]
+ lib/Transforms/Utils/HelloWorld.cpp [5:5]
+ lib/Transforms/Utils/InjectTLIMappings.cpp [5:5]
+ lib/Transforms/Utils/InlineFunction.cpp [5:5]
+ lib/Transforms/Utils/InstructionNamer.cpp [5:5]
+ lib/Transforms/Utils/IntegerDivision.cpp [5:5]
+ lib/Transforms/Utils/LCSSA.cpp [5:5]
+ lib/Transforms/Utils/LibCallsShrinkWrap.cpp [5:5]
+ lib/Transforms/Utils/Local.cpp [5:5]
+ lib/Transforms/Utils/LoopPeel.cpp [5:5]
+ lib/Transforms/Utils/LoopRotationUtils.cpp [5:5]
+ lib/Transforms/Utils/LoopSimplify.cpp [5:5]
+ lib/Transforms/Utils/LoopUnroll.cpp [5:5]
+ lib/Transforms/Utils/LoopUnrollAndJam.cpp [5:5]
+ lib/Transforms/Utils/LoopUnrollRuntime.cpp [5:5]
+ lib/Transforms/Utils/LoopUtils.cpp [5:5]
+ lib/Transforms/Utils/LoopVersioning.cpp [5:5]
+ lib/Transforms/Utils/LowerAtomic.cpp [5:5]
+ lib/Transforms/Utils/LowerGlobalDtors.cpp [5:5]
+ lib/Transforms/Utils/LowerIFunc.cpp [5:5]
+ lib/Transforms/Utils/LowerInvoke.cpp [5:5]
+ lib/Transforms/Utils/LowerMemIntrinsics.cpp [5:5]
+ lib/Transforms/Utils/LowerSwitch.cpp [5:5]
+ lib/Transforms/Utils/MatrixUtils.cpp [5:5]
+ lib/Transforms/Utils/Mem2Reg.cpp [5:5]
+ lib/Transforms/Utils/MemoryOpRemark.cpp [5:5]
+ lib/Transforms/Utils/MemoryTaggingSupport.cpp [4:4]
+ lib/Transforms/Utils/MetaRenamer.cpp [5:5]
+ lib/Transforms/Utils/MisExpect.cpp [5:5]
+ lib/Transforms/Utils/ModuleUtils.cpp [5:5]
+ lib/Transforms/Utils/NameAnonGlobals.cpp [5:5]
+ lib/Transforms/Utils/PredicateInfo.cpp [5:5]
+ lib/Transforms/Utils/PromoteMemoryToRegister.cpp [5:5]
+ lib/Transforms/Utils/RelLookupTableConverter.cpp [5:5]
+ lib/Transforms/Utils/SCCPSolver.cpp [5:5]
+ lib/Transforms/Utils/SSAUpdater.cpp [5:5]
+ lib/Transforms/Utils/SSAUpdaterBulk.cpp [5:5]
+ lib/Transforms/Utils/SampleProfileInference.cpp [5:5]
+ lib/Transforms/Utils/SampleProfileLoaderBaseUtil.cpp [5:5]
+ lib/Transforms/Utils/SanitizerStats.cpp [5:5]
+ lib/Transforms/Utils/ScalarEvolutionExpander.cpp [5:5]
+ lib/Transforms/Utils/SimplifyCFG.cpp [5:5]
+ lib/Transforms/Utils/SimplifyIndVar.cpp [5:5]
+ lib/Transforms/Utils/SimplifyLibCalls.cpp [5:5]
+ lib/Transforms/Utils/SizeOpts.cpp [5:5]
+ lib/Transforms/Utils/SplitModule.cpp [5:5]
+ lib/Transforms/Utils/StripGCRelocates.cpp [5:5]
+ lib/Transforms/Utils/StripNonLineTableDebugInfo.cpp [5:5]
+ lib/Transforms/Utils/SymbolRewriter.cpp [5:5]
+ lib/Transforms/Utils/UnifyFunctionExitNodes.cpp [5:5]
+ lib/Transforms/Utils/UnifyLoopExits.cpp [5:5]
+ lib/Transforms/Utils/Utils.cpp [5:5]
+ lib/Transforms/Utils/ValueMapper.cpp [5:5]
+ lib/Transforms/Vectorize/LoadStoreVectorizer.cpp [5:5]
+ lib/Transforms/Vectorize/LoopVectorizationLegality.cpp [5:5]
+ lib/Transforms/Vectorize/LoopVectorizationPlanner.h [5:5]
+ lib/Transforms/Vectorize/LoopVectorize.cpp [5:5]
+ lib/Transforms/Vectorize/SLPVectorizer.cpp [5:5]
+ lib/Transforms/Vectorize/VPRecipeBuilder.h [5:5]
+ lib/Transforms/Vectorize/VPlan.cpp [5:5]
+ lib/Transforms/Vectorize/VPlan.h [5:5]
+ lib/Transforms/Vectorize/VPlanCFG.h [5:5]
+ lib/Transforms/Vectorize/VPlanDominatorTree.h [5:5]
+ lib/Transforms/Vectorize/VPlanHCFGBuilder.cpp [5:5]
+ lib/Transforms/Vectorize/VPlanHCFGBuilder.h [5:5]
+ lib/Transforms/Vectorize/VPlanRecipes.cpp [5:5]
+ lib/Transforms/Vectorize/VPlanSLP.cpp [5:5]
+ lib/Transforms/Vectorize/VPlanTransforms.cpp [5:5]
+ lib/Transforms/Vectorize/VPlanTransforms.h [5:5]
+ lib/Transforms/Vectorize/VPlanValue.h [5:5]
+ lib/Transforms/Vectorize/VPlanVerifier.cpp [5:5]
+ lib/Transforms/Vectorize/VPlanVerifier.h [5:5]
+ lib/Transforms/Vectorize/VectorCombine.cpp [5:5]
+ lib/Transforms/Vectorize/Vectorize.cpp [5:5]
+ lib/WindowsDriver/MSVCPaths.cpp [5:5]
+ lib/WindowsManifest/WindowsManifestMerger.cpp [5:5]
+ lib/XRay/BlockIndexer.cpp [5:5]
+ lib/XRay/BlockPrinter.cpp [5:5]
+ lib/XRay/BlockVerifier.cpp [5:5]
+ lib/XRay/FDRRecordProducer.cpp [5:5]
+ lib/XRay/FDRRecords.cpp [5:5]
+ lib/XRay/FDRTraceExpander.cpp [5:5]
+ lib/XRay/FDRTraceWriter.cpp [5:5]
+ lib/XRay/FileHeaderReader.cpp [5:5]
+ lib/XRay/InstrumentationMap.cpp [5:5]
+ lib/XRay/LogBuilderConsumer.cpp [5:5]
+ lib/XRay/Profile.cpp [5:5]
+ lib/XRay/RecordInitializer.cpp [5:5]
+ lib/XRay/RecordPrinter.cpp [5:5]
+ lib/XRay/Trace.cpp [5:5]
+ tools/bugpoint/BugDriver.cpp [5:5]
+ tools/bugpoint/BugDriver.h [5:5]
+ tools/bugpoint/CrashDebugger.cpp [5:5]
+ tools/bugpoint/ExecutionDriver.cpp [5:5]
+ tools/bugpoint/ExtractFunction.cpp [5:5]
+ tools/bugpoint/FindBugs.cpp [5:5]
+ tools/bugpoint/ListReducer.h [5:5]
+ tools/bugpoint/Miscompilation.cpp [5:5]
+ tools/bugpoint/OptimizerDriver.cpp [5:5]
+ tools/bugpoint/ToolRunner.cpp [5:5]
+ tools/bugpoint/ToolRunner.h [5:5]
+ tools/bugpoint/bugpoint.cpp [5:5]
+ tools/dsymutil/BinaryHolder.cpp [5:5]
+ tools/dsymutil/BinaryHolder.h [5:5]
+ tools/dsymutil/CFBundle.cpp [5:5]
+ tools/dsymutil/CFBundle.h [5:5]
+ tools/dsymutil/DebugMap.cpp [5:5]
+ tools/dsymutil/DebugMap.h [5:5]
+ tools/dsymutil/DwarfLinkerForBinary.cpp [5:5]
+ tools/dsymutil/DwarfLinkerForBinary.h [5:5]
+ tools/dsymutil/LinkUtils.h [5:5]
+ tools/dsymutil/MachODebugMapParser.cpp [5:5]
+ tools/dsymutil/MachOUtils.cpp [5:5]
+ tools/dsymutil/MachOUtils.h [5:5]
+ tools/dsymutil/Reproducer.cpp [5:5]
+ tools/dsymutil/Reproducer.h [5:5]
+ tools/dsymutil/SymbolMap.cpp [5:5]
+ tools/dsymutil/SymbolMap.h [5:5]
+ tools/dsymutil/dsymutil.cpp [5:5]
+ tools/dsymutil/dsymutil.h [5:5]
+ tools/gold/gold-plugin.cpp [5:5]
+ tools/llc/llc.cpp [5:5]
+ tools/lli/ChildTarget/ChildTarget.cpp [5:5]
+ tools/lli/ExecutionUtils.cpp [5:5]
+ tools/lli/ExecutionUtils.h [5:5]
+ tools/lli/ForwardingMemoryManager.h [5:5]
+ tools/lli/lli.cpp [5:5]
+ tools/llvm-ar/llvm-ar-driver.cpp [5:5]
+ tools/llvm-ar/llvm-ar.cpp [5:5]
+ tools/llvm-as/llvm-as.cpp [5:5]
+ tools/llvm-bcanalyzer/llvm-bcanalyzer.cpp [5:5]
+ tools/llvm-cat/llvm-cat.cpp [5:5]
+ tools/llvm-cfi-verify/lib/FileAnalysis.cpp [5:5]
+ tools/llvm-cfi-verify/lib/FileAnalysis.h [5:5]
+ tools/llvm-cfi-verify/lib/GraphBuilder.cpp [5:5]
+ tools/llvm-cfi-verify/lib/GraphBuilder.h [5:5]
+ tools/llvm-cfi-verify/llvm-cfi-verify.cpp [5:5]
+ tools/llvm-config/BuildVariables.inc [5:5]
+ tools/llvm-config/llvm-config.cpp [5:5]
+ tools/llvm-cov/CodeCoverage.cpp [5:5]
+ tools/llvm-cov/CoverageExporter.h [5:5]
+ tools/llvm-cov/CoverageExporterJson.cpp [5:5]
+ tools/llvm-cov/CoverageExporterJson.h [5:5]
+ tools/llvm-cov/CoverageExporterLcov.cpp [5:5]
+ tools/llvm-cov/CoverageExporterLcov.h [5:5]
+ tools/llvm-cov/CoverageFilters.cpp [5:5]
+ tools/llvm-cov/CoverageFilters.h [5:5]
+ tools/llvm-cov/CoverageReport.cpp [5:5]
+ tools/llvm-cov/CoverageReport.h [5:5]
+ tools/llvm-cov/CoverageSummaryInfo.cpp [5:5]
+ tools/llvm-cov/CoverageSummaryInfo.h [5:5]
+ tools/llvm-cov/CoverageViewOptions.h [5:5]
+ tools/llvm-cov/RenderingSupport.h [5:5]
+ tools/llvm-cov/SourceCoverageView.cpp [5:5]
+ tools/llvm-cov/SourceCoverageView.h [5:5]
+ tools/llvm-cov/SourceCoverageViewHTML.cpp [5:5]
+ tools/llvm-cov/SourceCoverageViewHTML.h [5:5]
+ tools/llvm-cov/SourceCoverageViewText.cpp [5:5]
+ tools/llvm-cov/SourceCoverageViewText.h [5:5]
+ tools/llvm-cov/TestingSupport.cpp [5:5]
+ tools/llvm-cov/gcov.cpp [5:5]
+ tools/llvm-cov/llvm-cov.cpp [5:5]
+ tools/llvm-cvtres/llvm-cvtres.cpp [5:5]
+ tools/llvm-cxxdump/Error.cpp [5:5]
+ tools/llvm-cxxdump/Error.h [5:5]
+ tools/llvm-cxxdump/llvm-cxxdump.cpp [5:5]
+ tools/llvm-cxxdump/llvm-cxxdump.h [5:5]
+ tools/llvm-cxxfilt/llvm-cxxfilt-driver.cpp [5:5]
+ tools/llvm-cxxfilt/llvm-cxxfilt.cpp [5:5]
+ tools/llvm-cxxmap/llvm-cxxmap.cpp [5:5]
+ tools/llvm-diff/lib/DiffConsumer.cpp [5:5]
+ tools/llvm-diff/lib/DiffConsumer.h [5:5]
+ tools/llvm-diff/lib/DiffLog.cpp [5:5]
+ tools/llvm-diff/lib/DiffLog.h [5:5]
+ tools/llvm-diff/lib/DifferenceEngine.cpp [5:5]
+ tools/llvm-diff/lib/DifferenceEngine.h [5:5]
+ tools/llvm-diff/llvm-diff.cpp [5:5]
+ tools/llvm-dis/llvm-dis.cpp [5:5]
+ tools/llvm-dwarfdump/SectionSizes.cpp [5:5]
+ tools/llvm-dwarfdump/Statistics.cpp [5:5]
+ tools/llvm-dwarfdump/llvm-dwarfdump.cpp [5:5]
+ tools/llvm-dwarfdump/llvm-dwarfdump.h [5:5]
+ tools/llvm-dwp/llvm-dwp.cpp [5:5]
+ tools/llvm-exegesis/lib/AArch64/Target.cpp [5:5]
+ tools/llvm-exegesis/lib/Analysis.cpp [5:5]
+ tools/llvm-exegesis/lib/Analysis.h [5:5]
+ tools/llvm-exegesis/lib/Assembler.cpp [5:5]
+ tools/llvm-exegesis/lib/Assembler.h [5:5]
+ tools/llvm-exegesis/lib/BenchmarkCode.h [5:5]
+ tools/llvm-exegesis/lib/BenchmarkResult.cpp [5:5]
+ tools/llvm-exegesis/lib/BenchmarkResult.h [5:5]
+ tools/llvm-exegesis/lib/BenchmarkRunner.cpp [5:5]
+ tools/llvm-exegesis/lib/BenchmarkRunner.h [5:5]
+ tools/llvm-exegesis/lib/Clustering.cpp [5:5]
+ tools/llvm-exegesis/lib/Clustering.h [5:5]
+ tools/llvm-exegesis/lib/CodeTemplate.cpp [5:5]
+ tools/llvm-exegesis/lib/CodeTemplate.h [5:5]
+ tools/llvm-exegesis/lib/Error.cpp [5:5]
+ tools/llvm-exegesis/lib/Error.h [5:5]
+ tools/llvm-exegesis/lib/LatencyBenchmarkRunner.cpp [5:5]
+ tools/llvm-exegesis/lib/LatencyBenchmarkRunner.h [5:5]
+ tools/llvm-exegesis/lib/LlvmState.cpp [5:5]
+ tools/llvm-exegesis/lib/LlvmState.h [5:5]
+ tools/llvm-exegesis/lib/MCInstrDescView.cpp [5:5]
+ tools/llvm-exegesis/lib/MCInstrDescView.h [5:5]
+ tools/llvm-exegesis/lib/ParallelSnippetGenerator.cpp [5:5]
+ tools/llvm-exegesis/lib/ParallelSnippetGenerator.h [5:5]
+ tools/llvm-exegesis/lib/PerfHelper.cpp [5:5]
+ tools/llvm-exegesis/lib/PerfHelper.h [5:5]
+ tools/llvm-exegesis/lib/PowerPC/Target.cpp [5:5]
+ tools/llvm-exegesis/lib/ProgressMeter.h [5:5]
+ tools/llvm-exegesis/lib/RegisterAliasing.cpp [5:5]
+ tools/llvm-exegesis/lib/RegisterAliasing.h [5:5]
+ tools/llvm-exegesis/lib/RegisterValue.cpp [5:5]
+ tools/llvm-exegesis/lib/RegisterValue.h [5:5]
+ tools/llvm-exegesis/lib/SchedClassResolution.cpp [5:5]
+ tools/llvm-exegesis/lib/SchedClassResolution.h [5:5]
+ tools/llvm-exegesis/lib/SerialSnippetGenerator.cpp [5:5]
+ tools/llvm-exegesis/lib/SerialSnippetGenerator.h [5:5]
+ tools/llvm-exegesis/lib/SnippetFile.cpp [5:5]
+ tools/llvm-exegesis/lib/SnippetFile.h [5:5]
+ tools/llvm-exegesis/lib/SnippetGenerator.cpp [5:5]
+ tools/llvm-exegesis/lib/SnippetGenerator.h [5:5]
+ tools/llvm-exegesis/lib/SnippetRepetitor.cpp [5:5]
+ tools/llvm-exegesis/lib/SnippetRepetitor.h [5:5]
+ tools/llvm-exegesis/lib/Target.cpp [5:5]
+ tools/llvm-exegesis/lib/Target.h [5:5]
+ tools/llvm-exegesis/lib/TargetSelect.h [5:5]
+ tools/llvm-exegesis/lib/UopsBenchmarkRunner.cpp [5:5]
+ tools/llvm-exegesis/lib/UopsBenchmarkRunner.h [5:5]
+ tools/llvm-exegesis/lib/X86/Target.cpp [5:5]
+ tools/llvm-exegesis/lib/X86/X86Counter.cpp [5:5]
+ tools/llvm-exegesis/lib/X86/X86Counter.h [5:5]
+ tools/llvm-exegesis/llvm-exegesis.cpp [5:5]
+ tools/llvm-extract/llvm-extract.cpp [5:5]
+ tools/llvm-gsymutil/llvm-gsymutil.cpp [5:5]
+ tools/llvm-ifs/ErrorCollector.cpp [5:5]
+ tools/llvm-ifs/ErrorCollector.h [5:5]
+ tools/llvm-ifs/llvm-ifs-driver.cpp [5:5]
+ tools/llvm-ifs/llvm-ifs.cpp [5:5]
+ tools/llvm-jitlink/llvm-jitlink-coff.cpp [5:5]
+ tools/llvm-jitlink/llvm-jitlink-elf.cpp [5:5]
+ tools/llvm-jitlink/llvm-jitlink-executor/llvm-jitlink-executor.cpp [5:5]
+ tools/llvm-jitlink/llvm-jitlink-macho.cpp [5:5]
+ tools/llvm-jitlink/llvm-jitlink.cpp [5:5]
+ tools/llvm-jitlink/llvm-jitlink.h [5:5]
+ tools/llvm-libtool-darwin/DependencyInfo.h [5:5]
+ tools/llvm-libtool-darwin/llvm-libtool-darwin.cpp [5:5]
+ tools/llvm-link/llvm-link.cpp [5:5]
+ tools/llvm-lipo/llvm-lipo-driver.cpp [5:5]
+ tools/llvm-lipo/llvm-lipo.cpp [5:5]
+ tools/llvm-lto/llvm-lto.cpp [5:5]
+ tools/llvm-lto2/llvm-lto2.cpp [5:5]
+ tools/llvm-mc/Disassembler.cpp [5:5]
+ tools/llvm-mc/Disassembler.h [5:5]
+ tools/llvm-mc/llvm-mc.cpp [5:5]
+ tools/llvm-mca/CodeRegion.cpp [5:5]
+ tools/llvm-mca/CodeRegion.h [5:5]
+ tools/llvm-mca/CodeRegionGenerator.cpp [5:5]
+ tools/llvm-mca/CodeRegionGenerator.h [5:5]
+ tools/llvm-mca/PipelinePrinter.cpp [5:5]
+ tools/llvm-mca/PipelinePrinter.h [5:5]
+ tools/llvm-mca/Views/BottleneckAnalysis.cpp [5:5]
+ tools/llvm-mca/Views/BottleneckAnalysis.h [5:5]
+ tools/llvm-mca/Views/DispatchStatistics.cpp [5:5]
+ tools/llvm-mca/Views/DispatchStatistics.h [5:5]
+ tools/llvm-mca/Views/InstructionInfoView.cpp [5:5]
+ tools/llvm-mca/Views/InstructionInfoView.h [5:5]
+ tools/llvm-mca/Views/InstructionView.cpp [5:5]
+ tools/llvm-mca/Views/InstructionView.h [5:5]
+ tools/llvm-mca/Views/RegisterFileStatistics.cpp [5:5]
+ tools/llvm-mca/Views/RegisterFileStatistics.h [5:5]
+ tools/llvm-mca/Views/ResourcePressureView.cpp [5:5]
+ tools/llvm-mca/Views/ResourcePressureView.h [5:5]
+ tools/llvm-mca/Views/RetireControlUnitStatistics.cpp [5:5]
+ tools/llvm-mca/Views/RetireControlUnitStatistics.h [5:5]
+ tools/llvm-mca/Views/SchedulerStatistics.cpp [5:5]
+ tools/llvm-mca/Views/SchedulerStatistics.h [5:5]
+ tools/llvm-mca/Views/SummaryView.cpp [5:5]
+ tools/llvm-mca/Views/SummaryView.h [5:5]
+ tools/llvm-mca/Views/TimelineView.cpp [5:5]
+ tools/llvm-mca/Views/TimelineView.h [5:5]
+ tools/llvm-mca/llvm-mca.cpp [5:5]
+ tools/llvm-ml/Disassembler.cpp [5:5]
+ tools/llvm-ml/Disassembler.h [5:5]
+ tools/llvm-ml/llvm-ml.cpp [5:5]
+ tools/llvm-modextract/llvm-modextract.cpp [5:5]
+ tools/llvm-mt/llvm-mt-driver.cpp [5:5]
+ tools/llvm-mt/llvm-mt.cpp [5:5]
+ tools/llvm-nm/llvm-nm-driver.cpp [5:5]
+ tools/llvm-nm/llvm-nm.cpp [5:5]
+ tools/llvm-objcopy/BitcodeStripOpts.td [5:5]
+ tools/llvm-objcopy/InstallNameToolOpts.td [5:5]
+ tools/llvm-objcopy/ObjcopyOptions.cpp [5:5]
+ tools/llvm-objcopy/ObjcopyOptions.h [5:5]
+ tools/llvm-objcopy/llvm-objcopy-driver.cpp [5:5]
+ tools/llvm-objcopy/llvm-objcopy.cpp [5:5]
+ tools/llvm-objdump/COFFDump.cpp [5:5]
+ tools/llvm-objdump/COFFDump.h [5:5]
+ tools/llvm-objdump/ELFDump.cpp [5:5]
+ tools/llvm-objdump/ELFDump.h [5:5]
+ tools/llvm-objdump/MachODump.cpp [5:5]
+ tools/llvm-objdump/MachODump.h [5:5]
+ tools/llvm-objdump/OffloadDump.cpp [5:5]
+ tools/llvm-objdump/OffloadDump.h [5:5]
+ tools/llvm-objdump/SourcePrinter.cpp [5:5]
+ tools/llvm-objdump/SourcePrinter.h [5:5]
+ tools/llvm-objdump/WasmDump.cpp [5:5]
+ tools/llvm-objdump/WasmDump.h [5:5]
+ tools/llvm-objdump/XCOFFDump.cpp [5:5]
+ tools/llvm-objdump/XCOFFDump.h [5:5]
+ tools/llvm-objdump/llvm-objdump.cpp [5:5]
+ tools/llvm-objdump/llvm-objdump.h [5:5]
+ tools/llvm-opt-report/OptReport.cpp [5:5]
+ tools/llvm-pdbutil/BytesOutputStyle.cpp [5:5]
+ tools/llvm-pdbutil/BytesOutputStyle.h [5:5]
+ tools/llvm-pdbutil/DumpOutputStyle.cpp [5:5]
+ tools/llvm-pdbutil/DumpOutputStyle.h [5:5]
+ tools/llvm-pdbutil/ExplainOutputStyle.cpp [5:5]
+ tools/llvm-pdbutil/ExplainOutputStyle.h [5:5]
+ tools/llvm-pdbutil/MinimalSymbolDumper.cpp [5:5]
+ tools/llvm-pdbutil/MinimalSymbolDumper.h [5:5]
+ tools/llvm-pdbutil/MinimalTypeDumper.cpp [5:5]
+ tools/llvm-pdbutil/MinimalTypeDumper.h [5:5]
+ tools/llvm-pdbutil/OutputStyle.h [5:5]
+ tools/llvm-pdbutil/PdbYaml.cpp [5:5]
+ tools/llvm-pdbutil/PdbYaml.h [5:5]
+ tools/llvm-pdbutil/PrettyBuiltinDumper.cpp [5:5]
+ tools/llvm-pdbutil/PrettyBuiltinDumper.h [5:5]
+ tools/llvm-pdbutil/PrettyClassDefinitionDumper.cpp [5:5]
+ tools/llvm-pdbutil/PrettyClassDefinitionDumper.h [5:5]
+ tools/llvm-pdbutil/PrettyClassLayoutGraphicalDumper.cpp [5:5]
+ tools/llvm-pdbutil/PrettyClassLayoutGraphicalDumper.h [5:5]
+ tools/llvm-pdbutil/PrettyCompilandDumper.cpp [5:5]
+ tools/llvm-pdbutil/PrettyCompilandDumper.h [5:5]
+ tools/llvm-pdbutil/PrettyEnumDumper.cpp [5:5]
+ tools/llvm-pdbutil/PrettyEnumDumper.h [5:5]
+ tools/llvm-pdbutil/PrettyExternalSymbolDumper.cpp [5:5]
+ tools/llvm-pdbutil/PrettyExternalSymbolDumper.h [5:5]
+ tools/llvm-pdbutil/PrettyFunctionDumper.cpp [5:5]
+ tools/llvm-pdbutil/PrettyFunctionDumper.h [5:5]
+ tools/llvm-pdbutil/PrettyTypeDumper.cpp [5:5]
+ tools/llvm-pdbutil/PrettyTypeDumper.h [5:5]
+ tools/llvm-pdbutil/PrettyTypedefDumper.cpp [5:5]
+ tools/llvm-pdbutil/PrettyTypedefDumper.h [5:5]
+ tools/llvm-pdbutil/PrettyVariableDumper.cpp [5:5]
+ tools/llvm-pdbutil/PrettyVariableDumper.h [5:5]
+ tools/llvm-pdbutil/StreamUtil.cpp [5:5]
+ tools/llvm-pdbutil/StreamUtil.h [5:5]
+ tools/llvm-pdbutil/TypeReferenceTracker.cpp [5:5]
+ tools/llvm-pdbutil/TypeReferenceTracker.h [5:5]
+ tools/llvm-pdbutil/YAMLOutputStyle.cpp [5:5]
+ tools/llvm-pdbutil/YAMLOutputStyle.h [5:5]
+ tools/llvm-pdbutil/llvm-pdbutil.cpp [5:5]
+ tools/llvm-pdbutil/llvm-pdbutil.h [5:5]
+ tools/llvm-profdata/llvm-profdata-driver.cpp [5:5]
+ tools/llvm-profdata/llvm-profdata.cpp [5:5]
+ tools/llvm-profgen/CSPreInliner.cpp [5:5]
+ tools/llvm-profgen/CSPreInliner.h [5:5]
+ tools/llvm-profgen/CallContext.h [5:5]
+ tools/llvm-profgen/ErrorHandling.h [5:5]
+ tools/llvm-profgen/MissingFrameInferrer.cpp [5:5]
+ tools/llvm-profgen/MissingFrameInferrer.h [5:5]
+ tools/llvm-profgen/PerfReader.cpp [5:5]
+ tools/llvm-profgen/PerfReader.h [5:5]
+ tools/llvm-profgen/ProfileGenerator.cpp [5:5]
+ tools/llvm-profgen/ProfileGenerator.h [5:5]
+ tools/llvm-profgen/ProfiledBinary.cpp [5:5]
+ tools/llvm-profgen/ProfiledBinary.h [5:5]
+ tools/llvm-profgen/llvm-profgen.cpp [5:5]
+ tools/llvm-rc/ResourceFileWriter.cpp [5:5]
+ tools/llvm-rc/ResourceFileWriter.h [5:5]
+ tools/llvm-rc/ResourceScriptCppFilter.cpp [5:5]
+ tools/llvm-rc/ResourceScriptCppFilter.h [5:5]
+ tools/llvm-rc/ResourceScriptParser.cpp [5:5]
+ tools/llvm-rc/ResourceScriptParser.h [5:5]
+ tools/llvm-rc/ResourceScriptStmt.cpp [4:4]
+ tools/llvm-rc/ResourceScriptStmt.h [5:5]
+ tools/llvm-rc/ResourceScriptToken.cpp [5:5]
+ tools/llvm-rc/ResourceScriptToken.h [5:5]
+ tools/llvm-rc/ResourceScriptTokenList.def [5:5]
+ tools/llvm-rc/ResourceVisitor.h [5:5]
+ tools/llvm-rc/llvm-rc-driver.cpp [5:5]
+ tools/llvm-rc/llvm-rc.cpp [5:5]
+ tools/llvm-readobj/ARMEHABIPrinter.h [5:5]
+ tools/llvm-readobj/ARMWinEHPrinter.cpp [5:5]
+ tools/llvm-readobj/ARMWinEHPrinter.h [5:5]
+ tools/llvm-readobj/COFFDumper.cpp [5:5]
+ tools/llvm-readobj/COFFImportDumper.cpp [5:5]
+ tools/llvm-readobj/DwarfCFIEHPrinter.h [5:5]
+ tools/llvm-readobj/ELFDumper.cpp [5:5]
+ tools/llvm-readobj/MachODumper.cpp [5:5]
+ tools/llvm-readobj/ObjDumper.cpp [5:5]
+ tools/llvm-readobj/ObjDumper.h [5:5]
+ tools/llvm-readobj/StackMapPrinter.h [5:5]
+ tools/llvm-readobj/WasmDumper.cpp [5:5]
+ tools/llvm-readobj/Win64EHDumper.cpp [5:5]
+ tools/llvm-readobj/Win64EHDumper.h [5:5]
+ tools/llvm-readobj/WindowsResourceDumper.cpp [5:5]
+ tools/llvm-readobj/WindowsResourceDumper.h [5:5]
+ tools/llvm-readobj/XCOFFDumper.cpp [5:5]
+ tools/llvm-readobj/llvm-readobj-driver.cpp [5:5]
+ tools/llvm-readobj/llvm-readobj.cpp [5:5]
+ tools/llvm-readobj/llvm-readobj.h [5:5]
+ tools/llvm-reduce/DeltaManager.cpp [5:5]
+ tools/llvm-reduce/DeltaManager.h [5:5]
+ tools/llvm-reduce/ReducerWorkItem.cpp [5:5]
+ tools/llvm-reduce/ReducerWorkItem.h [5:5]
+ tools/llvm-reduce/TestRunner.cpp [5:5]
+ tools/llvm-reduce/TestRunner.h [5:5]
+ tools/llvm-reduce/deltas/Delta.cpp [5:5]
+ tools/llvm-reduce/deltas/Delta.h [5:5]
+ tools/llvm-reduce/deltas/ReduceAliases.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceAliases.h [5:5]
+ tools/llvm-reduce/deltas/ReduceArguments.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceArguments.h [5:5]
+ tools/llvm-reduce/deltas/ReduceAttributes.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceAttributes.h [5:5]
+ tools/llvm-reduce/deltas/ReduceBasicBlocks.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceBasicBlocks.h [5:5]
+ tools/llvm-reduce/deltas/ReduceDIMetadata.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceDIMetadata.h [5:5]
+ tools/llvm-reduce/deltas/ReduceFunctionBodies.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceFunctionBodies.h [5:5]
+ tools/llvm-reduce/deltas/ReduceFunctions.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceFunctions.h [5:5]
+ tools/llvm-reduce/deltas/ReduceGlobalObjects.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceGlobalObjects.h [5:5]
+ tools/llvm-reduce/deltas/ReduceGlobalValues.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceGlobalValues.h [5:5]
+ tools/llvm-reduce/deltas/ReduceGlobalVarInitializers.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceGlobalVarInitializers.h [5:5]
+ tools/llvm-reduce/deltas/ReduceGlobalVars.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceGlobalVars.h [5:5]
+ tools/llvm-reduce/deltas/ReduceIRReferences.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceIRReferences.h [5:5]
+ tools/llvm-reduce/deltas/ReduceInstructionFlags.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceInstructionFlags.h [5:5]
+ tools/llvm-reduce/deltas/ReduceInstructionFlagsMIR.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceInstructionFlagsMIR.h [5:5]
+ tools/llvm-reduce/deltas/ReduceInstructions.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceInstructions.h [5:5]
+ tools/llvm-reduce/deltas/ReduceInstructionsMIR.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceInstructionsMIR.h [5:5]
+ tools/llvm-reduce/deltas/ReduceInvokes.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceInvokes.h [5:5]
+ tools/llvm-reduce/deltas/ReduceMemoryOperations.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceMemoryOperations.h [5:5]
+ tools/llvm-reduce/deltas/ReduceMetadata.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceMetadata.h [5:5]
+ tools/llvm-reduce/deltas/ReduceModuleData.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceModuleData.h [5:5]
+ tools/llvm-reduce/deltas/ReduceOpcodes.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceOpcodes.h [5:5]
+ tools/llvm-reduce/deltas/ReduceOperandBundles.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceOperandBundles.h [5:5]
+ tools/llvm-reduce/deltas/ReduceOperands.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceOperands.h [5:5]
+ tools/llvm-reduce/deltas/ReduceOperandsSkip.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceOperandsSkip.h [5:5]
+ tools/llvm-reduce/deltas/ReduceOperandsToArgs.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceOperandsToArgs.h [5:5]
+ tools/llvm-reduce/deltas/ReduceRegisterDefs.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceRegisterDefs.h [5:5]
+ tools/llvm-reduce/deltas/ReduceRegisterMasks.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceRegisterMasks.h [5:5]
+ tools/llvm-reduce/deltas/ReduceRegisterUses.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceRegisterUses.h [5:5]
+ tools/llvm-reduce/deltas/ReduceSpecialGlobals.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceSpecialGlobals.h [5:5]
+ tools/llvm-reduce/deltas/ReduceUsingSimplifyCFG.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceUsingSimplifyCFG.h [5:5]
+ tools/llvm-reduce/deltas/ReduceVirtualRegisters.cpp [5:5]
+ tools/llvm-reduce/deltas/ReduceVirtualRegisters.h [5:5]
+ tools/llvm-reduce/deltas/RunIRPasses.cpp [5:5]
+ tools/llvm-reduce/deltas/RunIRPasses.h [5:5]
+ tools/llvm-reduce/deltas/SimplifyInstructions.cpp [5:5]
+ tools/llvm-reduce/deltas/SimplifyInstructions.h [5:5]
+ tools/llvm-reduce/deltas/StripDebugInfo.cpp [5:5]
+ tools/llvm-reduce/deltas/StripDebugInfo.h [5:5]
+ tools/llvm-reduce/deltas/Utils.cpp [5:5]
+ tools/llvm-reduce/deltas/Utils.h [5:5]
+ tools/llvm-reduce/llvm-reduce.cpp [5:5]
+ tools/llvm-rtdyld/llvm-rtdyld.cpp [5:5]
+ tools/llvm-size/llvm-size-driver.cpp [5:5]
+ tools/llvm-size/llvm-size.cpp [5:5]
+ tools/llvm-split/llvm-split.cpp [5:5]
+ tools/llvm-stress/llvm-stress.cpp [5:5]
+ tools/llvm-strings/llvm-strings.cpp [5:5]
+ tools/llvm-symbolizer/llvm-symbolizer.cpp [5:5]
+ tools/llvm-undname/llvm-undname.cpp [6:6]
+ tools/llvm-xray/func-id-helper.cpp [5:5]
+ tools/llvm-xray/func-id-helper.h [5:5]
+ tools/llvm-xray/llvm-xray.cpp [5:5]
+ tools/llvm-xray/trie-node.h [5:5]
+ tools/llvm-xray/xray-account.cpp [5:5]
+ tools/llvm-xray/xray-account.h [5:5]
+ tools/llvm-xray/xray-color-helper.cpp [5:5]
+ tools/llvm-xray/xray-color-helper.h [5:5]
+ tools/llvm-xray/xray-converter.cpp [5:5]
+ tools/llvm-xray/xray-converter.h [5:5]
+ tools/llvm-xray/xray-extract.cpp [5:5]
+ tools/llvm-xray/xray-fdr-dump.cpp [5:5]
+ tools/llvm-xray/xray-graph-diff.cpp [5:5]
+ tools/llvm-xray/xray-graph-diff.h [5:5]
+ tools/llvm-xray/xray-graph.cpp [5:5]
+ tools/llvm-xray/xray-graph.h [5:5]
+ tools/llvm-xray/xray-registry.cpp [5:5]
+ tools/llvm-xray/xray-registry.h [5:5]
+ tools/llvm-xray/xray-stacks.cpp [5:5]
+ tools/lto/LTODisassembler.cpp [5:5]
+ tools/lto/lto.cpp [5:5]
+ tools/obj2yaml/archive2yaml.cpp [5:5]
+ tools/obj2yaml/coff2yaml.cpp [5:5]
+ tools/obj2yaml/dwarf2yaml.cpp [5:5]
+ tools/obj2yaml/dxcontainer2yaml.cpp [5:5]
+ tools/obj2yaml/elf2yaml.cpp [5:5]
+ tools/obj2yaml/macho2yaml.cpp [5:5]
+ tools/obj2yaml/minidump2yaml.cpp [5:5]
+ tools/obj2yaml/obj2yaml.cpp [5:5]
+ tools/obj2yaml/obj2yaml.h [5:5]
+ tools/obj2yaml/offload2yaml.cpp [5:5]
+ tools/obj2yaml/wasm2yaml.cpp [5:5]
+ tools/obj2yaml/xcoff2yaml.cpp [5:5]
+ tools/opt/AnalysisWrappers.cpp [5:5]
+ tools/opt/BreakpointPrinter.cpp [5:5]
+ tools/opt/BreakpointPrinter.h [5:5]
+ tools/opt/NewPMDriver.cpp [5:5]
+ tools/opt/NewPMDriver.h [5:5]
+ tools/opt/opt.cpp [5:5]
+ tools/polly/include/polly/Canonicalization.h [5:5]
+ tools/polly/include/polly/CodeGen/BlockGenerators.h [5:5]
+ tools/polly/include/polly/CodeGen/CodeGeneration.h [5:5]
+ tools/polly/include/polly/CodeGen/IRBuilder.h [5:5]
+ tools/polly/include/polly/CodeGen/IslAst.h [5:5]
+ tools/polly/include/polly/CodeGen/IslExprBuilder.h [5:5]
+ tools/polly/include/polly/CodeGen/IslNodeBuilder.h [5:5]
+ tools/polly/include/polly/CodeGen/LoopGenerators.h [5:5]
+ tools/polly/include/polly/CodeGen/LoopGeneratorsGOMP.h [5:5]
+ tools/polly/include/polly/CodeGen/LoopGeneratorsKMP.h [5:5]
+ tools/polly/include/polly/CodeGen/PPCGCodeGeneration.h [5:5]
+ tools/polly/include/polly/CodeGen/PerfMonitor.h [5:5]
+ tools/polly/include/polly/CodeGen/RuntimeDebugBuilder.h [5:5]
+ tools/polly/include/polly/CodeGen/Utils.h [5:5]
+ tools/polly/include/polly/CodePreparation.h [5:5]
+ tools/polly/include/polly/Config/config.h [5:5]
+ tools/polly/include/polly/DeLICM.h [5:5]
+ tools/polly/include/polly/DeadCodeElimination.h [5:5]
+ tools/polly/include/polly/DependenceInfo.h [5:5]
+ tools/polly/include/polly/FlattenAlgo.h [5:5]
+ tools/polly/include/polly/FlattenSchedule.h [5:5]
+ tools/polly/include/polly/ForwardOpTree.h [5:5]
+ tools/polly/include/polly/JSONExporter.h [5:5]
+ tools/polly/include/polly/LinkAllPasses.h [5:5]
+ tools/polly/include/polly/ManualOptimizer.h [5:5]
+ tools/polly/include/polly/MatmulOptimizer.h [5:5]
+ tools/polly/include/polly/MaximalStaticExpansion.h [5:5]
+ tools/polly/include/polly/Options.h [5:5]
+ tools/polly/include/polly/PolyhedralInfo.h [5:5]
+ tools/polly/include/polly/PruneUnprofitable.h [5:5]
+ tools/polly/include/polly/RegisterPasses.h [5:5]
+ tools/polly/include/polly/ScheduleOptimizer.h [5:5]
+ tools/polly/include/polly/ScheduleTreeTransform.h [5:5]
+ tools/polly/include/polly/ScopBuilder.h [5:5]
+ tools/polly/include/polly/ScopDetection.h [5:5]
+ tools/polly/include/polly/ScopDetectionDiagnostic.h [5:5]
+ tools/polly/include/polly/ScopGraphPrinter.h [5:5]
+ tools/polly/include/polly/ScopInfo.h [5:5]
+ tools/polly/include/polly/ScopPass.h [5:5]
+ tools/polly/include/polly/Simplify.h [5:5]
+ tools/polly/include/polly/Support/DumpFunctionPass.h [5:5]
+ tools/polly/include/polly/Support/DumpModulePass.h [5:5]
+ tools/polly/include/polly/Support/GICHelper.h [5:5]
+ tools/polly/include/polly/Support/ISLOStream.h [5:5]
+ tools/polly/include/polly/Support/ISLOperators.h [5:5]
+ tools/polly/include/polly/Support/ISLTools.h [5:5]
+ tools/polly/include/polly/Support/SCEVAffinator.h [5:5]
+ tools/polly/include/polly/Support/SCEVValidator.h [5:5]
+ tools/polly/include/polly/Support/ScopHelper.h [5:5]
+ tools/polly/include/polly/Support/ScopLocation.h [5:5]
+ tools/polly/include/polly/Support/VirtualInstruction.h [5:5]
+ tools/polly/include/polly/ZoneAlgo.h [5:5]
+ tools/polly/lib/Analysis/DependenceInfo.cpp [5:5]
+ tools/polly/lib/Analysis/PolyhedralInfo.cpp [5:5]
+ tools/polly/lib/Analysis/PruneUnprofitable.cpp [5:5]
+ tools/polly/lib/Analysis/ScopBuilder.cpp [5:5]
+ tools/polly/lib/Analysis/ScopDetection.cpp [5:5]
+ tools/polly/lib/Analysis/ScopDetectionDiagnostic.cpp [5:5]
+ tools/polly/lib/Analysis/ScopGraphPrinter.cpp [5:5]
+ tools/polly/lib/Analysis/ScopInfo.cpp [5:5]
+ tools/polly/lib/Analysis/ScopPass.cpp [5:5]
+ tools/polly/lib/CodeGen/BlockGenerators.cpp [5:5]
+ tools/polly/lib/CodeGen/CodeGeneration.cpp [5:5]
+ tools/polly/lib/CodeGen/CodegenCleanup.cpp [5:5]
+ tools/polly/lib/CodeGen/IRBuilder.cpp [5:5]
+ tools/polly/lib/CodeGen/IslAst.cpp [5:5]
+ tools/polly/lib/CodeGen/IslExprBuilder.cpp [5:5]
+ tools/polly/lib/CodeGen/IslNodeBuilder.cpp [5:5]
+ tools/polly/lib/CodeGen/LoopGenerators.cpp [5:5]
+ tools/polly/lib/CodeGen/LoopGeneratorsGOMP.cpp [5:5]
+ tools/polly/lib/CodeGen/LoopGeneratorsKMP.cpp [5:5]
+ tools/polly/lib/CodeGen/ManagedMemoryRewrite.cpp [5:5]
+ tools/polly/lib/CodeGen/PPCGCodeGeneration.cpp [5:5]
+ tools/polly/lib/CodeGen/PerfMonitor.cpp [5:5]
+ tools/polly/lib/CodeGen/RuntimeDebugBuilder.cpp [5:5]
+ tools/polly/lib/CodeGen/Utils.cpp [5:5]
+ tools/polly/lib/Exchange/JSONExporter.cpp [5:5]
+ tools/polly/lib/Support/DumpFunctionPass.cpp [5:5]
+ tools/polly/lib/Support/DumpModulePass.cpp [5:5]
+ tools/polly/lib/Support/GICHelper.cpp [5:5]
+ tools/polly/lib/Support/ISLTools.cpp [5:5]
+ tools/polly/lib/Support/RegisterPasses.cpp [5:5]
+ tools/polly/lib/Support/SCEVAffinator.cpp [5:5]
+ tools/polly/lib/Support/ScopHelper.cpp [5:5]
+ tools/polly/lib/Support/ScopLocation.cpp [5:5]
+ tools/polly/lib/Support/VirtualInstruction.cpp [5:5]
+ tools/polly/lib/Transform/Canonicalization.cpp [5:5]
+ tools/polly/lib/Transform/CodePreparation.cpp [5:5]
+ tools/polly/lib/Transform/DeLICM.cpp [5:5]
+ tools/polly/lib/Transform/DeadCodeElimination.cpp [5:5]
+ tools/polly/lib/Transform/FlattenAlgo.cpp [5:5]
+ tools/polly/lib/Transform/FlattenSchedule.cpp [5:5]
+ tools/polly/lib/Transform/ForwardOpTree.cpp [5:5]
+ tools/polly/lib/Transform/ManualOptimizer.cpp [5:5]
+ tools/polly/lib/Transform/MatmulOptimizer.cpp [5:5]
+ tools/polly/lib/Transform/MaximalStaticExpansion.cpp [5:5]
+ tools/polly/lib/Transform/ScheduleOptimizer.cpp [5:5]
+ tools/polly/lib/Transform/ScheduleTreeTransform.cpp [5:5]
+ tools/polly/lib/Transform/ScopInliner.cpp [5:5]
+ tools/polly/lib/Transform/Simplify.cpp [5:5]
+ tools/polly/lib/Transform/ZoneAlgo.cpp [5:5]
+ tools/remarks-shlib/libremarks.cpp [5:5]
+ tools/sancov/sancov.cpp [5:5]
+ tools/sanstats/sanstats.cpp [5:5]
+ tools/verify-uselistorder/verify-uselistorder.cpp [5:5]
+ tools/yaml2obj/yaml2obj.cpp [5:5]
+ utils/TableGen/AsmMatcherEmitter.cpp [5:5]
+ utils/TableGen/AsmWriterEmitter.cpp [5:5]
+ utils/TableGen/AsmWriterInst.cpp [5:5]
+ utils/TableGen/AsmWriterInst.h [5:5]
+ utils/TableGen/Attributes.cpp [5:5]
+ utils/TableGen/CTagsEmitter.cpp [5:5]
+ utils/TableGen/CallingConvEmitter.cpp [5:5]
+ utils/TableGen/CodeEmitterGen.cpp [5:5]
+ utils/TableGen/CodeGenDAGPatterns.cpp [5:5]
+ utils/TableGen/CodeGenDAGPatterns.h [5:5]
+ utils/TableGen/CodeGenHwModes.cpp [5:5]
+ utils/TableGen/CodeGenHwModes.h [5:5]
+ utils/TableGen/CodeGenInstruction.cpp [5:5]
+ utils/TableGen/CodeGenInstruction.h [5:5]
+ utils/TableGen/CodeGenIntrinsics.h [5:5]
+ utils/TableGen/CodeGenMapTable.cpp [5:5]
+ utils/TableGen/CodeGenRegisters.cpp [5:5]
+ utils/TableGen/CodeGenRegisters.h [5:5]
+ utils/TableGen/CodeGenSchedule.cpp [5:5]
+ utils/TableGen/CodeGenSchedule.h [5:5]
+ utils/TableGen/CodeGenTarget.cpp [5:5]
+ utils/TableGen/CodeGenTarget.h [5:5]
+ utils/TableGen/CompressInstEmitter.cpp [5:5]
+ utils/TableGen/DAGISelEmitter.cpp [5:5]
+ utils/TableGen/DAGISelMatcher.cpp [5:5]
+ utils/TableGen/DAGISelMatcher.h [5:5]
+ utils/TableGen/DAGISelMatcherEmitter.cpp [5:5]
+ utils/TableGen/DAGISelMatcherGen.cpp [5:5]
+ utils/TableGen/DAGISelMatcherOpt.cpp [5:5]
+ utils/TableGen/DFAEmitter.cpp [5:5]
+ utils/TableGen/DFAEmitter.h [5:5]
+ utils/TableGen/DFAPacketizerEmitter.cpp [5:5]
+ utils/TableGen/DXILEmitter.cpp [5:5]
+ utils/TableGen/DecoderEmitter.cpp [5:5]
+ utils/TableGen/DirectiveEmitter.cpp [5:5]
+ utils/TableGen/DisassemblerEmitter.cpp [5:5]
+ utils/TableGen/ExegesisEmitter.cpp [5:5]
+ utils/TableGen/FastISelEmitter.cpp [5:5]
+ utils/TableGen/GICombinerEmitter.cpp [5:5]
+ utils/TableGen/GlobalISel/CodeExpander.cpp [5:5]
+ utils/TableGen/GlobalISel/CodeExpander.h [5:5]
+ utils/TableGen/GlobalISel/CodeExpansions.h [5:5]
+ utils/TableGen/GlobalISel/GIMatchDag.cpp [5:5]
+ utils/TableGen/GlobalISel/GIMatchDag.h [5:5]
+ utils/TableGen/GlobalISel/GIMatchDagEdge.cpp [5:5]
+ utils/TableGen/GlobalISel/GIMatchDagEdge.h [5:5]
+ utils/TableGen/GlobalISel/GIMatchDagInstr.cpp [5:5]
+ utils/TableGen/GlobalISel/GIMatchDagInstr.h [5:5]
+ utils/TableGen/GlobalISel/GIMatchDagOperands.cpp [5:5]
+ utils/TableGen/GlobalISel/GIMatchDagOperands.h [5:5]
+ utils/TableGen/GlobalISel/GIMatchDagPredicate.cpp [5:5]
+ utils/TableGen/GlobalISel/GIMatchDagPredicate.h [5:5]
+ utils/TableGen/GlobalISel/GIMatchDagPredicateDependencyEdge.cpp [5:5]
+ utils/TableGen/GlobalISel/GIMatchDagPredicateDependencyEdge.h [5:5]
+ utils/TableGen/GlobalISel/GIMatchTree.cpp [5:5]
+ utils/TableGen/GlobalISel/GIMatchTree.h [5:5]
+ utils/TableGen/GlobalISelEmitter.cpp [5:5]
+ utils/TableGen/InfoByHwMode.cpp [5:5]
+ utils/TableGen/InfoByHwMode.h [5:5]
+ utils/TableGen/InstrDocsEmitter.cpp [5:5]
+ utils/TableGen/InstrInfoEmitter.cpp [5:5]
+ utils/TableGen/IntrinsicEmitter.cpp [5:5]
+ utils/TableGen/OptEmitter.cpp [5:5]
+ utils/TableGen/OptEmitter.h [5:5]
+ utils/TableGen/OptParserEmitter.cpp [5:5]
+ utils/TableGen/OptRSTEmitter.cpp [5:5]
+ utils/TableGen/PredicateExpander.cpp [5:5]
+ utils/TableGen/PredicateExpander.h [5:5]
+ utils/TableGen/PseudoLoweringEmitter.cpp [5:5]
+ utils/TableGen/RISCVTargetDefEmitter.cpp [5:5]
+ utils/TableGen/RegisterBankEmitter.cpp [5:5]
+ utils/TableGen/RegisterInfoEmitter.cpp [5:5]
+ utils/TableGen/SDNodeProperties.cpp [5:5]
+ utils/TableGen/SDNodeProperties.h [5:5]
+ utils/TableGen/SearchableTableEmitter.cpp [5:5]
+ utils/TableGen/SequenceToOffsetTable.h [5:5]
+ utils/TableGen/SubtargetEmitter.cpp [5:5]
+ utils/TableGen/SubtargetFeatureInfo.cpp [5:5]
+ utils/TableGen/SubtargetFeatureInfo.h [5:5]
+ utils/TableGen/TableGen.cpp [5:5]
+ utils/TableGen/TableGenBackends.h [5:5]
+ utils/TableGen/Types.cpp [5:5]
+ utils/TableGen/Types.h [5:5]
+ utils/TableGen/VarLenCodeEmitterGen.cpp [5:5]
+ utils/TableGen/VarLenCodeEmitterGen.h [5:5]
+ utils/TableGen/WebAssemblyDisassemblerEmitter.cpp [5:5]
+ utils/TableGen/WebAssemblyDisassemblerEmitter.h [5:5]
+ utils/TableGen/X86DisassemblerShared.h [5:5]
+ utils/TableGen/X86DisassemblerTables.cpp [5:5]
+ utils/TableGen/X86DisassemblerTables.h [5:5]
+ utils/TableGen/X86EVEX2VEXTablesEmitter.cpp [5:5]
+ utils/TableGen/X86FoldTablesEmitter.cpp [5:5]
+ utils/TableGen/X86MnemonicTables.cpp [5:5]
+ utils/TableGen/X86ModRMFilters.cpp [5:5]
+ utils/TableGen/X86ModRMFilters.h [5:5]
+ utils/TableGen/X86RecognizableInstr.cpp [5:5]
+ utils/TableGen/X86RecognizableInstr.h [5:5]
+
+KEEP Apache-2.0 WITH LLVM-exception bb25e85a7730c0eb3be9012f011952ca
+BELONGS include/ya.make
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: Apache-2.0
+ Score : 100.00
+ Match type : NOTICE
+ Links : http://www.apache.org/licenses/, http://www.apache.org/licenses/LICENSE-2.0, https://spdx.org/licenses/Apache-2.0
+ Files with this license:
+ include/llvm-c/Object.h [10:12]
+ include/llvm-c/Target.h [10:12]
+ include/llvm/Config/abi-breaking.h [10:12]
+ include/llvm/Config/abi-breaking.h.cmake [3:5]
+ include/llvm/Config/llvm-config-linux.h [10:12]
+ include/llvm/Config/llvm-config-osx.h [3:5]
+ include/llvm/Config/llvm-config-win.h [3:5]
+ include/llvm/Config/llvm-config.h.cmake [3:5]
+ Scancode info:
+ Original SPDX id: LLVM-exception
+ Score : 100.00
+ Match type : NOTICE
+ Links : http://llvm.org/foundation/relicensing/LICENSE.txt, https://spdx.org/licenses/LLVM-exception
+ Files with this license:
+ include/llvm-c/Object.h [10:12]
+ include/llvm-c/Target.h [10:12]
+ include/llvm/Config/abi-breaking.h [10:12]
+ include/llvm/Config/abi-breaking.h.cmake [3:5]
+ include/llvm/Config/llvm-config-linux.h [10:12]
+ include/llvm/Config/llvm-config-osx.h [3:5]
+ include/llvm/Config/llvm-config-win.h [3:5]
+ include/llvm/Config/llvm-config.h.cmake [3:5]
+
+KEEP Apache-2.0 WITH LLVM-exception c1e6c6536fcc444f916047679115c618
+BELONGS include/ya.make lib/Support/ya.make
+ License text:
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ Scancode info:
+ Original SPDX id: Apache-2.0
+ Score : 100.00
+ Match type : TAG
+ Links : http://www.apache.org/licenses/, http://www.apache.org/licenses/LICENSE-2.0, https://spdx.org/licenses/Apache-2.0
+ Files with this license:
+ include/llvm/Support/ConvertUTF.h [12:12]
+ include/llvm/Support/Solaris/sys/regset.h [12:12]
+ lib/Support/ConvertUTF.cpp [5:5]
+ Scancode info:
+ Original SPDX id: LLVM-exception
+ Score : 100.00
+ Match type : TAG
+ Links : http://llvm.org/foundation/relicensing/LICENSE.txt, https://spdx.org/licenses/LLVM-exception
+ Files with this license:
+ include/llvm/Support/ConvertUTF.h [12:12]
+ include/llvm/Support/Solaris/sys/regset.h [12:12]
+ lib/Support/ConvertUTF.cpp [5:5]
+
+KEEP Unicode c3e7996ece5dfe4215b98cc7cead1198
+BELONGS include/ya.make lib/Support/ya.make
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: LicenseRef-scancode-unicode
+ Score : 100.00
+ Match type : TEXT
+ Links : http://unicode.org/, http://unicode.org/copyright.html, https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/unicode.LICENSE
+ Files with this license:
+ include/llvm/Support/ConvertUTF.h [17:50]
+ lib/Support/ConvertUTF.cpp [10:43]
+
+KEEP BSD-2-Clause c67981f06ea0c00fcf4c73a2ef8d2c8a
+BELONGS include/ya.make
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: BSD-2-Clause
+ Score : 100.00
+ Match type : TEXT
+ Links : http://opensource.org/licenses/bsd-license.php, http://www.opensource.org/licenses/BSD-2-Clause, https://spdx.org/licenses/BSD-2-Clause
+ Files with this license:
+ include/llvm/Support/xxhash.h [15:36]
+
+KEEP MIT ce957f5a4402b71a91bdb2fe214adfdd
+BELONGS tools/polly/lib/External/isl/ya.make
+ License text:
+ * where it was posted in 2011 by Jeffrey Stedfast under the MIT license.
+ * The MIT license text is as follows:
+ Scancode info:
+ Original SPDX id: MIT
+ Score : 100.00
+ Match type : REFERENCE
+ Links : http://opensource.org/licenses/mit-license.php, https://spdx.org/licenses/MIT
+ Files with this license:
+ tools/polly/lib/External/isl/isl_sort.c [3:4]
+
+KEEP Public-Domain d33a831e1d18fad46d6036f07e233fe4
+BELONGS include/ya.make lib/Support/ya.make
+ License text:
+ * [0] https://hyperelliptic.org/nacl/nacl-20110221.tar.bz2 (public domain
+ * code)
+ Scancode info:
+ Original SPDX id: LicenseRef-scancode-public-domain
+ Score : 100.00
+ Match type : NOTICE
+ Links : http://www.linfo.org/publicdomain.html, https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/public-domain.LICENSE
+ Files with this license:
+ include/llvm/Support/SHA256.h [23:24]
+ lib/Support/SHA256.cpp [16:17]
+
+KEEP BSD-3-Clause d518ef1ba9e05a24208c9169fce47395
+BELONGS tools/polly/lib/External/isl/ya.make
+ License text:
+ license I am using. This is basically the BSD license, so
+ Scancode info:
+ Original SPDX id: BSD-3-Clause
+ Score : 99.00
+ Match type : REFERENCE
+ Links : http://www.opensource.org/licenses/BSD-3-Clause, https://spdx.org/licenses/BSD-3-Clause
+ Files with this license:
+ tools/polly/lib/External/isl/imath/ChangeLog [98:98]
+
+KEEP MIT d8ecaf8ae07902aec387aecab19f18c6
+BELONGS tools/polly/lib/ya.make
+ License text:
+ License: MIT-STYLE
+ Scancode info:
+ Original SPDX id: MIT
+ Score : 95.00
+ Match type : REFERENCE
+ Links : http://opensource.org/licenses/mit-license.php, https://spdx.org/licenses/MIT
+ Files with this license:
+ tools/polly/lib/External/README.txt [9:9]
+ tools/polly/lib/External/README.txt [13:13]
+
+KEEP Apache-2.0 WITH LLVM-exception df18889e552d44a4679aff552267f802
+BELONGS ya.make
+ License text:
+ The LLVM Project is under the Apache License v2.0 with LLVM Exceptions:
+ Scancode info:
+ Original SPDX id: Apache-2.0
+ Score : 100.00
+ Match type : NOTICE
+ Links : http://www.apache.org/licenses/, http://www.apache.org/licenses/LICENSE-2.0, https://spdx.org/licenses/Apache-2.0
+ Files with this license:
+ LICENSE.TXT [2:2]
+ tools/polly/LICENSE.TXT [2:2]
+ Scancode info:
+ Original SPDX id: LLVM-exception
+ Score : 100.00
+ Match type : NOTICE
+ Links : http://llvm.org/foundation/relicensing/LICENSE.txt, https://spdx.org/licenses/LLVM-exception
+ Files with this license:
+ LICENSE.TXT [2:2]
+ tools/polly/LICENSE.TXT [2:2]
+
+KEEP Apache-2.0 WITH LLVM-exception df2799ee3781d409b5469950b953503d
+BELONGS include/ya.make
+ License text:
+ Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ See https://llvm.org/LICENSE.txt for license information.
+ Scancode info:
+ Original SPDX id: Apache-2.0
+ Score : 100.00
+ Match type : NOTICE
+ Links : http://www.apache.org/licenses/, http://www.apache.org/licenses/LICENSE-2.0, https://spdx.org/licenses/Apache-2.0
+ Files with this license:
+ include/llvm/Support/LICENSE.TXT [4:5]
+ Scancode info:
+ Original SPDX id: LLVM-exception
+ Score : 100.00
+ Match type : NOTICE
+ Links : http://llvm.org/foundation/relicensing/LICENSE.txt, https://spdx.org/licenses/LLVM-exception
+ Files with this license:
+ include/llvm/Support/LICENSE.TXT [4:5]
+
+KEEP MIT e0068b592aa878935b6e2110931d77dd
+BELONGS include/ya.make
+FILE_INCLUDE tools/polly/lib/External/isl/AUTHORS found in files: include/llvm/WindowsDriver/MSVCSetupApi.h at line 30
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: MIT
+ Score : 100.00
+ Match type : TEXT
+ Links : http://opensource.org/licenses/mit-license.php, https://spdx.org/licenses/MIT
+ Files with this license:
+ include/llvm/WindowsDriver/MSVCSetupApi.h [17:33]
+
+KEEP ISC e6a382fc7564fdd1a5e46b2d97b3221f
+BELONGS lib/Support/ya.make
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: ISC
+ Score : 100.00
+ Match type : TEXT
+ Links : http://fedoraproject.org/wiki/Licensing:MIT#Old_Style_with_legal_disclaimer_2, https://spdx.org/licenses/ISC, https://www.isc.org/software/license
+ Files with this license:
+ lib/Support/regstrlcpy.c [6:16]
+
+KEEP MIT eda4392ff1ecb2e3afd880e12d2d1e8c
+BELONGS tools/polly/lib/External/isl/ya.make
+FILE_INCLUDE tools/polly/lib/External/isl/AUTHORS found in files: tools/polly/lib/External/isl/LICENSE at line 16
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: MIT
+ Score : 100.00
+ Match type : TEXT
+ Links : http://opensource.org/licenses/mit-license.php, https://spdx.org/licenses/MIT
+ Files with this license:
+ tools/polly/lib/External/isl/LICENSE [3:19]
+
+KEEP MIT f0a9bc0d07550a52c128ea0a8106fd15
+BELONGS tools/polly/lib/External/isl/ya.make
+ # see license text
+ License text:
+ isl is released under the MIT license, but depends on the LGPL GMP
+ Scancode info:
+ Original SPDX id: MIT
+ Score : 100.00
+ Match type : NOTICE
+ Links : http://opensource.org/licenses/mit-license.php, https://spdx.org/licenses/MIT
+ Files with this license:
+ tools/polly/lib/External/isl/README [7:7]
+ Scancode info:
+ Original SPDX id: LGPL-2.0-or-later
+ Score : 75.00
+ Match type : REFERENCE
+ Links : http://www.gnu.org/licenses/old-licenses/lgpl-2.0-standalone.html, http://www.gnu.org/licenses/old-licenses/lgpl-2.0.html, https://spdx.org/licenses/LGPL-2.0-or-later
+ Files with this license:
+ tools/polly/lib/External/isl/README [7:7]
+
+KEEP BSD-2-Clause f34a4678bf5186b7d0a23d3f898f17d4
+BELONGS lib/Support/ya.make
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: BSD-2-Clause
+ Score : 100.00
+ Match type : TEXT
+ Links : http://opensource.org/licenses/bsd-license.php, http://www.opensource.org/licenses/BSD-2-Clause, https://spdx.org/licenses/BSD-2-Clause
+ Files with this license:
+ lib/Support/xxhash.cpp [7:28]
+
+KEEP Apache-2.0 WITH LLVM-exception f75f3cc6b327e5dd41c6b0f3189a3308
+BELONGS include/ya.make
+ License text:
+ |* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception *|
+ Scancode info:
+ Original SPDX id: Apache-2.0
+ Score : 100.00
+ Match type : TAG
+ Links : http://www.apache.org/licenses/, http://www.apache.org/licenses/LICENSE-2.0, https://spdx.org/licenses/Apache-2.0
+ Files with this license:
+ include/llvm-c/Analysis.h [13:13]
+ include/llvm-c/BitReader.h [13:13]
+ include/llvm-c/BitWriter.h [13:13]
+ include/llvm-c/Comdat.h [13:13]
+ include/llvm-c/Core.h [13:13]
+ include/llvm-c/DataTypes.h [13:13]
+ include/llvm-c/Deprecated.h [13:13]
+ include/llvm-c/Disassembler.h [13:13]
+ include/llvm-c/DisassemblerTypes.h [13:13]
+ include/llvm-c/Error.h [13:13]
+ include/llvm-c/ErrorHandling.h [13:13]
+ include/llvm-c/ExecutionEngine.h [13:13]
+ include/llvm-c/ExternC.h [13:13]
+ include/llvm-c/IRReader.h [13:13]
+ include/llvm-c/Initialization.h [13:13]
+ include/llvm-c/LLJIT.h [13:13]
+ include/llvm-c/Linker.h [13:13]
+ include/llvm-c/Orc.h [13:13]
+ include/llvm-c/OrcEE.h [13:13]
+ include/llvm-c/Remarks.h [13:13]
+ include/llvm-c/Support.h [13:13]
+ include/llvm-c/TargetMachine.h [13:13]
+ include/llvm-c/Transforms/IPO.h [13:13]
+ include/llvm-c/Transforms/InstCombine.h [13:13]
+ include/llvm-c/Transforms/PassBuilder.h [13:13]
+ include/llvm-c/Transforms/PassManagerBuilder.h [13:13]
+ include/llvm-c/Transforms/Scalar.h [13:13]
+ include/llvm-c/Transforms/Utils.h [13:13]
+ include/llvm-c/Transforms/Vectorize.h [14:14]
+ include/llvm-c/Types.h [13:13]
+ include/llvm-c/lto.h [13:13]
+ include/llvm/Config/AsmParsers.def [6:6]
+ include/llvm/Config/AsmParsers.def.in [6:6]
+ include/llvm/Config/AsmPrinters.def [6:6]
+ include/llvm/Config/AsmPrinters.def.in [6:6]
+ include/llvm/Config/Disassemblers.def [6:6]
+ include/llvm/Config/Disassemblers.def.in [6:6]
+ include/llvm/Config/TargetExegesis.def [6:6]
+ include/llvm/Config/TargetExegesis.def.in [6:6]
+ include/llvm/Config/TargetMCAs.def [6:6]
+ include/llvm/Config/TargetMCAs.def.in [6:6]
+ include/llvm/Config/Targets.def [6:6]
+ include/llvm/Config/Targets.def.in [6:6]
+ Scancode info:
+ Original SPDX id: LLVM-exception
+ Score : 100.00
+ Match type : TAG
+ Links : http://llvm.org/foundation/relicensing/LICENSE.txt, https://spdx.org/licenses/LLVM-exception
+ Files with this license:
+ include/llvm-c/Analysis.h [13:13]
+ include/llvm-c/BitReader.h [13:13]
+ include/llvm-c/BitWriter.h [13:13]
+ include/llvm-c/Comdat.h [13:13]
+ include/llvm-c/Core.h [13:13]
+ include/llvm-c/DataTypes.h [13:13]
+ include/llvm-c/Deprecated.h [13:13]
+ include/llvm-c/Disassembler.h [13:13]
+ include/llvm-c/DisassemblerTypes.h [13:13]
+ include/llvm-c/Error.h [13:13]
+ include/llvm-c/ErrorHandling.h [13:13]
+ include/llvm-c/ExecutionEngine.h [13:13]
+ include/llvm-c/ExternC.h [13:13]
+ include/llvm-c/IRReader.h [13:13]
+ include/llvm-c/Initialization.h [13:13]
+ include/llvm-c/LLJIT.h [13:13]
+ include/llvm-c/Linker.h [13:13]
+ include/llvm-c/Orc.h [13:13]
+ include/llvm-c/OrcEE.h [13:13]
+ include/llvm-c/Remarks.h [13:13]
+ include/llvm-c/Support.h [13:13]
+ include/llvm-c/TargetMachine.h [13:13]
+ include/llvm-c/Transforms/IPO.h [13:13]
+ include/llvm-c/Transforms/InstCombine.h [13:13]
+ include/llvm-c/Transforms/PassBuilder.h [13:13]
+ include/llvm-c/Transforms/PassManagerBuilder.h [13:13]
+ include/llvm-c/Transforms/Scalar.h [13:13]
+ include/llvm-c/Transforms/Utils.h [13:13]
+ include/llvm-c/Transforms/Vectorize.h [14:14]
+ include/llvm-c/Types.h [13:13]
+ include/llvm-c/lto.h [13:13]
+ include/llvm/Config/AsmParsers.def [6:6]
+ include/llvm/Config/AsmParsers.def.in [6:6]
+ include/llvm/Config/AsmPrinters.def [6:6]
+ include/llvm/Config/AsmPrinters.def.in [6:6]
+ include/llvm/Config/Disassemblers.def [6:6]
+ include/llvm/Config/Disassemblers.def.in [6:6]
+ include/llvm/Config/TargetExegesis.def [6:6]
+ include/llvm/Config/TargetExegesis.def.in [6:6]
+ include/llvm/Config/TargetMCAs.def [6:6]
+ include/llvm/Config/TargetMCAs.def.in [6:6]
+ include/llvm/Config/Targets.def [6:6]
+ include/llvm/Config/Targets.def.in [6:6]
+
+KEEP Apache-2.0 WITH LLVM-exception fb91bf796967c5e7cef8c3b4cab9b44a
+BELONGS include/ya.make
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: Apache-2.0
+ Score : 100.00
+ Match type : NOTICE
+ Links : http://www.apache.org/licenses/, http://www.apache.org/licenses/LICENSE-2.0, https://spdx.org/licenses/Apache-2.0
+ Files with this license:
+ include/llvm-c/Analysis.h [10:12]
+ include/llvm-c/BitReader.h [10:12]
+ include/llvm-c/BitWriter.h [10:12]
+ include/llvm-c/Comdat.h [10:12]
+ include/llvm-c/Core.h [10:12]
+ include/llvm-c/DataTypes.h [10:12]
+ include/llvm-c/Deprecated.h [10:12]
+ include/llvm-c/Disassembler.h [10:12]
+ include/llvm-c/DisassemblerTypes.h [10:12]
+ include/llvm-c/Error.h [10:12]
+ include/llvm-c/ErrorHandling.h [10:12]
+ include/llvm-c/ExecutionEngine.h [10:12]
+ include/llvm-c/ExternC.h [10:12]
+ include/llvm-c/IRReader.h [10:12]
+ include/llvm-c/Initialization.h [10:12]
+ include/llvm-c/LLJIT.h [10:12]
+ include/llvm-c/Linker.h [10:12]
+ include/llvm-c/Orc.h [10:12]
+ include/llvm-c/OrcEE.h [10:12]
+ include/llvm-c/Remarks.h [10:12]
+ include/llvm-c/Support.h [10:12]
+ include/llvm-c/TargetMachine.h [10:12]
+ include/llvm-c/Transforms/IPO.h [10:12]
+ include/llvm-c/Transforms/InstCombine.h [10:12]
+ include/llvm-c/Transforms/PassBuilder.h [10:12]
+ include/llvm-c/Transforms/PassManagerBuilder.h [10:12]
+ include/llvm-c/Transforms/Scalar.h [10:12]
+ include/llvm-c/Transforms/Utils.h [10:12]
+ include/llvm-c/Transforms/Vectorize.h [11:13]
+ include/llvm-c/Types.h [10:12]
+ include/llvm-c/lto.h [10:12]
+ include/llvm/Config/AsmParsers.def [3:5]
+ include/llvm/Config/AsmParsers.def.in [3:5]
+ include/llvm/Config/AsmPrinters.def [3:5]
+ include/llvm/Config/AsmPrinters.def.in [3:5]
+ include/llvm/Config/Disassemblers.def [3:5]
+ include/llvm/Config/Disassemblers.def.in [3:5]
+ include/llvm/Config/TargetExegesis.def [3:5]
+ include/llvm/Config/TargetExegesis.def.in [3:5]
+ include/llvm/Config/TargetMCAs.def [3:5]
+ include/llvm/Config/TargetMCAs.def.in [3:5]
+ include/llvm/Config/Targets.def [3:5]
+ include/llvm/Config/Targets.def.in [3:5]
+ Scancode info:
+ Original SPDX id: LLVM-exception
+ Score : 100.00
+ Match type : NOTICE
+ Links : http://llvm.org/foundation/relicensing/LICENSE.txt, https://spdx.org/licenses/LLVM-exception
+ Files with this license:
+ include/llvm-c/Analysis.h [10:12]
+ include/llvm-c/BitReader.h [10:12]
+ include/llvm-c/BitWriter.h [10:12]
+ include/llvm-c/Comdat.h [10:12]
+ include/llvm-c/Core.h [10:12]
+ include/llvm-c/DataTypes.h [10:12]
+ include/llvm-c/Deprecated.h [10:12]
+ include/llvm-c/Disassembler.h [10:12]
+ include/llvm-c/DisassemblerTypes.h [10:12]
+ include/llvm-c/Error.h [10:12]
+ include/llvm-c/ErrorHandling.h [10:12]
+ include/llvm-c/ExecutionEngine.h [10:12]
+ include/llvm-c/ExternC.h [10:12]
+ include/llvm-c/IRReader.h [10:12]
+ include/llvm-c/Initialization.h [10:12]
+ include/llvm-c/LLJIT.h [10:12]
+ include/llvm-c/Linker.h [10:12]
+ include/llvm-c/Orc.h [10:12]
+ include/llvm-c/OrcEE.h [10:12]
+ include/llvm-c/Remarks.h [10:12]
+ include/llvm-c/Support.h [10:12]
+ include/llvm-c/TargetMachine.h [10:12]
+ include/llvm-c/Transforms/IPO.h [10:12]
+ include/llvm-c/Transforms/InstCombine.h [10:12]
+ include/llvm-c/Transforms/PassBuilder.h [10:12]
+ include/llvm-c/Transforms/PassManagerBuilder.h [10:12]
+ include/llvm-c/Transforms/Scalar.h [10:12]
+ include/llvm-c/Transforms/Utils.h [10:12]
+ include/llvm-c/Transforms/Vectorize.h [11:13]
+ include/llvm-c/Types.h [10:12]
+ include/llvm-c/lto.h [10:12]
+ include/llvm/Config/AsmParsers.def [3:5]
+ include/llvm/Config/AsmParsers.def.in [3:5]
+ include/llvm/Config/AsmPrinters.def [3:5]
+ include/llvm/Config/AsmPrinters.def.in [3:5]
+ include/llvm/Config/Disassemblers.def [3:5]
+ include/llvm/Config/Disassemblers.def.in [3:5]
+ include/llvm/Config/TargetExegesis.def [3:5]
+ include/llvm/Config/TargetExegesis.def.in [3:5]
+ include/llvm/Config/TargetMCAs.def [3:5]
+ include/llvm/Config/TargetMCAs.def.in [3:5]
+ include/llvm/Config/Targets.def [3:5]
+ include/llvm/Config/Targets.def.in [3:5]
+
+KEEP Apache-2.0 WITH LLVM-exception fbfa20072adb96ef931350cc08ff1c57
+BELONGS include/ya.make lib/CodeGen/ya.make
+ License text:
+ /// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ /// See https://llvm.org/LICENSE.txt for license information.
+ Scancode info:
+ Original SPDX id: Apache-2.0
+ Score : 100.00
+ Match type : NOTICE
+ Links : http://www.apache.org/licenses/, http://www.apache.org/licenses/LICENSE-2.0, https://spdx.org/licenses/Apache-2.0
+ Files with this license:
+ include/llvm/CodeGen/LazyMachineBlockFrequencyInfo.h [10:11]
+ include/llvm/CodeGen/MachineOptimizationRemarkEmitter.h [10:11]
+ lib/CodeGen/LazyMachineBlockFrequencyInfo.cpp [3:4]
+ lib/CodeGen/MachineOptimizationRemarkEmitter.cpp [3:4]
+ Scancode info:
+ Original SPDX id: LLVM-exception
+ Score : 100.00
+ Match type : NOTICE
+ Links : http://llvm.org/foundation/relicensing/LICENSE.txt, https://spdx.org/licenses/LLVM-exception
+ Files with this license:
+ include/llvm/CodeGen/LazyMachineBlockFrequencyInfo.h [10:11]
+ include/llvm/CodeGen/MachineOptimizationRemarkEmitter.h [10:11]
+ lib/CodeGen/LazyMachineBlockFrequencyInfo.cpp [3:4]
+ lib/CodeGen/MachineOptimizationRemarkEmitter.cpp [3:4]
+
+KEEP MIT fffc03224b31436e503bb389186ff530
+BELONGS tools/polly/lib/External/isl/ya.make
+ License text:
+ MIT License (MIT)
+ Scancode info:
+ Original SPDX id: MIT
+ Score : 100.00
+ Match type : REFERENCE
+ Links : http://opensource.org/licenses/mit-license.php, https://spdx.org/licenses/MIT
+ Files with this license:
+ tools/polly/lib/External/isl/LICENSE [1:1]
diff --git a/contrib/libs/llvm16/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..b8025be265
--- /dev/null
+++ b/contrib/libs/llvm16/.yandex_meta/licenses.list.txt
@@ -0,0 +1,312 @@
+====================Apache-2.0 WITH LLVM-exception====================
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+---- LLVM Exceptions to the Apache 2.0 License ----
+
+As an exception, if, as a result of your compiling your source code, portions
+of this Software are embedded into an Object form of such source code, you
+may redistribute such embedded portions in such Object form without complying
+with the conditions of Sections 4(a), 4(b) and 4(d) of the License.
+
+In addition, if you combine or link compiled forms of this Software with
+software that is licensed under the GPLv2 ("Combined Software") and if a
+court of competent jurisdiction determines that the patent provision (Section
+3), the indemnity provision (Section 9) or other Section of the License
+conflicts with the conditions of the GPLv2, you may retroactively and
+prospectively choose to deem waived or otherwise exclude such Section(s) of
+the License, but only in their entirety and only with respect to the Combined
+Software.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+The LLVM Project is under the Apache License v2.0 with LLVM Exceptions:
+
+
+====================COPYRIGHT====================
+Copyright (c) 2003-2019 University of Illinois at Urbana-Champaign.
+All rights reserved.
+
+
+====================COPYRIGHT====================
+Copyright (c) 2009-2019 Polly Team
+All rights reserved.
+
+
+====================NCSA====================
+LLVM is open source software. You may freely distribute it under the terms of
+the license agreement found in LICENSE.txt.
+
+
+====================NCSA====================
+Legacy LLVM License (https://llvm.org/docs/DeveloperPolicy.html#legacy):
+==============================================================================
+University of Illinois/NCSA
+Open Source License
+
+
+====================NCSA====================
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal with
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimers.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimers in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the names of the LLVM Team, University of Illinois at
+ Urbana-Champaign, nor the names of its contributors may be used to
+ endorse or promote products derived from this Software without specific
+ prior written permission.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+SOFTWARE.
+
+
+====================NCSA====================
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal with
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimers.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimers in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the names of the Polly Team, copyright holders, nor the names of
+ its contributors may be used to endorse or promote products derived from
+ this Software without specific prior written permission.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+SOFTWARE. \ No newline at end of file
diff --git a/contrib/libs/llvm16/include/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/include/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..43639bf84c
--- /dev/null
+++ b/contrib/libs/llvm16/include/.yandex_meta/licenses.list.txt
@@ -0,0 +1,289 @@
+====================Apache-2.0 WITH LLVM-exception====================
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+/* Part of the LLVM Project, under the Apache License v2.0 with LLVM */
+/* Exceptions. */
+/* See https://llvm.org/LICENSE.txt for license information. */
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+/* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception */
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+/// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+/// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+/// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+====================Apache-2.0 WITH LLVM-exception====================
+|* Part of the LLVM Project, under the Apache License v2.0 with LLVM *|
+|* Exceptions. *|
+|* See https://llvm.org/LICENSE.txt for license information. *|
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+|* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+|* See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception *|
+
+
+====================BSD-2-Clause====================
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+
+====================BSD-2-Clause====================
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+====================CC0-1.0====================
+|* SPDX-License-Identifier: CC0-1.0 *|
+
+
+====================COPYRIGHT====================
+ Copyright (C) 2012-2016, Yann Collet.
+
+
+====================COPYRIGHT====================
+ static bool isChar6(char C) { return isAlnum(C) || C == '.' || C == '_'; }
+ static unsigned EncodeChar6(char C) {
+ if (C >= 'a' && C <= 'z') return C-'a';
+
+
+====================COPYRIGHT====================
+ * Copyright © 1991-2015 Unicode, Inc. All rights reserved.
+
+
+====================COPYRIGHT====================
+ * This software was written by Alexander Peslyak in 2001. No copyright is
+ * claimed, and the software is hereby placed in the public domain.
+ * In case this attempt to disclaim copyright and place the software in the
+ * public domain is deemed null and void, then the software is
+ * Copyright (c) 2001 Alexander Peslyak and it is hereby released to the
+ * general public under the following terms:
+
+
+====================COPYRIGHT====================
+// <copyright file="Program.cpp" company="Microsoft Corporation">
+// Copyright (C) Microsoft Corporation. All rights reserved.
+
+
+====================File: tools/polly/lib/External/isl/AUTHORS====================
+isl was written by
+
+ Sven Verdoolaege
+2006-2007 Leiden Institute of Advanced Computer Science
+ Universiteit Leiden
+ Niels Bohrweg 1
+ 2333 CA Leiden
+ The Netherlands
+2008-2009 K.U.Leuven
+ Departement Computerwetenschappen
+ Celestijnenlaan 200A
+ B-3001 Leuven
+ Belgium
+2010-2011 INRIA Saclay - Ile-de-France
+ Parc Club Orsay Universite, ZAC des vignes, 4 rue Jacques Monod
+ 91893 Orsay
+ France
+2011-2012 consultant for Leiden Institute of Advanced Computer Science
+2012-2014 Ecole Normale Superieure
+ 45 rue d'Ulm, 75230 Paris
+ France
+2014-2015 INRIA Rocquencourt
+ Domaine de Voluceau - Rocquencourt, B.P. 105
+ 78153 Le Chesnay
+ France
+2015-2020 Polly Labs
+2018-2020 Cerebras Systems
+ 175 S San Antonio Rd
+ Los Altos, CA
+ USA
+
+Contributions by
+
+Mythri Alle
+Riyadh Baghdadi
+Serge Belyshev
+Albert Cohen
+Ray Donnelly
+Johannes Doerfert
+Andi Drebes
+Ron Estrin
+Clement Foyer
+Armin Groesslinger
+Tobias Grosser
+Frederik Harwath
+Alexandre Isoard
+Andreas Kloeckner
+Michael Kruse
+Manjunath Kudlur
+Alexander Matz
+Chielo Newctle
+Sebastian Pop
+Louis-Noel Pouchet
+Benoit Pradelle
+Uday Bondhugula
+Andreas Simbuerger
+Tianjiao Sun
+Malhar Thakkar
+Sergei Trofimovich
+Miheer Vaidya
+Sven van Haastregt
+Oleksandr Zinenko
+
+The merge sort implementation was written by Jeffrey Stedfast.
+
+
+====================MIT====================
+// <license>
+// The MIT License (MIT)
+
+
+====================MIT====================
+// Licensed under the MIT license.
+
+
+====================MIT====================
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the "Software"),
+// to deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+
+====================Public-Domain====================
+ * [0] https://hyperelliptic.org/nacl/nacl-20110221.tar.bz2 (public domain
+ * code)
+
+
+====================Public-Domain====================
+ * Homepage:
+ * http://openwall.info/wiki/people/solar/software/public-domain-source-code/md5
+ *
+ * Author:
+ * Alexander Peslyak, better known as Solar Designer <solar at openwall.com>
+ *
+ * This software was written by Alexander Peslyak in 2001. No copyright is
+ * claimed, and the software is hereby placed in the public domain.
+ * In case this attempt to disclaim copyright and place the software in the
+ * public domain is deemed null and void, then the software is
+ * Copyright (c) 2001 Alexander Peslyak and it is hereby released to the
+ * general public under the following terms:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted.
+ *
+ * There's ABSOLUTELY NO WARRANTY, express or implied.
+
+
+====================Public-Domain====================
+// This code is taken from public domain
+
+
+====================Public-Domain AND CC0-1.0====================
+|* Released into the public domain with CC0 1.0 *|
+
+
+====================Unicode====================
+ * Distributed under the Terms of Use in
+ * http://www.unicode.org/copyright.html.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of the Unicode data files and any associated documentation
+ * (the "Data Files") or Unicode software and any associated documentation
+ * (the "Software") to deal in the Data Files or Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, and/or sell copies of
+ * the Data Files or Software, and to permit persons to whom the Data Files
+ * or Software are furnished to do so, provided that
+ * (a) this copyright and permission notice appear with all copies
+ * of the Data Files or Software,
+ * (b) this copyright and permission notice appear in associated
+ * documentation, and
+ * (c) there is clear notice in each modified Data File or in the Software
+ * as well as in the documentation associated with the Data File(s) or
+ * Software that the data or software has been modified.
+ *
+ * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF
+ * ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
+ * WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+ * IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS
+ * NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL
+ * DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+ *
+ * Except as contained in this notice, the name of a copyright holder
+ * shall not be used in advertising or otherwise to promote the sale,
+ * use or other dealings in these Data Files or Software without prior
+ * written authorization of the copyright holder.
diff --git a/contrib/libs/llvm16/include/llvm-c/module.modulemap b/contrib/libs/llvm16/include/llvm-c/module.modulemap
new file mode 100644
index 0000000000..a456119595
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm-c/module.modulemap
@@ -0,0 +1,4 @@
+module LLVM_C {
+ umbrella "."
+ module * { export * }
+}
diff --git a/contrib/libs/llvm16/include/llvm/ADT/IntervalTree.h b/contrib/libs/llvm16/include/llvm/ADT/IntervalTree.h
new file mode 100644
index 0000000000..df39d72cfa
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/ADT/IntervalTree.h
@@ -0,0 +1,704 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- IntervalTree.h ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements an interval tree.
+//
+// Further information:
+// https://en.wikipedia.org/wiki/Interval_tree
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_INTERVALTREE_H
+#define LLVM_ADT_INTERVALTREE_H
+
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <iterator>
+
+// IntervalTree is a light tree data structure to hold intervals. It allows
+// finding all intervals that overlap with any given point. At this time,
+// it does not support any deletion or rebalancing operations.
+//
+// The IntervalTree is designed to be set up once, and then queried without
+// any further additions.
+//
+// Synopsis:
+// Closed intervals delimited by PointT objects are mapped to ValueT objects.
+//
+// Restrictions:
+// PointT must be a fundamental type.
+// ValueT must be a fundamental or pointer type.
+//
+// template <typename PointT, typename ValueT, typename DataT>
+// class IntervalTree {
+// public:
+//
+// IntervalTree();
+// ~IntervalTree():
+//
+// using IntervalReferences = SmallVector<IntervalData *>;
+//
+// void create();
+// void insert(PointT Left, PointT Right, ValueT Value);
+//
+// IntervalReferences getContaining(PointT Point);
+// static void sortIntervals(IntervalReferences &Intervals, Sorting Sort);
+//
+// find_iterator begin(PointType Point) const;
+// find_iterator end() const;
+//
+// bool empty() const;
+// void clear();
+//
+// void print(raw_ostream &OS, bool HexFormat = true);
+// };
+//
+//===----------------------------------------------------------------------===//
+//
+// In the below given dataset
+//
+// [a, b] <- (x)
+//
+// 'a' and 'b' describe a range and 'x' the value for that interval.
+//
+// The following data are purely for illustrative purposes:
+//
+// [30, 35] <- (3035), [39, 50] <- (3950), [55, 61] <- (5561),
+// [31, 56] <- (3156), [12, 21] <- (1221), [25, 41] <- (2541),
+// [49, 65] <- (4965), [71, 79] <- (7179), [11, 16] <- (1116),
+// [20, 30] <- (2030), [36, 54] <- (3654), [60, 70] <- (6070),
+// [74, 80] <- (7480), [15, 40] <- (1540), [43, 43] <- (4343),
+// [50, 75] <- (5075), [10, 85] <- (1085)
+//
+// The data represents a set of overlapping intervals:
+//
+// 30--35 39------------50 55----61
+// 31------------------------56
+// 12--------21 25------------41 49-------------65 71-----79
+// 11----16 20-----30 36----------------54 60------70 74---- 80
+// 15---------------------40 43--43 50--------------------75
+// 10----------------------------------------------------------------------85
+//
+// The items are stored in a binary tree with each node storing:
+//
+// MP: A middle point.
+// IL: All intervals whose left value are completely to the left of the middle
+// point. They are sorted in ascending order by their beginning point.
+// IR: All intervals whose right value are completely to the right of the
+// middle point. They are sorted in descending order by their ending point.
+// LS: Left subtree.
+// RS: Right subtree.
+//
+// As IL and IR will contain the same intervals, in order to optimize space,
+// instead of storing intervals on each node, we use two vectors that will
+// contain the intervals described by IL and IR. Each node will contain an
+// index into that vector (global bucket), to indicate the beginning of the
+// intervals assigned to the node.
+//
+// The following is the output from print():
+//
+// 0: MP:43 IR [10,85] [31,56] [36,54] [39,50] [43,43]
+// 0: MP:43 IL [10,85] [31,56] [36,54] [39,50] [43,43]
+// 1: MP:25 IR [25,41] [15,40] [20,30]
+// 1: MP:25 IL [15,40] [20,30] [25,41]
+// 2: MP:15 IR [12,21] [11,16]
+// 2: MP:15 IL [11,16] [12,21]
+// 2: MP:36 IR []
+// 2: MP:36 IL []
+// 3: MP:31 IR [30,35]
+// 3: MP:31 IL [30,35]
+// 1: MP:61 IR [50,75] [60,70] [49,65] [55,61]
+// 1: MP:61 IL [49,65] [50,75] [55,61] [60,70]
+// 2: MP:74 IR [74,80] [71,79]
+// 2: MP:74 IL [71,79] [74,80]
+//
+// with:
+// 0: Root Node.
+// MP: Middle point.
+// IL: Intervals to the left (in ascending order by beginning point).
+// IR: Intervals to the right (in descending order by ending point).
+//
+// Root
+// |
+// V
+// +------------MP:43------------+
+// | IL IR |
+// | [10,85] [10,85] |
+// LS | [31,56] [31,56] | RS
+// | [36,54] [36,54] |
+// | [39,50] [39,50] |
+// | [43,43] [43,43] |
+// V V
+// +------------MP:25------------+ MP:61------------+
+// | IL IR | IL IR |
+// | [15,40] [25,41] | [49,65] [50,75] |
+// LS | [20,30] [15,40] | RS [50,75] [60,70] | RS
+// | [25,41] [20,30] | [55,61] [49,65] |
+// | | [60,70] [55,61] |
+// V V V
+// MP:15 +-------MP:36 MP:74
+// IL IR | IL IR IL IR
+// [11,16] [12,21] LS | [] [] [71,79] [74,80]
+// [12,21] [11,16] | [74,80] [71,79]
+// V
+// MP:31
+// IL IR
+// [30,35] [30,35]
+//
+// The creation of an interval tree is done in 2 steps:
+// 1) Insert the interval items by calling
+// void insert(PointT Left, PointT Right, ValueT Value);
+// Left, Right: the interval left and right limits.
+// Value: the data associated with that specific interval.
+//
+// 2) Create the interval tree by calling
+// void create();
+//
+// Once the tree is created, it is switched to query mode.
+// Query the tree by using iterators or container.
+//
+// a) Iterators over intervals overlapping the given point with very weak
+// ordering guarantees.
+// find_iterator begin(PointType Point) const;
+// find_iterator end() const;
+// Point: a target point to be tested for inclusion in any interval.
+//
+// b) Container:
+// IntervalReferences getContaining(PointT Point);
+// Point: a target point to be tested for inclusion in any interval.
+// Returns vector with all the intervals containing the target point.
+//
+// The returned intervals are in their natural tree location. They can
+// be sorted:
+//
+// static void sortIntervals(IntervalReferences &Intervals, Sorting Sort);
+//
+// Ability to print the constructed interval tree:
+// void print(raw_ostream &OS, bool HexFormat = true);
+// Display the associated data in hexadecimal format.
+
+namespace llvm {
+
+//===----------------------------------------------------------------------===//
+//--- IntervalData ----//
+//===----------------------------------------------------------------------===//
+/// An interval data composed by a \a Left and \a Right points and an
+/// associated \a Value.
+/// \a PointT corresponds to the interval endpoints type.
+/// \a ValueT corresponds to the interval value type.
+template <typename PointT, typename ValueT> class IntervalData {
+protected:
+ using PointType = PointT;
+ using ValueType = ValueT;
+
+private:
+ PointType Left;
+ PointType Right;
+ ValueType Value;
+
+public:
+ IntervalData() = delete;
+ IntervalData(PointType Left, PointType Right, ValueType Value)
+ : Left(Left), Right(Right), Value(Value) {
+ assert(Left <= Right && "'Left' must be less or equal to 'Right'");
+ }
+ virtual ~IntervalData() = default;
+ PointType left() const { return Left; }
+ PointType right() const { return Right; }
+ ValueType value() const { return Value; }
+
+ /// Return true if \a Point is inside the left bound of closed interval \a
+ /// [Left;Right]. This is Left <= Point for closed intervals.
+ bool left(const PointType &Point) const { return left() <= Point; }
+
+ /// Return true if \a Point is inside the right bound of closed interval \a
+ /// [Left;Right]. This is Point <= Right for closed intervals.
+ bool right(const PointType &Point) const { return Point <= right(); }
+
+ /// Return true when \a Point is contained in interval \a [Left;Right].
+ /// This is Left <= Point <= Right for closed intervals.
+ bool contains(const PointType &Point) const {
+ return left(Point) && right(Point);
+ }
+};
+
+//===----------------------------------------------------------------------===//
+//--- IntervalTree ----//
+//===----------------------------------------------------------------------===//
+// Helper class template that is used by the IntervalTree to ensure that one
+// does instantiate using only fundamental and/or pointer types.
+template <typename T>
+using PointTypeIsValid = std::bool_constant<std::is_fundamental<T>::value>;
+
+template <typename T>
+using ValueTypeIsValid = std::bool_constant<std::is_fundamental<T>::value ||
+ std::is_pointer<T>::value>;
+
+template <typename PointT, typename ValueT,
+ typename DataT = IntervalData<PointT, ValueT>>
+class IntervalTree {
+ static_assert(PointTypeIsValid<PointT>::value,
+ "PointT must be a fundamental type");
+ static_assert(ValueTypeIsValid<ValueT>::value,
+ "ValueT must be a fundamental or pointer type");
+
+public:
+ using PointType = PointT;
+ using ValueType = ValueT;
+ using DataType = DataT;
+ using Allocator = BumpPtrAllocator;
+
+ enum class Sorting { Ascending, Descending };
+ using IntervalReferences = SmallVector<const DataType *, 4>;
+
+private:
+ using IntervalVector = SmallVector<DataType, 4>;
+ using PointsVector = SmallVector<PointType, 4>;
+
+ class IntervalNode {
+ PointType MiddlePoint; // MP - Middle point.
+ IntervalNode *Left = nullptr; // LS - Left subtree.
+ IntervalNode *Right = nullptr; // RS - Right subtree.
+ unsigned BucketIntervalsStart = 0; // Starting index in global bucket.
+ unsigned BucketIntervalsSize = 0; // Size of bucket.
+
+ public:
+ PointType middle() const { return MiddlePoint; }
+ unsigned start() const { return BucketIntervalsStart; }
+ unsigned size() const { return BucketIntervalsSize; }
+
+ IntervalNode(PointType Point, unsigned Start)
+ : MiddlePoint(Point), BucketIntervalsStart(Start) {}
+
+ friend IntervalTree;
+ };
+
+ Allocator &NodeAllocator; // Allocator used for creating interval nodes.
+ IntervalNode *Root = nullptr; // Interval tree root.
+ IntervalVector Intervals; // Storage for each interval and all of the fields
+ // point back into it.
+ PointsVector EndPoints; // Sorted left and right points of all the intervals.
+
+ // These vectors provide storage that nodes carve buckets of overlapping
+ // intervals out of. All intervals are recorded on each vector.
+ // The bucket with the intervals associated to a node, is determined by
+ // the fields 'BucketIntervalStart' and 'BucketIntervalSize' in the node.
+ // The buckets in the first vector are sorted in ascending order using
+ // the left value and the buckets in the second vector are sorted in
+ // descending order using the right value. Every interval in a bucket
+ // contains the middle point for the node.
+ IntervalReferences IntervalsLeft; // Intervals to the left of middle point.
+ IntervalReferences IntervalsRight; // Intervals to the right of middle point.
+
+ // Working vector used during the tree creation to sort the intervals. It is
+ // cleared once the tree is created.
+ IntervalReferences References;
+
+ /// Recursively delete the constructed tree.
+ void deleteTree(IntervalNode *Node) {
+ if (Node) {
+ deleteTree(Node->Left);
+ deleteTree(Node->Right);
+ Node->~IntervalNode();
+ NodeAllocator.Deallocate(Node);
+ }
+ }
+
+ /// Print the interval list (left and right) for a given \a Node.
+ static void printList(raw_ostream &OS, IntervalReferences &IntervalSet,
+ unsigned Start, unsigned Size, bool HexFormat = true) {
+ assert(Start + Size <= IntervalSet.size() &&
+ "Start + Size must be in bounds of the IntervalSet");
+ const char *Format = HexFormat ? "[0x%08x,0x%08x] " : "[%2d,%2d] ";
+ if (Size) {
+ for (unsigned Position = Start; Position < Start + Size; ++Position)
+ OS << format(Format, IntervalSet[Position]->left(),
+ IntervalSet[Position]->right());
+ } else {
+ OS << "[]";
+ }
+ OS << "\n";
+ }
+
+ /// Print an interval tree \a Node.
+ void printNode(raw_ostream &OS, unsigned Level, IntervalNode *Node,
+ bool HexFormat = true) {
+ const char *Format = HexFormat ? "MP:0x%08x " : "MP:%2d ";
+ auto PrintNodeData = [&](StringRef Text, IntervalReferences &IntervalSet) {
+ OS << format("%5d: ", Level);
+ OS.indent(Level * 2);
+ OS << format(Format, Node->middle()) << Text << " ";
+ printList(OS, IntervalSet, Node->start(), Node->size(), HexFormat);
+ };
+
+ PrintNodeData("IR", IntervalsRight);
+ PrintNodeData("IL", IntervalsLeft);
+ }
+
+ /// Recursively print all the interval nodes.
+ void printTree(raw_ostream &OS, unsigned Level, IntervalNode *Node,
+ bool HexFormat = true) {
+ if (Node) {
+ printNode(OS, Level, Node, HexFormat);
+ ++Level;
+ printTree(OS, Level, Node->Left, HexFormat);
+ printTree(OS, Level, Node->Right, HexFormat);
+ }
+ }
+
+ /// Recursively construct the interval tree.
+ /// IntervalsSize: Number of intervals that have been processed and it will
+ /// be used as the start for the intervals bucket for a node.
+ /// PointsBeginIndex, PointsEndIndex: Determine the range into the EndPoints
+ /// vector of end points to be processed.
+ /// ReferencesBeginIndex, ReferencesSize: Determine the range into the
+ /// intervals being processed.
+ IntervalNode *createTree(unsigned &IntervalsSize, int PointsBeginIndex,
+ int PointsEndIndex, int ReferencesBeginIndex,
+ int ReferencesSize) {
+ // We start by taking the entire range of all the intervals and dividing
+ // it in half at x_middle (in practice, x_middle should be picked to keep
+ // the tree relatively balanced).
+ // This gives three sets of intervals, those completely to the left of
+ // x_middle which we'll call S_left, those completely to the right of
+ // x_middle which we'll call S_right, and those overlapping x_middle
+ // which we'll call S_middle.
+ // The intervals in S_left and S_right are recursively divided in the
+ // same manner until there are no intervals remaining.
+
+ if (PointsBeginIndex > PointsEndIndex ||
+ ReferencesBeginIndex >= ReferencesSize)
+ return nullptr;
+
+ int MiddleIndex = (PointsBeginIndex + PointsEndIndex) / 2;
+ PointType MiddlePoint = EndPoints[MiddleIndex];
+
+ unsigned NewBucketStart = IntervalsSize;
+ unsigned NewBucketSize = 0;
+ int ReferencesRightIndex = ReferencesSize;
+
+ IntervalNode *Root =
+ new (NodeAllocator) IntervalNode(MiddlePoint, NewBucketStart);
+
+ // A quicksort implementation where all the intervals that overlap
+ // with the pivot are put into the "bucket", and "References" is the
+ // partition space where we recursively sort the remaining intervals.
+ for (int Index = ReferencesBeginIndex; Index < ReferencesRightIndex;) {
+
+ // Current interval contains the middle point.
+ if (References[Index]->contains(MiddlePoint)) {
+ IntervalsLeft[IntervalsSize] = References[Index];
+ IntervalsRight[IntervalsSize] = References[Index];
+ ++IntervalsSize;
+ Root->BucketIntervalsSize = ++NewBucketSize;
+
+ if (Index < --ReferencesRightIndex)
+ std::swap(References[Index], References[ReferencesRightIndex]);
+ if (ReferencesRightIndex < --ReferencesSize)
+ std::swap(References[ReferencesRightIndex],
+ References[ReferencesSize]);
+ continue;
+ }
+
+ if (References[Index]->left() > MiddlePoint) {
+ if (Index < --ReferencesRightIndex)
+ std::swap(References[Index], References[ReferencesRightIndex]);
+ continue;
+ }
+ ++Index;
+ }
+
+ // Sort intervals on the left and right of the middle point.
+ if (NewBucketSize > 1) {
+ // Sort the intervals in ascending order by their beginning point.
+ std::stable_sort(IntervalsLeft.begin() + NewBucketStart,
+ IntervalsLeft.begin() + NewBucketStart + NewBucketSize,
+ [](const DataType *LHS, const DataType *RHS) {
+ return LHS->left() < RHS->left();
+ });
+ // Sort the intervals in descending order by their ending point.
+ std::stable_sort(IntervalsRight.begin() + NewBucketStart,
+ IntervalsRight.begin() + NewBucketStart + NewBucketSize,
+ [](const DataType *LHS, const DataType *RHS) {
+ return LHS->right() > RHS->right();
+ });
+ }
+
+ if (PointsBeginIndex <= MiddleIndex - 1) {
+ Root->Left = createTree(IntervalsSize, PointsBeginIndex, MiddleIndex - 1,
+ ReferencesBeginIndex, ReferencesRightIndex);
+ }
+
+ if (MiddleIndex + 1 <= PointsEndIndex) {
+ Root->Right = createTree(IntervalsSize, MiddleIndex + 1, PointsEndIndex,
+ ReferencesRightIndex, ReferencesSize);
+ }
+
+ return Root;
+ }
+
+public:
+ class find_iterator {
+ public:
+ using iterator_category = std::forward_iterator_tag;
+ using value_type = DataType;
+ using difference_type = DataType;
+ using pointer = DataType *;
+ using reference = DataType &;
+
+ private:
+ const IntervalReferences *AscendingBuckets = nullptr;
+ const IntervalReferences *DescendingBuckets = nullptr;
+
+ // Current node and index while traversing the intervals that contain
+ // the reference point.
+ IntervalNode *Node = nullptr;
+ PointType Point;
+ unsigned Index = 0;
+
+ // For the current node, check if we have intervals that contain the
+ // reference point. We return when the node does have intervals that
+ // contain such point. Otherwise we keep descending on that branch.
+ void initNode() {
+ Index = 0;
+ while (Node) {
+ // Return if the reference point is the same as the middle point or
+ // the current node doesn't have any intervals at all.
+ if (Point == Node->middle()) {
+ if (Node->size() == 0) {
+ // No intervals that contain the reference point.
+ Node = nullptr;
+ }
+ return;
+ }
+
+ if (Point < Node->middle()) {
+ // The reference point can be at the left or right of the middle
+ // point. Return if the current node has intervals that contain the
+ // reference point; otherwise descend on the respective branch.
+ if (Node->size() && (*AscendingBuckets)[Node->start()]->left(Point)) {
+ return;
+ }
+ Node = Node->Left;
+ } else {
+ if (Node->size() &&
+ (*DescendingBuckets)[Node->start()]->right(Point)) {
+ return;
+ }
+ Node = Node->Right;
+ }
+ }
+ }
+
+ // Given the current node (which was initialized by initNode), move to
+ // the next interval in the list of intervals that contain the reference
+ // point. Otherwise move to the next node, as the intervals contained
+ // in that node, can contain the reference point.
+ void nextInterval() {
+ // If there are available intervals that contain the reference point,
+ // traverse them; otherwise move to the left or right node, depending
+ // on the middle point value.
+ if (++Index < Node->size()) {
+ if (Node->middle() == Point)
+ return;
+ if (Point < Node->middle()) {
+ // Reference point is on the left.
+ if (!(*AscendingBuckets)[Node->start() + Index]->left(Point)) {
+ // The intervals don't contain the reference point. Move to the
+ // next node, preserving the descending order.
+ Node = Node->Left;
+ initNode();
+ }
+ } else {
+ // Reference point is on the right.
+ if (!(*DescendingBuckets)[Node->start() + Index]->right(Point)) {
+ // The intervals don't contain the reference point. Move to the
+ // next node, preserving the ascending order.
+ Node = Node->Right;
+ initNode();
+ }
+ }
+ } else {
+ // We have traversed all the intervals in the current node.
+ if (Point == Node->middle()) {
+ Node = nullptr;
+ Index = 0;
+ return;
+ }
+ // Select a branch based on the middle point.
+ Node = Point < Node->middle() ? Node->Left : Node->Right;
+ initNode();
+ }
+ }
+
+ find_iterator() = default;
+ explicit find_iterator(const IntervalReferences *Left,
+ const IntervalReferences *Right, IntervalNode *Node,
+ PointType Point)
+ : AscendingBuckets(Left), DescendingBuckets(Right), Node(Node),
+ Point(Point), Index(0) {
+ initNode();
+ }
+
+ const DataType *current() const {
+ return (Point <= Node->middle())
+ ? (*AscendingBuckets)[Node->start() + Index]
+ : (*DescendingBuckets)[Node->start() + Index];
+ }
+
+ public:
+ find_iterator &operator++() {
+ nextInterval();
+ return *this;
+ }
+
+ find_iterator operator++(int) {
+ find_iterator Iter(*this);
+ nextInterval();
+ return Iter;
+ }
+
+ /// Dereference operators.
+ const DataType *operator->() const { return current(); }
+ const DataType &operator*() const { return *(current()); }
+
+ /// Comparison operators.
+ friend bool operator==(const find_iterator &LHS, const find_iterator &RHS) {
+ return (!LHS.Node && !RHS.Node && !LHS.Index && !RHS.Index) ||
+ (LHS.Point == RHS.Point && LHS.Node == RHS.Node &&
+ LHS.Index == RHS.Index);
+ }
+ friend bool operator!=(const find_iterator &LHS, const find_iterator &RHS) {
+ return !(LHS == RHS);
+ }
+
+ friend IntervalTree;
+ };
+
+private:
+ find_iterator End;
+
+public:
+ explicit IntervalTree(Allocator &NodeAllocator)
+ : NodeAllocator(NodeAllocator) {}
+ ~IntervalTree() { clear(); }
+
+ /// Return true when no intervals are mapped.
+ bool empty() const { return Root == nullptr; }
+
+ /// Remove all entries.
+ void clear() {
+ deleteTree(Root);
+ Root = nullptr;
+ Intervals.clear();
+ IntervalsLeft.clear();
+ IntervalsRight.clear();
+ EndPoints.clear();
+ }
+
+ /// Add a mapping of [Left;Right] to \a Value.
+ void insert(PointType Left, PointType Right, ValueType Value) {
+ assert(empty() && "Invalid insertion. Interval tree already constructed.");
+ Intervals.emplace_back(Left, Right, Value);
+ }
+
+ /// Return all the intervals in their natural tree location, that
+ /// contain the given point.
+ IntervalReferences getContaining(PointType Point) const {
+ assert(!empty() && "Interval tree it is not constructed.");
+ IntervalReferences IntervalSet;
+ for (find_iterator Iter = find(Point), E = find_end(); Iter != E; ++Iter)
+ IntervalSet.push_back(const_cast<DataType *>(&(*Iter)));
+ return IntervalSet;
+ }
+
+ /// Sort the given intervals using the following sort options:
+ /// Ascending: return the intervals with the smallest at the front.
+ /// Descending: return the intervals with the biggest at the front.
+ static void sortIntervals(IntervalReferences &IntervalSet, Sorting Sort) {
+ std::stable_sort(IntervalSet.begin(), IntervalSet.end(),
+ [Sort](const DataType *RHS, const DataType *LHS) {
+ return Sort == Sorting::Ascending
+ ? (LHS->right() - LHS->left()) >
+ (RHS->right() - RHS->left())
+ : (LHS->right() - LHS->left()) <
+ (RHS->right() - RHS->left());
+ });
+ }
+
+ /// Print the interval tree.
+ /// When \a HexFormat is true, the interval tree interval ranges and
+ /// associated values are printed in hexadecimal format.
+ void print(raw_ostream &OS, bool HexFormat = true) {
+ printTree(OS, 0, Root, HexFormat);
+ }
+
+ /// Create the interval tree.
+ void create() {
+ assert(empty() && "Interval tree already constructed.");
+ // Sorted vector of unique end points values of all the intervals.
+ // Records references to the collected intervals.
+ SmallVector<PointType, 4> Points;
+ for (const DataType &Data : Intervals) {
+ Points.push_back(Data.left());
+ Points.push_back(Data.right());
+ References.push_back(std::addressof(Data));
+ }
+ std::stable_sort(Points.begin(), Points.end());
+ auto Last = std::unique(Points.begin(), Points.end());
+ Points.erase(Last, Points.end());
+
+ EndPoints.assign(Points.begin(), Points.end());
+
+ IntervalsLeft.resize(Intervals.size());
+ IntervalsRight.resize(Intervals.size());
+
+ // Given a set of n intervals, construct a data structure so that
+ // we can efficiently retrieve all intervals overlapping another
+ // interval or point.
+ unsigned IntervalsSize = 0;
+ Root =
+ createTree(IntervalsSize, /*PointsBeginIndex=*/0, EndPoints.size() - 1,
+ /*ReferencesBeginIndex=*/0, References.size());
+
+ // Save to clear this storage, as it used only to sort the intervals.
+ References.clear();
+ }
+
+ /// Iterator to start a find operation; it returns find_end() if the
+ /// tree has not been built.
+ /// There is no support to iterate over all the elements of the tree.
+ find_iterator find(PointType Point) const {
+ return empty()
+ ? find_end()
+ : find_iterator(&IntervalsLeft, &IntervalsRight, Root, Point);
+ }
+
+ /// Iterator to end find operation.
+ find_iterator find_end() const { return End; }
+};
+
+} // namespace llvm
+
+#endif // LLVM_ADT_INTERVALTREE_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/ADT/Optional.h b/contrib/libs/llvm16/include/llvm/ADT/Optional.h
new file mode 100644
index 0000000000..6854427bf7
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/ADT/Optional.h
@@ -0,0 +1,38 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- Optional.h - Simple variant for passing optional values --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file provides Optional, a template class modeled in the spirit of
+/// OCaml's 'opt' variant. The idea is to strongly type whether or not
+/// a value can be optional.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_OPTIONAL_H
+#define LLVM_ADT_OPTIONAL_H
+
+#include <optional>
+
+namespace llvm {
+// Legacy alias of llvm::Optional to std::optional.
+// FIXME: Remove this after LLVM 16.
+template <class T> using Optional = std::optional<T>;
+} // namespace llvm
+
+#endif // LLVM_ADT_OPTIONAL_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/ADT/TypeSwitch.h b/contrib/libs/llvm16/include/llvm/ADT/TypeSwitch.h
new file mode 100644
index 0000000000..cba4c37d72
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/ADT/TypeSwitch.h
@@ -0,0 +1,198 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- TypeSwitch.h - Switch functionality for RTTI casting -*- C++ -*-----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements the TypeSwitch template, which mimics a switch()
+/// statement whose cases are type names.
+///
+//===-----------------------------------------------------------------------===/
+
+#ifndef LLVM_ADT_TYPESWITCH_H
+#define LLVM_ADT_TYPESWITCH_H
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/Casting.h"
+#include <optional>
+
+namespace llvm {
+namespace detail {
+
+template <typename DerivedT, typename T> class TypeSwitchBase {
+public:
+ TypeSwitchBase(const T &value) : value(value) {}
+ TypeSwitchBase(TypeSwitchBase &&other) : value(other.value) {}
+ ~TypeSwitchBase() = default;
+
+ /// TypeSwitchBase is not copyable.
+ TypeSwitchBase(const TypeSwitchBase &) = delete;
+ void operator=(const TypeSwitchBase &) = delete;
+ void operator=(TypeSwitchBase &&other) = delete;
+
+ /// Invoke a case on the derived class with multiple case types.
+ template <typename CaseT, typename CaseT2, typename... CaseTs,
+ typename CallableT>
+ // This is marked always_inline and nodebug so it doesn't show up in stack
+ // traces at -O0 (or other optimization levels). Large TypeSwitch's are
+ // common, are equivalent to a switch, and don't add any value to stack
+ // traces.
+ LLVM_ATTRIBUTE_ALWAYS_INLINE LLVM_ATTRIBUTE_NODEBUG DerivedT &
+ Case(CallableT &&caseFn) {
+ DerivedT &derived = static_cast<DerivedT &>(*this);
+ return derived.template Case<CaseT>(caseFn)
+ .template Case<CaseT2, CaseTs...>(caseFn);
+ }
+
+ /// Invoke a case on the derived class, inferring the type of the Case from
+ /// the first input of the given callable.
+ /// Note: This inference rules for this overload are very simple: strip
+ /// pointers and references.
+ template <typename CallableT> DerivedT &Case(CallableT &&caseFn) {
+ using Traits = function_traits<std::decay_t<CallableT>>;
+ using CaseT = std::remove_cv_t<std::remove_pointer_t<
+ std::remove_reference_t<typename Traits::template arg_t<0>>>>;
+
+ DerivedT &derived = static_cast<DerivedT &>(*this);
+ return derived.template Case<CaseT>(std::forward<CallableT>(caseFn));
+ }
+
+protected:
+ /// Trait to check whether `ValueT` provides a 'dyn_cast' method with type
+ /// `CastT`.
+ template <typename ValueT, typename CastT>
+ using has_dyn_cast_t =
+ decltype(std::declval<ValueT &>().template dyn_cast<CastT>());
+
+ /// Attempt to dyn_cast the given `value` to `CastT`. This overload is
+ /// selected if `value` already has a suitable dyn_cast method.
+ template <typename CastT, typename ValueT>
+ static decltype(auto) castValue(
+ ValueT &&value,
+ std::enable_if_t<is_detected<has_dyn_cast_t, ValueT, CastT>::value> * =
+ nullptr) {
+ return value.template dyn_cast<CastT>();
+ }
+
+ /// Attempt to dyn_cast the given `value` to `CastT`. This overload is
+ /// selected if llvm::dyn_cast should be used.
+ template <typename CastT, typename ValueT>
+ static decltype(auto) castValue(
+ ValueT &&value,
+ std::enable_if_t<!is_detected<has_dyn_cast_t, ValueT, CastT>::value> * =
+ nullptr) {
+ return dyn_cast<CastT>(value);
+ }
+
+ /// The root value we are switching on.
+ const T value;
+};
+} // end namespace detail
+
+/// This class implements a switch-like dispatch statement for a value of 'T'
+/// using dyn_cast functionality. Each `Case<T>` takes a callable to be invoked
+/// if the root value isa<T>, the callable is invoked with the result of
+/// dyn_cast<T>() as a parameter.
+///
+/// Example:
+/// Operation *op = ...;
+/// LogicalResult result = TypeSwitch<Operation *, LogicalResult>(op)
+/// .Case<ConstantOp>([](ConstantOp op) { ... })
+/// .Default([](Operation *op) { ... });
+///
+template <typename T, typename ResultT = void>
+class TypeSwitch : public detail::TypeSwitchBase<TypeSwitch<T, ResultT>, T> {
+public:
+ using BaseT = detail::TypeSwitchBase<TypeSwitch<T, ResultT>, T>;
+ using BaseT::BaseT;
+ using BaseT::Case;
+ TypeSwitch(TypeSwitch &&other) = default;
+
+ /// Add a case on the given type.
+ template <typename CaseT, typename CallableT>
+ TypeSwitch<T, ResultT> &Case(CallableT &&caseFn) {
+ if (result)
+ return *this;
+
+ // Check to see if CaseT applies to 'value'.
+ if (auto caseValue = BaseT::template castValue<CaseT>(this->value))
+ result.emplace(caseFn(caseValue));
+ return *this;
+ }
+
+ /// As a default, invoke the given callable within the root value.
+ template <typename CallableT>
+ [[nodiscard]] ResultT Default(CallableT &&defaultFn) {
+ if (result)
+ return std::move(*result);
+ return defaultFn(this->value);
+ }
+ /// As a default, return the given value.
+ [[nodiscard]] ResultT Default(ResultT defaultResult) {
+ if (result)
+ return std::move(*result);
+ return defaultResult;
+ }
+
+ [[nodiscard]] operator ResultT() {
+ assert(result && "Fell off the end of a type-switch");
+ return std::move(*result);
+ }
+
+private:
+ /// The pointer to the result of this switch statement, once known,
+ /// null before that.
+ std::optional<ResultT> result;
+};
+
+/// Specialization of TypeSwitch for void returning callables.
+template <typename T>
+class TypeSwitch<T, void>
+ : public detail::TypeSwitchBase<TypeSwitch<T, void>, T> {
+public:
+ using BaseT = detail::TypeSwitchBase<TypeSwitch<T, void>, T>;
+ using BaseT::BaseT;
+ using BaseT::Case;
+ TypeSwitch(TypeSwitch &&other) = default;
+
+ /// Add a case on the given type.
+ template <typename CaseT, typename CallableT>
+ TypeSwitch<T, void> &Case(CallableT &&caseFn) {
+ if (foundMatch)
+ return *this;
+
+ // Check to see if any of the types apply to 'value'.
+ if (auto caseValue = BaseT::template castValue<CaseT>(this->value)) {
+ caseFn(caseValue);
+ foundMatch = true;
+ }
+ return *this;
+ }
+
+ /// As a default, invoke the given callable within the root value.
+ template <typename CallableT> void Default(CallableT &&defaultFn) {
+ if (!foundMatch)
+ defaultFn(this->value);
+ }
+
+private:
+ /// A flag detailing if we have already found a match.
+ bool foundMatch = false;
+};
+} // end namespace llvm
+
+#endif // LLVM_ADT_TYPESWITCH_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/Bitcode/BitcodeConvenience.h b/contrib/libs/llvm16/include/llvm/Bitcode/BitcodeConvenience.h
new file mode 100644
index 0000000000..be880b8870
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/Bitcode/BitcodeConvenience.h
@@ -0,0 +1,498 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/Bitcode/BitcodeConvenience.h - Convenience Wrappers -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file Convenience wrappers for the LLVM bitcode format and bitstream APIs.
+///
+/// This allows you to use a sort of DSL to declare and use bitcode
+/// abbreviations and records. Example:
+///
+/// \code
+/// using Metadata = BCRecordLayout<
+/// METADATA_ID, // ID
+/// BCFixed<16>, // Module format major version
+/// BCFixed<16>, // Module format minor version
+/// BCBlob // misc. version information
+/// >;
+/// Metadata metadata(Out);
+/// metadata.emit(ScratchRecord, VERSION_MAJOR, VERSION_MINOR, Data);
+/// \endcode
+///
+/// For details on the bitcode format, see
+/// http://llvm.org/docs/BitCodeFormat.html
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_BITCODE_BITCODECONVENIENCE_H
+#define LLVM_BITCODE_BITCODECONVENIENCE_H
+
+#include "llvm/Bitstream/BitCodes.h"
+#include "llvm/Bitstream/BitstreamWriter.h"
+#include <cstdint>
+#include <optional>
+
+namespace llvm {
+namespace detail {
+/// Convenience base for all kinds of bitcode abbreviation fields.
+///
+/// This just defines common properties queried by the metaprogramming.
+template <bool Compound = false> class BCField {
+public:
+ static const bool IsCompound = Compound;
+
+ /// Asserts that the given data is a valid value for this field.
+ template <typename T> static void assertValid(const T &data) {}
+
+ /// Converts a raw numeric representation of this value to its preferred
+ /// type.
+ template <typename T> static T convert(T rawValue) { return rawValue; }
+};
+} // namespace detail
+
+/// Represents a literal operand in a bitcode record.
+///
+/// The value of a literal operand is the same for all instances of the record,
+/// so it is only emitted in the abbreviation definition.
+///
+/// Note that because this uses a compile-time template, you cannot have a
+/// literal operand that is fixed at run-time without dropping down to the
+/// raw LLVM APIs.
+template <uint64_t Value> class BCLiteral : public detail::BCField<> {
+public:
+ static void emitOp(llvm::BitCodeAbbrev &abbrev) {
+ abbrev.Add(llvm::BitCodeAbbrevOp(Value));
+ }
+
+ template <typename T> static void assertValid(const T &data) {
+ assert(data == Value && "data value does not match declared literal value");
+ }
+};
+
+/// Represents a fixed-width value in a bitcode record.
+///
+/// Note that the LLVM bitcode format only supports unsigned values.
+template <unsigned Width> class BCFixed : public detail::BCField<> {
+public:
+ static_assert(Width <= 64, "fixed-width field is too large");
+
+ static void emitOp(llvm::BitCodeAbbrev &abbrev) {
+ abbrev.Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::Fixed, Width));
+ }
+
+ static void assertValid(const bool &data) {
+ assert(llvm::isUInt<Width>(data) &&
+ "data value does not fit in the given bit width");
+ }
+
+ template <typename T> static void assertValid(const T &data) {
+ assert(data >= 0 && "cannot encode signed integers");
+ assert(llvm::isUInt<Width>(data) &&
+ "data value does not fit in the given bit width");
+ }
+};
+
+/// Represents a variable-width value in a bitcode record.
+///
+/// The \p Width parameter should include the continuation bit.
+///
+/// Note that the LLVM bitcode format only supports unsigned values.
+template <unsigned Width> class BCVBR : public detail::BCField<> {
+ static_assert(Width >= 2, "width does not have room for continuation bit");
+
+public:
+ static void emitOp(llvm::BitCodeAbbrev &abbrev) {
+ abbrev.Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::VBR, Width));
+ }
+
+ template <typename T> static void assertValid(const T &data) {
+ assert(data >= 0 && "cannot encode signed integers");
+ }
+};
+
+/// Represents a character encoded in LLVM's Char6 encoding.
+///
+/// This format is suitable for encoding decimal numbers (without signs or
+/// exponents) and C identifiers (without dollar signs), but not much else.
+///
+/// \sa http://llvm.org/docs/BitCodeFormat.html#char6-encoded-value
+class BCChar6 : public detail::BCField<> {
+public:
+ static void emitOp(llvm::BitCodeAbbrev &abbrev) {
+ abbrev.Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::Char6));
+ }
+
+ template <typename T> static void assertValid(const T &data) {
+ assert(llvm::BitCodeAbbrevOp::isChar6(data) && "invalid Char6 data");
+ }
+
+ template <typename T> char convert(T rawValue) {
+ return static_cast<char>(rawValue);
+ }
+};
+
+/// Represents an untyped blob of bytes.
+///
+/// If present, this must be the last field in a record.
+class BCBlob : public detail::BCField<true> {
+public:
+ static void emitOp(llvm::BitCodeAbbrev &abbrev) {
+ abbrev.Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::Blob));
+ }
+};
+
+/// Represents an array of some other type.
+///
+/// If present, this must be the last field in a record.
+template <typename ElementTy> class BCArray : public detail::BCField<true> {
+ static_assert(!ElementTy::IsCompound, "arrays can only contain scalar types");
+
+public:
+ static void emitOp(llvm::BitCodeAbbrev &abbrev) {
+ abbrev.Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::Array));
+ ElementTy::emitOp(abbrev);
+ }
+};
+
+namespace detail {
+/// Attaches the last field to an abbreviation.
+///
+/// This is the base case for \c emitOps.
+///
+/// \sa BCRecordLayout::emitAbbrev
+template <typename FieldTy> static void emitOps(llvm::BitCodeAbbrev &abbrev) {
+ FieldTy::emitOp(abbrev);
+}
+
+/// Attaches fields to an abbreviation.
+///
+/// This is the recursive case for \c emitOps.
+///
+/// \sa BCRecordLayout::emitAbbrev
+template <typename FieldTy, typename Next, typename... Rest>
+static void emitOps(llvm::BitCodeAbbrev &abbrev) {
+ static_assert(!FieldTy::IsCompound,
+ "arrays and blobs may not appear in the middle of a record");
+ FieldTy::emitOp(abbrev);
+ emitOps<Next, Rest...>(abbrev);
+}
+
+/// Helper class for dealing with a scalar element in the middle of a record.
+///
+/// \sa BCRecordLayout
+template <typename ElementTy, typename... Fields> class BCRecordCoding {
+public:
+ template <typename BufferTy, typename ElementDataTy, typename... DataTy>
+ static void emit(llvm::BitstreamWriter &Stream, BufferTy &buffer,
+ unsigned code, ElementDataTy element, DataTy &&...data) {
+ static_assert(!ElementTy::IsCompound,
+ "arrays and blobs may not appear in the middle of a record");
+ ElementTy::assertValid(element);
+ buffer.push_back(element);
+ BCRecordCoding<Fields...>::emit(Stream, buffer, code,
+ std::forward<DataTy>(data)...);
+ }
+
+ template <typename T, typename ElementDataTy, typename... DataTy>
+ static void read(ArrayRef<T> buffer, ElementDataTy &element,
+ DataTy &&...data) {
+ assert(!buffer.empty() && "too few elements in buffer");
+ element = ElementTy::convert(buffer.front());
+ BCRecordCoding<Fields...>::read(buffer.slice(1),
+ std::forward<DataTy>(data)...);
+ }
+
+ template <typename T, typename... DataTy>
+ static void read(ArrayRef<T> buffer, std::nullopt_t, DataTy &&...data) {
+ assert(!buffer.empty() && "too few elements in buffer");
+ BCRecordCoding<Fields...>::read(buffer.slice(1),
+ std::forward<DataTy>(data)...);
+ }
+};
+
+/// Helper class for dealing with a scalar element at the end of a record.
+///
+/// This has a separate implementation because up until now we've only been
+/// \em building the record (into a data buffer), and now we need to hand it
+/// off to the BitstreamWriter to be emitted.
+///
+/// \sa BCRecordLayout
+template <typename ElementTy> class BCRecordCoding<ElementTy> {
+public:
+ template <typename BufferTy, typename DataTy>
+ static void emit(llvm::BitstreamWriter &Stream, BufferTy &buffer,
+ unsigned code, const DataTy &data) {
+ static_assert(!ElementTy::IsCompound,
+ "arrays and blobs need special handling");
+ ElementTy::assertValid(data);
+ buffer.push_back(data);
+ Stream.EmitRecordWithAbbrev(code, buffer);
+ }
+
+ template <typename T, typename DataTy>
+ static void read(ArrayRef<T> buffer, DataTy &data) {
+ assert(buffer.size() == 1 && "record data does not match layout");
+ data = ElementTy::convert(buffer.front());
+ }
+
+ template <typename T> static void read(ArrayRef<T> buffer, std::nullopt_t) {
+ assert(buffer.size() == 1 && "record data does not match layout");
+ (void)buffer;
+ }
+
+ template <typename T> static void read(ArrayRef<T> buffer) = delete;
+};
+
+/// Helper class for dealing with an array at the end of a record.
+///
+/// \sa BCRecordLayout::emitRecord
+template <typename ElementTy> class BCRecordCoding<BCArray<ElementTy>> {
+public:
+ template <typename BufferTy>
+ static void emit(llvm::BitstreamWriter &Stream, BufferTy &buffer,
+ unsigned code, StringRef data) {
+ // TODO: validate array data.
+ Stream.EmitRecordWithArray(code, buffer, data);
+ }
+
+ template <typename BufferTy, typename ArrayTy>
+ static void emit(llvm::BitstreamWriter &Stream, BufferTy &buffer,
+ unsigned code, const ArrayTy &array) {
+#ifndef NDEBUG
+ for (auto &element : array)
+ ElementTy::assertValid(element);
+#endif
+ buffer.reserve(buffer.size() + std::distance(array.begin(), array.end()));
+ std::copy(array.begin(), array.end(), std::back_inserter(buffer));
+ Stream.EmitRecordWithAbbrev(code, buffer);
+ }
+
+ template <typename BufferTy, typename ElementDataTy, typename... DataTy>
+ static void emit(llvm::BitstreamWriter &Stream, BufferTy &buffer,
+ unsigned code, ElementDataTy element, DataTy... data) {
+ std::array<ElementDataTy, 1 + sizeof...(data)> array{{element, data...}};
+ emit(Stream, buffer, code, array);
+ }
+
+ template <typename BufferTy>
+ static void emit(llvm::BitstreamWriter &Stream, BufferTy &Buffer,
+ unsigned code, std::nullopt_t) {
+ Stream.EmitRecordWithAbbrev(code, Buffer);
+ }
+
+ template <typename T>
+ static void read(ArrayRef<T> Buffer, ArrayRef<T> &rawData) {
+ rawData = Buffer;
+ }
+
+ template <typename T, typename ArrayTy>
+ static void read(ArrayRef<T> buffer, ArrayTy &array) {
+ array.append(llvm::map_iterator(buffer.begin(), T::convert),
+ llvm::map_iterator(buffer.end(), T::convert));
+ }
+
+ template <typename T> static void read(ArrayRef<T> buffer, std::nullopt_t) {
+ (void)buffer;
+ }
+
+ template <typename T> static void read(ArrayRef<T> buffer) = delete;
+};
+
+/// Helper class for dealing with a blob at the end of a record.
+///
+/// \sa BCRecordLayout
+template <> class BCRecordCoding<BCBlob> {
+public:
+ template <typename BufferTy>
+ static void emit(llvm::BitstreamWriter &Stream, BufferTy &buffer,
+ unsigned code, StringRef data) {
+ Stream.EmitRecordWithBlob(code, buffer, data);
+ }
+
+ template <typename T> static void read(ArrayRef<T> buffer) { (void)buffer; }
+
+ /// Blob data is not stored in the buffer if you are using the correct
+ /// accessor; this method should not be used.
+ template <typename T, typename DataTy>
+ static void read(ArrayRef<T> buffer, DataTy &data) = delete;
+};
+
+/// A type trait whose \c type field is the last of its template parameters.
+template <typename Head, typename... Tail> struct last_type {
+ using type = typename last_type<Tail...>::type;
+};
+
+template <typename Head> struct last_type<Head> { using type = Head; };
+
+/// A type trait whose \c value field is \c true if the last type is BCBlob.
+template <typename... Types>
+using has_blob = std::is_same<BCBlob, typename last_type<int, Types...>::type>;
+
+/// A type trait whose \c value field is \c true if the given type is a
+/// BCArray (of any element kind).
+template <typename T> struct is_array {
+private:
+ template <typename E> static bool check(BCArray<E> *);
+ static int check(...);
+
+public:
+ typedef bool value_type;
+ static constexpr bool value = !std::is_same<decltype(check((T *)nullptr)),
+ decltype(check(false))>::value;
+};
+
+/// A type trait whose \c value field is \c true if the last type is a
+/// BCArray (of any element kind).
+template <typename... Types>
+using has_array = is_array<typename last_type<int, Types...>::type>;
+} // namespace detail
+
+/// Represents a single bitcode record type.
+///
+/// This class template is meant to be instantiated and then given a name,
+/// so that from then on that name can be used.
+template <typename IDField, typename... Fields> class BCGenericRecordLayout {
+ llvm::BitstreamWriter &Stream;
+
+public:
+ /// The abbreviation code used for this record in the current block.
+ ///
+ /// Note that this is not the same as the semantic record code, which is the
+ /// first field of the record.
+ const unsigned AbbrevCode;
+
+ /// Create a layout and register it with the given bitstream writer.
+ explicit BCGenericRecordLayout(llvm::BitstreamWriter &Stream)
+ : Stream(Stream), AbbrevCode(emitAbbrev(Stream)) {}
+
+ /// Emit a record to the bitstream writer, using the given buffer for scratch
+ /// space.
+ ///
+ /// Note that even fixed arguments must be specified here.
+ template <typename BufferTy, typename... Data>
+ void emit(BufferTy &buffer, unsigned id, Data &&...data) const {
+ emitRecord(Stream, buffer, AbbrevCode, id, std::forward<Data>(data)...);
+ }
+
+ /// Registers this record's layout with the bitstream reader.
+ ///
+  /// \returns The abbreviation code for the newly-registered record type.
+ static unsigned emitAbbrev(llvm::BitstreamWriter &Stream) {
+ auto Abbrev = std::make_shared<llvm::BitCodeAbbrev>();
+ detail::emitOps<IDField, Fields...>(*Abbrev);
+ return Stream.EmitAbbrev(std::move(Abbrev));
+ }
+
+ /// Emit a record identified by \p abbrCode to bitstream reader \p Stream,
+ /// using \p buffer for scratch space.
+ ///
+ /// Note that even fixed arguments must be specified here. Blobs are passed
+ /// as StringRefs, while arrays can be passed inline, as aggregates, or as
+ /// pre-encoded StringRef data. Skipped values and empty arrays should use
+ /// the special Nothing value.
+ template <typename BufferTy, typename... Data>
+ static void emitRecord(llvm::BitstreamWriter &Stream, BufferTy &buffer,
+ unsigned abbrCode, unsigned recordID, Data &&...data) {
+ static_assert(sizeof...(data) <= sizeof...(Fields) ||
+ detail::has_array<Fields...>::value,
+ "Too many record elements");
+ static_assert(sizeof...(data) >= sizeof...(Fields),
+ "Too few record elements");
+ buffer.clear();
+ detail::BCRecordCoding<IDField, Fields...>::emit(
+ Stream, buffer, abbrCode, recordID, std::forward<Data>(data)...);
+ }
+
+ /// Extract record data from \p buffer into the given data fields.
+ ///
+ /// Note that even fixed arguments must be specified here. Pass \c Nothing
+ /// if you don't care about a particular parameter. Blob data is not included
+ /// in the buffer and should be handled separately by the caller.
+ template <typename ElementTy, typename... Data>
+ static void readRecord(ArrayRef<ElementTy> buffer, Data &&...data) {
+ static_assert(sizeof...(data) <= sizeof...(Fields),
+ "Too many record elements");
+ static_assert(sizeof...(Fields) <=
+ sizeof...(data) + detail::has_blob<Fields...>::value,
+ "Too few record elements");
+ return detail::BCRecordCoding<Fields...>::read(buffer,
+ std::forward<Data>(data)...);
+ }
+
+ /// Extract record data from \p buffer into the given data fields.
+ ///
+ /// Note that even fixed arguments must be specified here. Pass \c Nothing
+ /// if you don't care about a particular parameter. Blob data is not included
+ /// in the buffer and should be handled separately by the caller.
+ template <typename BufferTy, typename... Data>
+ static void readRecord(BufferTy &buffer, Data &&...data) {
+ return readRecord(llvm::ArrayRef(buffer), std::forward<Data>(data)...);
+ }
+};
+
+/// A record with a fixed record code.
+template <unsigned RecordCode, typename... Fields>
+class BCRecordLayout
+ : public BCGenericRecordLayout<BCLiteral<RecordCode>, Fields...> {
+ using Base = BCGenericRecordLayout<BCLiteral<RecordCode>, Fields...>;
+
+public:
+ enum : unsigned {
+ /// The record code associated with this layout.
+ Code = RecordCode
+ };
+
+ /// Create a layout and register it with the given bitstream writer.
+ explicit BCRecordLayout(llvm::BitstreamWriter &Stream) : Base(Stream) {}
+
+ /// Emit a record to the bitstream writer, using the given buffer for scratch
+ /// space.
+ ///
+ /// Note that even fixed arguments must be specified here.
+ template <typename BufferTy, typename... Data>
+ void emit(BufferTy &buffer, Data &&...data) const {
+ Base::emit(buffer, RecordCode, std::forward<Data>(data)...);
+ }
+
+ /// Emit a record identified by \p abbrCode to bitstream reader \p Stream,
+ /// using \p buffer for scratch space.
+ ///
+ /// Note that even fixed arguments must be specified here. Currently, arrays
+ /// and blobs can only be passed as StringRefs.
+ template <typename BufferTy, typename... Data>
+ static void emitRecord(llvm::BitstreamWriter &Stream, BufferTy &buffer,
+ unsigned abbrCode, Data &&...data) {
+ Base::emitRecord(Stream, buffer, abbrCode, RecordCode,
+ std::forward<Data>(data)...);
+ }
+};
+
+/// RAII object to pair entering and exiting a sub-block.
+class BCBlockRAII {
+ llvm::BitstreamWriter &Stream;
+
+public:
+ BCBlockRAII(llvm::BitstreamWriter &Stream, unsigned block, unsigned abbrev)
+ : Stream(Stream) {
+ Stream.EnterSubblock(block, abbrev);
+ }
+
+ ~BCBlockRAII() { Stream.ExitBlock(); }
+};
+} // namespace llvm
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/CodeGen/MachORelocation.h b/contrib/libs/llvm16/include/llvm/CodeGen/MachORelocation.h
new file mode 100644
index 0000000000..20b31a21f4
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/CodeGen/MachORelocation.h
@@ -0,0 +1,66 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//=== MachORelocation.h - Mach-O Relocation Info ----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the MachORelocation class.
+//
+//===----------------------------------------------------------------------===//
+
+
+#ifndef LLVM_CODEGEN_MACHORELOCATION_H
+#define LLVM_CODEGEN_MACHORELOCATION_H
+
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+
+ /// MachORelocation - This struct contains information about each relocation
+ /// that needs to be emitted to the file.
+ /// see <mach-o/reloc.h>
+ class MachORelocation {
+ uint32_t r_address; // offset in the section to what is being relocated
+ uint32_t r_symbolnum; // symbol index if r_extern == 1 else section index
+ bool r_pcrel; // was relocated pc-relative already
+ uint8_t r_length; // length = 2 ^ r_length
+ bool r_extern; //
+ uint8_t r_type; // if not 0, machine-specific relocation type.
+ bool r_scattered; // 1 = scattered, 0 = non-scattered
+ int32_t r_value; // the value the item to be relocated is referring
+ // to.
+ public:
+ uint32_t getPackedFields() const {
+ if (r_scattered)
+ return (1 << 31) | (r_pcrel << 30) | ((r_length & 3) << 28) |
+ ((r_type & 15) << 24) | (r_address & 0x00FFFFFF);
+ else
+ return (r_symbolnum << 8) | (r_pcrel << 7) | ((r_length & 3) << 5) |
+ (r_extern << 4) | (r_type & 15);
+ }
+ uint32_t getAddress() const { return r_scattered ? r_value : r_address; }
+ uint32_t getRawAddress() const { return r_address; }
+
+ MachORelocation(uint32_t addr, uint32_t index, bool pcrel, uint8_t len,
+ bool ext, uint8_t type, bool scattered = false,
+ int32_t value = 0) :
+ r_address(addr), r_symbolnum(index), r_pcrel(pcrel), r_length(len),
+ r_extern(ext), r_type(type), r_scattered(scattered), r_value(value) {}
+ };
+
+} // end llvm namespace
+
+#endif // LLVM_CODEGEN_MACHORELOCATION_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/Config/AsmParsers.def.in b/contrib/libs/llvm16/include/llvm/Config/AsmParsers.def.in
new file mode 100644
index 0000000000..2e5fa33ace
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/Config/AsmParsers.def.in
@@ -0,0 +1,29 @@
+/*===- llvm/Config/AsmParsers.def - LLVM Assembly Parsers -------*- C++ -*-===*\
+|* *|
+|* Part of the LLVM Project, under the Apache License v2.0 with LLVM *|
+|* Exceptions. *|
+|* See https://llvm.org/LICENSE.txt for license information. *|
+|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception *|
+|* *|
+|*===----------------------------------------------------------------------===*|
+|* *|
+|* This file enumerates all of the assembly-language parsers *|
+|* supported by this build of LLVM. Clients of this file should define *|
+|* the LLVM_ASM_PARSER macro to be a function-like macro with a *|
+|* single parameter (the name of the target whose assembly can be *|
+|* generated); including this file will then enumerate all of the *|
+|* targets with assembly parsers. *|
+|* *|
+|* The set of targets supported by LLVM is generated at configuration *|
+|* time, at which point this header is generated. Do not modify this *|
+|* header directly. *|
+|* *|
+\*===----------------------------------------------------------------------===*/
+
+#ifndef LLVM_ASM_PARSER
+# error Please define the macro LLVM_ASM_PARSER(TargetName)
+#endif
+
+@LLVM_ENUM_ASM_PARSERS@
+
+#undef LLVM_ASM_PARSER
diff --git a/contrib/libs/llvm16/include/llvm/Config/AsmPrinters.def.in b/contrib/libs/llvm16/include/llvm/Config/AsmPrinters.def.in
new file mode 100644
index 0000000000..56419d9c14
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/Config/AsmPrinters.def.in
@@ -0,0 +1,29 @@
+/*===- llvm/Config/AsmPrinters.def - LLVM Assembly Printers -----*- C++ -*-===*\
+|* *|
+|* Part of the LLVM Project, under the Apache License v2.0 with LLVM *|
+|* Exceptions. *|
+|* See https://llvm.org/LICENSE.txt for license information. *|
+|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception *|
+|* *|
+|*===----------------------------------------------------------------------===*|
+|* *|
+|* This file enumerates all of the assembly-language printers *|
+|* supported by this build of LLVM. Clients of this file should define *|
+|* the LLVM_ASM_PRINTER macro to be a function-like macro with a *|
+|* single parameter (the name of the target whose assembly can be *|
+|* generated); including this file will then enumerate all of the *|
+|* targets with assembly printers. *|
+|* *|
+|* The set of targets supported by LLVM is generated at configuration *|
+|* time, at which point this header is generated. Do not modify this *|
+|* header directly. *|
+|* *|
+\*===----------------------------------------------------------------------===*/
+
+#ifndef LLVM_ASM_PRINTER
+# error Please define the macro LLVM_ASM_PRINTER(TargetName)
+#endif
+
+@LLVM_ENUM_ASM_PRINTERS@
+
+#undef LLVM_ASM_PRINTER
diff --git a/contrib/libs/llvm16/include/llvm/Config/Disassemblers.def.in b/contrib/libs/llvm16/include/llvm/Config/Disassemblers.def.in
new file mode 100644
index 0000000000..b110b1588d
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/Config/Disassemblers.def.in
@@ -0,0 +1,29 @@
+/*===- llvm/Config/Disassemblers.def - LLVM Assembly Parsers ----*- C++ -*-===*\
+|* *|
+|* Part of the LLVM Project, under the Apache License v2.0 with LLVM *|
+|* Exceptions. *|
+|* See https://llvm.org/LICENSE.txt for license information. *|
+|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception *|
+|* *|
+|*===----------------------------------------------------------------------===*|
+|* *|
+|* This file enumerates all of the assembly-language parsers *|
+|* supported by this build of LLVM. Clients of this file should define *|
+|* the LLVM_DISASSEMBLER macro to be a function-like macro with a *|
+|* single parameter (the name of the target whose assembly can be *|
+|* generated); including this file will then enumerate all of the *|
+|* targets with assembly parsers. *|
+|* *|
+|* The set of targets supported by LLVM is generated at configuration *|
+|* time, at which point this header is generated. Do not modify this *|
+|* header directly. *|
+|* *|
+\*===----------------------------------------------------------------------===*/
+
+#ifndef LLVM_DISASSEMBLER
+# error Please define the macro LLVM_DISASSEMBLER(TargetName)
+#endif
+
+@LLVM_ENUM_DISASSEMBLERS@
+
+#undef LLVM_DISASSEMBLER
diff --git a/contrib/libs/llvm16/include/llvm/Config/TargetExegesis.def.in b/contrib/libs/llvm16/include/llvm/Config/TargetExegesis.def.in
new file mode 100644
index 0000000000..884a154551
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/Config/TargetExegesis.def.in
@@ -0,0 +1,29 @@
+/*===----- llvm/Config/TargetExegesis.def - LLVM Target Exegesis-*- C++ -*-===*\
+|* *|
+|* Part of the LLVM Project, under the Apache License v2.0 with LLVM *|
+|* Exceptions. *|
+|* See https://llvm.org/LICENSE.txt for license information. *|
+|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception *|
+|* *|
+|*===----------------------------------------------------------------------===*|
+|* *|
+|* This file enumerates all of the targets of llvm-exegesis               *|
+|* supported by this build of LLVM. Clients of this file should define    *|
+|* the LLVM_EXEGESIS macro to be a function-like macro with a             *|
+|* single parameter (the name of the target whose assembly can be         *|
+|* generated); including this file will then enumerate all of the         *|
+|* targets with target llvm-exegesis support.                             *|
+|* *|
+|* The set of targets supported by LLVM is generated at configuration *|
+|* time, at which point this header is generated. Do not modify this *|
+|* header directly. *|
+|* *|
+\*===----------------------------------------------------------------------===*/
+
+#ifndef LLVM_EXEGESIS
+# error Please define the macro LLVM_EXEGESIS(TargetName)
+#endif
+
+@LLVM_ENUM_EXEGESIS@
+
+#undef LLVM_EXEGESIS
diff --git a/contrib/libs/llvm16/include/llvm/Config/TargetMCAs.def.in b/contrib/libs/llvm16/include/llvm/Config/TargetMCAs.def.in
new file mode 100644
index 0000000000..0409b93cbf
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/Config/TargetMCAs.def.in
@@ -0,0 +1,29 @@
+/*===------ llvm/Config/TargetMCAs.def - LLVM Target MCAs -------*- C++ -*-===*\
+|* *|
+|* Part of the LLVM Project, under the Apache License v2.0 with LLVM *|
+|* Exceptions. *|
+|* See https://llvm.org/LICENSE.txt for license information. *|
+|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception *|
+|* *|
+|*===----------------------------------------------------------------------===*|
+|* *|
+|* This file enumerates all of the target MCAs *|
+|* supported by this build of LLVM. Clients of this file should define *|
+|* the LLVM_TARGETMCA macro to be a function-like macro with a *|
+|* single parameter (the name of the target whose assembly can be *|
+|* generated); including this file will then enumerate all of the *|
+|* targets with target MCAs. *|
+|* *|
+|* The set of targets supported by LLVM is generated at configuration *|
+|* time, at which point this header is generated. Do not modify this *|
+|* header directly. *|
+|* *|
+\*===----------------------------------------------------------------------===*/
+
+#ifndef LLVM_TARGETMCA
+# error Please define the macro LLVM_TARGETMCA(TargetName)
+#endif
+
+@LLVM_ENUM_TARGETMCAS@
+
+#undef LLVM_TARGETMCA
diff --git a/contrib/libs/llvm16/include/llvm/Config/Targets.def.in b/contrib/libs/llvm16/include/llvm/Config/Targets.def.in
new file mode 100644
index 0000000000..0da9b948c9
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/Config/Targets.def.in
@@ -0,0 +1,28 @@
+/*===- llvm/Config/Targets.def - LLVM Target Architectures ------*- C++ -*-===*\
+|* *|
+|* Part of the LLVM Project, under the Apache License v2.0 with LLVM *|
+|* Exceptions. *|
+|* See https://llvm.org/LICENSE.txt for license information. *|
+|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception *|
+|* *|
+|*===----------------------------------------------------------------------===*|
+|* *|
+|* This file enumerates all of the target architectures supported by *|
+|* this build of LLVM. Clients of this file should define the *|
+|* LLVM_TARGET macro to be a function-like macro with a single *|
+|* parameter (the name of the target); including this file will then *|
+|* enumerate all of the targets. *|
+|* *|
+|* The set of targets supported by LLVM is generated at configuration *|
+|* time, at which point this header is generated. Do not modify this *|
+|* header directly. *|
+|* *|
+\*===----------------------------------------------------------------------===*/
+
+#ifndef LLVM_TARGET
+# error Please define the macro LLVM_TARGET(TargetName)
+#endif
+
+@LLVM_ENUM_TARGETS@
+
+#undef LLVM_TARGET
diff --git a/contrib/libs/llvm16/include/llvm/Config/abi-breaking.h.cmake b/contrib/libs/llvm16/include/llvm/Config/abi-breaking.h.cmake
new file mode 100644
index 0000000000..2d27e02b1d
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/Config/abi-breaking.h.cmake
@@ -0,0 +1,62 @@
+/*===------- llvm/Config/abi-breaking.h - llvm configuration -------*- C -*-===*/
+/* */
+/* Part of the LLVM Project, under the Apache License v2.0 with LLVM */
+/* Exceptions. */
+/* See https://llvm.org/LICENSE.txt for license information. */
+/* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception */
+/* */
+/*===----------------------------------------------------------------------===*/
+
+/* This file controls the C++ ABI break introduced in LLVM public header. */
+
+#ifndef LLVM_ABI_BREAKING_CHECKS_H
+#define LLVM_ABI_BREAKING_CHECKS_H
+
+/* Define to enable checks that alter the LLVM C++ ABI */
+#cmakedefine01 LLVM_ENABLE_ABI_BREAKING_CHECKS
+
+/* Define to enable reverse iteration of unordered llvm containers */
+#cmakedefine01 LLVM_ENABLE_REVERSE_ITERATION
+
+/* Allow selectively disabling link-time mismatch checking so that header-only
+ ADT content from LLVM can be used without linking libSupport. */
+#if !defined(LLVM_DISABLE_ABI_BREAKING_CHECKS_ENFORCING) || !LLVM_DISABLE_ABI_BREAKING_CHECKS_ENFORCING
+
+// ABI_BREAKING_CHECKS protection: provides link-time failure when clients build
+// mismatch with LLVM
+#if defined(_MSC_VER)
+// Use pragma with MSVC
+#define LLVM_XSTR(s) LLVM_STR(s)
+#define LLVM_STR(s) #s
+#pragma detect_mismatch("LLVM_ENABLE_ABI_BREAKING_CHECKS", LLVM_XSTR(LLVM_ENABLE_ABI_BREAKING_CHECKS))
+#undef LLVM_XSTR
+#undef LLVM_STR
+#elif defined(_WIN32) || defined(__CYGWIN__) // Win32 w/o #pragma detect_mismatch
+// FIXME: Implement checks without weak.
+#elif defined(__cplusplus)
+#if !(defined(_AIX) && defined(__GNUC__) && !defined(__clang__))
+#define LLVM_HIDDEN_VISIBILITY __attribute__ ((visibility("hidden")))
+#else
+// GCC on AIX does not support visibility attributes. Symbols are not
+// exported by default on AIX.
+#define LLVM_HIDDEN_VISIBILITY
+#endif
+namespace llvm {
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
+extern int EnableABIBreakingChecks;
+LLVM_HIDDEN_VISIBILITY
+__attribute__((weak)) int *VerifyEnableABIBreakingChecks =
+ &EnableABIBreakingChecks;
+#else
+extern int DisableABIBreakingChecks;
+LLVM_HIDDEN_VISIBILITY
+__attribute__((weak)) int *VerifyDisableABIBreakingChecks =
+ &DisableABIBreakingChecks;
+#endif
+}
+#undef LLVM_HIDDEN_VISIBILITY
+#endif // _MSC_VER
+
+#endif // LLVM_DISABLE_ABI_BREAKING_CHECKS_ENFORCING
+
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/Config/config.h.cmake b/contrib/libs/llvm16/include/llvm/Config/config.h.cmake
new file mode 100644
index 0000000000..29ac536b4c
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/Config/config.h.cmake
@@ -0,0 +1,337 @@
+#ifndef CONFIG_H
+#define CONFIG_H
+
+// Include this header only under the llvm source tree.
+// This is a private header.
+
+/* Exported configuration */
+#include "llvm/Config/llvm-config.h"
+
+/* Bug report URL. */
+#define BUG_REPORT_URL "${BUG_REPORT_URL}"
+
+/* Define to 1 to enable backtraces, and to 0 otherwise. */
+#cmakedefine01 ENABLE_BACKTRACES
+
+/* Define to 1 to enable crash overrides, and to 0 otherwise. */
+#cmakedefine01 ENABLE_CRASH_OVERRIDES
+
+/* Define to 1 to enable crash memory dumps, and to 0 otherwise. */
+#cmakedefine01 LLVM_ENABLE_CRASH_DUMPS
+
+/* Define to 1 to prefer forward slashes on Windows, and to 0 prefer
+ backslashes. */
+#cmakedefine01 LLVM_WINDOWS_PREFER_FORWARD_SLASH
+
+/* Define to 1 if you have the `backtrace' function. */
+#cmakedefine HAVE_BACKTRACE ${HAVE_BACKTRACE}
+
+#define BACKTRACE_HEADER <${BACKTRACE_HEADER}>
+
+/* Define to 1 if you have the <CrashReporterClient.h> header file. */
+#cmakedefine HAVE_CRASHREPORTERCLIENT_H
+
+/* can use __crashreporter_info__ */
+#cmakedefine01 HAVE_CRASHREPORTER_INFO
+
+/* Define to 1 if you have the declaration of `arc4random', and to 0 if you
+ don't. */
+#cmakedefine01 HAVE_DECL_ARC4RANDOM
+
+/* Define to 1 if you have the declaration of `FE_ALL_EXCEPT', and to 0 if you
+ don't. */
+#cmakedefine01 HAVE_DECL_FE_ALL_EXCEPT
+
+/* Define to 1 if you have the declaration of `FE_INEXACT', and to 0 if you
+ don't. */
+#cmakedefine01 HAVE_DECL_FE_INEXACT
+
+/* Define to 1 if you have the declaration of `strerror_s', and to 0 if you
+ don't. */
+#cmakedefine01 HAVE_DECL_STRERROR_S
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+#cmakedefine HAVE_DLFCN_H ${HAVE_DLFCN_H}
+
+/* Define if dlopen() is available on this platform. */
+#cmakedefine HAVE_DLOPEN ${HAVE_DLOPEN}
+
+/* Define if dladdr() is available on this platform. */
+#cmakedefine HAVE_DLADDR ${HAVE_DLADDR}
+
+/* Define to 1 if we can register EH frames on this platform. */
+#cmakedefine HAVE_REGISTER_FRAME ${HAVE_REGISTER_FRAME}
+
+/* Define to 1 if we can deregister EH frames on this platform. */
+#cmakedefine HAVE_DEREGISTER_FRAME ${HAVE_DEREGISTER_FRAME}
+
+/* Define if __unw_add_dynamic_fde() is available on this platform. */
+#cmakedefine HAVE_UNW_ADD_DYNAMIC_FDE ${HAVE_UNW_ADD_DYNAMIC_FDE}
+
+/* Define to 1 if you have the <errno.h> header file. */
+#cmakedefine HAVE_ERRNO_H ${HAVE_ERRNO_H}
+
+/* Define to 1 if you have the <fcntl.h> header file. */
+#cmakedefine HAVE_FCNTL_H ${HAVE_FCNTL_H}
+
+/* Define to 1 if you have the <fenv.h> header file. */
+#cmakedefine HAVE_FENV_H ${HAVE_FENV_H}
+
+/* Define if libffi is available on this platform. */
+#cmakedefine HAVE_FFI_CALL ${HAVE_FFI_CALL}
+
+/* Define to 1 if you have the <ffi/ffi.h> header file. */
+#cmakedefine HAVE_FFI_FFI_H ${HAVE_FFI_FFI_H}
+
+/* Define to 1 if you have the <ffi.h> header file. */
+#cmakedefine HAVE_FFI_H ${HAVE_FFI_H}
+
+/* Define to 1 if you have the `futimens' function. */
+#cmakedefine HAVE_FUTIMENS ${HAVE_FUTIMENS}
+
+/* Define to 1 if you have the `futimes' function. */
+#cmakedefine HAVE_FUTIMES ${HAVE_FUTIMES}
+
+/* Define to 1 if you have the `getpagesize' function. */
+#cmakedefine HAVE_GETPAGESIZE ${HAVE_GETPAGESIZE}
+
+/* Define to 1 if you have the `getrlimit' function. */
+#cmakedefine HAVE_GETRLIMIT ${HAVE_GETRLIMIT}
+
+/* Define to 1 if you have the `getrusage' function. */
+#cmakedefine HAVE_GETRUSAGE ${HAVE_GETRUSAGE}
+
+/* Define to 1 if you have the `isatty' function. */
+#cmakedefine HAVE_ISATTY 1
+
+/* Define to 1 if you have the `edit' library (-ledit). */
+#cmakedefine HAVE_LIBEDIT ${HAVE_LIBEDIT}
+
+/* Define to 1 if you have the `pfm' library (-lpfm). */
+#cmakedefine HAVE_LIBPFM ${HAVE_LIBPFM}
+
+/* Define to 1 if the `perf_branch_entry' struct has field cycles. */
+#cmakedefine LIBPFM_HAS_FIELD_CYCLES ${LIBPFM_HAS_FIELD_CYCLES}
+
+/* Define to 1 if you have the `psapi' library (-lpsapi). */
+#cmakedefine HAVE_LIBPSAPI ${HAVE_LIBPSAPI}
+
+/* Define to 1 if you have the `pthread' library (-lpthread). */
+#cmakedefine HAVE_LIBPTHREAD ${HAVE_LIBPTHREAD}
+
+/* Define to 1 if you have the `pthread_getname_np' function. */
+#cmakedefine HAVE_PTHREAD_GETNAME_NP ${HAVE_PTHREAD_GETNAME_NP}
+
+/* Define to 1 if you have the `pthread_setname_np' function. */
+#cmakedefine HAVE_PTHREAD_SETNAME_NP ${HAVE_PTHREAD_SETNAME_NP}
+
+/* Define to 1 if you have the <link.h> header file. */
+#cmakedefine HAVE_LINK_H ${HAVE_LINK_H}
+
+/* Define to 1 if you have the <mach/mach.h> header file. */
+#cmakedefine HAVE_MACH_MACH_H ${HAVE_MACH_MACH_H}
+
+/* Define to 1 if you have the `mallctl' function. */
+#cmakedefine HAVE_MALLCTL ${HAVE_MALLCTL}
+
+/* Define to 1 if you have the `mallinfo' function. */
+#cmakedefine HAVE_MALLINFO ${HAVE_MALLINFO}
+
+/* Define to 1 if you have the `mallinfo2' function. */
+#cmakedefine HAVE_MALLINFO2 ${HAVE_MALLINFO2}
+
+/* Define to 1 if you have the <malloc/malloc.h> header file. */
+#cmakedefine HAVE_MALLOC_MALLOC_H ${HAVE_MALLOC_MALLOC_H}
+
+/* Define to 1 if you have the `malloc_zone_statistics' function. */
+#cmakedefine HAVE_MALLOC_ZONE_STATISTICS ${HAVE_MALLOC_ZONE_STATISTICS}
+
+/* Define to 1 if you have the `posix_spawn' function. */
+#cmakedefine HAVE_POSIX_SPAWN ${HAVE_POSIX_SPAWN}
+
+/* Define to 1 if you have the `pread' function. */
+#cmakedefine HAVE_PREAD ${HAVE_PREAD}
+
+/* Define to 1 if you have the <pthread.h> header file. */
+#cmakedefine HAVE_PTHREAD_H ${HAVE_PTHREAD_H}
+
+/* Have pthread_mutex_lock */
+#cmakedefine HAVE_PTHREAD_MUTEX_LOCK ${HAVE_PTHREAD_MUTEX_LOCK}
+
+/* Have pthread_rwlock_init */
+#cmakedefine HAVE_PTHREAD_RWLOCK_INIT ${HAVE_PTHREAD_RWLOCK_INIT}
+
+/* Define to 1 if you have the `sbrk' function. */
+#cmakedefine HAVE_SBRK ${HAVE_SBRK}
+
+/* Define to 1 if you have the `setenv' function. */
+#cmakedefine HAVE_SETENV ${HAVE_SETENV}
+
+/* Define to 1 if you have the `setrlimit' function. */
+#cmakedefine HAVE_SETRLIMIT ${HAVE_SETRLIMIT}
+
+/* Define to 1 if you have the `sigaltstack' function. */
+#cmakedefine HAVE_SIGALTSTACK ${HAVE_SIGALTSTACK}
+
+/* Define to 1 if you have the <signal.h> header file. */
+#cmakedefine HAVE_SIGNAL_H ${HAVE_SIGNAL_H}
+
+/* Define to 1 if you have the `strerror' function. */
+#cmakedefine HAVE_STRERROR ${HAVE_STRERROR}
+
+/* Define to 1 if you have the `strerror_r' function. */
+#cmakedefine HAVE_STRERROR_R ${HAVE_STRERROR_R}
+
+/* Define to 1 if you have the `sysconf' function. */
+#cmakedefine HAVE_SYSCONF ${HAVE_SYSCONF}
+
+/* Define to 1 if you have the <sys/ioctl.h> header file. */
+#cmakedefine HAVE_SYS_IOCTL_H ${HAVE_SYS_IOCTL_H}
+
+/* Define to 1 if you have the <sys/mman.h> header file. */
+#cmakedefine HAVE_SYS_MMAN_H ${HAVE_SYS_MMAN_H}
+
+/* Define to 1 if you have the <sys/param.h> header file. */
+#cmakedefine HAVE_SYS_PARAM_H ${HAVE_SYS_PARAM_H}
+
+/* Define to 1 if you have the <sys/resource.h> header file. */
+#cmakedefine HAVE_SYS_RESOURCE_H ${HAVE_SYS_RESOURCE_H}
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#cmakedefine HAVE_SYS_STAT_H ${HAVE_SYS_STAT_H}
+
+/* Define to 1 if you have the <sys/time.h> header file. */
+#cmakedefine HAVE_SYS_TIME_H ${HAVE_SYS_TIME_H}
+
+/* Define to 1 if stat struct has st_mtimespec member. */
+#cmakedefine HAVE_STRUCT_STAT_ST_MTIMESPEC_TV_NSEC ${HAVE_STRUCT_STAT_ST_MTIMESPEC_TV_NSEC}
+
+/* Define to 1 if stat struct has st_mtim member. */
+#cmakedefine HAVE_STRUCT_STAT_ST_MTIM_TV_NSEC ${HAVE_STRUCT_STAT_ST_MTIM_TV_NSEC}
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#cmakedefine HAVE_SYS_TYPES_H ${HAVE_SYS_TYPES_H}
+
+/* Define if the setupterm() function is supported on this platform. */
+#cmakedefine LLVM_ENABLE_TERMINFO ${LLVM_ENABLE_TERMINFO}
+
+/* Define to 1 if you have the <termios.h> header file. */
+#cmakedefine HAVE_TERMIOS_H ${HAVE_TERMIOS_H}
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#cmakedefine HAVE_UNISTD_H ${HAVE_UNISTD_H}
+
+/* Define to 1 if you have the <valgrind/valgrind.h> header file. */
+#cmakedefine HAVE_VALGRIND_VALGRIND_H ${HAVE_VALGRIND_VALGRIND_H}
+
+/* Have host's _alloca */
+#cmakedefine HAVE__ALLOCA ${HAVE__ALLOCA}
+
+/* Define to 1 if you have the `_chsize_s' function. */
+#cmakedefine HAVE__CHSIZE_S ${HAVE__CHSIZE_S}
+
+/* Define to 1 if you have the `_Unwind_Backtrace' function. */
+#cmakedefine HAVE__UNWIND_BACKTRACE ${HAVE__UNWIND_BACKTRACE}
+
+/* Have host's __alloca */
+#cmakedefine HAVE___ALLOCA ${HAVE___ALLOCA}
+
+/* Have host's __ashldi3 */
+#cmakedefine HAVE___ASHLDI3 ${HAVE___ASHLDI3}
+
+/* Have host's __ashrdi3 */
+#cmakedefine HAVE___ASHRDI3 ${HAVE___ASHRDI3}
+
+/* Have host's __chkstk */
+#cmakedefine HAVE___CHKSTK ${HAVE___CHKSTK}
+
+/* Have host's __chkstk_ms */
+#cmakedefine HAVE___CHKSTK_MS ${HAVE___CHKSTK_MS}
+
+/* Have host's __cmpdi2 */
+#cmakedefine HAVE___CMPDI2 ${HAVE___CMPDI2}
+
+/* Have host's __divdi3 */
+#cmakedefine HAVE___DIVDI3 ${HAVE___DIVDI3}
+
+/* Have host's __fixdfdi */
+#cmakedefine HAVE___FIXDFDI ${HAVE___FIXDFDI}
+
+/* Have host's __fixsfdi */
+#cmakedefine HAVE___FIXSFDI ${HAVE___FIXSFDI}
+
+/* Have host's __floatdidf */
+#cmakedefine HAVE___FLOATDIDF ${HAVE___FLOATDIDF}
+
+/* Have host's __lshrdi3 */
+#cmakedefine HAVE___LSHRDI3 ${HAVE___LSHRDI3}
+
+/* Have host's __main */
+#cmakedefine HAVE___MAIN ${HAVE___MAIN}
+
+/* Have host's __moddi3 */
+#cmakedefine HAVE___MODDI3 ${HAVE___MODDI3}
+
+/* Have host's __udivdi3 */
+#cmakedefine HAVE___UDIVDI3 ${HAVE___UDIVDI3}
+
+/* Have host's __umoddi3 */
+#cmakedefine HAVE___UMODDI3 ${HAVE___UMODDI3}
+
+/* Have host's ___chkstk */
+#cmakedefine HAVE____CHKSTK ${HAVE____CHKSTK}
+
+/* Have host's ___chkstk_ms */
+#cmakedefine HAVE____CHKSTK_MS ${HAVE____CHKSTK_MS}
+
+/* Linker version detected at compile time. */
+#cmakedefine HOST_LINK_VERSION "${HOST_LINK_VERSION}"
+
+/* Define if overriding target triple is enabled */
+#cmakedefine LLVM_TARGET_TRIPLE_ENV "${LLVM_TARGET_TRIPLE_ENV}"
+
+/* Whether tools show host and target info when invoked with --version */
+#cmakedefine01 LLVM_VERSION_PRINTER_SHOW_HOST_TARGET_INFO
+
+/* Define if libxml2 is supported on this platform. */
+#cmakedefine LLVM_ENABLE_LIBXML2 ${LLVM_ENABLE_LIBXML2}
+
+/* Define to the extension used for shared libraries, say, ".so". */
+#cmakedefine LTDL_SHLIB_EXT "${LTDL_SHLIB_EXT}"
+
+/* Define to the extension used for plugin libraries, say, ".so". */
+#cmakedefine LLVM_PLUGIN_EXT "${LLVM_PLUGIN_EXT}"
+
+/* Define to the address where bug reports for this package should be sent. */
+#cmakedefine PACKAGE_BUGREPORT "${PACKAGE_BUGREPORT}"
+
+/* Define to the full name of this package. */
+#cmakedefine PACKAGE_NAME "${PACKAGE_NAME}"
+
+/* Define to the full name and version of this package. */
+#cmakedefine PACKAGE_STRING "${PACKAGE_STRING}"
+
+/* Define to the version of this package. */
+#cmakedefine PACKAGE_VERSION "${PACKAGE_VERSION}"
+
+/* Define to the vendor of this package. */
+#cmakedefine PACKAGE_VENDOR "${PACKAGE_VENDOR}"
+
+/* Define to a function implementing stricmp */
+#cmakedefine stricmp ${stricmp}
+
+/* Define to a function implementing strdup */
+#cmakedefine strdup ${strdup}
+
+/* Whether GlobalISel rule coverage is being collected */
+#cmakedefine01 LLVM_GISEL_COV_ENABLED
+
+/* Define to the default GlobalISel coverage file prefix */
+#cmakedefine LLVM_GISEL_COV_PREFIX "${LLVM_GISEL_COV_PREFIX}"
+
+/* Whether Timers signpost passes in Xcode Instruments */
+#cmakedefine01 LLVM_SUPPORT_XCODE_SIGNPOSTS
+
+#cmakedefine HAVE_PROC_PID_RUSAGE 1
+
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/Config/llvm-config.h.cmake b/contrib/libs/llvm16/include/llvm/Config/llvm-config.h.cmake
new file mode 100644
index 0000000000..012ae2174c
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/Config/llvm-config.h.cmake
@@ -0,0 +1,129 @@
+/*===------- llvm/Config/llvm-config.h - llvm configuration -------*- C -*-===*/
+/* */
+/* Part of the LLVM Project, under the Apache License v2.0 with LLVM */
+/* Exceptions. */
+/* See https://llvm.org/LICENSE.txt for license information. */
+/* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception */
+/* */
+/*===----------------------------------------------------------------------===*/
+
+/* This file enumerates variables from the LLVM configuration so that they
+ can be in exported headers and won't override package specific directives.
+ This is a C header that can be included in the llvm-c headers. */
+
+#ifndef LLVM_CONFIG_H
+#define LLVM_CONFIG_H
+
+/* Define if LLVM_ENABLE_DUMP is enabled */
+#cmakedefine LLVM_ENABLE_DUMP
+
+/* Target triple LLVM will generate code for by default */
+/* Doesn't use `cmakedefine` because it is allowed to be empty. */
+#define LLVM_DEFAULT_TARGET_TRIPLE "${LLVM_DEFAULT_TARGET_TRIPLE}"
+
+/* Define if threads enabled */
+#cmakedefine01 LLVM_ENABLE_THREADS
+
+/* Has gcc/MSVC atomic intrinsics */
+#cmakedefine01 LLVM_HAS_ATOMICS
+
+/* Host triple LLVM will be executed on */
+#cmakedefine LLVM_HOST_TRIPLE "${LLVM_HOST_TRIPLE}"
+
+/* LLVM architecture name for the native architecture, if available */
+#cmakedefine LLVM_NATIVE_ARCH ${LLVM_NATIVE_ARCH}
+
+/* LLVM name for the native AsmParser init function, if available */
+#cmakedefine LLVM_NATIVE_ASMPARSER LLVMInitialize${LLVM_NATIVE_ARCH}AsmParser
+
+/* LLVM name for the native AsmPrinter init function, if available */
+#cmakedefine LLVM_NATIVE_ASMPRINTER LLVMInitialize${LLVM_NATIVE_ARCH}AsmPrinter
+
+/* LLVM name for the native Disassembler init function, if available */
+#cmakedefine LLVM_NATIVE_DISASSEMBLER LLVMInitialize${LLVM_NATIVE_ARCH}Disassembler
+
+/* LLVM name for the native Target init function, if available */
+#cmakedefine LLVM_NATIVE_TARGET LLVMInitialize${LLVM_NATIVE_ARCH}Target
+
+/* LLVM name for the native TargetInfo init function, if available */
+#cmakedefine LLVM_NATIVE_TARGETINFO LLVMInitialize${LLVM_NATIVE_ARCH}TargetInfo
+
+/* LLVM name for the native target MC init function, if available */
+#cmakedefine LLVM_NATIVE_TARGETMC LLVMInitialize${LLVM_NATIVE_ARCH}TargetMC
+
+/* LLVM name for the native target MCA init function, if available */
+#cmakedefine LLVM_NATIVE_TARGETMCA LLVMInitialize${LLVM_NATIVE_ARCH}TargetMCA
+
+/* Define if this is Unixish platform */
+#cmakedefine LLVM_ON_UNIX ${LLVM_ON_UNIX}
+
+/* Define if we have the Intel JIT API runtime support library */
+#cmakedefine01 LLVM_USE_INTEL_JITEVENTS
+
+/* Define if we have the oprofile JIT-support library */
+#cmakedefine01 LLVM_USE_OPROFILE
+
+/* Define if we have the perf JIT-support library */
+#cmakedefine01 LLVM_USE_PERF
+
+/* Major version of the LLVM API */
+#define LLVM_VERSION_MAJOR ${LLVM_VERSION_MAJOR}
+
+/* Minor version of the LLVM API */
+#define LLVM_VERSION_MINOR ${LLVM_VERSION_MINOR}
+
+/* Patch version of the LLVM API */
+#define LLVM_VERSION_PATCH ${LLVM_VERSION_PATCH}
+
+/* LLVM version string */
+#define LLVM_VERSION_STRING "${PACKAGE_VERSION}"
+
+/* Whether LLVM records statistics for use with GetStatistics(),
+ * PrintStatistics() or PrintStatisticsJSON()
+ */
+#cmakedefine01 LLVM_FORCE_ENABLE_STATS
+
+/* Define if we have z3 and want to build it */
+#cmakedefine LLVM_WITH_Z3 ${LLVM_WITH_Z3}
+
+/* Define if we have curl and want to use it */
+#cmakedefine LLVM_ENABLE_CURL ${LLVM_ENABLE_CURL}
+
+/* Define if we have cpp-httplib and want to use it */
+#cmakedefine LLVM_ENABLE_HTTPLIB ${LLVM_ENABLE_HTTPLIB}
+
+/* Define if zlib compression is available */
+#cmakedefine01 LLVM_ENABLE_ZLIB
+
+/* Define if zstd compression is available */
+#cmakedefine01 LLVM_ENABLE_ZSTD
+
+/* Define if LLVM is using tflite instead of libtensorflow */
+#cmakedefine LLVM_HAVE_TFLITE
+
+/* Define to 1 if you have the <sysexits.h> header file. */
+#cmakedefine HAVE_SYSEXITS_H ${HAVE_SYSEXITS_H}
+
+/* Define if the xar_open() function is supported on this platform. */
+#cmakedefine LLVM_HAVE_LIBXAR ${LLVM_HAVE_LIBXAR}
+
+/* Define if building libLLVM shared library */
+#cmakedefine LLVM_BUILD_LLVM_DYLIB
+
+/* Define if building LLVM with BUILD_SHARED_LIBS */
+#cmakedefine LLVM_BUILD_SHARED_LIBS
+
+/* Define if building LLVM with LLVM_FORCE_USE_OLD_TOOLCHAIN_LIBS */
+#cmakedefine LLVM_FORCE_USE_OLD_TOOLCHAIN ${LLVM_FORCE_USE_OLD_TOOLCHAIN}
+
+/* Define if llvm_unreachable should be optimized with undefined behavior
+ * in non assert builds */
+#cmakedefine01 LLVM_UNREACHABLE_OPTIMIZE
+
+/* Define to 1 if you have the DIA SDK installed, and to 0 if you don't. */
+#cmakedefine01 LLVM_ENABLE_DIA_SDK
+
+/* Define if plugins enabled */
+#cmakedefine LLVM_ENABLE_PLUGINS
+
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/DWARFLinkerParallel/DWARFLinker.h b/contrib/libs/llvm16/include/llvm/DWARFLinkerParallel/DWARFLinker.h
new file mode 100644
index 0000000000..ae538b49a1
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/DWARFLinkerParallel/DWARFLinker.h
@@ -0,0 +1,27 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- DWARFLinker.h --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DWARFLINKERPARALLEL_DWARFLINKER_H
+#define LLVM_DWARFLINKERPARALLEL_DWARFLINKER_H
+
+namespace llvm {
+namespace dwarflinker_parallel {} // end namespace dwarflinker_parallel
+} // end namespace llvm
+
+#endif // LLVM_DWARFLINKERPARALLEL_DWARFLINKER_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/DebugInfo/CodeView/FunctionId.h b/contrib/libs/llvm16/include/llvm/DebugInfo/CodeView/FunctionId.h
new file mode 100644
index 0000000000..8de994911b
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/DebugInfo/CodeView/FunctionId.h
@@ -0,0 +1,66 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- FunctionId.h ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_FUNCTIONID_H
+#define LLVM_DEBUGINFO_CODEVIEW_FUNCTIONID_H
+
+#include <cinttypes>
+
+namespace llvm {
+namespace codeview {
+
+class FunctionId {
+public:
+ FunctionId() : Index(0) {}
+
+ explicit FunctionId(uint32_t Index) : Index(Index) {}
+
+ uint32_t getIndex() const { return Index; }
+
+private:
+ uint32_t Index;
+};
+
+inline bool operator==(const FunctionId &A, const FunctionId &B) {
+ return A.getIndex() == B.getIndex();
+}
+
+inline bool operator!=(const FunctionId &A, const FunctionId &B) {
+ return A.getIndex() != B.getIndex();
+}
+
+inline bool operator<(const FunctionId &A, const FunctionId &B) {
+ return A.getIndex() < B.getIndex();
+}
+
+inline bool operator<=(const FunctionId &A, const FunctionId &B) {
+ return A.getIndex() <= B.getIndex();
+}
+
+inline bool operator>(const FunctionId &A, const FunctionId &B) {
+ return A.getIndex() > B.getIndex();
+}
+
+inline bool operator>=(const FunctionId &A, const FunctionId &B) {
+ return A.getIndex() >= B.getIndex();
+}
+}
+}
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/DebugInfo/CodeView/TypeSymbolEmitter.h b/contrib/libs/llvm16/include/llvm/DebugInfo/CodeView/TypeSymbolEmitter.h
new file mode 100644
index 0000000000..9500bdb088
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/DebugInfo/CodeView/TypeSymbolEmitter.h
@@ -0,0 +1,46 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- TypeSymbolEmitter.h --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPESYMBOLEMITTER_H
+#define LLVM_DEBUGINFO_CODEVIEW_TYPESYMBOLEMITTER_H
+
+namespace llvm {
+class StringRef;
+
+namespace codeview {
+class TypeIndex;
+
+class TypeSymbolEmitter {
+private:
+ TypeSymbolEmitter(const TypeSymbolEmitter &) = delete;
+ TypeSymbolEmitter &operator=(const TypeSymbolEmitter &) = delete;
+
+protected:
+ TypeSymbolEmitter() {}
+
+public:
+ virtual ~TypeSymbolEmitter() {}
+
+public:
+ virtual void writeUserDefinedType(TypeIndex TI, StringRef Name) = 0;
+};
+}
+}
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVCompare.h b/contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVCompare.h
new file mode 100644
index 0000000000..c8d663e255
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVCompare.h
@@ -0,0 +1,100 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- LVCompare.h ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the LVCompare class, which is used to describe a logical
+// view comparison.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVCOMPARE_H
+#define LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVCOMPARE_H
+
+#include "llvm/DebugInfo/LogicalView/Core/LVObject.h"
+
+namespace llvm {
+namespace logicalview {
+
+class LVReader;
+
+// Record the elements missing or added and their compare pass.
+using LVPassEntry = std::tuple<LVReader *, LVElement *, LVComparePass>;
+using LVPassTable = std::vector<LVPassEntry>;
+
+class LVCompare final {
+ raw_ostream &OS;
+ LVScopes ScopeStack;
+
+ // As the comparison is performed twice (by exchanging the reference
+ // and target readers) the element missing/added status does specify
+ // the comparison pass.
+ // By recording each missing/added elements along with its pass, it
+ // allows checking which elements were missing/added during each pass.
+ LVPassTable PassTable;
+
+ // Reader used on the LHS of the comparison.
+ // In the 'Missing' pass, it points to the reference reader.
+ // In the 'Added' pass it points to the target reader.
+ LVReader *Reader = nullptr;
+
+ bool FirstMissing = true;
+ bool PrintLines = false;
+ bool PrintScopes = false;
+ bool PrintSymbols = false;
+ bool PrintTypes = false;
+
+ static void setInstance(LVCompare *Compare);
+
+ void printCurrentStack();
+ void printSummary() const;
+
+public:
+ LVCompare() = delete;
+ LVCompare(raw_ostream &OS);
+ LVCompare(const LVCompare &) = delete;
+ LVCompare &operator=(const LVCompare &) = delete;
+ ~LVCompare() = default;
+
+ static LVCompare &getInstance();
+
+ // Scopes stack used during the missing/added reporting.
+ void push(LVScope *Scope) { ScopeStack.push_back(Scope); }
+ void pop() { ScopeStack.pop_back(); }
+
+ // Perform comparison between the 'Reference' and 'Target' scopes tree.
+ Error execute(LVReader *ReferenceReader, LVReader *TargetReader);
+
+ void addPassEntry(LVReader *Reader, LVElement *Element, LVComparePass Pass) {
+ PassTable.emplace_back(Reader, Element, Pass);
+ }
+ const LVPassTable &getPassTable() const & { return PassTable; }
+
+ void printItem(LVElement *Element, LVComparePass Pass);
+ void print(raw_ostream &OS) const;
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+ void dump() const { print(dbgs()); }
+#endif
+};
+
+inline LVCompare &getComparator() { return LVCompare::getInstance(); }
+
+} // end namespace logicalview
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVCOMPARE_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVElement.h b/contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVElement.h
new file mode 100644
index 0000000000..1b840ee8c2
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVElement.h
@@ -0,0 +1,363 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- LVElement.h ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the LVElement class, which is used to describe a debug
+// information element.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVELEMENT_H
+#define LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVELEMENT_H
+
+#include "llvm/DebugInfo/LogicalView/Core/LVObject.h"
+#include "llvm/Support/Casting.h"
+#include <map>
+#include <set>
+#include <vector>
+
+namespace llvm {
+namespace logicalview {
+
+// RTTI Subclasses ID.
+enum class LVSubclassID : unsigned char {
+ LV_ELEMENT,
+ LV_LINE_FIRST,
+ LV_LINE,
+ LV_LINE_DEBUG,
+ LV_LINE_ASSEMBLER,
+ LV_LINE_LAST,
+ lV_SCOPE_FIRST,
+ LV_SCOPE,
+ LV_SCOPE_AGGREGATE,
+ LV_SCOPE_ALIAS,
+ LV_SCOPE_ARRAY,
+ LV_SCOPE_COMPILE_UNIT,
+ LV_SCOPE_ENUMERATION,
+ LV_SCOPE_FORMAL_PACK,
+ LV_SCOPE_FUNCTION,
+ LV_SCOPE_FUNCTION_INLINED,
+ LV_SCOPE_FUNCTION_TYPE,
+ LV_SCOPE_NAMESPACE,
+ LV_SCOPE_ROOT,
+ LV_SCOPE_TEMPLATE_PACK,
+ LV_SCOPE_LAST,
+ LV_SYMBOL_FIRST,
+ LV_SYMBOL,
+ LV_SYMBOL_LAST,
+ LV_TYPE_FIRST,
+ LV_TYPE,
+ LV_TYPE_DEFINITION,
+ LV_TYPE_ENUMERATOR,
+ LV_TYPE_IMPORT,
+ LV_TYPE_PARAM,
+ LV_TYPE_SUBRANGE,
+ LV_TYPE_LAST
+};
+
+enum class LVElementKind { Discarded, Global, Optimized, LastEntry };
+using LVElementKindSet = std::set<LVElementKind>;
+using LVElementDispatch = std::map<LVElementKind, LVElementGetFunction>;
+using LVElementRequest = std::vector<LVElementGetFunction>;
+
+class LVElement : public LVObject {
+ enum class Property {
+ IsLine, // A logical line.
+ IsScope, // A logical scope.
+ IsSymbol, // A logical symbol.
+ IsType, // A logical type.
+ IsEnumClass,
+ IsExternal,
+ HasType,
+ HasAugmentedName,
+ IsTypedefReduced,
+ IsArrayResolved,
+ IsMemberPointerResolved,
+ IsTemplateResolved,
+ IsInlined,
+ IsInlinedAbstract,
+ InvalidFilename,
+ HasReference,
+ HasReferenceAbstract,
+ HasReferenceExtension,
+ HasReferenceSpecification,
+ QualifiedResolved,
+ IncludeInPrint,
+ IsStatic,
+ TransformName,
+ IsScoped, // CodeView local type.
+ IsNested, // CodeView nested type.
+ IsScopedAlready, // CodeView nested type inserted in correct scope.
+ IsArtificial,
+ IsReferencedType,
+ IsSystem,
+ OffsetFromTypeIndex,
+ IsAnonymous,
+ LastEntry
+ };
+ // Typed bitvector with properties for this element.
+ LVProperties<Property> Properties;
+ static LVElementDispatch Dispatch;
+
+ /// RTTI.
+ const LVSubclassID SubclassID;
+
+ // Indexes in the String Pool.
+ size_t NameIndex = 0;
+ size_t QualifiedNameIndex = 0;
+ size_t FilenameIndex = 0;
+
+ uint16_t AccessibilityCode : 2; // DW_AT_accessibility.
+ uint16_t InlineCode : 2; // DW_AT_inline.
+ uint16_t VirtualityCode : 2; // DW_AT_virtuality.
+
+ // The given Specification points to an element that is connected via the
+ // DW_AT_specification, DW_AT_abstract_origin or DW_AT_extension attribute.
+ void setFileLine(LVElement *Specification);
+
+ // Get the qualified name that include its parents name.
+ void resolveQualifiedName();
+
+protected:
+ // Type of this element.
+ LVElement *ElementType = nullptr;
+
+ // Print the FileName Index.
+ void printFileIndex(raw_ostream &OS, bool Full = true) const override;
+
+public:
+ LVElement(LVSubclassID ID)
+ : LVObject(), SubclassID(ID), AccessibilityCode(0), InlineCode(0),
+ VirtualityCode(0) {}
+ LVElement(const LVElement &) = delete;
+ LVElement &operator=(const LVElement &) = delete;
+ virtual ~LVElement() = default;
+
+ LVSubclassID getSubclassID() const { return SubclassID; }
+
+ PROPERTY(Property, IsLine);
+ PROPERTY(Property, IsScope);
+ PROPERTY(Property, IsSymbol);
+ PROPERTY(Property, IsType);
+ PROPERTY(Property, IsEnumClass);
+ PROPERTY(Property, IsExternal);
+ PROPERTY(Property, HasType);
+ PROPERTY(Property, HasAugmentedName);
+ PROPERTY(Property, IsTypedefReduced);
+ PROPERTY(Property, IsArrayResolved);
+ PROPERTY(Property, IsMemberPointerResolved);
+ PROPERTY(Property, IsTemplateResolved);
+ PROPERTY(Property, IsInlined);
+ PROPERTY(Property, IsInlinedAbstract);
+ PROPERTY(Property, InvalidFilename);
+ PROPERTY(Property, HasReference);
+ PROPERTY(Property, HasReferenceAbstract);
+ PROPERTY(Property, HasReferenceExtension);
+ PROPERTY(Property, HasReferenceSpecification);
+ PROPERTY(Property, QualifiedResolved);
+ PROPERTY(Property, IncludeInPrint);
+ PROPERTY(Property, IsStatic);
+ PROPERTY(Property, TransformName);
+ PROPERTY(Property, IsScoped);
+ PROPERTY(Property, IsNested);
+ PROPERTY(Property, IsScopedAlready);
+ PROPERTY(Property, IsArtificial);
+ PROPERTY(Property, IsReferencedType);
+ PROPERTY(Property, IsSystem);
+ PROPERTY(Property, OffsetFromTypeIndex);
+ PROPERTY(Property, IsAnonymous);
+
+ bool isNamed() const override { return NameIndex != 0; }
+ bool isTyped() const override { return ElementType != nullptr; }
+ bool isFiled() const override { return FilenameIndex != 0; }
+
+ // The Element class type can point to a Type or Scope.
+ bool getIsKindType() const { return ElementType && ElementType->getIsType(); }
+ bool getIsKindScope() const {
+ return ElementType && ElementType->getIsScope();
+ }
+
+ StringRef getName() const override {
+ return getStringPool().getString(NameIndex);
+ }
+ void setName(StringRef ElementName) override;
+
+ // Get pathname associated with the Element.
+ StringRef getPathname() const {
+ return getStringPool().getString(getFilenameIndex());
+ }
+
+ // Set filename associated with the Element.
+ void setFilename(StringRef Filename);
+
+ // Set the Element qualified name.
+ void setQualifiedName(StringRef Name) {
+ QualifiedNameIndex = getStringPool().getIndex(Name);
+ }
+ StringRef getQualifiedName() const {
+ return getStringPool().getString(QualifiedNameIndex);
+ }
+
+ size_t getNameIndex() const { return NameIndex; }
+ size_t getQualifiedNameIndex() const { return QualifiedNameIndex; }
+
+ // Element type name.
+ StringRef getTypeName() const;
+
+ virtual StringRef getProducer() const { return StringRef(); }
+ virtual void setProducer(StringRef ProducerName) {}
+
+ virtual bool isCompileUnit() const { return false; }
+ virtual bool isRoot() const { return false; }
+
+ virtual void setReference(LVElement *Element) {}
+ virtual void setReference(LVScope *Scope) {}
+ virtual void setReference(LVSymbol *Symbol) {}
+ virtual void setReference(LVType *Type) {}
+
+ virtual void setLinkageName(StringRef LinkageName) {}
+ virtual StringRef getLinkageName() const { return StringRef(); }
+ virtual size_t getLinkageNameIndex() const { return 0; }
+
+ virtual uint32_t getCallLineNumber() const { return 0; }
+ virtual void setCallLineNumber(uint32_t Number) {}
+ virtual size_t getCallFilenameIndex() const { return 0; }
+ virtual void setCallFilenameIndex(size_t Index) {}
+ size_t getFilenameIndex() const { return FilenameIndex; }
+ void setFilenameIndex(size_t Index) { FilenameIndex = Index; }
+
+ // Set the File location for the Element.
+ void setFile(LVElement *Reference = nullptr);
+
+ virtual bool isBase() const { return false; }
+ virtual bool isTemplateParam() const { return false; }
+
+ virtual uint32_t getBitSize() const { return 0; }
+ virtual void setBitSize(uint32_t Size) {}
+
+ virtual int64_t getCount() const { return 0; }
+ virtual void setCount(int64_t Value) {}
+ virtual int64_t getLowerBound() const { return 0; }
+ virtual void setLowerBound(int64_t Value) {}
+ virtual int64_t getUpperBound() const { return 0; }
+ virtual void setUpperBound(int64_t Value) {}
+ virtual std::pair<unsigned, unsigned> getBounds() const { return {}; }
+ virtual void setBounds(unsigned Lower, unsigned Upper) {}
+
+ // Access DW_AT_GNU_discriminator attribute.
+ virtual uint32_t getDiscriminator() const { return 0; }
+ virtual void setDiscriminator(uint32_t Value) {}
+
+ // Process the values for a DW_TAG_enumerator.
+ virtual std::string getValue() const { return {}; }
+ virtual void setValue(StringRef Value) {}
+ virtual size_t getValueIndex() const { return 0; }
+
+ // DWARF Accessibility Codes.
+ uint32_t getAccessibilityCode() const { return AccessibilityCode; }
+ void setAccessibilityCode(uint32_t Access) { AccessibilityCode = Access; }
+ StringRef
+ accessibilityString(uint32_t Access = dwarf::DW_ACCESS_private) const;
+
+ // DWARF Inline Codes.
+ uint32_t getInlineCode() const { return InlineCode; }
+ void setInlineCode(uint32_t Code) { InlineCode = Code; }
+ StringRef inlineCodeString(uint32_t Code) const;
+
+ // DWARF Virtuality Codes.
+ uint32_t getVirtualityCode() const { return VirtualityCode; }
+ void setVirtualityCode(uint32_t Virtuality) { VirtualityCode = Virtuality; }
+ StringRef
+ virtualityString(uint32_t Virtuality = dwarf::DW_VIRTUALITY_none) const;
+
+ // DWARF Extern Codes.
+ StringRef externalString() const;
+
+ LVElement *getType() const { return ElementType; }
+ LVType *getTypeAsType() const;
+ LVScope *getTypeAsScope() const;
+
+ void setType(LVElement *Element = nullptr) {
+ ElementType = Element;
+ if (Element) {
+ setHasType();
+ Element->setIsReferencedType();
+ }
+ }
+
+ // Set the type for the element, handling template parameters.
+ void setGenericType(LVElement *Element);
+
+ StringRef getTypeQualifiedName() const {
+ return ElementType ? ElementType->getQualifiedName() : "";
+ }
+
+ StringRef typeAsString() const;
+ std::string typeOffsetAsString() const;
+ std::string discriminatorAsString() const;
+
+ LVScope *traverseParents(LVScopeGetFunction GetFunction) const;
+
+ LVScope *getFunctionParent() const;
+ virtual LVScope *getCompileUnitParent() const;
+
+ // Print any referenced element.
+ void printReference(raw_ostream &OS, bool Full, LVElement *Parent) const;
+
+ // Print the linkage name (Symbols and functions).
+ void printLinkageName(raw_ostream &OS, bool Full, LVElement *Parent,
+ LVScope *Scope) const;
+ void printLinkageName(raw_ostream &OS, bool Full, LVElement *Parent) const;
+
+ // Generate the full name for the Element.
+ void resolveFullname(LVElement *BaseType, StringRef Name = emptyString());
+
+ // Generate a name for unnamed elements.
+ void generateName(std::string &Prefix) const;
+ void generateName();
+
+ virtual bool removeElement(LVElement *Element) { return false; }
+ virtual void updateLevel(LVScope *Parent, bool Moved = false);
+
+ // During the parsing of the debug information, the logical elements are
+ // created with information extracted from its description entries (DIE).
+ // But they are not complete for the logical view concept. A second pass
+ // is executed in order to collect their additional information.
+ // The following functions 'resolve' some of their properties, such as
+ // name, references, parents, extra information based on the element kind.
+ virtual void resolve();
+ virtual void resolveExtra() {}
+ virtual void resolveName();
+ virtual void resolveReferences() {}
+ void resolveParents();
+
+ bool referenceMatch(const LVElement *Element) const;
+
+ // Returns true if current element is logically equal to the given 'Element'.
+ bool equals(const LVElement *Element) const;
+
+ // Report the current element as missing or added during comparison.
+ virtual void report(LVComparePass Pass) {}
+
+ static LVElementDispatch &getDispatch() { return Dispatch; }
+};
+
+} // end namespace logicalview
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVELEMENT_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVLine.h b/contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVLine.h
new file mode 100644
index 0000000000..4716a17c03
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVLine.h
@@ -0,0 +1,175 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- LVLine.h ------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the LVLine class, which is used to describe a debug
+// information line.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVLINE_H
+#define LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVLINE_H
+
+#include "llvm/DebugInfo/LogicalView/Core/LVElement.h"
+
+namespace llvm {
+namespace logicalview {
+
+enum class LVLineKind {
+ IsBasicBlock,
+ IsDiscriminator,
+ IsEndSequence,
+ IsEpilogueBegin,
+ IsLineDebug,
+ IsLineAssembler,
+ IsNewStatement, // Shared with CodeView 'IsStatement' flag.
+ IsPrologueEnd,
+ IsAlwaysStepInto, // CodeView
+ IsNeverStepInto, // CodeView
+ LastEntry
+};
+using LVLineKindSet = std::set<LVLineKind>;
+using LVLineDispatch = std::map<LVLineKind, LVLineGetFunction>;
+using LVLineRequest = std::vector<LVLineGetFunction>;
+
+// Class to represent a logical line.
+class LVLine : public LVElement {
+ // Typed bitvector with kinds for this line.
+ LVProperties<LVLineKind> Kinds;
+ static LVLineDispatch Dispatch;
+
+ // Find the current line in the given 'Targets'.
+ LVLine *findIn(const LVLines *Targets) const;
+
+public:
+ LVLine() : LVElement(LVSubclassID::LV_LINE) {
+ setIsLine();
+ setIncludeInPrint();
+ }
+ LVLine(const LVLine &) = delete;
+ LVLine &operator=(const LVLine &) = delete;
+ virtual ~LVLine() = default;
+
+ static bool classof(const LVElement *Element) {
+ return Element->getSubclassID() == LVSubclassID::LV_LINE;
+ }
+
+ KIND(LVLineKind, IsBasicBlock);
+ KIND(LVLineKind, IsDiscriminator);
+ KIND(LVLineKind, IsEndSequence);
+ KIND(LVLineKind, IsEpilogueBegin);
+ KIND(LVLineKind, IsLineDebug);
+ KIND(LVLineKind, IsLineAssembler);
+ KIND(LVLineKind, IsNewStatement);
+ KIND(LVLineKind, IsPrologueEnd);
+ KIND(LVLineKind, IsAlwaysStepInto);
+ KIND(LVLineKind, IsNeverStepInto);
+
+ const char *kind() const override;
+
+ // Use the offset to store the line address.
+ uint64_t getAddress() const { return getOffset(); }
+ void setAddress(uint64_t address) { setOffset(address); }
+
+ // String used for printing objects with no line number.
+ std::string noLineAsString(bool ShowZero = false) const override;
+
+ // Line number for display; in the case of Inlined Functions, we use the
+ // DW_AT_call_line attribute; otherwise use DW_AT_decl_line attribute.
+ std::string lineNumberAsString(bool ShowZero = false) const override {
+ return lineAsString(getLineNumber(), getDiscriminator(), ShowZero);
+ }
+
+ static LVLineDispatch &getDispatch() { return Dispatch; }
+
+ // Iterate through the 'References' set and check that all its elements
+ // are present in the 'Targets' set. For a missing element, mark its
+ // parents as missing.
+ static void markMissingParents(const LVLines *References,
+ const LVLines *Targets);
+
+ // Returns true if current line is logically equal to the given 'Line'.
+ virtual bool equals(const LVLine *Line) const;
+
+ // Returns true if the given 'References' are logically equal to the
+ // given 'Targets'.
+ static bool equals(const LVLines *References, const LVLines *Targets);
+
+ // Report the current line as missing or added during comparison.
+ void report(LVComparePass Pass) override;
+
+ void print(raw_ostream &OS, bool Full = true) const override;
+ void printExtra(raw_ostream &OS, bool Full = true) const override {}
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+ void dump() const override { print(dbgs()); }
+#endif
+};
+
+// Class to represent a DWARF line record object.
+class LVLineDebug final : public LVLine {
+ // Discriminator value (DW_LNE_set_discriminator). The DWARF standard
+ // defines the discriminator as an unsigned LEB128 integer.
+ uint32_t Discriminator = 0;
+
+public:
+ LVLineDebug() : LVLine() { setIsLineDebug(); }
+ LVLineDebug(const LVLineDebug &) = delete;
+ LVLineDebug &operator=(const LVLineDebug &) = delete;
+ ~LVLineDebug() = default;
+
+ // Additional line information. It includes attributes that describes
+ // states in the machine instructions (basic block, end prologue, etc).
+ std::string statesInfo(bool Formatted) const;
+
+ // Access DW_LNE_set_discriminator attribute.
+ uint32_t getDiscriminator() const override { return Discriminator; }
+ void setDiscriminator(uint32_t Value) override {
+ Discriminator = Value;
+ setIsDiscriminator();
+ }
+
+ // Returns true if current line is logically equal to the given 'Line'.
+ bool equals(const LVLine *Line) const override;
+
+ void printExtra(raw_ostream &OS, bool Full = true) const override;
+};
+
+// Class to represent an assembler line extracted from the text section.
+class LVLineAssembler final : public LVLine {
+public:
+ LVLineAssembler() : LVLine() { setIsLineAssembler(); }
+ LVLineAssembler(const LVLineAssembler &) = delete;
+ LVLineAssembler &operator=(const LVLineAssembler &) = delete;
+ ~LVLineAssembler() = default;
+
+ // Print blanks as the line number.
+ std::string noLineAsString(bool ShowZero) const override {
+ return std::string(8, ' ');
+ };
+
+ // Returns true if current line is logically equal to the given 'Line'.
+ bool equals(const LVLine *Line) const override;
+
+ void printExtra(raw_ostream &OS, bool Full = true) const override;
+};
+
+} // end namespace logicalview
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVLINE_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVLocation.h b/contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVLocation.h
new file mode 100644
index 0000000000..40f7ac0f59
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVLocation.h
@@ -0,0 +1,208 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- LVLocation.h --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the LVOperation and LVLocation classes, which are used
+// to describe variable locations.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVLOCATION_H
+#define LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVLOCATION_H
+
+#include "llvm/DebugInfo/LogicalView/Core/LVObject.h"
+
+namespace llvm {
+namespace logicalview {
+
+using LVLineRange = std::pair<LVLine *, LVLine *>;
+
+// The DW_AT_data_member_location attribute is a simple member offset.
+const LVSmall LVLocationMemberOffset = 0;
+
+class LVOperation final {
+ // To describe an operation:
+ // OpCode
+ // Operands[0]: First operand.
+ // Operands[1]: Second operand.
+ // OP_bregx, OP_bit_piece, OP_[GNU_]const_type,
+ // OP_[GNU_]deref_type, OP_[GNU_]entry_value, OP_implicit_value,
+ // OP_[GNU_]implicit_pointer, OP_[GNU_]regval_type, OP_xderef_type.
+ LVSmall Opcode = 0;
+ uint64_t Operands[2];
+
+public:
+ LVOperation() = delete;
+ LVOperation(LVSmall Opcode, LVUnsigned Operand1, LVUnsigned Operand2)
+ : Opcode(Opcode) {
+ Operands[0] = Operand1;
+ Operands[1] = Operand2;
+ }
+ LVOperation(const LVOperation &) = delete;
+ LVOperation &operator=(const LVOperation &) = delete;
+ ~LVOperation() = default;
+
+ LVSmall getOpcode() const { return Opcode; }
+ uint64_t getOperand1() const { return Operands[0]; }
+ uint64_t getOperand2() const { return Operands[1]; }
+ std::string getOperandsDWARFInfo();
+ std::string getOperandsCodeViewInfo();
+
+ void print(raw_ostream &OS, bool Full = true) const;
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+ void dump() { print(dbgs()); }
+#endif
+};
+
+class LVLocation : public LVObject {
+ enum class Property {
+ IsAddressRange,
+ IsBaseClassOffset,
+ IsBaseClassStep,
+ IsClassOffset,
+ IsFixedAddress,
+ IsLocationSimple,
+ IsGapEntry,
+ IsOperation,
+ IsOperationList,
+ IsRegister,
+ IsStackOffset,
+ IsDiscardedRange,
+ IsInvalidRange,
+ IsInvalidLower,
+ IsInvalidUpper,
+ IsCallSite,
+ LastEntry
+ };
+ // Typed bitvector with properties for this location.
+ LVProperties<Property> Properties;
+
+ // True if the location it is associated with a debug range.
+ bool hasAssociatedRange() const {
+ return !getIsClassOffset() && !getIsDiscardedRange();
+ }
+
+protected:
+ // Line numbers associated with locations ranges.
+ LVLine *LowerLine = nullptr;
+ LVLine *UpperLine = nullptr;
+
+ // Active range:
+ // LowPC: an offset from an applicable base address, not a PC value.
+ // HighPC: an offset from an applicable base address, or a length.
+ LVAddress LowPC = 0;
+ LVAddress HighPC = 0;
+
+ void setKind();
+
+public:
+ LVLocation() : LVObject() { setIsLocation(); }
+ LVLocation(const LVLocation &) = delete;
+ LVLocation &operator=(const LVLocation &) = delete;
+ virtual ~LVLocation() = default;
+
+ PROPERTY(Property, IsAddressRange);
+ PROPERTY(Property, IsBaseClassOffset);
+ PROPERTY(Property, IsBaseClassStep);
+ PROPERTY_1(Property, IsClassOffset, IsLocationSimple);
+ PROPERTY_1(Property, IsFixedAddress, IsLocationSimple);
+ PROPERTY(Property, IsLocationSimple);
+ PROPERTY(Property, IsGapEntry);
+ PROPERTY(Property, IsOperationList);
+ PROPERTY(Property, IsOperation);
+ PROPERTY(Property, IsRegister);
+ PROPERTY_1(Property, IsStackOffset, IsLocationSimple);
+ PROPERTY(Property, IsDiscardedRange);
+ PROPERTY(Property, IsInvalidRange);
+ PROPERTY(Property, IsInvalidLower);
+ PROPERTY(Property, IsInvalidUpper);
+ PROPERTY(Property, IsCallSite);
+
+ const char *kind() const override;
+ // Mark the locations that have only DW_OP_fbreg as stack offset based.
+ virtual void updateKind() {}
+
+ // Line numbers for locations.
+ const LVLine *getLowerLine() const { return LowerLine; }
+ void setLowerLine(LVLine *Line) { LowerLine = Line; }
+ const LVLine *getUpperLine() const { return UpperLine; }
+ void setUpperLine(LVLine *Line) { UpperLine = Line; }
+
+ // Addresses for locations.
+ LVAddress getLowerAddress() const override { return LowPC; }
+ void setLowerAddress(LVAddress Address) override { LowPC = Address; }
+ LVAddress getUpperAddress() const override { return HighPC; }
+ void setUpperAddress(LVAddress Address) override { HighPC = Address; }
+
+ std::string getIntervalInfo() const;
+
+ bool validateRanges();
+
+ // In order to calculate a symbol coverage (percentage), take the ranges
+ // and obtain the number of units (bytes) covered by those ranges. We can't
+ // use the line numbers, because they can be zero or invalid.
+ // We return:
+ // false: No locations or multiple locations.
+ // true: a single location.
+ static bool calculateCoverage(LVLocations *Locations, unsigned &Factor,
+ float &Percentage);
+
+ virtual void addObject(LVAddress LowPC, LVAddress HighPC,
+ LVUnsigned SectionOffset, uint64_t LocDescOffset) {}
+ virtual void addObject(LVSmall Opcode, LVUnsigned Operand1,
+ LVUnsigned Operand2) {}
+
+ static void print(LVLocations *Locations, raw_ostream &OS, bool Full = true);
+ void printInterval(raw_ostream &OS, bool Full = true) const;
+ void printRaw(raw_ostream &OS, bool Full = true) const;
+ virtual void printRawExtra(raw_ostream &OS, bool Full = true) const {}
+
+ void print(raw_ostream &OS, bool Full = true) const override;
+ void printExtra(raw_ostream &OS, bool Full = true) const override;
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+ void dump() const override { print(dbgs()); }
+#endif
+};
+
+class LVLocationSymbol final : public LVLocation {
+ // Location descriptors for the active range.
+ LVAutoOperations *Entries = nullptr;
+
+ void updateKind() override;
+
+public:
+ LVLocationSymbol() : LVLocation() {}
+ LVLocationSymbol(const LVLocationSymbol &) = delete;
+ LVLocationSymbol &operator=(const LVLocationSymbol &) = delete;
+ ~LVLocationSymbol() { delete Entries; };
+
+ void addObject(LVAddress LowPC, LVAddress HighPC, LVUnsigned SectionOffset,
+ uint64_t LocDescOffset) override;
+ void addObject(LVSmall Opcode, LVUnsigned Operand1,
+ LVUnsigned Operand2) override;
+
+ void printRawExtra(raw_ostream &OS, bool Full = true) const override;
+ void printExtra(raw_ostream &OS, bool Full = true) const override;
+};
+
+} // end namespace logicalview
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVLOCATION_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVObject.h b/contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVObject.h
new file mode 100644
index 0000000000..8af2e4da50
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVObject.h
@@ -0,0 +1,361 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- LVObject.h ----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the LVObject class, which is used to describe a debug
+// information object.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVOBJECT_H
+#define LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVOBJECT_H
+
+#include "llvm/BinaryFormat/Dwarf.h"
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/DebugInfo/CodeView/TypeIndex.h"
+#include "llvm/DebugInfo/LogicalView/Core/LVSupport.h"
+#include <limits>
+#include <list>
+#include <map>
+#include <string>
+
+namespace llvm {
+namespace dwarf {
+// Support for CodeView ModifierOptions::Unaligned.
+constexpr Tag DW_TAG_unaligned = Tag(dwarf::DW_TAG_hi_user + 1);
+} // namespace dwarf
+} // namespace llvm
+
+namespace llvm {
+namespace logicalview {
+
+using LVSectionIndex = uint64_t;
+using LVAddress = uint64_t;
+using LVHalf = uint16_t;
+using LVLevel = uint32_t;
+using LVOffset = uint64_t;
+using LVSigned = int64_t;
+using LVUnsigned = uint64_t;
+using LVSmall = uint8_t;
+
+class LVElement;
+class LVLine;
+class LVLocation;
+class LVLocationSymbol;
+class LVObject;
+class LVOperation;
+class LVScope;
+class LVSymbol;
+class LVType;
+
+class LVOptions;
+class LVPatterns;
+
+StringRef typeNone();
+StringRef typeVoid();
+StringRef typeInt();
+StringRef typeUnknown();
+StringRef emptyString();
+
+using LVElementSetFunction = void (LVElement::*)();
+using LVElementGetFunction = bool (LVElement::*)() const;
+using LVLineSetFunction = void (LVLine::*)();
+using LVLineGetFunction = bool (LVLine::*)() const;
+using LVObjectSetFunction = void (LVObject::*)();
+using LVObjectGetFunction = bool (LVObject::*)() const;
+using LVScopeSetFunction = void (LVScope::*)();
+using LVScopeGetFunction = bool (LVScope::*)() const;
+using LVSymbolSetFunction = void (LVSymbol::*)();
+using LVSymbolGetFunction = bool (LVSymbol::*)() const;
+using LVTypeSetFunction = void (LVType::*)();
+using LVTypeGetFunction = bool (LVType::*)() const;
+
+// The LVScope class represents a logical scope and uses vectors to store its
+// children, which are pointers to other allocated logical elements (types,
+// symbols, lines, scopes, ranges). On destruction, we have to traverse each
+// vector and destroy its elements. The other case is LVSymbol.
+// These definitions are intended to be used by the LVScope and LVSymbol
+// to support automatic vector cleanup.
+using LVAutoLines = LVAutoSmallVector<LVLine *>;
+using LVAutoLocations = LVAutoSmallVector<LVLocation *>;
+using LVAutoOperations = LVAutoSmallVector<LVOperation *, 8>;
+using LVAutoScopes = LVAutoSmallVector<LVScope *>;
+using LVAutoSymbols = LVAutoSmallVector<LVSymbol *>;
+using LVAutoTypes = LVAutoSmallVector<LVType *>;
+
+// These definitions are intended to be used when the vector will be used
+// just a container, with no automatic destruction.
+using LVElements = SmallVector<LVElement *, 8>;
+using LVLines = SmallVector<LVLine *, 8>;
+using LVLocations = SmallVector<LVLocation *, 8>;
+using LVOperations = SmallVector<LVOperation *, 8>;
+using LVScopes = SmallVector<LVScope *, 8>;
+using LVSymbols = SmallVector<LVSymbol *, 8>;
+using LVTypes = SmallVector<LVType *, 8>;
+
+using LVOffsets = SmallVector<LVOffset, 8>;
+
+const LVAddress MaxAddress = std::numeric_limits<uint64_t>::max();
+
+enum class LVBinaryType { NONE, ELF, COFF };
+enum class LVComparePass { Missing, Added };
+
+// Validate functions.
+using LVValidLocation = bool (LVLocation::*)();
+
+// Keep counters of objects.
+struct LVCounter {
+ unsigned Lines = 0;
+ unsigned Scopes = 0;
+ unsigned Symbols = 0;
+ unsigned Types = 0;
+ void reset() {
+ Lines = 0;
+ Scopes = 0;
+ Symbols = 0;
+ Types = 0;
+ }
+};
+
+class LVObject {
+ enum class Property {
+ IsLocation, // Location.
+ IsGlobalReference, // This object is being referenced from another CU.
+ IsGeneratedName, // The Object name was generated.
+ IsResolved, // Object has been resolved.
+ IsResolvedName, // Object name has been resolved.
+ IsDiscarded, // Object has been stripped by the linker.
+ IsOptimized, // Object has been optimized by the compiler.
+ IsAdded, // Object has been 'added'.
+ IsMatched, // Object has been matched to a given pattern.
+ IsMissing, // Object is 'missing'.
+ IsMissingLink, // Object is indirectly 'missing'.
+ IsInCompare, // In 'compare' mode.
+ IsFileFromReference, // File ID from specification.
+ IsLineFromReference, // Line No from specification.
+ HasMoved, // The object was moved from 'target' to 'reference'.
+ HasPattern, // The object has a pattern.
+ IsFinalized, // CodeView object is finalized.
+ IsReferenced, // CodeView object being referenced.
+ HasCodeViewLocation, // CodeView object with debug location.
+ LastEntry
+ };
+ // Typed bitvector with properties for this object.
+ LVProperties<Property> Properties;
+
+ LVOffset Offset = 0;
+ uint32_t LineNumber = 0;
+ LVLevel ScopeLevel = 0;
+ union {
+ dwarf::Tag Tag;
+ dwarf::Attribute Attr;
+ LVSmall Opcode;
+ } TagAttrOpcode = {dwarf::DW_TAG_null};
+
+ // The parent of this object (nullptr if the root scope). For locations,
+ // the parent is a symbol object; otherwise it is a scope object.
+ union {
+ LVElement *Element;
+ LVScope *Scope;
+ LVSymbol *Symbol;
+ } Parent = {nullptr};
+
+ // We do not support any object duplication, as they are created by parsing
+ // the debug information. There is only the case where we need a very basic
+ // object, to manipulate its offset, line number and scope level. Allow the
+ // copy constructor to create that object; it is used to print a reference
+ // to another object and in the case of templates, to print its encoded args.
+ LVObject(const LVObject &Object) {
+#ifndef NDEBUG
+ incID();
+#endif
+ Properties = Object.Properties;
+ Offset = Object.Offset;
+ LineNumber = Object.LineNumber;
+ ScopeLevel = Object.ScopeLevel;
+ TagAttrOpcode = Object.TagAttrOpcode;
+ Parent = Object.Parent;
+ }
+
+#ifndef NDEBUG
+ // This is an internal ID used for debugging logical elements. It is used
+ // for cases where an unique offset within the binary input file is not
+ // available.
+ static uint64_t GID;
+ uint64_t ID = 0;
+
+ void incID() {
+ ++GID;
+ ID = GID;
+ }
+#endif
+
+protected:
+ // Get a string representation for the given number and discriminator.
+ std::string lineAsString(uint32_t LineNumber, LVHalf Discriminator,
+ bool ShowZero) const;
+
+ // Get a string representation for the given number.
+ std::string referenceAsString(uint32_t LineNumber, bool Spaces) const;
+
+ // Print the Filename or Pathname.
+ // Empty implementation for those objects that do not have any user
+ // source file references, such as debug locations.
+ virtual void printFileIndex(raw_ostream &OS, bool Full = true) const {}
+
+public:
+ LVObject() {
+#ifndef NDEBUG
+ incID();
+#endif
+ };
+ LVObject &operator=(const LVObject &) = delete;
+ virtual ~LVObject() = default;
+
+ PROPERTY(Property, IsLocation);
+ PROPERTY(Property, IsGlobalReference);
+ PROPERTY(Property, IsGeneratedName);
+ PROPERTY(Property, IsResolved);
+ PROPERTY(Property, IsResolvedName);
+ PROPERTY(Property, IsDiscarded);
+ PROPERTY(Property, IsOptimized);
+ PROPERTY(Property, IsAdded);
+ PROPERTY(Property, IsMatched);
+ PROPERTY(Property, IsMissing);
+ PROPERTY(Property, IsMissingLink);
+ PROPERTY(Property, IsInCompare);
+ PROPERTY(Property, IsFileFromReference);
+ PROPERTY(Property, IsLineFromReference);
+ PROPERTY(Property, HasMoved);
+ PROPERTY(Property, HasPattern);
+ PROPERTY(Property, IsFinalized);
+ PROPERTY(Property, IsReferenced);
+ PROPERTY(Property, HasCodeViewLocation);
+
+ // True if the scope has been named or typed or with line number.
+ virtual bool isNamed() const { return false; }
+ virtual bool isTyped() const { return false; }
+ virtual bool isFiled() const { return false; }
+ bool isLined() const { return LineNumber != 0; }
+
+ // DWARF tag, attribute or expression opcode.
+ dwarf::Tag getTag() const { return TagAttrOpcode.Tag; }
+ void setTag(dwarf::Tag Tag) { TagAttrOpcode.Tag = Tag; }
+ dwarf::Attribute getAttr() const { return TagAttrOpcode.Attr; }
+ void setAttr(dwarf::Attribute Attr) { TagAttrOpcode.Attr = Attr; }
+ LVSmall getOpcode() const { return TagAttrOpcode.Opcode; }
+ void setOpcode(LVSmall Opcode) { TagAttrOpcode.Opcode = Opcode; }
+
+ // DIE offset.
+ LVOffset getOffset() const { return Offset; }
+ void setOffset(LVOffset DieOffset) { Offset = DieOffset; }
+
+ // Level where this object is located.
+ LVLevel getLevel() const { return ScopeLevel; }
+ void setLevel(LVLevel Level) { ScopeLevel = Level; }
+
+ virtual StringRef getName() const { return StringRef(); }
+ virtual void setName(StringRef ObjectName) {}
+
+ LVElement *getParent() const {
+ assert((!Parent.Element ||
+ (Parent.Element && static_cast<LVElement *>(Parent.Element))) &&
+ "Invalid element");
+ return Parent.Element;
+ }
+ LVScope *getParentScope() const {
+ assert((!Parent.Scope ||
+ (Parent.Scope && static_cast<LVScope *>(Parent.Scope))) &&
+ "Invalid scope");
+ return Parent.Scope;
+ }
+ LVSymbol *getParentSymbol() const {
+ assert((!Parent.Symbol ||
+ (Parent.Symbol && static_cast<LVSymbol *>(Parent.Symbol))) &&
+ "Invalid symbol");
+ return Parent.Symbol;
+ }
+ void setParent(LVScope *Scope);
+ void setParent(LVSymbol *Symbol);
+ void resetParent() { Parent = {nullptr}; }
+
+ virtual LVAddress getLowerAddress() const { return 0; }
+ virtual void setLowerAddress(LVAddress Address) {}
+ virtual LVAddress getUpperAddress() const { return 0; }
+ virtual void setUpperAddress(LVAddress Address) {}
+
+ uint32_t getLineNumber() const { return LineNumber; }
+ void setLineNumber(uint32_t Number) { LineNumber = Number; }
+
+ virtual const char *kind() const { return nullptr; }
+
+ std::string indentAsString() const;
+ std::string indentAsString(LVLevel Level) const;
+
+ // String used as padding for printing objects with no line number.
+ virtual std::string noLineAsString(bool ShowZero) const;
+
+ // Line number for display; in the case of inlined functions, we use the
+ // DW_AT_call_line attribute; otherwise use DW_AT_decl_line attribute.
+ virtual std::string lineNumberAsString(bool ShowZero = false) const {
+ return lineAsString(getLineNumber(), 0, ShowZero);
+ }
+ std::string lineNumberAsStringStripped(bool ShowZero = false) const;
+
+ // This function prints the logical view to an output stream.
+ // Split: Prints the compilation unit view to a file.
+ // Match: Prints the object only if it satisfies the patterns collected
+ // from the command line. See the '--select' option.
+ // Print: Print the object only if satisfies the conditions specified by
+ // the different '--print' options.
+ // Full: Prints full information for objects representing debug locations,
+ // aggregated scopes, compile unit, functions and namespaces.
+ virtual Error doPrint(bool Split, bool Match, bool Print, raw_ostream &OS,
+ bool Full = true) const;
+ void printAttributes(raw_ostream &OS, bool Full = true) const;
+ void printAttributes(raw_ostream &OS, bool Full, StringRef Name,
+ LVObject *Parent, StringRef Value,
+ bool UseQuotes = false, bool PrintRef = false) const;
+
+ // Mark branch as missing (current element and parents).
+ void markBranchAsMissing();
+
+ // Prints the common information for an object (name, type, etc).
+ virtual void print(raw_ostream &OS, bool Full = true) const;
+ // Prints additional information for an object, depending on its kind
+ // (class attributes, debug ranges, files, directories, etc).
+ virtual void printExtra(raw_ostream &OS, bool Full = true) const {}
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+ virtual void dump() const { print(dbgs()); }
+#endif
+
+ uint64_t getID() const {
+ return
+#ifndef NDEBUG
+ ID;
+#else
+ 0;
+#endif
+ }
+};
+
+} // end namespace logicalview
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVOBJECT_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVOptions.h b/contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVOptions.h
new file mode 100644
index 0000000000..ad0e0592ac
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVOptions.h
@@ -0,0 +1,656 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- LVOptions.h ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the LVOptions class, which is used to record the command
+// line options.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVOPTIONS_H
+#define LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVOPTIONS_H
+
+#include "llvm/ADT/StringSet.h"
+#include "llvm/DebugInfo/LogicalView/Core/LVLine.h"
+#include "llvm/DebugInfo/LogicalView/Core/LVScope.h"
+#include "llvm/DebugInfo/LogicalView/Core/LVSymbol.h"
+#include "llvm/DebugInfo/LogicalView/Core/LVType.h"
+#include "llvm/Support/Regex.h"
+#include <set>
+#include <string>
+
+namespace llvm {
+namespace logicalview {
+
+// Generate get and set 'bool' functions.
+#define BOOL_FUNCTION(FAMILY, FIELD) \
+ bool get##FAMILY##FIELD() const { return FAMILY.FIELD; } \
+ void set##FAMILY##FIELD() { FAMILY.FIELD = true; } \
+ void reset##FAMILY##FIELD() { FAMILY.FIELD = false; }
+
+// Generate get and set 'unsigned' functions.
+#define UNSIGNED_FUNCTION(FAMILY, FIELD) \
+ unsigned get##FAMILY##FIELD() const { return FAMILY.FIELD; } \
+ void set##FAMILY##FIELD(unsigned Value) { FAMILY.FIELD = Value; } \
+ void reset##FAMILY##FIELD() { FAMILY.FIELD = -1U; }
+
+// Generate get and set 'std::string' functions.
+#define STD_STRING_FUNCTION(FAMILY, FIELD) \
+ std::string get##FAMILY##FIELD() const { return FAMILY.FIELD; } \
+ void set##FAMILY##FIELD(std::string FIELD) { FAMILY.FIELD = FIELD; } \
+ void reset##FAMILY##FIELD() { FAMILY.FIELD = ""; }
+
+// Generate get and set 'std::set' functions.
+#define STDSET_FUNCTION_4(FAMILY, FIELD, TYPE, SET) \
+ bool get##FAMILY##FIELD() const { \
+ return FAMILY.SET.find(TYPE::FIELD) != FAMILY.SET.end(); \
+ } \
+ void set##FAMILY##FIELD() { FAMILY.SET.insert(TYPE::FIELD); } \
+ void reset##FAMILY##FIELD() { \
+ std::set<TYPE>::iterator Iter = FAMILY.SET.find(TYPE::FIELD); \
+ if (Iter != FAMILY.SET.end()) \
+ FAMILY.SET.erase(Iter); \
+ }
+
+#define STDSET_FUNCTION_5(FAMILY, FIELD, ENTRY, TYPE, SET) \
+ bool get##FAMILY##FIELD##ENTRY() const { \
+ return FAMILY.SET.find(TYPE::ENTRY) != FAMILY.SET.end(); \
+ } \
+ void set##FAMILY##FIELD##ENTRY() { FAMILY.SET.insert(TYPE::ENTRY); }
+
+// Generate get and set functions for '--attribute'
+#define ATTRIBUTE_OPTION(FIELD) \
+ STDSET_FUNCTION_4(Attribute, FIELD, LVAttributeKind, Kinds)
+
+// Generate get and set functions for '--output'
+#define OUTPUT_OPTION(FIELD) \
+ STDSET_FUNCTION_4(Output, FIELD, LVOutputKind, Kinds)
+
+// Generate get and set functions for '--print'
+#define PRINT_OPTION(FIELD) STDSET_FUNCTION_4(Print, FIELD, LVPrintKind, Kinds)
+
+// Generate get and set functions for '--warning'
+#define WARNING_OPTION(FIELD) \
+ STDSET_FUNCTION_4(Warning, FIELD, LVWarningKind, Kinds)
+
+// Generate get and set functions for '--compare'
+#define COMPARE_OPTION(FIELD) \
+ STDSET_FUNCTION_4(Compare, FIELD, LVCompareKind, Elements)
+
+// Generate get and set functions for '--report'
+#define REPORT_OPTION(FIELD) \
+ STDSET_FUNCTION_4(Report, FIELD, LVReportKind, Kinds)
+
+// Generate get and set functions for '--internal'
+#define INTERNAL_OPTION(FIELD) \
+ STDSET_FUNCTION_4(Internal, FIELD, LVInternalKind, Kinds)
+
+using LVOffsetSet = std::set<uint64_t>;
+
+enum class LVAttributeKind {
+ All, // --attribute=all
+ Argument, // --attribute=argument
+ Base, // --attribute=base
+ Coverage, // --attribute=coverage
+ Directories, // --attribute=directories
+ Discarded, // --attribute=discarded
+ Discriminator, // --attribute=discriminator
+ Encoded, // --attribute=encoded
+ Extended, // --attribute=extended
+ Filename, // --attribute=filename
+ Files, // --attribute=files
+ Format, // --attribute=format
+ Gaps, // --attribute=gaps
+ Generated, // --attribute=generated
+ Global, // --attribute=global
+ Inserted, // --attribute=inserted
+ Level, // --attribute=level
+ Linkage, // --attribute=linkage
+ Local, // --attribute=local
+ Location, // --attribute=location
+ Offset, // --attribute=offset
+ Pathname, // --attribute=pathname
+ Producer, // --attribute=producer
+ Publics, // --attribute=publics
+ Qualified, // --attribute=qualified
+ Qualifier, // --attribute=qualifier
+ Range, // --attribute=range
+ Reference, // --attribute=reference
+ Register, // --attribute=register
+ Standard, // --attribute=standard
+ Subrange, // --attribute=subrange
+ System, // --attribute=system
+ Typename, // --attribute=typename
+ Underlying, // --attribute=underlying
+ Zero // --attribute=zero
+};
+using LVAttributeKindSet = std::set<LVAttributeKind>;
+
+enum class LVCompareKind {
+ All, // --compare=all
+ Lines, // --compare=lines
+ Scopes, // --compare=scopes
+ Symbols, // --compare=symbols
+ Types // --compare=types
+};
+using LVCompareKindSet = std::set<LVCompareKind>;
+
+enum class LVOutputKind {
+ All, // --output=all
+ Split, // --output=split
+ Json, // --output=json
+ Text // --output=text
+};
+using LVOutputKindSet = std::set<LVOutputKind>;
+
+enum class LVPrintKind {
+ All, // --print=all
+ Elements, // --print=elements
+ Instructions, // --print=instructions
+ Lines, // --print=lines
+ Scopes, // --print=scopes
+ Sizes, // --print=sizes
+ Symbols, // --print=symbols
+ Summary, // --print=summary
+ Types, // --print=types
+ Warnings // --print=warnings
+};
+using LVPrintKindSet = std::set<LVPrintKind>;
+
+enum class LVReportKind {
+ All, // --report=all
+ Children, // --report=children
+ List, // --report=list
+ Parents, // --report=parents
+ View // --report=view
+};
+using LVReportKindSet = std::set<LVReportKind>;
+
+enum class LVWarningKind {
+ All, // --warning=all
+ Coverages, // --warning=coverages
+ Lines, // --warning=lines
+ Locations, // --warning=locations
+ Ranges // --warning=ranges
+};
+using LVWarningKindSet = std::set<LVWarningKind>;
+
+enum class LVInternalKind {
+ All, // --internal=all
+ Cmdline, // --internal=cmdline
+ ID, // --internal=id
+ Integrity, // --internal=integrity
+ None, // --internal=none
+ Tag // --internal=tag
+};
+using LVInternalKindSet = std::set<LVInternalKind>;
+
+// The 'Kinds' members are a one-to-one mapping to the associated command
+// options that supports comma separated values. There are other 'bool'
+// members that in very few cases point to a command option (see associated
+// comment). Other cases for 'bool' refers to internal values derivated from
+// the command options.
+class LVOptions {
+ class LVAttribute {
+ public:
+ LVAttributeKindSet Kinds; // --attribute=<Kind>
+ bool Added = false; // Added elements found during comparison.
+ bool AnyLocation = false; // Any kind of location information.
+ bool AnySource = false; // Any kind of source information.
+ bool Missing = false; // Missing elements found during comparison.
+ };
+
+ class LVCompare {
+ public:
+ LVCompareKindSet Elements; // --compare=<kind>
+ bool Context = false; // --compare-context
+ bool Execute = false; // Compare requested.
+ bool Print = false; // Enable any printing.
+ };
+
+ class LVPrint {
+ public:
+ LVPrintKindSet Kinds; // --print=<Kind>
+ bool AnyElement = false; // Request to print any element.
+ bool AnyLine = false; // Print 'lines' or 'instructions'.
+ bool Execute = false; // Print requested.
+ bool Formatting = true; // Disable formatting during printing.
+ bool Offset = false; // Print offsets while formatting is disabled.
+ bool SizesSummary = false; // Print 'sizes' or 'summary'.
+ };
+
+ class LVReport {
+ public:
+ LVReportKindSet Kinds; // --report=<kind>
+ bool AnyView = false; // View, Parents or Children.
+ bool Execute = false; // Report requested.
+ };
+
+ class LVSelect {
+ public:
+ bool IgnoreCase = false; // --select-ignore-case
+ bool UseRegex = false; // --select-use-regex
+ bool Execute = false; // Select requested.
+ bool GenericKind = false; // We have collected generic kinds.
+ bool GenericPattern = false; // We have collected generic patterns.
+ bool OffsetPattern = false; // We have collected offset patterns.
+ StringSet<> Generic; // --select=<Pattern>
+ LVOffsetSet Offsets; // --select-offset=<Offset>
+ LVElementKindSet Elements; // --select-elements=<Kind>
+ LVLineKindSet Lines; // --select-lines=<Kind>
+ LVScopeKindSet Scopes; // --select-scopes=<Kind>
+ LVSymbolKindSet Symbols; // --select-symbols=<Kind>
+ LVTypeKindSelection Types; // --select-types=<Kind>
+ };
+
+ class LVOutput {
+ public:
+ LVOutputKindSet Kinds; // --output=<kind>
+ LVSortMode SortMode = LVSortMode::None; // --output-sort=<SortMode>
+ std::string Folder; // --output-folder=<Folder>
+ unsigned Level = -1U; // --output-level=<level>
+ };
+
+ class LVWarning {
+ public:
+ LVWarningKindSet Kinds; // --warning=<Kind>
+ };
+
+ class LVInternal {
+ public:
+ LVInternalKindSet Kinds; // --internal=<Kind>
+ };
+
+ class LVGeneral {
+ public:
+ bool CollectRanges = false; // Collect ranges information.
+ };
+
+ // Filters the output of the filename associated with the element being
+ // printed in order to see clearly which logical elements belongs to
+ // a particular filename. It is value is reset after the element
+ // that represents the Compile Unit is printed.
+ size_t LastFilenameIndex = 0;
+
+ // Controls the amount of additional spaces to insert when printing
+ // object attributes, in order to get a consistent printing layout.
+ size_t IndentationSize = 0;
+
+ // Calculate the indentation size, so we can use that value when printing
+ // additional attributes to objects, such as location.
+ void calculateIndentationSize();
+
+public:
+ void resetFilenameIndex() { LastFilenameIndex = 0; }
+ bool changeFilenameIndex(size_t Index) {
+ bool IndexChanged = (Index != LastFilenameIndex);
+ if (IndexChanged)
+ LastFilenameIndex = Index;
+ return IndexChanged;
+ }
+
+ // Access to command line options, pattern and printing information.
+ static LVOptions *getOptions();
+ static void setOptions(LVOptions *Options);
+
+ LVOptions() = default;
+ LVOptions(const LVOptions &) = default;
+ LVOptions &operator=(const LVOptions &) = default;
+ ~LVOptions() = default;
+
+ // Some command line options support shortcuts. For example:
+ // The command line option '--print=elements' is a shortcut for:
+ // '--print=instructions,lines,scopes,symbols,types'.
+ // In the case of logical view comparison, some options related to
+ // attributes must be set or reset for a proper comparison.
+ // Resolve any dependencies between command line options.
+ void resolveDependencies();
+ size_t indentationSize() const { return IndentationSize; }
+
+ LVAttribute Attribute;
+ LVCompare Compare;
+ LVOutput Output;
+ LVPrint Print;
+ LVReport Report;
+ LVSelect Select;
+ LVWarning Warning;
+ LVInternal Internal;
+ LVGeneral General;
+
+ // --attribute.
+ ATTRIBUTE_OPTION(All);
+ ATTRIBUTE_OPTION(Argument);
+ ATTRIBUTE_OPTION(Base);
+ ATTRIBUTE_OPTION(Coverage);
+ ATTRIBUTE_OPTION(Directories);
+ ATTRIBUTE_OPTION(Discarded);
+ ATTRIBUTE_OPTION(Discriminator);
+ ATTRIBUTE_OPTION(Encoded);
+ ATTRIBUTE_OPTION(Extended);
+ ATTRIBUTE_OPTION(Filename);
+ ATTRIBUTE_OPTION(Files);
+ ATTRIBUTE_OPTION(Format);
+ ATTRIBUTE_OPTION(Gaps);
+ ATTRIBUTE_OPTION(Generated);
+ ATTRIBUTE_OPTION(Global);
+ ATTRIBUTE_OPTION(Inserted);
+ ATTRIBUTE_OPTION(Level);
+ ATTRIBUTE_OPTION(Linkage);
+ ATTRIBUTE_OPTION(Location);
+ ATTRIBUTE_OPTION(Local);
+ ATTRIBUTE_OPTION(Offset);
+ ATTRIBUTE_OPTION(Pathname);
+ ATTRIBUTE_OPTION(Producer);
+ ATTRIBUTE_OPTION(Publics);
+ ATTRIBUTE_OPTION(Qualified);
+ ATTRIBUTE_OPTION(Qualifier);
+ ATTRIBUTE_OPTION(Range);
+ ATTRIBUTE_OPTION(Reference);
+ ATTRIBUTE_OPTION(Register);
+ ATTRIBUTE_OPTION(Standard);
+ ATTRIBUTE_OPTION(Subrange);
+ ATTRIBUTE_OPTION(System);
+ ATTRIBUTE_OPTION(Typename);
+ ATTRIBUTE_OPTION(Underlying);
+ ATTRIBUTE_OPTION(Zero);
+ BOOL_FUNCTION(Attribute, Added);
+ BOOL_FUNCTION(Attribute, AnyLocation);
+ BOOL_FUNCTION(Attribute, AnySource);
+ BOOL_FUNCTION(Attribute, Missing);
+
+ // --compare.
+ COMPARE_OPTION(All);
+ COMPARE_OPTION(Lines);
+ COMPARE_OPTION(Scopes);
+ COMPARE_OPTION(Symbols);
+ COMPARE_OPTION(Types);
+ BOOL_FUNCTION(Compare, Context);
+ BOOL_FUNCTION(Compare, Execute);
+ BOOL_FUNCTION(Compare, Print);
+
+ // --output.
+ OUTPUT_OPTION(All);
+ OUTPUT_OPTION(Split);
+ OUTPUT_OPTION(Text);
+ OUTPUT_OPTION(Json);
+ STD_STRING_FUNCTION(Output, Folder);
+ UNSIGNED_FUNCTION(Output, Level);
+ LVSortMode getSortMode() const { return Output.SortMode; }
+ void setSortMode(LVSortMode SortMode) { Output.SortMode = SortMode; }
+
+ // --print.
+ PRINT_OPTION(All);
+ PRINT_OPTION(Elements);
+ PRINT_OPTION(Instructions);
+ PRINT_OPTION(Lines);
+ PRINT_OPTION(Scopes);
+ PRINT_OPTION(Sizes);
+ PRINT_OPTION(Symbols);
+ PRINT_OPTION(Summary);
+ PRINT_OPTION(Types);
+ PRINT_OPTION(Warnings);
+ BOOL_FUNCTION(Print, AnyElement);
+ BOOL_FUNCTION(Print, AnyLine);
+ BOOL_FUNCTION(Print, Execute);
+ BOOL_FUNCTION(Print, Formatting);
+ BOOL_FUNCTION(Print, Offset);
+ BOOL_FUNCTION(Print, SizesSummary);
+
+ // --report.
+ REPORT_OPTION(All);
+ REPORT_OPTION(Children);
+ REPORT_OPTION(List);
+ REPORT_OPTION(Parents);
+ REPORT_OPTION(View);
+ BOOL_FUNCTION(Report, AnyView);
+ BOOL_FUNCTION(Report, Execute);
+
+ // --select.
+ BOOL_FUNCTION(Select, IgnoreCase);
+ BOOL_FUNCTION(Select, UseRegex);
+ BOOL_FUNCTION(Select, Execute);
+ BOOL_FUNCTION(Select, GenericKind);
+ BOOL_FUNCTION(Select, GenericPattern);
+ BOOL_FUNCTION(Select, OffsetPattern);
+
+ // --warning.
+ WARNING_OPTION(All);
+ WARNING_OPTION(Coverages);
+ WARNING_OPTION(Lines);
+ WARNING_OPTION(Locations);
+ WARNING_OPTION(Ranges);
+
+ // --internal.
+ INTERNAL_OPTION(All);
+ INTERNAL_OPTION(Cmdline);
+ INTERNAL_OPTION(ID);
+ INTERNAL_OPTION(Integrity);
+ INTERNAL_OPTION(None);
+ INTERNAL_OPTION(Tag);
+
+ // General shortcuts to some combinations.
+ BOOL_FUNCTION(General, CollectRanges);
+
+ void print(raw_ostream &OS) const;
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+ void dump() const { print(dbgs()); }
+#endif
+};
+
+inline LVOptions &options() { return (*LVOptions::getOptions()); }
+inline void setOptions(LVOptions *Options) { LVOptions::setOptions(Options); }
+
+class LVPatterns final {
+ // Pattern Mode.
+ enum class LVMatchMode {
+ None = 0, // No given pattern.
+ Match, // Perfect match.
+ NoCase, // Ignore case.
+ Regex // Regular expression.
+ };
+
+ // Keep the search pattern information.
+ struct LVMatch {
+ std::string Pattern; // Normal pattern.
+ std::shared_ptr<Regex> RE; // Regular Expression Pattern.
+ LVMatchMode Mode = LVMatchMode::None; // Match mode.
+ };
+
+ using LVMatchInfo = std::vector<LVMatch>;
+ LVMatchInfo GenericMatchInfo;
+ using LVMatchOffsets = std::vector<uint64_t>;
+ LVMatchOffsets OffsetMatchInfo;
+
+ // Element selection.
+ LVElementDispatch ElementDispatch;
+ LVLineDispatch LineDispatch;
+ LVScopeDispatch ScopeDispatch;
+ LVSymbolDispatch SymbolDispatch;
+ LVTypeDispatch TypeDispatch;
+
+ // Element selection request.
+ LVElementRequest ElementRequest;
+ LVLineRequest LineRequest;
+ LVScopeRequest ScopeRequest;
+ LVSymbolRequest SymbolRequest;
+ LVTypeRequest TypeRequest;
+
+ // Check an element printing Request.
+ template <typename T, typename U>
+ bool checkElementRequest(const T *Element, const U &Requests) const {
+ assert(Element && "Element must not be nullptr");
+ for (const auto &Request : Requests)
+ if ((Element->*Request)())
+ return true;
+ // Check generic element requests.
+ for (const LVElementGetFunction &Request : ElementRequest)
+ if ((Element->*Request)())
+ return true;
+ return false;
+ }
+
+ // Add an element printing request based on its kind.
+ template <typename T, typename U, typename V>
+ void addRequest(const T &Selection, const U &Dispatch, V &Request) const {
+ for (const auto &Entry : Selection) {
+ // Find target function to fullfit request.
+ typename U::const_iterator Iter = Dispatch.find(Entry);
+ if (Iter != Dispatch.end())
+ Request.push_back(Iter->second);
+ }
+ }
+
+ void addElement(LVElement *Element);
+
+ template <typename T, typename U>
+ void resolveGenericPatternMatch(T *Element, const U &Requests) {
+ assert(Element && "Element must not be nullptr");
+ auto CheckPattern = [=]() -> bool {
+ return (Element->isNamed() && matchGenericPattern(Element->getName())) ||
+ (Element->isTyped() &&
+ matchGenericPattern(Element->getTypeName()));
+ };
+ auto CheckOffset = [=]() -> bool {
+ return matchOffsetPattern(Element->getOffset());
+ };
+ if ((options().getSelectGenericPattern() && CheckPattern()) ||
+ (options().getSelectOffsetPattern() && CheckOffset()) ||
+ ((Requests.size() || ElementRequest.size()) &&
+ checkElementRequest(Element, Requests)))
+ addElement(Element);
+ }
+
+ template <typename U>
+ void resolveGenericPatternMatch(LVLine *Line, const U &Requests) {
+ assert(Line && "Line must not be nullptr");
+ auto CheckPattern = [=]() -> bool {
+ return matchGenericPattern(Line->lineNumberAsStringStripped()) ||
+ matchGenericPattern(Line->getName()) ||
+ matchGenericPattern(Line->getPathname());
+ };
+ auto CheckOffset = [=]() -> bool {
+ return matchOffsetPattern(Line->getAddress());
+ };
+ if ((options().getSelectGenericPattern() && CheckPattern()) ||
+ (options().getSelectOffsetPattern() && CheckOffset()) ||
+ (Requests.size() && checkElementRequest(Line, Requests)))
+ addElement(Line);
+ }
+
+ Error createMatchEntry(LVMatchInfo &Filters, StringRef Pattern,
+ bool IgnoreCase, bool UseRegex);
+
+public:
+ static LVPatterns *getPatterns();
+
+ LVPatterns() {
+ ElementDispatch = LVElement::getDispatch();
+ LineDispatch = LVLine::getDispatch();
+ ScopeDispatch = LVScope::getDispatch();
+ SymbolDispatch = LVSymbol::getDispatch();
+ TypeDispatch = LVType::getDispatch();
+ }
+ LVPatterns(const LVPatterns &) = delete;
+ LVPatterns &operator=(const LVPatterns &) = delete;
+ ~LVPatterns() = default;
+
+ // Clear any existing patterns.
+ void clear() {
+ GenericMatchInfo.clear();
+ OffsetMatchInfo.clear();
+ ElementRequest.clear();
+ LineRequest.clear();
+ ScopeRequest.clear();
+ SymbolRequest.clear();
+ TypeRequest.clear();
+
+ options().resetSelectGenericKind();
+ options().resetSelectGenericPattern();
+ options().resetSelectOffsetPattern();
+ }
+
+ void addRequest(LVElementKindSet &Selection) {
+ addRequest(Selection, ElementDispatch, ElementRequest);
+ }
+ void addRequest(LVLineKindSet &Selection) {
+ addRequest(Selection, LineDispatch, LineRequest);
+ }
+ void addRequest(LVScopeKindSet &Selection) {
+ addRequest(Selection, ScopeDispatch, ScopeRequest);
+ }
+ void addRequest(LVSymbolKindSet &Selection) {
+ addRequest(Selection, SymbolDispatch, SymbolRequest);
+ }
+ void addRequest(LVTypeKindSelection &Selection) {
+ addRequest(Selection, TypeDispatch, TypeRequest);
+ }
+
+ void updateReportOptions();
+
+ bool matchPattern(StringRef Input, const LVMatchInfo &MatchInfo);
+ // Match a pattern (--select='pattern').
+ bool matchGenericPattern(StringRef Input) {
+ return matchPattern(Input, GenericMatchInfo);
+ }
+ bool matchOffsetPattern(LVOffset Offset) {
+ return llvm::is_contained(OffsetMatchInfo, Offset);
+ }
+
+ void resolvePatternMatch(LVLine *Line) {
+ resolveGenericPatternMatch(Line, LineRequest);
+ }
+
+ void resolvePatternMatch(LVScope *Scope) {
+ resolveGenericPatternMatch(Scope, ScopeRequest);
+ }
+
+ void resolvePatternMatch(LVSymbol *Symbol) {
+ resolveGenericPatternMatch(Symbol, SymbolRequest);
+ }
+
+ void resolvePatternMatch(LVType *Type) {
+ resolveGenericPatternMatch(Type, TypeRequest);
+ }
+
+ void addPatterns(StringSet<> &Patterns, LVMatchInfo &Filters);
+
+ // Add generic and offset patterns info.
+ void addGenericPatterns(StringSet<> &Patterns);
+ void addOffsetPatterns(const LVOffsetSet &Patterns);
+
+ // Conditions to print an object.
+ bool printElement(const LVLine *Line) const;
+ bool printObject(const LVLocation *Location) const;
+ bool printElement(const LVScope *Scope) const;
+ bool printElement(const LVSymbol *Symbol) const;
+ bool printElement(const LVType *Type) const;
+
+ void print(raw_ostream &OS) const;
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+ void dump() const { print(dbgs()); }
+#endif
+};
+
+inline LVPatterns &patterns() { return *LVPatterns::getPatterns(); }
+
+} // namespace logicalview
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVOPTIONS_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVRange.h b/contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVRange.h
new file mode 100644
index 0000000000..3183fd0da7
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVRange.h
@@ -0,0 +1,109 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- LVRange.h -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the LVRange class, which is used to describe a debug
+// information range.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVRANGE_H
+#define LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVRANGE_H
+
+#include "llvm/ADT/IntervalTree.h"
+#include "llvm/DebugInfo/LogicalView/Core/LVObject.h"
+
+namespace llvm {
+namespace logicalview {
+
+using LVAddressRange = std::pair<LVAddress, LVAddress>;
+
+class LVRangeEntry final {
+ LVAddress Lower = 0;
+ LVAddress Upper = 0;
+ LVScope *Scope = nullptr;
+
+public:
+ using RangeType = LVAddress;
+
+ LVRangeEntry() = delete;
+ LVRangeEntry(LVAddress LowerAddress, LVAddress UpperAddress, LVScope *Scope)
+ : Lower(LowerAddress), Upper(UpperAddress), Scope(Scope) {}
+
+ RangeType lower() const { return Lower; }
+ RangeType upper() const { return Upper; }
+ LVAddressRange addressRange() const {
+ return LVAddressRange(lower(), upper());
+ }
+ LVScope *scope() const { return Scope; }
+};
+
+// Class to represent a list of range addresses associated with a
+// scope; the addresses are stored in ascending order and can overlap.
+using LVRangeEntries = std::vector<LVRangeEntry>;
+
+class LVRange final : public LVObject {
+ /// Map of where a user value is live, and its location.
+ using LVRangesTree = IntervalTree<LVAddress, LVScope *>;
+ using LVAllocator = LVRangesTree::Allocator;
+
+ LVAllocator Allocator;
+ LVRangesTree RangesTree;
+ LVRangeEntries RangeEntries;
+ LVAddress Lower = MaxAddress;
+ LVAddress Upper = 0;
+
+public:
+ LVRange() : LVObject(), RangesTree(Allocator) {}
+ LVRange(const LVRange &) = delete;
+ LVRange &operator=(const LVRange &) = delete;
+ ~LVRange() = default;
+
+ void addEntry(LVScope *Scope, LVAddress LowerAddress, LVAddress UpperAddress);
+ void addEntry(LVScope *Scope);
+ LVScope *getEntry(LVAddress Address) const;
+ LVScope *getEntry(LVAddress LowerAddress, LVAddress UpperAddress) const;
+ bool hasEntry(LVAddress Low, LVAddress High) const;
+ LVAddress getLower() const { return Lower; }
+ LVAddress getUpper() const { return Upper; }
+
+ const LVRangeEntries &getEntries() const { return RangeEntries; }
+
+ void clear() {
+ RangeEntries.clear();
+ Lower = MaxAddress;
+ Upper = 0;
+ }
+ bool empty() const { return RangeEntries.empty(); }
+ void sort();
+
+ void startSearch();
+ void endSearch() {}
+
+ void print(raw_ostream &OS, bool Full = true) const override;
+ void printExtra(raw_ostream &OS, bool Full = true) const override {}
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+ void dump() const override { print(dbgs()); }
+#endif
+};
+
+} // end namespace logicalview
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVRANGE_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVReader.h b/contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVReader.h
new file mode 100644
index 0000000000..5c537cc7fa
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVReader.h
@@ -0,0 +1,250 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- LVReader.h ----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the LVReader class, which is used to describe a debug
+// information reader.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVREADER_H
+#define LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVREADER_H
+
+#include "llvm/DebugInfo/LogicalView/Core/LVOptions.h"
+#include "llvm/DebugInfo/LogicalView/Core/LVRange.h"
+#include "llvm/Support/Errc.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/ScopedPrinter.h"
+#include "llvm/Support/ToolOutputFile.h"
+#include <map>
+
+namespace llvm {
+namespace logicalview {
+
+constexpr LVSectionIndex UndefinedSectionIndex = 0;
+
+class LVScopeCompileUnit;
+class LVObject;
+
+class LVSplitContext final {
+ std::unique_ptr<ToolOutputFile> OutputFile;
+ std::string Location;
+
+public:
+ LVSplitContext() = default;
+ LVSplitContext(const LVSplitContext &) = delete;
+ LVSplitContext &operator=(const LVSplitContext &) = delete;
+ ~LVSplitContext() = default;
+
+ Error createSplitFolder(StringRef Where);
+ std::error_code open(std::string Name, std::string Extension,
+ raw_ostream &OS);
+ void close() {
+ if (OutputFile) {
+ OutputFile->os().close();
+ OutputFile = nullptr;
+ }
+ }
+
+ std::string getLocation() const { return Location; }
+ raw_fd_ostream &os() { return OutputFile->os(); }
+};
+
+class LVReader {
+ LVBinaryType BinaryType;
+
+ // Context used by '--output=split' command line option.
+ LVSplitContext SplitContext;
+
+ // Compile Units DIE Offset => Scope.
+ using LVCompileUnits = std::map<LVOffset, LVScopeCompileUnit *>;
+ LVCompileUnits CompileUnits;
+
+ // Added elements to be used during elements comparison.
+ LVLines Lines;
+ LVScopes Scopes;
+ LVSymbols Symbols;
+ LVTypes Types;
+
+ // Create split folder.
+ Error createSplitFolder();
+ bool OutputSplit = false;
+
+protected:
+ LVScopeRoot *Root = nullptr;
+ std::string InputFilename;
+ std::string FileFormatName;
+ ScopedPrinter &W;
+ raw_ostream &OS;
+ LVScopeCompileUnit *CompileUnit = nullptr;
+
+ // Only for ELF format. The CodeView is handled in a different way.
+ LVSectionIndex DotTextSectionIndex = UndefinedSectionIndex;
+
+ // Record Compilation Unit entry.
+ void addCompileUnitOffset(LVOffset Offset, LVScopeCompileUnit *CompileUnit) {
+ CompileUnits.emplace(Offset, CompileUnit);
+ }
+
+ // Create the Scope Root.
+ virtual Error createScopes() {
+ Root = new LVScopeRoot();
+ Root->setName(getFilename());
+ if (options().getAttributeFormat())
+ Root->setFileFormatName(FileFormatName);
+ return Error::success();
+ }
+
+ // Return a pathname composed by: parent_path(InputFilename)/filename(From).
+ // This is useful when a type server (PDB file associated with an object
+ // file or a precompiled header file) or a DWARF split object have been
+ // moved from their original location. That is the case when running
+ // regression tests, where object files are created in one location and
+ // executed in a different location.
+ std::string createAlternativePath(StringRef From) {
+ // During the reader initialization, any backslashes in 'InputFilename'
+ // are converted to forward slashes.
+ SmallString<128> Path;
+ sys::path::append(Path, sys::path::Style::posix,
+ sys::path::parent_path(InputFilename),
+ sys::path::filename(sys::path::convert_to_slash(
+ From, sys::path::Style::windows)));
+ return std::string(Path);
+ }
+
+ virtual Error printScopes();
+ virtual Error printMatchedElements(bool UseMatchedElements);
+ virtual void sortScopes() {}
+
+public:
+ LVReader() = delete;
+ LVReader(StringRef InputFilename, StringRef FileFormatName, ScopedPrinter &W,
+ LVBinaryType BinaryType = LVBinaryType::NONE)
+ : BinaryType(BinaryType), OutputSplit(options().getOutputSplit()),
+ InputFilename(InputFilename), FileFormatName(FileFormatName), W(W),
+ OS(W.getOStream()) {}
+ LVReader(const LVReader &) = delete;
+ LVReader &operator=(const LVReader &) = delete;
+ virtual ~LVReader() {
+ if (Root)
+ delete Root;
+ }
+
+ StringRef getFilename(LVObject *Object, size_t Index) const;
+ StringRef getFilename() const { return InputFilename; }
+ void setFilename(std::string Name) { InputFilename = std::move(Name); }
+ StringRef getFileFormatName() const { return FileFormatName; }
+
+ raw_ostream &outputStream() { return OS; }
+
+ bool isBinaryTypeNone() const { return BinaryType == LVBinaryType::NONE; }
+ bool isBinaryTypeELF() const { return BinaryType == LVBinaryType::ELF; }
+ bool isBinaryTypeCOFF() const { return BinaryType == LVBinaryType::COFF; }
+
+ LVScopeCompileUnit *getCompileUnit() const { return CompileUnit; }
+ void setCompileUnit(LVScope *Scope) {
+ assert(Scope && Scope->isCompileUnit() && "Scope is not a compile unit");
+ CompileUnit = static_cast<LVScopeCompileUnit *>(Scope);
+ }
+
+ // Access to the scopes root.
+ LVScopeRoot *getScopesRoot() const { return Root; }
+
+ Error doPrint();
+ Error doLoad();
+
+ virtual std::string getRegisterName(LVSmall Opcode, uint64_t Operands[2]) {
+ llvm_unreachable("Invalid instance reader.");
+ return {};
+ }
+
+ LVSectionIndex getDotTextSectionIndex() const { return DotTextSectionIndex; }
+ virtual LVSectionIndex getSectionIndex(LVScope *Scope) {
+ return getDotTextSectionIndex();
+ }
+
+ virtual bool isSystemEntry(LVElement *Element, StringRef Name = {}) const {
+ return false;
+ };
+
+ // Access to split context.
+ LVSplitContext &getSplitContext() { return SplitContext; }
+
+ // In the case of element comparison, register that added element.
+ void notifyAddedElement(LVLine *Line) {
+ if (!options().getCompareContext() && options().getCompareLines())
+ Lines.push_back(Line);
+ }
+ void notifyAddedElement(LVScope *Scope) {
+ if (!options().getCompareContext() && options().getCompareScopes())
+ Scopes.push_back(Scope);
+ }
+ void notifyAddedElement(LVSymbol *Symbol) {
+ if (!options().getCompareContext() && options().getCompareSymbols())
+ Symbols.push_back(Symbol);
+ }
+ void notifyAddedElement(LVType *Type) {
+ if (!options().getCompareContext() && options().getCompareTypes())
+ Types.push_back(Type);
+ }
+
+ const LVLines &getLines() const { return Lines; }
+ const LVScopes &getScopes() const { return Scopes; }
+ const LVSymbols &getSymbols() const { return Symbols; }
+ const LVTypes &getTypes() const { return Types; }
+
+ // Conditions to print an object.
+ bool doPrintLine(const LVLine *Line) const {
+ return patterns().printElement(Line);
+ }
+ bool doPrintLocation(const LVLocation *Location) const {
+ return patterns().printObject(Location);
+ }
+ bool doPrintScope(const LVScope *Scope) const {
+ return patterns().printElement(Scope);
+ }
+ bool doPrintSymbol(const LVSymbol *Symbol) const {
+ return patterns().printElement(Symbol);
+ }
+ bool doPrintType(const LVType *Type) const {
+ return patterns().printElement(Type);
+ }
+
+ static LVReader &getInstance();
+ static void setInstance(LVReader *Reader);
+
+ void print(raw_ostream &OS) const;
+ virtual void printRecords(raw_ostream &OS) const {}
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+ void dump() const { print(dbgs()); }
+#endif
+};
+
+inline LVReader &getReader() { return LVReader::getInstance(); }
+inline LVSplitContext &getReaderSplitContext() {
+ return getReader().getSplitContext();
+}
+inline LVScopeCompileUnit *getReaderCompileUnit() {
+ return getReader().getCompileUnit();
+}
+
+} // end namespace logicalview
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVREADER_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVScope.h b/contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVScope.h
new file mode 100644
index 0000000000..8d2a4cdfec
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVScope.h
@@ -0,0 +1,838 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- LVScope.h -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the LVScope class, which is used to describe a debug
+// information scope.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVSCOPE_H
+#define LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVSCOPE_H
+
+#include "llvm/DebugInfo/LogicalView/Core/LVElement.h"
+#include "llvm/DebugInfo/LogicalView/Core/LVLocation.h"
+#include "llvm/DebugInfo/LogicalView/Core/LVSort.h"
+#include "llvm/Object/ObjectFile.h"
+#include <list>
+#include <map>
+#include <set>
+
+namespace llvm {
+namespace logicalview {
+
+// Name address, Code size.
+using LVNameInfo = std::pair<LVAddress, uint64_t>;
+using LVPublicNames = std::map<LVScope *, LVNameInfo>;
+using LVPublicAddresses = std::map<LVAddress, LVNameInfo>;
+
+class LVRange;
+
+enum class LVScopeKind {
+ IsAggregate,
+ IsArray,
+ IsBlock,
+ IsCallSite,
+ IsCatchBlock,
+ IsClass,
+ IsCompileUnit,
+ IsEntryPoint,
+ IsEnumeration,
+ IsFunction,
+ IsFunctionType,
+ IsInlinedFunction,
+ IsLabel,
+ IsLexicalBlock,
+ IsMember,
+ IsNamespace,
+ IsRoot,
+ IsStructure,
+ IsSubprogram,
+ IsTemplate,
+ IsTemplateAlias,
+ IsTemplatePack,
+ IsTryBlock,
+ IsUnion,
+ LastEntry
+};
+using LVScopeKindSet = std::set<LVScopeKind>;
+using LVScopeDispatch = std::map<LVScopeKind, LVScopeGetFunction>;
+using LVScopeRequest = std::vector<LVScopeGetFunction>;
+
+using LVOffsetList = std::list<LVOffset>;
+using LVOffsetElementMap = std::map<LVOffset, LVElement *>;
+using LVOffsetLinesMap = std::map<LVOffset, LVLines *>;
+using LVOffsetLocationsMap = std::map<LVOffset, LVLocations *>;
+using LVOffsetSymbolMap = std::map<LVOffset, LVSymbol *>;
+using LVTagOffsetsMap = std::map<dwarf::Tag, LVOffsetList *>;
+
+// Class to represent a DWARF Scope.
+class LVScope : public LVElement {
+ enum class Property {
+ HasDiscriminator,
+ CanHaveRanges,
+ CanHaveLines,
+ HasGlobals,
+ HasLocals,
+ HasLines,
+ HasScopes,
+ HasSymbols,
+ HasTypes,
+ IsComdat,
+ HasComdatScopes, // Compile Unit has comdat functions.
+ HasRanges,
+ AddedMissing, // Added missing referenced symbols.
+ LastEntry
+ };
+
+ // Typed bitvector with kinds and properties for this scope.
+ LVProperties<LVScopeKind> Kinds;
+ LVProperties<Property> Properties;
+ static LVScopeDispatch Dispatch;
+
+ // Coverage factor in units (bytes).
+ unsigned CoverageFactor = 0;
+
+ // Calculate coverage factor.
+ void calculateCoverage() {
+ float CoveragePercentage = 0;
+ LVLocation::calculateCoverage(Ranges, CoverageFactor, CoveragePercentage);
+ }
+
+ // Decide if the scope will be printed, using some conditions given by:
+ // only-globals, only-locals, a-pattern.
+ bool resolvePrinting() const;
+
+ // Find the current scope in the given 'Targets'.
+ LVScope *findIn(const LVScopes *Targets) const;
+
+ // Traverse the scope parent tree, executing the given callback function
+ // on each scope.
+ void traverseParents(LVScopeGetFunction GetFunction,
+ LVScopeSetFunction SetFunction);
+
+protected:
+ // Types, Symbols, Scopes, Lines, Locations in this scope.
+ LVAutoTypes *Types = nullptr;
+ LVAutoSymbols *Symbols = nullptr;
+ LVAutoScopes *Scopes = nullptr;
+ LVAutoLines *Lines = nullptr;
+ LVAutoLocations *Ranges = nullptr;
+
+ // Vector of elements (types, scopes and symbols).
+ // It is the union of (*Types, *Symbols and *Scopes) to be used for
+ // the following reasons:
+ // - Preserve the order the logical elements are read in.
+ // - To have a single container with all the logical elements, when
+ // the traversal does not require any specific element kind.
+ LVElements *Children = nullptr;
+
+ // Resolve the template parameters/arguments relationship.
+ void resolveTemplate();
+ void printEncodedArgs(raw_ostream &OS, bool Full) const;
+
+ void printActiveRanges(raw_ostream &OS, bool Full = true) const;
+ virtual void printSizes(raw_ostream &OS) const {}
+ virtual void printSummary(raw_ostream &OS) const {}
+
+ // Encoded template arguments.
+ virtual StringRef getEncodedArgs() const { return StringRef(); }
+ virtual void setEncodedArgs(StringRef EncodedArgs) {}
+
+public:
+ LVScope() : LVElement(LVSubclassID::LV_SCOPE) {
+ setIsScope();
+ setIncludeInPrint();
+ }
+ LVScope(const LVScope &) = delete;
+ LVScope &operator=(const LVScope &) = delete;
+ virtual ~LVScope();
+
+ static bool classof(const LVElement *Element) {
+ return Element->getSubclassID() == LVSubclassID::LV_SCOPE;
+ }
+
+ KIND(LVScopeKind, IsAggregate);
+ KIND(LVScopeKind, IsArray);
+ KIND_2(LVScopeKind, IsBlock, CanHaveRanges, CanHaveLines);
+ KIND_1(LVScopeKind, IsCallSite, IsFunction);
+ KIND_1(LVScopeKind, IsCatchBlock, IsBlock);
+ KIND_1(LVScopeKind, IsClass, IsAggregate);
+ KIND_3(LVScopeKind, IsCompileUnit, CanHaveRanges, CanHaveLines,
+ TransformName);
+ KIND_1(LVScopeKind, IsEntryPoint, IsFunction);
+ KIND(LVScopeKind, IsEnumeration);
+ KIND_2(LVScopeKind, IsFunction, CanHaveRanges, CanHaveLines);
+ KIND_1(LVScopeKind, IsFunctionType, IsFunction);
+ KIND_2(LVScopeKind, IsInlinedFunction, IsFunction, IsInlined);
+ KIND_1(LVScopeKind, IsLabel, IsFunction);
+ KIND_1(LVScopeKind, IsLexicalBlock, IsBlock);
+ KIND(LVScopeKind, IsMember);
+ KIND(LVScopeKind, IsNamespace);
+ KIND_1(LVScopeKind, IsRoot, TransformName);
+ KIND_1(LVScopeKind, IsStructure, IsAggregate);
+ KIND_1(LVScopeKind, IsSubprogram, IsFunction);
+ KIND(LVScopeKind, IsTemplate);
+ KIND(LVScopeKind, IsTemplateAlias);
+ KIND(LVScopeKind, IsTemplatePack);
+ KIND_1(LVScopeKind, IsTryBlock, IsBlock);
+ KIND_1(LVScopeKind, IsUnion, IsAggregate);
+
+ PROPERTY(Property, HasDiscriminator);
+ PROPERTY(Property, CanHaveRanges);
+ PROPERTY(Property, CanHaveLines);
+ PROPERTY(Property, HasGlobals);
+ PROPERTY(Property, HasLocals);
+ PROPERTY(Property, HasLines);
+ PROPERTY(Property, HasScopes);
+ PROPERTY(Property, HasSymbols);
+ PROPERTY(Property, HasTypes);
+ PROPERTY(Property, IsComdat);
+ PROPERTY(Property, HasComdatScopes);
+ PROPERTY(Property, HasRanges);
+ PROPERTY(Property, AddedMissing);
+
+ bool isCompileUnit() const override { return getIsCompileUnit(); }
+ bool isRoot() const override { return getIsRoot(); }
+
+ const char *kind() const override;
+
+ // Get the specific children.
+ const LVLines *getLines() const { return Lines; }
+ const LVLocations *getRanges() const { return Ranges; }
+ const LVScopes *getScopes() const { return Scopes; }
+ const LVSymbols *getSymbols() const { return Symbols; }
+ const LVTypes *getTypes() const { return Types; }
+ const LVElements *getChildren() const { return Children; }
+
+ void addElement(LVElement *Element);
+ void addElement(LVLine *Line);
+ void addElement(LVScope *Scope);
+ void addElement(LVSymbol *Symbol);
+ void addElement(LVType *Type);
+ void addObject(LVLocation *Location);
+ void addObject(LVAddress LowerAddress, LVAddress UpperAddress);
+ void addToChildren(LVElement *Element);
+
+ // Add the missing elements from the given 'Reference', which is the
+ // scope associated with any DW_AT_specification, DW_AT_abstract_origin.
+ void addMissingElements(LVScope *Reference);
+
+ // Traverse the scope parent tree and the children, executing the given
+ // callback function on each element.
+ void traverseParentsAndChildren(LVObjectGetFunction GetFunction,
+ LVObjectSetFunction SetFunction);
+
+ // Get the size of specific children.
+ size_t lineCount() const { return Lines ? Lines->size() : 0; }
+ size_t rangeCount() const { return Ranges ? Ranges->size() : 0; }
+ size_t scopeCount() const { return Scopes ? Scopes->size() : 0; }
+ size_t symbolCount() const { return Symbols ? Symbols->size() : 0; }
+ size_t typeCount() const { return Types ? Types->size() : 0; }
+
+ // Find containing parent for the given address.
+ LVScope *outermostParent(LVAddress Address);
+
+ // Get all the locations associated with symbols.
+ void getLocations(LVLocations &LocationList, LVValidLocation ValidLocation,
+ bool RecordInvalid = false);
+ void getRanges(LVLocations &LocationList, LVValidLocation ValidLocation,
+ bool RecordInvalid = false);
+ void getRanges(LVRange &RangeList);
+
+ unsigned getCoverageFactor() const { return CoverageFactor; }
+
+ Error doPrint(bool Split, bool Match, bool Print, raw_ostream &OS,
+ bool Full = true) const override;
+ // Sort the logical elements using the criteria specified by the
+ // command line option '--output-sort'.
+ void sort();
+
+ // Get template parameter types.
+ bool getTemplateParameterTypes(LVTypes &Params);
+
+ // DW_AT_specification, DW_AT_abstract_origin, DW_AT_extension.
+ virtual LVScope *getReference() const { return nullptr; }
+
+ LVScope *getCompileUnitParent() const override {
+ return LVElement::getCompileUnitParent();
+ }
+
+ // Follow a chain of references given by DW_AT_abstract_origin and/or
+ // DW_AT_specification and update the scope name.
+ StringRef resolveReferencesChain();
+
+ bool removeElement(LVElement *Element) override;
+ void updateLevel(LVScope *Parent, bool Moved) override;
+
+ void resolve() override;
+ void resolveName() override;
+ void resolveReferences() override;
+
+ // Return the chain of parents as a string.
+ void getQualifiedName(std::string &QualifiedName) const;
+ // Encode the template arguments.
+ void encodeTemplateArguments(std::string &Name) const;
+ void encodeTemplateArguments(std::string &Name, const LVTypes *Types) const;
+
+ void resolveElements();
+
+ // Iterate through the 'References' set and check that all its elements
+ // are present in the 'Targets' set. For a missing element, mark its
+ // parents as missing.
+ static void markMissingParents(const LVScopes *References,
+ const LVScopes *Targets,
+ bool TraverseChildren);
+
+ // Checks if the current scope is contained within the target scope.
+ // Depending on the result, the callback may be performed.
+ virtual void markMissingParents(const LVScope *Target, bool TraverseChildren);
+
+ // Returns true if the current scope and the given 'Scope' have the
+ // same number of children.
+ virtual bool equalNumberOfChildren(const LVScope *Scope) const;
+
+ // Returns true if current scope is logically equal to the given 'Scope'.
+ virtual bool equals(const LVScope *Scope) const;
+
+ // Returns true if the given 'References' are logically equal to the
+ // given 'Targets'.
+ static bool equals(const LVScopes *References, const LVScopes *Targets);
+
+ // For the given 'Scopes' returns a scope that is logically equal
+ // to the current scope; otherwise 'nullptr'.
+ virtual LVScope *findEqualScope(const LVScopes *Scopes) const;
+
+ // Report the current scope as missing or added during comparison.
+ void report(LVComparePass Pass) override;
+
+ static LVScopeDispatch &getDispatch() { return Dispatch; }
+
+ void print(raw_ostream &OS, bool Full = true) const override;
+ void printExtra(raw_ostream &OS, bool Full = true) const override;
+ virtual void printWarnings(raw_ostream &OS, bool Full = true) const {}
+ virtual void printMatchedElements(raw_ostream &OS, bool UseMatchedElements) {}
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+ void dump() const override { print(dbgs()); }
+#endif
+};
+
+// Class to represent a DWARF Union/Structure/Class.
+class LVScopeAggregate final : public LVScope {
+ LVScope *Reference = nullptr; // DW_AT_specification, DW_AT_abstract_origin.
+ size_t EncodedArgsIndex = 0; // Template encoded arguments.
+
+public:
+ LVScopeAggregate() : LVScope() {}
+ LVScopeAggregate(const LVScopeAggregate &) = delete;
+ LVScopeAggregate &operator=(const LVScopeAggregate &) = delete;
+ ~LVScopeAggregate() = default;
+
+ // DW_AT_specification, DW_AT_abstract_origin.
+ LVScope *getReference() const override { return Reference; }
+ void setReference(LVScope *Scope) override {
+ Reference = Scope;
+ setHasReference();
+ }
+ void setReference(LVElement *Element) override {
+ setReference(static_cast<LVScope *>(Element));
+ }
+
+ StringRef getEncodedArgs() const override {
+ return getStringPool().getString(EncodedArgsIndex);
+ }
+ void setEncodedArgs(StringRef EncodedArgs) override {
+ EncodedArgsIndex = getStringPool().getIndex(EncodedArgs);
+ }
+
+ // Returns true if current scope is logically equal to the given 'Scope'.
+ bool equals(const LVScope *Scope) const override;
+
+ // For the given 'Scopes' returns a scope that is logically equal
+ // to the current scope; otherwise 'nullptr'.
+ LVScope *findEqualScope(const LVScopes *Scopes) const override;
+
+ void printExtra(raw_ostream &OS, bool Full = true) const override;
+};
+
+// Class to represent a DWARF Template alias.
+class LVScopeAlias final : public LVScope {
+public:
+ LVScopeAlias() : LVScope() {
+ setIsTemplateAlias();
+ setIsTemplate();
+ }
+ LVScopeAlias(const LVScopeAlias &) = delete;
+ LVScopeAlias &operator=(const LVScopeAlias &) = delete;
+ ~LVScopeAlias() = default;
+
+ // Returns true if current scope is logically equal to the given 'Scope'.
+ bool equals(const LVScope *Scope) const override;
+
+ void printExtra(raw_ostream &OS, bool Full = true) const override;
+};
+
+// Class to represent a DWARF array (DW_TAG_array_type).
+class LVScopeArray final : public LVScope {
+public:
+ LVScopeArray() : LVScope() { setIsArray(); }
+ LVScopeArray(const LVScopeArray &) = delete;
+ LVScopeArray &operator=(const LVScopeArray &) = delete;
+ ~LVScopeArray() = default;
+
+ void resolveExtra() override;
+
+ // Returns true if current scope is logically equal to the given 'Scope'.
+ bool equals(const LVScope *Scope) const override;
+
+ void printExtra(raw_ostream &OS, bool Full = true) const override;
+};
+
+// Class to represent a DWARF Compilation Unit (CU).
+class LVScopeCompileUnit final : public LVScope {
+ // Names (files and directories) used by the Compile Unit.
+ std::vector<size_t> Filenames;
+
+ // As the .debug_pubnames section has been removed in DWARF5, we have a
+ // similar functionality, which is used by the decoded functions. We use
+ // the low-pc and high-pc for those scopes that are marked as public, in
+ // order to support DWARF and CodeView.
+ LVPublicNames PublicNames;
+
+ // Toolchain producer.
+ size_t ProducerIndex = 0;
+
+ // Compilation directory name.
+ size_t CompilationDirectoryIndex = 0;
+
+ // Keep record of elements. They are needed at the compilation unit level
+ // to print the summary at the end of the printing.
+ LVCounter Allocated;
+ LVCounter Found;
+ LVCounter Printed;
+
+ // Elements that match a given command line pattern.
+ LVElements MatchedElements;
+ LVScopes MatchedScopes;
+
+ // It records the mapping between logical lines representing a debug line
+ // entry and its address in the text section. It is used to find a line
+ // giving its exact or closest address. To support comdat functions, all
+ // addresses for the same section are recorded in the same map.
+ using LVAddressToLine = std::map<LVAddress, LVLine *>;
+ LVDoubleMap<LVSectionIndex, LVAddress, LVLine *> SectionMappings;
+
+ // DWARF Tags (Tag, Element list).
+ LVTagOffsetsMap DebugTags;
+
+ // Offsets associated with objects being flagged as having invalid data
+ // (ranges, locations, lines zero or coverages).
+ LVOffsetElementMap WarningOffsets;
+
+ // Symbols with invalid locations. (Symbol, Location List).
+ LVOffsetLocationsMap InvalidLocations;
+
+ // Symbols with invalid coverage values.
+ LVOffsetSymbolMap InvalidCoverages;
+
+ // Scopes with invalid ranges (Scope, Range list).
+ LVOffsetLocationsMap InvalidRanges;
+
+ // Scopes with lines zero (Scope, Line list).
+ LVOffsetLinesMap LinesZero;
+
+ // Record scopes contribution in bytes to the debug information.
+ using LVSizesMap = std::map<const LVScope *, LVOffset>;
+ LVSizesMap Sizes;
+ LVOffset CUContributionSize = 0;
+
+ // Helper function to add an invalid location/range.
+ void addInvalidLocationOrRange(LVLocation *Location, LVElement *Element,
+ LVOffsetLocationsMap *Map) {
+ LVOffset Offset = Element->getOffset();
+ addInvalidOffset(Offset, Element);
+ addItem<LVOffsetLocationsMap, LVLocations, LVOffset, LVLocation *>(
+ Map, Offset, Location);
+ }
+
+ // Record scope sizes indexed by lexical level.
+ // Setting an initial size that will cover a very deep nested scopes.
+ const size_t TotalInitialSize = 8;
+ using LVTotalsEntry = std::pair<unsigned, float>;
+ SmallVector<LVTotalsEntry> Totals;
+ // Maximum seen lexical level. It is used to control how many entries
+ // in the 'Totals' vector are valid values.
+ LVLevel MaxSeenLevel = 0;
+
+ // Get the line located at the given address.
+ LVLine *lineLowerBound(LVAddress Address, LVScope *Scope) const;
+ LVLine *lineUpperBound(LVAddress Address, LVScope *Scope) const;
+
+ void printScopeSize(const LVScope *Scope, raw_ostream &OS);
+ void printScopeSize(const LVScope *Scope, raw_ostream &OS) const {
+ (const_cast<LVScopeCompileUnit *>(this))->printScopeSize(Scope, OS);
+ }
+ void printTotals(raw_ostream &OS) const;
+
+protected:
+ void printSizes(raw_ostream &OS) const override;
+ void printSummary(raw_ostream &OS) const override;
+
+public:
+ LVScopeCompileUnit() : LVScope(), Totals(TotalInitialSize, {0, 0.0}) {
+ setIsCompileUnit();
+ }
+ LVScopeCompileUnit(const LVScopeCompileUnit &) = delete;
+ LVScopeCompileUnit &operator=(const LVScopeCompileUnit &) = delete;
+ ~LVScopeCompileUnit() {
+ deleteList<LVTagOffsetsMap>(DebugTags);
+ deleteList<LVOffsetLocationsMap>(InvalidLocations);
+ deleteList<LVOffsetLocationsMap>(InvalidRanges);
+ deleteList<LVOffsetLinesMap>(LinesZero);
+ }
+
+ LVScope *getCompileUnitParent() const override {
+ return static_cast<LVScope *>(const_cast<LVScopeCompileUnit *>(this));
+ }
+
+ // Add line to address mapping.
+ void addMapping(LVLine *Line, LVSectionIndex SectionIndex);
+ LVLineRange lineRange(LVLocation *Location) const;
+
+ LVNameInfo NameNone = {UINT64_MAX, 0};
+ void addPublicName(LVScope *Scope, LVAddress LowPC, LVAddress HighPC) {
+ PublicNames.emplace(std::piecewise_construct, std::forward_as_tuple(Scope),
+ std::forward_as_tuple(LowPC, HighPC - LowPC));
+ }
+ const LVNameInfo &findPublicName(LVScope *Scope) {
+ LVPublicNames::iterator Iter = PublicNames.find(Scope);
+ return (Iter != PublicNames.end()) ? Iter->second : NameNone;
+ }
+ const LVPublicNames &getPublicNames() const { return PublicNames; }
+
+ // The base address of the scope for any of the debugging information
+ // entries listed, is given by either the DW_AT_low_pc attribute or the
+ // first address in the first range entry in the list of ranges given by
+ // the DW_AT_ranges attribute.
+ LVAddress getBaseAddress() const {
+ return Ranges ? Ranges->front()->getLowerAddress() : 0;
+ }
+
+ StringRef getCompilationDirectory() const {
+ return getStringPool().getString(CompilationDirectoryIndex);
+ }
+ void setCompilationDirectory(StringRef CompilationDirectory) {
+ CompilationDirectoryIndex = getStringPool().getIndex(CompilationDirectory);
+ }
+
+ StringRef getFilename(size_t Index) const;
+ void addFilename(StringRef Name) {
+ Filenames.push_back(getStringPool().getIndex(Name));
+ }
+
+ StringRef getProducer() const override {
+ return getStringPool().getString(ProducerIndex);
+ }
+ void setProducer(StringRef ProducerName) override {
+ ProducerIndex = getStringPool().getIndex(ProducerName);
+ }
+
+ // Record DWARF tags.
+ void addDebugTag(dwarf::Tag Target, LVOffset Offset);
+ // Record elements with invalid offsets.
+ void addInvalidOffset(LVOffset Offset, LVElement *Element);
+ // Record symbols with invalid coverage values.
+ void addInvalidCoverage(LVSymbol *Symbol);
+ // Record symbols with invalid locations.
+ void addInvalidLocation(LVLocation *Location);
+ // Record scopes with invalid ranges.
+ void addInvalidRange(LVLocation *Location);
+ // Record line zero.
+ void addLineZero(LVLine *Line);
+
+ const LVTagOffsetsMap &getDebugTags() const { return DebugTags; }
+ const LVOffsetElementMap &getWarningOffsets() const { return WarningOffsets; }
+ const LVOffsetLocationsMap &getInvalidLocations() const {
+ return InvalidLocations;
+ }
+ const LVOffsetSymbolMap &getInvalidCoverages() const {
+ return InvalidCoverages;
+ }
+ const LVOffsetLocationsMap &getInvalidRanges() const { return InvalidRanges; }
+ const LVOffsetLinesMap &getLinesZero() const { return LinesZero; }
+
+ // Process ranges, locations and calculate coverage.
+ void processRangeLocationCoverage(
+ LVValidLocation ValidLocation = &LVLocation::validateRanges);
+
+ // Add matched element.
+ void addMatched(LVElement *Element) { MatchedElements.push_back(Element); }
+ void addMatched(LVScope *Scope) { MatchedScopes.push_back(Scope); }
+ void propagatePatternMatch();
+
+ const LVElements &getMatchedElements() const { return MatchedElements; }
+ const LVScopes &getMatchedScopes() const { return MatchedScopes; }
+
+ void printLocalNames(raw_ostream &OS, bool Full = true) const;
+ void printSummary(raw_ostream &OS, const LVCounter &Counter,
+ const char *Header) const;
+
+ void incrementPrintedLines();
+ void incrementPrintedScopes();
+ void incrementPrintedSymbols();
+ void incrementPrintedTypes();
+
+ // Values are used by '--summary' option (allocated).
+ void increment(LVLine *Line);
+ void increment(LVScope *Scope);
+ void increment(LVSymbol *Symbol);
+ void increment(LVType *Type);
+
+ // A new element has been added to the scopes tree. Take the following steps:
+ // Increase the added element counters, for printing summary.
+ // During comparison notify the Reader of the new element.
+ void addedElement(LVLine *Line);
+ void addedElement(LVScope *Scope);
+ void addedElement(LVSymbol *Symbol);
+ void addedElement(LVType *Type);
+
+ void addSize(LVScope *Scope, LVOffset Lower, LVOffset Upper);
+
+ // Returns true if current scope is logically equal to the given 'Scope'.
+ bool equals(const LVScope *Scope) const override;
+
+ void print(raw_ostream &OS, bool Full = true) const override;
+ void printExtra(raw_ostream &OS, bool Full = true) const override;
+ void printWarnings(raw_ostream &OS, bool Full = true) const override;
+ void printMatchedElements(raw_ostream &OS, bool UseMatchedElements) override;
+};
+
+// Class to represent a DWARF enumerator (DW_TAG_enumeration_type).
+class LVScopeEnumeration final : public LVScope {
+public:
+ LVScopeEnumeration() : LVScope() { setIsEnumeration(); }
+ LVScopeEnumeration(const LVScopeEnumeration &) = delete;
+ LVScopeEnumeration &operator=(const LVScopeEnumeration &) = delete;
+ ~LVScopeEnumeration() = default;
+
+ // Returns true if current scope is logically equal to the given 'Scope'.
+ bool equals(const LVScope *Scope) const override;
+
+ void printExtra(raw_ostream &OS, bool Full = true) const override;
+};
+
+// Class to represent a DWARF formal parameter pack
+// (DW_TAG_GNU_formal_parameter_pack).
+class LVScopeFormalPack final : public LVScope {
+public:
+ LVScopeFormalPack() : LVScope() { setIsTemplatePack(); }
+ LVScopeFormalPack(const LVScopeFormalPack &) = delete;
+ LVScopeFormalPack &operator=(const LVScopeFormalPack &) = delete;
+ ~LVScopeFormalPack() = default;
+
+ // Returns true if current scope is logically equal to the given 'Scope'.
+ bool equals(const LVScope *Scope) const override;
+
+ void printExtra(raw_ostream &OS, bool Full = true) const override;
+};
+
+// Class to represent a DWARF Function.
+class LVScopeFunction : public LVScope {
+ LVScope *Reference = nullptr; // DW_AT_specification, DW_AT_abstract_origin.
+ size_t LinkageNameIndex = 0; // Function DW_AT_linkage_name attribute.
+ size_t EncodedArgsIndex = 0; // Template encoded arguments.
+
+public:
+ LVScopeFunction() : LVScope() {}
+ LVScopeFunction(const LVScopeFunction &) = delete;
+ LVScopeFunction &operator=(const LVScopeFunction &) = delete;
+ virtual ~LVScopeFunction() = default;
+
+ // DW_AT_specification, DW_AT_abstract_origin.
+ LVScope *getReference() const override { return Reference; }
+ void setReference(LVScope *Scope) override {
+ Reference = Scope;
+ setHasReference();
+ }
+ void setReference(LVElement *Element) override {
+ setReference(static_cast<LVScope *>(Element));
+ }
+
+ StringRef getEncodedArgs() const override {
+ return getStringPool().getString(EncodedArgsIndex);
+ }
+ void setEncodedArgs(StringRef EncodedArgs) override {
+ EncodedArgsIndex = getStringPool().getIndex(EncodedArgs);
+ }
+
+ void setLinkageName(StringRef LinkageName) override {
+ LinkageNameIndex = getStringPool().getIndex(LinkageName);
+ }
+ StringRef getLinkageName() const override {
+ return getStringPool().getString(LinkageNameIndex);
+ }
+ size_t getLinkageNameIndex() const override { return LinkageNameIndex; }
+
+ void setName(StringRef ObjectName) override;
+
+ void resolveExtra() override;
+ void resolveReferences() override;
+
+ // Returns true if current scope is logically equal to the given 'Scope'.
+ bool equals(const LVScope *Scope) const override;
+
+ // For the given 'Scopes' returns a scope that is logically equal
+ // to the current scope; otherwise 'nullptr'.
+ LVScope *findEqualScope(const LVScopes *Scopes) const override;
+
+ void printExtra(raw_ostream &OS, bool Full = true) const override;
+};
+
+// Class to represent a DWARF inlined function.
+class LVScopeFunctionInlined final : public LVScopeFunction {
+ size_t CallFilenameIndex = 0;
+ uint32_t CallLineNumber = 0;
+ uint32_t Discriminator = 0;
+
+public:
+ LVScopeFunctionInlined() : LVScopeFunction() { setIsInlinedFunction(); }
+ LVScopeFunctionInlined(const LVScopeFunctionInlined &) = delete;
+ LVScopeFunctionInlined &operator=(const LVScopeFunctionInlined &) = delete;
+ ~LVScopeFunctionInlined() = default;
+
+ uint32_t getDiscriminator() const override { return Discriminator; }
+ void setDiscriminator(uint32_t Value) override {
+ Discriminator = Value;
+ setHasDiscriminator();
+ }
+
+ uint32_t getCallLineNumber() const override { return CallLineNumber; }
+ void setCallLineNumber(uint32_t Number) override { CallLineNumber = Number; }
+ size_t getCallFilenameIndex() const override { return CallFilenameIndex; }
+ void setCallFilenameIndex(size_t Index) override {
+ CallFilenameIndex = Index;
+ }
+
+ // Line number for display; in the case of Inlined Functions, we use the
+ // DW_AT_call_line attribute; otherwise use DW_AT_decl_line attribute.
+ std::string lineNumberAsString(bool ShowZero = false) const override {
+ return lineAsString(getCallLineNumber(), getDiscriminator(), ShowZero);
+ }
+
+ void resolveExtra() override;
+
+ // Returns true if current scope is logically equal to the given 'Scope'.
+ bool equals(const LVScope *Scope) const override;
+
+ // For the given 'Scopes' returns a scope that is logically equal
+ // to the current scope; otherwise 'nullptr'.
+ LVScope *findEqualScope(const LVScopes *Scopes) const override;
+
+ void printExtra(raw_ostream &OS, bool Full = true) const override;
+};
+
+// Class to represent a DWARF subroutine type.
+class LVScopeFunctionType final : public LVScopeFunction {
+public:
+ LVScopeFunctionType() : LVScopeFunction() { setIsFunctionType(); }
+ LVScopeFunctionType(const LVScopeFunctionType &) = delete;
+ LVScopeFunctionType &operator=(const LVScopeFunctionType &) = delete;
+ ~LVScopeFunctionType() = default;
+
+ void resolveExtra() override;
+};
+
+// Class to represent a DWARF Namespace.
+class LVScopeNamespace final : public LVScope {
+ LVScope *Reference = nullptr; // Reference to DW_AT_extension attribute.
+
+public:
+ LVScopeNamespace() : LVScope() { setIsNamespace(); }
+ LVScopeNamespace(const LVScopeNamespace &) = delete;
+ LVScopeNamespace &operator=(const LVScopeNamespace &) = delete;
+ ~LVScopeNamespace() = default;
+
+ // Access DW_AT_extension reference.
+ LVScope *getReference() const override { return Reference; }
+ void setReference(LVScope *Scope) override {
+ Reference = Scope;
+ setHasReference();
+ }
+ void setReference(LVElement *Element) override {
+ setReference(static_cast<LVScope *>(Element));
+ }
+
+ // Returns true if current scope is logically equal to the given 'Scope'.
+ bool equals(const LVScope *Scope) const override;
+
+ // For the given 'Scopes' returns a scope that is logically equal
+ // to the current scope; otherwise 'nullptr'.
+ LVScope *findEqualScope(const LVScopes *Scopes) const override;
+
+ void printExtra(raw_ostream &OS, bool Full = true) const override;
+};
+
+// Class to represent the binary file being analyzed.
+class LVScopeRoot final : public LVScope {
+ size_t FileFormatNameIndex = 0;
+
+public:
+ LVScopeRoot() : LVScope() { setIsRoot(); }
+ LVScopeRoot(const LVScopeRoot &) = delete;
+ LVScopeRoot &operator=(const LVScopeRoot &) = delete;
+ ~LVScopeRoot() = default;
+
+ StringRef getFileFormatName() const {
+ return getStringPool().getString(FileFormatNameIndex);
+ }
+ void setFileFormatName(StringRef FileFormatName) {
+ FileFormatNameIndex = getStringPool().getIndex(FileFormatName);
+ }
+
+ // Process the collected location, ranges and calculate coverage.
+ void processRangeInformation();
+
+ // Returns true if current scope is logically equal to the given 'Scope'.
+ bool equals(const LVScope *Scope) const override;
+
+ void print(raw_ostream &OS, bool Full = true) const override;
+ void printExtra(raw_ostream &OS, bool Full = true) const override;
+ Error doPrintMatches(bool Split, raw_ostream &OS,
+ bool UseMatchedElements) const;
+};
+
+// Class to represent a DWARF template parameter pack
+// (DW_TAG_GNU_template_parameter_pack).
+class LVScopeTemplatePack final : public LVScope {
+public:
+ LVScopeTemplatePack() : LVScope() { setIsTemplatePack(); }
+ LVScopeTemplatePack(const LVScopeTemplatePack &) = delete;
+ LVScopeTemplatePack &operator=(const LVScopeTemplatePack &) = delete;
+ ~LVScopeTemplatePack() = default;
+
+ // Returns true if current scope is logically equal to the given 'Scope'.
+ bool equals(const LVScope *Scope) const override;
+
+ void printExtra(raw_ostream &OS, bool Full = true) const override;
+};
+
+} // end namespace logicalview
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVSCOPE_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVSort.h b/contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVSort.h
new file mode 100644
index 0000000000..143f817c1e
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVSort.h
@@ -0,0 +1,62 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- LVSort.h ------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the sort algorithms.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVSORT_H
+#define LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVSORT_H
+
+namespace llvm {
+namespace logicalview {
+
+class LVObject;
+
+// Object Sorting Mode.
+enum class LVSortMode {
+ None = 0, // No given sort.
+ Kind, // Sort by kind.
+ Line, // Sort by line.
+ Name, // Sort by name.
+ Offset // Sort by offset.
+};
+
+// Type of function to be called when sorting an object.
+using LVSortValue = int;
+using LVSortFunction = LVSortValue (*)(const LVObject *LHS,
+ const LVObject *RHS);
+
+// Get the comparator function, based on the command line options.
+LVSortFunction getSortFunction();
+
+// Comparator functions that can be used for sorting.
+LVSortValue compareKind(const LVObject *LHS, const LVObject *RHS);
+LVSortValue compareLine(const LVObject *LHS, const LVObject *RHS);
+LVSortValue compareName(const LVObject *LHS, const LVObject *RHS);
+LVSortValue compareOffset(const LVObject *LHS, const LVObject *RHS);
+LVSortValue compareRange(const LVObject *LHS, const LVObject *RHS);
+LVSortValue sortByKind(const LVObject *LHS, const LVObject *RHS);
+LVSortValue sortByLine(const LVObject *LHS, const LVObject *RHS);
+LVSortValue sortByName(const LVObject *LHS, const LVObject *RHS);
+
+} // end namespace logicalview
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVSORT_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVStringPool.h b/contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVStringPool.h
new file mode 100644
index 0000000000..df1d8a53dd
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVStringPool.h
@@ -0,0 +1,102 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- LVStringPool.h ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the LVStringPool class, which is used to implement a
+// basic string pool table.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVSTRINGPOOL_H
+#define LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVSTRINGPOOL_H
+
+#include "llvm/ADT/StringMap.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/raw_ostream.h"
+#include <iomanip>
+#include <vector>
+
+namespace llvm {
+namespace logicalview {
+
+class LVStringPool {
+ static constexpr size_t BadIndex = std::numeric_limits<size_t>::max();
+ using TableType = StringMap<size_t, BumpPtrAllocator>;
+ using ValueType = TableType::value_type;
+ BumpPtrAllocator Allocator;
+ TableType StringTable;
+ std::vector<ValueType *> Entries;
+
+public:
+ LVStringPool() { getIndex(""); }
+ LVStringPool(LVStringPool const &other) = delete;
+ LVStringPool(LVStringPool &&other) = delete;
+ ~LVStringPool() = default;
+
+ bool isValidIndex(size_t Index) const { return Index != BadIndex; }
+
+ // Return number of strings in the pool. The empty string is allocated
+ // at the slot zero. We substract 1 to indicate the number of non empty
+ // strings.
+ size_t getSize() const { return Entries.size() - 1; }
+
+ // Return the index for the specified key, otherwise 'BadIndex'.
+ size_t findIndex(StringRef Key) const {
+ TableType::const_iterator Iter = StringTable.find(Key);
+ if (Iter != StringTable.end())
+ return Iter->second;
+ return BadIndex;
+ }
+
+ // Return an index for the specified key.
+ size_t getIndex(StringRef Key) {
+ size_t Index = findIndex(Key);
+ if (isValidIndex(Index))
+ return Index;
+ size_t Value = Entries.size();
+ ValueType *Entry = ValueType::create(Key, Allocator, std::move(Value));
+ StringTable.insert(Entry);
+ Entries.push_back(Entry);
+ return Value;
+ }
+
+ // Given the index, return its corresponding string.
+ StringRef getString(size_t Index) const {
+ return (Index >= Entries.size()) ? StringRef() : Entries[Index]->getKey();
+ }
+
+ void print(raw_ostream &OS) const {
+ if (!Entries.empty()) {
+ OS << "\nString Pool:\n";
+ for (const ValueType *Entry : Entries)
+ OS << "Index: " << Entry->getValue() << ", "
+ << "Key: '" << Entry->getKey() << "'\n";
+ }
+ }
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+ void dump() const { print(dbgs()); }
+#endif
+};
+
+} // namespace logicalview
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVSTRINGPOOL_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVSupport.h b/contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVSupport.h
new file mode 100644
index 0000000000..e6912dddbb
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVSupport.h
@@ -0,0 +1,295 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- LVSupport.h ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines support functions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVSUPPORT_H
+#define LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVSUPPORT_H
+
+#include "llvm/ADT/SmallBitVector.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/DebugInfo/LogicalView/Core/LVStringPool.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cctype>
+#include <map>
+#include <sstream>
+
+namespace llvm {
+namespace logicalview {
+
+// Returns the unique string pool instance.
+LVStringPool &getStringPool();
+
+template <typename T>
+using TypeIsValid = std::bool_constant<std::is_pointer<T>::value>;
+
+// Utility class to help memory management and perform an automatic cleaning.
+template <typename T, unsigned N = 8>
+class LVAutoSmallVector : public SmallVector<T, N> {
+ static_assert(TypeIsValid<T>::value, "T must be a pointer type");
+
+public:
+ using iterator = typename SmallVector<T, N>::iterator;
+ LVAutoSmallVector() : SmallVector<T, N>::SmallVector() {}
+
+ ~LVAutoSmallVector() {
+ // Destroy the constructed elements in the vector.
+ for (auto *Item : *this)
+ delete Item;
+ }
+};
+
+// Used to record specific characteristics about the objects.
+template <typename T> class LVProperties {
+ SmallBitVector Bits = SmallBitVector(static_cast<unsigned>(T::LastEntry) + 1);
+
+public:
+ LVProperties() = default;
+
+ void set(T Idx) { Bits[static_cast<unsigned>(Idx)] = 1; }
+ void reset(T Idx) { Bits[static_cast<unsigned>(Idx)] = 0; }
+ bool get(T Idx) const { return Bits[static_cast<unsigned>(Idx)]; }
+};
+
+// Generate get, set and reset 'bool' functions for LVProperties instances.
+// FAMILY: instance name.
+// ENUM: enumeration instance.
+// FIELD: enumerator instance.
+// F1, F2, F3: optional 'set' functions to be called.
+#define BOOL_BIT(FAMILY, ENUM, FIELD) \
+ bool get##FIELD() const { return FAMILY.get(ENUM::FIELD); } \
+ void set##FIELD() { FAMILY.set(ENUM::FIELD); } \
+ void reset##FIELD() { FAMILY.reset(ENUM::FIELD); }
+
+#define BOOL_BIT_1(FAMILY, ENUM, FIELD, F1) \
+ bool get##FIELD() const { return FAMILY.get(ENUM::FIELD); } \
+ void set##FIELD() { \
+ FAMILY.set(ENUM::FIELD); \
+ set##F1(); \
+ } \
+ void reset##FIELD() { FAMILY.reset(ENUM::FIELD); }
+
+#define BOOL_BIT_2(FAMILY, ENUM, FIELD, F1, F2) \
+ bool get##FIELD() const { return FAMILY.get(ENUM::FIELD); } \
+ void set##FIELD() { \
+ FAMILY.set(ENUM::FIELD); \
+ set##F1(); \
+ set##F2(); \
+ } \
+ void reset##FIELD() { FAMILY.reset(ENUM::FIELD); }
+
+#define BOOL_BIT_3(FAMILY, ENUM, FIELD, F1, F2, F3) \
+ bool get##FIELD() const { return FAMILY.get(ENUM::FIELD); } \
+ void set##FIELD() { \
+ FAMILY.set(ENUM::FIELD); \
+ set##F1(); \
+ set##F2(); \
+ set##F3(); \
+ } \
+ void reset##FIELD() { FAMILY.reset(ENUM::FIELD); }
+
+// Generate get, set and reset functions for 'properties'.
+#define PROPERTY(ENUM, FIELD) BOOL_BIT(Properties, ENUM, FIELD)
+#define PROPERTY_1(ENUM, FIELD, F1) BOOL_BIT_1(Properties, ENUM, FIELD, F1)
+#define PROPERTY_2(ENUM, FIELD, F1, F2) \
+ BOOL_BIT_2(Properties, ENUM, FIELD, F1, F2)
+#define PROPERTY_3(ENUM, FIELD, F1, F2, F3) \
+ BOOL_BIT_3(Properties, ENUM, FIELD, F1, F2, F3)
+
+// Generate get, set and reset functions for 'kinds'.
+#define KIND(ENUM, FIELD) BOOL_BIT(Kinds, ENUM, FIELD)
+#define KIND_1(ENUM, FIELD, F1) BOOL_BIT_1(Kinds, ENUM, FIELD, F1)
+#define KIND_2(ENUM, FIELD, F1, F2) BOOL_BIT_2(Kinds, ENUM, FIELD, F1, F2)
+#define KIND_3(ENUM, FIELD, F1, F2, F3) \
+ BOOL_BIT_3(Kinds, ENUM, FIELD, F1, F2, F3)
+
+const int HEX_WIDTH = 12;
+inline FormattedNumber hexValue(uint64_t N, unsigned Width = HEX_WIDTH,
+ bool Upper = false) {
+ return format_hex(N, Width, Upper);
+}
+
+// Output the hexadecimal representation of 'Value' using '[0x%08x]' format.
+inline std::string hexString(uint64_t Value, size_t Width = HEX_WIDTH) {
+ std::string String;
+ raw_string_ostream Stream(String);
+ Stream << hexValue(Value, Width, false);
+ return Stream.str();
+}
+
+// Get a hexadecimal string representation for the given value.
+inline std::string hexSquareString(uint64_t Value) {
+ return (Twine("[") + Twine(hexString(Value)) + Twine("]")).str();
+}
+
+// Return a string with the First and Others separated by spaces.
+template <typename... Args>
+std::string formatAttributes(const StringRef First, Args... Others) {
+ const auto List = {First, Others...};
+ std::stringstream Stream;
+ size_t Size = 0;
+ for (const StringRef &Item : List) {
+ Stream << (Size ? " " : "") << Item.str();
+ Size = Item.size();
+ }
+ Stream << (Size ? " " : "");
+ return Stream.str();
+}
+
+// Add an item to a map with second being a list.
+template <typename MapType, typename ListType, typename KeyType,
+ typename ValueType>
+void addItem(MapType *Map, KeyType Key, ValueType Value) {
+ ListType *List = nullptr;
+ typename MapType::const_iterator Iter = Map->find(Key);
+ if (Iter != Map->end())
+ List = Iter->second;
+ else {
+ List = new ListType();
+ Map->emplace(Key, List);
+ }
+ List->push_back(Value);
+}
+
+// Delete the map contained list.
+template <typename MapType> void deleteList(MapType &Map) {
+ for (typename MapType::const_reference Entry : Map)
+ delete Entry.second;
+}
+
+// Double map data structure.
+template <typename FirstKeyType, typename SecondKeyType, typename ValueType>
+class LVDoubleMap {
+ static_assert(std::is_pointer<ValueType>::value,
+ "ValueType must be a pointer.");
+ using LVSecondMapType = std::map<SecondKeyType, ValueType>;
+ using LVFirstMapType = std::map<FirstKeyType, LVSecondMapType *>;
+ using LVAuxMapType = std::map<SecondKeyType, FirstKeyType>;
+ using LVValueTypes = std::vector<ValueType>;
+ LVFirstMapType FirstMap;
+ LVAuxMapType AuxMap;
+
+public:
+ LVDoubleMap() = default;
+ ~LVDoubleMap() {
+ for (auto &Entry : FirstMap)
+ delete Entry.second;
+ }
+
+ void add(FirstKeyType FirstKey, SecondKeyType SecondKey, ValueType Value) {
+ LVSecondMapType *SecondMap = nullptr;
+ typename LVFirstMapType::iterator FirstIter = FirstMap.find(FirstKey);
+ if (FirstIter == FirstMap.end()) {
+ SecondMap = new LVSecondMapType();
+ FirstMap.emplace(FirstKey, SecondMap);
+ } else {
+ SecondMap = FirstIter->second;
+ }
+
+ assert(SecondMap && "SecondMap is null.");
+ if (SecondMap && SecondMap->find(SecondKey) == SecondMap->end())
+ SecondMap->emplace(SecondKey, Value);
+
+ typename LVAuxMapType::iterator AuxIter = AuxMap.find(SecondKey);
+ if (AuxIter == AuxMap.end()) {
+ AuxMap.emplace(SecondKey, FirstKey);
+ }
+ }
+
+ LVSecondMapType *findMap(FirstKeyType FirstKey) const {
+ typename LVFirstMapType::const_iterator FirstIter = FirstMap.find(FirstKey);
+ if (FirstIter == FirstMap.end())
+ return nullptr;
+
+ LVSecondMapType *SecondMap = FirstIter->second;
+ return SecondMap;
+ }
+
+ ValueType find(FirstKeyType FirstKey, SecondKeyType SecondKey) const {
+ LVSecondMapType *SecondMap = findMap(FirstKey);
+ if (!SecondMap)
+ return nullptr;
+
+ typename LVSecondMapType::const_iterator SecondIter =
+ SecondMap->find(SecondKey);
+ return (SecondIter != SecondMap->end()) ? SecondIter->second : nullptr;
+ }
+
+ ValueType find(SecondKeyType SecondKey) const {
+ typename LVAuxMapType::const_iterator AuxIter = AuxMap.find(SecondKey);
+ if (AuxIter == AuxMap.end())
+ return nullptr;
+ return find(AuxIter->second, SecondKey);
+ }
+
+ // Return a vector with all the 'ValueType' values.
+ LVValueTypes find() const {
+ LVValueTypes Values;
+ if (FirstMap.empty())
+ return Values;
+ for (typename LVFirstMapType::const_reference FirstEntry : FirstMap) {
+ LVSecondMapType *SecondMap = FirstEntry.second;
+ for (typename LVSecondMapType::const_reference SecondEntry : *SecondMap)
+ Values.push_back(SecondEntry.second);
+ }
+ return Values;
+ }
+};
+
+// Unified and flattened pathnames.
+std::string transformPath(StringRef Path);
+std::string flattenedFilePath(StringRef Path);
+
+inline std::string formattedKind(StringRef Kind) {
+ return (Twine("{") + Twine(Kind) + Twine("}")).str();
+}
+
+inline std::string formattedName(StringRef Name) {
+ return (Twine("'") + Twine(Name) + Twine("'")).str();
+}
+
+inline std::string formattedNames(StringRef Name1, StringRef Name2) {
+ return (Twine("'") + Twine(Name1) + Twine(Name2) + Twine("'")).str();
+}
+
+// These are the values assigned to the debug location record IDs.
+// See DebugInfo/CodeView/CodeViewSymbols.def.
+// S_DEFRANGE 0x113f
+// S_DEFRANGE_SUBFIELD 0x1140
+// S_DEFRANGE_REGISTER 0x1141
+// S_DEFRANGE_FRAMEPOINTER_REL 0x1142
+// S_DEFRANGE_SUBFIELD_REGISTER 0x1143
+// S_DEFRANGE_FRAMEPOINTER_REL_FULL_SCOPE 0x1144
+// S_DEFRANGE_REGISTER_REL 0x1145
+// When recording CodeView debug location, the above values are truncated
+// to a uint8_t value in order to fit the 'OpCode' used for the logical
+// debug location operations.
+// Return the original CodeView enum value.
+inline uint16_t getCodeViewOperationCode(uint8_t Code) { return 0x1100 | Code; }
+
+} // end namespace logicalview
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVSUPPORT_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVSymbol.h b/contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVSymbol.h
new file mode 100644
index 0000000000..79b467969c
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVSymbol.h
@@ -0,0 +1,206 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- LVSymbol.h ----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the LVSymbol class, which is used to describe a debug
+// information symbol.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVSYMBOL_H
+#define LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVSYMBOL_H
+
+#include "llvm/DebugInfo/LogicalView/Core/LVElement.h"
+
+namespace llvm {
+namespace logicalview {
+
+enum class LVSymbolKind {
+ IsCallSiteParameter,
+ IsConstant,
+ IsInheritance,
+ IsMember,
+ IsParameter,
+ IsUnspecified,
+ IsVariable,
+ LastEntry
+};
+using LVSymbolKindSet = std::set<LVSymbolKind>;
+using LVSymbolDispatch = std::map<LVSymbolKind, LVSymbolGetFunction>;
+using LVSymbolRequest = std::vector<LVSymbolGetFunction>;
+
+class LVSymbol final : public LVElement {
+ enum class Property { HasLocation, FillGaps, LastEntry };
+
+ // Typed bitvector with kinds and properties for this symbol.
+ LVProperties<LVSymbolKind> Kinds;
+ LVProperties<Property> Properties;
+ static LVSymbolDispatch Dispatch;
+
+ // CodeView symbol Linkage name.
+ size_t LinkageNameIndex = 0;
+
+ // Reference to DW_AT_specification, DW_AT_abstract_origin attribute.
+ LVSymbol *Reference = nullptr;
+ LVAutoLocations *Locations = nullptr;
+ LVLocation *CurrentLocation = nullptr;
+
+ // Bitfields length.
+ uint32_t BitSize = 0;
+
+ // Index in the String pool representing any initial value.
+ size_t ValueIndex = 0;
+
+ // Coverage factor in units (bytes).
+ unsigned CoverageFactor = 0;
+ float CoveragePercentage = 0;
+
+ // Add a location gap into the location list.
+ LVAutoLocations::iterator addLocationGap(LVAutoLocations::iterator Pos,
+ LVAddress LowPC, LVAddress HighPC);
+
+ // Find the current symbol in the given 'Targets'.
+ LVSymbol *findIn(const LVSymbols *Targets) const;
+
+public:
+ LVSymbol() : LVElement(LVSubclassID::LV_SYMBOL) {
+ setIsSymbol();
+ setIncludeInPrint();
+ }
+ LVSymbol(const LVSymbol &) = delete;
+ LVSymbol &operator=(const LVSymbol &) = delete;
+ ~LVSymbol() { delete Locations; }
+
+ static bool classof(const LVElement *Element) {
+ return Element->getSubclassID() == LVSubclassID::LV_SYMBOL;
+ }
+
+ KIND(LVSymbolKind, IsCallSiteParameter);
+ KIND(LVSymbolKind, IsConstant);
+ KIND(LVSymbolKind, IsInheritance);
+ KIND(LVSymbolKind, IsMember);
+ KIND(LVSymbolKind, IsParameter);
+ KIND(LVSymbolKind, IsUnspecified);
+ KIND(LVSymbolKind, IsVariable);
+
+ PROPERTY(Property, HasLocation);
+ PROPERTY(Property, FillGaps);
+
+ const char *kind() const override;
+
+ // Access DW_AT_specification, DW_AT_abstract_origin reference.
+ LVSymbol *getReference() const { return Reference; }
+ void setReference(LVSymbol *Symbol) override {
+ Reference = Symbol;
+ setHasReference();
+ }
+ void setReference(LVElement *Element) override {
+ assert((!Element || isa<LVSymbol>(Element)) && "Invalid element");
+ setReference(static_cast<LVSymbol *>(Element));
+ }
+
+ void setLinkageName(StringRef LinkageName) override {
+ LinkageNameIndex = getStringPool().getIndex(LinkageName);
+ }
+ StringRef getLinkageName() const override {
+ return getStringPool().getString(LinkageNameIndex);
+ }
+ size_t getLinkageNameIndex() const override { return LinkageNameIndex; }
+
+ uint32_t getBitSize() const override { return BitSize; }
+ void setBitSize(uint32_t Size) override { BitSize = Size; }
+
+ // Process the values for a DW_AT_const_value.
+ std::string getValue() const override {
+ return std::string(getStringPool().getString(ValueIndex));
+ }
+ void setValue(StringRef Value) override {
+ ValueIndex = getStringPool().getIndex(Value);
+ }
+ size_t getValueIndex() const override { return ValueIndex; }
+
+ // Add a Location Entry.
+ void addLocationConstant(dwarf::Attribute Attr, LVUnsigned Constant,
+ uint64_t LocDescOffset);
+ void addLocationOperands(LVSmall Opcode, uint64_t Operand1,
+ uint64_t Operand2);
+ void addLocation(dwarf::Attribute Attr, LVAddress LowPC, LVAddress HighPC,
+ LVUnsigned SectionOffset, uint64_t LocDescOffset,
+ bool CallSiteLocation = false);
+
+ // Fill gaps in the location list.
+ void fillLocationGaps();
+
+ // Get all the locations associated with symbols.
+ void getLocations(LVLocations &LocationList, LVValidLocation ValidLocation,
+ bool RecordInvalid = false);
+ void getLocations(LVLocations &LocationList) const;
+
+ // Calculate coverage factor.
+ void calculateCoverage();
+
+ unsigned getCoverageFactor() const { return CoverageFactor; }
+ void setCoverageFactor(unsigned Value) { CoverageFactor = Value; }
+ float getCoveragePercentage() const { return CoveragePercentage; }
+ void setCoveragePercentage(float Value) { CoveragePercentage = Value; }
+
+ // Print location in raw format.
+ void printLocations(raw_ostream &OS, bool Full = true) const;
+
+ // Follow a chain of references given by DW_AT_abstract_origin and/or
+ // DW_AT_specification and update the symbol name.
+ StringRef resolveReferencesChain();
+
+ void resolveName() override;
+ void resolveReferences() override;
+
+ static LVSymbolDispatch &getDispatch() { return Dispatch; }
+
+ static bool parametersMatch(const LVSymbols *References,
+ const LVSymbols *Targets);
+
+ static void getParameters(const LVSymbols *Symbols, LVSymbols *Parameters);
+
+ // Iterate through the 'References' set and check that all its elements
+ // are present in the 'Targets' set. For a missing element, mark its
+ // parents as missing.
+ static void markMissingParents(const LVSymbols *References,
+ const LVSymbols *Targets);
+
+ // Returns true if current type is logically equal to the given 'Symbol'.
+ bool equals(const LVSymbol *Symbol) const;
+
+ // Returns true if the given 'References' are logically equal to the
+ // given 'Targets'.
+ static bool equals(const LVSymbols *References, const LVSymbols *Targets);
+
+ // Report the current symbol as missing or added during comparison.
+ void report(LVComparePass Pass) override;
+
+ void print(raw_ostream &OS, bool Full = true) const override;
+ void printExtra(raw_ostream &OS, bool Full = true) const override;
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+ void dump() const override { print(dbgs()); }
+#endif
+};
+
+} // end namespace logicalview
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVSYMBOL_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVType.h b/contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVType.h
new file mode 100644
index 0000000000..7beabb2fba
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Core/LVType.h
@@ -0,0 +1,301 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- LVType.h ------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the LVType class, which is used to describe a debug
+// information type.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVTYPE_H
+#define LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVTYPE_H
+
+#include "llvm/DebugInfo/LogicalView/Core/LVElement.h"
+
+namespace llvm {
+namespace logicalview {
+
+enum class LVTypeKind {
+ IsBase,
+ IsConst,
+ IsEnumerator,
+ IsImport,
+ IsImportDeclaration,
+ IsImportModule,
+ IsPointer,
+ IsPointerMember,
+ IsReference,
+ IsRestrict,
+ IsRvalueReference,
+ IsSubrange,
+ IsTemplateParam,
+ IsTemplateTemplateParam,
+ IsTemplateTypeParam,
+ IsTemplateValueParam,
+ IsTypedef,
+ IsUnaligned,
+ IsUnspecified,
+ IsVolatile,
+ IsModifier, // CodeView - LF_MODIFIER
+ LastEntry
+};
+using LVTypeKindSelection = std::set<LVTypeKind>;
+using LVTypeDispatch = std::map<LVTypeKind, LVTypeGetFunction>;
+using LVTypeRequest = std::vector<LVTypeGetFunction>;
+
+// Class to represent a DWARF Type.
+class LVType : public LVElement {
+ enum class Property { IsSubrangeCount, LastEntry };
+
+ // Typed bitvector with kinds and properties for this type.
+ LVProperties<LVTypeKind> Kinds;
+ LVProperties<Property> Properties;
+ static LVTypeDispatch Dispatch;
+
+ // Find the current type in the given 'Targets'.
+ LVType *findIn(const LVTypes *Targets) const;
+
+public:
+ LVType() : LVElement(LVSubclassID::LV_TYPE) { setIsType(); }
+ LVType(const LVType &) = delete;
+ LVType &operator=(const LVType &) = delete;
+ virtual ~LVType() = default;
+
+ static bool classof(const LVElement *Element) {
+ return Element->getSubclassID() == LVSubclassID::LV_TYPE;
+ }
+
+ KIND(LVTypeKind, IsBase);
+ KIND(LVTypeKind, IsConst);
+ KIND(LVTypeKind, IsEnumerator);
+ KIND(LVTypeKind, IsImport);
+ KIND_1(LVTypeKind, IsImportDeclaration, IsImport);
+ KIND_1(LVTypeKind, IsImportModule, IsImport);
+ KIND(LVTypeKind, IsPointer);
+ KIND(LVTypeKind, IsPointerMember);
+ KIND(LVTypeKind, IsReference);
+ KIND(LVTypeKind, IsRestrict);
+ KIND(LVTypeKind, IsRvalueReference);
+ KIND(LVTypeKind, IsSubrange);
+ KIND(LVTypeKind, IsTemplateParam);
+ KIND_1(LVTypeKind, IsTemplateTemplateParam, IsTemplateParam);
+ KIND_1(LVTypeKind, IsTemplateTypeParam, IsTemplateParam);
+ KIND_1(LVTypeKind, IsTemplateValueParam, IsTemplateParam);
+ KIND(LVTypeKind, IsTypedef);
+ KIND(LVTypeKind, IsUnaligned);
+ KIND(LVTypeKind, IsUnspecified);
+ KIND(LVTypeKind, IsVolatile);
+ KIND(LVTypeKind, IsModifier);
+
+ PROPERTY(Property, IsSubrangeCount);
+
+ const char *kind() const override;
+
+ // Follow a chain of references given by DW_AT_abstract_origin and/or
+ // DW_AT_specification and update the type name.
+ StringRef resolveReferencesChain();
+
+ bool isBase() const override { return getIsBase(); }
+ bool isTemplateParam() const override { return getIsTemplateParam(); }
+
+ // Encode the specific template argument.
+ virtual void encodeTemplateArgument(std::string &Name) const {}
+
+ // Return the underlying type for a type definition.
+ virtual LVElement *getUnderlyingType() { return nullptr; }
+ virtual void setUnderlyingType(LVElement *Element) {}
+
+ void resolveName() override;
+ void resolveReferences() override;
+
+ static LVTypeDispatch &getDispatch() { return Dispatch; }
+
+ static bool parametersMatch(const LVTypes *References,
+ const LVTypes *Targets);
+
+ static void getParameters(const LVTypes *Types, LVTypes *TypesParam,
+ LVScopes *ScopesParam);
+
+ // Iterate through the 'References' set and check that all its elements
+ // are present in the 'Targets' set. For a missing element, mark its
+ // parents as missing.
+ static void markMissingParents(const LVTypes *References,
+ const LVTypes *Targets);
+
+ // Returns true if current type is logically equal to the given 'Type'.
+ virtual bool equals(const LVType *Type) const;
+
+ // Returns true if the given 'References' are logically equal to the
+ // given 'Targets'.
+ static bool equals(const LVTypes *References, const LVTypes *Targets);
+
+ // Report the current type as missing or added during comparison.
+ void report(LVComparePass Pass) override;
+
+ void print(raw_ostream &OS, bool Full = true) const override;
+ void printExtra(raw_ostream &OS, bool Full = true) const override;
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+ void dump() const override { print(dbgs()); }
+#endif
+};
+
+// Class to represent DW_TAG_typedef_type.
+class LVTypeDefinition final : public LVType {
+public:
+ LVTypeDefinition() : LVType() {
+ setIsTypedef();
+ setIncludeInPrint();
+ }
+ LVTypeDefinition(const LVTypeDefinition &) = delete;
+ LVTypeDefinition &operator=(const LVTypeDefinition &) = delete;
+ ~LVTypeDefinition() = default;
+
+ // Return the underlying type for a type definition.
+ LVElement *getUnderlyingType() override;
+ void setUnderlyingType(LVElement *Element) override { setType(Element); }
+
+ void resolveExtra() override;
+
+ // Returns true if current type is logically equal to the given 'Type'.
+ bool equals(const LVType *Type) const override;
+
+ void printExtra(raw_ostream &OS, bool Full = true) const override;
+};
+
+// Class to represent a DW_TAG_enumerator.
+class LVTypeEnumerator final : public LVType {
+ // Index in the String pool representing any initial value.
+ size_t ValueIndex = 0;
+
+public:
+ LVTypeEnumerator() : LVType() {
+ setIsEnumerator();
+ setIncludeInPrint();
+ }
+ LVTypeEnumerator(const LVTypeEnumerator &) = delete;
+ LVTypeEnumerator &operator=(const LVTypeEnumerator &) = delete;
+ ~LVTypeEnumerator() = default;
+
+ // Process the values for a DW_TAG_enumerator.
+ std::string getValue() const override {
+ return std::string(getStringPool().getString(ValueIndex));
+ }
+ void setValue(StringRef Value) override {
+ ValueIndex = getStringPool().getIndex(Value);
+ }
+ size_t getValueIndex() const override { return ValueIndex; }
+
+ // Returns true if current type is logically equal to the given 'Type'.
+ bool equals(const LVType *Type) const override;
+
+ void printExtra(raw_ostream &OS, bool Full = true) const override;
+};
+
+// Class to represent DW_TAG_imported_module / DW_TAG_imported_declaration.
+class LVTypeImport final : public LVType {
+public:
+ LVTypeImport() : LVType() { setIncludeInPrint(); }
+ LVTypeImport(const LVTypeImport &) = delete;
+ LVTypeImport &operator=(const LVTypeImport &) = delete;
+ ~LVTypeImport() = default;
+
+ // Returns true if current type is logically equal to the given 'Type'.
+ bool equals(const LVType *Type) const override;
+
+ void printExtra(raw_ostream &OS, bool Full = true) const override;
+};
+
+// Class to represent a DWARF Template parameter holder (type or param).
+class LVTypeParam final : public LVType {
+ // Index in the String pool representing any initial value.
+ size_t ValueIndex = 0;
+
+public:
+ LVTypeParam();
+ LVTypeParam(const LVTypeParam &) = delete;
+ LVTypeParam &operator=(const LVTypeParam &) = delete;
+ ~LVTypeParam() = default;
+
+ // Template parameter value.
+ std::string getValue() const override {
+ return std::string(getStringPool().getString(ValueIndex));
+ }
+ void setValue(StringRef Value) override {
+ ValueIndex = getStringPool().getIndex(Value);
+ }
+ size_t getValueIndex() const override { return ValueIndex; }
+
+ // Encode the specific template argument.
+ void encodeTemplateArgument(std::string &Name) const override;
+
+ // Returns true if current type is logically equal to the given 'Type'.
+ bool equals(const LVType *Type) const override;
+
+ void printExtra(raw_ostream &OS, bool Full = true) const override;
+};
+
+// Class to represent a DW_TAG_subrange_type.
+class LVTypeSubrange final : public LVType {
+ // Values describing the subrange bounds.
+ int64_t LowerBound = 0; // DW_AT_lower_bound or DW_AT_count value.
+ int64_t UpperBound = 0; // DW_AT_upper_bound value.
+
+public:
+ LVTypeSubrange() : LVType() {
+ setIsSubrange();
+ setIncludeInPrint();
+ }
+ LVTypeSubrange(const LVTypeSubrange &) = delete;
+ LVTypeSubrange &operator=(const LVTypeSubrange &) = delete;
+ ~LVTypeSubrange() = default;
+
+ int64_t getCount() const override {
+ return getIsSubrangeCount() ? LowerBound : 0;
+ }
+ void setCount(int64_t Value) override {
+ LowerBound = Value;
+ setIsSubrangeCount();
+ }
+
+ int64_t getLowerBound() const override { return LowerBound; }
+ void setLowerBound(int64_t Value) override { LowerBound = Value; }
+
+ int64_t getUpperBound() const override { return UpperBound; }
+ void setUpperBound(int64_t Value) override { UpperBound = Value; }
+
+ std::pair<unsigned, unsigned> getBounds() const override {
+ return {LowerBound, UpperBound};
+ }
+ void setBounds(unsigned Lower, unsigned Upper) override {
+ LowerBound = Lower;
+ UpperBound = Upper;
+ }
+
+ void resolveExtra() override;
+
+ // Returns true if current type is logically equal to the given 'Type'.
+ bool equals(const LVType *Type) const override;
+
+ void printExtra(raw_ostream &OS, bool Full = true) const override;
+};
+
+} // end namespace logicalview
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVTYPE_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/LVReaderHandler.h b/contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/LVReaderHandler.h
new file mode 100644
index 0000000000..2b3dea1370
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/LVReaderHandler.h
@@ -0,0 +1,111 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- LVReaderHandler.h ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This class implements the Reader handler.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_LOGICALVIEW_READERS_LVREADERHANDLER_H
+#define LLVM_DEBUGINFO_LOGICALVIEW_READERS_LVREADERHANDLER_H
+
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/DebugInfo/LogicalView/Core/LVReader.h"
+#include "llvm/DebugInfo/PDB/Native/PDBFile.h"
+#include "llvm/Object/Archive.h"
+#include "llvm/Object/MachOUniversal.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/ScopedPrinter.h"
+#include <string>
+#include <vector>
+
+namespace llvm {
+namespace logicalview {
+
+using LVReaders = std::vector<LVReader *>;
+using ArgVector = std::vector<std::string>;
+using PdbOrObj = PointerUnion<object::ObjectFile *, pdb::PDBFile *>;
+
+// This class performs the following tasks:
+// - Creates a logical reader for every binary file in the command line,
+// that parses the debug information and creates a high level logical
+// view representation containing scopes, symbols, types and lines.
+// - Prints and compares the logical views.
+//
+// The supported binary formats are: ELF, Mach-O and CodeView.
+class LVReaderHandler {
+ ArgVector &Objects;
+ ScopedPrinter &W;
+ raw_ostream &OS;
+ LVReaders TheReaders;
+
+ Error createReaders();
+ void destroyReaders();
+ Error printReaders();
+ Error compareReaders();
+
+ Error handleArchive(LVReaders &Readers, StringRef Filename,
+ object::Archive &Arch);
+ Error handleBuffer(LVReaders &Readers, StringRef Filename,
+ MemoryBufferRef Buffer, StringRef ExePath = {});
+ Error handleFile(LVReaders &Readers, StringRef Filename,
+ StringRef ExePath = {});
+ Error handleMach(LVReaders &Readers, StringRef Filename,
+ object::MachOUniversalBinary &Mach);
+ Error handleObject(LVReaders &Readers, StringRef Filename,
+ object::Binary &Binary);
+
+ Error createReader(StringRef Filename, LVReaders &Readers, PdbOrObj &Input,
+ StringRef FileFormatName, StringRef ExePath = {});
+
+public:
+ LVReaderHandler() = delete;
+ LVReaderHandler(ArgVector &Objects, ScopedPrinter &W,
+ LVOptions &ReaderOptions)
+ : Objects(Objects), W(W), OS(W.getOStream()) {
+ setOptions(&ReaderOptions);
+ }
+ LVReaderHandler(const LVReaderHandler &) = delete;
+ LVReaderHandler &operator=(const LVReaderHandler &) = delete;
+ ~LVReaderHandler() { destroyReaders(); }
+
+ Error createReader(StringRef Filename, LVReaders &Readers) {
+ return handleFile(Readers, Filename);
+ }
+ Error process();
+
+ Expected<LVReader *> createReader(StringRef Pathname) {
+ LVReaders Readers;
+ if (Error Err = createReader(Pathname, Readers))
+ return std::move(Err);
+ return Readers[0];
+ }
+ void deleteReader(LVReader *Reader) { delete Reader; }
+
+ void print(raw_ostream &OS) const;
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+ void dump() const { print(dbgs()); }
+#endif
+};
+
+} // end namespace logicalview
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_LOGICALVIEW_READERS_LVREADERHANDLER_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Readers/LVBinaryReader.h b/contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Readers/LVBinaryReader.h
new file mode 100644
index 0000000000..3063fb40ac
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Readers/LVBinaryReader.h
@@ -0,0 +1,191 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- LVBinaryReader.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the LVBinaryReader class, which is used to describe a
+// binary reader.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_LOGICALVIEW_READERS_LVBINARYREADER_H
+#define LLVM_DEBUGINFO_LOGICALVIEW_READERS_LVBINARYREADER_H
+
+#include "llvm/DebugInfo/LogicalView/Core/LVReader.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCDisassembler/MCDisassembler.h"
+#include "llvm/MC/MCInstPrinter.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCObjectFileInfo.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/TargetRegistry.h"
+#include "llvm/Object/ObjectFile.h"
+
+namespace llvm {
+namespace logicalview {
+
+constexpr bool UpdateHighAddress = false;
+
+// Logical scope, Section address, Section index, IsComdat.
+struct LVSymbolTableEntry final {
+ LVScope *Scope = nullptr;
+ LVAddress Address = 0;
+ LVSectionIndex SectionIndex = 0;
+ bool IsComdat = false;
+ LVSymbolTableEntry() = default;
+ LVSymbolTableEntry(LVScope *Scope, LVAddress Address,
+ LVSectionIndex SectionIndex, bool IsComdat)
+ : Scope(Scope), Address(Address), SectionIndex(SectionIndex),
+ IsComdat(IsComdat) {}
+};
+
+// Function names extracted from the object symbol table.
+class LVSymbolTable final {
+ using LVSymbolNames = std::map<std::string, LVSymbolTableEntry>;
+ LVSymbolNames SymbolNames;
+
+public:
+ LVSymbolTable() = default;
+
+ void add(StringRef Name, LVScope *Function, LVSectionIndex SectionIndex = 0);
+ void add(StringRef Name, LVAddress Address, LVSectionIndex SectionIndex,
+ bool IsComdat);
+ LVSectionIndex update(LVScope *Function);
+
+ const LVSymbolTableEntry &getEntry(StringRef Name);
+ LVAddress getAddress(StringRef Name);
+ LVSectionIndex getIndex(StringRef Name);
+ bool getIsComdat(StringRef Name);
+
+ void print(raw_ostream &OS);
+};
+
+class LVBinaryReader : public LVReader {
+ // Function names extracted from the object symbol table.
+ LVSymbolTable SymbolTable;
+
+ // Instruction lines for a logical scope. These instructions are fetched
+ // during its merge with the debug lines.
+ LVDoubleMap<LVSectionIndex, LVScope *, LVLines *> ScopeInstructions;
+
+ // Links the scope with its first assembler address line.
+ LVDoubleMap<LVSectionIndex, LVAddress, LVScope *> AssemblerMappings;
+
+ // Mapping from virtual address to section.
+ // The virtual address refers to the address where the section is loaded.
+ using LVSectionAddresses = std::map<LVSectionIndex, object::SectionRef>;
+ LVSectionAddresses SectionAddresses;
+
+ void addSectionAddress(const object::SectionRef &Section) {
+ if (SectionAddresses.find(Section.getAddress()) == SectionAddresses.end())
+ SectionAddresses.emplace(Section.getAddress(), Section);
+ }
+
+ // Scopes with ranges for current compile unit. It is used to find a line
+ // giving its exact or closest address. To support comdat functions, all
+ // addresses for the same section are recorded in the same map.
+ using LVSectionRanges = std::map<LVSectionIndex, LVRange *>;
+ LVSectionRanges SectionRanges;
+
+ // Image base and virtual address for Executable file.
+ uint64_t ImageBaseAddress = 0;
+ uint64_t VirtualAddress = 0;
+
+ // Object sections with machine code.
+ using LVSections = std::map<LVSectionIndex, object::SectionRef>;
+ LVSections Sections;
+
+protected:
+ // It contains the LVLineDebug elements representing the logical lines for
+ // the current compile unit, created by parsing the debug line section.
+ LVLines CULines;
+
+ std::unique_ptr<const MCRegisterInfo> MRI;
+ std::unique_ptr<const MCAsmInfo> MAI;
+ std::unique_ptr<const MCSubtargetInfo> STI;
+ std::unique_ptr<const MCInstrInfo> MII;
+ std::unique_ptr<const MCDisassembler> MD;
+ std::unique_ptr<MCContext> MC;
+ std::unique_ptr<MCInstPrinter> MIP;
+
+ // Loads all info for the architecture of the provided object file.
+ Error loadGenericTargetInfo(StringRef TheTriple, StringRef TheFeatures);
+
+ virtual void mapRangeAddress(const object::ObjectFile &Obj) {}
+ virtual void mapRangeAddress(const object::ObjectFile &Obj,
+ const object::SectionRef &Section,
+ bool IsComdat) {}
+
+ // Create a mapping from virtual address to section.
+ void mapVirtualAddress(const object::ObjectFile &Obj);
+ void mapVirtualAddress(const object::COFFObjectFile &COFFObj);
+
+ Expected<std::pair<LVSectionIndex, object::SectionRef>>
+ getSection(LVScope *Scope, LVAddress Address, LVSectionIndex SectionIndex);
+
+ void addSectionRange(LVSectionIndex SectionIndex, LVScope *Scope);
+ void addSectionRange(LVSectionIndex SectionIndex, LVScope *Scope,
+ LVAddress LowerAddress, LVAddress UpperAddress);
+ LVRange *getSectionRanges(LVSectionIndex SectionIndex);
+
+ Error createInstructions();
+ Error createInstructions(LVScope *Function, LVSectionIndex SectionIndex);
+ Error createInstructions(LVScope *Function, LVSectionIndex SectionIndex,
+ const LVNameInfo &NameInfo);
+
+ void processLines(LVLines *DebugLines, LVSectionIndex SectionIndex);
+ void processLines(LVLines *DebugLines, LVSectionIndex SectionIndex,
+ LVScope *Function);
+
+public:
+ LVBinaryReader() = delete;
+ LVBinaryReader(StringRef Filename, StringRef FileFormatName, ScopedPrinter &W,
+ LVBinaryType BinaryType)
+ : LVReader(Filename, FileFormatName, W, BinaryType) {}
+ LVBinaryReader(const LVBinaryReader &) = delete;
+ LVBinaryReader &operator=(const LVBinaryReader &) = delete;
+ virtual ~LVBinaryReader();
+
+ void addToSymbolTable(StringRef Name, LVScope *Function,
+ LVSectionIndex SectionIndex = 0);
+ void addToSymbolTable(StringRef Name, LVAddress Address,
+ LVSectionIndex SectionIndex, bool IsComdat);
+ LVSectionIndex updateSymbolTable(LVScope *Function);
+
+ const LVSymbolTableEntry &getSymbolTableEntry(StringRef Name);
+ LVAddress getSymbolTableAddress(StringRef Name);
+ LVSectionIndex getSymbolTableIndex(StringRef Name);
+ bool getSymbolTableIsComdat(StringRef Name);
+
+ LVSectionIndex getSectionIndex(LVScope *Scope) override {
+ return Scope ? getSymbolTableIndex(Scope->getLinkageName())
+ : DotTextSectionIndex;
+ }
+
+ void print(raw_ostream &OS) const;
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+ void dump() const { print(dbgs()); }
+#endif
+};
+
+} // end namespace logicalview
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_LOGICALVIEW_READERS_LVBINARYREADER_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Readers/LVELFReader.h b/contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Readers/LVELFReader.h
new file mode 100644
index 0000000000..8689d1535e
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/DebugInfo/LogicalView/Readers/LVELFReader.h
@@ -0,0 +1,165 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- LVELFReader.h -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the LVELFReader class, which is used to describe a
+// debug information (DWARF) reader.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_LOGICALVIEW_READERS_LVELFREADER_H
+#define LLVM_DEBUGINFO_LOGICALVIEW_READERS_LVELFREADER_H
+
+#include "llvm/DebugInfo/DWARF/DWARFAbbreviationDeclaration.h"
+#include "llvm/DebugInfo/DWARF/DWARFContext.h"
+#include "llvm/DebugInfo/LogicalView/Readers/LVBinaryReader.h"
+#include <unordered_set>
+
+namespace llvm {
+namespace logicalview {
+
+class LVElement;
+class LVLine;
+class LVScopeCompileUnit;
+class LVSymbol;
+class LVType;
+
+using AttributeSpec = DWARFAbbreviationDeclaration::AttributeSpec;
+
+class LVELFReader final : public LVBinaryReader {
+ object::ObjectFile &Obj;
+
+ // Indicates if ranges data are available; in the case of split DWARF any
+ // reference to ranges is valid only if the skeleton DIE has been loaded.
+ bool RangesDataAvailable = false;
+ LVAddress CUBaseAddress = 0;
+ LVAddress CUHighAddress = 0;
+
+ // Current elements during the processing of a DIE.
+ LVElement *CurrentElement = nullptr;
+ LVScope *CurrentScope = nullptr;
+ LVSymbol *CurrentSymbol = nullptr;
+ LVType *CurrentType = nullptr;
+ LVOffset CurrentOffset = 0;
+ LVOffset CurrentEndOffset = 0;
+
+ // In DWARF v4, the files are 1-indexed.
+ // In DWARF v5, the files are 0-indexed.
+ // The ELF reader expects the indexes as 1-indexed.
+ bool IncrementFileIndex = false;
+
+ // Address ranges collected for current DIE.
+ std::vector<LVAddressRange> CurrentRanges;
+
+ // Symbols with locations for current compile unit.
+ LVSymbols SymbolsWithLocations;
+
+ // Global Offsets (Offset, Element).
+ LVOffsetElementMap GlobalOffsets;
+
+ // Low PC and High PC values for DIE being processed.
+ LVAddress CurrentLowPC = 0;
+ LVAddress CurrentHighPC = 0;
+ bool FoundLowPC = false;
+ bool FoundHighPC = false;
+
+ // Cross references (Elements).
+ using LVElementSet = std::unordered_set<LVElement *>;
+ using LVElementEntry = std::pair<LVElement *, LVElementSet>;
+ using LVElementReference = std::unordered_map<LVOffset, LVElementEntry>;
+ LVElementReference ElementTable;
+
+ Error loadTargetInfo(const object::ObjectFile &Obj);
+
+ void mapRangeAddress(const object::ObjectFile &Obj) override;
+
+ LVElement *createElement(dwarf::Tag Tag);
+ void traverseDieAndChildren(DWARFDie &DIE, LVScope *Parent,
+ DWARFDie &SkeletonDie);
+ // Process the attributes for the given DIE.
+ LVScope *processOneDie(const DWARFDie &InputDIE, LVScope *Parent,
+ DWARFDie &SkeletonDie);
+ void processOneAttribute(const DWARFDie &Die, LVOffset *OffsetPtr,
+ const AttributeSpec &AttrSpec);
+ void createLineAndFileRecords(const DWARFDebugLine::LineTable *Lines);
+ void processLocationGaps();
+
+ // Add offset to global map.
+ void addGlobalOffset(LVOffset Offset) {
+ if (GlobalOffsets.find(Offset) == GlobalOffsets.end())
+ // Just associate the DIE offset with a null element, as we do not
+ // know if the referenced element has been created.
+ GlobalOffsets.emplace(Offset, nullptr);
+ }
+
+ // Remove offset from global map.
+ void removeGlobalOffset(LVOffset Offset) {
+ LVOffsetElementMap::iterator Iter = GlobalOffsets.find(Offset);
+ if (Iter != GlobalOffsets.end())
+ GlobalOffsets.erase(Iter);
+ }
+
+ // Get the location information for DW_AT_data_member_location.
+ void processLocationMember(dwarf::Attribute Attr,
+ const DWARFFormValue &FormValue,
+ const DWARFDie &Die, uint64_t OffsetOnEntry);
+ void processLocationList(dwarf::Attribute Attr,
+ const DWARFFormValue &FormValue, const DWARFDie &Die,
+ uint64_t OffsetOnEntry,
+ bool CallSiteLocation = false);
+ void updateReference(dwarf::Attribute Attr, const DWARFFormValue &FormValue);
+
+ // Get an element given the DIE offset.
+ LVElement *getElementForOffset(LVOffset offset, LVElement *Element);
+
+protected:
+ Error createScopes() override;
+ void sortScopes() override;
+
+public:
+ LVELFReader() = delete;
+ LVELFReader(StringRef Filename, StringRef FileFormatName,
+ object::ObjectFile &Obj, ScopedPrinter &W)
+ : LVBinaryReader(Filename, FileFormatName, W, LVBinaryType::ELF),
+ Obj(Obj) {}
+ LVELFReader(const LVELFReader &) = delete;
+ LVELFReader &operator=(const LVELFReader &) = delete;
+ ~LVELFReader() = default;
+
+ LVAddress getCUBaseAddress() const { return CUBaseAddress; }
+ void setCUBaseAddress(LVAddress Address) { CUBaseAddress = Address; }
+ LVAddress getCUHighAddress() const { return CUHighAddress; }
+ void setCUHighAddress(LVAddress Address) { CUHighAddress = Address; }
+
+ const LVSymbols &GetSymbolsWithLocations() const {
+ return SymbolsWithLocations;
+ }
+
+ std::string getRegisterName(LVSmall Opcode, uint64_t Operands[2]) override;
+
+ void print(raw_ostream &OS) const;
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+ void dump() const { print(dbgs()); }
+#endif
+};
+
+} // end namespace logicalview
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_LOGICALVIEW_READERS_LVELFREADER_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIADataStream.h b/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIADataStream.h
new file mode 100644
index 0000000000..9bd4b7b477
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIADataStream.h
@@ -0,0 +1,44 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- DIADataStream.h - DIA implementation of IPDBDataStream ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_DIA_DIADATASTREAM_H
+#define LLVM_DEBUGINFO_PDB_DIA_DIADATASTREAM_H
+
+#include "DIASupport.h"
+#include "llvm/DebugInfo/PDB/IPDBDataStream.h"
+
+namespace llvm {
+namespace pdb {
+class DIADataStream : public IPDBDataStream {
+public:
+ explicit DIADataStream(CComPtr<IDiaEnumDebugStreamData> DiaStreamData);
+
+ uint32_t getRecordCount() const override;
+ std::string getName() const override;
+ std::optional<RecordType> getItemAtIndex(uint32_t Index) const override;
+ bool getNext(RecordType &Record) override;
+ void reset() override;
+
+private:
+ CComPtr<IDiaEnumDebugStreamData> StreamData;
+};
+}
+}
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIAEnumDebugStreams.h b/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIAEnumDebugStreams.h
new file mode 100644
index 0000000000..7b0bdd2384
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIAEnumDebugStreams.h
@@ -0,0 +1,47 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//==- DIAEnumDebugStreams.h - DIA Debug Stream Enumerator impl ---*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_DIA_DIAENUMDEBUGSTREAMS_H
+#define LLVM_DEBUGINFO_PDB_DIA_DIAENUMDEBUGSTREAMS_H
+
+#include "DIASupport.h"
+#include "llvm/DebugInfo/PDB/IPDBDataStream.h"
+#include "llvm/DebugInfo/PDB/IPDBEnumChildren.h"
+
+namespace llvm {
+namespace pdb {
+
+class IPDBDataStream;
+
+class DIAEnumDebugStreams : public IPDBEnumChildren<IPDBDataStream> {
+public:
+ explicit DIAEnumDebugStreams(CComPtr<IDiaEnumDebugStreams> DiaEnumerator);
+
+ uint32_t getChildCount() const override;
+ ChildTypePtr getChildAtIndex(uint32_t Index) const override;
+ ChildTypePtr getNext() override;
+ void reset() override;
+
+private:
+ CComPtr<IDiaEnumDebugStreams> Enumerator;
+};
+}
+}
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIAEnumFrameData.h b/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIAEnumFrameData.h
new file mode 100644
index 0000000000..ccaccaa887
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIAEnumFrameData.h
@@ -0,0 +1,46 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//==- DIAEnumFrameData.h --------------------------------------- -*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_DIA_DIAENUMFRAMEDATA_H
+#define LLVM_DEBUGINFO_PDB_DIA_DIAENUMFRAMEDATA_H
+
+#include "DIASupport.h"
+#include "llvm/DebugInfo/PDB/IPDBEnumChildren.h"
+#include "llvm/DebugInfo/PDB/IPDBFrameData.h"
+
+namespace llvm {
+namespace pdb {
+
+class DIAEnumFrameData : public IPDBEnumChildren<IPDBFrameData> {
+public:
+ explicit DIAEnumFrameData(CComPtr<IDiaEnumFrameData> DiaEnumerator);
+
+ uint32_t getChildCount() const override;
+ ChildTypePtr getChildAtIndex(uint32_t Index) const override;
+ ChildTypePtr getNext() override;
+ void reset() override;
+
+private:
+ CComPtr<IDiaEnumFrameData> Enumerator;
+};
+
+} // namespace pdb
+} // namespace llvm
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIAEnumInjectedSources.h b/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIAEnumInjectedSources.h
new file mode 100644
index 0000000000..4776d02f47
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIAEnumInjectedSources.h
@@ -0,0 +1,46 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//==- DIAEnumInjectedSources.h - DIA Injected Sources Enumerator -*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_DIA_DIAENUMINJECTEDSOURCES_H
+#define LLVM_DEBUGINFO_PDB_DIA_DIAENUMINJECTEDSOURCES_H
+
+#include "DIASupport.h"
+#include "llvm/DebugInfo/PDB/IPDBEnumChildren.h"
+#include "llvm/DebugInfo/PDB/IPDBInjectedSource.h"
+
+namespace llvm {
+namespace pdb {
+
+class DIAEnumInjectedSources : public IPDBEnumChildren<IPDBInjectedSource> {
+public:
+ explicit DIAEnumInjectedSources(
+ CComPtr<IDiaEnumInjectedSources> DiaEnumerator);
+
+ uint32_t getChildCount() const override;
+ ChildTypePtr getChildAtIndex(uint32_t Index) const override;
+ ChildTypePtr getNext() override;
+ void reset() override;
+
+private:
+ CComPtr<IDiaEnumInjectedSources> Enumerator;
+};
+} // namespace pdb
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_DIA_DIAENUMINJECTEDSOURCES_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIAEnumLineNumbers.h b/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIAEnumLineNumbers.h
new file mode 100644
index 0000000000..21dbc4b127
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIAEnumLineNumbers.h
@@ -0,0 +1,46 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//==- DIAEnumLineNumbers.h - DIA Line Number Enumerator impl -----*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_DIA_DIAENUMLINENUMBERS_H
+#define LLVM_DEBUGINFO_PDB_DIA_DIAENUMLINENUMBERS_H
+
+#include "DIASupport.h"
+#include "llvm/DebugInfo/PDB/IPDBEnumChildren.h"
+#include "llvm/DebugInfo/PDB/IPDBLineNumber.h"
+
+namespace llvm {
+namespace pdb {
+class IPDBLineNumber;
+
+class DIAEnumLineNumbers : public IPDBEnumChildren<IPDBLineNumber> {
+public:
+ explicit DIAEnumLineNumbers(CComPtr<IDiaEnumLineNumbers> DiaEnumerator);
+
+ uint32_t getChildCount() const override;
+ ChildTypePtr getChildAtIndex(uint32_t Index) const override;
+ ChildTypePtr getNext() override;
+ void reset() override;
+
+private:
+ CComPtr<IDiaEnumLineNumbers> Enumerator;
+};
+}
+}
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIAEnumSectionContribs.h b/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIAEnumSectionContribs.h
new file mode 100644
index 0000000000..2e44bb48ac
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIAEnumSectionContribs.h
@@ -0,0 +1,49 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//==- DIAEnumSectionContribs.h --------------------------------- -*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_DIA_DIAENUMSECTIONCONTRIBS_H
+#define LLVM_DEBUGINFO_PDB_DIA_DIAENUMSECTIONCONTRIBS_H
+
+#include "DIASupport.h"
+#include "llvm/DebugInfo/PDB/IPDBEnumChildren.h"
+#include "llvm/DebugInfo/PDB/IPDBSectionContrib.h"
+
+namespace llvm {
+namespace pdb {
+class DIASession;
+
+class DIAEnumSectionContribs : public IPDBEnumChildren<IPDBSectionContrib> {
+public:
+ explicit DIAEnumSectionContribs(
+ const DIASession &PDBSession,
+ CComPtr<IDiaEnumSectionContribs> DiaEnumerator);
+
+ uint32_t getChildCount() const override;
+ ChildTypePtr getChildAtIndex(uint32_t Index) const override;
+ ChildTypePtr getNext() override;
+ void reset() override;
+
+private:
+ const DIASession &Session;
+ CComPtr<IDiaEnumSectionContribs> Enumerator;
+};
+} // namespace pdb
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_DIA_DIAENUMSECTIONCONTRIBS_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIAEnumSourceFiles.h b/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIAEnumSourceFiles.h
new file mode 100644
index 0000000000..43ea2df383
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIAEnumSourceFiles.h
@@ -0,0 +1,48 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//==- DIAEnumSourceFiles.h - DIA Source File Enumerator impl -----*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_DIA_DIAENUMSOURCEFILES_H
+#define LLVM_DEBUGINFO_PDB_DIA_DIAENUMSOURCEFILES_H
+
+#include "DIASupport.h"
+#include "llvm/DebugInfo/PDB/IPDBEnumChildren.h"
+#include "llvm/DebugInfo/PDB/IPDBSourceFile.h"
+
+namespace llvm {
+namespace pdb {
+class DIASession;
+
+class DIAEnumSourceFiles : public IPDBEnumChildren<IPDBSourceFile> {
+public:
+ explicit DIAEnumSourceFiles(const DIASession &PDBSession,
+ CComPtr<IDiaEnumSourceFiles> DiaEnumerator);
+
+ uint32_t getChildCount() const override;
+ ChildTypePtr getChildAtIndex(uint32_t Index) const override;
+ ChildTypePtr getNext() override;
+ void reset() override;
+
+private:
+ const DIASession &Session;
+ CComPtr<IDiaEnumSourceFiles> Enumerator;
+};
+}
+}
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIAEnumSymbols.h b/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIAEnumSymbols.h
new file mode 100644
index 0000000000..d8df677665
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIAEnumSymbols.h
@@ -0,0 +1,48 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//==- DIAEnumSymbols.h - DIA Symbol Enumerator impl --------------*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_DIA_DIAENUMSYMBOLS_H
+#define LLVM_DEBUGINFO_PDB_DIA_DIAENUMSYMBOLS_H
+
+#include "DIASupport.h"
+#include "llvm/DebugInfo/PDB/IPDBEnumChildren.h"
+#include "llvm/DebugInfo/PDB/PDBSymbol.h"
+
+namespace llvm {
+namespace pdb {
+class DIASession;
+
+class DIAEnumSymbols : public IPDBEnumChildren<PDBSymbol> {
+public:
+ explicit DIAEnumSymbols(const DIASession &Session,
+ CComPtr<IDiaEnumSymbols> DiaEnumerator);
+
+ uint32_t getChildCount() const override;
+ std::unique_ptr<PDBSymbol> getChildAtIndex(uint32_t Index) const override;
+ std::unique_ptr<PDBSymbol> getNext() override;
+ void reset() override;
+
+private:
+ const DIASession &Session;
+ CComPtr<IDiaEnumSymbols> Enumerator;
+};
+}
+}
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIAEnumTables.h b/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIAEnumTables.h
new file mode 100644
index 0000000000..b4d41eb392
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIAEnumTables.h
@@ -0,0 +1,46 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- DIAEnumTables.h - DIA Tables Enumerator Impl -------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_DIA_DIAENUMTABLES_H
+#define LLVM_DEBUGINFO_PDB_DIA_DIAENUMTABLES_H
+
+#include "DIASupport.h"
+#include "llvm/DebugInfo/PDB/IPDBEnumChildren.h"
+#include "llvm/DebugInfo/PDB/IPDBTable.h"
+
+namespace llvm {
+namespace pdb {
+class IPDBTable;
+
+class DIAEnumTables : public IPDBEnumChildren<IPDBTable> {
+public:
+ explicit DIAEnumTables(CComPtr<IDiaEnumTables> DiaEnumerator);
+
+ uint32_t getChildCount() const override;
+ std::unique_ptr<IPDBTable> getChildAtIndex(uint32_t Index) const override;
+ std::unique_ptr<IPDBTable> getNext() override;
+ void reset() override;
+
+private:
+ CComPtr<IDiaEnumTables> Enumerator;
+};
+}
+}
+
+#endif // LLVM_DEBUGINFO_PDB_DIA_DIAENUMTABLES_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIAError.h b/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIAError.h
new file mode 100644
index 0000000000..0c3342967e
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIAError.h
@@ -0,0 +1,61 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- DIAError.h - Error extensions for PDB DIA implementation -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_DIA_DIAERROR_H
+#define LLVM_DEBUGINFO_PDB_DIA_DIAERROR_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Error.h"
+
+namespace llvm {
+namespace pdb {
+enum class dia_error_code {
+ unspecified = 1,
+ could_not_create_impl,
+ invalid_file_format,
+ invalid_parameter,
+ already_loaded,
+ debug_info_mismatch,
+};
+} // namespace pdb
+} // namespace llvm
+
+namespace std {
+template <>
+struct is_error_code_enum<llvm::pdb::dia_error_code> : std::true_type {};
+} // namespace std
+
+namespace llvm {
+namespace pdb {
+const std::error_category &DIAErrCategory();
+
+inline std::error_code make_error_code(dia_error_code E) {
+ return std::error_code(static_cast<int>(E), DIAErrCategory());
+}
+
+/// Base class for errors originating in DIA SDK, e.g. COM calls
+class DIAError : public ErrorInfo<DIAError, StringError> {
+public:
+ using ErrorInfo<DIAError, StringError>::ErrorInfo;
+ DIAError(const Twine &S) : ErrorInfo(S, dia_error_code::unspecified) {}
+ static char ID;
+};
+} // namespace pdb
+} // namespace llvm
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIAFrameData.h b/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIAFrameData.h
new file mode 100644
index 0000000000..104017577f
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIAFrameData.h
@@ -0,0 +1,49 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- DIAFrameData.h - DIA Impl. of IPDBFrameData ---------------- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_DIA_DIAFRAMEDATA_H
+#define LLVM_DEBUGINFO_PDB_DIA_DIAFRAMEDATA_H
+
+#include "DIASupport.h"
+#include "llvm/DebugInfo/PDB/IPDBFrameData.h"
+
+namespace llvm {
+namespace pdb {
+
+class DIASession;
+
+class DIAFrameData : public IPDBFrameData {
+public:
+ explicit DIAFrameData(CComPtr<IDiaFrameData> DiaFrameData);
+
+ uint32_t getAddressOffset() const override;
+ uint32_t getAddressSection() const override;
+ uint32_t getLengthBlock() const override;
+ std::string getProgram() const override;
+ uint32_t getRelativeVirtualAddress() const override;
+ uint64_t getVirtualAddress() const override;
+
+private:
+ CComPtr<IDiaFrameData> FrameData;
+};
+
+} // namespace pdb
+} // namespace llvm
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIAInjectedSource.h b/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIAInjectedSource.h
new file mode 100644
index 0000000000..fd1732f811
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIAInjectedSource.h
@@ -0,0 +1,48 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- DIAInjectedSource.h - DIA impl for IPDBInjectedSource ----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_DIA_DIAINJECTEDSOURCE_H
+#define LLVM_DEBUGINFO_PDB_DIA_DIAINJECTEDSOURCE_H
+
+#include "DIASupport.h"
+#include "llvm/DebugInfo/PDB/IPDBInjectedSource.h"
+
+namespace llvm {
+namespace pdb {
+class DIASession;
+
+class DIAInjectedSource : public IPDBInjectedSource {
+public:
+ explicit DIAInjectedSource(CComPtr<IDiaInjectedSource> DiaSourceFile);
+
+ uint32_t getCrc32() const override;
+ uint64_t getCodeByteSize() const override;
+ std::string getFileName() const override;
+ std::string getObjectFileName() const override;
+ std::string getVirtualFileName() const override;
+ uint32_t getCompression() const override;
+ std::string getCode() const override;
+
+private:
+ CComPtr<IDiaInjectedSource> SourceFile;
+};
+} // namespace pdb
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_DIA_DIAINJECTEDSOURCE_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIALineNumber.h b/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIALineNumber.h
new file mode 100644
index 0000000000..42db84a6ea
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIALineNumber.h
@@ -0,0 +1,50 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- DIALineNumber.h - DIA implementation of IPDBLineNumber ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_DIA_DIALINENUMBER_H
+#define LLVM_DEBUGINFO_PDB_DIA_DIALINENUMBER_H
+
+#include "DIASupport.h"
+#include "llvm/DebugInfo/PDB/IPDBLineNumber.h"
+
+namespace llvm {
+namespace pdb {
+class DIALineNumber : public IPDBLineNumber {
+public:
+ explicit DIALineNumber(CComPtr<IDiaLineNumber> DiaLineNumber);
+
+ uint32_t getLineNumber() const override;
+ uint32_t getLineNumberEnd() const override;
+ uint32_t getColumnNumber() const override;
+ uint32_t getColumnNumberEnd() const override;
+ uint32_t getAddressSection() const override;
+ uint32_t getAddressOffset() const override;
+ uint32_t getRelativeVirtualAddress() const override;
+ uint64_t getVirtualAddress() const override;
+ uint32_t getLength() const override;
+ uint32_t getSourceFileId() const override;
+ uint32_t getCompilandId() const override;
+ bool isStatement() const override;
+
+private:
+ CComPtr<IDiaLineNumber> LineNumber;
+};
+}
+}
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIARawSymbol.h b/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIARawSymbol.h
new file mode 100644
index 0000000000..0b3c91431b
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIARawSymbol.h
@@ -0,0 +1,244 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- DIARawSymbol.h - DIA implementation of IPDBRawSymbol ----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_DIA_DIARAWSYMBOL_H
+#define LLVM_DEBUGINFO_PDB_DIA_DIARAWSYMBOL_H
+
+#include "DIASupport.h"
+#include "llvm/DebugInfo/PDB/IPDBRawSymbol.h"
+
+namespace llvm {
+namespace pdb {
+class DIASession;
+class DIARawSymbol : public IPDBRawSymbol {
+public:
+ DIARawSymbol(const DIASession &PDBSession, CComPtr<IDiaSymbol> DiaSymbol);
+
+ void dump(raw_ostream &OS, int Indent, PdbSymbolIdField ShowIdFields,
+ PdbSymbolIdField RecurseIdFields) const override;
+
+ CComPtr<IDiaSymbol> getDiaSymbol() const { return Symbol; }
+
+ std::unique_ptr<IPDBEnumSymbols>
+ findChildren(PDB_SymType Type) const override;
+ std::unique_ptr<IPDBEnumSymbols>
+ findChildren(PDB_SymType Type, StringRef Name,
+ PDB_NameSearchFlags Flags) const override;
+ std::unique_ptr<IPDBEnumSymbols>
+ findChildrenByAddr(PDB_SymType Type, StringRef Name,
+ PDB_NameSearchFlags Flags,
+ uint32_t Section, uint32_t Offset) const override;
+ std::unique_ptr<IPDBEnumSymbols>
+ findChildrenByVA(PDB_SymType Type, StringRef Name, PDB_NameSearchFlags Flags,
+ uint64_t VA) const override;
+ std::unique_ptr<IPDBEnumSymbols>
+ findChildrenByRVA(PDB_SymType Type, StringRef Name, PDB_NameSearchFlags Flags,
+ uint32_t RVA) const override;
+
+ std::unique_ptr<IPDBEnumSymbols>
+ findInlineFramesByAddr(uint32_t Section, uint32_t Offset) const override;
+ std::unique_ptr<IPDBEnumSymbols>
+ findInlineFramesByRVA(uint32_t RVA) const override;
+ std::unique_ptr<IPDBEnumSymbols>
+ findInlineFramesByVA(uint64_t VA) const override;
+
+ std::unique_ptr<IPDBEnumLineNumbers> findInlineeLines() const override;
+ std::unique_ptr<IPDBEnumLineNumbers>
+ findInlineeLinesByAddr(uint32_t Section, uint32_t Offset,
+ uint32_t Length) const override;
+ std::unique_ptr<IPDBEnumLineNumbers>
+ findInlineeLinesByRVA(uint32_t RVA, uint32_t Length) const override;
+ std::unique_ptr<IPDBEnumLineNumbers>
+ findInlineeLinesByVA(uint64_t VA, uint32_t Length) const override;
+
+ void getDataBytes(llvm::SmallVector<uint8_t, 32> &bytes) const override;
+ void getFrontEndVersion(VersionInfo &Version) const override;
+ void getBackEndVersion(VersionInfo &Version) const override;
+ PDB_MemberAccess getAccess() const override;
+ uint32_t getAddressOffset() const override;
+ uint32_t getAddressSection() const override;
+ uint32_t getAge() const override;
+ SymIndexId getArrayIndexTypeId() const override;
+ uint32_t getBaseDataOffset() const override;
+ uint32_t getBaseDataSlot() const override;
+ SymIndexId getBaseSymbolId() const override;
+ PDB_BuiltinType getBuiltinType() const override;
+ uint32_t getBitPosition() const override;
+ PDB_CallingConv getCallingConvention() const override;
+ SymIndexId getClassParentId() const override;
+ std::string getCompilerName() const override;
+ uint32_t getCount() const override;
+ uint32_t getCountLiveRanges() const override;
+ PDB_Lang getLanguage() const override;
+ SymIndexId getLexicalParentId() const override;
+ std::string getLibraryName() const override;
+ uint32_t getLiveRangeStartAddressOffset() const override;
+ uint32_t getLiveRangeStartAddressSection() const override;
+ uint32_t getLiveRangeStartRelativeVirtualAddress() const override;
+ codeview::RegisterId getLocalBasePointerRegisterId() const override;
+ SymIndexId getLowerBoundId() const override;
+ uint32_t getMemorySpaceKind() const override;
+ std::string getName() const override;
+ uint32_t getNumberOfAcceleratorPointerTags() const override;
+ uint32_t getNumberOfColumns() const override;
+ uint32_t getNumberOfModifiers() const override;
+ uint32_t getNumberOfRegisterIndices() const override;
+ uint32_t getNumberOfRows() const override;
+ std::string getObjectFileName() const override;
+ uint32_t getOemId() const override;
+ SymIndexId getOemSymbolId() const override;
+ uint32_t getOffsetInUdt() const override;
+ PDB_Cpu getPlatform() const override;
+ uint32_t getRank() const override;
+ codeview::RegisterId getRegisterId() const override;
+ uint32_t getRegisterType() const override;
+ uint32_t getRelativeVirtualAddress() const override;
+ uint32_t getSamplerSlot() const override;
+ uint32_t getSignature() const override;
+ uint32_t getSizeInUdt() const override;
+ uint32_t getSlot() const override;
+ std::string getSourceFileName() const override;
+ std::unique_ptr<IPDBLineNumber> getSrcLineOnTypeDefn() const override;
+ uint32_t getStride() const override;
+ SymIndexId getSubTypeId() const override;
+ std::string getSymbolsFileName() const override;
+ SymIndexId getSymIndexId() const override;
+ uint32_t getTargetOffset() const override;
+ uint32_t getTargetRelativeVirtualAddress() const override;
+ uint64_t getTargetVirtualAddress() const override;
+ uint32_t getTargetSection() const override;
+ uint32_t getTextureSlot() const override;
+ uint32_t getTimeStamp() const override;
+ uint32_t getToken() const override;
+ SymIndexId getTypeId() const override;
+ uint32_t getUavSlot() const override;
+ std::string getUndecoratedName() const override;
+ std::string getUndecoratedNameEx(PDB_UndnameFlags Flags) const override;
+ SymIndexId getUnmodifiedTypeId() const override;
+ SymIndexId getUpperBoundId() const override;
+ Variant getValue() const override;
+ uint32_t getVirtualBaseDispIndex() const override;
+ uint32_t getVirtualBaseOffset() const override;
+ SymIndexId getVirtualTableShapeId() const override;
+ std::unique_ptr<PDBSymbolTypeBuiltin>
+ getVirtualBaseTableType() const override;
+ PDB_DataKind getDataKind() const override;
+ PDB_SymType getSymTag() const override;
+ codeview::GUID getGuid() const override;
+ int32_t getOffset() const override;
+ int32_t getThisAdjust() const override;
+ int32_t getVirtualBasePointerOffset() const override;
+ PDB_LocType getLocationType() const override;
+ PDB_Machine getMachineType() const override;
+ codeview::ThunkOrdinal getThunkOrdinal() const override;
+ uint64_t getLength() const override;
+ uint64_t getLiveRangeLength() const override;
+ uint64_t getVirtualAddress() const override;
+ PDB_UdtType getUdtKind() const override;
+ bool hasConstructor() const override;
+ bool hasCustomCallingConvention() const override;
+ bool hasFarReturn() const override;
+ bool isCode() const override;
+ bool isCompilerGenerated() const override;
+ bool isConstType() const override;
+ bool isEditAndContinueEnabled() const override;
+ bool isFunction() const override;
+ bool getAddressTaken() const override;
+ bool getNoStackOrdering() const override;
+ bool hasAlloca() const override;
+ bool hasAssignmentOperator() const override;
+ bool hasCTypes() const override;
+ bool hasCastOperator() const override;
+ bool hasDebugInfo() const override;
+ bool hasEH() const override;
+ bool hasEHa() const override;
+ bool hasInlAsm() const override;
+ bool hasInlineAttribute() const override;
+ bool hasInterruptReturn() const override;
+ bool hasFramePointer() const override;
+ bool hasLongJump() const override;
+ bool hasManagedCode() const override;
+ bool hasNestedTypes() const override;
+ bool hasNoInlineAttribute() const override;
+ bool hasNoReturnAttribute() const override;
+ bool hasOptimizedCodeDebugInfo() const override;
+ bool hasOverloadedOperator() const override;
+ bool hasSEH() const override;
+ bool hasSecurityChecks() const override;
+ bool hasSetJump() const override;
+ bool hasStrictGSCheck() const override;
+ bool isAcceleratorGroupSharedLocal() const override;
+ bool isAcceleratorPointerTagLiveRange() const override;
+ bool isAcceleratorStubFunction() const override;
+ bool isAggregated() const override;
+ bool isIntroVirtualFunction() const override;
+ bool isCVTCIL() const override;
+ bool isConstructorVirtualBase() const override;
+ bool isCxxReturnUdt() const override;
+ bool isDataAligned() const override;
+ bool isHLSLData() const override;
+ bool isHotpatchable() const override;
+ bool isIndirectVirtualBaseClass() const override;
+ bool isInterfaceUdt() const override;
+ bool isIntrinsic() const override;
+ bool isLTCG() const override;
+ bool isLocationControlFlowDependent() const override;
+ bool isMSILNetmodule() const override;
+ bool isMatrixRowMajor() const override;
+ bool isManagedCode() const override;
+ bool isMSILCode() const override;
+ bool isMultipleInheritance() const override;
+ bool isNaked() const override;
+ bool isNested() const override;
+ bool isOptimizedAway() const override;
+ bool isPacked() const override;
+ bool isPointerBasedOnSymbolValue() const override;
+ bool isPointerToDataMember() const override;
+ bool isPointerToMemberFunction() const override;
+ bool isPureVirtual() const override;
+ bool isRValueReference() const override;
+ bool isRefUdt() const override;
+ bool isReference() const override;
+ bool isRestrictedType() const override;
+ bool isReturnValue() const override;
+ bool isSafeBuffers() const override;
+ bool isScoped() const override;
+ bool isSdl() const override;
+ bool isSingleInheritance() const override;
+ bool isSplitted() const override;
+ bool isStatic() const override;
+ bool hasPrivateSymbols() const override;
+ bool isUnalignedType() const override;
+ bool isUnreached() const override;
+ bool isValueUdt() const override;
+ bool isVirtual() const override;
+ bool isVirtualBaseClass() const override;
+ bool isVirtualInheritance() const override;
+ bool isVolatileType() const override;
+ bool wasInlined() const override;
+ std::string getUnused() const override;
+
+private:
+ const DIASession &Session;
+ CComPtr<IDiaSymbol> Symbol;
+};
+}
+}
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIASectionContrib.h b/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIASectionContrib.h
new file mode 100644
index 0000000000..af2d7b788a
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIASectionContrib.h
@@ -0,0 +1,65 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- DIASectionContrib.h - DIA Impl. of IPDBSectionContrib ------ C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_DIA_DIASECTIONCONTRIB_H
+#define LLVM_DEBUGINFO_PDB_DIA_DIASECTIONCONTRIB_H
+
+#include "DIASupport.h"
+#include "llvm/DebugInfo/PDB/IPDBSectionContrib.h"
+
+namespace llvm {
+namespace pdb {
+class DIASession;
+
+class DIASectionContrib : public IPDBSectionContrib {
+public:
+ explicit DIASectionContrib(const DIASession &PDBSession,
+ CComPtr<IDiaSectionContrib> DiaSection);
+
+ std::unique_ptr<PDBSymbolCompiland> getCompiland() const override;
+ uint32_t getAddressSection() const override;
+ uint32_t getAddressOffset() const override;
+ uint32_t getRelativeVirtualAddress() const override;
+ uint64_t getVirtualAddress() const override;
+ uint32_t getLength() const override;
+ bool isNotPaged() const override;
+ bool hasCode() const override;
+ bool hasCode16Bit() const override;
+ bool hasInitializedData() const override;
+ bool hasUninitializedData() const override;
+ bool isRemoved() const override;
+ bool hasComdat() const override;
+ bool isDiscardable() const override;
+ bool isNotCached() const override;
+ bool isShared() const override;
+ bool isExecutable() const override;
+ bool isReadable() const override;
+ bool isWritable() const override;
+ uint32_t getDataCrc32() const override;
+ uint32_t getRelocationsCrc32() const override;
+ uint32_t getCompilandId() const override;
+
+private:
+ const DIASession &Session;
+ CComPtr<IDiaSectionContrib> Section;
+};
+} // namespace pdb
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_DIA_DIASECTIONCONTRIB_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIASession.h b/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIASession.h
new file mode 100644
index 0000000000..48b167843f
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIASession.h
@@ -0,0 +1,104 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- DIASession.h - DIA implementation of IPDBSession ---------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_DIA_DIASESSION_H
+#define LLVM_DEBUGINFO_PDB_DIA_DIASESSION_H
+
+#include "DIASupport.h"
+#include "llvm/DebugInfo/PDB/IPDBSession.h"
+#include "llvm/Support/Error.h"
+
+#include <system_error>
+
+namespace llvm {
+class StringRef;
+
+namespace pdb {
+class DIASession : public IPDBSession {
+public:
+ explicit DIASession(CComPtr<IDiaSession> DiaSession);
+
+ static Error createFromPdb(StringRef Path,
+ std::unique_ptr<IPDBSession> &Session);
+ static Error createFromExe(StringRef Path,
+ std::unique_ptr<IPDBSession> &Session);
+
+ uint64_t getLoadAddress() const override;
+ bool setLoadAddress(uint64_t Address) override;
+ std::unique_ptr<PDBSymbolExe> getGlobalScope() override;
+ std::unique_ptr<PDBSymbol> getSymbolById(SymIndexId SymbolId) const override;
+
+ bool addressForVA(uint64_t VA, uint32_t &Section,
+ uint32_t &Offset) const override;
+ bool addressForRVA(uint32_t RVA, uint32_t &Section,
+ uint32_t &Offset) const override;
+
+ std::unique_ptr<PDBSymbol> findSymbolByAddress(uint64_t Address,
+ PDB_SymType Type) override;
+ std::unique_ptr<PDBSymbol> findSymbolByRVA(uint32_t RVA,
+ PDB_SymType Type) override;
+ std::unique_ptr<PDBSymbol> findSymbolBySectOffset(uint32_t Section,
+ uint32_t Offset,
+ PDB_SymType Type) override;
+
+ std::unique_ptr<IPDBEnumLineNumbers>
+ findLineNumbers(const PDBSymbolCompiland &Compiland,
+ const IPDBSourceFile &File) const override;
+ std::unique_ptr<IPDBEnumLineNumbers>
+ findLineNumbersByAddress(uint64_t Address, uint32_t Length) const override;
+ std::unique_ptr<IPDBEnumLineNumbers>
+ findLineNumbersByRVA(uint32_t RVA, uint32_t Length) const override;
+ std::unique_ptr<IPDBEnumLineNumbers>
+ findLineNumbersBySectOffset(uint32_t Section, uint32_t Offset,
+ uint32_t Length) const override;
+
+ std::unique_ptr<IPDBEnumSourceFiles>
+ findSourceFiles(const PDBSymbolCompiland *Compiland, llvm::StringRef Pattern,
+ PDB_NameSearchFlags Flags) const override;
+ std::unique_ptr<IPDBSourceFile>
+ findOneSourceFile(const PDBSymbolCompiland *Compiland,
+ llvm::StringRef Pattern,
+ PDB_NameSearchFlags Flags) const override;
+ std::unique_ptr<IPDBEnumChildren<PDBSymbolCompiland>>
+ findCompilandsForSourceFile(llvm::StringRef Pattern,
+ PDB_NameSearchFlags Flags) const override;
+ std::unique_ptr<PDBSymbolCompiland>
+ findOneCompilandForSourceFile(llvm::StringRef Pattern,
+ PDB_NameSearchFlags Flags) const override;
+ std::unique_ptr<IPDBEnumSourceFiles> getAllSourceFiles() const override;
+ std::unique_ptr<IPDBEnumSourceFiles> getSourceFilesForCompiland(
+ const PDBSymbolCompiland &Compiland) const override;
+ std::unique_ptr<IPDBSourceFile>
+ getSourceFileById(uint32_t FileId) const override;
+
+ std::unique_ptr<IPDBEnumDataStreams> getDebugStreams() const override;
+
+ std::unique_ptr<IPDBEnumTables> getEnumTables() const override;
+
+ std::unique_ptr<IPDBEnumInjectedSources> getInjectedSources() const override;
+
+ std::unique_ptr<IPDBEnumSectionContribs> getSectionContribs() const override;
+
+ std::unique_ptr<IPDBEnumFrameData> getFrameData() const override;
+private:
+ CComPtr<IDiaSession> Session;
+};
+} // namespace pdb
+} // namespace llvm
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIASourceFile.h b/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIASourceFile.h
new file mode 100644
index 0000000000..46e1a2b96c
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIASourceFile.h
@@ -0,0 +1,51 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- DIASourceFile.h - DIA implementation of IPDBSourceFile ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_DIA_DIASOURCEFILE_H
+#define LLVM_DEBUGINFO_PDB_DIA_DIASOURCEFILE_H
+
+#include "DIASupport.h"
+#include "llvm/DebugInfo/PDB/IPDBSourceFile.h"
+
+namespace llvm {
+namespace pdb {
+class DIASession;
+
+class DIASourceFile : public IPDBSourceFile {
+public:
+ explicit DIASourceFile(const DIASession &Session,
+ CComPtr<IDiaSourceFile> DiaSourceFile);
+
+ std::string getFileName() const override;
+ uint32_t getUniqueId() const override;
+ std::string getChecksum() const override;
+ PDB_Checksum getChecksumType() const override;
+ std::unique_ptr<IPDBEnumChildren<PDBSymbolCompiland>>
+ getCompilands() const override;
+
+ CComPtr<IDiaSourceFile> getDiaFile() const { return SourceFile; }
+
+private:
+ const DIASession &Session;
+ CComPtr<IDiaSourceFile> SourceFile;
+};
+}
+}
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIASupport.h b/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIASupport.h
new file mode 100644
index 0000000000..3e01baaf9c
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIASupport.h
@@ -0,0 +1,51 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- DIASupport.h - Common header includes for DIA ------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Common defines and header includes for all LLVMDebugInfoPDBDIA. The
+// definitions here configure the necessary #defines and include system headers
+// in the proper order for using DIA.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_DIA_DIASUPPORT_H
+#define LLVM_DEBUGINFO_PDB_DIA_DIASUPPORT_H
+
+// Require at least Vista
+#define NTDDI_VERSION NTDDI_VISTA
+#define _WIN32_WINNT _WIN32_WINNT_VISTA
+#define WINVER _WIN32_WINNT_VISTA
+#ifndef NOMINMAX
+#define NOMINMAX
+#endif
+
+// atlbase.h has to come before windows.h
+#include <atlbase.h>
+#include <windows.h>
+
+// DIA headers must come after windows headers.
+#include <cvconst.h>
+#ifdef __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wnon-virtual-dtor"
+#endif
+#include <dia2.h>
+#ifdef __clang__
+#pragma clang diagnostic pop
+#endif
+#include <diacreate.h>
+
+#endif // LLVM_DEBUGINFO_PDB_DIA_DIASUPPORT_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIATable.h b/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIATable.h
new file mode 100644
index 0000000000..97085ae5c9
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIATable.h
@@ -0,0 +1,42 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- DIATable.h - DIA implementation of IPDBTable -------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_DIA_DIATABLE_H
+#define LLVM_DEBUGINFO_PDB_DIA_DIATABLE_H
+
+#include "DIASupport.h"
+#include "llvm/DebugInfo/PDB/IPDBTable.h"
+
+namespace llvm {
+namespace pdb {
+class DIATable : public IPDBTable {
+public:
+ explicit DIATable(CComPtr<IDiaTable> DiaTable);
+
+ uint32_t getItemCount() const override;
+ std::string getName() const override;
+ PDB_TableType getTableType() const override;
+
+private:
+ CComPtr<IDiaTable> Table;
+};
+}
+}
+
+#endif // LLVM_DEBUGINFO_PDB_DIA_DIATABLE_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIAUtils.h b/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIAUtils.h
new file mode 100644
index 0000000000..fe0bd1b612
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/DIA/DIAUtils.h
@@ -0,0 +1,41 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- DIAUtils.h - Utility functions for working with DIA ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_DIA_DIAUTILS_H
+#define LLVM_DEBUGINFO_PDB_DIA_DIAUTILS_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Support/ConvertUTF.h"
+
+template <typename Obj>
+std::string invokeBstrMethod(Obj &Object,
+ HRESULT (__stdcall Obj::*Func)(BSTR *)) {
+ CComBSTR Str16;
+ HRESULT Result = (Object.*Func)(&Str16);
+ if (S_OK != Result)
+ return std::string();
+
+ std::string Str8;
+ llvm::ArrayRef<char> StrBytes(reinterpret_cast<char *>(Str16.m_str),
+ Str16.ByteLength());
+ llvm::convertUTF16ToUTF8String(StrBytes, Str8);
+ return Str8;
+}
+
+#endif // LLVM_DEBUGINFO_PDB_DIA_DIAUTILS_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/Native/Formatters.h b/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/Native/Formatters.h
new file mode 100644
index 0000000000..d5fd6a83d0
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/DebugInfo/PDB/Native/Formatters.h
@@ -0,0 +1,55 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- Formatters.h ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_NATIVE_FORMATTERS_H
+#define LLVM_DEBUGINFO_PDB_NATIVE_FORMATTERS_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/DebugInfo/CodeView/Formatters.h"
+#include "llvm/DebugInfo/PDB/Native/RawConstants.h"
+#include "llvm/DebugInfo/PDB/Native/RawTypes.h"
+#include "llvm/Support/FormatProviders.h"
+
+#define FORMAT_CASE(Value, Name) \
+ case Value: \
+ Stream << Name; \
+ break;
+
+namespace llvm {
+template <> struct format_provider<pdb::PdbRaw_ImplVer> {
+ static void format(const pdb::PdbRaw_ImplVer &V, llvm::raw_ostream &Stream,
+ StringRef Style) {
+ switch (V) {
+ FORMAT_CASE(pdb::PdbRaw_ImplVer::PdbImplVC110, "VC110")
+ FORMAT_CASE(pdb::PdbRaw_ImplVer::PdbImplVC140, "VC140")
+ FORMAT_CASE(pdb::PdbRaw_ImplVer::PdbImplVC2, "VC2")
+ FORMAT_CASE(pdb::PdbRaw_ImplVer::PdbImplVC4, "VC4")
+ FORMAT_CASE(pdb::PdbRaw_ImplVer::PdbImplVC41, "VC41")
+ FORMAT_CASE(pdb::PdbRaw_ImplVer::PdbImplVC50, "VC50")
+ FORMAT_CASE(pdb::PdbRaw_ImplVer::PdbImplVC70, "VC70")
+ FORMAT_CASE(pdb::PdbRaw_ImplVer::PdbImplVC70Dep, "VC70Dep")
+ FORMAT_CASE(pdb::PdbRaw_ImplVer::PdbImplVC80, "VC80")
+ FORMAT_CASE(pdb::PdbRaw_ImplVer::PdbImplVC98, "VC98")
+ }
+ }
+};
+}
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/ExecutionEngine/OProfileWrapper.h b/contrib/libs/llvm16/include/llvm/ExecutionEngine/OProfileWrapper.h
new file mode 100644
index 0000000000..8993cf64a6
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/ExecutionEngine/OProfileWrapper.h
@@ -0,0 +1,134 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- OProfileWrapper.h - OProfile JIT API Wrapper ------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// This file defines a OProfileWrapper object that detects if the oprofile
+// daemon is running, and provides wrappers for opagent functions used to
+// communicate with the oprofile JIT interface. The dynamic library libopagent
+// does not need to be linked directly as this object lazily loads the library
+// when the first op_ function is called.
+//
+// See http://oprofile.sourceforge.net/doc/devel/jit-interface.html for the
+// definition of the interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_OPROFILEWRAPPER_H
+#define LLVM_EXECUTIONENGINE_OPROFILEWRAPPER_H
+
+#include "llvm/Support/DataTypes.h"
+#include <opagent.h>
+
+namespace llvm {
+
+
+class OProfileWrapper {
+ typedef op_agent_t (*op_open_agent_ptr_t)();
+ typedef int (*op_close_agent_ptr_t)(op_agent_t);
+ typedef int (*op_write_native_code_ptr_t)(op_agent_t,
+ const char*,
+ uint64_t,
+ void const*,
+ const unsigned int);
+ typedef int (*op_write_debug_line_info_ptr_t)(op_agent_t,
+ void const*,
+ size_t,
+ struct debug_line_info const*);
+ typedef int (*op_unload_native_code_ptr_t)(op_agent_t, uint64_t);
+
+ // Also used for op_minor_version function which has the same signature
+ typedef int (*op_major_version_ptr_t)();
+
+ // This is not a part of the opagent API, but is useful nonetheless
+ typedef bool (*IsOProfileRunningPtrT)();
+
+
+ op_agent_t Agent;
+ op_open_agent_ptr_t OpenAgentFunc;
+ op_close_agent_ptr_t CloseAgentFunc;
+ op_write_native_code_ptr_t WriteNativeCodeFunc;
+ op_write_debug_line_info_ptr_t WriteDebugLineInfoFunc;
+ op_unload_native_code_ptr_t UnloadNativeCodeFunc;
+ op_major_version_ptr_t MajorVersionFunc;
+ op_major_version_ptr_t MinorVersionFunc;
+ IsOProfileRunningPtrT IsOProfileRunningFunc;
+
+ bool Initialized;
+
+public:
+ OProfileWrapper();
+
+ // For testing with a mock opagent implementation, skips the dynamic load and
+ // the function resolution.
+ OProfileWrapper(op_open_agent_ptr_t OpenAgentImpl,
+ op_close_agent_ptr_t CloseAgentImpl,
+ op_write_native_code_ptr_t WriteNativeCodeImpl,
+ op_write_debug_line_info_ptr_t WriteDebugLineInfoImpl,
+ op_unload_native_code_ptr_t UnloadNativeCodeImpl,
+ op_major_version_ptr_t MajorVersionImpl,
+ op_major_version_ptr_t MinorVersionImpl,
+ IsOProfileRunningPtrT MockIsOProfileRunningImpl = 0)
+ : OpenAgentFunc(OpenAgentImpl),
+ CloseAgentFunc(CloseAgentImpl),
+ WriteNativeCodeFunc(WriteNativeCodeImpl),
+ WriteDebugLineInfoFunc(WriteDebugLineInfoImpl),
+ UnloadNativeCodeFunc(UnloadNativeCodeImpl),
+ MajorVersionFunc(MajorVersionImpl),
+ MinorVersionFunc(MinorVersionImpl),
+ IsOProfileRunningFunc(MockIsOProfileRunningImpl),
+ Initialized(true)
+ {
+ }
+
+ // Calls op_open_agent in the oprofile JIT library and saves the returned
+ // op_agent_t handle internally so it can be used when calling all the other
+ // op_* functions. Callers of this class do not need to keep track of
+ // op_agent_t objects.
+ bool op_open_agent();
+
+ int op_close_agent();
+ int op_write_native_code(const char* name,
+ uint64_t addr,
+ void const* code,
+ const unsigned int size);
+ int op_write_debug_line_info(void const* code,
+ size_t num_entries,
+ struct debug_line_info const* info);
+ int op_unload_native_code(uint64_t addr);
+ int op_major_version();
+ int op_minor_version();
+
+ // Returns true if the oprofiled process is running, the opagent library is
+ // loaded and a connection to the agent has been established, and false
+ // otherwise.
+ bool isAgentAvailable();
+
+private:
+ // Loads the libopagent library and initializes this wrapper if the oprofile
+ // daemon is running
+ bool initialize();
+
+ // Searches /proc for the oprofile daemon and returns true if the process if
+ // found, or false otherwise.
+ bool checkForOProfileProcEntry();
+
+ bool isOProfileRunning();
+};
+
+} // namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_OPROFILEWRAPPER_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/Frontend/OpenMP/OMP.inc b/contrib/libs/llvm16/include/llvm/Frontend/OpenMP/OMP.inc
new file mode 100644
index 0000000000..09ca8e6666
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/Frontend/OpenMP/OMP.inc
@@ -0,0 +1,8441 @@
+#ifdef GEN_FLANG_DIRECTIVE_CLAUSE_SETS
+#undef GEN_FLANG_DIRECTIVE_CLAUSE_SETS
+
+namespace llvm {
+namespace omp {
+
+ // Sets for allocate
+
+ static OmpClauseSet allowedClauses_OMPD_allocate {
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_allocate {
+ llvm::omp::Clause::OMPC_allocator,
+ llvm::omp::Clause::OMPC_align,
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_allocate {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_allocate {
+ };
+
+ // Sets for assumes
+
+ static OmpClauseSet allowedClauses_OMPD_assumes {
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_assumes {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_assumes {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_assumes {
+ };
+
+ // Sets for atomic
+
+ static OmpClauseSet allowedClauses_OMPD_atomic {
+ llvm::omp::Clause::OMPC_read,
+ llvm::omp::Clause::OMPC_write,
+ llvm::omp::Clause::OMPC_update,
+ llvm::omp::Clause::OMPC_capture,
+ llvm::omp::Clause::OMPC_compare,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_atomic {
+ llvm::omp::Clause::OMPC_seq_cst,
+ llvm::omp::Clause::OMPC_acq_rel,
+ llvm::omp::Clause::OMPC_acquire,
+ llvm::omp::Clause::OMPC_release,
+ llvm::omp::Clause::OMPC_relaxed,
+ llvm::omp::Clause::OMPC_hint,
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_atomic {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_atomic {
+ };
+
+ // Sets for barrier
+
+ static OmpClauseSet allowedClauses_OMPD_barrier {
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_barrier {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_barrier {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_barrier {
+ };
+
+ // Sets for begin assumes
+
+ static OmpClauseSet allowedClauses_OMPD_begin_assumes {
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_begin_assumes {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_begin_assumes {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_begin_assumes {
+ };
+
+ // Sets for begin declare target
+
+ static OmpClauseSet allowedClauses_OMPD_begin_declare_target {
+ llvm::omp::Clause::OMPC_to,
+ llvm::omp::Clause::OMPC_link,
+ llvm::omp::Clause::OMPC_device_type,
+ llvm::omp::Clause::OMPC_indirect,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_begin_declare_target {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_begin_declare_target {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_begin_declare_target {
+ };
+
+ // Sets for begin declare variant
+
+ static OmpClauseSet allowedClauses_OMPD_begin_declare_variant {
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_begin_declare_variant {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_begin_declare_variant {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_begin_declare_variant {
+ };
+
+ // Sets for cancel
+
+ static OmpClauseSet allowedClauses_OMPD_cancel {
+ llvm::omp::Clause::OMPC_if,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_cancel {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_cancel {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_cancel {
+ };
+
+ // Sets for cancellation point
+
+ static OmpClauseSet allowedClauses_OMPD_cancellation_point {
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_cancellation_point {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_cancellation_point {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_cancellation_point {
+ };
+
+ // Sets for critical
+
+ static OmpClauseSet allowedClauses_OMPD_critical {
+ llvm::omp::Clause::OMPC_hint,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_critical {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_critical {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_critical {
+ };
+
+ // Sets for declare mapper
+
+ static OmpClauseSet allowedClauses_OMPD_declare_mapper {
+ llvm::omp::Clause::OMPC_map,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_declare_mapper {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_declare_mapper {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_declare_mapper {
+ };
+
+ // Sets for declare reduction
+
+ static OmpClauseSet allowedClauses_OMPD_declare_reduction {
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_declare_reduction {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_declare_reduction {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_declare_reduction {
+ };
+
+ // Sets for declare simd
+
+ static OmpClauseSet allowedClauses_OMPD_declare_simd {
+ llvm::omp::Clause::OMPC_linear,
+ llvm::omp::Clause::OMPC_aligned,
+ llvm::omp::Clause::OMPC_uniform,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_declare_simd {
+ llvm::omp::Clause::OMPC_simdlen,
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_declare_simd {
+ llvm::omp::Clause::OMPC_inbranch,
+ llvm::omp::Clause::OMPC_notinbranch,
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_declare_simd {
+ };
+
+ // Sets for declare target
+
+ static OmpClauseSet allowedClauses_OMPD_declare_target {
+ llvm::omp::Clause::OMPC_to,
+ llvm::omp::Clause::OMPC_link,
+ llvm::omp::Clause::OMPC_indirect,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_declare_target {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_declare_target {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_declare_target {
+ };
+
+ // Sets for declare variant
+
+ static OmpClauseSet allowedClauses_OMPD_declare_variant {
+ llvm::omp::Clause::OMPC_match,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_declare_variant {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_declare_variant {
+ llvm::omp::Clause::OMPC_adjust_args,
+ llvm::omp::Clause::OMPC_append_args,
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_declare_variant {
+ };
+
+ // Sets for depobj
+
+ static OmpClauseSet allowedClauses_OMPD_depobj {
+ llvm::omp::Clause::OMPC_depend,
+ llvm::omp::Clause::OMPC_destroy,
+ llvm::omp::Clause::OMPC_update,
+ llvm::omp::Clause::OMPC_depobj,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_depobj {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_depobj {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_depobj {
+ };
+
+ // Sets for distribute
+
+ static OmpClauseSet allowedClauses_OMPD_distribute {
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_lastprivate,
+ llvm::omp::Clause::OMPC_allocate,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_distribute {
+ llvm::omp::Clause::OMPC_collapse,
+ llvm::omp::Clause::OMPC_dist_schedule,
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_distribute {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_distribute {
+ };
+
+ // Sets for distribute parallel do
+
+ static OmpClauseSet allowedClauses_OMPD_distribute_parallel_do {
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_lastprivate,
+ llvm::omp::Clause::OMPC_allocate,
+ llvm::omp::Clause::OMPC_order,
+ llvm::omp::Clause::OMPC_default,
+ llvm::omp::Clause::OMPC_shared,
+ llvm::omp::Clause::OMPC_reduction,
+ llvm::omp::Clause::OMPC_copyin,
+ llvm::omp::Clause::OMPC_linear,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_distribute_parallel_do {
+ llvm::omp::Clause::OMPC_collapse,
+ llvm::omp::Clause::OMPC_dist_schedule,
+ llvm::omp::Clause::OMPC_if,
+ llvm::omp::Clause::OMPC_num_threads,
+ llvm::omp::Clause::OMPC_proc_bind,
+ llvm::omp::Clause::OMPC_schedule,
+ llvm::omp::Clause::OMPC_ordered,
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_distribute_parallel_do {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_distribute_parallel_do {
+ };
+
+ // Sets for distribute parallel do simd
+
+ static OmpClauseSet allowedClauses_OMPD_distribute_parallel_do_simd {
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_lastprivate,
+ llvm::omp::Clause::OMPC_collapse,
+ llvm::omp::Clause::OMPC_dist_schedule,
+ llvm::omp::Clause::OMPC_if,
+ llvm::omp::Clause::OMPC_num_threads,
+ llvm::omp::Clause::OMPC_default,
+ llvm::omp::Clause::OMPC_proc_bind,
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_shared,
+ llvm::omp::Clause::OMPC_reduction,
+ llvm::omp::Clause::OMPC_copyin,
+ llvm::omp::Clause::OMPC_schedule,
+ llvm::omp::Clause::OMPC_linear,
+ llvm::omp::Clause::OMPC_aligned,
+ llvm::omp::Clause::OMPC_safelen,
+ llvm::omp::Clause::OMPC_simdlen,
+ llvm::omp::Clause::OMPC_allocate,
+ llvm::omp::Clause::OMPC_nontemporal,
+ llvm::omp::Clause::OMPC_order,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_distribute_parallel_do_simd {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_distribute_parallel_do_simd {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_distribute_parallel_do_simd {
+ };
+
+ // Sets for distribute parallel for
+
+ static OmpClauseSet allowedClauses_OMPD_distribute_parallel_for {
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_lastprivate,
+ llvm::omp::Clause::OMPC_collapse,
+ llvm::omp::Clause::OMPC_dist_schedule,
+ llvm::omp::Clause::OMPC_if,
+ llvm::omp::Clause::OMPC_num_threads,
+ llvm::omp::Clause::OMPC_default,
+ llvm::omp::Clause::OMPC_proc_bind,
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_shared,
+ llvm::omp::Clause::OMPC_reduction,
+ llvm::omp::Clause::OMPC_copyin,
+ llvm::omp::Clause::OMPC_schedule,
+ llvm::omp::Clause::OMPC_allocate,
+ llvm::omp::Clause::OMPC_order,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_distribute_parallel_for {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_distribute_parallel_for {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_distribute_parallel_for {
+ };
+
+ // Sets for distribute parallel for simd
+
+ static OmpClauseSet allowedClauses_OMPD_distribute_parallel_for_simd {
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_lastprivate,
+ llvm::omp::Clause::OMPC_collapse,
+ llvm::omp::Clause::OMPC_dist_schedule,
+ llvm::omp::Clause::OMPC_if,
+ llvm::omp::Clause::OMPC_num_threads,
+ llvm::omp::Clause::OMPC_default,
+ llvm::omp::Clause::OMPC_proc_bind,
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_shared,
+ llvm::omp::Clause::OMPC_reduction,
+ llvm::omp::Clause::OMPC_copyin,
+ llvm::omp::Clause::OMPC_schedule,
+ llvm::omp::Clause::OMPC_linear,
+ llvm::omp::Clause::OMPC_aligned,
+ llvm::omp::Clause::OMPC_safelen,
+ llvm::omp::Clause::OMPC_simdlen,
+ llvm::omp::Clause::OMPC_allocate,
+ llvm::omp::Clause::OMPC_nontemporal,
+ llvm::omp::Clause::OMPC_order,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_distribute_parallel_for_simd {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_distribute_parallel_for_simd {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_distribute_parallel_for_simd {
+ };
+
+ // Sets for distribute simd
+
+ static OmpClauseSet allowedClauses_OMPD_distribute_simd {
+ llvm::omp::Clause::OMPC_aligned,
+ llvm::omp::Clause::OMPC_allocate,
+ llvm::omp::Clause::OMPC_copyin,
+ llvm::omp::Clause::OMPC_default,
+ llvm::omp::Clause::OMPC_linear,
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_lastprivate,
+ llvm::omp::Clause::OMPC_nontemporal,
+ llvm::omp::Clause::OMPC_order,
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_reduction,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_distribute_simd {
+ llvm::omp::Clause::OMPC_collapse,
+ llvm::omp::Clause::OMPC_dist_schedule,
+ llvm::omp::Clause::OMPC_if,
+ llvm::omp::Clause::OMPC_num_threads,
+ llvm::omp::Clause::OMPC_ordered,
+ llvm::omp::Clause::OMPC_proc_bind,
+ llvm::omp::Clause::OMPC_schedule,
+ llvm::omp::Clause::OMPC_safelen,
+ llvm::omp::Clause::OMPC_simdlen,
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_distribute_simd {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_distribute_simd {
+ };
+
+ // Sets for do
+
+ static OmpClauseSet allowedClauses_OMPD_do {
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_lastprivate,
+ llvm::omp::Clause::OMPC_linear,
+ llvm::omp::Clause::OMPC_reduction,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_do {
+ llvm::omp::Clause::OMPC_schedule,
+ llvm::omp::Clause::OMPC_collapse,
+ llvm::omp::Clause::OMPC_ordered,
+ llvm::omp::Clause::OMPC_nowait,
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_do {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_do {
+ };
+
+ // Sets for do simd
+
+ static OmpClauseSet allowedClauses_OMPD_do_simd {
+ llvm::omp::Clause::OMPC_aligned,
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_lastprivate,
+ llvm::omp::Clause::OMPC_linear,
+ llvm::omp::Clause::OMPC_reduction,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_do_simd {
+ llvm::omp::Clause::OMPC_schedule,
+ llvm::omp::Clause::OMPC_collapse,
+ llvm::omp::Clause::OMPC_ordered,
+ llvm::omp::Clause::OMPC_safelen,
+ llvm::omp::Clause::OMPC_simdlen,
+ llvm::omp::Clause::OMPC_nowait,
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_do_simd {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_do_simd {
+ };
+
+ // Sets for end assumes
+
+ static OmpClauseSet allowedClauses_OMPD_end_assumes {
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_end_assumes {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_end_assumes {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_end_assumes {
+ };
+
+ // Sets for end declare target
+
+ static OmpClauseSet allowedClauses_OMPD_end_declare_target {
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_end_declare_target {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_end_declare_target {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_end_declare_target {
+ };
+
+ // Sets for end declare variant
+
+ static OmpClauseSet allowedClauses_OMPD_end_declare_variant {
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_end_declare_variant {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_end_declare_variant {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_end_declare_variant {
+ };
+
+ // Sets for end do
+
+ static OmpClauseSet allowedClauses_OMPD_end_do {
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_end_do {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_end_do {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_end_do {
+ };
+
+ // Sets for end do simd
+
+ static OmpClauseSet allowedClauses_OMPD_end_do_simd {
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_end_do_simd {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_end_do_simd {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_end_do_simd {
+ };
+
+ // Sets for end sections
+
+ static OmpClauseSet allowedClauses_OMPD_end_sections {
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_end_sections {
+ llvm::omp::Clause::OMPC_nowait,
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_end_sections {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_end_sections {
+ };
+
+ // Sets for end single
+
+ static OmpClauseSet allowedClauses_OMPD_end_single {
+ llvm::omp::Clause::OMPC_copyprivate,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_end_single {
+ llvm::omp::Clause::OMPC_nowait,
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_end_single {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_end_single {
+ };
+
+ // Sets for end workshare
+
+ static OmpClauseSet allowedClauses_OMPD_end_workshare {
+ llvm::omp::Clause::OMPC_nowait,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_end_workshare {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_end_workshare {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_end_workshare {
+ };
+
+ // Sets for error
+
+ static OmpClauseSet allowedClauses_OMPD_error {
+ llvm::omp::Clause::OMPC_at,
+ llvm::omp::Clause::OMPC_severity,
+ llvm::omp::Clause::OMPC_message,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_error {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_error {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_error {
+ };
+
+ // Sets for flush
+
+ static OmpClauseSet allowedClauses_OMPD_flush {
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_flush {
+ llvm::omp::Clause::OMPC_acq_rel,
+ llvm::omp::Clause::OMPC_acquire,
+ llvm::omp::Clause::OMPC_release,
+ llvm::omp::Clause::OMPC_flush,
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_flush {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_flush {
+ };
+
+ // Sets for for
+
+ static OmpClauseSet allowedClauses_OMPD_for {
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_lastprivate,
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_reduction,
+ llvm::omp::Clause::OMPC_collapse,
+ llvm::omp::Clause::OMPC_schedule,
+ llvm::omp::Clause::OMPC_ordered,
+ llvm::omp::Clause::OMPC_nowait,
+ llvm::omp::Clause::OMPC_linear,
+ llvm::omp::Clause::OMPC_allocate,
+ llvm::omp::Clause::OMPC_order,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_for {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_for {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_for {
+ };
+
+ // Sets for for simd
+
+ static OmpClauseSet allowedClauses_OMPD_for_simd {
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_lastprivate,
+ llvm::omp::Clause::OMPC_reduction,
+ llvm::omp::Clause::OMPC_schedule,
+ llvm::omp::Clause::OMPC_collapse,
+ llvm::omp::Clause::OMPC_nowait,
+ llvm::omp::Clause::OMPC_safelen,
+ llvm::omp::Clause::OMPC_simdlen,
+ llvm::omp::Clause::OMPC_linear,
+ llvm::omp::Clause::OMPC_aligned,
+ llvm::omp::Clause::OMPC_ordered,
+ llvm::omp::Clause::OMPC_allocate,
+ llvm::omp::Clause::OMPC_if,
+ llvm::omp::Clause::OMPC_nontemporal,
+ llvm::omp::Clause::OMPC_order,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_for_simd {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_for_simd {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_for_simd {
+ };
+
+ // Sets for masked taskloop
+
+ static OmpClauseSet allowedClauses_OMPD_masked_taskloop {
+ llvm::omp::Clause::OMPC_if,
+ llvm::omp::Clause::OMPC_shared,
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_lastprivate,
+ llvm::omp::Clause::OMPC_default,
+ llvm::omp::Clause::OMPC_collapse,
+ llvm::omp::Clause::OMPC_final,
+ llvm::omp::Clause::OMPC_untied,
+ llvm::omp::Clause::OMPC_mergeable,
+ llvm::omp::Clause::OMPC_priority,
+ llvm::omp::Clause::OMPC_grainsize,
+ llvm::omp::Clause::OMPC_nogroup,
+ llvm::omp::Clause::OMPC_num_tasks,
+ llvm::omp::Clause::OMPC_reduction,
+ llvm::omp::Clause::OMPC_in_reduction,
+ llvm::omp::Clause::OMPC_allocate,
+ llvm::omp::Clause::OMPC_filter,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_masked_taskloop {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_masked_taskloop {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_masked_taskloop {
+ };
+
+ // Sets for masked taskloop simd
+
+ static OmpClauseSet allowedClauses_OMPD_masked_taskloop_simd {
+ llvm::omp::Clause::OMPC_if,
+ llvm::omp::Clause::OMPC_shared,
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_lastprivate,
+ llvm::omp::Clause::OMPC_default,
+ llvm::omp::Clause::OMPC_collapse,
+ llvm::omp::Clause::OMPC_final,
+ llvm::omp::Clause::OMPC_untied,
+ llvm::omp::Clause::OMPC_mergeable,
+ llvm::omp::Clause::OMPC_priority,
+ llvm::omp::Clause::OMPC_linear,
+ llvm::omp::Clause::OMPC_aligned,
+ llvm::omp::Clause::OMPC_safelen,
+ llvm::omp::Clause::OMPC_simdlen,
+ llvm::omp::Clause::OMPC_grainsize,
+ llvm::omp::Clause::OMPC_nogroup,
+ llvm::omp::Clause::OMPC_num_tasks,
+ llvm::omp::Clause::OMPC_reduction,
+ llvm::omp::Clause::OMPC_in_reduction,
+ llvm::omp::Clause::OMPC_allocate,
+ llvm::omp::Clause::OMPC_nontemporal,
+ llvm::omp::Clause::OMPC_order,
+ llvm::omp::Clause::OMPC_filter,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_masked_taskloop_simd {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_masked_taskloop_simd {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_masked_taskloop_simd {
+ };
+
+ // Sets for master
+
+ static OmpClauseSet allowedClauses_OMPD_master {
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_master {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_master {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_master {
+ };
+
+ // Sets for master taskloop
+
+ static OmpClauseSet allowedClauses_OMPD_master_taskloop {
+ llvm::omp::Clause::OMPC_if,
+ llvm::omp::Clause::OMPC_shared,
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_lastprivate,
+ llvm::omp::Clause::OMPC_default,
+ llvm::omp::Clause::OMPC_collapse,
+ llvm::omp::Clause::OMPC_final,
+ llvm::omp::Clause::OMPC_untied,
+ llvm::omp::Clause::OMPC_mergeable,
+ llvm::omp::Clause::OMPC_priority,
+ llvm::omp::Clause::OMPC_grainsize,
+ llvm::omp::Clause::OMPC_nogroup,
+ llvm::omp::Clause::OMPC_num_tasks,
+ llvm::omp::Clause::OMPC_reduction,
+ llvm::omp::Clause::OMPC_in_reduction,
+ llvm::omp::Clause::OMPC_allocate,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_master_taskloop {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_master_taskloop {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_master_taskloop {
+ };
+
+ // Sets for master taskloop simd
+
+ static OmpClauseSet allowedClauses_OMPD_master_taskloop_simd {
+ llvm::omp::Clause::OMPC_if,
+ llvm::omp::Clause::OMPC_shared,
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_lastprivate,
+ llvm::omp::Clause::OMPC_default,
+ llvm::omp::Clause::OMPC_collapse,
+ llvm::omp::Clause::OMPC_final,
+ llvm::omp::Clause::OMPC_untied,
+ llvm::omp::Clause::OMPC_mergeable,
+ llvm::omp::Clause::OMPC_priority,
+ llvm::omp::Clause::OMPC_linear,
+ llvm::omp::Clause::OMPC_aligned,
+ llvm::omp::Clause::OMPC_safelen,
+ llvm::omp::Clause::OMPC_simdlen,
+ llvm::omp::Clause::OMPC_grainsize,
+ llvm::omp::Clause::OMPC_nogroup,
+ llvm::omp::Clause::OMPC_num_tasks,
+ llvm::omp::Clause::OMPC_reduction,
+ llvm::omp::Clause::OMPC_in_reduction,
+ llvm::omp::Clause::OMPC_allocate,
+ llvm::omp::Clause::OMPC_nontemporal,
+ llvm::omp::Clause::OMPC_order,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_master_taskloop_simd {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_master_taskloop_simd {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_master_taskloop_simd {
+ };
+
+ // Sets for metadirective
+
+ static OmpClauseSet allowedClauses_OMPD_metadirective {
+ llvm::omp::Clause::OMPC_when,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_metadirective {
+ llvm::omp::Clause::OMPC_default,
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_metadirective {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_metadirective {
+ };
+
+ // Sets for nothing
+
+ static OmpClauseSet allowedClauses_OMPD_nothing {
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_nothing {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_nothing {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_nothing {
+ };
+
+ // Sets for ordered
+
+ static OmpClauseSet allowedClauses_OMPD_ordered {
+ llvm::omp::Clause::OMPC_depend,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_ordered {
+ llvm::omp::Clause::OMPC_threads,
+ llvm::omp::Clause::OMPC_simd,
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_ordered {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_ordered {
+ };
+
+ // Sets for parallel
+
+ static OmpClauseSet allowedClauses_OMPD_parallel {
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_shared,
+ llvm::omp::Clause::OMPC_reduction,
+ llvm::omp::Clause::OMPC_copyin,
+ llvm::omp::Clause::OMPC_allocate,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_parallel {
+ llvm::omp::Clause::OMPC_default,
+ llvm::omp::Clause::OMPC_if,
+ llvm::omp::Clause::OMPC_num_threads,
+ llvm::omp::Clause::OMPC_proc_bind,
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_parallel {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_parallel {
+ };
+
+ // Sets for parallel do
+
+ static OmpClauseSet allowedClauses_OMPD_parallel_do {
+ llvm::omp::Clause::OMPC_default,
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_shared,
+ llvm::omp::Clause::OMPC_reduction,
+ llvm::omp::Clause::OMPC_copyin,
+ llvm::omp::Clause::OMPC_lastprivate,
+ llvm::omp::Clause::OMPC_linear,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_parallel_do {
+ llvm::omp::Clause::OMPC_if,
+ llvm::omp::Clause::OMPC_num_threads,
+ llvm::omp::Clause::OMPC_proc_bind,
+ llvm::omp::Clause::OMPC_schedule,
+ llvm::omp::Clause::OMPC_ordered,
+ llvm::omp::Clause::OMPC_collapse,
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_parallel_do {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_parallel_do {
+ };
+
+ // Sets for parallel do simd
+
+ static OmpClauseSet allowedClauses_OMPD_parallel_do_simd {
+ llvm::omp::Clause::OMPC_default,
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_shared,
+ llvm::omp::Clause::OMPC_reduction,
+ llvm::omp::Clause::OMPC_copyin,
+ llvm::omp::Clause::OMPC_lastprivate,
+ llvm::omp::Clause::OMPC_linear,
+ llvm::omp::Clause::OMPC_aligned,
+ llvm::omp::Clause::OMPC_allocate,
+ llvm::omp::Clause::OMPC_nontemporal,
+ llvm::omp::Clause::OMPC_order,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_parallel_do_simd {
+ llvm::omp::Clause::OMPC_if,
+ llvm::omp::Clause::OMPC_num_threads,
+ llvm::omp::Clause::OMPC_proc_bind,
+ llvm::omp::Clause::OMPC_schedule,
+ llvm::omp::Clause::OMPC_ordered,
+ llvm::omp::Clause::OMPC_collapse,
+ llvm::omp::Clause::OMPC_safelen,
+ llvm::omp::Clause::OMPC_simdlen,
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_parallel_do_simd {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_parallel_do_simd {
+ };
+
+ // Sets for parallel for
+
+ static OmpClauseSet allowedClauses_OMPD_parallel_for {
+ llvm::omp::Clause::OMPC_if,
+ llvm::omp::Clause::OMPC_num_threads,
+ llvm::omp::Clause::OMPC_default,
+ llvm::omp::Clause::OMPC_proc_bind,
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_shared,
+ llvm::omp::Clause::OMPC_reduction,
+ llvm::omp::Clause::OMPC_copyin,
+ llvm::omp::Clause::OMPC_lastprivate,
+ llvm::omp::Clause::OMPC_collapse,
+ llvm::omp::Clause::OMPC_schedule,
+ llvm::omp::Clause::OMPC_ordered,
+ llvm::omp::Clause::OMPC_linear,
+ llvm::omp::Clause::OMPC_allocate,
+ llvm::omp::Clause::OMPC_order,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_parallel_for {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_parallel_for {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_parallel_for {
+ };
+
+ // Sets for parallel for simd
+
+ static OmpClauseSet allowedClauses_OMPD_parallel_for_simd {
+ llvm::omp::Clause::OMPC_if,
+ llvm::omp::Clause::OMPC_num_threads,
+ llvm::omp::Clause::OMPC_default,
+ llvm::omp::Clause::OMPC_proc_bind,
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_shared,
+ llvm::omp::Clause::OMPC_reduction,
+ llvm::omp::Clause::OMPC_copyin,
+ llvm::omp::Clause::OMPC_lastprivate,
+ llvm::omp::Clause::OMPC_collapse,
+ llvm::omp::Clause::OMPC_schedule,
+ llvm::omp::Clause::OMPC_safelen,
+ llvm::omp::Clause::OMPC_simdlen,
+ llvm::omp::Clause::OMPC_linear,
+ llvm::omp::Clause::OMPC_aligned,
+ llvm::omp::Clause::OMPC_ordered,
+ llvm::omp::Clause::OMPC_allocate,
+ llvm::omp::Clause::OMPC_nontemporal,
+ llvm::omp::Clause::OMPC_order,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_parallel_for_simd {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_parallel_for_simd {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_parallel_for_simd {
+ };
+
+ // Sets for parallel masked
+
+ static OmpClauseSet allowedClauses_OMPD_parallel_masked {
+ llvm::omp::Clause::OMPC_if,
+ llvm::omp::Clause::OMPC_num_threads,
+ llvm::omp::Clause::OMPC_default,
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_shared,
+ llvm::omp::Clause::OMPC_copyin,
+ llvm::omp::Clause::OMPC_reduction,
+ llvm::omp::Clause::OMPC_proc_bind,
+ llvm::omp::Clause::OMPC_allocate,
+ llvm::omp::Clause::OMPC_filter,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_parallel_masked {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_parallel_masked {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_parallel_masked {
+ };
+
+ // Sets for parallel masked taskloop
+
+ static OmpClauseSet allowedClauses_OMPD_parallel_masked_taskloop {
+ llvm::omp::Clause::OMPC_if,
+ llvm::omp::Clause::OMPC_shared,
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_lastprivate,
+ llvm::omp::Clause::OMPC_default,
+ llvm::omp::Clause::OMPC_collapse,
+ llvm::omp::Clause::OMPC_final,
+ llvm::omp::Clause::OMPC_untied,
+ llvm::omp::Clause::OMPC_mergeable,
+ llvm::omp::Clause::OMPC_priority,
+ llvm::omp::Clause::OMPC_grainsize,
+ llvm::omp::Clause::OMPC_nogroup,
+ llvm::omp::Clause::OMPC_num_tasks,
+ llvm::omp::Clause::OMPC_reduction,
+ llvm::omp::Clause::OMPC_allocate,
+ llvm::omp::Clause::OMPC_num_threads,
+ llvm::omp::Clause::OMPC_proc_bind,
+ llvm::omp::Clause::OMPC_copyin,
+ llvm::omp::Clause::OMPC_filter,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_parallel_masked_taskloop {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_parallel_masked_taskloop {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_parallel_masked_taskloop {
+ };
+
+ // Sets for parallel masked taskloop simd
+
+ static OmpClauseSet allowedClauses_OMPD_parallel_masked_taskloop_simd {
+ llvm::omp::Clause::OMPC_if,
+ llvm::omp::Clause::OMPC_shared,
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_lastprivate,
+ llvm::omp::Clause::OMPC_default,
+ llvm::omp::Clause::OMPC_collapse,
+ llvm::omp::Clause::OMPC_final,
+ llvm::omp::Clause::OMPC_untied,
+ llvm::omp::Clause::OMPC_mergeable,
+ llvm::omp::Clause::OMPC_priority,
+ llvm::omp::Clause::OMPC_grainsize,
+ llvm::omp::Clause::OMPC_nogroup,
+ llvm::omp::Clause::OMPC_num_tasks,
+ llvm::omp::Clause::OMPC_reduction,
+ llvm::omp::Clause::OMPC_allocate,
+ llvm::omp::Clause::OMPC_num_threads,
+ llvm::omp::Clause::OMPC_proc_bind,
+ llvm::omp::Clause::OMPC_copyin,
+ llvm::omp::Clause::OMPC_linear,
+ llvm::omp::Clause::OMPC_aligned,
+ llvm::omp::Clause::OMPC_safelen,
+ llvm::omp::Clause::OMPC_simdlen,
+ llvm::omp::Clause::OMPC_nontemporal,
+ llvm::omp::Clause::OMPC_order,
+ llvm::omp::Clause::OMPC_filter,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_parallel_masked_taskloop_simd {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_parallel_masked_taskloop_simd {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_parallel_masked_taskloop_simd {
+ };
+
+ // Sets for parallel master
+
+ static OmpClauseSet allowedClauses_OMPD_parallel_master {
+ llvm::omp::Clause::OMPC_if,
+ llvm::omp::Clause::OMPC_num_threads,
+ llvm::omp::Clause::OMPC_default,
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_shared,
+ llvm::omp::Clause::OMPC_copyin,
+ llvm::omp::Clause::OMPC_reduction,
+ llvm::omp::Clause::OMPC_proc_bind,
+ llvm::omp::Clause::OMPC_allocate,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_parallel_master {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_parallel_master {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_parallel_master {
+ };
+
+ // Sets for parallel master taskloop
+
+ static OmpClauseSet allowedClauses_OMPD_parallel_master_taskloop {
+ llvm::omp::Clause::OMPC_if,
+ llvm::omp::Clause::OMPC_shared,
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_lastprivate,
+ llvm::omp::Clause::OMPC_default,
+ llvm::omp::Clause::OMPC_collapse,
+ llvm::omp::Clause::OMPC_final,
+ llvm::omp::Clause::OMPC_untied,
+ llvm::omp::Clause::OMPC_mergeable,
+ llvm::omp::Clause::OMPC_priority,
+ llvm::omp::Clause::OMPC_grainsize,
+ llvm::omp::Clause::OMPC_nogroup,
+ llvm::omp::Clause::OMPC_num_tasks,
+ llvm::omp::Clause::OMPC_reduction,
+ llvm::omp::Clause::OMPC_allocate,
+ llvm::omp::Clause::OMPC_num_threads,
+ llvm::omp::Clause::OMPC_proc_bind,
+ llvm::omp::Clause::OMPC_copyin,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_parallel_master_taskloop {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_parallel_master_taskloop {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_parallel_master_taskloop {
+ };
+
+ // Sets for parallel master taskloop simd
+
+ static OmpClauseSet allowedClauses_OMPD_parallel_master_taskloop_simd {
+ llvm::omp::Clause::OMPC_if,
+ llvm::omp::Clause::OMPC_shared,
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_lastprivate,
+ llvm::omp::Clause::OMPC_default,
+ llvm::omp::Clause::OMPC_collapse,
+ llvm::omp::Clause::OMPC_final,
+ llvm::omp::Clause::OMPC_untied,
+ llvm::omp::Clause::OMPC_mergeable,
+ llvm::omp::Clause::OMPC_priority,
+ llvm::omp::Clause::OMPC_grainsize,
+ llvm::omp::Clause::OMPC_nogroup,
+ llvm::omp::Clause::OMPC_num_tasks,
+ llvm::omp::Clause::OMPC_reduction,
+ llvm::omp::Clause::OMPC_allocate,
+ llvm::omp::Clause::OMPC_num_threads,
+ llvm::omp::Clause::OMPC_proc_bind,
+ llvm::omp::Clause::OMPC_copyin,
+ llvm::omp::Clause::OMPC_linear,
+ llvm::omp::Clause::OMPC_aligned,
+ llvm::omp::Clause::OMPC_safelen,
+ llvm::omp::Clause::OMPC_simdlen,
+ llvm::omp::Clause::OMPC_nontemporal,
+ llvm::omp::Clause::OMPC_order,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_parallel_master_taskloop_simd {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_parallel_master_taskloop_simd {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_parallel_master_taskloop_simd {
+ };
+
+ // Sets for parallel sections
+
+ static OmpClauseSet allowedClauses_OMPD_parallel_sections {
+ llvm::omp::Clause::OMPC_if,
+ llvm::omp::Clause::OMPC_default,
+ llvm::omp::Clause::OMPC_proc_bind,
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_shared,
+ llvm::omp::Clause::OMPC_reduction,
+ llvm::omp::Clause::OMPC_copyin,
+ llvm::omp::Clause::OMPC_lastprivate,
+ llvm::omp::Clause::OMPC_allocate,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_parallel_sections {
+ llvm::omp::Clause::OMPC_num_threads,
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_parallel_sections {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_parallel_sections {
+ };
+
+ // Sets for parallel workshare
+
+ static OmpClauseSet allowedClauses_OMPD_parallel_workshare {
+ llvm::omp::Clause::OMPC_allocate,
+ llvm::omp::Clause::OMPC_copyin,
+ llvm::omp::Clause::OMPC_default,
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_reduction,
+ llvm::omp::Clause::OMPC_shared,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_parallel_workshare {
+ llvm::omp::Clause::OMPC_if,
+ llvm::omp::Clause::OMPC_num_threads,
+ llvm::omp::Clause::OMPC_proc_bind,
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_parallel_workshare {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_parallel_workshare {
+ };
+
+ // Sets for requires
+
+ static OmpClauseSet allowedClauses_OMPD_requires {
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_requires {
+ llvm::omp::Clause::OMPC_unified_address,
+ llvm::omp::Clause::OMPC_unified_shared_memory,
+ llvm::omp::Clause::OMPC_reverse_offload,
+ llvm::omp::Clause::OMPC_dynamic_allocators,
+ llvm::omp::Clause::OMPC_atomic_default_mem_order,
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_requires {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_requires {
+ };
+
+ // Sets for scan
+
+ static OmpClauseSet allowedClauses_OMPD_scan {
+ llvm::omp::Clause::OMPC_inclusive,
+ llvm::omp::Clause::OMPC_exclusive,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_scan {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_scan {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_scan {
+ };
+
+ // Sets for section
+
+ static OmpClauseSet allowedClauses_OMPD_section {
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_section {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_section {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_section {
+ };
+
+ // Sets for sections
+
+ static OmpClauseSet allowedClauses_OMPD_sections {
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_lastprivate,
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_reduction,
+ llvm::omp::Clause::OMPC_nowait,
+ llvm::omp::Clause::OMPC_allocate,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_sections {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_sections {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_sections {
+ };
+
+ // Sets for simd
+
+ static OmpClauseSet allowedClauses_OMPD_simd {
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_lastprivate,
+ llvm::omp::Clause::OMPC_linear,
+ llvm::omp::Clause::OMPC_aligned,
+ llvm::omp::Clause::OMPC_reduction,
+ llvm::omp::Clause::OMPC_allocate,
+ llvm::omp::Clause::OMPC_nontemporal,
+ llvm::omp::Clause::OMPC_order,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_simd {
+ llvm::omp::Clause::OMPC_collapse,
+ llvm::omp::Clause::OMPC_safelen,
+ llvm::omp::Clause::OMPC_simdlen,
+ llvm::omp::Clause::OMPC_if,
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_simd {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_simd {
+ };
+
+ // Sets for single
+
+ static OmpClauseSet allowedClauses_OMPD_single {
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_copyprivate,
+ llvm::omp::Clause::OMPC_nowait,
+ llvm::omp::Clause::OMPC_allocate,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_single {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_single {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_single {
+ };
+
+ // Sets for target
+
+ static OmpClauseSet allowedClauses_OMPD_target {
+ llvm::omp::Clause::OMPC_if,
+ llvm::omp::Clause::OMPC_map,
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_depend,
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_is_device_ptr,
+ llvm::omp::Clause::OMPC_has_device_addr,
+ llvm::omp::Clause::OMPC_reduction,
+ llvm::omp::Clause::OMPC_in_reduction,
+ llvm::omp::Clause::OMPC_allocate,
+ llvm::omp::Clause::OMPC_uses_allocators,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_target {
+ llvm::omp::Clause::OMPC_device,
+ llvm::omp::Clause::OMPC_thread_limit,
+ llvm::omp::Clause::OMPC_defaultmap,
+ llvm::omp::Clause::OMPC_nowait,
+ llvm::omp::Clause::OMPC_ompx_dyn_cgroup_mem,
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_target {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_target {
+ };
+
+ // Sets for target data
+
+ static OmpClauseSet allowedClauses_OMPD_target_data {
+ llvm::omp::Clause::OMPC_use_device_ptr,
+ llvm::omp::Clause::OMPC_use_device_addr,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_target_data {
+ llvm::omp::Clause::OMPC_device,
+ llvm::omp::Clause::OMPC_if,
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_target_data {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_target_data {
+ llvm::omp::Clause::OMPC_map,
+ };
+
+ // Sets for target enter data
+
+ static OmpClauseSet allowedClauses_OMPD_target_enter_data {
+ llvm::omp::Clause::OMPC_depend,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_target_enter_data {
+ llvm::omp::Clause::OMPC_if,
+ llvm::omp::Clause::OMPC_device,
+ llvm::omp::Clause::OMPC_nowait,
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_target_enter_data {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_target_enter_data {
+ llvm::omp::Clause::OMPC_map,
+ };
+
+ // Sets for target exit data
+
+ static OmpClauseSet allowedClauses_OMPD_target_exit_data {
+ llvm::omp::Clause::OMPC_depend,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_target_exit_data {
+ llvm::omp::Clause::OMPC_device,
+ llvm::omp::Clause::OMPC_if,
+ llvm::omp::Clause::OMPC_nowait,
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_target_exit_data {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_target_exit_data {
+ llvm::omp::Clause::OMPC_map,
+ };
+
+ // Sets for target parallel
+
+ static OmpClauseSet allowedClauses_OMPD_target_parallel {
+ llvm::omp::Clause::OMPC_map,
+ llvm::omp::Clause::OMPC_nowait,
+ llvm::omp::Clause::OMPC_depend,
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_default,
+ llvm::omp::Clause::OMPC_shared,
+ llvm::omp::Clause::OMPC_reduction,
+ llvm::omp::Clause::OMPC_is_device_ptr,
+ llvm::omp::Clause::OMPC_has_device_addr,
+ llvm::omp::Clause::OMPC_allocate,
+ llvm::omp::Clause::OMPC_uses_allocators,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_target_parallel {
+ llvm::omp::Clause::OMPC_defaultmap,
+ llvm::omp::Clause::OMPC_device,
+ llvm::omp::Clause::OMPC_if,
+ llvm::omp::Clause::OMPC_num_threads,
+ llvm::omp::Clause::OMPC_proc_bind,
+ llvm::omp::Clause::OMPC_ompx_dyn_cgroup_mem,
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_target_parallel {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_target_parallel {
+ };
+
+ // Sets for target parallel do
+
+ static OmpClauseSet allowedClauses_OMPD_target_parallel_do {
+ llvm::omp::Clause::OMPC_map,
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_lastprivate,
+ llvm::omp::Clause::OMPC_depend,
+ llvm::omp::Clause::OMPC_shared,
+ llvm::omp::Clause::OMPC_reduction,
+ llvm::omp::Clause::OMPC_linear,
+ llvm::omp::Clause::OMPC_is_device_ptr,
+ llvm::omp::Clause::OMPC_has_device_addr,
+ llvm::omp::Clause::OMPC_allocator,
+ llvm::omp::Clause::OMPC_order,
+ llvm::omp::Clause::OMPC_uses_allocators,
+ llvm::omp::Clause::OMPC_default,
+ llvm::omp::Clause::OMPC_copyin,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_target_parallel_do {
+ llvm::omp::Clause::OMPC_if,
+ llvm::omp::Clause::OMPC_num_threads,
+ llvm::omp::Clause::OMPC_proc_bind,
+ llvm::omp::Clause::OMPC_device,
+ llvm::omp::Clause::OMPC_defaultmap,
+ llvm::omp::Clause::OMPC_schedule,
+ llvm::omp::Clause::OMPC_collapse,
+ llvm::omp::Clause::OMPC_ordered,
+ llvm::omp::Clause::OMPC_nowait,
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_target_parallel_do {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_target_parallel_do {
+ };
+
+ // Sets for target parallel do simd
+
+ static OmpClauseSet allowedClauses_OMPD_target_parallel_do_simd {
+ llvm::omp::Clause::OMPC_if,
+ llvm::omp::Clause::OMPC_device,
+ llvm::omp::Clause::OMPC_map,
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_lastprivate,
+ llvm::omp::Clause::OMPC_nowait,
+ llvm::omp::Clause::OMPC_depend,
+ llvm::omp::Clause::OMPC_defaultmap,
+ llvm::omp::Clause::OMPC_num_threads,
+ llvm::omp::Clause::OMPC_default,
+ llvm::omp::Clause::OMPC_proc_bind,
+ llvm::omp::Clause::OMPC_shared,
+ llvm::omp::Clause::OMPC_reduction,
+ llvm::omp::Clause::OMPC_collapse,
+ llvm::omp::Clause::OMPC_schedule,
+ llvm::omp::Clause::OMPC_ordered,
+ llvm::omp::Clause::OMPC_linear,
+ llvm::omp::Clause::OMPC_safelen,
+ llvm::omp::Clause::OMPC_simdlen,
+ llvm::omp::Clause::OMPC_aligned,
+ llvm::omp::Clause::OMPC_is_device_ptr,
+ llvm::omp::Clause::OMPC_has_device_addr,
+ llvm::omp::Clause::OMPC_allocate,
+ llvm::omp::Clause::OMPC_nontemporal,
+ llvm::omp::Clause::OMPC_order,
+ llvm::omp::Clause::OMPC_uses_allocators,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_target_parallel_do_simd {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_target_parallel_do_simd {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_target_parallel_do_simd {
+ };
+
+ // Sets for target parallel for
+
+ static OmpClauseSet allowedClauses_OMPD_target_parallel_for {
+ llvm::omp::Clause::OMPC_if,
+ llvm::omp::Clause::OMPC_device,
+ llvm::omp::Clause::OMPC_map,
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_lastprivate,
+ llvm::omp::Clause::OMPC_nowait,
+ llvm::omp::Clause::OMPC_depend,
+ llvm::omp::Clause::OMPC_defaultmap,
+ llvm::omp::Clause::OMPC_num_threads,
+ llvm::omp::Clause::OMPC_default,
+ llvm::omp::Clause::OMPC_proc_bind,
+ llvm::omp::Clause::OMPC_shared,
+ llvm::omp::Clause::OMPC_reduction,
+ llvm::omp::Clause::OMPC_collapse,
+ llvm::omp::Clause::OMPC_schedule,
+ llvm::omp::Clause::OMPC_ordered,
+ llvm::omp::Clause::OMPC_linear,
+ llvm::omp::Clause::OMPC_is_device_ptr,
+ llvm::omp::Clause::OMPC_has_device_addr,
+ llvm::omp::Clause::OMPC_allocate,
+ llvm::omp::Clause::OMPC_order,
+ llvm::omp::Clause::OMPC_uses_allocators,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_target_parallel_for {
+ llvm::omp::Clause::OMPC_ompx_dyn_cgroup_mem,
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_target_parallel_for {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_target_parallel_for {
+ };
+
+ // Sets for target parallel for simd
+
+ static OmpClauseSet allowedClauses_OMPD_target_parallel_for_simd {
+ llvm::omp::Clause::OMPC_if,
+ llvm::omp::Clause::OMPC_device,
+ llvm::omp::Clause::OMPC_map,
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_lastprivate,
+ llvm::omp::Clause::OMPC_nowait,
+ llvm::omp::Clause::OMPC_depend,
+ llvm::omp::Clause::OMPC_defaultmap,
+ llvm::omp::Clause::OMPC_num_threads,
+ llvm::omp::Clause::OMPC_default,
+ llvm::omp::Clause::OMPC_proc_bind,
+ llvm::omp::Clause::OMPC_shared,
+ llvm::omp::Clause::OMPC_reduction,
+ llvm::omp::Clause::OMPC_collapse,
+ llvm::omp::Clause::OMPC_schedule,
+ llvm::omp::Clause::OMPC_ordered,
+ llvm::omp::Clause::OMPC_linear,
+ llvm::omp::Clause::OMPC_safelen,
+ llvm::omp::Clause::OMPC_simdlen,
+ llvm::omp::Clause::OMPC_aligned,
+ llvm::omp::Clause::OMPC_is_device_ptr,
+ llvm::omp::Clause::OMPC_has_device_addr,
+ llvm::omp::Clause::OMPC_allocate,
+ llvm::omp::Clause::OMPC_nontemporal,
+ llvm::omp::Clause::OMPC_order,
+ llvm::omp::Clause::OMPC_uses_allocators,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_target_parallel_for_simd {
+ llvm::omp::Clause::OMPC_ompx_dyn_cgroup_mem,
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_target_parallel_for_simd {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_target_parallel_for_simd {
+ };
+
+ // Sets for target simd
+
+ static OmpClauseSet allowedClauses_OMPD_target_simd {
+ llvm::omp::Clause::OMPC_aligned,
+ llvm::omp::Clause::OMPC_allocate,
+ llvm::omp::Clause::OMPC_depend,
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_is_device_ptr,
+ llvm::omp::Clause::OMPC_has_device_addr,
+ llvm::omp::Clause::OMPC_lastprivate,
+ llvm::omp::Clause::OMPC_linear,
+ llvm::omp::Clause::OMPC_map,
+ llvm::omp::Clause::OMPC_nontemporal,
+ llvm::omp::Clause::OMPC_nowait,
+ llvm::omp::Clause::OMPC_order,
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_reduction,
+ llvm::omp::Clause::OMPC_shared,
+ llvm::omp::Clause::OMPC_uses_allocators,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_target_simd {
+ llvm::omp::Clause::OMPC_collapse,
+ llvm::omp::Clause::OMPC_safelen,
+ llvm::omp::Clause::OMPC_simdlen,
+ llvm::omp::Clause::OMPC_if,
+ llvm::omp::Clause::OMPC_num_threads,
+ llvm::omp::Clause::OMPC_proc_bind,
+ llvm::omp::Clause::OMPC_device,
+ llvm::omp::Clause::OMPC_defaultmap,
+ llvm::omp::Clause::OMPC_schedule,
+ llvm::omp::Clause::OMPC_ompx_dyn_cgroup_mem,
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_target_simd {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_target_simd {
+ };
+
+ // Sets for target teams
+
+ static OmpClauseSet allowedClauses_OMPD_target_teams {
+ llvm::omp::Clause::OMPC_if,
+ llvm::omp::Clause::OMPC_map,
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_depend,
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_is_device_ptr,
+ llvm::omp::Clause::OMPC_has_device_addr,
+ llvm::omp::Clause::OMPC_reduction,
+ llvm::omp::Clause::OMPC_allocate,
+ llvm::omp::Clause::OMPC_uses_allocators,
+ llvm::omp::Clause::OMPC_shared,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_target_teams {
+ llvm::omp::Clause::OMPC_device,
+ llvm::omp::Clause::OMPC_nowait,
+ llvm::omp::Clause::OMPC_defaultmap,
+ llvm::omp::Clause::OMPC_default,
+ llvm::omp::Clause::OMPC_num_teams,
+ llvm::omp::Clause::OMPC_thread_limit,
+ llvm::omp::Clause::OMPC_ompx_dyn_cgroup_mem,
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_target_teams {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_target_teams {
+ };
+
+ // Sets for target teams distribute
+
+ static OmpClauseSet allowedClauses_OMPD_target_teams_distribute {
+ llvm::omp::Clause::OMPC_if,
+ llvm::omp::Clause::OMPC_map,
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_depend,
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_is_device_ptr,
+ llvm::omp::Clause::OMPC_has_device_addr,
+ llvm::omp::Clause::OMPC_reduction,
+ llvm::omp::Clause::OMPC_allocate,
+ llvm::omp::Clause::OMPC_uses_allocators,
+ llvm::omp::Clause::OMPC_shared,
+ llvm::omp::Clause::OMPC_lastprivate,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_target_teams_distribute {
+ llvm::omp::Clause::OMPC_device,
+ llvm::omp::Clause::OMPC_nowait,
+ llvm::omp::Clause::OMPC_defaultmap,
+ llvm::omp::Clause::OMPC_default,
+ llvm::omp::Clause::OMPC_num_teams,
+ llvm::omp::Clause::OMPC_thread_limit,
+ llvm::omp::Clause::OMPC_collapse,
+ llvm::omp::Clause::OMPC_dist_schedule,
+ llvm::omp::Clause::OMPC_ompx_dyn_cgroup_mem,
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_target_teams_distribute {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_target_teams_distribute {
+ };
+
+ // Sets for target teams distribute parallel do
+
+ static OmpClauseSet allowedClauses_OMPD_target_teams_distribute_parallel_do {
+ llvm::omp::Clause::OMPC_if,
+ llvm::omp::Clause::OMPC_map,
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_depend,
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_is_device_ptr,
+ llvm::omp::Clause::OMPC_has_device_addr,
+ llvm::omp::Clause::OMPC_reduction,
+ llvm::omp::Clause::OMPC_allocate,
+ llvm::omp::Clause::OMPC_uses_allocators,
+ llvm::omp::Clause::OMPC_shared,
+ llvm::omp::Clause::OMPC_lastprivate,
+ llvm::omp::Clause::OMPC_copyin,
+ llvm::omp::Clause::OMPC_linear,
+ llvm::omp::Clause::OMPC_ordered,
+ llvm::omp::Clause::OMPC_order,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_target_teams_distribute_parallel_do {
+ llvm::omp::Clause::OMPC_device,
+ llvm::omp::Clause::OMPC_defaultmap,
+ llvm::omp::Clause::OMPC_nowait,
+ llvm::omp::Clause::OMPC_default,
+ llvm::omp::Clause::OMPC_num_teams,
+ llvm::omp::Clause::OMPC_thread_limit,
+ llvm::omp::Clause::OMPC_collapse,
+ llvm::omp::Clause::OMPC_dist_schedule,
+ llvm::omp::Clause::OMPC_num_threads,
+ llvm::omp::Clause::OMPC_proc_bind,
+ llvm::omp::Clause::OMPC_schedule,
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_target_teams_distribute_parallel_do {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_target_teams_distribute_parallel_do {
+ };
+
+ // Sets for target teams distribute parallel do simd
+
+ static OmpClauseSet allowedClauses_OMPD_target_teams_distribute_parallel_do_simd {
+ llvm::omp::Clause::OMPC_map,
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_depend,
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_is_device_ptr,
+ llvm::omp::Clause::OMPC_has_device_addr,
+ llvm::omp::Clause::OMPC_reduction,
+ llvm::omp::Clause::OMPC_allocate,
+ llvm::omp::Clause::OMPC_uses_allocators,
+ llvm::omp::Clause::OMPC_shared,
+ llvm::omp::Clause::OMPC_lastprivate,
+ llvm::omp::Clause::OMPC_copyin,
+ llvm::omp::Clause::OMPC_linear,
+ llvm::omp::Clause::OMPC_ordered,
+ llvm::omp::Clause::OMPC_order,
+ llvm::omp::Clause::OMPC_aligned,
+ llvm::omp::Clause::OMPC_nontemporal,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_target_teams_distribute_parallel_do_simd {
+ llvm::omp::Clause::OMPC_if,
+ llvm::omp::Clause::OMPC_device,
+ llvm::omp::Clause::OMPC_nowait,
+ llvm::omp::Clause::OMPC_defaultmap,
+ llvm::omp::Clause::OMPC_default,
+ llvm::omp::Clause::OMPC_num_teams,
+ llvm::omp::Clause::OMPC_thread_limit,
+ llvm::omp::Clause::OMPC_collapse,
+ llvm::omp::Clause::OMPC_dist_schedule,
+ llvm::omp::Clause::OMPC_num_threads,
+ llvm::omp::Clause::OMPC_proc_bind,
+ llvm::omp::Clause::OMPC_schedule,
+ llvm::omp::Clause::OMPC_safelen,
+ llvm::omp::Clause::OMPC_simdlen,
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_target_teams_distribute_parallel_do_simd {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_target_teams_distribute_parallel_do_simd {
+ };
+
+ // Sets for target teams distribute parallel for
+
+ static OmpClauseSet allowedClauses_OMPD_target_teams_distribute_parallel_for {
+ llvm::omp::Clause::OMPC_if,
+ llvm::omp::Clause::OMPC_device,
+ llvm::omp::Clause::OMPC_map,
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_nowait,
+ llvm::omp::Clause::OMPC_depend,
+ llvm::omp::Clause::OMPC_defaultmap,
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_is_device_ptr,
+ llvm::omp::Clause::OMPC_has_device_addr,
+ llvm::omp::Clause::OMPC_default,
+ llvm::omp::Clause::OMPC_shared,
+ llvm::omp::Clause::OMPC_reduction,
+ llvm::omp::Clause::OMPC_num_teams,
+ llvm::omp::Clause::OMPC_thread_limit,
+ llvm::omp::Clause::OMPC_lastprivate,
+ llvm::omp::Clause::OMPC_collapse,
+ llvm::omp::Clause::OMPC_dist_schedule,
+ llvm::omp::Clause::OMPC_num_threads,
+ llvm::omp::Clause::OMPC_proc_bind,
+ llvm::omp::Clause::OMPC_schedule,
+ llvm::omp::Clause::OMPC_allocate,
+ llvm::omp::Clause::OMPC_order,
+ llvm::omp::Clause::OMPC_uses_allocators,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_target_teams_distribute_parallel_for {
+ llvm::omp::Clause::OMPC_ompx_dyn_cgroup_mem,
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_target_teams_distribute_parallel_for {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_target_teams_distribute_parallel_for {
+ };
+
+ // Sets for target teams distribute parallel for simd
+
+ static OmpClauseSet allowedClauses_OMPD_target_teams_distribute_parallel_for_simd {
+ llvm::omp::Clause::OMPC_if,
+ llvm::omp::Clause::OMPC_device,
+ llvm::omp::Clause::OMPC_map,
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_nowait,
+ llvm::omp::Clause::OMPC_depend,
+ llvm::omp::Clause::OMPC_defaultmap,
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_is_device_ptr,
+ llvm::omp::Clause::OMPC_has_device_addr,
+ llvm::omp::Clause::OMPC_default,
+ llvm::omp::Clause::OMPC_shared,
+ llvm::omp::Clause::OMPC_reduction,
+ llvm::omp::Clause::OMPC_num_teams,
+ llvm::omp::Clause::OMPC_thread_limit,
+ llvm::omp::Clause::OMPC_lastprivate,
+ llvm::omp::Clause::OMPC_collapse,
+ llvm::omp::Clause::OMPC_dist_schedule,
+ llvm::omp::Clause::OMPC_num_threads,
+ llvm::omp::Clause::OMPC_proc_bind,
+ llvm::omp::Clause::OMPC_schedule,
+ llvm::omp::Clause::OMPC_linear,
+ llvm::omp::Clause::OMPC_aligned,
+ llvm::omp::Clause::OMPC_safelen,
+ llvm::omp::Clause::OMPC_simdlen,
+ llvm::omp::Clause::OMPC_allocate,
+ llvm::omp::Clause::OMPC_nontemporal,
+ llvm::omp::Clause::OMPC_order,
+ llvm::omp::Clause::OMPC_uses_allocators,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_target_teams_distribute_parallel_for_simd {
+ llvm::omp::Clause::OMPC_ompx_dyn_cgroup_mem,
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_target_teams_distribute_parallel_for_simd {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_target_teams_distribute_parallel_for_simd {
+ };
+
+ // Sets for target teams distribute simd
+
+ static OmpClauseSet allowedClauses_OMPD_target_teams_distribute_simd {
+ llvm::omp::Clause::OMPC_aligned,
+ llvm::omp::Clause::OMPC_allocate,
+ llvm::omp::Clause::OMPC_depend,
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_if,
+ llvm::omp::Clause::OMPC_is_device_ptr,
+ llvm::omp::Clause::OMPC_has_device_addr,
+ llvm::omp::Clause::OMPC_lastprivate,
+ llvm::omp::Clause::OMPC_linear,
+ llvm::omp::Clause::OMPC_map,
+ llvm::omp::Clause::OMPC_nontemporal,
+ llvm::omp::Clause::OMPC_order,
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_reduction,
+ llvm::omp::Clause::OMPC_shared,
+ llvm::omp::Clause::OMPC_uses_allocators,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_target_teams_distribute_simd {
+ llvm::omp::Clause::OMPC_device,
+ llvm::omp::Clause::OMPC_defaultmap,
+ llvm::omp::Clause::OMPC_nowait,
+ llvm::omp::Clause::OMPC_num_teams,
+ llvm::omp::Clause::OMPC_thread_limit,
+ llvm::omp::Clause::OMPC_collapse,
+ llvm::omp::Clause::OMPC_dist_schedule,
+ llvm::omp::Clause::OMPC_safelen,
+ llvm::omp::Clause::OMPC_simdlen,
+ llvm::omp::Clause::OMPC_ompx_dyn_cgroup_mem,
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_target_teams_distribute_simd {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_target_teams_distribute_simd {
+ };
+
+ // Sets for target update
+
+ static OmpClauseSet allowedClauses_OMPD_target_update {
+ llvm::omp::Clause::OMPC_to,
+ llvm::omp::Clause::OMPC_from,
+ llvm::omp::Clause::OMPC_depend,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_target_update {
+ llvm::omp::Clause::OMPC_device,
+ llvm::omp::Clause::OMPC_if,
+ llvm::omp::Clause::OMPC_nowait,
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_target_update {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_target_update {
+ };
+
+ // Sets for task
+
+ static OmpClauseSet allowedClauses_OMPD_task {
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_shared,
+ llvm::omp::Clause::OMPC_untied,
+ llvm::omp::Clause::OMPC_mergeable,
+ llvm::omp::Clause::OMPC_depend,
+ llvm::omp::Clause::OMPC_in_reduction,
+ llvm::omp::Clause::OMPC_allocate,
+ llvm::omp::Clause::OMPC_detach,
+ llvm::omp::Clause::OMPC_affinity,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_task {
+ llvm::omp::Clause::OMPC_default,
+ llvm::omp::Clause::OMPC_if,
+ llvm::omp::Clause::OMPC_final,
+ llvm::omp::Clause::OMPC_priority,
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_task {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_task {
+ };
+
+ // Sets for taskgroup
+
+ static OmpClauseSet allowedClauses_OMPD_taskgroup {
+ llvm::omp::Clause::OMPC_task_reduction,
+ llvm::omp::Clause::OMPC_allocate,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_taskgroup {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_taskgroup {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_taskgroup {
+ };
+
+ // Sets for taskloop
+
+ static OmpClauseSet allowedClauses_OMPD_taskloop {
+ llvm::omp::Clause::OMPC_shared,
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_lastprivate,
+ llvm::omp::Clause::OMPC_untied,
+ llvm::omp::Clause::OMPC_mergeable,
+ llvm::omp::Clause::OMPC_nogroup,
+ llvm::omp::Clause::OMPC_reduction,
+ llvm::omp::Clause::OMPC_in_reduction,
+ llvm::omp::Clause::OMPC_allocate,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_taskloop {
+ llvm::omp::Clause::OMPC_default,
+ llvm::omp::Clause::OMPC_if,
+ llvm::omp::Clause::OMPC_collapse,
+ llvm::omp::Clause::OMPC_final,
+ llvm::omp::Clause::OMPC_priority,
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_taskloop {
+ llvm::omp::Clause::OMPC_grainsize,
+ llvm::omp::Clause::OMPC_num_tasks,
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_taskloop {
+ };
+
+ // Sets for taskloop simd
+
+ static OmpClauseSet allowedClauses_OMPD_taskloop_simd {
+ llvm::omp::Clause::OMPC_aligned,
+ llvm::omp::Clause::OMPC_allocate,
+ llvm::omp::Clause::OMPC_default,
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_in_reduction,
+ llvm::omp::Clause::OMPC_lastprivate,
+ llvm::omp::Clause::OMPC_linear,
+ llvm::omp::Clause::OMPC_mergeable,
+ llvm::omp::Clause::OMPC_nogroup,
+ llvm::omp::Clause::OMPC_nontemporal,
+ llvm::omp::Clause::OMPC_order,
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_reduction,
+ llvm::omp::Clause::OMPC_shared,
+ llvm::omp::Clause::OMPC_untied,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_taskloop_simd {
+ llvm::omp::Clause::OMPC_if,
+ llvm::omp::Clause::OMPC_collapse,
+ llvm::omp::Clause::OMPC_safelen,
+ llvm::omp::Clause::OMPC_simdlen,
+ llvm::omp::Clause::OMPC_final,
+ llvm::omp::Clause::OMPC_priority,
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_taskloop_simd {
+ llvm::omp::Clause::OMPC_grainsize,
+ llvm::omp::Clause::OMPC_num_tasks,
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_taskloop_simd {
+ };
+
+ // Sets for taskwait
+
+ static OmpClauseSet allowedClauses_OMPD_taskwait {
+ llvm::omp::Clause::OMPC_depend,
+ llvm::omp::Clause::OMPC_nowait,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_taskwait {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_taskwait {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_taskwait {
+ };
+
+ // Sets for taskyield
+
+ static OmpClauseSet allowedClauses_OMPD_taskyield {
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_taskyield {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_taskyield {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_taskyield {
+ };
+
+ // Sets for teams
+
+ static OmpClauseSet allowedClauses_OMPD_teams {
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_shared,
+ llvm::omp::Clause::OMPC_reduction,
+ llvm::omp::Clause::OMPC_allocate,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_teams {
+ llvm::omp::Clause::OMPC_default,
+ llvm::omp::Clause::OMPC_num_teams,
+ llvm::omp::Clause::OMPC_thread_limit,
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_teams {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_teams {
+ };
+
+ // Sets for teams distribute
+
+ static OmpClauseSet allowedClauses_OMPD_teams_distribute {
+ llvm::omp::Clause::OMPC_default,
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_shared,
+ llvm::omp::Clause::OMPC_reduction,
+ llvm::omp::Clause::OMPC_num_teams,
+ llvm::omp::Clause::OMPC_thread_limit,
+ llvm::omp::Clause::OMPC_lastprivate,
+ llvm::omp::Clause::OMPC_collapse,
+ llvm::omp::Clause::OMPC_dist_schedule,
+ llvm::omp::Clause::OMPC_allocate,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_teams_distribute {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_teams_distribute {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_teams_distribute {
+ };
+
+ // Sets for teams distribute parallel do
+
+ static OmpClauseSet allowedClauses_OMPD_teams_distribute_parallel_do {
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_lastprivate,
+ llvm::omp::Clause::OMPC_shared,
+ llvm::omp::Clause::OMPC_reduction,
+ llvm::omp::Clause::OMPC_allocate,
+ llvm::omp::Clause::OMPC_copyin,
+ llvm::omp::Clause::OMPC_linear,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_teams_distribute_parallel_do {
+ llvm::omp::Clause::OMPC_num_teams,
+ llvm::omp::Clause::OMPC_thread_limit,
+ llvm::omp::Clause::OMPC_default,
+ llvm::omp::Clause::OMPC_collapse,
+ llvm::omp::Clause::OMPC_dist_schedule,
+ llvm::omp::Clause::OMPC_ordered,
+ llvm::omp::Clause::OMPC_order,
+ llvm::omp::Clause::OMPC_if,
+ llvm::omp::Clause::OMPC_num_threads,
+ llvm::omp::Clause::OMPC_proc_bind,
+ llvm::omp::Clause::OMPC_schedule,
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_teams_distribute_parallel_do {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_teams_distribute_parallel_do {
+ };
+
+ // Sets for teams distribute parallel do simd
+
+ static OmpClauseSet allowedClauses_OMPD_teams_distribute_parallel_do_simd {
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_lastprivate,
+ llvm::omp::Clause::OMPC_allocate,
+ llvm::omp::Clause::OMPC_shared,
+ llvm::omp::Clause::OMPC_reduction,
+ llvm::omp::Clause::OMPC_linear,
+ llvm::omp::Clause::OMPC_order,
+ llvm::omp::Clause::OMPC_aligned,
+ llvm::omp::Clause::OMPC_nontemporal,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_teams_distribute_parallel_do_simd {
+ llvm::omp::Clause::OMPC_default,
+ llvm::omp::Clause::OMPC_num_teams,
+ llvm::omp::Clause::OMPC_thread_limit,
+ llvm::omp::Clause::OMPC_collapse,
+ llvm::omp::Clause::OMPC_dist_schedule,
+ llvm::omp::Clause::OMPC_num_threads,
+ llvm::omp::Clause::OMPC_proc_bind,
+ llvm::omp::Clause::OMPC_schedule,
+ llvm::omp::Clause::OMPC_safelen,
+ llvm::omp::Clause::OMPC_simdlen,
+ llvm::omp::Clause::OMPC_if,
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_teams_distribute_parallel_do_simd {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_teams_distribute_parallel_do_simd {
+ };
+
+ // Sets for teams distribute parallel for
+
+ static OmpClauseSet allowedClauses_OMPD_teams_distribute_parallel_for {
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_lastprivate,
+ llvm::omp::Clause::OMPC_collapse,
+ llvm::omp::Clause::OMPC_dist_schedule,
+ llvm::omp::Clause::OMPC_if,
+ llvm::omp::Clause::OMPC_num_threads,
+ llvm::omp::Clause::OMPC_default,
+ llvm::omp::Clause::OMPC_proc_bind,
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_shared,
+ llvm::omp::Clause::OMPC_reduction,
+ llvm::omp::Clause::OMPC_schedule,
+ llvm::omp::Clause::OMPC_num_teams,
+ llvm::omp::Clause::OMPC_thread_limit,
+ llvm::omp::Clause::OMPC_copyin,
+ llvm::omp::Clause::OMPC_allocate,
+ llvm::omp::Clause::OMPC_order,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_teams_distribute_parallel_for {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_teams_distribute_parallel_for {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_teams_distribute_parallel_for {
+ };
+
+ // Sets for teams distribute parallel for simd
+
+ static OmpClauseSet allowedClauses_OMPD_teams_distribute_parallel_for_simd {
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_lastprivate,
+ llvm::omp::Clause::OMPC_collapse,
+ llvm::omp::Clause::OMPC_dist_schedule,
+ llvm::omp::Clause::OMPC_if,
+ llvm::omp::Clause::OMPC_num_threads,
+ llvm::omp::Clause::OMPC_default,
+ llvm::omp::Clause::OMPC_proc_bind,
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_shared,
+ llvm::omp::Clause::OMPC_reduction,
+ llvm::omp::Clause::OMPC_schedule,
+ llvm::omp::Clause::OMPC_linear,
+ llvm::omp::Clause::OMPC_aligned,
+ llvm::omp::Clause::OMPC_safelen,
+ llvm::omp::Clause::OMPC_simdlen,
+ llvm::omp::Clause::OMPC_num_teams,
+ llvm::omp::Clause::OMPC_thread_limit,
+ llvm::omp::Clause::OMPC_allocate,
+ llvm::omp::Clause::OMPC_nontemporal,
+ llvm::omp::Clause::OMPC_order,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_teams_distribute_parallel_for_simd {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_teams_distribute_parallel_for_simd {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_teams_distribute_parallel_for_simd {
+ };
+
+ // Sets for teams distribute simd
+
+ static OmpClauseSet allowedClauses_OMPD_teams_distribute_simd {
+ llvm::omp::Clause::OMPC_aligned,
+ llvm::omp::Clause::OMPC_allocate,
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_lastprivate,
+ llvm::omp::Clause::OMPC_linear,
+ llvm::omp::Clause::OMPC_nontemporal,
+ llvm::omp::Clause::OMPC_order,
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_reduction,
+ llvm::omp::Clause::OMPC_shared,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_teams_distribute_simd {
+ llvm::omp::Clause::OMPC_collapse,
+ llvm::omp::Clause::OMPC_default,
+ llvm::omp::Clause::OMPC_dist_schedule,
+ llvm::omp::Clause::OMPC_if,
+ llvm::omp::Clause::OMPC_num_teams,
+ llvm::omp::Clause::OMPC_safelen,
+ llvm::omp::Clause::OMPC_simdlen,
+ llvm::omp::Clause::OMPC_thread_limit,
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_teams_distribute_simd {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_teams_distribute_simd {
+ };
+
+ // Sets for threadprivate
+
+ static OmpClauseSet allowedClauses_OMPD_threadprivate {
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_threadprivate {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_threadprivate {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_threadprivate {
+ };
+
+ // Sets for tile
+
+ static OmpClauseSet allowedClauses_OMPD_tile {
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_tile {
+ llvm::omp::Clause::OMPC_sizes,
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_tile {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_tile {
+ };
+
+ // Sets for unknown
+
+ static OmpClauseSet allowedClauses_OMPD_unknown {
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_unknown {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_unknown {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_unknown {
+ };
+
+ // Sets for unroll
+
+ static OmpClauseSet allowedClauses_OMPD_unroll {
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_unroll {
+ llvm::omp::Clause::OMPC_full,
+ llvm::omp::Clause::OMPC_partial,
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_unroll {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_unroll {
+ };
+
+ // Sets for workshare
+
+ static OmpClauseSet allowedClauses_OMPD_workshare {
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_workshare {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_workshare {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_workshare {
+ };
+
+ // Sets for dispatch
+
+ static OmpClauseSet allowedClauses_OMPD_dispatch {
+ llvm::omp::Clause::OMPC_device,
+ llvm::omp::Clause::OMPC_is_device_ptr,
+ llvm::omp::Clause::OMPC_has_device_addr,
+ llvm::omp::Clause::OMPC_nowait,
+ llvm::omp::Clause::OMPC_depend,
+ llvm::omp::Clause::OMPC_novariants,
+ llvm::omp::Clause::OMPC_nocontext,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_dispatch {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_dispatch {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_dispatch {
+ };
+
+ // Sets for interop
+
+ static OmpClauseSet allowedClauses_OMPD_interop {
+ llvm::omp::Clause::OMPC_device,
+ llvm::omp::Clause::OMPC_depend,
+ llvm::omp::Clause::OMPC_destroy,
+ llvm::omp::Clause::OMPC_init,
+ llvm::omp::Clause::OMPC_nowait,
+ llvm::omp::Clause::OMPC_use,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_interop {
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_interop {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_interop {
+ };
+
+ // Sets for loop
+
+ static OmpClauseSet allowedClauses_OMPD_loop {
+ llvm::omp::Clause::OMPC_lastprivate,
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_reduction,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_loop {
+ llvm::omp::Clause::OMPC_bind,
+ llvm::omp::Clause::OMPC_collapse,
+ llvm::omp::Clause::OMPC_order,
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_loop {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_loop {
+ };
+
+ // Sets for masked
+
+ static OmpClauseSet allowedClauses_OMPD_masked {
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_masked {
+ llvm::omp::Clause::OMPC_filter,
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_masked {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_masked {
+ };
+
+ // Sets for parallel loop
+
+ static OmpClauseSet allowedClauses_OMPD_parallel_loop {
+ llvm::omp::Clause::OMPC_allocate,
+ llvm::omp::Clause::OMPC_copyin,
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_lastprivate,
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_reduction,
+ llvm::omp::Clause::OMPC_shared,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_parallel_loop {
+ llvm::omp::Clause::OMPC_bind,
+ llvm::omp::Clause::OMPC_collapse,
+ llvm::omp::Clause::OMPC_default,
+ llvm::omp::Clause::OMPC_if,
+ llvm::omp::Clause::OMPC_num_threads,
+ llvm::omp::Clause::OMPC_order,
+ llvm::omp::Clause::OMPC_proc_bind,
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_parallel_loop {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_parallel_loop {
+ };
+
+ // Sets for target parallel loop
+
+ static OmpClauseSet allowedClauses_OMPD_target_parallel_loop {
+ llvm::omp::Clause::OMPC_allocate,
+ llvm::omp::Clause::OMPC_copyin,
+ llvm::omp::Clause::OMPC_depend,
+ llvm::omp::Clause::OMPC_device,
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_is_device_ptr,
+ llvm::omp::Clause::OMPC_has_device_addr,
+ llvm::omp::Clause::OMPC_lastprivate,
+ llvm::omp::Clause::OMPC_map,
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_reduction,
+ llvm::omp::Clause::OMPC_shared,
+ llvm::omp::Clause::OMPC_uses_allocators,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_target_parallel_loop {
+ llvm::omp::Clause::OMPC_bind,
+ llvm::omp::Clause::OMPC_collapse,
+ llvm::omp::Clause::OMPC_default,
+ llvm::omp::Clause::OMPC_defaultmap,
+ llvm::omp::Clause::OMPC_if,
+ llvm::omp::Clause::OMPC_nowait,
+ llvm::omp::Clause::OMPC_num_threads,
+ llvm::omp::Clause::OMPC_order,
+ llvm::omp::Clause::OMPC_proc_bind,
+ llvm::omp::Clause::OMPC_ompx_dyn_cgroup_mem,
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_target_parallel_loop {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_target_parallel_loop {
+ };
+
+ // Sets for target teams loop
+
+ static OmpClauseSet allowedClauses_OMPD_target_teams_loop {
+ llvm::omp::Clause::OMPC_allocate,
+ llvm::omp::Clause::OMPC_depend,
+ llvm::omp::Clause::OMPC_defaultmap,
+ llvm::omp::Clause::OMPC_device,
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_is_device_ptr,
+ llvm::omp::Clause::OMPC_has_device_addr,
+ llvm::omp::Clause::OMPC_lastprivate,
+ llvm::omp::Clause::OMPC_map,
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_reduction,
+ llvm::omp::Clause::OMPC_shared,
+ llvm::omp::Clause::OMPC_uses_allocators,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_target_teams_loop {
+ llvm::omp::Clause::OMPC_bind,
+ llvm::omp::Clause::OMPC_collapse,
+ llvm::omp::Clause::OMPC_default,
+ llvm::omp::Clause::OMPC_if,
+ llvm::omp::Clause::OMPC_nowait,
+ llvm::omp::Clause::OMPC_num_teams,
+ llvm::omp::Clause::OMPC_order,
+ llvm::omp::Clause::OMPC_thread_limit,
+ llvm::omp::Clause::OMPC_ompx_dyn_cgroup_mem,
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_target_teams_loop {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_target_teams_loop {
+ };
+
+ // Sets for teams loop
+
+ static OmpClauseSet allowedClauses_OMPD_teams_loop {
+ llvm::omp::Clause::OMPC_allocate,
+ llvm::omp::Clause::OMPC_firstprivate,
+ llvm::omp::Clause::OMPC_lastprivate,
+ llvm::omp::Clause::OMPC_private,
+ llvm::omp::Clause::OMPC_reduction,
+ llvm::omp::Clause::OMPC_shared,
+ };
+
+ static OmpClauseSet allowedOnceClauses_OMPD_teams_loop {
+ llvm::omp::Clause::OMPC_bind,
+ llvm::omp::Clause::OMPC_collapse,
+ llvm::omp::Clause::OMPC_default,
+ llvm::omp::Clause::OMPC_num_teams,
+ llvm::omp::Clause::OMPC_order,
+ llvm::omp::Clause::OMPC_thread_limit,
+ };
+
+ static OmpClauseSet allowedExclusiveClauses_OMPD_teams_loop {
+ };
+
+ static OmpClauseSet requiredClauses_OMPD_teams_loop {
+ };
+} // namespace omp
+} // namespace llvm
+
+#endif // GEN_FLANG_DIRECTIVE_CLAUSE_SETS
+
+#ifdef GEN_FLANG_DIRECTIVE_CLAUSE_MAP
+#undef GEN_FLANG_DIRECTIVE_CLAUSE_MAP
+
+{
+ {llvm::omp::Directive::OMPD_allocate,
+ {
+ llvm::omp::allowedClauses_OMPD_allocate,
+ llvm::omp::allowedOnceClauses_OMPD_allocate,
+ llvm::omp::allowedExclusiveClauses_OMPD_allocate,
+ llvm::omp::requiredClauses_OMPD_allocate,
+ }
+ },
+ {llvm::omp::Directive::OMPD_assumes,
+ {
+ llvm::omp::allowedClauses_OMPD_assumes,
+ llvm::omp::allowedOnceClauses_OMPD_assumes,
+ llvm::omp::allowedExclusiveClauses_OMPD_assumes,
+ llvm::omp::requiredClauses_OMPD_assumes,
+ }
+ },
+ {llvm::omp::Directive::OMPD_atomic,
+ {
+ llvm::omp::allowedClauses_OMPD_atomic,
+ llvm::omp::allowedOnceClauses_OMPD_atomic,
+ llvm::omp::allowedExclusiveClauses_OMPD_atomic,
+ llvm::omp::requiredClauses_OMPD_atomic,
+ }
+ },
+ {llvm::omp::Directive::OMPD_barrier,
+ {
+ llvm::omp::allowedClauses_OMPD_barrier,
+ llvm::omp::allowedOnceClauses_OMPD_barrier,
+ llvm::omp::allowedExclusiveClauses_OMPD_barrier,
+ llvm::omp::requiredClauses_OMPD_barrier,
+ }
+ },
+ {llvm::omp::Directive::OMPD_begin_assumes,
+ {
+ llvm::omp::allowedClauses_OMPD_begin_assumes,
+ llvm::omp::allowedOnceClauses_OMPD_begin_assumes,
+ llvm::omp::allowedExclusiveClauses_OMPD_begin_assumes,
+ llvm::omp::requiredClauses_OMPD_begin_assumes,
+ }
+ },
+ {llvm::omp::Directive::OMPD_begin_declare_target,
+ {
+ llvm::omp::allowedClauses_OMPD_begin_declare_target,
+ llvm::omp::allowedOnceClauses_OMPD_begin_declare_target,
+ llvm::omp::allowedExclusiveClauses_OMPD_begin_declare_target,
+ llvm::omp::requiredClauses_OMPD_begin_declare_target,
+ }
+ },
+ {llvm::omp::Directive::OMPD_begin_declare_variant,
+ {
+ llvm::omp::allowedClauses_OMPD_begin_declare_variant,
+ llvm::omp::allowedOnceClauses_OMPD_begin_declare_variant,
+ llvm::omp::allowedExclusiveClauses_OMPD_begin_declare_variant,
+ llvm::omp::requiredClauses_OMPD_begin_declare_variant,
+ }
+ },
+ {llvm::omp::Directive::OMPD_cancel,
+ {
+ llvm::omp::allowedClauses_OMPD_cancel,
+ llvm::omp::allowedOnceClauses_OMPD_cancel,
+ llvm::omp::allowedExclusiveClauses_OMPD_cancel,
+ llvm::omp::requiredClauses_OMPD_cancel,
+ }
+ },
+ {llvm::omp::Directive::OMPD_cancellation_point,
+ {
+ llvm::omp::allowedClauses_OMPD_cancellation_point,
+ llvm::omp::allowedOnceClauses_OMPD_cancellation_point,
+ llvm::omp::allowedExclusiveClauses_OMPD_cancellation_point,
+ llvm::omp::requiredClauses_OMPD_cancellation_point,
+ }
+ },
+ {llvm::omp::Directive::OMPD_critical,
+ {
+ llvm::omp::allowedClauses_OMPD_critical,
+ llvm::omp::allowedOnceClauses_OMPD_critical,
+ llvm::omp::allowedExclusiveClauses_OMPD_critical,
+ llvm::omp::requiredClauses_OMPD_critical,
+ }
+ },
+ {llvm::omp::Directive::OMPD_declare_mapper,
+ {
+ llvm::omp::allowedClauses_OMPD_declare_mapper,
+ llvm::omp::allowedOnceClauses_OMPD_declare_mapper,
+ llvm::omp::allowedExclusiveClauses_OMPD_declare_mapper,
+ llvm::omp::requiredClauses_OMPD_declare_mapper,
+ }
+ },
+ {llvm::omp::Directive::OMPD_declare_reduction,
+ {
+ llvm::omp::allowedClauses_OMPD_declare_reduction,
+ llvm::omp::allowedOnceClauses_OMPD_declare_reduction,
+ llvm::omp::allowedExclusiveClauses_OMPD_declare_reduction,
+ llvm::omp::requiredClauses_OMPD_declare_reduction,
+ }
+ },
+ {llvm::omp::Directive::OMPD_declare_simd,
+ {
+ llvm::omp::allowedClauses_OMPD_declare_simd,
+ llvm::omp::allowedOnceClauses_OMPD_declare_simd,
+ llvm::omp::allowedExclusiveClauses_OMPD_declare_simd,
+ llvm::omp::requiredClauses_OMPD_declare_simd,
+ }
+ },
+ {llvm::omp::Directive::OMPD_declare_target,
+ {
+ llvm::omp::allowedClauses_OMPD_declare_target,
+ llvm::omp::allowedOnceClauses_OMPD_declare_target,
+ llvm::omp::allowedExclusiveClauses_OMPD_declare_target,
+ llvm::omp::requiredClauses_OMPD_declare_target,
+ }
+ },
+ {llvm::omp::Directive::OMPD_declare_variant,
+ {
+ llvm::omp::allowedClauses_OMPD_declare_variant,
+ llvm::omp::allowedOnceClauses_OMPD_declare_variant,
+ llvm::omp::allowedExclusiveClauses_OMPD_declare_variant,
+ llvm::omp::requiredClauses_OMPD_declare_variant,
+ }
+ },
+ {llvm::omp::Directive::OMPD_depobj,
+ {
+ llvm::omp::allowedClauses_OMPD_depobj,
+ llvm::omp::allowedOnceClauses_OMPD_depobj,
+ llvm::omp::allowedExclusiveClauses_OMPD_depobj,
+ llvm::omp::requiredClauses_OMPD_depobj,
+ }
+ },
+ {llvm::omp::Directive::OMPD_distribute,
+ {
+ llvm::omp::allowedClauses_OMPD_distribute,
+ llvm::omp::allowedOnceClauses_OMPD_distribute,
+ llvm::omp::allowedExclusiveClauses_OMPD_distribute,
+ llvm::omp::requiredClauses_OMPD_distribute,
+ }
+ },
+ {llvm::omp::Directive::OMPD_distribute_parallel_do,
+ {
+ llvm::omp::allowedClauses_OMPD_distribute_parallel_do,
+ llvm::omp::allowedOnceClauses_OMPD_distribute_parallel_do,
+ llvm::omp::allowedExclusiveClauses_OMPD_distribute_parallel_do,
+ llvm::omp::requiredClauses_OMPD_distribute_parallel_do,
+ }
+ },
+ {llvm::omp::Directive::OMPD_distribute_parallel_do_simd,
+ {
+ llvm::omp::allowedClauses_OMPD_distribute_parallel_do_simd,
+ llvm::omp::allowedOnceClauses_OMPD_distribute_parallel_do_simd,
+ llvm::omp::allowedExclusiveClauses_OMPD_distribute_parallel_do_simd,
+ llvm::omp::requiredClauses_OMPD_distribute_parallel_do_simd,
+ }
+ },
+ {llvm::omp::Directive::OMPD_distribute_parallel_for,
+ {
+ llvm::omp::allowedClauses_OMPD_distribute_parallel_for,
+ llvm::omp::allowedOnceClauses_OMPD_distribute_parallel_for,
+ llvm::omp::allowedExclusiveClauses_OMPD_distribute_parallel_for,
+ llvm::omp::requiredClauses_OMPD_distribute_parallel_for,
+ }
+ },
+ {llvm::omp::Directive::OMPD_distribute_parallel_for_simd,
+ {
+ llvm::omp::allowedClauses_OMPD_distribute_parallel_for_simd,
+ llvm::omp::allowedOnceClauses_OMPD_distribute_parallel_for_simd,
+ llvm::omp::allowedExclusiveClauses_OMPD_distribute_parallel_for_simd,
+ llvm::omp::requiredClauses_OMPD_distribute_parallel_for_simd,
+ }
+ },
+ {llvm::omp::Directive::OMPD_distribute_simd,
+ {
+ llvm::omp::allowedClauses_OMPD_distribute_simd,
+ llvm::omp::allowedOnceClauses_OMPD_distribute_simd,
+ llvm::omp::allowedExclusiveClauses_OMPD_distribute_simd,
+ llvm::omp::requiredClauses_OMPD_distribute_simd,
+ }
+ },
+ {llvm::omp::Directive::OMPD_do,
+ {
+ llvm::omp::allowedClauses_OMPD_do,
+ llvm::omp::allowedOnceClauses_OMPD_do,
+ llvm::omp::allowedExclusiveClauses_OMPD_do,
+ llvm::omp::requiredClauses_OMPD_do,
+ }
+ },
+ {llvm::omp::Directive::OMPD_do_simd,
+ {
+ llvm::omp::allowedClauses_OMPD_do_simd,
+ llvm::omp::allowedOnceClauses_OMPD_do_simd,
+ llvm::omp::allowedExclusiveClauses_OMPD_do_simd,
+ llvm::omp::requiredClauses_OMPD_do_simd,
+ }
+ },
+ {llvm::omp::Directive::OMPD_end_assumes,
+ {
+ llvm::omp::allowedClauses_OMPD_end_assumes,
+ llvm::omp::allowedOnceClauses_OMPD_end_assumes,
+ llvm::omp::allowedExclusiveClauses_OMPD_end_assumes,
+ llvm::omp::requiredClauses_OMPD_end_assumes,
+ }
+ },
+ {llvm::omp::Directive::OMPD_end_declare_target,
+ {
+ llvm::omp::allowedClauses_OMPD_end_declare_target,
+ llvm::omp::allowedOnceClauses_OMPD_end_declare_target,
+ llvm::omp::allowedExclusiveClauses_OMPD_end_declare_target,
+ llvm::omp::requiredClauses_OMPD_end_declare_target,
+ }
+ },
+ {llvm::omp::Directive::OMPD_end_declare_variant,
+ {
+ llvm::omp::allowedClauses_OMPD_end_declare_variant,
+ llvm::omp::allowedOnceClauses_OMPD_end_declare_variant,
+ llvm::omp::allowedExclusiveClauses_OMPD_end_declare_variant,
+ llvm::omp::requiredClauses_OMPD_end_declare_variant,
+ }
+ },
+ {llvm::omp::Directive::OMPD_end_do,
+ {
+ llvm::omp::allowedClauses_OMPD_end_do,
+ llvm::omp::allowedOnceClauses_OMPD_end_do,
+ llvm::omp::allowedExclusiveClauses_OMPD_end_do,
+ llvm::omp::requiredClauses_OMPD_end_do,
+ }
+ },
+ {llvm::omp::Directive::OMPD_end_do_simd,
+ {
+ llvm::omp::allowedClauses_OMPD_end_do_simd,
+ llvm::omp::allowedOnceClauses_OMPD_end_do_simd,
+ llvm::omp::allowedExclusiveClauses_OMPD_end_do_simd,
+ llvm::omp::requiredClauses_OMPD_end_do_simd,
+ }
+ },
+ {llvm::omp::Directive::OMPD_end_sections,
+ {
+ llvm::omp::allowedClauses_OMPD_end_sections,
+ llvm::omp::allowedOnceClauses_OMPD_end_sections,
+ llvm::omp::allowedExclusiveClauses_OMPD_end_sections,
+ llvm::omp::requiredClauses_OMPD_end_sections,
+ }
+ },
+ {llvm::omp::Directive::OMPD_end_single,
+ {
+ llvm::omp::allowedClauses_OMPD_end_single,
+ llvm::omp::allowedOnceClauses_OMPD_end_single,
+ llvm::omp::allowedExclusiveClauses_OMPD_end_single,
+ llvm::omp::requiredClauses_OMPD_end_single,
+ }
+ },
+ {llvm::omp::Directive::OMPD_end_workshare,
+ {
+ llvm::omp::allowedClauses_OMPD_end_workshare,
+ llvm::omp::allowedOnceClauses_OMPD_end_workshare,
+ llvm::omp::allowedExclusiveClauses_OMPD_end_workshare,
+ llvm::omp::requiredClauses_OMPD_end_workshare,
+ }
+ },
+ {llvm::omp::Directive::OMPD_error,
+ {
+ llvm::omp::allowedClauses_OMPD_error,
+ llvm::omp::allowedOnceClauses_OMPD_error,
+ llvm::omp::allowedExclusiveClauses_OMPD_error,
+ llvm::omp::requiredClauses_OMPD_error,
+ }
+ },
+ {llvm::omp::Directive::OMPD_flush,
+ {
+ llvm::omp::allowedClauses_OMPD_flush,
+ llvm::omp::allowedOnceClauses_OMPD_flush,
+ llvm::omp::allowedExclusiveClauses_OMPD_flush,
+ llvm::omp::requiredClauses_OMPD_flush,
+ }
+ },
+ {llvm::omp::Directive::OMPD_for,
+ {
+ llvm::omp::allowedClauses_OMPD_for,
+ llvm::omp::allowedOnceClauses_OMPD_for,
+ llvm::omp::allowedExclusiveClauses_OMPD_for,
+ llvm::omp::requiredClauses_OMPD_for,
+ }
+ },
+ {llvm::omp::Directive::OMPD_for_simd,
+ {
+ llvm::omp::allowedClauses_OMPD_for_simd,
+ llvm::omp::allowedOnceClauses_OMPD_for_simd,
+ llvm::omp::allowedExclusiveClauses_OMPD_for_simd,
+ llvm::omp::requiredClauses_OMPD_for_simd,
+ }
+ },
+ {llvm::omp::Directive::OMPD_masked_taskloop,
+ {
+ llvm::omp::allowedClauses_OMPD_masked_taskloop,
+ llvm::omp::allowedOnceClauses_OMPD_masked_taskloop,
+ llvm::omp::allowedExclusiveClauses_OMPD_masked_taskloop,
+ llvm::omp::requiredClauses_OMPD_masked_taskloop,
+ }
+ },
+ {llvm::omp::Directive::OMPD_masked_taskloop_simd,
+ {
+ llvm::omp::allowedClauses_OMPD_masked_taskloop_simd,
+ llvm::omp::allowedOnceClauses_OMPD_masked_taskloop_simd,
+ llvm::omp::allowedExclusiveClauses_OMPD_masked_taskloop_simd,
+ llvm::omp::requiredClauses_OMPD_masked_taskloop_simd,
+ }
+ },
+ {llvm::omp::Directive::OMPD_master,
+ {
+ llvm::omp::allowedClauses_OMPD_master,
+ llvm::omp::allowedOnceClauses_OMPD_master,
+ llvm::omp::allowedExclusiveClauses_OMPD_master,
+ llvm::omp::requiredClauses_OMPD_master,
+ }
+ },
+ {llvm::omp::Directive::OMPD_master_taskloop,
+ {
+ llvm::omp::allowedClauses_OMPD_master_taskloop,
+ llvm::omp::allowedOnceClauses_OMPD_master_taskloop,
+ llvm::omp::allowedExclusiveClauses_OMPD_master_taskloop,
+ llvm::omp::requiredClauses_OMPD_master_taskloop,
+ }
+ },
+ {llvm::omp::Directive::OMPD_master_taskloop_simd,
+ {
+ llvm::omp::allowedClauses_OMPD_master_taskloop_simd,
+ llvm::omp::allowedOnceClauses_OMPD_master_taskloop_simd,
+ llvm::omp::allowedExclusiveClauses_OMPD_master_taskloop_simd,
+ llvm::omp::requiredClauses_OMPD_master_taskloop_simd,
+ }
+ },
+ {llvm::omp::Directive::OMPD_metadirective,
+ {
+ llvm::omp::allowedClauses_OMPD_metadirective,
+ llvm::omp::allowedOnceClauses_OMPD_metadirective,
+ llvm::omp::allowedExclusiveClauses_OMPD_metadirective,
+ llvm::omp::requiredClauses_OMPD_metadirective,
+ }
+ },
+ {llvm::omp::Directive::OMPD_nothing,
+ {
+ llvm::omp::allowedClauses_OMPD_nothing,
+ llvm::omp::allowedOnceClauses_OMPD_nothing,
+ llvm::omp::allowedExclusiveClauses_OMPD_nothing,
+ llvm::omp::requiredClauses_OMPD_nothing,
+ }
+ },
+ {llvm::omp::Directive::OMPD_ordered,
+ {
+ llvm::omp::allowedClauses_OMPD_ordered,
+ llvm::omp::allowedOnceClauses_OMPD_ordered,
+ llvm::omp::allowedExclusiveClauses_OMPD_ordered,
+ llvm::omp::requiredClauses_OMPD_ordered,
+ }
+ },
+ {llvm::omp::Directive::OMPD_parallel,
+ {
+ llvm::omp::allowedClauses_OMPD_parallel,
+ llvm::omp::allowedOnceClauses_OMPD_parallel,
+ llvm::omp::allowedExclusiveClauses_OMPD_parallel,
+ llvm::omp::requiredClauses_OMPD_parallel,
+ }
+ },
+ {llvm::omp::Directive::OMPD_parallel_do,
+ {
+ llvm::omp::allowedClauses_OMPD_parallel_do,
+ llvm::omp::allowedOnceClauses_OMPD_parallel_do,
+ llvm::omp::allowedExclusiveClauses_OMPD_parallel_do,
+ llvm::omp::requiredClauses_OMPD_parallel_do,
+ }
+ },
+ {llvm::omp::Directive::OMPD_parallel_do_simd,
+ {
+ llvm::omp::allowedClauses_OMPD_parallel_do_simd,
+ llvm::omp::allowedOnceClauses_OMPD_parallel_do_simd,
+ llvm::omp::allowedExclusiveClauses_OMPD_parallel_do_simd,
+ llvm::omp::requiredClauses_OMPD_parallel_do_simd,
+ }
+ },
+ {llvm::omp::Directive::OMPD_parallel_for,
+ {
+ llvm::omp::allowedClauses_OMPD_parallel_for,
+ llvm::omp::allowedOnceClauses_OMPD_parallel_for,
+ llvm::omp::allowedExclusiveClauses_OMPD_parallel_for,
+ llvm::omp::requiredClauses_OMPD_parallel_for,
+ }
+ },
+ {llvm::omp::Directive::OMPD_parallel_for_simd,
+ {
+ llvm::omp::allowedClauses_OMPD_parallel_for_simd,
+ llvm::omp::allowedOnceClauses_OMPD_parallel_for_simd,
+ llvm::omp::allowedExclusiveClauses_OMPD_parallel_for_simd,
+ llvm::omp::requiredClauses_OMPD_parallel_for_simd,
+ }
+ },
+ {llvm::omp::Directive::OMPD_parallel_masked,
+ {
+ llvm::omp::allowedClauses_OMPD_parallel_masked,
+ llvm::omp::allowedOnceClauses_OMPD_parallel_masked,
+ llvm::omp::allowedExclusiveClauses_OMPD_parallel_masked,
+ llvm::omp::requiredClauses_OMPD_parallel_masked,
+ }
+ },
+ {llvm::omp::Directive::OMPD_parallel_masked_taskloop,
+ {
+ llvm::omp::allowedClauses_OMPD_parallel_masked_taskloop,
+ llvm::omp::allowedOnceClauses_OMPD_parallel_masked_taskloop,
+ llvm::omp::allowedExclusiveClauses_OMPD_parallel_masked_taskloop,
+ llvm::omp::requiredClauses_OMPD_parallel_masked_taskloop,
+ }
+ },
+ {llvm::omp::Directive::OMPD_parallel_masked_taskloop_simd,
+ {
+ llvm::omp::allowedClauses_OMPD_parallel_masked_taskloop_simd,
+ llvm::omp::allowedOnceClauses_OMPD_parallel_masked_taskloop_simd,
+ llvm::omp::allowedExclusiveClauses_OMPD_parallel_masked_taskloop_simd,
+ llvm::omp::requiredClauses_OMPD_parallel_masked_taskloop_simd,
+ }
+ },
+ {llvm::omp::Directive::OMPD_parallel_master,
+ {
+ llvm::omp::allowedClauses_OMPD_parallel_master,
+ llvm::omp::allowedOnceClauses_OMPD_parallel_master,
+ llvm::omp::allowedExclusiveClauses_OMPD_parallel_master,
+ llvm::omp::requiredClauses_OMPD_parallel_master,
+ }
+ },
+ {llvm::omp::Directive::OMPD_parallel_master_taskloop,
+ {
+ llvm::omp::allowedClauses_OMPD_parallel_master_taskloop,
+ llvm::omp::allowedOnceClauses_OMPD_parallel_master_taskloop,
+ llvm::omp::allowedExclusiveClauses_OMPD_parallel_master_taskloop,
+ llvm::omp::requiredClauses_OMPD_parallel_master_taskloop,
+ }
+ },
+ {llvm::omp::Directive::OMPD_parallel_master_taskloop_simd,
+ {
+ llvm::omp::allowedClauses_OMPD_parallel_master_taskloop_simd,
+ llvm::omp::allowedOnceClauses_OMPD_parallel_master_taskloop_simd,
+ llvm::omp::allowedExclusiveClauses_OMPD_parallel_master_taskloop_simd,
+ llvm::omp::requiredClauses_OMPD_parallel_master_taskloop_simd,
+ }
+ },
+ {llvm::omp::Directive::OMPD_parallel_sections,
+ {
+ llvm::omp::allowedClauses_OMPD_parallel_sections,
+ llvm::omp::allowedOnceClauses_OMPD_parallel_sections,
+ llvm::omp::allowedExclusiveClauses_OMPD_parallel_sections,
+ llvm::omp::requiredClauses_OMPD_parallel_sections,
+ }
+ },
+ {llvm::omp::Directive::OMPD_parallel_workshare,
+ {
+ llvm::omp::allowedClauses_OMPD_parallel_workshare,
+ llvm::omp::allowedOnceClauses_OMPD_parallel_workshare,
+ llvm::omp::allowedExclusiveClauses_OMPD_parallel_workshare,
+ llvm::omp::requiredClauses_OMPD_parallel_workshare,
+ }
+ },
+ {llvm::omp::Directive::OMPD_requires,
+ {
+ llvm::omp::allowedClauses_OMPD_requires,
+ llvm::omp::allowedOnceClauses_OMPD_requires,
+ llvm::omp::allowedExclusiveClauses_OMPD_requires,
+ llvm::omp::requiredClauses_OMPD_requires,
+ }
+ },
+ {llvm::omp::Directive::OMPD_scan,
+ {
+ llvm::omp::allowedClauses_OMPD_scan,
+ llvm::omp::allowedOnceClauses_OMPD_scan,
+ llvm::omp::allowedExclusiveClauses_OMPD_scan,
+ llvm::omp::requiredClauses_OMPD_scan,
+ }
+ },
+ {llvm::omp::Directive::OMPD_section,
+ {
+ llvm::omp::allowedClauses_OMPD_section,
+ llvm::omp::allowedOnceClauses_OMPD_section,
+ llvm::omp::allowedExclusiveClauses_OMPD_section,
+ llvm::omp::requiredClauses_OMPD_section,
+ }
+ },
+ {llvm::omp::Directive::OMPD_sections,
+ {
+ llvm::omp::allowedClauses_OMPD_sections,
+ llvm::omp::allowedOnceClauses_OMPD_sections,
+ llvm::omp::allowedExclusiveClauses_OMPD_sections,
+ llvm::omp::requiredClauses_OMPD_sections,
+ }
+ },
+ {llvm::omp::Directive::OMPD_simd,
+ {
+ llvm::omp::allowedClauses_OMPD_simd,
+ llvm::omp::allowedOnceClauses_OMPD_simd,
+ llvm::omp::allowedExclusiveClauses_OMPD_simd,
+ llvm::omp::requiredClauses_OMPD_simd,
+ }
+ },
+ {llvm::omp::Directive::OMPD_single,
+ {
+ llvm::omp::allowedClauses_OMPD_single,
+ llvm::omp::allowedOnceClauses_OMPD_single,
+ llvm::omp::allowedExclusiveClauses_OMPD_single,
+ llvm::omp::requiredClauses_OMPD_single,
+ }
+ },
+ {llvm::omp::Directive::OMPD_target,
+ {
+ llvm::omp::allowedClauses_OMPD_target,
+ llvm::omp::allowedOnceClauses_OMPD_target,
+ llvm::omp::allowedExclusiveClauses_OMPD_target,
+ llvm::omp::requiredClauses_OMPD_target,
+ }
+ },
+ {llvm::omp::Directive::OMPD_target_data,
+ {
+ llvm::omp::allowedClauses_OMPD_target_data,
+ llvm::omp::allowedOnceClauses_OMPD_target_data,
+ llvm::omp::allowedExclusiveClauses_OMPD_target_data,
+ llvm::omp::requiredClauses_OMPD_target_data,
+ }
+ },
+ {llvm::omp::Directive::OMPD_target_enter_data,
+ {
+ llvm::omp::allowedClauses_OMPD_target_enter_data,
+ llvm::omp::allowedOnceClauses_OMPD_target_enter_data,
+ llvm::omp::allowedExclusiveClauses_OMPD_target_enter_data,
+ llvm::omp::requiredClauses_OMPD_target_enter_data,
+ }
+ },
+ {llvm::omp::Directive::OMPD_target_exit_data,
+ {
+ llvm::omp::allowedClauses_OMPD_target_exit_data,
+ llvm::omp::allowedOnceClauses_OMPD_target_exit_data,
+ llvm::omp::allowedExclusiveClauses_OMPD_target_exit_data,
+ llvm::omp::requiredClauses_OMPD_target_exit_data,
+ }
+ },
+ {llvm::omp::Directive::OMPD_target_parallel,
+ {
+ llvm::omp::allowedClauses_OMPD_target_parallel,
+ llvm::omp::allowedOnceClauses_OMPD_target_parallel,
+ llvm::omp::allowedExclusiveClauses_OMPD_target_parallel,
+ llvm::omp::requiredClauses_OMPD_target_parallel,
+ }
+ },
+ {llvm::omp::Directive::OMPD_target_parallel_do,
+ {
+ llvm::omp::allowedClauses_OMPD_target_parallel_do,
+ llvm::omp::allowedOnceClauses_OMPD_target_parallel_do,
+ llvm::omp::allowedExclusiveClauses_OMPD_target_parallel_do,
+ llvm::omp::requiredClauses_OMPD_target_parallel_do,
+ }
+ },
+ {llvm::omp::Directive::OMPD_target_parallel_do_simd,
+ {
+ llvm::omp::allowedClauses_OMPD_target_parallel_do_simd,
+ llvm::omp::allowedOnceClauses_OMPD_target_parallel_do_simd,
+ llvm::omp::allowedExclusiveClauses_OMPD_target_parallel_do_simd,
+ llvm::omp::requiredClauses_OMPD_target_parallel_do_simd,
+ }
+ },
+ {llvm::omp::Directive::OMPD_target_parallel_for,
+ {
+ llvm::omp::allowedClauses_OMPD_target_parallel_for,
+ llvm::omp::allowedOnceClauses_OMPD_target_parallel_for,
+ llvm::omp::allowedExclusiveClauses_OMPD_target_parallel_for,
+ llvm::omp::requiredClauses_OMPD_target_parallel_for,
+ }
+ },
+ {llvm::omp::Directive::OMPD_target_parallel_for_simd,
+ {
+ llvm::omp::allowedClauses_OMPD_target_parallel_for_simd,
+ llvm::omp::allowedOnceClauses_OMPD_target_parallel_for_simd,
+ llvm::omp::allowedExclusiveClauses_OMPD_target_parallel_for_simd,
+ llvm::omp::requiredClauses_OMPD_target_parallel_for_simd,
+ }
+ },
+ {llvm::omp::Directive::OMPD_target_simd,
+ {
+ llvm::omp::allowedClauses_OMPD_target_simd,
+ llvm::omp::allowedOnceClauses_OMPD_target_simd,
+ llvm::omp::allowedExclusiveClauses_OMPD_target_simd,
+ llvm::omp::requiredClauses_OMPD_target_simd,
+ }
+ },
+ {llvm::omp::Directive::OMPD_target_teams,
+ {
+ llvm::omp::allowedClauses_OMPD_target_teams,
+ llvm::omp::allowedOnceClauses_OMPD_target_teams,
+ llvm::omp::allowedExclusiveClauses_OMPD_target_teams,
+ llvm::omp::requiredClauses_OMPD_target_teams,
+ }
+ },
+ {llvm::omp::Directive::OMPD_target_teams_distribute,
+ {
+ llvm::omp::allowedClauses_OMPD_target_teams_distribute,
+ llvm::omp::allowedOnceClauses_OMPD_target_teams_distribute,
+ llvm::omp::allowedExclusiveClauses_OMPD_target_teams_distribute,
+ llvm::omp::requiredClauses_OMPD_target_teams_distribute,
+ }
+ },
+ {llvm::omp::Directive::OMPD_target_teams_distribute_parallel_do,
+ {
+ llvm::omp::allowedClauses_OMPD_target_teams_distribute_parallel_do,
+ llvm::omp::allowedOnceClauses_OMPD_target_teams_distribute_parallel_do,
+ llvm::omp::allowedExclusiveClauses_OMPD_target_teams_distribute_parallel_do,
+ llvm::omp::requiredClauses_OMPD_target_teams_distribute_parallel_do,
+ }
+ },
+ {llvm::omp::Directive::OMPD_target_teams_distribute_parallel_do_simd,
+ {
+ llvm::omp::allowedClauses_OMPD_target_teams_distribute_parallel_do_simd,
+ llvm::omp::allowedOnceClauses_OMPD_target_teams_distribute_parallel_do_simd,
+ llvm::omp::allowedExclusiveClauses_OMPD_target_teams_distribute_parallel_do_simd,
+ llvm::omp::requiredClauses_OMPD_target_teams_distribute_parallel_do_simd,
+ }
+ },
+ {llvm::omp::Directive::OMPD_target_teams_distribute_parallel_for,
+ {
+ llvm::omp::allowedClauses_OMPD_target_teams_distribute_parallel_for,
+ llvm::omp::allowedOnceClauses_OMPD_target_teams_distribute_parallel_for,
+ llvm::omp::allowedExclusiveClauses_OMPD_target_teams_distribute_parallel_for,
+ llvm::omp::requiredClauses_OMPD_target_teams_distribute_parallel_for,
+ }
+ },
+ {llvm::omp::Directive::OMPD_target_teams_distribute_parallel_for_simd,
+ {
+ llvm::omp::allowedClauses_OMPD_target_teams_distribute_parallel_for_simd,
+ llvm::omp::allowedOnceClauses_OMPD_target_teams_distribute_parallel_for_simd,
+ llvm::omp::allowedExclusiveClauses_OMPD_target_teams_distribute_parallel_for_simd,
+ llvm::omp::requiredClauses_OMPD_target_teams_distribute_parallel_for_simd,
+ }
+ },
+ {llvm::omp::Directive::OMPD_target_teams_distribute_simd,
+ {
+ llvm::omp::allowedClauses_OMPD_target_teams_distribute_simd,
+ llvm::omp::allowedOnceClauses_OMPD_target_teams_distribute_simd,
+ llvm::omp::allowedExclusiveClauses_OMPD_target_teams_distribute_simd,
+ llvm::omp::requiredClauses_OMPD_target_teams_distribute_simd,
+ }
+ },
+ {llvm::omp::Directive::OMPD_target_update,
+ {
+ llvm::omp::allowedClauses_OMPD_target_update,
+ llvm::omp::allowedOnceClauses_OMPD_target_update,
+ llvm::omp::allowedExclusiveClauses_OMPD_target_update,
+ llvm::omp::requiredClauses_OMPD_target_update,
+ }
+ },
+ {llvm::omp::Directive::OMPD_task,
+ {
+ llvm::omp::allowedClauses_OMPD_task,
+ llvm::omp::allowedOnceClauses_OMPD_task,
+ llvm::omp::allowedExclusiveClauses_OMPD_task,
+ llvm::omp::requiredClauses_OMPD_task,
+ }
+ },
+ {llvm::omp::Directive::OMPD_taskgroup,
+ {
+ llvm::omp::allowedClauses_OMPD_taskgroup,
+ llvm::omp::allowedOnceClauses_OMPD_taskgroup,
+ llvm::omp::allowedExclusiveClauses_OMPD_taskgroup,
+ llvm::omp::requiredClauses_OMPD_taskgroup,
+ }
+ },
+ {llvm::omp::Directive::OMPD_taskloop,
+ {
+ llvm::omp::allowedClauses_OMPD_taskloop,
+ llvm::omp::allowedOnceClauses_OMPD_taskloop,
+ llvm::omp::allowedExclusiveClauses_OMPD_taskloop,
+ llvm::omp::requiredClauses_OMPD_taskloop,
+ }
+ },
+ {llvm::omp::Directive::OMPD_taskloop_simd,
+ {
+ llvm::omp::allowedClauses_OMPD_taskloop_simd,
+ llvm::omp::allowedOnceClauses_OMPD_taskloop_simd,
+ llvm::omp::allowedExclusiveClauses_OMPD_taskloop_simd,
+ llvm::omp::requiredClauses_OMPD_taskloop_simd,
+ }
+ },
+ {llvm::omp::Directive::OMPD_taskwait,
+ {
+ llvm::omp::allowedClauses_OMPD_taskwait,
+ llvm::omp::allowedOnceClauses_OMPD_taskwait,
+ llvm::omp::allowedExclusiveClauses_OMPD_taskwait,
+ llvm::omp::requiredClauses_OMPD_taskwait,
+ }
+ },
+ {llvm::omp::Directive::OMPD_taskyield,
+ {
+ llvm::omp::allowedClauses_OMPD_taskyield,
+ llvm::omp::allowedOnceClauses_OMPD_taskyield,
+ llvm::omp::allowedExclusiveClauses_OMPD_taskyield,
+ llvm::omp::requiredClauses_OMPD_taskyield,
+ }
+ },
+ {llvm::omp::Directive::OMPD_teams,
+ {
+ llvm::omp::allowedClauses_OMPD_teams,
+ llvm::omp::allowedOnceClauses_OMPD_teams,
+ llvm::omp::allowedExclusiveClauses_OMPD_teams,
+ llvm::omp::requiredClauses_OMPD_teams,
+ }
+ },
+ {llvm::omp::Directive::OMPD_teams_distribute,
+ {
+ llvm::omp::allowedClauses_OMPD_teams_distribute,
+ llvm::omp::allowedOnceClauses_OMPD_teams_distribute,
+ llvm::omp::allowedExclusiveClauses_OMPD_teams_distribute,
+ llvm::omp::requiredClauses_OMPD_teams_distribute,
+ }
+ },
+ {llvm::omp::Directive::OMPD_teams_distribute_parallel_do,
+ {
+ llvm::omp::allowedClauses_OMPD_teams_distribute_parallel_do,
+ llvm::omp::allowedOnceClauses_OMPD_teams_distribute_parallel_do,
+ llvm::omp::allowedExclusiveClauses_OMPD_teams_distribute_parallel_do,
+ llvm::omp::requiredClauses_OMPD_teams_distribute_parallel_do,
+ }
+ },
+ {llvm::omp::Directive::OMPD_teams_distribute_parallel_do_simd,
+ {
+ llvm::omp::allowedClauses_OMPD_teams_distribute_parallel_do_simd,
+ llvm::omp::allowedOnceClauses_OMPD_teams_distribute_parallel_do_simd,
+ llvm::omp::allowedExclusiveClauses_OMPD_teams_distribute_parallel_do_simd,
+ llvm::omp::requiredClauses_OMPD_teams_distribute_parallel_do_simd,
+ }
+ },
+ {llvm::omp::Directive::OMPD_teams_distribute_parallel_for,
+ {
+ llvm::omp::allowedClauses_OMPD_teams_distribute_parallel_for,
+ llvm::omp::allowedOnceClauses_OMPD_teams_distribute_parallel_for,
+ llvm::omp::allowedExclusiveClauses_OMPD_teams_distribute_parallel_for,
+ llvm::omp::requiredClauses_OMPD_teams_distribute_parallel_for,
+ }
+ },
+ {llvm::omp::Directive::OMPD_teams_distribute_parallel_for_simd,
+ {
+ llvm::omp::allowedClauses_OMPD_teams_distribute_parallel_for_simd,
+ llvm::omp::allowedOnceClauses_OMPD_teams_distribute_parallel_for_simd,
+ llvm::omp::allowedExclusiveClauses_OMPD_teams_distribute_parallel_for_simd,
+ llvm::omp::requiredClauses_OMPD_teams_distribute_parallel_for_simd,
+ }
+ },
+ {llvm::omp::Directive::OMPD_teams_distribute_simd,
+ {
+ llvm::omp::allowedClauses_OMPD_teams_distribute_simd,
+ llvm::omp::allowedOnceClauses_OMPD_teams_distribute_simd,
+ llvm::omp::allowedExclusiveClauses_OMPD_teams_distribute_simd,
+ llvm::omp::requiredClauses_OMPD_teams_distribute_simd,
+ }
+ },
+ {llvm::omp::Directive::OMPD_threadprivate,
+ {
+ llvm::omp::allowedClauses_OMPD_threadprivate,
+ llvm::omp::allowedOnceClauses_OMPD_threadprivate,
+ llvm::omp::allowedExclusiveClauses_OMPD_threadprivate,
+ llvm::omp::requiredClauses_OMPD_threadprivate,
+ }
+ },
+ {llvm::omp::Directive::OMPD_tile,
+ {
+ llvm::omp::allowedClauses_OMPD_tile,
+ llvm::omp::allowedOnceClauses_OMPD_tile,
+ llvm::omp::allowedExclusiveClauses_OMPD_tile,
+ llvm::omp::requiredClauses_OMPD_tile,
+ }
+ },
+ {llvm::omp::Directive::OMPD_unknown,
+ {
+ llvm::omp::allowedClauses_OMPD_unknown,
+ llvm::omp::allowedOnceClauses_OMPD_unknown,
+ llvm::omp::allowedExclusiveClauses_OMPD_unknown,
+ llvm::omp::requiredClauses_OMPD_unknown,
+ }
+ },
+ {llvm::omp::Directive::OMPD_unroll,
+ {
+ llvm::omp::allowedClauses_OMPD_unroll,
+ llvm::omp::allowedOnceClauses_OMPD_unroll,
+ llvm::omp::allowedExclusiveClauses_OMPD_unroll,
+ llvm::omp::requiredClauses_OMPD_unroll,
+ }
+ },
+ {llvm::omp::Directive::OMPD_workshare,
+ {
+ llvm::omp::allowedClauses_OMPD_workshare,
+ llvm::omp::allowedOnceClauses_OMPD_workshare,
+ llvm::omp::allowedExclusiveClauses_OMPD_workshare,
+ llvm::omp::requiredClauses_OMPD_workshare,
+ }
+ },
+ {llvm::omp::Directive::OMPD_dispatch,
+ {
+ llvm::omp::allowedClauses_OMPD_dispatch,
+ llvm::omp::allowedOnceClauses_OMPD_dispatch,
+ llvm::omp::allowedExclusiveClauses_OMPD_dispatch,
+ llvm::omp::requiredClauses_OMPD_dispatch,
+ }
+ },
+ {llvm::omp::Directive::OMPD_interop,
+ {
+ llvm::omp::allowedClauses_OMPD_interop,
+ llvm::omp::allowedOnceClauses_OMPD_interop,
+ llvm::omp::allowedExclusiveClauses_OMPD_interop,
+ llvm::omp::requiredClauses_OMPD_interop,
+ }
+ },
+ {llvm::omp::Directive::OMPD_loop,
+ {
+ llvm::omp::allowedClauses_OMPD_loop,
+ llvm::omp::allowedOnceClauses_OMPD_loop,
+ llvm::omp::allowedExclusiveClauses_OMPD_loop,
+ llvm::omp::requiredClauses_OMPD_loop,
+ }
+ },
+ {llvm::omp::Directive::OMPD_masked,
+ {
+ llvm::omp::allowedClauses_OMPD_masked,
+ llvm::omp::allowedOnceClauses_OMPD_masked,
+ llvm::omp::allowedExclusiveClauses_OMPD_masked,
+ llvm::omp::requiredClauses_OMPD_masked,
+ }
+ },
+ {llvm::omp::Directive::OMPD_parallel_loop,
+ {
+ llvm::omp::allowedClauses_OMPD_parallel_loop,
+ llvm::omp::allowedOnceClauses_OMPD_parallel_loop,
+ llvm::omp::allowedExclusiveClauses_OMPD_parallel_loop,
+ llvm::omp::requiredClauses_OMPD_parallel_loop,
+ }
+ },
+ {llvm::omp::Directive::OMPD_target_parallel_loop,
+ {
+ llvm::omp::allowedClauses_OMPD_target_parallel_loop,
+ llvm::omp::allowedOnceClauses_OMPD_target_parallel_loop,
+ llvm::omp::allowedExclusiveClauses_OMPD_target_parallel_loop,
+ llvm::omp::requiredClauses_OMPD_target_parallel_loop,
+ }
+ },
+ {llvm::omp::Directive::OMPD_target_teams_loop,
+ {
+ llvm::omp::allowedClauses_OMPD_target_teams_loop,
+ llvm::omp::allowedOnceClauses_OMPD_target_teams_loop,
+ llvm::omp::allowedExclusiveClauses_OMPD_target_teams_loop,
+ llvm::omp::requiredClauses_OMPD_target_teams_loop,
+ }
+ },
+ {llvm::omp::Directive::OMPD_teams_loop,
+ {
+ llvm::omp::allowedClauses_OMPD_teams_loop,
+ llvm::omp::allowedOnceClauses_OMPD_teams_loop,
+ llvm::omp::allowedExclusiveClauses_OMPD_teams_loop,
+ llvm::omp::requiredClauses_OMPD_teams_loop,
+ }
+ },
+}
+
+#endif // GEN_FLANG_DIRECTIVE_CLAUSE_MAP
+
+#ifdef GEN_FLANG_CLAUSE_PARSER_CLASSES
+#undef GEN_FLANG_CLAUSE_PARSER_CLASSES
+
+EMPTY_CLASS(AcqRel);
+EMPTY_CLASS(Acquire);
+EMPTY_CLASS(AdjustArgs);
+EMPTY_CLASS(Affinity);
+EMPTY_CLASS(Align);
+WRAPPER_CLASS(Aligned, OmpAlignedClause);
+WRAPPER_CLASS(Allocate, OmpAllocateClause);
+WRAPPER_CLASS(Allocator, ScalarIntExpr);
+EMPTY_CLASS(AppendArgs);
+EMPTY_CLASS(At);
+WRAPPER_CLASS(AtomicDefaultMemOrder, OmpAtomicDefaultMemOrderClause);
+EMPTY_CLASS(Bind);
+EMPTY_CLASS(CancellationConstructType);
+EMPTY_CLASS(Capture);
+WRAPPER_CLASS(Collapse, ScalarIntConstantExpr);
+EMPTY_CLASS(Compare);
+WRAPPER_CLASS(Copyprivate, OmpObjectList);
+WRAPPER_CLASS(Copyin, OmpObjectList);
+WRAPPER_CLASS(Default, OmpDefaultClause);
+WRAPPER_CLASS(Defaultmap, OmpDefaultmapClause);
+WRAPPER_CLASS(Depend, OmpDependClause);
+EMPTY_CLASS(Depobj);
+EMPTY_CLASS(Destroy);
+EMPTY_CLASS(Detach);
+WRAPPER_CLASS(Device, OmpDeviceClause);
+EMPTY_CLASS(DeviceType);
+WRAPPER_CLASS(DistSchedule, std::optional<ScalarIntExpr>);
+EMPTY_CLASS(DynamicAllocators);
+EMPTY_CLASS(Exclusive);
+WRAPPER_CLASS(Filter, ScalarIntExpr);
+WRAPPER_CLASS(Final, ScalarLogicalExpr);
+WRAPPER_CLASS(Firstprivate, OmpObjectList);
+EMPTY_CLASS(Flush);
+WRAPPER_CLASS(From, OmpObjectList);
+EMPTY_CLASS(Full);
+WRAPPER_CLASS(Grainsize, ScalarIntExpr);
+WRAPPER_CLASS(HasDeviceAddr, std::list<Name>);
+WRAPPER_CLASS(Hint, ConstantExpr);
+WRAPPER_CLASS(If, OmpIfClause);
+WRAPPER_CLASS(InReduction, OmpInReductionClause);
+EMPTY_CLASS(Inbranch);
+EMPTY_CLASS(Inclusive);
+EMPTY_CLASS(Indirect);
+EMPTY_CLASS(Init);
+WRAPPER_CLASS(IsDevicePtr, std::list<Name>);
+WRAPPER_CLASS(Lastprivate, OmpObjectList);
+WRAPPER_CLASS(Linear, OmpLinearClause);
+WRAPPER_CLASS(Link, OmpObjectList);
+WRAPPER_CLASS(Map, OmpMapClause);
+EMPTY_CLASS(Match);
+EMPTY_CLASS(MemoryOrder);
+EMPTY_CLASS(Mergeable);
+EMPTY_CLASS(Message);
+EMPTY_CLASS(Nogroup);
+EMPTY_CLASS(Nowait);
+WRAPPER_CLASS(Nocontext, ScalarLogicalExpr);
+WRAPPER_CLASS(Nontemporal, std::list<Name>);
+EMPTY_CLASS(Notinbranch);
+WRAPPER_CLASS(Novariants, ScalarLogicalExpr);
+WRAPPER_CLASS(NumTasks, ScalarIntExpr);
+WRAPPER_CLASS(NumTeams, ScalarIntExpr);
+WRAPPER_CLASS(NumThreads, ScalarIntExpr);
+WRAPPER_CLASS(OmpxDynCgroupMem, ScalarIntExpr);
+EMPTY_CLASS(Order);
+WRAPPER_CLASS(Ordered, std::optional<ScalarIntConstantExpr>);
+WRAPPER_CLASS(Partial, std::optional<ScalarIntConstantExpr>);
+WRAPPER_CLASS(Priority, ScalarIntExpr);
+WRAPPER_CLASS(Private, OmpObjectList);
+WRAPPER_CLASS(ProcBind, OmpProcBindClause);
+EMPTY_CLASS(Read);
+WRAPPER_CLASS(Reduction, OmpReductionClause);
+EMPTY_CLASS(Relaxed);
+EMPTY_CLASS(Release);
+EMPTY_CLASS(ReverseOffload);
+WRAPPER_CLASS(Safelen, ScalarIntConstantExpr);
+WRAPPER_CLASS(Schedule, OmpScheduleClause);
+EMPTY_CLASS(SeqCst);
+EMPTY_CLASS(Severity);
+WRAPPER_CLASS(Shared, OmpObjectList);
+EMPTY_CLASS(Simd);
+WRAPPER_CLASS(Simdlen, ScalarIntConstantExpr);
+WRAPPER_CLASS(Sizes, std::list<ScalarIntExpr>);
+WRAPPER_CLASS(TaskReduction, OmpReductionClause);
+WRAPPER_CLASS(ThreadLimit, ScalarIntExpr);
+EMPTY_CLASS(Threadprivate);
+EMPTY_CLASS(Threads);
+WRAPPER_CLASS(To, OmpObjectList);
+EMPTY_CLASS(UnifiedAddress);
+EMPTY_CLASS(UnifiedSharedMemory);
+WRAPPER_CLASS(Uniform, std::list<Name>);
+EMPTY_CLASS(Unknown);
+EMPTY_CLASS(Untied);
+EMPTY_CLASS(Update);
+EMPTY_CLASS(Use);
+EMPTY_CLASS(UseDeviceAddr);
+WRAPPER_CLASS(UseDevicePtr, std::list<Name>);
+EMPTY_CLASS(UsesAllocators);
+EMPTY_CLASS(When);
+EMPTY_CLASS(Write);
+
+#endif // GEN_FLANG_CLAUSE_PARSER_CLASSES
+
+#ifdef GEN_FLANG_CLAUSE_PARSER_CLASSES_LIST
+#undef GEN_FLANG_CLAUSE_PARSER_CLASSES_LIST
+
+AcqRel
+, Acquire
+, AdjustArgs
+, Affinity
+, Align
+, Aligned
+, Allocate
+, Allocator
+, AppendArgs
+, At
+, AtomicDefaultMemOrder
+, Bind
+, CancellationConstructType
+, Capture
+, Collapse
+, Compare
+, Copyprivate
+, Copyin
+, Default
+, Defaultmap
+, Depend
+, Depobj
+, Destroy
+, Detach
+, Device
+, DeviceType
+, DistSchedule
+, DynamicAllocators
+, Exclusive
+, Filter
+, Final
+, Firstprivate
+, Flush
+, From
+, Full
+, Grainsize
+, HasDeviceAddr
+, Hint
+, If
+, InReduction
+, Inbranch
+, Inclusive
+, Indirect
+, Init
+, IsDevicePtr
+, Lastprivate
+, Linear
+, Link
+, Map
+, Match
+, MemoryOrder
+, Mergeable
+, Message
+, Nogroup
+, Nowait
+, Nocontext
+, Nontemporal
+, Notinbranch
+, Novariants
+, NumTasks
+, NumTeams
+, NumThreads
+, OmpxDynCgroupMem
+, Order
+, Ordered
+, Partial
+, Priority
+, Private
+, ProcBind
+, Read
+, Reduction
+, Relaxed
+, Release
+, ReverseOffload
+, Safelen
+, Schedule
+, SeqCst
+, Severity
+, Shared
+, Simd
+, Simdlen
+, Sizes
+, TaskReduction
+, ThreadLimit
+, Threadprivate
+, Threads
+, To
+, UnifiedAddress
+, UnifiedSharedMemory
+, Uniform
+, Unknown
+, Untied
+, Update
+, Use
+, UseDeviceAddr
+, UseDevicePtr
+, UsesAllocators
+, When
+, Write
+
+#endif // GEN_FLANG_CLAUSE_PARSER_CLASSES_LIST
+
+#ifdef GEN_FLANG_DUMP_PARSE_TREE_CLAUSES
+#undef GEN_FLANG_DUMP_PARSE_TREE_CLAUSES
+
+NODE(OmpClause, AcqRel)
+NODE(OmpClause, Acquire)
+NODE(OmpClause, AdjustArgs)
+NODE(OmpClause, Affinity)
+NODE(OmpClause, Align)
+NODE(OmpClause, Aligned)
+NODE(OmpClause, Allocate)
+NODE(OmpClause, Allocator)
+NODE(OmpClause, AppendArgs)
+NODE(OmpClause, At)
+NODE(OmpClause, AtomicDefaultMemOrder)
+NODE(OmpClause, Bind)
+NODE(OmpClause, CancellationConstructType)
+NODE(OmpClause, Capture)
+NODE(OmpClause, Collapse)
+NODE(OmpClause, Compare)
+NODE(OmpClause, Copyprivate)
+NODE(OmpClause, Copyin)
+NODE(OmpClause, Default)
+NODE(OmpClause, Defaultmap)
+NODE(OmpClause, Depend)
+NODE(OmpClause, Depobj)
+NODE(OmpClause, Destroy)
+NODE(OmpClause, Detach)
+NODE(OmpClause, Device)
+NODE(OmpClause, DeviceType)
+NODE(OmpClause, DistSchedule)
+NODE(OmpClause, DynamicAllocators)
+NODE(OmpClause, Exclusive)
+NODE(OmpClause, Filter)
+NODE(OmpClause, Final)
+NODE(OmpClause, Firstprivate)
+NODE(OmpClause, Flush)
+NODE(OmpClause, From)
+NODE(OmpClause, Full)
+NODE(OmpClause, Grainsize)
+NODE(OmpClause, HasDeviceAddr)
+NODE(OmpClause, Hint)
+NODE(OmpClause, If)
+NODE(OmpClause, InReduction)
+NODE(OmpClause, Inbranch)
+NODE(OmpClause, Inclusive)
+NODE(OmpClause, Indirect)
+NODE(OmpClause, Init)
+NODE(OmpClause, IsDevicePtr)
+NODE(OmpClause, Lastprivate)
+NODE(OmpClause, Linear)
+NODE(OmpClause, Link)
+NODE(OmpClause, Map)
+NODE(OmpClause, Match)
+NODE(OmpClause, MemoryOrder)
+NODE(OmpClause, Mergeable)
+NODE(OmpClause, Message)
+NODE(OmpClause, Nogroup)
+NODE(OmpClause, Nowait)
+NODE(OmpClause, Nocontext)
+NODE(OmpClause, Nontemporal)
+NODE(OmpClause, Notinbranch)
+NODE(OmpClause, Novariants)
+NODE(OmpClause, NumTasks)
+NODE(OmpClause, NumTeams)
+NODE(OmpClause, NumThreads)
+NODE(OmpClause, OmpxDynCgroupMem)
+NODE(OmpClause, Order)
+NODE(OmpClause, Ordered)
+NODE(OmpClause, Partial)
+NODE(OmpClause, Priority)
+NODE(OmpClause, Private)
+NODE(OmpClause, ProcBind)
+NODE(OmpClause, Read)
+NODE(OmpClause, Reduction)
+NODE(OmpClause, Relaxed)
+NODE(OmpClause, Release)
+NODE(OmpClause, ReverseOffload)
+NODE(OmpClause, Safelen)
+NODE(OmpClause, Schedule)
+NODE(OmpClause, SeqCst)
+NODE(OmpClause, Severity)
+NODE(OmpClause, Shared)
+NODE(OmpClause, Simd)
+NODE(OmpClause, Simdlen)
+NODE(OmpClause, Sizes)
+NODE(OmpClause, TaskReduction)
+NODE(OmpClause, ThreadLimit)
+NODE(OmpClause, Threadprivate)
+NODE(OmpClause, Threads)
+NODE(OmpClause, To)
+NODE(OmpClause, UnifiedAddress)
+NODE(OmpClause, UnifiedSharedMemory)
+NODE(OmpClause, Uniform)
+NODE(OmpClause, Unknown)
+NODE(OmpClause, Untied)
+NODE(OmpClause, Update)
+NODE(OmpClause, Use)
+NODE(OmpClause, UseDeviceAddr)
+NODE(OmpClause, UseDevicePtr)
+NODE(OmpClause, UsesAllocators)
+NODE(OmpClause, When)
+NODE(OmpClause, Write)
+
+#endif // GEN_FLANG_DUMP_PARSE_TREE_CLAUSES
+
+#ifdef GEN_FLANG_CLAUSE_UNPARSE
+#undef GEN_FLANG_CLAUSE_UNPARSE
+
+void Before(const OmpClause::AcqRel &) { Word("ACQ_REL"); }
+void Before(const OmpClause::Acquire &) { Word("ACQUIRE"); }
+void Before(const OmpClause::AdjustArgs &) { Word("ADJUST_ARGS"); }
+void Before(const OmpClause::Affinity &) { Word("AFFINITY"); }
+void Before(const OmpClause::Align &) { Word("ALIGN"); }
+void Unparse(const OmpClause::Aligned &x) {
+ Word("ALIGNED");
+ Put("(");
+ Walk(x.v);
+ Put(")");
+}
+void Unparse(const OmpClause::Allocate &x) {
+ Word("ALLOCATE");
+ Put("(");
+ Walk(x.v);
+ Put(")");
+}
+void Unparse(const OmpClause::Allocator &x) {
+ Word("ALLOCATOR");
+ Put("(");
+ Walk(x.v);
+ Put(")");
+}
+void Before(const OmpClause::AppendArgs &) { Word("APPEND_ARGS"); }
+void Before(const OmpClause::At &) { Word("AT"); }
+void Unparse(const OmpClause::AtomicDefaultMemOrder &x) {
+ Word("ATOMIC_DEFAULT_MEM_ORDER");
+ Put("(");
+ Walk(x.v);
+ Put(")");
+}
+void Before(const OmpClause::Bind &) { Word("BIND"); }
+void Before(const OmpClause::CancellationConstructType &) { Word("CANCELLATION_CONSTRUCT_TYPE"); }
+void Before(const OmpClause::Capture &) { Word("CAPTURE"); }
+void Unparse(const OmpClause::Collapse &x) {
+ Word("COLLAPSE");
+ Put("(");
+ Walk(x.v);
+ Put(")");
+}
+void Before(const OmpClause::Compare &) { Word("COMPARE"); }
+void Unparse(const OmpClause::Copyprivate &x) {
+ Word("COPYPRIVATE");
+ Put("(");
+ Walk(x.v);
+ Put(")");
+}
+void Unparse(const OmpClause::Copyin &x) {
+ Word("COPYIN");
+ Put("(");
+ Walk(x.v);
+ Put(")");
+}
+void Unparse(const OmpClause::Default &x) {
+ Word("DEFAULT");
+ Put("(");
+ Walk(x.v);
+ Put(")");
+}
+void Unparse(const OmpClause::Defaultmap &x) {
+ Word("DEFAULTMAP");
+ Put("(");
+ Walk(x.v);
+ Put(")");
+}
+void Unparse(const OmpClause::Depend &x) {
+ Word("DEPEND");
+ Put("(");
+ Walk(x.v);
+ Put(")");
+}
+void Before(const OmpClause::Depobj &) { Word("DEPOBJ"); }
+void Before(const OmpClause::Destroy &) { Word("DESTROY"); }
+void Before(const OmpClause::Detach &) { Word("DETACH"); }
+void Unparse(const OmpClause::Device &x) {
+ Word("DEVICE");
+ Put("(");
+ Walk(x.v);
+ Put(")");
+}
+void Before(const OmpClause::DeviceType &) { Word("DEVICE_TYPE"); }
+void Unparse(const OmpClause::DistSchedule &x) {
+ Word("DIST_SCHEDULE");
+ Walk("(", x.v, ")");
+}
+void Before(const OmpClause::DynamicAllocators &) { Word("DYNAMIC_ALLOCATORS"); }
+void Before(const OmpClause::Exclusive &) { Word("EXCLUSIVE"); }
+void Unparse(const OmpClause::Filter &x) {
+ Word("FILTER");
+ Put("(");
+ Walk(x.v);
+ Put(")");
+}
+void Unparse(const OmpClause::Final &x) {
+ Word("FINAL");
+ Put("(");
+ Walk(x.v);
+ Put(")");
+}
+void Unparse(const OmpClause::Firstprivate &x) {
+ Word("FIRSTPRIVATE");
+ Put("(");
+ Walk(x.v);
+ Put(")");
+}
+void Before(const OmpClause::Flush &) { Word("FLUSH"); }
+void Unparse(const OmpClause::From &x) {
+ Word("FROM");
+ Put("(");
+ Walk(x.v);
+ Put(")");
+}
+void Before(const OmpClause::Full &) { Word("FULL"); }
+void Unparse(const OmpClause::Grainsize &x) {
+ Word("GRAINSIZE");
+ Put("(");
+ Walk(x.v);
+ Put(")");
+}
+void Unparse(const OmpClause::HasDeviceAddr &x) {
+ Word("HAS_DEVICE_ADDR");
+ Put("(");
+ Walk(x.v, ",");
+ Put(")");
+}
+void Unparse(const OmpClause::Hint &x) {
+ Word("HINT");
+ Put("(");
+ Walk(x.v);
+ Put(")");
+}
+void Unparse(const OmpClause::If &x) {
+ Word("IF");
+ Put("(");
+ Walk(x.v);
+ Put(")");
+}
+void Unparse(const OmpClause::InReduction &x) {
+ Word("IN_REDUCTION");
+ Put("(");
+ Walk(x.v);
+ Put(")");
+}
+void Before(const OmpClause::Inbranch &) { Word("INBRANCH"); }
+void Before(const OmpClause::Inclusive &) { Word("INCLUSIVE"); }
+void Before(const OmpClause::Indirect &) { Word("INDIRECT"); }
+void Before(const OmpClause::Init &) { Word("INIT"); }
+void Unparse(const OmpClause::IsDevicePtr &x) {
+ Word("IS_DEVICE_PTR");
+ Put("(");
+ Walk(x.v, ",");
+ Put(")");
+}
+void Unparse(const OmpClause::Lastprivate &x) {
+ Word("LASTPRIVATE");
+ Put("(");
+ Walk(x.v);
+ Put(")");
+}
+void Unparse(const OmpClause::Linear &x) {
+ Word("LINEAR");
+ Put("(");
+ Walk(x.v);
+ Put(")");
+}
+void Unparse(const OmpClause::Link &x) {
+ Word("LINK");
+ Put("(");
+ Walk(x.v);
+ Put(")");
+}
+void Unparse(const OmpClause::Map &x) {
+ Word("MAP");
+ Put("(");
+ Walk(x.v);
+ Put(")");
+}
+void Before(const OmpClause::Match &) { Word("MATCH"); }
+void Before(const OmpClause::MemoryOrder &) { Word("MEMORY_ORDER"); }
+void Before(const OmpClause::Mergeable &) { Word("MERGEABLE"); }
+void Before(const OmpClause::Message &) { Word("MESSAGE"); }
+void Before(const OmpClause::Nogroup &) { Word("NOGROUP"); }
+void Before(const OmpClause::Nowait &) { Word("NOWAIT"); }
+void Unparse(const OmpClause::Nocontext &x) {
+ Word("NOCONTEXT");
+ Put("(");
+ Walk(x.v);
+ Put(")");
+}
+void Unparse(const OmpClause::Nontemporal &x) {
+ Word("NONTEMPORAL");
+ Put("(");
+ Walk(x.v, ",");
+ Put(")");
+}
+void Before(const OmpClause::Notinbranch &) { Word("NOTINBRANCH"); }
+void Unparse(const OmpClause::Novariants &x) {
+ Word("NOVARIANTS");
+ Put("(");
+ Walk(x.v);
+ Put(")");
+}
+void Unparse(const OmpClause::NumTasks &x) {
+ Word("NUM_TASKS");
+ Put("(");
+ Walk(x.v);
+ Put(")");
+}
+void Unparse(const OmpClause::NumTeams &x) {
+ Word("NUM_TEAMS");
+ Put("(");
+ Walk(x.v);
+ Put(")");
+}
+void Unparse(const OmpClause::NumThreads &x) {
+ Word("NUM_THREADS");
+ Put("(");
+ Walk(x.v);
+ Put(")");
+}
+void Unparse(const OmpClause::OmpxDynCgroupMem &x) {
+ Word("OMPX_DYN_CGROUP_MEM");
+ Put("(");
+ Walk(x.v);
+ Put(")");
+}
+void Before(const OmpClause::Order &) { Word("ORDER"); }
+void Unparse(const OmpClause::Ordered &x) {
+ Word("ORDERED");
+ Walk("(", x.v, ")");
+}
+void Unparse(const OmpClause::Partial &x) {
+ Word("PARTIAL");
+ Walk("(", x.v, ")");
+}
+void Unparse(const OmpClause::Priority &x) {
+ Word("PRIORITY");
+ Put("(");
+ Walk(x.v);
+ Put(")");
+}
+void Unparse(const OmpClause::Private &x) {
+ Word("PRIVATE");
+ Put("(");
+ Walk(x.v);
+ Put(")");
+}
+void Unparse(const OmpClause::ProcBind &x) {
+ Word("PROC_BIND");
+ Put("(");
+ Walk(x.v);
+ Put(")");
+}
+void Before(const OmpClause::Read &) { Word("READ"); }
+void Unparse(const OmpClause::Reduction &x) {
+ Word("REDUCTION");
+ Put("(");
+ Walk(x.v);
+ Put(")");
+}
+void Before(const OmpClause::Relaxed &) { Word("RELAXED"); }
+void Before(const OmpClause::Release &) { Word("RELEASE"); }
+void Before(const OmpClause::ReverseOffload &) { Word("REVERSE_OFFLOAD"); }
+void Unparse(const OmpClause::Safelen &x) {
+ Word("SAFELEN");
+ Put("(");
+ Walk(x.v);
+ Put(")");
+}
+void Unparse(const OmpClause::Schedule &x) {
+ Word("SCHEDULE");
+ Put("(");
+ Walk(x.v);
+ Put(")");
+}
+void Before(const OmpClause::SeqCst &) { Word("SEQ_CST"); }
+void Before(const OmpClause::Severity &) { Word("SEVERITY"); }
+void Unparse(const OmpClause::Shared &x) {
+ Word("SHARED");
+ Put("(");
+ Walk(x.v);
+ Put(")");
+}
+void Before(const OmpClause::Simd &) { Word("SIMD"); }
+void Unparse(const OmpClause::Simdlen &x) {
+ Word("SIMDLEN");
+ Put("(");
+ Walk(x.v);
+ Put(")");
+}
+void Unparse(const OmpClause::Sizes &x) {
+ Word("SIZES");
+ Put("(");
+ Walk(x.v, ",");
+ Put(")");
+}
+void Unparse(const OmpClause::TaskReduction &x) {
+ Word("TASK_REDUCTION");
+ Put("(");
+ Walk(x.v);
+ Put(")");
+}
+void Unparse(const OmpClause::ThreadLimit &x) {
+ Word("THREAD_LIMIT");
+ Put("(");
+ Walk(x.v);
+ Put(")");
+}
+void Before(const OmpClause::Threadprivate &) { Word("THREADPRIVATE"); }
+void Before(const OmpClause::Threads &) { Word("THREADS"); }
+void Unparse(const OmpClause::To &x) {
+ Word("TO");
+ Put("(");
+ Walk(x.v);
+ Put(")");
+}
+void Before(const OmpClause::UnifiedAddress &) { Word("UNIFIED_ADDRESS"); }
+void Before(const OmpClause::UnifiedSharedMemory &) { Word("UNIFIED_SHARED_MEMORY"); }
+void Unparse(const OmpClause::Uniform &x) {
+ Word("UNIFORM");
+ Put("(");
+ Walk(x.v, ",");
+ Put(")");
+}
+void Before(const OmpClause::Unknown &) { Word("UNKNOWN"); }
+void Before(const OmpClause::Untied &) { Word("UNTIED"); }
+void Before(const OmpClause::Update &) { Word("UPDATE"); }
+void Before(const OmpClause::Use &) { Word("USE"); }
+void Before(const OmpClause::UseDeviceAddr &) { Word("USE_DEVICE_ADDR"); }
+void Unparse(const OmpClause::UseDevicePtr &x) {
+ Word("USE_DEVICE_PTR");
+ Put("(");
+ Walk(x.v, ",");
+ Put(")");
+}
+void Before(const OmpClause::UsesAllocators &) { Word("USES_ALLOCATORS"); }
+void Before(const OmpClause::When &) { Word("WHEN"); }
+void Before(const OmpClause::Write &) { Word("WRITE"); }
+
+#endif // GEN_FLANG_CLAUSE_UNPARSE
+
+#ifdef GEN_FLANG_CLAUSE_CHECK_ENTER
+#undef GEN_FLANG_CLAUSE_CHECK_ENTER
+
+void Enter(const parser::OmpClause::AcqRel &);
+void Enter(const parser::OmpClause::Acquire &);
+void Enter(const parser::OmpClause::AdjustArgs &);
+void Enter(const parser::OmpClause::Affinity &);
+void Enter(const parser::OmpClause::Align &);
+void Enter(const parser::OmpClause::Aligned &);
+void Enter(const parser::OmpClause::Allocate &);
+void Enter(const parser::OmpClause::Allocator &);
+void Enter(const parser::OmpClause::AppendArgs &);
+void Enter(const parser::OmpClause::At &);
+void Enter(const parser::OmpClause::AtomicDefaultMemOrder &);
+void Enter(const parser::OmpClause::Bind &);
+void Enter(const parser::OmpClause::CancellationConstructType &);
+void Enter(const parser::OmpClause::Capture &);
+void Enter(const parser::OmpClause::Collapse &);
+void Enter(const parser::OmpClause::Compare &);
+void Enter(const parser::OmpClause::Copyprivate &);
+void Enter(const parser::OmpClause::Copyin &);
+void Enter(const parser::OmpClause::Default &);
+void Enter(const parser::OmpClause::Defaultmap &);
+void Enter(const parser::OmpClause::Depend &);
+void Enter(const parser::OmpClause::Depobj &);
+void Enter(const parser::OmpClause::Destroy &);
+void Enter(const parser::OmpClause::Detach &);
+void Enter(const parser::OmpClause::Device &);
+void Enter(const parser::OmpClause::DeviceType &);
+void Enter(const parser::OmpClause::DistSchedule &);
+void Enter(const parser::OmpClause::DynamicAllocators &);
+void Enter(const parser::OmpClause::Exclusive &);
+void Enter(const parser::OmpClause::Filter &);
+void Enter(const parser::OmpClause::Final &);
+void Enter(const parser::OmpClause::Firstprivate &);
+void Enter(const parser::OmpClause::Flush &);
+void Enter(const parser::OmpClause::From &);
+void Enter(const parser::OmpClause::Full &);
+void Enter(const parser::OmpClause::Grainsize &);
+void Enter(const parser::OmpClause::HasDeviceAddr &);
+void Enter(const parser::OmpClause::Hint &);
+void Enter(const parser::OmpClause::If &);
+void Enter(const parser::OmpClause::InReduction &);
+void Enter(const parser::OmpClause::Inbranch &);
+void Enter(const parser::OmpClause::Inclusive &);
+void Enter(const parser::OmpClause::Indirect &);
+void Enter(const parser::OmpClause::Init &);
+void Enter(const parser::OmpClause::IsDevicePtr &);
+void Enter(const parser::OmpClause::Lastprivate &);
+void Enter(const parser::OmpClause::Linear &);
+void Enter(const parser::OmpClause::Link &);
+void Enter(const parser::OmpClause::Map &);
+void Enter(const parser::OmpClause::Match &);
+void Enter(const parser::OmpClause::MemoryOrder &);
+void Enter(const parser::OmpClause::Mergeable &);
+void Enter(const parser::OmpClause::Message &);
+void Enter(const parser::OmpClause::Nogroup &);
+void Enter(const parser::OmpClause::Nowait &);
+void Enter(const parser::OmpClause::Nocontext &);
+void Enter(const parser::OmpClause::Nontemporal &);
+void Enter(const parser::OmpClause::Notinbranch &);
+void Enter(const parser::OmpClause::Novariants &);
+void Enter(const parser::OmpClause::NumTasks &);
+void Enter(const parser::OmpClause::NumTeams &);
+void Enter(const parser::OmpClause::NumThreads &);
+void Enter(const parser::OmpClause::OmpxDynCgroupMem &);
+void Enter(const parser::OmpClause::Order &);
+void Enter(const parser::OmpClause::Ordered &);
+void Enter(const parser::OmpClause::Partial &);
+void Enter(const parser::OmpClause::Priority &);
+void Enter(const parser::OmpClause::Private &);
+void Enter(const parser::OmpClause::ProcBind &);
+void Enter(const parser::OmpClause::Read &);
+void Enter(const parser::OmpClause::Reduction &);
+void Enter(const parser::OmpClause::Relaxed &);
+void Enter(const parser::OmpClause::Release &);
+void Enter(const parser::OmpClause::ReverseOffload &);
+void Enter(const parser::OmpClause::Safelen &);
+void Enter(const parser::OmpClause::Schedule &);
+void Enter(const parser::OmpClause::SeqCst &);
+void Enter(const parser::OmpClause::Severity &);
+void Enter(const parser::OmpClause::Shared &);
+void Enter(const parser::OmpClause::Simd &);
+void Enter(const parser::OmpClause::Simdlen &);
+void Enter(const parser::OmpClause::Sizes &);
+void Enter(const parser::OmpClause::TaskReduction &);
+void Enter(const parser::OmpClause::ThreadLimit &);
+void Enter(const parser::OmpClause::Threadprivate &);
+void Enter(const parser::OmpClause::Threads &);
+void Enter(const parser::OmpClause::To &);
+void Enter(const parser::OmpClause::UnifiedAddress &);
+void Enter(const parser::OmpClause::UnifiedSharedMemory &);
+void Enter(const parser::OmpClause::Uniform &);
+void Enter(const parser::OmpClause::Unknown &);
+void Enter(const parser::OmpClause::Untied &);
+void Enter(const parser::OmpClause::Update &);
+void Enter(const parser::OmpClause::Use &);
+void Enter(const parser::OmpClause::UseDeviceAddr &);
+void Enter(const parser::OmpClause::UseDevicePtr &);
+void Enter(const parser::OmpClause::UsesAllocators &);
+void Enter(const parser::OmpClause::When &);
+void Enter(const parser::OmpClause::Write &);
+
+#endif // GEN_FLANG_CLAUSE_CHECK_ENTER
+
+#ifdef GEN_FLANG_CLAUSE_PARSER_KIND_MAP
+#undef GEN_FLANG_CLAUSE_PARSER_KIND_MAP
+
+if constexpr (std::is_same_v<A, parser::OmpClause::AcqRel>)
+ return llvm::omp::Clause::OMPC_acq_rel;
+if constexpr (std::is_same_v<A, parser::OmpClause::Acquire>)
+ return llvm::omp::Clause::OMPC_acquire;
+if constexpr (std::is_same_v<A, parser::OmpClause::AdjustArgs>)
+ return llvm::omp::Clause::OMPC_adjust_args;
+if constexpr (std::is_same_v<A, parser::OmpClause::Affinity>)
+ return llvm::omp::Clause::OMPC_affinity;
+if constexpr (std::is_same_v<A, parser::OmpClause::Align>)
+ return llvm::omp::Clause::OMPC_align;
+if constexpr (std::is_same_v<A, parser::OmpClause::Aligned>)
+ return llvm::omp::Clause::OMPC_aligned;
+if constexpr (std::is_same_v<A, parser::OmpClause::Allocate>)
+ return llvm::omp::Clause::OMPC_allocate;
+if constexpr (std::is_same_v<A, parser::OmpClause::Allocator>)
+ return llvm::omp::Clause::OMPC_allocator;
+if constexpr (std::is_same_v<A, parser::OmpClause::AppendArgs>)
+ return llvm::omp::Clause::OMPC_append_args;
+if constexpr (std::is_same_v<A, parser::OmpClause::At>)
+ return llvm::omp::Clause::OMPC_at;
+if constexpr (std::is_same_v<A, parser::OmpClause::AtomicDefaultMemOrder>)
+ return llvm::omp::Clause::OMPC_atomic_default_mem_order;
+if constexpr (std::is_same_v<A, parser::OmpClause::Bind>)
+ return llvm::omp::Clause::OMPC_bind;
+if constexpr (std::is_same_v<A, parser::OmpClause::CancellationConstructType>)
+ return llvm::omp::Clause::OMPC_cancellation_construct_type;
+if constexpr (std::is_same_v<A, parser::OmpClause::Capture>)
+ return llvm::omp::Clause::OMPC_capture;
+if constexpr (std::is_same_v<A, parser::OmpClause::Collapse>)
+ return llvm::omp::Clause::OMPC_collapse;
+if constexpr (std::is_same_v<A, parser::OmpClause::Compare>)
+ return llvm::omp::Clause::OMPC_compare;
+if constexpr (std::is_same_v<A, parser::OmpClause::Copyprivate>)
+ return llvm::omp::Clause::OMPC_copyprivate;
+if constexpr (std::is_same_v<A, parser::OmpClause::Copyin>)
+ return llvm::omp::Clause::OMPC_copyin;
+if constexpr (std::is_same_v<A, parser::OmpClause::Default>)
+ return llvm::omp::Clause::OMPC_default;
+if constexpr (std::is_same_v<A, parser::OmpClause::Defaultmap>)
+ return llvm::omp::Clause::OMPC_defaultmap;
+if constexpr (std::is_same_v<A, parser::OmpClause::Depend>)
+ return llvm::omp::Clause::OMPC_depend;
+if constexpr (std::is_same_v<A, parser::OmpClause::Depobj>)
+ return llvm::omp::Clause::OMPC_depobj;
+if constexpr (std::is_same_v<A, parser::OmpClause::Destroy>)
+ return llvm::omp::Clause::OMPC_destroy;
+if constexpr (std::is_same_v<A, parser::OmpClause::Detach>)
+ return llvm::omp::Clause::OMPC_detach;
+if constexpr (std::is_same_v<A, parser::OmpClause::Device>)
+ return llvm::omp::Clause::OMPC_device;
+if constexpr (std::is_same_v<A, parser::OmpClause::DeviceType>)
+ return llvm::omp::Clause::OMPC_device_type;
+if constexpr (std::is_same_v<A, parser::OmpClause::DistSchedule>)
+ return llvm::omp::Clause::OMPC_dist_schedule;
+if constexpr (std::is_same_v<A, parser::OmpClause::DynamicAllocators>)
+ return llvm::omp::Clause::OMPC_dynamic_allocators;
+if constexpr (std::is_same_v<A, parser::OmpClause::Exclusive>)
+ return llvm::omp::Clause::OMPC_exclusive;
+if constexpr (std::is_same_v<A, parser::OmpClause::Filter>)
+ return llvm::omp::Clause::OMPC_filter;
+if constexpr (std::is_same_v<A, parser::OmpClause::Final>)
+ return llvm::omp::Clause::OMPC_final;
+if constexpr (std::is_same_v<A, parser::OmpClause::Firstprivate>)
+ return llvm::omp::Clause::OMPC_firstprivate;
+if constexpr (std::is_same_v<A, parser::OmpClause::Flush>)
+ return llvm::omp::Clause::OMPC_flush;
+if constexpr (std::is_same_v<A, parser::OmpClause::From>)
+ return llvm::omp::Clause::OMPC_from;
+if constexpr (std::is_same_v<A, parser::OmpClause::Full>)
+ return llvm::omp::Clause::OMPC_full;
+if constexpr (std::is_same_v<A, parser::OmpClause::Grainsize>)
+ return llvm::omp::Clause::OMPC_grainsize;
+if constexpr (std::is_same_v<A, parser::OmpClause::HasDeviceAddr>)
+ return llvm::omp::Clause::OMPC_has_device_addr;
+if constexpr (std::is_same_v<A, parser::OmpClause::Hint>)
+ return llvm::omp::Clause::OMPC_hint;
+if constexpr (std::is_same_v<A, parser::OmpClause::If>)
+ return llvm::omp::Clause::OMPC_if;
+if constexpr (std::is_same_v<A, parser::OmpClause::InReduction>)
+ return llvm::omp::Clause::OMPC_in_reduction;
+if constexpr (std::is_same_v<A, parser::OmpClause::Inbranch>)
+ return llvm::omp::Clause::OMPC_inbranch;
+if constexpr (std::is_same_v<A, parser::OmpClause::Inclusive>)
+ return llvm::omp::Clause::OMPC_inclusive;
+if constexpr (std::is_same_v<A, parser::OmpClause::Indirect>)
+ return llvm::omp::Clause::OMPC_indirect;
+if constexpr (std::is_same_v<A, parser::OmpClause::Init>)
+ return llvm::omp::Clause::OMPC_init;
+if constexpr (std::is_same_v<A, parser::OmpClause::IsDevicePtr>)
+ return llvm::omp::Clause::OMPC_is_device_ptr;
+if constexpr (std::is_same_v<A, parser::OmpClause::Lastprivate>)
+ return llvm::omp::Clause::OMPC_lastprivate;
+if constexpr (std::is_same_v<A, parser::OmpClause::Linear>)
+ return llvm::omp::Clause::OMPC_linear;
+if constexpr (std::is_same_v<A, parser::OmpClause::Link>)
+ return llvm::omp::Clause::OMPC_link;
+if constexpr (std::is_same_v<A, parser::OmpClause::Map>)
+ return llvm::omp::Clause::OMPC_map;
+if constexpr (std::is_same_v<A, parser::OmpClause::Match>)
+ return llvm::omp::Clause::OMPC_match;
+if constexpr (std::is_same_v<A, parser::OmpClause::MemoryOrder>)
+ return llvm::omp::Clause::OMPC_memory_order;
+if constexpr (std::is_same_v<A, parser::OmpClause::Mergeable>)
+ return llvm::omp::Clause::OMPC_mergeable;
+if constexpr (std::is_same_v<A, parser::OmpClause::Message>)
+ return llvm::omp::Clause::OMPC_message;
+if constexpr (std::is_same_v<A, parser::OmpClause::Nogroup>)
+ return llvm::omp::Clause::OMPC_nogroup;
+if constexpr (std::is_same_v<A, parser::OmpClause::Nowait>)
+ return llvm::omp::Clause::OMPC_nowait;
+if constexpr (std::is_same_v<A, parser::OmpClause::Nocontext>)
+ return llvm::omp::Clause::OMPC_nocontext;
+if constexpr (std::is_same_v<A, parser::OmpClause::Nontemporal>)
+ return llvm::omp::Clause::OMPC_nontemporal;
+if constexpr (std::is_same_v<A, parser::OmpClause::Notinbranch>)
+ return llvm::omp::Clause::OMPC_notinbranch;
+if constexpr (std::is_same_v<A, parser::OmpClause::Novariants>)
+ return llvm::omp::Clause::OMPC_novariants;
+if constexpr (std::is_same_v<A, parser::OmpClause::NumTasks>)
+ return llvm::omp::Clause::OMPC_num_tasks;
+if constexpr (std::is_same_v<A, parser::OmpClause::NumTeams>)
+ return llvm::omp::Clause::OMPC_num_teams;
+if constexpr (std::is_same_v<A, parser::OmpClause::NumThreads>)
+ return llvm::omp::Clause::OMPC_num_threads;
+if constexpr (std::is_same_v<A, parser::OmpClause::OmpxDynCgroupMem>)
+ return llvm::omp::Clause::OMPC_ompx_dyn_cgroup_mem;
+if constexpr (std::is_same_v<A, parser::OmpClause::Order>)
+ return llvm::omp::Clause::OMPC_order;
+if constexpr (std::is_same_v<A, parser::OmpClause::Ordered>)
+ return llvm::omp::Clause::OMPC_ordered;
+if constexpr (std::is_same_v<A, parser::OmpClause::Partial>)
+ return llvm::omp::Clause::OMPC_partial;
+if constexpr (std::is_same_v<A, parser::OmpClause::Priority>)
+ return llvm::omp::Clause::OMPC_priority;
+if constexpr (std::is_same_v<A, parser::OmpClause::Private>)
+ return llvm::omp::Clause::OMPC_private;
+if constexpr (std::is_same_v<A, parser::OmpClause::ProcBind>)
+ return llvm::omp::Clause::OMPC_proc_bind;
+if constexpr (std::is_same_v<A, parser::OmpClause::Read>)
+ return llvm::omp::Clause::OMPC_read;
+if constexpr (std::is_same_v<A, parser::OmpClause::Reduction>)
+ return llvm::omp::Clause::OMPC_reduction;
+if constexpr (std::is_same_v<A, parser::OmpClause::Relaxed>)
+ return llvm::omp::Clause::OMPC_relaxed;
+if constexpr (std::is_same_v<A, parser::OmpClause::Release>)
+ return llvm::omp::Clause::OMPC_release;
+if constexpr (std::is_same_v<A, parser::OmpClause::ReverseOffload>)
+ return llvm::omp::Clause::OMPC_reverse_offload;
+if constexpr (std::is_same_v<A, parser::OmpClause::Safelen>)
+ return llvm::omp::Clause::OMPC_safelen;
+if constexpr (std::is_same_v<A, parser::OmpClause::Schedule>)
+ return llvm::omp::Clause::OMPC_schedule;
+if constexpr (std::is_same_v<A, parser::OmpClause::SeqCst>)
+ return llvm::omp::Clause::OMPC_seq_cst;
+if constexpr (std::is_same_v<A, parser::OmpClause::Severity>)
+ return llvm::omp::Clause::OMPC_severity;
+if constexpr (std::is_same_v<A, parser::OmpClause::Shared>)
+ return llvm::omp::Clause::OMPC_shared;
+if constexpr (std::is_same_v<A, parser::OmpClause::Simd>)
+ return llvm::omp::Clause::OMPC_simd;
+if constexpr (std::is_same_v<A, parser::OmpClause::Simdlen>)
+ return llvm::omp::Clause::OMPC_simdlen;
+if constexpr (std::is_same_v<A, parser::OmpClause::Sizes>)
+ return llvm::omp::Clause::OMPC_sizes;
+if constexpr (std::is_same_v<A, parser::OmpClause::TaskReduction>)
+ return llvm::omp::Clause::OMPC_task_reduction;
+if constexpr (std::is_same_v<A, parser::OmpClause::ThreadLimit>)
+ return llvm::omp::Clause::OMPC_thread_limit;
+if constexpr (std::is_same_v<A, parser::OmpClause::Threadprivate>)
+ return llvm::omp::Clause::OMPC_threadprivate;
+if constexpr (std::is_same_v<A, parser::OmpClause::Threads>)
+ return llvm::omp::Clause::OMPC_threads;
+if constexpr (std::is_same_v<A, parser::OmpClause::To>)
+ return llvm::omp::Clause::OMPC_to;
+if constexpr (std::is_same_v<A, parser::OmpClause::UnifiedAddress>)
+ return llvm::omp::Clause::OMPC_unified_address;
+if constexpr (std::is_same_v<A, parser::OmpClause::UnifiedSharedMemory>)
+ return llvm::omp::Clause::OMPC_unified_shared_memory;
+if constexpr (std::is_same_v<A, parser::OmpClause::Uniform>)
+ return llvm::omp::Clause::OMPC_uniform;
+if constexpr (std::is_same_v<A, parser::OmpClause::Unknown>)
+ return llvm::omp::Clause::OMPC_unknown;
+if constexpr (std::is_same_v<A, parser::OmpClause::Untied>)
+ return llvm::omp::Clause::OMPC_untied;
+if constexpr (std::is_same_v<A, parser::OmpClause::Update>)
+ return llvm::omp::Clause::OMPC_update;
+if constexpr (std::is_same_v<A, parser::OmpClause::Use>)
+ return llvm::omp::Clause::OMPC_use;
+if constexpr (std::is_same_v<A, parser::OmpClause::UseDeviceAddr>)
+ return llvm::omp::Clause::OMPC_use_device_addr;
+if constexpr (std::is_same_v<A, parser::OmpClause::UseDevicePtr>)
+ return llvm::omp::Clause::OMPC_use_device_ptr;
+if constexpr (std::is_same_v<A, parser::OmpClause::UsesAllocators>)
+ return llvm::omp::Clause::OMPC_uses_allocators;
+if constexpr (std::is_same_v<A, parser::OmpClause::When>)
+ return llvm::omp::Clause::OMPC_when;
+if constexpr (std::is_same_v<A, parser::OmpClause::Write>)
+ return llvm::omp::Clause::OMPC_write;
+llvm_unreachable("Invalid OpenMP Parser clause");
+
+#endif // GEN_FLANG_CLAUSE_PARSER_KIND_MAP
+
+#ifdef GEN_FLANG_CLAUSES_PARSER
+#undef GEN_FLANG_CLAUSES_PARSER
+
+TYPE_PARSER(
+ "write" >> construct<OmpClause>(construct<OmpClause::Write>()) ||
+ "when" >> construct<OmpClause>(construct<OmpClause::When>()) ||
+ "uses_allocators" >> construct<OmpClause>(construct<OmpClause::UsesAllocators>()) ||
+ "use_device_ptr" >> construct<OmpClause>(construct<OmpClause::UseDevicePtr>(parenthesized(name))) ||
+ "use_device_addr" >> construct<OmpClause>(construct<OmpClause::UseDeviceAddr>()) ||
+ "use" >> construct<OmpClause>(construct<OmpClause::Use>()) ||
+ "update" >> construct<OmpClause>(construct<OmpClause::Update>()) ||
+ "untied" >> construct<OmpClause>(construct<OmpClause::Untied>()) ||
+ "unknown" >> construct<OmpClause>(construct<OmpClause::Unknown>()) ||
+ "uniform" >> construct<OmpClause>(construct<OmpClause::Uniform>(parenthesized(name))) ||
+ "unified_shared_memory" >> construct<OmpClause>(construct<OmpClause::UnifiedSharedMemory>()) ||
+ "unified_address" >> construct<OmpClause>(construct<OmpClause::UnifiedAddress>()) ||
+ "to" >> construct<OmpClause>(construct<OmpClause::To>(parenthesized(Parser<OmpObjectList>{}))) ||
+ "threads" >> construct<OmpClause>(construct<OmpClause::Threads>()) ||
+ "threadprivate" >> construct<OmpClause>(construct<OmpClause::Threadprivate>()) ||
+ "thread_limit" >> construct<OmpClause>(construct<OmpClause::ThreadLimit>(parenthesized(scalarIntExpr))) ||
+ "task_reduction" >> construct<OmpClause>(construct<OmpClause::TaskReduction>(parenthesized(Parser<OmpReductionClause>{}))) ||
+ "sizes" >> construct<OmpClause>(construct<OmpClause::Sizes>(parenthesized(scalarIntExpr))) ||
+ "simdlen" >> construct<OmpClause>(construct<OmpClause::Simdlen>(parenthesized(scalarIntConstantExpr))) ||
+ "simd" >> construct<OmpClause>(construct<OmpClause::Simd>()) ||
+ "shared" >> construct<OmpClause>(construct<OmpClause::Shared>(parenthesized(Parser<OmpObjectList>{}))) ||
+ "severity" >> construct<OmpClause>(construct<OmpClause::Severity>()) ||
+ "seq_cst" >> construct<OmpClause>(construct<OmpClause::SeqCst>()) ||
+ "schedule" >> construct<OmpClause>(construct<OmpClause::Schedule>(parenthesized(Parser<OmpScheduleClause>{}))) ||
+ "safelen" >> construct<OmpClause>(construct<OmpClause::Safelen>(parenthesized(scalarIntConstantExpr))) ||
+ "reverse_offload" >> construct<OmpClause>(construct<OmpClause::ReverseOffload>()) ||
+ "release" >> construct<OmpClause>(construct<OmpClause::Release>()) ||
+ "relaxed" >> construct<OmpClause>(construct<OmpClause::Relaxed>()) ||
+ "reduction" >> construct<OmpClause>(construct<OmpClause::Reduction>(parenthesized(Parser<OmpReductionClause>{}))) ||
+ "read" >> construct<OmpClause>(construct<OmpClause::Read>()) ||
+ "proc_bind" >> construct<OmpClause>(construct<OmpClause::ProcBind>(parenthesized(Parser<OmpProcBindClause>{}))) ||
+ "private" >> construct<OmpClause>(construct<OmpClause::Private>(parenthesized(Parser<OmpObjectList>{}))) ||
+ "priority" >> construct<OmpClause>(construct<OmpClause::Priority>(parenthesized(scalarIntExpr))) ||
+ "partial" >> construct<OmpClause>(construct<OmpClause::Partial>(maybe(parenthesized(scalarIntConstantExpr)))) ||
+ "ordered" >> construct<OmpClause>(construct<OmpClause::Ordered>(maybe(parenthesized(scalarIntConstantExpr)))) ||
+ "order" >> construct<OmpClause>(construct<OmpClause::Order>()) ||
+ "ompx_dyn_cgroup_mem" >> construct<OmpClause>(construct<OmpClause::OmpxDynCgroupMem>(parenthesized(scalarIntExpr))) ||
+ "num_threads" >> construct<OmpClause>(construct<OmpClause::NumThreads>(parenthesized(scalarIntExpr))) ||
+ "num_teams" >> construct<OmpClause>(construct<OmpClause::NumTeams>(parenthesized(scalarIntExpr))) ||
+ "num_tasks" >> construct<OmpClause>(construct<OmpClause::NumTasks>(parenthesized(scalarIntExpr))) ||
+ "nowait" >> construct<OmpClause>(construct<OmpClause::Nowait>()) ||
+ "novariants" >> construct<OmpClause>(construct<OmpClause::Novariants>(parenthesized(scalarLogicalExpr))) ||
+ "notinbranch" >> construct<OmpClause>(construct<OmpClause::Notinbranch>()) ||
+ "nontemporal" >> construct<OmpClause>(construct<OmpClause::Nontemporal>(parenthesized(name))) ||
+ "nogroup" >> construct<OmpClause>(construct<OmpClause::Nogroup>()) ||
+ "nocontext" >> construct<OmpClause>(construct<OmpClause::Nocontext>(parenthesized(scalarLogicalExpr))) ||
+ "message" >> construct<OmpClause>(construct<OmpClause::Message>()) ||
+ "mergeable" >> construct<OmpClause>(construct<OmpClause::Mergeable>()) ||
+ "memory_order" >> construct<OmpClause>(construct<OmpClause::MemoryOrder>()) ||
+ "match" >> construct<OmpClause>(construct<OmpClause::Match>()) ||
+ "map" >> construct<OmpClause>(construct<OmpClause::Map>(parenthesized(Parser<OmpMapClause>{}))) ||
+ "link" >> construct<OmpClause>(construct<OmpClause::Link>(parenthesized(Parser<OmpObjectList>{}))) ||
+ "linear" >> construct<OmpClause>(construct<OmpClause::Linear>(parenthesized(Parser<OmpLinearClause>{}))) ||
+ "lastprivate" >> construct<OmpClause>(construct<OmpClause::Lastprivate>(parenthesized(Parser<OmpObjectList>{}))) ||
+ "is_device_ptr" >> construct<OmpClause>(construct<OmpClause::IsDevicePtr>(parenthesized(name))) ||
+ "init" >> construct<OmpClause>(construct<OmpClause::Init>()) ||
+ "indirect" >> construct<OmpClause>(construct<OmpClause::Indirect>()) ||
+ "inclusive" >> construct<OmpClause>(construct<OmpClause::Inclusive>()) ||
+ "inbranch" >> construct<OmpClause>(construct<OmpClause::Inbranch>()) ||
+ "in_reduction" >> construct<OmpClause>(construct<OmpClause::InReduction>(parenthesized(Parser<OmpInReductionClause>{}))) ||
+ "if" >> construct<OmpClause>(construct<OmpClause::If>(parenthesized(Parser<OmpIfClause>{}))) ||
+ "hint" >> construct<OmpClause>(construct<OmpClause::Hint>(parenthesized(Parser<ConstantExpr>{}))) ||
+ "has_device_addr" >> construct<OmpClause>(construct<OmpClause::HasDeviceAddr>(parenthesized(name))) ||
+ "grainsize" >> construct<OmpClause>(construct<OmpClause::Grainsize>(parenthesized(scalarIntExpr))) ||
+ "full" >> construct<OmpClause>(construct<OmpClause::Full>()) ||
+ "from" >> construct<OmpClause>(construct<OmpClause::From>(parenthesized(Parser<OmpObjectList>{}))) ||
+ "flush" >> construct<OmpClause>(construct<OmpClause::Flush>()) ||
+ "firstprivate" >> construct<OmpClause>(construct<OmpClause::Firstprivate>(parenthesized(Parser<OmpObjectList>{}))) ||
+ "final" >> construct<OmpClause>(construct<OmpClause::Final>(parenthesized(scalarLogicalExpr))) ||
+ "filter" >> construct<OmpClause>(construct<OmpClause::Filter>(parenthesized(scalarIntExpr))) ||
+ "exclusive" >> construct<OmpClause>(construct<OmpClause::Exclusive>()) ||
+ "dynamic_allocators" >> construct<OmpClause>(construct<OmpClause::DynamicAllocators>()) ||
+ "dist_schedule" >> construct<OmpClause>(construct<OmpClause::DistSchedule>(maybe(parenthesized(scalarIntExpr)))) ||
+ "device_type" >> construct<OmpClause>(construct<OmpClause::DeviceType>()) ||
+ "device" >> construct<OmpClause>(construct<OmpClause::Device>(parenthesized(Parser<OmpDeviceClause>{}))) ||
+ "detach" >> construct<OmpClause>(construct<OmpClause::Detach>()) ||
+ "destroy" >> construct<OmpClause>(construct<OmpClause::Destroy>()) ||
+ "depobj" >> construct<OmpClause>(construct<OmpClause::Depobj>()) ||
+ "depend" >> construct<OmpClause>(construct<OmpClause::Depend>(parenthesized(Parser<OmpDependClause>{}))) ||
+ "defaultmap" >> construct<OmpClause>(construct<OmpClause::Defaultmap>(parenthesized(Parser<OmpDefaultmapClause>{}))) ||
+ "default" >> construct<OmpClause>(construct<OmpClause::Default>(parenthesized(Parser<OmpDefaultClause>{}))) ||
+ "copyprivate" >> construct<OmpClause>(construct<OmpClause::Copyprivate>(parenthesized(Parser<OmpObjectList>{}))) ||
+ "copyin" >> construct<OmpClause>(construct<OmpClause::Copyin>(parenthesized(Parser<OmpObjectList>{}))) ||
+ "compare" >> construct<OmpClause>(construct<OmpClause::Compare>()) ||
+ "collapse" >> construct<OmpClause>(construct<OmpClause::Collapse>(parenthesized(scalarIntConstantExpr))) ||
+ "capture" >> construct<OmpClause>(construct<OmpClause::Capture>()) ||
+ "cancellation_construct_type" >> construct<OmpClause>(construct<OmpClause::CancellationConstructType>()) ||
+ "bind" >> construct<OmpClause>(construct<OmpClause::Bind>()) ||
+ "atomic_default_mem_order" >> construct<OmpClause>(construct<OmpClause::AtomicDefaultMemOrder>(parenthesized(Parser<OmpAtomicDefaultMemOrderClause>{}))) ||
+ "at" >> construct<OmpClause>(construct<OmpClause::At>()) ||
+ "append_args" >> construct<OmpClause>(construct<OmpClause::AppendArgs>()) ||
+ "allocator" >> construct<OmpClause>(construct<OmpClause::Allocator>(parenthesized(scalarIntExpr))) ||
+ "allocate" >> construct<OmpClause>(construct<OmpClause::Allocate>(parenthesized(Parser<OmpAllocateClause>{}))) ||
+ "aligned" >> construct<OmpClause>(construct<OmpClause::Aligned>(parenthesized(Parser<OmpAlignedClause>{}))) ||
+ "align" >> construct<OmpClause>(construct<OmpClause::Align>()) ||
+ "affinity" >> construct<OmpClause>(construct<OmpClause::Affinity>()) ||
+ "adjust_args" >> construct<OmpClause>(construct<OmpClause::AdjustArgs>()) ||
+ "acquire" >> construct<OmpClause>(construct<OmpClause::Acquire>()) ||
+ "acq_rel" >> construct<OmpClause>(construct<OmpClause::AcqRel>())
+)
+
+#endif // GEN_FLANG_CLAUSES_PARSER
+
+#ifdef GEN_CLANG_CLAUSE_CLASS
+#undef GEN_CLANG_CLAUSE_CLASS
+
+// X-macro section: the includer may define any of CLAUSE(Enum, Str, Implicit),
+// CLAUSE_CLASS(Enum, Str, Class) and CLAUSE_NO_CLASS(Enum, Str) before
+// including this file; any macro left undefined defaults to a no-op below,
+// and all of them are #undef'ed again at the end of the section.
+#ifndef CLAUSE
+#define CLAUSE(Enum, Str, Implicit)
+#endif
+#ifndef CLAUSE_CLASS
+#define CLAUSE_CLASS(Enum, Str, Class)
+#endif
+#ifndef CLAUSE_NO_CLASS
+#define CLAUSE_NO_CLASS(Enum, Str)
+#endif
+
+// Helper macros: each clause expands into both a generic CLAUSE entry and
+// either a CLAUSE_CLASS or CLAUSE_NO_CLASS entry.  The __IMPLICIT_* forms
+// mark clauses the compiler materializes itself (Implicit == true) and take
+// an explicit spelling instead of stringizing the name.
+#define __CLAUSE(Name, Class) \
+  CLAUSE(OMPC_##Name, #Name, /* Implicit */ false) \
+  CLAUSE_CLASS(OMPC_##Name, #Name, Class)
+#define __CLAUSE_NO_CLASS(Name) \
+  CLAUSE(OMPC_##Name, #Name, /* Implicit */ false) \
+  CLAUSE_NO_CLASS(OMPC_##Name, #Name)
+#define __IMPLICIT_CLAUSE_CLASS(Name, Str, Class) \
+  CLAUSE(OMPC_##Name, Str, /* Implicit */ true) \
+  CLAUSE_CLASS(OMPC_##Name, Str, Class)
+#define __IMPLICIT_CLAUSE_NO_CLASS(Name, Str) \
+  CLAUSE(OMPC_##Name, Str, /* Implicit */ true) \
+  CLAUSE_NO_CLASS(OMPC_##Name, Str)
+
+// One entry per OpenMP clause, alphabetical by spelling; generated output —
+// do not reorder or edit by hand.
+__CLAUSE(acq_rel, OMPAcqRelClause)
+__CLAUSE(acquire, OMPAcquireClause)
+__CLAUSE_NO_CLASS(adjust_args)
+__CLAUSE(affinity, OMPAffinityClause)
+__CLAUSE(align, OMPAlignClause)
+__CLAUSE(aligned, OMPAlignedClause)
+__CLAUSE(allocate, OMPAllocateClause)
+__CLAUSE(allocator, OMPAllocatorClause)
+__CLAUSE_NO_CLASS(append_args)
+__CLAUSE(at, OMPAtClause)
+__CLAUSE(atomic_default_mem_order, OMPAtomicDefaultMemOrderClause)
+__CLAUSE(bind, OMPBindClause)
+__CLAUSE_NO_CLASS(cancellation_construct_type)
+__CLAUSE(capture, OMPCaptureClause)
+__CLAUSE(collapse, OMPCollapseClause)
+__CLAUSE(compare, OMPCompareClause)
+__CLAUSE(copyprivate, OMPCopyprivateClause)
+__CLAUSE(copyin, OMPCopyinClause)
+__CLAUSE(default, OMPDefaultClause)
+__CLAUSE(defaultmap, OMPDefaultmapClause)
+__CLAUSE(depend, OMPDependClause)
+__IMPLICIT_CLAUSE_CLASS(depobj, "depobj", OMPDepobjClause)
+__CLAUSE(destroy, OMPDestroyClause)
+__CLAUSE(detach, OMPDetachClause)
+__CLAUSE(device, OMPDeviceClause)
+__CLAUSE_NO_CLASS(device_type)
+__CLAUSE(dist_schedule, OMPDistScheduleClause)
+__CLAUSE(dynamic_allocators, OMPDynamicAllocatorsClause)
+__CLAUSE(exclusive, OMPExclusiveClause)
+__CLAUSE(filter, OMPFilterClause)
+__CLAUSE(final, OMPFinalClause)
+__CLAUSE(firstprivate, OMPFirstprivateClause)
+__IMPLICIT_CLAUSE_CLASS(flush, "flush", OMPFlushClause)
+__CLAUSE(from, OMPFromClause)
+__CLAUSE(full, OMPFullClause)
+__CLAUSE(grainsize, OMPGrainsizeClause)
+__CLAUSE(has_device_addr, OMPHasDeviceAddrClause)
+__CLAUSE(hint, OMPHintClause)
+__CLAUSE(if, OMPIfClause)
+__CLAUSE(in_reduction, OMPInReductionClause)
+__CLAUSE_NO_CLASS(inbranch)
+__CLAUSE(inclusive, OMPInclusiveClause)
+__CLAUSE_NO_CLASS(indirect)
+__CLAUSE(init, OMPInitClause)
+__CLAUSE(is_device_ptr, OMPIsDevicePtrClause)
+__CLAUSE(lastprivate, OMPLastprivateClause)
+__CLAUSE(linear, OMPLinearClause)
+__CLAUSE_NO_CLASS(link)
+__CLAUSE(map, OMPMapClause)
+__CLAUSE_NO_CLASS(match)
+__CLAUSE_NO_CLASS(memory_order)
+__CLAUSE(mergeable, OMPMergeableClause)
+__CLAUSE(message, OMPMessageClause)
+__CLAUSE(nogroup, OMPNogroupClause)
+__CLAUSE(nowait, OMPNowaitClause)
+__CLAUSE(nocontext, OMPNocontextClause)
+__CLAUSE(nontemporal, OMPNontemporalClause)
+__CLAUSE_NO_CLASS(notinbranch)
+__CLAUSE(novariants, OMPNovariantsClause)
+__CLAUSE(num_tasks, OMPNumTasksClause)
+__CLAUSE(num_teams, OMPNumTeamsClause)
+__CLAUSE(num_threads, OMPNumThreadsClause)
+__CLAUSE(ompx_dyn_cgroup_mem, OMPXDynCGroupMemClause)
+__CLAUSE(order, OMPOrderClause)
+__CLAUSE(ordered, OMPOrderedClause)
+__CLAUSE(partial, OMPPartialClause)
+__CLAUSE(priority, OMPPriorityClause)
+__CLAUSE(private, OMPPrivateClause)
+__CLAUSE(proc_bind, OMPProcBindClause)
+__CLAUSE(read, OMPReadClause)
+__CLAUSE(reduction, OMPReductionClause)
+__CLAUSE(relaxed, OMPRelaxedClause)
+__CLAUSE(release, OMPReleaseClause)
+__CLAUSE(reverse_offload, OMPReverseOffloadClause)
+__CLAUSE(safelen, OMPSafelenClause)
+__CLAUSE(schedule, OMPScheduleClause)
+__CLAUSE(seq_cst, OMPSeqCstClause)
+__CLAUSE(severity, OMPSeverityClause)
+__CLAUSE(shared, OMPSharedClause)
+__CLAUSE(simd, OMPSIMDClause)
+__CLAUSE(simdlen, OMPSimdlenClause)
+__CLAUSE(sizes, OMPSizesClause)
+__CLAUSE(task_reduction, OMPTaskReductionClause)
+__CLAUSE(thread_limit, OMPThreadLimitClause)
+__IMPLICIT_CLAUSE_NO_CLASS(threadprivate, "threadprivate")
+__CLAUSE(threads, OMPThreadsClause)
+__CLAUSE(to, OMPToClause)
+__CLAUSE(unified_address, OMPUnifiedAddressClause)
+__CLAUSE(unified_shared_memory, OMPUnifiedSharedMemoryClause)
+__CLAUSE_NO_CLASS(uniform)
+__IMPLICIT_CLAUSE_NO_CLASS(unknown, "unknown")
+__CLAUSE(untied, OMPUntiedClause)
+__CLAUSE(update, OMPUpdateClause)
+__CLAUSE(use, OMPUseClause)
+__CLAUSE(use_device_addr, OMPUseDeviceAddrClause)
+__CLAUSE(use_device_ptr, OMPUseDevicePtrClause)
+__CLAUSE(uses_allocators, OMPUsesAllocatorsClause)
+__CLAUSE_NO_CLASS(when)
+__CLAUSE(write, OMPWriteClause)
+
+// Tear everything down so the section can be re-included with a different
+// set of user macro definitions.
+#undef __IMPLICIT_CLAUSE_NO_CLASS
+#undef __IMPLICIT_CLAUSE_CLASS
+#undef __CLAUSE
+#undef CLAUSE_NO_CLASS
+#undef CLAUSE_CLASS
+#undef CLAUSE
+
+#endif // GEN_CLANG_CLAUSE_CLASS
+
+#ifdef GEN_DIRECTIVES_IMPL
+#undef GEN_DIRECTIVES_IMPL
+
+Directive llvm::omp::getOpenMPDirectiveKind(llvm::StringRef Str) {
+  // Exact, case-sensitive lookup of an OpenMP directive spelling (compound
+  // directives use single-space separators).  Unrecognized spellings map to
+  // OMPD_unknown, matching the explicit "unknown" entry.
+  if (Str == "allocate") return OMPD_allocate;
+  if (Str == "assumes") return OMPD_assumes;
+  if (Str == "atomic") return OMPD_atomic;
+  if (Str == "barrier") return OMPD_barrier;
+  if (Str == "begin assumes") return OMPD_begin_assumes;
+  if (Str == "begin declare target") return OMPD_begin_declare_target;
+  if (Str == "begin declare variant") return OMPD_begin_declare_variant;
+  if (Str == "cancel") return OMPD_cancel;
+  if (Str == "cancellation point") return OMPD_cancellation_point;
+  if (Str == "critical") return OMPD_critical;
+  if (Str == "declare mapper") return OMPD_declare_mapper;
+  if (Str == "declare reduction") return OMPD_declare_reduction;
+  if (Str == "declare simd") return OMPD_declare_simd;
+  if (Str == "declare target") return OMPD_declare_target;
+  if (Str == "declare variant") return OMPD_declare_variant;
+  if (Str == "depobj") return OMPD_depobj;
+  if (Str == "distribute") return OMPD_distribute;
+  if (Str == "distribute parallel do") return OMPD_distribute_parallel_do;
+  if (Str == "distribute parallel do simd") return OMPD_distribute_parallel_do_simd;
+  if (Str == "distribute parallel for") return OMPD_distribute_parallel_for;
+  if (Str == "distribute parallel for simd") return OMPD_distribute_parallel_for_simd;
+  if (Str == "distribute simd") return OMPD_distribute_simd;
+  if (Str == "do") return OMPD_do;
+  if (Str == "do simd") return OMPD_do_simd;
+  if (Str == "end assumes") return OMPD_end_assumes;
+  if (Str == "end declare target") return OMPD_end_declare_target;
+  if (Str == "end declare variant") return OMPD_end_declare_variant;
+  if (Str == "end do") return OMPD_end_do;
+  if (Str == "end do simd") return OMPD_end_do_simd;
+  if (Str == "end sections") return OMPD_end_sections;
+  if (Str == "end single") return OMPD_end_single;
+  if (Str == "end workshare") return OMPD_end_workshare;
+  if (Str == "error") return OMPD_error;
+  if (Str == "flush") return OMPD_flush;
+  if (Str == "for") return OMPD_for;
+  if (Str == "for simd") return OMPD_for_simd;
+  if (Str == "masked taskloop") return OMPD_masked_taskloop;
+  if (Str == "masked taskloop simd") return OMPD_masked_taskloop_simd;
+  if (Str == "master") return OMPD_master;
+  if (Str == "master taskloop") return OMPD_master_taskloop;
+  if (Str == "master taskloop simd") return OMPD_master_taskloop_simd;
+  if (Str == "metadirective") return OMPD_metadirective;
+  if (Str == "nothing") return OMPD_nothing;
+  if (Str == "ordered") return OMPD_ordered;
+  if (Str == "parallel") return OMPD_parallel;
+  if (Str == "parallel do") return OMPD_parallel_do;
+  if (Str == "parallel do simd") return OMPD_parallel_do_simd;
+  if (Str == "parallel for") return OMPD_parallel_for;
+  if (Str == "parallel for simd") return OMPD_parallel_for_simd;
+  if (Str == "parallel masked") return OMPD_parallel_masked;
+  if (Str == "parallel masked taskloop") return OMPD_parallel_masked_taskloop;
+  if (Str == "parallel masked taskloop simd") return OMPD_parallel_masked_taskloop_simd;
+  if (Str == "parallel master") return OMPD_parallel_master;
+  if (Str == "parallel master taskloop") return OMPD_parallel_master_taskloop;
+  if (Str == "parallel master taskloop simd") return OMPD_parallel_master_taskloop_simd;
+  if (Str == "parallel sections") return OMPD_parallel_sections;
+  if (Str == "parallel workshare") return OMPD_parallel_workshare;
+  if (Str == "requires") return OMPD_requires;
+  if (Str == "scan") return OMPD_scan;
+  if (Str == "section") return OMPD_section;
+  if (Str == "sections") return OMPD_sections;
+  if (Str == "simd") return OMPD_simd;
+  if (Str == "single") return OMPD_single;
+  if (Str == "target") return OMPD_target;
+  if (Str == "target data") return OMPD_target_data;
+  if (Str == "target enter data") return OMPD_target_enter_data;
+  if (Str == "target exit data") return OMPD_target_exit_data;
+  if (Str == "target parallel") return OMPD_target_parallel;
+  if (Str == "target parallel do") return OMPD_target_parallel_do;
+  if (Str == "target parallel do simd") return OMPD_target_parallel_do_simd;
+  if (Str == "target parallel for") return OMPD_target_parallel_for;
+  if (Str == "target parallel for simd") return OMPD_target_parallel_for_simd;
+  if (Str == "target simd") return OMPD_target_simd;
+  if (Str == "target teams") return OMPD_target_teams;
+  if (Str == "target teams distribute") return OMPD_target_teams_distribute;
+  if (Str == "target teams distribute parallel do") return OMPD_target_teams_distribute_parallel_do;
+  if (Str == "target teams distribute parallel do simd") return OMPD_target_teams_distribute_parallel_do_simd;
+  if (Str == "target teams distribute parallel for") return OMPD_target_teams_distribute_parallel_for;
+  if (Str == "target teams distribute parallel for simd") return OMPD_target_teams_distribute_parallel_for_simd;
+  if (Str == "target teams distribute simd") return OMPD_target_teams_distribute_simd;
+  if (Str == "target update") return OMPD_target_update;
+  if (Str == "task") return OMPD_task;
+  if (Str == "taskgroup") return OMPD_taskgroup;
+  if (Str == "taskloop") return OMPD_taskloop;
+  if (Str == "taskloop simd") return OMPD_taskloop_simd;
+  if (Str == "taskwait") return OMPD_taskwait;
+  if (Str == "taskyield") return OMPD_taskyield;
+  if (Str == "teams") return OMPD_teams;
+  if (Str == "teams distribute") return OMPD_teams_distribute;
+  if (Str == "teams distribute parallel do") return OMPD_teams_distribute_parallel_do;
+  if (Str == "teams distribute parallel do simd") return OMPD_teams_distribute_parallel_do_simd;
+  if (Str == "teams distribute parallel for") return OMPD_teams_distribute_parallel_for;
+  if (Str == "teams distribute parallel for simd") return OMPD_teams_distribute_parallel_for_simd;
+  if (Str == "teams distribute simd") return OMPD_teams_distribute_simd;
+  if (Str == "threadprivate") return OMPD_threadprivate;
+  if (Str == "tile") return OMPD_tile;
+  if (Str == "unknown") return OMPD_unknown;
+  if (Str == "unroll") return OMPD_unroll;
+  if (Str == "workshare") return OMPD_workshare;
+  if (Str == "dispatch") return OMPD_dispatch;
+  if (Str == "interop") return OMPD_interop;
+  if (Str == "loop") return OMPD_loop;
+  if (Str == "masked") return OMPD_masked;
+  if (Str == "parallel loop") return OMPD_parallel_loop;
+  if (Str == "target parallel loop") return OMPD_target_parallel_loop;
+  if (Str == "target teams loop") return OMPD_target_teams_loop;
+  if (Str == "teams loop") return OMPD_teams_loop;
+  return OMPD_unknown;
+}
+
+llvm::StringRef llvm::omp::getOpenMPDirectiveName(Directive Kind) {
+  // Inverse of getOpenMPDirectiveKind: enumerator -> canonical spelling.
+  // The switch is exhaustive over Directive; falling out of it indicates a
+  // corrupted enum value.
+  switch (Kind) {
+  case OMPD_allocate: return "allocate";
+  case OMPD_assumes: return "assumes";
+  case OMPD_atomic: return "atomic";
+  case OMPD_barrier: return "barrier";
+  case OMPD_begin_assumes: return "begin assumes";
+  case OMPD_begin_declare_target: return "begin declare target";
+  case OMPD_begin_declare_variant: return "begin declare variant";
+  case OMPD_cancel: return "cancel";
+  case OMPD_cancellation_point: return "cancellation point";
+  case OMPD_critical: return "critical";
+  case OMPD_declare_mapper: return "declare mapper";
+  case OMPD_declare_reduction: return "declare reduction";
+  case OMPD_declare_simd: return "declare simd";
+  case OMPD_declare_target: return "declare target";
+  case OMPD_declare_variant: return "declare variant";
+  case OMPD_depobj: return "depobj";
+  case OMPD_distribute: return "distribute";
+  case OMPD_distribute_parallel_do: return "distribute parallel do";
+  case OMPD_distribute_parallel_do_simd: return "distribute parallel do simd";
+  case OMPD_distribute_parallel_for: return "distribute parallel for";
+  case OMPD_distribute_parallel_for_simd: return "distribute parallel for simd";
+  case OMPD_distribute_simd: return "distribute simd";
+  case OMPD_do: return "do";
+  case OMPD_do_simd: return "do simd";
+  case OMPD_end_assumes: return "end assumes";
+  case OMPD_end_declare_target: return "end declare target";
+  case OMPD_end_declare_variant: return "end declare variant";
+  case OMPD_end_do: return "end do";
+  case OMPD_end_do_simd: return "end do simd";
+  case OMPD_end_sections: return "end sections";
+  case OMPD_end_single: return "end single";
+  case OMPD_end_workshare: return "end workshare";
+  case OMPD_error: return "error";
+  case OMPD_flush: return "flush";
+  case OMPD_for: return "for";
+  case OMPD_for_simd: return "for simd";
+  case OMPD_masked_taskloop: return "masked taskloop";
+  case OMPD_masked_taskloop_simd: return "masked taskloop simd";
+  case OMPD_master: return "master";
+  case OMPD_master_taskloop: return "master taskloop";
+  case OMPD_master_taskloop_simd: return "master taskloop simd";
+  case OMPD_metadirective: return "metadirective";
+  case OMPD_nothing: return "nothing";
+  case OMPD_ordered: return "ordered";
+  case OMPD_parallel: return "parallel";
+  case OMPD_parallel_do: return "parallel do";
+  case OMPD_parallel_do_simd: return "parallel do simd";
+  case OMPD_parallel_for: return "parallel for";
+  case OMPD_parallel_for_simd: return "parallel for simd";
+  case OMPD_parallel_masked: return "parallel masked";
+  case OMPD_parallel_masked_taskloop: return "parallel masked taskloop";
+  case OMPD_parallel_masked_taskloop_simd: return "parallel masked taskloop simd";
+  case OMPD_parallel_master: return "parallel master";
+  case OMPD_parallel_master_taskloop: return "parallel master taskloop";
+  case OMPD_parallel_master_taskloop_simd: return "parallel master taskloop simd";
+  case OMPD_parallel_sections: return "parallel sections";
+  case OMPD_parallel_workshare: return "parallel workshare";
+  case OMPD_requires: return "requires";
+  case OMPD_scan: return "scan";
+  case OMPD_section: return "section";
+  case OMPD_sections: return "sections";
+  case OMPD_simd: return "simd";
+  case OMPD_single: return "single";
+  case OMPD_target: return "target";
+  case OMPD_target_data: return "target data";
+  case OMPD_target_enter_data: return "target enter data";
+  case OMPD_target_exit_data: return "target exit data";
+  case OMPD_target_parallel: return "target parallel";
+  case OMPD_target_parallel_do: return "target parallel do";
+  case OMPD_target_parallel_do_simd: return "target parallel do simd";
+  case OMPD_target_parallel_for: return "target parallel for";
+  case OMPD_target_parallel_for_simd: return "target parallel for simd";
+  case OMPD_target_simd: return "target simd";
+  case OMPD_target_teams: return "target teams";
+  case OMPD_target_teams_distribute: return "target teams distribute";
+  case OMPD_target_teams_distribute_parallel_do: return "target teams distribute parallel do";
+  case OMPD_target_teams_distribute_parallel_do_simd: return "target teams distribute parallel do simd";
+  case OMPD_target_teams_distribute_parallel_for: return "target teams distribute parallel for";
+  case OMPD_target_teams_distribute_parallel_for_simd: return "target teams distribute parallel for simd";
+  case OMPD_target_teams_distribute_simd: return "target teams distribute simd";
+  case OMPD_target_update: return "target update";
+  case OMPD_task: return "task";
+  case OMPD_taskgroup: return "taskgroup";
+  case OMPD_taskloop: return "taskloop";
+  case OMPD_taskloop_simd: return "taskloop simd";
+  case OMPD_taskwait: return "taskwait";
+  case OMPD_taskyield: return "taskyield";
+  case OMPD_teams: return "teams";
+  case OMPD_teams_distribute: return "teams distribute";
+  case OMPD_teams_distribute_parallel_do: return "teams distribute parallel do";
+  case OMPD_teams_distribute_parallel_do_simd: return "teams distribute parallel do simd";
+  case OMPD_teams_distribute_parallel_for: return "teams distribute parallel for";
+  case OMPD_teams_distribute_parallel_for_simd: return "teams distribute parallel for simd";
+  case OMPD_teams_distribute_simd: return "teams distribute simd";
+  case OMPD_threadprivate: return "threadprivate";
+  case OMPD_tile: return "tile";
+  case OMPD_unknown: return "unknown";
+  case OMPD_unroll: return "unroll";
+  case OMPD_workshare: return "workshare";
+  case OMPD_dispatch: return "dispatch";
+  case OMPD_interop: return "interop";
+  case OMPD_loop: return "loop";
+  case OMPD_masked: return "masked";
+  case OMPD_parallel_loop: return "parallel loop";
+  case OMPD_target_parallel_loop: return "target parallel loop";
+  case OMPD_target_teams_loop: return "target teams loop";
+  case OMPD_teams_loop: return "teams loop";
+  }
+  llvm_unreachable("Invalid OpenMP Directive kind");
+}
+
+Clause llvm::omp::getOpenMPClauseKind(llvm::StringRef Str) {
+  // Exact, case-sensitive lookup of a clause spelling.  The spellings of
+  // the implicit clauses ("depobj", "flush", "threadprivate") deliberately
+  // map to OMPC_unknown: users cannot write them, the compiler introduces
+  // them internally.
+  if (Str == "acq_rel") return OMPC_acq_rel;
+  if (Str == "acquire") return OMPC_acquire;
+  if (Str == "adjust_args") return OMPC_adjust_args;
+  if (Str == "affinity") return OMPC_affinity;
+  if (Str == "align") return OMPC_align;
+  if (Str == "aligned") return OMPC_aligned;
+  if (Str == "allocate") return OMPC_allocate;
+  if (Str == "allocator") return OMPC_allocator;
+  if (Str == "append_args") return OMPC_append_args;
+  if (Str == "at") return OMPC_at;
+  if (Str == "atomic_default_mem_order") return OMPC_atomic_default_mem_order;
+  if (Str == "bind") return OMPC_bind;
+  if (Str == "cancellation_construct_type") return OMPC_cancellation_construct_type;
+  if (Str == "capture") return OMPC_capture;
+  if (Str == "collapse") return OMPC_collapse;
+  if (Str == "compare") return OMPC_compare;
+  if (Str == "copyprivate") return OMPC_copyprivate;
+  if (Str == "copyin") return OMPC_copyin;
+  if (Str == "default") return OMPC_default;
+  if (Str == "defaultmap") return OMPC_defaultmap;
+  if (Str == "depend") return OMPC_depend;
+  if (Str == "depobj") return OMPC_unknown; // implicit clause
+  if (Str == "destroy") return OMPC_destroy;
+  if (Str == "detach") return OMPC_detach;
+  if (Str == "device") return OMPC_device;
+  if (Str == "device_type") return OMPC_device_type;
+  if (Str == "dist_schedule") return OMPC_dist_schedule;
+  if (Str == "dynamic_allocators") return OMPC_dynamic_allocators;
+  if (Str == "exclusive") return OMPC_exclusive;
+  if (Str == "filter") return OMPC_filter;
+  if (Str == "final") return OMPC_final;
+  if (Str == "firstprivate") return OMPC_firstprivate;
+  if (Str == "flush") return OMPC_unknown; // implicit clause
+  if (Str == "from") return OMPC_from;
+  if (Str == "full") return OMPC_full;
+  if (Str == "grainsize") return OMPC_grainsize;
+  if (Str == "has_device_addr") return OMPC_has_device_addr;
+  if (Str == "hint") return OMPC_hint;
+  if (Str == "if") return OMPC_if;
+  if (Str == "in_reduction") return OMPC_in_reduction;
+  if (Str == "inbranch") return OMPC_inbranch;
+  if (Str == "inclusive") return OMPC_inclusive;
+  if (Str == "indirect") return OMPC_indirect;
+  if (Str == "init") return OMPC_init;
+  if (Str == "is_device_ptr") return OMPC_is_device_ptr;
+  if (Str == "lastprivate") return OMPC_lastprivate;
+  if (Str == "linear") return OMPC_linear;
+  if (Str == "link") return OMPC_link;
+  if (Str == "map") return OMPC_map;
+  if (Str == "match") return OMPC_match;
+  if (Str == "memory_order") return OMPC_memory_order;
+  if (Str == "mergeable") return OMPC_mergeable;
+  if (Str == "message") return OMPC_message;
+  if (Str == "nogroup") return OMPC_nogroup;
+  if (Str == "nowait") return OMPC_nowait;
+  if (Str == "nocontext") return OMPC_nocontext;
+  if (Str == "nontemporal") return OMPC_nontemporal;
+  if (Str == "notinbranch") return OMPC_notinbranch;
+  if (Str == "novariants") return OMPC_novariants;
+  if (Str == "num_tasks") return OMPC_num_tasks;
+  if (Str == "num_teams") return OMPC_num_teams;
+  if (Str == "num_threads") return OMPC_num_threads;
+  if (Str == "ompx_dyn_cgroup_mem") return OMPC_ompx_dyn_cgroup_mem;
+  if (Str == "order") return OMPC_order;
+  if (Str == "ordered") return OMPC_ordered;
+  if (Str == "partial") return OMPC_partial;
+  if (Str == "priority") return OMPC_priority;
+  if (Str == "private") return OMPC_private;
+  if (Str == "proc_bind") return OMPC_proc_bind;
+  if (Str == "read") return OMPC_read;
+  if (Str == "reduction") return OMPC_reduction;
+  if (Str == "relaxed") return OMPC_relaxed;
+  if (Str == "release") return OMPC_release;
+  if (Str == "reverse_offload") return OMPC_reverse_offload;
+  if (Str == "safelen") return OMPC_safelen;
+  if (Str == "schedule") return OMPC_schedule;
+  if (Str == "seq_cst") return OMPC_seq_cst;
+  if (Str == "severity") return OMPC_severity;
+  if (Str == "shared") return OMPC_shared;
+  if (Str == "simd") return OMPC_simd;
+  if (Str == "simdlen") return OMPC_simdlen;
+  if (Str == "sizes") return OMPC_sizes;
+  if (Str == "task_reduction") return OMPC_task_reduction;
+  if (Str == "thread_limit") return OMPC_thread_limit;
+  if (Str == "threadprivate") return OMPC_unknown; // implicit clause
+  if (Str == "threads") return OMPC_threads;
+  if (Str == "to") return OMPC_to;
+  if (Str == "unified_address") return OMPC_unified_address;
+  if (Str == "unified_shared_memory") return OMPC_unified_shared_memory;
+  if (Str == "uniform") return OMPC_uniform;
+  if (Str == "unknown") return OMPC_unknown;
+  if (Str == "untied") return OMPC_untied;
+  if (Str == "update") return OMPC_update;
+  if (Str == "use") return OMPC_use;
+  if (Str == "use_device_addr") return OMPC_use_device_addr;
+  if (Str == "use_device_ptr") return OMPC_use_device_ptr;
+  if (Str == "uses_allocators") return OMPC_uses_allocators;
+  if (Str == "when") return OMPC_when;
+  if (Str == "write") return OMPC_write;
+  return OMPC_unknown;
+}
+
+llvm::StringRef llvm::omp::getOpenMPClauseName(Clause Kind) {
+  // Enumerator -> spelling, exhaustive over Clause.  Note that
+  // OMPC_threadprivate intentionally yields a diagnostic-friendly phrase
+  // ("threadprivate or thread local") rather than a parseable keyword.
+  switch (Kind) {
+  case OMPC_acq_rel: return "acq_rel";
+  case OMPC_acquire: return "acquire";
+  case OMPC_adjust_args: return "adjust_args";
+  case OMPC_affinity: return "affinity";
+  case OMPC_align: return "align";
+  case OMPC_aligned: return "aligned";
+  case OMPC_allocate: return "allocate";
+  case OMPC_allocator: return "allocator";
+  case OMPC_append_args: return "append_args";
+  case OMPC_at: return "at";
+  case OMPC_atomic_default_mem_order: return "atomic_default_mem_order";
+  case OMPC_bind: return "bind";
+  case OMPC_cancellation_construct_type: return "cancellation_construct_type";
+  case OMPC_capture: return "capture";
+  case OMPC_collapse: return "collapse";
+  case OMPC_compare: return "compare";
+  case OMPC_copyprivate: return "copyprivate";
+  case OMPC_copyin: return "copyin";
+  case OMPC_default: return "default";
+  case OMPC_defaultmap: return "defaultmap";
+  case OMPC_depend: return "depend";
+  case OMPC_depobj: return "depobj";
+  case OMPC_destroy: return "destroy";
+  case OMPC_detach: return "detach";
+  case OMPC_device: return "device";
+  case OMPC_device_type: return "device_type";
+  case OMPC_dist_schedule: return "dist_schedule";
+  case OMPC_dynamic_allocators: return "dynamic_allocators";
+  case OMPC_exclusive: return "exclusive";
+  case OMPC_filter: return "filter";
+  case OMPC_final: return "final";
+  case OMPC_firstprivate: return "firstprivate";
+  case OMPC_flush: return "flush";
+  case OMPC_from: return "from";
+  case OMPC_full: return "full";
+  case OMPC_grainsize: return "grainsize";
+  case OMPC_has_device_addr: return "has_device_addr";
+  case OMPC_hint: return "hint";
+  case OMPC_if: return "if";
+  case OMPC_in_reduction: return "in_reduction";
+  case OMPC_inbranch: return "inbranch";
+  case OMPC_inclusive: return "inclusive";
+  case OMPC_indirect: return "indirect";
+  case OMPC_init: return "init";
+  case OMPC_is_device_ptr: return "is_device_ptr";
+  case OMPC_lastprivate: return "lastprivate";
+  case OMPC_linear: return "linear";
+  case OMPC_link: return "link";
+  case OMPC_map: return "map";
+  case OMPC_match: return "match";
+  case OMPC_memory_order: return "memory_order";
+  case OMPC_mergeable: return "mergeable";
+  case OMPC_message: return "message";
+  case OMPC_nogroup: return "nogroup";
+  case OMPC_nowait: return "nowait";
+  case OMPC_nocontext: return "nocontext";
+  case OMPC_nontemporal: return "nontemporal";
+  case OMPC_notinbranch: return "notinbranch";
+  case OMPC_novariants: return "novariants";
+  case OMPC_num_tasks: return "num_tasks";
+  case OMPC_num_teams: return "num_teams";
+  case OMPC_num_threads: return "num_threads";
+  case OMPC_ompx_dyn_cgroup_mem: return "ompx_dyn_cgroup_mem";
+  case OMPC_order: return "order";
+  case OMPC_ordered: return "ordered";
+  case OMPC_partial: return "partial";
+  case OMPC_priority: return "priority";
+  case OMPC_private: return "private";
+  case OMPC_proc_bind: return "proc_bind";
+  case OMPC_read: return "read";
+  case OMPC_reduction: return "reduction";
+  case OMPC_relaxed: return "relaxed";
+  case OMPC_release: return "release";
+  case OMPC_reverse_offload: return "reverse_offload";
+  case OMPC_safelen: return "safelen";
+  case OMPC_schedule: return "schedule";
+  case OMPC_seq_cst: return "seq_cst";
+  case OMPC_severity: return "severity";
+  case OMPC_shared: return "shared";
+  case OMPC_simd: return "simd";
+  case OMPC_simdlen: return "simdlen";
+  case OMPC_sizes: return "sizes";
+  case OMPC_task_reduction: return "task_reduction";
+  case OMPC_thread_limit: return "thread_limit";
+  case OMPC_threadprivate: return "threadprivate or thread local";
+  case OMPC_threads: return "threads";
+  case OMPC_to: return "to";
+  case OMPC_unified_address: return "unified_address";
+  case OMPC_unified_shared_memory: return "unified_shared_memory";
+  case OMPC_uniform: return "uniform";
+  case OMPC_unknown: return "unknown";
+  case OMPC_untied: return "untied";
+  case OMPC_update: return "update";
+  case OMPC_use: return "use";
+  case OMPC_use_device_addr: return "use_device_addr";
+  case OMPC_use_device_ptr: return "use_device_ptr";
+  case OMPC_uses_allocators: return "uses_allocators";
+  case OMPC_when: return "when";
+  case OMPC_write: return "write";
+  }
+  llvm_unreachable("Invalid OpenMP Clause kind");
+}
+
+CancellationConstructType llvm::omp::getCancellationConstructType(llvm::StringRef Str) {
+  // Exact, case-sensitive match of the construct-type-clause spelling.
+  // "none" and every unrecognized spelling both resolve to ..._None, so a
+  // single fall-through return covers them.
+  if (Str == "parallel")
+    return OMP_CANCELLATION_CONSTRUCT_Parallel;
+  if (Str == "loop")
+    return OMP_CANCELLATION_CONSTRUCT_Loop;
+  if (Str == "sections")
+    return OMP_CANCELLATION_CONSTRUCT_Sections;
+  if (Str == "taskgroup")
+    return OMP_CANCELLATION_CONSTRUCT_Taskgroup;
+  return OMP_CANCELLATION_CONSTRUCT_None;
+}
+
+llvm::StringRef llvm::omp::getOpenMPCancellationConstructTypeName(llvm::omp::CancellationConstructType x) {
+  // Inverse of getCancellationConstructType: enumerator -> spelling.
+  switch (x) {
+  case OMP_CANCELLATION_CONSTRUCT_Parallel: return "parallel";
+  case OMP_CANCELLATION_CONSTRUCT_Loop: return "loop";
+  case OMP_CANCELLATION_CONSTRUCT_Sections: return "sections";
+  case OMP_CANCELLATION_CONSTRUCT_Taskgroup: return "taskgroup";
+  case OMP_CANCELLATION_CONSTRUCT_None: return "none";
+  }
+  llvm_unreachable("Invalid OpenMP CancellationConstructType kind");
+}
+
+GrainsizeType llvm::omp::getGrainsizeType(llvm::StringRef Str) {
+  // GRAINSIZE modifier spelling -> enumerator.  The generator's OMP.td
+  // spells the unknown enumerator "unkonwn" (sic); additionally accept the
+  // correct spelling "unknown".  Both — like every other unrecognized
+  // string — resolve to OMP_GRAINSIZE_Unknown via the Default case, so
+  // this is purely defensive and fully backward-compatible.
+  return llvm::StringSwitch<GrainsizeType>(Str)
+    .Case("strict",OMP_GRAINSIZE_Strict)
+    .Case("unkonwn",OMP_GRAINSIZE_Unknown)
+    .Case("unknown",OMP_GRAINSIZE_Unknown)
+    .Default(OMP_GRAINSIZE_Unknown);
+}
+
+llvm::StringRef llvm::omp::getOpenMPGrainsizeTypeName(llvm::omp::GrainsizeType x) {
+  // Enumerator -> spelling for the GRAINSIZE clause modifier.
+  switch (x) {
+  case OMP_GRAINSIZE_Strict:
+    return "strict";
+  case OMP_GRAINSIZE_Unknown:
+    // NOTE(review): "unkonwn" is a typo inherited from the generator's
+    // OMP.td; kept as-is so the string round-trips through
+    // getGrainsizeType and matches regenerated output.
+    return "unkonwn";
+  }
+  llvm_unreachable("Invalid OpenMP GrainsizeType kind");
+}
+
+MemoryOrderKind llvm::omp::getMemoryOrderKind(llvm::StringRef Str) {
+  // memory-order clause spelling -> enumerator.  "default" and every
+  // unrecognized spelling both resolve to OMP_MEMORY_ORDER_Default, so one
+  // fall-through return covers them.
+  if (Str == "seq_cst")
+    return OMP_MEMORY_ORDER_SeqCst;
+  if (Str == "acq_rel")
+    return OMP_MEMORY_ORDER_AcqRel;
+  if (Str == "acquire")
+    return OMP_MEMORY_ORDER_Acquire;
+  if (Str == "release")
+    return OMP_MEMORY_ORDER_Release;
+  if (Str == "relaxed")
+    return OMP_MEMORY_ORDER_Relaxed;
+  return OMP_MEMORY_ORDER_Default;
+}
+
+llvm::StringRef llvm::omp::getOpenMPMemoryOrderKindName(llvm::omp::MemoryOrderKind x) {
+  // Inverse of getMemoryOrderKind: enumerator -> spelling.
+  switch (x) {
+  case OMP_MEMORY_ORDER_SeqCst: return "seq_cst";
+  case OMP_MEMORY_ORDER_AcqRel: return "acq_rel";
+  case OMP_MEMORY_ORDER_Acquire: return "acquire";
+  case OMP_MEMORY_ORDER_Release: return "release";
+  case OMP_MEMORY_ORDER_Relaxed: return "relaxed";
+  case OMP_MEMORY_ORDER_Default: return "default";
+  }
+  llvm_unreachable("Invalid OpenMP MemoryOrderKind kind");
+}
+
+NumTasksType llvm::omp::getNumTasksType(llvm::StringRef Str) {
+  // NUM_TASKS modifier spelling -> enumerator.  As with GrainsizeType, the
+  // generator's OMP.td spells the unknown enumerator "unkonwn" (sic);
+  // additionally accept the correct spelling "unknown".  Both resolve to
+  // OMP_NUMTASKS_Unknown via the Default case anyway, so this is purely
+  // defensive and fully backward-compatible.
+  return llvm::StringSwitch<NumTasksType>(Str)
+    .Case("strict",OMP_NUMTASKS_Strict)
+    .Case("unkonwn",OMP_NUMTASKS_Unknown)
+    .Case("unknown",OMP_NUMTASKS_Unknown)
+    .Default(OMP_NUMTASKS_Unknown);
+}
+
+llvm::StringRef llvm::omp::getOpenMPNumTasksTypeName(llvm::omp::NumTasksType x) {
+  // Enumerator -> spelling for the NUM_TASKS clause modifier.
+  switch (x) {
+  case OMP_NUMTASKS_Strict:
+    return "strict";
+  case OMP_NUMTASKS_Unknown:
+    // NOTE(review): "unkonwn" is a typo inherited from the generator's
+    // OMP.td; kept as-is so the string round-trips through
+    // getNumTasksType and matches regenerated output.
+    return "unkonwn";
+  }
+  llvm_unreachable("Invalid OpenMP NumTasksType kind");
+}
+
+OrderKind llvm::omp::getOrderKind(llvm::StringRef Str) {
+  // "concurrent" is the only recognized ORDER spelling; "unknown" and any
+  // other string resolve to OMP_ORDER_unknown.
+  return Str == "concurrent" ? OMP_ORDER_concurrent : OMP_ORDER_unknown;
+}
+
+llvm::StringRef llvm::omp::getOpenMPOrderKindName(llvm::omp::OrderKind x) {
+  // Inverse of getOrderKind: enumerator -> spelling.
+  switch (x) {
+  case OMP_ORDER_unknown: return "unknown";
+  case OMP_ORDER_concurrent: return "concurrent";
+  }
+  llvm_unreachable("Invalid OpenMP OrderKind kind");
+}
+
+ProcBindKind llvm::omp::getProcBindKind(llvm::StringRef Str) {
+  // proc_bind clause spelling -> enumerator.  "unknown" and any other
+  // unrecognized spelling both resolve to OMP_PROC_BIND_unknown.
+  if (Str == "primary")
+    return OMP_PROC_BIND_primary;
+  if (Str == "master")
+    return OMP_PROC_BIND_master;
+  if (Str == "close")
+    return OMP_PROC_BIND_close;
+  if (Str == "spread")
+    return OMP_PROC_BIND_spread;
+  if (Str == "default")
+    return OMP_PROC_BIND_default;
+  return OMP_PROC_BIND_unknown;
+}
+
+llvm::StringRef llvm::omp::getOpenMPProcBindKindName(llvm::omp::ProcBindKind x) {
+  // Inverse of getProcBindKind: enumerator -> spelling.
+  switch (x) {
+  case OMP_PROC_BIND_primary: return "primary";
+  case OMP_PROC_BIND_master: return "master";
+  case OMP_PROC_BIND_close: return "close";
+  case OMP_PROC_BIND_spread: return "spread";
+  case OMP_PROC_BIND_default: return "default";
+  case OMP_PROC_BIND_unknown: return "unknown";
+  }
+  llvm_unreachable("Invalid OpenMP ProcBindKind kind");
+}
+
+ScheduleKind llvm::omp::getScheduleKind(llvm::StringRef Str) {
+  // schedule clause kind spelling -> enumerator.  "default" and every
+  // unrecognized spelling both resolve to OMP_SCHEDULE_Default, so one
+  // fall-through return covers them.
+  if (Str == "static")
+    return OMP_SCHEDULE_Static;
+  if (Str == "dynamic")
+    return OMP_SCHEDULE_Dynamic;
+  if (Str == "guided")
+    return OMP_SCHEDULE_Guided;
+  if (Str == "auto")
+    return OMP_SCHEDULE_Auto;
+  if (Str == "runtime")
+    return OMP_SCHEDULE_Runtime;
+  return OMP_SCHEDULE_Default;
+}
+
+llvm::StringRef llvm::omp::getOpenMPScheduleKindName(llvm::omp::ScheduleKind x) {
+  // Inverse of getScheduleKind: enumerator -> spelling.
+  switch (x) {
+  case OMP_SCHEDULE_Static: return "static";
+  case OMP_SCHEDULE_Dynamic: return "dynamic";
+  case OMP_SCHEDULE_Guided: return "guided";
+  case OMP_SCHEDULE_Auto: return "auto";
+  case OMP_SCHEDULE_Runtime: return "runtime";
+  case OMP_SCHEDULE_Default: return "default";
+  }
+  llvm_unreachable("Invalid OpenMP ScheduleKind kind");
+}
+
+bool llvm::omp::isAllowedClauseForDirective(Directive D, Clause C, unsigned Version) {
+ assert(unsigned(D) <= llvm::omp::Directive_enumSize);
+ assert(unsigned(C) <= llvm::omp::Clause_enumSize);
+ switch (D) {
+ case OMPD_allocate:
+ switch (C) {
+ case OMPC_allocator:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_align:
+ return 51 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_assumes:
+ return false;
+ break;
+ case OMPD_atomic:
+ switch (C) {
+ case OMPC_read:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_write:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_update:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_capture:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_compare:
+ return 51 <= Version && 2147483647 >= Version;
+ case OMPC_seq_cst:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_acq_rel:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_acquire:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_release:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_relaxed:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_hint:
+ return 50 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_barrier:
+ return false;
+ break;
+ case OMPD_begin_assumes:
+ return false;
+ break;
+ case OMPD_begin_declare_target:
+ switch (C) {
+ case OMPC_to:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_link:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_device_type:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_indirect:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_begin_declare_variant:
+ return false;
+ break;
+ case OMPD_cancel:
+ switch (C) {
+ case OMPC_if:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_cancellation_point:
+ return false;
+ break;
+ case OMPD_critical:
+ switch (C) {
+ case OMPC_hint:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_declare_mapper:
+ switch (C) {
+ case OMPC_map:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_declare_reduction:
+ return false;
+ break;
+ case OMPD_declare_simd:
+ switch (C) {
+ case OMPC_linear:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_aligned:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_uniform:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_simdlen:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_inbranch:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_notinbranch:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_declare_target:
+ switch (C) {
+ case OMPC_to:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_link:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_indirect:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_declare_variant:
+ switch (C) {
+ case OMPC_match:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_adjust_args:
+ return 51 <= Version && 2147483647 >= Version;
+ case OMPC_append_args:
+ return 51 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_depobj:
+ switch (C) {
+ case OMPC_depend:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_destroy:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_update:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_depobj:
+ return 50 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_distribute:
+ switch (C) {
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_lastprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_collapse:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_dist_schedule:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_distribute_parallel_do:
+ switch (C) {
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_lastprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_order:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_default:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_shared:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_copyin:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_linear:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_collapse:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_dist_schedule:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_if:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_threads:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_proc_bind:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_schedule:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_ordered:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_distribute_parallel_do_simd:
+ switch (C) {
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_lastprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_collapse:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_dist_schedule:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_if:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_threads:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_default:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_proc_bind:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_shared:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_copyin:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_schedule:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_linear:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_aligned:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_safelen:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_simdlen:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nontemporal:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_order:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_distribute_parallel_for:
+ switch (C) {
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_lastprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_collapse:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_dist_schedule:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_if:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_threads:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_default:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_proc_bind:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_shared:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_copyin:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_schedule:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_order:
+ return 50 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_distribute_parallel_for_simd:
+ switch (C) {
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_lastprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_collapse:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_dist_schedule:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_if:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_threads:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_default:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_proc_bind:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_shared:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_copyin:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_schedule:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_linear:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_aligned:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_safelen:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_simdlen:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nontemporal:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_order:
+ return 50 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_distribute_simd:
+ switch (C) {
+ case OMPC_aligned:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_copyin:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_default:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_linear:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_lastprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nontemporal:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_order:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_collapse:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_dist_schedule:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_if:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_num_threads:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_ordered:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_proc_bind:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_schedule:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_safelen:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_simdlen:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_do:
+ switch (C) {
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_lastprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_linear:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_schedule:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_collapse:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_ordered:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nowait:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_do_simd:
+ switch (C) {
+ case OMPC_aligned:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_lastprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_linear:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_schedule:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_collapse:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_ordered:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_safelen:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_simdlen:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nowait:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_end_assumes:
+ return false;
+ break;
+ case OMPD_end_declare_target:
+ return false;
+ break;
+ case OMPD_end_declare_variant:
+ return false;
+ break;
+ case OMPD_end_do:
+ return false;
+ break;
+ case OMPD_end_do_simd:
+ return false;
+ break;
+ case OMPD_end_sections:
+ switch (C) {
+ case OMPC_nowait:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_end_single:
+ switch (C) {
+ case OMPC_copyprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nowait:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_end_workshare:
+ switch (C) {
+ case OMPC_nowait:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_error:
+ switch (C) {
+ case OMPC_at:
+ return 51 <= Version && 2147483647 >= Version;
+ case OMPC_severity:
+ return 51 <= Version && 2147483647 >= Version;
+ case OMPC_message:
+ return 51 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_flush:
+ switch (C) {
+ case OMPC_acq_rel:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_acquire:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_release:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_flush:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_for:
+ switch (C) {
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_lastprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_collapse:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_schedule:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_ordered:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nowait:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_linear:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_order:
+ return 50 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_for_simd:
+ switch (C) {
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_lastprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_schedule:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_collapse:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nowait:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_safelen:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_simdlen:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_linear:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_aligned:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_ordered:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_if:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_nontemporal:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_order:
+ return 50 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_masked_taskloop:
+ switch (C) {
+ case OMPC_if:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_shared:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_lastprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_default:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_collapse:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_final:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_untied:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_mergeable:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_priority:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_grainsize:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nogroup:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_tasks:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_in_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_filter:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_masked_taskloop_simd:
+ switch (C) {
+ case OMPC_if:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_shared:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_lastprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_default:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_collapse:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_final:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_untied:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_mergeable:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_priority:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_linear:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_aligned:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_safelen:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_simdlen:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_grainsize:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nogroup:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_tasks:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_in_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nontemporal:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_order:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_filter:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_master:
+ return false;
+ break;
+ case OMPD_master_taskloop:
+ switch (C) {
+ case OMPC_if:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_shared:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_lastprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_default:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_collapse:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_final:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_untied:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_mergeable:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_priority:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_grainsize:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nogroup:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_tasks:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_in_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_master_taskloop_simd:
+ switch (C) {
+ case OMPC_if:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_shared:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_lastprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_default:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_collapse:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_final:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_untied:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_mergeable:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_priority:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_linear:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_aligned:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_safelen:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_simdlen:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_grainsize:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nogroup:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_tasks:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_in_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nontemporal:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_order:
+ return 50 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_metadirective:
+ switch (C) {
+ case OMPC_when:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_default:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_nothing:
+ return false;
+ break;
+ case OMPD_ordered:
+ switch (C) {
+ case OMPC_depend:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_threads:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_simd:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_parallel:
+ switch (C) {
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_shared:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_copyin:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_default:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_if:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_threads:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_proc_bind:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_parallel_do:
+ switch (C) {
+ case OMPC_default:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_shared:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_copyin:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_lastprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_linear:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_if:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_threads:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_proc_bind:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_schedule:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_ordered:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_collapse:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_parallel_do_simd:
+ switch (C) {
+ case OMPC_default:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_shared:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_copyin:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_lastprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_linear:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_aligned:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nontemporal:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_order:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_if:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_threads:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_proc_bind:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_schedule:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_ordered:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_collapse:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_safelen:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_simdlen:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_parallel_for:
+ switch (C) {
+ case OMPC_if:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_threads:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_default:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_proc_bind:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_shared:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_copyin:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_lastprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_collapse:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_schedule:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_ordered:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_linear:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_order:
+ return 50 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_parallel_for_simd:
+ switch (C) {
+ case OMPC_if:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_threads:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_default:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_proc_bind:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_shared:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_copyin:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_lastprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_collapse:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_schedule:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_safelen:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_simdlen:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_linear:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_aligned:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_ordered:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nontemporal:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_order:
+ return 50 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_parallel_masked:
+ switch (C) {
+ case OMPC_if:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_threads:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_default:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_shared:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_copyin:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_proc_bind:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_filter:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_parallel_masked_taskloop:
+ switch (C) {
+ case OMPC_if:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_shared:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_lastprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_default:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_collapse:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_final:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_untied:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_mergeable:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_priority:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_grainsize:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nogroup:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_tasks:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_threads:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_proc_bind:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_copyin:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_filter:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_parallel_masked_taskloop_simd:
+ switch (C) {
+ case OMPC_if:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_shared:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_lastprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_default:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_collapse:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_final:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_untied:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_mergeable:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_priority:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_grainsize:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nogroup:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_tasks:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_threads:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_proc_bind:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_copyin:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_linear:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_aligned:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_safelen:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_simdlen:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nontemporal:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_order:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_filter:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_parallel_master:
+ switch (C) {
+ case OMPC_if:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_threads:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_default:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_shared:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_copyin:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_proc_bind:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_parallel_master_taskloop:
+ switch (C) {
+ case OMPC_if:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_shared:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_lastprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_default:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_collapse:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_final:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_untied:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_mergeable:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_priority:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_grainsize:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nogroup:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_tasks:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_threads:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_proc_bind:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_copyin:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_parallel_master_taskloop_simd:
+ switch (C) {
+ case OMPC_if:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_shared:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_lastprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_default:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_collapse:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_final:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_untied:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_mergeable:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_priority:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_grainsize:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nogroup:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_tasks:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_threads:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_proc_bind:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_copyin:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_linear:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_aligned:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_safelen:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_simdlen:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nontemporal:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_order:
+ return 50 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_parallel_sections:
+ switch (C) {
+ case OMPC_if:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_default:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_proc_bind:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_shared:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_copyin:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_lastprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_threads:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_parallel_workshare:
+ switch (C) {
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_copyin:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_default:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_shared:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_if:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_threads:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_proc_bind:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_requires:
+ switch (C) {
+ case OMPC_unified_address:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_unified_shared_memory:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_reverse_offload:
+ return 99 <= Version && 2147483647 >= Version;
+ case OMPC_dynamic_allocators:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_atomic_default_mem_order:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_scan:
+ switch (C) {
+ case OMPC_inclusive:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_exclusive:
+ return 50 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_section:
+ return false;
+ break;
+ case OMPD_sections:
+ switch (C) {
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_lastprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nowait:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_simd:
+ switch (C) {
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_lastprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_linear:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_aligned:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nontemporal:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_order:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_collapse:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_safelen:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_simdlen:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_if:
+ return 50 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_single:
+ switch (C) {
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_copyprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nowait:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_target:
+ switch (C) {
+ case OMPC_if:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_map:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_depend:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_is_device_ptr:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_has_device_addr:
+ return 51 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_in_reduction:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_uses_allocators:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_device:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_thread_limit:
+ return 51 <= Version && 2147483647 >= Version;
+ case OMPC_defaultmap:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nowait:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_ompx_dyn_cgroup_mem:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_target_data:
+ switch (C) {
+ case OMPC_use_device_ptr:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_use_device_addr:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_device:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_if:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_map:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_target_enter_data:
+ switch (C) {
+ case OMPC_depend:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_if:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_device:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nowait:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_map:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_target_exit_data:
+ switch (C) {
+ case OMPC_depend:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_device:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_if:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nowait:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_map:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_target_parallel:
+ switch (C) {
+ case OMPC_map:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nowait:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_depend:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_default:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_shared:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_is_device_ptr:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_has_device_addr:
+ return 51 <= Version && 2147483647 >= Version;
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_uses_allocators:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_defaultmap:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_device:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_if:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_threads:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_proc_bind:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_ompx_dyn_cgroup_mem:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_target_parallel_do:
+ switch (C) {
+ case OMPC_map:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_lastprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_depend:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_shared:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_linear:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_is_device_ptr:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_has_device_addr:
+ return 51 <= Version && 2147483647 >= Version;
+ case OMPC_allocator:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_order:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_uses_allocators:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_default:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_copyin:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_if:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_threads:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_proc_bind:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_device:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_defaultmap:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_schedule:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_collapse:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_ordered:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nowait:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_target_parallel_do_simd:
+ switch (C) {
+ case OMPC_if:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_device:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_map:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_lastprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nowait:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_depend:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_defaultmap:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_threads:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_default:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_proc_bind:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_shared:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_collapse:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_schedule:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_ordered:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_linear:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_safelen:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_simdlen:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_aligned:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_is_device_ptr:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_has_device_addr:
+ return 51 <= Version && 2147483647 >= Version;
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nontemporal:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_order:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_uses_allocators:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_target_parallel_for:
+ switch (C) {
+ case OMPC_if:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_device:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_map:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_lastprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nowait:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_depend:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_defaultmap:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_threads:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_default:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_proc_bind:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_shared:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_collapse:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_schedule:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_ordered:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_linear:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_is_device_ptr:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_has_device_addr:
+ return 51 <= Version && 2147483647 >= Version;
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_order:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_uses_allocators:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_ompx_dyn_cgroup_mem:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_target_parallel_for_simd:
+ switch (C) {
+ case OMPC_if:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_device:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_map:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_lastprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nowait:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_depend:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_defaultmap:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_threads:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_default:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_proc_bind:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_shared:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_collapse:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_schedule:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_ordered:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_linear:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_safelen:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_simdlen:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_aligned:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_is_device_ptr:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_has_device_addr:
+ return 51 <= Version && 2147483647 >= Version;
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nontemporal:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_order:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_uses_allocators:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_ompx_dyn_cgroup_mem:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_target_simd:
+ switch (C) {
+ case OMPC_aligned:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_depend:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_is_device_ptr:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_has_device_addr:
+ return 51 <= Version && 2147483647 >= Version;
+ case OMPC_lastprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_linear:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_map:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nontemporal:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_nowait:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_order:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_shared:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_uses_allocators:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_collapse:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_safelen:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_simdlen:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_if:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_threads:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_proc_bind:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_device:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_defaultmap:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_schedule:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_ompx_dyn_cgroup_mem:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_target_teams:
+ switch (C) {
+ case OMPC_if:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_map:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_depend:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_is_device_ptr:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_has_device_addr:
+ return 51 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_uses_allocators:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_shared:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_device:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nowait:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_defaultmap:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_default:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_teams:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_thread_limit:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_ompx_dyn_cgroup_mem:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_target_teams_distribute:
+ switch (C) {
+ case OMPC_if:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_map:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_depend:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_is_device_ptr:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_has_device_addr:
+ return 51 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_uses_allocators:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_shared:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_lastprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_device:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nowait:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_defaultmap:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_default:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_teams:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_thread_limit:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_collapse:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_dist_schedule:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_ompx_dyn_cgroup_mem:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_target_teams_distribute_parallel_do:
+ switch (C) {
+ case OMPC_if:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_map:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_depend:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_is_device_ptr:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_has_device_addr:
+ return 51 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_uses_allocators:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_shared:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_lastprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_copyin:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_linear:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_ordered:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_order:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_device:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_defaultmap:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nowait:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_default:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_teams:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_thread_limit:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_collapse:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_dist_schedule:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_threads:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_proc_bind:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_schedule:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_target_teams_distribute_parallel_do_simd:
+ switch (C) {
+ case OMPC_map:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_depend:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_is_device_ptr:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_has_device_addr:
+ return 51 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_uses_allocators:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_shared:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_lastprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_copyin:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_linear:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_ordered:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_order:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_aligned:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nontemporal:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_if:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_device:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nowait:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_defaultmap:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_default:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_teams:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_thread_limit:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_collapse:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_dist_schedule:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_threads:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_proc_bind:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_schedule:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_safelen:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_simdlen:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_target_teams_distribute_parallel_for:
+ switch (C) {
+ case OMPC_if:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_device:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_map:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nowait:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_depend:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_defaultmap:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_is_device_ptr:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_has_device_addr:
+ return 51 <= Version && 2147483647 >= Version;
+ case OMPC_default:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_shared:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_teams:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_thread_limit:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_lastprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_collapse:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_dist_schedule:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_threads:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_proc_bind:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_schedule:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_order:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_uses_allocators:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_ompx_dyn_cgroup_mem:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_target_teams_distribute_parallel_for_simd:
+ switch (C) {
+ case OMPC_if:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_device:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_map:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nowait:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_depend:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_defaultmap:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_is_device_ptr:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_has_device_addr:
+ return 51 <= Version && 2147483647 >= Version;
+ case OMPC_default:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_shared:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_teams:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_thread_limit:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_lastprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_collapse:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_dist_schedule:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_threads:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_proc_bind:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_schedule:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_linear:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_aligned:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_safelen:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_simdlen:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nontemporal:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_order:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_uses_allocators:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_ompx_dyn_cgroup_mem:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_target_teams_distribute_simd:
+ switch (C) {
+ case OMPC_aligned:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_depend:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_if:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_is_device_ptr:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_has_device_addr:
+ return 51 <= Version && 2147483647 >= Version;
+ case OMPC_lastprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_linear:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_map:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nontemporal:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_order:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_shared:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_uses_allocators:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_device:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_defaultmap:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nowait:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_teams:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_thread_limit:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_collapse:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_dist_schedule:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_safelen:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_simdlen:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_ompx_dyn_cgroup_mem:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_target_update:
+ switch (C) {
+ case OMPC_to:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_from:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_depend:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_device:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_if:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nowait:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_task:
+ switch (C) {
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_shared:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_untied:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_mergeable:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_depend:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_in_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_detach:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_affinity:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_default:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_if:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_final:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_priority:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_taskgroup:
+ switch (C) {
+ case OMPC_task_reduction:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_allocate:
+ return 50 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_taskloop:
+ switch (C) {
+ case OMPC_shared:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_lastprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_untied:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_mergeable:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nogroup:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_in_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_default:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_if:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_collapse:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_final:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_priority:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_grainsize:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_tasks:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_taskloop_simd:
+ switch (C) {
+ case OMPC_aligned:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_default:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_in_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_lastprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_linear:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_mergeable:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nogroup:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nontemporal:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_order:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_shared:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_untied:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_if:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_collapse:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_safelen:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_simdlen:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_final:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_priority:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_grainsize:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_tasks:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_taskwait:
+ switch (C) {
+ case OMPC_depend:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_nowait:
+ return 51 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_taskyield:
+ return false;
+ break;
+ case OMPD_teams:
+ switch (C) {
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_shared:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_default:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_teams:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_thread_limit:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_teams_distribute:
+ switch (C) {
+ case OMPC_default:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_shared:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_teams:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_thread_limit:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_lastprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_collapse:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_dist_schedule:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_teams_distribute_parallel_do:
+ switch (C) {
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_lastprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_shared:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_copyin:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_linear:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_teams:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_thread_limit:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_default:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_collapse:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_dist_schedule:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_ordered:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_order:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_if:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_threads:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_proc_bind:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_schedule:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_teams_distribute_parallel_do_simd:
+ switch (C) {
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_lastprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_shared:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_linear:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_order:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_aligned:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nontemporal:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_default:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_teams:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_thread_limit:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_collapse:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_dist_schedule:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_threads:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_proc_bind:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_schedule:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_safelen:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_simdlen:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_if:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_teams_distribute_parallel_for:
+ switch (C) {
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_lastprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_collapse:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_dist_schedule:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_if:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_threads:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_default:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_proc_bind:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_shared:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_schedule:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_teams:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_thread_limit:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_copyin:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_order:
+ return 50 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_teams_distribute_parallel_for_simd:
+ switch (C) {
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_lastprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_collapse:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_dist_schedule:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_if:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_threads:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_default:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_proc_bind:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_shared:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_schedule:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_linear:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_aligned:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_safelen:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_simdlen:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_teams:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_thread_limit:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nontemporal:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_order:
+ return 50 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_teams_distribute_simd:
+ switch (C) {
+ case OMPC_aligned:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_lastprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_linear:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nontemporal:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_order:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_shared:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_collapse:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_default:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_dist_schedule:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_if:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_num_teams:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_safelen:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_simdlen:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_thread_limit:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_threadprivate:
+ return false;
+ break;
+ case OMPD_tile:
+ switch (C) {
+ case OMPC_sizes:
+ return 51 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_unknown:
+ return false;
+ break;
+ case OMPD_unroll:
+ switch (C) {
+ case OMPC_full:
+ return 51 <= Version && 2147483647 >= Version;
+ case OMPC_partial:
+ return 51 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_workshare:
+ return false;
+ break;
+ case OMPD_dispatch:
+ switch (C) {
+ case OMPC_device:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_is_device_ptr:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_has_device_addr:
+ return 51 <= Version && 2147483647 >= Version;
+ case OMPC_nowait:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_depend:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_novariants:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nocontext:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_interop:
+ switch (C) {
+ case OMPC_device:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_depend:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_destroy:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_init:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nowait:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_use:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_loop:
+ switch (C) {
+ case OMPC_lastprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_bind:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_collapse:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_order:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_masked:
+ switch (C) {
+ case OMPC_filter:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_parallel_loop:
+ switch (C) {
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_copyin:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_lastprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_shared:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_bind:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_collapse:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_default:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_if:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_threads:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_order:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_proc_bind:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_target_parallel_loop:
+ switch (C) {
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_copyin:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_depend:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_device:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_is_device_ptr:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_has_device_addr:
+ return 51 <= Version && 2147483647 >= Version;
+ case OMPC_lastprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_map:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_shared:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_uses_allocators:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_bind:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_collapse:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_default:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_defaultmap:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_if:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nowait:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_threads:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_order:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_proc_bind:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_ompx_dyn_cgroup_mem:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_target_teams_loop:
+ switch (C) {
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_depend:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_defaultmap:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_device:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_is_device_ptr:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_has_device_addr:
+ return 51 <= Version && 2147483647 >= Version;
+ case OMPC_lastprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_map:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_shared:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_uses_allocators:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_bind:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_collapse:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_default:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_if:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_nowait:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_teams:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_order:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_thread_limit:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_ompx_dyn_cgroup_mem:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ case OMPD_teams_loop:
+ switch (C) {
+ case OMPC_allocate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_firstprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_lastprivate:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_private:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_reduction:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_shared:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_bind:
+ return 50 <= Version && 2147483647 >= Version;
+ case OMPC_collapse:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_default:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_num_teams:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_order:
+ return 1 <= Version && 2147483647 >= Version;
+ case OMPC_thread_limit:
+ return 1 <= Version && 2147483647 >= Version;
+ default:
+ return false;
+ }
+ break;
+ }
+ llvm_unreachable("Invalid OpenMP Directive kind");
+}
+
+#endif // GEN_DIRECTIVES_IMPL
+
diff --git a/contrib/libs/llvm16/include/llvm/FuzzMutate/FuzzerCLI.h b/contrib/libs/llvm16/include/llvm/FuzzMutate/FuzzerCLI.h
new file mode 100644
index 0000000000..15aa11cc37
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/FuzzMutate/FuzzerCLI.h
@@ -0,0 +1,68 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- FuzzerCLI.h - Common logic for CLIs of fuzzers ----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Common logic needed to implement LLVM's fuzz targets' CLIs - including LLVM
+// concepts like cl::opt and libFuzzer concepts like -ignore_remaining_args=1.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_FUZZMUTATE_FUZZERCLI_H
+#define LLVM_FUZZMUTATE_FUZZERCLI_H
+
+#include "llvm/Support/DataTypes.h"
+#include <stddef.h>
+
+namespace llvm {
+
+class StringRef;
+
+/// Parse cl::opts from a fuzz target commandline.
+///
+/// This handles all arguments after -ignore_remaining_args=1 as cl::opts.
+void parseFuzzerCLOpts(int ArgC, char *ArgV[]);
+
+/// Handle backend options that are encoded in the executable name.
+///
+/// Parses some common backend options out of a specially crafted executable
+/// name (argv[0]). For example, a name like llvm-foo-fuzzer--aarch64-gisel
+/// might set up an AArch64 triple and the Global ISel selector. This should be
+/// called *before* parseFuzzerCLOpts if calling both.
+///
+/// This is meant to be used for environments like OSS-Fuzz that aren't capable
+/// of passing in command line arguments in the normal way.
+void handleExecNameEncodedBEOpts(StringRef ExecName);
+
+/// Handle optimizer options which are encoded in the executable name.
+/// Same semantics as in 'handleExecNameEncodedBEOpts'.
+void handleExecNameEncodedOptimizerOpts(StringRef ExecName);
+
+using FuzzerTestFun = int (*)(const uint8_t *Data, size_t Size);
+using FuzzerInitFun = int (*)(int *argc, char ***argv);
+
+/// Runs a fuzz target on the inputs specified on the command line.
+///
+/// Useful for testing fuzz targets without linking to libFuzzer. Finds inputs
+/// in the argument list in a libFuzzer compatible way.
+int runFuzzerOnInputs(
+ int ArgC, char *ArgV[], FuzzerTestFun TestOne,
+ FuzzerInitFun Init = [](int *, char ***) { return 0; });
+
+} // namespace llvm
+
+#endif // LLVM_FUZZMUTATE_FUZZERCLI_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/Support/AMDHSAKernelDescriptor.h b/contrib/libs/llvm16/include/llvm/Support/AMDHSAKernelDescriptor.h
new file mode 100644
index 0000000000..b5eca2dd70
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/Support/AMDHSAKernelDescriptor.h
@@ -0,0 +1,246 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===--- AMDHSAKernelDescriptor.h -----------------------------*- C++ -*---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// AMDHSA kernel descriptor definitions. For more information, visit
+/// https://llvm.org/docs/AMDGPUUsage.html#kernel-descriptor
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_AMDHSAKERNELDESCRIPTOR_H
+#define LLVM_SUPPORT_AMDHSAKERNELDESCRIPTOR_H
+
+#include <cstddef>
+#include <cstdint>
+
+// Gets offset of specified member in specified type.
+#ifndef offsetof
+#define offsetof(TYPE, MEMBER) ((size_t)&((TYPE*)0)->MEMBER)
+#endif // offsetof
+
+// Creates enumeration entries used for packing bits into integers. Enumeration
+// entries include bit shift amount, bit width, and bit mask.
+#ifndef AMDHSA_BITS_ENUM_ENTRY
+#define AMDHSA_BITS_ENUM_ENTRY(NAME, SHIFT, WIDTH) \
+ NAME ## _SHIFT = (SHIFT), \
+ NAME ## _WIDTH = (WIDTH), \
+ NAME = (((1 << (WIDTH)) - 1) << (SHIFT))
+#endif // AMDHSA_BITS_ENUM_ENTRY
+
+// Gets bits for specified bit mask from specified source.
+#ifndef AMDHSA_BITS_GET
+#define AMDHSA_BITS_GET(SRC, MSK) ((SRC & MSK) >> MSK ## _SHIFT)
+#endif // AMDHSA_BITS_GET
+
+// Sets bits for specified bit mask in specified destination.
+#ifndef AMDHSA_BITS_SET
+#define AMDHSA_BITS_SET(DST, MSK, VAL) \
+ DST &= ~MSK; \
+ DST |= ((VAL << MSK ## _SHIFT) & MSK)
+#endif // AMDHSA_BITS_SET
+
+namespace llvm {
+namespace amdhsa {
+
+// Floating point rounding modes. Must match hardware definition.
+enum : uint8_t {
+ FLOAT_ROUND_MODE_NEAR_EVEN = 0,
+ FLOAT_ROUND_MODE_PLUS_INFINITY = 1,
+ FLOAT_ROUND_MODE_MINUS_INFINITY = 2,
+ FLOAT_ROUND_MODE_ZERO = 3,
+};
+
+// Floating point denorm modes. Must match hardware definition.
+enum : uint8_t {
+ FLOAT_DENORM_MODE_FLUSH_SRC_DST = 0,
+ FLOAT_DENORM_MODE_FLUSH_DST = 1,
+ FLOAT_DENORM_MODE_FLUSH_SRC = 2,
+ FLOAT_DENORM_MODE_FLUSH_NONE = 3,
+};
+
+// System VGPR workitem IDs. Must match hardware definition.
+enum : uint8_t {
+ SYSTEM_VGPR_WORKITEM_ID_X = 0,
+ SYSTEM_VGPR_WORKITEM_ID_X_Y = 1,
+ SYSTEM_VGPR_WORKITEM_ID_X_Y_Z = 2,
+ SYSTEM_VGPR_WORKITEM_ID_UNDEFINED = 3,
+};
+
+// Compute program resource register 1. Must match hardware definition.
+#define COMPUTE_PGM_RSRC1(NAME, SHIFT, WIDTH) \
+ AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC1_ ## NAME, SHIFT, WIDTH)
+enum : int32_t {
+ COMPUTE_PGM_RSRC1(GRANULATED_WORKITEM_VGPR_COUNT, 0, 6),
+ COMPUTE_PGM_RSRC1(GRANULATED_WAVEFRONT_SGPR_COUNT, 6, 4),
+ COMPUTE_PGM_RSRC1(PRIORITY, 10, 2),
+ COMPUTE_PGM_RSRC1(FLOAT_ROUND_MODE_32, 12, 2),
+ COMPUTE_PGM_RSRC1(FLOAT_ROUND_MODE_16_64, 14, 2),
+ COMPUTE_PGM_RSRC1(FLOAT_DENORM_MODE_32, 16, 2),
+ COMPUTE_PGM_RSRC1(FLOAT_DENORM_MODE_16_64, 18, 2),
+ COMPUTE_PGM_RSRC1(PRIV, 20, 1),
+ COMPUTE_PGM_RSRC1(ENABLE_DX10_CLAMP, 21, 1),
+ COMPUTE_PGM_RSRC1(DEBUG_MODE, 22, 1),
+ COMPUTE_PGM_RSRC1(ENABLE_IEEE_MODE, 23, 1),
+ COMPUTE_PGM_RSRC1(BULKY, 24, 1),
+ COMPUTE_PGM_RSRC1(CDBG_USER, 25, 1),
+ COMPUTE_PGM_RSRC1(FP16_OVFL, 26, 1), // GFX9+
+ COMPUTE_PGM_RSRC1(RESERVED0, 27, 2),
+ COMPUTE_PGM_RSRC1(WGP_MODE, 29, 1), // GFX10+
+ COMPUTE_PGM_RSRC1(MEM_ORDERED, 30, 1), // GFX10+
+ COMPUTE_PGM_RSRC1(FWD_PROGRESS, 31, 1), // GFX10+
+};
+#undef COMPUTE_PGM_RSRC1
+
+// Compute program resource register 2. Must match hardware definition.
+#define COMPUTE_PGM_RSRC2(NAME, SHIFT, WIDTH) \
+ AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC2_ ## NAME, SHIFT, WIDTH)
+enum : int32_t {
+ COMPUTE_PGM_RSRC2(ENABLE_PRIVATE_SEGMENT, 0, 1),
+ COMPUTE_PGM_RSRC2(USER_SGPR_COUNT, 1, 5),
+ COMPUTE_PGM_RSRC2(ENABLE_TRAP_HANDLER, 6, 1),
+ COMPUTE_PGM_RSRC2(ENABLE_SGPR_WORKGROUP_ID_X, 7, 1),
+ COMPUTE_PGM_RSRC2(ENABLE_SGPR_WORKGROUP_ID_Y, 8, 1),
+ COMPUTE_PGM_RSRC2(ENABLE_SGPR_WORKGROUP_ID_Z, 9, 1),
+ COMPUTE_PGM_RSRC2(ENABLE_SGPR_WORKGROUP_INFO, 10, 1),
+ COMPUTE_PGM_RSRC2(ENABLE_VGPR_WORKITEM_ID, 11, 2),
+ COMPUTE_PGM_RSRC2(ENABLE_EXCEPTION_ADDRESS_WATCH, 13, 1),
+ COMPUTE_PGM_RSRC2(ENABLE_EXCEPTION_MEMORY, 14, 1),
+ COMPUTE_PGM_RSRC2(GRANULATED_LDS_SIZE, 15, 9),
+ COMPUTE_PGM_RSRC2(ENABLE_EXCEPTION_IEEE_754_FP_INVALID_OPERATION, 24, 1),
+ COMPUTE_PGM_RSRC2(ENABLE_EXCEPTION_FP_DENORMAL_SOURCE, 25, 1),
+ COMPUTE_PGM_RSRC2(ENABLE_EXCEPTION_IEEE_754_FP_DIVISION_BY_ZERO, 26, 1),
+ COMPUTE_PGM_RSRC2(ENABLE_EXCEPTION_IEEE_754_FP_OVERFLOW, 27, 1),
+ COMPUTE_PGM_RSRC2(ENABLE_EXCEPTION_IEEE_754_FP_UNDERFLOW, 28, 1),
+ COMPUTE_PGM_RSRC2(ENABLE_EXCEPTION_IEEE_754_FP_INEXACT, 29, 1),
+ COMPUTE_PGM_RSRC2(ENABLE_EXCEPTION_INT_DIVIDE_BY_ZERO, 30, 1),
+ COMPUTE_PGM_RSRC2(RESERVED0, 31, 1),
+};
+#undef COMPUTE_PGM_RSRC2
+
+// Compute program resource register 3 for GFX90A+. Must match hardware
+// definition.
+#define COMPUTE_PGM_RSRC3_GFX90A(NAME, SHIFT, WIDTH) \
+ AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC3_GFX90A_ ## NAME, SHIFT, WIDTH)
+enum : int32_t {
+ COMPUTE_PGM_RSRC3_GFX90A(ACCUM_OFFSET, 0, 6),
+ COMPUTE_PGM_RSRC3_GFX90A(RESERVED0, 6, 10),
+ COMPUTE_PGM_RSRC3_GFX90A(TG_SPLIT, 16, 1),
+ COMPUTE_PGM_RSRC3_GFX90A(RESERVED1, 17, 15),
+};
+#undef COMPUTE_PGM_RSRC3_GFX90A
+
+// Compute program resource register 3 for GFX10+. Must match hardware
+// definition.
+#define COMPUTE_PGM_RSRC3_GFX10_PLUS(NAME, SHIFT, WIDTH) \
+ AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC3_GFX10_PLUS_ ## NAME, SHIFT, WIDTH)
+enum : int32_t {
+ COMPUTE_PGM_RSRC3_GFX10_PLUS(SHARED_VGPR_COUNT, 0, 4), // GFX10+
+ COMPUTE_PGM_RSRC3_GFX10_PLUS(INST_PREF_SIZE, 4, 6), // GFX11+
+ COMPUTE_PGM_RSRC3_GFX10_PLUS(TRAP_ON_START, 10, 1), // GFX11+
+ COMPUTE_PGM_RSRC3_GFX10_PLUS(TRAP_ON_END, 11, 1), // GFX11+
+ COMPUTE_PGM_RSRC3_GFX10_PLUS(RESERVED0, 12, 19),
+ COMPUTE_PGM_RSRC3_GFX10_PLUS(IMAGE_OP, 31, 1), // GFX11+
+};
+#undef COMPUTE_PGM_RSRC3_GFX10_PLUS
+
+// Kernel code properties. Must be kept backwards compatible.
+#define KERNEL_CODE_PROPERTY(NAME, SHIFT, WIDTH) \
+ AMDHSA_BITS_ENUM_ENTRY(KERNEL_CODE_PROPERTY_ ## NAME, SHIFT, WIDTH)
+enum : int32_t {
+ KERNEL_CODE_PROPERTY(ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER, 0, 1),
+ KERNEL_CODE_PROPERTY(ENABLE_SGPR_DISPATCH_PTR, 1, 1),
+ KERNEL_CODE_PROPERTY(ENABLE_SGPR_QUEUE_PTR, 2, 1),
+ KERNEL_CODE_PROPERTY(ENABLE_SGPR_KERNARG_SEGMENT_PTR, 3, 1),
+ KERNEL_CODE_PROPERTY(ENABLE_SGPR_DISPATCH_ID, 4, 1),
+ KERNEL_CODE_PROPERTY(ENABLE_SGPR_FLAT_SCRATCH_INIT, 5, 1),
+ KERNEL_CODE_PROPERTY(ENABLE_SGPR_PRIVATE_SEGMENT_SIZE, 6, 1),
+ KERNEL_CODE_PROPERTY(RESERVED0, 7, 3),
+ KERNEL_CODE_PROPERTY(ENABLE_WAVEFRONT_SIZE32, 10, 1), // GFX10+
+ KERNEL_CODE_PROPERTY(USES_DYNAMIC_STACK, 11, 1),
+ KERNEL_CODE_PROPERTY(RESERVED1, 12, 4),
+};
+#undef KERNEL_CODE_PROPERTY
+
+// Kernel descriptor. Must be kept backwards compatible.
+struct kernel_descriptor_t {
+ uint32_t group_segment_fixed_size;
+ uint32_t private_segment_fixed_size;
+ uint32_t kernarg_size;
+ uint8_t reserved0[4];
+ int64_t kernel_code_entry_byte_offset;
+ uint8_t reserved1[20];
+ uint32_t compute_pgm_rsrc3; // GFX10+ and GFX90A+
+ uint32_t compute_pgm_rsrc1;
+ uint32_t compute_pgm_rsrc2;
+ uint16_t kernel_code_properties;
+ uint8_t reserved2[6];
+};
+
+enum : uint32_t {
+ GROUP_SEGMENT_FIXED_SIZE_OFFSET = 0,
+ PRIVATE_SEGMENT_FIXED_SIZE_OFFSET = 4,
+ KERNARG_SIZE_OFFSET = 8,
+ RESERVED0_OFFSET = 12,
+ KERNEL_CODE_ENTRY_BYTE_OFFSET_OFFSET = 16,
+ RESERVED1_OFFSET = 24,
+ COMPUTE_PGM_RSRC3_OFFSET = 44,
+ COMPUTE_PGM_RSRC1_OFFSET = 48,
+ COMPUTE_PGM_RSRC2_OFFSET = 52,
+ KERNEL_CODE_PROPERTIES_OFFSET = 56,
+ RESERVED2_OFFSET = 58,
+};
+
+static_assert(
+ sizeof(kernel_descriptor_t) == 64,
+ "invalid size for kernel_descriptor_t");
+static_assert(offsetof(kernel_descriptor_t, group_segment_fixed_size) ==
+ GROUP_SEGMENT_FIXED_SIZE_OFFSET,
+ "invalid offset for group_segment_fixed_size");
+static_assert(offsetof(kernel_descriptor_t, private_segment_fixed_size) ==
+ PRIVATE_SEGMENT_FIXED_SIZE_OFFSET,
+ "invalid offset for private_segment_fixed_size");
+static_assert(offsetof(kernel_descriptor_t, kernarg_size) ==
+ KERNARG_SIZE_OFFSET,
+ "invalid offset for kernarg_size");
+static_assert(offsetof(kernel_descriptor_t, reserved0) == RESERVED0_OFFSET,
+ "invalid offset for reserved0");
+static_assert(offsetof(kernel_descriptor_t, kernel_code_entry_byte_offset) ==
+ KERNEL_CODE_ENTRY_BYTE_OFFSET_OFFSET,
+ "invalid offset for kernel_code_entry_byte_offset");
+static_assert(offsetof(kernel_descriptor_t, reserved1) == RESERVED1_OFFSET,
+ "invalid offset for reserved1");
+static_assert(offsetof(kernel_descriptor_t, compute_pgm_rsrc3) ==
+ COMPUTE_PGM_RSRC3_OFFSET,
+ "invalid offset for compute_pgm_rsrc3");
+static_assert(offsetof(kernel_descriptor_t, compute_pgm_rsrc1) ==
+ COMPUTE_PGM_RSRC1_OFFSET,
+ "invalid offset for compute_pgm_rsrc1");
+static_assert(offsetof(kernel_descriptor_t, compute_pgm_rsrc2) ==
+ COMPUTE_PGM_RSRC2_OFFSET,
+ "invalid offset for compute_pgm_rsrc2");
+static_assert(offsetof(kernel_descriptor_t, kernel_code_properties) ==
+ KERNEL_CODE_PROPERTIES_OFFSET,
+ "invalid offset for kernel_code_properties");
+static_assert(offsetof(kernel_descriptor_t, reserved2) == RESERVED2_OFFSET,
+ "invalid offset for reserved2");
+
+} // end namespace amdhsa
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_AMDHSAKERNELDESCRIPTOR_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/Support/Solaris/sys/regset.h b/contrib/libs/llvm16/include/llvm/Support/Solaris/sys/regset.h
new file mode 100644
index 0000000000..dd564f5d14
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/Support/Solaris/sys/regset.h
@@ -0,0 +1,49 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+/*===- llvm/Support/Solaris/sys/regset.h ------------------------*- C++ -*-===*
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===----------------------------------------------------------------------===*
+ *
+ * This file works around excessive name space pollution from the system header
+ * on Solaris hosts.
+ *
+ *===----------------------------------------------------------------------===*/
+
+#ifndef LLVM_SUPPORT_SOLARIS_SYS_REGSET_H
+
+#include_next <sys/regset.h>
+
+#undef CS
+#undef DS
+#undef ES
+#undef FS
+#undef GS
+#undef SS
+#undef EAX
+#undef ECX
+#undef EDX
+#undef EBX
+#undef ESP
+#undef EBP
+#undef ESI
+#undef EDI
+#undef EIP
+#undef UESP
+#undef EFL
+#undef ERR
+#undef TRAPNO
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/Support/TaskQueue.h b/contrib/libs/llvm16/include/llvm/Support/TaskQueue.h
new file mode 100644
index 0000000000..152aeceb25
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/Support/TaskQueue.h
@@ -0,0 +1,149 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- llvm/Support/TaskQueue.h - A TaskQueue implementation ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a crude C++11 based task queue.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_TASKQUEUE_H
+#define LLVM_SUPPORT_TASKQUEUE_H
+
+#include "llvm/Config/llvm-config.h"
+#include "llvm/Support/ThreadPool.h"
+#include "llvm/Support/thread.h"
+
+#include <atomic>
+#include <cassert>
+#include <condition_variable>
+#include <deque>
+#include <functional>
+#include <future>
+#include <memory>
+#include <mutex>
+#include <utility>
+
+namespace llvm {
+/// TaskQueue executes serialized work on a user-defined Thread Pool. It
+/// guarantees that if task B is enqueued after task A, task B begins after
+/// task A completes and there is no overlap between the two.
+class TaskQueue {
+ // Because we don't have init capture to use move-only local variables that
+ // are captured into a lambda, we create the promise inside an explicit
+ // callable struct. We want to do as much of the wrapping in the
+ // type-specialized domain (before type erasure) and then erase this into a
+ // std::function.
+ template <typename Callable> struct Task {
+ using ResultTy = std::invoke_result_t<Callable>;
+ explicit Task(Callable C, TaskQueue &Parent)
+ : C(std::move(C)), P(std::make_shared<std::promise<ResultTy>>()),
+ Parent(&Parent) {}
+
+ template<typename T>
+ void invokeCallbackAndSetPromise(T*) {
+ P->set_value(C());
+ }
+
+ void invokeCallbackAndSetPromise(void*) {
+ C();
+ P->set_value();
+ }
+
+ void operator()() noexcept {
+ ResultTy *Dummy = nullptr;
+ invokeCallbackAndSetPromise(Dummy);
+ Parent->completeTask();
+ }
+
+ Callable C;
+ std::shared_ptr<std::promise<ResultTy>> P;
+ TaskQueue *Parent;
+ };
+
+public:
+ /// Construct a task queue with no work.
+ TaskQueue(ThreadPool &Scheduler) : Scheduler(Scheduler) { (void)Scheduler; }
+
+ /// Blocking destructor: the queue will wait for all work to complete.
+ ~TaskQueue() {
+ Scheduler.wait();
+ assert(Tasks.empty());
+ }
+
+ /// Asynchronous submission of a task to the queue. The returned future can be
+ /// used to wait for the task (and all previous tasks that have not yet
+ /// completed) to finish.
+ template <typename Callable>
+ std::future<std::invoke_result_t<Callable>> async(Callable &&C) {
+#if !LLVM_ENABLE_THREADS
+ static_assert(false,
+ "TaskQueue requires building with LLVM_ENABLE_THREADS!");
+#endif
+ Task<Callable> T{std::move(C), *this};
+ using ResultTy = std::invoke_result_t<Callable>;
+ std::future<ResultTy> F = T.P->get_future();
+ {
+ std::lock_guard<std::mutex> Lock(QueueLock);
+ // If there's already a task in flight, just queue this one up. If
+ // there is not a task in flight, bypass the queue and schedule this
+ // task immediately.
+ if (IsTaskInFlight)
+ Tasks.push_back(std::move(T));
+ else {
+ Scheduler.async(std::move(T));
+ IsTaskInFlight = true;
+ }
+ }
+ return F;
+ }
+
+private:
+ void completeTask() {
+ // We just completed a task. If there are no more tasks in the queue,
+ // update IsTaskInFlight to false and stop doing work. Otherwise
+ // schedule the next task (while not holding the lock).
+ std::function<void()> Continuation;
+ {
+ std::lock_guard<std::mutex> Lock(QueueLock);
+ if (Tasks.empty()) {
+ IsTaskInFlight = false;
+ return;
+ }
+
+ Continuation = std::move(Tasks.front());
+ Tasks.pop_front();
+ }
+ Scheduler.async(std::move(Continuation));
+ }
+
+ /// The thread pool on which to run the work.
+ ThreadPool &Scheduler;
+
+  /// State which indicates whether the queue is currently processing
+ /// any work.
+ bool IsTaskInFlight = false;
+
+ /// Mutex for synchronizing access to the Tasks array.
+ std::mutex QueueLock;
+
+ /// Tasks waiting for execution in the queue.
+ std::deque<std::function<void()>> Tasks;
+};
+} // namespace llvm
+
+#endif // LLVM_SUPPORT_TASKQUEUE_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/Support/X86TargetParser.def b/contrib/libs/llvm16/include/llvm/Support/X86TargetParser.def
new file mode 100644
index 0000000000..416d583c35
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/Support/X86TargetParser.def
@@ -0,0 +1,15 @@
+//===-- llvm/Support/X86TargetParser.def ------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This header is deprecated in favour of
+/// `llvm/TargetParser/X86TargetParser.def`.
+///
+//===----------------------------------------------------------------------===//
+
+#include "llvm/TargetParser/X86TargetParser.def"
diff --git a/contrib/libs/llvm16/include/llvm/Support/raw_sha1_ostream.h b/contrib/libs/llvm16/include/llvm/Support/raw_sha1_ostream.h
new file mode 100644
index 0000000000..996660e318
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/Support/raw_sha1_ostream.h
@@ -0,0 +1,57 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//==- raw_sha1_ostream.h - raw_ostream that compute SHA1 --*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the raw_sha1_ostream class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_RAW_SHA1_OSTREAM_H
+#define LLVM_SUPPORT_RAW_SHA1_OSTREAM_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Support/SHA1.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+
+/// A raw_ostream that hash the content using the sha1 algorithm.
+class raw_sha1_ostream : public raw_ostream {
+ SHA1 State;
+
+ /// See raw_ostream::write_impl.
+ void write_impl(const char *Ptr, size_t Size) override {
+ State.update(ArrayRef<uint8_t>((const uint8_t *)Ptr, Size));
+ }
+
+public:
+ /// Return the current SHA1 hash for the content of the stream
+ std::array<uint8_t, 20> sha1() {
+ flush();
+ return State.result();
+ }
+
+ /// Reset the internal state to start over from scratch.
+ void resetHash() { State.init(); }
+
+ uint64_t current_pos() const override { return 0; }
+};
+
+} // end llvm namespace
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/TableGen/Automaton.td b/contrib/libs/llvm16/include/llvm/TableGen/Automaton.td
new file mode 100644
index 0000000000..13ced2a0e7
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/TableGen/Automaton.td
@@ -0,0 +1,95 @@
+//===- Automaton.td ----------------------------------------*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the key top-level classes needed to produce a reasonably
+// generic finite-state automaton.
+//
+//===----------------------------------------------------------------------===//
+
+// Define a record inheriting from GenericAutomaton to generate a reasonably
+// generic finite-state automaton over a set of actions and states.
+//
+// This automaton is defined by:
+// 1) a state space (explicit, always bits<32>).
+// 2) a set of input symbols (actions, explicit) and
+// 3) a transition function from state + action -> state.
+//
+// A theoretical automaton is defined by <Q, S, d, q0, F>:
+// Q: A set of possible states.
+// S: (sigma) The input alphabet.
+// d: (delta) The transition function f(q in Q, s in S) -> q' in Q.
+// F: The set of final (accepting) states.
+//
+// Because generating all possible states is tedious, we instead define the
+// transition function only and crawl all reachable states starting from the
+// initial state with all inputs under all transitions until termination.
+//
+// We define F = S, that is, all valid states are accepting.
+//
+// To ensure the generation of the automaton terminates, the state transitions
+// are defined as a lattice (meaning every transitioned-to state is more
+// specific than the transitioned-from state, for some definition of specificity).
+// Concretely a transition may set one or more bits in the state that were
+// previously zero to one. If any bit was not zero, the transition is invalid.
+//
+// Instead of defining all possible states (which would be cumbersome), the user
+// provides a set of possible Transitions from state A, consuming an input
+// symbol A to state B. The Transition object transforms state A to state B and
+// acts as a predicate. This means the state space can be discovered by crawling
+// all the possible transitions until none are valid.
+//
+// This automaton is considered to be nondeterministic, meaning that multiple
+// transitions can occur from any (state, action) pair. The generated automaton
+// is determinized, meaning that it executes in O(k) time where k is the input
+// sequence length.
+//
+// In addition to a generated automaton that determines if a sequence of inputs
+// is accepted or not, a table is emitted that allows determining a plausible
+// sequence of states traversed to accept that input.
+class GenericAutomaton {
+ // Name of a class that inherits from Transition. All records inheriting from
+ // this class will be considered when constructing the automaton.
+ string TransitionClass;
+
+ // Names of fields within TransitionClass that define the action symbol. This
+ // defines the action as an N-tuple.
+ //
+ // Each symbol field can be of class, int, string or code type.
+ // If the type of a field is a class, the Record's name is used verbatim
+ // in C++ and the class name is used as the C++ type name.
+ // If the type of a field is a string, code or int, that is also used
+ // verbatim in C++.
+ //
+ // To override the C++ type name for field F, define a field called TypeOf_F.
+ // This should be a string that will be used verbatim in C++.
+ //
+ // As an example, to define a 2-tuple with an enum and a string, one might:
+ // def MyTransition : Transition {
+ // MyEnum S1;
+ // int S2;
+ // }
+//   def MyAutomaton : GenericAutomaton {
+ // let TransitionClass = "Transition";
+ // let SymbolFields = ["S1", "S2"];
+ // let TypeOf_S1 = "MyEnumInCxxKind";
+ // }
+ list<string> SymbolFields;
+}
+
+// All transitions inherit from Transition.
+class Transition {
+ // A transition S' = T(S) is valid if, for every set bit in NewState, the
+ // corresponding bit in S is clear. That is:
+ // def T(S):
+ // S' = S | NewState
+ // return S' if S' != S else Failure
+ //
+ // The automaton generator uses this property to crawl the set of possible
+ // transitions from a starting state of 0b0.
+ bits<32> NewState;
+}
diff --git a/contrib/libs/llvm16/include/llvm/Testing/ADT/StringMap.h b/contrib/libs/llvm16/include/llvm/Testing/ADT/StringMap.h
new file mode 100644
index 0000000000..625c010ce7
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/Testing/ADT/StringMap.h
@@ -0,0 +1,57 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/Testing/ADT/StringMap.h ---------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TESTING_ADT_STRINGMAP_H_
+#define LLVM_TESTING_ADT_STRINGMAP_H_
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/Support/FormatVariadic.h"
+#include "llvm/Testing/ADT/StringMapEntry.h"
+#include <ostream>
+#include <sstream>
+
+namespace llvm {
+
+/// Support for printing to std::ostream, for use with e.g. producing more
+/// useful error messages with Google Test.
+template <typename T>
+std::ostream &operator<<(std::ostream &OS, const StringMap<T> &M) {
+ if (M.empty()) {
+ return OS << "{ }";
+ }
+
+ std::vector<std::string> Lines;
+ for (const auto &E : M) {
+ std::ostringstream SS;
+ SS << E << ",";
+ Lines.push_back(SS.str());
+ }
+ llvm::sort(Lines);
+ Lines.insert(Lines.begin(), "{");
+ Lines.insert(Lines.end(), "}");
+
+ return OS << llvm::formatv("{0:$[\n]}",
+ make_range(Lines.begin(), Lines.end()))
+ .str();
+}
+
+} // namespace llvm
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/Testing/ADT/StringMapEntry.h b/contrib/libs/llvm16/include/llvm/Testing/ADT/StringMapEntry.h
new file mode 100644
index 0000000000..7cb5016205
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/Testing/ADT/StringMapEntry.h
@@ -0,0 +1,140 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/Testing/ADT/StringMapEntry.h ----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TESTING_ADT_STRINGMAPENTRY_H_
+#define LLVM_TESTING_ADT_STRINGMAPENTRY_H_
+
+#include "llvm/ADT/StringMapEntry.h"
+#include "gmock/gmock.h"
+#include <ostream>
+#include <type_traits>
+
+namespace llvm {
+namespace detail {
+
+template <typename T, typename = std::void_t<>>
+struct CanOutputToOStream : std::false_type {};
+
+template <typename T>
+struct CanOutputToOStream<T, std::void_t<decltype(std::declval<std::ostream &>()
+ << std::declval<T>())>>
+ : std::true_type {};
+
+} // namespace detail
+
+/// Support for printing to std::ostream, for use with e.g. producing more
+/// useful error messages with Google Test.
+template <typename T>
+std::ostream &operator<<(std::ostream &OS, const StringMapEntry<T> &E) {
+ OS << "{\"" << E.getKey().data() << "\": ";
+ if constexpr (detail::CanOutputToOStream<decltype(E.getValue())>::value) {
+ OS << E.getValue();
+ } else {
+ OS << "non-printable value";
+ }
+ return OS << "}";
+}
+
+namespace detail {
+
+template <typename StringMapEntryT>
+class StringMapEntryMatcherImpl
+ : public testing::MatcherInterface<StringMapEntryT> {
+public:
+ using ValueT = typename std::remove_reference_t<StringMapEntryT>::ValueType;
+
+ template <typename KeyMatcherT, typename ValueMatcherT>
+ StringMapEntryMatcherImpl(KeyMatcherT KeyMatcherArg,
+ ValueMatcherT ValueMatcherArg)
+ : KeyMatcher(
+ testing::SafeMatcherCast<const std::string &>(KeyMatcherArg)),
+ ValueMatcher(
+ testing::SafeMatcherCast<const ValueT &>(ValueMatcherArg)) {}
+
+ void DescribeTo(std::ostream *OS) const override {
+ *OS << "has a string key that ";
+ KeyMatcher.DescribeTo(OS);
+ *OS << ", and has a value that ";
+ ValueMatcher.DescribeTo(OS);
+ }
+
+ void DescribeNegationTo(std::ostream *OS) const override {
+ *OS << "has a string key that ";
+ KeyMatcher.DescribeNegationTo(OS);
+ *OS << ", or has a value that ";
+ ValueMatcher.DescribeNegationTo(OS);
+ }
+
+ bool
+ MatchAndExplain(StringMapEntryT Entry,
+ testing::MatchResultListener *ResultListener) const override {
+ testing::StringMatchResultListener KeyListener;
+ if (!KeyMatcher.MatchAndExplain(Entry.getKey().data(), &KeyListener)) {
+ *ResultListener << ("which has a string key " +
+ (KeyListener.str().empty() ? "that doesn't match"
+ : KeyListener.str()));
+ return false;
+ }
+ testing::StringMatchResultListener ValueListener;
+ if (!ValueMatcher.MatchAndExplain(Entry.getValue(), &ValueListener)) {
+ *ResultListener << ("which has a value " + (ValueListener.str().empty()
+ ? "that doesn't match"
+ : ValueListener.str()));
+ return false;
+ }
+ *ResultListener << "which is a match";
+ return true;
+ }
+
+private:
+ const testing::Matcher<const std::string &> KeyMatcher;
+ const testing::Matcher<const ValueT &> ValueMatcher;
+};
+
+template <typename KeyMatcherT, typename ValueMatcherT>
+class StringMapEntryMatcher {
+public:
+ StringMapEntryMatcher(KeyMatcherT KMArg, ValueMatcherT VMArg)
+ : KM(std::move(KMArg)), VM(std::move(VMArg)) {}
+
+ template <typename StringMapEntryT>
+ operator testing::Matcher<StringMapEntryT>() const { // NOLINT
+ return testing::Matcher<StringMapEntryT>(
+ new StringMapEntryMatcherImpl<const StringMapEntryT &>(KM, VM));
+ }
+
+private:
+ const KeyMatcherT KM;
+ const ValueMatcherT VM;
+};
+
+} // namespace detail
+
+/// Returns a gMock matcher that matches a `StringMapEntry` whose string key
+/// matches `KeyMatcher`, and whose value matches `ValueMatcher`.
+template <typename KeyMatcherT, typename ValueMatcherT>
+detail::StringMapEntryMatcher<KeyMatcherT, ValueMatcherT>
+IsStringMapEntry(KeyMatcherT KM, ValueMatcherT VM) {
+ return detail::StringMapEntryMatcher<KeyMatcherT, ValueMatcherT>(
+ std::move(KM), std::move(VM));
+}
+
+} // namespace llvm
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/Testing/Annotations/Annotations.h b/contrib/libs/llvm16/include/llvm/Testing/Annotations/Annotations.h
new file mode 100644
index 0000000000..476cd7878c
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/Testing/Annotations/Annotations.h
@@ -0,0 +1,144 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===--- Annotations.h - Annotated source code for tests ---------*- C++-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_TESTING_SUPPORT_ANNOTATIONS_H
+#define LLVM_TESTING_SUPPORT_ANNOTATIONS_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include <tuple>
+#include <vector>
+
+namespace llvm {
+
+class raw_ostream;
+
+/// Annotations lets you mark points and ranges inside source code, for tests:
+///
+/// Annotations Example(R"cpp(
+/// int complete() { x.pri^ } // ^ indicates a point
+/// void err() { [["hello" == 42]]; } // [[this is a range]]
+/// $definition^class Foo{}; // points can be named: "definition"
+/// $(foo)^class Foo{}; // ...or have a payload: "foo"
+/// $definition(foo)^class Foo{}; // ...or both
+/// $fail(runtime)[[assert(false)]] // ranges can have names/payloads too
+/// )cpp");
+///
+/// StringRef Code = Example.code(); // annotations stripped.
+/// std::vector<size_t> PP = Example.points(); // all unnamed points
+/// size_t P = Example.point(); // there must be exactly one
+/// llvm::Range R = Example.range("fail"); // find named ranges
+///
+/// Points/ranges are coordinated into `code()` which is stripped of
+/// annotations.
+///
+/// Names consist of only alphanumeric characters or '_'.
+/// Payloads can contain any character except '(' and ')'.
+///
+/// Ranges may be nested (and points can be inside ranges), but there's no way
+/// to define general overlapping ranges.
+///
+/// FIXME: the choice of the marking syntax makes it impossible to represent
+/// some of the C++ and Objective C constructs (including common ones
+/// like C++ attributes). We can fix this by:
+/// 1. introducing an escaping mechanism for the special characters,
+/// 2. making characters for marking points and ranges configurable,
+/// 3. changing the syntax to something less commonly used,
+/// 4. ...
+class Annotations {
+public:
+ /// Two offsets pointing to a continuous substring. End is not included, i.e.
+ /// represents a half-open range.
+ struct Range {
+ size_t Begin = 0;
+ size_t End = 0;
+
+ friend bool operator==(const Range &L, const Range &R) {
+ return std::tie(L.Begin, L.End) == std::tie(R.Begin, R.End);
+ }
+ friend bool operator!=(const Range &L, const Range &R) { return !(L == R); }
+ };
+
+ /// Parses the annotations from Text. Crashes if it's malformed.
+ Annotations(llvm::StringRef Text);
+
+ /// The input text with all annotations stripped.
+ /// All points and ranges are relative to this stripped text.
+ llvm::StringRef code() const { return Code; }
+
+ /// Returns the position of the point marked by ^ (or $name^) in the text.
+ /// Crashes if there isn't exactly one.
+ size_t point(llvm::StringRef Name = "") const;
+ /// Returns the position of the point with \p Name and its payload (if any).
+ std::pair<size_t, llvm::StringRef>
+ pointWithPayload(llvm::StringRef Name = "") const;
+ /// Returns the position of all points marked by ^ (or $name^) in the text.
+ /// Order matches the order within the text.
+ std::vector<size_t> points(llvm::StringRef Name = "") const;
+ /// Returns the positions and payloads (if any) of all points named \p Name
+ std::vector<std::pair<size_t, llvm::StringRef>>
+ pointsWithPayload(llvm::StringRef Name = "") const;
+ /// Returns the mapping of all names of points marked in the text to their
+ /// position. Unnamed points are mapped to the empty string. The positions are
+ /// sorted.
+ /// FIXME Remove this and expose `All` directly (currently used out-of-tree)
+ llvm::StringMap<llvm::SmallVector<size_t, 1>> all_points() const;
+
+ /// Returns the location of the range marked by [[ ]] (or $name[[ ]]).
+ /// Crashes if there isn't exactly one.
+ Range range(llvm::StringRef Name = "") const;
+ /// Returns the location and payload of the range marked by [[ ]]
+ /// (or $name(payload)[[ ]]). Crashes if there isn't exactly one.
+ std::pair<Range, llvm::StringRef>
+ rangeWithPayload(llvm::StringRef Name = "") const;
+ /// Returns the location of all ranges marked by [[ ]] (or $name[[ ]]).
+ /// They are ordered by start position within the text.
+ std::vector<Range> ranges(llvm::StringRef Name = "") const;
+ /// Returns the location of all ranges marked by [[ ]]
+ /// (or $name(payload)[[ ]]).
+ /// They are ordered by start position within the text.
+ std::vector<std::pair<Range, llvm::StringRef>>
+ rangesWithPayload(llvm::StringRef Name = "") const;
+ /// Returns the mapping of all names of ranges marked in the text to their
+ /// location. Unnamed ranges are mapped to the empty string. The ranges are
+ /// sorted by their start position.
+ llvm::StringMap<llvm::SmallVector<Range, 1>> all_ranges() const;
+
+private:
+ std::string Code;
+ /// Either a Point (Only Start) or a Range (Start and End)
+ struct Annotation {
+ size_t Begin;
+ size_t End = -1;
+ bool isPoint() const { return End == size_t(-1); }
+ llvm::StringRef Name;
+ llvm::StringRef Payload;
+ };
+ std::vector<Annotation> All;
+ // Values are the indices into All
+ llvm::StringMap<llvm::SmallVector<size_t, 1>> Points;
+ llvm::StringMap<llvm::SmallVector<size_t, 1>> Ranges;
+};
+
+llvm::raw_ostream &operator<<(llvm::raw_ostream &O,
+ const llvm::Annotations::Range &R);
+
+} // namespace llvm
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/Testing/Support/Error.h b/contrib/libs/llvm16/include/llvm/Testing/Support/Error.h
new file mode 100644
index 0000000000..90896c9c32
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/Testing/Support/Error.h
@@ -0,0 +1,238 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/Testing/Support/Error.h ---------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TESTING_SUPPORT_ERROR_H
+#define LLVM_TESTING_SUPPORT_ERROR_H
+
+#include "llvm/Support/Error.h"
+#include "llvm/Testing/Support/SupportHelpers.h"
+
+#include "gmock/gmock.h"
+#include <ostream>
+
+namespace llvm {
+namespace detail {
+ErrorHolder TakeError(Error Err);
+
+template <typename T> ExpectedHolder<T> TakeExpected(Expected<T> &Exp) {
+ return {TakeError(Exp.takeError()), Exp};
+}
+
+template <typename T> ExpectedHolder<T> TakeExpected(Expected<T> &&Exp) {
+ return TakeExpected(Exp);
+}
+
+template <typename T>
+class ValueMatchesMono
+ : public testing::MatcherInterface<const ExpectedHolder<T> &> {
+public:
+ explicit ValueMatchesMono(const testing::Matcher<T> &Matcher)
+ : Matcher(Matcher) {}
+
+ bool MatchAndExplain(const ExpectedHolder<T> &Holder,
+ testing::MatchResultListener *listener) const override {
+ if (!Holder.Success())
+ return false;
+
+ bool result = Matcher.MatchAndExplain(*Holder.Exp, listener);
+
+ if (result || !listener->IsInterested())
+ return result;
+ *listener << "(";
+ Matcher.DescribeNegationTo(listener->stream());
+ *listener << ")";
+ return result;
+ }
+
+ void DescribeTo(std::ostream *OS) const override {
+ *OS << "succeeded with value (";
+ Matcher.DescribeTo(OS);
+ *OS << ")";
+ }
+
+ void DescribeNegationTo(std::ostream *OS) const override {
+ *OS << "did not succeed or value (";
+ Matcher.DescribeNegationTo(OS);
+ *OS << ")";
+ }
+
+private:
+ testing::Matcher<T> Matcher;
+};
+
+template<typename M>
+class ValueMatchesPoly {
+public:
+ explicit ValueMatchesPoly(const M &Matcher) : Matcher(Matcher) {}
+
+ template <typename T>
+ operator testing::Matcher<const ExpectedHolder<T> &>() const {
+ return MakeMatcher(
+ new ValueMatchesMono<T>(testing::SafeMatcherCast<T>(Matcher)));
+ }
+
+private:
+ M Matcher;
+};
+
+template <typename InfoT>
+class ErrorMatchesMono : public testing::MatcherInterface<const ErrorHolder &> {
+public:
+ explicit ErrorMatchesMono(std::optional<testing::Matcher<InfoT &>> Matcher)
+ : Matcher(std::move(Matcher)) {}
+
+ bool MatchAndExplain(const ErrorHolder &Holder,
+ testing::MatchResultListener *listener) const override {
+ if (Holder.Success())
+ return false;
+
+ if (Holder.Infos.size() > 1) {
+ *listener << "multiple errors";
+ return false;
+ }
+
+ auto &Info = *Holder.Infos[0];
+ if (!Info.isA<InfoT>()) {
+ *listener << "Error was not of given type";
+ return false;
+ }
+
+ if (!Matcher)
+ return true;
+
+ return Matcher->MatchAndExplain(static_cast<InfoT &>(Info), listener);
+ }
+
+ void DescribeTo(std::ostream *OS) const override {
+ *OS << "failed with Error of given type";
+ if (Matcher) {
+ *OS << " and the error ";
+ Matcher->DescribeTo(OS);
+ }
+ }
+
+ void DescribeNegationTo(std::ostream *OS) const override {
+ *OS << "succeeded or did not fail with the error of given type";
+ if (Matcher) {
+ *OS << " or the error ";
+ Matcher->DescribeNegationTo(OS);
+ }
+ }
+
+private:
+ std::optional<testing::Matcher<InfoT &>> Matcher;
+};
+
+class ErrorMessageMatches
+ : public testing::MatcherInterface<const ErrorHolder &> {
+public:
+ explicit ErrorMessageMatches(
+ testing::Matcher<std::vector<std::string>> Matcher)
+ : Matcher(std::move(Matcher)) {}
+
+ bool MatchAndExplain(const ErrorHolder &Holder,
+ testing::MatchResultListener *listener) const override {
+ std::vector<std::string> Messages;
+ Messages.reserve(Holder.Infos.size());
+ for (const std::shared_ptr<ErrorInfoBase> &Info : Holder.Infos)
+ Messages.push_back(Info->message());
+
+ return Matcher.MatchAndExplain(Messages, listener);
+ }
+
+ void DescribeTo(std::ostream *OS) const override {
+ *OS << "failed with Error whose message ";
+ Matcher.DescribeTo(OS);
+ }
+
+ void DescribeNegationTo(std::ostream *OS) const override {
+ *OS << "failed with an Error whose message ";
+ Matcher.DescribeNegationTo(OS);
+ }
+
+private:
+ testing::Matcher<std::vector<std::string>> Matcher;
+};
+} // namespace detail
+
+#define EXPECT_THAT_ERROR(Err, Matcher) \
+ EXPECT_THAT(llvm::detail::TakeError(Err), Matcher)
+#define ASSERT_THAT_ERROR(Err, Matcher) \
+ ASSERT_THAT(llvm::detail::TakeError(Err), Matcher)
+
+/// Helper macro for checking the result of an 'Expected<T>'
+///
+/// @code{.cpp}
+/// // function to be tested
+/// Expected<int> myDivide(int A, int B);
+///
+/// TEST(myDivideTests, GoodAndBad) {
+/// // test good case
+/// // if you only care about success or failure:
+/// EXPECT_THAT_EXPECTED(myDivide(10, 5), Succeeded());
+/// // if you also care about the value:
+/// EXPECT_THAT_EXPECTED(myDivide(10, 5), HasValue(2));
+///
+/// // test the error case
+/// EXPECT_THAT_EXPECTED(myDivide(10, 0), Failed());
+/// // also check the error message
+/// EXPECT_THAT_EXPECTED(myDivide(10, 0),
+/// FailedWithMessage("B must not be zero!"));
+/// }
+/// @endcode
+
+#define EXPECT_THAT_EXPECTED(Err, Matcher) \
+ EXPECT_THAT(llvm::detail::TakeExpected(Err), Matcher)
+#define ASSERT_THAT_EXPECTED(Err, Matcher) \
+ ASSERT_THAT(llvm::detail::TakeExpected(Err), Matcher)
+
+MATCHER(Succeeded, "") { return arg.Success(); }
+MATCHER(Failed, "") { return !arg.Success(); }
+
+template <typename InfoT>
+testing::Matcher<const detail::ErrorHolder &> Failed() {
+ return MakeMatcher(new detail::ErrorMatchesMono<InfoT>(std::nullopt));
+}
+
+template <typename InfoT, typename M>
+testing::Matcher<const detail::ErrorHolder &> Failed(M Matcher) {
+ return MakeMatcher(new detail::ErrorMatchesMono<InfoT>(
+ testing::SafeMatcherCast<InfoT &>(Matcher)));
+}
+
+template <typename... M>
+testing::Matcher<const detail::ErrorHolder &> FailedWithMessage(M... Matcher) {
+ static_assert(sizeof...(M) > 0);
+ return MakeMatcher(
+ new detail::ErrorMessageMatches(testing::ElementsAre(Matcher...)));
+}
+
+template <typename M>
+testing::Matcher<const detail::ErrorHolder &> FailedWithMessageArray(M Matcher) {
+ return MakeMatcher(new detail::ErrorMessageMatches(Matcher));
+}
+
+template <typename M>
+detail::ValueMatchesPoly<M> HasValue(M Matcher) {
+ return detail::ValueMatchesPoly<M>(Matcher);
+}
+
+} // namespace llvm
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/Testing/Support/SupportHelpers.h b/contrib/libs/llvm16/include/llvm/Testing/Support/SupportHelpers.h
new file mode 100644
index 0000000000..56b7330584
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/Testing/Support/SupportHelpers.h
@@ -0,0 +1,265 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- Testing/Support/SupportHelpers.h -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TESTING_SUPPORT_SUPPORTHELPERS_H
+#define LLVM_TESTING_SUPPORT_SUPPORTHELPERS_H
+
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_os_ostream.h"
+#include "gmock/gmock-matchers.h"
+#include "gtest/gtest-printers.h"
+
+#include <optional>
+#include <string>
+
+namespace llvm {
+namespace detail {
+struct ErrorHolder {
+ std::vector<std::shared_ptr<ErrorInfoBase>> Infos;
+
+ bool Success() const { return Infos.empty(); }
+};
+
+template <typename T> struct ExpectedHolder : public ErrorHolder {
+ ExpectedHolder(ErrorHolder Err, Expected<T> &Exp)
+ : ErrorHolder(std::move(Err)), Exp(Exp) {}
+
+ Expected<T> &Exp;
+};
+
+inline void PrintTo(const ErrorHolder &Err, std::ostream *Out) {
+ raw_os_ostream OS(*Out);
+ OS << (Err.Success() ? "succeeded" : "failed");
+ if (!Err.Success()) {
+ const char *Delim = " (";
+ for (const auto &Info : Err.Infos) {
+ OS << Delim;
+ Delim = "; ";
+ Info->log(OS);
+ }
+ OS << ")";
+ }
+}
+
+template <typename T>
+void PrintTo(const ExpectedHolder<T> &Item, std::ostream *Out) {
+ if (Item.Success()) {
+ *Out << "succeeded with value " << ::testing::PrintToString(*Item.Exp);
+ } else {
+ PrintTo(static_cast<const ErrorHolder &>(Item), Out);
+ }
+}
+
+template <class InnerMatcher> class ValueIsMatcher {
+public:
+ explicit ValueIsMatcher(InnerMatcher ValueMatcher)
+ : ValueMatcher(ValueMatcher) {}
+
+ template <class T>
+ operator ::testing::Matcher<const std::optional<T> &>() const {
+ return ::testing::MakeMatcher(
+ new Impl<T>(::testing::SafeMatcherCast<T>(ValueMatcher)));
+ }
+
+ template <class T, class O = std::optional<T>>
+ class Impl : public ::testing::MatcherInterface<const O &> {
+ public:
+ explicit Impl(const ::testing::Matcher<T> &ValueMatcher)
+ : ValueMatcher(ValueMatcher) {}
+
+ bool MatchAndExplain(const O &Input,
+ testing::MatchResultListener *L) const override {
+ return Input && ValueMatcher.MatchAndExplain(*Input, L);
+ }
+
+ void DescribeTo(std::ostream *OS) const override {
+ *OS << "has a value that ";
+ ValueMatcher.DescribeTo(OS);
+ }
+ void DescribeNegationTo(std::ostream *OS) const override {
+ *OS << "does not have a value that ";
+ ValueMatcher.DescribeTo(OS);
+ }
+
+ private:
+ testing::Matcher<T> ValueMatcher;
+ };
+
+private:
+ InnerMatcher ValueMatcher;
+};
+} // namespace detail
+
+/// Matches an std::optional<T> with a value that conforms to an inner matcher.
+/// To match std::nullopt you could use Eq(std::nullopt).
+template <class InnerMatcher>
+detail::ValueIsMatcher<InnerMatcher> ValueIs(const InnerMatcher &ValueMatcher) {
+ return detail::ValueIsMatcher<InnerMatcher>(ValueMatcher);
+}
+namespace unittest {
+
+SmallString<128> getInputFileDirectory(const char *Argv0);
+
+/// A RAII object that creates a temporary directory upon initialization and
+/// removes it upon destruction.
+class TempDir {
+ SmallString<128> Path;
+
+public:
+ /// Creates a managed temporary directory.
+ ///
+ /// @param Name The name of the directory to create.
+ /// @param Unique If true, the directory will be created using
+ /// llvm::sys::fs::createUniqueDirectory.
+ explicit TempDir(StringRef Name, bool Unique = false) {
+ std::error_code EC;
+ if (Unique) {
+ EC = llvm::sys::fs::createUniqueDirectory(Name, Path);
+ if (!EC) {
+ // Resolve any symlinks in the new directory.
+ std::string UnresolvedPath(Path.str());
+ EC = llvm::sys::fs::real_path(UnresolvedPath, Path);
+ }
+ } else {
+ Path = Name;
+ EC = llvm::sys::fs::create_directory(Path);
+ }
+ if (EC)
+ Path.clear();
+ EXPECT_FALSE(EC) << EC.message();
+ }
+
+ ~TempDir() {
+ if (!Path.empty()) {
+ EXPECT_FALSE(llvm::sys::fs::remove_directories(Path.str()));
+ }
+ }
+
+ TempDir(const TempDir &) = delete;
+ TempDir &operator=(const TempDir &) = delete;
+
+ TempDir(TempDir &&) = default;
+ TempDir &operator=(TempDir &&) = default;
+
+ /// The path to the temporary directory.
+ StringRef path() const { return Path; }
+
+ /// The null-terminated C string pointing to the path.
+ const char *c_str() { return Path.c_str(); }
+
+ /// Creates a new path by appending the argument to the path of the managed
+ /// directory using the native path separator.
+ SmallString<128> path(StringRef component) const {
+ SmallString<128> Result(Path);
+ SmallString<128> ComponentToAppend(component);
+ llvm::sys::path::native(ComponentToAppend);
+ llvm::sys::path::append(Result, Twine(ComponentToAppend));
+ return Result;
+ }
+};
+
+/// A RAII object that creates a link upon initialization and
+/// removes it upon destruction.
+///
+/// The link may be a soft or a hard link, depending on the platform.
+class TempLink {
+ SmallString<128> Path;
+
+public:
+ /// Creates a managed link at path Link pointing to Target.
+ TempLink(StringRef Target, StringRef Link) {
+ Path = Link;
+ std::error_code EC = sys::fs::create_link(Target, Link);
+ if (EC)
+ Path.clear();
+ EXPECT_FALSE(EC);
+ }
+ ~TempLink() {
+ if (!Path.empty()) {
+ EXPECT_FALSE(llvm::sys::fs::remove(Path.str()));
+ }
+ }
+
+ TempLink(const TempLink &) = delete;
+ TempLink &operator=(const TempLink &) = delete;
+
+ TempLink(TempLink &&) = default;
+ TempLink &operator=(TempLink &&) = default;
+
+ /// The path to the link.
+ StringRef path() const { return Path; }
+};
+
+/// A RAII object that creates a file upon initialization and
+/// removes it upon destruction.
+class TempFile {
+ SmallString<128> Path;
+
+public:
+ /// Creates a managed file.
+ ///
+ /// @param Name The name of the file to create.
+ /// @param Contents The string to write to the file.
+ /// @param Unique If true, the file will be created using
+ /// llvm::sys::fs::createTemporaryFile.
+ TempFile(StringRef Name, StringRef Suffix = "", StringRef Contents = "",
+ bool Unique = false) {
+ std::error_code EC;
+ int fd;
+ if (Unique) {
+ EC = llvm::sys::fs::createTemporaryFile(Name, Suffix, fd, Path);
+ } else {
+ Path = Name;
+ if (!Suffix.empty()) {
+ Path.append(".");
+ Path.append(Suffix);
+ }
+ EC = llvm::sys::fs::openFileForWrite(Path, fd);
+ }
+ EXPECT_FALSE(EC);
+ raw_fd_ostream OS(fd, /*shouldClose*/ true);
+ OS << Contents;
+ OS.flush();
+ EXPECT_FALSE(OS.error());
+ if (EC || OS.error())
+ Path.clear();
+ }
+ ~TempFile() {
+ if (!Path.empty()) {
+ EXPECT_FALSE(llvm::sys::fs::remove(Path.str()));
+ }
+ }
+
+ TempFile(const TempFile &) = delete;
+ TempFile &operator=(const TempFile &) = delete;
+
+ TempFile(TempFile &&) = default;
+ TempFile &operator=(TempFile &&) = default;
+
+ /// The path to the file.
+ StringRef path() const { return Path; }
+};
+
+} // namespace unittest
+} // namespace llvm
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/WindowsResource/ResourceProcessor.h b/contrib/libs/llvm16/include/llvm/WindowsResource/ResourceProcessor.h
new file mode 100644
index 0000000000..b2c81d9596
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/WindowsResource/ResourceProcessor.h
@@ -0,0 +1,61 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- ResourceProcessor.h -------------------------------------*- C++-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===---------------------------------------------------------------------===//
+
+#ifndef LLVM_INCLUDE_LLVM_SUPPORT_WINDOWS_RESOURCE_PROCESSOR_H
+#define LLVM_INCLUDE_LLVM_SUPPORT_WINDOWS_RESOURCE_PROCESSOR_H
+
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/raw_ostream.h"
+
+#include <memory>
+#include <vector>
+
+
+namespace llvm {
+
+class WindowsResourceProcessor {
+public:
+ using PathType = SmallVector<char, 64>;
+
+ WindowsResourceProcessor() {}
+
+ void addDefine(StringRef Key, StringRef Value = StringRef()) {
+ PreprocessorDefines.emplace_back(Key, Value);
+ }
+ void addInclude(const PathType &IncludePath) {
+ IncludeList.push_back(IncludePath);
+ }
+ void setVerbose(bool Verbose) { IsVerbose = Verbose; }
+ void setNullAtEnd(bool NullAtEnd) { AppendNull = NullAtEnd; }
+
+ Error process(StringRef InputData,
+ std::unique_ptr<raw_fd_ostream> OutputStream);
+
+private:
+ StringRef InputData;
+ std::vector<PathType> IncludeList;
+ std::vector<std::pair<StringRef, StringRef>> PreprocessorDefines;
+ bool IsVerbose, AppendNull;
+};
+
+}
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/WindowsResource/ResourceScriptToken.h b/contrib/libs/llvm16/include/llvm/WindowsResource/ResourceScriptToken.h
new file mode 100644
index 0000000000..42026a8328
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/WindowsResource/ResourceScriptToken.h
@@ -0,0 +1,69 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- ResourceScriptToken.h -----------------------------------*- C++-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===---------------------------------------------------------------------===//
+//
+// This declares the .rc script tokens.
+// The list of available tokens is located at ResourceScriptTokenList.h.
+//
+// Ref: msdn.microsoft.com/en-us/library/windows/desktop/aa380599(v=vs.85).aspx
+//
+//===---------------------------------------------------------------------===//
+
+#ifndef LLVM_INCLUDE_LLVM_SUPPORT_WINDOWS_RESOURCE_SCRIPTTOKEN_H
+#define LLVM_INCLUDE_LLVM_SUPPORT_WINDOWS_RESOURCE_SCRIPTTOKEN_H
+
+#include "llvm/ADT/StringRef.h"
+
+namespace llvm {
+
+// A definition of a single resource script token. Each token has its kind
+// (declared in ResourceScriptTokenList) and holds a value - a reference
+// representation of the token.
+// RCToken does not claim ownership on its value. A memory buffer containing
+// the token value should be stored in a safe place and cannot be freed
+// nor reallocated.
+class RCToken {
+public:
+ enum class Kind {
+#define TOKEN(Name) Name,
+#define SHORT_TOKEN(Name, Ch) Name,
+#include "ResourceScriptTokenList.h"
+#undef TOKEN
+#undef SHORT_TOKEN
+ };
+
+ RCToken(RCToken::Kind RCTokenKind, StringRef Value);
+
+ // Get an integer value of the integer token.
+ uint32_t intValue() const;
+ bool isLongInt() const;
+
+ StringRef value() const;
+ Kind kind() const;
+
+ // Check if a token describes a binary operator.
+ bool isBinaryOp() const;
+
+private:
+ Kind TokenKind;
+ StringRef TokenValue;
+};
+
+} // namespace llvm
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/WindowsResource/ResourceScriptTokenList.h b/contrib/libs/llvm16/include/llvm/WindowsResource/ResourceScriptTokenList.h
new file mode 100644
index 0000000000..3e19b83fe6
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/WindowsResource/ResourceScriptTokenList.h
@@ -0,0 +1,45 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- ResourceScriptTokenList.h -------------------------------*- C++-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===---------------------------------------------------------------------===//
+//
+// This is a part of llvm-rc tokens header. It lists all the possible tokens
+// that might occur in a correct .rc script.
+//
+//===---------------------------------------------------------------------===//
+
+
+// Long tokens. They might consist of more than one character.
+TOKEN(Invalid) // Invalid token. Should not occur in a valid script.
+TOKEN(Int) // Integer (decimal, octal or hexadecimal).
+TOKEN(String) // String value.
+TOKEN(Identifier) // Script identifier (resource name or type).
+
+// Short tokens. They usually consist of exactly one character.
+// The definitions are of the form SHORT_TOKEN(TokenName, TokenChar).
+// TokenChar is the one-character token representation occurring in the correct
+// .rc scripts.
+SHORT_TOKEN(BlockBegin, '{') // Start of the script block; can also be BEGIN.
+SHORT_TOKEN(BlockEnd, '}') // End of the block; can also be END.
+SHORT_TOKEN(Comma, ',') // Comma - resource arguments separator.
+SHORT_TOKEN(Plus, '+') // Addition operator.
+SHORT_TOKEN(Minus, '-') // Subtraction operator.
+SHORT_TOKEN(Pipe, '|') // Bitwise-OR operator.
+SHORT_TOKEN(Amp, '&') // Bitwise-AND operator.
+SHORT_TOKEN(Tilde, '~') // Bitwise-NOT operator.
+SHORT_TOKEN(LeftParen, '(') // Left parenthesis in the script expressions.
+SHORT_TOKEN(RightParen, ')') // Right parenthesis.
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/XRay/FDRLogBuilder.h b/contrib/libs/llvm16/include/llvm/XRay/FDRLogBuilder.h
new file mode 100644
index 0000000000..f851ffd98e
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/XRay/FDRLogBuilder.h
@@ -0,0 +1,51 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- FDRLogBuilder.h - XRay FDR Log Building Utility --------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_XRAY_FDRLOGBUILDER_H
+#define LLVM_XRAY_FDRLOGBUILDER_H
+
+#include "llvm/XRay/FDRRecords.h"
+
+namespace llvm {
+namespace xray {
+
+/// The LogBuilder class allows for creating ad-hoc collections of records
+/// through the `add<...>(...)` function. An example use of this API is in
+/// crafting arbitrary sequences of records:
+///
+/// auto Records = LogBuilder()
+/// .add<BufferExtents>(256)
+/// .add<NewBufferRecord>(1)
+/// .consume();
+///
+class LogBuilder {
+ std::vector<std::unique_ptr<Record>> Records;
+
+public:
+ template <class R, class... T> LogBuilder &add(T &&... A) {
+ Records.emplace_back(new R(std::forward<T>(A)...));
+ return *this;
+ }
+
+ std::vector<std::unique_ptr<Record>> consume() { return std::move(Records); }
+};
+
+} // namespace xray
+} // namespace llvm
+
+#endif // LLVM_XRAY_FDRLOGBUILDER_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/module.extern.modulemap b/contrib/libs/llvm16/include/llvm/module.extern.modulemap
new file mode 100644
index 0000000000..8e726a3957
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/module.extern.modulemap
@@ -0,0 +1,6 @@
+module LLVM_Extern_Config_Def {}
+module LLVM_Extern_IR_Attributes_Gen {}
+module LLVM_Extern_IR_Intrinsics_Gen {}
+module LLVM_Extern_IR_Intrinsics_Enum {}
+module LLVM_Extern_Utils_DataTypes {}
+module LLVM_Extern_TargetParser_Gen {}
diff --git a/contrib/libs/llvm16/include/llvm/module.modulemap b/contrib/libs/llvm16/include/llvm/module.modulemap
new file mode 100644
index 0000000000..741e0a83b1
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/module.modulemap
@@ -0,0 +1,462 @@
+module LLVM_Analysis {
+ requires cplusplus
+ umbrella "Analysis"
+ module * { export * }
+
+ // This is intended for (repeated) textual inclusion.
+ textual header "Analysis/ScalarFuncs.def"
+ textual header "Analysis/TargetLibraryInfo.def"
+ textual header "Analysis/VecFuncs.def"
+}
+
+module LLVM_AsmParser {
+ requires cplusplus
+ umbrella "AsmParser"
+ module * { export * }
+}
+
+// A module covering CodeGen/ and Target/. These are intertwined
+// and codependent, and thus notionally form a single module.
+module LLVM_Backend {
+ requires cplusplus
+
+ module CodeGen {
+ umbrella "CodeGen"
+ module * { export * }
+
+ // Exclude these; they're intended to be included into only a single
+ // translation unit (or none) and aren't part of this module.
+ exclude header "CodeGen/LinkAllAsmWriterComponents.h"
+ exclude header "CodeGen/LinkAllCodegenComponents.h"
+
+ // These are intended for (repeated) textual inclusion.
+ textual header "CodeGen/DIEValue.def"
+ textual header "CodeGen/MachinePassRegistry.def"
+ }
+}
+
+// FIXME: Make this as a submodule of LLVM_Backend again.
+// Doing so causes a linker error in clang-format.
+module LLVM_Backend_Target {
+ umbrella "Target"
+ module * { export * }
+}
+
+module LLVM_Bitcode {
+ requires cplusplus
+ umbrella "Bitcode"
+ module * { export * }
+}
+
+module LLVM_Bitstream {
+ requires cplusplus
+ umbrella "Bitstream"
+ module * { export * }
+}
+
+module LLVM_BinaryFormat {
+ requires cplusplus
+ umbrella "BinaryFormat" module * { export * }
+ textual header "BinaryFormat/Dwarf.def"
+ textual header "BinaryFormat/DXContainerConstants.def"
+ textual header "BinaryFormat/DynamicTags.def"
+ textual header "BinaryFormat/MachO.def"
+ textual header "BinaryFormat/MinidumpConstants.def"
+ textual header "BinaryFormat/Swift.def"
+ textual header "BinaryFormat/ELFRelocs/AArch64.def"
+ textual header "BinaryFormat/ELFRelocs/AMDGPU.def"
+ textual header "BinaryFormat/ELFRelocs/ARM.def"
+ textual header "BinaryFormat/ELFRelocs/ARC.def"
+ textual header "BinaryFormat/ELFRelocs/AVR.def"
+ textual header "BinaryFormat/ELFRelocs/BPF.def"
+ textual header "BinaryFormat/ELFRelocs/CSKY.def"
+ textual header "BinaryFormat/ELFRelocs/Hexagon.def"
+ textual header "BinaryFormat/ELFRelocs/i386.def"
+ textual header "BinaryFormat/ELFRelocs/Lanai.def"
+ textual header "BinaryFormat/ELFRelocs/LoongArch.def"
+ textual header "BinaryFormat/ELFRelocs/M68k.def"
+ textual header "BinaryFormat/ELFRelocs/Mips.def"
+ textual header "BinaryFormat/ELFRelocs/MSP430.def"
+ textual header "BinaryFormat/ELFRelocs/PowerPC64.def"
+ textual header "BinaryFormat/ELFRelocs/PowerPC.def"
+ textual header "BinaryFormat/ELFRelocs/RISCV.def"
+ textual header "BinaryFormat/ELFRelocs/Sparc.def"
+ textual header "BinaryFormat/ELFRelocs/SystemZ.def"
+ textual header "BinaryFormat/ELFRelocs/VE.def"
+ textual header "BinaryFormat/ELFRelocs/x86_64.def"
+ textual header "BinaryFormat/ELFRelocs/Xtensa.def"
+ textual header "BinaryFormat/WasmRelocs.def"
+ textual header "BinaryFormat/MsgPack.def"
+}
+
+module LLVM_Config {
+ requires cplusplus
+ umbrella "Config"
+ extern module LLVM_Extern_Config_Def "module.extern.modulemap"
+ module * { export * }
+}
+
+module LLVM_DebugInfo {
+ requires cplusplus
+ module DIContext { header "DebugInfo/DIContext.h" export * }
+}
+
+module LLVM_DebugInfo_DWARF {
+ requires cplusplus
+
+ umbrella "DebugInfo/DWARF"
+ module * { export * }
+}
+
+module LLVM_DebugInfo_PDB {
+ requires cplusplus
+
+ umbrella "DebugInfo/PDB"
+ module * { export * }
+
+ // Separate out this subdirectory; it's an optional component that depends on
+ // a separate library which might not be available.
+ //
+ // FIXME: There should be a better way to specify this.
+ exclude header "DebugInfo/PDB/DIA/DIADataStream.h"
+ exclude header "DebugInfo/PDB/DIA/DIAEnumDebugStreams.h"
+ exclude header "DebugInfo/PDB/DIA/DIAEnumFrameData.h"
+ exclude header "DebugInfo/PDB/DIA/DIAEnumInjectedSources.h"
+ exclude header "DebugInfo/PDB/DIA/DIAEnumLineNumbers.h"
+ exclude header "DebugInfo/PDB/DIA/DIAEnumSectionContribs.h"
+ exclude header "DebugInfo/PDB/DIA/DIAEnumSourceFiles.h"
+ exclude header "DebugInfo/PDB/DIA/DIAEnumSymbols.h"
+ exclude header "DebugInfo/PDB/DIA/DIAEnumTables.h"
+ exclude header "DebugInfo/PDB/DIA/DIAError.h"
+ exclude header "DebugInfo/PDB/DIA/DIAFrameData.h"
+ exclude header "DebugInfo/PDB/DIA/DIAInjectedSource.h"
+ exclude header "DebugInfo/PDB/DIA/DIALineNumber.h"
+ exclude header "DebugInfo/PDB/DIA/DIARawSymbol.h"
+ exclude header "DebugInfo/PDB/DIA/DIASectionContrib.h"
+ exclude header "DebugInfo/PDB/DIA/DIASession.h"
+ exclude header "DebugInfo/PDB/DIA/DIASourceFile.h"
+ exclude header "DebugInfo/PDB/DIA/DIASupport.h"
+ exclude header "DebugInfo/PDB/DIA/DIATable.h"
+ exclude header "DebugInfo/PDB/DIA/DIAUtils.h"
+}
+
+module LLVM_DebugInfo_PDB_DIA {
+ requires cplusplus
+
+ umbrella "DebugInfo/PDB/DIA"
+ module * { export * }
+}
+
+module LLVM_DebugInfo_MSF {
+ requires cplusplus
+
+ umbrella "DebugInfo/MSF"
+ module * { export * }
+}
+
+module LLVM_DebugInfo_CodeView {
+ requires cplusplus
+
+ umbrella "DebugInfo/CodeView"
+ module * { export * }
+
+ // These are intended for (repeated) textual inclusion.
+ textual header "DebugInfo/CodeView/CodeViewRegisters.def"
+ textual header "DebugInfo/CodeView/CodeViewTypes.def"
+ textual header "DebugInfo/CodeView/CodeViewSymbols.def"
+}
+
+module LLVM_DWARFLinker {
+ requires cplusplus
+
+ umbrella "DWARFLinker"
+ module * { export * }
+}
+
+module LLVM_ExecutionEngine {
+ requires cplusplus
+
+ umbrella "ExecutionEngine"
+ module * { export * }
+
+ // Exclude this; it's an optional component of the ExecutionEngine.
+ exclude header "ExecutionEngine/OProfileWrapper.h"
+
+ // Exclude these; they're intended to be included into only a single
+ // translation unit (or none) and aren't part of this module.
+ exclude header "ExecutionEngine/MCJIT.h"
+ exclude header "ExecutionEngine/Interpreter.h"
+
+ // Exclude headers from LLVM_OrcSupport.
+ exclude header "ExecutionEngine/Orc/Shared/OrcError.h"
+}
+
+module LLVM_FileCheck {
+ requires cplusplus
+
+ umbrella "FileCheck"
+ module * { export * }
+}
+
+module LLVM_Frontend_OpenMP {
+ requires cplusplus
+
+ umbrella "Frontend/OpenMP"
+ module * { export * }
+
+ exclude header "Frontend/OpenMP/OMPKinds.def"
+}
+
+// Orc utilities that don't depend only on Support (not ExecutionEngine or
+// IR). This is a workaround for ExecutionEngine's broken layering, and will
+// be removed in the future.
+module LLVM_OrcSupport {
+ requires cplusplus
+
+ header "ExecutionEngine/Orc/Shared/OrcError.h"
+
+ export *
+}
+
+module LLVM_Pass {
+ module Pass {
+ // PassSupport.h and PassAnalysisSupport.h are made available only through
+ // Pass.h.
+ header "Pass.h"
+ textual header "PassSupport.h"
+ textual header "PassAnalysisSupport.h"
+ export *
+ }
+
+ module PassRegistry { header "PassRegistry.h" export * }
+ module InitializePasses { header "InitializePasses.h" export * }
+}
+
+module LLVM_intrinsic_gen {
+ requires cplusplus
+
+ // Delay building the modules containing dependencies to Attributes.h and
+ // Intrinsics.h because they need to be generated by tablegen first.
+
+ // Attributes.h
+ module IR_Argument { header "IR/Argument.h" export * }
+ module IR_Attributes {
+ header "IR/Attributes.h"
+ extern module LLVM_Extern_IR_Attributes_Gen "module.extern.modulemap"
+ export *
+ }
+ module IR_AbstractCallSite { header "IR/AbstractCallSite.h" export * }
+ module IR_ConstantFold { header "IR/ConstantFold.h" export * }
+ module IR_ConstantFolder { header "IR/ConstantFolder.h" export * }
+ module IR_GlobalVariable { header "IR/GlobalVariable.h" export * }
+ module IR_NoFolder { header "IR/NoFolder.h" export * }
+ module IRBuilderFolder { header "IR/IRBuilderFolder.h" export * }
+ module IR_Module { header "IR/Module.h" export * }
+ module IR_ModuleSummaryIndex { header "IR/ModuleSummaryIndex.h" export * }
+ module IR_ModuleSummaryIndexYAML { header "IR/ModuleSummaryIndexYAML.h" export * }
+ module IR_Function { header "IR/Function.h" export * }
+ module IR_InstrTypes { header "IR/InstrTypes.h" export * }
+ module IR_Instructions { header "IR/Instructions.h" export * }
+ module IR_TypeFinder { header "IR/TypeFinder.h" export * }
+ module IR_VectorBuilder { header "IR/VectorBuilder.h" export * }
+
+
+ // Intrinsics.h
+ module IR_CFG { header "IR/CFG.h" export * }
+ module IR_ConstantRange { header "IR/ConstantRange.h" export * }
+ module IR_Dominators { header "IR/Dominators.h" export * }
+ module IR_FixedPointBuilder { header "IR/FixedPointBuilder.h" export * }
+ module Analysis_PostDominators { header "Analysis/PostDominators.h" export * }
+ module Analysis_DomTreeUpdater { header "Analysis/DomTreeUpdater.h" export * }
+ module IR_IRBuilder { header "IR/IRBuilder.h" export * }
+ module IR_IRPrintingPasses { header "IR/IRPrintingPasses.h" export * }
+ module IR_MatrixBuilder { header "IR/MatrixBuilder.h" export * }
+ module IR_PassManager { header "IR/PassManager.h" export * }
+ module IR_PassManagerImpl { header "IR/PassManagerImpl.h" export * }
+ module IR_PredIteratorCache { header "IR/PredIteratorCache.h" export * }
+ module IR_Verifier { header "IR/Verifier.h" export * }
+ module IR_InstIterator { header "IR/InstIterator.h" export * }
+ module IR_InstVisitor { header "IR/InstVisitor.h" export * }
+ module IR_Intrinsics {
+ header "IR/Intrinsics.h"
+    extern module LLVM_Extern_IR_Intrinsics_Gen "module.extern.modulemap"
+ extern module LLVM_Extern_IR_Intrinsics_Enum "module.extern.modulemap"
+ export *
+ }
+ module IR_IntrinsicInst { header "IR/IntrinsicInst.h" export * }
+ module IR_PatternMatch { header "IR/PatternMatch.h" export * }
+ module IR_SafepointIRVerifier { header "IR/SafepointIRVerifier.h" export * }
+ module IR_Statepoint { header "IR/Statepoint.h" export * }
+ module IR_DebugInfo { header "IR/DebugInfo.h" export * }
+
+ export *
+}
+
+module LLVM_IR {
+ requires cplusplus
+
+ umbrella "IR"
+ module * { export * }
+
+ // These are intended for (repeated) textual inclusion.
+ textual header "IR/ConstrainedOps.def"
+ textual header "IR/DebugInfoFlags.def"
+ textual header "IR/Instruction.def"
+ textual header "IR/Metadata.def"
+ textual header "IR/FixedMetadataKinds.def"
+ textual header "IR/Value.def"
+ textual header "IR/VPIntrinsics.def"
+ textual header "IR/RuntimeLibcalls.def"
+}
+
+module LLVM_IRReader {
+ requires cplusplus
+ umbrella "IRReader"
+ module * { export * }
+}
+
+module LLVM_LineEditor {
+ requires cplusplus
+ umbrella "LineEditor"
+ module * { export * }
+}
+
+module LLVM_LTO {
+ requires cplusplus
+ umbrella "LTO"
+ module * { export * }
+}
+
+module LLVM_MC {
+ requires cplusplus
+
+ umbrella "MC"
+ module * { export * }
+}
+
+// Used by llvm-tblgen
+module LLVM_MC_TableGen {
+ requires cplusplus
+ module MC_LaneBitmask { header "MC/LaneBitmask.h" export * }
+ module MC_InstrItineraries { header "MC/MCInstrItineraries.h" export * }
+ module MC_Schedule { header "MC/MCSchedule.h" export * }
+ module MC_SubtargetFeature { header "MC/SubtargetFeature.h" export * }
+}
+
+module LLVM_Object {
+ requires cplusplus
+ umbrella "Object"
+ module * { export * }
+}
+
+module LLVM_Option {
+ requires cplusplus
+ umbrella "Option"
+ module * { export * }
+}
+
+module LLVM_ProfileData {
+ requires cplusplus
+
+ umbrella "ProfileData"
+ module * { export * }
+
+ textual header "ProfileData/InstrProfData.inc"
+ textual header "ProfileData/MemProfData.inc"
+ textual header "ProfileData/MIBEntryDef.inc"
+}
+
+// FIXME: Mislayered?
+module LLVM_Support_TargetRegistry {
+ requires cplusplus
+ header "Support/TargetRegistry.h"
+ export *
+}
+
+module LLVM_TableGen {
+ requires cplusplus
+ umbrella "TableGen"
+ module * { export * }
+}
+
+module LLVM_Transforms {
+ requires cplusplus
+ umbrella "Transforms"
+
+ module * { export * }
+
+ // Requires DEBUG_TYPE to be defined by including file.
+ exclude header "Transforms/Utils/InstructionWorklist.h"
+}
+
+extern module LLVM_Extern_Utils_DataTypes "module.extern.modulemap"
+
+// Build the module with the tablegen-generated files needed by the
+// TargetParser module before building the TargetParser module itself.
+module TargetParserGen {
+ module RISCVTargetParserDef {
+ header "TargetParser/RISCVTargetParser.h"
+ extern module LLVM_Extern_TargetParser_Gen "module.extern.modulemap"
+ export *
+ }
+}
+
+// A module covering ADT/ and Support/. These are intertwined and
+// codependent, and notionally form a single module.
+module LLVM_Utils {
+ module ADT {
+ requires cplusplus
+
+ umbrella "ADT"
+ module * { export * }
+ }
+
+ module Support {
+ requires cplusplus
+
+ umbrella "Support"
+ module * { export * }
+
+ // Exclude this; it should only be used on Windows.
+ exclude header "Support/Windows/WindowsSupport.h"
+
+ // Exclude these; they are fundamentally non-modular.
+ exclude header "Support/PluginLoader.h"
+ exclude header "Support/Solaris/sys/regset.h"
+ textual header "Support/TargetOpcodes.def"
+
+ }
+
+ module TargetParser {
+ requires cplusplus
+
+ umbrella "TargetParser"
+ module * { export * }
+
+ // These are intended for textual inclusion.
+ textual header "TargetParser/ARMTargetParser.def"
+ textual header "TargetParser/CSKYTargetParser.def"
+ textual header "TargetParser/X86TargetParser.def"
+ textual header "TargetParser/LoongArchTargetParser.def"
+ }
+
+ // This part of the module is usable from both C and C++ code.
+ module ConvertUTF {
+ header "Support/ConvertUTF.h"
+ export *
+ }
+}
+
+// This is used for a $src == $build compilation. Otherwise we use
+// LLVM_Support_DataTypes_Build, defined in a module map that is
+// copied into the build area.
+module LLVM_Support_DataTypes_Src {
+ header "llvm/Support/DataTypes.h"
+ export *
+}
+
+module LLVM_WindowsManifest {
+ requires cplusplus
+ umbrella "WindowsManifest"
+ module * { export * }
+}
diff --git a/contrib/libs/llvm16/include/llvm/module.modulemap.build b/contrib/libs/llvm16/include/llvm/module.modulemap.build
new file mode 100644
index 0000000000..162a262a00
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/module.modulemap.build
@@ -0,0 +1,13 @@
+// This is copied into the build area for a $src != $build compilation.
+module LLVM_Support_DataTypes {
+ header "Support/DataTypes.h"
+ export *
+}
+module LLVM_Config_ABI_Breaking {
+ header "Config/abi-breaking.h"
+ export *
+}
+module LLVM_Config_Config {
+ header "Config/llvm-config.h"
+ export *
+}
diff --git a/contrib/libs/llvm16/lib/Analysis/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Analysis/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Analysis/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/AsmParser/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/AsmParser/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/AsmParser/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/BinaryFormat/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/BinaryFormat/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/BinaryFormat/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Bitcode/Reader/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Bitcode/Reader/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Bitcode/Reader/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Bitcode/Writer/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Bitcode/Writer/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Bitcode/Writer/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Bitstream/Reader/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Bitstream/Reader/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Bitstream/Reader/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/CodeGen/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/CodeGen/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..ea5eb56395
--- /dev/null
+++ b/contrib/libs/llvm16/lib/CodeGen/.yandex_meta/licenses.list.txt
@@ -0,0 +1,16 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+/// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+/// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+/// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/CodeGen/AsmPrinter/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/CodeGen/AsmPrinter/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/CodeGen/AsmPrinter/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/CodeGen/GlobalISel/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/CodeGen/GlobalISel/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..41eba9c6eb
--- /dev/null
+++ b/contrib/libs/llvm16/lib/CodeGen/GlobalISel/.yandex_meta/licenses.list.txt
@@ -0,0 +1,13 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+
+====================COPYRIGHT====================
+ auto *CI = cast<ConstantInt>(C);
+ APInt Divisor = CI->getValue();
+ unsigned Shift = Divisor.countTrailingZeros();
diff --git a/contrib/libs/llvm16/lib/CodeGen/MIRParser/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/CodeGen/MIRParser/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..de1a495b4f
--- /dev/null
+++ b/contrib/libs/llvm16/lib/CodeGen/MIRParser/.yandex_meta/licenses.list.txt
@@ -0,0 +1,12 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+
+====================COPYRIGHT====================
+ return isalpha(C) || isdigit(C) || C == '_' || C == '-' || C == '.' ||
+ C == '$';
diff --git a/contrib/libs/llvm16/lib/CodeGen/SelectionDAG/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/CodeGen/SelectionDAG/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/CodeGen/SelectionDAG/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/DWARFLinker/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/DWARFLinker/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/DWARFLinker/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/DWARFLinkerParallel/DWARFLinker.cpp b/contrib/libs/llvm16/lib/DWARFLinkerParallel/DWARFLinker.cpp
new file mode 100644
index 0000000000..a54d2e3cc2
--- /dev/null
+++ b/contrib/libs/llvm16/lib/DWARFLinkerParallel/DWARFLinker.cpp
@@ -0,0 +1,13 @@
+//=== DWARFLinker.cpp -----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/DWARFLinkerParallel/DWARFLinker.h"
+
+namespace llvm {
+namespace dwarflinker_parallel {} // end of namespace dwarflinker_parallel
+} // namespace llvm
diff --git a/contrib/libs/llvm16/lib/DWP/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/DWP/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/DWP/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/DebugInfo/CodeView/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/DebugInfo/CodeView/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/DebugInfo/CodeView/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/DebugInfo/DWARF/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/DebugInfo/DWARF/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/DebugInfo/DWARF/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/DebugInfo/GSYM/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/DebugInfo/GSYM/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/DebugInfo/GSYM/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/DebugInfo/MSF/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/DebugInfo/MSF/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/DebugInfo/MSF/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/DebugInfo/PDB/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/DebugInfo/PDB/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/DebugInfo/PDB/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/DebugInfo/Symbolize/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/DebugInfo/Symbolize/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/DebugInfo/Symbolize/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Debuginfod/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Debuginfod/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Debuginfod/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Demangle/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Demangle/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..746c53a681
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Demangle/.yandex_meta/licenses.list.txt
@@ -0,0 +1,11 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+
+====================COPYRIGHT====================
+ return isDigit(C) || isLower(C) || isUpper(C) || C == '_';
diff --git a/contrib/libs/llvm16/lib/ExecutionEngine/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/ExecutionEngine/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/ExecutionEngine/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/ExecutionEngine/Interpreter/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/ExecutionEngine/Interpreter/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/ExecutionEngine/Interpreter/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/ExecutionEngine/JITLink/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/ExecutionEngine/JITLink/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/ExecutionEngine/JITLink/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/ExecutionEngine/MCJIT/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/ExecutionEngine/MCJIT/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/ExecutionEngine/MCJIT/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/ExecutionEngine/Orc/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/ExecutionEngine/Orc/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/ExecutionEngine/Orc/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/ExecutionEngine/Orc/Shared/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/ExecutionEngine/Orc/Shared/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/ExecutionEngine/Orc/Shared/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/ExecutionEngine/Orc/TargetProcess/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/ExecutionEngine/Orc/TargetProcess/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/ExecutionEngine/Orc/TargetProcess/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/ExecutionEngine/PerfJITEvents/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/ExecutionEngine/PerfJITEvents/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/ExecutionEngine/PerfJITEvents/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/ExecutionEngine/RuntimeDyld/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/ExecutionEngine/RuntimeDyld/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/ExecutionEngine/RuntimeDyld/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/FileCheck/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/FileCheck/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/FileCheck/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Frontend/HLSL/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Frontend/HLSL/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Frontend/HLSL/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Frontend/OpenACC/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Frontend/OpenACC/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Frontend/OpenACC/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Frontend/OpenMP/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Frontend/OpenMP/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Frontend/OpenMP/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/FuzzMutate/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/FuzzMutate/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/FuzzMutate/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/IR/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/IR/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/IR/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/IRPrinter/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/IRPrinter/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/IRPrinter/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/IRReader/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/IRReader/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/IRReader/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/InterfaceStub/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/InterfaceStub/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/InterfaceStub/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/LTO/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/LTO/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/LTO/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/LineEditor/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/LineEditor/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/LineEditor/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Linker/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Linker/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Linker/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/MC/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/MC/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/MC/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/MC/MCDisassembler/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/MC/MCDisassembler/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/MC/MCDisassembler/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/MC/MCParser/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/MC/MCParser/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/MC/MCParser/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/MCA/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/MCA/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/MCA/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/ObjCopy/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/ObjCopy/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/ObjCopy/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Object/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Object/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Object/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/ObjectYAML/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/ObjectYAML/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/ObjectYAML/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Option/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Option/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Option/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Passes/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Passes/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Passes/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/ProfileData/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/ProfileData/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/ProfileData/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/ProfileData/Coverage/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/ProfileData/Coverage/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/ProfileData/Coverage/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Remarks/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Remarks/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Remarks/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Support/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Support/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..a1f81c01d1
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Support/.yandex_meta/licenses.list.txt
@@ -0,0 +1,992 @@
+====================Apache-2.0====================
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+
+====================Apache-2.0====================
+ To apply the Apache License to your work, attach the following
+
+
+====================Apache-2.0====================
+ APPENDIX: How to apply the Apache License to your work.
+
+
+====================Apache-2.0====================
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+====================Apache-2.0====================
+licensed under the Apache License 2.0.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+
+====================BSD-2-Clause====================
+* BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+
+====================BSD-2-Clause====================
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following disclaimer
+* in the documentation and/or other materials provided with the
+* distribution.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+====================BSD-3-Clause====================
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+
+
+====================CC0-1.0====================
+Creative Commons Legal Code
+
+CC0 1.0 Universal
+
+ CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE
+ LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN
+ ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS
+ INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES
+ REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS
+ PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM
+ THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED
+ HEREUNDER.
+
+Statement of Purpose
+
+The laws of most jurisdictions throughout the world automatically confer
+exclusive Copyright and Related Rights (defined below) upon the creator
+and subsequent owner(s) (each and all, an "owner") of an original work of
+authorship and/or a database (each, a "Work").
+
+Certain owners wish to permanently relinquish those rights to a Work for
+the purpose of contributing to a commons of creative, cultural and
+scientific works ("Commons") that the public can reliably and without fear
+of later claims of infringement build upon, modify, incorporate in other
+works, reuse and redistribute as freely as possible in any form whatsoever
+and for any purposes, including without limitation commercial purposes.
+These owners may contribute to the Commons to promote the ideal of a free
+culture and the further production of creative, cultural and scientific
+works, or to gain reputation or greater distribution for their Work in
+part through the use and efforts of others.
+
+For these and/or other purposes and motivations, and without any
+expectation of additional consideration or compensation, the person
+associating CC0 with a Work (the "Affirmer"), to the extent that he or she
+is an owner of Copyright and Related Rights in the Work, voluntarily
+elects to apply CC0 to the Work and publicly distribute the Work under its
+terms, with knowledge of his or her Copyright and Related Rights in the
+Work and the meaning and intended legal effect of CC0 on those rights.
+
+1. Copyright and Related Rights. A Work made available under CC0 may be
+protected by copyright and related or neighboring rights ("Copyright and
+Related Rights"). Copyright and Related Rights include, but are not
+limited to, the following:
+
+ i. the right to reproduce, adapt, distribute, perform, display,
+ communicate, and translate a Work;
+ ii. moral rights retained by the original author(s) and/or performer(s);
+iii. publicity and privacy rights pertaining to a person's image or
+ likeness depicted in a Work;
+ iv. rights protecting against unfair competition in regards to a Work,
+ subject to the limitations in paragraph 4(a), below;
+ v. rights protecting the extraction, dissemination, use and reuse of data
+ in a Work;
+ vi. database rights (such as those arising under Directive 96/9/EC of the
+ European Parliament and of the Council of 11 March 1996 on the legal
+ protection of databases, and under any national implementation
+ thereof, including any amended or successor version of such
+ directive); and
+vii. other similar, equivalent or corresponding rights throughout the
+ world based on applicable law or treaty, and any national
+ implementations thereof.
+
+2. Waiver. To the greatest extent permitted by, but not in contravention
+of, applicable law, Affirmer hereby overtly, fully, permanently,
+irrevocably and unconditionally waives, abandons, and surrenders all of
+Affirmer's Copyright and Related Rights and associated claims and causes
+of action, whether now known or unknown (including existing as well as
+future claims and causes of action), in the Work (i) in all territories
+worldwide, (ii) for the maximum duration provided by applicable law or
+treaty (including future time extensions), (iii) in any current or future
+medium and for any number of copies, and (iv) for any purpose whatsoever,
+including without limitation commercial, advertising or promotional
+purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each
+member of the public at large and to the detriment of Affirmer's heirs and
+successors, fully intending that such Waiver shall not be subject to
+revocation, rescission, cancellation, termination, or any other legal or
+equitable action to disrupt the quiet enjoyment of the Work by the public
+as contemplated by Affirmer's express Statement of Purpose.
+
+3. Public License Fallback. Should any part of the Waiver for any reason
+be judged legally invalid or ineffective under applicable law, then the
+Waiver shall be preserved to the maximum extent permitted taking into
+account Affirmer's express Statement of Purpose. In addition, to the
+extent the Waiver is so judged Affirmer hereby grants to each affected
+person a royalty-free, non transferable, non sublicensable, non exclusive,
+irrevocable and unconditional license to exercise Affirmer's Copyright and
+Related Rights in the Work (i) in all territories worldwide, (ii) for the
+maximum duration provided by applicable law or treaty (including future
+time extensions), (iii) in any current or future medium and for any number
+of copies, and (iv) for any purpose whatsoever, including without
+limitation commercial, advertising or promotional purposes (the
+"License"). The License shall be deemed effective as of the date CC0 was
+applied by Affirmer to the Work. Should any part of the License for any
+reason be judged legally invalid or ineffective under applicable law, such
+partial invalidity or ineffectiveness shall not invalidate the remainder
+of the License, and in such case Affirmer hereby affirms that he or she
+will not (i) exercise any of his or her remaining Copyright and Related
+Rights in the Work or (ii) assert any associated claims and causes of
+action with respect to the Work, in either case contrary to Affirmer's
+express Statement of Purpose.
+
+4. Limitations and Disclaimers.
+
+ a. No trademark or patent rights held by Affirmer are waived, abandoned,
+ surrendered, licensed or otherwise affected by this document.
+ b. Affirmer offers the Work as-is and makes no representations or
+ warranties of any kind concerning the Work, express, implied,
+ statutory or otherwise, including without limitation warranties of
+ title, merchantability, fitness for a particular purpose, non
+ infringement, or the absence of latent or other defects, accuracy, or
+ the present or absence of errors, whether or not discoverable, all to
+ the greatest extent permissible under applicable law.
+ c. Affirmer disclaims responsibility for clearing rights of other persons
+ that may apply to the Work or any use thereof, including without
+ limitation any person's Copyright and Related Rights in the Work.
+ Further, Affirmer disclaims responsibility for obtaining any necessary
+ consents, permissions or other rights required for any use of the
+ Work.
+ d. Affirmer understands and acknowledges that Creative Commons is not a
+ party to this document and has no duty or obligation with respect to
+ this CC0 or use of the Work.
+
+
+====================CC0-1.0====================
+|* SPDX-License-Identifier: CC0-1.0 *|
+
+
+====================COPYRIGHT====================
+ Copyright (C) 2012-2016, Yann Collet.
+
+
+====================COPYRIGHT====================
+ Copyright 2019 Jack O'Connor and Samuel Neves
+
+
+====================COPYRIGHT====================
+ * Copyright (c) 1992 Henry Spencer.
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+
+
+====================COPYRIGHT====================
+ * Copyright (c) 1992, 1993, 1994 Henry Spencer.
+ * Copyright (c) 1992, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+
+
+====================COPYRIGHT====================
+ * Copyright (c) 1994
+ * The Regents of the University of California. All rights reserved.
+
+
+====================COPYRIGHT====================
+ * Copyright (c) 1998 Todd C. Miller <Todd.Miller@courtesan.com>
+
+
+====================COPYRIGHT====================
+ * Copyright © 1991-2015 Unicode, Inc. All rights reserved.
+
+
+====================COPYRIGHT====================
+ * This software was written by Alexander Peslyak in 2001. No copyright is
+ * claimed, and the software is hereby placed in the public domain.
+ * In case this attempt to disclaim copyright and place the software in the
+ * public domain is deemed null and void, then the software is
+ * Copyright (c) 2001 Alexander Peslyak and it is hereby released to the
+ * general public under the following terms:
+
+
+====================COPYRIGHT====================
+Copyright 1992, 1993, 1994 Henry Spencer. All rights reserved.
+
+
+====================COPYRIGHT====================
+Copyright © 1991-2022 Unicode, Inc. All rights reserved.
+
+
+====================File: lib/Support/BLAKE3/LICENSE====================
+This work is released into the public domain with CC0 1.0. Alternatively, it is
+licensed under the Apache License 2.0.
+
+-------------------------------------------------------------------------------
+
+Creative Commons Legal Code
+
+CC0 1.0 Universal
+
+ CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE
+ LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN
+ ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS
+ INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES
+ REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS
+ PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM
+ THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED
+ HEREUNDER.
+
+Statement of Purpose
+
+The laws of most jurisdictions throughout the world automatically confer
+exclusive Copyright and Related Rights (defined below) upon the creator
+and subsequent owner(s) (each and all, an "owner") of an original work of
+authorship and/or a database (each, a "Work").
+
+Certain owners wish to permanently relinquish those rights to a Work for
+the purpose of contributing to a commons of creative, cultural and
+scientific works ("Commons") that the public can reliably and without fear
+of later claims of infringement build upon, modify, incorporate in other
+works, reuse and redistribute as freely as possible in any form whatsoever
+and for any purposes, including without limitation commercial purposes.
+These owners may contribute to the Commons to promote the ideal of a free
+culture and the further production of creative, cultural and scientific
+works, or to gain reputation or greater distribution for their Work in
+part through the use and efforts of others.
+
+For these and/or other purposes and motivations, and without any
+expectation of additional consideration or compensation, the person
+associating CC0 with a Work (the "Affirmer"), to the extent that he or she
+is an owner of Copyright and Related Rights in the Work, voluntarily
+elects to apply CC0 to the Work and publicly distribute the Work under its
+terms, with knowledge of his or her Copyright and Related Rights in the
+Work and the meaning and intended legal effect of CC0 on those rights.
+
+1. Copyright and Related Rights. A Work made available under CC0 may be
+protected by copyright and related or neighboring rights ("Copyright and
+Related Rights"). Copyright and Related Rights include, but are not
+limited to, the following:
+
+ i. the right to reproduce, adapt, distribute, perform, display,
+ communicate, and translate a Work;
+ ii. moral rights retained by the original author(s) and/or performer(s);
+iii. publicity and privacy rights pertaining to a person's image or
+ likeness depicted in a Work;
+ iv. rights protecting against unfair competition in regards to a Work,
+ subject to the limitations in paragraph 4(a), below;
+ v. rights protecting the extraction, dissemination, use and reuse of data
+ in a Work;
+ vi. database rights (such as those arising under Directive 96/9/EC of the
+ European Parliament and of the Council of 11 March 1996 on the legal
+ protection of databases, and under any national implementation
+ thereof, including any amended or successor version of such
+ directive); and
+vii. other similar, equivalent or corresponding rights throughout the
+ world based on applicable law or treaty, and any national
+ implementations thereof.
+
+2. Waiver. To the greatest extent permitted by, but not in contravention
+of, applicable law, Affirmer hereby overtly, fully, permanently,
+irrevocably and unconditionally waives, abandons, and surrenders all of
+Affirmer's Copyright and Related Rights and associated claims and causes
+of action, whether now known or unknown (including existing as well as
+future claims and causes of action), in the Work (i) in all territories
+worldwide, (ii) for the maximum duration provided by applicable law or
+treaty (including future time extensions), (iii) in any current or future
+medium and for any number of copies, and (iv) for any purpose whatsoever,
+including without limitation commercial, advertising or promotional
+purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each
+member of the public at large and to the detriment of Affirmer's heirs and
+successors, fully intending that such Waiver shall not be subject to
+revocation, rescission, cancellation, termination, or any other legal or
+equitable action to disrupt the quiet enjoyment of the Work by the public
+as contemplated by Affirmer's express Statement of Purpose.
+
+3. Public License Fallback. Should any part of the Waiver for any reason
+be judged legally invalid or ineffective under applicable law, then the
+Waiver shall be preserved to the maximum extent permitted taking into
+account Affirmer's express Statement of Purpose. In addition, to the
+extent the Waiver is so judged Affirmer hereby grants to each affected
+person a royalty-free, non transferable, non sublicensable, non exclusive,
+irrevocable and unconditional license to exercise Affirmer's Copyright and
+Related Rights in the Work (i) in all territories worldwide, (ii) for the
+maximum duration provided by applicable law or treaty (including future
+time extensions), (iii) in any current or future medium and for any number
+of copies, and (iv) for any purpose whatsoever, including without
+limitation commercial, advertising or promotional purposes (the
+"License"). The License shall be deemed effective as of the date CC0 was
+applied by Affirmer to the Work. Should any part of the License for any
+reason be judged legally invalid or ineffective under applicable law, such
+partial invalidity or ineffectiveness shall not invalidate the remainder
+of the License, and in such case Affirmer hereby affirms that he or she
+will not (i) exercise any of his or her remaining Copyright and Related
+Rights in the Work or (ii) assert any associated claims and causes of
+action with respect to the Work, in either case contrary to Affirmer's
+express Statement of Purpose.
+
+4. Limitations and Disclaimers.
+
+ a. No trademark or patent rights held by Affirmer are waived, abandoned,
+ surrendered, licensed or otherwise affected by this document.
+ b. Affirmer offers the Work as-is and makes no representations or
+ warranties of any kind concerning the Work, express, implied,
+ statutory or otherwise, including without limitation warranties of
+ title, merchantability, fitness for a particular purpose, non
+ infringement, or the absence of latent or other defects, accuracy, or
+ the present or absence of errors, whether or not discoverable, all to
+ the greatest extent permissible under applicable law.
+ c. Affirmer disclaims responsibility for clearing rights of other persons
+ that may apply to the Work or any use thereof, including without
+ limitation any person's Copyright and Related Rights in the Work.
+ Further, Affirmer disclaims responsibility for obtaining any necessary
+ consents, permissions or other rights required for any use of the
+ Work.
+ d. Affirmer understands and acknowledges that Creative Commons is not a
+ party to this document and has no duty or obligation with respect to
+ this CC0 or use of the Work.
+
+-------------------------------------------------------------------------------
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2019 Jack O'Connor and Samuel Neves
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
+====================File: tools/polly/lib/External/isl/LICENSE====================
+MIT License (MIT)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
+====================File: tools/polly/lib/External/isl/imath/LICENSE====================
+IMath is Copyright © 2002-2009 Michael J. Fromberger
+You may use it subject to the following Licensing Terms:
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
+====================ISC====================
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+
+====================Public-Domain====================
+ * [0] https://hyperelliptic.org/nacl/nacl-20110221.tar.bz2 (public domain
+ * code)
+
+
+====================Public-Domain====================
+ * Homepage:
+ * http://openwall.info/wiki/people/solar/software/public-domain-source-code/md5
+ *
+ * Author:
+ * Alexander Peslyak, better known as Solar Designer <solar at openwall.com>
+ *
+ * This software was written by Alexander Peslyak in 2001. No copyright is
+ * claimed, and the software is hereby placed in the public domain.
+ * In case this attempt to disclaim copyright and place the software in the
+ * public domain is deemed null and void, then the software is
+ * Copyright (c) 2001 Alexander Peslyak and it is hereby released to the
+ * general public under the following terms:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted.
+ *
+ * There's ABSOLUTELY NO WARRANTY, express or implied.
+ *
+ * (This is a heavily cut-down "BSD license".)
+
+
+====================Public-Domain====================
+// This code is taken from public domain
+
+
+====================Public-Domain AND CC0-1.0====================
+This work is released into the public domain with CC0 1.0. Alternatively, it is
+
+
+====================Public-Domain AND CC0-1.0====================
+|* Released into the public domain with CC0 1.0 *|
+
+
+====================Spencer-94====================
+This software is not subject to any license of the American Telephone
+and Telegraph Company or of the Regents of the University of California.
+
+Permission is granted to anyone to use this software for any purpose on
+any computer system, and to alter it and redistribute it, subject
+to the following restrictions:
+
+1. The author is not responsible for the consequences of use of this
+ software, no matter how awful, even if they arise from flaws in it.
+
+2. The origin of this software must not be misrepresented, either by
+ explicit claim or by omission. Since few users ever read sources,
+ credits must appear in the documentation.
+
+3. Altered versions must be plainly marked as such, and must not be
+ misrepresented as being the original software. Since few users
+ ever read sources, credits must appear in the documentation.
+
+4. This notice may not be removed or altered.
+
+
+====================Unicode====================
+ * Distributed under the Terms of Use in
+ * http://www.unicode.org/copyright.html.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of the Unicode data files and any associated documentation
+ * (the "Data Files") or Unicode software and any associated documentation
+ * (the "Software") to deal in the Data Files or Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, and/or sell copies of
+ * the Data Files or Software, and to permit persons to whom the Data Files
+ * or Software are furnished to do so, provided that
+ * (a) this copyright and permission notice appear with all copies
+ * of the Data Files or Software,
+ * (b) this copyright and permission notice appear in associated
+ * documentation, and
+ * (c) there is clear notice in each modified Data File or in the Software
+ * as well as in the documentation associated with the Data File(s) or
+ * Software that the data or software has been modified.
+ *
+ * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF
+ * ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
+ * WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+ * IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS
+ * NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL
+ * DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+ *
+ * Except as contained in this notice, the name of a copyright holder
+ * shall not be used in advertising or otherwise to promote the sale,
+ * use or other dealings in these Data Files or Software without prior
+ * written authorization of the copyright holder.
+
+
+====================Unicode====================
+Distributed under the Terms of Use in https://www.unicode.org/copyright.html.
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of the Unicode data files and any associated documentation
+(the "Data Files") or Unicode software and any associated documentation
+(the "Software") to deal in the Data Files or Software
+without restriction, including without limitation the rights to use,
+copy, modify, merge, publish, distribute, and/or sell copies of
+the Data Files or Software, and to permit persons to whom the Data Files
+or Software are furnished to do so, provided that either
+(a) this copyright and permission notice appear with all copies
+of the Data Files or Software, or
+(b) this copyright and permission notice appear in associated
+Documentation.
+
+THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS
+NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL
+DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+
+Except as contained in this notice, the name of a copyright holder
+shall not be used in advertising or otherwise to promote the sale,
+use or other dealings in these Data Files or Software without prior
+written authorization of the copyright holder.
+
+
+====================Unicode====================
+NOTICE TO USER: Carefully read the following legal agreement.
+BY DOWNLOADING, INSTALLING, COPYING OR OTHERWISE USING UNICODE INC.'S
+DATA FILES ("DATA FILES"), AND/OR SOFTWARE ("SOFTWARE"),
+YOU UNEQUIVOCALLY ACCEPT, AND AGREE TO BE BOUND BY, ALL OF THE
+TERMS AND CONDITIONS OF THIS AGREEMENT.
+IF YOU DO NOT AGREE, DO NOT DOWNLOAD, INSTALL, COPY, DISTRIBUTE OR USE
+THE DATA FILES OR SOFTWARE.
+
+COPYRIGHT AND PERMISSION NOTICE
+
+
+====================Unicode====================
+UNICODE, INC. LICENSE AGREEMENT - DATA FILES AND SOFTWARE
diff --git a/contrib/libs/llvm16/lib/Support/BLAKE3/blake3_avx2.c b/contrib/libs/llvm16/lib/Support/BLAKE3/blake3_avx2.c
new file mode 100644
index 0000000000..e76aa1a3ae
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Support/BLAKE3/blake3_avx2.c
@@ -0,0 +1,326 @@
+#include "blake3_impl.h"
+
+#include <immintrin.h>
+
+#define DEGREE 8
+
+INLINE __m256i loadu(const uint8_t src[32]) {
+ return _mm256_loadu_si256((const __m256i *)src);
+}
+
+INLINE void storeu(__m256i src, uint8_t dest[16]) {
+ _mm256_storeu_si256((__m256i *)dest, src);
+}
+
+INLINE __m256i addv(__m256i a, __m256i b) { return _mm256_add_epi32(a, b); }
+
+// Note that clang-format doesn't like the name "xor" for some reason.
+INLINE __m256i xorv(__m256i a, __m256i b) { return _mm256_xor_si256(a, b); }
+
+INLINE __m256i set1(uint32_t x) { return _mm256_set1_epi32((int32_t)x); }
+
+INLINE __m256i rot16(__m256i x) {
+ return _mm256_shuffle_epi8(
+ x, _mm256_set_epi8(13, 12, 15, 14, 9, 8, 11, 10, 5, 4, 7, 6, 1, 0, 3, 2,
+ 13, 12, 15, 14, 9, 8, 11, 10, 5, 4, 7, 6, 1, 0, 3, 2));
+}
+
+INLINE __m256i rot12(__m256i x) {
+ return _mm256_or_si256(_mm256_srli_epi32(x, 12), _mm256_slli_epi32(x, 32 - 12));
+}
+
+INLINE __m256i rot8(__m256i x) {
+ return _mm256_shuffle_epi8(
+ x, _mm256_set_epi8(12, 15, 14, 13, 8, 11, 10, 9, 4, 7, 6, 5, 0, 3, 2, 1,
+ 12, 15, 14, 13, 8, 11, 10, 9, 4, 7, 6, 5, 0, 3, 2, 1));
+}
+
+INLINE __m256i rot7(__m256i x) {
+ return _mm256_or_si256(_mm256_srli_epi32(x, 7), _mm256_slli_epi32(x, 32 - 7));
+}
+
+INLINE void round_fn(__m256i v[16], __m256i m[16], size_t r) {
+ v[0] = addv(v[0], m[(size_t)MSG_SCHEDULE[r][0]]);
+ v[1] = addv(v[1], m[(size_t)MSG_SCHEDULE[r][2]]);
+ v[2] = addv(v[2], m[(size_t)MSG_SCHEDULE[r][4]]);
+ v[3] = addv(v[3], m[(size_t)MSG_SCHEDULE[r][6]]);
+ v[0] = addv(v[0], v[4]);
+ v[1] = addv(v[1], v[5]);
+ v[2] = addv(v[2], v[6]);
+ v[3] = addv(v[3], v[7]);
+ v[12] = xorv(v[12], v[0]);
+ v[13] = xorv(v[13], v[1]);
+ v[14] = xorv(v[14], v[2]);
+ v[15] = xorv(v[15], v[3]);
+ v[12] = rot16(v[12]);
+ v[13] = rot16(v[13]);
+ v[14] = rot16(v[14]);
+ v[15] = rot16(v[15]);
+ v[8] = addv(v[8], v[12]);
+ v[9] = addv(v[9], v[13]);
+ v[10] = addv(v[10], v[14]);
+ v[11] = addv(v[11], v[15]);
+ v[4] = xorv(v[4], v[8]);
+ v[5] = xorv(v[5], v[9]);
+ v[6] = xorv(v[6], v[10]);
+ v[7] = xorv(v[7], v[11]);
+ v[4] = rot12(v[4]);
+ v[5] = rot12(v[5]);
+ v[6] = rot12(v[6]);
+ v[7] = rot12(v[7]);
+ v[0] = addv(v[0], m[(size_t)MSG_SCHEDULE[r][1]]);
+ v[1] = addv(v[1], m[(size_t)MSG_SCHEDULE[r][3]]);
+ v[2] = addv(v[2], m[(size_t)MSG_SCHEDULE[r][5]]);
+ v[3] = addv(v[3], m[(size_t)MSG_SCHEDULE[r][7]]);
+ v[0] = addv(v[0], v[4]);
+ v[1] = addv(v[1], v[5]);
+ v[2] = addv(v[2], v[6]);
+ v[3] = addv(v[3], v[7]);
+ v[12] = xorv(v[12], v[0]);
+ v[13] = xorv(v[13], v[1]);
+ v[14] = xorv(v[14], v[2]);
+ v[15] = xorv(v[15], v[3]);
+ v[12] = rot8(v[12]);
+ v[13] = rot8(v[13]);
+ v[14] = rot8(v[14]);
+ v[15] = rot8(v[15]);
+ v[8] = addv(v[8], v[12]);
+ v[9] = addv(v[9], v[13]);
+ v[10] = addv(v[10], v[14]);
+ v[11] = addv(v[11], v[15]);
+ v[4] = xorv(v[4], v[8]);
+ v[5] = xorv(v[5], v[9]);
+ v[6] = xorv(v[6], v[10]);
+ v[7] = xorv(v[7], v[11]);
+ v[4] = rot7(v[4]);
+ v[5] = rot7(v[5]);
+ v[6] = rot7(v[6]);
+ v[7] = rot7(v[7]);
+
+ v[0] = addv(v[0], m[(size_t)MSG_SCHEDULE[r][8]]);
+ v[1] = addv(v[1], m[(size_t)MSG_SCHEDULE[r][10]]);
+ v[2] = addv(v[2], m[(size_t)MSG_SCHEDULE[r][12]]);
+ v[3] = addv(v[3], m[(size_t)MSG_SCHEDULE[r][14]]);
+ v[0] = addv(v[0], v[5]);
+ v[1] = addv(v[1], v[6]);
+ v[2] = addv(v[2], v[7]);
+ v[3] = addv(v[3], v[4]);
+ v[15] = xorv(v[15], v[0]);
+ v[12] = xorv(v[12], v[1]);
+ v[13] = xorv(v[13], v[2]);
+ v[14] = xorv(v[14], v[3]);
+ v[15] = rot16(v[15]);
+ v[12] = rot16(v[12]);
+ v[13] = rot16(v[13]);
+ v[14] = rot16(v[14]);
+ v[10] = addv(v[10], v[15]);
+ v[11] = addv(v[11], v[12]);
+ v[8] = addv(v[8], v[13]);
+ v[9] = addv(v[9], v[14]);
+ v[5] = xorv(v[5], v[10]);
+ v[6] = xorv(v[6], v[11]);
+ v[7] = xorv(v[7], v[8]);
+ v[4] = xorv(v[4], v[9]);
+ v[5] = rot12(v[5]);
+ v[6] = rot12(v[6]);
+ v[7] = rot12(v[7]);
+ v[4] = rot12(v[4]);
+ v[0] = addv(v[0], m[(size_t)MSG_SCHEDULE[r][9]]);
+ v[1] = addv(v[1], m[(size_t)MSG_SCHEDULE[r][11]]);
+ v[2] = addv(v[2], m[(size_t)MSG_SCHEDULE[r][13]]);
+ v[3] = addv(v[3], m[(size_t)MSG_SCHEDULE[r][15]]);
+ v[0] = addv(v[0], v[5]);
+ v[1] = addv(v[1], v[6]);
+ v[2] = addv(v[2], v[7]);
+ v[3] = addv(v[3], v[4]);
+ v[15] = xorv(v[15], v[0]);
+ v[12] = xorv(v[12], v[1]);
+ v[13] = xorv(v[13], v[2]);
+ v[14] = xorv(v[14], v[3]);
+ v[15] = rot8(v[15]);
+ v[12] = rot8(v[12]);
+ v[13] = rot8(v[13]);
+ v[14] = rot8(v[14]);
+ v[10] = addv(v[10], v[15]);
+ v[11] = addv(v[11], v[12]);
+ v[8] = addv(v[8], v[13]);
+ v[9] = addv(v[9], v[14]);
+ v[5] = xorv(v[5], v[10]);
+ v[6] = xorv(v[6], v[11]);
+ v[7] = xorv(v[7], v[8]);
+ v[4] = xorv(v[4], v[9]);
+ v[5] = rot7(v[5]);
+ v[6] = rot7(v[6]);
+ v[7] = rot7(v[7]);
+ v[4] = rot7(v[4]);
+}
+
+INLINE void transpose_vecs(__m256i vecs[DEGREE]) {
+ // Interleave 32-bit lanes. The low unpack is lanes 00/11/44/55, and the high
+ // is 22/33/66/77.
+ __m256i ab_0145 = _mm256_unpacklo_epi32(vecs[0], vecs[1]);
+ __m256i ab_2367 = _mm256_unpackhi_epi32(vecs[0], vecs[1]);
+ __m256i cd_0145 = _mm256_unpacklo_epi32(vecs[2], vecs[3]);
+ __m256i cd_2367 = _mm256_unpackhi_epi32(vecs[2], vecs[3]);
+ __m256i ef_0145 = _mm256_unpacklo_epi32(vecs[4], vecs[5]);
+ __m256i ef_2367 = _mm256_unpackhi_epi32(vecs[4], vecs[5]);
+ __m256i gh_0145 = _mm256_unpacklo_epi32(vecs[6], vecs[7]);
+ __m256i gh_2367 = _mm256_unpackhi_epi32(vecs[6], vecs[7]);
+
+ // Interleave 64-bit lates. The low unpack is lanes 00/22 and the high is
+ // 11/33.
+ __m256i abcd_04 = _mm256_unpacklo_epi64(ab_0145, cd_0145);
+ __m256i abcd_15 = _mm256_unpackhi_epi64(ab_0145, cd_0145);
+ __m256i abcd_26 = _mm256_unpacklo_epi64(ab_2367, cd_2367);
+ __m256i abcd_37 = _mm256_unpackhi_epi64(ab_2367, cd_2367);
+ __m256i efgh_04 = _mm256_unpacklo_epi64(ef_0145, gh_0145);
+ __m256i efgh_15 = _mm256_unpackhi_epi64(ef_0145, gh_0145);
+ __m256i efgh_26 = _mm256_unpacklo_epi64(ef_2367, gh_2367);
+ __m256i efgh_37 = _mm256_unpackhi_epi64(ef_2367, gh_2367);
+
+ // Interleave 128-bit lanes.
+ vecs[0] = _mm256_permute2x128_si256(abcd_04, efgh_04, 0x20);
+ vecs[1] = _mm256_permute2x128_si256(abcd_15, efgh_15, 0x20);
+ vecs[2] = _mm256_permute2x128_si256(abcd_26, efgh_26, 0x20);
+ vecs[3] = _mm256_permute2x128_si256(abcd_37, efgh_37, 0x20);
+ vecs[4] = _mm256_permute2x128_si256(abcd_04, efgh_04, 0x31);
+ vecs[5] = _mm256_permute2x128_si256(abcd_15, efgh_15, 0x31);
+ vecs[6] = _mm256_permute2x128_si256(abcd_26, efgh_26, 0x31);
+ vecs[7] = _mm256_permute2x128_si256(abcd_37, efgh_37, 0x31);
+}
+
+INLINE void transpose_msg_vecs(const uint8_t *const *inputs,
+ size_t block_offset, __m256i out[16]) {
+ out[0] = loadu(&inputs[0][block_offset + 0 * sizeof(__m256i)]);
+ out[1] = loadu(&inputs[1][block_offset + 0 * sizeof(__m256i)]);
+ out[2] = loadu(&inputs[2][block_offset + 0 * sizeof(__m256i)]);
+ out[3] = loadu(&inputs[3][block_offset + 0 * sizeof(__m256i)]);
+ out[4] = loadu(&inputs[4][block_offset + 0 * sizeof(__m256i)]);
+ out[5] = loadu(&inputs[5][block_offset + 0 * sizeof(__m256i)]);
+ out[6] = loadu(&inputs[6][block_offset + 0 * sizeof(__m256i)]);
+ out[7] = loadu(&inputs[7][block_offset + 0 * sizeof(__m256i)]);
+ out[8] = loadu(&inputs[0][block_offset + 1 * sizeof(__m256i)]);
+ out[9] = loadu(&inputs[1][block_offset + 1 * sizeof(__m256i)]);
+ out[10] = loadu(&inputs[2][block_offset + 1 * sizeof(__m256i)]);
+ out[11] = loadu(&inputs[3][block_offset + 1 * sizeof(__m256i)]);
+ out[12] = loadu(&inputs[4][block_offset + 1 * sizeof(__m256i)]);
+ out[13] = loadu(&inputs[5][block_offset + 1 * sizeof(__m256i)]);
+ out[14] = loadu(&inputs[6][block_offset + 1 * sizeof(__m256i)]);
+ out[15] = loadu(&inputs[7][block_offset + 1 * sizeof(__m256i)]);
+ for (size_t i = 0; i < 8; ++i) {
+ _mm_prefetch((const void *)&inputs[i][block_offset + 256], _MM_HINT_T0);
+ }
+ transpose_vecs(&out[0]);
+ transpose_vecs(&out[8]);
+}
+
+INLINE void load_counters(uint64_t counter, bool increment_counter,
+ __m256i *out_lo, __m256i *out_hi) {
+ const __m256i mask = _mm256_set1_epi32(-(int32_t)increment_counter);
+ const __m256i add0 = _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0);
+ const __m256i add1 = _mm256_and_si256(mask, add0);
+ __m256i l = _mm256_add_epi32(_mm256_set1_epi32((int32_t)counter), add1);
+ __m256i carry = _mm256_cmpgt_epi32(_mm256_xor_si256(add1, _mm256_set1_epi32(0x80000000)),
+ _mm256_xor_si256( l, _mm256_set1_epi32(0x80000000)));
+ __m256i h = _mm256_sub_epi32(_mm256_set1_epi32((int32_t)(counter >> 32)), carry);
+ *out_lo = l;
+ *out_hi = h;
+}
+
+static
+void blake3_hash8_avx2(const uint8_t *const *inputs, size_t blocks,
+ const uint32_t key[8], uint64_t counter,
+ bool increment_counter, uint8_t flags,
+ uint8_t flags_start, uint8_t flags_end, uint8_t *out) {
+ __m256i h_vecs[8] = {
+ set1(key[0]), set1(key[1]), set1(key[2]), set1(key[3]),
+ set1(key[4]), set1(key[5]), set1(key[6]), set1(key[7]),
+ };
+ __m256i counter_low_vec, counter_high_vec;
+ load_counters(counter, increment_counter, &counter_low_vec,
+ &counter_high_vec);
+ uint8_t block_flags = flags | flags_start;
+
+ for (size_t block = 0; block < blocks; block++) {
+ if (block + 1 == blocks) {
+ block_flags |= flags_end;
+ }
+ __m256i block_len_vec = set1(BLAKE3_BLOCK_LEN);
+ __m256i block_flags_vec = set1(block_flags);
+ __m256i msg_vecs[16];
+ transpose_msg_vecs(inputs, block * BLAKE3_BLOCK_LEN, msg_vecs);
+
+ __m256i v[16] = {
+ h_vecs[0], h_vecs[1], h_vecs[2], h_vecs[3],
+ h_vecs[4], h_vecs[5], h_vecs[6], h_vecs[7],
+ set1(IV[0]), set1(IV[1]), set1(IV[2]), set1(IV[3]),
+ counter_low_vec, counter_high_vec, block_len_vec, block_flags_vec,
+ };
+ round_fn(v, msg_vecs, 0);
+ round_fn(v, msg_vecs, 1);
+ round_fn(v, msg_vecs, 2);
+ round_fn(v, msg_vecs, 3);
+ round_fn(v, msg_vecs, 4);
+ round_fn(v, msg_vecs, 5);
+ round_fn(v, msg_vecs, 6);
+ h_vecs[0] = xorv(v[0], v[8]);
+ h_vecs[1] = xorv(v[1], v[9]);
+ h_vecs[2] = xorv(v[2], v[10]);
+ h_vecs[3] = xorv(v[3], v[11]);
+ h_vecs[4] = xorv(v[4], v[12]);
+ h_vecs[5] = xorv(v[5], v[13]);
+ h_vecs[6] = xorv(v[6], v[14]);
+ h_vecs[7] = xorv(v[7], v[15]);
+
+ block_flags = flags;
+ }
+
+ transpose_vecs(h_vecs);
+ storeu(h_vecs[0], &out[0 * sizeof(__m256i)]);
+ storeu(h_vecs[1], &out[1 * sizeof(__m256i)]);
+ storeu(h_vecs[2], &out[2 * sizeof(__m256i)]);
+ storeu(h_vecs[3], &out[3 * sizeof(__m256i)]);
+ storeu(h_vecs[4], &out[4 * sizeof(__m256i)]);
+ storeu(h_vecs[5], &out[5 * sizeof(__m256i)]);
+ storeu(h_vecs[6], &out[6 * sizeof(__m256i)]);
+ storeu(h_vecs[7], &out[7 * sizeof(__m256i)]);
+}
+
+#if !defined(BLAKE3_NO_SSE41)
+void blake3_hash_many_sse41(const uint8_t *const *inputs, size_t num_inputs,
+ size_t blocks, const uint32_t key[8],
+ uint64_t counter, bool increment_counter,
+ uint8_t flags, uint8_t flags_start,
+ uint8_t flags_end, uint8_t *out);
+#else
+void blake3_hash_many_portable(const uint8_t *const *inputs, size_t num_inputs,
+ size_t blocks, const uint32_t key[8],
+ uint64_t counter, bool increment_counter,
+ uint8_t flags, uint8_t flags_start,
+ uint8_t flags_end, uint8_t *out);
+#endif
+
+void blake3_hash_many_avx2(const uint8_t *const *inputs, size_t num_inputs,
+ size_t blocks, const uint32_t key[8],
+ uint64_t counter, bool increment_counter,
+ uint8_t flags, uint8_t flags_start,
+ uint8_t flags_end, uint8_t *out) {
+ while (num_inputs >= DEGREE) {
+ blake3_hash8_avx2(inputs, blocks, key, counter, increment_counter, flags,
+ flags_start, flags_end, out);
+ if (increment_counter) {
+ counter += DEGREE;
+ }
+ inputs += DEGREE;
+ num_inputs -= DEGREE;
+ out = &out[DEGREE * BLAKE3_OUT_LEN];
+ }
+#if !defined(BLAKE3_NO_SSE41)
+ blake3_hash_many_sse41(inputs, num_inputs, blocks, key, counter,
+ increment_counter, flags, flags_start, flags_end, out);
+#else
+ blake3_hash_many_portable(inputs, num_inputs, blocks, key, counter,
+ increment_counter, flags, flags_start, flags_end,
+ out);
+#endif
+}
diff --git a/contrib/libs/llvm16/lib/Support/BLAKE3/blake3_avx2_x86-64_windows_msvc.asm b/contrib/libs/llvm16/lib/Support/BLAKE3/blake3_avx2_x86-64_windows_msvc.asm
new file mode 100644
index 0000000000..46bad1d98f
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Support/BLAKE3/blake3_avx2_x86-64_windows_msvc.asm
@@ -0,0 +1,1828 @@
+public _llvm_blake3_hash_many_avx2
+public llvm_blake3_hash_many_avx2
+
+_TEXT SEGMENT ALIGN(16) 'CODE'
+
+ALIGN 16
+llvm_blake3_hash_many_avx2 PROC
+_llvm_blake3_hash_many_avx2 PROC
+ push r15
+ push r14
+ push r13
+ push r12
+ push rsi
+ push rdi
+ push rbx
+ push rbp
+ mov rbp, rsp
+ sub rsp, 880
+ and rsp, 0FFFFFFFFFFFFFFC0H
+ vmovdqa xmmword ptr [rsp+2D0H], xmm6
+ vmovdqa xmmword ptr [rsp+2E0H], xmm7
+ vmovdqa xmmword ptr [rsp+2F0H], xmm8
+ vmovdqa xmmword ptr [rsp+300H], xmm9
+ vmovdqa xmmword ptr [rsp+310H], xmm10
+ vmovdqa xmmword ptr [rsp+320H], xmm11
+ vmovdqa xmmword ptr [rsp+330H], xmm12
+ vmovdqa xmmword ptr [rsp+340H], xmm13
+ vmovdqa xmmword ptr [rsp+350H], xmm14
+ vmovdqa xmmword ptr [rsp+360H], xmm15
+ mov rdi, rcx
+ mov rsi, rdx
+ mov rdx, r8
+ mov rcx, r9
+ mov r8, qword ptr [rbp+68H]
+ movzx r9, byte ptr [rbp+70H]
+ neg r9d
+ vmovd xmm0, r9d
+ vpbroadcastd ymm0, xmm0
+ vmovdqa ymmword ptr [rsp+260H], ymm0
+ vpand ymm1, ymm0, ymmword ptr [ADD0]
+ vpand ymm2, ymm0, ymmword ptr [ADD1]
+ vmovdqa ymmword ptr [rsp+2A0H], ymm2
+ vmovd xmm2, r8d
+ vpbroadcastd ymm2, xmm2
+ vpaddd ymm2, ymm2, ymm1
+ vmovdqa ymmword ptr [rsp+220H], ymm2
+ vpxor ymm1, ymm1, ymmword ptr [CMP_MSB_MASK]
+ vpxor ymm2, ymm2, ymmword ptr [CMP_MSB_MASK]
+ vpcmpgtd ymm2, ymm1, ymm2
+ shr r8, 32
+ vmovd xmm3, r8d
+ vpbroadcastd ymm3, xmm3
+ vpsubd ymm3, ymm3, ymm2
+ vmovdqa ymmword ptr [rsp+240H], ymm3
+ shl rdx, 6
+ mov qword ptr [rsp+2C0H], rdx
+ cmp rsi, 8
+ jc final7blocks
+outerloop8:
+ vpbroadcastd ymm0, dword ptr [rcx]
+ vpbroadcastd ymm1, dword ptr [rcx+4H]
+ vpbroadcastd ymm2, dword ptr [rcx+8H]
+ vpbroadcastd ymm3, dword ptr [rcx+0CH]
+ vpbroadcastd ymm4, dword ptr [rcx+10H]
+ vpbroadcastd ymm5, dword ptr [rcx+14H]
+ vpbroadcastd ymm6, dword ptr [rcx+18H]
+ vpbroadcastd ymm7, dword ptr [rcx+1CH]
+ mov r8, qword ptr [rdi]
+ mov r9, qword ptr [rdi+8H]
+ mov r10, qword ptr [rdi+10H]
+ mov r11, qword ptr [rdi+18H]
+ mov r12, qword ptr [rdi+20H]
+ mov r13, qword ptr [rdi+28H]
+ mov r14, qword ptr [rdi+30H]
+ mov r15, qword ptr [rdi+38H]
+ movzx eax, byte ptr [rbp+78H]
+ movzx ebx, byte ptr [rbp+80H]
+ or eax, ebx
+ xor edx, edx
+ALIGN 16
+innerloop8:
+ movzx ebx, byte ptr [rbp+88H]
+ or ebx, eax
+ add rdx, 64
+ cmp rdx, qword ptr [rsp+2C0H]
+ cmove eax, ebx
+ mov dword ptr [rsp+200H], eax
+ vmovups xmm8, xmmword ptr [r8+rdx-40H]
+ vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-40H], 01H
+ vmovups xmm9, xmmword ptr [r9+rdx-40H]
+ vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-40H], 01H
+ vunpcklpd ymm12, ymm8, ymm9
+ vunpckhpd ymm13, ymm8, ymm9
+ vmovups xmm10, xmmword ptr [r10+rdx-40H]
+ vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-40H], 01H
+ vmovups xmm11, xmmword ptr [r11+rdx-40H]
+ vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-40H], 01H
+ vunpcklpd ymm14, ymm10, ymm11
+ vunpckhpd ymm15, ymm10, ymm11
+ vshufps ymm8, ymm12, ymm14, 136
+ vmovaps ymmword ptr [rsp], ymm8
+ vshufps ymm9, ymm12, ymm14, 221
+ vmovaps ymmword ptr [rsp+20H], ymm9
+ vshufps ymm10, ymm13, ymm15, 136
+ vmovaps ymmword ptr [rsp+40H], ymm10
+ vshufps ymm11, ymm13, ymm15, 221
+ vmovaps ymmword ptr [rsp+60H], ymm11
+ vmovups xmm8, xmmword ptr [r8+rdx-30H]
+ vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-30H], 01H
+ vmovups xmm9, xmmword ptr [r9+rdx-30H]
+ vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-30H], 01H
+ vunpcklpd ymm12, ymm8, ymm9
+ vunpckhpd ymm13, ymm8, ymm9
+ vmovups xmm10, xmmword ptr [r10+rdx-30H]
+ vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-30H], 01H
+ vmovups xmm11, xmmword ptr [r11+rdx-30H]
+ vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-30H], 01H
+ vunpcklpd ymm14, ymm10, ymm11
+ vunpckhpd ymm15, ymm10, ymm11
+ vshufps ymm8, ymm12, ymm14, 136
+ vmovaps ymmword ptr [rsp+80H], ymm8
+ vshufps ymm9, ymm12, ymm14, 221
+ vmovaps ymmword ptr [rsp+0A0H], ymm9
+ vshufps ymm10, ymm13, ymm15, 136
+ vmovaps ymmword ptr [rsp+0C0H], ymm10
+ vshufps ymm11, ymm13, ymm15, 221
+ vmovaps ymmword ptr [rsp+0E0H], ymm11
+ vmovups xmm8, xmmword ptr [r8+rdx-20H]
+ vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-20H], 01H
+ vmovups xmm9, xmmword ptr [r9+rdx-20H]
+ vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-20H], 01H
+ vunpcklpd ymm12, ymm8, ymm9
+ vunpckhpd ymm13, ymm8, ymm9
+ vmovups xmm10, xmmword ptr [r10+rdx-20H]
+ vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-20H], 01H
+ vmovups xmm11, xmmword ptr [r11+rdx-20H]
+ vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-20H], 01H
+ vunpcklpd ymm14, ymm10, ymm11
+ vunpckhpd ymm15, ymm10, ymm11
+ vshufps ymm8, ymm12, ymm14, 136
+ vmovaps ymmword ptr [rsp+100H], ymm8
+ vshufps ymm9, ymm12, ymm14, 221
+ vmovaps ymmword ptr [rsp+120H], ymm9
+ vshufps ymm10, ymm13, ymm15, 136
+ vmovaps ymmword ptr [rsp+140H], ymm10
+ vshufps ymm11, ymm13, ymm15, 221
+ vmovaps ymmword ptr [rsp+160H], ymm11
+ vmovups xmm8, xmmword ptr [r8+rdx-10H]
+ vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-10H], 01H
+ vmovups xmm9, xmmword ptr [r9+rdx-10H]
+ vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-10H], 01H
+ vunpcklpd ymm12, ymm8, ymm9
+ vunpckhpd ymm13, ymm8, ymm9
+ vmovups xmm10, xmmword ptr [r10+rdx-10H]
+ vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-10H], 01H
+ vmovups xmm11, xmmword ptr [r11+rdx-10H]
+ vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-10H], 01H
+ vunpcklpd ymm14, ymm10, ymm11
+ vunpckhpd ymm15, ymm10, ymm11
+ vshufps ymm8, ymm12, ymm14, 136
+ vmovaps ymmword ptr [rsp+180H], ymm8
+ vshufps ymm9, ymm12, ymm14, 221
+ vmovaps ymmword ptr [rsp+1A0H], ymm9
+ vshufps ymm10, ymm13, ymm15, 136
+ vmovaps ymmword ptr [rsp+1C0H], ymm10
+ vshufps ymm11, ymm13, ymm15, 221
+ vmovaps ymmword ptr [rsp+1E0H], ymm11
+ vpbroadcastd ymm15, dword ptr [rsp+200H]
+ prefetcht0 byte ptr [r8+rdx+80H]
+ prefetcht0 byte ptr [r12+rdx+80H]
+ prefetcht0 byte ptr [r9+rdx+80H]
+ prefetcht0 byte ptr [r13+rdx+80H]
+ prefetcht0 byte ptr [r10+rdx+80H]
+ prefetcht0 byte ptr [r14+rdx+80H]
+ prefetcht0 byte ptr [r11+rdx+80H]
+ prefetcht0 byte ptr [r15+rdx+80H]
+ vpaddd ymm0, ymm0, ymmword ptr [rsp]
+ vpaddd ymm1, ymm1, ymmword ptr [rsp+40H]
+ vpaddd ymm2, ymm2, ymmword ptr [rsp+80H]
+ vpaddd ymm3, ymm3, ymmword ptr [rsp+0C0H]
+ vpaddd ymm0, ymm0, ymm4
+ vpaddd ymm1, ymm1, ymm5
+ vpaddd ymm2, ymm2, ymm6
+ vpaddd ymm3, ymm3, ymm7
+ vpxor ymm12, ymm0, ymmword ptr [rsp+220H]
+ vpxor ymm13, ymm1, ymmword ptr [rsp+240H]
+ vpxor ymm14, ymm2, ymmword ptr [BLAKE3_BLOCK_LEN]
+ vpxor ymm15, ymm3, ymm15
+ vbroadcasti128 ymm8, xmmword ptr [ROT16]
+ vpshufb ymm12, ymm12, ymm8
+ vpshufb ymm13, ymm13, ymm8
+ vpshufb ymm14, ymm14, ymm8
+ vpshufb ymm15, ymm15, ymm8
+ vpaddd ymm8, ymm12, ymmword ptr [BLAKE3_IV_0]
+ vpaddd ymm9, ymm13, ymmword ptr [BLAKE3_IV_1]
+ vpaddd ymm10, ymm14, ymmword ptr [BLAKE3_IV_2]
+ vpaddd ymm11, ymm15, ymmword ptr [BLAKE3_IV_3]
+ vpxor ymm4, ymm4, ymm8
+ vpxor ymm5, ymm5, ymm9
+ vpxor ymm6, ymm6, ymm10
+ vpxor ymm7, ymm7, ymm11
+ vmovdqa ymmword ptr [rsp+200H], ymm8
+ vpsrld ymm8, ymm4, 12
+ vpslld ymm4, ymm4, 20
+ vpor ymm4, ymm4, ymm8
+ vpsrld ymm8, ymm5, 12
+ vpslld ymm5, ymm5, 20
+ vpor ymm5, ymm5, ymm8
+ vpsrld ymm8, ymm6, 12
+ vpslld ymm6, ymm6, 20
+ vpor ymm6, ymm6, ymm8
+ vpsrld ymm8, ymm7, 12
+ vpslld ymm7, ymm7, 20
+ vpor ymm7, ymm7, ymm8
+ vpaddd ymm0, ymm0, ymmword ptr [rsp+20H]
+ vpaddd ymm1, ymm1, ymmword ptr [rsp+60H]
+ vpaddd ymm2, ymm2, ymmword ptr [rsp+0A0H]
+ vpaddd ymm3, ymm3, ymmword ptr [rsp+0E0H]
+ vpaddd ymm0, ymm0, ymm4
+ vpaddd ymm1, ymm1, ymm5
+ vpaddd ymm2, ymm2, ymm6
+ vpaddd ymm3, ymm3, ymm7
+ vpxor ymm12, ymm12, ymm0
+ vpxor ymm13, ymm13, ymm1
+ vpxor ymm14, ymm14, ymm2
+ vpxor ymm15, ymm15, ymm3
+ vbroadcasti128 ymm8, xmmword ptr [ROT8]
+ vpshufb ymm12, ymm12, ymm8
+ vpshufb ymm13, ymm13, ymm8
+ vpshufb ymm14, ymm14, ymm8
+ vpshufb ymm15, ymm15, ymm8
+ vpaddd ymm8, ymm12, ymmword ptr [rsp+200H]
+ vpaddd ymm9, ymm9, ymm13
+ vpaddd ymm10, ymm10, ymm14
+ vpaddd ymm11, ymm11, ymm15
+ vpxor ymm4, ymm4, ymm8
+ vpxor ymm5, ymm5, ymm9
+ vpxor ymm6, ymm6, ymm10
+ vpxor ymm7, ymm7, ymm11
+ vmovdqa ymmword ptr [rsp+200H], ymm8
+ vpsrld ymm8, ymm4, 7
+ vpslld ymm4, ymm4, 25
+ vpor ymm4, ymm4, ymm8
+ vpsrld ymm8, ymm5, 7
+ vpslld ymm5, ymm5, 25
+ vpor ymm5, ymm5, ymm8
+ vpsrld ymm8, ymm6, 7
+ vpslld ymm6, ymm6, 25
+ vpor ymm6, ymm6, ymm8
+ vpsrld ymm8, ymm7, 7
+ vpslld ymm7, ymm7, 25
+ vpor ymm7, ymm7, ymm8
+ vpaddd ymm0, ymm0, ymmword ptr [rsp+100H]
+ vpaddd ymm1, ymm1, ymmword ptr [rsp+140H]
+ vpaddd ymm2, ymm2, ymmword ptr [rsp+180H]
+ vpaddd ymm3, ymm3, ymmword ptr [rsp+1C0H]
+ vpaddd ymm0, ymm0, ymm5
+ vpaddd ymm1, ymm1, ymm6
+ vpaddd ymm2, ymm2, ymm7
+ vpaddd ymm3, ymm3, ymm4
+ vpxor ymm15, ymm15, ymm0
+ vpxor ymm12, ymm12, ymm1
+ vpxor ymm13, ymm13, ymm2
+ vpxor ymm14, ymm14, ymm3
+ vbroadcasti128 ymm8, xmmword ptr [ROT16]
+ vpshufb ymm15, ymm15, ymm8
+ vpshufb ymm12, ymm12, ymm8
+ vpshufb ymm13, ymm13, ymm8
+ vpshufb ymm14, ymm14, ymm8
+ vpaddd ymm10, ymm10, ymm15
+ vpaddd ymm11, ymm11, ymm12
+ vpaddd ymm8, ymm13, ymmword ptr [rsp+200H]
+ vpaddd ymm9, ymm9, ymm14
+ vpxor ymm5, ymm5, ymm10
+ vpxor ymm6, ymm6, ymm11
+ vpxor ymm7, ymm7, ymm8
+ vpxor ymm4, ymm4, ymm9
+ vmovdqa ymmword ptr [rsp+200H], ymm8
+ vpsrld ymm8, ymm5, 12
+ vpslld ymm5, ymm5, 20
+ vpor ymm5, ymm5, ymm8
+ vpsrld ymm8, ymm6, 12
+ vpslld ymm6, ymm6, 20
+ vpor ymm6, ymm6, ymm8
+ vpsrld ymm8, ymm7, 12
+ vpslld ymm7, ymm7, 20
+ vpor ymm7, ymm7, ymm8
+ vpsrld ymm8, ymm4, 12
+ vpslld ymm4, ymm4, 20
+ vpor ymm4, ymm4, ymm8
+ vpaddd ymm0, ymm0, ymmword ptr [rsp+120H]
+ vpaddd ymm1, ymm1, ymmword ptr [rsp+160H]
+ vpaddd ymm2, ymm2, ymmword ptr [rsp+1A0H]
+ vpaddd ymm3, ymm3, ymmword ptr [rsp+1E0H]
+ vpaddd ymm0, ymm0, ymm5
+ vpaddd ymm1, ymm1, ymm6
+ vpaddd ymm2, ymm2, ymm7
+ vpaddd ymm3, ymm3, ymm4
+ vpxor ymm15, ymm15, ymm0
+ vpxor ymm12, ymm12, ymm1
+ vpxor ymm13, ymm13, ymm2
+ vpxor ymm14, ymm14, ymm3
+ vbroadcasti128 ymm8, xmmword ptr [ROT8]
+ vpshufb ymm15, ymm15, ymm8
+ vpshufb ymm12, ymm12, ymm8
+ vpshufb ymm13, ymm13, ymm8
+ vpshufb ymm14, ymm14, ymm8
+ vpaddd ymm10, ymm10, ymm15
+ vpaddd ymm11, ymm11, ymm12
+ vpaddd ymm8, ymm13, ymmword ptr [rsp+200H]
+ vpaddd ymm9, ymm9, ymm14
+ vpxor ymm5, ymm5, ymm10
+ vpxor ymm6, ymm6, ymm11
+ vpxor ymm7, ymm7, ymm8
+ vpxor ymm4, ymm4, ymm9
+ vmovdqa ymmword ptr [rsp+200H], ymm8
+ vpsrld ymm8, ymm5, 7
+ vpslld ymm5, ymm5, 25
+ vpor ymm5, ymm5, ymm8
+ vpsrld ymm8, ymm6, 7
+ vpslld ymm6, ymm6, 25
+ vpor ymm6, ymm6, ymm8
+ vpsrld ymm8, ymm7, 7
+ vpslld ymm7, ymm7, 25
+ vpor ymm7, ymm7, ymm8
+ vpsrld ymm8, ymm4, 7
+ vpslld ymm4, ymm4, 25
+ vpor ymm4, ymm4, ymm8
+ vpaddd ymm0, ymm0, ymmword ptr [rsp+40H]
+ vpaddd ymm1, ymm1, ymmword ptr [rsp+60H]
+ vpaddd ymm2, ymm2, ymmword ptr [rsp+0E0H]
+ vpaddd ymm3, ymm3, ymmword ptr [rsp+80H]
+ vpaddd ymm0, ymm0, ymm4
+ vpaddd ymm1, ymm1, ymm5
+ vpaddd ymm2, ymm2, ymm6
+ vpaddd ymm3, ymm3, ymm7
+ vpxor ymm12, ymm12, ymm0
+ vpxor ymm13, ymm13, ymm1
+ vpxor ymm14, ymm14, ymm2
+ vpxor ymm15, ymm15, ymm3
+ vbroadcasti128 ymm8, xmmword ptr [ROT16]
+ vpshufb ymm12, ymm12, ymm8
+ vpshufb ymm13, ymm13, ymm8
+ vpshufb ymm14, ymm14, ymm8
+ vpshufb ymm15, ymm15, ymm8
+ vpaddd ymm8, ymm12, ymmword ptr [rsp+200H]
+ vpaddd ymm9, ymm9, ymm13
+ vpaddd ymm10, ymm10, ymm14
+ vpaddd ymm11, ymm11, ymm15
+ vpxor ymm4, ymm4, ymm8
+ vpxor ymm5, ymm5, ymm9
+ vpxor ymm6, ymm6, ymm10
+ vpxor ymm7, ymm7, ymm11
+ vmovdqa ymmword ptr [rsp+200H], ymm8
+ vpsrld ymm8, ymm4, 12
+ vpslld ymm4, ymm4, 20
+ vpor ymm4, ymm4, ymm8
+ vpsrld ymm8, ymm5, 12
+ vpslld ymm5, ymm5, 20
+ vpor ymm5, ymm5, ymm8
+ vpsrld ymm8, ymm6, 12
+ vpslld ymm6, ymm6, 20
+ vpor ymm6, ymm6, ymm8
+ vpsrld ymm8, ymm7, 12
+ vpslld ymm7, ymm7, 20
+ vpor ymm7, ymm7, ymm8
+ vpaddd ymm0, ymm0, ymmword ptr [rsp+0C0H]
+ vpaddd ymm1, ymm1, ymmword ptr [rsp+140H]
+ vpaddd ymm2, ymm2, ymmword ptr [rsp]
+ vpaddd ymm3, ymm3, ymmword ptr [rsp+1A0H]
+ vpaddd ymm0, ymm0, ymm4
+ vpaddd ymm1, ymm1, ymm5
+ vpaddd ymm2, ymm2, ymm6
+ vpaddd ymm3, ymm3, ymm7
+ vpxor ymm12, ymm12, ymm0
+ vpxor ymm13, ymm13, ymm1
+ vpxor ymm14, ymm14, ymm2
+ vpxor ymm15, ymm15, ymm3
+ vbroadcasti128 ymm8, xmmword ptr [ROT8]
+ vpshufb ymm12, ymm12, ymm8
+ vpshufb ymm13, ymm13, ymm8
+ vpshufb ymm14, ymm14, ymm8
+ vpshufb ymm15, ymm15, ymm8
+ vpaddd ymm8, ymm12, ymmword ptr [rsp+200H]
+ vpaddd ymm9, ymm9, ymm13
+ vpaddd ymm10, ymm10, ymm14
+ vpaddd ymm11, ymm11, ymm15
+ vpxor ymm4, ymm4, ymm8
+ vpxor ymm5, ymm5, ymm9
+ vpxor ymm6, ymm6, ymm10
+ vpxor ymm7, ymm7, ymm11
+ vmovdqa ymmword ptr [rsp+200H], ymm8
+ vpsrld ymm8, ymm4, 7
+ vpslld ymm4, ymm4, 25
+ vpor ymm4, ymm4, ymm8
+ vpsrld ymm8, ymm5, 7
+ vpslld ymm5, ymm5, 25
+ vpor ymm5, ymm5, ymm8
+ vpsrld ymm8, ymm6, 7
+ vpslld ymm6, ymm6, 25
+ vpor ymm6, ymm6, ymm8
+ vpsrld ymm8, ymm7, 7
+ vpslld ymm7, ymm7, 25
+ vpor ymm7, ymm7, ymm8
+ vpaddd ymm0, ymm0, ymmword ptr [rsp+20H]
+ vpaddd ymm1, ymm1, ymmword ptr [rsp+180H]
+ vpaddd ymm2, ymm2, ymmword ptr [rsp+120H]
+ vpaddd ymm3, ymm3, ymmword ptr [rsp+1E0H]
+ vpaddd ymm0, ymm0, ymm5
+ vpaddd ymm1, ymm1, ymm6
+ vpaddd ymm2, ymm2, ymm7
+ vpaddd ymm3, ymm3, ymm4
+ vpxor ymm15, ymm15, ymm0
+ vpxor ymm12, ymm12, ymm1
+ vpxor ymm13, ymm13, ymm2
+ vpxor ymm14, ymm14, ymm3
+ vbroadcasti128 ymm8, xmmword ptr [ROT16]
+ vpshufb ymm15, ymm15, ymm8
+ vpshufb ymm12, ymm12, ymm8
+ vpshufb ymm13, ymm13, ymm8
+ vpshufb ymm14, ymm14, ymm8
+ vpaddd ymm10, ymm10, ymm15
+ vpaddd ymm11, ymm11, ymm12
+ vpaddd ymm8, ymm13, ymmword ptr [rsp+200H]
+ vpaddd ymm9, ymm9, ymm14
+ vpxor ymm5, ymm5, ymm10
+ vpxor ymm6, ymm6, ymm11
+ vpxor ymm7, ymm7, ymm8
+ vpxor ymm4, ymm4, ymm9
+ vmovdqa ymmword ptr [rsp+200H], ymm8
+ vpsrld ymm8, ymm5, 12
+ vpslld ymm5, ymm5, 20
+ vpor ymm5, ymm5, ymm8
+ vpsrld ymm8, ymm6, 12
+ vpslld ymm6, ymm6, 20
+ vpor ymm6, ymm6, ymm8
+ vpsrld ymm8, ymm7, 12
+ vpslld ymm7, ymm7, 20
+ vpor ymm7, ymm7, ymm8
+ vpsrld ymm8, ymm4, 12
+ vpslld ymm4, ymm4, 20
+ vpor ymm4, ymm4, ymm8
+ vpaddd ymm0, ymm0, ymmword ptr [rsp+160H]
+ vpaddd ymm1, ymm1, ymmword ptr [rsp+0A0H]
+ vpaddd ymm2, ymm2, ymmword ptr [rsp+1C0H]
+ vpaddd ymm3, ymm3, ymmword ptr [rsp+100H]
+ vpaddd ymm0, ymm0, ymm5
+ vpaddd ymm1, ymm1, ymm6
+ vpaddd ymm2, ymm2, ymm7
+ vpaddd ymm3, ymm3, ymm4
+ vpxor ymm15, ymm15, ymm0
+ vpxor ymm12, ymm12, ymm1
+ vpxor ymm13, ymm13, ymm2
+ vpxor ymm14, ymm14, ymm3
+ vbroadcasti128 ymm8, xmmword ptr [ROT8]
+ vpshufb ymm15, ymm15, ymm8
+ vpshufb ymm12, ymm12, ymm8
+ vpshufb ymm13, ymm13, ymm8
+ vpshufb ymm14, ymm14, ymm8
+ vpaddd ymm10, ymm10, ymm15
+ vpaddd ymm11, ymm11, ymm12
+ vpaddd ymm8, ymm13, ymmword ptr [rsp+200H]
+ vpaddd ymm9, ymm9, ymm14
+ vpxor ymm5, ymm5, ymm10
+ vpxor ymm6, ymm6, ymm11
+ vpxor ymm7, ymm7, ymm8
+ vpxor ymm4, ymm4, ymm9
+ vmovdqa ymmword ptr [rsp+200H], ymm8
+ vpsrld ymm8, ymm5, 7
+ vpslld ymm5, ymm5, 25
+ vpor ymm5, ymm5, ymm8
+ vpsrld ymm8, ymm6, 7
+ vpslld ymm6, ymm6, 25
+ vpor ymm6, ymm6, ymm8
+ vpsrld ymm8, ymm7, 7
+ vpslld ymm7, ymm7, 25
+ vpor ymm7, ymm7, ymm8
+ vpsrld ymm8, ymm4, 7
+ vpslld ymm4, ymm4, 25
+ vpor ymm4, ymm4, ymm8
+ vpaddd ymm0, ymm0, ymmword ptr [rsp+60H]
+ vpaddd ymm1, ymm1, ymmword ptr [rsp+140H]
+ vpaddd ymm2, ymm2, ymmword ptr [rsp+1A0H]
+ vpaddd ymm3, ymm3, ymmword ptr [rsp+0E0H]
+ vpaddd ymm0, ymm0, ymm4
+ vpaddd ymm1, ymm1, ymm5
+ vpaddd ymm2, ymm2, ymm6
+ vpaddd ymm3, ymm3, ymm7
+ vpxor ymm12, ymm12, ymm0
+ vpxor ymm13, ymm13, ymm1
+ vpxor ymm14, ymm14, ymm2
+ vpxor ymm15, ymm15, ymm3
+ vbroadcasti128 ymm8, xmmword ptr [ROT16]
+ vpshufb ymm12, ymm12, ymm8
+ vpshufb ymm13, ymm13, ymm8
+ vpshufb ymm14, ymm14, ymm8
+ vpshufb ymm15, ymm15, ymm8
+ vpaddd ymm8, ymm12, ymmword ptr [rsp+200H]
+ vpaddd ymm9, ymm9, ymm13
+ vpaddd ymm10, ymm10, ymm14
+ vpaddd ymm11, ymm11, ymm15
+ vpxor ymm4, ymm4, ymm8
+ vpxor ymm5, ymm5, ymm9
+ vpxor ymm6, ymm6, ymm10
+ vpxor ymm7, ymm7, ymm11
+ vmovdqa ymmword ptr [rsp+200H], ymm8
+ vpsrld ymm8, ymm4, 12
+ vpslld ymm4, ymm4, 20
+ vpor ymm4, ymm4, ymm8
+ vpsrld ymm8, ymm5, 12
+ vpslld ymm5, ymm5, 20
+ vpor ymm5, ymm5, ymm8
+ vpsrld ymm8, ymm6, 12
+ vpslld ymm6, ymm6, 20
+ vpor ymm6, ymm6, ymm8
+ vpsrld ymm8, ymm7, 12
+ vpslld ymm7, ymm7, 20
+ vpor ymm7, ymm7, ymm8
+ vpaddd ymm0, ymm0, ymmword ptr [rsp+80H]
+ vpaddd ymm1, ymm1, ymmword ptr [rsp+180H]
+ vpaddd ymm2, ymm2, ymmword ptr [rsp+40H]
+ vpaddd ymm3, ymm3, ymmword ptr [rsp+1C0H]
+ vpaddd ymm0, ymm0, ymm4
+ vpaddd ymm1, ymm1, ymm5
+ vpaddd ymm2, ymm2, ymm6
+ vpaddd ymm3, ymm3, ymm7
+ vpxor ymm12, ymm12, ymm0
+ vpxor ymm13, ymm13, ymm1
+ vpxor ymm14, ymm14, ymm2
+ vpxor ymm15, ymm15, ymm3
+ vbroadcasti128 ymm8, xmmword ptr [ROT8]
+ vpshufb ymm12, ymm12, ymm8
+ vpshufb ymm13, ymm13, ymm8
+ vpshufb ymm14, ymm14, ymm8
+ vpshufb ymm15, ymm15, ymm8
+ vpaddd ymm8, ymm12, ymmword ptr [rsp+200H]
+ vpaddd ymm9, ymm9, ymm13
+ vpaddd ymm10, ymm10, ymm14
+ vpaddd ymm11, ymm11, ymm15
+ vpxor ymm4, ymm4, ymm8
+ vpxor ymm5, ymm5, ymm9
+ vpxor ymm6, ymm6, ymm10
+ vpxor ymm7, ymm7, ymm11
+ vmovdqa ymmword ptr [rsp+200H], ymm8
+ vpsrld ymm8, ymm4, 7
+ vpslld ymm4, ymm4, 25
+ vpor ymm4, ymm4, ymm8
+ vpsrld ymm8, ymm5, 7
+ vpslld ymm5, ymm5, 25
+ vpor ymm5, ymm5, ymm8
+ vpsrld ymm8, ymm6, 7
+ vpslld ymm6, ymm6, 25
+ vpor ymm6, ymm6, ymm8
+ vpsrld ymm8, ymm7, 7
+ vpslld ymm7, ymm7, 25
+ vpor ymm7, ymm7, ymm8
+ vpaddd ymm0, ymm0, ymmword ptr [rsp+0C0H]
+ vpaddd ymm1, ymm1, ymmword ptr [rsp+120H]
+ vpaddd ymm2, ymm2, ymmword ptr [rsp+160H]
+ vpaddd ymm3, ymm3, ymmword ptr [rsp+100H]
+ vpaddd ymm0, ymm0, ymm5
+ vpaddd ymm1, ymm1, ymm6
+ vpaddd ymm2, ymm2, ymm7
+ vpaddd ymm3, ymm3, ymm4
+ vpxor ymm15, ymm15, ymm0
+ vpxor ymm12, ymm12, ymm1
+ vpxor ymm13, ymm13, ymm2
+ vpxor ymm14, ymm14, ymm3
+ vbroadcasti128 ymm8, xmmword ptr [ROT16]
+ vpshufb ymm15, ymm15, ymm8
+ vpshufb ymm12, ymm12, ymm8
+ vpshufb ymm13, ymm13, ymm8
+ vpshufb ymm14, ymm14, ymm8
+ vpaddd ymm10, ymm10, ymm15
+ vpaddd ymm11, ymm11, ymm12
+ vpaddd ymm8, ymm13, ymmword ptr [rsp+200H]
+ vpaddd ymm9, ymm9, ymm14
+ vpxor ymm5, ymm5, ymm10
+ vpxor ymm6, ymm6, ymm11
+ vpxor ymm7, ymm7, ymm8
+ vpxor ymm4, ymm4, ymm9
+ vmovdqa ymmword ptr [rsp+200H], ymm8
+ vpsrld ymm8, ymm5, 12
+ vpslld ymm5, ymm5, 20
+ vpor ymm5, ymm5, ymm8
+ vpsrld ymm8, ymm6, 12
+ vpslld ymm6, ymm6, 20
+ vpor ymm6, ymm6, ymm8
+ vpsrld ymm8, ymm7, 12
+ vpslld ymm7, ymm7, 20
+ vpor ymm7, ymm7, ymm8
+ vpsrld ymm8, ymm4, 12
+ vpslld ymm4, ymm4, 20
+ vpor ymm4, ymm4, ymm8
+ vpaddd ymm0, ymm0, ymmword ptr [rsp+0A0H]
+ vpaddd ymm1, ymm1, ymmword ptr [rsp]
+ vpaddd ymm2, ymm2, ymmword ptr [rsp+1E0H]
+ vpaddd ymm3, ymm3, ymmword ptr [rsp+20H]
+ vpaddd ymm0, ymm0, ymm5
+ vpaddd ymm1, ymm1, ymm6
+ vpaddd ymm2, ymm2, ymm7
+ vpaddd ymm3, ymm3, ymm4
+ vpxor ymm15, ymm15, ymm0
+ vpxor ymm12, ymm12, ymm1
+ vpxor ymm13, ymm13, ymm2
+ vpxor ymm14, ymm14, ymm3
+ vbroadcasti128 ymm8, xmmword ptr [ROT8]
+ vpshufb ymm15, ymm15, ymm8
+ vpshufb ymm12, ymm12, ymm8
+ vpshufb ymm13, ymm13, ymm8
+ vpshufb ymm14, ymm14, ymm8
+ vpaddd ymm10, ymm10, ymm15
+ vpaddd ymm11, ymm11, ymm12
+ vpaddd ymm8, ymm13, ymmword ptr [rsp+200H]
+ vpaddd ymm9, ymm9, ymm14
+ vpxor ymm5, ymm5, ymm10
+ vpxor ymm6, ymm6, ymm11
+ vpxor ymm7, ymm7, ymm8
+ vpxor ymm4, ymm4, ymm9
+ vmovdqa ymmword ptr [rsp+200H], ymm8
+ vpsrld ymm8, ymm5, 7
+ vpslld ymm5, ymm5, 25
+ vpor ymm5, ymm5, ymm8
+ vpsrld ymm8, ymm6, 7
+ vpslld ymm6, ymm6, 25
+ vpor ymm6, ymm6, ymm8
+ vpsrld ymm8, ymm7, 7
+ vpslld ymm7, ymm7, 25
+ vpor ymm7, ymm7, ymm8
+ vpsrld ymm8, ymm4, 7
+ vpslld ymm4, ymm4, 25
+ vpor ymm4, ymm4, ymm8
+ vpaddd ymm0, ymm0, ymmword ptr [rsp+140H]
+ vpaddd ymm1, ymm1, ymmword ptr [rsp+180H]
+ vpaddd ymm2, ymm2, ymmword ptr [rsp+1C0H]
+ vpaddd ymm3, ymm3, ymmword ptr [rsp+1A0H]
+ vpaddd ymm0, ymm0, ymm4
+ vpaddd ymm1, ymm1, ymm5
+ vpaddd ymm2, ymm2, ymm6
+ vpaddd ymm3, ymm3, ymm7
+ vpxor ymm12, ymm12, ymm0
+ vpxor ymm13, ymm13, ymm1
+ vpxor ymm14, ymm14, ymm2
+ vpxor ymm15, ymm15, ymm3
+ vbroadcasti128 ymm8, xmmword ptr [ROT16]
+ vpshufb ymm12, ymm12, ymm8
+ vpshufb ymm13, ymm13, ymm8
+ vpshufb ymm14, ymm14, ymm8
+ vpshufb ymm15, ymm15, ymm8
+ vpaddd ymm8, ymm12, ymmword ptr [rsp+200H]
+ vpaddd ymm9, ymm9, ymm13
+ vpaddd ymm10, ymm10, ymm14
+ vpaddd ymm11, ymm11, ymm15
+ vpxor ymm4, ymm4, ymm8
+ vpxor ymm5, ymm5, ymm9
+ vpxor ymm6, ymm6, ymm10
+ vpxor ymm7, ymm7, ymm11
+ vmovdqa ymmword ptr [rsp+200H], ymm8
+ vpsrld ymm8, ymm4, 12
+ vpslld ymm4, ymm4, 20
+ vpor ymm4, ymm4, ymm8
+ vpsrld ymm8, ymm5, 12
+ vpslld ymm5, ymm5, 20
+ vpor ymm5, ymm5, ymm8
+ vpsrld ymm8, ymm6, 12
+ vpslld ymm6, ymm6, 20
+ vpor ymm6, ymm6, ymm8
+ vpsrld ymm8, ymm7, 12
+ vpslld ymm7, ymm7, 20
+ vpor ymm7, ymm7, ymm8
+ vpaddd ymm0, ymm0, ymmword ptr [rsp+0E0H]
+ vpaddd ymm1, ymm1, ymmword ptr [rsp+120H]
+ vpaddd ymm2, ymm2, ymmword ptr [rsp+60H]
+ vpaddd ymm3, ymm3, ymmword ptr [rsp+1E0H]
+ vpaddd ymm0, ymm0, ymm4
+ vpaddd ymm1, ymm1, ymm5
+ vpaddd ymm2, ymm2, ymm6
+ vpaddd ymm3, ymm3, ymm7
+ vpxor ymm12, ymm12, ymm0
+ vpxor ymm13, ymm13, ymm1
+ vpxor ymm14, ymm14, ymm2
+ vpxor ymm15, ymm15, ymm3
+ vbroadcasti128 ymm8, xmmword ptr [ROT8]
+ vpshufb ymm12, ymm12, ymm8
+ vpshufb ymm13, ymm13, ymm8
+ vpshufb ymm14, ymm14, ymm8
+ vpshufb ymm15, ymm15, ymm8
+ vpaddd ymm8, ymm12, ymmword ptr [rsp+200H]
+ vpaddd ymm9, ymm9, ymm13
+ vpaddd ymm10, ymm10, ymm14
+ vpaddd ymm11, ymm11, ymm15
+ vpxor ymm4, ymm4, ymm8
+ vpxor ymm5, ymm5, ymm9
+ vpxor ymm6, ymm6, ymm10
+ vpxor ymm7, ymm7, ymm11
+ vmovdqa ymmword ptr [rsp+200H], ymm8
+ vpsrld ymm8, ymm4, 7
+ vpslld ymm4, ymm4, 25
+ vpor ymm4, ymm4, ymm8
+ vpsrld ymm8, ymm5, 7
+ vpslld ymm5, ymm5, 25
+ vpor ymm5, ymm5, ymm8
+ vpsrld ymm8, ymm6, 7
+ vpslld ymm6, ymm6, 25
+ vpor ymm6, ymm6, ymm8
+ vpsrld ymm8, ymm7, 7
+ vpslld ymm7, ymm7, 25
+ vpor ymm7, ymm7, ymm8
+ vpaddd ymm0, ymm0, ymmword ptr [rsp+80H]
+ vpaddd ymm1, ymm1, ymmword ptr [rsp+160H]
+ vpaddd ymm2, ymm2, ymmword ptr [rsp+0A0H]
+ vpaddd ymm3, ymm3, ymmword ptr [rsp+20H]
+ vpaddd ymm0, ymm0, ymm5
+ vpaddd ymm1, ymm1, ymm6
+ vpaddd ymm2, ymm2, ymm7
+ vpaddd ymm3, ymm3, ymm4
+ vpxor ymm15, ymm15, ymm0
+ vpxor ymm12, ymm12, ymm1
+ vpxor ymm13, ymm13, ymm2
+ vpxor ymm14, ymm14, ymm3
+ vbroadcasti128 ymm8, xmmword ptr [ROT16]
+ vpshufb ymm15, ymm15, ymm8
+ vpshufb ymm12, ymm12, ymm8
+ vpshufb ymm13, ymm13, ymm8
+ vpshufb ymm14, ymm14, ymm8
+ vpaddd ymm10, ymm10, ymm15
+ vpaddd ymm11, ymm11, ymm12
+ vpaddd ymm8, ymm13, ymmword ptr [rsp+200H]
+ vpaddd ymm9, ymm9, ymm14
+ vpxor ymm5, ymm5, ymm10
+ vpxor ymm6, ymm6, ymm11
+ vpxor ymm7, ymm7, ymm8
+ vpxor ymm4, ymm4, ymm9
+ vmovdqa ymmword ptr [rsp+200H], ymm8
+ vpsrld ymm8, ymm5, 12
+ vpslld ymm5, ymm5, 20
+ vpor ymm5, ymm5, ymm8
+ vpsrld ymm8, ymm6, 12
+ vpslld ymm6, ymm6, 20
+ vpor ymm6, ymm6, ymm8
+ vpsrld ymm8, ymm7, 12
+ vpslld ymm7, ymm7, 20
+ vpor ymm7, ymm7, ymm8
+ vpsrld ymm8, ymm4, 12
+ vpslld ymm4, ymm4, 20
+ vpor ymm4, ymm4, ymm8
+ vpaddd ymm0, ymm0, ymmword ptr [rsp]
+ vpaddd ymm1, ymm1, ymmword ptr [rsp+40H]
+ vpaddd ymm2, ymm2, ymmword ptr [rsp+100H]
+ vpaddd ymm3, ymm3, ymmword ptr [rsp+0C0H]
+ vpaddd ymm0, ymm0, ymm5
+ vpaddd ymm1, ymm1, ymm6
+ vpaddd ymm2, ymm2, ymm7
+ vpaddd ymm3, ymm3, ymm4
+ vpxor ymm15, ymm15, ymm0
+ vpxor ymm12, ymm12, ymm1
+ vpxor ymm13, ymm13, ymm2
+ vpxor ymm14, ymm14, ymm3
+ vbroadcasti128 ymm8, xmmword ptr [ROT8]
+ vpshufb ymm15, ymm15, ymm8
+ vpshufb ymm12, ymm12, ymm8
+ vpshufb ymm13, ymm13, ymm8
+ vpshufb ymm14, ymm14, ymm8
+ vpaddd ymm10, ymm10, ymm15
+ vpaddd ymm11, ymm11, ymm12
+ vpaddd ymm8, ymm13, ymmword ptr [rsp+200H]
+ vpaddd ymm9, ymm9, ymm14
+ vpxor ymm5, ymm5, ymm10
+ vpxor ymm6, ymm6, ymm11
+ vpxor ymm7, ymm7, ymm8
+ vpxor ymm4, ymm4, ymm9
+ vmovdqa ymmword ptr [rsp+200H], ymm8
+ vpsrld ymm8, ymm5, 7
+ vpslld ymm5, ymm5, 25
+ vpor ymm5, ymm5, ymm8
+ vpsrld ymm8, ymm6, 7
+ vpslld ymm6, ymm6, 25
+ vpor ymm6, ymm6, ymm8
+ vpsrld ymm8, ymm7, 7
+ vpslld ymm7, ymm7, 25
+ vpor ymm7, ymm7, ymm8
+ vpsrld ymm8, ymm4, 7
+ vpslld ymm4, ymm4, 25
+ vpor ymm4, ymm4, ymm8
+ vpaddd ymm0, ymm0, ymmword ptr [rsp+180H]
+ vpaddd ymm1, ymm1, ymmword ptr [rsp+120H]
+ vpaddd ymm2, ymm2, ymmword ptr [rsp+1E0H]
+ vpaddd ymm3, ymm3, ymmword ptr [rsp+1C0H]
+ vpaddd ymm0, ymm0, ymm4
+ vpaddd ymm1, ymm1, ymm5
+ vpaddd ymm2, ymm2, ymm6
+ vpaddd ymm3, ymm3, ymm7
+ vpxor ymm12, ymm12, ymm0
+ vpxor ymm13, ymm13, ymm1
+ vpxor ymm14, ymm14, ymm2
+ vpxor ymm15, ymm15, ymm3
+ vbroadcasti128 ymm8, xmmword ptr [ROT16]
+ vpshufb ymm12, ymm12, ymm8
+ vpshufb ymm13, ymm13, ymm8
+ vpshufb ymm14, ymm14, ymm8
+ vpshufb ymm15, ymm15, ymm8
+ vpaddd ymm8, ymm12, ymmword ptr [rsp+200H]
+ vpaddd ymm9, ymm9, ymm13
+ vpaddd ymm10, ymm10, ymm14
+ vpaddd ymm11, ymm11, ymm15
+ vpxor ymm4, ymm4, ymm8
+ vpxor ymm5, ymm5, ymm9
+ vpxor ymm6, ymm6, ymm10
+ vpxor ymm7, ymm7, ymm11
+ vmovdqa ymmword ptr [rsp+200H], ymm8
+ vpsrld ymm8, ymm4, 12
+ vpslld ymm4, ymm4, 20
+ vpor ymm4, ymm4, ymm8
+ vpsrld ymm8, ymm5, 12
+ vpslld ymm5, ymm5, 20
+ vpor ymm5, ymm5, ymm8
+ vpsrld ymm8, ymm6, 12
+ vpslld ymm6, ymm6, 20
+ vpor ymm6, ymm6, ymm8
+ vpsrld ymm8, ymm7, 12
+ vpslld ymm7, ymm7, 20
+ vpor ymm7, ymm7, ymm8
+ vpaddd ymm0, ymm0, ymmword ptr [rsp+1A0H]
+ vpaddd ymm1, ymm1, ymmword ptr [rsp+160H]
+ vpaddd ymm2, ymm2, ymmword ptr [rsp+140H]
+ vpaddd ymm3, ymm3, ymmword ptr [rsp+100H]
+ vpaddd ymm0, ymm0, ymm4
+ vpaddd ymm1, ymm1, ymm5
+ vpaddd ymm2, ymm2, ymm6
+ vpaddd ymm3, ymm3, ymm7
+ vpxor ymm12, ymm12, ymm0
+ vpxor ymm13, ymm13, ymm1
+ vpxor ymm14, ymm14, ymm2
+ vpxor ymm15, ymm15, ymm3
+ vbroadcasti128 ymm8, xmmword ptr [ROT8]
+ vpshufb ymm12, ymm12, ymm8
+ vpshufb ymm13, ymm13, ymm8
+ vpshufb ymm14, ymm14, ymm8
+ vpshufb ymm15, ymm15, ymm8
+ vpaddd ymm8, ymm12, ymmword ptr [rsp+200H]
+ vpaddd ymm9, ymm9, ymm13
+ vpaddd ymm10, ymm10, ymm14
+ vpaddd ymm11, ymm11, ymm15
+ vpxor ymm4, ymm4, ymm8
+ vpxor ymm5, ymm5, ymm9
+ vpxor ymm6, ymm6, ymm10
+ vpxor ymm7, ymm7, ymm11
+ vmovdqa ymmword ptr [rsp+200H], ymm8
+ vpsrld ymm8, ymm4, 7
+ vpslld ymm4, ymm4, 25
+ vpor ymm4, ymm4, ymm8
+ vpsrld ymm8, ymm5, 7
+ vpslld ymm5, ymm5, 25
+ vpor ymm5, ymm5, ymm8
+ vpsrld ymm8, ymm6, 7
+ vpslld ymm6, ymm6, 25
+ vpor ymm6, ymm6, ymm8
+ vpsrld ymm8, ymm7, 7
+ vpslld ymm7, ymm7, 25
+ vpor ymm7, ymm7, ymm8
+ vpaddd ymm0, ymm0, ymmword ptr [rsp+0E0H]
+ vpaddd ymm1, ymm1, ymmword ptr [rsp+0A0H]
+ vpaddd ymm2, ymm2, ymmword ptr [rsp]
+ vpaddd ymm3, ymm3, ymmword ptr [rsp+0C0H]
+ vpaddd ymm0, ymm0, ymm5
+ vpaddd ymm1, ymm1, ymm6
+ vpaddd ymm2, ymm2, ymm7
+ vpaddd ymm3, ymm3, ymm4
+ vpxor ymm15, ymm15, ymm0
+ vpxor ymm12, ymm12, ymm1
+ vpxor ymm13, ymm13, ymm2
+ vpxor ymm14, ymm14, ymm3
+ vbroadcasti128 ymm8, xmmword ptr [ROT16]
+ vpshufb ymm15, ymm15, ymm8
+ vpshufb ymm12, ymm12, ymm8
+ vpshufb ymm13, ymm13, ymm8
+ vpshufb ymm14, ymm14, ymm8
+ vpaddd ymm10, ymm10, ymm15
+ vpaddd ymm11, ymm11, ymm12
+ vpaddd ymm8, ymm13, ymmword ptr [rsp+200H]
+ vpaddd ymm9, ymm9, ymm14
+ vpxor ymm5, ymm5, ymm10
+ vpxor ymm6, ymm6, ymm11
+ vpxor ymm7, ymm7, ymm8
+ vpxor ymm4, ymm4, ymm9
+ vmovdqa ymmword ptr [rsp+200H], ymm8
+ vpsrld ymm8, ymm5, 12
+ vpslld ymm5, ymm5, 20
+ vpor ymm5, ymm5, ymm8
+ vpsrld ymm8, ymm6, 12
+ vpslld ymm6, ymm6, 20
+ vpor ymm6, ymm6, ymm8
+ vpsrld ymm8, ymm7, 12
+ vpslld ymm7, ymm7, 20
+ vpor ymm7, ymm7, ymm8
+ vpsrld ymm8, ymm4, 12
+ vpslld ymm4, ymm4, 20
+ vpor ymm4, ymm4, ymm8
+ vpaddd ymm0, ymm0, ymmword ptr [rsp+40H]
+ vpaddd ymm1, ymm1, ymmword ptr [rsp+60H]
+ vpaddd ymm2, ymm2, ymmword ptr [rsp+20H]
+ vpaddd ymm3, ymm3, ymmword ptr [rsp+80H]
+ vpaddd ymm0, ymm0, ymm5
+ vpaddd ymm1, ymm1, ymm6
+ vpaddd ymm2, ymm2, ymm7
+ vpaddd ymm3, ymm3, ymm4
+ vpxor ymm15, ymm15, ymm0
+ vpxor ymm12, ymm12, ymm1
+ vpxor ymm13, ymm13, ymm2
+ vpxor ymm14, ymm14, ymm3
+ vbroadcasti128 ymm8, xmmword ptr [ROT8]
+ vpshufb ymm15, ymm15, ymm8
+ vpshufb ymm12, ymm12, ymm8
+ vpshufb ymm13, ymm13, ymm8
+ vpshufb ymm14, ymm14, ymm8
+ vpaddd ymm10, ymm10, ymm15
+ vpaddd ymm11, ymm11, ymm12
+ vpaddd ymm8, ymm13, ymmword ptr [rsp+200H]
+ vpaddd ymm9, ymm9, ymm14
+ vpxor ymm5, ymm5, ymm10
+ vpxor ymm6, ymm6, ymm11
+ vpxor ymm7, ymm7, ymm8
+ vpxor ymm4, ymm4, ymm9
+ vmovdqa ymmword ptr [rsp+200H], ymm8
+ vpsrld ymm8, ymm5, 7
+ vpslld ymm5, ymm5, 25
+ vpor ymm5, ymm5, ymm8
+ vpsrld ymm8, ymm6, 7
+ vpslld ymm6, ymm6, 25
+ vpor ymm6, ymm6, ymm8
+ vpsrld ymm8, ymm7, 7
+ vpslld ymm7, ymm7, 25
+ vpor ymm7, ymm7, ymm8
+ vpsrld ymm8, ymm4, 7
+ vpslld ymm4, ymm4, 25
+ vpor ymm4, ymm4, ymm8
+ vpaddd ymm0, ymm0, ymmword ptr [rsp+120H]
+ vpaddd ymm1, ymm1, ymmword ptr [rsp+160H]
+ vpaddd ymm2, ymm2, ymmword ptr [rsp+100H]
+ vpaddd ymm3, ymm3, ymmword ptr [rsp+1E0H]
+ vpaddd ymm0, ymm0, ymm4
+ vpaddd ymm1, ymm1, ymm5
+ vpaddd ymm2, ymm2, ymm6
+ vpaddd ymm3, ymm3, ymm7
+ vpxor ymm12, ymm12, ymm0
+ vpxor ymm13, ymm13, ymm1
+ vpxor ymm14, ymm14, ymm2
+ vpxor ymm15, ymm15, ymm3
+ vbroadcasti128 ymm8, xmmword ptr [ROT16]
+ vpshufb ymm12, ymm12, ymm8
+ vpshufb ymm13, ymm13, ymm8
+ vpshufb ymm14, ymm14, ymm8
+ vpshufb ymm15, ymm15, ymm8
+ vpaddd ymm8, ymm12, ymmword ptr [rsp+200H]
+ vpaddd ymm9, ymm9, ymm13
+ vpaddd ymm10, ymm10, ymm14
+ vpaddd ymm11, ymm11, ymm15
+ vpxor ymm4, ymm4, ymm8
+ vpxor ymm5, ymm5, ymm9
+ vpxor ymm6, ymm6, ymm10
+ vpxor ymm7, ymm7, ymm11
+ vmovdqa ymmword ptr [rsp+200H], ymm8
+ vpsrld ymm8, ymm4, 12
+ vpslld ymm4, ymm4, 20
+ vpor ymm4, ymm4, ymm8
+ vpsrld ymm8, ymm5, 12
+ vpslld ymm5, ymm5, 20
+ vpor ymm5, ymm5, ymm8
+ vpsrld ymm8, ymm6, 12
+ vpslld ymm6, ymm6, 20
+ vpor ymm6, ymm6, ymm8
+ vpsrld ymm8, ymm7, 12
+ vpslld ymm7, ymm7, 20
+ vpor ymm7, ymm7, ymm8
+ vpaddd ymm0, ymm0, ymmword ptr [rsp+1C0H]
+ vpaddd ymm1, ymm1, ymmword ptr [rsp+0A0H]
+ vpaddd ymm2, ymm2, ymmword ptr [rsp+180H]
+ vpaddd ymm3, ymm3, ymmword ptr [rsp+20H]
+ vpaddd ymm0, ymm0, ymm4
+ vpaddd ymm1, ymm1, ymm5
+ vpaddd ymm2, ymm2, ymm6
+ vpaddd ymm3, ymm3, ymm7
+ vpxor ymm12, ymm12, ymm0
+ vpxor ymm13, ymm13, ymm1
+ vpxor ymm14, ymm14, ymm2
+ vpxor ymm15, ymm15, ymm3
+ vbroadcasti128 ymm8, xmmword ptr [ROT8]
+ vpshufb ymm12, ymm12, ymm8
+ vpshufb ymm13, ymm13, ymm8
+ vpshufb ymm14, ymm14, ymm8
+ vpshufb ymm15, ymm15, ymm8
+ vpaddd ymm8, ymm12, ymmword ptr [rsp+200H]
+ vpaddd ymm9, ymm9, ymm13
+ vpaddd ymm10, ymm10, ymm14
+ vpaddd ymm11, ymm11, ymm15
+ vpxor ymm4, ymm4, ymm8
+ vpxor ymm5, ymm5, ymm9
+ vpxor ymm6, ymm6, ymm10
+ vpxor ymm7, ymm7, ymm11
+ vmovdqa ymmword ptr [rsp+200H], ymm8
+ vpsrld ymm8, ymm4, 7
+ vpslld ymm4, ymm4, 25
+ vpor ymm4, ymm4, ymm8
+ vpsrld ymm8, ymm5, 7
+ vpslld ymm5, ymm5, 25
+ vpor ymm5, ymm5, ymm8
+ vpsrld ymm8, ymm6, 7
+ vpslld ymm6, ymm6, 25
+ vpor ymm6, ymm6, ymm8
+ vpsrld ymm8, ymm7, 7
+ vpslld ymm7, ymm7, 25
+ vpor ymm7, ymm7, ymm8
+ vpaddd ymm0, ymm0, ymmword ptr [rsp+1A0H]
+ vpaddd ymm1, ymm1, ymmword ptr [rsp]
+ vpaddd ymm2, ymm2, ymmword ptr [rsp+40H]
+ vpaddd ymm3, ymm3, ymmword ptr [rsp+80H]
+ vpaddd ymm0, ymm0, ymm5
+ vpaddd ymm1, ymm1, ymm6
+ vpaddd ymm2, ymm2, ymm7
+ vpaddd ymm3, ymm3, ymm4
+ vpxor ymm15, ymm15, ymm0
+ vpxor ymm12, ymm12, ymm1
+ vpxor ymm13, ymm13, ymm2
+ vpxor ymm14, ymm14, ymm3
+ vbroadcasti128 ymm8, xmmword ptr [ROT16]
+ vpshufb ymm15, ymm15, ymm8
+ vpshufb ymm12, ymm12, ymm8
+ vpshufb ymm13, ymm13, ymm8
+ vpshufb ymm14, ymm14, ymm8
+ vpaddd ymm10, ymm10, ymm15
+ vpaddd ymm11, ymm11, ymm12
+ vpaddd ymm8, ymm13, ymmword ptr [rsp+200H]
+ vpaddd ymm9, ymm9, ymm14
+ vpxor ymm5, ymm5, ymm10
+ vpxor ymm6, ymm6, ymm11
+ vpxor ymm7, ymm7, ymm8
+ vpxor ymm4, ymm4, ymm9
+ vmovdqa ymmword ptr [rsp+200H], ymm8
+ vpsrld ymm8, ymm5, 12
+ vpslld ymm5, ymm5, 20
+ vpor ymm5, ymm5, ymm8
+ vpsrld ymm8, ymm6, 12
+ vpslld ymm6, ymm6, 20
+ vpor ymm6, ymm6, ymm8
+ vpsrld ymm8, ymm7, 12
+ vpslld ymm7, ymm7, 20
+ vpor ymm7, ymm7, ymm8
+ vpsrld ymm8, ymm4, 12
+ vpslld ymm4, ymm4, 20
+ vpor ymm4, ymm4, ymm8
+ vpaddd ymm0, ymm0, ymmword ptr [rsp+60H]
+ vpaddd ymm1, ymm1, ymmword ptr [rsp+140H]
+ vpaddd ymm2, ymm2, ymmword ptr [rsp+0C0H]
+ vpaddd ymm3, ymm3, ymmword ptr [rsp+0E0H]
+ vpaddd ymm0, ymm0, ymm5
+ vpaddd ymm1, ymm1, ymm6
+ vpaddd ymm2, ymm2, ymm7
+ vpaddd ymm3, ymm3, ymm4
+ vpxor ymm15, ymm15, ymm0
+ vpxor ymm12, ymm12, ymm1
+ vpxor ymm13, ymm13, ymm2
+ vpxor ymm14, ymm14, ymm3
+ vbroadcasti128 ymm8, xmmword ptr [ROT8]
+ vpshufb ymm15, ymm15, ymm8
+ vpshufb ymm12, ymm12, ymm8
+ vpshufb ymm13, ymm13, ymm8
+ vpshufb ymm14, ymm14, ymm8
+ vpaddd ymm10, ymm10, ymm15
+ vpaddd ymm11, ymm11, ymm12
+ vpaddd ymm8, ymm13, ymmword ptr [rsp+200H]
+ vpaddd ymm9, ymm9, ymm14
+ vpxor ymm5, ymm5, ymm10
+ vpxor ymm6, ymm6, ymm11
+ vpxor ymm7, ymm7, ymm8
+ vpxor ymm4, ymm4, ymm9
+ vmovdqa ymmword ptr [rsp+200H], ymm8
+ vpsrld ymm8, ymm5, 7
+ vpslld ymm5, ymm5, 25
+ vpor ymm5, ymm5, ymm8
+ vpsrld ymm8, ymm6, 7
+ vpslld ymm6, ymm6, 25
+ vpor ymm6, ymm6, ymm8
+ vpsrld ymm8, ymm7, 7
+ vpslld ymm7, ymm7, 25
+ vpor ymm7, ymm7, ymm8
+ vpsrld ymm8, ymm4, 7
+ vpslld ymm4, ymm4, 25
+ vpor ymm4, ymm4, ymm8
+ vpaddd ymm0, ymm0, ymmword ptr [rsp+160H]
+ vpaddd ymm1, ymm1, ymmword ptr [rsp+0A0H]
+ vpaddd ymm2, ymm2, ymmword ptr [rsp+20H]
+ vpaddd ymm3, ymm3, ymmword ptr [rsp+100H]
+ vpaddd ymm0, ymm0, ymm4
+ vpaddd ymm1, ymm1, ymm5
+ vpaddd ymm2, ymm2, ymm6
+ vpaddd ymm3, ymm3, ymm7
+ vpxor ymm12, ymm12, ymm0
+ vpxor ymm13, ymm13, ymm1
+ vpxor ymm14, ymm14, ymm2
+ vpxor ymm15, ymm15, ymm3
+ vbroadcasti128 ymm8, xmmword ptr [ROT16]
+ vpshufb ymm12, ymm12, ymm8
+ vpshufb ymm13, ymm13, ymm8
+ vpshufb ymm14, ymm14, ymm8
+ vpshufb ymm15, ymm15, ymm8
+ vpaddd ymm8, ymm12, ymmword ptr [rsp+200H]
+ vpaddd ymm9, ymm9, ymm13
+ vpaddd ymm10, ymm10, ymm14
+ vpaddd ymm11, ymm11, ymm15
+ vpxor ymm4, ymm4, ymm8
+ vpxor ymm5, ymm5, ymm9
+ vpxor ymm6, ymm6, ymm10
+ vpxor ymm7, ymm7, ymm11
+ vmovdqa ymmword ptr [rsp+200H], ymm8
+ vpsrld ymm8, ymm4, 12
+ vpslld ymm4, ymm4, 20
+ vpor ymm4, ymm4, ymm8
+ vpsrld ymm8, ymm5, 12
+ vpslld ymm5, ymm5, 20
+ vpor ymm5, ymm5, ymm8
+ vpsrld ymm8, ymm6, 12
+ vpslld ymm6, ymm6, 20
+ vpor ymm6, ymm6, ymm8
+ vpsrld ymm8, ymm7, 12
+ vpslld ymm7, ymm7, 20
+ vpor ymm7, ymm7, ymm8
+ vpaddd ymm0, ymm0, ymmword ptr [rsp+1E0H]
+ vpaddd ymm1, ymm1, ymmword ptr [rsp]
+ vpaddd ymm2, ymm2, ymmword ptr [rsp+120H]
+ vpaddd ymm3, ymm3, ymmword ptr [rsp+0C0H]
+ vpaddd ymm0, ymm0, ymm4
+ vpaddd ymm1, ymm1, ymm5
+ vpaddd ymm2, ymm2, ymm6
+ vpaddd ymm3, ymm3, ymm7
+ vpxor ymm12, ymm12, ymm0
+ vpxor ymm13, ymm13, ymm1
+ vpxor ymm14, ymm14, ymm2
+ vpxor ymm15, ymm15, ymm3
+ vbroadcasti128 ymm8, xmmword ptr [ROT8]
+ vpshufb ymm12, ymm12, ymm8
+ vpshufb ymm13, ymm13, ymm8
+ vpshufb ymm14, ymm14, ymm8
+ vpshufb ymm15, ymm15, ymm8
+ vpaddd ymm8, ymm12, ymmword ptr [rsp+200H]
+ vpaddd ymm9, ymm9, ymm13
+ vpaddd ymm10, ymm10, ymm14
+ vpaddd ymm11, ymm11, ymm15
+ vpxor ymm4, ymm4, ymm8
+ vpxor ymm5, ymm5, ymm9
+ vpxor ymm6, ymm6, ymm10
+ vpxor ymm7, ymm7, ymm11
+ vmovdqa ymmword ptr [rsp+200H], ymm8
+ vpsrld ymm8, ymm4, 7
+ vpslld ymm4, ymm4, 25
+ vpor ymm4, ymm4, ymm8
+ vpsrld ymm8, ymm5, 7
+ vpslld ymm5, ymm5, 25
+ vpor ymm5, ymm5, ymm8
+ vpsrld ymm8, ymm6, 7
+ vpslld ymm6, ymm6, 25
+ vpor ymm6, ymm6, ymm8
+ vpsrld ymm8, ymm7, 7
+ vpslld ymm7, ymm7, 25
+ vpor ymm7, ymm7, ymm8
+ vpaddd ymm0, ymm0, ymmword ptr [rsp+1C0H]
+ vpaddd ymm1, ymm1, ymmword ptr [rsp+40H]
+ vpaddd ymm2, ymm2, ymmword ptr [rsp+60H]
+ vpaddd ymm3, ymm3, ymmword ptr [rsp+0E0H]
+ vpaddd ymm0, ymm0, ymm5
+ vpaddd ymm1, ymm1, ymm6
+ vpaddd ymm2, ymm2, ymm7
+ vpaddd ymm3, ymm3, ymm4
+ vpxor ymm15, ymm15, ymm0
+ vpxor ymm12, ymm12, ymm1
+ vpxor ymm13, ymm13, ymm2
+ vpxor ymm14, ymm14, ymm3
+ vbroadcasti128 ymm8, xmmword ptr [ROT16]
+ vpshufb ymm15, ymm15, ymm8
+ vpshufb ymm12, ymm12, ymm8
+ vpshufb ymm13, ymm13, ymm8
+ vpshufb ymm14, ymm14, ymm8
+ vpaddd ymm10, ymm10, ymm15
+ vpaddd ymm11, ymm11, ymm12
+ vpaddd ymm8, ymm13, ymmword ptr [rsp+200H]
+ vpaddd ymm9, ymm9, ymm14
+ vpxor ymm5, ymm5, ymm10
+ vpxor ymm6, ymm6, ymm11
+ vpxor ymm7, ymm7, ymm8
+ vpxor ymm4, ymm4, ymm9
+ vmovdqa ymmword ptr [rsp+200H], ymm8
+ vpsrld ymm8, ymm5, 12
+ vpslld ymm5, ymm5, 20
+ vpor ymm5, ymm5, ymm8
+ vpsrld ymm8, ymm6, 12
+ vpslld ymm6, ymm6, 20
+ vpor ymm6, ymm6, ymm8
+ vpsrld ymm8, ymm7, 12
+ vpslld ymm7, ymm7, 20
+ vpor ymm7, ymm7, ymm8
+ vpsrld ymm8, ymm4, 12
+ vpslld ymm4, ymm4, 20
+ vpor ymm4, ymm4, ymm8
+ vpaddd ymm0, ymm0, ymmword ptr [rsp+140H]
+ vpaddd ymm1, ymm1, ymmword ptr [rsp+180H]
+ vpaddd ymm2, ymm2, ymmword ptr [rsp+80H]
+ vpaddd ymm3, ymm3, ymmword ptr [rsp+1A0H]
+ vpaddd ymm0, ymm0, ymm5
+ vpaddd ymm1, ymm1, ymm6
+ vpaddd ymm2, ymm2, ymm7
+ vpaddd ymm3, ymm3, ymm4
+ vpxor ymm15, ymm15, ymm0
+ vpxor ymm12, ymm12, ymm1
+ vpxor ymm13, ymm13, ymm2
+ vpxor ymm14, ymm14, ymm3
+ vbroadcasti128 ymm8, xmmword ptr [ROT8]
+ vpshufb ymm15, ymm15, ymm8
+ vpshufb ymm12, ymm12, ymm8
+ vpshufb ymm13, ymm13, ymm8
+ vpshufb ymm14, ymm14, ymm8
+ vpaddd ymm10, ymm10, ymm15
+ vpaddd ymm11, ymm11, ymm12
+ vpaddd ymm8, ymm13, ymmword ptr [rsp+200H]
+ vpaddd ymm9, ymm9, ymm14
+ vpxor ymm5, ymm5, ymm10
+ vpxor ymm6, ymm6, ymm11
+ vpxor ymm7, ymm7, ymm8
+ vpxor ymm4, ymm4, ymm9
+ vpxor ymm0, ymm0, ymm8
+ vpxor ymm1, ymm1, ymm9
+ vpxor ymm2, ymm2, ymm10
+ vpxor ymm3, ymm3, ymm11
+ vpsrld ymm8, ymm5, 7
+ vpslld ymm5, ymm5, 25
+ vpor ymm5, ymm5, ymm8
+ vpsrld ymm8, ymm6, 7
+ vpslld ymm6, ymm6, 25
+ vpor ymm6, ymm6, ymm8
+ vpsrld ymm8, ymm7, 7
+ vpslld ymm7, ymm7, 25
+ vpor ymm7, ymm7, ymm8
+ vpsrld ymm8, ymm4, 7
+ vpslld ymm4, ymm4, 25
+ vpor ymm4, ymm4, ymm8
+ vpxor ymm4, ymm4, ymm12
+ vpxor ymm5, ymm5, ymm13
+ vpxor ymm6, ymm6, ymm14
+ vpxor ymm7, ymm7, ymm15
+ movzx eax, byte ptr [rbp+78H]
+ jne innerloop8
+ mov rbx, qword ptr [rbp+90H]
+ vunpcklps ymm8, ymm0, ymm1
+ vunpcklps ymm9, ymm2, ymm3
+ vunpckhps ymm10, ymm0, ymm1
+ vunpcklps ymm11, ymm4, ymm5
+ vunpcklps ymm0, ymm6, ymm7
+ vshufps ymm12, ymm8, ymm9, 78
+ vblendps ymm1, ymm8, ymm12, 0CCH
+ vshufps ymm8, ymm11, ymm0, 78
+ vunpckhps ymm13, ymm2, ymm3
+ vblendps ymm2, ymm11, ymm8, 0CCH
+ vblendps ymm3, ymm12, ymm9, 0CCH
+ vperm2f128 ymm12, ymm1, ymm2, 20H
+ vmovups ymmword ptr [rbx], ymm12
+ vunpckhps ymm14, ymm4, ymm5
+ vblendps ymm4, ymm8, ymm0, 0CCH
+ vunpckhps ymm15, ymm6, ymm7
+ vperm2f128 ymm7, ymm3, ymm4, 20H
+ vmovups ymmword ptr [rbx+20H], ymm7
+ vshufps ymm5, ymm10, ymm13, 78
+ vblendps ymm6, ymm5, ymm13, 0CCH
+ vshufps ymm13, ymm14, ymm15, 78
+ vblendps ymm10, ymm10, ymm5, 0CCH
+ vblendps ymm14, ymm14, ymm13, 0CCH
+ vperm2f128 ymm8, ymm10, ymm14, 20H
+ vmovups ymmword ptr [rbx+40H], ymm8
+ vblendps ymm15, ymm13, ymm15, 0CCH
+ vperm2f128 ymm13, ymm6, ymm15, 20H
+ vmovups ymmword ptr [rbx+60H], ymm13
+ vperm2f128 ymm9, ymm1, ymm2, 31H
+ vperm2f128 ymm11, ymm3, ymm4, 31H
+ vmovups ymmword ptr [rbx+80H], ymm9
+ vperm2f128 ymm14, ymm10, ymm14, 31H
+ vperm2f128 ymm15, ymm6, ymm15, 31H
+ vmovups ymmword ptr [rbx+0A0H], ymm11
+ vmovups ymmword ptr [rbx+0C0H], ymm14
+ vmovups ymmword ptr [rbx+0E0H], ymm15
+ vmovdqa ymm0, ymmword ptr [rsp+2A0H]
+ vpaddd ymm1, ymm0, ymmword ptr [rsp+220H]
+ vmovdqa ymmword ptr [rsp+220H], ymm1
+ vpxor ymm0, ymm0, ymmword ptr [CMP_MSB_MASK]
+ vpxor ymm2, ymm1, ymmword ptr [CMP_MSB_MASK]
+ vpcmpgtd ymm2, ymm0, ymm2
+ vmovdqa ymm0, ymmword ptr [rsp+240H]
+ vpsubd ymm2, ymm0, ymm2
+ vmovdqa ymmword ptr [rsp+240H], ymm2
+ add rdi, 64
+ add rbx, 256
+ mov qword ptr [rbp+90H], rbx
+ sub rsi, 8
+ cmp rsi, 8
+ jnc outerloop8
+ test rsi, rsi
+ jnz final7blocks
+unwind:
+ vzeroupper
+ vmovdqa xmm6, xmmword ptr [rsp+2D0H]
+ vmovdqa xmm7, xmmword ptr [rsp+2E0H]
+ vmovdqa xmm8, xmmword ptr [rsp+2F0H]
+ vmovdqa xmm9, xmmword ptr [rsp+300H]
+ vmovdqa xmm10, xmmword ptr [rsp+310H]
+ vmovdqa xmm11, xmmword ptr [rsp+320H]
+ vmovdqa xmm12, xmmword ptr [rsp+330H]
+ vmovdqa xmm13, xmmword ptr [rsp+340H]
+ vmovdqa xmm14, xmmword ptr [rsp+350H]
+ vmovdqa xmm15, xmmword ptr [rsp+360H]
+ mov rsp, rbp
+ pop rbp
+ pop rbx
+ pop rdi
+ pop rsi
+ pop r12
+ pop r13
+ pop r14
+ pop r15
+ ret
+ALIGN 16
+final7blocks:
+ mov rbx, qword ptr [rbp+90H]
+ mov r15, qword ptr [rsp+2C0H]
+ movzx r13d, byte ptr [rbp+78H]
+ movzx r12d, byte ptr [rbp+88H]
+ test rsi, 4H
+ je final3blocks
+ vbroadcasti128 ymm0, xmmword ptr [rcx]
+ vbroadcasti128 ymm1, xmmword ptr [rcx+10H]
+ vmovdqa ymm8, ymm0
+ vmovdqa ymm9, ymm1
+ vbroadcasti128 ymm12, xmmword ptr [rsp+220H]
+ vbroadcasti128 ymm13, xmmword ptr [rsp+240H]
+ vpunpckldq ymm14, ymm12, ymm13
+ vpunpckhdq ymm15, ymm12, ymm13
+ vpermq ymm14, ymm14, 50H
+ vpermq ymm15, ymm15, 50H
+ vbroadcasti128 ymm12, xmmword ptr [BLAKE3_BLOCK_LEN]
+ vpblendd ymm14, ymm14, ymm12, 44H
+ vpblendd ymm15, ymm15, ymm12, 44H
+ vmovdqa ymmword ptr [rsp], ymm14
+ vmovdqa ymmword ptr [rsp+20H], ymm15
+ mov r8, qword ptr [rdi]
+ mov r9, qword ptr [rdi+8H]
+ mov r10, qword ptr [rdi+10H]
+ mov r11, qword ptr [rdi+18H]
+ movzx eax, byte ptr [rbp+80H]
+ or eax, r13d
+ xor edx, edx
+ALIGN 16
+innerloop4:
+ mov r14d, eax
+ or eax, r12d
+ add rdx, 64
+ cmp rdx, r15
+ cmovne eax, r14d
+ mov dword ptr [rsp+200H], eax
+ vmovups ymm2, ymmword ptr [r8+rdx-40H]
+ vinsertf128 ymm2, ymm2, xmmword ptr [r9+rdx-40H], 01H
+ vmovups ymm3, ymmword ptr [r8+rdx-30H]
+ vinsertf128 ymm3, ymm3, xmmword ptr [r9+rdx-30H], 01H
+ vshufps ymm4, ymm2, ymm3, 136
+ vshufps ymm5, ymm2, ymm3, 221
+ vmovups ymm2, ymmword ptr [r8+rdx-20H]
+ vinsertf128 ymm2, ymm2, xmmword ptr [r9+rdx-20H], 01H
+ vmovups ymm3, ymmword ptr [r8+rdx-10H]
+ vinsertf128 ymm3, ymm3, xmmword ptr [r9+rdx-10H], 01H
+ vshufps ymm6, ymm2, ymm3, 136
+ vshufps ymm7, ymm2, ymm3, 221
+ vpshufd ymm6, ymm6, 93H
+ vpshufd ymm7, ymm7, 93H
+ vmovups ymm10, ymmword ptr [r10+rdx-40H]
+ vinsertf128 ymm10, ymm10, xmmword ptr [r11+rdx-40H], 01H
+ vmovups ymm11, ymmword ptr [r10+rdx-30H]
+ vinsertf128 ymm11, ymm11, xmmword ptr [r11+rdx-30H], 01H
+ vshufps ymm12, ymm10, ymm11, 136
+ vshufps ymm13, ymm10, ymm11, 221
+ vmovups ymm10, ymmword ptr [r10+rdx-20H]
+ vinsertf128 ymm10, ymm10, xmmword ptr [r11+rdx-20H], 01H
+ vmovups ymm11, ymmword ptr [r10+rdx-10H]
+ vinsertf128 ymm11, ymm11, xmmword ptr [r11+rdx-10H], 01H
+ vshufps ymm14, ymm10, ymm11, 136
+ vshufps ymm15, ymm10, ymm11, 221
+ vpshufd ymm14, ymm14, 93H
+ vpshufd ymm15, ymm15, 93H
+ vpbroadcastd ymm2, dword ptr [rsp+200H]
+ vmovdqa ymm3, ymmword ptr [rsp]
+ vmovdqa ymm11, ymmword ptr [rsp+20H]
+ vpblendd ymm3, ymm3, ymm2, 88H
+ vpblendd ymm11, ymm11, ymm2, 88H
+ vbroadcasti128 ymm2, xmmword ptr [BLAKE3_IV]
+ vmovdqa ymm10, ymm2
+ mov al, 7
+roundloop4:
+ vpaddd ymm0, ymm0, ymm4
+ vpaddd ymm8, ymm8, ymm12
+ vmovdqa ymmword ptr [rsp+40H], ymm4
+ nop
+ vmovdqa ymmword ptr [rsp+60H], ymm12
+ nop
+ vpaddd ymm0, ymm0, ymm1
+ vpaddd ymm8, ymm8, ymm9
+ vpxor ymm3, ymm3, ymm0
+ vpxor ymm11, ymm11, ymm8
+ vbroadcasti128 ymm4, xmmword ptr [ROT16]
+ vpshufb ymm3, ymm3, ymm4
+ vpshufb ymm11, ymm11, ymm4
+ vpaddd ymm2, ymm2, ymm3
+ vpaddd ymm10, ymm10, ymm11
+ vpxor ymm1, ymm1, ymm2
+ vpxor ymm9, ymm9, ymm10
+ vpsrld ymm4, ymm1, 12
+ vpslld ymm1, ymm1, 20
+ vpor ymm1, ymm1, ymm4
+ vpsrld ymm4, ymm9, 12
+ vpslld ymm9, ymm9, 20
+ vpor ymm9, ymm9, ymm4
+ vpaddd ymm0, ymm0, ymm5
+ vpaddd ymm8, ymm8, ymm13
+ vpaddd ymm0, ymm0, ymm1
+ vpaddd ymm8, ymm8, ymm9
+ vmovdqa ymmword ptr [rsp+80H], ymm5
+ vmovdqa ymmword ptr [rsp+0A0H], ymm13
+ vpxor ymm3, ymm3, ymm0
+ vpxor ymm11, ymm11, ymm8
+ vbroadcasti128 ymm4, xmmword ptr [ROT8]
+ vpshufb ymm3, ymm3, ymm4
+ vpshufb ymm11, ymm11, ymm4
+ vpaddd ymm2, ymm2, ymm3
+ vpaddd ymm10, ymm10, ymm11
+ vpxor ymm1, ymm1, ymm2
+ vpxor ymm9, ymm9, ymm10
+ vpsrld ymm4, ymm1, 7
+ vpslld ymm1, ymm1, 25
+ vpor ymm1, ymm1, ymm4
+ vpsrld ymm4, ymm9, 7
+ vpslld ymm9, ymm9, 25
+ vpor ymm9, ymm9, ymm4
+ vpshufd ymm0, ymm0, 93H
+ vpshufd ymm8, ymm8, 93H
+ vpshufd ymm3, ymm3, 4EH
+ vpshufd ymm11, ymm11, 4EH
+ vpshufd ymm2, ymm2, 39H
+ vpshufd ymm10, ymm10, 39H
+ vpaddd ymm0, ymm0, ymm6
+ vpaddd ymm8, ymm8, ymm14
+ vpaddd ymm0, ymm0, ymm1
+ vpaddd ymm8, ymm8, ymm9
+ vpxor ymm3, ymm3, ymm0
+ vpxor ymm11, ymm11, ymm8
+ vbroadcasti128 ymm4, xmmword ptr [ROT16]
+ vpshufb ymm3, ymm3, ymm4
+ vpshufb ymm11, ymm11, ymm4
+ vpaddd ymm2, ymm2, ymm3
+ vpaddd ymm10, ymm10, ymm11
+ vpxor ymm1, ymm1, ymm2
+ vpxor ymm9, ymm9, ymm10
+ vpsrld ymm4, ymm1, 12
+ vpslld ymm1, ymm1, 20
+ vpor ymm1, ymm1, ymm4
+ vpsrld ymm4, ymm9, 12
+ vpslld ymm9, ymm9, 20
+ vpor ymm9, ymm9, ymm4
+ vpaddd ymm0, ymm0, ymm7
+ vpaddd ymm8, ymm8, ymm15
+ vpaddd ymm0, ymm0, ymm1
+ vpaddd ymm8, ymm8, ymm9
+ vpxor ymm3, ymm3, ymm0
+ vpxor ymm11, ymm11, ymm8
+ vbroadcasti128 ymm4, xmmword ptr [ROT8]
+ vpshufb ymm3, ymm3, ymm4
+ vpshufb ymm11, ymm11, ymm4
+ vpaddd ymm2, ymm2, ymm3
+ vpaddd ymm10, ymm10, ymm11
+ vpxor ymm1, ymm1, ymm2
+ vpxor ymm9, ymm9, ymm10
+ vpsrld ymm4, ymm1, 7
+ vpslld ymm1, ymm1, 25
+ vpor ymm1, ymm1, ymm4
+ vpsrld ymm4, ymm9, 7
+ vpslld ymm9, ymm9, 25
+ vpor ymm9, ymm9, ymm4
+ vpshufd ymm0, ymm0, 39H
+ vpshufd ymm8, ymm8, 39H
+ vpshufd ymm3, ymm3, 4EH
+ vpshufd ymm11, ymm11, 4EH
+ vpshufd ymm2, ymm2, 93H
+ vpshufd ymm10, ymm10, 93H
+ dec al
+ je endroundloop4
+ vmovdqa ymm4, ymmword ptr [rsp+40H]
+ vmovdqa ymm5, ymmword ptr [rsp+80H]
+ vshufps ymm12, ymm4, ymm5, 214
+ vpshufd ymm13, ymm4, 0FH
+ vpshufd ymm4, ymm12, 39H
+ vshufps ymm12, ymm6, ymm7, 250
+ vpblendd ymm13, ymm13, ymm12, 0AAH
+ vpunpcklqdq ymm12, ymm7, ymm5
+ vpblendd ymm12, ymm12, ymm6, 88H
+ vpshufd ymm12, ymm12, 78H
+ vpunpckhdq ymm5, ymm5, ymm7
+ vpunpckldq ymm6, ymm6, ymm5
+ vpshufd ymm7, ymm6, 1EH
+ vmovdqa ymmword ptr [rsp+40H], ymm13
+ vmovdqa ymmword ptr [rsp+80H], ymm12
+ vmovdqa ymm12, ymmword ptr [rsp+60H]
+ vmovdqa ymm13, ymmword ptr [rsp+0A0H]
+ vshufps ymm5, ymm12, ymm13, 214
+ vpshufd ymm6, ymm12, 0FH
+ vpshufd ymm12, ymm5, 39H
+ vshufps ymm5, ymm14, ymm15, 250
+ vpblendd ymm6, ymm6, ymm5, 0AAH
+ vpunpcklqdq ymm5, ymm15, ymm13
+ vpblendd ymm5, ymm5, ymm14, 88H
+ vpshufd ymm5, ymm5, 78H
+ vpunpckhdq ymm13, ymm13, ymm15
+ vpunpckldq ymm14, ymm14, ymm13
+ vpshufd ymm15, ymm14, 1EH
+ vmovdqa ymm13, ymm6
+ vmovdqa ymm14, ymm5
+ vmovdqa ymm5, ymmword ptr [rsp+40H]
+ vmovdqa ymm6, ymmword ptr [rsp+80H]
+ jmp roundloop4
+endroundloop4:
+ vpxor ymm0, ymm0, ymm2
+ vpxor ymm1, ymm1, ymm3
+ vpxor ymm8, ymm8, ymm10
+ vpxor ymm9, ymm9, ymm11
+ mov eax, r13d
+ cmp rdx, r15
+ jne innerloop4
+ vmovdqu xmmword ptr [rbx], xmm0
+ vmovdqu xmmword ptr [rbx+10H], xmm1
+ vextracti128 xmmword ptr [rbx+20H], ymm0, 01H
+ vextracti128 xmmword ptr [rbx+30H], ymm1, 01H
+ vmovdqu xmmword ptr [rbx+40H], xmm8
+ vmovdqu xmmword ptr [rbx+50H], xmm9
+ vextracti128 xmmword ptr [rbx+60H], ymm8, 01H
+ vextracti128 xmmword ptr [rbx+70H], ymm9, 01H
+ vmovaps xmm8, xmmword ptr [rsp+260H]
+ vmovaps xmm0, xmmword ptr [rsp+220H]
+ vmovaps xmm1, xmmword ptr [rsp+230H]
+ vmovaps xmm2, xmmword ptr [rsp+240H]
+ vmovaps xmm3, xmmword ptr [rsp+250H]
+ vblendvps xmm0, xmm0, xmm1, xmm8
+ vblendvps xmm2, xmm2, xmm3, xmm8
+ vmovaps xmmword ptr [rsp+220H], xmm0
+ vmovaps xmmword ptr [rsp+240H], xmm2
+ add rbx, 128
+ add rdi, 32
+ sub rsi, 4
+final3blocks:
+ test rsi, 2H
+ je final1blocks
+ vbroadcasti128 ymm0, xmmword ptr [rcx]
+ vbroadcasti128 ymm1, xmmword ptr [rcx+10H]
+ vmovd xmm13, dword ptr [rsp+220H]
+ vpinsrd xmm13, xmm13, dword ptr [rsp+240H], 1
+ vpinsrd xmm13, xmm13, dword ptr [BLAKE3_BLOCK_LEN], 2
+ vmovd xmm14, dword ptr [rsp+224H]
+ vpinsrd xmm14, xmm14, dword ptr [rsp+244H], 1
+ vpinsrd xmm14, xmm14, dword ptr [BLAKE3_BLOCK_LEN], 2
+ vinserti128 ymm13, ymm13, xmm14, 01H
+ vbroadcasti128 ymm14, xmmword ptr [ROT16]
+ vbroadcasti128 ymm15, xmmword ptr [ROT8]
+ mov r8, qword ptr [rdi]
+ mov r9, qword ptr [rdi+8H]
+ movzx eax, byte ptr [rbp+80H]
+ or eax, r13d
+ xor edx, edx
+ALIGN 16
+innerloop2:
+ mov r14d, eax
+ or eax, r12d
+ add rdx, 64
+ cmp rdx, r15
+ cmovne eax, r14d
+ mov dword ptr [rsp+200H], eax
+ vbroadcasti128 ymm2, xmmword ptr [BLAKE3_IV]
+ vpbroadcastd ymm8, dword ptr [rsp+200H]
+ vpblendd ymm3, ymm13, ymm8, 88H
+ vmovups ymm8, ymmword ptr [r8+rdx-40H]
+ vinsertf128 ymm8, ymm8, xmmword ptr [r9+rdx-40H], 01H
+ vmovups ymm9, ymmword ptr [r8+rdx-30H]
+ vinsertf128 ymm9, ymm9, xmmword ptr [r9+rdx-30H], 01H
+ vshufps ymm4, ymm8, ymm9, 136
+ vshufps ymm5, ymm8, ymm9, 221
+ vmovups ymm8, ymmword ptr [r8+rdx-20H]
+ vinsertf128 ymm8, ymm8, xmmword ptr [r9+rdx-20H], 01H
+ vmovups ymm9, ymmword ptr [r8+rdx-10H]
+ vinsertf128 ymm9, ymm9, xmmword ptr [r9+rdx-10H], 01H
+ vshufps ymm6, ymm8, ymm9, 136
+ vshufps ymm7, ymm8, ymm9, 221
+ vpshufd ymm6, ymm6, 93H
+ vpshufd ymm7, ymm7, 93H
+ mov al, 7
+roundloop2:
+ vpaddd ymm0, ymm0, ymm4
+ vpaddd ymm0, ymm0, ymm1
+ vpxor ymm3, ymm3, ymm0
+ vpshufb ymm3, ymm3, ymm14
+ vpaddd ymm2, ymm2, ymm3
+ vpxor ymm1, ymm1, ymm2
+ vpsrld ymm8, ymm1, 12
+ vpslld ymm1, ymm1, 20
+ vpor ymm1, ymm1, ymm8
+ vpaddd ymm0, ymm0, ymm5
+ vpaddd ymm0, ymm0, ymm1
+ vpxor ymm3, ymm3, ymm0
+ vpshufb ymm3, ymm3, ymm15
+ vpaddd ymm2, ymm2, ymm3
+ vpxor ymm1, ymm1, ymm2
+ vpsrld ymm8, ymm1, 7
+ vpslld ymm1, ymm1, 25
+ vpor ymm1, ymm1, ymm8
+ vpshufd ymm0, ymm0, 93H
+ vpshufd ymm3, ymm3, 4EH
+ vpshufd ymm2, ymm2, 39H
+ vpaddd ymm0, ymm0, ymm6
+ vpaddd ymm0, ymm0, ymm1
+ vpxor ymm3, ymm3, ymm0
+ vpshufb ymm3, ymm3, ymm14
+ vpaddd ymm2, ymm2, ymm3
+ vpxor ymm1, ymm1, ymm2
+ vpsrld ymm8, ymm1, 12
+ vpslld ymm1, ymm1, 20
+ vpor ymm1, ymm1, ymm8
+ vpaddd ymm0, ymm0, ymm7
+ vpaddd ymm0, ymm0, ymm1
+ vpxor ymm3, ymm3, ymm0
+ vpshufb ymm3, ymm3, ymm15
+ vpaddd ymm2, ymm2, ymm3
+ vpxor ymm1, ymm1, ymm2
+ vpsrld ymm8, ymm1, 7
+ vpslld ymm1, ymm1, 25
+ vpor ymm1, ymm1, ymm8
+ vpshufd ymm0, ymm0, 39H
+ vpshufd ymm3, ymm3, 4EH
+ vpshufd ymm2, ymm2, 93H
+ dec al
+ jz endroundloop2
+ vshufps ymm8, ymm4, ymm5, 214
+ vpshufd ymm9, ymm4, 0FH
+ vpshufd ymm4, ymm8, 39H
+ vshufps ymm8, ymm6, ymm7, 250
+ vpblendd ymm9, ymm9, ymm8, 0AAH
+ vpunpcklqdq ymm8, ymm7, ymm5
+ vpblendd ymm8, ymm8, ymm6, 88H
+ vpshufd ymm8, ymm8, 78H
+ vpunpckhdq ymm5, ymm5, ymm7
+ vpunpckldq ymm6, ymm6, ymm5
+ vpshufd ymm7, ymm6, 1EH
+ vmovdqa ymm5, ymm9
+ vmovdqa ymm6, ymm8
+ jmp roundloop2
+endroundloop2:
+ vpxor ymm0, ymm0, ymm2
+ vpxor ymm1, ymm1, ymm3
+ mov eax, r13d
+ cmp rdx, r15
+ jne innerloop2
+ vmovdqu xmmword ptr [rbx], xmm0
+ vmovdqu xmmword ptr [rbx+10H], xmm1
+ vextracti128 xmmword ptr [rbx+20H], ymm0, 01H
+ vextracti128 xmmword ptr [rbx+30H], ymm1, 01H
+ vmovaps ymm8, ymmword ptr [rsp+260H]
+ vmovaps ymm0, ymmword ptr [rsp+220H]
+ vmovups ymm1, ymmword ptr [rsp+228H]
+ vmovaps ymm2, ymmword ptr [rsp+240H]
+ vmovups ymm3, ymmword ptr [rsp+248H]
+ vblendvps ymm0, ymm0, ymm1, ymm8
+ vblendvps ymm2, ymm2, ymm3, ymm8
+ vmovaps ymmword ptr [rsp+220H], ymm0
+ vmovaps ymmword ptr [rsp+240H], ymm2
+ add rbx, 64
+ add rdi, 16
+ sub rsi, 2
+final1blocks:
+ test rsi, 1H
+ je unwind
+ vmovdqu xmm0, xmmword ptr [rcx]
+ vmovdqu xmm1, xmmword ptr [rcx+10H]
+ vmovd xmm3, dword ptr [rsp+220H]
+ vpinsrd xmm3, xmm3, dword ptr [rsp+240H], 1
+ vpinsrd xmm13, xmm3, dword ptr [BLAKE3_BLOCK_LEN], 2
+ vmovdqa xmm14, xmmword ptr [ROT16]
+ vmovdqa xmm15, xmmword ptr [ROT8]
+ mov r8, qword ptr [rdi]
+ movzx eax, byte ptr [rbp+80H]
+ or eax, r13d
+ xor edx, edx
+ALIGN 16
+innerloop1:
+ mov r14d, eax
+ or eax, r12d
+ add rdx, 64
+ cmp rdx, r15
+ cmovne eax, r14d
+ vmovdqa xmm2, xmmword ptr [BLAKE3_IV]
+ vmovdqa xmm3, xmm13
+ vpinsrd xmm3, xmm3, eax, 3
+ vmovups xmm8, xmmword ptr [r8+rdx-40H]
+ vmovups xmm9, xmmword ptr [r8+rdx-30H]
+ vshufps xmm4, xmm8, xmm9, 136
+ vshufps xmm5, xmm8, xmm9, 221
+ vmovups xmm8, xmmword ptr [r8+rdx-20H]
+ vmovups xmm9, xmmword ptr [r8+rdx-10H]
+ vshufps xmm6, xmm8, xmm9, 136
+ vshufps xmm7, xmm8, xmm9, 221
+ vpshufd xmm6, xmm6, 93H
+ vpshufd xmm7, xmm7, 93H
+ mov al, 7
+roundloop1:
+ vpaddd xmm0, xmm0, xmm4
+ vpaddd xmm0, xmm0, xmm1
+ vpxor xmm3, xmm3, xmm0
+ vpshufb xmm3, xmm3, xmm14
+ vpaddd xmm2, xmm2, xmm3
+ vpxor xmm1, xmm1, xmm2
+ vpsrld xmm8, xmm1, 12
+ vpslld xmm1, xmm1, 20
+ vpor xmm1, xmm1, xmm8
+ vpaddd xmm0, xmm0, xmm5
+ vpaddd xmm0, xmm0, xmm1
+ vpxor xmm3, xmm3, xmm0
+ vpshufb xmm3, xmm3, xmm15
+ vpaddd xmm2, xmm2, xmm3
+ vpxor xmm1, xmm1, xmm2
+ vpsrld xmm8, xmm1, 7
+ vpslld xmm1, xmm1, 25
+ vpor xmm1, xmm1, xmm8
+ vpshufd xmm0, xmm0, 93H
+ vpshufd xmm3, xmm3, 4EH
+ vpshufd xmm2, xmm2, 39H
+ vpaddd xmm0, xmm0, xmm6
+ vpaddd xmm0, xmm0, xmm1
+ vpxor xmm3, xmm3, xmm0
+ vpshufb xmm3, xmm3, xmm14
+ vpaddd xmm2, xmm2, xmm3
+ vpxor xmm1, xmm1, xmm2
+ vpsrld xmm8, xmm1, 12
+ vpslld xmm1, xmm1, 20
+ vpor xmm1, xmm1, xmm8
+ vpaddd xmm0, xmm0, xmm7
+ vpaddd xmm0, xmm0, xmm1
+ vpxor xmm3, xmm3, xmm0
+ vpshufb xmm3, xmm3, xmm15
+ vpaddd xmm2, xmm2, xmm3
+ vpxor xmm1, xmm1, xmm2
+ vpsrld xmm8, xmm1, 7
+ vpslld xmm1, xmm1, 25
+ vpor xmm1, xmm1, xmm8
+ vpshufd xmm0, xmm0, 39H
+ vpshufd xmm3, xmm3, 4EH
+ vpshufd xmm2, xmm2, 93H
+ dec al
+ jz endroundloop1
+ vshufps xmm8, xmm4, xmm5, 214
+ vpshufd xmm9, xmm4, 0FH
+ vpshufd xmm4, xmm8, 39H
+ vshufps xmm8, xmm6, xmm7, 250
+ vpblendd xmm9, xmm9, xmm8, 0AAH
+ vpunpcklqdq xmm8, xmm7, xmm5
+ vpblendd xmm8, xmm8, xmm6, 88H
+ vpshufd xmm8, xmm8, 78H
+ vpunpckhdq xmm5, xmm5, xmm7
+ vpunpckldq xmm6, xmm6, xmm5
+ vpshufd xmm7, xmm6, 1EH
+ vmovdqa xmm5, xmm9
+ vmovdqa xmm6, xmm8
+ jmp roundloop1
+endroundloop1:
+ vpxor xmm0, xmm0, xmm2
+ vpxor xmm1, xmm1, xmm3
+ mov eax, r13d
+ cmp rdx, r15
+ jne innerloop1
+ vmovdqu xmmword ptr [rbx], xmm0
+ vmovdqu xmmword ptr [rbx+10H], xmm1
+ jmp unwind
+
+_llvm_blake3_hash_many_avx2 ENDP
+llvm_blake3_hash_many_avx2 ENDP
+_TEXT ENDS
+
+_RDATA SEGMENT READONLY PAGE ALIAS(".rdata") 'CONST'
+ALIGN 64
+ADD0:
+ dd 0, 1, 2, 3, 4, 5, 6, 7
+
+ADD1:
+ dd 8 dup (8)
+
+BLAKE3_IV_0:
+ dd 8 dup (6A09E667H)
+
+BLAKE3_IV_1:
+ dd 8 dup (0BB67AE85H)
+
+BLAKE3_IV_2:
+ dd 8 dup (3C6EF372H)
+
+BLAKE3_IV_3:
+ dd 8 dup (0A54FF53AH)
+
+BLAKE3_BLOCK_LEN:
+ dd 8 dup (64)
+
+ROT16:
+ db 2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13
+
+ROT8:
+ db 1, 2, 3, 0, 5, 6, 7, 4, 9, 10, 11, 8, 13, 14, 15, 12
+
+CMP_MSB_MASK:
+ dd 8 dup(80000000H)
+
+BLAKE3_IV:
+ dd 6A09E667H, 0BB67AE85H, 3C6EF372H, 0A54FF53AH
+
+_RDATA ENDS
+END
diff --git a/contrib/libs/llvm16/lib/Support/BLAKE3/blake3_avx512.c b/contrib/libs/llvm16/lib/Support/BLAKE3/blake3_avx512.c
new file mode 100644
index 0000000000..9c35b08c43
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Support/BLAKE3/blake3_avx512.c
@@ -0,0 +1,1207 @@
+#include "blake3_impl.h"
+
+#include <immintrin.h>
+
+#define _mm_shuffle_ps2(a, b, c) \
+ (_mm_castps_si128( \
+ _mm_shuffle_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b), (c))))
+
+INLINE __m128i loadu_128(const uint8_t src[16]) {
+ return _mm_loadu_si128((const __m128i *)src);
+}
+
+INLINE __m256i loadu_256(const uint8_t src[32]) {
+ return _mm256_loadu_si256((const __m256i *)src);
+}
+
+INLINE __m512i loadu_512(const uint8_t src[64]) {
+ return _mm512_loadu_si512((const __m512i *)src);
+}
+
+INLINE void storeu_128(__m128i src, uint8_t dest[16]) {
+ _mm_storeu_si128((__m128i *)dest, src);
+}
+
+INLINE void storeu_256(__m256i src, uint8_t dest[16]) {
+ _mm256_storeu_si256((__m256i *)dest, src);
+}
+
+INLINE __m128i add_128(__m128i a, __m128i b) { return _mm_add_epi32(a, b); }
+
+INLINE __m256i add_256(__m256i a, __m256i b) { return _mm256_add_epi32(a, b); }
+
+INLINE __m512i add_512(__m512i a, __m512i b) { return _mm512_add_epi32(a, b); }
+
+INLINE __m128i xor_128(__m128i a, __m128i b) { return _mm_xor_si128(a, b); }
+
+INLINE __m256i xor_256(__m256i a, __m256i b) { return _mm256_xor_si256(a, b); }
+
+INLINE __m512i xor_512(__m512i a, __m512i b) { return _mm512_xor_si512(a, b); }
+
+INLINE __m128i set1_128(uint32_t x) { return _mm_set1_epi32((int32_t)x); }
+
+INLINE __m256i set1_256(uint32_t x) { return _mm256_set1_epi32((int32_t)x); }
+
+INLINE __m512i set1_512(uint32_t x) { return _mm512_set1_epi32((int32_t)x); }
+
+INLINE __m128i set4(uint32_t a, uint32_t b, uint32_t c, uint32_t d) {
+ return _mm_setr_epi32((int32_t)a, (int32_t)b, (int32_t)c, (int32_t)d);
+}
+
+INLINE __m128i rot16_128(__m128i x) { return _mm_ror_epi32(x, 16); }
+
+INLINE __m256i rot16_256(__m256i x) { return _mm256_ror_epi32(x, 16); }
+
+INLINE __m512i rot16_512(__m512i x) { return _mm512_ror_epi32(x, 16); }
+
+INLINE __m128i rot12_128(__m128i x) { return _mm_ror_epi32(x, 12); }
+
+INLINE __m256i rot12_256(__m256i x) { return _mm256_ror_epi32(x, 12); }
+
+INLINE __m512i rot12_512(__m512i x) { return _mm512_ror_epi32(x, 12); }
+
+INLINE __m128i rot8_128(__m128i x) { return _mm_ror_epi32(x, 8); }
+
+INLINE __m256i rot8_256(__m256i x) { return _mm256_ror_epi32(x, 8); }
+
+INLINE __m512i rot8_512(__m512i x) { return _mm512_ror_epi32(x, 8); }
+
+INLINE __m128i rot7_128(__m128i x) { return _mm_ror_epi32(x, 7); }
+
+INLINE __m256i rot7_256(__m256i x) { return _mm256_ror_epi32(x, 7); }
+
+INLINE __m512i rot7_512(__m512i x) { return _mm512_ror_epi32(x, 7); }
+
+/*
+ * ----------------------------------------------------------------------------
+ * compress_avx512
+ * ----------------------------------------------------------------------------
+ */
+
+INLINE void g1(__m128i *row0, __m128i *row1, __m128i *row2, __m128i *row3,
+ __m128i m) {
+ *row0 = add_128(add_128(*row0, m), *row1);
+ *row3 = xor_128(*row3, *row0);
+ *row3 = rot16_128(*row3);
+ *row2 = add_128(*row2, *row3);
+ *row1 = xor_128(*row1, *row2);
+ *row1 = rot12_128(*row1);
+}
+
+INLINE void g2(__m128i *row0, __m128i *row1, __m128i *row2, __m128i *row3,
+ __m128i m) {
+ *row0 = add_128(add_128(*row0, m), *row1);
+ *row3 = xor_128(*row3, *row0);
+ *row3 = rot8_128(*row3);
+ *row2 = add_128(*row2, *row3);
+ *row1 = xor_128(*row1, *row2);
+ *row1 = rot7_128(*row1);
+}
+
+// Note the optimization here of leaving row1 as the unrotated row, rather than
+// row0. All the message loads below are adjusted to compensate for this. See
+// discussion at https://github.com/sneves/blake2-avx2/pull/4
+INLINE void diagonalize(__m128i *row0, __m128i *row2, __m128i *row3) {
+ *row0 = _mm_shuffle_epi32(*row0, _MM_SHUFFLE(2, 1, 0, 3));
+ *row3 = _mm_shuffle_epi32(*row3, _MM_SHUFFLE(1, 0, 3, 2));
+ *row2 = _mm_shuffle_epi32(*row2, _MM_SHUFFLE(0, 3, 2, 1));
+}
+
+INLINE void undiagonalize(__m128i *row0, __m128i *row2, __m128i *row3) {
+ *row0 = _mm_shuffle_epi32(*row0, _MM_SHUFFLE(0, 3, 2, 1));
+ *row3 = _mm_shuffle_epi32(*row3, _MM_SHUFFLE(1, 0, 3, 2));
+ *row2 = _mm_shuffle_epi32(*row2, _MM_SHUFFLE(2, 1, 0, 3));
+}
+
+INLINE void compress_pre(__m128i rows[4], const uint32_t cv[8],
+ const uint8_t block[BLAKE3_BLOCK_LEN],
+ uint8_t block_len, uint64_t counter, uint8_t flags) {
+ rows[0] = loadu_128((uint8_t *)&cv[0]);
+ rows[1] = loadu_128((uint8_t *)&cv[4]);
+ rows[2] = set4(IV[0], IV[1], IV[2], IV[3]);
+ rows[3] = set4(counter_low(counter), counter_high(counter),
+ (uint32_t)block_len, (uint32_t)flags);
+
+ __m128i m0 = loadu_128(&block[sizeof(__m128i) * 0]);
+ __m128i m1 = loadu_128(&block[sizeof(__m128i) * 1]);
+ __m128i m2 = loadu_128(&block[sizeof(__m128i) * 2]);
+ __m128i m3 = loadu_128(&block[sizeof(__m128i) * 3]);
+
+ __m128i t0, t1, t2, t3, tt;
+
+ // Round 1. The first round permutes the message words from the original
+ // input order, into the groups that get mixed in parallel.
+ t0 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(2, 0, 2, 0)); // 6 4 2 0
+ g1(&rows[0], &rows[1], &rows[2], &rows[3], t0);
+ t1 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(3, 1, 3, 1)); // 7 5 3 1
+ g2(&rows[0], &rows[1], &rows[2], &rows[3], t1);
+ diagonalize(&rows[0], &rows[2], &rows[3]);
+ t2 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(2, 0, 2, 0)); // 14 12 10 8
+ t2 = _mm_shuffle_epi32(t2, _MM_SHUFFLE(2, 1, 0, 3)); // 12 10 8 14
+ g1(&rows[0], &rows[1], &rows[2], &rows[3], t2);
+ t3 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(3, 1, 3, 1)); // 15 13 11 9
+ t3 = _mm_shuffle_epi32(t3, _MM_SHUFFLE(2, 1, 0, 3)); // 13 11 9 15
+ g2(&rows[0], &rows[1], &rows[2], &rows[3], t3);
+ undiagonalize(&rows[0], &rows[2], &rows[3]);
+ m0 = t0;
+ m1 = t1;
+ m2 = t2;
+ m3 = t3;
+
+ // Round 2. This round and all following rounds apply a fixed permutation
+ // to the message words from the round before.
+ t0 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(3, 1, 1, 2));
+ t0 = _mm_shuffle_epi32(t0, _MM_SHUFFLE(0, 3, 2, 1));
+ g1(&rows[0], &rows[1], &rows[2], &rows[3], t0);
+ t1 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(3, 3, 2, 2));
+ tt = _mm_shuffle_epi32(m0, _MM_SHUFFLE(0, 0, 3, 3));
+ t1 = _mm_blend_epi16(tt, t1, 0xCC);
+ g2(&rows[0], &rows[1], &rows[2], &rows[3], t1);
+ diagonalize(&rows[0], &rows[2], &rows[3]);
+ t2 = _mm_unpacklo_epi64(m3, m1);
+ tt = _mm_blend_epi16(t2, m2, 0xC0);
+ t2 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(1, 3, 2, 0));
+ g1(&rows[0], &rows[1], &rows[2], &rows[3], t2);
+ t3 = _mm_unpackhi_epi32(m1, m3);
+ tt = _mm_unpacklo_epi32(m2, t3);
+ t3 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(0, 1, 3, 2));
+ g2(&rows[0], &rows[1], &rows[2], &rows[3], t3);
+ undiagonalize(&rows[0], &rows[2], &rows[3]);
+ m0 = t0;
+ m1 = t1;
+ m2 = t2;
+ m3 = t3;
+
+ // Round 3
+ t0 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(3, 1, 1, 2));
+ t0 = _mm_shuffle_epi32(t0, _MM_SHUFFLE(0, 3, 2, 1));
+ g1(&rows[0], &rows[1], &rows[2], &rows[3], t0);
+ t1 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(3, 3, 2, 2));
+ tt = _mm_shuffle_epi32(m0, _MM_SHUFFLE(0, 0, 3, 3));
+ t1 = _mm_blend_epi16(tt, t1, 0xCC);
+ g2(&rows[0], &rows[1], &rows[2], &rows[3], t1);
+ diagonalize(&rows[0], &rows[2], &rows[3]);
+ t2 = _mm_unpacklo_epi64(m3, m1);
+ tt = _mm_blend_epi16(t2, m2, 0xC0);
+ t2 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(1, 3, 2, 0));
+ g1(&rows[0], &rows[1], &rows[2], &rows[3], t2);
+ t3 = _mm_unpackhi_epi32(m1, m3);
+ tt = _mm_unpacklo_epi32(m2, t3);
+ t3 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(0, 1, 3, 2));
+ g2(&rows[0], &rows[1], &rows[2], &rows[3], t3);
+ undiagonalize(&rows[0], &rows[2], &rows[3]);
+ m0 = t0;
+ m1 = t1;
+ m2 = t2;
+ m3 = t3;
+
+ // Round 4
+ t0 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(3, 1, 1, 2));
+ t0 = _mm_shuffle_epi32(t0, _MM_SHUFFLE(0, 3, 2, 1));
+ g1(&rows[0], &rows[1], &rows[2], &rows[3], t0);
+ t1 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(3, 3, 2, 2));
+ tt = _mm_shuffle_epi32(m0, _MM_SHUFFLE(0, 0, 3, 3));
+ t1 = _mm_blend_epi16(tt, t1, 0xCC);
+ g2(&rows[0], &rows[1], &rows[2], &rows[3], t1);
+ diagonalize(&rows[0], &rows[2], &rows[3]);
+ t2 = _mm_unpacklo_epi64(m3, m1);
+ tt = _mm_blend_epi16(t2, m2, 0xC0);
+ t2 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(1, 3, 2, 0));
+ g1(&rows[0], &rows[1], &rows[2], &rows[3], t2);
+ t3 = _mm_unpackhi_epi32(m1, m3);
+ tt = _mm_unpacklo_epi32(m2, t3);
+ t3 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(0, 1, 3, 2));
+ g2(&rows[0], &rows[1], &rows[2], &rows[3], t3);
+ undiagonalize(&rows[0], &rows[2], &rows[3]);
+ m0 = t0;
+ m1 = t1;
+ m2 = t2;
+ m3 = t3;
+
+ // Round 5
+ t0 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(3, 1, 1, 2));
+ t0 = _mm_shuffle_epi32(t0, _MM_SHUFFLE(0, 3, 2, 1));
+ g1(&rows[0], &rows[1], &rows[2], &rows[3], t0);
+ t1 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(3, 3, 2, 2));
+ tt = _mm_shuffle_epi32(m0, _MM_SHUFFLE(0, 0, 3, 3));
+ t1 = _mm_blend_epi16(tt, t1, 0xCC);
+ g2(&rows[0], &rows[1], &rows[2], &rows[3], t1);
+ diagonalize(&rows[0], &rows[2], &rows[3]);
+ t2 = _mm_unpacklo_epi64(m3, m1);
+ tt = _mm_blend_epi16(t2, m2, 0xC0);
+ t2 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(1, 3, 2, 0));
+ g1(&rows[0], &rows[1], &rows[2], &rows[3], t2);
+ t3 = _mm_unpackhi_epi32(m1, m3);
+ tt = _mm_unpacklo_epi32(m2, t3);
+ t3 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(0, 1, 3, 2));
+ g2(&rows[0], &rows[1], &rows[2], &rows[3], t3);
+ undiagonalize(&rows[0], &rows[2], &rows[3]);
+ m0 = t0;
+ m1 = t1;
+ m2 = t2;
+ m3 = t3;
+
+ // Round 6
+ t0 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(3, 1, 1, 2));
+ t0 = _mm_shuffle_epi32(t0, _MM_SHUFFLE(0, 3, 2, 1));
+ g1(&rows[0], &rows[1], &rows[2], &rows[3], t0);
+ t1 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(3, 3, 2, 2));
+ tt = _mm_shuffle_epi32(m0, _MM_SHUFFLE(0, 0, 3, 3));
+ t1 = _mm_blend_epi16(tt, t1, 0xCC);
+ g2(&rows[0], &rows[1], &rows[2], &rows[3], t1);
+ diagonalize(&rows[0], &rows[2], &rows[3]);
+ t2 = _mm_unpacklo_epi64(m3, m1);
+ tt = _mm_blend_epi16(t2, m2, 0xC0);
+ t2 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(1, 3, 2, 0));
+ g1(&rows[0], &rows[1], &rows[2], &rows[3], t2);
+ t3 = _mm_unpackhi_epi32(m1, m3);
+ tt = _mm_unpacklo_epi32(m2, t3);
+ t3 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(0, 1, 3, 2));
+ g2(&rows[0], &rows[1], &rows[2], &rows[3], t3);
+ undiagonalize(&rows[0], &rows[2], &rows[3]);
+ m0 = t0;
+ m1 = t1;
+ m2 = t2;
+ m3 = t3;
+
+ // Round 7
+ t0 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(3, 1, 1, 2));
+ t0 = _mm_shuffle_epi32(t0, _MM_SHUFFLE(0, 3, 2, 1));
+ g1(&rows[0], &rows[1], &rows[2], &rows[3], t0);
+ t1 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(3, 3, 2, 2));
+ tt = _mm_shuffle_epi32(m0, _MM_SHUFFLE(0, 0, 3, 3));
+ t1 = _mm_blend_epi16(tt, t1, 0xCC);
+ g2(&rows[0], &rows[1], &rows[2], &rows[3], t1);
+ diagonalize(&rows[0], &rows[2], &rows[3]);
+ t2 = _mm_unpacklo_epi64(m3, m1);
+ tt = _mm_blend_epi16(t2, m2, 0xC0);
+ t2 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(1, 3, 2, 0));
+ g1(&rows[0], &rows[1], &rows[2], &rows[3], t2);
+ t3 = _mm_unpackhi_epi32(m1, m3);
+ tt = _mm_unpacklo_epi32(m2, t3);
+ t3 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(0, 1, 3, 2));
+ g2(&rows[0], &rows[1], &rows[2], &rows[3], t3);
+ undiagonalize(&rows[0], &rows[2], &rows[3]);
+}
+
+// Compress one block in XOF (extended output) mode, producing 64 bytes.
+// The first 32 bytes of `out` are the usual feed-forward (rows[0..1] XOR
+// rows[2..3]); the last 32 bytes additionally XOR the input chaining value
+// back in, as required for BLAKE3's extendable output.
+void blake3_compress_xof_avx512(const uint32_t cv[8],
+ const uint8_t block[BLAKE3_BLOCK_LEN],
+ uint8_t block_len, uint64_t counter,
+ uint8_t flags, uint8_t out[64]) {
+ __m128i rows[4];
+ compress_pre(rows, cv, block, block_len, counter, flags);
+ storeu_128(xor_128(rows[0], rows[2]), &out[0]);
+ storeu_128(xor_128(rows[1], rows[3]), &out[16]);
+ storeu_128(xor_128(rows[2], loadu_128((uint8_t *)&cv[0])), &out[32]);
+ storeu_128(xor_128(rows[3], loadu_128((uint8_t *)&cv[4])), &out[48]);
+}
+
+// Compress one block and write the new 8-word chaining value back into `cv`
+// (feed-forward only: rows[0..1] XOR rows[2..3]; the XOF tail is omitted).
+void blake3_compress_in_place_avx512(uint32_t cv[8],
+ const uint8_t block[BLAKE3_BLOCK_LEN],
+ uint8_t block_len, uint64_t counter,
+ uint8_t flags) {
+ __m128i rows[4];
+ compress_pre(rows, cv, block, block_len, counter, flags);
+ storeu_128(xor_128(rows[0], rows[2]), (uint8_t *)&cv[0]);
+ storeu_128(xor_128(rows[1], rows[3]), (uint8_t *)&cv[4]);
+}
+
+/*
+ * ----------------------------------------------------------------------------
+ * hash4_avx512
+ * ----------------------------------------------------------------------------
+ */
+
+// One full BLAKE3 round over 4 hash states processed in parallel: each of
+// the 16 state words v[0..15] is an __m128i holding that word for all 4
+// lanes. `m` holds the 16 transposed message words and `r` selects the
+// permutation row from MSG_SCHEDULE. The first half below is the four
+// column G-mixes; the second half is the four diagonal G-mixes, expressed
+// by re-pairing the v-words rather than physically rotating rows.
+INLINE void round_fn4(__m128i v[16], __m128i m[16], size_t r) {
+ // Column mixing: first half of G (add message word, add, xor, rot16/rot12).
+ v[0] = add_128(v[0], m[(size_t)MSG_SCHEDULE[r][0]]);
+ v[1] = add_128(v[1], m[(size_t)MSG_SCHEDULE[r][2]]);
+ v[2] = add_128(v[2], m[(size_t)MSG_SCHEDULE[r][4]]);
+ v[3] = add_128(v[3], m[(size_t)MSG_SCHEDULE[r][6]]);
+ v[0] = add_128(v[0], v[4]);
+ v[1] = add_128(v[1], v[5]);
+ v[2] = add_128(v[2], v[6]);
+ v[3] = add_128(v[3], v[7]);
+ v[12] = xor_128(v[12], v[0]);
+ v[13] = xor_128(v[13], v[1]);
+ v[14] = xor_128(v[14], v[2]);
+ v[15] = xor_128(v[15], v[3]);
+ v[12] = rot16_128(v[12]);
+ v[13] = rot16_128(v[13]);
+ v[14] = rot16_128(v[14]);
+ v[15] = rot16_128(v[15]);
+ v[8] = add_128(v[8], v[12]);
+ v[9] = add_128(v[9], v[13]);
+ v[10] = add_128(v[10], v[14]);
+ v[11] = add_128(v[11], v[15]);
+ v[4] = xor_128(v[4], v[8]);
+ v[5] = xor_128(v[5], v[9]);
+ v[6] = xor_128(v[6], v[10]);
+ v[7] = xor_128(v[7], v[11]);
+ v[4] = rot12_128(v[4]);
+ v[5] = rot12_128(v[5]);
+ v[6] = rot12_128(v[6]);
+ v[7] = rot12_128(v[7]);
+ // Column mixing: second half of G (odd message words, rot8/rot7).
+ v[0] = add_128(v[0], m[(size_t)MSG_SCHEDULE[r][1]]);
+ v[1] = add_128(v[1], m[(size_t)MSG_SCHEDULE[r][3]]);
+ v[2] = add_128(v[2], m[(size_t)MSG_SCHEDULE[r][5]]);
+ v[3] = add_128(v[3], m[(size_t)MSG_SCHEDULE[r][7]]);
+ v[0] = add_128(v[0], v[4]);
+ v[1] = add_128(v[1], v[5]);
+ v[2] = add_128(v[2], v[6]);
+ v[3] = add_128(v[3], v[7]);
+ v[12] = xor_128(v[12], v[0]);
+ v[13] = xor_128(v[13], v[1]);
+ v[14] = xor_128(v[14], v[2]);
+ v[15] = xor_128(v[15], v[3]);
+ v[12] = rot8_128(v[12]);
+ v[13] = rot8_128(v[13]);
+ v[14] = rot8_128(v[14]);
+ v[15] = rot8_128(v[15]);
+ v[8] = add_128(v[8], v[12]);
+ v[9] = add_128(v[9], v[13]);
+ v[10] = add_128(v[10], v[14]);
+ v[11] = add_128(v[11], v[15]);
+ v[4] = xor_128(v[4], v[8]);
+ v[5] = xor_128(v[5], v[9]);
+ v[6] = xor_128(v[6], v[10]);
+ v[7] = xor_128(v[7], v[11]);
+ v[4] = rot7_128(v[4]);
+ v[5] = rot7_128(v[5]);
+ v[6] = rot7_128(v[6]);
+ v[7] = rot7_128(v[7]);
+
+ // Diagonal mixing: note the shifted pairings (e.g. v[0] with v[5]/v[15]/
+ // v[10]) stand in for the diagonalize/undiagonalize row rotations.
+ v[0] = add_128(v[0], m[(size_t)MSG_SCHEDULE[r][8]]);
+ v[1] = add_128(v[1], m[(size_t)MSG_SCHEDULE[r][10]]);
+ v[2] = add_128(v[2], m[(size_t)MSG_SCHEDULE[r][12]]);
+ v[3] = add_128(v[3], m[(size_t)MSG_SCHEDULE[r][14]]);
+ v[0] = add_128(v[0], v[5]);
+ v[1] = add_128(v[1], v[6]);
+ v[2] = add_128(v[2], v[7]);
+ v[3] = add_128(v[3], v[4]);
+ v[15] = xor_128(v[15], v[0]);
+ v[12] = xor_128(v[12], v[1]);
+ v[13] = xor_128(v[13], v[2]);
+ v[14] = xor_128(v[14], v[3]);
+ v[15] = rot16_128(v[15]);
+ v[12] = rot16_128(v[12]);
+ v[13] = rot16_128(v[13]);
+ v[14] = rot16_128(v[14]);
+ v[10] = add_128(v[10], v[15]);
+ v[11] = add_128(v[11], v[12]);
+ v[8] = add_128(v[8], v[13]);
+ v[9] = add_128(v[9], v[14]);
+ v[5] = xor_128(v[5], v[10]);
+ v[6] = xor_128(v[6], v[11]);
+ v[7] = xor_128(v[7], v[8]);
+ v[4] = xor_128(v[4], v[9]);
+ v[5] = rot12_128(v[5]);
+ v[6] = rot12_128(v[6]);
+ v[7] = rot12_128(v[7]);
+ v[4] = rot12_128(v[4]);
+ v[0] = add_128(v[0], m[(size_t)MSG_SCHEDULE[r][9]]);
+ v[1] = add_128(v[1], m[(size_t)MSG_SCHEDULE[r][11]]);
+ v[2] = add_128(v[2], m[(size_t)MSG_SCHEDULE[r][13]]);
+ v[3] = add_128(v[3], m[(size_t)MSG_SCHEDULE[r][15]]);
+ v[0] = add_128(v[0], v[5]);
+ v[1] = add_128(v[1], v[6]);
+ v[2] = add_128(v[2], v[7]);
+ v[3] = add_128(v[3], v[4]);
+ v[15] = xor_128(v[15], v[0]);
+ v[12] = xor_128(v[12], v[1]);
+ v[13] = xor_128(v[13], v[2]);
+ v[14] = xor_128(v[14], v[3]);
+ v[15] = rot8_128(v[15]);
+ v[12] = rot8_128(v[12]);
+ v[13] = rot8_128(v[13]);
+ v[14] = rot8_128(v[14]);
+ v[10] = add_128(v[10], v[15]);
+ v[11] = add_128(v[11], v[12]);
+ v[8] = add_128(v[8], v[13]);
+ v[9] = add_128(v[9], v[14]);
+ v[5] = xor_128(v[5], v[10]);
+ v[6] = xor_128(v[6], v[11]);
+ v[7] = xor_128(v[7], v[8]);
+ v[4] = xor_128(v[4], v[9]);
+ v[5] = rot7_128(v[5]);
+ v[6] = rot7_128(v[6]);
+ v[7] = rot7_128(v[7]);
+ v[4] = rot7_128(v[4]);
+}
+
+// Transpose a 4x4 matrix of 32-bit words held in four __m128i vectors:
+// afterwards vecs[i] holds word i from each of the original four vectors.
+INLINE void transpose_vecs_128(__m128i vecs[4]) {
+ // Interleave 32-bit lanes. The low unpack is lanes 00/11 and the high is
+ // 22/33. Note that this doesn't split the vector into two lanes, as the
+ // AVX2 counterparts do.
+ __m128i ab_01 = _mm_unpacklo_epi32(vecs[0], vecs[1]);
+ __m128i ab_23 = _mm_unpackhi_epi32(vecs[0], vecs[1]);
+ __m128i cd_01 = _mm_unpacklo_epi32(vecs[2], vecs[3]);
+ __m128i cd_23 = _mm_unpackhi_epi32(vecs[2], vecs[3]);
+
+ // Interleave 64-bit lanes.
+ __m128i abcd_0 = _mm_unpacklo_epi64(ab_01, cd_01);
+ __m128i abcd_1 = _mm_unpackhi_epi64(ab_01, cd_01);
+ __m128i abcd_2 = _mm_unpacklo_epi64(ab_23, cd_23);
+ __m128i abcd_3 = _mm_unpackhi_epi64(ab_23, cd_23);
+
+ vecs[0] = abcd_0;
+ vecs[1] = abcd_1;
+ vecs[2] = abcd_2;
+ vecs[3] = abcd_3;
+}
+
+// Load one 64-byte block from each of the 4 inputs at `block_offset` and
+// transpose them into 16 message-word vectors: out[w] holds message word w
+// for all 4 lanes. Also prefetches the data 256 bytes ahead in each input.
+INLINE void transpose_msg_vecs4(const uint8_t *const *inputs,
+ size_t block_offset, __m128i out[16]) {
+ out[0] = loadu_128(&inputs[0][block_offset + 0 * sizeof(__m128i)]);
+ out[1] = loadu_128(&inputs[1][block_offset + 0 * sizeof(__m128i)]);
+ out[2] = loadu_128(&inputs[2][block_offset + 0 * sizeof(__m128i)]);
+ out[3] = loadu_128(&inputs[3][block_offset + 0 * sizeof(__m128i)]);
+ out[4] = loadu_128(&inputs[0][block_offset + 1 * sizeof(__m128i)]);
+ out[5] = loadu_128(&inputs[1][block_offset + 1 * sizeof(__m128i)]);
+ out[6] = loadu_128(&inputs[2][block_offset + 1 * sizeof(__m128i)]);
+ out[7] = loadu_128(&inputs[3][block_offset + 1 * sizeof(__m128i)]);
+ out[8] = loadu_128(&inputs[0][block_offset + 2 * sizeof(__m128i)]);
+ out[9] = loadu_128(&inputs[1][block_offset + 2 * sizeof(__m128i)]);
+ out[10] = loadu_128(&inputs[2][block_offset + 2 * sizeof(__m128i)]);
+ out[11] = loadu_128(&inputs[3][block_offset + 2 * sizeof(__m128i)]);
+ out[12] = loadu_128(&inputs[0][block_offset + 3 * sizeof(__m128i)]);
+ out[13] = loadu_128(&inputs[1][block_offset + 3 * sizeof(__m128i)]);
+ out[14] = loadu_128(&inputs[2][block_offset + 3 * sizeof(__m128i)]);
+ out[15] = loadu_128(&inputs[3][block_offset + 3 * sizeof(__m128i)]);
+ for (size_t i = 0; i < 4; ++i) {
+ _mm_prefetch((const void *)&inputs[i][block_offset + 256], _MM_HINT_T0);
+ }
+ // Each group of four vectors holds a 4x4 word matrix; transpose each.
+ transpose_vecs_128(&out[0]);
+ transpose_vecs_128(&out[4]);
+ transpose_vecs_128(&out[8]);
+ transpose_vecs_128(&out[12]);
+}
+
+// Split the 64-bit block counter into per-lane low/high 32-bit halves for
+// 4 lanes. When `increment_counter` is true the lanes get counter+0..+3;
+// otherwise the mask zeroes the deltas so all lanes share the same counter.
+INLINE void load_counters4(uint64_t counter, bool increment_counter,
+ __m128i *out_lo, __m128i *out_hi) {
+ uint64_t mask = (increment_counter ? ~0 : 0);
+ __m256i mask_vec = _mm256_set1_epi64x(mask);
+ __m256i deltas = _mm256_setr_epi64x(0, 1, 2, 3);
+ deltas = _mm256_and_si256(mask_vec, deltas);
+ __m256i counters =
+ _mm256_add_epi64(_mm256_set1_epi64x((int64_t)counter), deltas);
+ // Narrow the 64-bit counters to their low words, then to their high words.
+ *out_lo = _mm256_cvtepi64_epi32(counters);
+ *out_hi = _mm256_cvtepi64_epi32(_mm256_srli_epi64(counters, 32));
+}
+
+// Hash `blocks` consecutive 64-byte blocks from each of 4 inputs in
+// parallel (one lane per input) and write the four 32-byte chaining values
+// contiguously to `out`. `flags_start`/`flags_end` are OR'd into the first/
+// last block's flags respectively.
+static
+void blake3_hash4_avx512(const uint8_t *const *inputs, size_t blocks,
+ const uint32_t key[8], uint64_t counter,
+ bool increment_counter, uint8_t flags,
+ uint8_t flags_start, uint8_t flags_end, uint8_t *out) {
+ __m128i h_vecs[8] = {
+ set1_128(key[0]), set1_128(key[1]), set1_128(key[2]), set1_128(key[3]),
+ set1_128(key[4]), set1_128(key[5]), set1_128(key[6]), set1_128(key[7]),
+ };
+ __m128i counter_low_vec, counter_high_vec;
+ load_counters4(counter, increment_counter, &counter_low_vec,
+ &counter_high_vec);
+ uint8_t block_flags = flags | flags_start;
+
+ for (size_t block = 0; block < blocks; block++) {
+ if (block + 1 == blocks) {
+ block_flags |= flags_end;
+ }
+ __m128i block_len_vec = set1_128(BLAKE3_BLOCK_LEN);
+ __m128i block_flags_vec = set1_128(block_flags);
+ __m128i msg_vecs[16];
+ transpose_msg_vecs4(inputs, block * BLAKE3_BLOCK_LEN, msg_vecs);
+
+ __m128i v[16] = {
+ h_vecs[0], h_vecs[1], h_vecs[2], h_vecs[3],
+ h_vecs[4], h_vecs[5], h_vecs[6], h_vecs[7],
+ set1_128(IV[0]), set1_128(IV[1]), set1_128(IV[2]), set1_128(IV[3]),
+ counter_low_vec, counter_high_vec, block_len_vec, block_flags_vec,
+ };
+ // The 7 rounds of the BLAKE3 compression function.
+ round_fn4(v, msg_vecs, 0);
+ round_fn4(v, msg_vecs, 1);
+ round_fn4(v, msg_vecs, 2);
+ round_fn4(v, msg_vecs, 3);
+ round_fn4(v, msg_vecs, 4);
+ round_fn4(v, msg_vecs, 5);
+ round_fn4(v, msg_vecs, 6);
+ // Feed-forward: the new chaining value is the XOR of the two halves.
+ h_vecs[0] = xor_128(v[0], v[8]);
+ h_vecs[1] = xor_128(v[1], v[9]);
+ h_vecs[2] = xor_128(v[2], v[10]);
+ h_vecs[3] = xor_128(v[3], v[11]);
+ h_vecs[4] = xor_128(v[4], v[12]);
+ h_vecs[5] = xor_128(v[5], v[13]);
+ h_vecs[6] = xor_128(v[6], v[14]);
+ h_vecs[7] = xor_128(v[7], v[15]);
+
+ // CHUNK_START (in flags_start) applies only to the first block.
+ block_flags = flags;
+ }
+
+ transpose_vecs_128(&h_vecs[0]);
+ transpose_vecs_128(&h_vecs[4]);
+ // The first four vecs now contain the first half of each output, and the
+ // second four vecs contain the second half of each output.
+ storeu_128(h_vecs[0], &out[0 * sizeof(__m128i)]);
+ storeu_128(h_vecs[4], &out[1 * sizeof(__m128i)]);
+ storeu_128(h_vecs[1], &out[2 * sizeof(__m128i)]);
+ storeu_128(h_vecs[5], &out[3 * sizeof(__m128i)]);
+ storeu_128(h_vecs[2], &out[4 * sizeof(__m128i)]);
+ storeu_128(h_vecs[6], &out[5 * sizeof(__m128i)]);
+ storeu_128(h_vecs[3], &out[6 * sizeof(__m128i)]);
+ storeu_128(h_vecs[7], &out[7 * sizeof(__m128i)]);
+}
+
+/*
+ * ----------------------------------------------------------------------------
+ * hash8_avx512
+ * ----------------------------------------------------------------------------
+ */
+
+// One full BLAKE3 round over 8 parallel hash states, one 32-bit word per
+// state in each 256-bit vector. Structure mirrors round_fn4: column G-mixes
+// first, then diagonal G-mixes via re-paired v-words.
+INLINE void round_fn8(__m256i v[16], __m256i m[16], size_t r) {
+ // Column mixing: first half of G (even message words, rot16/rot12).
+ v[0] = add_256(v[0], m[(size_t)MSG_SCHEDULE[r][0]]);
+ v[1] = add_256(v[1], m[(size_t)MSG_SCHEDULE[r][2]]);
+ v[2] = add_256(v[2], m[(size_t)MSG_SCHEDULE[r][4]]);
+ v[3] = add_256(v[3], m[(size_t)MSG_SCHEDULE[r][6]]);
+ v[0] = add_256(v[0], v[4]);
+ v[1] = add_256(v[1], v[5]);
+ v[2] = add_256(v[2], v[6]);
+ v[3] = add_256(v[3], v[7]);
+ v[12] = xor_256(v[12], v[0]);
+ v[13] = xor_256(v[13], v[1]);
+ v[14] = xor_256(v[14], v[2]);
+ v[15] = xor_256(v[15], v[3]);
+ v[12] = rot16_256(v[12]);
+ v[13] = rot16_256(v[13]);
+ v[14] = rot16_256(v[14]);
+ v[15] = rot16_256(v[15]);
+ v[8] = add_256(v[8], v[12]);
+ v[9] = add_256(v[9], v[13]);
+ v[10] = add_256(v[10], v[14]);
+ v[11] = add_256(v[11], v[15]);
+ v[4] = xor_256(v[4], v[8]);
+ v[5] = xor_256(v[5], v[9]);
+ v[6] = xor_256(v[6], v[10]);
+ v[7] = xor_256(v[7], v[11]);
+ v[4] = rot12_256(v[4]);
+ v[5] = rot12_256(v[5]);
+ v[6] = rot12_256(v[6]);
+ v[7] = rot12_256(v[7]);
+ // Column mixing: second half of G (odd message words, rot8/rot7).
+ v[0] = add_256(v[0], m[(size_t)MSG_SCHEDULE[r][1]]);
+ v[1] = add_256(v[1], m[(size_t)MSG_SCHEDULE[r][3]]);
+ v[2] = add_256(v[2], m[(size_t)MSG_SCHEDULE[r][5]]);
+ v[3] = add_256(v[3], m[(size_t)MSG_SCHEDULE[r][7]]);
+ v[0] = add_256(v[0], v[4]);
+ v[1] = add_256(v[1], v[5]);
+ v[2] = add_256(v[2], v[6]);
+ v[3] = add_256(v[3], v[7]);
+ v[12] = xor_256(v[12], v[0]);
+ v[13] = xor_256(v[13], v[1]);
+ v[14] = xor_256(v[14], v[2]);
+ v[15] = xor_256(v[15], v[3]);
+ v[12] = rot8_256(v[12]);
+ v[13] = rot8_256(v[13]);
+ v[14] = rot8_256(v[14]);
+ v[15] = rot8_256(v[15]);
+ v[8] = add_256(v[8], v[12]);
+ v[9] = add_256(v[9], v[13]);
+ v[10] = add_256(v[10], v[14]);
+ v[11] = add_256(v[11], v[15]);
+ v[4] = xor_256(v[4], v[8]);
+ v[5] = xor_256(v[5], v[9]);
+ v[6] = xor_256(v[6], v[10]);
+ v[7] = xor_256(v[7], v[11]);
+ v[4] = rot7_256(v[4]);
+ v[5] = rot7_256(v[5]);
+ v[6] = rot7_256(v[6]);
+ v[7] = rot7_256(v[7]);
+
+ // Diagonal mixing: shifted pairings replace explicit row rotations.
+ v[0] = add_256(v[0], m[(size_t)MSG_SCHEDULE[r][8]]);
+ v[1] = add_256(v[1], m[(size_t)MSG_SCHEDULE[r][10]]);
+ v[2] = add_256(v[2], m[(size_t)MSG_SCHEDULE[r][12]]);
+ v[3] = add_256(v[3], m[(size_t)MSG_SCHEDULE[r][14]]);
+ v[0] = add_256(v[0], v[5]);
+ v[1] = add_256(v[1], v[6]);
+ v[2] = add_256(v[2], v[7]);
+ v[3] = add_256(v[3], v[4]);
+ v[15] = xor_256(v[15], v[0]);
+ v[12] = xor_256(v[12], v[1]);
+ v[13] = xor_256(v[13], v[2]);
+ v[14] = xor_256(v[14], v[3]);
+ v[15] = rot16_256(v[15]);
+ v[12] = rot16_256(v[12]);
+ v[13] = rot16_256(v[13]);
+ v[14] = rot16_256(v[14]);
+ v[10] = add_256(v[10], v[15]);
+ v[11] = add_256(v[11], v[12]);
+ v[8] = add_256(v[8], v[13]);
+ v[9] = add_256(v[9], v[14]);
+ v[5] = xor_256(v[5], v[10]);
+ v[6] = xor_256(v[6], v[11]);
+ v[7] = xor_256(v[7], v[8]);
+ v[4] = xor_256(v[4], v[9]);
+ v[5] = rot12_256(v[5]);
+ v[6] = rot12_256(v[6]);
+ v[7] = rot12_256(v[7]);
+ v[4] = rot12_256(v[4]);
+ v[0] = add_256(v[0], m[(size_t)MSG_SCHEDULE[r][9]]);
+ v[1] = add_256(v[1], m[(size_t)MSG_SCHEDULE[r][11]]);
+ v[2] = add_256(v[2], m[(size_t)MSG_SCHEDULE[r][13]]);
+ v[3] = add_256(v[3], m[(size_t)MSG_SCHEDULE[r][15]]);
+ v[0] = add_256(v[0], v[5]);
+ v[1] = add_256(v[1], v[6]);
+ v[2] = add_256(v[2], v[7]);
+ v[3] = add_256(v[3], v[4]);
+ v[15] = xor_256(v[15], v[0]);
+ v[12] = xor_256(v[12], v[1]);
+ v[13] = xor_256(v[13], v[2]);
+ v[14] = xor_256(v[14], v[3]);
+ v[15] = rot8_256(v[15]);
+ v[12] = rot8_256(v[12]);
+ v[13] = rot8_256(v[13]);
+ v[14] = rot8_256(v[14]);
+ v[10] = add_256(v[10], v[15]);
+ v[11] = add_256(v[11], v[12]);
+ v[8] = add_256(v[8], v[13]);
+ v[9] = add_256(v[9], v[14]);
+ v[5] = xor_256(v[5], v[10]);
+ v[6] = xor_256(v[6], v[11]);
+ v[7] = xor_256(v[7], v[8]);
+ v[4] = xor_256(v[4], v[9]);
+ v[5] = rot7_256(v[5]);
+ v[6] = rot7_256(v[6]);
+ v[7] = rot7_256(v[7]);
+ v[4] = rot7_256(v[4]);
+}
+
+// Transpose an 8x8 matrix of 32-bit words held in eight __m256i vectors:
+// afterwards vecs[i] holds word i from each of the original eight vectors.
+INLINE void transpose_vecs_256(__m256i vecs[8]) {
+ // Interleave 32-bit lanes. The low unpack is lanes 00/11/44/55, and the high
+ // is 22/33/66/77.
+ __m256i ab_0145 = _mm256_unpacklo_epi32(vecs[0], vecs[1]);
+ __m256i ab_2367 = _mm256_unpackhi_epi32(vecs[0], vecs[1]);
+ __m256i cd_0145 = _mm256_unpacklo_epi32(vecs[2], vecs[3]);
+ __m256i cd_2367 = _mm256_unpackhi_epi32(vecs[2], vecs[3]);
+ __m256i ef_0145 = _mm256_unpacklo_epi32(vecs[4], vecs[5]);
+ __m256i ef_2367 = _mm256_unpackhi_epi32(vecs[4], vecs[5]);
+ __m256i gh_0145 = _mm256_unpacklo_epi32(vecs[6], vecs[7]);
+ __m256i gh_2367 = _mm256_unpackhi_epi32(vecs[6], vecs[7]);
+
+ // Interleave 64-bit lanes. The low unpack is lanes 00/22 and the high is
+ // 11/33.
+ __m256i abcd_04 = _mm256_unpacklo_epi64(ab_0145, cd_0145);
+ __m256i abcd_15 = _mm256_unpackhi_epi64(ab_0145, cd_0145);
+ __m256i abcd_26 = _mm256_unpacklo_epi64(ab_2367, cd_2367);
+ __m256i abcd_37 = _mm256_unpackhi_epi64(ab_2367, cd_2367);
+ __m256i efgh_04 = _mm256_unpacklo_epi64(ef_0145, gh_0145);
+ __m256i efgh_15 = _mm256_unpackhi_epi64(ef_0145, gh_0145);
+ __m256i efgh_26 = _mm256_unpacklo_epi64(ef_2367, gh_2367);
+ __m256i efgh_37 = _mm256_unpackhi_epi64(ef_2367, gh_2367);
+
+ // Interleave 128-bit lanes.
+ vecs[0] = _mm256_permute2x128_si256(abcd_04, efgh_04, 0x20);
+ vecs[1] = _mm256_permute2x128_si256(abcd_15, efgh_15, 0x20);
+ vecs[2] = _mm256_permute2x128_si256(abcd_26, efgh_26, 0x20);
+ vecs[3] = _mm256_permute2x128_si256(abcd_37, efgh_37, 0x20);
+ vecs[4] = _mm256_permute2x128_si256(abcd_04, efgh_04, 0x31);
+ vecs[5] = _mm256_permute2x128_si256(abcd_15, efgh_15, 0x31);
+ vecs[6] = _mm256_permute2x128_si256(abcd_26, efgh_26, 0x31);
+ vecs[7] = _mm256_permute2x128_si256(abcd_37, efgh_37, 0x31);
+}
+
+// Load one 64-byte block from each of the 8 inputs at `block_offset` and
+// transpose them into 16 message-word vectors: out[w] holds message word w
+// for all 8 lanes. Also prefetches the data 256 bytes ahead in each input.
+INLINE void transpose_msg_vecs8(const uint8_t *const *inputs,
+ size_t block_offset, __m256i out[16]) {
+ out[0] = loadu_256(&inputs[0][block_offset + 0 * sizeof(__m256i)]);
+ out[1] = loadu_256(&inputs[1][block_offset + 0 * sizeof(__m256i)]);
+ out[2] = loadu_256(&inputs[2][block_offset + 0 * sizeof(__m256i)]);
+ out[3] = loadu_256(&inputs[3][block_offset + 0 * sizeof(__m256i)]);
+ out[4] = loadu_256(&inputs[4][block_offset + 0 * sizeof(__m256i)]);
+ out[5] = loadu_256(&inputs[5][block_offset + 0 * sizeof(__m256i)]);
+ out[6] = loadu_256(&inputs[6][block_offset + 0 * sizeof(__m256i)]);
+ out[7] = loadu_256(&inputs[7][block_offset + 0 * sizeof(__m256i)]);
+ out[8] = loadu_256(&inputs[0][block_offset + 1 * sizeof(__m256i)]);
+ out[9] = loadu_256(&inputs[1][block_offset + 1 * sizeof(__m256i)]);
+ out[10] = loadu_256(&inputs[2][block_offset + 1 * sizeof(__m256i)]);
+ out[11] = loadu_256(&inputs[3][block_offset + 1 * sizeof(__m256i)]);
+ out[12] = loadu_256(&inputs[4][block_offset + 1 * sizeof(__m256i)]);
+ out[13] = loadu_256(&inputs[5][block_offset + 1 * sizeof(__m256i)]);
+ out[14] = loadu_256(&inputs[6][block_offset + 1 * sizeof(__m256i)]);
+ out[15] = loadu_256(&inputs[7][block_offset + 1 * sizeof(__m256i)]);
+ for (size_t i = 0; i < 8; ++i) {
+ _mm_prefetch((const void *)&inputs[i][block_offset + 256], _MM_HINT_T0);
+ }
+ // Each group of eight vectors holds an 8x8 word matrix; transpose each.
+ transpose_vecs_256(&out[0]);
+ transpose_vecs_256(&out[8]);
+}
+
+// Split the 64-bit block counter into per-lane low/high 32-bit halves for
+// 8 lanes. When `increment_counter` is true the lanes get counter+0..+7;
+// otherwise the mask zeroes the deltas so all lanes share the same counter.
+INLINE void load_counters8(uint64_t counter, bool increment_counter,
+ __m256i *out_lo, __m256i *out_hi) {
+ uint64_t mask = (increment_counter ? ~0 : 0);
+ __m512i mask_vec = _mm512_set1_epi64(mask);
+ __m512i deltas = _mm512_setr_epi64(0, 1, 2, 3, 4, 5, 6, 7);
+ deltas = _mm512_and_si512(mask_vec, deltas);
+ __m512i counters =
+ _mm512_add_epi64(_mm512_set1_epi64((int64_t)counter), deltas);
+ // Narrow the 64-bit counters to their low words, then to their high words.
+ *out_lo = _mm512_cvtepi64_epi32(counters);
+ *out_hi = _mm512_cvtepi64_epi32(_mm512_srli_epi64(counters, 32));
+}
+
+// Hash `blocks` consecutive 64-byte blocks from each of 8 inputs in
+// parallel (one lane per input) and write the eight 32-byte chaining
+// values contiguously to `out`. `flags_start`/`flags_end` are OR'd into
+// the first/last block's flags respectively.
+static
+void blake3_hash8_avx512(const uint8_t *const *inputs, size_t blocks,
+ const uint32_t key[8], uint64_t counter,
+ bool increment_counter, uint8_t flags,
+ uint8_t flags_start, uint8_t flags_end, uint8_t *out) {
+ __m256i h_vecs[8] = {
+ set1_256(key[0]), set1_256(key[1]), set1_256(key[2]), set1_256(key[3]),
+ set1_256(key[4]), set1_256(key[5]), set1_256(key[6]), set1_256(key[7]),
+ };
+ __m256i counter_low_vec, counter_high_vec;
+ load_counters8(counter, increment_counter, &counter_low_vec,
+ &counter_high_vec);
+ uint8_t block_flags = flags | flags_start;
+
+ for (size_t block = 0; block < blocks; block++) {
+ if (block + 1 == blocks) {
+ block_flags |= flags_end;
+ }
+ __m256i block_len_vec = set1_256(BLAKE3_BLOCK_LEN);
+ __m256i block_flags_vec = set1_256(block_flags);
+ __m256i msg_vecs[16];
+ transpose_msg_vecs8(inputs, block * BLAKE3_BLOCK_LEN, msg_vecs);
+
+ __m256i v[16] = {
+ h_vecs[0], h_vecs[1], h_vecs[2], h_vecs[3],
+ h_vecs[4], h_vecs[5], h_vecs[6], h_vecs[7],
+ set1_256(IV[0]), set1_256(IV[1]), set1_256(IV[2]), set1_256(IV[3]),
+ counter_low_vec, counter_high_vec, block_len_vec, block_flags_vec,
+ };
+ // The 7 rounds of the BLAKE3 compression function.
+ round_fn8(v, msg_vecs, 0);
+ round_fn8(v, msg_vecs, 1);
+ round_fn8(v, msg_vecs, 2);
+ round_fn8(v, msg_vecs, 3);
+ round_fn8(v, msg_vecs, 4);
+ round_fn8(v, msg_vecs, 5);
+ round_fn8(v, msg_vecs, 6);
+ // Feed-forward: the new chaining value is the XOR of the two halves.
+ h_vecs[0] = xor_256(v[0], v[8]);
+ h_vecs[1] = xor_256(v[1], v[9]);
+ h_vecs[2] = xor_256(v[2], v[10]);
+ h_vecs[3] = xor_256(v[3], v[11]);
+ h_vecs[4] = xor_256(v[4], v[12]);
+ h_vecs[5] = xor_256(v[5], v[13]);
+ h_vecs[6] = xor_256(v[6], v[14]);
+ h_vecs[7] = xor_256(v[7], v[15]);
+
+ // CHUNK_START (in flags_start) applies only to the first block.
+ block_flags = flags;
+ }
+
+ // Transpose back to one contiguous 32-byte chaining value per input.
+ transpose_vecs_256(h_vecs);
+ storeu_256(h_vecs[0], &out[0 * sizeof(__m256i)]);
+ storeu_256(h_vecs[1], &out[1 * sizeof(__m256i)]);
+ storeu_256(h_vecs[2], &out[2 * sizeof(__m256i)]);
+ storeu_256(h_vecs[3], &out[3 * sizeof(__m256i)]);
+ storeu_256(h_vecs[4], &out[4 * sizeof(__m256i)]);
+ storeu_256(h_vecs[5], &out[5 * sizeof(__m256i)]);
+ storeu_256(h_vecs[6], &out[6 * sizeof(__m256i)]);
+ storeu_256(h_vecs[7], &out[7 * sizeof(__m256i)]);
+}
+
+/*
+ * ----------------------------------------------------------------------------
+ * hash16_avx512
+ * ----------------------------------------------------------------------------
+ */
+
+// One full BLAKE3 round over 16 parallel hash states, one 32-bit word per
+// state in each 512-bit vector. Structure mirrors round_fn4: column G-mixes
+// first, then diagonal G-mixes via re-paired v-words.
+INLINE void round_fn16(__m512i v[16], __m512i m[16], size_t r) {
+ // Column mixing: first half of G (even message words, rot16/rot12).
+ v[0] = add_512(v[0], m[(size_t)MSG_SCHEDULE[r][0]]);
+ v[1] = add_512(v[1], m[(size_t)MSG_SCHEDULE[r][2]]);
+ v[2] = add_512(v[2], m[(size_t)MSG_SCHEDULE[r][4]]);
+ v[3] = add_512(v[3], m[(size_t)MSG_SCHEDULE[r][6]]);
+ v[0] = add_512(v[0], v[4]);
+ v[1] = add_512(v[1], v[5]);
+ v[2] = add_512(v[2], v[6]);
+ v[3] = add_512(v[3], v[7]);
+ v[12] = xor_512(v[12], v[0]);
+ v[13] = xor_512(v[13], v[1]);
+ v[14] = xor_512(v[14], v[2]);
+ v[15] = xor_512(v[15], v[3]);
+ v[12] = rot16_512(v[12]);
+ v[13] = rot16_512(v[13]);
+ v[14] = rot16_512(v[14]);
+ v[15] = rot16_512(v[15]);
+ v[8] = add_512(v[8], v[12]);
+ v[9] = add_512(v[9], v[13]);
+ v[10] = add_512(v[10], v[14]);
+ v[11] = add_512(v[11], v[15]);
+ v[4] = xor_512(v[4], v[8]);
+ v[5] = xor_512(v[5], v[9]);
+ v[6] = xor_512(v[6], v[10]);
+ v[7] = xor_512(v[7], v[11]);
+ v[4] = rot12_512(v[4]);
+ v[5] = rot12_512(v[5]);
+ v[6] = rot12_512(v[6]);
+ v[7] = rot12_512(v[7]);
+ // Column mixing: second half of G (odd message words, rot8/rot7).
+ v[0] = add_512(v[0], m[(size_t)MSG_SCHEDULE[r][1]]);
+ v[1] = add_512(v[1], m[(size_t)MSG_SCHEDULE[r][3]]);
+ v[2] = add_512(v[2], m[(size_t)MSG_SCHEDULE[r][5]]);
+ v[3] = add_512(v[3], m[(size_t)MSG_SCHEDULE[r][7]]);
+ v[0] = add_512(v[0], v[4]);
+ v[1] = add_512(v[1], v[5]);
+ v[2] = add_512(v[2], v[6]);
+ v[3] = add_512(v[3], v[7]);
+ v[12] = xor_512(v[12], v[0]);
+ v[13] = xor_512(v[13], v[1]);
+ v[14] = xor_512(v[14], v[2]);
+ v[15] = xor_512(v[15], v[3]);
+ v[12] = rot8_512(v[12]);
+ v[13] = rot8_512(v[13]);
+ v[14] = rot8_512(v[14]);
+ v[15] = rot8_512(v[15]);
+ v[8] = add_512(v[8], v[12]);
+ v[9] = add_512(v[9], v[13]);
+ v[10] = add_512(v[10], v[14]);
+ v[11] = add_512(v[11], v[15]);
+ v[4] = xor_512(v[4], v[8]);
+ v[5] = xor_512(v[5], v[9]);
+ v[6] = xor_512(v[6], v[10]);
+ v[7] = xor_512(v[7], v[11]);
+ v[4] = rot7_512(v[4]);
+ v[5] = rot7_512(v[5]);
+ v[6] = rot7_512(v[6]);
+ v[7] = rot7_512(v[7]);
+
+ // Diagonal mixing: shifted pairings replace explicit row rotations.
+ v[0] = add_512(v[0], m[(size_t)MSG_SCHEDULE[r][8]]);
+ v[1] = add_512(v[1], m[(size_t)MSG_SCHEDULE[r][10]]);
+ v[2] = add_512(v[2], m[(size_t)MSG_SCHEDULE[r][12]]);
+ v[3] = add_512(v[3], m[(size_t)MSG_SCHEDULE[r][14]]);
+ v[0] = add_512(v[0], v[5]);
+ v[1] = add_512(v[1], v[6]);
+ v[2] = add_512(v[2], v[7]);
+ v[3] = add_512(v[3], v[4]);
+ v[15] = xor_512(v[15], v[0]);
+ v[12] = xor_512(v[12], v[1]);
+ v[13] = xor_512(v[13], v[2]);
+ v[14] = xor_512(v[14], v[3]);
+ v[15] = rot16_512(v[15]);
+ v[12] = rot16_512(v[12]);
+ v[13] = rot16_512(v[13]);
+ v[14] = rot16_512(v[14]);
+ v[10] = add_512(v[10], v[15]);
+ v[11] = add_512(v[11], v[12]);
+ v[8] = add_512(v[8], v[13]);
+ v[9] = add_512(v[9], v[14]);
+ v[5] = xor_512(v[5], v[10]);
+ v[6] = xor_512(v[6], v[11]);
+ v[7] = xor_512(v[7], v[8]);
+ v[4] = xor_512(v[4], v[9]);
+ v[5] = rot12_512(v[5]);
+ v[6] = rot12_512(v[6]);
+ v[7] = rot12_512(v[7]);
+ v[4] = rot12_512(v[4]);
+ v[0] = add_512(v[0], m[(size_t)MSG_SCHEDULE[r][9]]);
+ v[1] = add_512(v[1], m[(size_t)MSG_SCHEDULE[r][11]]);
+ v[2] = add_512(v[2], m[(size_t)MSG_SCHEDULE[r][13]]);
+ v[3] = add_512(v[3], m[(size_t)MSG_SCHEDULE[r][15]]);
+ v[0] = add_512(v[0], v[5]);
+ v[1] = add_512(v[1], v[6]);
+ v[2] = add_512(v[2], v[7]);
+ v[3] = add_512(v[3], v[4]);
+ v[15] = xor_512(v[15], v[0]);
+ v[12] = xor_512(v[12], v[1]);
+ v[13] = xor_512(v[13], v[2]);
+ v[14] = xor_512(v[14], v[3]);
+ v[15] = rot8_512(v[15]);
+ v[12] = rot8_512(v[12]);
+ v[13] = rot8_512(v[13]);
+ v[14] = rot8_512(v[14]);
+ v[10] = add_512(v[10], v[15]);
+ v[11] = add_512(v[11], v[12]);
+ v[8] = add_512(v[8], v[13]);
+ v[9] = add_512(v[9], v[14]);
+ v[5] = xor_512(v[5], v[10]);
+ v[6] = xor_512(v[6], v[11]);
+ v[7] = xor_512(v[7], v[8]);
+ v[4] = xor_512(v[4], v[9]);
+ v[5] = rot7_512(v[5]);
+ v[6] = rot7_512(v[6]);
+ v[7] = rot7_512(v[7]);
+ v[4] = rot7_512(v[4]);
+}
+
+// 0b10001000, or lanes a0/a2/b0/b2 in little-endian order
+#define LO_IMM8 0x88
+
+// Gather the even-indexed 128-bit lanes of a and b into one vector.
+INLINE __m512i unpack_lo_128(__m512i a, __m512i b) {
+ return _mm512_shuffle_i32x4(a, b, LO_IMM8);
+}
+
+// 0b11011101, or lanes a1/a3/b1/b3 in little-endian order
+#define HI_IMM8 0xdd
+
+// Gather the odd-indexed 128-bit lanes of a and b into one vector.
+INLINE __m512i unpack_hi_128(__m512i a, __m512i b) {
+ return _mm512_shuffle_i32x4(a, b, HI_IMM8);
+}
+
+// Transpose a 16x16 matrix of 32-bit words held in sixteen __m512i vectors:
+// afterwards vecs[i] holds word i from each of the original sixteen vectors.
+INLINE void transpose_vecs_512(__m512i vecs[16]) {
+ // Interleave 32-bit lanes. The _0 unpack is lanes
+ // 0/0/1/1/4/4/5/5/8/8/9/9/12/12/13/13, and the _2 unpack is lanes
+ // 2/2/3/3/6/6/7/7/10/10/11/11/14/14/15/15.
+ __m512i ab_0 = _mm512_unpacklo_epi32(vecs[0], vecs[1]);
+ __m512i ab_2 = _mm512_unpackhi_epi32(vecs[0], vecs[1]);
+ __m512i cd_0 = _mm512_unpacklo_epi32(vecs[2], vecs[3]);
+ __m512i cd_2 = _mm512_unpackhi_epi32(vecs[2], vecs[3]);
+ __m512i ef_0 = _mm512_unpacklo_epi32(vecs[4], vecs[5]);
+ __m512i ef_2 = _mm512_unpackhi_epi32(vecs[4], vecs[5]);
+ __m512i gh_0 = _mm512_unpacklo_epi32(vecs[6], vecs[7]);
+ __m512i gh_2 = _mm512_unpackhi_epi32(vecs[6], vecs[7]);
+ __m512i ij_0 = _mm512_unpacklo_epi32(vecs[8], vecs[9]);
+ __m512i ij_2 = _mm512_unpackhi_epi32(vecs[8], vecs[9]);
+ __m512i kl_0 = _mm512_unpacklo_epi32(vecs[10], vecs[11]);
+ __m512i kl_2 = _mm512_unpackhi_epi32(vecs[10], vecs[11]);
+ __m512i mn_0 = _mm512_unpacklo_epi32(vecs[12], vecs[13]);
+ __m512i mn_2 = _mm512_unpackhi_epi32(vecs[12], vecs[13]);
+ __m512i op_0 = _mm512_unpacklo_epi32(vecs[14], vecs[15]);
+ __m512i op_2 = _mm512_unpackhi_epi32(vecs[14], vecs[15]);
+
+ // Interleave 64-bit lanes. The _0 unpack is lanes
+ // 0/0/0/0/4/4/4/4/8/8/8/8/12/12/12/12, the _1 unpack is lanes
+ // 1/1/1/1/5/5/5/5/9/9/9/9/13/13/13/13, the _2 unpack is lanes
+ // 2/2/2/2/6/6/6/6/10/10/10/10/14/14/14/14, and the _3 unpack is lanes
+ // 3/3/3/3/7/7/7/7/11/11/11/11/15/15/15/15.
+ __m512i abcd_0 = _mm512_unpacklo_epi64(ab_0, cd_0);
+ __m512i abcd_1 = _mm512_unpackhi_epi64(ab_0, cd_0);
+ __m512i abcd_2 = _mm512_unpacklo_epi64(ab_2, cd_2);
+ __m512i abcd_3 = _mm512_unpackhi_epi64(ab_2, cd_2);
+ __m512i efgh_0 = _mm512_unpacklo_epi64(ef_0, gh_0);
+ __m512i efgh_1 = _mm512_unpackhi_epi64(ef_0, gh_0);
+ __m512i efgh_2 = _mm512_unpacklo_epi64(ef_2, gh_2);
+ __m512i efgh_3 = _mm512_unpackhi_epi64(ef_2, gh_2);
+ __m512i ijkl_0 = _mm512_unpacklo_epi64(ij_0, kl_0);
+ __m512i ijkl_1 = _mm512_unpackhi_epi64(ij_0, kl_0);
+ __m512i ijkl_2 = _mm512_unpacklo_epi64(ij_2, kl_2);
+ __m512i ijkl_3 = _mm512_unpackhi_epi64(ij_2, kl_2);
+ __m512i mnop_0 = _mm512_unpacklo_epi64(mn_0, op_0);
+ __m512i mnop_1 = _mm512_unpackhi_epi64(mn_0, op_0);
+ __m512i mnop_2 = _mm512_unpacklo_epi64(mn_2, op_2);
+ __m512i mnop_3 = _mm512_unpackhi_epi64(mn_2, op_2);
+
+ // Interleave 128-bit lanes. The _0 unpack is
+ // 0/0/0/0/8/8/8/8/0/0/0/0/8/8/8/8, the _1 unpack is
+ // 1/1/1/1/9/9/9/9/1/1/1/1/9/9/9/9, and so on.
+ __m512i abcdefgh_0 = unpack_lo_128(abcd_0, efgh_0);
+ __m512i abcdefgh_1 = unpack_lo_128(abcd_1, efgh_1);
+ __m512i abcdefgh_2 = unpack_lo_128(abcd_2, efgh_2);
+ __m512i abcdefgh_3 = unpack_lo_128(abcd_3, efgh_3);
+ __m512i abcdefgh_4 = unpack_hi_128(abcd_0, efgh_0);
+ __m512i abcdefgh_5 = unpack_hi_128(abcd_1, efgh_1);
+ __m512i abcdefgh_6 = unpack_hi_128(abcd_2, efgh_2);
+ __m512i abcdefgh_7 = unpack_hi_128(abcd_3, efgh_3);
+ __m512i ijklmnop_0 = unpack_lo_128(ijkl_0, mnop_0);
+ __m512i ijklmnop_1 = unpack_lo_128(ijkl_1, mnop_1);
+ __m512i ijklmnop_2 = unpack_lo_128(ijkl_2, mnop_2);
+ __m512i ijklmnop_3 = unpack_lo_128(ijkl_3, mnop_3);
+ __m512i ijklmnop_4 = unpack_hi_128(ijkl_0, mnop_0);
+ __m512i ijklmnop_5 = unpack_hi_128(ijkl_1, mnop_1);
+ __m512i ijklmnop_6 = unpack_hi_128(ijkl_2, mnop_2);
+ __m512i ijklmnop_7 = unpack_hi_128(ijkl_3, mnop_3);
+
+ // Interleave 128-bit lanes again for the final outputs.
+ vecs[0] = unpack_lo_128(abcdefgh_0, ijklmnop_0);
+ vecs[1] = unpack_lo_128(abcdefgh_1, ijklmnop_1);
+ vecs[2] = unpack_lo_128(abcdefgh_2, ijklmnop_2);
+ vecs[3] = unpack_lo_128(abcdefgh_3, ijklmnop_3);
+ vecs[4] = unpack_lo_128(abcdefgh_4, ijklmnop_4);
+ vecs[5] = unpack_lo_128(abcdefgh_5, ijklmnop_5);
+ vecs[6] = unpack_lo_128(abcdefgh_6, ijklmnop_6);
+ vecs[7] = unpack_lo_128(abcdefgh_7, ijklmnop_7);
+ vecs[8] = unpack_hi_128(abcdefgh_0, ijklmnop_0);
+ vecs[9] = unpack_hi_128(abcdefgh_1, ijklmnop_1);
+ vecs[10] = unpack_hi_128(abcdefgh_2, ijklmnop_2);
+ vecs[11] = unpack_hi_128(abcdefgh_3, ijklmnop_3);
+ vecs[12] = unpack_hi_128(abcdefgh_4, ijklmnop_4);
+ vecs[13] = unpack_hi_128(abcdefgh_5, ijklmnop_5);
+ vecs[14] = unpack_hi_128(abcdefgh_6, ijklmnop_6);
+ vecs[15] = unpack_hi_128(abcdefgh_7, ijklmnop_7);
+}
+
+// Load one 64-byte block from each of the 16 inputs at `block_offset` and
+// transpose them into 16 message-word vectors: out[w] holds message word w
+// for all 16 lanes. Also prefetches the data 256 bytes ahead in each input.
+INLINE void transpose_msg_vecs16(const uint8_t *const *inputs,
+ size_t block_offset, __m512i out[16]) {
+ out[0] = loadu_512(&inputs[0][block_offset]);
+ out[1] = loadu_512(&inputs[1][block_offset]);
+ out[2] = loadu_512(&inputs[2][block_offset]);
+ out[3] = loadu_512(&inputs[3][block_offset]);
+ out[4] = loadu_512(&inputs[4][block_offset]);
+ out[5] = loadu_512(&inputs[5][block_offset]);
+ out[6] = loadu_512(&inputs[6][block_offset]);
+ out[7] = loadu_512(&inputs[7][block_offset]);
+ out[8] = loadu_512(&inputs[8][block_offset]);
+ out[9] = loadu_512(&inputs[9][block_offset]);
+ out[10] = loadu_512(&inputs[10][block_offset]);
+ out[11] = loadu_512(&inputs[11][block_offset]);
+ out[12] = loadu_512(&inputs[12][block_offset]);
+ out[13] = loadu_512(&inputs[13][block_offset]);
+ out[14] = loadu_512(&inputs[14][block_offset]);
+ out[15] = loadu_512(&inputs[15][block_offset]);
+ for (size_t i = 0; i < 16; ++i) {
+ _mm_prefetch((const void *)&inputs[i][block_offset + 256], _MM_HINT_T0);
+ }
+ transpose_vecs_512(out);
+}
+
+// Split the 64-bit block counter into per-lane low/high 32-bit halves for
+// 16 lanes. Unlike the 4/8-lane versions, this works entirely in 32-bit
+// arithmetic: the low words get counter+0..+15 (or +0 everywhere when
+// `increment_counter` is false), and a carry into the high word is detected
+// by the unsigned wraparound test (l < add1 after the addition).
+INLINE void load_counters16(uint64_t counter, bool increment_counter,
+ __m512i *out_lo, __m512i *out_hi) {
+ const __m512i mask = _mm512_set1_epi32(-(int32_t)increment_counter);
+ const __m512i add0 = _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ const __m512i add1 = _mm512_and_si512(mask, add0);
+ __m512i l = _mm512_add_epi32(_mm512_set1_epi32((int32_t)counter), add1);
+ // Lanes whose low word wrapped get +1 on the high word.
+ __mmask16 carry = _mm512_cmp_epu32_mask(l, add1, _MM_CMPINT_LT);
+ __m512i h = _mm512_mask_add_epi32(_mm512_set1_epi32((int32_t)(counter >> 32)), carry, _mm512_set1_epi32((int32_t)(counter >> 32)), _mm512_set1_epi32(1));
+ *out_lo = l;
+ *out_hi = h;
+}
+
+static
+void blake3_hash16_avx512(const uint8_t *const *inputs, size_t blocks,
+ const uint32_t key[8], uint64_t counter,
+ bool increment_counter, uint8_t flags,
+ uint8_t flags_start, uint8_t flags_end,
+ uint8_t *out) {
+ __m512i h_vecs[8] = {
+ set1_512(key[0]), set1_512(key[1]), set1_512(key[2]), set1_512(key[3]),
+ set1_512(key[4]), set1_512(key[5]), set1_512(key[6]), set1_512(key[7]),
+ };
+ __m512i counter_low_vec, counter_high_vec;
+ load_counters16(counter, increment_counter, &counter_low_vec,
+ &counter_high_vec);
+ uint8_t block_flags = flags | flags_start;
+
+ for (size_t block = 0; block < blocks; block++) {
+ if (block + 1 == blocks) {
+ block_flags |= flags_end;
+ }
+ __m512i block_len_vec = set1_512(BLAKE3_BLOCK_LEN);
+ __m512i block_flags_vec = set1_512(block_flags);
+ __m512i msg_vecs[16];
+ transpose_msg_vecs16(inputs, block * BLAKE3_BLOCK_LEN, msg_vecs);
+
+ __m512i v[16] = {
+ h_vecs[0], h_vecs[1], h_vecs[2], h_vecs[3],
+ h_vecs[4], h_vecs[5], h_vecs[6], h_vecs[7],
+ set1_512(IV[0]), set1_512(IV[1]), set1_512(IV[2]), set1_512(IV[3]),
+ counter_low_vec, counter_high_vec, block_len_vec, block_flags_vec,
+ };
+ round_fn16(v, msg_vecs, 0);
+ round_fn16(v, msg_vecs, 1);
+ round_fn16(v, msg_vecs, 2);
+ round_fn16(v, msg_vecs, 3);
+ round_fn16(v, msg_vecs, 4);
+ round_fn16(v, msg_vecs, 5);
+ round_fn16(v, msg_vecs, 6);
+ h_vecs[0] = xor_512(v[0], v[8]);
+ h_vecs[1] = xor_512(v[1], v[9]);
+ h_vecs[2] = xor_512(v[2], v[10]);
+ h_vecs[3] = xor_512(v[3], v[11]);
+ h_vecs[4] = xor_512(v[4], v[12]);
+ h_vecs[5] = xor_512(v[5], v[13]);
+ h_vecs[6] = xor_512(v[6], v[14]);
+ h_vecs[7] = xor_512(v[7], v[15]);
+
+ block_flags = flags;
+ }
+
+ // transpose_vecs_512 operates on a 16x16 matrix of words, but we only have 8
+ // state vectors. Pad the matrix with zeros. After transposition, store the
+ // lower half of each vector.
+ __m512i padded[16] = {
+ h_vecs[0], h_vecs[1], h_vecs[2], h_vecs[3],
+ h_vecs[4], h_vecs[5], h_vecs[6], h_vecs[7],
+ set1_512(0), set1_512(0), set1_512(0), set1_512(0),
+ set1_512(0), set1_512(0), set1_512(0), set1_512(0),
+ };
+ transpose_vecs_512(padded);
+ _mm256_mask_storeu_epi32(&out[0 * sizeof(__m256i)], (__mmask8)-1, _mm512_castsi512_si256(padded[0]));
+ _mm256_mask_storeu_epi32(&out[1 * sizeof(__m256i)], (__mmask8)-1, _mm512_castsi512_si256(padded[1]));
+ _mm256_mask_storeu_epi32(&out[2 * sizeof(__m256i)], (__mmask8)-1, _mm512_castsi512_si256(padded[2]));
+ _mm256_mask_storeu_epi32(&out[3 * sizeof(__m256i)], (__mmask8)-1, _mm512_castsi512_si256(padded[3]));
+ _mm256_mask_storeu_epi32(&out[4 * sizeof(__m256i)], (__mmask8)-1, _mm512_castsi512_si256(padded[4]));
+ _mm256_mask_storeu_epi32(&out[5 * sizeof(__m256i)], (__mmask8)-1, _mm512_castsi512_si256(padded[5]));
+ _mm256_mask_storeu_epi32(&out[6 * sizeof(__m256i)], (__mmask8)-1, _mm512_castsi512_si256(padded[6]));
+ _mm256_mask_storeu_epi32(&out[7 * sizeof(__m256i)], (__mmask8)-1, _mm512_castsi512_si256(padded[7]));
+ _mm256_mask_storeu_epi32(&out[8 * sizeof(__m256i)], (__mmask8)-1, _mm512_castsi512_si256(padded[8]));
+ _mm256_mask_storeu_epi32(&out[9 * sizeof(__m256i)], (__mmask8)-1, _mm512_castsi512_si256(padded[9]));
+ _mm256_mask_storeu_epi32(&out[10 * sizeof(__m256i)], (__mmask8)-1, _mm512_castsi512_si256(padded[10]));
+ _mm256_mask_storeu_epi32(&out[11 * sizeof(__m256i)], (__mmask8)-1, _mm512_castsi512_si256(padded[11]));
+ _mm256_mask_storeu_epi32(&out[12 * sizeof(__m256i)], (__mmask8)-1, _mm512_castsi512_si256(padded[12]));
+ _mm256_mask_storeu_epi32(&out[13 * sizeof(__m256i)], (__mmask8)-1, _mm512_castsi512_si256(padded[13]));
+ _mm256_mask_storeu_epi32(&out[14 * sizeof(__m256i)], (__mmask8)-1, _mm512_castsi512_si256(padded[14]));
+ _mm256_mask_storeu_epi32(&out[15 * sizeof(__m256i)], (__mmask8)-1, _mm512_castsi512_si256(padded[15]));
+}
+
+/*
+ * ----------------------------------------------------------------------------
+ * hash_many_avx512
+ * ----------------------------------------------------------------------------
+ */
+
+INLINE void hash_one_avx512(const uint8_t *input, size_t blocks,
+ const uint32_t key[8], uint64_t counter,
+ uint8_t flags, uint8_t flags_start,
+ uint8_t flags_end, uint8_t out[BLAKE3_OUT_LEN]) {
+ uint32_t cv[8];
+ memcpy(cv, key, BLAKE3_KEY_LEN);
+ uint8_t block_flags = flags | flags_start;
+ while (blocks > 0) {
+ if (blocks == 1) {
+ block_flags |= flags_end;
+ }
+ blake3_compress_in_place_avx512(cv, input, BLAKE3_BLOCK_LEN, counter,
+ block_flags);
+ input = &input[BLAKE3_BLOCK_LEN];
+ blocks -= 1;
+ block_flags = flags;
+ }
+ memcpy(out, cv, BLAKE3_OUT_LEN);
+}
+
+void blake3_hash_many_avx512(const uint8_t *const *inputs, size_t num_inputs,
+ size_t blocks, const uint32_t key[8],
+ uint64_t counter, bool increment_counter,
+ uint8_t flags, uint8_t flags_start,
+ uint8_t flags_end, uint8_t *out) {
+ while (num_inputs >= 16) {
+ blake3_hash16_avx512(inputs, blocks, key, counter, increment_counter, flags,
+ flags_start, flags_end, out);
+ if (increment_counter) {
+ counter += 16;
+ }
+ inputs += 16;
+ num_inputs -= 16;
+ out = &out[16 * BLAKE3_OUT_LEN];
+ }
+ while (num_inputs >= 8) {
+ blake3_hash8_avx512(inputs, blocks, key, counter, increment_counter, flags,
+ flags_start, flags_end, out);
+ if (increment_counter) {
+ counter += 8;
+ }
+ inputs += 8;
+ num_inputs -= 8;
+ out = &out[8 * BLAKE3_OUT_LEN];
+ }
+ while (num_inputs >= 4) {
+ blake3_hash4_avx512(inputs, blocks, key, counter, increment_counter, flags,
+ flags_start, flags_end, out);
+ if (increment_counter) {
+ counter += 4;
+ }
+ inputs += 4;
+ num_inputs -= 4;
+ out = &out[4 * BLAKE3_OUT_LEN];
+ }
+ while (num_inputs > 0) {
+ hash_one_avx512(inputs[0], blocks, key, counter, flags, flags_start,
+ flags_end, out);
+ if (increment_counter) {
+ counter += 1;
+ }
+ inputs += 1;
+ num_inputs -= 1;
+ out = &out[BLAKE3_OUT_LEN];
+ }
+}
diff --git a/contrib/libs/llvm16/lib/Support/BLAKE3/blake3_avx512_x86-64_windows_msvc.asm b/contrib/libs/llvm16/lib/Support/BLAKE3/blake3_avx512_x86-64_windows_msvc.asm
new file mode 100644
index 0000000000..f13d1b260a
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Support/BLAKE3/blake3_avx512_x86-64_windows_msvc.asm
@@ -0,0 +1,2634 @@
+public _llvm_blake3_hash_many_avx512
+public llvm_blake3_hash_many_avx512
+public llvm_blake3_compress_in_place_avx512
+public _llvm_blake3_compress_in_place_avx512
+public llvm_blake3_compress_xof_avx512
+public _llvm_blake3_compress_xof_avx512
+
+_TEXT SEGMENT ALIGN(16) 'CODE'
+
+ALIGN 16
+llvm_blake3_hash_many_avx512 PROC
+_llvm_blake3_hash_many_avx512 PROC
+ push r15
+ push r14
+ push r13
+ push r12
+ push rdi
+ push rsi
+ push rbx
+ push rbp
+ mov rbp, rsp
+ sub rsp, 304
+ and rsp, 0FFFFFFFFFFFFFFC0H
+ vmovdqa xmmword ptr [rsp+90H], xmm6
+ vmovdqa xmmword ptr [rsp+0A0H], xmm7
+ vmovdqa xmmword ptr [rsp+0B0H], xmm8
+ vmovdqa xmmword ptr [rsp+0C0H], xmm9
+ vmovdqa xmmword ptr [rsp+0D0H], xmm10
+ vmovdqa xmmword ptr [rsp+0E0H], xmm11
+ vmovdqa xmmword ptr [rsp+0F0H], xmm12
+ vmovdqa xmmword ptr [rsp+100H], xmm13
+ vmovdqa xmmword ptr [rsp+110H], xmm14
+ vmovdqa xmmword ptr [rsp+120H], xmm15
+ mov rdi, rcx
+ mov rsi, rdx
+ mov rdx, r8
+ mov rcx, r9
+ mov r8, qword ptr [rbp+68H]
+ movzx r9, byte ptr [rbp+70H]
+ neg r9
+ kmovw k1, r9d
+ vmovd xmm0, r8d
+ vpbroadcastd ymm0, xmm0
+ shr r8, 32
+ vmovd xmm1, r8d
+ vpbroadcastd ymm1, xmm1
+ vmovdqa ymm4, ymm1
+ vmovdqa ymm5, ymm1
+ vpaddd ymm2, ymm0, ymmword ptr [ADD0]
+ vpaddd ymm3, ymm0, ymmword ptr [ADD0+32]
+ vpcmpud k2, ymm2, ymm0, 1
+ vpcmpud k3, ymm3, ymm0, 1
+ ; XXX: ml64.exe does not currently understand the syntax. We use a workaround.
+ vpbroadcastd ymm6, dword ptr [ADD1]
+ vpaddd ymm4 {k2}, ymm4, ymm6
+ vpaddd ymm5 {k3}, ymm5, ymm6
+ ; vpaddd ymm4 {k2}, ymm4, dword ptr [ADD1] {1to8}
+ ; vpaddd ymm5 {k3}, ymm5, dword ptr [ADD1] {1to8}
+ knotw k2, k1
+ vmovdqa32 ymm2 {k2}, ymm0
+ vmovdqa32 ymm3 {k2}, ymm0
+ vmovdqa32 ymm4 {k2}, ymm1
+ vmovdqa32 ymm5 {k2}, ymm1
+ vmovdqa ymmword ptr [rsp], ymm2
+ vmovdqa ymmword ptr [rsp+20H], ymm3
+ vmovdqa ymmword ptr [rsp+40H], ymm4
+ vmovdqa ymmword ptr [rsp+60H], ymm5
+ shl rdx, 6
+ mov qword ptr [rsp+80H], rdx
+ cmp rsi, 16
+ jc final15blocks
+outerloop16:
+ vpbroadcastd zmm0, dword ptr [rcx]
+ vpbroadcastd zmm1, dword ptr [rcx+1H*4H]
+ vpbroadcastd zmm2, dword ptr [rcx+2H*4H]
+ vpbroadcastd zmm3, dword ptr [rcx+3H*4H]
+ vpbroadcastd zmm4, dword ptr [rcx+4H*4H]
+ vpbroadcastd zmm5, dword ptr [rcx+5H*4H]
+ vpbroadcastd zmm6, dword ptr [rcx+6H*4H]
+ vpbroadcastd zmm7, dword ptr [rcx+7H*4H]
+ movzx eax, byte ptr [rbp+78H]
+ movzx ebx, byte ptr [rbp+80H]
+ or eax, ebx
+ xor edx, edx
+ALIGN 16
+innerloop16:
+ movzx ebx, byte ptr [rbp+88H]
+ or ebx, eax
+ add rdx, 64
+ cmp rdx, qword ptr [rsp+80H]
+ cmove eax, ebx
+ mov dword ptr [rsp+88H], eax
+ mov r8, qword ptr [rdi]
+ mov r9, qword ptr [rdi+8H]
+ mov r10, qword ptr [rdi+10H]
+ mov r11, qword ptr [rdi+18H]
+ mov r12, qword ptr [rdi+40H]
+ mov r13, qword ptr [rdi+48H]
+ mov r14, qword ptr [rdi+50H]
+ mov r15, qword ptr [rdi+58H]
+ vmovdqu32 ymm16, ymmword ptr [rdx+r8-2H*20H]
+ vinserti64x4 zmm16, zmm16, ymmword ptr [rdx+r12-2H*20H], 01H
+ vmovdqu32 ymm17, ymmword ptr [rdx+r9-2H*20H]
+ vinserti64x4 zmm17, zmm17, ymmword ptr [rdx+r13-2H*20H], 01H
+ vpunpcklqdq zmm8, zmm16, zmm17
+ vpunpckhqdq zmm9, zmm16, zmm17
+ vmovdqu32 ymm18, ymmword ptr [rdx+r10-2H*20H]
+ vinserti64x4 zmm18, zmm18, ymmword ptr [rdx+r14-2H*20H], 01H
+ vmovdqu32 ymm19, ymmword ptr [rdx+r11-2H*20H]
+ vinserti64x4 zmm19, zmm19, ymmword ptr [rdx+r15-2H*20H], 01H
+ vpunpcklqdq zmm10, zmm18, zmm19
+ vpunpckhqdq zmm11, zmm18, zmm19
+ mov r8, qword ptr [rdi+20H]
+ mov r9, qword ptr [rdi+28H]
+ mov r10, qword ptr [rdi+30H]
+ mov r11, qword ptr [rdi+38H]
+ mov r12, qword ptr [rdi+60H]
+ mov r13, qword ptr [rdi+68H]
+ mov r14, qword ptr [rdi+70H]
+ mov r15, qword ptr [rdi+78H]
+ vmovdqu32 ymm16, ymmword ptr [rdx+r8-2H*20H]
+ vinserti64x4 zmm16, zmm16, ymmword ptr [rdx+r12-2H*20H], 01H
+ vmovdqu32 ymm17, ymmword ptr [rdx+r9-2H*20H]
+ vinserti64x4 zmm17, zmm17, ymmword ptr [rdx+r13-2H*20H], 01H
+ vpunpcklqdq zmm12, zmm16, zmm17
+ vpunpckhqdq zmm13, zmm16, zmm17
+ vmovdqu32 ymm18, ymmword ptr [rdx+r10-2H*20H]
+ vinserti64x4 zmm18, zmm18, ymmword ptr [rdx+r14-2H*20H], 01H
+ vmovdqu32 ymm19, ymmword ptr [rdx+r11-2H*20H]
+ vinserti64x4 zmm19, zmm19, ymmword ptr [rdx+r15-2H*20H], 01H
+ vpunpcklqdq zmm14, zmm18, zmm19
+ vpunpckhqdq zmm15, zmm18, zmm19
+ vmovdqa32 zmm27, zmmword ptr [INDEX0]
+ vmovdqa32 zmm31, zmmword ptr [INDEX1]
+ vshufps zmm16, zmm8, zmm10, 136
+ vshufps zmm17, zmm12, zmm14, 136
+ vmovdqa32 zmm20, zmm16
+ vpermt2d zmm16, zmm27, zmm17
+ vpermt2d zmm20, zmm31, zmm17
+ vshufps zmm17, zmm8, zmm10, 221
+ vshufps zmm30, zmm12, zmm14, 221
+ vmovdqa32 zmm21, zmm17
+ vpermt2d zmm17, zmm27, zmm30
+ vpermt2d zmm21, zmm31, zmm30
+ vshufps zmm18, zmm9, zmm11, 136
+ vshufps zmm8, zmm13, zmm15, 136
+ vmovdqa32 zmm22, zmm18
+ vpermt2d zmm18, zmm27, zmm8
+ vpermt2d zmm22, zmm31, zmm8
+ vshufps zmm19, zmm9, zmm11, 221
+ vshufps zmm8, zmm13, zmm15, 221
+ vmovdqa32 zmm23, zmm19
+ vpermt2d zmm19, zmm27, zmm8
+ vpermt2d zmm23, zmm31, zmm8
+ mov r8, qword ptr [rdi]
+ mov r9, qword ptr [rdi+8H]
+ mov r10, qword ptr [rdi+10H]
+ mov r11, qword ptr [rdi+18H]
+ mov r12, qword ptr [rdi+40H]
+ mov r13, qword ptr [rdi+48H]
+ mov r14, qword ptr [rdi+50H]
+ mov r15, qword ptr [rdi+58H]
+ vmovdqu32 ymm24, ymmword ptr [r8+rdx-1H*20H]
+ vinserti64x4 zmm24, zmm24, ymmword ptr [r12+rdx-1H*20H], 01H
+ vmovdqu32 ymm25, ymmword ptr [r9+rdx-1H*20H]
+ vinserti64x4 zmm25, zmm25, ymmword ptr [r13+rdx-1H*20H], 01H
+ vpunpcklqdq zmm8, zmm24, zmm25
+ vpunpckhqdq zmm9, zmm24, zmm25
+ vmovdqu32 ymm24, ymmword ptr [r10+rdx-1H*20H]
+ vinserti64x4 zmm24, zmm24, ymmword ptr [r14+rdx-1H*20H], 01H
+ vmovdqu32 ymm25, ymmword ptr [r11+rdx-1H*20H]
+ vinserti64x4 zmm25, zmm25, ymmword ptr [r15+rdx-1H*20H], 01H
+ vpunpcklqdq zmm10, zmm24, zmm25
+ vpunpckhqdq zmm11, zmm24, zmm25
+ prefetcht0 byte ptr [r8+rdx+80H]
+ prefetcht0 byte ptr [r12+rdx+80H]
+ prefetcht0 byte ptr [r9+rdx+80H]
+ prefetcht0 byte ptr [r13+rdx+80H]
+ prefetcht0 byte ptr [r10+rdx+80H]
+ prefetcht0 byte ptr [r14+rdx+80H]
+ prefetcht0 byte ptr [r11+rdx+80H]
+ prefetcht0 byte ptr [r15+rdx+80H]
+ mov r8, qword ptr [rdi+20H]
+ mov r9, qword ptr [rdi+28H]
+ mov r10, qword ptr [rdi+30H]
+ mov r11, qword ptr [rdi+38H]
+ mov r12, qword ptr [rdi+60H]
+ mov r13, qword ptr [rdi+68H]
+ mov r14, qword ptr [rdi+70H]
+ mov r15, qword ptr [rdi+78H]
+ vmovdqu32 ymm24, ymmword ptr [r8+rdx-1H*20H]
+ vinserti64x4 zmm24, zmm24, ymmword ptr [r12+rdx-1H*20H], 01H
+ vmovdqu32 ymm25, ymmword ptr [r9+rdx-1H*20H]
+ vinserti64x4 zmm25, zmm25, ymmword ptr [r13+rdx-1H*20H], 01H
+ vpunpcklqdq zmm12, zmm24, zmm25
+ vpunpckhqdq zmm13, zmm24, zmm25
+ vmovdqu32 ymm24, ymmword ptr [r10+rdx-1H*20H]
+ vinserti64x4 zmm24, zmm24, ymmword ptr [r14+rdx-1H*20H], 01H
+ vmovdqu32 ymm25, ymmword ptr [r11+rdx-1H*20H]
+ vinserti64x4 zmm25, zmm25, ymmword ptr [r15+rdx-1H*20H], 01H
+ vpunpcklqdq zmm14, zmm24, zmm25
+ vpunpckhqdq zmm15, zmm24, zmm25
+ prefetcht0 byte ptr [r8+rdx+80H]
+ prefetcht0 byte ptr [r12+rdx+80H]
+ prefetcht0 byte ptr [r9+rdx+80H]
+ prefetcht0 byte ptr [r13+rdx+80H]
+ prefetcht0 byte ptr [r10+rdx+80H]
+ prefetcht0 byte ptr [r14+rdx+80H]
+ prefetcht0 byte ptr [r11+rdx+80H]
+ prefetcht0 byte ptr [r15+rdx+80H]
+ vshufps zmm24, zmm8, zmm10, 136
+ vshufps zmm30, zmm12, zmm14, 136
+ vmovdqa32 zmm28, zmm24
+ vpermt2d zmm24, zmm27, zmm30
+ vpermt2d zmm28, zmm31, zmm30
+ vshufps zmm25, zmm8, zmm10, 221
+ vshufps zmm30, zmm12, zmm14, 221
+ vmovdqa32 zmm29, zmm25
+ vpermt2d zmm25, zmm27, zmm30
+ vpermt2d zmm29, zmm31, zmm30
+ vshufps zmm26, zmm9, zmm11, 136
+ vshufps zmm8, zmm13, zmm15, 136
+ vmovdqa32 zmm30, zmm26
+ vpermt2d zmm26, zmm27, zmm8
+ vpermt2d zmm30, zmm31, zmm8
+ vshufps zmm8, zmm9, zmm11, 221
+ vshufps zmm10, zmm13, zmm15, 221
+ vpermi2d zmm27, zmm8, zmm10
+ vpermi2d zmm31, zmm8, zmm10
+ vpbroadcastd zmm8, dword ptr [BLAKE3_IV_0]
+ vpbroadcastd zmm9, dword ptr [BLAKE3_IV_1]
+ vpbroadcastd zmm10, dword ptr [BLAKE3_IV_2]
+ vpbroadcastd zmm11, dword ptr [BLAKE3_IV_3]
+ vmovdqa32 zmm12, zmmword ptr [rsp]
+ vmovdqa32 zmm13, zmmword ptr [rsp+1H*40H]
+ vpbroadcastd zmm14, dword ptr [BLAKE3_BLOCK_LEN]
+ vpbroadcastd zmm15, dword ptr [rsp+22H*4H]
+ vpaddd zmm0, zmm0, zmm16
+ vpaddd zmm1, zmm1, zmm18
+ vpaddd zmm2, zmm2, zmm20
+ vpaddd zmm3, zmm3, zmm22
+ vpaddd zmm0, zmm0, zmm4
+ vpaddd zmm1, zmm1, zmm5
+ vpaddd zmm2, zmm2, zmm6
+ vpaddd zmm3, zmm3, zmm7
+ vpxord zmm12, zmm12, zmm0
+ vpxord zmm13, zmm13, zmm1
+ vpxord zmm14, zmm14, zmm2
+ vpxord zmm15, zmm15, zmm3
+ vprord zmm12, zmm12, 16
+ vprord zmm13, zmm13, 16
+ vprord zmm14, zmm14, 16
+ vprord zmm15, zmm15, 16
+ vpaddd zmm8, zmm8, zmm12
+ vpaddd zmm9, zmm9, zmm13
+ vpaddd zmm10, zmm10, zmm14
+ vpaddd zmm11, zmm11, zmm15
+ vpxord zmm4, zmm4, zmm8
+ vpxord zmm5, zmm5, zmm9
+ vpxord zmm6, zmm6, zmm10
+ vpxord zmm7, zmm7, zmm11
+ vprord zmm4, zmm4, 12
+ vprord zmm5, zmm5, 12
+ vprord zmm6, zmm6, 12
+ vprord zmm7, zmm7, 12
+ vpaddd zmm0, zmm0, zmm17
+ vpaddd zmm1, zmm1, zmm19
+ vpaddd zmm2, zmm2, zmm21
+ vpaddd zmm3, zmm3, zmm23
+ vpaddd zmm0, zmm0, zmm4
+ vpaddd zmm1, zmm1, zmm5
+ vpaddd zmm2, zmm2, zmm6
+ vpaddd zmm3, zmm3, zmm7
+ vpxord zmm12, zmm12, zmm0
+ vpxord zmm13, zmm13, zmm1
+ vpxord zmm14, zmm14, zmm2
+ vpxord zmm15, zmm15, zmm3
+ vprord zmm12, zmm12, 8
+ vprord zmm13, zmm13, 8
+ vprord zmm14, zmm14, 8
+ vprord zmm15, zmm15, 8
+ vpaddd zmm8, zmm8, zmm12
+ vpaddd zmm9, zmm9, zmm13
+ vpaddd zmm10, zmm10, zmm14
+ vpaddd zmm11, zmm11, zmm15
+ vpxord zmm4, zmm4, zmm8
+ vpxord zmm5, zmm5, zmm9
+ vpxord zmm6, zmm6, zmm10
+ vpxord zmm7, zmm7, zmm11
+ vprord zmm4, zmm4, 7
+ vprord zmm5, zmm5, 7
+ vprord zmm6, zmm6, 7
+ vprord zmm7, zmm7, 7
+ vpaddd zmm0, zmm0, zmm24
+ vpaddd zmm1, zmm1, zmm26
+ vpaddd zmm2, zmm2, zmm28
+ vpaddd zmm3, zmm3, zmm30
+ vpaddd zmm0, zmm0, zmm5
+ vpaddd zmm1, zmm1, zmm6
+ vpaddd zmm2, zmm2, zmm7
+ vpaddd zmm3, zmm3, zmm4
+ vpxord zmm15, zmm15, zmm0
+ vpxord zmm12, zmm12, zmm1
+ vpxord zmm13, zmm13, zmm2
+ vpxord zmm14, zmm14, zmm3
+ vprord zmm15, zmm15, 16
+ vprord zmm12, zmm12, 16
+ vprord zmm13, zmm13, 16
+ vprord zmm14, zmm14, 16
+ vpaddd zmm10, zmm10, zmm15
+ vpaddd zmm11, zmm11, zmm12
+ vpaddd zmm8, zmm8, zmm13
+ vpaddd zmm9, zmm9, zmm14
+ vpxord zmm5, zmm5, zmm10
+ vpxord zmm6, zmm6, zmm11
+ vpxord zmm7, zmm7, zmm8
+ vpxord zmm4, zmm4, zmm9
+ vprord zmm5, zmm5, 12
+ vprord zmm6, zmm6, 12
+ vprord zmm7, zmm7, 12
+ vprord zmm4, zmm4, 12
+ vpaddd zmm0, zmm0, zmm25
+ vpaddd zmm1, zmm1, zmm27
+ vpaddd zmm2, zmm2, zmm29
+ vpaddd zmm3, zmm3, zmm31
+ vpaddd zmm0, zmm0, zmm5
+ vpaddd zmm1, zmm1, zmm6
+ vpaddd zmm2, zmm2, zmm7
+ vpaddd zmm3, zmm3, zmm4
+ vpxord zmm15, zmm15, zmm0
+ vpxord zmm12, zmm12, zmm1
+ vpxord zmm13, zmm13, zmm2
+ vpxord zmm14, zmm14, zmm3
+ vprord zmm15, zmm15, 8
+ vprord zmm12, zmm12, 8
+ vprord zmm13, zmm13, 8
+ vprord zmm14, zmm14, 8
+ vpaddd zmm10, zmm10, zmm15
+ vpaddd zmm11, zmm11, zmm12
+ vpaddd zmm8, zmm8, zmm13
+ vpaddd zmm9, zmm9, zmm14
+ vpxord zmm5, zmm5, zmm10
+ vpxord zmm6, zmm6, zmm11
+ vpxord zmm7, zmm7, zmm8
+ vpxord zmm4, zmm4, zmm9
+ vprord zmm5, zmm5, 7
+ vprord zmm6, zmm6, 7
+ vprord zmm7, zmm7, 7
+ vprord zmm4, zmm4, 7
+ vpaddd zmm0, zmm0, zmm18
+ vpaddd zmm1, zmm1, zmm19
+ vpaddd zmm2, zmm2, zmm23
+ vpaddd zmm3, zmm3, zmm20
+ vpaddd zmm0, zmm0, zmm4
+ vpaddd zmm1, zmm1, zmm5
+ vpaddd zmm2, zmm2, zmm6
+ vpaddd zmm3, zmm3, zmm7
+ vpxord zmm12, zmm12, zmm0
+ vpxord zmm13, zmm13, zmm1
+ vpxord zmm14, zmm14, zmm2
+ vpxord zmm15, zmm15, zmm3
+ vprord zmm12, zmm12, 16
+ vprord zmm13, zmm13, 16
+ vprord zmm14, zmm14, 16
+ vprord zmm15, zmm15, 16
+ vpaddd zmm8, zmm8, zmm12
+ vpaddd zmm9, zmm9, zmm13
+ vpaddd zmm10, zmm10, zmm14
+ vpaddd zmm11, zmm11, zmm15
+ vpxord zmm4, zmm4, zmm8
+ vpxord zmm5, zmm5, zmm9
+ vpxord zmm6, zmm6, zmm10
+ vpxord zmm7, zmm7, zmm11
+ vprord zmm4, zmm4, 12
+ vprord zmm5, zmm5, 12
+ vprord zmm6, zmm6, 12
+ vprord zmm7, zmm7, 12
+ vpaddd zmm0, zmm0, zmm22
+ vpaddd zmm1, zmm1, zmm26
+ vpaddd zmm2, zmm2, zmm16
+ vpaddd zmm3, zmm3, zmm29
+ vpaddd zmm0, zmm0, zmm4
+ vpaddd zmm1, zmm1, zmm5
+ vpaddd zmm2, zmm2, zmm6
+ vpaddd zmm3, zmm3, zmm7
+ vpxord zmm12, zmm12, zmm0
+ vpxord zmm13, zmm13, zmm1
+ vpxord zmm14, zmm14, zmm2
+ vpxord zmm15, zmm15, zmm3
+ vprord zmm12, zmm12, 8
+ vprord zmm13, zmm13, 8
+ vprord zmm14, zmm14, 8
+ vprord zmm15, zmm15, 8
+ vpaddd zmm8, zmm8, zmm12
+ vpaddd zmm9, zmm9, zmm13
+ vpaddd zmm10, zmm10, zmm14
+ vpaddd zmm11, zmm11, zmm15
+ vpxord zmm4, zmm4, zmm8
+ vpxord zmm5, zmm5, zmm9
+ vpxord zmm6, zmm6, zmm10
+ vpxord zmm7, zmm7, zmm11
+ vprord zmm4, zmm4, 7
+ vprord zmm5, zmm5, 7
+ vprord zmm6, zmm6, 7
+ vprord zmm7, zmm7, 7
+ vpaddd zmm0, zmm0, zmm17
+ vpaddd zmm1, zmm1, zmm28
+ vpaddd zmm2, zmm2, zmm25
+ vpaddd zmm3, zmm3, zmm31
+ vpaddd zmm0, zmm0, zmm5
+ vpaddd zmm1, zmm1, zmm6
+ vpaddd zmm2, zmm2, zmm7
+ vpaddd zmm3, zmm3, zmm4
+ vpxord zmm15, zmm15, zmm0
+ vpxord zmm12, zmm12, zmm1
+ vpxord zmm13, zmm13, zmm2
+ vpxord zmm14, zmm14, zmm3
+ vprord zmm15, zmm15, 16
+ vprord zmm12, zmm12, 16
+ vprord zmm13, zmm13, 16
+ vprord zmm14, zmm14, 16
+ vpaddd zmm10, zmm10, zmm15
+ vpaddd zmm11, zmm11, zmm12
+ vpaddd zmm8, zmm8, zmm13
+ vpaddd zmm9, zmm9, zmm14
+ vpxord zmm5, zmm5, zmm10
+ vpxord zmm6, zmm6, zmm11
+ vpxord zmm7, zmm7, zmm8
+ vpxord zmm4, zmm4, zmm9
+ vprord zmm5, zmm5, 12
+ vprord zmm6, zmm6, 12
+ vprord zmm7, zmm7, 12
+ vprord zmm4, zmm4, 12
+ vpaddd zmm0, zmm0, zmm27
+ vpaddd zmm1, zmm1, zmm21
+ vpaddd zmm2, zmm2, zmm30
+ vpaddd zmm3, zmm3, zmm24
+ vpaddd zmm0, zmm0, zmm5
+ vpaddd zmm1, zmm1, zmm6
+ vpaddd zmm2, zmm2, zmm7
+ vpaddd zmm3, zmm3, zmm4
+ vpxord zmm15, zmm15, zmm0
+ vpxord zmm12, zmm12, zmm1
+ vpxord zmm13, zmm13, zmm2
+ vpxord zmm14, zmm14, zmm3
+ vprord zmm15, zmm15, 8
+ vprord zmm12, zmm12, 8
+ vprord zmm13, zmm13, 8
+ vprord zmm14, zmm14, 8
+ vpaddd zmm10, zmm10, zmm15
+ vpaddd zmm11, zmm11, zmm12
+ vpaddd zmm8, zmm8, zmm13
+ vpaddd zmm9, zmm9, zmm14
+ vpxord zmm5, zmm5, zmm10
+ vpxord zmm6, zmm6, zmm11
+ vpxord zmm7, zmm7, zmm8
+ vpxord zmm4, zmm4, zmm9
+ vprord zmm5, zmm5, 7
+ vprord zmm6, zmm6, 7
+ vprord zmm7, zmm7, 7
+ vprord zmm4, zmm4, 7
+ vpaddd zmm0, zmm0, zmm19
+ vpaddd zmm1, zmm1, zmm26
+ vpaddd zmm2, zmm2, zmm29
+ vpaddd zmm3, zmm3, zmm23
+ vpaddd zmm0, zmm0, zmm4
+ vpaddd zmm1, zmm1, zmm5
+ vpaddd zmm2, zmm2, zmm6
+ vpaddd zmm3, zmm3, zmm7
+ vpxord zmm12, zmm12, zmm0
+ vpxord zmm13, zmm13, zmm1
+ vpxord zmm14, zmm14, zmm2
+ vpxord zmm15, zmm15, zmm3
+ vprord zmm12, zmm12, 16
+ vprord zmm13, zmm13, 16
+ vprord zmm14, zmm14, 16
+ vprord zmm15, zmm15, 16
+ vpaddd zmm8, zmm8, zmm12
+ vpaddd zmm9, zmm9, zmm13
+ vpaddd zmm10, zmm10, zmm14
+ vpaddd zmm11, zmm11, zmm15
+ vpxord zmm4, zmm4, zmm8
+ vpxord zmm5, zmm5, zmm9
+ vpxord zmm6, zmm6, zmm10
+ vpxord zmm7, zmm7, zmm11
+ vprord zmm4, zmm4, 12
+ vprord zmm5, zmm5, 12
+ vprord zmm6, zmm6, 12
+ vprord zmm7, zmm7, 12
+ vpaddd zmm0, zmm0, zmm20
+ vpaddd zmm1, zmm1, zmm28
+ vpaddd zmm2, zmm2, zmm18
+ vpaddd zmm3, zmm3, zmm30
+ vpaddd zmm0, zmm0, zmm4
+ vpaddd zmm1, zmm1, zmm5
+ vpaddd zmm2, zmm2, zmm6
+ vpaddd zmm3, zmm3, zmm7
+ vpxord zmm12, zmm12, zmm0
+ vpxord zmm13, zmm13, zmm1
+ vpxord zmm14, zmm14, zmm2
+ vpxord zmm15, zmm15, zmm3
+ vprord zmm12, zmm12, 8
+ vprord zmm13, zmm13, 8
+ vprord zmm14, zmm14, 8
+ vprord zmm15, zmm15, 8
+ vpaddd zmm8, zmm8, zmm12
+ vpaddd zmm9, zmm9, zmm13
+ vpaddd zmm10, zmm10, zmm14
+ vpaddd zmm11, zmm11, zmm15
+ vpxord zmm4, zmm4, zmm8
+ vpxord zmm5, zmm5, zmm9
+ vpxord zmm6, zmm6, zmm10
+ vpxord zmm7, zmm7, zmm11
+ vprord zmm4, zmm4, 7
+ vprord zmm5, zmm5, 7
+ vprord zmm6, zmm6, 7
+ vprord zmm7, zmm7, 7
+ vpaddd zmm0, zmm0, zmm22
+ vpaddd zmm1, zmm1, zmm25
+ vpaddd zmm2, zmm2, zmm27
+ vpaddd zmm3, zmm3, zmm24
+ vpaddd zmm0, zmm0, zmm5
+ vpaddd zmm1, zmm1, zmm6
+ vpaddd zmm2, zmm2, zmm7
+ vpaddd zmm3, zmm3, zmm4
+ vpxord zmm15, zmm15, zmm0
+ vpxord zmm12, zmm12, zmm1
+ vpxord zmm13, zmm13, zmm2
+ vpxord zmm14, zmm14, zmm3
+ vprord zmm15, zmm15, 16
+ vprord zmm12, zmm12, 16
+ vprord zmm13, zmm13, 16
+ vprord zmm14, zmm14, 16
+ vpaddd zmm10, zmm10, zmm15
+ vpaddd zmm11, zmm11, zmm12
+ vpaddd zmm8, zmm8, zmm13
+ vpaddd zmm9, zmm9, zmm14
+ vpxord zmm5, zmm5, zmm10
+ vpxord zmm6, zmm6, zmm11
+ vpxord zmm7, zmm7, zmm8
+ vpxord zmm4, zmm4, zmm9
+ vprord zmm5, zmm5, 12
+ vprord zmm6, zmm6, 12
+ vprord zmm7, zmm7, 12
+ vprord zmm4, zmm4, 12
+ vpaddd zmm0, zmm0, zmm21
+ vpaddd zmm1, zmm1, zmm16
+ vpaddd zmm2, zmm2, zmm31
+ vpaddd zmm3, zmm3, zmm17
+ vpaddd zmm0, zmm0, zmm5
+ vpaddd zmm1, zmm1, zmm6
+ vpaddd zmm2, zmm2, zmm7
+ vpaddd zmm3, zmm3, zmm4
+ vpxord zmm15, zmm15, zmm0
+ vpxord zmm12, zmm12, zmm1
+ vpxord zmm13, zmm13, zmm2
+ vpxord zmm14, zmm14, zmm3
+ vprord zmm15, zmm15, 8
+ vprord zmm12, zmm12, 8
+ vprord zmm13, zmm13, 8
+ vprord zmm14, zmm14, 8
+ vpaddd zmm10, zmm10, zmm15
+ vpaddd zmm11, zmm11, zmm12
+ vpaddd zmm8, zmm8, zmm13
+ vpaddd zmm9, zmm9, zmm14
+ vpxord zmm5, zmm5, zmm10
+ vpxord zmm6, zmm6, zmm11
+ vpxord zmm7, zmm7, zmm8
+ vpxord zmm4, zmm4, zmm9
+ vprord zmm5, zmm5, 7
+ vprord zmm6, zmm6, 7
+ vprord zmm7, zmm7, 7
+ vprord zmm4, zmm4, 7
+ vpaddd zmm0, zmm0, zmm26
+ vpaddd zmm1, zmm1, zmm28
+ vpaddd zmm2, zmm2, zmm30
+ vpaddd zmm3, zmm3, zmm29
+ vpaddd zmm0, zmm0, zmm4
+ vpaddd zmm1, zmm1, zmm5
+ vpaddd zmm2, zmm2, zmm6
+ vpaddd zmm3, zmm3, zmm7
+ vpxord zmm12, zmm12, zmm0
+ vpxord zmm13, zmm13, zmm1
+ vpxord zmm14, zmm14, zmm2
+ vpxord zmm15, zmm15, zmm3
+ vprord zmm12, zmm12, 16
+ vprord zmm13, zmm13, 16
+ vprord zmm14, zmm14, 16
+ vprord zmm15, zmm15, 16
+ vpaddd zmm8, zmm8, zmm12
+ vpaddd zmm9, zmm9, zmm13
+ vpaddd zmm10, zmm10, zmm14
+ vpaddd zmm11, zmm11, zmm15
+ vpxord zmm4, zmm4, zmm8
+ vpxord zmm5, zmm5, zmm9
+ vpxord zmm6, zmm6, zmm10
+ vpxord zmm7, zmm7, zmm11
+ vprord zmm4, zmm4, 12
+ vprord zmm5, zmm5, 12
+ vprord zmm6, zmm6, 12
+ vprord zmm7, zmm7, 12
+ vpaddd zmm0, zmm0, zmm23
+ vpaddd zmm1, zmm1, zmm25
+ vpaddd zmm2, zmm2, zmm19
+ vpaddd zmm3, zmm3, zmm31
+ vpaddd zmm0, zmm0, zmm4
+ vpaddd zmm1, zmm1, zmm5
+ vpaddd zmm2, zmm2, zmm6
+ vpaddd zmm3, zmm3, zmm7
+ vpxord zmm12, zmm12, zmm0
+ vpxord zmm13, zmm13, zmm1
+ vpxord zmm14, zmm14, zmm2
+ vpxord zmm15, zmm15, zmm3
+ vprord zmm12, zmm12, 8
+ vprord zmm13, zmm13, 8
+ vprord zmm14, zmm14, 8
+ vprord zmm15, zmm15, 8
+ vpaddd zmm8, zmm8, zmm12
+ vpaddd zmm9, zmm9, zmm13
+ vpaddd zmm10, zmm10, zmm14
+ vpaddd zmm11, zmm11, zmm15
+ vpxord zmm4, zmm4, zmm8
+ vpxord zmm5, zmm5, zmm9
+ vpxord zmm6, zmm6, zmm10
+ vpxord zmm7, zmm7, zmm11
+ vprord zmm4, zmm4, 7
+ vprord zmm5, zmm5, 7
+ vprord zmm6, zmm6, 7
+ vprord zmm7, zmm7, 7
+ vpaddd zmm0, zmm0, zmm20
+ vpaddd zmm1, zmm1, zmm27
+ vpaddd zmm2, zmm2, zmm21
+ vpaddd zmm3, zmm3, zmm17
+ vpaddd zmm0, zmm0, zmm5
+ vpaddd zmm1, zmm1, zmm6
+ vpaddd zmm2, zmm2, zmm7
+ vpaddd zmm3, zmm3, zmm4
+ vpxord zmm15, zmm15, zmm0
+ vpxord zmm12, zmm12, zmm1
+ vpxord zmm13, zmm13, zmm2
+ vpxord zmm14, zmm14, zmm3
+ vprord zmm15, zmm15, 16
+ vprord zmm12, zmm12, 16
+ vprord zmm13, zmm13, 16
+ vprord zmm14, zmm14, 16
+ vpaddd zmm10, zmm10, zmm15
+ vpaddd zmm11, zmm11, zmm12
+ vpaddd zmm8, zmm8, zmm13
+ vpaddd zmm9, zmm9, zmm14
+ vpxord zmm5, zmm5, zmm10
+ vpxord zmm6, zmm6, zmm11
+ vpxord zmm7, zmm7, zmm8
+ vpxord zmm4, zmm4, zmm9
+ vprord zmm5, zmm5, 12
+ vprord zmm6, zmm6, 12
+ vprord zmm7, zmm7, 12
+ vprord zmm4, zmm4, 12
+ vpaddd zmm0, zmm0, zmm16
+ vpaddd zmm1, zmm1, zmm18
+ vpaddd zmm2, zmm2, zmm24
+ vpaddd zmm3, zmm3, zmm22
+ vpaddd zmm0, zmm0, zmm5
+ vpaddd zmm1, zmm1, zmm6
+ vpaddd zmm2, zmm2, zmm7
+ vpaddd zmm3, zmm3, zmm4
+ vpxord zmm15, zmm15, zmm0
+ vpxord zmm12, zmm12, zmm1
+ vpxord zmm13, zmm13, zmm2
+ vpxord zmm14, zmm14, zmm3
+ vprord zmm15, zmm15, 8
+ vprord zmm12, zmm12, 8
+ vprord zmm13, zmm13, 8
+ vprord zmm14, zmm14, 8
+ vpaddd zmm10, zmm10, zmm15
+ vpaddd zmm11, zmm11, zmm12
+ vpaddd zmm8, zmm8, zmm13
+ vpaddd zmm9, zmm9, zmm14
+ vpxord zmm5, zmm5, zmm10
+ vpxord zmm6, zmm6, zmm11
+ vpxord zmm7, zmm7, zmm8
+ vpxord zmm4, zmm4, zmm9
+ vprord zmm5, zmm5, 7
+ vprord zmm6, zmm6, 7
+ vprord zmm7, zmm7, 7
+ vprord zmm4, zmm4, 7
+ vpaddd zmm0, zmm0, zmm28
+ vpaddd zmm1, zmm1, zmm25
+ vpaddd zmm2, zmm2, zmm31
+ vpaddd zmm3, zmm3, zmm30
+ vpaddd zmm0, zmm0, zmm4
+ vpaddd zmm1, zmm1, zmm5
+ vpaddd zmm2, zmm2, zmm6
+ vpaddd zmm3, zmm3, zmm7
+ vpxord zmm12, zmm12, zmm0
+ vpxord zmm13, zmm13, zmm1
+ vpxord zmm14, zmm14, zmm2
+ vpxord zmm15, zmm15, zmm3
+ vprord zmm12, zmm12, 16
+ vprord zmm13, zmm13, 16
+ vprord zmm14, zmm14, 16
+ vprord zmm15, zmm15, 16
+ vpaddd zmm8, zmm8, zmm12
+ vpaddd zmm9, zmm9, zmm13
+ vpaddd zmm10, zmm10, zmm14
+ vpaddd zmm11, zmm11, zmm15
+ vpxord zmm4, zmm4, zmm8
+ vpxord zmm5, zmm5, zmm9
+ vpxord zmm6, zmm6, zmm10
+ vpxord zmm7, zmm7, zmm11
+ vprord zmm4, zmm4, 12
+ vprord zmm5, zmm5, 12
+ vprord zmm6, zmm6, 12
+ vprord zmm7, zmm7, 12
+ vpaddd zmm0, zmm0, zmm29
+ vpaddd zmm1, zmm1, zmm27
+ vpaddd zmm2, zmm2, zmm26
+ vpaddd zmm3, zmm3, zmm24
+ vpaddd zmm0, zmm0, zmm4
+ vpaddd zmm1, zmm1, zmm5
+ vpaddd zmm2, zmm2, zmm6
+ vpaddd zmm3, zmm3, zmm7
+ vpxord zmm12, zmm12, zmm0
+ vpxord zmm13, zmm13, zmm1
+ vpxord zmm14, zmm14, zmm2
+ vpxord zmm15, zmm15, zmm3
+ vprord zmm12, zmm12, 8
+ vprord zmm13, zmm13, 8
+ vprord zmm14, zmm14, 8
+ vprord zmm15, zmm15, 8
+ vpaddd zmm8, zmm8, zmm12
+ vpaddd zmm9, zmm9, zmm13
+ vpaddd zmm10, zmm10, zmm14
+ vpaddd zmm11, zmm11, zmm15
+ vpxord zmm4, zmm4, zmm8
+ vpxord zmm5, zmm5, zmm9
+ vpxord zmm6, zmm6, zmm10
+ vpxord zmm7, zmm7, zmm11
+ vprord zmm4, zmm4, 7
+ vprord zmm5, zmm5, 7
+ vprord zmm6, zmm6, 7
+ vprord zmm7, zmm7, 7
+ vpaddd zmm0, zmm0, zmm23
+ vpaddd zmm1, zmm1, zmm21
+ vpaddd zmm2, zmm2, zmm16
+ vpaddd zmm3, zmm3, zmm22
+ vpaddd zmm0, zmm0, zmm5
+ vpaddd zmm1, zmm1, zmm6
+ vpaddd zmm2, zmm2, zmm7
+ vpaddd zmm3, zmm3, zmm4
+ vpxord zmm15, zmm15, zmm0
+ vpxord zmm12, zmm12, zmm1
+ vpxord zmm13, zmm13, zmm2
+ vpxord zmm14, zmm14, zmm3
+ vprord zmm15, zmm15, 16
+ vprord zmm12, zmm12, 16
+ vprord zmm13, zmm13, 16
+ vprord zmm14, zmm14, 16
+ vpaddd zmm10, zmm10, zmm15
+ vpaddd zmm11, zmm11, zmm12
+ vpaddd zmm8, zmm8, zmm13
+ vpaddd zmm9, zmm9, zmm14
+ vpxord zmm5, zmm5, zmm10
+ vpxord zmm6, zmm6, zmm11
+ vpxord zmm7, zmm7, zmm8
+ vpxord zmm4, zmm4, zmm9
+ vprord zmm5, zmm5, 12
+ vprord zmm6, zmm6, 12
+ vprord zmm7, zmm7, 12
+ vprord zmm4, zmm4, 12
+ vpaddd zmm0, zmm0, zmm18
+ vpaddd zmm1, zmm1, zmm19
+ vpaddd zmm2, zmm2, zmm17
+ vpaddd zmm3, zmm3, zmm20
+ vpaddd zmm0, zmm0, zmm5
+ vpaddd zmm1, zmm1, zmm6
+ vpaddd zmm2, zmm2, zmm7
+ vpaddd zmm3, zmm3, zmm4
+ vpxord zmm15, zmm15, zmm0
+ vpxord zmm12, zmm12, zmm1
+ vpxord zmm13, zmm13, zmm2
+ vpxord zmm14, zmm14, zmm3
+ vprord zmm15, zmm15, 8
+ vprord zmm12, zmm12, 8
+ vprord zmm13, zmm13, 8
+ vprord zmm14, zmm14, 8
+ vpaddd zmm10, zmm10, zmm15
+ vpaddd zmm11, zmm11, zmm12
+ vpaddd zmm8, zmm8, zmm13
+ vpaddd zmm9, zmm9, zmm14
+ vpxord zmm5, zmm5, zmm10
+ vpxord zmm6, zmm6, zmm11
+ vpxord zmm7, zmm7, zmm8
+ vpxord zmm4, zmm4, zmm9
+ vprord zmm5, zmm5, 7
+ vprord zmm6, zmm6, 7
+ vprord zmm7, zmm7, 7
+ vprord zmm4, zmm4, 7
+ vpaddd zmm0, zmm0, zmm25
+ vpaddd zmm1, zmm1, zmm27
+ vpaddd zmm2, zmm2, zmm24
+ vpaddd zmm3, zmm3, zmm31
+ vpaddd zmm0, zmm0, zmm4
+ vpaddd zmm1, zmm1, zmm5
+ vpaddd zmm2, zmm2, zmm6
+ vpaddd zmm3, zmm3, zmm7
+ vpxord zmm12, zmm12, zmm0
+ vpxord zmm13, zmm13, zmm1
+ vpxord zmm14, zmm14, zmm2
+ vpxord zmm15, zmm15, zmm3
+ vprord zmm12, zmm12, 16
+ vprord zmm13, zmm13, 16
+ vprord zmm14, zmm14, 16
+ vprord zmm15, zmm15, 16
+ vpaddd zmm8, zmm8, zmm12
+ vpaddd zmm9, zmm9, zmm13
+ vpaddd zmm10, zmm10, zmm14
+ vpaddd zmm11, zmm11, zmm15
+ vpxord zmm4, zmm4, zmm8
+ vpxord zmm5, zmm5, zmm9
+ vpxord zmm6, zmm6, zmm10
+ vpxord zmm7, zmm7, zmm11
+ vprord zmm4, zmm4, 12
+ vprord zmm5, zmm5, 12
+ vprord zmm6, zmm6, 12
+ vprord zmm7, zmm7, 12
+ vpaddd zmm0, zmm0, zmm30
+ vpaddd zmm1, zmm1, zmm21
+ vpaddd zmm2, zmm2, zmm28
+ vpaddd zmm3, zmm3, zmm17
+ vpaddd zmm0, zmm0, zmm4
+ vpaddd zmm1, zmm1, zmm5
+ vpaddd zmm2, zmm2, zmm6
+ vpaddd zmm3, zmm3, zmm7
+ vpxord zmm12, zmm12, zmm0
+ vpxord zmm13, zmm13, zmm1
+ vpxord zmm14, zmm14, zmm2
+ vpxord zmm15, zmm15, zmm3
+ vprord zmm12, zmm12, 8
+ vprord zmm13, zmm13, 8
+ vprord zmm14, zmm14, 8
+ vprord zmm15, zmm15, 8
+ vpaddd zmm8, zmm8, zmm12
+ vpaddd zmm9, zmm9, zmm13
+ vpaddd zmm10, zmm10, zmm14
+ vpaddd zmm11, zmm11, zmm15
+ vpxord zmm4, zmm4, zmm8
+ vpxord zmm5, zmm5, zmm9
+ vpxord zmm6, zmm6, zmm10
+ vpxord zmm7, zmm7, zmm11
+ vprord zmm4, zmm4, 7
+ vprord zmm5, zmm5, 7
+ vprord zmm6, zmm6, 7
+ vprord zmm7, zmm7, 7
+ vpaddd zmm0, zmm0, zmm29
+ vpaddd zmm1, zmm1, zmm16
+ vpaddd zmm2, zmm2, zmm18
+ vpaddd zmm3, zmm3, zmm20
+ vpaddd zmm0, zmm0, zmm5
+ vpaddd zmm1, zmm1, zmm6
+ vpaddd zmm2, zmm2, zmm7
+ vpaddd zmm3, zmm3, zmm4
+ vpxord zmm15, zmm15, zmm0
+ vpxord zmm12, zmm12, zmm1
+ vpxord zmm13, zmm13, zmm2
+ vpxord zmm14, zmm14, zmm3
+ vprord zmm15, zmm15, 16
+ vprord zmm12, zmm12, 16
+ vprord zmm13, zmm13, 16
+ vprord zmm14, zmm14, 16
+ vpaddd zmm10, zmm10, zmm15
+ vpaddd zmm11, zmm11, zmm12
+ vpaddd zmm8, zmm8, zmm13
+ vpaddd zmm9, zmm9, zmm14
+ vpxord zmm5, zmm5, zmm10
+ vpxord zmm6, zmm6, zmm11
+ vpxord zmm7, zmm7, zmm8
+ vpxord zmm4, zmm4, zmm9
+ vprord zmm5, zmm5, 12
+ vprord zmm6, zmm6, 12
+ vprord zmm7, zmm7, 12
+ vprord zmm4, zmm4, 12
+ vpaddd zmm0, zmm0, zmm19
+ vpaddd zmm1, zmm1, zmm26
+ vpaddd zmm2, zmm2, zmm22
+ vpaddd zmm3, zmm3, zmm23
+ vpaddd zmm0, zmm0, zmm5
+ vpaddd zmm1, zmm1, zmm6
+ vpaddd zmm2, zmm2, zmm7
+ vpaddd zmm3, zmm3, zmm4
+ vpxord zmm15, zmm15, zmm0
+ vpxord zmm12, zmm12, zmm1
+ vpxord zmm13, zmm13, zmm2
+ vpxord zmm14, zmm14, zmm3
+ vprord zmm15, zmm15, 8
+ vprord zmm12, zmm12, 8
+ vprord zmm13, zmm13, 8
+ vprord zmm14, zmm14, 8
+ vpaddd zmm10, zmm10, zmm15
+ vpaddd zmm11, zmm11, zmm12
+ vpaddd zmm8, zmm8, zmm13
+ vpaddd zmm9, zmm9, zmm14
+ vpxord zmm5, zmm5, zmm10
+ vpxord zmm6, zmm6, zmm11
+ vpxord zmm7, zmm7, zmm8
+ vpxord zmm4, zmm4, zmm9
+ vprord zmm5, zmm5, 7
+ vprord zmm6, zmm6, 7
+ vprord zmm7, zmm7, 7
+ vprord zmm4, zmm4, 7
+ vpaddd zmm0, zmm0, zmm27
+ vpaddd zmm1, zmm1, zmm21
+ vpaddd zmm2, zmm2, zmm17
+ vpaddd zmm3, zmm3, zmm24
+ vpaddd zmm0, zmm0, zmm4
+ vpaddd zmm1, zmm1, zmm5
+ vpaddd zmm2, zmm2, zmm6
+ vpaddd zmm3, zmm3, zmm7
+ vpxord zmm12, zmm12, zmm0
+ vpxord zmm13, zmm13, zmm1
+ vpxord zmm14, zmm14, zmm2
+ vpxord zmm15, zmm15, zmm3
+ vprord zmm12, zmm12, 16
+ vprord zmm13, zmm13, 16
+ vprord zmm14, zmm14, 16
+ vprord zmm15, zmm15, 16
+ vpaddd zmm8, zmm8, zmm12
+ vpaddd zmm9, zmm9, zmm13
+ vpaddd zmm10, zmm10, zmm14
+ vpaddd zmm11, zmm11, zmm15
+ vpxord zmm4, zmm4, zmm8
+ vpxord zmm5, zmm5, zmm9
+ vpxord zmm6, zmm6, zmm10
+ vpxord zmm7, zmm7, zmm11
+ vprord zmm4, zmm4, 12
+ vprord zmm5, zmm5, 12
+ vprord zmm6, zmm6, 12
+ vprord zmm7, zmm7, 12
+ vpaddd zmm0, zmm0, zmm31
+ vpaddd zmm1, zmm1, zmm16
+ vpaddd zmm2, zmm2, zmm25
+ vpaddd zmm3, zmm3, zmm22
+ vpaddd zmm0, zmm0, zmm4
+ vpaddd zmm1, zmm1, zmm5
+ vpaddd zmm2, zmm2, zmm6
+ vpaddd zmm3, zmm3, zmm7
+ vpxord zmm12, zmm12, zmm0
+ vpxord zmm13, zmm13, zmm1
+ vpxord zmm14, zmm14, zmm2
+ vpxord zmm15, zmm15, zmm3
+ vprord zmm12, zmm12, 8
+ vprord zmm13, zmm13, 8
+ vprord zmm14, zmm14, 8
+ vprord zmm15, zmm15, 8
+ vpaddd zmm8, zmm8, zmm12
+ vpaddd zmm9, zmm9, zmm13
+ vpaddd zmm10, zmm10, zmm14
+ vpaddd zmm11, zmm11, zmm15
+ vpxord zmm4, zmm4, zmm8
+ vpxord zmm5, zmm5, zmm9
+ vpxord zmm6, zmm6, zmm10
+ vpxord zmm7, zmm7, zmm11
+ vprord zmm4, zmm4, 7
+ vprord zmm5, zmm5, 7
+ vprord zmm6, zmm6, 7
+ vprord zmm7, zmm7, 7
+ vpaddd zmm0, zmm0, zmm30
+ vpaddd zmm1, zmm1, zmm18
+ vpaddd zmm2, zmm2, zmm19
+ vpaddd zmm3, zmm3, zmm23
+ vpaddd zmm0, zmm0, zmm5
+ vpaddd zmm1, zmm1, zmm6
+ vpaddd zmm2, zmm2, zmm7
+ vpaddd zmm3, zmm3, zmm4
+ vpxord zmm15, zmm15, zmm0
+ vpxord zmm12, zmm12, zmm1
+ vpxord zmm13, zmm13, zmm2
+ vpxord zmm14, zmm14, zmm3
+ vprord zmm15, zmm15, 16
+ vprord zmm12, zmm12, 16
+ vprord zmm13, zmm13, 16
+ vprord zmm14, zmm14, 16
+ vpaddd zmm10, zmm10, zmm15
+ vpaddd zmm11, zmm11, zmm12
+ vpaddd zmm8, zmm8, zmm13
+ vpaddd zmm9, zmm9, zmm14
+ vpxord zmm5, zmm5, zmm10
+ vpxord zmm6, zmm6, zmm11
+ vpxord zmm7, zmm7, zmm8
+ vpxord zmm4, zmm4, zmm9
+ vprord zmm5, zmm5, 12
+ vprord zmm6, zmm6, 12
+ vprord zmm7, zmm7, 12
+ vprord zmm4, zmm4, 12
+ vpaddd zmm0, zmm0, zmm26
+ vpaddd zmm1, zmm1, zmm28
+ vpaddd zmm2, zmm2, zmm20
+ vpaddd zmm3, zmm3, zmm29
+ vpaddd zmm0, zmm0, zmm5
+ vpaddd zmm1, zmm1, zmm6
+ vpaddd zmm2, zmm2, zmm7
+ vpaddd zmm3, zmm3, zmm4
+ vpxord zmm15, zmm15, zmm0
+ vpxord zmm12, zmm12, zmm1
+ vpxord zmm13, zmm13, zmm2
+ vpxord zmm14, zmm14, zmm3
+ vprord zmm15, zmm15, 8
+ vprord zmm12, zmm12, 8
+ vprord zmm13, zmm13, 8
+ vprord zmm14, zmm14, 8
+ vpaddd zmm10, zmm10, zmm15
+ vpaddd zmm11, zmm11, zmm12
+ vpaddd zmm8, zmm8, zmm13
+ vpaddd zmm9, zmm9, zmm14
+ vpxord zmm5, zmm5, zmm10
+ vpxord zmm6, zmm6, zmm11
+ vpxord zmm7, zmm7, zmm8
+ vpxord zmm4, zmm4, zmm9
+ vprord zmm5, zmm5, 7
+ vprord zmm6, zmm6, 7
+ vprord zmm7, zmm7, 7
+ vprord zmm4, zmm4, 7
+ vpxord zmm0, zmm0, zmm8
+ vpxord zmm1, zmm1, zmm9
+ vpxord zmm2, zmm2, zmm10
+ vpxord zmm3, zmm3, zmm11
+ vpxord zmm4, zmm4, zmm12
+ vpxord zmm5, zmm5, zmm13
+ vpxord zmm6, zmm6, zmm14
+ vpxord zmm7, zmm7, zmm15
+ movzx eax, byte ptr [rbp+78H]
+ jne innerloop16
+ mov rbx, qword ptr [rbp+90H]
+ vpunpckldq zmm16, zmm0, zmm1
+ vpunpckhdq zmm17, zmm0, zmm1
+ vpunpckldq zmm18, zmm2, zmm3
+ vpunpckhdq zmm19, zmm2, zmm3
+ vpunpckldq zmm20, zmm4, zmm5
+ vpunpckhdq zmm21, zmm4, zmm5
+ vpunpckldq zmm22, zmm6, zmm7
+ vpunpckhdq zmm23, zmm6, zmm7
+ vpunpcklqdq zmm0, zmm16, zmm18
+ vpunpckhqdq zmm1, zmm16, zmm18
+ vpunpcklqdq zmm2, zmm17, zmm19
+ vpunpckhqdq zmm3, zmm17, zmm19
+ vpunpcklqdq zmm4, zmm20, zmm22
+ vpunpckhqdq zmm5, zmm20, zmm22
+ vpunpcklqdq zmm6, zmm21, zmm23
+ vpunpckhqdq zmm7, zmm21, zmm23
+ vshufi32x4 zmm16, zmm0, zmm4, 88H
+ vshufi32x4 zmm17, zmm1, zmm5, 88H
+ vshufi32x4 zmm18, zmm2, zmm6, 88H
+ vshufi32x4 zmm19, zmm3, zmm7, 88H
+ vshufi32x4 zmm20, zmm0, zmm4, 0DDH
+ vshufi32x4 zmm21, zmm1, zmm5, 0DDH
+ vshufi32x4 zmm22, zmm2, zmm6, 0DDH
+ vshufi32x4 zmm23, zmm3, zmm7, 0DDH
+ vshufi32x4 zmm0, zmm16, zmm17, 88H
+ vshufi32x4 zmm1, zmm18, zmm19, 88H
+ vshufi32x4 zmm2, zmm20, zmm21, 88H
+ vshufi32x4 zmm3, zmm22, zmm23, 88H
+ vshufi32x4 zmm4, zmm16, zmm17, 0DDH
+ vshufi32x4 zmm5, zmm18, zmm19, 0DDH
+ vshufi32x4 zmm6, zmm20, zmm21, 0DDH
+ vshufi32x4 zmm7, zmm22, zmm23, 0DDH
+ vmovdqu32 zmmword ptr [rbx], zmm0
+ vmovdqu32 zmmword ptr [rbx+1H*40H], zmm1
+ vmovdqu32 zmmword ptr [rbx+2H*40H], zmm2
+ vmovdqu32 zmmword ptr [rbx+3H*40H], zmm3
+ vmovdqu32 zmmword ptr [rbx+4H*40H], zmm4
+ vmovdqu32 zmmword ptr [rbx+5H*40H], zmm5
+ vmovdqu32 zmmword ptr [rbx+6H*40H], zmm6
+ vmovdqu32 zmmword ptr [rbx+7H*40H], zmm7
+ vmovdqa32 zmm0, zmmword ptr [rsp]
+ vmovdqa32 zmm1, zmmword ptr [rsp+1H*40H]
+ vmovdqa32 zmm2, zmm0
+ ; XXX: ml64.exe does not currently understand the syntax. We use a workaround.
+ vpbroadcastd zmm4, dword ptr [ADD16]
+ vpbroadcastd zmm5, dword ptr [ADD1]
+ vpaddd zmm2{k1}, zmm0, zmm4
+ ; vpaddd zmm2{k1}, zmm0, dword ptr [ADD16] ; {1to16}
+ vpcmpud k2, zmm2, zmm0, 1
+ vpaddd zmm1 {k2}, zmm1, zmm5
+ ; vpaddd zmm1 {k2}, zmm1, dword ptr [ADD1] ; {1to16}
+ vmovdqa32 zmmword ptr [rsp], zmm2
+ vmovdqa32 zmmword ptr [rsp+1H*40H], zmm1
+ add rdi, 128
+ add rbx, 512
+ mov qword ptr [rbp+90H], rbx
+ sub rsi, 16
+ cmp rsi, 16
+ jnc outerloop16
+ test rsi, rsi
+ jne final15blocks
+unwind:
+ vzeroupper
+ vmovdqa xmm6, xmmword ptr [rsp+90H]
+ vmovdqa xmm7, xmmword ptr [rsp+0A0H]
+ vmovdqa xmm8, xmmword ptr [rsp+0B0H]
+ vmovdqa xmm9, xmmword ptr [rsp+0C0H]
+ vmovdqa xmm10, xmmword ptr [rsp+0D0H]
+ vmovdqa xmm11, xmmword ptr [rsp+0E0H]
+ vmovdqa xmm12, xmmword ptr [rsp+0F0H]
+ vmovdqa xmm13, xmmword ptr [rsp+100H]
+ vmovdqa xmm14, xmmword ptr [rsp+110H]
+ vmovdqa xmm15, xmmword ptr [rsp+120H]
+ mov rsp, rbp
+ pop rbp
+ pop rbx
+ pop rsi
+ pop rdi
+ pop r12
+ pop r13
+ pop r14
+ pop r15
+ ret
+ALIGN 16
+final15blocks:
+ test esi, 8H
+ je final7blocks
+ vpbroadcastd ymm0, dword ptr [rcx]
+ vpbroadcastd ymm1, dword ptr [rcx+4H]
+ vpbroadcastd ymm2, dword ptr [rcx+8H]
+ vpbroadcastd ymm3, dword ptr [rcx+0CH]
+ vpbroadcastd ymm4, dword ptr [rcx+10H]
+ vpbroadcastd ymm5, dword ptr [rcx+14H]
+ vpbroadcastd ymm6, dword ptr [rcx+18H]
+ vpbroadcastd ymm7, dword ptr [rcx+1CH]
+ mov r8, qword ptr [rdi]
+ mov r9, qword ptr [rdi+8H]
+ mov r10, qword ptr [rdi+10H]
+ mov r11, qword ptr [rdi+18H]
+ mov r12, qword ptr [rdi+20H]
+ mov r13, qword ptr [rdi+28H]
+ mov r14, qword ptr [rdi+30H]
+ mov r15, qword ptr [rdi+38H]
+ movzx eax, byte ptr [rbp+78H]
+ movzx ebx, byte ptr [rbp+80H]
+ or eax, ebx
+ xor edx, edx
+innerloop8:
+ movzx ebx, byte ptr [rbp+88H]
+ or ebx, eax
+ add rdx, 64
+ cmp rdx, qword ptr [rsp+80H]
+ cmove eax, ebx
+ mov dword ptr [rsp+88H], eax
+ vmovups xmm8, xmmword ptr [r8+rdx-40H]
+ vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-40H], 01H
+ vmovups xmm9, xmmword ptr [r9+rdx-40H]
+ vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-40H], 01H
+ vunpcklpd ymm12, ymm8, ymm9
+ vunpckhpd ymm13, ymm8, ymm9
+ vmovups xmm10, xmmword ptr [r10+rdx-40H]
+ vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-40H], 01H
+ vmovups xmm11, xmmword ptr [r11+rdx-40H]
+ vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-40H], 01H
+ vunpcklpd ymm14, ymm10, ymm11
+ vunpckhpd ymm15, ymm10, ymm11
+ vshufps ymm16, ymm12, ymm14, 136
+ vshufps ymm17, ymm12, ymm14, 221
+ vshufps ymm18, ymm13, ymm15, 136
+ vshufps ymm19, ymm13, ymm15, 221
+ vmovups xmm8, xmmword ptr [r8+rdx-30H]
+ vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-30H], 01H
+ vmovups xmm9, xmmword ptr [r9+rdx-30H]
+ vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-30H], 01H
+ vunpcklpd ymm12, ymm8, ymm9
+ vunpckhpd ymm13, ymm8, ymm9
+ vmovups xmm10, xmmword ptr [r10+rdx-30H]
+ vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-30H], 01H
+ vmovups xmm11, xmmword ptr [r11+rdx-30H]
+ vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-30H], 01H
+ vunpcklpd ymm14, ymm10, ymm11
+ vunpckhpd ymm15, ymm10, ymm11
+ vshufps ymm20, ymm12, ymm14, 136
+ vshufps ymm21, ymm12, ymm14, 221
+ vshufps ymm22, ymm13, ymm15, 136
+ vshufps ymm23, ymm13, ymm15, 221
+ vmovups xmm8, xmmword ptr [r8+rdx-20H]
+ vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-20H], 01H
+ vmovups xmm9, xmmword ptr [r9+rdx-20H]
+ vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-20H], 01H
+ vunpcklpd ymm12, ymm8, ymm9
+ vunpckhpd ymm13, ymm8, ymm9
+ vmovups xmm10, xmmword ptr [r10+rdx-20H]
+ vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-20H], 01H
+ vmovups xmm11, xmmword ptr [r11+rdx-20H]
+ vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-20H], 01H
+ vunpcklpd ymm14, ymm10, ymm11
+ vunpckhpd ymm15, ymm10, ymm11
+ vshufps ymm24, ymm12, ymm14, 136
+ vshufps ymm25, ymm12, ymm14, 221
+ vshufps ymm26, ymm13, ymm15, 136
+ vshufps ymm27, ymm13, ymm15, 221
+ vmovups xmm8, xmmword ptr [r8+rdx-10H]
+ vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-10H], 01H
+ vmovups xmm9, xmmword ptr [r9+rdx-10H]
+ vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-10H], 01H
+ vunpcklpd ymm12, ymm8, ymm9
+ vunpckhpd ymm13, ymm8, ymm9
+ vmovups xmm10, xmmword ptr [r10+rdx-10H]
+ vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-10H], 01H
+ vmovups xmm11, xmmword ptr [r11+rdx-10H]
+ vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-10H], 01H
+ vunpcklpd ymm14, ymm10, ymm11
+ vunpckhpd ymm15, ymm10, ymm11
+ vshufps ymm28, ymm12, ymm14, 136
+ vshufps ymm29, ymm12, ymm14, 221
+ vshufps ymm30, ymm13, ymm15, 136
+ vshufps ymm31, ymm13, ymm15, 221
+ vpbroadcastd ymm8, dword ptr [BLAKE3_IV_0]
+ vpbroadcastd ymm9, dword ptr [BLAKE3_IV_1]
+ vpbroadcastd ymm10, dword ptr [BLAKE3_IV_2]
+ vpbroadcastd ymm11, dword ptr [BLAKE3_IV_3]
+ vmovdqa ymm12, ymmword ptr [rsp]
+ vmovdqa ymm13, ymmword ptr [rsp+40H]
+ vpbroadcastd ymm14, dword ptr [BLAKE3_BLOCK_LEN]
+ vpbroadcastd ymm15, dword ptr [rsp+88H]
+ vpaddd ymm0, ymm0, ymm16
+ vpaddd ymm1, ymm1, ymm18
+ vpaddd ymm2, ymm2, ymm20
+ vpaddd ymm3, ymm3, ymm22
+ vpaddd ymm0, ymm0, ymm4
+ vpaddd ymm1, ymm1, ymm5
+ vpaddd ymm2, ymm2, ymm6
+ vpaddd ymm3, ymm3, ymm7
+ vpxord ymm12, ymm12, ymm0
+ vpxord ymm13, ymm13, ymm1
+ vpxord ymm14, ymm14, ymm2
+ vpxord ymm15, ymm15, ymm3
+ vprord ymm12, ymm12, 16
+ vprord ymm13, ymm13, 16
+ vprord ymm14, ymm14, 16
+ vprord ymm15, ymm15, 16
+ vpaddd ymm8, ymm8, ymm12
+ vpaddd ymm9, ymm9, ymm13
+ vpaddd ymm10, ymm10, ymm14
+ vpaddd ymm11, ymm11, ymm15
+ vpxord ymm4, ymm4, ymm8
+ vpxord ymm5, ymm5, ymm9
+ vpxord ymm6, ymm6, ymm10
+ vpxord ymm7, ymm7, ymm11
+ vprord ymm4, ymm4, 12
+ vprord ymm5, ymm5, 12
+ vprord ymm6, ymm6, 12
+ vprord ymm7, ymm7, 12
+ vpaddd ymm0, ymm0, ymm17
+ vpaddd ymm1, ymm1, ymm19
+ vpaddd ymm2, ymm2, ymm21
+ vpaddd ymm3, ymm3, ymm23
+ vpaddd ymm0, ymm0, ymm4
+ vpaddd ymm1, ymm1, ymm5
+ vpaddd ymm2, ymm2, ymm6
+ vpaddd ymm3, ymm3, ymm7
+ vpxord ymm12, ymm12, ymm0
+ vpxord ymm13, ymm13, ymm1
+ vpxord ymm14, ymm14, ymm2
+ vpxord ymm15, ymm15, ymm3
+ vprord ymm12, ymm12, 8
+ vprord ymm13, ymm13, 8
+ vprord ymm14, ymm14, 8
+ vprord ymm15, ymm15, 8
+ vpaddd ymm8, ymm8, ymm12
+ vpaddd ymm9, ymm9, ymm13
+ vpaddd ymm10, ymm10, ymm14
+ vpaddd ymm11, ymm11, ymm15
+ vpxord ymm4, ymm4, ymm8
+ vpxord ymm5, ymm5, ymm9
+ vpxord ymm6, ymm6, ymm10
+ vpxord ymm7, ymm7, ymm11
+ vprord ymm4, ymm4, 7
+ vprord ymm5, ymm5, 7
+ vprord ymm6, ymm6, 7
+ vprord ymm7, ymm7, 7
+ vpaddd ymm0, ymm0, ymm24
+ vpaddd ymm1, ymm1, ymm26
+ vpaddd ymm2, ymm2, ymm28
+ vpaddd ymm3, ymm3, ymm30
+ vpaddd ymm0, ymm0, ymm5
+ vpaddd ymm1, ymm1, ymm6
+ vpaddd ymm2, ymm2, ymm7
+ vpaddd ymm3, ymm3, ymm4
+ vpxord ymm15, ymm15, ymm0
+ vpxord ymm12, ymm12, ymm1
+ vpxord ymm13, ymm13, ymm2
+ vpxord ymm14, ymm14, ymm3
+ vprord ymm15, ymm15, 16
+ vprord ymm12, ymm12, 16
+ vprord ymm13, ymm13, 16
+ vprord ymm14, ymm14, 16
+ vpaddd ymm10, ymm10, ymm15
+ vpaddd ymm11, ymm11, ymm12
+ vpaddd ymm8, ymm8, ymm13
+ vpaddd ymm9, ymm9, ymm14
+ vpxord ymm5, ymm5, ymm10
+ vpxord ymm6, ymm6, ymm11
+ vpxord ymm7, ymm7, ymm8
+ vpxord ymm4, ymm4, ymm9
+ vprord ymm5, ymm5, 12
+ vprord ymm6, ymm6, 12
+ vprord ymm7, ymm7, 12
+ vprord ymm4, ymm4, 12
+ vpaddd ymm0, ymm0, ymm25
+ vpaddd ymm1, ymm1, ymm27
+ vpaddd ymm2, ymm2, ymm29
+ vpaddd ymm3, ymm3, ymm31
+ vpaddd ymm0, ymm0, ymm5
+ vpaddd ymm1, ymm1, ymm6
+ vpaddd ymm2, ymm2, ymm7
+ vpaddd ymm3, ymm3, ymm4
+ vpxord ymm15, ymm15, ymm0
+ vpxord ymm12, ymm12, ymm1
+ vpxord ymm13, ymm13, ymm2
+ vpxord ymm14, ymm14, ymm3
+ vprord ymm15, ymm15, 8
+ vprord ymm12, ymm12, 8
+ vprord ymm13, ymm13, 8
+ vprord ymm14, ymm14, 8
+ vpaddd ymm10, ymm10, ymm15
+ vpaddd ymm11, ymm11, ymm12
+ vpaddd ymm8, ymm8, ymm13
+ vpaddd ymm9, ymm9, ymm14
+ vpxord ymm5, ymm5, ymm10
+ vpxord ymm6, ymm6, ymm11
+ vpxord ymm7, ymm7, ymm8
+ vpxord ymm4, ymm4, ymm9
+ vprord ymm5, ymm5, 7
+ vprord ymm6, ymm6, 7
+ vprord ymm7, ymm7, 7
+ vprord ymm4, ymm4, 7
+ vpaddd ymm0, ymm0, ymm18
+ vpaddd ymm1, ymm1, ymm19
+ vpaddd ymm2, ymm2, ymm23
+ vpaddd ymm3, ymm3, ymm20
+ vpaddd ymm0, ymm0, ymm4
+ vpaddd ymm1, ymm1, ymm5
+ vpaddd ymm2, ymm2, ymm6
+ vpaddd ymm3, ymm3, ymm7
+ vpxord ymm12, ymm12, ymm0
+ vpxord ymm13, ymm13, ymm1
+ vpxord ymm14, ymm14, ymm2
+ vpxord ymm15, ymm15, ymm3
+ vprord ymm12, ymm12, 16
+ vprord ymm13, ymm13, 16
+ vprord ymm14, ymm14, 16
+ vprord ymm15, ymm15, 16
+ vpaddd ymm8, ymm8, ymm12
+ vpaddd ymm9, ymm9, ymm13
+ vpaddd ymm10, ymm10, ymm14
+ vpaddd ymm11, ymm11, ymm15
+ vpxord ymm4, ymm4, ymm8
+ vpxord ymm5, ymm5, ymm9
+ vpxord ymm6, ymm6, ymm10
+ vpxord ymm7, ymm7, ymm11
+ vprord ymm4, ymm4, 12
+ vprord ymm5, ymm5, 12
+ vprord ymm6, ymm6, 12
+ vprord ymm7, ymm7, 12
+ vpaddd ymm0, ymm0, ymm22
+ vpaddd ymm1, ymm1, ymm26
+ vpaddd ymm2, ymm2, ymm16
+ vpaddd ymm3, ymm3, ymm29
+ vpaddd ymm0, ymm0, ymm4
+ vpaddd ymm1, ymm1, ymm5
+ vpaddd ymm2, ymm2, ymm6
+ vpaddd ymm3, ymm3, ymm7
+ vpxord ymm12, ymm12, ymm0
+ vpxord ymm13, ymm13, ymm1
+ vpxord ymm14, ymm14, ymm2
+ vpxord ymm15, ymm15, ymm3
+ vprord ymm12, ymm12, 8
+ vprord ymm13, ymm13, 8
+ vprord ymm14, ymm14, 8
+ vprord ymm15, ymm15, 8
+ vpaddd ymm8, ymm8, ymm12
+ vpaddd ymm9, ymm9, ymm13
+ vpaddd ymm10, ymm10, ymm14
+ vpaddd ymm11, ymm11, ymm15
+ vpxord ymm4, ymm4, ymm8
+ vpxord ymm5, ymm5, ymm9
+ vpxord ymm6, ymm6, ymm10
+ vpxord ymm7, ymm7, ymm11
+ vprord ymm4, ymm4, 7
+ vprord ymm5, ymm5, 7
+ vprord ymm6, ymm6, 7
+ vprord ymm7, ymm7, 7
+ vpaddd ymm0, ymm0, ymm17
+ vpaddd ymm1, ymm1, ymm28
+ vpaddd ymm2, ymm2, ymm25
+ vpaddd ymm3, ymm3, ymm31
+ vpaddd ymm0, ymm0, ymm5
+ vpaddd ymm1, ymm1, ymm6
+ vpaddd ymm2, ymm2, ymm7
+ vpaddd ymm3, ymm3, ymm4
+ vpxord ymm15, ymm15, ymm0
+ vpxord ymm12, ymm12, ymm1
+ vpxord ymm13, ymm13, ymm2
+ vpxord ymm14, ymm14, ymm3
+ vprord ymm15, ymm15, 16
+ vprord ymm12, ymm12, 16
+ vprord ymm13, ymm13, 16
+ vprord ymm14, ymm14, 16
+ vpaddd ymm10, ymm10, ymm15
+ vpaddd ymm11, ymm11, ymm12
+ vpaddd ymm8, ymm8, ymm13
+ vpaddd ymm9, ymm9, ymm14
+ vpxord ymm5, ymm5, ymm10
+ vpxord ymm6, ymm6, ymm11
+ vpxord ymm7, ymm7, ymm8
+ vpxord ymm4, ymm4, ymm9
+ vprord ymm5, ymm5, 12
+ vprord ymm6, ymm6, 12
+ vprord ymm7, ymm7, 12
+ vprord ymm4, ymm4, 12
+ vpaddd ymm0, ymm0, ymm27
+ vpaddd ymm1, ymm1, ymm21
+ vpaddd ymm2, ymm2, ymm30
+ vpaddd ymm3, ymm3, ymm24
+ vpaddd ymm0, ymm0, ymm5
+ vpaddd ymm1, ymm1, ymm6
+ vpaddd ymm2, ymm2, ymm7
+ vpaddd ymm3, ymm3, ymm4
+ vpxord ymm15, ymm15, ymm0
+ vpxord ymm12, ymm12, ymm1
+ vpxord ymm13, ymm13, ymm2
+ vpxord ymm14, ymm14, ymm3
+ vprord ymm15, ymm15, 8
+ vprord ymm12, ymm12, 8
+ vprord ymm13, ymm13, 8
+ vprord ymm14, ymm14, 8
+ vpaddd ymm10, ymm10, ymm15
+ vpaddd ymm11, ymm11, ymm12
+ vpaddd ymm8, ymm8, ymm13
+ vpaddd ymm9, ymm9, ymm14
+ vpxord ymm5, ymm5, ymm10
+ vpxord ymm6, ymm6, ymm11
+ vpxord ymm7, ymm7, ymm8
+ vpxord ymm4, ymm4, ymm9
+ vprord ymm5, ymm5, 7
+ vprord ymm6, ymm6, 7
+ vprord ymm7, ymm7, 7
+ vprord ymm4, ymm4, 7
+ vpaddd ymm0, ymm0, ymm19
+ vpaddd ymm1, ymm1, ymm26
+ vpaddd ymm2, ymm2, ymm29
+ vpaddd ymm3, ymm3, ymm23
+ vpaddd ymm0, ymm0, ymm4
+ vpaddd ymm1, ymm1, ymm5
+ vpaddd ymm2, ymm2, ymm6
+ vpaddd ymm3, ymm3, ymm7
+ vpxord ymm12, ymm12, ymm0
+ vpxord ymm13, ymm13, ymm1
+ vpxord ymm14, ymm14, ymm2
+ vpxord ymm15, ymm15, ymm3
+ vprord ymm12, ymm12, 16
+ vprord ymm13, ymm13, 16
+ vprord ymm14, ymm14, 16
+ vprord ymm15, ymm15, 16
+ vpaddd ymm8, ymm8, ymm12
+ vpaddd ymm9, ymm9, ymm13
+ vpaddd ymm10, ymm10, ymm14
+ vpaddd ymm11, ymm11, ymm15
+ vpxord ymm4, ymm4, ymm8
+ vpxord ymm5, ymm5, ymm9
+ vpxord ymm6, ymm6, ymm10
+ vpxord ymm7, ymm7, ymm11
+ vprord ymm4, ymm4, 12
+ vprord ymm5, ymm5, 12
+ vprord ymm6, ymm6, 12
+ vprord ymm7, ymm7, 12
+ vpaddd ymm0, ymm0, ymm20
+ vpaddd ymm1, ymm1, ymm28
+ vpaddd ymm2, ymm2, ymm18
+ vpaddd ymm3, ymm3, ymm30
+ vpaddd ymm0, ymm0, ymm4
+ vpaddd ymm1, ymm1, ymm5
+ vpaddd ymm2, ymm2, ymm6
+ vpaddd ymm3, ymm3, ymm7
+ vpxord ymm12, ymm12, ymm0
+ vpxord ymm13, ymm13, ymm1
+ vpxord ymm14, ymm14, ymm2
+ vpxord ymm15, ymm15, ymm3
+ vprord ymm12, ymm12, 8
+ vprord ymm13, ymm13, 8
+ vprord ymm14, ymm14, 8
+ vprord ymm15, ymm15, 8
+ vpaddd ymm8, ymm8, ymm12
+ vpaddd ymm9, ymm9, ymm13
+ vpaddd ymm10, ymm10, ymm14
+ vpaddd ymm11, ymm11, ymm15
+ vpxord ymm4, ymm4, ymm8
+ vpxord ymm5, ymm5, ymm9
+ vpxord ymm6, ymm6, ymm10
+ vpxord ymm7, ymm7, ymm11
+ vprord ymm4, ymm4, 7
+ vprord ymm5, ymm5, 7
+ vprord ymm6, ymm6, 7
+ vprord ymm7, ymm7, 7
+ vpaddd ymm0, ymm0, ymm22
+ vpaddd ymm1, ymm1, ymm25
+ vpaddd ymm2, ymm2, ymm27
+ vpaddd ymm3, ymm3, ymm24
+ vpaddd ymm0, ymm0, ymm5
+ vpaddd ymm1, ymm1, ymm6
+ vpaddd ymm2, ymm2, ymm7
+ vpaddd ymm3, ymm3, ymm4
+ vpxord ymm15, ymm15, ymm0
+ vpxord ymm12, ymm12, ymm1
+ vpxord ymm13, ymm13, ymm2
+ vpxord ymm14, ymm14, ymm3
+ vprord ymm15, ymm15, 16
+ vprord ymm12, ymm12, 16
+ vprord ymm13, ymm13, 16
+ vprord ymm14, ymm14, 16
+ vpaddd ymm10, ymm10, ymm15
+ vpaddd ymm11, ymm11, ymm12
+ vpaddd ymm8, ymm8, ymm13
+ vpaddd ymm9, ymm9, ymm14
+ vpxord ymm5, ymm5, ymm10
+ vpxord ymm6, ymm6, ymm11
+ vpxord ymm7, ymm7, ymm8
+ vpxord ymm4, ymm4, ymm9
+ vprord ymm5, ymm5, 12
+ vprord ymm6, ymm6, 12
+ vprord ymm7, ymm7, 12
+ vprord ymm4, ymm4, 12
+ vpaddd ymm0, ymm0, ymm21
+ vpaddd ymm1, ymm1, ymm16
+ vpaddd ymm2, ymm2, ymm31
+ vpaddd ymm3, ymm3, ymm17
+ vpaddd ymm0, ymm0, ymm5
+ vpaddd ymm1, ymm1, ymm6
+ vpaddd ymm2, ymm2, ymm7
+ vpaddd ymm3, ymm3, ymm4
+ vpxord ymm15, ymm15, ymm0
+ vpxord ymm12, ymm12, ymm1
+ vpxord ymm13, ymm13, ymm2
+ vpxord ymm14, ymm14, ymm3
+ vprord ymm15, ymm15, 8
+ vprord ymm12, ymm12, 8
+ vprord ymm13, ymm13, 8
+ vprord ymm14, ymm14, 8
+ vpaddd ymm10, ymm10, ymm15
+ vpaddd ymm11, ymm11, ymm12
+ vpaddd ymm8, ymm8, ymm13
+ vpaddd ymm9, ymm9, ymm14
+ vpxord ymm5, ymm5, ymm10
+ vpxord ymm6, ymm6, ymm11
+ vpxord ymm7, ymm7, ymm8
+ vpxord ymm4, ymm4, ymm9
+ vprord ymm5, ymm5, 7
+ vprord ymm6, ymm6, 7
+ vprord ymm7, ymm7, 7
+ vprord ymm4, ymm4, 7
+ vpaddd ymm0, ymm0, ymm26
+ vpaddd ymm1, ymm1, ymm28
+ vpaddd ymm2, ymm2, ymm30
+ vpaddd ymm3, ymm3, ymm29
+ vpaddd ymm0, ymm0, ymm4
+ vpaddd ymm1, ymm1, ymm5
+ vpaddd ymm2, ymm2, ymm6
+ vpaddd ymm3, ymm3, ymm7
+ vpxord ymm12, ymm12, ymm0
+ vpxord ymm13, ymm13, ymm1
+ vpxord ymm14, ymm14, ymm2
+ vpxord ymm15, ymm15, ymm3
+ vprord ymm12, ymm12, 16
+ vprord ymm13, ymm13, 16
+ vprord ymm14, ymm14, 16
+ vprord ymm15, ymm15, 16
+ vpaddd ymm8, ymm8, ymm12
+ vpaddd ymm9, ymm9, ymm13
+ vpaddd ymm10, ymm10, ymm14
+ vpaddd ymm11, ymm11, ymm15
+ vpxord ymm4, ymm4, ymm8
+ vpxord ymm5, ymm5, ymm9
+ vpxord ymm6, ymm6, ymm10
+ vpxord ymm7, ymm7, ymm11
+ vprord ymm4, ymm4, 12
+ vprord ymm5, ymm5, 12
+ vprord ymm6, ymm6, 12
+ vprord ymm7, ymm7, 12
+ vpaddd ymm0, ymm0, ymm23
+ vpaddd ymm1, ymm1, ymm25
+ vpaddd ymm2, ymm2, ymm19
+ vpaddd ymm3, ymm3, ymm31
+ vpaddd ymm0, ymm0, ymm4
+ vpaddd ymm1, ymm1, ymm5
+ vpaddd ymm2, ymm2, ymm6
+ vpaddd ymm3, ymm3, ymm7
+ vpxord ymm12, ymm12, ymm0
+ vpxord ymm13, ymm13, ymm1
+ vpxord ymm14, ymm14, ymm2
+ vpxord ymm15, ymm15, ymm3
+ vprord ymm12, ymm12, 8
+ vprord ymm13, ymm13, 8
+ vprord ymm14, ymm14, 8
+ vprord ymm15, ymm15, 8
+ vpaddd ymm8, ymm8, ymm12
+ vpaddd ymm9, ymm9, ymm13
+ vpaddd ymm10, ymm10, ymm14
+ vpaddd ymm11, ymm11, ymm15
+ vpxord ymm4, ymm4, ymm8
+ vpxord ymm5, ymm5, ymm9
+ vpxord ymm6, ymm6, ymm10
+ vpxord ymm7, ymm7, ymm11
+ vprord ymm4, ymm4, 7
+ vprord ymm5, ymm5, 7
+ vprord ymm6, ymm6, 7
+ vprord ymm7, ymm7, 7
+ vpaddd ymm0, ymm0, ymm20
+ vpaddd ymm1, ymm1, ymm27
+ vpaddd ymm2, ymm2, ymm21
+ vpaddd ymm3, ymm3, ymm17
+ vpaddd ymm0, ymm0, ymm5
+ vpaddd ymm1, ymm1, ymm6
+ vpaddd ymm2, ymm2, ymm7
+ vpaddd ymm3, ymm3, ymm4
+ vpxord ymm15, ymm15, ymm0
+ vpxord ymm12, ymm12, ymm1
+ vpxord ymm13, ymm13, ymm2
+ vpxord ymm14, ymm14, ymm3
+ vprord ymm15, ymm15, 16
+ vprord ymm12, ymm12, 16
+ vprord ymm13, ymm13, 16
+ vprord ymm14, ymm14, 16
+ vpaddd ymm10, ymm10, ymm15
+ vpaddd ymm11, ymm11, ymm12
+ vpaddd ymm8, ymm8, ymm13
+ vpaddd ymm9, ymm9, ymm14
+ vpxord ymm5, ymm5, ymm10
+ vpxord ymm6, ymm6, ymm11
+ vpxord ymm7, ymm7, ymm8
+ vpxord ymm4, ymm4, ymm9
+ vprord ymm5, ymm5, 12
+ vprord ymm6, ymm6, 12
+ vprord ymm7, ymm7, 12
+ vprord ymm4, ymm4, 12
+ vpaddd ymm0, ymm0, ymm16
+ vpaddd ymm1, ymm1, ymm18
+ vpaddd ymm2, ymm2, ymm24
+ vpaddd ymm3, ymm3, ymm22
+ vpaddd ymm0, ymm0, ymm5
+ vpaddd ymm1, ymm1, ymm6
+ vpaddd ymm2, ymm2, ymm7
+ vpaddd ymm3, ymm3, ymm4
+ vpxord ymm15, ymm15, ymm0
+ vpxord ymm12, ymm12, ymm1
+ vpxord ymm13, ymm13, ymm2
+ vpxord ymm14, ymm14, ymm3
+ vprord ymm15, ymm15, 8
+ vprord ymm12, ymm12, 8
+ vprord ymm13, ymm13, 8
+ vprord ymm14, ymm14, 8
+ vpaddd ymm10, ymm10, ymm15
+ vpaddd ymm11, ymm11, ymm12
+ vpaddd ymm8, ymm8, ymm13
+ vpaddd ymm9, ymm9, ymm14
+ vpxord ymm5, ymm5, ymm10
+ vpxord ymm6, ymm6, ymm11
+ vpxord ymm7, ymm7, ymm8
+ vpxord ymm4, ymm4, ymm9
+ vprord ymm5, ymm5, 7
+ vprord ymm6, ymm6, 7
+ vprord ymm7, ymm7, 7
+ vprord ymm4, ymm4, 7
+ vpaddd ymm0, ymm0, ymm28
+ vpaddd ymm1, ymm1, ymm25
+ vpaddd ymm2, ymm2, ymm31
+ vpaddd ymm3, ymm3, ymm30
+ vpaddd ymm0, ymm0, ymm4
+ vpaddd ymm1, ymm1, ymm5
+ vpaddd ymm2, ymm2, ymm6
+ vpaddd ymm3, ymm3, ymm7
+ vpxord ymm12, ymm12, ymm0
+ vpxord ymm13, ymm13, ymm1
+ vpxord ymm14, ymm14, ymm2
+ vpxord ymm15, ymm15, ymm3
+ vprord ymm12, ymm12, 16
+ vprord ymm13, ymm13, 16
+ vprord ymm14, ymm14, 16
+ vprord ymm15, ymm15, 16
+ vpaddd ymm8, ymm8, ymm12
+ vpaddd ymm9, ymm9, ymm13
+ vpaddd ymm10, ymm10, ymm14
+ vpaddd ymm11, ymm11, ymm15
+ vpxord ymm4, ymm4, ymm8
+ vpxord ymm5, ymm5, ymm9
+ vpxord ymm6, ymm6, ymm10
+ vpxord ymm7, ymm7, ymm11
+ vprord ymm4, ymm4, 12
+ vprord ymm5, ymm5, 12
+ vprord ymm6, ymm6, 12
+ vprord ymm7, ymm7, 12
+ vpaddd ymm0, ymm0, ymm29
+ vpaddd ymm1, ymm1, ymm27
+ vpaddd ymm2, ymm2, ymm26
+ vpaddd ymm3, ymm3, ymm24
+ vpaddd ymm0, ymm0, ymm4
+ vpaddd ymm1, ymm1, ymm5
+ vpaddd ymm2, ymm2, ymm6
+ vpaddd ymm3, ymm3, ymm7
+ vpxord ymm12, ymm12, ymm0
+ vpxord ymm13, ymm13, ymm1
+ vpxord ymm14, ymm14, ymm2
+ vpxord ymm15, ymm15, ymm3
+ vprord ymm12, ymm12, 8
+ vprord ymm13, ymm13, 8
+ vprord ymm14, ymm14, 8
+ vprord ymm15, ymm15, 8
+ vpaddd ymm8, ymm8, ymm12
+ vpaddd ymm9, ymm9, ymm13
+ vpaddd ymm10, ymm10, ymm14
+ vpaddd ymm11, ymm11, ymm15
+ vpxord ymm4, ymm4, ymm8
+ vpxord ymm5, ymm5, ymm9
+ vpxord ymm6, ymm6, ymm10
+ vpxord ymm7, ymm7, ymm11
+ vprord ymm4, ymm4, 7
+ vprord ymm5, ymm5, 7
+ vprord ymm6, ymm6, 7
+ vprord ymm7, ymm7, 7
+ vpaddd ymm0, ymm0, ymm23
+ vpaddd ymm1, ymm1, ymm21
+ vpaddd ymm2, ymm2, ymm16
+ vpaddd ymm3, ymm3, ymm22
+ vpaddd ymm0, ymm0, ymm5
+ vpaddd ymm1, ymm1, ymm6
+ vpaddd ymm2, ymm2, ymm7
+ vpaddd ymm3, ymm3, ymm4
+ vpxord ymm15, ymm15, ymm0
+ vpxord ymm12, ymm12, ymm1
+ vpxord ymm13, ymm13, ymm2
+ vpxord ymm14, ymm14, ymm3
+ vprord ymm15, ymm15, 16
+ vprord ymm12, ymm12, 16
+ vprord ymm13, ymm13, 16
+ vprord ymm14, ymm14, 16
+ vpaddd ymm10, ymm10, ymm15
+ vpaddd ymm11, ymm11, ymm12
+ vpaddd ymm8, ymm8, ymm13
+ vpaddd ymm9, ymm9, ymm14
+ vpxord ymm5, ymm5, ymm10
+ vpxord ymm6, ymm6, ymm11
+ vpxord ymm7, ymm7, ymm8
+ vpxord ymm4, ymm4, ymm9
+ vprord ymm5, ymm5, 12
+ vprord ymm6, ymm6, 12
+ vprord ymm7, ymm7, 12
+ vprord ymm4, ymm4, 12
+ vpaddd ymm0, ymm0, ymm18
+ vpaddd ymm1, ymm1, ymm19
+ vpaddd ymm2, ymm2, ymm17
+ vpaddd ymm3, ymm3, ymm20
+ vpaddd ymm0, ymm0, ymm5
+ vpaddd ymm1, ymm1, ymm6
+ vpaddd ymm2, ymm2, ymm7
+ vpaddd ymm3, ymm3, ymm4
+ vpxord ymm15, ymm15, ymm0
+ vpxord ymm12, ymm12, ymm1
+ vpxord ymm13, ymm13, ymm2
+ vpxord ymm14, ymm14, ymm3
+ vprord ymm15, ymm15, 8
+ vprord ymm12, ymm12, 8
+ vprord ymm13, ymm13, 8
+ vprord ymm14, ymm14, 8
+ vpaddd ymm10, ymm10, ymm15
+ vpaddd ymm11, ymm11, ymm12
+ vpaddd ymm8, ymm8, ymm13
+ vpaddd ymm9, ymm9, ymm14
+ vpxord ymm5, ymm5, ymm10
+ vpxord ymm6, ymm6, ymm11
+ vpxord ymm7, ymm7, ymm8
+ vpxord ymm4, ymm4, ymm9
+ vprord ymm5, ymm5, 7
+ vprord ymm6, ymm6, 7
+ vprord ymm7, ymm7, 7
+ vprord ymm4, ymm4, 7
+ vpaddd ymm0, ymm0, ymm25
+ vpaddd ymm1, ymm1, ymm27
+ vpaddd ymm2, ymm2, ymm24
+ vpaddd ymm3, ymm3, ymm31
+ vpaddd ymm0, ymm0, ymm4
+ vpaddd ymm1, ymm1, ymm5
+ vpaddd ymm2, ymm2, ymm6
+ vpaddd ymm3, ymm3, ymm7
+ vpxord ymm12, ymm12, ymm0
+ vpxord ymm13, ymm13, ymm1
+ vpxord ymm14, ymm14, ymm2
+ vpxord ymm15, ymm15, ymm3
+ vprord ymm12, ymm12, 16
+ vprord ymm13, ymm13, 16
+ vprord ymm14, ymm14, 16
+ vprord ymm15, ymm15, 16
+ vpaddd ymm8, ymm8, ymm12
+ vpaddd ymm9, ymm9, ymm13
+ vpaddd ymm10, ymm10, ymm14
+ vpaddd ymm11, ymm11, ymm15
+ vpxord ymm4, ymm4, ymm8
+ vpxord ymm5, ymm5, ymm9
+ vpxord ymm6, ymm6, ymm10
+ vpxord ymm7, ymm7, ymm11
+ vprord ymm4, ymm4, 12
+ vprord ymm5, ymm5, 12
+ vprord ymm6, ymm6, 12
+ vprord ymm7, ymm7, 12
+ vpaddd ymm0, ymm0, ymm30
+ vpaddd ymm1, ymm1, ymm21
+ vpaddd ymm2, ymm2, ymm28
+ vpaddd ymm3, ymm3, ymm17
+ vpaddd ymm0, ymm0, ymm4
+ vpaddd ymm1, ymm1, ymm5
+ vpaddd ymm2, ymm2, ymm6
+ vpaddd ymm3, ymm3, ymm7
+ vpxord ymm12, ymm12, ymm0
+ vpxord ymm13, ymm13, ymm1
+ vpxord ymm14, ymm14, ymm2
+ vpxord ymm15, ymm15, ymm3
+ vprord ymm12, ymm12, 8
+ vprord ymm13, ymm13, 8
+ vprord ymm14, ymm14, 8
+ vprord ymm15, ymm15, 8
+ vpaddd ymm8, ymm8, ymm12
+ vpaddd ymm9, ymm9, ymm13
+ vpaddd ymm10, ymm10, ymm14
+ vpaddd ymm11, ymm11, ymm15
+ vpxord ymm4, ymm4, ymm8
+ vpxord ymm5, ymm5, ymm9
+ vpxord ymm6, ymm6, ymm10
+ vpxord ymm7, ymm7, ymm11
+ vprord ymm4, ymm4, 7
+ vprord ymm5, ymm5, 7
+ vprord ymm6, ymm6, 7
+ vprord ymm7, ymm7, 7
+ vpaddd ymm0, ymm0, ymm29
+ vpaddd ymm1, ymm1, ymm16
+ vpaddd ymm2, ymm2, ymm18
+ vpaddd ymm3, ymm3, ymm20
+ vpaddd ymm0, ymm0, ymm5
+ vpaddd ymm1, ymm1, ymm6
+ vpaddd ymm2, ymm2, ymm7
+ vpaddd ymm3, ymm3, ymm4
+ vpxord ymm15, ymm15, ymm0
+ vpxord ymm12, ymm12, ymm1
+ vpxord ymm13, ymm13, ymm2
+ vpxord ymm14, ymm14, ymm3
+ vprord ymm15, ymm15, 16
+ vprord ymm12, ymm12, 16
+ vprord ymm13, ymm13, 16
+ vprord ymm14, ymm14, 16
+ vpaddd ymm10, ymm10, ymm15
+ vpaddd ymm11, ymm11, ymm12
+ vpaddd ymm8, ymm8, ymm13
+ vpaddd ymm9, ymm9, ymm14
+ vpxord ymm5, ymm5, ymm10
+ vpxord ymm6, ymm6, ymm11
+ vpxord ymm7, ymm7, ymm8
+ vpxord ymm4, ymm4, ymm9
+ vprord ymm5, ymm5, 12
+ vprord ymm6, ymm6, 12
+ vprord ymm7, ymm7, 12
+ vprord ymm4, ymm4, 12
+ vpaddd ymm0, ymm0, ymm19
+ vpaddd ymm1, ymm1, ymm26
+ vpaddd ymm2, ymm2, ymm22
+ vpaddd ymm3, ymm3, ymm23
+ vpaddd ymm0, ymm0, ymm5
+ vpaddd ymm1, ymm1, ymm6
+ vpaddd ymm2, ymm2, ymm7
+ vpaddd ymm3, ymm3, ymm4
+ vpxord ymm15, ymm15, ymm0
+ vpxord ymm12, ymm12, ymm1
+ vpxord ymm13, ymm13, ymm2
+ vpxord ymm14, ymm14, ymm3
+ vprord ymm15, ymm15, 8
+ vprord ymm12, ymm12, 8
+ vprord ymm13, ymm13, 8
+ vprord ymm14, ymm14, 8
+ vpaddd ymm10, ymm10, ymm15
+ vpaddd ymm11, ymm11, ymm12
+ vpaddd ymm8, ymm8, ymm13
+ vpaddd ymm9, ymm9, ymm14
+ vpxord ymm5, ymm5, ymm10
+ vpxord ymm6, ymm6, ymm11
+ vpxord ymm7, ymm7, ymm8
+ vpxord ymm4, ymm4, ymm9
+ vprord ymm5, ymm5, 7
+ vprord ymm6, ymm6, 7
+ vprord ymm7, ymm7, 7
+ vprord ymm4, ymm4, 7
+ vpaddd ymm0, ymm0, ymm27
+ vpaddd ymm1, ymm1, ymm21
+ vpaddd ymm2, ymm2, ymm17
+ vpaddd ymm3, ymm3, ymm24
+ vpaddd ymm0, ymm0, ymm4
+ vpaddd ymm1, ymm1, ymm5
+ vpaddd ymm2, ymm2, ymm6
+ vpaddd ymm3, ymm3, ymm7
+ vpxord ymm12, ymm12, ymm0
+ vpxord ymm13, ymm13, ymm1
+ vpxord ymm14, ymm14, ymm2
+ vpxord ymm15, ymm15, ymm3
+ vprord ymm12, ymm12, 16
+ vprord ymm13, ymm13, 16
+ vprord ymm14, ymm14, 16
+ vprord ymm15, ymm15, 16
+ vpaddd ymm8, ymm8, ymm12
+ vpaddd ymm9, ymm9, ymm13
+ vpaddd ymm10, ymm10, ymm14
+ vpaddd ymm11, ymm11, ymm15
+ vpxord ymm4, ymm4, ymm8
+ vpxord ymm5, ymm5, ymm9
+ vpxord ymm6, ymm6, ymm10
+ vpxord ymm7, ymm7, ymm11
+ vprord ymm4, ymm4, 12
+ vprord ymm5, ymm5, 12
+ vprord ymm6, ymm6, 12
+ vprord ymm7, ymm7, 12
+ vpaddd ymm0, ymm0, ymm31
+ vpaddd ymm1, ymm1, ymm16
+ vpaddd ymm2, ymm2, ymm25
+ vpaddd ymm3, ymm3, ymm22
+ vpaddd ymm0, ymm0, ymm4
+ vpaddd ymm1, ymm1, ymm5
+ vpaddd ymm2, ymm2, ymm6
+ vpaddd ymm3, ymm3, ymm7
+ vpxord ymm12, ymm12, ymm0
+ vpxord ymm13, ymm13, ymm1
+ vpxord ymm14, ymm14, ymm2
+ vpxord ymm15, ymm15, ymm3
+ vprord ymm12, ymm12, 8
+ vprord ymm13, ymm13, 8
+ vprord ymm14, ymm14, 8
+ vprord ymm15, ymm15, 8
+ vpaddd ymm8, ymm8, ymm12
+ vpaddd ymm9, ymm9, ymm13
+ vpaddd ymm10, ymm10, ymm14
+ vpaddd ymm11, ymm11, ymm15
+ vpxord ymm4, ymm4, ymm8
+ vpxord ymm5, ymm5, ymm9
+ vpxord ymm6, ymm6, ymm10
+ vpxord ymm7, ymm7, ymm11
+ vprord ymm4, ymm4, 7
+ vprord ymm5, ymm5, 7
+ vprord ymm6, ymm6, 7
+ vprord ymm7, ymm7, 7
+ vpaddd ymm0, ymm0, ymm30
+ vpaddd ymm1, ymm1, ymm18
+ vpaddd ymm2, ymm2, ymm19
+ vpaddd ymm3, ymm3, ymm23
+ vpaddd ymm0, ymm0, ymm5
+ vpaddd ymm1, ymm1, ymm6
+ vpaddd ymm2, ymm2, ymm7
+ vpaddd ymm3, ymm3, ymm4
+ vpxord ymm15, ymm15, ymm0
+ vpxord ymm12, ymm12, ymm1
+ vpxord ymm13, ymm13, ymm2
+ vpxord ymm14, ymm14, ymm3
+ vprord ymm15, ymm15, 16
+ vprord ymm12, ymm12, 16
+ vprord ymm13, ymm13, 16
+ vprord ymm14, ymm14, 16
+ vpaddd ymm10, ymm10, ymm15
+ vpaddd ymm11, ymm11, ymm12
+ vpaddd ymm8, ymm8, ymm13
+ vpaddd ymm9, ymm9, ymm14
+ vpxord ymm5, ymm5, ymm10
+ vpxord ymm6, ymm6, ymm11
+ vpxord ymm7, ymm7, ymm8
+ vpxord ymm4, ymm4, ymm9
+ vprord ymm5, ymm5, 12
+ vprord ymm6, ymm6, 12
+ vprord ymm7, ymm7, 12
+ vprord ymm4, ymm4, 12
+ vpaddd ymm0, ymm0, ymm26
+ vpaddd ymm1, ymm1, ymm28
+ vpaddd ymm2, ymm2, ymm20
+ vpaddd ymm3, ymm3, ymm29
+ vpaddd ymm0, ymm0, ymm5
+ vpaddd ymm1, ymm1, ymm6
+ vpaddd ymm2, ymm2, ymm7
+ vpaddd ymm3, ymm3, ymm4
+ vpxord ymm15, ymm15, ymm0
+ vpxord ymm12, ymm12, ymm1
+ vpxord ymm13, ymm13, ymm2
+ vpxord ymm14, ymm14, ymm3
+ vprord ymm15, ymm15, 8
+ vprord ymm12, ymm12, 8
+ vprord ymm13, ymm13, 8
+ vprord ymm14, ymm14, 8
+ vpaddd ymm10, ymm10, ymm15
+ vpaddd ymm11, ymm11, ymm12
+ vpaddd ymm8, ymm8, ymm13
+ vpaddd ymm9, ymm9, ymm14
+ vpxord ymm5, ymm5, ymm10
+ vpxord ymm6, ymm6, ymm11
+ vpxord ymm7, ymm7, ymm8
+ vpxord ymm4, ymm4, ymm9
+ vprord ymm5, ymm5, 7
+ vprord ymm6, ymm6, 7
+ vprord ymm7, ymm7, 7
+ vprord ymm4, ymm4, 7
+ vpxor ymm0, ymm0, ymm8
+ vpxor ymm1, ymm1, ymm9
+ vpxor ymm2, ymm2, ymm10
+ vpxor ymm3, ymm3, ymm11
+ vpxor ymm4, ymm4, ymm12
+ vpxor ymm5, ymm5, ymm13
+ vpxor ymm6, ymm6, ymm14
+ vpxor ymm7, ymm7, ymm15
+ movzx eax, byte ptr [rbp+78H]
+ jne innerloop8
+ mov rbx, qword ptr [rbp+90H]
+ vunpcklps ymm8, ymm0, ymm1
+ vunpcklps ymm9, ymm2, ymm3
+ vunpckhps ymm10, ymm0, ymm1
+ vunpcklps ymm11, ymm4, ymm5
+ vunpcklps ymm0, ymm6, ymm7
+ vshufps ymm12, ymm8, ymm9, 78
+ vblendps ymm1, ymm8, ymm12, 0CCH
+ vshufps ymm8, ymm11, ymm0, 78
+ vunpckhps ymm13, ymm2, ymm3
+ vblendps ymm2, ymm11, ymm8, 0CCH
+ vblendps ymm3, ymm12, ymm9, 0CCH
+ vperm2f128 ymm12, ymm1, ymm2, 20H
+ vmovups ymmword ptr [rbx], ymm12
+ vunpckhps ymm14, ymm4, ymm5
+ vblendps ymm4, ymm8, ymm0, 0CCH
+ vunpckhps ymm15, ymm6, ymm7
+ vperm2f128 ymm7, ymm3, ymm4, 20H
+ vmovups ymmword ptr [rbx+20H], ymm7
+ vshufps ymm5, ymm10, ymm13, 78
+ vblendps ymm6, ymm5, ymm13, 0CCH
+ vshufps ymm13, ymm14, ymm15, 78
+ vblendps ymm10, ymm10, ymm5, 0CCH
+ vblendps ymm14, ymm14, ymm13, 0CCH
+ vperm2f128 ymm8, ymm10, ymm14, 20H
+ vmovups ymmword ptr [rbx+40H], ymm8
+ vblendps ymm15, ymm13, ymm15, 0CCH
+ vperm2f128 ymm13, ymm6, ymm15, 20H
+ vmovups ymmword ptr [rbx+60H], ymm13
+ vperm2f128 ymm9, ymm1, ymm2, 31H
+ vperm2f128 ymm11, ymm3, ymm4, 31H
+ vmovups ymmword ptr [rbx+80H], ymm9
+ vperm2f128 ymm14, ymm10, ymm14, 31H
+ vperm2f128 ymm15, ymm6, ymm15, 31H
+ vmovups ymmword ptr [rbx+0A0H], ymm11
+ vmovups ymmword ptr [rbx+0C0H], ymm14
+ vmovups ymmword ptr [rbx+0E0H], ymm15
+ vmovdqa ymm0, ymmword ptr [rsp]
+ vmovdqa ymm2, ymmword ptr [rsp+40H]
+ vmovdqa32 ymm0 {k1}, ymmword ptr [rsp+1H*20H]
+ vmovdqa32 ymm2 {k1}, ymmword ptr [rsp+3H*20H]
+ vmovdqa ymmword ptr [rsp], ymm0
+ vmovdqa ymmword ptr [rsp+40H], ymm2
+ add rbx, 256
+ mov qword ptr [rbp+90H], rbx
+ add rdi, 64
+ sub rsi, 8
+final7blocks:
+ mov rbx, qword ptr [rbp+90H]
+ mov r15, qword ptr [rsp+80H]
+ movzx r13, byte ptr [rbp+78H]
+ movzx r12, byte ptr [rbp+88H]
+ test esi, 4H
+ je final3blocks
+ vbroadcasti32x4 zmm0, xmmword ptr [rcx]
+ vbroadcasti32x4 zmm1, xmmword ptr [rcx+1H*10H]
+ vmovdqa xmm12, xmmword ptr [rsp]
+ vmovdqa xmm13, xmmword ptr [rsp+40H]
+ vpunpckldq xmm14, xmm12, xmm13
+ vpunpckhdq xmm15, xmm12, xmm13
+ vpermq ymm14, ymm14, 0DCH
+ vpermq ymm15, ymm15, 0DCH
+ vpbroadcastd zmm12, dword ptr [BLAKE3_BLOCK_LEN]
+ vinserti64x4 zmm13, zmm14, ymm15, 01H
+ mov eax, 17476
+ kmovw k2, eax
+ vpblendmd zmm13 {k2}, zmm13, zmm12
+ vbroadcasti32x4 zmm15, xmmword ptr [BLAKE3_IV]
+ mov r8, qword ptr [rdi]
+ mov r9, qword ptr [rdi+8H]
+ mov r10, qword ptr [rdi+10H]
+ mov r11, qword ptr [rdi+18H]
+ mov eax, 43690
+ kmovw k3, eax
+ mov eax, 34952
+ kmovw k4, eax
+ movzx eax, byte ptr [rbp+80H]
+ or eax, r13d
+ xor edx, edx
+ALIGN 16
+innerloop4:
+ mov r14d, eax
+ or eax, r12d
+ add rdx, 64
+ cmp rdx, r15
+ cmovne eax, r14d
+ mov dword ptr [rsp+88H], eax
+ vmovdqa32 zmm2, zmm15
+ vpbroadcastd zmm8, dword ptr [rsp+22H*4H]
+ vpblendmd zmm3 {k4}, zmm13, zmm8
+ vmovups zmm8, zmmword ptr [r8+rdx-1H*40H]
+ vinserti32x4 zmm8, zmm8, xmmword ptr [r9+rdx-4H*10H], 01H
+ vinserti32x4 zmm8, zmm8, xmmword ptr [r10+rdx-4H*10H], 02H
+ vinserti32x4 zmm8, zmm8, xmmword ptr [r11+rdx-4H*10H], 03H
+ vmovups zmm9, zmmword ptr [r8+rdx-30H]
+ vinserti32x4 zmm9, zmm9, xmmword ptr [r9+rdx-3H*10H], 01H
+ vinserti32x4 zmm9, zmm9, xmmword ptr [r10+rdx-3H*10H], 02H
+ vinserti32x4 zmm9, zmm9, xmmword ptr [r11+rdx-3H*10H], 03H
+ vshufps zmm4, zmm8, zmm9, 136
+ vshufps zmm5, zmm8, zmm9, 221
+ vmovups zmm8, zmmword ptr [r8+rdx-20H]
+ vinserti32x4 zmm8, zmm8, xmmword ptr [r9+rdx-2H*10H], 01H
+ vinserti32x4 zmm8, zmm8, xmmword ptr [r10+rdx-2H*10H], 02H
+ vinserti32x4 zmm8, zmm8, xmmword ptr [r11+rdx-2H*10H], 03H
+ vmovups zmm9, zmmword ptr [r8+rdx-10H]
+ vinserti32x4 zmm9, zmm9, xmmword ptr [r9+rdx-1H*10H], 01H
+ vinserti32x4 zmm9, zmm9, xmmword ptr [r10+rdx-1H*10H], 02H
+ vinserti32x4 zmm9, zmm9, xmmword ptr [r11+rdx-1H*10H], 03H
+ vshufps zmm6, zmm8, zmm9, 136
+ vshufps zmm7, zmm8, zmm9, 221
+ vpshufd zmm6, zmm6, 93H
+ vpshufd zmm7, zmm7, 93H
+ mov al, 7
+roundloop4:
+ vpaddd zmm0, zmm0, zmm4
+ vpaddd zmm0, zmm0, zmm1
+ vpxord zmm3, zmm3, zmm0
+ vprord zmm3, zmm3, 16
+ vpaddd zmm2, zmm2, zmm3
+ vpxord zmm1, zmm1, zmm2
+ vprord zmm1, zmm1, 12
+ vpaddd zmm0, zmm0, zmm5
+ vpaddd zmm0, zmm0, zmm1
+ vpxord zmm3, zmm3, zmm0
+ vprord zmm3, zmm3, 8
+ vpaddd zmm2, zmm2, zmm3
+ vpxord zmm1, zmm1, zmm2
+ vprord zmm1, zmm1, 7
+ vpshufd zmm0, zmm0, 93H
+ vpshufd zmm3, zmm3, 4EH
+ vpshufd zmm2, zmm2, 39H
+ vpaddd zmm0, zmm0, zmm6
+ vpaddd zmm0, zmm0, zmm1
+ vpxord zmm3, zmm3, zmm0
+ vprord zmm3, zmm3, 16
+ vpaddd zmm2, zmm2, zmm3
+ vpxord zmm1, zmm1, zmm2
+ vprord zmm1, zmm1, 12
+ vpaddd zmm0, zmm0, zmm7
+ vpaddd zmm0, zmm0, zmm1
+ vpxord zmm3, zmm3, zmm0
+ vprord zmm3, zmm3, 8
+ vpaddd zmm2, zmm2, zmm3
+ vpxord zmm1, zmm1, zmm2
+ vprord zmm1, zmm1, 7
+ vpshufd zmm0, zmm0, 39H
+ vpshufd zmm3, zmm3, 4EH
+ vpshufd zmm2, zmm2, 93H
+ dec al
+ jz endroundloop4
+ vshufps zmm8, zmm4, zmm5, 214
+ vpshufd zmm9, zmm4, 0FH
+ vpshufd zmm4, zmm8, 39H
+ vshufps zmm8, zmm6, zmm7, 250
+ vpblendmd zmm9 {k3}, zmm9, zmm8
+ vpunpcklqdq zmm8, zmm7, zmm5
+ vpblendmd zmm8 {k4}, zmm8, zmm6
+ vpshufd zmm8, zmm8, 78H
+ vpunpckhdq zmm5, zmm5, zmm7
+ vpunpckldq zmm6, zmm6, zmm5
+ vpshufd zmm7, zmm6, 1EH
+ vmovdqa32 zmm5, zmm9
+ vmovdqa32 zmm6, zmm8
+ jmp roundloop4
+endroundloop4:
+ vpxord zmm0, zmm0, zmm2
+ vpxord zmm1, zmm1, zmm3
+ mov eax, r13d
+ cmp rdx, r15
+ jne innerloop4
+ vmovdqu xmmword ptr [rbx], xmm0
+ vmovdqu xmmword ptr [rbx+10H], xmm1
+ vextracti128 xmmword ptr [rbx+20H], ymm0, 01H
+ vextracti128 xmmword ptr [rbx+30H], ymm1, 01H
+ vextracti32x4 xmmword ptr [rbx+4H*10H], zmm0, 02H
+ vextracti32x4 xmmword ptr [rbx+5H*10H], zmm1, 02H
+ vextracti32x4 xmmword ptr [rbx+6H*10H], zmm0, 03H
+ vextracti32x4 xmmword ptr [rbx+7H*10H], zmm1, 03H
+ vmovdqa xmm0, xmmword ptr [rsp]
+ vmovdqa xmm2, xmmword ptr [rsp+40H]
+ vmovdqa32 xmm0 {k1}, xmmword ptr [rsp+1H*10H]
+ vmovdqa32 xmm2 {k1}, xmmword ptr [rsp+5H*10H]
+ vmovdqa xmmword ptr [rsp], xmm0
+ vmovdqa xmmword ptr [rsp+40H], xmm2
+ add rbx, 128
+ add rdi, 32
+ sub rsi, 4
+final3blocks:
+ test esi, 2H
+ je final1block
+ vbroadcasti128 ymm0, xmmword ptr [rcx]
+ vbroadcasti128 ymm1, xmmword ptr [rcx+10H]
+ vmovd xmm13, dword ptr [rsp]
+ vpinsrd xmm13, xmm13, dword ptr [rsp+40H], 1
+ vpinsrd xmm13, xmm13, dword ptr [BLAKE3_BLOCK_LEN], 2
+ vmovd xmm14, dword ptr [rsp+4H]
+ vpinsrd xmm14, xmm14, dword ptr [rsp+44H], 1
+ vpinsrd xmm14, xmm14, dword ptr [BLAKE3_BLOCK_LEN], 2
+ vinserti128 ymm13, ymm13, xmm14, 01H
+ mov r8, qword ptr [rdi]
+ mov r9, qword ptr [rdi+8H]
+ movzx eax, byte ptr [rbp+80H]
+ or eax, r13d
+ xor edx, edx
+ALIGN 16
+innerloop2:
+ mov r14d, eax
+ or eax, r12d
+ add rdx, 64
+ cmp rdx, r15
+ cmovne eax, r14d
+ mov dword ptr [rsp+88H], eax
+ vbroadcasti128 ymm2, xmmword ptr [BLAKE3_IV]
+ vpbroadcastd ymm8, dword ptr [rsp+88H]
+ vpblendd ymm3, ymm13, ymm8, 88H
+ vmovups ymm8, ymmword ptr [r8+rdx-40H]
+ vinsertf128 ymm8, ymm8, xmmword ptr [r9+rdx-40H], 01H
+ vmovups ymm9, ymmword ptr [r8+rdx-30H]
+ vinsertf128 ymm9, ymm9, xmmword ptr [r9+rdx-30H], 01H
+ vshufps ymm4, ymm8, ymm9, 136
+ vshufps ymm5, ymm8, ymm9, 221
+ vmovups ymm8, ymmword ptr [r8+rdx-20H]
+ vinsertf128 ymm8, ymm8, xmmword ptr [r9+rdx-20H], 01H
+ vmovups ymm9, ymmword ptr [r8+rdx-10H]
+ vinsertf128 ymm9, ymm9, xmmword ptr [r9+rdx-10H], 01H
+ vshufps ymm6, ymm8, ymm9, 136
+ vshufps ymm7, ymm8, ymm9, 221
+ vpshufd ymm6, ymm6, 93H
+ vpshufd ymm7, ymm7, 93H
+ mov al, 7
+roundloop2:
+ vpaddd ymm0, ymm0, ymm4
+ vpaddd ymm0, ymm0, ymm1
+ vpxord ymm3, ymm3, ymm0
+ vprord ymm3, ymm3, 16
+ vpaddd ymm2, ymm2, ymm3
+ vpxord ymm1, ymm1, ymm2
+ vprord ymm1, ymm1, 12
+ vpaddd ymm0, ymm0, ymm5
+ vpaddd ymm0, ymm0, ymm1
+ vpxord ymm3, ymm3, ymm0
+ vprord ymm3, ymm3, 8
+ vpaddd ymm2, ymm2, ymm3
+ vpxord ymm1, ymm1, ymm2
+ vprord ymm1, ymm1, 7
+ vpshufd ymm0, ymm0, 93H
+ vpshufd ymm3, ymm3, 4EH
+ vpshufd ymm2, ymm2, 39H
+ vpaddd ymm0, ymm0, ymm6
+ vpaddd ymm0, ymm0, ymm1
+ vpxord ymm3, ymm3, ymm0
+ vprord ymm3, ymm3, 16
+ vpaddd ymm2, ymm2, ymm3
+ vpxord ymm1, ymm1, ymm2
+ vprord ymm1, ymm1, 12
+ vpaddd ymm0, ymm0, ymm7
+ vpaddd ymm0, ymm0, ymm1
+ vpxord ymm3, ymm3, ymm0
+ vprord ymm3, ymm3, 8
+ vpaddd ymm2, ymm2, ymm3
+ vpxord ymm1, ymm1, ymm2
+ vprord ymm1, ymm1, 7
+ vpshufd ymm0, ymm0, 39H
+ vpshufd ymm3, ymm3, 4EH
+ vpshufd ymm2, ymm2, 93H
+ dec al
+ jz endroundloop2
+ vshufps ymm8, ymm4, ymm5, 214
+ vpshufd ymm9, ymm4, 0FH
+ vpshufd ymm4, ymm8, 39H
+ vshufps ymm8, ymm6, ymm7, 250
+ vpblendd ymm9, ymm9, ymm8, 0AAH
+ vpunpcklqdq ymm8, ymm7, ymm5
+ vpblendd ymm8, ymm8, ymm6, 88H
+ vpshufd ymm8, ymm8, 78H
+ vpunpckhdq ymm5, ymm5, ymm7
+ vpunpckldq ymm6, ymm6, ymm5
+ vpshufd ymm7, ymm6, 1EH
+ vmovdqa ymm5, ymm9
+ vmovdqa ymm6, ymm8
+ jmp roundloop2
+endroundloop2:
+ vpxor ymm0, ymm0, ymm2
+ vpxor ymm1, ymm1, ymm3
+ mov eax, r13d
+ cmp rdx, r15
+ jne innerloop2
+ vmovdqu xmmword ptr [rbx], xmm0
+ vmovdqu xmmword ptr [rbx+10H], xmm1
+ vextracti128 xmmword ptr [rbx+20H], ymm0, 01H
+ vextracti128 xmmword ptr [rbx+30H], ymm1, 01H
+ vmovdqa xmm0, xmmword ptr [rsp]
+ vmovdqa xmm2, xmmword ptr [rsp+40H]
+ vmovdqu32 xmm0 {k1}, xmmword ptr [rsp+8H]
+ vmovdqu32 xmm2 {k1}, xmmword ptr [rsp+48H]
+ vmovdqa xmmword ptr [rsp], xmm0
+ vmovdqa xmmword ptr [rsp+40H], xmm2
+ add rbx, 64
+ add rdi, 16
+ sub rsi, 2
+final1block:
+ test esi, 1H
+ je unwind
+ vmovdqu xmm0, xmmword ptr [rcx]
+ vmovdqu xmm1, xmmword ptr [rcx+10H]
+ vmovd xmm14, dword ptr [rsp]
+ vpinsrd xmm14, xmm14, dword ptr [rsp+40H], 1
+ vpinsrd xmm14, xmm14, dword ptr [BLAKE3_BLOCK_LEN], 2
+ vmovdqa xmm15, xmmword ptr [BLAKE3_IV]
+ mov r8, qword ptr [rdi]
+ movzx eax, byte ptr [rbp+80H]
+ or eax, r13d
+ xor edx, edx
+ALIGN 16
+innerloop1:
+ mov r14d, eax
+ or eax, r12d
+ add rdx, 64
+ cmp rdx, r15
+ cmovne eax, r14d
+ vpinsrd xmm3, xmm14, eax, 3
+ vmovdqa xmm2, xmm15
+ vmovups xmm8, xmmword ptr [r8+rdx-40H]
+ vmovups xmm9, xmmword ptr [r8+rdx-30H]
+ vshufps xmm4, xmm8, xmm9, 136
+ vshufps xmm5, xmm8, xmm9, 221
+ vmovups xmm8, xmmword ptr [r8+rdx-20H]
+ vmovups xmm9, xmmword ptr [r8+rdx-10H]
+ vshufps xmm6, xmm8, xmm9, 136
+ vshufps xmm7, xmm8, xmm9, 221
+ vpshufd xmm6, xmm6, 93H
+ vpshufd xmm7, xmm7, 93H
+ mov al, 7
+roundloop1:
+ vpaddd xmm0, xmm0, xmm4
+ vpaddd xmm0, xmm0, xmm1
+ vpxord xmm3, xmm3, xmm0
+ vprord xmm3, xmm3, 16
+ vpaddd xmm2, xmm2, xmm3
+ vpxord xmm1, xmm1, xmm2
+ vprord xmm1, xmm1, 12
+ vpaddd xmm0, xmm0, xmm5
+ vpaddd xmm0, xmm0, xmm1
+ vpxord xmm3, xmm3, xmm0
+ vprord xmm3, xmm3, 8
+ vpaddd xmm2, xmm2, xmm3
+ vpxord xmm1, xmm1, xmm2
+ vprord xmm1, xmm1, 7
+ vpshufd xmm0, xmm0, 93H
+ vpshufd xmm3, xmm3, 4EH
+ vpshufd xmm2, xmm2, 39H
+ vpaddd xmm0, xmm0, xmm6
+ vpaddd xmm0, xmm0, xmm1
+ vpxord xmm3, xmm3, xmm0
+ vprord xmm3, xmm3, 16
+ vpaddd xmm2, xmm2, xmm3
+ vpxord xmm1, xmm1, xmm2
+ vprord xmm1, xmm1, 12
+ vpaddd xmm0, xmm0, xmm7
+ vpaddd xmm0, xmm0, xmm1
+ vpxord xmm3, xmm3, xmm0
+ vprord xmm3, xmm3, 8
+ vpaddd xmm2, xmm2, xmm3
+ vpxord xmm1, xmm1, xmm2
+ vprord xmm1, xmm1, 7
+ vpshufd xmm0, xmm0, 39H
+ vpshufd xmm3, xmm3, 4EH
+ vpshufd xmm2, xmm2, 93H
+ dec al
+ jz endroundloop1
+ vshufps xmm8, xmm4, xmm5, 214
+ vpshufd xmm9, xmm4, 0FH
+ vpshufd xmm4, xmm8, 39H
+ vshufps xmm8, xmm6, xmm7, 250
+ vpblendd xmm9, xmm9, xmm8, 0AAH
+ vpunpcklqdq xmm8, xmm7, xmm5
+ vpblendd xmm8, xmm8, xmm6, 88H
+ vpshufd xmm8, xmm8, 78H
+ vpunpckhdq xmm5, xmm5, xmm7
+ vpunpckldq xmm6, xmm6, xmm5
+ vpshufd xmm7, xmm6, 1EH
+ vmovdqa xmm5, xmm9
+ vmovdqa xmm6, xmm8
+ jmp roundloop1
+endroundloop1:
+ vpxor xmm0, xmm0, xmm2
+ vpxor xmm1, xmm1, xmm3
+ mov eax, r13d
+ cmp rdx, r15
+ jne innerloop1
+ vmovdqu xmmword ptr [rbx], xmm0
+ vmovdqu xmmword ptr [rbx+10H], xmm1
+ jmp unwind
+
+_llvm_blake3_hash_many_avx512 ENDP
+llvm_blake3_hash_many_avx512 ENDP
+
+ALIGN 16
+llvm_blake3_compress_in_place_avx512 PROC
+_llvm_blake3_compress_in_place_avx512 PROC
+ sub rsp, 72
+ vmovdqa xmmword ptr [rsp], xmm6
+ vmovdqa xmmword ptr [rsp+10H], xmm7
+ vmovdqa xmmword ptr [rsp+20H], xmm8
+ vmovdqa xmmword ptr [rsp+30H], xmm9
+ vmovdqu xmm0, xmmword ptr [rcx]
+ vmovdqu xmm1, xmmword ptr [rcx+10H]
+ movzx eax, byte ptr [rsp+70H]
+ movzx r8d, r8b
+ shl rax, 32
+ add r8, rax
+ vmovq xmm3, r9
+ vmovq xmm4, r8
+ vpunpcklqdq xmm3, xmm3, xmm4
+ vmovaps xmm2, xmmword ptr [BLAKE3_IV]
+ vmovups xmm8, xmmword ptr [rdx]
+ vmovups xmm9, xmmword ptr [rdx+10H]
+ vshufps xmm4, xmm8, xmm9, 136
+ vshufps xmm5, xmm8, xmm9, 221
+ vmovups xmm8, xmmword ptr [rdx+20H]
+ vmovups xmm9, xmmword ptr [rdx+30H]
+ vshufps xmm6, xmm8, xmm9, 136
+ vshufps xmm7, xmm8, xmm9, 221
+ vpshufd xmm6, xmm6, 93H
+ vpshufd xmm7, xmm7, 93H
+ mov al, 7
+@@:
+ vpaddd xmm0, xmm0, xmm4
+ vpaddd xmm0, xmm0, xmm1
+ vpxord xmm3, xmm3, xmm0
+ vprord xmm3, xmm3, 16
+ vpaddd xmm2, xmm2, xmm3
+ vpxord xmm1, xmm1, xmm2
+ vprord xmm1, xmm1, 12
+ vpaddd xmm0, xmm0, xmm5
+ vpaddd xmm0, xmm0, xmm1
+ vpxord xmm3, xmm3, xmm0
+ vprord xmm3, xmm3, 8
+ vpaddd xmm2, xmm2, xmm3
+ vpxord xmm1, xmm1, xmm2
+ vprord xmm1, xmm1, 7
+ vpshufd xmm0, xmm0, 93H
+ vpshufd xmm3, xmm3, 4EH
+ vpshufd xmm2, xmm2, 39H
+ vpaddd xmm0, xmm0, xmm6
+ vpaddd xmm0, xmm0, xmm1
+ vpxord xmm3, xmm3, xmm0
+ vprord xmm3, xmm3, 16
+ vpaddd xmm2, xmm2, xmm3
+ vpxord xmm1, xmm1, xmm2
+ vprord xmm1, xmm1, 12
+ vpaddd xmm0, xmm0, xmm7
+ vpaddd xmm0, xmm0, xmm1
+ vpxord xmm3, xmm3, xmm0
+ vprord xmm3, xmm3, 8
+ vpaddd xmm2, xmm2, xmm3
+ vpxord xmm1, xmm1, xmm2
+ vprord xmm1, xmm1, 7
+ vpshufd xmm0, xmm0, 39H
+ vpshufd xmm3, xmm3, 4EH
+ vpshufd xmm2, xmm2, 93H
+ dec al
+ jz @F
+ vshufps xmm8, xmm4, xmm5, 214
+ vpshufd xmm9, xmm4, 0FH
+ vpshufd xmm4, xmm8, 39H
+ vshufps xmm8, xmm6, xmm7, 250
+ vpblendd xmm9, xmm9, xmm8, 0AAH
+ vpunpcklqdq xmm8, xmm7, xmm5
+ vpblendd xmm8, xmm8, xmm6, 88H
+ vpshufd xmm8, xmm8, 78H
+ vpunpckhdq xmm5, xmm5, xmm7
+ vpunpckldq xmm6, xmm6, xmm5
+ vpshufd xmm7, xmm6, 1EH
+ vmovdqa xmm5, xmm9
+ vmovdqa xmm6, xmm8
+ jmp @B
+@@:
+ vpxor xmm0, xmm0, xmm2
+ vpxor xmm1, xmm1, xmm3
+ vmovdqu xmmword ptr [rcx], xmm0
+ vmovdqu xmmword ptr [rcx+10H], xmm1
+ vmovdqa xmm6, xmmword ptr [rsp]
+ vmovdqa xmm7, xmmword ptr [rsp+10H]
+ vmovdqa xmm8, xmmword ptr [rsp+20H]
+ vmovdqa xmm9, xmmword ptr [rsp+30H]
+ add rsp, 72
+ ret
+_llvm_blake3_compress_in_place_avx512 ENDP
+llvm_blake3_compress_in_place_avx512 ENDP
+
+ALIGN 16
+llvm_blake3_compress_xof_avx512 PROC
+_llvm_blake3_compress_xof_avx512 PROC
+ sub rsp, 72
+ vmovdqa xmmword ptr [rsp], xmm6
+ vmovdqa xmmword ptr [rsp+10H], xmm7
+ vmovdqa xmmword ptr [rsp+20H], xmm8
+ vmovdqa xmmword ptr [rsp+30H], xmm9
+ vmovdqu xmm0, xmmword ptr [rcx]
+ vmovdqu xmm1, xmmword ptr [rcx+10H]
+ movzx eax, byte ptr [rsp+70H]
+ movzx r8d, r8b
+ mov r10, qword ptr [rsp+78H]
+ shl rax, 32
+ add r8, rax
+ vmovq xmm3, r9
+ vmovq xmm4, r8
+ vpunpcklqdq xmm3, xmm3, xmm4
+ vmovaps xmm2, xmmword ptr [BLAKE3_IV]
+ vmovups xmm8, xmmword ptr [rdx]
+ vmovups xmm9, xmmword ptr [rdx+10H]
+ vshufps xmm4, xmm8, xmm9, 136
+ vshufps xmm5, xmm8, xmm9, 221
+ vmovups xmm8, xmmword ptr [rdx+20H]
+ vmovups xmm9, xmmword ptr [rdx+30H]
+ vshufps xmm6, xmm8, xmm9, 136
+ vshufps xmm7, xmm8, xmm9, 221
+ vpshufd xmm6, xmm6, 93H
+ vpshufd xmm7, xmm7, 93H
+ mov al, 7
+@@:
+ vpaddd xmm0, xmm0, xmm4
+ vpaddd xmm0, xmm0, xmm1
+ vpxord xmm3, xmm3, xmm0
+ vprord xmm3, xmm3, 16
+ vpaddd xmm2, xmm2, xmm3
+ vpxord xmm1, xmm1, xmm2
+ vprord xmm1, xmm1, 12
+ vpaddd xmm0, xmm0, xmm5
+ vpaddd xmm0, xmm0, xmm1
+ vpxord xmm3, xmm3, xmm0
+ vprord xmm3, xmm3, 8
+ vpaddd xmm2, xmm2, xmm3
+ vpxord xmm1, xmm1, xmm2
+ vprord xmm1, xmm1, 7
+ vpshufd xmm0, xmm0, 93H
+ vpshufd xmm3, xmm3, 4EH
+ vpshufd xmm2, xmm2, 39H
+ vpaddd xmm0, xmm0, xmm6
+ vpaddd xmm0, xmm0, xmm1
+ vpxord xmm3, xmm3, xmm0
+ vprord xmm3, xmm3, 16
+ vpaddd xmm2, xmm2, xmm3
+ vpxord xmm1, xmm1, xmm2
+ vprord xmm1, xmm1, 12
+ vpaddd xmm0, xmm0, xmm7
+ vpaddd xmm0, xmm0, xmm1
+ vpxord xmm3, xmm3, xmm0
+ vprord xmm3, xmm3, 8
+ vpaddd xmm2, xmm2, xmm3
+ vpxord xmm1, xmm1, xmm2
+ vprord xmm1, xmm1, 7
+ vpshufd xmm0, xmm0, 39H
+ vpshufd xmm3, xmm3, 4EH
+ vpshufd xmm2, xmm2, 93H
+ dec al
+ jz @F
+ vshufps xmm8, xmm4, xmm5, 214
+ vpshufd xmm9, xmm4, 0FH
+ vpshufd xmm4, xmm8, 39H
+ vshufps xmm8, xmm6, xmm7, 250
+ vpblendd xmm9, xmm9, xmm8, 0AAH
+ vpunpcklqdq xmm8, xmm7, xmm5
+ vpblendd xmm8, xmm8, xmm6, 88H
+ vpshufd xmm8, xmm8, 78H
+ vpunpckhdq xmm5, xmm5, xmm7
+ vpunpckldq xmm6, xmm6, xmm5
+ vpshufd xmm7, xmm6, 1EH
+ vmovdqa xmm5, xmm9
+ vmovdqa xmm6, xmm8
+ jmp @B
+@@:
+ vpxor xmm0, xmm0, xmm2
+ vpxor xmm1, xmm1, xmm3
+ vpxor xmm2, xmm2, xmmword ptr [rcx]
+ vpxor xmm3, xmm3, xmmword ptr [rcx+10H]
+ vmovdqu xmmword ptr [r10], xmm0
+ vmovdqu xmmword ptr [r10+10H], xmm1
+ vmovdqu xmmword ptr [r10+20H], xmm2
+ vmovdqu xmmword ptr [r10+30H], xmm3
+ vmovdqa xmm6, xmmword ptr [rsp]
+ vmovdqa xmm7, xmmword ptr [rsp+10H]
+ vmovdqa xmm8, xmmword ptr [rsp+20H]
+ vmovdqa xmm9, xmmword ptr [rsp+30H]
+ add rsp, 72
+ ret
+_llvm_blake3_compress_xof_avx512 ENDP
+llvm_blake3_compress_xof_avx512 ENDP
+
+_TEXT ENDS
+
+_RDATA SEGMENT READONLY PAGE ALIAS(".rdata") 'CONST'
+ALIGN 64
+INDEX0:
+ dd 0, 1, 2, 3, 16, 17, 18, 19
+ dd 8, 9, 10, 11, 24, 25, 26, 27
+INDEX1:
+ dd 4, 5, 6, 7, 20, 21, 22, 23
+ dd 12, 13, 14, 15, 28, 29, 30, 31
+ADD0:
+ dd 0, 1, 2, 3, 4, 5, 6, 7
+ dd 8, 9, 10, 11, 12, 13, 14, 15
+ADD1:
+ dd 1
+ADD16:
+ dd 16
+BLAKE3_BLOCK_LEN:
+ dd 64
+ALIGN 64
+BLAKE3_IV:
+BLAKE3_IV_0:
+ dd 06A09E667H
+BLAKE3_IV_1:
+ dd 0BB67AE85H
+BLAKE3_IV_2:
+ dd 03C6EF372H
+BLAKE3_IV_3:
+ dd 0A54FF53AH
+
+_RDATA ENDS
+END
diff --git a/contrib/libs/llvm16/lib/Support/BLAKE3/blake3_sse2.c b/contrib/libs/llvm16/lib/Support/BLAKE3/blake3_sse2.c
new file mode 100644
index 0000000000..f4449ac0b3
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Support/BLAKE3/blake3_sse2.c
@@ -0,0 +1,566 @@
+#include "blake3_impl.h"
+
+#include <immintrin.h>
+
+#define DEGREE 4
+
+#define _mm_shuffle_ps2(a, b, c) \
+ (_mm_castps_si128( \
+ _mm_shuffle_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b), (c))))
+
+INLINE __m128i loadu(const uint8_t src[16]) {
+ return _mm_loadu_si128((const __m128i *)src);
+}
+
+INLINE void storeu(__m128i src, uint8_t dest[16]) {
+ _mm_storeu_si128((__m128i *)dest, src);
+}
+
+INLINE __m128i addv(__m128i a, __m128i b) { return _mm_add_epi32(a, b); }
+
+// Note that clang-format doesn't like the name "xor" for some reason.
+INLINE __m128i xorv(__m128i a, __m128i b) { return _mm_xor_si128(a, b); }
+
+INLINE __m128i set1(uint32_t x) { return _mm_set1_epi32((int32_t)x); }
+
+INLINE __m128i set4(uint32_t a, uint32_t b, uint32_t c, uint32_t d) {
+ return _mm_setr_epi32((int32_t)a, (int32_t)b, (int32_t)c, (int32_t)d);
+}
+
+INLINE __m128i rot16(__m128i x) {
+ return _mm_shufflehi_epi16(_mm_shufflelo_epi16(x, 0xB1), 0xB1);
+}
+
+INLINE __m128i rot12(__m128i x) {
+ return xorv(_mm_srli_epi32(x, 12), _mm_slli_epi32(x, 32 - 12));
+}
+
+INLINE __m128i rot8(__m128i x) {
+ return xorv(_mm_srli_epi32(x, 8), _mm_slli_epi32(x, 32 - 8));
+}
+
+INLINE __m128i rot7(__m128i x) {
+ return xorv(_mm_srli_epi32(x, 7), _mm_slli_epi32(x, 32 - 7));
+}
+
+INLINE void g1(__m128i *row0, __m128i *row1, __m128i *row2, __m128i *row3,
+ __m128i m) {
+ *row0 = addv(addv(*row0, m), *row1);
+ *row3 = xorv(*row3, *row0);
+ *row3 = rot16(*row3);
+ *row2 = addv(*row2, *row3);
+ *row1 = xorv(*row1, *row2);
+ *row1 = rot12(*row1);
+}
+
+INLINE void g2(__m128i *row0, __m128i *row1, __m128i *row2, __m128i *row3,
+ __m128i m) {
+ *row0 = addv(addv(*row0, m), *row1);
+ *row3 = xorv(*row3, *row0);
+ *row3 = rot8(*row3);
+ *row2 = addv(*row2, *row3);
+ *row1 = xorv(*row1, *row2);
+ *row1 = rot7(*row1);
+}
+
+// Note the optimization here of leaving row1 as the unrotated row, rather than
+// row0. All the message loads below are adjusted to compensate for this. See
+// discussion at https://github.com/sneves/blake2-avx2/pull/4
+INLINE void diagonalize(__m128i *row0, __m128i *row2, __m128i *row3) {
+ *row0 = _mm_shuffle_epi32(*row0, _MM_SHUFFLE(2, 1, 0, 3));
+ *row3 = _mm_shuffle_epi32(*row3, _MM_SHUFFLE(1, 0, 3, 2));
+ *row2 = _mm_shuffle_epi32(*row2, _MM_SHUFFLE(0, 3, 2, 1));
+}
+
+INLINE void undiagonalize(__m128i *row0, __m128i *row2, __m128i *row3) {
+ *row0 = _mm_shuffle_epi32(*row0, _MM_SHUFFLE(0, 3, 2, 1));
+ *row3 = _mm_shuffle_epi32(*row3, _MM_SHUFFLE(1, 0, 3, 2));
+ *row2 = _mm_shuffle_epi32(*row2, _MM_SHUFFLE(2, 1, 0, 3));
+}
+
+INLINE __m128i blend_epi16(__m128i a, __m128i b, const int16_t imm8) {
+ const __m128i bits = _mm_set_epi16(0x80, 0x40, 0x20, 0x10, 0x08, 0x04, 0x02, 0x01);
+ __m128i mask = _mm_set1_epi16(imm8);
+ mask = _mm_and_si128(mask, bits);
+ mask = _mm_cmpeq_epi16(mask, bits);
+ return _mm_or_si128(_mm_and_si128(mask, b), _mm_andnot_si128(mask, a));
+}
+
+INLINE void compress_pre(__m128i rows[4], const uint32_t cv[8],
+ const uint8_t block[BLAKE3_BLOCK_LEN],
+ uint8_t block_len, uint64_t counter, uint8_t flags) {
+ rows[0] = loadu((uint8_t *)&cv[0]);
+ rows[1] = loadu((uint8_t *)&cv[4]);
+ rows[2] = set4(IV[0], IV[1], IV[2], IV[3]);
+ rows[3] = set4(counter_low(counter), counter_high(counter),
+ (uint32_t)block_len, (uint32_t)flags);
+
+ __m128i m0 = loadu(&block[sizeof(__m128i) * 0]);
+ __m128i m1 = loadu(&block[sizeof(__m128i) * 1]);
+ __m128i m2 = loadu(&block[sizeof(__m128i) * 2]);
+ __m128i m3 = loadu(&block[sizeof(__m128i) * 3]);
+
+ __m128i t0, t1, t2, t3, tt;
+
+ // Round 1. The first round permutes the message words from the original
+ // input order, into the groups that get mixed in parallel.
+ t0 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(2, 0, 2, 0)); // 6 4 2 0
+ g1(&rows[0], &rows[1], &rows[2], &rows[3], t0);
+ t1 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(3, 1, 3, 1)); // 7 5 3 1
+ g2(&rows[0], &rows[1], &rows[2], &rows[3], t1);
+ diagonalize(&rows[0], &rows[2], &rows[3]);
+ t2 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(2, 0, 2, 0)); // 14 12 10 8
+ t2 = _mm_shuffle_epi32(t2, _MM_SHUFFLE(2, 1, 0, 3)); // 12 10 8 14
+ g1(&rows[0], &rows[1], &rows[2], &rows[3], t2);
+ t3 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(3, 1, 3, 1)); // 15 13 11 9
+ t3 = _mm_shuffle_epi32(t3, _MM_SHUFFLE(2, 1, 0, 3)); // 13 11 9 15
+ g2(&rows[0], &rows[1], &rows[2], &rows[3], t3);
+ undiagonalize(&rows[0], &rows[2], &rows[3]);
+ m0 = t0;
+ m1 = t1;
+ m2 = t2;
+ m3 = t3;
+
+ // Round 2. This round and all following rounds apply a fixed permutation
+ // to the message words from the round before.
+ t0 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(3, 1, 1, 2));
+ t0 = _mm_shuffle_epi32(t0, _MM_SHUFFLE(0, 3, 2, 1));
+ g1(&rows[0], &rows[1], &rows[2], &rows[3], t0);
+ t1 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(3, 3, 2, 2));
+ tt = _mm_shuffle_epi32(m0, _MM_SHUFFLE(0, 0, 3, 3));
+ t1 = blend_epi16(tt, t1, 0xCC);
+ g2(&rows[0], &rows[1], &rows[2], &rows[3], t1);
+ diagonalize(&rows[0], &rows[2], &rows[3]);
+ t2 = _mm_unpacklo_epi64(m3, m1);
+ tt = blend_epi16(t2, m2, 0xC0);
+ t2 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(1, 3, 2, 0));
+ g1(&rows[0], &rows[1], &rows[2], &rows[3], t2);
+ t3 = _mm_unpackhi_epi32(m1, m3);
+ tt = _mm_unpacklo_epi32(m2, t3);
+ t3 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(0, 1, 3, 2));
+ g2(&rows[0], &rows[1], &rows[2], &rows[3], t3);
+ undiagonalize(&rows[0], &rows[2], &rows[3]);
+ m0 = t0;
+ m1 = t1;
+ m2 = t2;
+ m3 = t3;
+
+ // Round 3
+ t0 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(3, 1, 1, 2));
+ t0 = _mm_shuffle_epi32(t0, _MM_SHUFFLE(0, 3, 2, 1));
+ g1(&rows[0], &rows[1], &rows[2], &rows[3], t0);
+ t1 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(3, 3, 2, 2));
+ tt = _mm_shuffle_epi32(m0, _MM_SHUFFLE(0, 0, 3, 3));
+ t1 = blend_epi16(tt, t1, 0xCC);
+ g2(&rows[0], &rows[1], &rows[2], &rows[3], t1);
+ diagonalize(&rows[0], &rows[2], &rows[3]);
+ t2 = _mm_unpacklo_epi64(m3, m1);
+ tt = blend_epi16(t2, m2, 0xC0);
+ t2 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(1, 3, 2, 0));
+ g1(&rows[0], &rows[1], &rows[2], &rows[3], t2);
+ t3 = _mm_unpackhi_epi32(m1, m3);
+ tt = _mm_unpacklo_epi32(m2, t3);
+ t3 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(0, 1, 3, 2));
+ g2(&rows[0], &rows[1], &rows[2], &rows[3], t3);
+ undiagonalize(&rows[0], &rows[2], &rows[3]);
+ m0 = t0;
+ m1 = t1;
+ m2 = t2;
+ m3 = t3;
+
+ // Round 4
+ t0 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(3, 1, 1, 2));
+ t0 = _mm_shuffle_epi32(t0, _MM_SHUFFLE(0, 3, 2, 1));
+ g1(&rows[0], &rows[1], &rows[2], &rows[3], t0);
+ t1 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(3, 3, 2, 2));
+ tt = _mm_shuffle_epi32(m0, _MM_SHUFFLE(0, 0, 3, 3));
+ t1 = blend_epi16(tt, t1, 0xCC);
+ g2(&rows[0], &rows[1], &rows[2], &rows[3], t1);
+ diagonalize(&rows[0], &rows[2], &rows[3]);
+ t2 = _mm_unpacklo_epi64(m3, m1);
+ tt = blend_epi16(t2, m2, 0xC0);
+ t2 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(1, 3, 2, 0));
+ g1(&rows[0], &rows[1], &rows[2], &rows[3], t2);
+ t3 = _mm_unpackhi_epi32(m1, m3);
+ tt = _mm_unpacklo_epi32(m2, t3);
+ t3 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(0, 1, 3, 2));
+ g2(&rows[0], &rows[1], &rows[2], &rows[3], t3);
+ undiagonalize(&rows[0], &rows[2], &rows[3]);
+ m0 = t0;
+ m1 = t1;
+ m2 = t2;
+ m3 = t3;
+
+ // Round 5
+ t0 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(3, 1, 1, 2));
+ t0 = _mm_shuffle_epi32(t0, _MM_SHUFFLE(0, 3, 2, 1));
+ g1(&rows[0], &rows[1], &rows[2], &rows[3], t0);
+ t1 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(3, 3, 2, 2));
+ tt = _mm_shuffle_epi32(m0, _MM_SHUFFLE(0, 0, 3, 3));
+ t1 = blend_epi16(tt, t1, 0xCC);
+ g2(&rows[0], &rows[1], &rows[2], &rows[3], t1);
+ diagonalize(&rows[0], &rows[2], &rows[3]);
+ t2 = _mm_unpacklo_epi64(m3, m1);
+ tt = blend_epi16(t2, m2, 0xC0);
+ t2 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(1, 3, 2, 0));
+ g1(&rows[0], &rows[1], &rows[2], &rows[3], t2);
+ t3 = _mm_unpackhi_epi32(m1, m3);
+ tt = _mm_unpacklo_epi32(m2, t3);
+ t3 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(0, 1, 3, 2));
+ g2(&rows[0], &rows[1], &rows[2], &rows[3], t3);
+ undiagonalize(&rows[0], &rows[2], &rows[3]);
+ m0 = t0;
+ m1 = t1;
+ m2 = t2;
+ m3 = t3;
+
+ // Round 6
+ t0 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(3, 1, 1, 2));
+ t0 = _mm_shuffle_epi32(t0, _MM_SHUFFLE(0, 3, 2, 1));
+ g1(&rows[0], &rows[1], &rows[2], &rows[3], t0);
+ t1 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(3, 3, 2, 2));
+ tt = _mm_shuffle_epi32(m0, _MM_SHUFFLE(0, 0, 3, 3));
+ t1 = blend_epi16(tt, t1, 0xCC);
+ g2(&rows[0], &rows[1], &rows[2], &rows[3], t1);
+ diagonalize(&rows[0], &rows[2], &rows[3]);
+ t2 = _mm_unpacklo_epi64(m3, m1);
+ tt = blend_epi16(t2, m2, 0xC0);
+ t2 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(1, 3, 2, 0));
+ g1(&rows[0], &rows[1], &rows[2], &rows[3], t2);
+ t3 = _mm_unpackhi_epi32(m1, m3);
+ tt = _mm_unpacklo_epi32(m2, t3);
+ t3 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(0, 1, 3, 2));
+ g2(&rows[0], &rows[1], &rows[2], &rows[3], t3);
+ undiagonalize(&rows[0], &rows[2], &rows[3]);
+ m0 = t0;
+ m1 = t1;
+ m2 = t2;
+ m3 = t3;
+
+ // Round 7
+ t0 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(3, 1, 1, 2));
+ t0 = _mm_shuffle_epi32(t0, _MM_SHUFFLE(0, 3, 2, 1));
+ g1(&rows[0], &rows[1], &rows[2], &rows[3], t0);
+ t1 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(3, 3, 2, 2));
+ tt = _mm_shuffle_epi32(m0, _MM_SHUFFLE(0, 0, 3, 3));
+ t1 = blend_epi16(tt, t1, 0xCC);
+ g2(&rows[0], &rows[1], &rows[2], &rows[3], t1);
+ diagonalize(&rows[0], &rows[2], &rows[3]);
+ t2 = _mm_unpacklo_epi64(m3, m1);
+ tt = blend_epi16(t2, m2, 0xC0);
+ t2 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(1, 3, 2, 0));
+ g1(&rows[0], &rows[1], &rows[2], &rows[3], t2);
+ t3 = _mm_unpackhi_epi32(m1, m3);
+ tt = _mm_unpacklo_epi32(m2, t3);
+ t3 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(0, 1, 3, 2));
+ g2(&rows[0], &rows[1], &rows[2], &rows[3], t3);
+ undiagonalize(&rows[0], &rows[2], &rows[3]);
+}
+
+void blake3_compress_in_place_sse2(uint32_t cv[8],
+ const uint8_t block[BLAKE3_BLOCK_LEN],
+ uint8_t block_len, uint64_t counter,
+ uint8_t flags) {
+ __m128i rows[4];
+ compress_pre(rows, cv, block, block_len, counter, flags);
+ storeu(xorv(rows[0], rows[2]), (uint8_t *)&cv[0]);
+ storeu(xorv(rows[1], rows[3]), (uint8_t *)&cv[4]);
+}
+
+void blake3_compress_xof_sse2(const uint32_t cv[8],
+ const uint8_t block[BLAKE3_BLOCK_LEN],
+ uint8_t block_len, uint64_t counter,
+ uint8_t flags, uint8_t out[64]) {
+ __m128i rows[4];
+ compress_pre(rows, cv, block, block_len, counter, flags);
+ storeu(xorv(rows[0], rows[2]), &out[0]);
+ storeu(xorv(rows[1], rows[3]), &out[16]);
+ storeu(xorv(rows[2], loadu((uint8_t *)&cv[0])), &out[32]);
+ storeu(xorv(rows[3], loadu((uint8_t *)&cv[4])), &out[48]);
+}
+
+INLINE void round_fn(__m128i v[16], __m128i m[16], size_t r) {
+ v[0] = addv(v[0], m[(size_t)MSG_SCHEDULE[r][0]]);
+ v[1] = addv(v[1], m[(size_t)MSG_SCHEDULE[r][2]]);
+ v[2] = addv(v[2], m[(size_t)MSG_SCHEDULE[r][4]]);
+ v[3] = addv(v[3], m[(size_t)MSG_SCHEDULE[r][6]]);
+ v[0] = addv(v[0], v[4]);
+ v[1] = addv(v[1], v[5]);
+ v[2] = addv(v[2], v[6]);
+ v[3] = addv(v[3], v[7]);
+ v[12] = xorv(v[12], v[0]);
+ v[13] = xorv(v[13], v[1]);
+ v[14] = xorv(v[14], v[2]);
+ v[15] = xorv(v[15], v[3]);
+ v[12] = rot16(v[12]);
+ v[13] = rot16(v[13]);
+ v[14] = rot16(v[14]);
+ v[15] = rot16(v[15]);
+ v[8] = addv(v[8], v[12]);
+ v[9] = addv(v[9], v[13]);
+ v[10] = addv(v[10], v[14]);
+ v[11] = addv(v[11], v[15]);
+ v[4] = xorv(v[4], v[8]);
+ v[5] = xorv(v[5], v[9]);
+ v[6] = xorv(v[6], v[10]);
+ v[7] = xorv(v[7], v[11]);
+ v[4] = rot12(v[4]);
+ v[5] = rot12(v[5]);
+ v[6] = rot12(v[6]);
+ v[7] = rot12(v[7]);
+ v[0] = addv(v[0], m[(size_t)MSG_SCHEDULE[r][1]]);
+ v[1] = addv(v[1], m[(size_t)MSG_SCHEDULE[r][3]]);
+ v[2] = addv(v[2], m[(size_t)MSG_SCHEDULE[r][5]]);
+ v[3] = addv(v[3], m[(size_t)MSG_SCHEDULE[r][7]]);
+ v[0] = addv(v[0], v[4]);
+ v[1] = addv(v[1], v[5]);
+ v[2] = addv(v[2], v[6]);
+ v[3] = addv(v[3], v[7]);
+ v[12] = xorv(v[12], v[0]);
+ v[13] = xorv(v[13], v[1]);
+ v[14] = xorv(v[14], v[2]);
+ v[15] = xorv(v[15], v[3]);
+ v[12] = rot8(v[12]);
+ v[13] = rot8(v[13]);
+ v[14] = rot8(v[14]);
+ v[15] = rot8(v[15]);
+ v[8] = addv(v[8], v[12]);
+ v[9] = addv(v[9], v[13]);
+ v[10] = addv(v[10], v[14]);
+ v[11] = addv(v[11], v[15]);
+ v[4] = xorv(v[4], v[8]);
+ v[5] = xorv(v[5], v[9]);
+ v[6] = xorv(v[6], v[10]);
+ v[7] = xorv(v[7], v[11]);
+ v[4] = rot7(v[4]);
+ v[5] = rot7(v[5]);
+ v[6] = rot7(v[6]);
+ v[7] = rot7(v[7]);
+
+ v[0] = addv(v[0], m[(size_t)MSG_SCHEDULE[r][8]]);
+ v[1] = addv(v[1], m[(size_t)MSG_SCHEDULE[r][10]]);
+ v[2] = addv(v[2], m[(size_t)MSG_SCHEDULE[r][12]]);
+ v[3] = addv(v[3], m[(size_t)MSG_SCHEDULE[r][14]]);
+ v[0] = addv(v[0], v[5]);
+ v[1] = addv(v[1], v[6]);
+ v[2] = addv(v[2], v[7]);
+ v[3] = addv(v[3], v[4]);
+ v[15] = xorv(v[15], v[0]);
+ v[12] = xorv(v[12], v[1]);
+ v[13] = xorv(v[13], v[2]);
+ v[14] = xorv(v[14], v[3]);
+ v[15] = rot16(v[15]);
+ v[12] = rot16(v[12]);
+ v[13] = rot16(v[13]);
+ v[14] = rot16(v[14]);
+ v[10] = addv(v[10], v[15]);
+ v[11] = addv(v[11], v[12]);
+ v[8] = addv(v[8], v[13]);
+ v[9] = addv(v[9], v[14]);
+ v[5] = xorv(v[5], v[10]);
+ v[6] = xorv(v[6], v[11]);
+ v[7] = xorv(v[7], v[8]);
+ v[4] = xorv(v[4], v[9]);
+ v[5] = rot12(v[5]);
+ v[6] = rot12(v[6]);
+ v[7] = rot12(v[7]);
+ v[4] = rot12(v[4]);
+ v[0] = addv(v[0], m[(size_t)MSG_SCHEDULE[r][9]]);
+ v[1] = addv(v[1], m[(size_t)MSG_SCHEDULE[r][11]]);
+ v[2] = addv(v[2], m[(size_t)MSG_SCHEDULE[r][13]]);
+ v[3] = addv(v[3], m[(size_t)MSG_SCHEDULE[r][15]]);
+ v[0] = addv(v[0], v[5]);
+ v[1] = addv(v[1], v[6]);
+ v[2] = addv(v[2], v[7]);
+ v[3] = addv(v[3], v[4]);
+ v[15] = xorv(v[15], v[0]);
+ v[12] = xorv(v[12], v[1]);
+ v[13] = xorv(v[13], v[2]);
+ v[14] = xorv(v[14], v[3]);
+ v[15] = rot8(v[15]);
+ v[12] = rot8(v[12]);
+ v[13] = rot8(v[13]);
+ v[14] = rot8(v[14]);
+ v[10] = addv(v[10], v[15]);
+ v[11] = addv(v[11], v[12]);
+ v[8] = addv(v[8], v[13]);
+ v[9] = addv(v[9], v[14]);
+ v[5] = xorv(v[5], v[10]);
+ v[6] = xorv(v[6], v[11]);
+ v[7] = xorv(v[7], v[8]);
+ v[4] = xorv(v[4], v[9]);
+ v[5] = rot7(v[5]);
+ v[6] = rot7(v[6]);
+ v[7] = rot7(v[7]);
+ v[4] = rot7(v[4]);
+}
+
+INLINE void transpose_vecs(__m128i vecs[DEGREE]) {
+ // Interleave 32-bit lates. The low unpack is lanes 00/11 and the high is
+ // 22/33. Note that this doesn't split the vector into two lanes, as the
+ // AVX2 counterparts do.
+ __m128i ab_01 = _mm_unpacklo_epi32(vecs[0], vecs[1]);
+ __m128i ab_23 = _mm_unpackhi_epi32(vecs[0], vecs[1]);
+ __m128i cd_01 = _mm_unpacklo_epi32(vecs[2], vecs[3]);
+ __m128i cd_23 = _mm_unpackhi_epi32(vecs[2], vecs[3]);
+
+ // Interleave 64-bit lanes.
+ __m128i abcd_0 = _mm_unpacklo_epi64(ab_01, cd_01);
+ __m128i abcd_1 = _mm_unpackhi_epi64(ab_01, cd_01);
+ __m128i abcd_2 = _mm_unpacklo_epi64(ab_23, cd_23);
+ __m128i abcd_3 = _mm_unpackhi_epi64(ab_23, cd_23);
+
+ vecs[0] = abcd_0;
+ vecs[1] = abcd_1;
+ vecs[2] = abcd_2;
+ vecs[3] = abcd_3;
+}
+
+INLINE void transpose_msg_vecs(const uint8_t *const *inputs,
+ size_t block_offset, __m128i out[16]) {
+ out[0] = loadu(&inputs[0][block_offset + 0 * sizeof(__m128i)]);
+ out[1] = loadu(&inputs[1][block_offset + 0 * sizeof(__m128i)]);
+ out[2] = loadu(&inputs[2][block_offset + 0 * sizeof(__m128i)]);
+ out[3] = loadu(&inputs[3][block_offset + 0 * sizeof(__m128i)]);
+ out[4] = loadu(&inputs[0][block_offset + 1 * sizeof(__m128i)]);
+ out[5] = loadu(&inputs[1][block_offset + 1 * sizeof(__m128i)]);
+ out[6] = loadu(&inputs[2][block_offset + 1 * sizeof(__m128i)]);
+ out[7] = loadu(&inputs[3][block_offset + 1 * sizeof(__m128i)]);
+ out[8] = loadu(&inputs[0][block_offset + 2 * sizeof(__m128i)]);
+ out[9] = loadu(&inputs[1][block_offset + 2 * sizeof(__m128i)]);
+ out[10] = loadu(&inputs[2][block_offset + 2 * sizeof(__m128i)]);
+ out[11] = loadu(&inputs[3][block_offset + 2 * sizeof(__m128i)]);
+ out[12] = loadu(&inputs[0][block_offset + 3 * sizeof(__m128i)]);
+ out[13] = loadu(&inputs[1][block_offset + 3 * sizeof(__m128i)]);
+ out[14] = loadu(&inputs[2][block_offset + 3 * sizeof(__m128i)]);
+ out[15] = loadu(&inputs[3][block_offset + 3 * sizeof(__m128i)]);
+ for (size_t i = 0; i < 4; ++i) {
+ _mm_prefetch((const void *)&inputs[i][block_offset + 256], _MM_HINT_T0);
+ }
+ transpose_vecs(&out[0]);
+ transpose_vecs(&out[4]);
+ transpose_vecs(&out[8]);
+ transpose_vecs(&out[12]);
+}
+
+INLINE void load_counters(uint64_t counter, bool increment_counter,
+ __m128i *out_lo, __m128i *out_hi) {
+ const __m128i mask = _mm_set1_epi32(-(int32_t)increment_counter);
+ const __m128i add0 = _mm_set_epi32(3, 2, 1, 0);
+ const __m128i add1 = _mm_and_si128(mask, add0);
+ __m128i l = _mm_add_epi32(_mm_set1_epi32((int32_t)counter), add1);
+ __m128i carry = _mm_cmpgt_epi32(_mm_xor_si128(add1, _mm_set1_epi32(0x80000000)),
+ _mm_xor_si128( l, _mm_set1_epi32(0x80000000)));
+ __m128i h = _mm_sub_epi32(_mm_set1_epi32((int32_t)(counter >> 32)), carry);
+ *out_lo = l;
+ *out_hi = h;
+}
+
+static
+void blake3_hash4_sse2(const uint8_t *const *inputs, size_t blocks,
+ const uint32_t key[8], uint64_t counter,
+ bool increment_counter, uint8_t flags,
+ uint8_t flags_start, uint8_t flags_end, uint8_t *out) {
+ __m128i h_vecs[8] = {
+ set1(key[0]), set1(key[1]), set1(key[2]), set1(key[3]),
+ set1(key[4]), set1(key[5]), set1(key[6]), set1(key[7]),
+ };
+ __m128i counter_low_vec, counter_high_vec;
+ load_counters(counter, increment_counter, &counter_low_vec,
+ &counter_high_vec);
+ uint8_t block_flags = flags | flags_start;
+
+ for (size_t block = 0; block < blocks; block++) {
+ if (block + 1 == blocks) {
+ block_flags |= flags_end;
+ }
+ __m128i block_len_vec = set1(BLAKE3_BLOCK_LEN);
+ __m128i block_flags_vec = set1(block_flags);
+ __m128i msg_vecs[16];
+ transpose_msg_vecs(inputs, block * BLAKE3_BLOCK_LEN, msg_vecs);
+
+ __m128i v[16] = {
+ h_vecs[0], h_vecs[1], h_vecs[2], h_vecs[3],
+ h_vecs[4], h_vecs[5], h_vecs[6], h_vecs[7],
+ set1(IV[0]), set1(IV[1]), set1(IV[2]), set1(IV[3]),
+ counter_low_vec, counter_high_vec, block_len_vec, block_flags_vec,
+ };
+ round_fn(v, msg_vecs, 0);
+ round_fn(v, msg_vecs, 1);
+ round_fn(v, msg_vecs, 2);
+ round_fn(v, msg_vecs, 3);
+ round_fn(v, msg_vecs, 4);
+ round_fn(v, msg_vecs, 5);
+ round_fn(v, msg_vecs, 6);
+ h_vecs[0] = xorv(v[0], v[8]);
+ h_vecs[1] = xorv(v[1], v[9]);
+ h_vecs[2] = xorv(v[2], v[10]);
+ h_vecs[3] = xorv(v[3], v[11]);
+ h_vecs[4] = xorv(v[4], v[12]);
+ h_vecs[5] = xorv(v[5], v[13]);
+ h_vecs[6] = xorv(v[6], v[14]);
+ h_vecs[7] = xorv(v[7], v[15]);
+
+ block_flags = flags;
+ }
+
+ transpose_vecs(&h_vecs[0]);
+ transpose_vecs(&h_vecs[4]);
+ // The first four vecs now contain the first half of each output, and the
+ // second four vecs contain the second half of each output.
+ storeu(h_vecs[0], &out[0 * sizeof(__m128i)]);
+ storeu(h_vecs[4], &out[1 * sizeof(__m128i)]);
+ storeu(h_vecs[1], &out[2 * sizeof(__m128i)]);
+ storeu(h_vecs[5], &out[3 * sizeof(__m128i)]);
+ storeu(h_vecs[2], &out[4 * sizeof(__m128i)]);
+ storeu(h_vecs[6], &out[5 * sizeof(__m128i)]);
+ storeu(h_vecs[3], &out[6 * sizeof(__m128i)]);
+ storeu(h_vecs[7], &out[7 * sizeof(__m128i)]);
+}
+
+INLINE void hash_one_sse2(const uint8_t *input, size_t blocks,
+ const uint32_t key[8], uint64_t counter,
+ uint8_t flags, uint8_t flags_start,
+ uint8_t flags_end, uint8_t out[BLAKE3_OUT_LEN]) {
+ uint32_t cv[8];
+ memcpy(cv, key, BLAKE3_KEY_LEN);
+ uint8_t block_flags = flags | flags_start;
+ while (blocks > 0) {
+ if (blocks == 1) {
+ block_flags |= flags_end;
+ }
+ blake3_compress_in_place_sse2(cv, input, BLAKE3_BLOCK_LEN, counter,
+ block_flags);
+ input = &input[BLAKE3_BLOCK_LEN];
+ blocks -= 1;
+ block_flags = flags;
+ }
+ memcpy(out, cv, BLAKE3_OUT_LEN);
+}
+
+void blake3_hash_many_sse2(const uint8_t *const *inputs, size_t num_inputs,
+ size_t blocks, const uint32_t key[8],
+ uint64_t counter, bool increment_counter,
+ uint8_t flags, uint8_t flags_start,
+ uint8_t flags_end, uint8_t *out) {
+ while (num_inputs >= DEGREE) {
+ blake3_hash4_sse2(inputs, blocks, key, counter, increment_counter, flags,
+ flags_start, flags_end, out);
+ if (increment_counter) {
+ counter += DEGREE;
+ }
+ inputs += DEGREE;
+ num_inputs -= DEGREE;
+ out = &out[DEGREE * BLAKE3_OUT_LEN];
+ }
+ while (num_inputs > 0) {
+ hash_one_sse2(inputs[0], blocks, key, counter, flags, flags_start,
+ flags_end, out);
+ if (increment_counter) {
+ counter += 1;
+ }
+ inputs += 1;
+ num_inputs -= 1;
+ out = &out[BLAKE3_OUT_LEN];
+ }
+}
diff --git a/contrib/libs/llvm16/lib/Support/BLAKE3/blake3_sse2_x86-64_windows_msvc.asm b/contrib/libs/llvm16/lib/Support/BLAKE3/blake3_sse2_x86-64_windows_msvc.asm
new file mode 100644
index 0000000000..1069c8df4e
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Support/BLAKE3/blake3_sse2_x86-64_windows_msvc.asm
@@ -0,0 +1,2350 @@
+public _llvm_blake3_hash_many_sse2
+public llvm_blake3_hash_many_sse2
+public llvm_blake3_compress_in_place_sse2
+public _llvm_blake3_compress_in_place_sse2
+public llvm_blake3_compress_xof_sse2
+public _llvm_blake3_compress_xof_sse2
+
+_TEXT SEGMENT ALIGN(16) 'CODE'
+
+ALIGN 16
+llvm_blake3_hash_many_sse2 PROC
+_llvm_blake3_hash_many_sse2 PROC
+ push r15
+ push r14
+ push r13
+ push r12
+ push rsi
+ push rdi
+ push rbx
+ push rbp
+ mov rbp, rsp
+ sub rsp, 528
+ and rsp, 0FFFFFFFFFFFFFFC0H
+ movdqa xmmword ptr [rsp+170H], xmm6
+ movdqa xmmword ptr [rsp+180H], xmm7
+ movdqa xmmword ptr [rsp+190H], xmm8
+ movdqa xmmword ptr [rsp+1A0H], xmm9
+ movdqa xmmword ptr [rsp+1B0H], xmm10
+ movdqa xmmword ptr [rsp+1C0H], xmm11
+ movdqa xmmword ptr [rsp+1D0H], xmm12
+ movdqa xmmword ptr [rsp+1E0H], xmm13
+ movdqa xmmword ptr [rsp+1F0H], xmm14
+ movdqa xmmword ptr [rsp+200H], xmm15
+ mov rdi, rcx
+ mov rsi, rdx
+ mov rdx, r8
+ mov rcx, r9
+ mov r8, qword ptr [rbp+68H]
+ movzx r9, byte ptr [rbp+70H]
+ neg r9d
+ movd xmm0, r9d
+ pshufd xmm0, xmm0, 00H
+ movdqa xmmword ptr [rsp+130H], xmm0
+ movdqa xmm1, xmm0
+ pand xmm1, xmmword ptr [ADD0]
+ pand xmm0, xmmword ptr [ADD1]
+ movdqa xmmword ptr [rsp+150H], xmm0
+ movd xmm0, r8d
+ pshufd xmm0, xmm0, 00H
+ paddd xmm0, xmm1
+ movdqa xmmword ptr [rsp+110H], xmm0
+ pxor xmm0, xmmword ptr [CMP_MSB_MASK]
+ pxor xmm1, xmmword ptr [CMP_MSB_MASK]
+ pcmpgtd xmm1, xmm0
+ shr r8, 32
+ movd xmm2, r8d
+ pshufd xmm2, xmm2, 00H
+ psubd xmm2, xmm1
+ movdqa xmmword ptr [rsp+120H], xmm2
+ mov rbx, qword ptr [rbp+90H]
+ mov r15, rdx
+ shl r15, 6
+ movzx r13d, byte ptr [rbp+78H]
+ movzx r12d, byte ptr [rbp+88H]
+ cmp rsi, 4
+ jc final3blocks
+outerloop4:
+ movdqu xmm3, xmmword ptr [rcx]
+ pshufd xmm0, xmm3, 00H
+ pshufd xmm1, xmm3, 55H
+ pshufd xmm2, xmm3, 0AAH
+ pshufd xmm3, xmm3, 0FFH
+ movdqu xmm7, xmmword ptr [rcx+10H]
+ pshufd xmm4, xmm7, 00H
+ pshufd xmm5, xmm7, 55H
+ pshufd xmm6, xmm7, 0AAH
+ pshufd xmm7, xmm7, 0FFH
+ mov r8, qword ptr [rdi]
+ mov r9, qword ptr [rdi+8H]
+ mov r10, qword ptr [rdi+10H]
+ mov r11, qword ptr [rdi+18H]
+ movzx eax, byte ptr [rbp+80H]
+ or eax, r13d
+ xor edx, edx
+innerloop4:
+ mov r14d, eax
+ or eax, r12d
+ add rdx, 64
+ cmp rdx, r15
+ cmovne eax, r14d
+ movdqu xmm8, xmmword ptr [r8+rdx-40H]
+ movdqu xmm9, xmmword ptr [r9+rdx-40H]
+ movdqu xmm10, xmmword ptr [r10+rdx-40H]
+ movdqu xmm11, xmmword ptr [r11+rdx-40H]
+ movdqa xmm12, xmm8
+ punpckldq xmm8, xmm9
+ punpckhdq xmm12, xmm9
+ movdqa xmm14, xmm10
+ punpckldq xmm10, xmm11
+ punpckhdq xmm14, xmm11
+ movdqa xmm9, xmm8
+ punpcklqdq xmm8, xmm10
+ punpckhqdq xmm9, xmm10
+ movdqa xmm13, xmm12
+ punpcklqdq xmm12, xmm14
+ punpckhqdq xmm13, xmm14
+ movdqa xmmword ptr [rsp], xmm8
+ movdqa xmmword ptr [rsp+10H], xmm9
+ movdqa xmmword ptr [rsp+20H], xmm12
+ movdqa xmmword ptr [rsp+30H], xmm13
+ movdqu xmm8, xmmword ptr [r8+rdx-30H]
+ movdqu xmm9, xmmword ptr [r9+rdx-30H]
+ movdqu xmm10, xmmword ptr [r10+rdx-30H]
+ movdqu xmm11, xmmword ptr [r11+rdx-30H]
+ movdqa xmm12, xmm8
+ punpckldq xmm8, xmm9
+ punpckhdq xmm12, xmm9
+ movdqa xmm14, xmm10
+ punpckldq xmm10, xmm11
+ punpckhdq xmm14, xmm11
+ movdqa xmm9, xmm8
+ punpcklqdq xmm8, xmm10
+ punpckhqdq xmm9, xmm10
+ movdqa xmm13, xmm12
+ punpcklqdq xmm12, xmm14
+ punpckhqdq xmm13, xmm14
+ movdqa xmmword ptr [rsp+40H], xmm8
+ movdqa xmmword ptr [rsp+50H], xmm9
+ movdqa xmmword ptr [rsp+60H], xmm12
+ movdqa xmmword ptr [rsp+70H], xmm13
+ movdqu xmm8, xmmword ptr [r8+rdx-20H]
+ movdqu xmm9, xmmword ptr [r9+rdx-20H]
+ movdqu xmm10, xmmword ptr [r10+rdx-20H]
+ movdqu xmm11, xmmword ptr [r11+rdx-20H]
+ movdqa xmm12, xmm8
+ punpckldq xmm8, xmm9
+ punpckhdq xmm12, xmm9
+ movdqa xmm14, xmm10
+ punpckldq xmm10, xmm11
+ punpckhdq xmm14, xmm11
+ movdqa xmm9, xmm8
+ punpcklqdq xmm8, xmm10
+ punpckhqdq xmm9, xmm10
+ movdqa xmm13, xmm12
+ punpcklqdq xmm12, xmm14
+ punpckhqdq xmm13, xmm14
+ movdqa xmmword ptr [rsp+80H], xmm8
+ movdqa xmmword ptr [rsp+90H], xmm9
+ movdqa xmmword ptr [rsp+0A0H], xmm12
+ movdqa xmmword ptr [rsp+0B0H], xmm13
+ movdqu xmm8, xmmword ptr [r8+rdx-10H]
+ movdqu xmm9, xmmword ptr [r9+rdx-10H]
+ movdqu xmm10, xmmword ptr [r10+rdx-10H]
+ movdqu xmm11, xmmword ptr [r11+rdx-10H]
+ movdqa xmm12, xmm8
+ punpckldq xmm8, xmm9
+ punpckhdq xmm12, xmm9
+ movdqa xmm14, xmm10
+ punpckldq xmm10, xmm11
+ punpckhdq xmm14, xmm11
+ movdqa xmm9, xmm8
+ punpcklqdq xmm8, xmm10
+ punpckhqdq xmm9, xmm10
+ movdqa xmm13, xmm12
+ punpcklqdq xmm12, xmm14
+ punpckhqdq xmm13, xmm14
+ movdqa xmmword ptr [rsp+0C0H], xmm8
+ movdqa xmmword ptr [rsp+0D0H], xmm9
+ movdqa xmmword ptr [rsp+0E0H], xmm12
+ movdqa xmmword ptr [rsp+0F0H], xmm13
+ movdqa xmm9, xmmword ptr [BLAKE3_IV_1]
+ movdqa xmm10, xmmword ptr [BLAKE3_IV_2]
+ movdqa xmm11, xmmword ptr [BLAKE3_IV_3]
+ movdqa xmm12, xmmword ptr [rsp+110H]
+ movdqa xmm13, xmmword ptr [rsp+120H]
+ movdqa xmm14, xmmword ptr [BLAKE3_BLOCK_LEN]
+ movd xmm15, eax
+ pshufd xmm15, xmm15, 00H
+ prefetcht0 byte ptr [r8+rdx+80H]
+ prefetcht0 byte ptr [r9+rdx+80H]
+ prefetcht0 byte ptr [r10+rdx+80H]
+ prefetcht0 byte ptr [r11+rdx+80H]
+ paddd xmm0, xmmword ptr [rsp]
+ paddd xmm1, xmmword ptr [rsp+20H]
+ paddd xmm2, xmmword ptr [rsp+40H]
+ paddd xmm3, xmmword ptr [rsp+60H]
+ paddd xmm0, xmm4
+ paddd xmm1, xmm5
+ paddd xmm2, xmm6
+ paddd xmm3, xmm7
+ pxor xmm12, xmm0
+ pxor xmm13, xmm1
+ pxor xmm14, xmm2
+ pxor xmm15, xmm3
+ pshuflw xmm12, xmm12, 0B1H
+ pshufhw xmm12, xmm12, 0B1H
+ pshuflw xmm13, xmm13, 0B1H
+ pshufhw xmm13, xmm13, 0B1H
+ pshuflw xmm14, xmm14, 0B1H
+ pshufhw xmm14, xmm14, 0B1H
+ pshuflw xmm15, xmm15, 0B1H
+ pshufhw xmm15, xmm15, 0B1H
+ movdqa xmm8, xmmword ptr [BLAKE3_IV_0]
+ paddd xmm8, xmm12
+ paddd xmm9, xmm13
+ paddd xmm10, xmm14
+ paddd xmm11, xmm15
+ pxor xmm4, xmm8
+ pxor xmm5, xmm9
+ pxor xmm6, xmm10
+ pxor xmm7, xmm11
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 12
+ pslld xmm4, 20
+ por xmm4, xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 12
+ pslld xmm5, 20
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 12
+ pslld xmm6, 20
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 12
+ pslld xmm7, 20
+ por xmm7, xmm8
+ paddd xmm0, xmmword ptr [rsp+10H]
+ paddd xmm1, xmmword ptr [rsp+30H]
+ paddd xmm2, xmmword ptr [rsp+50H]
+ paddd xmm3, xmmword ptr [rsp+70H]
+ paddd xmm0, xmm4
+ paddd xmm1, xmm5
+ paddd xmm2, xmm6
+ paddd xmm3, xmm7
+ pxor xmm12, xmm0
+ pxor xmm13, xmm1
+ pxor xmm14, xmm2
+ pxor xmm15, xmm3
+ movdqa xmm8, xmm12
+ psrld xmm12, 8
+ pslld xmm8, 24
+ pxor xmm12, xmm8
+ movdqa xmm8, xmm13
+ psrld xmm13, 8
+ pslld xmm8, 24
+ pxor xmm13, xmm8
+ movdqa xmm8, xmm14
+ psrld xmm14, 8
+ pslld xmm8, 24
+ pxor xmm14, xmm8
+ movdqa xmm8, xmm15
+ psrld xmm15, 8
+ pslld xmm8, 24
+ pxor xmm15, xmm8
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm12
+ paddd xmm9, xmm13
+ paddd xmm10, xmm14
+ paddd xmm11, xmm15
+ pxor xmm4, xmm8
+ pxor xmm5, xmm9
+ pxor xmm6, xmm10
+ pxor xmm7, xmm11
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 7
+ pslld xmm4, 25
+ por xmm4, xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 7
+ pslld xmm5, 25
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 7
+ pslld xmm6, 25
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 7
+ pslld xmm7, 25
+ por xmm7, xmm8
+ paddd xmm0, xmmword ptr [rsp+80H]
+ paddd xmm1, xmmword ptr [rsp+0A0H]
+ paddd xmm2, xmmword ptr [rsp+0C0H]
+ paddd xmm3, xmmword ptr [rsp+0E0H]
+ paddd xmm0, xmm5
+ paddd xmm1, xmm6
+ paddd xmm2, xmm7
+ paddd xmm3, xmm4
+ pxor xmm15, xmm0
+ pxor xmm12, xmm1
+ pxor xmm13, xmm2
+ pxor xmm14, xmm3
+ pshuflw xmm15, xmm15, 0B1H
+ pshufhw xmm15, xmm15, 0B1H
+ pshuflw xmm12, xmm12, 0B1H
+ pshufhw xmm12, xmm12, 0B1H
+ pshuflw xmm13, xmm13, 0B1H
+ pshufhw xmm13, xmm13, 0B1H
+ pshuflw xmm14, xmm14, 0B1H
+ pshufhw xmm14, xmm14, 0B1H
+ paddd xmm10, xmm15
+ paddd xmm11, xmm12
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm13
+ paddd xmm9, xmm14
+ pxor xmm5, xmm10
+ pxor xmm6, xmm11
+ pxor xmm7, xmm8
+ pxor xmm4, xmm9
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 12
+ pslld xmm5, 20
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 12
+ pslld xmm6, 20
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 12
+ pslld xmm7, 20
+ por xmm7, xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 12
+ pslld xmm4, 20
+ por xmm4, xmm8
+ paddd xmm0, xmmword ptr [rsp+90H]
+ paddd xmm1, xmmword ptr [rsp+0B0H]
+ paddd xmm2, xmmword ptr [rsp+0D0H]
+ paddd xmm3, xmmword ptr [rsp+0F0H]
+ paddd xmm0, xmm5
+ paddd xmm1, xmm6
+ paddd xmm2, xmm7
+ paddd xmm3, xmm4
+ pxor xmm15, xmm0
+ pxor xmm12, xmm1
+ pxor xmm13, xmm2
+ pxor xmm14, xmm3
+ movdqa xmm8, xmm15
+ psrld xmm15, 8
+ pslld xmm8, 24
+ pxor xmm15, xmm8
+ movdqa xmm8, xmm12
+ psrld xmm12, 8
+ pslld xmm8, 24
+ pxor xmm12, xmm8
+ movdqa xmm8, xmm13
+ psrld xmm13, 8
+ pslld xmm8, 24
+ pxor xmm13, xmm8
+ movdqa xmm8, xmm14
+ psrld xmm14, 8
+ pslld xmm8, 24
+ pxor xmm14, xmm8
+ paddd xmm10, xmm15
+ paddd xmm11, xmm12
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm13
+ paddd xmm9, xmm14
+ pxor xmm5, xmm10
+ pxor xmm6, xmm11
+ pxor xmm7, xmm8
+ pxor xmm4, xmm9
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 7
+ pslld xmm5, 25
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 7
+ pslld xmm6, 25
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 7
+ pslld xmm7, 25
+ por xmm7, xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 7
+ pslld xmm4, 25
+ por xmm4, xmm8
+ paddd xmm0, xmmword ptr [rsp+20H]
+ paddd xmm1, xmmword ptr [rsp+30H]
+ paddd xmm2, xmmword ptr [rsp+70H]
+ paddd xmm3, xmmword ptr [rsp+40H]
+ paddd xmm0, xmm4
+ paddd xmm1, xmm5
+ paddd xmm2, xmm6
+ paddd xmm3, xmm7
+ pxor xmm12, xmm0
+ pxor xmm13, xmm1
+ pxor xmm14, xmm2
+ pxor xmm15, xmm3
+ pshuflw xmm12, xmm12, 0B1H
+ pshufhw xmm12, xmm12, 0B1H
+ pshuflw xmm13, xmm13, 0B1H
+ pshufhw xmm13, xmm13, 0B1H
+ pshuflw xmm14, xmm14, 0B1H
+ pshufhw xmm14, xmm14, 0B1H
+ pshuflw xmm15, xmm15, 0B1H
+ pshufhw xmm15, xmm15, 0B1H
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm12
+ paddd xmm9, xmm13
+ paddd xmm10, xmm14
+ paddd xmm11, xmm15
+ pxor xmm4, xmm8
+ pxor xmm5, xmm9
+ pxor xmm6, xmm10
+ pxor xmm7, xmm11
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 12
+ pslld xmm4, 20
+ por xmm4, xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 12
+ pslld xmm5, 20
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 12
+ pslld xmm6, 20
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 12
+ pslld xmm7, 20
+ por xmm7, xmm8
+ paddd xmm0, xmmword ptr [rsp+60H]
+ paddd xmm1, xmmword ptr [rsp+0A0H]
+ paddd xmm2, xmmword ptr [rsp]
+ paddd xmm3, xmmword ptr [rsp+0D0H]
+ paddd xmm0, xmm4
+ paddd xmm1, xmm5
+ paddd xmm2, xmm6
+ paddd xmm3, xmm7
+ pxor xmm12, xmm0
+ pxor xmm13, xmm1
+ pxor xmm14, xmm2
+ pxor xmm15, xmm3
+ movdqa xmm8, xmm12
+ psrld xmm12, 8
+ pslld xmm8, 24
+ pxor xmm12, xmm8
+ movdqa xmm8, xmm13
+ psrld xmm13, 8
+ pslld xmm8, 24
+ pxor xmm13, xmm8
+ movdqa xmm8, xmm14
+ psrld xmm14, 8
+ pslld xmm8, 24
+ pxor xmm14, xmm8
+ movdqa xmm8, xmm15
+ psrld xmm15, 8
+ pslld xmm8, 24
+ pxor xmm15, xmm8
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm12
+ paddd xmm9, xmm13
+ paddd xmm10, xmm14
+ paddd xmm11, xmm15
+ pxor xmm4, xmm8
+ pxor xmm5, xmm9
+ pxor xmm6, xmm10
+ pxor xmm7, xmm11
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 7
+ pslld xmm4, 25
+ por xmm4, xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 7
+ pslld xmm5, 25
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 7
+ pslld xmm6, 25
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 7
+ pslld xmm7, 25
+ por xmm7, xmm8
+ paddd xmm0, xmmword ptr [rsp+10H]
+ paddd xmm1, xmmword ptr [rsp+0C0H]
+ paddd xmm2, xmmword ptr [rsp+90H]
+ paddd xmm3, xmmword ptr [rsp+0F0H]
+ paddd xmm0, xmm5
+ paddd xmm1, xmm6
+ paddd xmm2, xmm7
+ paddd xmm3, xmm4
+ pxor xmm15, xmm0
+ pxor xmm12, xmm1
+ pxor xmm13, xmm2
+ pxor xmm14, xmm3
+ pshuflw xmm15, xmm15, 0B1H
+ pshufhw xmm15, xmm15, 0B1H
+ pshuflw xmm12, xmm12, 0B1H
+ pshufhw xmm12, xmm12, 0B1H
+ pshuflw xmm13, xmm13, 0B1H
+ pshufhw xmm13, xmm13, 0B1H
+ pshuflw xmm14, xmm14, 0B1H
+ pshufhw xmm14, xmm14, 0B1H
+ paddd xmm10, xmm15
+ paddd xmm11, xmm12
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm13
+ paddd xmm9, xmm14
+ pxor xmm5, xmm10
+ pxor xmm6, xmm11
+ pxor xmm7, xmm8
+ pxor xmm4, xmm9
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 12
+ pslld xmm5, 20
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 12
+ pslld xmm6, 20
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 12
+ pslld xmm7, 20
+ por xmm7, xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 12
+ pslld xmm4, 20
+ por xmm4, xmm8
+ paddd xmm0, xmmword ptr [rsp+0B0H]
+ paddd xmm1, xmmword ptr [rsp+50H]
+ paddd xmm2, xmmword ptr [rsp+0E0H]
+ paddd xmm3, xmmword ptr [rsp+80H]
+ paddd xmm0, xmm5
+ paddd xmm1, xmm6
+ paddd xmm2, xmm7
+ paddd xmm3, xmm4
+ pxor xmm15, xmm0
+ pxor xmm12, xmm1
+ pxor xmm13, xmm2
+ pxor xmm14, xmm3
+ movdqa xmm8, xmm15
+ psrld xmm15, 8
+ pslld xmm8, 24
+ pxor xmm15, xmm8
+ movdqa xmm8, xmm12
+ psrld xmm12, 8
+ pslld xmm8, 24
+ pxor xmm12, xmm8
+ movdqa xmm8, xmm13
+ psrld xmm13, 8
+ pslld xmm8, 24
+ pxor xmm13, xmm8
+ movdqa xmm8, xmm14
+ psrld xmm14, 8
+ pslld xmm8, 24
+ pxor xmm14, xmm8
+ paddd xmm10, xmm15
+ paddd xmm11, xmm12
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm13
+ paddd xmm9, xmm14
+ pxor xmm5, xmm10
+ pxor xmm6, xmm11
+ pxor xmm7, xmm8
+ pxor xmm4, xmm9
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 7
+ pslld xmm5, 25
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 7
+ pslld xmm6, 25
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 7
+ pslld xmm7, 25
+ por xmm7, xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 7
+ pslld xmm4, 25
+ por xmm4, xmm8
+ paddd xmm0, xmmword ptr [rsp+30H]
+ paddd xmm1, xmmword ptr [rsp+0A0H]
+ paddd xmm2, xmmword ptr [rsp+0D0H]
+ paddd xmm3, xmmword ptr [rsp+70H]
+ paddd xmm0, xmm4
+ paddd xmm1, xmm5
+ paddd xmm2, xmm6
+ paddd xmm3, xmm7
+ pxor xmm12, xmm0
+ pxor xmm13, xmm1
+ pxor xmm14, xmm2
+ pxor xmm15, xmm3
+ pshuflw xmm12, xmm12, 0B1H
+ pshufhw xmm12, xmm12, 0B1H
+ pshuflw xmm13, xmm13, 0B1H
+ pshufhw xmm13, xmm13, 0B1H
+ pshuflw xmm14, xmm14, 0B1H
+ pshufhw xmm14, xmm14, 0B1H
+ pshuflw xmm15, xmm15, 0B1H
+ pshufhw xmm15, xmm15, 0B1H
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm12
+ paddd xmm9, xmm13
+ paddd xmm10, xmm14
+ paddd xmm11, xmm15
+ pxor xmm4, xmm8
+ pxor xmm5, xmm9
+ pxor xmm6, xmm10
+ pxor xmm7, xmm11
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 12
+ pslld xmm4, 20
+ por xmm4, xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 12
+ pslld xmm5, 20
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 12
+ pslld xmm6, 20
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 12
+ pslld xmm7, 20
+ por xmm7, xmm8
+ paddd xmm0, xmmword ptr [rsp+40H]
+ paddd xmm1, xmmword ptr [rsp+0C0H]
+ paddd xmm2, xmmword ptr [rsp+20H]
+ paddd xmm3, xmmword ptr [rsp+0E0H]
+ paddd xmm0, xmm4
+ paddd xmm1, xmm5
+ paddd xmm2, xmm6
+ paddd xmm3, xmm7
+ pxor xmm12, xmm0
+ pxor xmm13, xmm1
+ pxor xmm14, xmm2
+ pxor xmm15, xmm3
+ movdqa xmm8, xmm12
+ psrld xmm12, 8
+ pslld xmm8, 24
+ pxor xmm12, xmm8
+ movdqa xmm8, xmm13
+ psrld xmm13, 8
+ pslld xmm8, 24
+ pxor xmm13, xmm8
+ movdqa xmm8, xmm14
+ psrld xmm14, 8
+ pslld xmm8, 24
+ pxor xmm14, xmm8
+ movdqa xmm8, xmm15
+ psrld xmm15, 8
+ pslld xmm8, 24
+ pxor xmm15, xmm8
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm12
+ paddd xmm9, xmm13
+ paddd xmm10, xmm14
+ paddd xmm11, xmm15
+ pxor xmm4, xmm8
+ pxor xmm5, xmm9
+ pxor xmm6, xmm10
+ pxor xmm7, xmm11
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 7
+ pslld xmm4, 25
+ por xmm4, xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 7
+ pslld xmm5, 25
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 7
+ pslld xmm6, 25
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 7
+ pslld xmm7, 25
+ por xmm7, xmm8
+ paddd xmm0, xmmword ptr [rsp+60H]
+ paddd xmm1, xmmword ptr [rsp+90H]
+ paddd xmm2, xmmword ptr [rsp+0B0H]
+ paddd xmm3, xmmword ptr [rsp+80H]
+ paddd xmm0, xmm5
+ paddd xmm1, xmm6
+ paddd xmm2, xmm7
+ paddd xmm3, xmm4
+ pxor xmm15, xmm0
+ pxor xmm12, xmm1
+ pxor xmm13, xmm2
+ pxor xmm14, xmm3
+ pshuflw xmm15, xmm15, 0B1H
+ pshufhw xmm15, xmm15, 0B1H
+ pshuflw xmm12, xmm12, 0B1H
+ pshufhw xmm12, xmm12, 0B1H
+ pshuflw xmm13, xmm13, 0B1H
+ pshufhw xmm13, xmm13, 0B1H
+ pshuflw xmm14, xmm14, 0B1H
+ pshufhw xmm14, xmm14, 0B1H
+ paddd xmm10, xmm15
+ paddd xmm11, xmm12
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm13
+ paddd xmm9, xmm14
+ pxor xmm5, xmm10
+ pxor xmm6, xmm11
+ pxor xmm7, xmm8
+ pxor xmm4, xmm9
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 12
+ pslld xmm5, 20
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 12
+ pslld xmm6, 20
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 12
+ pslld xmm7, 20
+ por xmm7, xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 12
+ pslld xmm4, 20
+ por xmm4, xmm8
+ paddd xmm0, xmmword ptr [rsp+50H]
+ paddd xmm1, xmmword ptr [rsp]
+ paddd xmm2, xmmword ptr [rsp+0F0H]
+ paddd xmm3, xmmword ptr [rsp+10H]
+ paddd xmm0, xmm5
+ paddd xmm1, xmm6
+ paddd xmm2, xmm7
+ paddd xmm3, xmm4
+ pxor xmm15, xmm0
+ pxor xmm12, xmm1
+ pxor xmm13, xmm2
+ pxor xmm14, xmm3
+ movdqa xmm8, xmm15
+ psrld xmm15, 8
+ pslld xmm8, 24
+ pxor xmm15, xmm8
+ movdqa xmm8, xmm12
+ psrld xmm12, 8
+ pslld xmm8, 24
+ pxor xmm12, xmm8
+ movdqa xmm8, xmm13
+ psrld xmm13, 8
+ pslld xmm8, 24
+ pxor xmm13, xmm8
+ movdqa xmm8, xmm14
+ psrld xmm14, 8
+ pslld xmm8, 24
+ pxor xmm14, xmm8
+ paddd xmm10, xmm15
+ paddd xmm11, xmm12
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm13
+ paddd xmm9, xmm14
+ pxor xmm5, xmm10
+ pxor xmm6, xmm11
+ pxor xmm7, xmm8
+ pxor xmm4, xmm9
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 7
+ pslld xmm5, 25
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 7
+ pslld xmm6, 25
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 7
+ pslld xmm7, 25
+ por xmm7, xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 7
+ pslld xmm4, 25
+ por xmm4, xmm8
+ paddd xmm0, xmmword ptr [rsp+0A0H]
+ paddd xmm1, xmmword ptr [rsp+0C0H]
+ paddd xmm2, xmmword ptr [rsp+0E0H]
+ paddd xmm3, xmmword ptr [rsp+0D0H]
+ paddd xmm0, xmm4
+ paddd xmm1, xmm5
+ paddd xmm2, xmm6
+ paddd xmm3, xmm7
+ pxor xmm12, xmm0
+ pxor xmm13, xmm1
+ pxor xmm14, xmm2
+ pxor xmm15, xmm3
+ pshuflw xmm12, xmm12, 0B1H
+ pshufhw xmm12, xmm12, 0B1H
+ pshuflw xmm13, xmm13, 0B1H
+ pshufhw xmm13, xmm13, 0B1H
+ pshuflw xmm14, xmm14, 0B1H
+ pshufhw xmm14, xmm14, 0B1H
+ pshuflw xmm15, xmm15, 0B1H
+ pshufhw xmm15, xmm15, 0B1H
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm12
+ paddd xmm9, xmm13
+ paddd xmm10, xmm14
+ paddd xmm11, xmm15
+ pxor xmm4, xmm8
+ pxor xmm5, xmm9
+ pxor xmm6, xmm10
+ pxor xmm7, xmm11
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 12
+ pslld xmm4, 20
+ por xmm4, xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 12
+ pslld xmm5, 20
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 12
+ pslld xmm6, 20
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 12
+ pslld xmm7, 20
+ por xmm7, xmm8
+ paddd xmm0, xmmword ptr [rsp+70H]
+ paddd xmm1, xmmword ptr [rsp+90H]
+ paddd xmm2, xmmword ptr [rsp+30H]
+ paddd xmm3, xmmword ptr [rsp+0F0H]
+ paddd xmm0, xmm4
+ paddd xmm1, xmm5
+ paddd xmm2, xmm6
+ paddd xmm3, xmm7
+ pxor xmm12, xmm0
+ pxor xmm13, xmm1
+ pxor xmm14, xmm2
+ pxor xmm15, xmm3
+ movdqa xmm8, xmm12
+ psrld xmm12, 8
+ pslld xmm8, 24
+ pxor xmm12, xmm8
+ movdqa xmm8, xmm13
+ psrld xmm13, 8
+ pslld xmm8, 24
+ pxor xmm13, xmm8
+ movdqa xmm8, xmm14
+ psrld xmm14, 8
+ pslld xmm8, 24
+ pxor xmm14, xmm8
+ movdqa xmm8, xmm15
+ psrld xmm15, 8
+ pslld xmm8, 24
+ pxor xmm15, xmm8
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm12
+ paddd xmm9, xmm13
+ paddd xmm10, xmm14
+ paddd xmm11, xmm15
+ pxor xmm4, xmm8
+ pxor xmm5, xmm9
+ pxor xmm6, xmm10
+ pxor xmm7, xmm11
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 7
+ pslld xmm4, 25
+ por xmm4, xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 7
+ pslld xmm5, 25
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 7
+ pslld xmm6, 25
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 7
+ pslld xmm7, 25
+ por xmm7, xmm8
+ paddd xmm0, xmmword ptr [rsp+40H]
+ paddd xmm1, xmmword ptr [rsp+0B0H]
+ paddd xmm2, xmmword ptr [rsp+50H]
+ paddd xmm3, xmmword ptr [rsp+10H]
+ paddd xmm0, xmm5
+ paddd xmm1, xmm6
+ paddd xmm2, xmm7
+ paddd xmm3, xmm4
+ pxor xmm15, xmm0
+ pxor xmm12, xmm1
+ pxor xmm13, xmm2
+ pxor xmm14, xmm3
+ pshuflw xmm15, xmm15, 0B1H
+ pshufhw xmm15, xmm15, 0B1H
+ pshuflw xmm12, xmm12, 0B1H
+ pshufhw xmm12, xmm12, 0B1H
+ pshuflw xmm13, xmm13, 0B1H
+ pshufhw xmm13, xmm13, 0B1H
+ pshuflw xmm14, xmm14, 0B1H
+ pshufhw xmm14, xmm14, 0B1H
+ paddd xmm10, xmm15
+ paddd xmm11, xmm12
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm13
+ paddd xmm9, xmm14
+ pxor xmm5, xmm10
+ pxor xmm6, xmm11
+ pxor xmm7, xmm8
+ pxor xmm4, xmm9
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 12
+ pslld xmm5, 20
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 12
+ pslld xmm6, 20
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 12
+ pslld xmm7, 20
+ por xmm7, xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 12
+ pslld xmm4, 20
+ por xmm4, xmm8
+ paddd xmm0, xmmword ptr [rsp]
+ paddd xmm1, xmmword ptr [rsp+20H]
+ paddd xmm2, xmmword ptr [rsp+80H]
+ paddd xmm3, xmmword ptr [rsp+60H]
+ paddd xmm0, xmm5
+ paddd xmm1, xmm6
+ paddd xmm2, xmm7
+ paddd xmm3, xmm4
+ pxor xmm15, xmm0
+ pxor xmm12, xmm1
+ pxor xmm13, xmm2
+ pxor xmm14, xmm3
+ movdqa xmm8, xmm15
+ psrld xmm15, 8
+ pslld xmm8, 24
+ pxor xmm15, xmm8
+ movdqa xmm8, xmm12
+ psrld xmm12, 8
+ pslld xmm8, 24
+ pxor xmm12, xmm8
+ movdqa xmm8, xmm13
+ psrld xmm13, 8
+ pslld xmm8, 24
+ pxor xmm13, xmm8
+ movdqa xmm8, xmm14
+ psrld xmm14, 8
+ pslld xmm8, 24
+ pxor xmm14, xmm8
+ paddd xmm10, xmm15
+ paddd xmm11, xmm12
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm13
+ paddd xmm9, xmm14
+ pxor xmm5, xmm10
+ pxor xmm6, xmm11
+ pxor xmm7, xmm8
+ pxor xmm4, xmm9
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 7
+ pslld xmm5, 25
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 7
+ pslld xmm6, 25
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 7
+ pslld xmm7, 25
+ por xmm7, xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 7
+ pslld xmm4, 25
+ por xmm4, xmm8
+ paddd xmm0, xmmword ptr [rsp+0C0H]
+ paddd xmm1, xmmword ptr [rsp+90H]
+ paddd xmm2, xmmword ptr [rsp+0F0H]
+ paddd xmm3, xmmword ptr [rsp+0E0H]
+ paddd xmm0, xmm4
+ paddd xmm1, xmm5
+ paddd xmm2, xmm6
+ paddd xmm3, xmm7
+ pxor xmm12, xmm0
+ pxor xmm13, xmm1
+ pxor xmm14, xmm2
+ pxor xmm15, xmm3
+ pshuflw xmm12, xmm12, 0B1H
+ pshufhw xmm12, xmm12, 0B1H
+ pshuflw xmm13, xmm13, 0B1H
+ pshufhw xmm13, xmm13, 0B1H
+ pshuflw xmm14, xmm14, 0B1H
+ pshufhw xmm14, xmm14, 0B1H
+ pshuflw xmm15, xmm15, 0B1H
+ pshufhw xmm15, xmm15, 0B1H
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm12
+ paddd xmm9, xmm13
+ paddd xmm10, xmm14
+ paddd xmm11, xmm15
+ pxor xmm4, xmm8
+ pxor xmm5, xmm9
+ pxor xmm6, xmm10
+ pxor xmm7, xmm11
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 12
+ pslld xmm4, 20
+ por xmm4, xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 12
+ pslld xmm5, 20
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 12
+ pslld xmm6, 20
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 12
+ pslld xmm7, 20
+ por xmm7, xmm8
+ paddd xmm0, xmmword ptr [rsp+0D0H]
+ paddd xmm1, xmmword ptr [rsp+0B0H]
+ paddd xmm2, xmmword ptr [rsp+0A0H]
+ paddd xmm3, xmmword ptr [rsp+80H]
+ paddd xmm0, xmm4
+ paddd xmm1, xmm5
+ paddd xmm2, xmm6
+ paddd xmm3, xmm7
+ pxor xmm12, xmm0
+ pxor xmm13, xmm1
+ pxor xmm14, xmm2
+ pxor xmm15, xmm3
+ movdqa xmm8, xmm12
+ psrld xmm12, 8
+ pslld xmm8, 24
+ pxor xmm12, xmm8
+ movdqa xmm8, xmm13
+ psrld xmm13, 8
+ pslld xmm8, 24
+ pxor xmm13, xmm8
+ movdqa xmm8, xmm14
+ psrld xmm14, 8
+ pslld xmm8, 24
+ pxor xmm14, xmm8
+ movdqa xmm8, xmm15
+ psrld xmm15, 8
+ pslld xmm8, 24
+ pxor xmm15, xmm8
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm12
+ paddd xmm9, xmm13
+ paddd xmm10, xmm14
+ paddd xmm11, xmm15
+ pxor xmm4, xmm8
+ pxor xmm5, xmm9
+ pxor xmm6, xmm10
+ pxor xmm7, xmm11
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 7
+ pslld xmm4, 25
+ por xmm4, xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 7
+ pslld xmm5, 25
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 7
+ pslld xmm6, 25
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 7
+ pslld xmm7, 25
+ por xmm7, xmm8
+ paddd xmm0, xmmword ptr [rsp+70H]
+ paddd xmm1, xmmword ptr [rsp+50H]
+ paddd xmm2, xmmword ptr [rsp]
+ paddd xmm3, xmmword ptr [rsp+60H]
+ paddd xmm0, xmm5
+ paddd xmm1, xmm6
+ paddd xmm2, xmm7
+ paddd xmm3, xmm4
+ pxor xmm15, xmm0
+ pxor xmm12, xmm1
+ pxor xmm13, xmm2
+ pxor xmm14, xmm3
+ pshuflw xmm15, xmm15, 0B1H
+ pshufhw xmm15, xmm15, 0B1H
+ pshuflw xmm12, xmm12, 0B1H
+ pshufhw xmm12, xmm12, 0B1H
+ pshuflw xmm13, xmm13, 0B1H
+ pshufhw xmm13, xmm13, 0B1H
+ pshuflw xmm14, xmm14, 0B1H
+ pshufhw xmm14, xmm14, 0B1H
+ paddd xmm10, xmm15
+ paddd xmm11, xmm12
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm13
+ paddd xmm9, xmm14
+ pxor xmm5, xmm10
+ pxor xmm6, xmm11
+ pxor xmm7, xmm8
+ pxor xmm4, xmm9
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 12
+ pslld xmm5, 20
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 12
+ pslld xmm6, 20
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 12
+ pslld xmm7, 20
+ por xmm7, xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 12
+ pslld xmm4, 20
+ por xmm4, xmm8
+ paddd xmm0, xmmword ptr [rsp+20H]
+ paddd xmm1, xmmword ptr [rsp+30H]
+ paddd xmm2, xmmword ptr [rsp+10H]
+ paddd xmm3, xmmword ptr [rsp+40H]
+ paddd xmm0, xmm5
+ paddd xmm1, xmm6
+ paddd xmm2, xmm7
+ paddd xmm3, xmm4
+ pxor xmm15, xmm0
+ pxor xmm12, xmm1
+ pxor xmm13, xmm2
+ pxor xmm14, xmm3
+ movdqa xmm8, xmm15
+ psrld xmm15, 8
+ pslld xmm8, 24
+ pxor xmm15, xmm8
+ movdqa xmm8, xmm12
+ psrld xmm12, 8
+ pslld xmm8, 24
+ pxor xmm12, xmm8
+ movdqa xmm8, xmm13
+ psrld xmm13, 8
+ pslld xmm8, 24
+ pxor xmm13, xmm8
+ movdqa xmm8, xmm14
+ psrld xmm14, 8
+ pslld xmm8, 24
+ pxor xmm14, xmm8
+ paddd xmm10, xmm15
+ paddd xmm11, xmm12
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm13
+ paddd xmm9, xmm14
+ pxor xmm5, xmm10
+ pxor xmm6, xmm11
+ pxor xmm7, xmm8
+ pxor xmm4, xmm9
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 7
+ pslld xmm5, 25
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 7
+ pslld xmm6, 25
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 7
+ pslld xmm7, 25
+ por xmm7, xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 7
+ pslld xmm4, 25
+ por xmm4, xmm8
+ paddd xmm0, xmmword ptr [rsp+90H]
+ paddd xmm1, xmmword ptr [rsp+0B0H]
+ paddd xmm2, xmmword ptr [rsp+80H]
+ paddd xmm3, xmmword ptr [rsp+0F0H]
+ paddd xmm0, xmm4
+ paddd xmm1, xmm5
+ paddd xmm2, xmm6
+ paddd xmm3, xmm7
+ pxor xmm12, xmm0
+ pxor xmm13, xmm1
+ pxor xmm14, xmm2
+ pxor xmm15, xmm3
+ pshuflw xmm12, xmm12, 0B1H
+ pshufhw xmm12, xmm12, 0B1H
+ pshuflw xmm13, xmm13, 0B1H
+ pshufhw xmm13, xmm13, 0B1H
+ pshuflw xmm14, xmm14, 0B1H
+ pshufhw xmm14, xmm14, 0B1H
+ pshuflw xmm15, xmm15, 0B1H
+ pshufhw xmm15, xmm15, 0B1H
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm12
+ paddd xmm9, xmm13
+ paddd xmm10, xmm14
+ paddd xmm11, xmm15
+ pxor xmm4, xmm8
+ pxor xmm5, xmm9
+ pxor xmm6, xmm10
+ pxor xmm7, xmm11
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 12
+ pslld xmm4, 20
+ por xmm4, xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 12
+ pslld xmm5, 20
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 12
+ pslld xmm6, 20
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 12
+ pslld xmm7, 20
+ por xmm7, xmm8
+ paddd xmm0, xmmword ptr [rsp+0E0H]
+ paddd xmm1, xmmword ptr [rsp+50H]
+ paddd xmm2, xmmword ptr [rsp+0C0H]
+ paddd xmm3, xmmword ptr [rsp+10H]
+ paddd xmm0, xmm4
+ paddd xmm1, xmm5
+ paddd xmm2, xmm6
+ paddd xmm3, xmm7
+ pxor xmm12, xmm0
+ pxor xmm13, xmm1
+ pxor xmm14, xmm2
+ pxor xmm15, xmm3
+ movdqa xmm8, xmm12
+ psrld xmm12, 8
+ pslld xmm8, 24
+ pxor xmm12, xmm8
+ movdqa xmm8, xmm13
+ psrld xmm13, 8
+ pslld xmm8, 24
+ pxor xmm13, xmm8
+ movdqa xmm8, xmm14
+ psrld xmm14, 8
+ pslld xmm8, 24
+ pxor xmm14, xmm8
+ movdqa xmm8, xmm15
+ psrld xmm15, 8
+ pslld xmm8, 24
+ pxor xmm15, xmm8
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm12
+ paddd xmm9, xmm13
+ paddd xmm10, xmm14
+ paddd xmm11, xmm15
+ pxor xmm4, xmm8
+ pxor xmm5, xmm9
+ pxor xmm6, xmm10
+ pxor xmm7, xmm11
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 7
+ pslld xmm4, 25
+ por xmm4, xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 7
+ pslld xmm5, 25
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 7
+ pslld xmm6, 25
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 7
+ pslld xmm7, 25
+ por xmm7, xmm8
+ paddd xmm0, xmmword ptr [rsp+0D0H]
+ paddd xmm1, xmmword ptr [rsp]
+ paddd xmm2, xmmword ptr [rsp+20H]
+ paddd xmm3, xmmword ptr [rsp+40H]
+ paddd xmm0, xmm5
+ paddd xmm1, xmm6
+ paddd xmm2, xmm7
+ paddd xmm3, xmm4
+ pxor xmm15, xmm0
+ pxor xmm12, xmm1
+ pxor xmm13, xmm2
+ pxor xmm14, xmm3
+ pshuflw xmm15, xmm15, 0B1H
+ pshufhw xmm15, xmm15, 0B1H
+ pshuflw xmm12, xmm12, 0B1H
+ pshufhw xmm12, xmm12, 0B1H
+ pshuflw xmm13, xmm13, 0B1H
+ pshufhw xmm13, xmm13, 0B1H
+ pshuflw xmm14, xmm14, 0B1H
+ pshufhw xmm14, xmm14, 0B1H
+ paddd xmm10, xmm15
+ paddd xmm11, xmm12
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm13
+ paddd xmm9, xmm14
+ pxor xmm5, xmm10
+ pxor xmm6, xmm11
+ pxor xmm7, xmm8
+ pxor xmm4, xmm9
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 12
+ pslld xmm5, 20
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 12
+ pslld xmm6, 20
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 12
+ pslld xmm7, 20
+ por xmm7, xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 12
+ pslld xmm4, 20
+ por xmm4, xmm8
+ paddd xmm0, xmmword ptr [rsp+30H]
+ paddd xmm1, xmmword ptr [rsp+0A0H]
+ paddd xmm2, xmmword ptr [rsp+60H]
+ paddd xmm3, xmmword ptr [rsp+70H]
+ paddd xmm0, xmm5
+ paddd xmm1, xmm6
+ paddd xmm2, xmm7
+ paddd xmm3, xmm4
+ pxor xmm15, xmm0
+ pxor xmm12, xmm1
+ pxor xmm13, xmm2
+ pxor xmm14, xmm3
+ movdqa xmm8, xmm15
+ psrld xmm15, 8
+ pslld xmm8, 24
+ pxor xmm15, xmm8
+ movdqa xmm8, xmm12
+ psrld xmm12, 8
+ pslld xmm8, 24
+ pxor xmm12, xmm8
+ movdqa xmm8, xmm13
+ psrld xmm13, 8
+ pslld xmm8, 24
+ pxor xmm13, xmm8
+ movdqa xmm8, xmm14
+ psrld xmm14, 8
+ pslld xmm8, 24
+ pxor xmm14, xmm8
+ paddd xmm10, xmm15
+ paddd xmm11, xmm12
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm13
+ paddd xmm9, xmm14
+ pxor xmm5, xmm10
+ pxor xmm6, xmm11
+ pxor xmm7, xmm8
+ pxor xmm4, xmm9
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 7
+ pslld xmm5, 25
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 7
+ pslld xmm6, 25
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 7
+ pslld xmm7, 25
+ por xmm7, xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 7
+ pslld xmm4, 25
+ por xmm4, xmm8
+ paddd xmm0, xmmword ptr [rsp+0B0H]
+ paddd xmm1, xmmword ptr [rsp+50H]
+ paddd xmm2, xmmword ptr [rsp+10H]
+ paddd xmm3, xmmword ptr [rsp+80H]
+ paddd xmm0, xmm4
+ paddd xmm1, xmm5
+ paddd xmm2, xmm6
+ paddd xmm3, xmm7
+ pxor xmm12, xmm0
+ pxor xmm13, xmm1
+ pxor xmm14, xmm2
+ pxor xmm15, xmm3
+ pshuflw xmm12, xmm12, 0B1H
+ pshufhw xmm12, xmm12, 0B1H
+ pshuflw xmm13, xmm13, 0B1H
+ pshufhw xmm13, xmm13, 0B1H
+ pshuflw xmm14, xmm14, 0B1H
+ pshufhw xmm14, xmm14, 0B1H
+ pshuflw xmm15, xmm15, 0B1H
+ pshufhw xmm15, xmm15, 0B1H
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm12
+ paddd xmm9, xmm13
+ paddd xmm10, xmm14
+ paddd xmm11, xmm15
+ pxor xmm4, xmm8
+ pxor xmm5, xmm9
+ pxor xmm6, xmm10
+ pxor xmm7, xmm11
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 12
+ pslld xmm4, 20
+ por xmm4, xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 12
+ pslld xmm5, 20
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 12
+ pslld xmm6, 20
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 12
+ pslld xmm7, 20
+ por xmm7, xmm8
+ paddd xmm0, xmmword ptr [rsp+0F0H]
+ paddd xmm1, xmmword ptr [rsp]
+ paddd xmm2, xmmword ptr [rsp+90H]
+ paddd xmm3, xmmword ptr [rsp+60H]
+ paddd xmm0, xmm4
+ paddd xmm1, xmm5
+ paddd xmm2, xmm6
+ paddd xmm3, xmm7
+ pxor xmm12, xmm0
+ pxor xmm13, xmm1
+ pxor xmm14, xmm2
+ pxor xmm15, xmm3
+ movdqa xmm8, xmm12
+ psrld xmm12, 8
+ pslld xmm8, 24
+ pxor xmm12, xmm8
+ movdqa xmm8, xmm13
+ psrld xmm13, 8
+ pslld xmm8, 24
+ pxor xmm13, xmm8
+ movdqa xmm8, xmm14
+ psrld xmm14, 8
+ pslld xmm8, 24
+ pxor xmm14, xmm8
+ movdqa xmm8, xmm15
+ psrld xmm15, 8
+ pslld xmm8, 24
+ pxor xmm15, xmm8
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm12
+ paddd xmm9, xmm13
+ paddd xmm10, xmm14
+ paddd xmm11, xmm15
+ pxor xmm4, xmm8
+ pxor xmm5, xmm9
+ pxor xmm6, xmm10
+ pxor xmm7, xmm11
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 7
+ pslld xmm4, 25
+ por xmm4, xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 7
+ pslld xmm5, 25
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 7
+ pslld xmm6, 25
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 7
+ pslld xmm7, 25
+ por xmm7, xmm8
+ paddd xmm0, xmmword ptr [rsp+0E0H]
+ paddd xmm1, xmmword ptr [rsp+20H]
+ paddd xmm2, xmmword ptr [rsp+30H]
+ paddd xmm3, xmmword ptr [rsp+70H]
+ paddd xmm0, xmm5
+ paddd xmm1, xmm6
+ paddd xmm2, xmm7
+ paddd xmm3, xmm4
+ pxor xmm15, xmm0
+ pxor xmm12, xmm1
+ pxor xmm13, xmm2
+ pxor xmm14, xmm3
+ pshuflw xmm15, xmm15, 0B1H
+ pshufhw xmm15, xmm15, 0B1H
+ pshuflw xmm12, xmm12, 0B1H
+ pshufhw xmm12, xmm12, 0B1H
+ pshuflw xmm13, xmm13, 0B1H
+ pshufhw xmm13, xmm13, 0B1H
+ pshuflw xmm14, xmm14, 0B1H
+ pshufhw xmm14, xmm14, 0B1H
+ paddd xmm10, xmm15
+ paddd xmm11, xmm12
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm13
+ paddd xmm9, xmm14
+ pxor xmm5, xmm10
+ pxor xmm6, xmm11
+ pxor xmm7, xmm8
+ pxor xmm4, xmm9
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 12
+ pslld xmm5, 20
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 12
+ pslld xmm6, 20
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 12
+ pslld xmm7, 20
+ por xmm7, xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 12
+ pslld xmm4, 20
+ por xmm4, xmm8
+ paddd xmm0, xmmword ptr [rsp+0A0H]
+ paddd xmm1, xmmword ptr [rsp+0C0H]
+ paddd xmm2, xmmword ptr [rsp+40H]
+ paddd xmm3, xmmword ptr [rsp+0D0H]
+ paddd xmm0, xmm5
+ paddd xmm1, xmm6
+ paddd xmm2, xmm7
+ paddd xmm3, xmm4
+ pxor xmm15, xmm0
+ pxor xmm12, xmm1
+ pxor xmm13, xmm2
+ pxor xmm14, xmm3
+ movdqa xmm8, xmm15
+ psrld xmm15, 8
+ pslld xmm8, 24
+ pxor xmm15, xmm8
+ movdqa xmm8, xmm12
+ psrld xmm12, 8
+ pslld xmm8, 24
+ pxor xmm12, xmm8
+ movdqa xmm8, xmm13
+ psrld xmm13, 8
+ pslld xmm8, 24
+ pxor xmm13, xmm8
+ movdqa xmm8, xmm14
+ psrld xmm14, 8
+ pslld xmm8, 24
+ pxor xmm14, xmm8
+ paddd xmm10, xmm15
+ paddd xmm11, xmm12
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm13
+ paddd xmm9, xmm14
+ pxor xmm5, xmm10
+ pxor xmm6, xmm11
+ pxor xmm7, xmm8
+ pxor xmm4, xmm9
+ pxor xmm0, xmm8
+ pxor xmm1, xmm9
+ pxor xmm2, xmm10
+ pxor xmm3, xmm11
+ movdqa xmm8, xmm5
+ psrld xmm8, 7
+ pslld xmm5, 25
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 7
+ pslld xmm6, 25
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 7
+ pslld xmm7, 25
+ por xmm7, xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 7
+ pslld xmm4, 25
+ por xmm4, xmm8
+ pxor xmm4, xmm12
+ pxor xmm5, xmm13
+ pxor xmm6, xmm14
+ pxor xmm7, xmm15
+ mov eax, r13d
+ jne innerloop4
+ movdqa xmm9, xmm0
+ punpckldq xmm0, xmm1
+ punpckhdq xmm9, xmm1
+ movdqa xmm11, xmm2
+ punpckldq xmm2, xmm3
+ punpckhdq xmm11, xmm3
+ movdqa xmm1, xmm0
+ punpcklqdq xmm0, xmm2
+ punpckhqdq xmm1, xmm2
+ movdqa xmm3, xmm9
+ punpcklqdq xmm9, xmm11
+ punpckhqdq xmm3, xmm11
+ movdqu xmmword ptr [rbx], xmm0
+ movdqu xmmword ptr [rbx+20H], xmm1
+ movdqu xmmword ptr [rbx+40H], xmm9
+ movdqu xmmword ptr [rbx+60H], xmm3
+ movdqa xmm9, xmm4
+ punpckldq xmm4, xmm5
+ punpckhdq xmm9, xmm5
+ movdqa xmm11, xmm6
+ punpckldq xmm6, xmm7
+ punpckhdq xmm11, xmm7
+ movdqa xmm5, xmm4
+ punpcklqdq xmm4, xmm6
+ punpckhqdq xmm5, xmm6
+ movdqa xmm7, xmm9
+ punpcklqdq xmm9, xmm11
+ punpckhqdq xmm7, xmm11
+ movdqu xmmword ptr [rbx+10H], xmm4
+ movdqu xmmword ptr [rbx+30H], xmm5
+ movdqu xmmword ptr [rbx+50H], xmm9
+ movdqu xmmword ptr [rbx+70H], xmm7
+ movdqa xmm1, xmmword ptr [rsp+110H]
+ movdqa xmm0, xmm1
+ paddd xmm1, xmmword ptr [rsp+150H]
+ movdqa xmmword ptr [rsp+110H], xmm1
+ pxor xmm0, xmmword ptr [CMP_MSB_MASK]
+ pxor xmm1, xmmword ptr [CMP_MSB_MASK]
+ pcmpgtd xmm0, xmm1
+ movdqa xmm1, xmmword ptr [rsp+120H]
+ psubd xmm1, xmm0
+ movdqa xmmword ptr [rsp+120H], xmm1
+ add rbx, 128
+ add rdi, 32
+ sub rsi, 4
+ cmp rsi, 4
+ jnc outerloop4
+ test rsi, rsi
+ jne final3blocks
+unwind:
+ movdqa xmm6, xmmword ptr [rsp+170H]
+ movdqa xmm7, xmmword ptr [rsp+180H]
+ movdqa xmm8, xmmword ptr [rsp+190H]
+ movdqa xmm9, xmmword ptr [rsp+1A0H]
+ movdqa xmm10, xmmword ptr [rsp+1B0H]
+ movdqa xmm11, xmmword ptr [rsp+1C0H]
+ movdqa xmm12, xmmword ptr [rsp+1D0H]
+ movdqa xmm13, xmmword ptr [rsp+1E0H]
+ movdqa xmm14, xmmword ptr [rsp+1F0H]
+ movdqa xmm15, xmmword ptr [rsp+200H]
+ mov rsp, rbp
+ pop rbp
+ pop rbx
+ pop rdi
+ pop rsi
+ pop r12
+ pop r13
+ pop r14
+ pop r15
+ ret
+ALIGN 16
+final3blocks:
+ test esi, 2H
+ je final1block
+ movups xmm0, xmmword ptr [rcx]
+ movups xmm1, xmmword ptr [rcx+10H]
+ movaps xmm8, xmm0
+ movaps xmm9, xmm1
+ movd xmm13, dword ptr [rsp+110H]
+ movd xmm14, dword ptr [rsp+120H]
+ punpckldq xmm13, xmm14
+ movaps xmmword ptr [rsp], xmm13
+ movd xmm14, dword ptr [rsp+114H]
+ movd xmm13, dword ptr [rsp+124H]
+ punpckldq xmm14, xmm13
+ movaps xmmword ptr [rsp+10H], xmm14
+ mov r8, qword ptr [rdi]
+ mov r9, qword ptr [rdi+8H]
+ movzx eax, byte ptr [rbp+80H]
+ or eax, r13d
+ xor edx, edx
+innerloop2:
+ mov r14d, eax
+ or eax, r12d
+ add rdx, 64
+ cmp rdx, r15
+ cmovne eax, r14d
+ movaps xmm2, xmmword ptr [BLAKE3_IV]
+ movaps xmm10, xmm2
+ movups xmm4, xmmword ptr [r8+rdx-40H]
+ movups xmm5, xmmword ptr [r8+rdx-30H]
+ movaps xmm3, xmm4
+ shufps xmm4, xmm5, 136
+ shufps xmm3, xmm5, 221
+ movaps xmm5, xmm3
+ movups xmm6, xmmword ptr [r8+rdx-20H]
+ movups xmm7, xmmword ptr [r8+rdx-10H]
+ movaps xmm3, xmm6
+ shufps xmm6, xmm7, 136
+ pshufd xmm6, xmm6, 93H
+ shufps xmm3, xmm7, 221
+ pshufd xmm7, xmm3, 93H
+ movups xmm12, xmmword ptr [r9+rdx-40H]
+ movups xmm13, xmmword ptr [r9+rdx-30H]
+ movaps xmm11, xmm12
+ shufps xmm12, xmm13, 136
+ shufps xmm11, xmm13, 221
+ movaps xmm13, xmm11
+ movups xmm14, xmmword ptr [r9+rdx-20H]
+ movups xmm15, xmmword ptr [r9+rdx-10H]
+ movaps xmm11, xmm14
+ shufps xmm14, xmm15, 136
+ pshufd xmm14, xmm14, 93H
+ shufps xmm11, xmm15, 221
+ pshufd xmm15, xmm11, 93H
+ shl rax, 20H
+ or rax, 40H
+ movd xmm3, rax
+ movdqa xmmword ptr [rsp+20H], xmm3
+ movaps xmm3, xmmword ptr [rsp]
+ movaps xmm11, xmmword ptr [rsp+10H]
+ punpcklqdq xmm3, xmmword ptr [rsp+20H]
+ punpcklqdq xmm11, xmmword ptr [rsp+20H]
+ mov al, 7
+roundloop2:
+ paddd xmm0, xmm4
+ paddd xmm8, xmm12
+ movaps xmmword ptr [rsp+20H], xmm4
+ movaps xmmword ptr [rsp+30H], xmm12
+ paddd xmm0, xmm1
+ paddd xmm8, xmm9
+ pxor xmm3, xmm0
+ pxor xmm11, xmm8
+ pshuflw xmm3, xmm3, 0B1H
+ pshufhw xmm3, xmm3, 0B1H
+ pshuflw xmm11, xmm11, 0B1H
+ pshufhw xmm11, xmm11, 0B1H
+ paddd xmm2, xmm3
+ paddd xmm10, xmm11
+ pxor xmm1, xmm2
+ pxor xmm9, xmm10
+ movdqa xmm4, xmm1
+ pslld xmm1, 20
+ psrld xmm4, 12
+ por xmm1, xmm4
+ movdqa xmm4, xmm9
+ pslld xmm9, 20
+ psrld xmm4, 12
+ por xmm9, xmm4
+ paddd xmm0, xmm5
+ paddd xmm8, xmm13
+ movaps xmmword ptr [rsp+40H], xmm5
+ movaps xmmword ptr [rsp+50H], xmm13
+ paddd xmm0, xmm1
+ paddd xmm8, xmm9
+ pxor xmm3, xmm0
+ pxor xmm11, xmm8
+ movdqa xmm13, xmm3
+ psrld xmm3, 8
+ pslld xmm13, 24
+ pxor xmm3, xmm13
+ movdqa xmm13, xmm11
+ psrld xmm11, 8
+ pslld xmm13, 24
+ pxor xmm11, xmm13
+ paddd xmm2, xmm3
+ paddd xmm10, xmm11
+ pxor xmm1, xmm2
+ pxor xmm9, xmm10
+ movdqa xmm4, xmm1
+ pslld xmm1, 25
+ psrld xmm4, 7
+ por xmm1, xmm4
+ movdqa xmm4, xmm9
+ pslld xmm9, 25
+ psrld xmm4, 7
+ por xmm9, xmm4
+ pshufd xmm0, xmm0, 93H
+ pshufd xmm8, xmm8, 93H
+ pshufd xmm3, xmm3, 4EH
+ pshufd xmm11, xmm11, 4EH
+ pshufd xmm2, xmm2, 39H
+ pshufd xmm10, xmm10, 39H
+ paddd xmm0, xmm6
+ paddd xmm8, xmm14
+ paddd xmm0, xmm1
+ paddd xmm8, xmm9
+ pxor xmm3, xmm0
+ pxor xmm11, xmm8
+ pshuflw xmm3, xmm3, 0B1H
+ pshufhw xmm3, xmm3, 0B1H
+ pshuflw xmm11, xmm11, 0B1H
+ pshufhw xmm11, xmm11, 0B1H
+ paddd xmm2, xmm3
+ paddd xmm10, xmm11
+ pxor xmm1, xmm2
+ pxor xmm9, xmm10
+ movdqa xmm4, xmm1
+ pslld xmm1, 20
+ psrld xmm4, 12
+ por xmm1, xmm4
+ movdqa xmm4, xmm9
+ pslld xmm9, 20
+ psrld xmm4, 12
+ por xmm9, xmm4
+ paddd xmm0, xmm7
+ paddd xmm8, xmm15
+ paddd xmm0, xmm1
+ paddd xmm8, xmm9
+ pxor xmm3, xmm0
+ pxor xmm11, xmm8
+ movdqa xmm13, xmm3
+ psrld xmm3, 8
+ pslld xmm13, 24
+ pxor xmm3, xmm13
+ movdqa xmm13, xmm11
+ psrld xmm11, 8
+ pslld xmm13, 24
+ pxor xmm11, xmm13
+ paddd xmm2, xmm3
+ paddd xmm10, xmm11
+ pxor xmm1, xmm2
+ pxor xmm9, xmm10
+ movdqa xmm4, xmm1
+ pslld xmm1, 25
+ psrld xmm4, 7
+ por xmm1, xmm4
+ movdqa xmm4, xmm9
+ pslld xmm9, 25
+ psrld xmm4, 7
+ por xmm9, xmm4
+ pshufd xmm0, xmm0, 39H
+ pshufd xmm8, xmm8, 39H
+ pshufd xmm3, xmm3, 4EH
+ pshufd xmm11, xmm11, 4EH
+ pshufd xmm2, xmm2, 93H
+ pshufd xmm10, xmm10, 93H
+ dec al
+ je endroundloop2
+ movdqa xmm12, xmmword ptr [rsp+20H]
+ movdqa xmm5, xmmword ptr [rsp+40H]
+ pshufd xmm13, xmm12, 0FH
+ shufps xmm12, xmm5, 214
+ pshufd xmm4, xmm12, 39H
+ movdqa xmm12, xmm6
+ shufps xmm12, xmm7, 250
+ pand xmm13, xmmword ptr [PBLENDW_0x33_MASK]
+ pand xmm12, xmmword ptr [PBLENDW_0xCC_MASK]
+ por xmm13, xmm12
+ movdqa xmmword ptr [rsp+20H], xmm13
+ movdqa xmm12, xmm7
+ punpcklqdq xmm12, xmm5
+ movdqa xmm13, xmm6
+ pand xmm12, xmmword ptr [PBLENDW_0x3F_MASK]
+ pand xmm13, xmmword ptr [PBLENDW_0xC0_MASK]
+ por xmm12, xmm13
+ pshufd xmm12, xmm12, 78H
+ punpckhdq xmm5, xmm7
+ punpckldq xmm6, xmm5
+ pshufd xmm7, xmm6, 1EH
+ movdqa xmmword ptr [rsp+40H], xmm12
+ movdqa xmm5, xmmword ptr [rsp+30H]
+ movdqa xmm13, xmmword ptr [rsp+50H]
+ pshufd xmm6, xmm5, 0FH
+ shufps xmm5, xmm13, 214
+ pshufd xmm12, xmm5, 39H
+ movdqa xmm5, xmm14
+ shufps xmm5, xmm15, 250
+ pand xmm6, xmmword ptr [PBLENDW_0x33_MASK]
+ pand xmm5, xmmword ptr [PBLENDW_0xCC_MASK]
+ por xmm6, xmm5
+ movdqa xmm5, xmm15
+ punpcklqdq xmm5, xmm13
+ movdqa xmmword ptr [rsp+30H], xmm2
+ movdqa xmm2, xmm14
+ pand xmm5, xmmword ptr [PBLENDW_0x3F_MASK]
+ pand xmm2, xmmword ptr [PBLENDW_0xC0_MASK]
+ por xmm5, xmm2
+ movdqa xmm2, xmmword ptr [rsp+30H]
+ pshufd xmm5, xmm5, 78H
+ punpckhdq xmm13, xmm15
+ punpckldq xmm14, xmm13
+ pshufd xmm15, xmm14, 1EH
+ movdqa xmm13, xmm6
+ movdqa xmm14, xmm5
+ movdqa xmm5, xmmword ptr [rsp+20H]
+ movdqa xmm6, xmmword ptr [rsp+40H]
+ jmp roundloop2
+endroundloop2:
+ pxor xmm0, xmm2
+ pxor xmm1, xmm3
+ pxor xmm8, xmm10
+ pxor xmm9, xmm11
+ mov eax, r13d
+ cmp rdx, r15
+ jne innerloop2
+ movups xmmword ptr [rbx], xmm0
+ movups xmmword ptr [rbx+10H], xmm1
+ movups xmmword ptr [rbx+20H], xmm8
+ movups xmmword ptr [rbx+30H], xmm9
+ mov eax, dword ptr [rsp+130H]
+ neg eax
+ mov r10d, dword ptr [rsp+110H+8*rax]
+ mov r11d, dword ptr [rsp+120H+8*rax]
+ mov dword ptr [rsp+110H], r10d
+ mov dword ptr [rsp+120H], r11d
+ add rdi, 16
+ add rbx, 64
+ sub rsi, 2
+final1block:
+ test esi, 1H
+ je unwind
+ movups xmm0, xmmword ptr [rcx]
+ movups xmm1, xmmword ptr [rcx+10H]
+ movd xmm13, dword ptr [rsp+110H]
+ movd xmm14, dword ptr [rsp+120H]
+ punpckldq xmm13, xmm14
+ mov r8, qword ptr [rdi]
+ movzx eax, byte ptr [rbp+80H]
+ or eax, r13d
+ xor edx, edx
+innerloop1:
+ mov r14d, eax
+ or eax, r12d
+ add rdx, 64
+ cmp rdx, r15
+ cmovne eax, r14d
+ movaps xmm2, xmmword ptr [BLAKE3_IV]
+ shl rax, 32
+ or rax, 64
+ movd xmm12, rax
+ movdqa xmm3, xmm13
+ punpcklqdq xmm3, xmm12
+ movups xmm4, xmmword ptr [r8+rdx-40H]
+ movups xmm5, xmmword ptr [r8+rdx-30H]
+ movaps xmm8, xmm4
+ shufps xmm4, xmm5, 136
+ shufps xmm8, xmm5, 221
+ movaps xmm5, xmm8
+ movups xmm6, xmmword ptr [r8+rdx-20H]
+ movups xmm7, xmmword ptr [r8+rdx-10H]
+ movaps xmm8, xmm6
+ shufps xmm6, xmm7, 136
+ pshufd xmm6, xmm6, 93H
+ shufps xmm8, xmm7, 221
+ pshufd xmm7, xmm8, 93H
+ mov al, 7
+roundloop1:
+ paddd xmm0, xmm4
+ paddd xmm0, xmm1
+ pxor xmm3, xmm0
+ pshuflw xmm3, xmm3, 0B1H
+ pshufhw xmm3, xmm3, 0B1H
+ paddd xmm2, xmm3
+ pxor xmm1, xmm2
+ movdqa xmm11, xmm1
+ pslld xmm1, 20
+ psrld xmm11, 12
+ por xmm1, xmm11
+ paddd xmm0, xmm5
+ paddd xmm0, xmm1
+ pxor xmm3, xmm0
+ movdqa xmm14, xmm3
+ psrld xmm3, 8
+ pslld xmm14, 24
+ pxor xmm3, xmm14
+ paddd xmm2, xmm3
+ pxor xmm1, xmm2
+ movdqa xmm11, xmm1
+ pslld xmm1, 25
+ psrld xmm11, 7
+ por xmm1, xmm11
+ pshufd xmm0, xmm0, 93H
+ pshufd xmm3, xmm3, 4EH
+ pshufd xmm2, xmm2, 39H
+ paddd xmm0, xmm6
+ paddd xmm0, xmm1
+ pxor xmm3, xmm0
+ pshuflw xmm3, xmm3, 0B1H
+ pshufhw xmm3, xmm3, 0B1H
+ paddd xmm2, xmm3
+ pxor xmm1, xmm2
+ movdqa xmm11, xmm1
+ pslld xmm1, 20
+ psrld xmm11, 12
+ por xmm1, xmm11
+ paddd xmm0, xmm7
+ paddd xmm0, xmm1
+ pxor xmm3, xmm0
+ movdqa xmm14, xmm3
+ psrld xmm3, 8
+ pslld xmm14, 24
+ pxor xmm3, xmm14
+ paddd xmm2, xmm3
+ pxor xmm1, xmm2
+ movdqa xmm11, xmm1
+ pslld xmm1, 25
+ psrld xmm11, 7
+ por xmm1, xmm11
+ pshufd xmm0, xmm0, 39H
+ pshufd xmm3, xmm3, 4EH
+ pshufd xmm2, xmm2, 93H
+ dec al
+ jz endroundloop1
+ movdqa xmm8, xmm4
+ shufps xmm8, xmm5, 214
+ pshufd xmm9, xmm4, 0FH
+ pshufd xmm4, xmm8, 39H
+ movdqa xmm8, xmm6
+ shufps xmm8, xmm7, 250
+ pand xmm9, xmmword ptr [PBLENDW_0x33_MASK]
+ pand xmm8, xmmword ptr [PBLENDW_0xCC_MASK]
+ por xmm9, xmm8
+ movdqa xmm8, xmm7
+ punpcklqdq xmm8, xmm5
+ movdqa xmm10, xmm6
+ pand xmm8, xmmword ptr [PBLENDW_0x3F_MASK]
+ pand xmm10, xmmword ptr [PBLENDW_0xC0_MASK]
+ por xmm8, xmm10
+ pshufd xmm8, xmm8, 78H
+ punpckhdq xmm5, xmm7
+ punpckldq xmm6, xmm5
+ pshufd xmm7, xmm6, 1EH
+ movdqa xmm5, xmm9
+ movdqa xmm6, xmm8
+ jmp roundloop1
+endroundloop1:
+ pxor xmm0, xmm2
+ pxor xmm1, xmm3
+ mov eax, r13d
+ cmp rdx, r15
+ jne innerloop1
+ movups xmmword ptr [rbx], xmm0
+ movups xmmword ptr [rbx+10H], xmm1
+ jmp unwind
+_llvm_blake3_hash_many_sse2 ENDP
+llvm_blake3_hash_many_sse2 ENDP
+
+llvm_blake3_compress_in_place_sse2 PROC
+_llvm_blake3_compress_in_place_sse2 PROC
+ sub rsp, 120
+ movdqa xmmword ptr [rsp], xmm6
+ movdqa xmmword ptr [rsp+10H], xmm7
+ movdqa xmmword ptr [rsp+20H], xmm8
+ movdqa xmmword ptr [rsp+30H], xmm9
+ movdqa xmmword ptr [rsp+40H], xmm11
+ movdqa xmmword ptr [rsp+50H], xmm14
+ movdqa xmmword ptr [rsp+60H], xmm15
+ movups xmm0, xmmword ptr [rcx]
+ movups xmm1, xmmword ptr [rcx+10H]
+ movaps xmm2, xmmword ptr [BLAKE3_IV]
+ movzx eax, byte ptr [rsp+0A0H]
+ movzx r8d, r8b
+ shl rax, 32
+ add r8, rax
+ movd xmm3, r9
+ movd xmm4, r8
+ punpcklqdq xmm3, xmm4
+ movups xmm4, xmmword ptr [rdx]
+ movups xmm5, xmmword ptr [rdx+10H]
+ movaps xmm8, xmm4
+ shufps xmm4, xmm5, 136
+ shufps xmm8, xmm5, 221
+ movaps xmm5, xmm8
+ movups xmm6, xmmword ptr [rdx+20H]
+ movups xmm7, xmmword ptr [rdx+30H]
+ movaps xmm8, xmm6
+ shufps xmm6, xmm7, 136
+ pshufd xmm6, xmm6, 93H
+ shufps xmm8, xmm7, 221
+ pshufd xmm7, xmm8, 93H
+ mov al, 7
+@@:
+ paddd xmm0, xmm4
+ paddd xmm0, xmm1
+ pxor xmm3, xmm0
+ pshuflw xmm3, xmm3, 0B1H
+ pshufhw xmm3, xmm3, 0B1H
+ paddd xmm2, xmm3
+ pxor xmm1, xmm2
+ movdqa xmm11, xmm1
+ pslld xmm1, 20
+ psrld xmm11, 12
+ por xmm1, xmm11
+ paddd xmm0, xmm5
+ paddd xmm0, xmm1
+ pxor xmm3, xmm0
+ movdqa xmm14, xmm3
+ psrld xmm3, 8
+ pslld xmm14, 24
+ pxor xmm3, xmm14
+ paddd xmm2, xmm3
+ pxor xmm1, xmm2
+ movdqa xmm11, xmm1
+ pslld xmm1, 25
+ psrld xmm11, 7
+ por xmm1, xmm11
+ pshufd xmm0, xmm0, 93H
+ pshufd xmm3, xmm3, 4EH
+ pshufd xmm2, xmm2, 39H
+ paddd xmm0, xmm6
+ paddd xmm0, xmm1
+ pxor xmm3, xmm0
+ pshuflw xmm3, xmm3, 0B1H
+ pshufhw xmm3, xmm3, 0B1H
+ paddd xmm2, xmm3
+ pxor xmm1, xmm2
+ movdqa xmm11, xmm1
+ pslld xmm1, 20
+ psrld xmm11, 12
+ por xmm1, xmm11
+ paddd xmm0, xmm7
+ paddd xmm0, xmm1
+ pxor xmm3, xmm0
+ movdqa xmm14, xmm3
+ psrld xmm3, 8
+ pslld xmm14, 24
+ pxor xmm3, xmm14
+ paddd xmm2, xmm3
+ pxor xmm1, xmm2
+ movdqa xmm11, xmm1
+ pslld xmm1, 25
+ psrld xmm11, 7
+ por xmm1, xmm11
+ pshufd xmm0, xmm0, 39H
+ pshufd xmm3, xmm3, 4EH
+ pshufd xmm2, xmm2, 93H
+ dec al
+ jz @F
+ movdqa xmm8, xmm4
+ shufps xmm8, xmm5, 214
+ pshufd xmm9, xmm4, 0FH
+ pshufd xmm4, xmm8, 39H
+ movdqa xmm8, xmm6
+ shufps xmm8, xmm7, 250
+ pand xmm9, xmmword ptr [PBLENDW_0x33_MASK]
+ pand xmm8, xmmword ptr [PBLENDW_0xCC_MASK]
+ por xmm9, xmm8
+ movdqa xmm8, xmm7
+ punpcklqdq xmm8, xmm5
+ movdqa xmm14, xmm6
+ pand xmm8, xmmword ptr [PBLENDW_0x3F_MASK]
+ pand xmm14, xmmword ptr [PBLENDW_0xC0_MASK]
+ por xmm8, xmm14
+ pshufd xmm8, xmm8, 78H
+ punpckhdq xmm5, xmm7
+ punpckldq xmm6, xmm5
+ pshufd xmm7, xmm6, 1EH
+ movdqa xmm5, xmm9
+ movdqa xmm6, xmm8
+ jmp @B
+@@:
+ pxor xmm0, xmm2
+ pxor xmm1, xmm3
+ movups xmmword ptr [rcx], xmm0
+ movups xmmword ptr [rcx+10H], xmm1
+ movdqa xmm6, xmmword ptr [rsp]
+ movdqa xmm7, xmmword ptr [rsp+10H]
+ movdqa xmm8, xmmword ptr [rsp+20H]
+ movdqa xmm9, xmmword ptr [rsp+30H]
+ movdqa xmm11, xmmword ptr [rsp+40H]
+ movdqa xmm14, xmmword ptr [rsp+50H]
+ movdqa xmm15, xmmword ptr [rsp+60H]
+ add rsp, 120
+ ret
+_llvm_blake3_compress_in_place_sse2 ENDP
+llvm_blake3_compress_in_place_sse2 ENDP
+
+ALIGN 16
+llvm_blake3_compress_xof_sse2 PROC
+_llvm_blake3_compress_xof_sse2 PROC
+ sub rsp, 120
+ movdqa xmmword ptr [rsp], xmm6
+ movdqa xmmword ptr [rsp+10H], xmm7
+ movdqa xmmword ptr [rsp+20H], xmm8
+ movdqa xmmword ptr [rsp+30H], xmm9
+ movdqa xmmword ptr [rsp+40H], xmm11
+ movdqa xmmword ptr [rsp+50H], xmm14
+ movdqa xmmword ptr [rsp+60H], xmm15
+ movups xmm0, xmmword ptr [rcx]
+ movups xmm1, xmmword ptr [rcx+10H]
+ movaps xmm2, xmmword ptr [BLAKE3_IV]
+ movzx eax, byte ptr [rsp+0A0H]
+ movzx r8d, r8b
+ mov r10, qword ptr [rsp+0A8H]
+ shl rax, 32
+ add r8, rax
+ movd xmm3, r9
+ movd xmm4, r8
+ punpcklqdq xmm3, xmm4
+ movups xmm4, xmmword ptr [rdx]
+ movups xmm5, xmmword ptr [rdx+10H]
+ movaps xmm8, xmm4
+ shufps xmm4, xmm5, 136
+ shufps xmm8, xmm5, 221
+ movaps xmm5, xmm8
+ movups xmm6, xmmword ptr [rdx+20H]
+ movups xmm7, xmmword ptr [rdx+30H]
+ movaps xmm8, xmm6
+ shufps xmm6, xmm7, 136
+ pshufd xmm6, xmm6, 93H
+ shufps xmm8, xmm7, 221
+ pshufd xmm7, xmm8, 93H
+ mov al, 7
+@@:
+ paddd xmm0, xmm4
+ paddd xmm0, xmm1
+ pxor xmm3, xmm0
+ pshuflw xmm3, xmm3, 0B1H
+ pshufhw xmm3, xmm3, 0B1H
+ paddd xmm2, xmm3
+ pxor xmm1, xmm2
+ movdqa xmm11, xmm1
+ pslld xmm1, 20
+ psrld xmm11, 12
+ por xmm1, xmm11
+ paddd xmm0, xmm5
+ paddd xmm0, xmm1
+ pxor xmm3, xmm0
+ movdqa xmm14, xmm3
+ psrld xmm3, 8
+ pslld xmm14, 24
+ pxor xmm3, xmm14
+ paddd xmm2, xmm3
+ pxor xmm1, xmm2
+ movdqa xmm11, xmm1
+ pslld xmm1, 25
+ psrld xmm11, 7
+ por xmm1, xmm11
+ pshufd xmm0, xmm0, 93H
+ pshufd xmm3, xmm3, 4EH
+ pshufd xmm2, xmm2, 39H
+ paddd xmm0, xmm6
+ paddd xmm0, xmm1
+ pxor xmm3, xmm0
+ pshuflw xmm3, xmm3, 0B1H
+ pshufhw xmm3, xmm3, 0B1H
+ paddd xmm2, xmm3
+ pxor xmm1, xmm2
+ movdqa xmm11, xmm1
+ pslld xmm1, 20
+ psrld xmm11, 12
+ por xmm1, xmm11
+ paddd xmm0, xmm7
+ paddd xmm0, xmm1
+ pxor xmm3, xmm0
+ movdqa xmm14, xmm3
+ psrld xmm3, 8
+ pslld xmm14, 24
+ pxor xmm3, xmm14
+ paddd xmm2, xmm3
+ pxor xmm1, xmm2
+ movdqa xmm11, xmm1
+ pslld xmm1, 25
+ psrld xmm11, 7
+ por xmm1, xmm11
+ pshufd xmm0, xmm0, 39H
+ pshufd xmm3, xmm3, 4EH
+ pshufd xmm2, xmm2, 93H
+ dec al
+ jz @F
+ movdqa xmm8, xmm4
+ shufps xmm8, xmm5, 214
+ pshufd xmm9, xmm4, 0FH
+ pshufd xmm4, xmm8, 39H
+ movdqa xmm8, xmm6
+ shufps xmm8, xmm7, 250
+ pand xmm9, xmmword ptr [PBLENDW_0x33_MASK]
+ pand xmm8, xmmword ptr [PBLENDW_0xCC_MASK]
+ por xmm9, xmm8
+ movdqa xmm8, xmm7
+ punpcklqdq xmm8, xmm5
+ movdqa xmm14, xmm6
+ pand xmm8, xmmword ptr [PBLENDW_0x3F_MASK]
+ pand xmm14, xmmword ptr [PBLENDW_0xC0_MASK]
+ por xmm8, xmm14
+ pshufd xmm8, xmm8, 78H
+ punpckhdq xmm5, xmm7
+ punpckldq xmm6, xmm5
+ pshufd xmm7, xmm6, 1EH
+ movdqa xmm5, xmm9
+ movdqa xmm6, xmm8
+ jmp @B
+@@:
+ movdqu xmm4, xmmword ptr [rcx]
+ movdqu xmm5, xmmword ptr [rcx+10H]
+ pxor xmm0, xmm2
+ pxor xmm1, xmm3
+ pxor xmm2, xmm4
+ pxor xmm3, xmm5
+ movups xmmword ptr [r10], xmm0
+ movups xmmword ptr [r10+10H], xmm1
+ movups xmmword ptr [r10+20H], xmm2
+ movups xmmword ptr [r10+30H], xmm3
+ movdqa xmm6, xmmword ptr [rsp]
+ movdqa xmm7, xmmword ptr [rsp+10H]
+ movdqa xmm8, xmmword ptr [rsp+20H]
+ movdqa xmm9, xmmword ptr [rsp+30H]
+ movdqa xmm11, xmmword ptr [rsp+40H]
+ movdqa xmm14, xmmword ptr [rsp+50H]
+ movdqa xmm15, xmmword ptr [rsp+60H]
+ add rsp, 120
+ ret
+_llvm_blake3_compress_xof_sse2 ENDP
+llvm_blake3_compress_xof_sse2 ENDP
+
+_TEXT ENDS
+
+
+_RDATA SEGMENT READONLY PAGE ALIAS(".rdata") 'CONST'
+ALIGN 64
+BLAKE3_IV:
+ dd 6A09E667H, 0BB67AE85H, 3C6EF372H, 0A54FF53AH
+
+ADD0:
+ dd 0, 1, 2, 3
+
+ADD1:
+ dd 4 dup (4)
+
+BLAKE3_IV_0:
+ dd 4 dup (6A09E667H)
+
+BLAKE3_IV_1:
+ dd 4 dup (0BB67AE85H)
+
+BLAKE3_IV_2:
+ dd 4 dup (3C6EF372H)
+
+BLAKE3_IV_3:
+ dd 4 dup (0A54FF53AH)
+
+BLAKE3_BLOCK_LEN:
+ dd 4 dup (64)
+
+CMP_MSB_MASK:
+ dd 8 dup(80000000H)
+
+PBLENDW_0x33_MASK:
+ dd 0FFFFFFFFH, 000000000H, 0FFFFFFFFH, 000000000H
+PBLENDW_0xCC_MASK:
+ dd 000000000H, 0FFFFFFFFH, 000000000H, 0FFFFFFFFH
+PBLENDW_0x3F_MASK:
+ dd 0FFFFFFFFH, 0FFFFFFFFH, 0FFFFFFFFH, 000000000H
+PBLENDW_0xC0_MASK:
+ dd 000000000H, 000000000H, 000000000H, 0FFFFFFFFH
+
+_RDATA ENDS
+END
diff --git a/contrib/libs/llvm16/lib/Support/BLAKE3/blake3_sse41.c b/contrib/libs/llvm16/lib/Support/BLAKE3/blake3_sse41.c
new file mode 100644
index 0000000000..87a8dae15c
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Support/BLAKE3/blake3_sse41.c
@@ -0,0 +1,560 @@
+#include "blake3_impl.h"
+
+#include <immintrin.h>
+
+#define DEGREE 4
+
+#define _mm_shuffle_ps2(a, b, c) \
+ (_mm_castps_si128( \
+ _mm_shuffle_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b), (c))))
+
+INLINE __m128i loadu(const uint8_t src[16]) {
+ return _mm_loadu_si128((const __m128i *)src);
+}
+
+INLINE void storeu(__m128i src, uint8_t dest[16]) {
+ _mm_storeu_si128((__m128i *)dest, src);
+}
+
+INLINE __m128i addv(__m128i a, __m128i b) { return _mm_add_epi32(a, b); }
+
+// Note that clang-format doesn't like the name "xor" for some reason.
+INLINE __m128i xorv(__m128i a, __m128i b) { return _mm_xor_si128(a, b); }
+
+INLINE __m128i set1(uint32_t x) { return _mm_set1_epi32((int32_t)x); }
+
+INLINE __m128i set4(uint32_t a, uint32_t b, uint32_t c, uint32_t d) {
+ return _mm_setr_epi32((int32_t)a, (int32_t)b, (int32_t)c, (int32_t)d);
+}
+
+INLINE __m128i rot16(__m128i x) {
+ return _mm_shuffle_epi8(
+ x, _mm_set_epi8(13, 12, 15, 14, 9, 8, 11, 10, 5, 4, 7, 6, 1, 0, 3, 2));
+}
+
+INLINE __m128i rot12(__m128i x) {
+ return xorv(_mm_srli_epi32(x, 12), _mm_slli_epi32(x, 32 - 12));
+}
+
+INLINE __m128i rot8(__m128i x) {
+ return _mm_shuffle_epi8(
+ x, _mm_set_epi8(12, 15, 14, 13, 8, 11, 10, 9, 4, 7, 6, 5, 0, 3, 2, 1));
+}
+
+INLINE __m128i rot7(__m128i x) {
+ return xorv(_mm_srli_epi32(x, 7), _mm_slli_epi32(x, 32 - 7));
+}
+
+INLINE void g1(__m128i *row0, __m128i *row1, __m128i *row2, __m128i *row3,
+ __m128i m) {
+ *row0 = addv(addv(*row0, m), *row1);
+ *row3 = xorv(*row3, *row0);
+ *row3 = rot16(*row3);
+ *row2 = addv(*row2, *row3);
+ *row1 = xorv(*row1, *row2);
+ *row1 = rot12(*row1);
+}
+
+INLINE void g2(__m128i *row0, __m128i *row1, __m128i *row2, __m128i *row3,
+ __m128i m) {
+ *row0 = addv(addv(*row0, m), *row1);
+ *row3 = xorv(*row3, *row0);
+ *row3 = rot8(*row3);
+ *row2 = addv(*row2, *row3);
+ *row1 = xorv(*row1, *row2);
+ *row1 = rot7(*row1);
+}
+
+// Note the optimization here of leaving row1 as the unrotated row, rather than
+// row0. All the message loads below are adjusted to compensate for this. See
+// discussion at https://github.com/sneves/blake2-avx2/pull/4
+INLINE void diagonalize(__m128i *row0, __m128i *row2, __m128i *row3) {
+ *row0 = _mm_shuffle_epi32(*row0, _MM_SHUFFLE(2, 1, 0, 3));
+ *row3 = _mm_shuffle_epi32(*row3, _MM_SHUFFLE(1, 0, 3, 2));
+ *row2 = _mm_shuffle_epi32(*row2, _MM_SHUFFLE(0, 3, 2, 1));
+}
+
+INLINE void undiagonalize(__m128i *row0, __m128i *row2, __m128i *row3) {
+ *row0 = _mm_shuffle_epi32(*row0, _MM_SHUFFLE(0, 3, 2, 1));
+ *row3 = _mm_shuffle_epi32(*row3, _MM_SHUFFLE(1, 0, 3, 2));
+ *row2 = _mm_shuffle_epi32(*row2, _MM_SHUFFLE(2, 1, 0, 3));
+}
+
+INLINE void compress_pre(__m128i rows[4], const uint32_t cv[8],
+ const uint8_t block[BLAKE3_BLOCK_LEN],
+ uint8_t block_len, uint64_t counter, uint8_t flags) {
+ rows[0] = loadu((uint8_t *)&cv[0]);
+ rows[1] = loadu((uint8_t *)&cv[4]);
+ rows[2] = set4(IV[0], IV[1], IV[2], IV[3]);
+ rows[3] = set4(counter_low(counter), counter_high(counter),
+ (uint32_t)block_len, (uint32_t)flags);
+
+ __m128i m0 = loadu(&block[sizeof(__m128i) * 0]);
+ __m128i m1 = loadu(&block[sizeof(__m128i) * 1]);
+ __m128i m2 = loadu(&block[sizeof(__m128i) * 2]);
+ __m128i m3 = loadu(&block[sizeof(__m128i) * 3]);
+
+ __m128i t0, t1, t2, t3, tt;
+
+ // Round 1. The first round permutes the message words from the original
+ // input order, into the groups that get mixed in parallel.
+ t0 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(2, 0, 2, 0)); // 6 4 2 0
+ g1(&rows[0], &rows[1], &rows[2], &rows[3], t0);
+ t1 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(3, 1, 3, 1)); // 7 5 3 1
+ g2(&rows[0], &rows[1], &rows[2], &rows[3], t1);
+ diagonalize(&rows[0], &rows[2], &rows[3]);
+ t2 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(2, 0, 2, 0)); // 14 12 10 8
+ t2 = _mm_shuffle_epi32(t2, _MM_SHUFFLE(2, 1, 0, 3)); // 12 10 8 14
+ g1(&rows[0], &rows[1], &rows[2], &rows[3], t2);
+ t3 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(3, 1, 3, 1)); // 15 13 11 9
+ t3 = _mm_shuffle_epi32(t3, _MM_SHUFFLE(2, 1, 0, 3)); // 13 11 9 15
+ g2(&rows[0], &rows[1], &rows[2], &rows[3], t3);
+ undiagonalize(&rows[0], &rows[2], &rows[3]);
+ m0 = t0;
+ m1 = t1;
+ m2 = t2;
+ m3 = t3;
+
+ // Round 2. This round and all following rounds apply a fixed permutation
+ // to the message words from the round before.
+ t0 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(3, 1, 1, 2));
+ t0 = _mm_shuffle_epi32(t0, _MM_SHUFFLE(0, 3, 2, 1));
+ g1(&rows[0], &rows[1], &rows[2], &rows[3], t0);
+ t1 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(3, 3, 2, 2));
+ tt = _mm_shuffle_epi32(m0, _MM_SHUFFLE(0, 0, 3, 3));
+ t1 = _mm_blend_epi16(tt, t1, 0xCC);
+ g2(&rows[0], &rows[1], &rows[2], &rows[3], t1);
+ diagonalize(&rows[0], &rows[2], &rows[3]);
+ t2 = _mm_unpacklo_epi64(m3, m1);
+ tt = _mm_blend_epi16(t2, m2, 0xC0);
+ t2 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(1, 3, 2, 0));
+ g1(&rows[0], &rows[1], &rows[2], &rows[3], t2);
+ t3 = _mm_unpackhi_epi32(m1, m3);
+ tt = _mm_unpacklo_epi32(m2, t3);
+ t3 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(0, 1, 3, 2));
+ g2(&rows[0], &rows[1], &rows[2], &rows[3], t3);
+ undiagonalize(&rows[0], &rows[2], &rows[3]);
+ m0 = t0;
+ m1 = t1;
+ m2 = t2;
+ m3 = t3;
+
+ // Round 3
+ t0 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(3, 1, 1, 2));
+ t0 = _mm_shuffle_epi32(t0, _MM_SHUFFLE(0, 3, 2, 1));
+ g1(&rows[0], &rows[1], &rows[2], &rows[3], t0);
+ t1 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(3, 3, 2, 2));
+ tt = _mm_shuffle_epi32(m0, _MM_SHUFFLE(0, 0, 3, 3));
+ t1 = _mm_blend_epi16(tt, t1, 0xCC);
+ g2(&rows[0], &rows[1], &rows[2], &rows[3], t1);
+ diagonalize(&rows[0], &rows[2], &rows[3]);
+ t2 = _mm_unpacklo_epi64(m3, m1);
+ tt = _mm_blend_epi16(t2, m2, 0xC0);
+ t2 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(1, 3, 2, 0));
+ g1(&rows[0], &rows[1], &rows[2], &rows[3], t2);
+ t3 = _mm_unpackhi_epi32(m1, m3);
+ tt = _mm_unpacklo_epi32(m2, t3);
+ t3 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(0, 1, 3, 2));
+ g2(&rows[0], &rows[1], &rows[2], &rows[3], t3);
+ undiagonalize(&rows[0], &rows[2], &rows[3]);
+ m0 = t0;
+ m1 = t1;
+ m2 = t2;
+ m3 = t3;
+
+ // Round 4
+ t0 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(3, 1, 1, 2));
+ t0 = _mm_shuffle_epi32(t0, _MM_SHUFFLE(0, 3, 2, 1));
+ g1(&rows[0], &rows[1], &rows[2], &rows[3], t0);
+ t1 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(3, 3, 2, 2));
+ tt = _mm_shuffle_epi32(m0, _MM_SHUFFLE(0, 0, 3, 3));
+ t1 = _mm_blend_epi16(tt, t1, 0xCC);
+ g2(&rows[0], &rows[1], &rows[2], &rows[3], t1);
+ diagonalize(&rows[0], &rows[2], &rows[3]);
+ t2 = _mm_unpacklo_epi64(m3, m1);
+ tt = _mm_blend_epi16(t2, m2, 0xC0);
+ t2 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(1, 3, 2, 0));
+ g1(&rows[0], &rows[1], &rows[2], &rows[3], t2);
+ t3 = _mm_unpackhi_epi32(m1, m3);
+ tt = _mm_unpacklo_epi32(m2, t3);
+ t3 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(0, 1, 3, 2));
+ g2(&rows[0], &rows[1], &rows[2], &rows[3], t3);
+ undiagonalize(&rows[0], &rows[2], &rows[3]);
+ m0 = t0;
+ m1 = t1;
+ m2 = t2;
+ m3 = t3;
+
+ // Round 5
+ t0 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(3, 1, 1, 2));
+ t0 = _mm_shuffle_epi32(t0, _MM_SHUFFLE(0, 3, 2, 1));
+ g1(&rows[0], &rows[1], &rows[2], &rows[3], t0);
+ t1 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(3, 3, 2, 2));
+ tt = _mm_shuffle_epi32(m0, _MM_SHUFFLE(0, 0, 3, 3));
+ t1 = _mm_blend_epi16(tt, t1, 0xCC);
+ g2(&rows[0], &rows[1], &rows[2], &rows[3], t1);
+ diagonalize(&rows[0], &rows[2], &rows[3]);
+ t2 = _mm_unpacklo_epi64(m3, m1);
+ tt = _mm_blend_epi16(t2, m2, 0xC0);
+ t2 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(1, 3, 2, 0));
+ g1(&rows[0], &rows[1], &rows[2], &rows[3], t2);
+ t3 = _mm_unpackhi_epi32(m1, m3);
+ tt = _mm_unpacklo_epi32(m2, t3);
+ t3 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(0, 1, 3, 2));
+ g2(&rows[0], &rows[1], &rows[2], &rows[3], t3);
+ undiagonalize(&rows[0], &rows[2], &rows[3]);
+ m0 = t0;
+ m1 = t1;
+ m2 = t2;
+ m3 = t3;
+
+ // Round 6
+ t0 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(3, 1, 1, 2));
+ t0 = _mm_shuffle_epi32(t0, _MM_SHUFFLE(0, 3, 2, 1));
+ g1(&rows[0], &rows[1], &rows[2], &rows[3], t0);
+ t1 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(3, 3, 2, 2));
+ tt = _mm_shuffle_epi32(m0, _MM_SHUFFLE(0, 0, 3, 3));
+ t1 = _mm_blend_epi16(tt, t1, 0xCC);
+ g2(&rows[0], &rows[1], &rows[2], &rows[3], t1);
+ diagonalize(&rows[0], &rows[2], &rows[3]);
+ t2 = _mm_unpacklo_epi64(m3, m1);
+ tt = _mm_blend_epi16(t2, m2, 0xC0);
+ t2 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(1, 3, 2, 0));
+ g1(&rows[0], &rows[1], &rows[2], &rows[3], t2);
+ t3 = _mm_unpackhi_epi32(m1, m3);
+ tt = _mm_unpacklo_epi32(m2, t3);
+ t3 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(0, 1, 3, 2));
+ g2(&rows[0], &rows[1], &rows[2], &rows[3], t3);
+ undiagonalize(&rows[0], &rows[2], &rows[3]);
+ m0 = t0;
+ m1 = t1;
+ m2 = t2;
+ m3 = t3;
+
+ // Round 7
+ t0 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(3, 1, 1, 2));
+ t0 = _mm_shuffle_epi32(t0, _MM_SHUFFLE(0, 3, 2, 1));
+ g1(&rows[0], &rows[1], &rows[2], &rows[3], t0);
+ t1 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(3, 3, 2, 2));
+ tt = _mm_shuffle_epi32(m0, _MM_SHUFFLE(0, 0, 3, 3));
+ t1 = _mm_blend_epi16(tt, t1, 0xCC);
+ g2(&rows[0], &rows[1], &rows[2], &rows[3], t1);
+ diagonalize(&rows[0], &rows[2], &rows[3]);
+ t2 = _mm_unpacklo_epi64(m3, m1);
+ tt = _mm_blend_epi16(t2, m2, 0xC0);
+ t2 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(1, 3, 2, 0));
+ g1(&rows[0], &rows[1], &rows[2], &rows[3], t2);
+ t3 = _mm_unpackhi_epi32(m1, m3);
+ tt = _mm_unpacklo_epi32(m2, t3);
+ t3 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(0, 1, 3, 2));
+ g2(&rows[0], &rows[1], &rows[2], &rows[3], t3);
+ undiagonalize(&rows[0], &rows[2], &rows[3]);
+}
+
+void blake3_compress_in_place_sse41(uint32_t cv[8],
+ const uint8_t block[BLAKE3_BLOCK_LEN],
+ uint8_t block_len, uint64_t counter,
+ uint8_t flags) {
+ __m128i rows[4];
+ compress_pre(rows, cv, block, block_len, counter, flags);
+ storeu(xorv(rows[0], rows[2]), (uint8_t *)&cv[0]);
+ storeu(xorv(rows[1], rows[3]), (uint8_t *)&cv[4]);
+}
+
+void blake3_compress_xof_sse41(const uint32_t cv[8],
+ const uint8_t block[BLAKE3_BLOCK_LEN],
+ uint8_t block_len, uint64_t counter,
+ uint8_t flags, uint8_t out[64]) {
+ __m128i rows[4];
+ compress_pre(rows, cv, block, block_len, counter, flags);
+ storeu(xorv(rows[0], rows[2]), &out[0]);
+ storeu(xorv(rows[1], rows[3]), &out[16]);
+ storeu(xorv(rows[2], loadu((uint8_t *)&cv[0])), &out[32]);
+ storeu(xorv(rows[3], loadu((uint8_t *)&cv[4])), &out[48]);
+}
+
+INLINE void round_fn(__m128i v[16], __m128i m[16], size_t r) {
+ v[0] = addv(v[0], m[(size_t)MSG_SCHEDULE[r][0]]);
+ v[1] = addv(v[1], m[(size_t)MSG_SCHEDULE[r][2]]);
+ v[2] = addv(v[2], m[(size_t)MSG_SCHEDULE[r][4]]);
+ v[3] = addv(v[3], m[(size_t)MSG_SCHEDULE[r][6]]);
+ v[0] = addv(v[0], v[4]);
+ v[1] = addv(v[1], v[5]);
+ v[2] = addv(v[2], v[6]);
+ v[3] = addv(v[3], v[7]);
+ v[12] = xorv(v[12], v[0]);
+ v[13] = xorv(v[13], v[1]);
+ v[14] = xorv(v[14], v[2]);
+ v[15] = xorv(v[15], v[3]);
+ v[12] = rot16(v[12]);
+ v[13] = rot16(v[13]);
+ v[14] = rot16(v[14]);
+ v[15] = rot16(v[15]);
+ v[8] = addv(v[8], v[12]);
+ v[9] = addv(v[9], v[13]);
+ v[10] = addv(v[10], v[14]);
+ v[11] = addv(v[11], v[15]);
+ v[4] = xorv(v[4], v[8]);
+ v[5] = xorv(v[5], v[9]);
+ v[6] = xorv(v[6], v[10]);
+ v[7] = xorv(v[7], v[11]);
+ v[4] = rot12(v[4]);
+ v[5] = rot12(v[5]);
+ v[6] = rot12(v[6]);
+ v[7] = rot12(v[7]);
+ v[0] = addv(v[0], m[(size_t)MSG_SCHEDULE[r][1]]);
+ v[1] = addv(v[1], m[(size_t)MSG_SCHEDULE[r][3]]);
+ v[2] = addv(v[2], m[(size_t)MSG_SCHEDULE[r][5]]);
+ v[3] = addv(v[3], m[(size_t)MSG_SCHEDULE[r][7]]);
+ v[0] = addv(v[0], v[4]);
+ v[1] = addv(v[1], v[5]);
+ v[2] = addv(v[2], v[6]);
+ v[3] = addv(v[3], v[7]);
+ v[12] = xorv(v[12], v[0]);
+ v[13] = xorv(v[13], v[1]);
+ v[14] = xorv(v[14], v[2]);
+ v[15] = xorv(v[15], v[3]);
+ v[12] = rot8(v[12]);
+ v[13] = rot8(v[13]);
+ v[14] = rot8(v[14]);
+ v[15] = rot8(v[15]);
+ v[8] = addv(v[8], v[12]);
+ v[9] = addv(v[9], v[13]);
+ v[10] = addv(v[10], v[14]);
+ v[11] = addv(v[11], v[15]);
+ v[4] = xorv(v[4], v[8]);
+ v[5] = xorv(v[5], v[9]);
+ v[6] = xorv(v[6], v[10]);
+ v[7] = xorv(v[7], v[11]);
+ v[4] = rot7(v[4]);
+ v[5] = rot7(v[5]);
+ v[6] = rot7(v[6]);
+ v[7] = rot7(v[7]);
+
+ v[0] = addv(v[0], m[(size_t)MSG_SCHEDULE[r][8]]);
+ v[1] = addv(v[1], m[(size_t)MSG_SCHEDULE[r][10]]);
+ v[2] = addv(v[2], m[(size_t)MSG_SCHEDULE[r][12]]);
+ v[3] = addv(v[3], m[(size_t)MSG_SCHEDULE[r][14]]);
+ v[0] = addv(v[0], v[5]);
+ v[1] = addv(v[1], v[6]);
+ v[2] = addv(v[2], v[7]);
+ v[3] = addv(v[3], v[4]);
+ v[15] = xorv(v[15], v[0]);
+ v[12] = xorv(v[12], v[1]);
+ v[13] = xorv(v[13], v[2]);
+ v[14] = xorv(v[14], v[3]);
+ v[15] = rot16(v[15]);
+ v[12] = rot16(v[12]);
+ v[13] = rot16(v[13]);
+ v[14] = rot16(v[14]);
+ v[10] = addv(v[10], v[15]);
+ v[11] = addv(v[11], v[12]);
+ v[8] = addv(v[8], v[13]);
+ v[9] = addv(v[9], v[14]);
+ v[5] = xorv(v[5], v[10]);
+ v[6] = xorv(v[6], v[11]);
+ v[7] = xorv(v[7], v[8]);
+ v[4] = xorv(v[4], v[9]);
+ v[5] = rot12(v[5]);
+ v[6] = rot12(v[6]);
+ v[7] = rot12(v[7]);
+ v[4] = rot12(v[4]);
+ v[0] = addv(v[0], m[(size_t)MSG_SCHEDULE[r][9]]);
+ v[1] = addv(v[1], m[(size_t)MSG_SCHEDULE[r][11]]);
+ v[2] = addv(v[2], m[(size_t)MSG_SCHEDULE[r][13]]);
+ v[3] = addv(v[3], m[(size_t)MSG_SCHEDULE[r][15]]);
+ v[0] = addv(v[0], v[5]);
+ v[1] = addv(v[1], v[6]);
+ v[2] = addv(v[2], v[7]);
+ v[3] = addv(v[3], v[4]);
+ v[15] = xorv(v[15], v[0]);
+ v[12] = xorv(v[12], v[1]);
+ v[13] = xorv(v[13], v[2]);
+ v[14] = xorv(v[14], v[3]);
+ v[15] = rot8(v[15]);
+ v[12] = rot8(v[12]);
+ v[13] = rot8(v[13]);
+ v[14] = rot8(v[14]);
+ v[10] = addv(v[10], v[15]);
+ v[11] = addv(v[11], v[12]);
+ v[8] = addv(v[8], v[13]);
+ v[9] = addv(v[9], v[14]);
+ v[5] = xorv(v[5], v[10]);
+ v[6] = xorv(v[6], v[11]);
+ v[7] = xorv(v[7], v[8]);
+ v[4] = xorv(v[4], v[9]);
+ v[5] = rot7(v[5]);
+ v[6] = rot7(v[6]);
+ v[7] = rot7(v[7]);
+ v[4] = rot7(v[4]);
+}
+
+INLINE void transpose_vecs(__m128i vecs[DEGREE]) {
+  // Interleave 32-bit lanes. The low unpack is lanes 00/11 and the high is
+ // 22/33. Note that this doesn't split the vector into two lanes, as the
+ // AVX2 counterparts do.
+ __m128i ab_01 = _mm_unpacklo_epi32(vecs[0], vecs[1]);
+ __m128i ab_23 = _mm_unpackhi_epi32(vecs[0], vecs[1]);
+ __m128i cd_01 = _mm_unpacklo_epi32(vecs[2], vecs[3]);
+ __m128i cd_23 = _mm_unpackhi_epi32(vecs[2], vecs[3]);
+
+ // Interleave 64-bit lanes.
+ __m128i abcd_0 = _mm_unpacklo_epi64(ab_01, cd_01);
+ __m128i abcd_1 = _mm_unpackhi_epi64(ab_01, cd_01);
+ __m128i abcd_2 = _mm_unpacklo_epi64(ab_23, cd_23);
+ __m128i abcd_3 = _mm_unpackhi_epi64(ab_23, cd_23);
+
+ vecs[0] = abcd_0;
+ vecs[1] = abcd_1;
+ vecs[2] = abcd_2;
+ vecs[3] = abcd_3;
+}
+
+INLINE void transpose_msg_vecs(const uint8_t *const *inputs,
+ size_t block_offset, __m128i out[16]) {
+ out[0] = loadu(&inputs[0][block_offset + 0 * sizeof(__m128i)]);
+ out[1] = loadu(&inputs[1][block_offset + 0 * sizeof(__m128i)]);
+ out[2] = loadu(&inputs[2][block_offset + 0 * sizeof(__m128i)]);
+ out[3] = loadu(&inputs[3][block_offset + 0 * sizeof(__m128i)]);
+ out[4] = loadu(&inputs[0][block_offset + 1 * sizeof(__m128i)]);
+ out[5] = loadu(&inputs[1][block_offset + 1 * sizeof(__m128i)]);
+ out[6] = loadu(&inputs[2][block_offset + 1 * sizeof(__m128i)]);
+ out[7] = loadu(&inputs[3][block_offset + 1 * sizeof(__m128i)]);
+ out[8] = loadu(&inputs[0][block_offset + 2 * sizeof(__m128i)]);
+ out[9] = loadu(&inputs[1][block_offset + 2 * sizeof(__m128i)]);
+ out[10] = loadu(&inputs[2][block_offset + 2 * sizeof(__m128i)]);
+ out[11] = loadu(&inputs[3][block_offset + 2 * sizeof(__m128i)]);
+ out[12] = loadu(&inputs[0][block_offset + 3 * sizeof(__m128i)]);
+ out[13] = loadu(&inputs[1][block_offset + 3 * sizeof(__m128i)]);
+ out[14] = loadu(&inputs[2][block_offset + 3 * sizeof(__m128i)]);
+ out[15] = loadu(&inputs[3][block_offset + 3 * sizeof(__m128i)]);
+ for (size_t i = 0; i < 4; ++i) {
+ _mm_prefetch((const void *)&inputs[i][block_offset + 256], _MM_HINT_T0);
+ }
+ transpose_vecs(&out[0]);
+ transpose_vecs(&out[4]);
+ transpose_vecs(&out[8]);
+ transpose_vecs(&out[12]);
+}
+
+INLINE void load_counters(uint64_t counter, bool increment_counter,
+ __m128i *out_lo, __m128i *out_hi) {
+ const __m128i mask = _mm_set1_epi32(-(int32_t)increment_counter);
+ const __m128i add0 = _mm_set_epi32(3, 2, 1, 0);
+ const __m128i add1 = _mm_and_si128(mask, add0);
+ __m128i l = _mm_add_epi32(_mm_set1_epi32((int32_t)counter), add1);
+ __m128i carry = _mm_cmpgt_epi32(_mm_xor_si128(add1, _mm_set1_epi32(0x80000000)),
+ _mm_xor_si128( l, _mm_set1_epi32(0x80000000)));
+ __m128i h = _mm_sub_epi32(_mm_set1_epi32((int32_t)(counter >> 32)), carry);
+ *out_lo = l;
+ *out_hi = h;
+}
+
+static
+void blake3_hash4_sse41(const uint8_t *const *inputs, size_t blocks,
+ const uint32_t key[8], uint64_t counter,
+ bool increment_counter, uint8_t flags,
+ uint8_t flags_start, uint8_t flags_end, uint8_t *out) {
+ __m128i h_vecs[8] = {
+ set1(key[0]), set1(key[1]), set1(key[2]), set1(key[3]),
+ set1(key[4]), set1(key[5]), set1(key[6]), set1(key[7]),
+ };
+ __m128i counter_low_vec, counter_high_vec;
+ load_counters(counter, increment_counter, &counter_low_vec,
+ &counter_high_vec);
+ uint8_t block_flags = flags | flags_start;
+
+ for (size_t block = 0; block < blocks; block++) {
+ if (block + 1 == blocks) {
+ block_flags |= flags_end;
+ }
+ __m128i block_len_vec = set1(BLAKE3_BLOCK_LEN);
+ __m128i block_flags_vec = set1(block_flags);
+ __m128i msg_vecs[16];
+ transpose_msg_vecs(inputs, block * BLAKE3_BLOCK_LEN, msg_vecs);
+
+ __m128i v[16] = {
+ h_vecs[0], h_vecs[1], h_vecs[2], h_vecs[3],
+ h_vecs[4], h_vecs[5], h_vecs[6], h_vecs[7],
+ set1(IV[0]), set1(IV[1]), set1(IV[2]), set1(IV[3]),
+ counter_low_vec, counter_high_vec, block_len_vec, block_flags_vec,
+ };
+ round_fn(v, msg_vecs, 0);
+ round_fn(v, msg_vecs, 1);
+ round_fn(v, msg_vecs, 2);
+ round_fn(v, msg_vecs, 3);
+ round_fn(v, msg_vecs, 4);
+ round_fn(v, msg_vecs, 5);
+ round_fn(v, msg_vecs, 6);
+ h_vecs[0] = xorv(v[0], v[8]);
+ h_vecs[1] = xorv(v[1], v[9]);
+ h_vecs[2] = xorv(v[2], v[10]);
+ h_vecs[3] = xorv(v[3], v[11]);
+ h_vecs[4] = xorv(v[4], v[12]);
+ h_vecs[5] = xorv(v[5], v[13]);
+ h_vecs[6] = xorv(v[6], v[14]);
+ h_vecs[7] = xorv(v[7], v[15]);
+
+ block_flags = flags;
+ }
+
+ transpose_vecs(&h_vecs[0]);
+ transpose_vecs(&h_vecs[4]);
+ // The first four vecs now contain the first half of each output, and the
+ // second four vecs contain the second half of each output.
+ storeu(h_vecs[0], &out[0 * sizeof(__m128i)]);
+ storeu(h_vecs[4], &out[1 * sizeof(__m128i)]);
+ storeu(h_vecs[1], &out[2 * sizeof(__m128i)]);
+ storeu(h_vecs[5], &out[3 * sizeof(__m128i)]);
+ storeu(h_vecs[2], &out[4 * sizeof(__m128i)]);
+ storeu(h_vecs[6], &out[5 * sizeof(__m128i)]);
+ storeu(h_vecs[3], &out[6 * sizeof(__m128i)]);
+ storeu(h_vecs[7], &out[7 * sizeof(__m128i)]);
+}
+
+INLINE void hash_one_sse41(const uint8_t *input, size_t blocks,
+ const uint32_t key[8], uint64_t counter,
+ uint8_t flags, uint8_t flags_start,
+ uint8_t flags_end, uint8_t out[BLAKE3_OUT_LEN]) {
+ uint32_t cv[8];
+ memcpy(cv, key, BLAKE3_KEY_LEN);
+ uint8_t block_flags = flags | flags_start;
+ while (blocks > 0) {
+ if (blocks == 1) {
+ block_flags |= flags_end;
+ }
+ blake3_compress_in_place_sse41(cv, input, BLAKE3_BLOCK_LEN, counter,
+ block_flags);
+ input = &input[BLAKE3_BLOCK_LEN];
+ blocks -= 1;
+ block_flags = flags;
+ }
+ memcpy(out, cv, BLAKE3_OUT_LEN);
+}
+
+void blake3_hash_many_sse41(const uint8_t *const *inputs, size_t num_inputs,
+ size_t blocks, const uint32_t key[8],
+ uint64_t counter, bool increment_counter,
+ uint8_t flags, uint8_t flags_start,
+ uint8_t flags_end, uint8_t *out) {
+ while (num_inputs >= DEGREE) {
+ blake3_hash4_sse41(inputs, blocks, key, counter, increment_counter, flags,
+ flags_start, flags_end, out);
+ if (increment_counter) {
+ counter += DEGREE;
+ }
+ inputs += DEGREE;
+ num_inputs -= DEGREE;
+ out = &out[DEGREE * BLAKE3_OUT_LEN];
+ }
+ while (num_inputs > 0) {
+ hash_one_sse41(inputs[0], blocks, key, counter, flags, flags_start,
+ flags_end, out);
+ if (increment_counter) {
+ counter += 1;
+ }
+ inputs += 1;
+ num_inputs -= 1;
+ out = &out[BLAKE3_OUT_LEN];
+ }
+}
diff --git a/contrib/libs/llvm16/lib/Support/BLAKE3/blake3_sse41_x86-64_windows_msvc.asm b/contrib/libs/llvm16/lib/Support/BLAKE3/blake3_sse41_x86-64_windows_msvc.asm
new file mode 100644
index 0000000000..770935372c
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Support/BLAKE3/blake3_sse41_x86-64_windows_msvc.asm
@@ -0,0 +1,2089 @@
+public _llvm_blake3_hash_many_sse41
+public llvm_blake3_hash_many_sse41
+public llvm_blake3_compress_in_place_sse41
+public _llvm_blake3_compress_in_place_sse41
+public llvm_blake3_compress_xof_sse41
+public _llvm_blake3_compress_xof_sse41
+
+_TEXT SEGMENT ALIGN(16) 'CODE'
+
+ALIGN 16
+llvm_blake3_hash_many_sse41 PROC
+_llvm_blake3_hash_many_sse41 PROC
+ push r15
+ push r14
+ push r13
+ push r12
+ push rsi
+ push rdi
+ push rbx
+ push rbp
+ mov rbp, rsp
+ sub rsp, 528
+ and rsp, 0FFFFFFFFFFFFFFC0H
+ movdqa xmmword ptr [rsp+170H], xmm6
+ movdqa xmmword ptr [rsp+180H], xmm7
+ movdqa xmmword ptr [rsp+190H], xmm8
+ movdqa xmmword ptr [rsp+1A0H], xmm9
+ movdqa xmmword ptr [rsp+1B0H], xmm10
+ movdqa xmmword ptr [rsp+1C0H], xmm11
+ movdqa xmmword ptr [rsp+1D0H], xmm12
+ movdqa xmmword ptr [rsp+1E0H], xmm13
+ movdqa xmmword ptr [rsp+1F0H], xmm14
+ movdqa xmmword ptr [rsp+200H], xmm15
+ mov rdi, rcx
+ mov rsi, rdx
+ mov rdx, r8
+ mov rcx, r9
+ mov r8, qword ptr [rbp+68H]
+ movzx r9, byte ptr [rbp+70H]
+ neg r9d
+ movd xmm0, r9d
+ pshufd xmm0, xmm0, 00H
+ movdqa xmmword ptr [rsp+130H], xmm0
+ movdqa xmm1, xmm0
+ pand xmm1, xmmword ptr [ADD0]
+ pand xmm0, xmmword ptr [ADD1]
+ movdqa xmmword ptr [rsp+150H], xmm0
+ movd xmm0, r8d
+ pshufd xmm0, xmm0, 00H
+ paddd xmm0, xmm1
+ movdqa xmmword ptr [rsp+110H], xmm0
+ pxor xmm0, xmmword ptr [CMP_MSB_MASK]
+ pxor xmm1, xmmword ptr [CMP_MSB_MASK]
+ pcmpgtd xmm1, xmm0
+ shr r8, 32
+ movd xmm2, r8d
+ pshufd xmm2, xmm2, 00H
+ psubd xmm2, xmm1
+ movdqa xmmword ptr [rsp+120H], xmm2
+ mov rbx, qword ptr [rbp+90H]
+ mov r15, rdx
+ shl r15, 6
+ movzx r13d, byte ptr [rbp+78H]
+ movzx r12d, byte ptr [rbp+88H]
+ cmp rsi, 4
+ jc final3blocks
+outerloop4:
+ movdqu xmm3, xmmword ptr [rcx]
+ pshufd xmm0, xmm3, 00H
+ pshufd xmm1, xmm3, 55H
+ pshufd xmm2, xmm3, 0AAH
+ pshufd xmm3, xmm3, 0FFH
+ movdqu xmm7, xmmword ptr [rcx+10H]
+ pshufd xmm4, xmm7, 00H
+ pshufd xmm5, xmm7, 55H
+ pshufd xmm6, xmm7, 0AAH
+ pshufd xmm7, xmm7, 0FFH
+ mov r8, qword ptr [rdi]
+ mov r9, qword ptr [rdi+8H]
+ mov r10, qword ptr [rdi+10H]
+ mov r11, qword ptr [rdi+18H]
+ movzx eax, byte ptr [rbp+80H]
+ or eax, r13d
+ xor edx, edx
+innerloop4:
+ mov r14d, eax
+ or eax, r12d
+ add rdx, 64
+ cmp rdx, r15
+ cmovne eax, r14d
+ movdqu xmm8, xmmword ptr [r8+rdx-40H]
+ movdqu xmm9, xmmword ptr [r9+rdx-40H]
+ movdqu xmm10, xmmword ptr [r10+rdx-40H]
+ movdqu xmm11, xmmword ptr [r11+rdx-40H]
+ movdqa xmm12, xmm8
+ punpckldq xmm8, xmm9
+ punpckhdq xmm12, xmm9
+ movdqa xmm14, xmm10
+ punpckldq xmm10, xmm11
+ punpckhdq xmm14, xmm11
+ movdqa xmm9, xmm8
+ punpcklqdq xmm8, xmm10
+ punpckhqdq xmm9, xmm10
+ movdqa xmm13, xmm12
+ punpcklqdq xmm12, xmm14
+ punpckhqdq xmm13, xmm14
+ movdqa xmmword ptr [rsp], xmm8
+ movdqa xmmword ptr [rsp+10H], xmm9
+ movdqa xmmword ptr [rsp+20H], xmm12
+ movdqa xmmword ptr [rsp+30H], xmm13
+ movdqu xmm8, xmmword ptr [r8+rdx-30H]
+ movdqu xmm9, xmmword ptr [r9+rdx-30H]
+ movdqu xmm10, xmmword ptr [r10+rdx-30H]
+ movdqu xmm11, xmmword ptr [r11+rdx-30H]
+ movdqa xmm12, xmm8
+ punpckldq xmm8, xmm9
+ punpckhdq xmm12, xmm9
+ movdqa xmm14, xmm10
+ punpckldq xmm10, xmm11
+ punpckhdq xmm14, xmm11
+ movdqa xmm9, xmm8
+ punpcklqdq xmm8, xmm10
+ punpckhqdq xmm9, xmm10
+ movdqa xmm13, xmm12
+ punpcklqdq xmm12, xmm14
+ punpckhqdq xmm13, xmm14
+ movdqa xmmword ptr [rsp+40H], xmm8
+ movdqa xmmword ptr [rsp+50H], xmm9
+ movdqa xmmword ptr [rsp+60H], xmm12
+ movdqa xmmword ptr [rsp+70H], xmm13
+ movdqu xmm8, xmmword ptr [r8+rdx-20H]
+ movdqu xmm9, xmmword ptr [r9+rdx-20H]
+ movdqu xmm10, xmmword ptr [r10+rdx-20H]
+ movdqu xmm11, xmmword ptr [r11+rdx-20H]
+ movdqa xmm12, xmm8
+ punpckldq xmm8, xmm9
+ punpckhdq xmm12, xmm9
+ movdqa xmm14, xmm10
+ punpckldq xmm10, xmm11
+ punpckhdq xmm14, xmm11
+ movdqa xmm9, xmm8
+ punpcklqdq xmm8, xmm10
+ punpckhqdq xmm9, xmm10
+ movdqa xmm13, xmm12
+ punpcklqdq xmm12, xmm14
+ punpckhqdq xmm13, xmm14
+ movdqa xmmword ptr [rsp+80H], xmm8
+ movdqa xmmword ptr [rsp+90H], xmm9
+ movdqa xmmword ptr [rsp+0A0H], xmm12
+ movdqa xmmword ptr [rsp+0B0H], xmm13
+ movdqu xmm8, xmmword ptr [r8+rdx-10H]
+ movdqu xmm9, xmmword ptr [r9+rdx-10H]
+ movdqu xmm10, xmmword ptr [r10+rdx-10H]
+ movdqu xmm11, xmmword ptr [r11+rdx-10H]
+ movdqa xmm12, xmm8
+ punpckldq xmm8, xmm9
+ punpckhdq xmm12, xmm9
+ movdqa xmm14, xmm10
+ punpckldq xmm10, xmm11
+ punpckhdq xmm14, xmm11
+ movdqa xmm9, xmm8
+ punpcklqdq xmm8, xmm10
+ punpckhqdq xmm9, xmm10
+ movdqa xmm13, xmm12
+ punpcklqdq xmm12, xmm14
+ punpckhqdq xmm13, xmm14
+ movdqa xmmword ptr [rsp+0C0H], xmm8
+ movdqa xmmword ptr [rsp+0D0H], xmm9
+ movdqa xmmword ptr [rsp+0E0H], xmm12
+ movdqa xmmword ptr [rsp+0F0H], xmm13
+ movdqa xmm9, xmmword ptr [BLAKE3_IV_1]
+ movdqa xmm10, xmmword ptr [BLAKE3_IV_2]
+ movdqa xmm11, xmmword ptr [BLAKE3_IV_3]
+ movdqa xmm12, xmmword ptr [rsp+110H]
+ movdqa xmm13, xmmword ptr [rsp+120H]
+ movdqa xmm14, xmmword ptr [BLAKE3_BLOCK_LEN]
+ movd xmm15, eax
+ pshufd xmm15, xmm15, 00H
+ prefetcht0 byte ptr [r8+rdx+80H]
+ prefetcht0 byte ptr [r9+rdx+80H]
+ prefetcht0 byte ptr [r10+rdx+80H]
+ prefetcht0 byte ptr [r11+rdx+80H]
+ paddd xmm0, xmmword ptr [rsp]
+ paddd xmm1, xmmword ptr [rsp+20H]
+ paddd xmm2, xmmword ptr [rsp+40H]
+ paddd xmm3, xmmword ptr [rsp+60H]
+ paddd xmm0, xmm4
+ paddd xmm1, xmm5
+ paddd xmm2, xmm6
+ paddd xmm3, xmm7
+ pxor xmm12, xmm0
+ pxor xmm13, xmm1
+ pxor xmm14, xmm2
+ pxor xmm15, xmm3
+ movdqa xmm8, xmmword ptr [ROT16]
+ pshufb xmm12, xmm8
+ pshufb xmm13, xmm8
+ pshufb xmm14, xmm8
+ pshufb xmm15, xmm8
+ movdqa xmm8, xmmword ptr [BLAKE3_IV_0]
+ paddd xmm8, xmm12
+ paddd xmm9, xmm13
+ paddd xmm10, xmm14
+ paddd xmm11, xmm15
+ pxor xmm4, xmm8
+ pxor xmm5, xmm9
+ pxor xmm6, xmm10
+ pxor xmm7, xmm11
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 12
+ pslld xmm4, 20
+ por xmm4, xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 12
+ pslld xmm5, 20
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 12
+ pslld xmm6, 20
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 12
+ pslld xmm7, 20
+ por xmm7, xmm8
+ paddd xmm0, xmmword ptr [rsp+10H]
+ paddd xmm1, xmmword ptr [rsp+30H]
+ paddd xmm2, xmmword ptr [rsp+50H]
+ paddd xmm3, xmmword ptr [rsp+70H]
+ paddd xmm0, xmm4
+ paddd xmm1, xmm5
+ paddd xmm2, xmm6
+ paddd xmm3, xmm7
+ pxor xmm12, xmm0
+ pxor xmm13, xmm1
+ pxor xmm14, xmm2
+ pxor xmm15, xmm3
+ movdqa xmm8, xmmword ptr [ROT8]
+ pshufb xmm12, xmm8
+ pshufb xmm13, xmm8
+ pshufb xmm14, xmm8
+ pshufb xmm15, xmm8
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm12
+ paddd xmm9, xmm13
+ paddd xmm10, xmm14
+ paddd xmm11, xmm15
+ pxor xmm4, xmm8
+ pxor xmm5, xmm9
+ pxor xmm6, xmm10
+ pxor xmm7, xmm11
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 7
+ pslld xmm4, 25
+ por xmm4, xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 7
+ pslld xmm5, 25
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 7
+ pslld xmm6, 25
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 7
+ pslld xmm7, 25
+ por xmm7, xmm8
+ paddd xmm0, xmmword ptr [rsp+80H]
+ paddd xmm1, xmmword ptr [rsp+0A0H]
+ paddd xmm2, xmmword ptr [rsp+0C0H]
+ paddd xmm3, xmmword ptr [rsp+0E0H]
+ paddd xmm0, xmm5
+ paddd xmm1, xmm6
+ paddd xmm2, xmm7
+ paddd xmm3, xmm4
+ pxor xmm15, xmm0
+ pxor xmm12, xmm1
+ pxor xmm13, xmm2
+ pxor xmm14, xmm3
+ movdqa xmm8, xmmword ptr [ROT16]
+ pshufb xmm15, xmm8
+ pshufb xmm12, xmm8
+ pshufb xmm13, xmm8
+ pshufb xmm14, xmm8
+ paddd xmm10, xmm15
+ paddd xmm11, xmm12
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm13
+ paddd xmm9, xmm14
+ pxor xmm5, xmm10
+ pxor xmm6, xmm11
+ pxor xmm7, xmm8
+ pxor xmm4, xmm9
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 12
+ pslld xmm5, 20
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 12
+ pslld xmm6, 20
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 12
+ pslld xmm7, 20
+ por xmm7, xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 12
+ pslld xmm4, 20
+ por xmm4, xmm8
+ paddd xmm0, xmmword ptr [rsp+90H]
+ paddd xmm1, xmmword ptr [rsp+0B0H]
+ paddd xmm2, xmmword ptr [rsp+0D0H]
+ paddd xmm3, xmmword ptr [rsp+0F0H]
+ paddd xmm0, xmm5
+ paddd xmm1, xmm6
+ paddd xmm2, xmm7
+ paddd xmm3, xmm4
+ pxor xmm15, xmm0
+ pxor xmm12, xmm1
+ pxor xmm13, xmm2
+ pxor xmm14, xmm3
+ movdqa xmm8, xmmword ptr [ROT8]
+ pshufb xmm15, xmm8
+ pshufb xmm12, xmm8
+ pshufb xmm13, xmm8
+ pshufb xmm14, xmm8
+ paddd xmm10, xmm15
+ paddd xmm11, xmm12
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm13
+ paddd xmm9, xmm14
+ pxor xmm5, xmm10
+ pxor xmm6, xmm11
+ pxor xmm7, xmm8
+ pxor xmm4, xmm9
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 7
+ pslld xmm5, 25
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 7
+ pslld xmm6, 25
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 7
+ pslld xmm7, 25
+ por xmm7, xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 7
+ pslld xmm4, 25
+ por xmm4, xmm8
+ paddd xmm0, xmmword ptr [rsp+20H]
+ paddd xmm1, xmmword ptr [rsp+30H]
+ paddd xmm2, xmmword ptr [rsp+70H]
+ paddd xmm3, xmmword ptr [rsp+40H]
+ paddd xmm0, xmm4
+ paddd xmm1, xmm5
+ paddd xmm2, xmm6
+ paddd xmm3, xmm7
+ pxor xmm12, xmm0
+ pxor xmm13, xmm1
+ pxor xmm14, xmm2
+ pxor xmm15, xmm3
+ movdqa xmm8, xmmword ptr [ROT16]
+ pshufb xmm12, xmm8
+ pshufb xmm13, xmm8
+ pshufb xmm14, xmm8
+ pshufb xmm15, xmm8
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm12
+ paddd xmm9, xmm13
+ paddd xmm10, xmm14
+ paddd xmm11, xmm15
+ pxor xmm4, xmm8
+ pxor xmm5, xmm9
+ pxor xmm6, xmm10
+ pxor xmm7, xmm11
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 12
+ pslld xmm4, 20
+ por xmm4, xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 12
+ pslld xmm5, 20
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 12
+ pslld xmm6, 20
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 12
+ pslld xmm7, 20
+ por xmm7, xmm8
+ paddd xmm0, xmmword ptr [rsp+60H]
+ paddd xmm1, xmmword ptr [rsp+0A0H]
+ paddd xmm2, xmmword ptr [rsp]
+ paddd xmm3, xmmword ptr [rsp+0D0H]
+ paddd xmm0, xmm4
+ paddd xmm1, xmm5
+ paddd xmm2, xmm6
+ paddd xmm3, xmm7
+ pxor xmm12, xmm0
+ pxor xmm13, xmm1
+ pxor xmm14, xmm2
+ pxor xmm15, xmm3
+ movdqa xmm8, xmmword ptr [ROT8]
+ pshufb xmm12, xmm8
+ pshufb xmm13, xmm8
+ pshufb xmm14, xmm8
+ pshufb xmm15, xmm8
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm12
+ paddd xmm9, xmm13
+ paddd xmm10, xmm14
+ paddd xmm11, xmm15
+ pxor xmm4, xmm8
+ pxor xmm5, xmm9
+ pxor xmm6, xmm10
+ pxor xmm7, xmm11
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 7
+ pslld xmm4, 25
+ por xmm4, xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 7
+ pslld xmm5, 25
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 7
+ pslld xmm6, 25
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 7
+ pslld xmm7, 25
+ por xmm7, xmm8
+ paddd xmm0, xmmword ptr [rsp+10H]
+ paddd xmm1, xmmword ptr [rsp+0C0H]
+ paddd xmm2, xmmword ptr [rsp+90H]
+ paddd xmm3, xmmword ptr [rsp+0F0H]
+ paddd xmm0, xmm5
+ paddd xmm1, xmm6
+ paddd xmm2, xmm7
+ paddd xmm3, xmm4
+ pxor xmm15, xmm0
+ pxor xmm12, xmm1
+ pxor xmm13, xmm2
+ pxor xmm14, xmm3
+ movdqa xmm8, xmmword ptr [ROT16]
+ pshufb xmm15, xmm8
+ pshufb xmm12, xmm8
+ pshufb xmm13, xmm8
+ pshufb xmm14, xmm8
+ paddd xmm10, xmm15
+ paddd xmm11, xmm12
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm13
+ paddd xmm9, xmm14
+ pxor xmm5, xmm10
+ pxor xmm6, xmm11
+ pxor xmm7, xmm8
+ pxor xmm4, xmm9
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 12
+ pslld xmm5, 20
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 12
+ pslld xmm6, 20
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 12
+ pslld xmm7, 20
+ por xmm7, xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 12
+ pslld xmm4, 20
+ por xmm4, xmm8
+ paddd xmm0, xmmword ptr [rsp+0B0H]
+ paddd xmm1, xmmword ptr [rsp+50H]
+ paddd xmm2, xmmword ptr [rsp+0E0H]
+ paddd xmm3, xmmword ptr [rsp+80H]
+ paddd xmm0, xmm5
+ paddd xmm1, xmm6
+ paddd xmm2, xmm7
+ paddd xmm3, xmm4
+ pxor xmm15, xmm0
+ pxor xmm12, xmm1
+ pxor xmm13, xmm2
+ pxor xmm14, xmm3
+ movdqa xmm8, xmmword ptr [ROT8]
+ pshufb xmm15, xmm8
+ pshufb xmm12, xmm8
+ pshufb xmm13, xmm8
+ pshufb xmm14, xmm8
+ paddd xmm10, xmm15
+ paddd xmm11, xmm12
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm13
+ paddd xmm9, xmm14
+ pxor xmm5, xmm10
+ pxor xmm6, xmm11
+ pxor xmm7, xmm8
+ pxor xmm4, xmm9
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 7
+ pslld xmm5, 25
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 7
+ pslld xmm6, 25
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 7
+ pslld xmm7, 25
+ por xmm7, xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 7
+ pslld xmm4, 25
+ por xmm4, xmm8
+ paddd xmm0, xmmword ptr [rsp+30H]
+ paddd xmm1, xmmword ptr [rsp+0A0H]
+ paddd xmm2, xmmword ptr [rsp+0D0H]
+ paddd xmm3, xmmword ptr [rsp+70H]
+ paddd xmm0, xmm4
+ paddd xmm1, xmm5
+ paddd xmm2, xmm6
+ paddd xmm3, xmm7
+ pxor xmm12, xmm0
+ pxor xmm13, xmm1
+ pxor xmm14, xmm2
+ pxor xmm15, xmm3
+ movdqa xmm8, xmmword ptr [ROT16]
+ pshufb xmm12, xmm8
+ pshufb xmm13, xmm8
+ pshufb xmm14, xmm8
+ pshufb xmm15, xmm8
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm12
+ paddd xmm9, xmm13
+ paddd xmm10, xmm14
+ paddd xmm11, xmm15
+ pxor xmm4, xmm8
+ pxor xmm5, xmm9
+ pxor xmm6, xmm10
+ pxor xmm7, xmm11
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 12
+ pslld xmm4, 20
+ por xmm4, xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 12
+ pslld xmm5, 20
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 12
+ pslld xmm6, 20
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 12
+ pslld xmm7, 20
+ por xmm7, xmm8
+ paddd xmm0, xmmword ptr [rsp+40H]
+ paddd xmm1, xmmword ptr [rsp+0C0H]
+ paddd xmm2, xmmword ptr [rsp+20H]
+ paddd xmm3, xmmword ptr [rsp+0E0H]
+ paddd xmm0, xmm4
+ paddd xmm1, xmm5
+ paddd xmm2, xmm6
+ paddd xmm3, xmm7
+ pxor xmm12, xmm0
+ pxor xmm13, xmm1
+ pxor xmm14, xmm2
+ pxor xmm15, xmm3
+ movdqa xmm8, xmmword ptr [ROT8]
+ pshufb xmm12, xmm8
+ pshufb xmm13, xmm8
+ pshufb xmm14, xmm8
+ pshufb xmm15, xmm8
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm12
+ paddd xmm9, xmm13
+ paddd xmm10, xmm14
+ paddd xmm11, xmm15
+ pxor xmm4, xmm8
+ pxor xmm5, xmm9
+ pxor xmm6, xmm10
+ pxor xmm7, xmm11
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 7
+ pslld xmm4, 25
+ por xmm4, xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 7
+ pslld xmm5, 25
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 7
+ pslld xmm6, 25
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 7
+ pslld xmm7, 25
+ por xmm7, xmm8
+ paddd xmm0, xmmword ptr [rsp+60H]
+ paddd xmm1, xmmword ptr [rsp+90H]
+ paddd xmm2, xmmword ptr [rsp+0B0H]
+ paddd xmm3, xmmword ptr [rsp+80H]
+ paddd xmm0, xmm5
+ paddd xmm1, xmm6
+ paddd xmm2, xmm7
+ paddd xmm3, xmm4
+ pxor xmm15, xmm0
+ pxor xmm12, xmm1
+ pxor xmm13, xmm2
+ pxor xmm14, xmm3
+ movdqa xmm8, xmmword ptr [ROT16]
+ pshufb xmm15, xmm8
+ pshufb xmm12, xmm8
+ pshufb xmm13, xmm8
+ pshufb xmm14, xmm8
+ paddd xmm10, xmm15
+ paddd xmm11, xmm12
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm13
+ paddd xmm9, xmm14
+ pxor xmm5, xmm10
+ pxor xmm6, xmm11
+ pxor xmm7, xmm8
+ pxor xmm4, xmm9
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 12
+ pslld xmm5, 20
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 12
+ pslld xmm6, 20
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 12
+ pslld xmm7, 20
+ por xmm7, xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 12
+ pslld xmm4, 20
+ por xmm4, xmm8
+ paddd xmm0, xmmword ptr [rsp+50H]
+ paddd xmm1, xmmword ptr [rsp]
+ paddd xmm2, xmmword ptr [rsp+0F0H]
+ paddd xmm3, xmmword ptr [rsp+10H]
+ paddd xmm0, xmm5
+ paddd xmm1, xmm6
+ paddd xmm2, xmm7
+ paddd xmm3, xmm4
+ pxor xmm15, xmm0
+ pxor xmm12, xmm1
+ pxor xmm13, xmm2
+ pxor xmm14, xmm3
+ movdqa xmm8, xmmword ptr [ROT8]
+ pshufb xmm15, xmm8
+ pshufb xmm12, xmm8
+ pshufb xmm13, xmm8
+ pshufb xmm14, xmm8
+ paddd xmm10, xmm15
+ paddd xmm11, xmm12
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm13
+ paddd xmm9, xmm14
+ pxor xmm5, xmm10
+ pxor xmm6, xmm11
+ pxor xmm7, xmm8
+ pxor xmm4, xmm9
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 7
+ pslld xmm5, 25
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 7
+ pslld xmm6, 25
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 7
+ pslld xmm7, 25
+ por xmm7, xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 7
+ pslld xmm4, 25
+ por xmm4, xmm8
+ paddd xmm0, xmmword ptr [rsp+0A0H]
+ paddd xmm1, xmmword ptr [rsp+0C0H]
+ paddd xmm2, xmmword ptr [rsp+0E0H]
+ paddd xmm3, xmmword ptr [rsp+0D0H]
+ paddd xmm0, xmm4
+ paddd xmm1, xmm5
+ paddd xmm2, xmm6
+ paddd xmm3, xmm7
+ pxor xmm12, xmm0
+ pxor xmm13, xmm1
+ pxor xmm14, xmm2
+ pxor xmm15, xmm3
+ movdqa xmm8, xmmword ptr [ROT16]
+ pshufb xmm12, xmm8
+ pshufb xmm13, xmm8
+ pshufb xmm14, xmm8
+ pshufb xmm15, xmm8
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm12
+ paddd xmm9, xmm13
+ paddd xmm10, xmm14
+ paddd xmm11, xmm15
+ pxor xmm4, xmm8
+ pxor xmm5, xmm9
+ pxor xmm6, xmm10
+ pxor xmm7, xmm11
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 12
+ pslld xmm4, 20
+ por xmm4, xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 12
+ pslld xmm5, 20
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 12
+ pslld xmm6, 20
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 12
+ pslld xmm7, 20
+ por xmm7, xmm8
+ paddd xmm0, xmmword ptr [rsp+70H]
+ paddd xmm1, xmmword ptr [rsp+90H]
+ paddd xmm2, xmmword ptr [rsp+30H]
+ paddd xmm3, xmmword ptr [rsp+0F0H]
+ paddd xmm0, xmm4
+ paddd xmm1, xmm5
+ paddd xmm2, xmm6
+ paddd xmm3, xmm7
+ pxor xmm12, xmm0
+ pxor xmm13, xmm1
+ pxor xmm14, xmm2
+ pxor xmm15, xmm3
+ movdqa xmm8, xmmword ptr [ROT8]
+ pshufb xmm12, xmm8
+ pshufb xmm13, xmm8
+ pshufb xmm14, xmm8
+ pshufb xmm15, xmm8
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm12
+ paddd xmm9, xmm13
+ paddd xmm10, xmm14
+ paddd xmm11, xmm15
+ pxor xmm4, xmm8
+ pxor xmm5, xmm9
+ pxor xmm6, xmm10
+ pxor xmm7, xmm11
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 7
+ pslld xmm4, 25
+ por xmm4, xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 7
+ pslld xmm5, 25
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 7
+ pslld xmm6, 25
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 7
+ pslld xmm7, 25
+ por xmm7, xmm8
+ paddd xmm0, xmmword ptr [rsp+40H]
+ paddd xmm1, xmmword ptr [rsp+0B0H]
+ paddd xmm2, xmmword ptr [rsp+50H]
+ paddd xmm3, xmmword ptr [rsp+10H]
+ paddd xmm0, xmm5
+ paddd xmm1, xmm6
+ paddd xmm2, xmm7
+ paddd xmm3, xmm4
+ pxor xmm15, xmm0
+ pxor xmm12, xmm1
+ pxor xmm13, xmm2
+ pxor xmm14, xmm3
+ movdqa xmm8, xmmword ptr [ROT16]
+ pshufb xmm15, xmm8
+ pshufb xmm12, xmm8
+ pshufb xmm13, xmm8
+ pshufb xmm14, xmm8
+ paddd xmm10, xmm15
+ paddd xmm11, xmm12
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm13
+ paddd xmm9, xmm14
+ pxor xmm5, xmm10
+ pxor xmm6, xmm11
+ pxor xmm7, xmm8
+ pxor xmm4, xmm9
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 12
+ pslld xmm5, 20
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 12
+ pslld xmm6, 20
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 12
+ pslld xmm7, 20
+ por xmm7, xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 12
+ pslld xmm4, 20
+ por xmm4, xmm8
+ paddd xmm0, xmmword ptr [rsp]
+ paddd xmm1, xmmword ptr [rsp+20H]
+ paddd xmm2, xmmword ptr [rsp+80H]
+ paddd xmm3, xmmword ptr [rsp+60H]
+ paddd xmm0, xmm5
+ paddd xmm1, xmm6
+ paddd xmm2, xmm7
+ paddd xmm3, xmm4
+ pxor xmm15, xmm0
+ pxor xmm12, xmm1
+ pxor xmm13, xmm2
+ pxor xmm14, xmm3
+ movdqa xmm8, xmmword ptr [ROT8]
+ pshufb xmm15, xmm8
+ pshufb xmm12, xmm8
+ pshufb xmm13, xmm8
+ pshufb xmm14, xmm8
+ paddd xmm10, xmm15
+ paddd xmm11, xmm12
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm13
+ paddd xmm9, xmm14
+ pxor xmm5, xmm10
+ pxor xmm6, xmm11
+ pxor xmm7, xmm8
+ pxor xmm4, xmm9
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 7
+ pslld xmm5, 25
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 7
+ pslld xmm6, 25
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 7
+ pslld xmm7, 25
+ por xmm7, xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 7
+ pslld xmm4, 25
+ por xmm4, xmm8
+ paddd xmm0, xmmword ptr [rsp+0C0H]
+ paddd xmm1, xmmword ptr [rsp+90H]
+ paddd xmm2, xmmword ptr [rsp+0F0H]
+ paddd xmm3, xmmword ptr [rsp+0E0H]
+ paddd xmm0, xmm4
+ paddd xmm1, xmm5
+ paddd xmm2, xmm6
+ paddd xmm3, xmm7
+ pxor xmm12, xmm0
+ pxor xmm13, xmm1
+ pxor xmm14, xmm2
+ pxor xmm15, xmm3
+ movdqa xmm8, xmmword ptr [ROT16]
+ pshufb xmm12, xmm8
+ pshufb xmm13, xmm8
+ pshufb xmm14, xmm8
+ pshufb xmm15, xmm8
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm12
+ paddd xmm9, xmm13
+ paddd xmm10, xmm14
+ paddd xmm11, xmm15
+ pxor xmm4, xmm8
+ pxor xmm5, xmm9
+ pxor xmm6, xmm10
+ pxor xmm7, xmm11
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 12
+ pslld xmm4, 20
+ por xmm4, xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 12
+ pslld xmm5, 20
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 12
+ pslld xmm6, 20
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 12
+ pslld xmm7, 20
+ por xmm7, xmm8
+ paddd xmm0, xmmword ptr [rsp+0D0H]
+ paddd xmm1, xmmword ptr [rsp+0B0H]
+ paddd xmm2, xmmword ptr [rsp+0A0H]
+ paddd xmm3, xmmword ptr [rsp+80H]
+ paddd xmm0, xmm4
+ paddd xmm1, xmm5
+ paddd xmm2, xmm6
+ paddd xmm3, xmm7
+ pxor xmm12, xmm0
+ pxor xmm13, xmm1
+ pxor xmm14, xmm2
+ pxor xmm15, xmm3
+ movdqa xmm8, xmmword ptr [ROT8]
+ pshufb xmm12, xmm8
+ pshufb xmm13, xmm8
+ pshufb xmm14, xmm8
+ pshufb xmm15, xmm8
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm12
+ paddd xmm9, xmm13
+ paddd xmm10, xmm14
+ paddd xmm11, xmm15
+ pxor xmm4, xmm8
+ pxor xmm5, xmm9
+ pxor xmm6, xmm10
+ pxor xmm7, xmm11
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 7
+ pslld xmm4, 25
+ por xmm4, xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 7
+ pslld xmm5, 25
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 7
+ pslld xmm6, 25
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 7
+ pslld xmm7, 25
+ por xmm7, xmm8
+ paddd xmm0, xmmword ptr [rsp+70H]
+ paddd xmm1, xmmword ptr [rsp+50H]
+ paddd xmm2, xmmword ptr [rsp]
+ paddd xmm3, xmmword ptr [rsp+60H]
+ paddd xmm0, xmm5
+ paddd xmm1, xmm6
+ paddd xmm2, xmm7
+ paddd xmm3, xmm4
+ pxor xmm15, xmm0
+ pxor xmm12, xmm1
+ pxor xmm13, xmm2
+ pxor xmm14, xmm3
+ movdqa xmm8, xmmword ptr [ROT16]
+ pshufb xmm15, xmm8
+ pshufb xmm12, xmm8
+ pshufb xmm13, xmm8
+ pshufb xmm14, xmm8
+ paddd xmm10, xmm15
+ paddd xmm11, xmm12
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm13
+ paddd xmm9, xmm14
+ pxor xmm5, xmm10
+ pxor xmm6, xmm11
+ pxor xmm7, xmm8
+ pxor xmm4, xmm9
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 12
+ pslld xmm5, 20
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 12
+ pslld xmm6, 20
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 12
+ pslld xmm7, 20
+ por xmm7, xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 12
+ pslld xmm4, 20
+ por xmm4, xmm8
+ paddd xmm0, xmmword ptr [rsp+20H]
+ paddd xmm1, xmmword ptr [rsp+30H]
+ paddd xmm2, xmmword ptr [rsp+10H]
+ paddd xmm3, xmmword ptr [rsp+40H]
+ paddd xmm0, xmm5
+ paddd xmm1, xmm6
+ paddd xmm2, xmm7
+ paddd xmm3, xmm4
+ pxor xmm15, xmm0
+ pxor xmm12, xmm1
+ pxor xmm13, xmm2
+ pxor xmm14, xmm3
+ movdqa xmm8, xmmword ptr [ROT8]
+ pshufb xmm15, xmm8
+ pshufb xmm12, xmm8
+ pshufb xmm13, xmm8
+ pshufb xmm14, xmm8
+ paddd xmm10, xmm15
+ paddd xmm11, xmm12
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm13
+ paddd xmm9, xmm14
+ pxor xmm5, xmm10
+ pxor xmm6, xmm11
+ pxor xmm7, xmm8
+ pxor xmm4, xmm9
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 7
+ pslld xmm5, 25
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 7
+ pslld xmm6, 25
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 7
+ pslld xmm7, 25
+ por xmm7, xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 7
+ pslld xmm4, 25
+ por xmm4, xmm8
+ paddd xmm0, xmmword ptr [rsp+90H]
+ paddd xmm1, xmmword ptr [rsp+0B0H]
+ paddd xmm2, xmmword ptr [rsp+80H]
+ paddd xmm3, xmmword ptr [rsp+0F0H]
+ paddd xmm0, xmm4
+ paddd xmm1, xmm5
+ paddd xmm2, xmm6
+ paddd xmm3, xmm7
+ pxor xmm12, xmm0
+ pxor xmm13, xmm1
+ pxor xmm14, xmm2
+ pxor xmm15, xmm3
+ movdqa xmm8, xmmword ptr [ROT16]
+ pshufb xmm12, xmm8
+ pshufb xmm13, xmm8
+ pshufb xmm14, xmm8
+ pshufb xmm15, xmm8
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm12
+ paddd xmm9, xmm13
+ paddd xmm10, xmm14
+ paddd xmm11, xmm15
+ pxor xmm4, xmm8
+ pxor xmm5, xmm9
+ pxor xmm6, xmm10
+ pxor xmm7, xmm11
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 12
+ pslld xmm4, 20
+ por xmm4, xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 12
+ pslld xmm5, 20
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 12
+ pslld xmm6, 20
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 12
+ pslld xmm7, 20
+ por xmm7, xmm8
+ paddd xmm0, xmmword ptr [rsp+0E0H]
+ paddd xmm1, xmmword ptr [rsp+50H]
+ paddd xmm2, xmmword ptr [rsp+0C0H]
+ paddd xmm3, xmmword ptr [rsp+10H]
+ paddd xmm0, xmm4
+ paddd xmm1, xmm5
+ paddd xmm2, xmm6
+ paddd xmm3, xmm7
+ pxor xmm12, xmm0
+ pxor xmm13, xmm1
+ pxor xmm14, xmm2
+ pxor xmm15, xmm3
+ movdqa xmm8, xmmword ptr [ROT8]
+ pshufb xmm12, xmm8
+ pshufb xmm13, xmm8
+ pshufb xmm14, xmm8
+ pshufb xmm15, xmm8
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm12
+ paddd xmm9, xmm13
+ paddd xmm10, xmm14
+ paddd xmm11, xmm15
+ pxor xmm4, xmm8
+ pxor xmm5, xmm9
+ pxor xmm6, xmm10
+ pxor xmm7, xmm11
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 7
+ pslld xmm4, 25
+ por xmm4, xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 7
+ pslld xmm5, 25
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 7
+ pslld xmm6, 25
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 7
+ pslld xmm7, 25
+ por xmm7, xmm8
+ paddd xmm0, xmmword ptr [rsp+0D0H]
+ paddd xmm1, xmmword ptr [rsp]
+ paddd xmm2, xmmword ptr [rsp+20H]
+ paddd xmm3, xmmword ptr [rsp+40H]
+ paddd xmm0, xmm5
+ paddd xmm1, xmm6
+ paddd xmm2, xmm7
+ paddd xmm3, xmm4
+ pxor xmm15, xmm0
+ pxor xmm12, xmm1
+ pxor xmm13, xmm2
+ pxor xmm14, xmm3
+ movdqa xmm8, xmmword ptr [ROT16]
+ pshufb xmm15, xmm8
+ pshufb xmm12, xmm8
+ pshufb xmm13, xmm8
+ pshufb xmm14, xmm8
+ paddd xmm10, xmm15
+ paddd xmm11, xmm12
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm13
+ paddd xmm9, xmm14
+ pxor xmm5, xmm10
+ pxor xmm6, xmm11
+ pxor xmm7, xmm8
+ pxor xmm4, xmm9
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 12
+ pslld xmm5, 20
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 12
+ pslld xmm6, 20
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 12
+ pslld xmm7, 20
+ por xmm7, xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 12
+ pslld xmm4, 20
+ por xmm4, xmm8
+ paddd xmm0, xmmword ptr [rsp+30H]
+ paddd xmm1, xmmword ptr [rsp+0A0H]
+ paddd xmm2, xmmword ptr [rsp+60H]
+ paddd xmm3, xmmword ptr [rsp+70H]
+ paddd xmm0, xmm5
+ paddd xmm1, xmm6
+ paddd xmm2, xmm7
+ paddd xmm3, xmm4
+ pxor xmm15, xmm0
+ pxor xmm12, xmm1
+ pxor xmm13, xmm2
+ pxor xmm14, xmm3
+ movdqa xmm8, xmmword ptr [ROT8]
+ pshufb xmm15, xmm8
+ pshufb xmm12, xmm8
+ pshufb xmm13, xmm8
+ pshufb xmm14, xmm8
+ paddd xmm10, xmm15
+ paddd xmm11, xmm12
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm13
+ paddd xmm9, xmm14
+ pxor xmm5, xmm10
+ pxor xmm6, xmm11
+ pxor xmm7, xmm8
+ pxor xmm4, xmm9
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 7
+ pslld xmm5, 25
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 7
+ pslld xmm6, 25
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 7
+ pslld xmm7, 25
+ por xmm7, xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 7
+ pslld xmm4, 25
+ por xmm4, xmm8
+ paddd xmm0, xmmword ptr [rsp+0B0H]
+ paddd xmm1, xmmword ptr [rsp+50H]
+ paddd xmm2, xmmword ptr [rsp+10H]
+ paddd xmm3, xmmword ptr [rsp+80H]
+ paddd xmm0, xmm4
+ paddd xmm1, xmm5
+ paddd xmm2, xmm6
+ paddd xmm3, xmm7
+ pxor xmm12, xmm0
+ pxor xmm13, xmm1
+ pxor xmm14, xmm2
+ pxor xmm15, xmm3
+ movdqa xmm8, xmmword ptr [ROT16]
+ pshufb xmm12, xmm8
+ pshufb xmm13, xmm8
+ pshufb xmm14, xmm8
+ pshufb xmm15, xmm8
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm12
+ paddd xmm9, xmm13
+ paddd xmm10, xmm14
+ paddd xmm11, xmm15
+ pxor xmm4, xmm8
+ pxor xmm5, xmm9
+ pxor xmm6, xmm10
+ pxor xmm7, xmm11
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 12
+ pslld xmm4, 20
+ por xmm4, xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 12
+ pslld xmm5, 20
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 12
+ pslld xmm6, 20
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 12
+ pslld xmm7, 20
+ por xmm7, xmm8
+ paddd xmm0, xmmword ptr [rsp+0F0H]
+ paddd xmm1, xmmword ptr [rsp]
+ paddd xmm2, xmmword ptr [rsp+90H]
+ paddd xmm3, xmmword ptr [rsp+60H]
+ paddd xmm0, xmm4
+ paddd xmm1, xmm5
+ paddd xmm2, xmm6
+ paddd xmm3, xmm7
+ pxor xmm12, xmm0
+ pxor xmm13, xmm1
+ pxor xmm14, xmm2
+ pxor xmm15, xmm3
+ movdqa xmm8, xmmword ptr [ROT8]
+ pshufb xmm12, xmm8
+ pshufb xmm13, xmm8
+ pshufb xmm14, xmm8
+ pshufb xmm15, xmm8
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm12
+ paddd xmm9, xmm13
+ paddd xmm10, xmm14
+ paddd xmm11, xmm15
+ pxor xmm4, xmm8
+ pxor xmm5, xmm9
+ pxor xmm6, xmm10
+ pxor xmm7, xmm11
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 7
+ pslld xmm4, 25
+ por xmm4, xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 7
+ pslld xmm5, 25
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 7
+ pslld xmm6, 25
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 7
+ pslld xmm7, 25
+ por xmm7, xmm8
+ paddd xmm0, xmmword ptr [rsp+0E0H]
+ paddd xmm1, xmmword ptr [rsp+20H]
+ paddd xmm2, xmmword ptr [rsp+30H]
+ paddd xmm3, xmmword ptr [rsp+70H]
+ paddd xmm0, xmm5
+ paddd xmm1, xmm6
+ paddd xmm2, xmm7
+ paddd xmm3, xmm4
+ pxor xmm15, xmm0
+ pxor xmm12, xmm1
+ pxor xmm13, xmm2
+ pxor xmm14, xmm3
+ movdqa xmm8, xmmword ptr [ROT16]
+ pshufb xmm15, xmm8
+ pshufb xmm12, xmm8
+ pshufb xmm13, xmm8
+ pshufb xmm14, xmm8
+ paddd xmm10, xmm15
+ paddd xmm11, xmm12
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm13
+ paddd xmm9, xmm14
+ pxor xmm5, xmm10
+ pxor xmm6, xmm11
+ pxor xmm7, xmm8
+ pxor xmm4, xmm9
+ movdqa xmmword ptr [rsp+100H], xmm8
+ movdqa xmm8, xmm5
+ psrld xmm8, 12
+ pslld xmm5, 20
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 12
+ pslld xmm6, 20
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 12
+ pslld xmm7, 20
+ por xmm7, xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 12
+ pslld xmm4, 20
+ por xmm4, xmm8
+ paddd xmm0, xmmword ptr [rsp+0A0H]
+ paddd xmm1, xmmword ptr [rsp+0C0H]
+ paddd xmm2, xmmword ptr [rsp+40H]
+ paddd xmm3, xmmword ptr [rsp+0D0H]
+ paddd xmm0, xmm5
+ paddd xmm1, xmm6
+ paddd xmm2, xmm7
+ paddd xmm3, xmm4
+ pxor xmm15, xmm0
+ pxor xmm12, xmm1
+ pxor xmm13, xmm2
+ pxor xmm14, xmm3
+ movdqa xmm8, xmmword ptr [ROT8]
+ pshufb xmm15, xmm8
+ pshufb xmm12, xmm8
+ pshufb xmm13, xmm8
+ pshufb xmm14, xmm8
+ paddd xmm10, xmm15
+ paddd xmm11, xmm12
+ movdqa xmm8, xmmword ptr [rsp+100H]
+ paddd xmm8, xmm13
+ paddd xmm9, xmm14
+ pxor xmm5, xmm10
+ pxor xmm6, xmm11
+ pxor xmm7, xmm8
+ pxor xmm4, xmm9
+ pxor xmm0, xmm8
+ pxor xmm1, xmm9
+ pxor xmm2, xmm10
+ pxor xmm3, xmm11
+ movdqa xmm8, xmm5
+ psrld xmm8, 7
+ pslld xmm5, 25
+ por xmm5, xmm8
+ movdqa xmm8, xmm6
+ psrld xmm8, 7
+ pslld xmm6, 25
+ por xmm6, xmm8
+ movdqa xmm8, xmm7
+ psrld xmm8, 7
+ pslld xmm7, 25
+ por xmm7, xmm8
+ movdqa xmm8, xmm4
+ psrld xmm8, 7
+ pslld xmm4, 25
+ por xmm4, xmm8
+ pxor xmm4, xmm12
+ pxor xmm5, xmm13
+ pxor xmm6, xmm14
+ pxor xmm7, xmm15
+ mov eax, r13d
+ jne innerloop4
+ movdqa xmm9, xmm0
+ punpckldq xmm0, xmm1
+ punpckhdq xmm9, xmm1
+ movdqa xmm11, xmm2
+ punpckldq xmm2, xmm3
+ punpckhdq xmm11, xmm3
+ movdqa xmm1, xmm0
+ punpcklqdq xmm0, xmm2
+ punpckhqdq xmm1, xmm2
+ movdqa xmm3, xmm9
+ punpcklqdq xmm9, xmm11
+ punpckhqdq xmm3, xmm11
+ movdqu xmmword ptr [rbx], xmm0
+ movdqu xmmword ptr [rbx+20H], xmm1
+ movdqu xmmword ptr [rbx+40H], xmm9
+ movdqu xmmword ptr [rbx+60H], xmm3
+ movdqa xmm9, xmm4
+ punpckldq xmm4, xmm5
+ punpckhdq xmm9, xmm5
+ movdqa xmm11, xmm6
+ punpckldq xmm6, xmm7
+ punpckhdq xmm11, xmm7
+ movdqa xmm5, xmm4
+ punpcklqdq xmm4, xmm6
+ punpckhqdq xmm5, xmm6
+ movdqa xmm7, xmm9
+ punpcklqdq xmm9, xmm11
+ punpckhqdq xmm7, xmm11
+ movdqu xmmword ptr [rbx+10H], xmm4
+ movdqu xmmword ptr [rbx+30H], xmm5
+ movdqu xmmword ptr [rbx+50H], xmm9
+ movdqu xmmword ptr [rbx+70H], xmm7
+ movdqa xmm1, xmmword ptr [rsp+110H]
+ movdqa xmm0, xmm1
+ paddd xmm1, xmmword ptr [rsp+150H]
+ movdqa xmmword ptr [rsp+110H], xmm1
+ pxor xmm0, xmmword ptr [CMP_MSB_MASK]
+ pxor xmm1, xmmword ptr [CMP_MSB_MASK]
+ pcmpgtd xmm0, xmm1
+ movdqa xmm1, xmmword ptr [rsp+120H]
+ psubd xmm1, xmm0
+ movdqa xmmword ptr [rsp+120H], xmm1
+ add rbx, 128
+ add rdi, 32
+ sub rsi, 4
+ cmp rsi, 4
+ jnc outerloop4
+ test rsi, rsi
+ jne final3blocks
+unwind:
+ movdqa xmm6, xmmword ptr [rsp+170H]
+ movdqa xmm7, xmmword ptr [rsp+180H]
+ movdqa xmm8, xmmword ptr [rsp+190H]
+ movdqa xmm9, xmmword ptr [rsp+1A0H]
+ movdqa xmm10, xmmword ptr [rsp+1B0H]
+ movdqa xmm11, xmmword ptr [rsp+1C0H]
+ movdqa xmm12, xmmword ptr [rsp+1D0H]
+ movdqa xmm13, xmmword ptr [rsp+1E0H]
+ movdqa xmm14, xmmword ptr [rsp+1F0H]
+ movdqa xmm15, xmmword ptr [rsp+200H]
+ mov rsp, rbp
+ pop rbp
+ pop rbx
+ pop rdi
+ pop rsi
+ pop r12
+ pop r13
+ pop r14
+ pop r15
+ ret
+ALIGN 16
+final3blocks:
+ test esi, 2H
+ je final1block
+ movups xmm0, xmmword ptr [rcx]
+ movups xmm1, xmmword ptr [rcx+10H]
+ movaps xmm8, xmm0
+ movaps xmm9, xmm1
+ movd xmm13, dword ptr [rsp+110H]
+ pinsrd xmm13, dword ptr [rsp+120H], 1
+ pinsrd xmm13, dword ptr [BLAKE3_BLOCK_LEN], 2
+ movaps xmmword ptr [rsp], xmm13
+ movd xmm14, dword ptr [rsp+114H]
+ pinsrd xmm14, dword ptr [rsp+124H], 1
+ pinsrd xmm14, dword ptr [BLAKE3_BLOCK_LEN], 2
+ movaps xmmword ptr [rsp+10H], xmm14
+ mov r8, qword ptr [rdi]
+ mov r9, qword ptr [rdi+8H]
+ movzx eax, byte ptr [rbp+80H]
+ or eax, r13d
+ xor edx, edx
+innerloop2:
+ mov r14d, eax
+ or eax, r12d
+ add rdx, 64
+ cmp rdx, r15
+ cmovne eax, r14d
+ movaps xmm2, xmmword ptr [BLAKE3_IV]
+ movaps xmm10, xmm2
+ movups xmm4, xmmword ptr [r8+rdx-40H]
+ movups xmm5, xmmword ptr [r8+rdx-30H]
+ movaps xmm3, xmm4
+ shufps xmm4, xmm5, 136
+ shufps xmm3, xmm5, 221
+ movaps xmm5, xmm3
+ movups xmm6, xmmword ptr [r8+rdx-20H]
+ movups xmm7, xmmword ptr [r8+rdx-10H]
+ movaps xmm3, xmm6
+ shufps xmm6, xmm7, 136
+ pshufd xmm6, xmm6, 93H
+ shufps xmm3, xmm7, 221
+ pshufd xmm7, xmm3, 93H
+ movups xmm12, xmmword ptr [r9+rdx-40H]
+ movups xmm13, xmmword ptr [r9+rdx-30H]
+ movaps xmm11, xmm12
+ shufps xmm12, xmm13, 136
+ shufps xmm11, xmm13, 221
+ movaps xmm13, xmm11
+ movups xmm14, xmmword ptr [r9+rdx-20H]
+ movups xmm15, xmmword ptr [r9+rdx-10H]
+ movaps xmm11, xmm14
+ shufps xmm14, xmm15, 136
+ pshufd xmm14, xmm14, 93H
+ shufps xmm11, xmm15, 221
+ pshufd xmm15, xmm11, 93H
+ movaps xmm3, xmmword ptr [rsp]
+ movaps xmm11, xmmword ptr [rsp+10H]
+ pinsrd xmm3, eax, 3
+ pinsrd xmm11, eax, 3
+ mov al, 7
+roundloop2:
+ paddd xmm0, xmm4
+ paddd xmm8, xmm12
+ movaps xmmword ptr [rsp+20H], xmm4
+ movaps xmmword ptr [rsp+30H], xmm12
+ paddd xmm0, xmm1
+ paddd xmm8, xmm9
+ pxor xmm3, xmm0
+ pxor xmm11, xmm8
+ movaps xmm12, xmmword ptr [ROT16]
+ pshufb xmm3, xmm12
+ pshufb xmm11, xmm12
+ paddd xmm2, xmm3
+ paddd xmm10, xmm11
+ pxor xmm1, xmm2
+ pxor xmm9, xmm10
+ movdqa xmm4, xmm1
+ pslld xmm1, 20
+ psrld xmm4, 12
+ por xmm1, xmm4
+ movdqa xmm4, xmm9
+ pslld xmm9, 20
+ psrld xmm4, 12
+ por xmm9, xmm4
+ paddd xmm0, xmm5
+ paddd xmm8, xmm13
+ movaps xmmword ptr [rsp+40H], xmm5
+ movaps xmmword ptr [rsp+50H], xmm13
+ paddd xmm0, xmm1
+ paddd xmm8, xmm9
+ pxor xmm3, xmm0
+ pxor xmm11, xmm8
+ movaps xmm13, xmmword ptr [ROT8]
+ pshufb xmm3, xmm13
+ pshufb xmm11, xmm13
+ paddd xmm2, xmm3
+ paddd xmm10, xmm11
+ pxor xmm1, xmm2
+ pxor xmm9, xmm10
+ movdqa xmm4, xmm1
+ pslld xmm1, 25
+ psrld xmm4, 7
+ por xmm1, xmm4
+ movdqa xmm4, xmm9
+ pslld xmm9, 25
+ psrld xmm4, 7
+ por xmm9, xmm4
+ pshufd xmm0, xmm0, 93H
+ pshufd xmm8, xmm8, 93H
+ pshufd xmm3, xmm3, 4EH
+ pshufd xmm11, xmm11, 4EH
+ pshufd xmm2, xmm2, 39H
+ pshufd xmm10, xmm10, 39H
+ paddd xmm0, xmm6
+ paddd xmm8, xmm14
+ paddd xmm0, xmm1
+ paddd xmm8, xmm9
+ pxor xmm3, xmm0
+ pxor xmm11, xmm8
+ pshufb xmm3, xmm12
+ pshufb xmm11, xmm12
+ paddd xmm2, xmm3
+ paddd xmm10, xmm11
+ pxor xmm1, xmm2
+ pxor xmm9, xmm10
+ movdqa xmm4, xmm1
+ pslld xmm1, 20
+ psrld xmm4, 12
+ por xmm1, xmm4
+ movdqa xmm4, xmm9
+ pslld xmm9, 20
+ psrld xmm4, 12
+ por xmm9, xmm4
+ paddd xmm0, xmm7
+ paddd xmm8, xmm15
+ paddd xmm0, xmm1
+ paddd xmm8, xmm9
+ pxor xmm3, xmm0
+ pxor xmm11, xmm8
+ pshufb xmm3, xmm13
+ pshufb xmm11, xmm13
+ paddd xmm2, xmm3
+ paddd xmm10, xmm11
+ pxor xmm1, xmm2
+ pxor xmm9, xmm10
+ movdqa xmm4, xmm1
+ pslld xmm1, 25
+ psrld xmm4, 7
+ por xmm1, xmm4
+ movdqa xmm4, xmm9
+ pslld xmm9, 25
+ psrld xmm4, 7
+ por xmm9, xmm4
+ pshufd xmm0, xmm0, 39H
+ pshufd xmm8, xmm8, 39H
+ pshufd xmm3, xmm3, 4EH
+ pshufd xmm11, xmm11, 4EH
+ pshufd xmm2, xmm2, 93H
+ pshufd xmm10, xmm10, 93H
+ dec al
+ je endroundloop2
+ movdqa xmm12, xmmword ptr [rsp+20H]
+ movdqa xmm5, xmmword ptr [rsp+40H]
+ pshufd xmm13, xmm12, 0FH
+ shufps xmm12, xmm5, 214
+ pshufd xmm4, xmm12, 39H
+ movdqa xmm12, xmm6
+ shufps xmm12, xmm7, 250
+ pblendw xmm13, xmm12, 0CCH
+ movdqa xmm12, xmm7
+ punpcklqdq xmm12, xmm5
+ pblendw xmm12, xmm6, 0C0H
+ pshufd xmm12, xmm12, 78H
+ punpckhdq xmm5, xmm7
+ punpckldq xmm6, xmm5
+ pshufd xmm7, xmm6, 1EH
+ movdqa xmmword ptr [rsp+20H], xmm13
+ movdqa xmmword ptr [rsp+40H], xmm12
+ movdqa xmm5, xmmword ptr [rsp+30H]
+ movdqa xmm13, xmmword ptr [rsp+50H]
+ pshufd xmm6, xmm5, 0FH
+ shufps xmm5, xmm13, 214
+ pshufd xmm12, xmm5, 39H
+ movdqa xmm5, xmm14
+ shufps xmm5, xmm15, 250
+ pblendw xmm6, xmm5, 0CCH
+ movdqa xmm5, xmm15
+ punpcklqdq xmm5, xmm13
+ pblendw xmm5, xmm14, 0C0H
+ pshufd xmm5, xmm5, 78H
+ punpckhdq xmm13, xmm15
+ punpckldq xmm14, xmm13
+ pshufd xmm15, xmm14, 1EH
+ movdqa xmm13, xmm6
+ movdqa xmm14, xmm5
+ movdqa xmm5, xmmword ptr [rsp+20H]
+ movdqa xmm6, xmmword ptr [rsp+40H]
+ jmp roundloop2
+endroundloop2:
+ pxor xmm0, xmm2
+ pxor xmm1, xmm3
+ pxor xmm8, xmm10
+ pxor xmm9, xmm11
+ mov eax, r13d
+ cmp rdx, r15
+ jne innerloop2
+ movups xmmword ptr [rbx], xmm0
+ movups xmmword ptr [rbx+10H], xmm1
+ movups xmmword ptr [rbx+20H], xmm8
+ movups xmmword ptr [rbx+30H], xmm9
+ movdqa xmm0, xmmword ptr [rsp+130H]
+ movdqa xmm1, xmmword ptr [rsp+110H]
+ movdqa xmm2, xmmword ptr [rsp+120H]
+ movdqu xmm3, xmmword ptr [rsp+118H]
+ movdqu xmm4, xmmword ptr [rsp+128H]
+ blendvps xmm1, xmm3, xmm0
+ blendvps xmm2, xmm4, xmm0
+ movdqa xmmword ptr [rsp+110H], xmm1
+ movdqa xmmword ptr [rsp+120H], xmm2
+ add rdi, 16
+ add rbx, 64
+ sub rsi, 2
+final1block:
+ test esi, 1H
+ je unwind
+ movups xmm0, xmmword ptr [rcx]
+ movups xmm1, xmmword ptr [rcx+10H]
+ movd xmm13, dword ptr [rsp+110H]
+ pinsrd xmm13, dword ptr [rsp+120H], 1
+ pinsrd xmm13, dword ptr [BLAKE3_BLOCK_LEN], 2
+ movaps xmm14, xmmword ptr [ROT8]
+ movaps xmm15, xmmword ptr [ROT16]
+ mov r8, qword ptr [rdi]
+ movzx eax, byte ptr [rbp+80H]
+ or eax, r13d
+ xor edx, edx
+innerloop1:
+ mov r14d, eax
+ or eax, r12d
+ add rdx, 64
+ cmp rdx, r15
+ cmovne eax, r14d
+ movaps xmm2, xmmword ptr [BLAKE3_IV]
+ movaps xmm3, xmm13
+ pinsrd xmm3, eax, 3
+ movups xmm4, xmmword ptr [r8+rdx-40H]
+ movups xmm5, xmmword ptr [r8+rdx-30H]
+ movaps xmm8, xmm4
+ shufps xmm4, xmm5, 136
+ shufps xmm8, xmm5, 221
+ movaps xmm5, xmm8
+ movups xmm6, xmmword ptr [r8+rdx-20H]
+ movups xmm7, xmmword ptr [r8+rdx-10H]
+ movaps xmm8, xmm6
+ shufps xmm6, xmm7, 136
+ pshufd xmm6, xmm6, 93H
+ shufps xmm8, xmm7, 221
+ pshufd xmm7, xmm8, 93H
+ mov al, 7
+roundloop1:
+ paddd xmm0, xmm4
+ paddd xmm0, xmm1
+ pxor xmm3, xmm0
+ pshufb xmm3, xmm15
+ paddd xmm2, xmm3
+ pxor xmm1, xmm2
+ movdqa xmm11, xmm1
+ pslld xmm1, 20
+ psrld xmm11, 12
+ por xmm1, xmm11
+ paddd xmm0, xmm5
+ paddd xmm0, xmm1
+ pxor xmm3, xmm0
+ pshufb xmm3, xmm14
+ paddd xmm2, xmm3
+ pxor xmm1, xmm2
+ movdqa xmm11, xmm1
+ pslld xmm1, 25
+ psrld xmm11, 7
+ por xmm1, xmm11
+ pshufd xmm0, xmm0, 93H
+ pshufd xmm3, xmm3, 4EH
+ pshufd xmm2, xmm2, 39H
+ paddd xmm0, xmm6
+ paddd xmm0, xmm1
+ pxor xmm3, xmm0
+ pshufb xmm3, xmm15
+ paddd xmm2, xmm3
+ pxor xmm1, xmm2
+ movdqa xmm11, xmm1
+ pslld xmm1, 20
+ psrld xmm11, 12
+ por xmm1, xmm11
+ paddd xmm0, xmm7
+ paddd xmm0, xmm1
+ pxor xmm3, xmm0
+ pshufb xmm3, xmm14
+ paddd xmm2, xmm3
+ pxor xmm1, xmm2
+ movdqa xmm11, xmm1
+ pslld xmm1, 25
+ psrld xmm11, 7
+ por xmm1, xmm11
+ pshufd xmm0, xmm0, 39H
+ pshufd xmm3, xmm3, 4EH
+ pshufd xmm2, xmm2, 93H
+ dec al
+ jz endroundloop1
+ movdqa xmm8, xmm4
+ shufps xmm8, xmm5, 214
+ pshufd xmm9, xmm4, 0FH
+ pshufd xmm4, xmm8, 39H
+ movdqa xmm8, xmm6
+ shufps xmm8, xmm7, 250
+ pblendw xmm9, xmm8, 0CCH
+ movdqa xmm8, xmm7
+ punpcklqdq xmm8, xmm5
+ pblendw xmm8, xmm6, 0C0H
+ pshufd xmm8, xmm8, 78H
+ punpckhdq xmm5, xmm7
+ punpckldq xmm6, xmm5
+ pshufd xmm7, xmm6, 1EH
+ movdqa xmm5, xmm9
+ movdqa xmm6, xmm8
+ jmp roundloop1
+endroundloop1:
+ pxor xmm0, xmm2
+ pxor xmm1, xmm3
+ mov eax, r13d
+ cmp rdx, r15
+ jne innerloop1
+ movups xmmword ptr [rbx], xmm0
+ movups xmmword ptr [rbx+10H], xmm1
+ jmp unwind
+_llvm_blake3_hash_many_sse41 ENDP
+llvm_blake3_hash_many_sse41 ENDP
+
+llvm_blake3_compress_in_place_sse41 PROC
+_llvm_blake3_compress_in_place_sse41 PROC
+ sub rsp, 120
+ movdqa xmmword ptr [rsp], xmm6
+ movdqa xmmword ptr [rsp+10H], xmm7
+ movdqa xmmword ptr [rsp+20H], xmm8
+ movdqa xmmword ptr [rsp+30H], xmm9
+ movdqa xmmword ptr [rsp+40H], xmm11
+ movdqa xmmword ptr [rsp+50H], xmm14
+ movdqa xmmword ptr [rsp+60H], xmm15
+ movups xmm0, xmmword ptr [rcx]
+ movups xmm1, xmmword ptr [rcx+10H]
+ movaps xmm2, xmmword ptr [BLAKE3_IV]
+ movzx eax, byte ptr [rsp+0A0H]
+ movzx r8d, r8b
+ shl rax, 32
+ add r8, rax
+ movd xmm3, r9
+ movd xmm4, r8
+ punpcklqdq xmm3, xmm4
+ movups xmm4, xmmword ptr [rdx]
+ movups xmm5, xmmword ptr [rdx+10H]
+ movaps xmm8, xmm4
+ shufps xmm4, xmm5, 136
+ shufps xmm8, xmm5, 221
+ movaps xmm5, xmm8
+ movups xmm6, xmmword ptr [rdx+20H]
+ movups xmm7, xmmword ptr [rdx+30H]
+ movaps xmm8, xmm6
+ shufps xmm6, xmm7, 136
+ pshufd xmm6, xmm6, 93H
+ shufps xmm8, xmm7, 221
+ pshufd xmm7, xmm8, 93H
+ movaps xmm14, xmmword ptr [ROT8]
+ movaps xmm15, xmmword ptr [ROT16]
+ mov al, 7
+@@:
+ paddd xmm0, xmm4
+ paddd xmm0, xmm1
+ pxor xmm3, xmm0
+ pshufb xmm3, xmm15
+ paddd xmm2, xmm3
+ pxor xmm1, xmm2
+ movdqa xmm11, xmm1
+ pslld xmm1, 20
+ psrld xmm11, 12
+ por xmm1, xmm11
+ paddd xmm0, xmm5
+ paddd xmm0, xmm1
+ pxor xmm3, xmm0
+ pshufb xmm3, xmm14
+ paddd xmm2, xmm3
+ pxor xmm1, xmm2
+ movdqa xmm11, xmm1
+ pslld xmm1, 25
+ psrld xmm11, 7
+ por xmm1, xmm11
+ pshufd xmm0, xmm0, 93H
+ pshufd xmm3, xmm3, 4EH
+ pshufd xmm2, xmm2, 39H
+ paddd xmm0, xmm6
+ paddd xmm0, xmm1
+ pxor xmm3, xmm0
+ pshufb xmm3, xmm15
+ paddd xmm2, xmm3
+ pxor xmm1, xmm2
+ movdqa xmm11, xmm1
+ pslld xmm1, 20
+ psrld xmm11, 12
+ por xmm1, xmm11
+ paddd xmm0, xmm7
+ paddd xmm0, xmm1
+ pxor xmm3, xmm0
+ pshufb xmm3, xmm14
+ paddd xmm2, xmm3
+ pxor xmm1, xmm2
+ movdqa xmm11, xmm1
+ pslld xmm1, 25
+ psrld xmm11, 7
+ por xmm1, xmm11
+ pshufd xmm0, xmm0, 39H
+ pshufd xmm3, xmm3, 4EH
+ pshufd xmm2, xmm2, 93H
+ dec al
+ jz @F
+ movdqa xmm8, xmm4
+ shufps xmm8, xmm5, 214
+ pshufd xmm9, xmm4, 0FH
+ pshufd xmm4, xmm8, 39H
+ movdqa xmm8, xmm6
+ shufps xmm8, xmm7, 250
+ pblendw xmm9, xmm8, 0CCH
+ movdqa xmm8, xmm7
+ punpcklqdq xmm8, xmm5
+ pblendw xmm8, xmm6, 0C0H
+ pshufd xmm8, xmm8, 78H
+ punpckhdq xmm5, xmm7
+ punpckldq xmm6, xmm5
+ pshufd xmm7, xmm6, 1EH
+ movdqa xmm5, xmm9
+ movdqa xmm6, xmm8
+ jmp @B
+@@:
+ pxor xmm0, xmm2
+ pxor xmm1, xmm3
+ movups xmmword ptr [rcx], xmm0
+ movups xmmword ptr [rcx+10H], xmm1
+ movdqa xmm6, xmmword ptr [rsp]
+ movdqa xmm7, xmmword ptr [rsp+10H]
+ movdqa xmm8, xmmword ptr [rsp+20H]
+ movdqa xmm9, xmmword ptr [rsp+30H]
+ movdqa xmm11, xmmword ptr [rsp+40H]
+ movdqa xmm14, xmmword ptr [rsp+50H]
+ movdqa xmm15, xmmword ptr [rsp+60H]
+ add rsp, 120
+ ret
+_llvm_blake3_compress_in_place_sse41 ENDP
+llvm_blake3_compress_in_place_sse41 ENDP
+
+ALIGN 16
+llvm_blake3_compress_xof_sse41 PROC
+_llvm_blake3_compress_xof_sse41 PROC
+ sub rsp, 120
+ movdqa xmmword ptr [rsp], xmm6
+ movdqa xmmword ptr [rsp+10H], xmm7
+ movdqa xmmword ptr [rsp+20H], xmm8
+ movdqa xmmword ptr [rsp+30H], xmm9
+ movdqa xmmword ptr [rsp+40H], xmm11
+ movdqa xmmword ptr [rsp+50H], xmm14
+ movdqa xmmword ptr [rsp+60H], xmm15
+ movups xmm0, xmmword ptr [rcx]
+ movups xmm1, xmmword ptr [rcx+10H]
+ movaps xmm2, xmmword ptr [BLAKE3_IV]
+ movzx eax, byte ptr [rsp+0A0H]
+ movzx r8d, r8b
+ mov r10, qword ptr [rsp+0A8H]
+ shl rax, 32
+ add r8, rax
+ movd xmm3, r9
+ movd xmm4, r8
+ punpcklqdq xmm3, xmm4
+ movups xmm4, xmmword ptr [rdx]
+ movups xmm5, xmmword ptr [rdx+10H]
+ movaps xmm8, xmm4
+ shufps xmm4, xmm5, 136
+ shufps xmm8, xmm5, 221
+ movaps xmm5, xmm8
+ movups xmm6, xmmword ptr [rdx+20H]
+ movups xmm7, xmmword ptr [rdx+30H]
+ movaps xmm8, xmm6
+ shufps xmm6, xmm7, 136
+ pshufd xmm6, xmm6, 93H
+ shufps xmm8, xmm7, 221
+ pshufd xmm7, xmm8, 93H
+ movaps xmm14, xmmword ptr [ROT8]
+ movaps xmm15, xmmword ptr [ROT16]
+ mov al, 7
+@@:
+ paddd xmm0, xmm4
+ paddd xmm0, xmm1
+ pxor xmm3, xmm0
+ pshufb xmm3, xmm15
+ paddd xmm2, xmm3
+ pxor xmm1, xmm2
+ movdqa xmm11, xmm1
+ pslld xmm1, 20
+ psrld xmm11, 12
+ por xmm1, xmm11
+ paddd xmm0, xmm5
+ paddd xmm0, xmm1
+ pxor xmm3, xmm0
+ pshufb xmm3, xmm14
+ paddd xmm2, xmm3
+ pxor xmm1, xmm2
+ movdqa xmm11, xmm1
+ pslld xmm1, 25
+ psrld xmm11, 7
+ por xmm1, xmm11
+ pshufd xmm0, xmm0, 93H
+ pshufd xmm3, xmm3, 4EH
+ pshufd xmm2, xmm2, 39H
+ paddd xmm0, xmm6
+ paddd xmm0, xmm1
+ pxor xmm3, xmm0
+ pshufb xmm3, xmm15
+ paddd xmm2, xmm3
+ pxor xmm1, xmm2
+ movdqa xmm11, xmm1
+ pslld xmm1, 20
+ psrld xmm11, 12
+ por xmm1, xmm11
+ paddd xmm0, xmm7
+ paddd xmm0, xmm1
+ pxor xmm3, xmm0
+ pshufb xmm3, xmm14
+ paddd xmm2, xmm3
+ pxor xmm1, xmm2
+ movdqa xmm11, xmm1
+ pslld xmm1, 25
+ psrld xmm11, 7
+ por xmm1, xmm11
+ pshufd xmm0, xmm0, 39H
+ pshufd xmm3, xmm3, 4EH
+ pshufd xmm2, xmm2, 93H
+ dec al
+ jz @F
+ movdqa xmm8, xmm4
+ shufps xmm8, xmm5, 214
+ pshufd xmm9, xmm4, 0FH
+ pshufd xmm4, xmm8, 39H
+ movdqa xmm8, xmm6
+ shufps xmm8, xmm7, 250
+ pblendw xmm9, xmm8, 0CCH
+ movdqa xmm8, xmm7
+ punpcklqdq xmm8, xmm5
+ pblendw xmm8, xmm6, 0C0H
+ pshufd xmm8, xmm8, 78H
+ punpckhdq xmm5, xmm7
+ punpckldq xmm6, xmm5
+ pshufd xmm7, xmm6, 1EH
+ movdqa xmm5, xmm9
+ movdqa xmm6, xmm8
+ jmp @B
+@@:
+ movdqu xmm4, xmmword ptr [rcx]
+ movdqu xmm5, xmmword ptr [rcx+10H]
+ pxor xmm0, xmm2
+ pxor xmm1, xmm3
+ pxor xmm2, xmm4
+ pxor xmm3, xmm5
+ movups xmmword ptr [r10], xmm0
+ movups xmmword ptr [r10+10H], xmm1
+ movups xmmword ptr [r10+20H], xmm2
+ movups xmmword ptr [r10+30H], xmm3
+ movdqa xmm6, xmmword ptr [rsp]
+ movdqa xmm7, xmmword ptr [rsp+10H]
+ movdqa xmm8, xmmword ptr [rsp+20H]
+ movdqa xmm9, xmmword ptr [rsp+30H]
+ movdqa xmm11, xmmword ptr [rsp+40H]
+ movdqa xmm14, xmmword ptr [rsp+50H]
+ movdqa xmm15, xmmword ptr [rsp+60H]
+ add rsp, 120
+ ret
+_llvm_blake3_compress_xof_sse41 ENDP
+llvm_blake3_compress_xof_sse41 ENDP
+
+_TEXT ENDS
+
+
+_RDATA SEGMENT READONLY PAGE ALIAS(".rdata") 'CONST'
+ALIGN 64
+BLAKE3_IV:
+ dd 6A09E667H, 0BB67AE85H, 3C6EF372H, 0A54FF53AH
+
+ADD0:
+ dd 0, 1, 2, 3
+
+ADD1:
+ dd 4 dup (4)
+
+BLAKE3_IV_0:
+ dd 4 dup (6A09E667H)
+
+BLAKE3_IV_1:
+ dd 4 dup (0BB67AE85H)
+
+BLAKE3_IV_2:
+ dd 4 dup (3C6EF372H)
+
+BLAKE3_IV_3:
+ dd 4 dup (0A54FF53AH)
+
+BLAKE3_BLOCK_LEN:
+ dd 4 dup (64)
+
+ROT16:
+ db 2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13
+
+ROT8:
+ db 1, 2, 3, 0, 5, 6, 7, 4, 9, 10, 11, 8, 13, 14, 15, 12
+
+CMP_MSB_MASK:
+ dd 8 dup(80000000H)
+
+_RDATA ENDS
+END
+
diff --git a/contrib/libs/llvm16/lib/TableGen/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/TableGen/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/TableGen/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Target/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Target/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Target/AArch64/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Target/AArch64/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/AArch64/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Target/AArch64/AsmParser/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Target/AArch64/AsmParser/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/AArch64/AsmParser/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Target/AArch64/Disassembler/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Target/AArch64/Disassembler/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/AArch64/Disassembler/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Target/AArch64/MCTargetDesc/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Target/AArch64/MCTargetDesc/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/AArch64/MCTargetDesc/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Target/AArch64/TargetInfo/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Target/AArch64/TargetInfo/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/AArch64/TargetInfo/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Target/AArch64/Utils/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Target/AArch64/Utils/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/AArch64/Utils/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Target/ARM/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Target/ARM/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/ARM/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Target/ARM/AsmParser/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Target/ARM/AsmParser/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/ARM/AsmParser/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Target/ARM/Disassembler/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Target/ARM/Disassembler/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/ARM/Disassembler/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Target/ARM/MCTargetDesc/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Target/ARM/MCTargetDesc/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/ARM/MCTargetDesc/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Target/ARM/TargetInfo/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Target/ARM/TargetInfo/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/ARM/TargetInfo/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Target/ARM/Utils/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Target/ARM/Utils/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/ARM/Utils/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Target/BPF/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Target/BPF/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/BPF/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Target/BPF/AsmParser/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Target/BPF/AsmParser/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/BPF/AsmParser/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Target/BPF/Disassembler/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Target/BPF/Disassembler/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/BPF/Disassembler/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Target/BPF/MCTargetDesc/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Target/BPF/MCTargetDesc/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/BPF/MCTargetDesc/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Target/BPF/TargetInfo/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Target/BPF/TargetInfo/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/BPF/TargetInfo/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Target/LoongArch/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Target/LoongArch/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/LoongArch/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Target/LoongArch/AsmParser/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Target/LoongArch/AsmParser/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/LoongArch/AsmParser/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Target/LoongArch/Disassembler/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Target/LoongArch/Disassembler/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/LoongArch/Disassembler/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Target/LoongArch/MCTargetDesc/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Target/LoongArch/MCTargetDesc/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/LoongArch/MCTargetDesc/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Target/LoongArch/TargetInfo/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Target/LoongArch/TargetInfo/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/LoongArch/TargetInfo/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Target/NVPTX/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Target/NVPTX/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/NVPTX/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Target/NVPTX/MCTargetDesc/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Target/NVPTX/MCTargetDesc/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/NVPTX/MCTargetDesc/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Target/NVPTX/TargetInfo/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Target/NVPTX/TargetInfo/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/NVPTX/TargetInfo/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Target/PowerPC/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Target/PowerPC/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..00f365fa29
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/PowerPC/.yandex_meta/licenses.list.txt
@@ -0,0 +1,588 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https)//llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier) Apache-2.0 WITH LLVM-exception
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+
+====================File: LICENSE.TXT====================
+==============================================================================
+The LLVM Project is under the Apache License v2.0 with LLVM Exceptions:
+==============================================================================
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
+---- LLVM Exceptions to the Apache 2.0 License ----
+
+As an exception, if, as a result of your compiling your source code, portions
+of this Software are embedded into an Object form of such source code, you
+may redistribute such embedded portions in such Object form without complying
+with the conditions of Sections 4(a), 4(b) and 4(d) of the License.
+
+In addition, if you combine or link compiled forms of this Software with
+software that is licensed under the GPLv2 ("Combined Software") and if a
+court of competent jurisdiction determines that the patent provision (Section
+3), the indemnity provision (Section 9) or other Section of the License
+conflicts with the conditions of the GPLv2, you may retroactively and
+prospectively choose to deem waived or otherwise exclude such Section(s) of
+the License, but only in their entirety and only with respect to the Combined
+Software.
+
+==============================================================================
+Software from third parties included in the LLVM Project:
+==============================================================================
+The LLVM Project contains third party software which is under different license
+terms. All such code will be identified clearly using at least one of two
+mechanisms:
+1) It will be in a separate directory tree with its own `LICENSE.txt` or
+ `LICENSE` file at the top containing the specific license and restrictions
+ which apply to that software, or
+2) It will contain specific license and restriction terms at the top of every
+ file.
+
+==============================================================================
+Legacy LLVM License (https://llvm.org/docs/DeveloperPolicy.html#legacy):
+==============================================================================
+University of Illinois/NCSA
+Open Source License
+
+Copyright (c) 2003-2019 University of Illinois at Urbana-Champaign.
+All rights reserved.
+
+Developed by:
+
+ LLVM Team
+
+ University of Illinois at Urbana-Champaign
+
+ http://llvm.org
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal with
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimers.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimers in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the names of the LLVM Team, University of Illinois at
+ Urbana-Champaign, nor the names of its contributors may be used to
+ endorse or promote products derived from this Software without specific
+ prior written permission.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+SOFTWARE.
+
+
+
+====================File: include/llvm/Support/LICENSE.TXT====================
+LLVM System Interface Library
+-------------------------------------------------------------------------------
+
+Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+See https://llvm.org/LICENSE.txt for license information.
+SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+
+====================File: tools/polly/LICENSE.TXT====================
+==============================================================================
+The LLVM Project is under the Apache License v2.0 with LLVM Exceptions:
+==============================================================================
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
+---- LLVM Exceptions to the Apache 2.0 License ----
+
+As an exception, if, as a result of your compiling your source code, portions
+of this Software are embedded into an Object form of such source code, you
+may redistribute such embedded portions in such Object form without complying
+with the conditions of Sections 4(a), 4(b) and 4(d) of the License.
+
+In addition, if you combine or link compiled forms of this Software with
+software that is licensed under the GPLv2 ("Combined Software") and if a
+court of competent jurisdiction determines that the patent provision (Section
+3), the indemnity provision (Section 9) or other Section of the License
+conflicts with the conditions of the GPLv2, you may retroactively and
+prospectively choose to deem waived or otherwise exclude such Section(s) of
+the License, but only in their entirety and only with respect to the Combined
+Software.
+
+==============================================================================
+Software from third parties included in the LLVM Project:
+==============================================================================
+The LLVM Project contains third party software which is under different license
+terms. All such code will be identified clearly using at least one of two
+mechanisms:
+1) It will be in a separate directory tree with its own `LICENSE.txt` or
+ `LICENSE` file at the top containing the specific license and restrictions
+ which apply to that software, or
+2) It will contain specific license and restriction terms at the top of every
+ file.
+
+==============================================================================
+Legacy LLVM License (https://llvm.org/docs/DeveloperPolicy.html#legacy):
+==============================================================================
+University of Illinois/NCSA
+Open Source License
+
+Copyright (c) 2009-2019 Polly Team
+All rights reserved.
+
+Developed by:
+
+ Polly Team
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal with
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimers.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimers in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the names of the Polly Team, copyright holders, nor the names of
+ its contributors may be used to endorse or promote products derived from
+ this Software without specific prior written permission.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+SOFTWARE.
+
+
+====================NCSA====================
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
diff --git a/contrib/libs/llvm16/lib/Target/PowerPC/AsmParser/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Target/PowerPC/AsmParser/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/PowerPC/AsmParser/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Target/PowerPC/Disassembler/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Target/PowerPC/Disassembler/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/PowerPC/Disassembler/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Target/PowerPC/MCTargetDesc/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Target/PowerPC/MCTargetDesc/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/PowerPC/MCTargetDesc/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Target/PowerPC/TargetInfo/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Target/PowerPC/TargetInfo/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/PowerPC/TargetInfo/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Target/RISCV/GISel/RISCVCallLowering.cpp b/contrib/libs/llvm16/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
new file mode 100644
index 0000000000..d265f3a12b
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
@@ -0,0 +1,51 @@
+//===-- RISCVCallLowering.cpp - Call lowering -------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// This file implements the lowering of LLVM calls to machine code calls for
+/// GlobalISel.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RISCVCallLowering.h"
+#include "RISCVISelLowering.h"
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+
+using namespace llvm;
+
+RISCVCallLowering::RISCVCallLowering(const RISCVTargetLowering &TLI)
+ : CallLowering(&TLI) {}
+
+bool RISCVCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
+ const Value *Val, ArrayRef<Register> VRegs,
+ FunctionLoweringInfo &FLI) const {
+
+ MachineInstrBuilder Ret = MIRBuilder.buildInstrNoInsert(RISCV::PseudoRET);
+
+ if (Val != nullptr) {
+ return false;
+ }
+ MIRBuilder.insertInstr(Ret);
+ return true;
+}
+
+bool RISCVCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
+ const Function &F,
+ ArrayRef<ArrayRef<Register>> VRegs,
+ FunctionLoweringInfo &FLI) const {
+
+ if (F.arg_empty())
+ return true;
+
+ return false;
+}
+
+bool RISCVCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
+ CallLoweringInfo &Info) const {
+ return false;
+}
diff --git a/contrib/libs/llvm16/lib/Target/RISCV/GISel/RISCVCallLowering.h b/contrib/libs/llvm16/lib/Target/RISCV/GISel/RISCVCallLowering.h
new file mode 100644
index 0000000000..cd7fc4c761
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/RISCV/GISel/RISCVCallLowering.h
@@ -0,0 +1,44 @@
+//===-- RISCVCallLowering.h - Call lowering ---------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// This file describes how to lower LLVM calls to machine code calls.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_RISCV_RISCVCALLLOWERING_H
+#define LLVM_LIB_TARGET_RISCV_RISCVCALLLOWERING_H
+
+#include "llvm/CodeGen/CallingConvLower.h"
+#include "llvm/CodeGen/GlobalISel/CallLowering.h"
+#include "llvm/CodeGen/ValueTypes.h"
+
+namespace llvm {
+
+class RISCVTargetLowering;
+
+class RISCVCallLowering : public CallLowering {
+
+public:
+ RISCVCallLowering(const RISCVTargetLowering &TLI);
+
+ bool lowerReturn(MachineIRBuilder &MIRBuiler, const Value *Val,
+ ArrayRef<Register> VRegs,
+ FunctionLoweringInfo &FLI) const override;
+
+ bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F,
+ ArrayRef<ArrayRef<Register>> VRegs,
+ FunctionLoweringInfo &FLI) const override;
+
+ bool lowerCall(MachineIRBuilder &MIRBuilder,
+ CallLoweringInfo &Info) const override;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_LIB_TARGET_RISCV_RISCVCALLLOWERING_H
diff --git a/contrib/libs/llvm16/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp b/contrib/libs/llvm16/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
new file mode 100644
index 0000000000..8dfd71ac0b
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
@@ -0,0 +1,103 @@
+//===-- RISCVInstructionSelector.cpp -----------------------------*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file implements the targeting of the InstructionSelector class for
+/// RISCV.
+/// \todo This should be generated by TableGen.
+//===----------------------------------------------------------------------===//
+
+#include "RISCVRegisterBankInfo.h"
+#include "RISCVSubtarget.h"
+#include "RISCVTargetMachine.h"
+#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
+#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
+#include "llvm/IR/IntrinsicsRISCV.h"
+#include "llvm/Support/Debug.h"
+
+#define DEBUG_TYPE "riscv-isel"
+
+using namespace llvm;
+
+#define GET_GLOBALISEL_PREDICATE_BITSET
+#include "RISCVGenGlobalISel.inc"
+#undef GET_GLOBALISEL_PREDICATE_BITSET
+
+namespace {
+
+class RISCVInstructionSelector : public InstructionSelector {
+public:
+ RISCVInstructionSelector(const RISCVTargetMachine &TM,
+ const RISCVSubtarget &STI,
+ const RISCVRegisterBankInfo &RBI);
+
+ bool select(MachineInstr &I) override;
+ static const char *getName() { return DEBUG_TYPE; }
+
+private:
+ bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;
+
+ const RISCVSubtarget &STI;
+ const RISCVInstrInfo &TII;
+ const RISCVRegisterInfo &TRI;
+ const RISCVRegisterBankInfo &RBI;
+
+ // FIXME: This is necessary because DAGISel uses "Subtarget->" and GlobalISel
+ // uses "STI." in the code generated by TableGen. We need to unify the name of
+ // Subtarget variable.
+ const RISCVSubtarget *Subtarget = &STI;
+
+#define GET_GLOBALISEL_PREDICATES_DECL
+#include "RISCVGenGlobalISel.inc"
+#undef GET_GLOBALISEL_PREDICATES_DECL
+
+#define GET_GLOBALISEL_TEMPORARIES_DECL
+#include "RISCVGenGlobalISel.inc"
+#undef GET_GLOBALISEL_TEMPORARIES_DECL
+};
+
+} // end anonymous namespace
+
+#define GET_GLOBALISEL_IMPL
+#include "RISCVGenGlobalISel.inc"
+#undef GET_GLOBALISEL_IMPL
+
+RISCVInstructionSelector::RISCVInstructionSelector(
+ const RISCVTargetMachine &TM, const RISCVSubtarget &STI,
+ const RISCVRegisterBankInfo &RBI)
+ : STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI),
+
+#define GET_GLOBALISEL_PREDICATES_INIT
+#include "RISCVGenGlobalISel.inc"
+#undef GET_GLOBALISEL_PREDICATES_INIT
+#define GET_GLOBALISEL_TEMPORARIES_INIT
+#include "RISCVGenGlobalISel.inc"
+#undef GET_GLOBALISEL_TEMPORARIES_INIT
+{
+}
+
+bool RISCVInstructionSelector::select(MachineInstr &I) {
+
+ if (!isPreISelGenericOpcode(I.getOpcode())) {
+ // Certain non-generic instructions also need some special handling.
+ return true;
+ }
+
+ if (selectImpl(I, *CoverageInfo))
+ return true;
+
+ return false;
+}
+
+namespace llvm {
+InstructionSelector *
+createRISCVInstructionSelector(const RISCVTargetMachine &TM,
+ RISCVSubtarget &Subtarget,
+ RISCVRegisterBankInfo &RBI) {
+ return new RISCVInstructionSelector(TM, Subtarget, RBI);
+}
+} // end namespace llvm
diff --git a/contrib/libs/llvm16/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/contrib/libs/llvm16/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
new file mode 100644
index 0000000000..f6256defe5
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -0,0 +1,23 @@
+//===-- RISCVLegalizerInfo.cpp ----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file implements the targeting of the Machinelegalizer class for RISCV.
+/// \todo This should be generated by TableGen.
+//===----------------------------------------------------------------------===//
+
+#include "RISCVLegalizerInfo.h"
+#include "llvm/CodeGen/TargetOpcodes.h"
+#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Type.h"
+
+using namespace llvm;
+
+RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST) {
+ getLegacyLegalizerInfo().computeTables();
+}
diff --git a/contrib/libs/llvm16/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h b/contrib/libs/llvm16/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
new file mode 100644
index 0000000000..f2c2b9a3fd
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
@@ -0,0 +1,28 @@
+//===-- RISCVLegalizerInfo.h ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file declares the targeting of the Machinelegalizer class for RISCV.
+/// \todo This should be generated by TableGen.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_RISCV_RISCVMACHINELEGALIZER_H
+#define LLVM_LIB_TARGET_RISCV_RISCVMACHINELEGALIZER_H
+
+#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
+
+namespace llvm {
+
+class RISCVSubtarget;
+
+/// This class provides the information for the target register banks.
+class RISCVLegalizerInfo : public LegalizerInfo {
+public:
+ RISCVLegalizerInfo(const RISCVSubtarget &ST);
+};
+} // end namespace llvm
+#endif
diff --git a/contrib/libs/llvm16/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp b/contrib/libs/llvm16/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
new file mode 100644
index 0000000000..5371b790a1
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
@@ -0,0 +1,25 @@
+//===-- RISCVRegisterBankInfo.cpp -------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file implements the targeting of the RegisterBankInfo class for RISCV.
+/// \todo This should be generated by TableGen.
+//===----------------------------------------------------------------------===//
+
+#include "RISCVRegisterBankInfo.h"
+#include "MCTargetDesc/RISCVMCTargetDesc.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/RegisterBank.h"
+#include "llvm/CodeGen/RegisterBankInfo.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+
+#define GET_TARGET_REGBANK_IMPL
+#include "RISCVGenRegisterBank.inc"
+
+using namespace llvm;
+
+RISCVRegisterBankInfo::RISCVRegisterBankInfo(const TargetRegisterInfo &TRI) {}
diff --git a/contrib/libs/llvm16/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.h b/contrib/libs/llvm16/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.h
new file mode 100644
index 0000000000..194a1548af
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.h
@@ -0,0 +1,37 @@
+//===-- RISCVRegisterBankInfo.h ---------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file declares the targeting of the RegisterBankInfo class for RISCV.
+/// \todo This should be generated by TableGen.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_RISCV_RISCVREGISTERBANKINFO_H
+#define LLVM_LIB_TARGET_RISCV_RISCVREGISTERBANKINFO_H
+
+#include "llvm/CodeGen/RegisterBankInfo.h"
+
+#define GET_REGBANK_DECLARATIONS
+#include "RISCVGenRegisterBank.inc"
+
+namespace llvm {
+
+class TargetRegisterInfo;
+
+class RISCVGenRegisterBankInfo : public RegisterBankInfo {
+protected:
+#define GET_TARGET_REGBANK_CLASS
+#include "RISCVGenRegisterBank.inc"
+};
+
+/// This class provides the information for the target register banks.
+class RISCVRegisterBankInfo final : public RISCVGenRegisterBankInfo {
+public:
+ RISCVRegisterBankInfo(const TargetRegisterInfo &TRI);
+};
+} // end namespace llvm
+#endif
diff --git a/contrib/libs/llvm16/lib/Target/RISCV/RISCV.h b/contrib/libs/llvm16/lib/Target/RISCV/RISCV.h
new file mode 100644
index 0000000000..c42fb070aa
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/RISCV/RISCV.h
@@ -0,0 +1,80 @@
+//===-- RISCV.h - Top-level interface for RISCV -----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the entry points for global functions defined in the LLVM
+// RISC-V back-end.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_RISCV_RISCV_H
+#define LLVM_LIB_TARGET_RISCV_RISCV_H
+
+#include "MCTargetDesc/RISCVBaseInfo.h"
+#include "llvm/Target/TargetMachine.h"
+
+namespace llvm {
+class AsmPrinter;
+class FunctionPass;
+class InstructionSelector;
+class MCInst;
+class MCOperand;
+class MachineInstr;
+class MachineOperand;
+class PassRegistry;
+class RISCVRegisterBankInfo;
+class RISCVSubtarget;
+class RISCVTargetMachine;
+
+FunctionPass *createRISCVCodeGenPreparePass();
+void initializeRISCVCodeGenPreparePass(PassRegistry &);
+
+bool lowerRISCVMachineInstrToMCInst(const MachineInstr *MI, MCInst &OutMI,
+ AsmPrinter &AP);
+bool lowerRISCVMachineOperandToMCOperand(const MachineOperand &MO,
+ MCOperand &MCOp, const AsmPrinter &AP);
+
+FunctionPass *createRISCVISelDag(RISCVTargetMachine &TM,
+ CodeGenOpt::Level OptLevel);
+
+FunctionPass *createRISCVMakeCompressibleOptPass();
+void initializeRISCVMakeCompressibleOptPass(PassRegistry &);
+
+FunctionPass *createRISCVGatherScatterLoweringPass();
+void initializeRISCVGatherScatterLoweringPass(PassRegistry &);
+
+FunctionPass *createRISCVSExtWRemovalPass();
+void initializeRISCVSExtWRemovalPass(PassRegistry &);
+
+FunctionPass *createRISCVStripWSuffixPass();
+void initializeRISCVStripWSuffixPass(PassRegistry &);
+
+FunctionPass *createRISCVMergeBaseOffsetOptPass();
+void initializeRISCVMergeBaseOffsetOptPass(PassRegistry &);
+
+FunctionPass *createRISCVExpandPseudoPass();
+void initializeRISCVExpandPseudoPass(PassRegistry &);
+
+FunctionPass *createRISCVPreRAExpandPseudoPass();
+void initializeRISCVPreRAExpandPseudoPass(PassRegistry &);
+
+FunctionPass *createRISCVExpandAtomicPseudoPass();
+void initializeRISCVExpandAtomicPseudoPass(PassRegistry &);
+
+FunctionPass *createRISCVInsertVSETVLIPass();
+void initializeRISCVInsertVSETVLIPass(PassRegistry &);
+
+FunctionPass *createRISCVRedundantCopyEliminationPass();
+void initializeRISCVRedundantCopyEliminationPass(PassRegistry &);
+
+InstructionSelector *createRISCVInstructionSelector(const RISCVTargetMachine &,
+ RISCVSubtarget &,
+ RISCVRegisterBankInfo &);
+void initializeRISCVDAGToDAGISelPass(PassRegistry &);
+} // namespace llvm
+
+#endif
diff --git a/contrib/libs/llvm16/lib/Target/RISCV/RISCVAsmPrinter.cpp b/contrib/libs/llvm16/lib/Target/RISCV/RISCVAsmPrinter.cpp
new file mode 100644
index 0000000000..a4b999e6aa
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/RISCV/RISCVAsmPrinter.cpp
@@ -0,0 +1,467 @@
+//===-- RISCVAsmPrinter.cpp - RISCV LLVM assembly writer ------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains a printer that converts from our internal representation
+// of machine-dependent LLVM code to the RISCV assembly language.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/RISCVInstPrinter.h"
+#include "MCTargetDesc/RISCVMCExpr.h"
+#include "MCTargetDesc/RISCVTargetStreamer.h"
+#include "RISCV.h"
+#include "RISCVMachineFunctionInfo.h"
+#include "RISCVTargetMachine.h"
+#include "TargetInfo/RISCVTargetInfo.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/BinaryFormat/ELF.h"
+#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/CodeGen/MachineConstantPool.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCInstBuilder.h"
+#include "llvm/MC/MCObjectFileInfo.h"
+#include "llvm/MC/MCSectionELF.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/MC/TargetRegistry.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Transforms/Instrumentation/HWAddressSanitizer.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "asm-printer"
+
+STATISTIC(RISCVNumInstrsCompressed,
+ "Number of RISC-V Compressed instructions emitted");
+
+namespace {
+class RISCVAsmPrinter : public AsmPrinter {
+ const RISCVSubtarget *STI;
+
+public:
+ explicit RISCVAsmPrinter(TargetMachine &TM,
+ std::unique_ptr<MCStreamer> Streamer)
+ : AsmPrinter(TM, std::move(Streamer)) {}
+
+ StringRef getPassName() const override { return "RISCV Assembly Printer"; }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+ void emitInstruction(const MachineInstr *MI) override;
+
+ bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
+ const char *ExtraCode, raw_ostream &OS) override;
+ bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
+ const char *ExtraCode, raw_ostream &OS) override;
+
+ void EmitToStreamer(MCStreamer &S, const MCInst &Inst);
+ bool emitPseudoExpansionLowering(MCStreamer &OutStreamer,
+ const MachineInstr *MI);
+
+ typedef std::tuple<unsigned, uint32_t> HwasanMemaccessTuple;
+ std::map<HwasanMemaccessTuple, MCSymbol *> HwasanMemaccessSymbols;
+ void LowerHWASAN_CHECK_MEMACCESS(const MachineInstr &MI);
+ void EmitHwasanMemaccessSymbols(Module &M);
+
+ // Wrapper needed for tblgenned pseudo lowering.
+ bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp) const {
+ return lowerRISCVMachineOperandToMCOperand(MO, MCOp, *this);
+ }
+
+ void emitStartOfAsmFile(Module &M) override;
+ void emitEndOfAsmFile(Module &M) override;
+
+ void emitFunctionEntryLabel() override;
+
+private:
+ void emitAttributes();
+};
+}
+
+void RISCVAsmPrinter::EmitToStreamer(MCStreamer &S, const MCInst &Inst) {
+ MCInst CInst;
+ bool Res = RISCVRVC::compress(CInst, Inst, *STI);
+ if (Res)
+ ++RISCVNumInstrsCompressed;
+ AsmPrinter::EmitToStreamer(*OutStreamer, Res ? CInst : Inst);
+}
+
+// Simple pseudo-instructions have their lowering (with expansion to real
+// instructions) auto-generated.
+#include "RISCVGenMCPseudoLowering.inc"
+
+void RISCVAsmPrinter::emitInstruction(const MachineInstr *MI) {
+ RISCV_MC::verifyInstructionPredicates(MI->getOpcode(),
+ getSubtargetInfo().getFeatureBits());
+
+ // Do any auto-generated pseudo lowerings.
+ if (emitPseudoExpansionLowering(*OutStreamer, MI))
+ return;
+
+
+ switch (MI->getOpcode()) {
+ case RISCV::HWASAN_CHECK_MEMACCESS_SHORTGRANULES:
+ LowerHWASAN_CHECK_MEMACCESS(*MI);
+ return;
+ }
+
+ MCInst TmpInst;
+ if (!lowerRISCVMachineInstrToMCInst(MI, TmpInst, *this))
+ EmitToStreamer(*OutStreamer, TmpInst);
+}
+
+bool RISCVAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
+ const char *ExtraCode, raw_ostream &OS) {
+ // First try the generic code, which knows about modifiers like 'c' and 'n'.
+ if (!AsmPrinter::PrintAsmOperand(MI, OpNo, ExtraCode, OS))
+ return false;
+
+ const MachineOperand &MO = MI->getOperand(OpNo);
+ if (ExtraCode && ExtraCode[0]) {
+ if (ExtraCode[1] != 0)
+ return true; // Unknown modifier.
+
+ switch (ExtraCode[0]) {
+ default:
+ return true; // Unknown modifier.
+ case 'z': // Print zero register if zero, regular printing otherwise.
+ if (MO.isImm() && MO.getImm() == 0) {
+ OS << RISCVInstPrinter::getRegisterName(RISCV::X0);
+ return false;
+ }
+ break;
+ case 'i': // Literal 'i' if operand is not a register.
+ if (!MO.isReg())
+ OS << 'i';
+ return false;
+ }
+ }
+
+ switch (MO.getType()) {
+ case MachineOperand::MO_Immediate:
+ OS << MO.getImm();
+ return false;
+ case MachineOperand::MO_Register:
+ OS << RISCVInstPrinter::getRegisterName(MO.getReg());
+ return false;
+ case MachineOperand::MO_GlobalAddress:
+ PrintSymbolOperand(MO, OS);
+ return false;
+ case MachineOperand::MO_BlockAddress: {
+ MCSymbol *Sym = GetBlockAddressSymbol(MO.getBlockAddress());
+ Sym->print(OS, MAI);
+ return false;
+ }
+ default:
+ break;
+ }
+
+ return true;
+}
+
+bool RISCVAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
+ unsigned OpNo,
+ const char *ExtraCode,
+ raw_ostream &OS) {
+ if (!ExtraCode) {
+ const MachineOperand &MO = MI->getOperand(OpNo);
+    // For now, we only support memory operands that are plain registers and
+    // assume there is no addend
+ if (!MO.isReg())
+ return true;
+
+ OS << "0(" << RISCVInstPrinter::getRegisterName(MO.getReg()) << ")";
+ return false;
+ }
+
+ return AsmPrinter::PrintAsmMemoryOperand(MI, OpNo, ExtraCode, OS);
+}
+
+bool RISCVAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
+ STI = &MF.getSubtarget<RISCVSubtarget>();
+
+ SetupMachineFunction(MF);
+ emitFunctionBody();
+ return false;
+}
+
+void RISCVAsmPrinter::emitStartOfAsmFile(Module &M) {
+ RISCVTargetStreamer &RTS =
+ static_cast<RISCVTargetStreamer &>(*OutStreamer->getTargetStreamer());
+ if (const MDString *ModuleTargetABI =
+ dyn_cast_or_null<MDString>(M.getModuleFlag("target-abi")))
+ RTS.setTargetABI(RISCVABI::getTargetABI(ModuleTargetABI->getString()));
+ if (TM.getTargetTriple().isOSBinFormatELF())
+ emitAttributes();
+}
+
+void RISCVAsmPrinter::emitEndOfAsmFile(Module &M) {
+ RISCVTargetStreamer &RTS =
+ static_cast<RISCVTargetStreamer &>(*OutStreamer->getTargetStreamer());
+
+ if (TM.getTargetTriple().isOSBinFormatELF())
+ RTS.finishAttributeSection();
+ EmitHwasanMemaccessSymbols(M);
+}
+
+void RISCVAsmPrinter::emitAttributes() {
+ RISCVTargetStreamer &RTS =
+ static_cast<RISCVTargetStreamer &>(*OutStreamer->getTargetStreamer());
+ // Use MCSubtargetInfo from TargetMachine. Individual functions may have
+ // attributes that differ from other functions in the module and we have no
+ // way to know which function is correct.
+ RTS.emitTargetAttributes(*TM.getMCSubtargetInfo());
+}
+
+void RISCVAsmPrinter::emitFunctionEntryLabel() {
+ const auto *RMFI = MF->getInfo<RISCVMachineFunctionInfo>();
+ if (RMFI->isVectorCall()) {
+ auto &RTS =
+ static_cast<RISCVTargetStreamer &>(*OutStreamer->getTargetStreamer());
+ RTS.emitDirectiveVariantCC(*CurrentFnSym);
+ }
+ return AsmPrinter::emitFunctionEntryLabel();
+}
+
+// Force static initialization.
+extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeRISCVAsmPrinter() {
+ RegisterAsmPrinter<RISCVAsmPrinter> X(getTheRISCV32Target());
+ RegisterAsmPrinter<RISCVAsmPrinter> Y(getTheRISCV64Target());
+}
+
+void RISCVAsmPrinter::LowerHWASAN_CHECK_MEMACCESS(const MachineInstr &MI) {
+ Register Reg = MI.getOperand(0).getReg();
+ uint32_t AccessInfo = MI.getOperand(1).getImm();
+ MCSymbol *&Sym =
+ HwasanMemaccessSymbols[HwasanMemaccessTuple(Reg, AccessInfo)];
+ if (!Sym) {
+ // FIXME: Make this work on non-ELF.
+ if (!TM.getTargetTriple().isOSBinFormatELF())
+ report_fatal_error("llvm.hwasan.check.memaccess only supported on ELF");
+
+ std::string SymName = "__hwasan_check_x" + utostr(Reg - RISCV::X0) + "_" +
+ utostr(AccessInfo) + "_short";
+ Sym = OutContext.getOrCreateSymbol(SymName);
+ }
+ auto Res = MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_None, OutContext);
+ auto Expr = RISCVMCExpr::create(Res, RISCVMCExpr::VK_RISCV_CALL, OutContext);
+
+ EmitToStreamer(*OutStreamer, MCInstBuilder(RISCV::PseudoCALL).addExpr(Expr));
+}
+
+void RISCVAsmPrinter::EmitHwasanMemaccessSymbols(Module &M) {
+ if (HwasanMemaccessSymbols.empty())
+ return;
+
+ assert(TM.getTargetTriple().isOSBinFormatELF());
+ // Use MCSubtargetInfo from TargetMachine. Individual functions may have
+ // attributes that differ from other functions in the module and we have no
+ // way to know which function is correct.
+ const MCSubtargetInfo &MCSTI = *TM.getMCSubtargetInfo();
+
+ MCSymbol *HwasanTagMismatchV2Sym =
+ OutContext.getOrCreateSymbol("__hwasan_tag_mismatch_v2");
+ // Annotate symbol as one having incompatible calling convention, so
+ // run-time linkers can instead eagerly bind this function.
+ auto &RTS =
+ static_cast<RISCVTargetStreamer &>(*OutStreamer->getTargetStreamer());
+ RTS.emitDirectiveVariantCC(*HwasanTagMismatchV2Sym);
+
+ const MCSymbolRefExpr *HwasanTagMismatchV2Ref =
+ MCSymbolRefExpr::create(HwasanTagMismatchV2Sym, OutContext);
+ auto Expr = RISCVMCExpr::create(HwasanTagMismatchV2Ref,
+ RISCVMCExpr::VK_RISCV_CALL, OutContext);
+
+ for (auto &P : HwasanMemaccessSymbols) {
+ unsigned Reg = std::get<0>(P.first);
+ uint32_t AccessInfo = std::get<1>(P.first);
+ MCSymbol *Sym = P.second;
+
+ unsigned Size =
+ 1 << ((AccessInfo >> HWASanAccessInfo::AccessSizeShift) & 0xf);
+ OutStreamer->switchSection(OutContext.getELFSection(
+ ".text.hot", ELF::SHT_PROGBITS,
+ ELF::SHF_EXECINSTR | ELF::SHF_ALLOC | ELF::SHF_GROUP, 0, Sym->getName(),
+ /*IsComdat=*/true));
+
+ OutStreamer->emitSymbolAttribute(Sym, MCSA_ELF_TypeFunction);
+ OutStreamer->emitSymbolAttribute(Sym, MCSA_Weak);
+ OutStreamer->emitSymbolAttribute(Sym, MCSA_Hidden);
+ OutStreamer->emitLabel(Sym);
+
+ // Extract shadow offset from ptr
+ OutStreamer->emitInstruction(
+ MCInstBuilder(RISCV::SLLI).addReg(RISCV::X6).addReg(Reg).addImm(8),
+ MCSTI);
+ OutStreamer->emitInstruction(MCInstBuilder(RISCV::SRLI)
+ .addReg(RISCV::X6)
+ .addReg(RISCV::X6)
+ .addImm(12),
+ MCSTI);
+ // load shadow tag in X6, X5 contains shadow base
+ OutStreamer->emitInstruction(MCInstBuilder(RISCV::ADD)
+ .addReg(RISCV::X6)
+ .addReg(RISCV::X5)
+ .addReg(RISCV::X6),
+ MCSTI);
+ OutStreamer->emitInstruction(
+ MCInstBuilder(RISCV::LBU).addReg(RISCV::X6).addReg(RISCV::X6).addImm(0),
+ MCSTI);
+ // Extract tag from X5 and compare it with loaded tag from shadow
+ OutStreamer->emitInstruction(
+ MCInstBuilder(RISCV::SRLI).addReg(RISCV::X7).addReg(Reg).addImm(56),
+ MCSTI);
+ MCSymbol *HandleMismatchOrPartialSym = OutContext.createTempSymbol();
+ // X7 contains tag from memory, while X6 contains tag from the pointer
+ OutStreamer->emitInstruction(
+ MCInstBuilder(RISCV::BNE)
+ .addReg(RISCV::X7)
+ .addReg(RISCV::X6)
+ .addExpr(MCSymbolRefExpr::create(HandleMismatchOrPartialSym,
+ OutContext)),
+ MCSTI);
+ MCSymbol *ReturnSym = OutContext.createTempSymbol();
+ OutStreamer->emitLabel(ReturnSym);
+ OutStreamer->emitInstruction(MCInstBuilder(RISCV::JALR)
+ .addReg(RISCV::X0)
+ .addReg(RISCV::X1)
+ .addImm(0),
+ MCSTI);
+ OutStreamer->emitLabel(HandleMismatchOrPartialSym);
+
+ OutStreamer->emitInstruction(MCInstBuilder(RISCV::ADDI)
+ .addReg(RISCV::X28)
+ .addReg(RISCV::X0)
+ .addImm(16),
+ MCSTI);
+ MCSymbol *HandleMismatchSym = OutContext.createTempSymbol();
+ OutStreamer->emitInstruction(
+ MCInstBuilder(RISCV::BGEU)
+ .addReg(RISCV::X6)
+ .addReg(RISCV::X28)
+ .addExpr(MCSymbolRefExpr::create(HandleMismatchSym, OutContext)),
+ MCSTI);
+
+ OutStreamer->emitInstruction(
+ MCInstBuilder(RISCV::ANDI).addReg(RISCV::X28).addReg(Reg).addImm(0xF),
+ MCSTI);
+
+ if (Size != 1)
+ OutStreamer->emitInstruction(MCInstBuilder(RISCV::ADDI)
+ .addReg(RISCV::X28)
+ .addReg(RISCV::X28)
+ .addImm(Size - 1),
+ MCSTI);
+ OutStreamer->emitInstruction(
+ MCInstBuilder(RISCV::BGE)
+ .addReg(RISCV::X28)
+ .addReg(RISCV::X6)
+ .addExpr(MCSymbolRefExpr::create(HandleMismatchSym, OutContext)),
+ MCSTI);
+
+ OutStreamer->emitInstruction(
+ MCInstBuilder(RISCV::ORI).addReg(RISCV::X6).addReg(Reg).addImm(0xF),
+ MCSTI);
+ OutStreamer->emitInstruction(
+ MCInstBuilder(RISCV::LBU).addReg(RISCV::X6).addReg(RISCV::X6).addImm(0),
+ MCSTI);
+ OutStreamer->emitInstruction(
+ MCInstBuilder(RISCV::BEQ)
+ .addReg(RISCV::X6)
+ .addReg(RISCV::X7)
+ .addExpr(MCSymbolRefExpr::create(ReturnSym, OutContext)),
+ MCSTI);
+
+ OutStreamer->emitLabel(HandleMismatchSym);
+
+ // | Previous stack frames... |
+ // +=================================+ <-- [SP + 256]
+ // | ... |
+ // | |
+ // | Stack frame space for x12 - x31.|
+ // | |
+ // | ... |
+ // +---------------------------------+ <-- [SP + 96]
+ // | Saved x11(arg1), as |
+ // | __hwasan_check_* clobbers it. |
+ // +---------------------------------+ <-- [SP + 88]
+ // | Saved x10(arg0), as |
+ // | __hwasan_check_* clobbers it. |
+ // +---------------------------------+ <-- [SP + 80]
+ // | |
+ // | Stack frame space for x9. |
+ // +---------------------------------+ <-- [SP + 72]
+ // | |
+ // | Saved x8(fp), as |
+ // | __hwasan_check_* clobbers it. |
+ // +---------------------------------+ <-- [SP + 64]
+ // | ... |
+ // | |
+ // | Stack frame space for x2 - x7. |
+ // | |
+ // | ... |
+ // +---------------------------------+ <-- [SP + 16]
+ // | Return address (x1) for caller |
+ // | of __hwasan_check_*. |
+ // +---------------------------------+ <-- [SP + 8]
+ // | Reserved place for x0, possibly |
+ // | junk, since we don't save it. |
+ // +---------------------------------+ <-- [x2 / SP]
+
+ // Adjust sp
+ OutStreamer->emitInstruction(MCInstBuilder(RISCV::ADDI)
+ .addReg(RISCV::X2)
+ .addReg(RISCV::X2)
+ .addImm(-256),
+ MCSTI);
+
+ // store x10(arg0) by new sp
+ OutStreamer->emitInstruction(MCInstBuilder(RISCV::SD)
+ .addReg(RISCV::X10)
+ .addReg(RISCV::X2)
+ .addImm(8 * 10),
+ MCSTI);
+ // store x11(arg1) by new sp
+ OutStreamer->emitInstruction(MCInstBuilder(RISCV::SD)
+ .addReg(RISCV::X11)
+ .addReg(RISCV::X2)
+ .addImm(8 * 11),
+ MCSTI);
+
+ // store x8(fp) by new sp
+ OutStreamer->emitInstruction(
+ MCInstBuilder(RISCV::SD).addReg(RISCV::X8).addReg(RISCV::X2).addImm(8 *
+ 8),
+ MCSTI);
+ // store x1(ra) by new sp
+ OutStreamer->emitInstruction(
+ MCInstBuilder(RISCV::SD).addReg(RISCV::X1).addReg(RISCV::X2).addImm(1 *
+ 8),
+ MCSTI);
+ if (Reg != RISCV::X10)
+ OutStreamer->emitInstruction(MCInstBuilder(RISCV::ADDI)
+ .addReg(RISCV::X10)
+ .addReg(Reg)
+ .addImm(0),
+ MCSTI);
+ OutStreamer->emitInstruction(
+ MCInstBuilder(RISCV::ADDI)
+ .addReg(RISCV::X11)
+ .addReg(RISCV::X0)
+ .addImm(AccessInfo & HWASanAccessInfo::RuntimeMask),
+ MCSTI);
+
+ OutStreamer->emitInstruction(MCInstBuilder(RISCV::PseudoCALL).addExpr(Expr),
+ MCSTI);
+ }
+}
diff --git a/contrib/libs/llvm16/lib/Target/RISCV/RISCVCodeGenPrepare.cpp b/contrib/libs/llvm16/lib/Target/RISCV/RISCVCodeGenPrepare.cpp
new file mode 100644
index 0000000000..5c12d33045
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/RISCV/RISCVCodeGenPrepare.cpp
@@ -0,0 +1,178 @@
+//===----- RISCVCodeGenPrepare.cpp ----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This is a RISCV specific version of CodeGenPrepare.
+// It munges the code in the input function to better prepare it for
+// SelectionDAG-based code generation. This works around limitations in it's
+// basic-block-at-a-time approach.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RISCV.h"
+#include "RISCVTargetMachine.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
+#include "llvm/IR/InstVisitor.h"
+#include "llvm/IR/PatternMatch.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/Pass.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "riscv-codegenprepare"
+#define PASS_NAME "RISCV CodeGenPrepare"
+
+STATISTIC(NumZExtToSExt, "Number of SExt instructions converted to ZExt");
+
+namespace {
+
+class RISCVCodeGenPrepare : public FunctionPass,
+ public InstVisitor<RISCVCodeGenPrepare, bool> {
+ const DataLayout *DL;
+ const RISCVSubtarget *ST;
+
+public:
+ static char ID;
+
+ RISCVCodeGenPrepare() : FunctionPass(ID) {}
+
+ bool runOnFunction(Function &F) override;
+
+ StringRef getPassName() const override { return PASS_NAME; }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ AU.addRequired<TargetPassConfig>();
+ }
+
+ bool visitInstruction(Instruction &I) { return false; }
+ bool visitZExtInst(ZExtInst &I);
+ bool visitAnd(BinaryOperator &BO);
+};
+
+} // end anonymous namespace
+
+bool RISCVCodeGenPrepare::visitZExtInst(ZExtInst &ZExt) {
+ if (!ST->is64Bit())
+ return false;
+
+ Value *Src = ZExt.getOperand(0);
+
+ // We only care about ZExt from i32 to i64.
+ if (!ZExt.getType()->isIntegerTy(64) || !Src->getType()->isIntegerTy(32))
+ return false;
+
+ // Look for an opportunity to replace (i64 (zext (i32 X))) with a sext if we
+ // can determine that the sign bit of X is zero via a dominating condition.
+ // This often occurs with widened induction variables.
+ if (isImpliedByDomCondition(ICmpInst::ICMP_SGE, Src,
+ Constant::getNullValue(Src->getType()), &ZExt,
+ *DL).value_or(false)) {
+ auto *SExt = new SExtInst(Src, ZExt.getType(), "", &ZExt);
+ SExt->takeName(&ZExt);
+ SExt->setDebugLoc(ZExt.getDebugLoc());
+
+ ZExt.replaceAllUsesWith(SExt);
+ ZExt.eraseFromParent();
+ ++NumZExtToSExt;
+ return true;
+ }
+
+ // Convert (zext (abs(i32 X, i1 1))) -> (sext (abs(i32 X, i1 1))). If abs of
+ // INT_MIN is poison, the sign bit is zero.
+ using namespace PatternMatch;
+ if (match(Src, m_Intrinsic<Intrinsic::abs>(m_Value(), m_One()))) {
+ auto *SExt = new SExtInst(Src, ZExt.getType(), "", &ZExt);
+ SExt->takeName(&ZExt);
+ SExt->setDebugLoc(ZExt.getDebugLoc());
+
+ ZExt.replaceAllUsesWith(SExt);
+ ZExt.eraseFromParent();
+ ++NumZExtToSExt;
+ return true;
+ }
+
+ return false;
+}
+
+// Try to optimize (i64 (and (zext/sext (i32 X), C1))) if C1 has bit 31 set,
+// but bits 63:32 are zero. If we can prove that bit 31 of X is 0, we can fill
+// the upper 32 bits with ones. A separate transform will turn (zext X) into
+// (sext X) for the same condition.
+bool RISCVCodeGenPrepare::visitAnd(BinaryOperator &BO) {
+ if (!ST->is64Bit())
+ return false;
+
+ if (!BO.getType()->isIntegerTy(64))
+ return false;
+
+ // Left hand side should be sext or zext.
+ Instruction *LHS = dyn_cast<Instruction>(BO.getOperand(0));
+ if (!LHS || (!isa<SExtInst>(LHS) && !isa<ZExtInst>(LHS)))
+ return false;
+
+ Value *LHSSrc = LHS->getOperand(0);
+ if (!LHSSrc->getType()->isIntegerTy(32))
+ return false;
+
+ // Right hand side should be a constant.
+ Value *RHS = BO.getOperand(1);
+
+ auto *CI = dyn_cast<ConstantInt>(RHS);
+ if (!CI)
+ return false;
+ uint64_t C = CI->getZExtValue();
+
+ // Look for constants that fit in 32 bits but not simm12, and can be made
+ // into simm12 by sign extending bit 31. This will allow use of ANDI.
+  // TODO: Is it worth making simm32?
+ if (!isUInt<32>(C) || isInt<12>(C) || !isInt<12>(SignExtend64<32>(C)))
+ return false;
+
+ // If we can determine the sign bit of the input is 0, we can replace the
+ // And mask constant.
+ if (!isImpliedByDomCondition(ICmpInst::ICMP_SGE, LHSSrc,
+ Constant::getNullValue(LHSSrc->getType()),
+ LHS, *DL).value_or(false))
+ return false;
+
+ // Sign extend the constant and replace the And operand.
+ C = SignExtend64<32>(C);
+ BO.setOperand(1, ConstantInt::get(LHS->getType(), C));
+
+ return true;
+}
+
+bool RISCVCodeGenPrepare::runOnFunction(Function &F) {
+ if (skipFunction(F))
+ return false;
+
+ auto &TPC = getAnalysis<TargetPassConfig>();
+ auto &TM = TPC.getTM<RISCVTargetMachine>();
+ ST = &TM.getSubtarget<RISCVSubtarget>(F);
+
+ DL = &F.getParent()->getDataLayout();
+
+ bool MadeChange = false;
+ for (auto &BB : F)
+ for (Instruction &I : llvm::make_early_inc_range(BB))
+ MadeChange |= visit(I);
+
+ return MadeChange;
+}
+
+INITIALIZE_PASS_BEGIN(RISCVCodeGenPrepare, DEBUG_TYPE, PASS_NAME, false, false)
+INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
+INITIALIZE_PASS_END(RISCVCodeGenPrepare, DEBUG_TYPE, PASS_NAME, false, false)
+
+char RISCVCodeGenPrepare::ID = 0;
+
+FunctionPass *llvm::createRISCVCodeGenPreparePass() {
+ return new RISCVCodeGenPrepare();
+}
diff --git a/contrib/libs/llvm16/lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp b/contrib/libs/llvm16/lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp
new file mode 100644
index 0000000000..58ae28d57b
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp
@@ -0,0 +1,681 @@
+//===-- RISCVExpandAtomicPseudoInsts.cpp - Expand atomic pseudo instrs. ---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains a pass that expands atomic pseudo instructions into
+// target instructions. This pass should be run at the last possible moment,
+// avoiding the possibility for other passes to break the requirements for
+// forward progress in the LR/SC block.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RISCV.h"
+#include "RISCVInstrInfo.h"
+#include "RISCVTargetMachine.h"
+
+#include "llvm/CodeGen/LivePhysRegs.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+
+using namespace llvm;
+
+#define RISCV_EXPAND_ATOMIC_PSEUDO_NAME \
+ "RISCV atomic pseudo instruction expansion pass"
+
+namespace {
+
+class RISCVExpandAtomicPseudo : public MachineFunctionPass {
+public:
+ const RISCVInstrInfo *TII;
+ static char ID;
+
+ RISCVExpandAtomicPseudo() : MachineFunctionPass(ID) {
+ initializeRISCVExpandAtomicPseudoPass(*PassRegistry::getPassRegistry());
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+ StringRef getPassName() const override {
+ return RISCV_EXPAND_ATOMIC_PSEUDO_NAME;
+ }
+
+private:
+ bool expandMBB(MachineBasicBlock &MBB);
+ bool expandMI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+ MachineBasicBlock::iterator &NextMBBI);
+ bool expandAtomicBinOp(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI, AtomicRMWInst::BinOp,
+ bool IsMasked, int Width,
+ MachineBasicBlock::iterator &NextMBBI);
+ bool expandAtomicMinMaxOp(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ AtomicRMWInst::BinOp, bool IsMasked, int Width,
+ MachineBasicBlock::iterator &NextMBBI);
+ bool expandAtomicCmpXchg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI, bool IsMasked,
+ int Width, MachineBasicBlock::iterator &NextMBBI);
+};
+
+char RISCVExpandAtomicPseudo::ID = 0;
+
+bool RISCVExpandAtomicPseudo::runOnMachineFunction(MachineFunction &MF) {
+ TII = static_cast<const RISCVInstrInfo *>(MF.getSubtarget().getInstrInfo());
+ bool Modified = false;
+ for (auto &MBB : MF)
+ Modified |= expandMBB(MBB);
+ return Modified;
+}
+
+bool RISCVExpandAtomicPseudo::expandMBB(MachineBasicBlock &MBB) {
+ bool Modified = false;
+
+ MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
+ while (MBBI != E) {
+ MachineBasicBlock::iterator NMBBI = std::next(MBBI);
+ Modified |= expandMI(MBB, MBBI, NMBBI);
+ MBBI = NMBBI;
+ }
+
+ return Modified;
+}
+
+bool RISCVExpandAtomicPseudo::expandMI(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ MachineBasicBlock::iterator &NextMBBI) {
+ // RISCVInstrInfo::getInstSizeInBytes expects that the total size of the
+ // expanded instructions for each pseudo is correct in the Size field of the
+ // tablegen definition for the pseudo.
+ switch (MBBI->getOpcode()) {
+ case RISCV::PseudoAtomicLoadNand32:
+ return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Nand, false, 32,
+ NextMBBI);
+ case RISCV::PseudoAtomicLoadNand64:
+ return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Nand, false, 64,
+ NextMBBI);
+ case RISCV::PseudoMaskedAtomicSwap32:
+ return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Xchg, true, 32,
+ NextMBBI);
+ case RISCV::PseudoMaskedAtomicLoadAdd32:
+ return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Add, true, 32, NextMBBI);
+ case RISCV::PseudoMaskedAtomicLoadSub32:
+ return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Sub, true, 32, NextMBBI);
+ case RISCV::PseudoMaskedAtomicLoadNand32:
+ return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Nand, true, 32,
+ NextMBBI);
+ case RISCV::PseudoMaskedAtomicLoadMax32:
+ return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::Max, true, 32,
+ NextMBBI);
+ case RISCV::PseudoMaskedAtomicLoadMin32:
+ return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::Min, true, 32,
+ NextMBBI);
+ case RISCV::PseudoMaskedAtomicLoadUMax32:
+ return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::UMax, true, 32,
+ NextMBBI);
+ case RISCV::PseudoMaskedAtomicLoadUMin32:
+ return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::UMin, true, 32,
+ NextMBBI);
+ case RISCV::PseudoCmpXchg32:
+ return expandAtomicCmpXchg(MBB, MBBI, false, 32, NextMBBI);
+ case RISCV::PseudoCmpXchg64:
+ return expandAtomicCmpXchg(MBB, MBBI, false, 64, NextMBBI);
+ case RISCV::PseudoMaskedCmpXchg32:
+ return expandAtomicCmpXchg(MBB, MBBI, true, 32, NextMBBI);
+ }
+
+ return false;
+}
+
+static unsigned getLRForRMW32(AtomicOrdering Ordering) {
+ switch (Ordering) {
+ default:
+ llvm_unreachable("Unexpected AtomicOrdering");
+ case AtomicOrdering::Monotonic:
+ return RISCV::LR_W;
+ case AtomicOrdering::Acquire:
+ return RISCV::LR_W_AQ;
+ case AtomicOrdering::Release:
+ return RISCV::LR_W;
+ case AtomicOrdering::AcquireRelease:
+ return RISCV::LR_W_AQ;
+ case AtomicOrdering::SequentiallyConsistent:
+ return RISCV::LR_W_AQ_RL;
+ }
+}
+
+static unsigned getSCForRMW32(AtomicOrdering Ordering) {
+ switch (Ordering) {
+ default:
+ llvm_unreachable("Unexpected AtomicOrdering");
+ case AtomicOrdering::Monotonic:
+ return RISCV::SC_W;
+ case AtomicOrdering::Acquire:
+ return RISCV::SC_W;
+ case AtomicOrdering::Release:
+ return RISCV::SC_W_RL;
+ case AtomicOrdering::AcquireRelease:
+ return RISCV::SC_W_RL;
+ case AtomicOrdering::SequentiallyConsistent:
+ return RISCV::SC_W_AQ_RL;
+ }
+}
+
+static unsigned getLRForRMW64(AtomicOrdering Ordering) {
+ switch (Ordering) {
+ default:
+ llvm_unreachable("Unexpected AtomicOrdering");
+ case AtomicOrdering::Monotonic:
+ return RISCV::LR_D;
+ case AtomicOrdering::Acquire:
+ return RISCV::LR_D_AQ;
+ case AtomicOrdering::Release:
+ return RISCV::LR_D;
+ case AtomicOrdering::AcquireRelease:
+ return RISCV::LR_D_AQ;
+ case AtomicOrdering::SequentiallyConsistent:
+ return RISCV::LR_D_AQ_RL;
+ }
+}
+
+static unsigned getSCForRMW64(AtomicOrdering Ordering) {
+ switch (Ordering) {
+ default:
+ llvm_unreachable("Unexpected AtomicOrdering");
+ case AtomicOrdering::Monotonic:
+ return RISCV::SC_D;
+ case AtomicOrdering::Acquire:
+ return RISCV::SC_D;
+ case AtomicOrdering::Release:
+ return RISCV::SC_D_RL;
+ case AtomicOrdering::AcquireRelease:
+ return RISCV::SC_D_RL;
+ case AtomicOrdering::SequentiallyConsistent:
+ return RISCV::SC_D_AQ_RL;
+ }
+}
+
+static unsigned getLRForRMW(AtomicOrdering Ordering, int Width) {
+ if (Width == 32)
+ return getLRForRMW32(Ordering);
+ if (Width == 64)
+ return getLRForRMW64(Ordering);
+ llvm_unreachable("Unexpected LR width\n");
+}
+
+static unsigned getSCForRMW(AtomicOrdering Ordering, int Width) {
+ if (Width == 32)
+ return getSCForRMW32(Ordering);
+ if (Width == 64)
+ return getSCForRMW64(Ordering);
+ llvm_unreachable("Unexpected SC width\n");
+}
+
+static void doAtomicBinOpExpansion(const RISCVInstrInfo *TII, MachineInstr &MI,
+ DebugLoc DL, MachineBasicBlock *ThisMBB,
+ MachineBasicBlock *LoopMBB,
+ MachineBasicBlock *DoneMBB,
+ AtomicRMWInst::BinOp BinOp, int Width) {
+ Register DestReg = MI.getOperand(0).getReg();
+ Register ScratchReg = MI.getOperand(1).getReg();
+ Register AddrReg = MI.getOperand(2).getReg();
+ Register IncrReg = MI.getOperand(3).getReg();
+ AtomicOrdering Ordering =
+ static_cast<AtomicOrdering>(MI.getOperand(4).getImm());
+
+ // .loop:
+ // lr.[w|d] dest, (addr)
+ // binop scratch, dest, val
+ // sc.[w|d] scratch, scratch, (addr)
+ // bnez scratch, loop
+ BuildMI(LoopMBB, DL, TII->get(getLRForRMW(Ordering, Width)), DestReg)
+ .addReg(AddrReg);
+ switch (BinOp) {
+ default:
+ llvm_unreachable("Unexpected AtomicRMW BinOp");
+ case AtomicRMWInst::Nand:
+ BuildMI(LoopMBB, DL, TII->get(RISCV::AND), ScratchReg)
+ .addReg(DestReg)
+ .addReg(IncrReg);
+ BuildMI(LoopMBB, DL, TII->get(RISCV::XORI), ScratchReg)
+ .addReg(ScratchReg)
+ .addImm(-1);
+ break;
+ }
+ BuildMI(LoopMBB, DL, TII->get(getSCForRMW(Ordering, Width)), ScratchReg)
+ .addReg(AddrReg)
+ .addReg(ScratchReg);
+ BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
+ .addReg(ScratchReg)
+ .addReg(RISCV::X0)
+ .addMBB(LoopMBB);
+}
+
+static void insertMaskedMerge(const RISCVInstrInfo *TII, DebugLoc DL,
+ MachineBasicBlock *MBB, Register DestReg,
+ Register OldValReg, Register NewValReg,
+ Register MaskReg, Register ScratchReg) {
+ assert(OldValReg != ScratchReg && "OldValReg and ScratchReg must be unique");
+ assert(OldValReg != MaskReg && "OldValReg and MaskReg must be unique");
+ assert(ScratchReg != MaskReg && "ScratchReg and MaskReg must be unique");
+
+ // We select bits from newval and oldval using:
+ // https://graphics.stanford.edu/~seander/bithacks.html#MaskedMerge
+ // r = oldval ^ ((oldval ^ newval) & masktargetdata);
+ BuildMI(MBB, DL, TII->get(RISCV::XOR), ScratchReg)
+ .addReg(OldValReg)
+ .addReg(NewValReg);
+ BuildMI(MBB, DL, TII->get(RISCV::AND), ScratchReg)
+ .addReg(ScratchReg)
+ .addReg(MaskReg);
+ BuildMI(MBB, DL, TII->get(RISCV::XOR), DestReg)
+ .addReg(OldValReg)
+ .addReg(ScratchReg);
+}
+
+// Expand a masked atomic binop pseudo into an LR.W/SC.W retry loop that
+// operates on the aligned word containing the sub-word element. MaskReg
+// selects the bits of the word that belong to the target data; the masked
+// merge at the end keeps all other bits of the word unchanged.
+// Operands of MI: (dest, scratch, aligned addr, incr, mask, ordering imm).
+static void doMaskedAtomicBinOpExpansion(
+    const RISCVInstrInfo *TII, MachineInstr &MI, DebugLoc DL,
+    MachineBasicBlock *ThisMBB, MachineBasicBlock *LoopMBB,
+    MachineBasicBlock *DoneMBB, AtomicRMWInst::BinOp BinOp, int Width) {
+  assert(Width == 32 && "Should never need to expand masked 64-bit operations");
+  Register DestReg = MI.getOperand(0).getReg();
+  Register ScratchReg = MI.getOperand(1).getReg();
+  Register AddrReg = MI.getOperand(2).getReg();
+  Register IncrReg = MI.getOperand(3).getReg();
+  Register MaskReg = MI.getOperand(4).getReg();
+  AtomicOrdering Ordering =
+      static_cast<AtomicOrdering>(MI.getOperand(5).getImm());
+
+  // .loop:
+  //   lr.w destreg, (alignedaddr)
+  //   binop scratch, destreg, incr
+  //   xor scratch, destreg, scratch
+  //   and scratch, scratch, masktargetdata
+  //   xor scratch, destreg, scratch
+  //   sc.w scratch, scratch, (alignedaddr)
+  //   bnez scratch, loop
+  BuildMI(LoopMBB, DL, TII->get(getLRForRMW32(Ordering)), DestReg)
+      .addReg(AddrReg);
+  switch (BinOp) {
+  default:
+    llvm_unreachable("Unexpected AtomicRMW BinOp");
+  case AtomicRMWInst::Xchg:
+    // Xchg: the new value is simply incr; ADDI with immediate 0 is a move.
+    BuildMI(LoopMBB, DL, TII->get(RISCV::ADDI), ScratchReg)
+        .addReg(IncrReg)
+        .addImm(0);
+    break;
+  case AtomicRMWInst::Add:
+    BuildMI(LoopMBB, DL, TII->get(RISCV::ADD), ScratchReg)
+        .addReg(DestReg)
+        .addReg(IncrReg);
+    break;
+  case AtomicRMWInst::Sub:
+    BuildMI(LoopMBB, DL, TII->get(RISCV::SUB), ScratchReg)
+        .addReg(DestReg)
+        .addReg(IncrReg);
+    break;
+  case AtomicRMWInst::Nand:
+    // Nand: AND followed by bitwise NOT (XORI with -1).
+    BuildMI(LoopMBB, DL, TII->get(RISCV::AND), ScratchReg)
+        .addReg(DestReg)
+        .addReg(IncrReg);
+    BuildMI(LoopMBB, DL, TII->get(RISCV::XORI), ScratchReg)
+        .addReg(ScratchReg)
+        .addImm(-1);
+    break;
+  }
+
+  // Merge the computed bits into the surrounding word under MaskReg.
+  insertMaskedMerge(TII, DL, LoopMBB, ScratchReg, DestReg, ScratchReg, MaskReg,
+                    ScratchReg);
+
+  BuildMI(LoopMBB, DL, TII->get(getSCForRMW32(Ordering)), ScratchReg)
+      .addReg(AddrReg)
+      .addReg(ScratchReg);
+  // Retry the loop if the store-conditional failed (nonzero result).
+  BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
+      .addReg(ScratchReg)
+      .addReg(RISCV::X0)
+      .addMBB(LoopMBB);
+}
+
+// Expand an atomic binop pseudo (masked or unmasked) into a retry loop.
+// Creates LoopMBB and DoneMBB, splices the remainder of MBB into DoneMBB,
+// emits the loop body via the do*Expansion helpers, then recomputes
+// live-ins for the new blocks. NextMBBI is set so the caller resumes
+// scanning after the expansion.
+bool RISCVExpandAtomicPseudo::expandAtomicBinOp(
+    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+    AtomicRMWInst::BinOp BinOp, bool IsMasked, int Width,
+    MachineBasicBlock::iterator &NextMBBI) {
+  MachineInstr &MI = *MBBI;
+  DebugLoc DL = MI.getDebugLoc();
+
+  MachineFunction *MF = MBB.getParent();
+  auto LoopMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
+  auto DoneMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
+
+  // Insert new MBBs.
+  MF->insert(++MBB.getIterator(), LoopMBB);
+  MF->insert(++LoopMBB->getIterator(), DoneMBB);
+
+  // Set up successors and transfer remaining instructions to DoneMBB.
+  LoopMBB->addSuccessor(LoopMBB);
+  LoopMBB->addSuccessor(DoneMBB);
+  DoneMBB->splice(DoneMBB->end(), &MBB, MI, MBB.end());
+  DoneMBB->transferSuccessors(&MBB);
+  MBB.addSuccessor(LoopMBB);
+
+  if (!IsMasked)
+    doAtomicBinOpExpansion(TII, MI, DL, &MBB, LoopMBB, DoneMBB, BinOp, Width);
+  else
+    doMaskedAtomicBinOpExpansion(TII, MI, DL, &MBB, LoopMBB, DoneMBB, BinOp,
+                                 Width);
+
+  NextMBBI = MBB.end();
+  MI.eraseFromParent();
+
+  // Make sure live-ins are correctly attached to the new basic blocks.
+  LivePhysRegs LiveRegs;
+  computeAndAddLiveIns(LiveRegs, *LoopMBB);
+  computeAndAddLiveIns(LiveRegs, *DoneMBB);
+
+  return true;
+}
+
+// Sign-extend the value in ValReg in place: shift left then arithmetic
+// shift right by ShamtReg. ShamtReg presumably holds XLEN minus the width
+// of the field being extended (set up by the pseudo's extra operand) —
+// confirm against the pseudo definition.
+static void insertSext(const RISCVInstrInfo *TII, DebugLoc DL,
+                       MachineBasicBlock *MBB, Register ValReg,
+                       Register ShamtReg) {
+  BuildMI(MBB, DL, TII->get(RISCV::SLL), ValReg)
+      .addReg(ValReg)
+      .addReg(ShamtReg);
+  BuildMI(MBB, DL, TII->get(RISCV::SRA), ValReg)
+      .addReg(ValReg)
+      .addReg(ShamtReg);
+}
+
+// Expand a masked atomic min/max pseudo. Unlike the plain binops this
+// needs a conditional inside the loop, hence four blocks: loophead
+// compares the current (sign-extended, for signed ops) field against incr
+// and may skip the update; loopifbody merges the new value under the mask;
+// looptail performs the SC and retry branch. Signed ops carry an extra
+// sext-shamt operand at index 6, so their ordering immediate sits at
+// index 7 instead of 6.
+bool RISCVExpandAtomicPseudo::expandAtomicMinMaxOp(
+    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+    AtomicRMWInst::BinOp BinOp, bool IsMasked, int Width,
+    MachineBasicBlock::iterator &NextMBBI) {
+  assert(IsMasked == true &&
+         "Should only need to expand masked atomic max/min");
+  assert(Width == 32 && "Should never need to expand masked 64-bit operations");
+
+  MachineInstr &MI = *MBBI;
+  DebugLoc DL = MI.getDebugLoc();
+  MachineFunction *MF = MBB.getParent();
+  auto LoopHeadMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
+  auto LoopIfBodyMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
+  auto LoopTailMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
+  auto DoneMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
+
+  // Insert new MBBs.
+  MF->insert(++MBB.getIterator(), LoopHeadMBB);
+  MF->insert(++LoopHeadMBB->getIterator(), LoopIfBodyMBB);
+  MF->insert(++LoopIfBodyMBB->getIterator(), LoopTailMBB);
+  MF->insert(++LoopTailMBB->getIterator(), DoneMBB);
+
+  // Set up successors and transfer remaining instructions to DoneMBB.
+  LoopHeadMBB->addSuccessor(LoopIfBodyMBB);
+  LoopHeadMBB->addSuccessor(LoopTailMBB);
+  LoopIfBodyMBB->addSuccessor(LoopTailMBB);
+  LoopTailMBB->addSuccessor(LoopHeadMBB);
+  LoopTailMBB->addSuccessor(DoneMBB);
+  DoneMBB->splice(DoneMBB->end(), &MBB, MI, MBB.end());
+  DoneMBB->transferSuccessors(&MBB);
+  MBB.addSuccessor(LoopHeadMBB);
+
+  Register DestReg = MI.getOperand(0).getReg();
+  Register Scratch1Reg = MI.getOperand(1).getReg();
+  Register Scratch2Reg = MI.getOperand(2).getReg();
+  Register AddrReg = MI.getOperand(3).getReg();
+  Register IncrReg = MI.getOperand(4).getReg();
+  Register MaskReg = MI.getOperand(5).getReg();
+  bool IsSigned = BinOp == AtomicRMWInst::Min || BinOp == AtomicRMWInst::Max;
+  AtomicOrdering Ordering =
+      static_cast<AtomicOrdering>(MI.getOperand(IsSigned ? 7 : 6).getImm());
+
+  //
+  // .loophead:
+  //   lr.w destreg, (alignedaddr)
+  //   and scratch2, destreg, mask
+  //   mv scratch1, destreg
+  //   [sext scratch2 if signed min/max]
+  //   ifnochangeneeded scratch2, incr, .looptail
+  BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW32(Ordering)), DestReg)
+      .addReg(AddrReg);
+  BuildMI(LoopHeadMBB, DL, TII->get(RISCV::AND), Scratch2Reg)
+      .addReg(DestReg)
+      .addReg(MaskReg);
+  BuildMI(LoopHeadMBB, DL, TII->get(RISCV::ADDI), Scratch1Reg)
+      .addReg(DestReg)
+      .addImm(0);
+
+  // Branch to .looptail (skipping the update) when no change is needed;
+  // the branch sense is chosen per operation.
+  switch (BinOp) {
+  default:
+    llvm_unreachable("Unexpected AtomicRMW BinOp");
+  case AtomicRMWInst::Max: {
+    insertSext(TII, DL, LoopHeadMBB, Scratch2Reg, MI.getOperand(6).getReg());
+    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BGE))
+        .addReg(Scratch2Reg)
+        .addReg(IncrReg)
+        .addMBB(LoopTailMBB);
+    break;
+  }
+  case AtomicRMWInst::Min: {
+    insertSext(TII, DL, LoopHeadMBB, Scratch2Reg, MI.getOperand(6).getReg());
+    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BGE))
+        .addReg(IncrReg)
+        .addReg(Scratch2Reg)
+        .addMBB(LoopTailMBB);
+    break;
+  }
+  case AtomicRMWInst::UMax:
+    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BGEU))
+        .addReg(Scratch2Reg)
+        .addReg(IncrReg)
+        .addMBB(LoopTailMBB);
+    break;
+  case AtomicRMWInst::UMin:
+    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BGEU))
+        .addReg(IncrReg)
+        .addReg(Scratch2Reg)
+        .addMBB(LoopTailMBB);
+    break;
+  }
+
+  // .loopifbody:
+  //   xor scratch1, destreg, incr
+  //   and scratch1, scratch1, mask
+  //   xor scratch1, destreg, scratch1
+  insertMaskedMerge(TII, DL, LoopIfBodyMBB, Scratch1Reg, DestReg, IncrReg,
+                    MaskReg, Scratch1Reg);
+
+  // .looptail:
+  //   sc.w scratch1, scratch1, (addr)
+  //   bnez scratch1, loop
+  BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW32(Ordering)), Scratch1Reg)
+      .addReg(AddrReg)
+      .addReg(Scratch1Reg);
+  BuildMI(LoopTailMBB, DL, TII->get(RISCV::BNE))
+      .addReg(Scratch1Reg)
+      .addReg(RISCV::X0)
+      .addMBB(LoopHeadMBB);
+
+  NextMBBI = MBB.end();
+  MI.eraseFromParent();
+
+  // Make sure live-ins are correctly attached to the new basic blocks.
+  LivePhysRegs LiveRegs;
+  computeAndAddLiveIns(LiveRegs, *LoopHeadMBB);
+  computeAndAddLiveIns(LiveRegs, *LoopIfBodyMBB);
+  computeAndAddLiveIns(LiveRegs, *LoopTailMBB);
+  computeAndAddLiveIns(LiveRegs, *DoneMBB);
+
+  return true;
+}
+
+// If a BNE on the cmpxchg comparison result immediately follows the cmpxchg
+// operation, it can be folded into the cmpxchg expansion by
+// modifying the branch within 'LoopHead' (which performs the same
+// comparison). This is a valid transformation because after altering the
+// LoopHead's BNE destination, the BNE following the cmpxchg becomes
+// redundant and can be deleted. In the case of a masked cmpxchg, an
+// appropriate AND and BNE must be matched.
+//
+// On success, returns true and deletes the matching BNE or AND+BNE, sets the
+// LoopHeadBNETarget argument to the target that should be used within the
+// loop head, and removes that block as a successor to MBB.
+bool tryToFoldBNEOnCmpXchgResult(MachineBasicBlock &MBB,
+                                 MachineBasicBlock::iterator MBBI,
+                                 Register DestReg, Register CmpValReg,
+                                 Register MaskReg,
+                                 MachineBasicBlock *&LoopHeadBNETarget) {
+  SmallVector<MachineInstr *> ToErase;
+  auto E = MBB.end();
+  if (MBBI == E)
+    return false;
+  MBBI = skipDebugInstructionsForward(MBBI, E);
+
+  // If we have a masked cmpxchg, match AND dst, DestReg, MaskReg.
+  if (MaskReg.isValid()) {
+    if (MBBI == E || MBBI->getOpcode() != RISCV::AND)
+      return false;
+    Register ANDOp1 = MBBI->getOperand(1).getReg();
+    Register ANDOp2 = MBBI->getOperand(2).getReg();
+    // Operands may appear in either order.
+    if (!(ANDOp1 == DestReg && ANDOp2 == MaskReg) &&
+        !(ANDOp1 == MaskReg && ANDOp2 == DestReg))
+      return false;
+    // We now expect the BNE to use the result of the AND as an operand.
+    DestReg = MBBI->getOperand(0).getReg();
+    ToErase.push_back(&*MBBI);
+    MBBI = skipDebugInstructionsForward(std::next(MBBI), E);
+  }
+
+  // Match BNE DestReg, CmpValReg (in either operand order).
+  if (MBBI == E || MBBI->getOpcode() != RISCV::BNE)
+    return false;
+  Register BNEOp0 = MBBI->getOperand(0).getReg();
+  Register BNEOp1 = MBBI->getOperand(1).getReg();
+  if (!(BNEOp0 == DestReg && BNEOp1 == CmpValReg) &&
+      !(BNEOp0 == CmpValReg && BNEOp1 == DestReg))
+    return false;
+  ToErase.push_back(&*MBBI);
+  LoopHeadBNETarget = MBBI->getOperand(2).getMBB();
+  MBBI = skipDebugInstructionsForward(std::next(MBBI), E);
+  // Only fold when the BNE is the last real instruction in the block.
+  if (MBBI != E)
+    return false;
+
+  MBB.removeSuccessor(LoopHeadBNETarget);
+  for (auto *MI : ToErase)
+    MI->eraseFromParent();
+  return true;
+}
+
+// Expand a cmpxchg pseudo into loophead/looptail/done blocks. A BNE on the
+// result that immediately follows the pseudo is folded into the loop-head
+// branch when tryToFoldBNEOnCmpXchgResult succeeds; otherwise the loop
+// head branches to DoneMBB on mismatch.
+bool RISCVExpandAtomicPseudo::expandAtomicCmpXchg(
+    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, bool IsMasked,
+    int Width, MachineBasicBlock::iterator &NextMBBI) {
+  MachineInstr &MI = *MBBI;
+  DebugLoc DL = MI.getDebugLoc();
+  MachineFunction *MF = MBB.getParent();
+  auto LoopHeadMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
+  auto LoopTailMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
+  auto DoneMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
+
+  Register DestReg = MI.getOperand(0).getReg();
+  Register ScratchReg = MI.getOperand(1).getReg();
+  Register AddrReg = MI.getOperand(2).getReg();
+  Register CmpValReg = MI.getOperand(3).getReg();
+  Register NewValReg = MI.getOperand(4).getReg();
+  Register MaskReg = IsMasked ? MI.getOperand(5).getReg() : Register();
+
+  MachineBasicBlock *LoopHeadBNETarget = DoneMBB;
+  tryToFoldBNEOnCmpXchgResult(MBB, std::next(MBBI), DestReg, CmpValReg, MaskReg,
+                              LoopHeadBNETarget);
+
+  // Insert new MBBs.
+  MF->insert(++MBB.getIterator(), LoopHeadMBB);
+  MF->insert(++LoopHeadMBB->getIterator(), LoopTailMBB);
+  MF->insert(++LoopTailMBB->getIterator(), DoneMBB);
+
+  // Set up successors and transfer remaining instructions to DoneMBB.
+  LoopHeadMBB->addSuccessor(LoopTailMBB);
+  LoopHeadMBB->addSuccessor(LoopHeadBNETarget);
+  LoopTailMBB->addSuccessor(DoneMBB);
+  LoopTailMBB->addSuccessor(LoopHeadMBB);
+  DoneMBB->splice(DoneMBB->end(), &MBB, MI, MBB.end());
+  DoneMBB->transferSuccessors(&MBB);
+  MBB.addSuccessor(LoopHeadMBB);
+
+  AtomicOrdering Ordering =
+      static_cast<AtomicOrdering>(MI.getOperand(IsMasked ? 6 : 5).getImm());
+
+  if (!IsMasked) {
+    // .loophead:
+    //   lr.[w|d] dest, (addr)
+    //   bne dest, cmpval, done
+    BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW(Ordering, Width)), DestReg)
+        .addReg(AddrReg);
+    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BNE))
+        .addReg(DestReg)
+        .addReg(CmpValReg)
+        .addMBB(LoopHeadBNETarget);
+    // .looptail:
+    //   sc.[w|d] scratch, newval, (addr)
+    //   bnez scratch, loophead
+    BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW(Ordering, Width)), ScratchReg)
+        .addReg(AddrReg)
+        .addReg(NewValReg);
+    BuildMI(LoopTailMBB, DL, TII->get(RISCV::BNE))
+        .addReg(ScratchReg)
+        .addReg(RISCV::X0)
+        .addMBB(LoopHeadMBB);
+  } else {
+    // .loophead:
+    //   lr.w dest, (addr)
+    //   and scratch, dest, mask
+    //   bne scratch, cmpval, done
+    Register MaskReg = MI.getOperand(5).getReg(); // shadows outer MaskReg; same operand
+    BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW(Ordering, Width)), DestReg)
+        .addReg(AddrReg);
+    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::AND), ScratchReg)
+        .addReg(DestReg)
+        .addReg(MaskReg);
+    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BNE))
+        .addReg(ScratchReg)
+        .addReg(CmpValReg)
+        .addMBB(LoopHeadBNETarget);
+
+    // .looptail:
+    //   xor scratch, dest, newval
+    //   and scratch, scratch, mask
+    //   xor scratch, dest, scratch
+    //   sc.w scratch, scratch, (addr)
+    //   bnez scratch, loophead
+    insertMaskedMerge(TII, DL, LoopTailMBB, ScratchReg, DestReg, NewValReg,
+                      MaskReg, ScratchReg);
+    BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW(Ordering, Width)), ScratchReg)
+        .addReg(AddrReg)
+        .addReg(ScratchReg);
+    BuildMI(LoopTailMBB, DL, TII->get(RISCV::BNE))
+        .addReg(ScratchReg)
+        .addReg(RISCV::X0)
+        .addMBB(LoopHeadMBB);
+  }
+
+  NextMBBI = MBB.end();
+  MI.eraseFromParent();
+
+  // Make sure live-ins are correctly attached to the new basic blocks.
+  LivePhysRegs LiveRegs;
+  computeAndAddLiveIns(LiveRegs, *LoopHeadMBB);
+  computeAndAddLiveIns(LiveRegs, *LoopTailMBB);
+  computeAndAddLiveIns(LiveRegs, *DoneMBB);
+
+  return true;
+}
+
+} // end of anonymous namespace
+
+INITIALIZE_PASS(RISCVExpandAtomicPseudo, "riscv-expand-atomic-pseudo",
+ RISCV_EXPAND_ATOMIC_PSEUDO_NAME, false, false)
+
+namespace llvm {
+
+// Factory for the atomic pseudo-expansion pass.
+FunctionPass *createRISCVExpandAtomicPseudoPass() {
+  return new RISCVExpandAtomicPseudo();
+}
+
+} // end of namespace llvm
diff --git a/contrib/libs/llvm16/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp b/contrib/libs/llvm16/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
new file mode 100644
index 0000000000..c4d85dc8dd
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
@@ -0,0 +1,402 @@
+//===-- RISCVExpandPseudoInsts.cpp - Expand pseudo instructions -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains a pass that expands pseudo instructions into target
+// instructions. This pass should be run after register allocation but before
+// the post-regalloc scheduling pass.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RISCV.h"
+#include "RISCVInstrInfo.h"
+#include "RISCVTargetMachine.h"
+
+#include "llvm/CodeGen/LivePhysRegs.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/MC/MCContext.h"
+
+using namespace llvm;
+
+#define RISCV_EXPAND_PSEUDO_NAME "RISCV pseudo instruction expansion pass"
+#define RISCV_PRERA_EXPAND_PSEUDO_NAME "RISCV Pre-RA pseudo instruction expansion pass"
+
+namespace {
+
+// Post-register-allocation pass that expands RISCV pseudo instructions
+// (conditional-op pseudos, vsetvli pseudos, and vector mask set/clear
+// pseudos) into real target instructions.
+class RISCVExpandPseudo : public MachineFunctionPass {
+public:
+  const RISCVInstrInfo *TII; // Cached per-function in runOnMachineFunction.
+  static char ID;
+
+  RISCVExpandPseudo() : MachineFunctionPass(ID) {
+    initializeRISCVExpandPseudoPass(*PassRegistry::getPassRegistry());
+  }
+
+  bool runOnMachineFunction(MachineFunction &MF) override;
+
+  StringRef getPassName() const override { return RISCV_EXPAND_PSEUDO_NAME; }
+
+private:
+  // Expand all pseudos within one block; returns true if anything changed.
+  bool expandMBB(MachineBasicBlock &MBB);
+  // Dispatch on the opcode of *MBBI; NextMBBI is updated when an expansion
+  // splits the block.
+  bool expandMI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+                MachineBasicBlock::iterator &NextMBBI);
+  bool expandCCOp(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+                  MachineBasicBlock::iterator &NextMBBI);
+  bool expandVSetVL(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI);
+  bool expandVMSET_VMCLR(MachineBasicBlock &MBB,
+                         MachineBasicBlock::iterator MBBI, unsigned Opcode);
+};
+
+char RISCVExpandPseudo::ID = 0;
+
+// Cache the instruction info for this function, then expand pseudos block
+// by block; returns whether any instruction was changed.
+bool RISCVExpandPseudo::runOnMachineFunction(MachineFunction &MF) {
+  TII = static_cast<const RISCVInstrInfo *>(MF.getSubtarget().getInstrInfo());
+  bool Modified = false;
+  for (auto &MBB : MF)
+    Modified |= expandMBB(MBB);
+  return Modified;
+}
+
+bool RISCVExpandPseudo::expandMBB(MachineBasicBlock &MBB) {
+  bool Modified = false;
+
+  // Grab the next iterator before expanding: expandMI may erase *MBBI, and
+  // may move NMBBI when the expansion splits the block.
+  MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
+  while (MBBI != E) {
+    MachineBasicBlock::iterator NMBBI = std::next(MBBI);
+    Modified |= expandMI(MBB, MBBI, NMBBI);
+    MBBI = NMBBI;
+  }
+
+  return Modified;
+}
+
+// Dispatch a single instruction to the matching expansion routine; returns
+// false for instructions that are not handled pseudos.
+bool RISCVExpandPseudo::expandMI(MachineBasicBlock &MBB,
+                                 MachineBasicBlock::iterator MBBI,
+                                 MachineBasicBlock::iterator &NextMBBI) {
+  // RISCVInstrInfo::getInstSizeInBytes expects that the total size of the
+  // expanded instructions for each pseudo is correct in the Size field of the
+  // tablegen definition for the pseudo.
+  switch (MBBI->getOpcode()) {
+  case RISCV::PseudoCCMOVGPR:
+  case RISCV::PseudoCCADD:
+  case RISCV::PseudoCCSUB:
+  case RISCV::PseudoCCAND:
+  case RISCV::PseudoCCOR:
+  case RISCV::PseudoCCXOR:
+  case RISCV::PseudoCCADDW:
+  case RISCV::PseudoCCSUBW:
+    return expandCCOp(MBB, MBBI, NextMBBI);
+  case RISCV::PseudoVSETVLI:
+  case RISCV::PseudoVSETVLIX0:
+  case RISCV::PseudoVSETIVLI:
+    return expandVSetVL(MBB, MBBI);
+  case RISCV::PseudoVMCLR_M_B1:
+  case RISCV::PseudoVMCLR_M_B2:
+  case RISCV::PseudoVMCLR_M_B4:
+  case RISCV::PseudoVMCLR_M_B8:
+  case RISCV::PseudoVMCLR_M_B16:
+  case RISCV::PseudoVMCLR_M_B32:
+  case RISCV::PseudoVMCLR_M_B64:
+    // vmclr.m vd => vmxor.mm vd, vd, vd
+    return expandVMSET_VMCLR(MBB, MBBI, RISCV::VMXOR_MM);
+  case RISCV::PseudoVMSET_M_B1:
+  case RISCV::PseudoVMSET_M_B2:
+  case RISCV::PseudoVMSET_M_B4:
+  case RISCV::PseudoVMSET_M_B8:
+  case RISCV::PseudoVMSET_M_B16:
+  case RISCV::PseudoVMSET_M_B32:
+  case RISCV::PseudoVMSET_M_B64:
+    // vmset.m vd => vmxnor.mm vd, vd, vd
+    return expandVMSET_VMCLR(MBB, MBBI, RISCV::VMXNOR_MM);
+  }
+
+  return false;
+}
+
+// Expand a PseudoCC* conditional operation into a branch-over pattern: the
+// condition is inverted so the branch jumps past TrueBB (which performs
+// the move or ALU op into DestReg) straight to MergeBB when the original
+// condition is false.
+bool RISCVExpandPseudo::expandCCOp(MachineBasicBlock &MBB,
+                                   MachineBasicBlock::iterator MBBI,
+                                   MachineBasicBlock::iterator &NextMBBI) {
+
+  MachineFunction *MF = MBB.getParent();
+  MachineInstr &MI = *MBBI;
+  DebugLoc DL = MI.getDebugLoc();
+
+  MachineBasicBlock *TrueBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
+  MachineBasicBlock *MergeBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
+
+  MF->insert(++MBB.getIterator(), TrueBB);
+  MF->insert(++TrueBB->getIterator(), MergeBB);
+
+  // We want to copy the "true" value when the condition is true which means
+  // we need to invert the branch condition to jump over TrueBB when the
+  // condition is false.
+  auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm());
+  CC = RISCVCC::getOppositeBranchCondition(CC);
+
+  // Insert branch instruction.
+  BuildMI(MBB, MBBI, DL, TII->getBrCond(CC))
+      .addReg(MI.getOperand(1).getReg())
+      .addReg(MI.getOperand(2).getReg())
+      .addMBB(MergeBB);
+
+  Register DestReg = MI.getOperand(0).getReg();
+  // Operand 4 is the tied false-value input; it must already be in DestReg.
+  assert(MI.getOperand(4).getReg() == DestReg);
+
+  if (MI.getOpcode() == RISCV::PseudoCCMOVGPR) {
+    // Add MV.
+    BuildMI(TrueBB, DL, TII->get(RISCV::ADDI), DestReg)
+        .add(MI.getOperand(5))
+        .addImm(0);
+  } else {
+    unsigned NewOpc;
+    switch (MI.getOpcode()) {
+    default:
+      llvm_unreachable("Unexpected opcode!");
+    case RISCV::PseudoCCADD: NewOpc = RISCV::ADD; break;
+    case RISCV::PseudoCCSUB: NewOpc = RISCV::SUB; break;
+    case RISCV::PseudoCCAND: NewOpc = RISCV::AND; break;
+    case RISCV::PseudoCCOR: NewOpc = RISCV::OR; break;
+    case RISCV::PseudoCCXOR: NewOpc = RISCV::XOR; break;
+    case RISCV::PseudoCCADDW: NewOpc = RISCV::ADDW; break;
+    case RISCV::PseudoCCSUBW: NewOpc = RISCV::SUBW; break;
+    }
+    BuildMI(TrueBB, DL, TII->get(NewOpc), DestReg)
+        .add(MI.getOperand(5))
+        .add(MI.getOperand(6));
+  }
+
+  TrueBB->addSuccessor(MergeBB);
+
+  MergeBB->splice(MergeBB->end(), &MBB, MI, MBB.end());
+  MergeBB->transferSuccessors(&MBB);
+
+  MBB.addSuccessor(TrueBB);
+  MBB.addSuccessor(MergeBB);
+
+  NextMBBI = MBB.end();
+  MI.eraseFromParent();
+
+  // Make sure live-ins are correctly attached to this new basic block.
+  LivePhysRegs LiveRegs;
+  computeAndAddLiveIns(LiveRegs, *TrueBB);
+  computeAndAddLiveIns(LiveRegs, *MergeBB);
+
+  return true;
+}
+
+// Lower PseudoVSETVLI/PseudoVSETVLIX0/PseudoVSETIVLI into the real
+// VSETVLI/VSETIVLI, copying the VL and VType operands and the dead flag on
+// the destination register.
+bool RISCVExpandPseudo::expandVSetVL(MachineBasicBlock &MBB,
+                                     MachineBasicBlock::iterator MBBI) {
+  assert(MBBI->getNumExplicitOperands() == 3 && MBBI->getNumOperands() >= 5 &&
+         "Unexpected instruction format");
+
+  DebugLoc DL = MBBI->getDebugLoc();
+
+  assert((MBBI->getOpcode() == RISCV::PseudoVSETVLI ||
+          MBBI->getOpcode() == RISCV::PseudoVSETVLIX0 ||
+          MBBI->getOpcode() == RISCV::PseudoVSETIVLI) &&
+         "Unexpected pseudo instruction");
+  unsigned Opcode;
+  if (MBBI->getOpcode() == RISCV::PseudoVSETIVLI)
+    Opcode = RISCV::VSETIVLI;
+  else
+    Opcode = RISCV::VSETVLI;
+  const MCInstrDesc &Desc = TII->get(Opcode);
+  assert(Desc.getNumOperands() == 3 && "Unexpected instruction format");
+
+  Register DstReg = MBBI->getOperand(0).getReg();
+  bool DstIsDead = MBBI->getOperand(0).isDead();
+  BuildMI(MBB, MBBI, DL, Desc)
+      .addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead))
+      .add(MBBI->getOperand(1)) // VL
+      .add(MBBI->getOperand(2)); // VType
+
+  MBBI->eraseFromParent(); // The pseudo instruction is gone now.
+  return true;
+}
+
+// Lower a vmset/vmclr pseudo into `Opcode vd, vd, vd` (vmxnor/vmxor, per
+// the caller). The inputs are marked undef: the result does not depend on
+// vd's prior contents.
+bool RISCVExpandPseudo::expandVMSET_VMCLR(MachineBasicBlock &MBB,
+                                          MachineBasicBlock::iterator MBBI,
+                                          unsigned Opcode) {
+  DebugLoc DL = MBBI->getDebugLoc();
+  Register DstReg = MBBI->getOperand(0).getReg();
+  const MCInstrDesc &Desc = TII->get(Opcode);
+  BuildMI(MBB, MBBI, DL, Desc, DstReg)
+      .addReg(DstReg, RegState::Undef)
+      .addReg(DstReg, RegState::Undef);
+  MBBI->eraseFromParent(); // The pseudo instruction is gone now.
+  return true;
+}
+
+// Pre-register-allocation pass that expands address-materialization
+// pseudos (PseudoLLA, PseudoLA, PseudoLA_TLS_IE/GD) into AUIPC-based
+// instruction pairs. It runs before RA because the expansion needs a fresh
+// virtual scratch register; the CFG is preserved.
+class RISCVPreRAExpandPseudo : public MachineFunctionPass {
+public:
+  const RISCVInstrInfo *TII; // Cached per-function in runOnMachineFunction.
+  static char ID;
+
+  RISCVPreRAExpandPseudo() : MachineFunctionPass(ID) {
+    initializeRISCVPreRAExpandPseudoPass(*PassRegistry::getPassRegistry());
+  }
+
+  bool runOnMachineFunction(MachineFunction &MF) override;
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.setPreservesCFG();
+    MachineFunctionPass::getAnalysisUsage(AU);
+  }
+  StringRef getPassName() const override {
+    return RISCV_PRERA_EXPAND_PSEUDO_NAME;
+  }
+
+private:
+  // Expand all pseudos within one block; returns true if anything changed.
+  bool expandMBB(MachineBasicBlock &MBB);
+  bool expandMI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+                MachineBasicBlock::iterator &NextMBBI);
+  // Shared AUIPC + second-instruction expansion used by the helpers below.
+  bool expandAuipcInstPair(MachineBasicBlock &MBB,
+                           MachineBasicBlock::iterator MBBI,
+                           MachineBasicBlock::iterator &NextMBBI,
+                           unsigned FlagsHi, unsigned SecondOpcode);
+  bool expandLoadLocalAddress(MachineBasicBlock &MBB,
+                              MachineBasicBlock::iterator MBBI,
+                              MachineBasicBlock::iterator &NextMBBI);
+  bool expandLoadAddress(MachineBasicBlock &MBB,
+                         MachineBasicBlock::iterator MBBI,
+                         MachineBasicBlock::iterator &NextMBBI);
+  bool expandLoadTLSIEAddress(MachineBasicBlock &MBB,
+                              MachineBasicBlock::iterator MBBI,
+                              MachineBasicBlock::iterator &NextMBBI);
+  bool expandLoadTLSGDAddress(MachineBasicBlock &MBB,
+                              MachineBasicBlock::iterator MBBI,
+                              MachineBasicBlock::iterator &NextMBBI);
+};
+
+char RISCVPreRAExpandPseudo::ID = 0;
+
+// Cache the instruction info for this function, then expand pseudos block
+// by block; returns whether any instruction was changed.
+bool RISCVPreRAExpandPseudo::runOnMachineFunction(MachineFunction &MF) {
+  TII = static_cast<const RISCVInstrInfo *>(MF.getSubtarget().getInstrInfo());
+  bool Modified = false;
+  for (auto &MBB : MF)
+    Modified |= expandMBB(MBB);
+  return Modified;
+}
+
+bool RISCVPreRAExpandPseudo::expandMBB(MachineBasicBlock &MBB) {
+  bool Modified = false;
+
+  // Grab the next iterator before expanding, since expandMI erases *MBBI.
+  MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
+  while (MBBI != E) {
+    MachineBasicBlock::iterator NMBBI = std::next(MBBI);
+    Modified |= expandMI(MBB, MBBI, NMBBI);
+    MBBI = NMBBI;
+  }
+
+  return Modified;
+}
+
+// Dispatch address-materialization pseudos to their expansion helpers;
+// returns false for anything else.
+bool RISCVPreRAExpandPseudo::expandMI(MachineBasicBlock &MBB,
+                                      MachineBasicBlock::iterator MBBI,
+                                      MachineBasicBlock::iterator &NextMBBI) {
+
+  switch (MBBI->getOpcode()) {
+  case RISCV::PseudoLLA:
+    return expandLoadLocalAddress(MBB, MBBI, NextMBBI);
+  case RISCV::PseudoLA:
+    return expandLoadAddress(MBB, MBBI, NextMBBI);
+  case RISCV::PseudoLA_TLS_IE:
+    return expandLoadTLSIEAddress(MBB, MBBI, NextMBBI);
+  case RISCV::PseudoLA_TLS_GD:
+    return expandLoadTLSGDAddress(MBB, MBBI, NextMBBI);
+  }
+  return false;
+}
+
+// Expand an AUIPC-based pseudo into AUIPC + SecondOpcode. A named
+// temporary label ("pcrel_hi") is attached to the AUIPC so the second
+// instruction can reference the AUIPC's address via a %pcrel_lo fixup.
+// Needs a fresh virtual register for the AUIPC result, which is why this
+// pass runs before register allocation.
+bool RISCVPreRAExpandPseudo::expandAuipcInstPair(
+    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+    MachineBasicBlock::iterator &NextMBBI, unsigned FlagsHi,
+    unsigned SecondOpcode) {
+  MachineFunction *MF = MBB.getParent();
+  MachineInstr &MI = *MBBI;
+  DebugLoc DL = MI.getDebugLoc();
+
+  Register DestReg = MI.getOperand(0).getReg();
+  Register ScratchReg =
+      MF->getRegInfo().createVirtualRegister(&RISCV::GPRRegClass);
+
+  // Retarget the symbol operand with the caller-specified hi relocation
+  // flag (e.g. MO_PCREL_HI, MO_GOT_HI, ...).
+  MachineOperand &Symbol = MI.getOperand(1);
+  Symbol.setTargetFlags(FlagsHi);
+  MCSymbol *AUIPCSymbol = MF->getContext().createNamedTempSymbol("pcrel_hi");
+
+  MachineInstr *MIAUIPC =
+      BuildMI(MBB, MBBI, DL, TII->get(RISCV::AUIPC), ScratchReg).add(Symbol);
+  MIAUIPC->setPreInstrSymbol(*MF, AUIPCSymbol);
+
+  MachineInstr *SecondMI =
+      BuildMI(MBB, MBBI, DL, TII->get(SecondOpcode), DestReg)
+          .addReg(ScratchReg)
+          .addSym(AUIPCSymbol, RISCVII::MO_PCREL_LO);
+
+  // Carry the pseudo's memory operand over to the second instruction.
+  if (MI.hasOneMemOperand())
+    SecondMI->addMemOperand(*MF, *MI.memoperands_begin());
+
+  MI.eraseFromParent();
+  return true;
+}
+
+// PseudoLLA -> AUIPC %pcrel_hi(sym) + ADDI %pcrel_lo.
+bool RISCVPreRAExpandPseudo::expandLoadLocalAddress(
+    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+    MachineBasicBlock::iterator &NextMBBI) {
+  return expandAuipcInstPair(MBB, MBBI, NextMBBI, RISCVII::MO_PCREL_HI,
+                             RISCV::ADDI);
+}
+
+// PseudoLA -> AUIPC %got_pcrel_hi(sym) + LW/LD (address loaded from GOT).
+bool RISCVPreRAExpandPseudo::expandLoadAddress(
+    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+    MachineBasicBlock::iterator &NextMBBI) {
+  MachineFunction *MF = MBB.getParent();
+
+  const auto &STI = MF->getSubtarget<RISCVSubtarget>();
+  // When HWASAN is used and tagging of global variables is enabled
+  // they should be accessed via the GOT, since the tagged address of a global
+  // is incompatible with existing code models. This also applies to non-pic
+  // mode.
+  assert(MF->getTarget().isPositionIndependent() || STI.allowTaggedGlobals());
+  unsigned SecondOpcode = STI.is64Bit() ? RISCV::LD : RISCV::LW;
+  return expandAuipcInstPair(MBB, MBBI, NextMBBI, RISCVII::MO_GOT_HI,
+                             SecondOpcode);
+}
+
+// PseudoLA_TLS_IE -> AUIPC with MO_TLS_GOT_HI + LW/LD (initial-exec TLS).
+bool RISCVPreRAExpandPseudo::expandLoadTLSIEAddress(
+    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+    MachineBasicBlock::iterator &NextMBBI) {
+  MachineFunction *MF = MBB.getParent();
+
+  const auto &STI = MF->getSubtarget<RISCVSubtarget>();
+  unsigned SecondOpcode = STI.is64Bit() ? RISCV::LD : RISCV::LW;
+  return expandAuipcInstPair(MBB, MBBI, NextMBBI, RISCVII::MO_TLS_GOT_HI,
+                             SecondOpcode);
+}
+
+// PseudoLA_TLS_GD -> AUIPC with MO_TLS_GD_HI + ADDI (general-dynamic TLS).
+bool RISCVPreRAExpandPseudo::expandLoadTLSGDAddress(
+    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+    MachineBasicBlock::iterator &NextMBBI) {
+  return expandAuipcInstPair(MBB, MBBI, NextMBBI, RISCVII::MO_TLS_GD_HI,
+                             RISCV::ADDI);
+}
+
+} // end of anonymous namespace
+
+INITIALIZE_PASS(RISCVExpandPseudo, "riscv-expand-pseudo",
+ RISCV_EXPAND_PSEUDO_NAME, false, false)
+
+INITIALIZE_PASS(RISCVPreRAExpandPseudo, "riscv-prera-expand-pseudo",
+ RISCV_PRERA_EXPAND_PSEUDO_NAME, false, false)
+
+namespace llvm {
+
+// Factory functions for the post-RA and pre-RA pseudo-expansion passes.
+FunctionPass *createRISCVExpandPseudoPass() { return new RISCVExpandPseudo(); }
+FunctionPass *createRISCVPreRAExpandPseudoPass() { return new RISCVPreRAExpandPseudo(); }
+
+} // end of namespace llvm
diff --git a/contrib/libs/llvm16/lib/Target/RISCV/RISCVFrameLowering.cpp b/contrib/libs/llvm16/lib/Target/RISCV/RISCVFrameLowering.cpp
new file mode 100644
index 0000000000..bb55c16bf1
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/RISCV/RISCVFrameLowering.cpp
@@ -0,0 +1,1369 @@
+//===-- RISCVFrameLowering.cpp - RISCV Frame Information ------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the RISCV implementation of TargetFrameLowering class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RISCVFrameLowering.h"
+#include "RISCVMachineFunctionInfo.h"
+#include "RISCVSubtarget.h"
+#include "llvm/BinaryFormat/Dwarf.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/RegisterScavenging.h"
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/MC/MCDwarf.h"
+#include "llvm/Support/LEB128.h"
+
+#include <algorithm>
+
+using namespace llvm;
+
+// For now we use x18, a.k.a s2, as pointer to shadow call stack.
+// User should explicitly set -ffixed-x18 and not use x18 in their asm.
+//
+// Emit the shadow-call-stack prologue: push the return address onto the
+// shadow stack and advance the shadow stack pointer by one XLEN-sized slot.
+// No-op unless the function carries the ShadowCallStack attribute.
+static void emitSCSPrologue(MachineFunction &MF, MachineBasicBlock &MBB,
+                            MachineBasicBlock::iterator MI,
+                            const DebugLoc &DL) {
+  if (!MF.getFunction().hasFnAttribute(Attribute::ShadowCallStack))
+    return;
+
+  const auto &STI = MF.getSubtarget<RISCVSubtarget>();
+  Register RAReg = STI.getRegisterInfo()->getRARegister();
+
+  // Do not save RA to the SCS if it's not saved to the regular stack,
+  // i.e. RA is not at risk of being overwritten.
+  std::vector<CalleeSavedInfo> &CSI = MF.getFrameInfo().getCalleeSavedInfo();
+  if (llvm::none_of(
+          CSI, [&](CalleeSavedInfo &CSR) { return CSR.getReg() == RAReg; }))
+    return;
+
+  Register SCSPReg = RISCVABI::getSCSPReg();
+
+  // The SCS pointer register must have been reserved by the user
+  // (-ffixed-x18); otherwise we would clobber code that uses x18.
+  auto &Ctx = MF.getFunction().getContext();
+  if (!STI.isRegisterReservedByUser(SCSPReg)) {
+    Ctx.diagnose(DiagnosticInfoUnsupported{
+        MF.getFunction(), "x18 not reserved by user for Shadow Call Stack."});
+    return;
+  }
+
+  // Save/restore libcalls manage the callee-saved area themselves, which is
+  // incompatible with interleaving SCS instructions here.
+  const auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
+  if (RVFI->useSaveRestoreLibCalls(MF)) {
+    Ctx.diagnose(DiagnosticInfoUnsupported{
+        MF.getFunction(),
+        "Shadow Call Stack cannot be combined with Save/Restore LibCalls."});
+    return;
+  }
+
+  const RISCVInstrInfo *TII = STI.getInstrInfo();
+  bool IsRV64 = STI.hasFeature(RISCV::Feature64Bit);
+  int64_t SlotSize = STI.getXLen() / 8;
+  // Store return address to shadow call stack
+  // s[w|d] ra, 0(s2)
+  // addi s2, s2, [4|8]
+  BuildMI(MBB, MI, DL, TII->get(IsRV64 ? RISCV::SD : RISCV::SW))
+      .addReg(RAReg)
+      .addReg(SCSPReg)
+      .addImm(0)
+      .setMIFlag(MachineInstr::FrameSetup);
+  BuildMI(MBB, MI, DL, TII->get(RISCV::ADDI))
+      .addReg(SCSPReg, RegState::Define)
+      .addReg(SCSPReg)
+      .addImm(SlotSize)
+      .setMIFlag(MachineInstr::FrameSetup);
+}
+
+// Emit the shadow-call-stack epilogue: reload the return address from the
+// shadow stack and retract the shadow stack pointer by one slot. Mirrors
+// emitSCSPrologue() and performs the same eligibility checks.
+static void emitSCSEpilogue(MachineFunction &MF, MachineBasicBlock &MBB,
+                            MachineBasicBlock::iterator MI,
+                            const DebugLoc &DL) {
+  if (!MF.getFunction().hasFnAttribute(Attribute::ShadowCallStack))
+    return;
+
+  const auto &STI = MF.getSubtarget<RISCVSubtarget>();
+  Register RAReg = STI.getRegisterInfo()->getRARegister();
+
+  // See emitSCSPrologue() above.
+  std::vector<CalleeSavedInfo> &CSI = MF.getFrameInfo().getCalleeSavedInfo();
+  if (llvm::none_of(
+          CSI, [&](CalleeSavedInfo &CSR) { return CSR.getReg() == RAReg; }))
+    return;
+
+  Register SCSPReg = RISCVABI::getSCSPReg();
+
+  auto &Ctx = MF.getFunction().getContext();
+  if (!STI.isRegisterReservedByUser(SCSPReg)) {
+    Ctx.diagnose(DiagnosticInfoUnsupported{
+        MF.getFunction(), "x18 not reserved by user for Shadow Call Stack."});
+    return;
+  }
+
+  const auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
+  if (RVFI->useSaveRestoreLibCalls(MF)) {
+    Ctx.diagnose(DiagnosticInfoUnsupported{
+        MF.getFunction(),
+        "Shadow Call Stack cannot be combined with Save/Restore LibCalls."});
+    return;
+  }
+
+  const RISCVInstrInfo *TII = STI.getInstrInfo();
+  bool IsRV64 = STI.hasFeature(RISCV::Feature64Bit);
+  int64_t SlotSize = STI.getXLen() / 8;
+  // Load return address from shadow call stack
+  // l[w|d] ra, -[4|8](s2)
+  // addi s2, s2, -[4|8]
+  BuildMI(MBB, MI, DL, TII->get(IsRV64 ? RISCV::LD : RISCV::LW))
+      .addReg(RAReg, RegState::Define)
+      .addReg(SCSPReg)
+      .addImm(-SlotSize)
+      .setMIFlag(MachineInstr::FrameDestroy);
+  BuildMI(MBB, MI, DL, TII->get(RISCV::ADDI))
+      .addReg(SCSPReg, RegState::Define)
+      .addReg(SCSPReg)
+      .addImm(-SlotSize)
+      .setMIFlag(MachineInstr::FrameDestroy);
+}
+
+// Get the ID of the libcall used for spilling and restoring callee saved
+// registers. The ID is representative of the number of registers saved or
+// restored by the libcall, except it is zero-indexed - ID 0 corresponds to a
+// single register.
+// Returns -1 when this function does not use save/restore libcalls or no
+// libcall-eligible register is spilled.
+static int getLibCallID(const MachineFunction &MF,
+                        const std::vector<CalleeSavedInfo> &CSI) {
+  const auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
+
+  if (CSI.empty() || !RVFI->useSaveRestoreLibCalls(MF))
+    return -1;
+
+  // The libcalls save contiguous register ranges, so the highest-numbered
+  // libcall-saved register determines which libcall variant to use.
+  Register MaxReg = RISCV::NoRegister;
+  for (auto &CS : CSI)
+    // RISCVRegisterInfo::hasReservedSpillSlot assigns negative frame indexes to
+    // registers which can be saved by libcall.
+    if (CS.getFrameIdx() < 0)
+      MaxReg = std::max(MaxReg.id(), CS.getReg().id());
+
+  if (MaxReg == RISCV::NoRegister)
+    return -1;
+
+  switch (MaxReg) {
+  default:
+    llvm_unreachable("Something has gone wrong!");
+  case /*s11*/ RISCV::X27: return 12;
+  case /*s10*/ RISCV::X26: return 11;
+  case /*s9*/ RISCV::X25: return 10;
+  case /*s8*/ RISCV::X24: return 9;
+  case /*s7*/ RISCV::X23: return 8;
+  case /*s6*/ RISCV::X22: return 7;
+  case /*s5*/ RISCV::X21: return 6;
+  case /*s4*/ RISCV::X20: return 5;
+  case /*s3*/ RISCV::X19: return 4;
+  case /*s2*/ RISCV::X18: return 3;
+  case /*s1*/ RISCV::X9: return 2;
+  case /*s0*/ RISCV::X8: return 1;
+  case /*ra*/ RISCV::X1: return 0;
+  }
+}
+
+// Get the name of the libcall used for spilling callee saved registers.
+// If this function will not use save/restore libcalls, then return a nullptr.
+// The table index is the ID from getLibCallID(); entry names follow the
+// RISC-V psABI __riscv_save_<N> convention.
+static const char *
+getSpillLibCallName(const MachineFunction &MF,
+                    const std::vector<CalleeSavedInfo> &CSI) {
+  static const char *const SpillLibCalls[] = {
+    "__riscv_save_0",
+    "__riscv_save_1",
+    "__riscv_save_2",
+    "__riscv_save_3",
+    "__riscv_save_4",
+    "__riscv_save_5",
+    "__riscv_save_6",
+    "__riscv_save_7",
+    "__riscv_save_8",
+    "__riscv_save_9",
+    "__riscv_save_10",
+    "__riscv_save_11",
+    "__riscv_save_12"
+  };
+
+  int LibCallID = getLibCallID(MF, CSI);
+  if (LibCallID == -1)
+    return nullptr;
+  return SpillLibCalls[LibCallID];
+}
+
+// Get the name of the libcall used for restoring callee saved registers.
+// If this function will not use save/restore libcalls, then return a nullptr.
+// Mirrors getSpillLibCallName() with the __riscv_restore_<N> routines.
+static const char *
+getRestoreLibCallName(const MachineFunction &MF,
+                      const std::vector<CalleeSavedInfo> &CSI) {
+  static const char *const RestoreLibCalls[] = {
+    "__riscv_restore_0",
+    "__riscv_restore_1",
+    "__riscv_restore_2",
+    "__riscv_restore_3",
+    "__riscv_restore_4",
+    "__riscv_restore_5",
+    "__riscv_restore_6",
+    "__riscv_restore_7",
+    "__riscv_restore_8",
+    "__riscv_restore_9",
+    "__riscv_restore_10",
+    "__riscv_restore_11",
+    "__riscv_restore_12"
+  };
+
+  int LibCallID = getLibCallID(MF, CSI);
+  if (LibCallID == -1)
+    return nullptr;
+  return RestoreLibCalls[LibCallID];
+}
+
+// Return true if the specified function should have a dedicated frame
+// pointer register. This is true if frame pointer elimination is
+// disabled, if it needs dynamic stack realignment, if the function has
+// variable sized allocas, or if the frame address is taken.
+bool RISCVFrameLowering::hasFP(const MachineFunction &MF) const {
+  const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
+
+  const MachineFrameInfo &MFI = MF.getFrameInfo();
+  return MF.getTarget().Options.DisableFramePointerElim(MF) ||
+         RegInfo->hasStackRealignment(MF) || MFI.hasVarSizedObjects() ||
+         MFI.isFrameAddressTaken();
+}
+
+// Return true if the function needs a dedicated base pointer: required when
+// the stack is realigned AND stack objects cannot be reliably addressed off
+// SP (variable sized objects, or SP adjusted around calls).
+bool RISCVFrameLowering::hasBP(const MachineFunction &MF) const {
+  const MachineFrameInfo &MFI = MF.getFrameInfo();
+  const TargetRegisterInfo *TRI = STI.getRegisterInfo();
+
+  // If we do not reserve stack space for outgoing arguments in prologue,
+  // we will adjust the stack pointer before call instruction. After the
+  // adjustment, we can not use SP to access the stack objects for the
+  // arguments. Instead, use BP to access these stack objects.
+  return (MFI.hasVarSizedObjects() ||
+          (!hasReservedCallFrame(MF) && (!MFI.isMaxCallFrameSizeComputed() ||
+                                         MFI.getMaxCallFrameSize() != 0))) &&
+         TRI->hasStackRealignment(MF);
+}
+
+// Determines the size of the frame and maximum call frame size.
+// Rounds the frame size up to the stack alignment and, when RVV objects are
+// addressed via SP/BP, records the padding needed to align the RVV section.
+void RISCVFrameLowering::determineFrameLayout(MachineFunction &MF) const {
+  MachineFrameInfo &MFI = MF.getFrameInfo();
+  auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
+
+  // Get the number of bytes to allocate from the FrameInfo.
+  uint64_t FrameSize = MFI.getStackSize();
+
+  // Get the alignment.
+  Align StackAlign = getStackAlign();
+
+  // Make sure the frame is aligned.
+  FrameSize = alignTo(FrameSize, StackAlign);
+
+  // Update frame info.
+  MFI.setStackSize(FrameSize);
+
+  // When using SP or BP to access stack objects, we may require extra padding
+  // to ensure the bottom of the RVV stack is correctly aligned within the main
+  // stack. We calculate this as the amount required to align the scalar local
+  // variable section up to the RVV alignment.
+  const TargetRegisterInfo *TRI = STI.getRegisterInfo();
+  if (RVFI->getRVVStackSize() && (!hasFP(MF) || TRI->hasStackRealignment(MF))) {
+    int ScalarLocalVarSize = FrameSize - RVFI->getCalleeSavedStackSize() -
+                             RVFI->getVarArgsSaveSize();
+    if (auto RVVPadding =
+            offsetToAlignment(ScalarLocalVarSize, RVFI->getRVVStackAlign()))
+      RVFI->setRVVPadding(RVVPadding);
+  }
+}
+
+// Returns the stack size including RVV padding (when required), rounded back
+// up to the required stack alignment.
+uint64_t RISCVFrameLowering::getStackSizeWithRVVPadding(
+    const MachineFunction &MF) const {
+  const MachineFrameInfo &MFI = MF.getFrameInfo();
+  auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
+  return alignTo(MFI.getStackSize() + RVFI->getRVVPadding(), getStackAlign());
+}
+
+// Returns the register used to hold the frame pointer.
+// The STI parameter is unused; it is kept so both accessors share a uniform
+// signature at call sites.
+static Register getFPReg(const RISCVSubtarget &STI) { return RISCV::X8; }
+
+// Returns the register used to hold the stack pointer.
+static Register getSPReg(const RISCVSubtarget &STI) { return RISCV::X2; }
+
+// Filter CSI down to the callee-saved entries that are spilled via ordinary
+// stack slots (non-negative frame index, default stack ID) rather than by
+// the save/restore libcalls or on the RVV stack.
+static SmallVector<CalleeSavedInfo, 8>
+getNonLibcallCSI(const MachineFunction &MF,
+                 const std::vector<CalleeSavedInfo> &CSI) {
+  const MachineFrameInfo &MFI = MF.getFrameInfo();
+  SmallVector<CalleeSavedInfo, 8> NonLibcallCSI;
+
+  for (auto &CS : CSI) {
+    int FI = CS.getFrameIdx();
+    if (FI >= 0 && MFI.getStackID(FI) == TargetStackID::Default)
+      NonLibcallCSI.push_back(CS);
+  }
+
+  return NonLibcallCSI;
+}
+
+// Adjust SP by Amount vector-register-group slots (a scalable offset). When
+// the minimum and maximum VLEN are known to be equal, the scalable amount is
+// folded into a fixed byte offset at compile time.
+void RISCVFrameLowering::adjustStackForRVV(MachineFunction &MF,
+                                           MachineBasicBlock &MBB,
+                                           MachineBasicBlock::iterator MBBI,
+                                           const DebugLoc &DL, int64_t Amount,
+                                           MachineInstr::MIFlag Flag) const {
+  assert(Amount != 0 && "Did not need to adjust stack pointer for RVV.");
+
+  const Register SPReg = getSPReg(STI);
+
+  // Optimize compile time offset case
+  StackOffset Offset = StackOffset::getScalable(Amount);
+  if (STI.getRealMinVLen() == STI.getRealMaxVLen()) {
+    // 1. Multiply the number of v-slots by the (constant) length of register
+    const int64_t VLENB = STI.getRealMinVLen() / 8;
+    assert(Amount % 8 == 0 &&
+           "Reserve the stack by the multiple of one vector size.");
+    const int64_t NumOfVReg = Amount / 8;
+    const int64_t FixedOffset = NumOfVReg * VLENB;
+    if (!isInt<32>(FixedOffset)) {
+      report_fatal_error(
+          "Frame size outside of the signed 32-bit range not supported");
+    }
+    Offset = StackOffset::getFixed(FixedOffset);
+  }
+
+  const RISCVRegisterInfo &RI = *STI.getRegisterInfo();
+  // We must keep the stack pointer aligned through any intermediate
+  // updates.
+  RI.adjustReg(MBB, MBBI, DL, SPReg, SPReg, Offset,
+               Flag, getStackAlign());
+}
+
+// Build a DW_CFA_def_cfa_expression CFI instruction describing a CFA of
+// (Reg + FixedOffset + ScalableOffset * VLENB), needed when the frame
+// contains scalable (RVV) allocations whose byte size is unknown statically.
+// The expression and a human-readable comment are emitted as a raw escape.
+static MCCFIInstruction createDefCFAExpression(const TargetRegisterInfo &TRI,
+                                               Register Reg,
+                                               uint64_t FixedOffset,
+                                               uint64_t ScalableOffset) {
+  assert(ScalableOffset != 0 && "Did not need to adjust CFA for RVV");
+  SmallString<64> Expr;
+  std::string CommentBuffer;
+  llvm::raw_string_ostream Comment(CommentBuffer);
+  // Build up the expression (Reg + FixedOffset + ScalableOffset * VLENB).
+  unsigned DwarfReg = TRI.getDwarfRegNum(Reg, true);
+  // DW_OP_breg<Reg> with a 0 displacement pushes the register's value.
+  Expr.push_back((uint8_t)(dwarf::DW_OP_breg0 + DwarfReg));
+  Expr.push_back(0);
+  if (Reg == RISCV::X2)
+    Comment << "sp";
+  else
+    Comment << printReg(Reg, &TRI);
+
+  uint8_t buffer[16];
+  if (FixedOffset) {
+    Expr.push_back(dwarf::DW_OP_consts);
+    Expr.append(buffer, buffer + encodeSLEB128(FixedOffset, buffer));
+    Expr.push_back((uint8_t)dwarf::DW_OP_plus);
+    Comment << " + " << FixedOffset;
+  }
+
+  Expr.push_back((uint8_t)dwarf::DW_OP_consts);
+  Expr.append(buffer, buffer + encodeSLEB128(ScalableOffset, buffer));
+
+  // DW_OP_bregx pushes VLENB (displacement 0); multiplying by the scalable
+  // count and adding yields the runtime CFA.
+  unsigned DwarfVlenb = TRI.getDwarfRegNum(RISCV::VLENB, true);
+  Expr.push_back((uint8_t)dwarf::DW_OP_bregx);
+  Expr.append(buffer, buffer + encodeULEB128(DwarfVlenb, buffer));
+  Expr.push_back(0);
+
+  Expr.push_back((uint8_t)dwarf::DW_OP_mul);
+  Expr.push_back((uint8_t)dwarf::DW_OP_plus);
+
+  Comment << " + " << ScalableOffset << " * vlenb";
+
+  // Wrap the expression in a DW_CFA_def_cfa_expression with a ULEB length.
+  SmallString<64> DefCfaExpr;
+  DefCfaExpr.push_back(dwarf::DW_CFA_def_cfa_expression);
+  DefCfaExpr.append(buffer, buffer + encodeULEB128(Expr.size(), buffer));
+  DefCfaExpr.append(Expr.str());
+
+  return MCCFIInstruction::createEscape(nullptr, DefCfaExpr.str(),
+                                        Comment.str());
+}
+
+// Emit the function prologue: shadow-call-stack setup, stack allocation
+// (optionally split into two SP adjustments to keep callee-save offsets
+// small), CFI directives for the CFA and callee-saved registers, frame
+// pointer establishment, RVV stack allocation, and stack realignment.
+void RISCVFrameLowering::emitPrologue(MachineFunction &MF,
+                                      MachineBasicBlock &MBB) const {
+  MachineFrameInfo &MFI = MF.getFrameInfo();
+  auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
+  const RISCVRegisterInfo *RI = STI.getRegisterInfo();
+  const RISCVInstrInfo *TII = STI.getInstrInfo();
+  MachineBasicBlock::iterator MBBI = MBB.begin();
+
+  Register FPReg = getFPReg(STI);
+  Register SPReg = getSPReg(STI);
+  Register BPReg = RISCVABI::getBPReg();
+
+  // Debug location must be unknown since the first debug location is used
+  // to determine the end of the prologue.
+  DebugLoc DL;
+
+  // All calls are tail calls in GHC calling conv, and functions have no
+  // prologue/epilogue.
+  if (MF.getFunction().getCallingConv() == CallingConv::GHC)
+    return;
+
+  // Emit prologue for shadow call stack.
+  emitSCSPrologue(MF, MBB, MBBI, DL);
+
+  // Since spillCalleeSavedRegisters may have inserted a libcall, skip past
+  // any instructions marked as FrameSetup
+  while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup))
+    ++MBBI;
+
+  // Determine the correct frame layout
+  determineFrameLayout(MF);
+
+  // If libcalls are used to spill and restore callee-saved registers, the frame
+  // has two sections; the opaque section managed by the libcalls, and the
+  // section managed by MachineFrameInfo which can also hold callee saved
+  // registers in fixed stack slots, both of which have negative frame indices.
+  // This gets even more complicated when incoming arguments are passed via the
+  // stack, as these too have negative frame indices. An example is detailed
+  // below:
+  //
+  //  | incoming arg | <- FI[-3]
+  //  | libcallspill |
+  //  | calleespill  | <- FI[-2]
+  //  | calleespill  | <- FI[-1]
+  //  | this_frame   | <- FI[0]
+  //
+  // For negative frame indices, the offset from the frame pointer will differ
+  // depending on which of these groups the frame index applies to.
+  // The following calculates the correct offset knowing the number of callee
+  // saved registers spilled by the two methods.
+  if (int LibCallRegs = getLibCallID(MF, MFI.getCalleeSavedInfo()) + 1) {
+    // Calculate the size of the frame managed by the libcall. The libcalls are
+    // implemented such that the stack will always be 16 byte aligned.
+    unsigned LibCallFrameSize = alignTo((STI.getXLen() / 8) * LibCallRegs, 16);
+    RVFI->setLibCallStackSize(LibCallFrameSize);
+  }
+
+  // FIXME (note copied from Lanai): This appears to be overallocating.  Needs
+  // investigation. Get the number of bytes to allocate from the FrameInfo.
+  uint64_t StackSize = getStackSizeWithRVVPadding(MF);
+  uint64_t RealStackSize = StackSize + RVFI->getLibCallStackSize();
+  uint64_t RVVStackSize = RVFI->getRVVStackSize();
+
+  // Early exit if there is no need to allocate on the stack
+  if (RealStackSize == 0 && !MFI.adjustsStack() && RVVStackSize == 0)
+    return;
+
+  // If the stack pointer has been marked as reserved, then produce an error if
+  // the frame requires stack allocation
+  if (STI.isRegisterReservedByUser(SPReg))
+    MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
+        MF.getFunction(), "Stack pointer required, but has been reserved."});
+
+  uint64_t FirstSPAdjustAmount = getFirstSPAdjustAmount(MF);
+  // Split the SP adjustment to reduce the offsets of callee saved spill.
+  if (FirstSPAdjustAmount) {
+    StackSize = FirstSPAdjustAmount;
+    RealStackSize = FirstSPAdjustAmount;
+  }
+
+  // Allocate space on the stack if necessary.
+  RI->adjustReg(MBB, MBBI, DL, SPReg, SPReg, StackOffset::getFixed(-StackSize),
+                MachineInstr::FrameSetup, getStackAlign());
+
+  // Emit ".cfi_def_cfa_offset RealStackSize"
+  unsigned CFIIndex = MF.addFrameInst(
+      MCCFIInstruction::cfiDefCfaOffset(nullptr, RealStackSize));
+  BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
+      .addCFIIndex(CFIIndex)
+      .setMIFlag(MachineInstr::FrameSetup);
+
+  const auto &CSI = MFI.getCalleeSavedInfo();
+
+  // The frame pointer is callee-saved, and code has been generated for us to
+  // save it to the stack. We need to skip over the storing of callee-saved
+  // registers as the frame pointer must be modified after it has been saved
+  // to the stack, not before.
+  // FIXME: assumes exactly one instruction is used to save each callee-saved
+  // register.
+  std::advance(MBBI, getNonLibcallCSI(MF, CSI).size());
+
+  // Iterate over list of callee-saved registers and emit .cfi_offset
+  // directives.
+  for (const auto &Entry : CSI) {
+    int FrameIdx = Entry.getFrameIdx();
+    int64_t Offset;
+    // Offsets for objects with fixed locations (IE: those saved by libcall) are
+    // simply calculated from the frame index.
+    if (FrameIdx < 0)
+      Offset = FrameIdx * (int64_t) STI.getXLen() / 8;
+    else
+      Offset = MFI.getObjectOffset(Entry.getFrameIdx()) -
+               RVFI->getLibCallStackSize();
+    Register Reg = Entry.getReg();
+    unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createOffset(
+        nullptr, RI->getDwarfRegNum(Reg, true), Offset));
+    BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
+        .addCFIIndex(CFIIndex)
+        .setMIFlag(MachineInstr::FrameSetup);
+  }
+
+  // Generate new FP.
+  if (hasFP(MF)) {
+    if (STI.isRegisterReservedByUser(FPReg))
+      MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
+          MF.getFunction(), "Frame pointer required, but has been reserved."});
+    // The frame pointer does need to be reserved from register allocation.
+    assert(MF.getRegInfo().isReserved(FPReg) && "FP not reserved");
+
+    RI->adjustReg(MBB, MBBI, DL, FPReg, SPReg,
+                  StackOffset::getFixed(RealStackSize - RVFI->getVarArgsSaveSize()),
+                  MachineInstr::FrameSetup, getStackAlign());
+
+    // Emit ".cfi_def_cfa $fp, RVFI->getVarArgsSaveSize()"
+    unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::cfiDefCfa(
+        nullptr, RI->getDwarfRegNum(FPReg, true), RVFI->getVarArgsSaveSize()));
+    BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
+        .addCFIIndex(CFIIndex)
+        .setMIFlag(MachineInstr::FrameSetup);
+  }
+
+  // Emit the second SP adjustment after saving callee saved registers.
+  if (FirstSPAdjustAmount) {
+    uint64_t SecondSPAdjustAmount =
+        getStackSizeWithRVVPadding(MF) - FirstSPAdjustAmount;
+    assert(SecondSPAdjustAmount > 0 &&
+           "SecondSPAdjustAmount should be greater than zero");
+    RI->adjustReg(MBB, MBBI, DL, SPReg, SPReg,
+                  StackOffset::getFixed(-SecondSPAdjustAmount),
+                  MachineInstr::FrameSetup, getStackAlign());
+
+    // If we are using a frame-pointer, and thus emitted ".cfi_def_cfa fp, 0",
+    // don't emit an sp-based .cfi_def_cfa_offset
+    if (!hasFP(MF)) {
+      // Emit ".cfi_def_cfa_offset StackSize"
+      unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::cfiDefCfaOffset(
+          nullptr, getStackSizeWithRVVPadding(MF)));
+      BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
+          .addCFIIndex(CFIIndex)
+          .setMIFlag(MachineInstr::FrameSetup);
+    }
+  }
+
+  if (RVVStackSize) {
+    adjustStackForRVV(MF, MBB, MBBI, DL, -RVVStackSize,
+                      MachineInstr::FrameSetup);
+    if (!hasFP(MF)) {
+      // Emit .cfi_def_cfa_expression "sp + StackSize + RVVStackSize * vlenb".
+      unsigned CFIIndex = MF.addFrameInst(createDefCFAExpression(
+          *RI, SPReg, getStackSizeWithRVVPadding(MF), RVVStackSize / 8));
+      BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
+          .addCFIIndex(CFIIndex)
+          .setMIFlag(MachineInstr::FrameSetup);
+    }
+  }
+
+  if (hasFP(MF)) {
+    // Realign Stack
+    // NOTE(review): these locals shadow the outer RI/TII; they come from the
+    // same subtarget, so this is harmless.
+    const RISCVRegisterInfo *RI = STI.getRegisterInfo();
+    if (RI->hasStackRealignment(MF)) {
+      Align MaxAlignment = MFI.getMaxAlign();
+
+      const RISCVInstrInfo *TII = STI.getInstrInfo();
+      // Prefer a single ANDI when the alignment mask fits a 12-bit
+      // immediate; otherwise clear the low bits via a shift pair.
+      if (isInt<12>(-(int)MaxAlignment.value())) {
+        BuildMI(MBB, MBBI, DL, TII->get(RISCV::ANDI), SPReg)
+            .addReg(SPReg)
+            .addImm(-(int)MaxAlignment.value())
+            .setMIFlag(MachineInstr::FrameSetup);
+      } else {
+        unsigned ShiftAmount = Log2(MaxAlignment);
+        Register VR =
+            MF.getRegInfo().createVirtualRegister(&RISCV::GPRRegClass);
+        BuildMI(MBB, MBBI, DL, TII->get(RISCV::SRLI), VR)
+            .addReg(SPReg)
+            .addImm(ShiftAmount)
+            .setMIFlag(MachineInstr::FrameSetup);
+        BuildMI(MBB, MBBI, DL, TII->get(RISCV::SLLI), SPReg)
+            .addReg(VR)
+            .addImm(ShiftAmount)
+            .setMIFlag(MachineInstr::FrameSetup);
+      }
+      // FP will be used to restore the frame in the epilogue, so we need
+      // another base register BP to record SP after re-alignment. SP will
+      // track the current stack after allocating variable sized objects.
+      if (hasBP(MF)) {
+        // move BP, SP
+        BuildMI(MBB, MBBI, DL, TII->get(RISCV::ADDI), BPReg)
+            .addReg(SPReg)
+            .addImm(0)
+            .setMIFlag(MachineInstr::FrameSetup);
+      }
+    }
+  }
+}
+
+// Emit the function epilogue: undo the prologue in reverse order — restore
+// SP (from FP when its value is unknown, or by popping the RVV area),
+// perform the split SP re-adjustment if one was used, deallocate the frame,
+// and emit the shadow-call-stack epilogue.
+void RISCVFrameLowering::emitEpilogue(MachineFunction &MF,
+                                      MachineBasicBlock &MBB) const {
+  const RISCVRegisterInfo *RI = STI.getRegisterInfo();
+  MachineFrameInfo &MFI = MF.getFrameInfo();
+  auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
+  Register FPReg = getFPReg(STI);
+  Register SPReg = getSPReg(STI);
+
+  // All calls are tail calls in GHC calling conv, and functions have no
+  // prologue/epilogue.
+  if (MF.getFunction().getCallingConv() == CallingConv::GHC)
+    return;
+
+  // Get the insert location for the epilogue. If there were no terminators in
+  // the block, get the last instruction.
+  MachineBasicBlock::iterator MBBI = MBB.end();
+  DebugLoc DL;
+  if (!MBB.empty()) {
+    MBBI = MBB.getLastNonDebugInstr();
+    if (MBBI != MBB.end())
+      DL = MBBI->getDebugLoc();
+
+    MBBI = MBB.getFirstTerminator();
+
+    // If callee-saved registers are saved via libcall, place stack adjustment
+    // before this call.
+    while (MBBI != MBB.begin() &&
+           std::prev(MBBI)->getFlag(MachineInstr::FrameDestroy))
+      --MBBI;
+  }
+
+  const auto &CSI = getNonLibcallCSI(MF, MFI.getCalleeSavedInfo());
+
+  // Skip to before the restores of callee-saved registers
+  // FIXME: assumes exactly one instruction is used to restore each
+  // callee-saved register.
+  auto LastFrameDestroy = MBBI;
+  if (!CSI.empty())
+    LastFrameDestroy = std::prev(MBBI, CSI.size());
+
+  uint64_t StackSize = getStackSizeWithRVVPadding(MF);
+  uint64_t RealStackSize = StackSize + RVFI->getLibCallStackSize();
+  uint64_t FPOffset = RealStackSize - RVFI->getVarArgsSaveSize();
+  uint64_t RVVStackSize = RVFI->getRVVStackSize();
+
+  // Restore the stack pointer using the value of the frame pointer. Only
+  // necessary if the stack pointer was modified, meaning the stack size is
+  // unknown.
+  //
+  // In order to make sure the stack pointer is right through the EH region,
+  // we also need to restore stack pointer from the frame pointer if we
+  // don't preserve stack space within prologue/epilogue for outgoing variables,
+  // normally it's just checking the variable sized object is present or not
+  // is enough, but we also don't preserve that at prologue/epilogue when
+  // have vector objects in stack.
+  if (RI->hasStackRealignment(MF) || MFI.hasVarSizedObjects() ||
+      !hasReservedCallFrame(MF)) {
+    assert(hasFP(MF) && "frame pointer should not have been eliminated");
+    RI->adjustReg(MBB, LastFrameDestroy, DL, SPReg, FPReg,
+                  StackOffset::getFixed(-FPOffset),
+                  MachineInstr::FrameDestroy, getStackAlign());
+  } else {
+    // SP is still exact: pop the RVV area by adding its scalable size back.
+    if (RVVStackSize)
+      adjustStackForRVV(MF, MBB, LastFrameDestroy, DL, RVVStackSize,
+                        MachineInstr::FrameDestroy);
+  }
+
+  uint64_t FirstSPAdjustAmount = getFirstSPAdjustAmount(MF);
+  if (FirstSPAdjustAmount) {
+    uint64_t SecondSPAdjustAmount =
+        getStackSizeWithRVVPadding(MF) - FirstSPAdjustAmount;
+    assert(SecondSPAdjustAmount > 0 &&
+           "SecondSPAdjustAmount should be greater than zero");
+
+    RI->adjustReg(MBB, LastFrameDestroy, DL, SPReg, SPReg,
+                  StackOffset::getFixed(SecondSPAdjustAmount),
+                  MachineInstr::FrameDestroy, getStackAlign());
+  }
+
+  // Mirror the prologue: only the first (smaller) adjustment remains.
+  if (FirstSPAdjustAmount)
+    StackSize = FirstSPAdjustAmount;
+
+  // Deallocate stack
+  RI->adjustReg(MBB, MBBI, DL, SPReg, SPReg, StackOffset::getFixed(StackSize),
+                MachineInstr::FrameDestroy, getStackAlign());
+
+  // Emit epilogue for shadow call stack.
+  emitSCSEpilogue(MF, MBB, MBBI, DL);
+}
+
+// Compute the base register and offset (fixed + scalable parts) with which
+// frame index FI should be accessed: SP for callee-saved slots, otherwise
+// FP, BP, or SP depending on stack realignment and variable-sized objects.
+StackOffset
+RISCVFrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
+                                           Register &FrameReg) const {
+  const MachineFrameInfo &MFI = MF.getFrameInfo();
+  const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
+  const auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
+
+  // Callee-saved registers should be referenced relative to the stack
+  // pointer (positive offset), otherwise use the frame pointer (negative
+  // offset).
+  const auto &CSI = getNonLibcallCSI(MF, MFI.getCalleeSavedInfo());
+  int MinCSFI = 0;
+  int MaxCSFI = -1;
+  StackOffset Offset;
+  auto StackID = MFI.getStackID(FI);
+
+  assert((StackID == TargetStackID::Default ||
+          StackID == TargetStackID::ScalableVector) &&
+         "Unexpected stack ID for the frame object.");
+  if (StackID == TargetStackID::Default) {
+    Offset =
+        StackOffset::getFixed(MFI.getObjectOffset(FI) - getOffsetOfLocalArea() +
+                              MFI.getOffsetAdjustment());
+  } else if (StackID == TargetStackID::ScalableVector) {
+    Offset = StackOffset::getScalable(MFI.getObjectOffset(FI));
+  }
+
+  uint64_t FirstSPAdjustAmount = getFirstSPAdjustAmount(MF);
+
+  if (CSI.size()) {
+    MinCSFI = CSI[0].getFrameIdx();
+    MaxCSFI = CSI[CSI.size() - 1].getFrameIdx();
+  }
+
+  // Callee-saved slots are always addressed off SP; with a split SP
+  // adjustment only the first amount has been subtracted at save time.
+  if (FI >= MinCSFI && FI <= MaxCSFI) {
+    FrameReg = RISCV::X2;
+
+    if (FirstSPAdjustAmount)
+      Offset += StackOffset::getFixed(FirstSPAdjustAmount);
+    else
+      Offset += StackOffset::getFixed(getStackSizeWithRVVPadding(MF));
+    return Offset;
+  }
+
+  if (RI->hasStackRealignment(MF) && !MFI.isFixedObjectIndex(FI)) {
+    // If the stack was realigned, the frame pointer is set in order to allow
+    // SP to be restored, so we need another base register to record the stack
+    // after realignment.
+    // |--------------------------| -- <-- FP
+    // | callee-allocated save    | | <----|
+    // | area for register varargs| |      |
+    // |--------------------------| |      |
+    // | callee-saved registers   | |      |
+    // |--------------------------| --     |
+    // | realignment (the size of | |      |
+    // | this area is not counted | |      |
+    // | in MFI.getStackSize())   | |      |
+    // |--------------------------| --     |-- MFI.getStackSize()
+    // | RVV alignment padding    | |      |
+    // | (not counted in          | |      |
+    // | MFI.getStackSize() but   | |      |
+    // | counted in               | |      |
+    // | RVFI.getRVVStackSize())  | |      |
+    // |--------------------------| --     |
+    // | RVV objects              | |      |
+    // | (not counted in          | |      |
+    // | MFI.getStackSize())      | |      |
+    // |--------------------------| --     |
+    // | padding before RVV       | |      |
+    // | (not counted in          | |      |
+    // | MFI.getStackSize() or in | |      |
+    // | RVFI.getRVVStackSize())  | |      |
+    // |--------------------------| --     |
+    // | scalar local variables   | | <----'
+    // |--------------------------| -- <-- BP (if var sized objects present)
+    // | VarSize objects          | |
+    // |--------------------------| -- <-- SP
+    if (hasBP(MF)) {
+      FrameReg = RISCVABI::getBPReg();
+    } else {
+      // VarSize objects must be empty in this case!
+      assert(!MFI.hasVarSizedObjects());
+      FrameReg = RISCV::X2;
+    }
+  } else {
+    FrameReg = RI->getFrameRegister(MF);
+  }
+
+  if (FrameReg == getFPReg(STI)) {
+    Offset += StackOffset::getFixed(RVFI->getVarArgsSaveSize());
+    // Only non-fixed objects sit below the libcall-managed save area.
+    if (FI >= 0)
+      Offset -= StackOffset::getFixed(RVFI->getLibCallStackSize());
+    // When using FP to access scalable vector objects, we need to minus
+    // the frame size.
+    //
+    // |--------------------------| -- <-- FP
+    // | callee-allocated save    | |
+    // | area for register varargs| |
+    // |--------------------------| |
+    // | callee-saved registers   | |
+    // |--------------------------| | MFI.getStackSize()
+    // | scalar local variables   | |
+    // |--------------------------| -- (Offset of RVV objects is from here.)
+    // | RVV objects              |
+    // |--------------------------|
+    // | VarSize objects          |
+    // |--------------------------| <-- SP
+    if (MFI.getStackID(FI) == TargetStackID::ScalableVector) {
+      assert(!RI->hasStackRealignment(MF) &&
+             "Can't index across variable sized realign");
+      // We don't expect any extra RVV alignment padding, as the stack size
+      // and RVV object sections should be correctly aligned in their own
+      // right.
+      assert(MFI.getStackSize() == getStackSizeWithRVVPadding(MF) &&
+             "Inconsistent stack layout");
+      Offset -= StackOffset::getFixed(MFI.getStackSize());
+    }
+    return Offset;
+  }
+
+  // This case handles indexing off both SP and BP.
+  // If indexing off SP, there must not be any var sized objects
+  assert(FrameReg == RISCVABI::getBPReg() || !MFI.hasVarSizedObjects());
+
+  // When using SP to access frame objects, we need to add RVV stack size.
+  //
+  // |--------------------------| -- <-- FP
+  // | callee-allocated save    | | <----|
+  // | area for register varargs| |      |
+  // |--------------------------| |      |
+  // | callee-saved registers   | |      |
+  // |--------------------------| --     |
+  // | RVV alignment padding    | |      |
+  // | (not counted in          | |      |
+  // | MFI.getStackSize() but   | |      |
+  // | counted in               | |      |
+  // | RVFI.getRVVStackSize())  | |      |
+  // |--------------------------| --     |
+  // | RVV objects              | |      |-- MFI.getStackSize()
+  // | (not counted in          | |      |
+  // | MFI.getStackSize())      | |      |
+  // |--------------------------| --     |
+  // | padding before RVV       | |      |
+  // | (not counted in          | |      |
+  // | MFI.getStackSize())      | |      |
+  // |--------------------------| --     |
+  // | scalar local variables   | | <----'
+  // |--------------------------| -- <-- BP (if var sized objects present)
+  // | VarSize objects          | |
+  // |--------------------------| -- <-- SP
+  //
+  // The total amount of padding surrounding RVV objects is described by
+  // RVV->getRVVPadding() and it can be zero. It allows us to align the RVV
+  // objects to the required alignment.
+  if (MFI.getStackID(FI) == TargetStackID::Default) {
+    if (MFI.isFixedObjectIndex(FI)) {
+      assert(!RI->hasStackRealignment(MF) &&
+             "Can't index across variable sized realign");
+      Offset += StackOffset::get(getStackSizeWithRVVPadding(MF) +
+                                     RVFI->getLibCallStackSize(),
+                                 RVFI->getRVVStackSize());
+    } else {
+      Offset += StackOffset::getFixed(MFI.getStackSize());
+    }
+  } else if (MFI.getStackID(FI) == TargetStackID::ScalableVector) {
+    // Ensure the base of the RVV stack is correctly aligned: add on the
+    // alignment padding.
+    int ScalarLocalVarSize =
+        MFI.getStackSize() - RVFI->getCalleeSavedStackSize() -
+        RVFI->getVarArgsSaveSize() + RVFI->getRVVPadding();
+    Offset += StackOffset::get(ScalarLocalVarSize, RVFI->getRVVStackSize());
+  }
+  return Offset;
+}
+
+// Compute the set of callee-saved registers that must be spilled for MF.
+// On top of the target-independent defaults this:
+//  * forces RA (X1) and FP (X8) when the function uses a frame pointer,
+//  * reserves the base-pointer register when one is needed, and
+//  * for "interrupt" functions that make calls, saves all caller-saved GPRs
+//    (and, when the F extension is present, every FP register).
+void RISCVFrameLowering::determineCalleeSaves(MachineFunction &MF,
+                                              BitVector &SavedRegs,
+                                              RegScavenger *RS) const {
+  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
+  // Unconditionally spill RA and FP only if the function uses a frame
+  // pointer.
+  if (hasFP(MF)) {
+    SavedRegs.set(RISCV::X1);
+    SavedRegs.set(RISCV::X8);
+  }
+  // Mark BP as used if function has dedicated base pointer.
+  if (hasBP(MF))
+    SavedRegs.set(RISCVABI::getBPReg());
+
+  // If interrupt is enabled and there are calls in the handler,
+  // unconditionally save all Caller-saved registers and
+  // all FP registers, regardless whether they are used.
+  MachineFrameInfo &MFI = MF.getFrameInfo();
+
+  if (MF.getFunction().hasFnAttribute("interrupt") && MFI.hasCalls()) {
+
+    // Zero-terminated list of the integer registers clobbered by a call
+    // under the standard calling convention.
+    static const MCPhysReg CSRegs[] = { RISCV::X1, /* ra */
+      RISCV::X5, RISCV::X6, RISCV::X7,                  /* t0-t2 */
+      RISCV::X10, RISCV::X11,                           /* a0-a1, a2-a7 */
+      RISCV::X12, RISCV::X13, RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17,
+      RISCV::X28, RISCV::X29, RISCV::X30, RISCV::X31, 0 /* t3-t6 */
+    };
+
+    for (unsigned i = 0; CSRegs[i]; ++i)
+      SavedRegs.set(CSRegs[i]);
+
+    if (MF.getSubtarget<RISCVSubtarget>().hasStdExtF()) {
+
+      // If interrupt is enabled, this list contains all FP registers.
+      const MCPhysReg * Regs = MF.getRegInfo().getCalleeSavedRegs();
+
+      // Filter the CSR list down to just the FP register classes (the
+      // integer CSRs were already handled above).
+      for (unsigned i = 0; Regs[i]; ++i)
+        if (RISCV::FPR16RegClass.contains(Regs[i]) ||
+            RISCV::FPR32RegClass.contains(Regs[i]) ||
+            RISCV::FPR64RegClass.contains(Regs[i]))
+          SavedRegs.set(Regs[i]);
+    }
+  }
+}
+
+// Assign frame offsets to every live scalable-vector (RVV) stack object and
+// return the total RVV stack size together with the alignment required for
+// the RVV region. Offsets are negative (objects grow down from the RVV base).
+std::pair<int64_t, Align>
+RISCVFrameLowering::assignRVVStackObjectOffsets(MachineFunction &MF) const {
+  MachineFrameInfo &MFI = MF.getFrameInfo();
+  // Create a buffer of RVV objects to allocate.
+  SmallVector<int, 8> ObjectsToAllocate;
+  for (int I = 0, E = MFI.getObjectIndexEnd(); I != E; ++I) {
+    unsigned StackID = MFI.getStackID(I);
+    if (StackID != TargetStackID::ScalableVector)
+      continue;
+    if (MFI.isDeadObjectIndex(I))
+      continue;
+
+    ObjectsToAllocate.push_back(I);
+  }
+
+  // The minimum alignment is 16 bytes.
+  Align RVVStackAlign(16);
+  const auto &ST = MF.getSubtarget<RISCVSubtarget>();
+
+  if (!ST.hasVInstructions()) {
+    assert(ObjectsToAllocate.empty() &&
+           "Can't allocate scalable-vector objects without V instructions");
+    return std::make_pair(0, RVVStackAlign);
+  }
+
+  // Allocate all RVV locals and spills
+  int64_t Offset = 0;
+  for (int FI : ObjectsToAllocate) {
+    // ObjectSize in bytes.
+    int64_t ObjectSize = MFI.getObjectSize(FI);
+    auto ObjectAlign = std::max(Align(8), MFI.getObjectAlign(FI));
+    // If the data type is the fractional vector type, reserve one vector
+    // register for it.
+    if (ObjectSize < 8)
+      ObjectSize = 8;
+    Offset = alignTo(Offset + ObjectSize, ObjectAlign);
+    MFI.setObjectOffset(FI, -Offset);
+    // Update the maximum alignment of the RVV stack section
+    RVVStackAlign = std::max(RVVStackAlign, ObjectAlign);
+  }
+
+  // Ensure the alignment of the RVV stack. Since we want the most-aligned
+  // object right at the bottom (i.e., any padding at the top of the frame),
+  // readjust all RVV objects down by the alignment padding.
+  uint64_t StackSize = Offset;
+  if (auto AlignmentPadding = offsetToAlignment(StackSize, RVVStackAlign)) {
+    StackSize += AlignmentPadding;
+    for (int FI : ObjectsToAllocate)
+      MFI.setObjectOffset(FI, MFI.getObjectOffset(FI) - AlignmentPadding);
+  }
+
+  return std::make_pair(StackSize, RVVStackAlign);
+}
+
+// Return the number of emergency scavenging slots required by RVV frame
+// accesses in \p MF. RVV loads/stores cannot encode immediate stack offsets,
+// so address computation may need up to two scratch registers for spills of
+// scalable objects, one for non-scalable RVV spills, and one for ADDIs that
+// address scalable objects.
+static unsigned getScavSlotsNumForRVV(MachineFunction &MF) {
+  // For RVV spill, scalable stack offsets computing requires up to two scratch
+  // registers
+  static constexpr unsigned ScavSlotsNumRVVSpillScalableObject = 2;
+
+  // For RVV spill, non-scalable stack offsets computing requires up to one
+  // scratch register.
+  static constexpr unsigned ScavSlotsNumRVVSpillNonScalableObject = 1;
+
+  // ADDI instruction's destination register can be used for computing
+  // offsets. So Scalable stack offsets require up to one scratch register.
+  static constexpr unsigned ScavSlotsADDIScalableObject = 1;
+
+  static constexpr unsigned MaxScavSlotsNumKnown =
+      std::max({ScavSlotsADDIScalableObject, ScavSlotsNumRVVSpillScalableObject,
+                ScavSlotsNumRVVSpillNonScalableObject});
+
+  unsigned MaxScavSlotsNum = 0;
+  // Without the V extension there can be no RVV frame accesses at all.
+  // Note: return 0 (not `false`) — the function yields an unsigned count.
+  if (!MF.getSubtarget<RISCVSubtarget>().hasVInstructions())
+    return 0;
+  for (const MachineBasicBlock &MBB : MF)
+    for (const MachineInstr &MI : MBB) {
+      bool IsRVVSpill = RISCV::isRVVSpill(MI);
+      for (auto &MO : MI.operands()) {
+        if (!MO.isFI())
+          continue;
+        bool IsScalableVectorID = MF.getFrameInfo().getStackID(MO.getIndex()) ==
+                                  TargetStackID::ScalableVector;
+        if (IsRVVSpill) {
+          MaxScavSlotsNum = std::max(
+              MaxScavSlotsNum, IsScalableVectorID
+                                   ? ScavSlotsNumRVVSpillScalableObject
+                                   : ScavSlotsNumRVVSpillNonScalableObject);
+        } else if (MI.getOpcode() == RISCV::ADDI && IsScalableVectorID) {
+          MaxScavSlotsNum =
+              std::max(MaxScavSlotsNum, ScavSlotsADDIScalableObject);
+        }
+      }
+      // Early exit once we have hit the known upper bound.
+      if (MaxScavSlotsNum == MaxScavSlotsNumKnown)
+        return MaxScavSlotsNumKnown;
+    }
+  return MaxScavSlotsNum;
+}
+
+// Conservatively report whether MF may contain RVV frame objects.
+static bool hasRVVFrameObject(const MachineFunction &MF) {
+  // Originally, the function will scan all the stack objects to check whether
+  // if there is any scalable vector object on the stack or not. However, it
+  // causes errors in the register allocator. In issue 53016, it returns false
+  // before RA because there is no RVV stack objects. After RA, it returns true
+  // because there are spilling slots for RVV values during RA. It will not
+  // reserve BP during register allocation and generate BP access in the PEI
+  // pass due to the inconsistent behavior of the function.
+  //
+  // The function is changed to use hasVInstructions() as the return value. It
+  // is not precise, but it can make the register allocation correct.
+  //
+  // FIXME: Find a better way to make the decision or revisit the solution in
+  // D103622.
+  //
+  // Refer to https://github.com/llvm/llvm-project/issues/53016.
+  return MF.getSubtarget<RISCVSubtarget>().hasVInstructions();
+}
+
+// Conservatively estimate the code size of MF in bytes, assuming every
+// branch gets relaxed to its worst-case expansion. Used to decide whether a
+// spill slot must be reserved for the branch-relaxation scratch register.
+static unsigned estimateFunctionSizeInBytes(const MachineFunction &MF,
+                                            const RISCVInstrInfo &TII) {
+  unsigned FnSize = 0;
+  for (auto &MBB : MF) {
+    for (auto &MI : MBB) {
+      // Far branches over 20-bit offset will be relaxed in branch relaxation
+      // pass. In the worst case, conditional branches will be relaxed into
+      // the following instruction sequence. Unconditional branches are
+      // relaxed in the same way, with the exception that there is no first
+      // branch instruction.
+      //
+      // foo
+      //    bne     t5, t6, .rev_cond # `TII->getInstSizeInBytes(MI)` bytes
+      //    sd      s11, 0(sp)        # 4 bytes, or 2 bytes in RVC
+      //    jump    .restore, s11     # 8 bytes
+      // .rev_cond
+      // bar
+      //    j       .dest_bb          # 4 bytes, or 2 bytes in RVC
+      // .restore:
+      //    ld      s11, 0(sp)        # 4 bytes, or 2 bytes in RVC
+      // .dest:
+      // baz
+      if (MI.isConditionalBranch())
+        FnSize += TII.getInstSizeInBytes(MI);
+      if (MI.isConditionalBranch() || MI.isUnconditionalBranch()) {
+        // Worst-case relaxed sequence; RVC halves the 4-byte instructions.
+        if (MF.getSubtarget<RISCVSubtarget>().hasStdExtC())
+          FnSize += 2 + 8 + 2 + 2;
+        else
+          FnSize += 4 + 8 + 4 + 4;
+        continue;
+      }
+
+      FnSize += TII.getInstSizeInBytes(MI);
+    }
+  }
+  return FnSize;
+}
+
+// Finalize target-specific frame layout before PEI assigns final offsets:
+// lay out the RVV stack region, make sure the whole frame honours the RVV
+// alignment, reserve emergency scavenging slots (large frames, far branches,
+// RVV spills), and record the callee-saved stack size in RVFI.
+void RISCVFrameLowering::processFunctionBeforeFrameFinalized(
+    MachineFunction &MF, RegScavenger *RS) const {
+  const RISCVRegisterInfo *RegInfo =
+      MF.getSubtarget<RISCVSubtarget>().getRegisterInfo();
+  const RISCVInstrInfo *TII = MF.getSubtarget<RISCVSubtarget>().getInstrInfo();
+  MachineFrameInfo &MFI = MF.getFrameInfo();
+  const TargetRegisterClass *RC = &RISCV::GPRRegClass;
+  auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
+
+  int64_t RVVStackSize;
+  Align RVVStackAlign;
+  std::tie(RVVStackSize, RVVStackAlign) = assignRVVStackObjectOffsets(MF);
+
+  RVFI->setRVVStackSize(RVVStackSize);
+  RVFI->setRVVStackAlign(RVVStackAlign);
+
+  if (hasRVVFrameObject(MF)) {
+    // Ensure the entire stack is aligned to at least the RVV requirement: some
+    // scalable-vector object alignments are not considered by the
+    // target-independent code.
+    MFI.ensureMaxAlignment(RVVStackAlign);
+  }
+
+  unsigned ScavSlotsNum = 0;
+
+  // estimateStackSize has been observed to under-estimate the final stack
+  // size, so give ourselves wiggle-room by checking for stack size
+  // representable an 11-bit signed field rather than 12-bits.
+  if (!isInt<11>(MFI.estimateStackSize(MF)))
+    ScavSlotsNum = 1;
+
+  // Far branches over 20-bit offset require a spill slot for scratch register.
+  bool IsLargeFunction = !isInt<20>(estimateFunctionSizeInBytes(MF, *TII));
+  if (IsLargeFunction)
+    ScavSlotsNum = std::max(ScavSlotsNum, 1u);
+
+  // RVV loads & stores have no capacity to hold the immediate address offsets
+  // so we must always reserve an emergency spill slot if the MachineFunction
+  // contains any RVV spills.
+  ScavSlotsNum = std::max(ScavSlotsNum, getScavSlotsNumForRVV(MF));
+
+  for (unsigned I = 0; I < ScavSlotsNum; I++) {
+    int FI = MFI.CreateStackObject(RegInfo->getSpillSize(*RC),
+                                   RegInfo->getSpillAlign(*RC), false);
+    RS->addScavengingFrameIndex(FI);
+
+    // The first scavenging slot doubles as the branch-relaxation scratch
+    // slot for large functions.
+    if (IsLargeFunction && RVFI->getBranchRelaxationScratchFrameIndex() == -1)
+      RVFI->setBranchRelaxationScratchFrameIndex(FI);
+  }
+
+  if (MFI.getCalleeSavedInfo().empty() || RVFI->useSaveRestoreLibCalls(MF)) {
+    RVFI->setCalleeSavedStackSize(0);
+    return;
+  }
+
+  // Sum the sizes of all default-stack CSR spill slots (RVV CSR slots, if
+  // any, live in the scalable region and are excluded).
+  unsigned Size = 0;
+  for (const auto &Info : MFI.getCalleeSavedInfo()) {
+    int FrameIdx = Info.getFrameIdx();
+    if (MFI.getStackID(FrameIdx) != TargetStackID::Default)
+      continue;
+
+    Size += MFI.getObjectSize(FrameIdx);
+  }
+  RVFI->setCalleeSavedStackSize(Size);
+}
+
+// Do not reserve stack space in the prologue for outgoing call arguments
+// when the function contains variable-sized objects, or when vector objects
+// are accessed through the frame pointer. In those cases
+// eliminateCallFramePseudoInstr adjusts SP around each call site instead.
+bool RISCVFrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
+  return !MF.getFrameInfo().hasVarSizedObjects() &&
+         !(hasFP(MF) && hasRVVFrameObject(MF));
+}
+
+// Eliminate ADJCALLSTACKDOWN, ADJCALLSTACKUP pseudo instructions.
+// When no call frame was reserved in the prologue, this lowers them to real
+// SP adjustments; otherwise they are simply erased.
+MachineBasicBlock::iterator RISCVFrameLowering::eliminateCallFramePseudoInstr(
+    MachineFunction &MF, MachineBasicBlock &MBB,
+    MachineBasicBlock::iterator MI) const {
+  Register SPReg = RISCV::X2;
+  DebugLoc DL = MI->getDebugLoc();
+
+  if (!hasReservedCallFrame(MF)) {
+    // If space has not been reserved for a call frame, ADJCALLSTACKDOWN and
+    // ADJCALLSTACKUP must be converted to instructions manipulating the stack
+    // pointer. This is necessary when there is a variable length stack
+    // allocation (e.g. alloca), which means it's not possible to allocate
+    // space for outgoing arguments from within the function prologue.
+    int64_t Amount = MI->getOperand(0).getImm();
+
+    if (Amount != 0) {
+      // Ensure the stack remains aligned after adjustment.
+      Amount = alignSPAdjust(Amount);
+
+      // DOWN grows the stack (negative adjustment); UP shrinks it back.
+      if (MI->getOpcode() == RISCV::ADJCALLSTACKDOWN)
+        Amount = -Amount;
+
+      const RISCVRegisterInfo &RI = *STI.getRegisterInfo();
+      RI.adjustReg(MBB, MI, DL, SPReg, SPReg, StackOffset::getFixed(Amount),
+                   MachineInstr::NoFlags, getStackAlign());
+    }
+  }
+
+  return MBB.erase(MI);
+}
+
+// We would like to split the SP adjustment to reduce prologue/epilogue
+// as following instructions. In this way, the offset of the callee saved
+// register could fit in a single store. Returns the first adjustment amount,
+// or 0 when the adjustment should not be split.
+//   add sp,sp,-2032
+//   sw  ra,2028(sp)
+//   sw  s0,2024(sp)
+//   sw  s1,2020(sp)
+//   sw  s3,2012(sp)
+//   sw  s4,2008(sp)
+//   add sp,sp,-64
+uint64_t
+RISCVFrameLowering::getFirstSPAdjustAmount(const MachineFunction &MF) const {
+  const auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
+  const MachineFrameInfo &MFI = MF.getFrameInfo();
+  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
+  uint64_t StackSize = getStackSizeWithRVVPadding(MF);
+
+  // Disable SplitSPAdjust if save-restore libcall is used. The callee-saved
+  // registers will be pushed by the save-restore libcalls, so we don't have to
+  // split the SP adjustment in this case.
+  if (RVFI->getLibCallStackSize())
+    return 0;
+
+  // Return the FirstSPAdjustAmount if the StackSize can not fit in a signed
+  // 12-bit and there exists a callee-saved register needing to be pushed.
+  if (!isInt<12>(StackSize) && (CSI.size() > 0)) {
+    // FirstSPAdjustAmount is chosen as (2048 - StackAlign) because 2048 will
+    // cause sp = sp + 2048 in the epilogue to be split into multiple
+    // instructions. Offsets smaller than 2048 can fit in a single load/store
+    // instruction, and we have to stick with the stack alignment. 2048 has
+    // 16-byte alignment. The stack alignment for RV32 and RV64 is 16 and for
+    // RV32E it is 4. So (2048 - StackAlign) will satisfy the stack alignment.
+    return 2048 - getStackAlign().value();
+  }
+  return 0;
+}
+
+// Emit code to save the callee-saved registers in CSI at function entry.
+// Registers covered by a __riscv_save libcall are pushed by the call itself;
+// any remaining CSRs are stored to their assigned stack slots individually.
+// Always returns true (spilling is fully handled here).
+bool RISCVFrameLowering::spillCalleeSavedRegisters(
+    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
+    ArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
+  if (CSI.empty())
+    return true;
+
+  MachineFunction *MF = MBB.getParent();
+  const TargetInstrInfo &TII = *MF->getSubtarget().getInstrInfo();
+  DebugLoc DL;
+  if (MI != MBB.end() && !MI->isDebugInstr())
+    DL = MI->getDebugLoc();
+
+  const char *SpillLibCall = getSpillLibCallName(*MF, CSI);
+  if (SpillLibCall) {
+    // Add spill libcall via non-callee-saved register t0.
+    BuildMI(MBB, MI, DL, TII.get(RISCV::PseudoCALLReg), RISCV::X5)
+        .addExternalSymbol(SpillLibCall, RISCVII::MO_CALL)
+        .setMIFlag(MachineInstr::FrameSetup);
+
+    // Add registers spilled in libcall as liveins.
+    for (auto &CS : CSI)
+      MBB.addLiveIn(CS.getReg());
+  }
+
+  // Manually spill values not spilled by libcall.
+  const auto &NonLibcallCSI = getNonLibcallCSI(*MF, CSI);
+  for (auto &CS : NonLibcallCSI) {
+    // Insert the spill to the stack frame.
+    Register Reg = CS.getReg();
+    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
+    TII.storeRegToStackSlot(MBB, MI, Reg, !MBB.isLiveIn(Reg), CS.getFrameIdx(),
+                            RC, TRI, Register());
+  }
+
+  return true;
+}
+
+// Emit code to restore the callee-saved registers in CSI at function exit.
+// Manually-spilled CSRs are reloaded first; when a __riscv_restore libcall is
+// used, the remaining registers are restored by a tail call to it and any
+// trailing PseudoRET is removed. Always returns true.
+bool RISCVFrameLowering::restoreCalleeSavedRegisters(
+    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
+    MutableArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
+  if (CSI.empty())
+    return true;
+
+  MachineFunction *MF = MBB.getParent();
+  const TargetInstrInfo &TII = *MF->getSubtarget().getInstrInfo();
+  DebugLoc DL;
+  if (MI != MBB.end() && !MI->isDebugInstr())
+    DL = MI->getDebugLoc();
+
+  // Manually restore values not restored by libcall.
+  // Keep the same order as in the prologue. There is no need to reverse the
+  // order in the epilogue. In addition, the return address will be restored
+  // first in the epilogue. It increases the opportunity to avoid the
+  // load-to-use data hazard between loading RA and return by RA.
+  // loadRegFromStackSlot can insert multiple instructions.
+  const auto &NonLibcallCSI = getNonLibcallCSI(*MF, CSI);
+  for (auto &CS : NonLibcallCSI) {
+    Register Reg = CS.getReg();
+    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
+    TII.loadRegFromStackSlot(MBB, MI, Reg, CS.getFrameIdx(), RC, TRI,
+                             Register());
+    assert(MI != MBB.begin() && "loadRegFromStackSlot didn't insert any code!");
+  }
+
+  const char *RestoreLibCall = getRestoreLibCallName(*MF, CSI);
+  if (RestoreLibCall) {
+    // Add restore libcall via tail call.
+    MachineBasicBlock::iterator NewMI =
+        BuildMI(MBB, MI, DL, TII.get(RISCV::PseudoTAIL))
+            .addExternalSymbol(RestoreLibCall, RISCVII::MO_CALL)
+            .setMIFlag(MachineInstr::FrameDestroy);
+
+    // Remove trailing returns, since the terminator is now a tail call to the
+    // restore function.
+    if (MI != MBB.end() && MI->getOpcode() == RISCV::PseudoRET) {
+      NewMI->copyImplicitOps(*MF, *MI);
+      MI->eraseFromParent();
+    }
+  }
+
+  return true;
+}
+
+// Enable shrink-wrapping of the prologue/epilogue except at -O0.
+bool RISCVFrameLowering::enableShrinkWrapping(const MachineFunction &MF) const {
+  // Keep the conventional code flow when not optimizing.
+  if (MF.getFunction().hasOptNone())
+    return false;
+
+  return true;
+}
+
+// Check whether MBB can host the prologue. Only restricted when the
+// save-restore libcalls are in use, since they clobber t0.
+bool RISCVFrameLowering::canUseAsPrologue(const MachineBasicBlock &MBB) const {
+  MachineBasicBlock *TmpMBB = const_cast<MachineBasicBlock *>(&MBB);
+  const MachineFunction *MF = MBB.getParent();
+  const auto *RVFI = MF->getInfo<RISCVMachineFunctionInfo>();
+
+  if (!RVFI->useSaveRestoreLibCalls(*MF))
+    return true;
+
+  // Inserting a call to a __riscv_save libcall requires the use of the register
+  // t0 (X5) to hold the return address. Therefore if this register is already
+  // used we can't insert the call.
+
+  RegScavenger RS;
+  RS.enterBasicBlock(*TmpMBB);
+  return !RS.isRegUsed(RISCV::X5);
+}
+
+// Check whether MBB can host the epilogue. Only restricted when the
+// save-restore libcalls are in use, because __riscv_restore must be reached
+// via a tail call and therefore nothing may execute after it.
+bool RISCVFrameLowering::canUseAsEpilogue(const MachineBasicBlock &MBB) const {
+  const MachineFunction *MF = MBB.getParent();
+  MachineBasicBlock *TmpMBB = const_cast<MachineBasicBlock *>(&MBB);
+  const auto *RVFI = MF->getInfo<RISCVMachineFunctionInfo>();
+
+  if (!RVFI->useSaveRestoreLibCalls(*MF))
+    return true;
+
+  // Using the __riscv_restore libcalls to restore CSRs requires a tail call.
+  // This means if we still need to continue executing code within this function
+  // the restore cannot take place in this basic block.
+
+  if (MBB.succ_size() > 1)
+    return false;
+
+  MachineBasicBlock *SuccMBB =
+      MBB.succ_empty() ? TmpMBB->getFallThrough() : *MBB.succ_begin();
+
+  // Doing a tail call should be safe if there are no successors, because either
+  // we have a returning block or the end of the block is unreachable, so the
+  // restore will be eliminated regardless.
+  if (!SuccMBB)
+    return true;
+
+  // The successor can only contain a return, since we would effectively be
+  // replacing the successor with our own tail return at the end of our block.
+  return SuccMBB->isReturnBlock() && SuccMBB->size() == 1;
+}
+
+// Report which stack IDs this target can allocate: the default stack and the
+// scalable-vector (RVV) stack; other targets' IDs are rejected.
+bool RISCVFrameLowering::isSupportedStackID(TargetStackID::Value ID) const {
+  switch (ID) {
+  case TargetStackID::Default:
+  case TargetStackID::ScalableVector:
+    return true;
+  case TargetStackID::NoAlloc:
+  case TargetStackID::SGPRSpill:
+  case TargetStackID::WasmLocal:
+    return false;
+  }
+  llvm_unreachable("Invalid TargetStackID::Value");
+}
+
+// Scalable vectors live in the dedicated ScalableVector stack region.
+TargetStackID::Value RISCVFrameLowering::getStackIDForScalableVectors() const {
+  return TargetStackID::ScalableVector;
+}
diff --git a/contrib/libs/llvm16/lib/Target/RISCV/RISCVFrameLowering.h b/contrib/libs/llvm16/lib/Target/RISCV/RISCVFrameLowering.h
new file mode 100644
index 0000000000..bf6c1a6526
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/RISCV/RISCVFrameLowering.h
@@ -0,0 +1,94 @@
+//===-- RISCVFrameLowering.h - Define frame lowering for RISCV -*- C++ -*--===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This class implements RISCV-specific bits of TargetFrameLowering class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_RISCV_RISCVFRAMELOWERING_H
+#define LLVM_LIB_TARGET_RISCV_RISCVFRAMELOWERING_H
+
+#include "llvm/CodeGen/TargetFrameLowering.h"
+#include "llvm/Support/TypeSize.h"
+
+namespace llvm {
+class RISCVSubtarget;
+
+/// RISC-V implementation of TargetFrameLowering: prologue/epilogue emission,
+/// callee-saved register spilling (optionally via the __riscv_save/restore
+/// libcalls) and layout of the scalable-vector (RVV) stack region.
+class RISCVFrameLowering : public TargetFrameLowering {
+public:
+  // Stack grows down, 16-byte aligned, no local-area offset.
+  explicit RISCVFrameLowering(const RISCVSubtarget &STI)
+      : TargetFrameLowering(StackGrowsDown,
+                            /*StackAlignment=*/Align(16),
+                            /*LocalAreaOffset=*/0,
+                            /*TransientStackAlignment=*/Align(16)),
+        STI(STI) {}
+
+  void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
+  void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
+
+  /// Scalar stack size including any padding around the RVV region.
+  uint64_t getStackSizeWithRVVPadding(const MachineFunction &MF) const;
+
+  StackOffset getFrameIndexReference(const MachineFunction &MF, int FI,
+                                     Register &FrameReg) const override;
+
+  void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs,
+                            RegScavenger *RS) const override;
+
+  void processFunctionBeforeFrameFinalized(MachineFunction &MF,
+                                           RegScavenger *RS) const override;
+
+  bool hasFP(const MachineFunction &MF) const override;
+
+  /// True when the function needs a dedicated base pointer register.
+  bool hasBP(const MachineFunction &MF) const;
+
+  bool hasReservedCallFrame(const MachineFunction &MF) const override;
+  MachineBasicBlock::iterator
+  eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
+                                MachineBasicBlock::iterator MI) const override;
+  bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
+                                 MachineBasicBlock::iterator MI,
+                                 ArrayRef<CalleeSavedInfo> CSI,
+                                 const TargetRegisterInfo *TRI) const override;
+  bool
+  restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
+                              MachineBasicBlock::iterator MI,
+                              MutableArrayRef<CalleeSavedInfo> CSI,
+                              const TargetRegisterInfo *TRI) const override;
+
+  // Get the first stack adjustment amount for SplitSPAdjust.
+  // Return 0 if we don't want to to split the SP adjustment in prologue and
+  // epilogue.
+  uint64_t getFirstSPAdjustAmount(const MachineFunction &MF) const;
+
+  bool canUseAsPrologue(const MachineBasicBlock &MBB) const override;
+  bool canUseAsEpilogue(const MachineBasicBlock &MBB) const override;
+
+  bool enableShrinkWrapping(const MachineFunction &MF) const override;
+
+  bool isSupportedStackID(TargetStackID::Value ID) const override;
+  TargetStackID::Value getStackIDForScalableVectors() const override;
+
+  bool isStackIdSafeForLocalArea(unsigned StackId) const override {
+    // We don't support putting RISCV Vector objects into the pre-allocated
+    // local frame block at the moment.
+    return StackId != TargetStackID::ScalableVector;
+  }
+
+protected:
+  const RISCVSubtarget &STI;
+
+private:
+  void determineFrameLayout(MachineFunction &MF) const;
+  // Emit code that adjusts SP by a (scalable) RVV amount.
+  void adjustStackForRVV(MachineFunction &MF, MachineBasicBlock &MBB,
+                         MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
+                         int64_t Amount, MachineInstr::MIFlag Flag) const;
+  // Lay out the RVV stack region; returns {size, required alignment}.
+  std::pair<int64_t, Align>
+  assignRVVStackObjectOffsets(MachineFunction &MF) const;
+};
+} // namespace llvm
+#endif
diff --git a/contrib/libs/llvm16/lib/Target/RISCV/RISCVGatherScatterLowering.cpp b/contrib/libs/llvm16/lib/Target/RISCV/RISCVGatherScatterLowering.cpp
new file mode 100644
index 0000000000..de627983b5
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/RISCV/RISCVGatherScatterLowering.cpp
@@ -0,0 +1,543 @@
+//===- RISCVGatherScatterLowering.cpp - Gather/Scatter lowering -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass custom lowers llvm.gather and llvm.scatter instructions to
+// RISCV intrinsics.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RISCV.h"
+#include "RISCVTargetMachine.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/Analysis/VectorUtils.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/IntrinsicsRISCV.h"
+#include "llvm/IR/PatternMatch.h"
+#include "llvm/Transforms/Utils/Local.h"
+#include <optional>
+
+using namespace llvm;
+using namespace PatternMatch;
+
+#define DEBUG_TYPE "riscv-gather-scatter-lowering"
+
+namespace {
+
+/// IR-level pass that rewrites masked.gather/masked.scatter intrinsics whose
+/// index is a strided recurrence into RISC-V strided load/store intrinsics,
+/// hoisting the base/stride arithmetic out of the loop.
+class RISCVGatherScatterLowering : public FunctionPass {
+  const RISCVSubtarget *ST = nullptr;
+  const RISCVTargetLowering *TLI = nullptr;
+  LoopInfo *LI = nullptr;
+  const DataLayout *DL = nullptr;
+
+  // PHIs that may become dead after scalarizing their recurrences; cleaned
+  // up at the end of the pass.
+  SmallVector<WeakTrackingVH> MaybeDeadPHIs;
+
+  // Cache of the BasePtr and Stride determined from this GEP. When a GEP is
+  // used by multiple gathers/scatters, this allow us to reuse the scalar
+  // instructions we created for the first gather/scatter for the others.
+  DenseMap<GetElementPtrInst *, std::pair<Value *, Value *>> StridedAddrs;
+
+public:
+  static char ID; // Pass identification, replacement for typeid
+
+  RISCVGatherScatterLowering() : FunctionPass(ID) {}
+
+  bool runOnFunction(Function &F) override;
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.setPreservesCFG();
+    AU.addRequired<TargetPassConfig>();
+    AU.addRequired<LoopInfoWrapperPass>();
+  }
+
+  StringRef getPassName() const override {
+    return "RISCV gather/scatter lowering";
+  }
+
+private:
+  // True when DataType/alignment are legal for an RVV strided access.
+  bool isLegalTypeAndAlignment(Type *DataType, Value *AlignOp);
+
+  // Attempt the rewrite of one gather/scatter; returns true on success.
+  bool tryCreateStridedLoadStore(IntrinsicInst *II, Type *DataType, Value *Ptr,
+                                 Value *AlignOp);
+
+  // Returns {base, stride} for GEP, or {nullptr, nullptr} on failure.
+  std::pair<Value *, Value *> determineBaseAndStride(GetElementPtrInst *GEP,
+                                                     IRBuilder<> &Builder);
+
+  // Match Index as a strided loop recurrence, building scalar base/stride.
+  bool matchStridedRecurrence(Value *Index, Loop *L, Value *&Stride,
+                              PHINode *&BasePtr, BinaryOperator *&Inc,
+                              IRBuilder<> &Builder);
+};
+
+} // end anonymous namespace
+
+// Pass identification and legacy pass-manager registration.
+char RISCVGatherScatterLowering::ID = 0;
+
+INITIALIZE_PASS(RISCVGatherScatterLowering, DEBUG_TYPE,
+                "RISCV gather/scatter lowering pass", false, false)
+
+// Factory used by the RISC-V target machine to add this pass.
+FunctionPass *llvm::createRISCVGatherScatterLoweringPass() {
+  return new RISCVGatherScatterLowering();
+}
+
+// Return true when DataType can be accessed with an RVV strided load/store:
+// the element type must be RVV-legal, the access at least element-aligned,
+// and the whole vector type legal for the target.
+bool RISCVGatherScatterLowering::isLegalTypeAndAlignment(Type *DataType,
+                                                         Value *AlignOp) {
+  Type *ScalarType = DataType->getScalarType();
+  if (!TLI->isLegalElementTypeForRVV(ScalarType))
+    return false;
+
+  MaybeAlign MA = cast<ConstantInt>(AlignOp)->getMaybeAlignValue();
+  if (MA && MA->value() < DL->getTypeStoreSize(ScalarType).getFixedValue())
+    return false;
+
+  // FIXME: Let the backend type legalize by splitting/widening?
+  EVT DataVT = TLI->getValueType(*DL, DataType);
+  if (!TLI->isTypeLegal(DataVT))
+    return false;
+
+  return true;
+}
+
+// TODO: Should we consider the mask when looking for a stride?
+//
+// If StartC is a constant vector whose elements form an arithmetic sequence,
+// return {first element, common difference}; otherwise {nullptr, nullptr}.
+static std::pair<Value *, Value *> matchStridedConstant(Constant *StartC) {
+  unsigned NumElts = cast<FixedVectorType>(StartC->getType())->getNumElements();
+
+  // Check that the start value is a strided constant.
+  auto *StartVal =
+      dyn_cast_or_null<ConstantInt>(StartC->getAggregateElement((unsigned)0));
+  if (!StartVal)
+    return std::make_pair(nullptr, nullptr);
+  APInt StrideVal(StartVal->getValue().getBitWidth(), 0);
+  ConstantInt *Prev = StartVal;
+  for (unsigned i = 1; i != NumElts; ++i) {
+    auto *C = dyn_cast_or_null<ConstantInt>(StartC->getAggregateElement(i));
+    if (!C)
+      return std::make_pair(nullptr, nullptr);
+
+    // Every adjacent pair must have the same difference.
+    APInt LocalStride = C->getValue() - Prev->getValue();
+    if (i == 1)
+      StrideVal = LocalStride;
+    else if (StrideVal != LocalStride)
+      return std::make_pair(nullptr, nullptr);
+
+    Prev = C;
+  }
+
+  Value *Stride = ConstantInt::get(StartVal->getType(), StrideVal);
+
+  return std::make_pair(StartVal, Stride);
+}
+
+// Try to decompose Start into a scalar {start, stride} pair. Handles strided
+// constant vectors, stepvector, and (recursively) either of those with a
+// splat added. Returns {nullptr, nullptr} when no strided form is found.
+static std::pair<Value *, Value *> matchStridedStart(Value *Start,
+                                                     IRBuilder<> &Builder) {
+  // Base case, start is a strided constant.
+  auto *StartC = dyn_cast<Constant>(Start);
+  if (StartC)
+    return matchStridedConstant(StartC);
+
+  // Base case, start is a stepvector
+  if (match(Start, m_Intrinsic<Intrinsic::experimental_stepvector>())) {
+    auto *Ty = Start->getType()->getScalarType();
+    // stepvector is <0, 1, 2, ...>: start 0, stride 1.
+    return std::make_pair(ConstantInt::get(Ty, 0), ConstantInt::get(Ty, 1));
+  }
+
+  // Not a constant, maybe it's a strided constant with a splat added to it.
+  auto *BO = dyn_cast<BinaryOperator>(Start);
+  if (!BO || BO->getOpcode() != Instruction::Add)
+    return std::make_pair(nullptr, nullptr);
+
+  // Look for an operand that is splatted.
+  unsigned OtherIndex = 1;
+  Value *Splat = getSplatValue(BO->getOperand(0));
+  if (!Splat) {
+    Splat = getSplatValue(BO->getOperand(1));
+    OtherIndex = 0;
+  }
+  if (!Splat)
+    return std::make_pair(nullptr, nullptr);
+
+  // Recurse on the non-splat operand.
+  Value *Stride;
+  std::tie(Start, Stride) = matchStridedStart(BO->getOperand(OtherIndex),
+                                              Builder);
+  if (!Start)
+    return std::make_pair(nullptr, nullptr);
+
+  // Add the splat value to the start.
+  Builder.SetInsertPoint(BO);
+  Builder.SetCurrentDebugLocation(DebugLoc());
+  Start = Builder.CreateAdd(Start, Splat);
+  return std::make_pair(Start, Stride);
+}
+
+// Recursively, walk about the use-def chain until we find a Phi with a strided
+// start value. Build and update a scalar recurrence as we unwind the recursion.
+// We also update the Stride as we unwind. Our goal is to move all of the
+// arithmetic out of the loop.
+//
+// On success, returns true with Stride set to the scalar stride, BasePtr to
+// the newly-built scalar PHI, and Inc to its scalar increment. The IR is
+// mutated in-place (new scalar PHI/add, adjusted start/step in the
+// preheader), so callers must treat a false return before recursion as
+// "no change" but a true return as "IR rewritten".
+bool RISCVGatherScatterLowering::matchStridedRecurrence(Value *Index, Loop *L,
+                                                        Value *&Stride,
+                                                        PHINode *&BasePtr,
+                                                        BinaryOperator *&Inc,
+                                                        IRBuilder<> &Builder) {
+  // Our base case is a Phi.
+  if (auto *Phi = dyn_cast<PHINode>(Index)) {
+    // A phi node we want to perform this function on should be from the
+    // loop header.
+    if (Phi->getParent() != L->getHeader())
+      return false;
+
+    Value *Step, *Start;
+    if (!matchSimpleRecurrence(Phi, Inc, Start, Step) ||
+        Inc->getOpcode() != Instruction::Add)
+      return false;
+    assert(Phi->getNumIncomingValues() == 2 && "Expected 2 operand phi.");
+    unsigned IncrementingBlock = Phi->getIncomingValue(0) == Inc ? 0 : 1;
+    assert(Phi->getIncomingValue(IncrementingBlock) == Inc &&
+           "Expected one operand of phi to be Inc");
+
+    // Only proceed if the step is loop invariant.
+    if (!L->isLoopInvariant(Step))
+      return false;
+
+    // Step should be a splat.
+    Step = getSplatValue(Step);
+    if (!Step)
+      return false;
+
+    std::tie(Start, Stride) = matchStridedStart(Start, Builder);
+    if (!Start)
+      return false;
+    assert(Stride != nullptr);
+
+    // Build scalar phi and increment.
+    BasePtr =
+        PHINode::Create(Start->getType(), 2, Phi->getName() + ".scalar", Phi);
+    Inc = BinaryOperator::CreateAdd(BasePtr, Step, Inc->getName() + ".scalar",
+                                    Inc);
+    BasePtr->addIncoming(Start, Phi->getIncomingBlock(1 - IncrementingBlock));
+    BasePtr->addIncoming(Inc, Phi->getIncomingBlock(IncrementingBlock));
+
+    // Note that this Phi might be eligible for removal.
+    MaybeDeadPHIs.push_back(Phi);
+    return true;
+  }
+
+  // Otherwise look for binary operator.
+  auto *BO = dyn_cast<BinaryOperator>(Index);
+  if (!BO)
+    return false;
+
+  if (BO->getOpcode() != Instruction::Add &&
+      BO->getOpcode() != Instruction::Or &&
+      BO->getOpcode() != Instruction::Mul &&
+      BO->getOpcode() != Instruction::Shl)
+    return false;
+
+  // Only support shift by constant.
+  if (BO->getOpcode() == Instruction::Shl && !isa<Constant>(BO->getOperand(1)))
+    return false;
+
+  // We need to be able to treat Or as Add.
+  if (BO->getOpcode() == Instruction::Or &&
+      !haveNoCommonBitsSet(BO->getOperand(0), BO->getOperand(1), *DL))
+    return false;
+
+  // We should have one operand in the loop and one splat.
+  Value *OtherOp;
+  if (isa<Instruction>(BO->getOperand(0)) &&
+      L->contains(cast<Instruction>(BO->getOperand(0)))) {
+    Index = cast<Instruction>(BO->getOperand(0));
+    OtherOp = BO->getOperand(1);
+  } else if (isa<Instruction>(BO->getOperand(1)) &&
+             L->contains(cast<Instruction>(BO->getOperand(1)))) {
+    Index = cast<Instruction>(BO->getOperand(1));
+    OtherOp = BO->getOperand(0);
+  } else {
+    return false;
+  }
+
+  // Make sure other op is loop invariant.
+  if (!L->isLoopInvariant(OtherOp))
+    return false;
+
+  // Make sure we have a splat.
+  Value *SplatOp = getSplatValue(OtherOp);
+  if (!SplatOp)
+    return false;
+
+  // Recurse up the use-def chain.
+  if (!matchStridedRecurrence(Index, L, Stride, BasePtr, Inc, Builder))
+    return false;
+
+  // Locate the Step and Start values from the recurrence.
+  unsigned StepIndex = Inc->getOperand(0) == BasePtr ? 1 : 0;
+  unsigned StartBlock = BasePtr->getOperand(0) == Inc ? 1 : 0;
+  Value *Step = Inc->getOperand(StepIndex);
+  Value *Start = BasePtr->getOperand(StartBlock);
+
+  // We need to adjust the start value in the preheader.
+  Builder.SetInsertPoint(
+      BasePtr->getIncomingBlock(StartBlock)->getTerminator());
+  Builder.SetCurrentDebugLocation(DebugLoc());
+
+  // Fold this BO's splat operand into the scalar recurrence.
+  switch (BO->getOpcode()) {
+  default:
+    llvm_unreachable("Unexpected opcode!");
+  case Instruction::Add:
+  case Instruction::Or: {
+    // An add only affects the start value. It's ok to do this for Or because
+    // we already checked that there are no common set bits.
+
+    // If the start value is Zero, just take the SplatOp.
+    if (isa<ConstantInt>(Start) && cast<ConstantInt>(Start)->isZero())
+      Start = SplatOp;
+    else
+      Start = Builder.CreateAdd(Start, SplatOp, "start");
+    BasePtr->setIncomingValue(StartBlock, Start);
+    break;
+  }
+  case Instruction::Mul: {
+    // If the start is zero we don't need to multiply.
+    if (!isa<ConstantInt>(Start) || !cast<ConstantInt>(Start)->isZero())
+      Start = Builder.CreateMul(Start, SplatOp, "start");
+
+    Step = Builder.CreateMul(Step, SplatOp, "step");
+
+    // If the Stride is 1 just take the SplatOpt.
+    if (isa<ConstantInt>(Stride) && cast<ConstantInt>(Stride)->isOne())
+      Stride = SplatOp;
+    else
+      Stride = Builder.CreateMul(Stride, SplatOp, "stride");
+    Inc->setOperand(StepIndex, Step);
+    BasePtr->setIncomingValue(StartBlock, Start);
+    break;
+  }
+  case Instruction::Shl: {
+    // If the start is zero we don't need to shift.
+    if (!isa<ConstantInt>(Start) || !cast<ConstantInt>(Start)->isZero())
+      Start = Builder.CreateShl(Start, SplatOp, "start");
+    Step = Builder.CreateShl(Step, SplatOp, "step");
+    Stride = Builder.CreateShl(Stride, SplatOp, "stride");
+    Inc->setOperand(StepIndex, Step);
+    BasePtr->setIncomingValue(StartBlock, Start);
+    break;
+  }
+  }
+
+  return true;
+}
+
+// Try to decompose the vector index feeding \p GEP into a scalar base
+// pointer plus a scalar stride. Returns {BasePtr, Stride} on success and
+// {nullptr, nullptr} on failure. Successful (and only successful) results
+// are memoized in StridedAddrs keyed on the GEP.
+std::pair<Value *, Value *>
+RISCVGatherScatterLowering::determineBaseAndStride(GetElementPtrInst *GEP,
+                                                   IRBuilder<> &Builder) {
+
+  // Reuse a previously computed decomposition for this GEP if we have one.
+  auto I = StridedAddrs.find(GEP);
+  if (I != StridedAddrs.end())
+    return I->second;
+
+  SmallVector<Value *, 2> Ops(GEP->operands());
+
+  // Base pointer needs to be a scalar.
+  if (Ops[0]->getType()->isVectorTy())
+    return std::make_pair(nullptr, nullptr);
+
+  std::optional<unsigned> VecOperand;
+  unsigned TypeScale = 0;
+
+  // Look for a vector operand and scale. Exactly one GEP index may be a
+  // vector; remember its position and the byte size of the type it indexes.
+  gep_type_iterator GTI = gep_type_begin(GEP);
+  for (unsigned i = 1, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
+    if (!Ops[i]->getType()->isVectorTy())
+      continue;
+
+    if (VecOperand)
+      return std::make_pair(nullptr, nullptr);
+
+    VecOperand = i;
+
+    TypeSize TS = DL->getTypeAllocSize(GTI.getIndexedType());
+    if (TS.isScalable())
+      return std::make_pair(nullptr, nullptr);
+
+    TypeScale = TS.getFixedValue();
+  }
+
+  // We need to find a vector index to simplify.
+  if (!VecOperand)
+    return std::make_pair(nullptr, nullptr);
+
+  // We can't extract the stride if the arithmetic is done at a different size
+  // than the pointer type. Adding the stride later may not wrap correctly.
+  // Technically we could handle wider indices, but I don't expect that in
+  // practice.
+  Value *VecIndex = Ops[*VecOperand];
+  Type *VecIntPtrTy = DL->getIntPtrType(GEP->getType());
+  if (VecIndex->getType() != VecIntPtrTy)
+    return std::make_pair(nullptr, nullptr);
+
+  // Handle the non-recursive case.  This is what we see if the vectorizer
+  // decides to use a scalar IV + vid on demand instead of a vector IV.
+  auto [Start, Stride] = matchStridedStart(VecIndex, Builder);
+  if (Start) {
+    assert(Stride);
+    Builder.SetInsertPoint(GEP);
+
+    // Replace the vector index with the scalar start and build a scalar GEP.
+    Ops[*VecOperand] = Start;
+    Type *SourceTy = GEP->getSourceElementType();
+    Value *BasePtr =
+        Builder.CreateGEP(SourceTy, Ops[0], ArrayRef(Ops).drop_front());
+
+    // Convert stride to pointer size if needed.
+    Type *IntPtrTy = DL->getIntPtrType(BasePtr->getType());
+    assert(Stride->getType() == IntPtrTy && "Unexpected type");
+
+    // Scale the stride by the size of the indexed type.
+    if (TypeScale != 1)
+      Stride = Builder.CreateMul(Stride, ConstantInt::get(IntPtrTy, TypeScale));
+
+    auto P = std::make_pair(BasePtr, Stride);
+    StridedAddrs[GEP] = P;
+    return P;
+  }
+
+  // Make sure we're in a loop and that has a pre-header and a single latch.
+  Loop *L = LI->getLoopFor(GEP->getParent());
+  if (!L || !L->getLoopPreheader() || !L->getLoopLatch())
+    return std::make_pair(nullptr, nullptr);
+
+  BinaryOperator *Inc;
+  PHINode *BasePhi;
+  if (!matchStridedRecurrence(VecIndex, L, Stride, BasePhi, Inc, Builder))
+    return std::make_pair(nullptr, nullptr);
+
+  assert(BasePhi->getNumIncomingValues() == 2 && "Expected 2 operand phi.");
+  unsigned IncrementingBlock = BasePhi->getOperand(0) == Inc ? 0 : 1;
+  assert(BasePhi->getIncomingValue(IncrementingBlock) == Inc &&
+         "Expected one operand of phi to be Inc");
+
+  Builder.SetInsertPoint(GEP);
+
+  // Replace the vector index with the scalar phi and build a scalar GEP.
+  Ops[*VecOperand] = BasePhi;
+  Type *SourceTy = GEP->getSourceElementType();
+  Value *BasePtr =
+      Builder.CreateGEP(SourceTy, Ops[0], ArrayRef(Ops).drop_front());
+
+  // Final adjustments to stride should go in the start block.
+  Builder.SetInsertPoint(
+      BasePhi->getIncomingBlock(1 - IncrementingBlock)->getTerminator());
+
+  // Convert stride to pointer size if needed.
+  Type *IntPtrTy = DL->getIntPtrType(BasePtr->getType());
+  assert(Stride->getType() == IntPtrTy && "Unexpected type");
+
+  // Scale the stride by the size of the indexed type.
+  if (TypeScale != 1)
+    Stride = Builder.CreateMul(Stride, ConstantInt::get(IntPtrTy, TypeScale));
+
+  auto P = std::make_pair(BasePtr, Stride);
+  StridedAddrs[GEP] = P;
+  return P;
+}
+
+// Rewrite a masked.gather/masked.scatter intrinsic \p II into a RISCV
+// masked strided load/store intrinsic when its pointer \p Ptr is a GEP
+// whose vector index decomposes into a scalar base + stride. The original
+// intrinsic is erased on success. Returns true if a rewrite happened.
+bool RISCVGatherScatterLowering::tryCreateStridedLoadStore(IntrinsicInst *II,
+                                                           Type *DataType,
+                                                           Value *Ptr,
+                                                           Value *AlignOp) {
+  // Make sure the operation will be supported by the backend.
+  if (!isLegalTypeAndAlignment(DataType, AlignOp))
+    return false;
+
+  // Pointer should be a GEP.
+  auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
+  if (!GEP)
+    return false;
+
+  IRBuilder<> Builder(GEP);
+
+  Value *BasePtr, *Stride;
+  std::tie(BasePtr, Stride) = determineBaseAndStride(GEP, Builder);
+  if (!BasePtr)
+    return false;
+  assert(Stride != nullptr);
+
+  Builder.SetInsertPoint(II);
+
+  // Build the strided intrinsic. Note the argument order differs between
+  // gather (passthru, ptr, stride, mask) and scatter (value, ptr, stride,
+  // mask) because the original intrinsics carry their operands differently.
+  CallInst *Call;
+  if (II->getIntrinsicID() == Intrinsic::masked_gather)
+    Call = Builder.CreateIntrinsic(
+        Intrinsic::riscv_masked_strided_load,
+        {DataType, BasePtr->getType(), Stride->getType()},
+        {II->getArgOperand(3), BasePtr, Stride, II->getArgOperand(2)});
+  else
+    Call = Builder.CreateIntrinsic(
+        Intrinsic::riscv_masked_strided_store,
+        {DataType, BasePtr->getType(), Stride->getType()},
+        {II->getArgOperand(0), BasePtr, Stride, II->getArgOperand(3)});
+
+  Call->takeName(II);
+  II->replaceAllUsesWith(Call);
+  II->eraseFromParent();
+
+  // The GEP may now be dead; clean it (and anything it fed) up.
+  if (GEP->use_empty())
+    RecursivelyDeleteTriviallyDeadInstructions(GEP);
+
+  return true;
+}
+
+// Pass entry point: bail out unless the subtarget has V instructions and
+// uses RVV for fixed-length vectors, then collect every masked.gather /
+// masked.scatter in \p F and try to rewrite each into a strided access.
+// Finally deletes any PHIs left dead by the rewrites. Returns whether the
+// function was modified.
+bool RISCVGatherScatterLowering::runOnFunction(Function &F) {
+  if (skipFunction(F))
+    return false;
+
+  auto &TPC = getAnalysis<TargetPassConfig>();
+  auto &TM = TPC.getTM<RISCVTargetMachine>();
+  ST = &TM.getSubtarget<RISCVSubtarget>(F);
+  if (!ST->hasVInstructions() || !ST->useRVVForFixedLengthVectors())
+    return false;
+
+  TLI = ST->getTargetLowering();
+  DL = &F.getParent()->getDataLayout();
+  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
+
+  // The memoization cache is per-function.
+  StridedAddrs.clear();
+
+  SmallVector<IntrinsicInst *, 4> Gathers;
+  SmallVector<IntrinsicInst *, 4> Scatters;
+
+  bool Changed = false;
+
+  // Collect candidates first so the rewrites below don't invalidate the
+  // instruction iteration.
+  for (BasicBlock &BB : F) {
+    for (Instruction &I : BB) {
+      IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I);
+      if (II && II->getIntrinsicID() == Intrinsic::masked_gather) {
+        Gathers.push_back(II);
+      } else if (II && II->getIntrinsicID() == Intrinsic::masked_scatter) {
+        Scatters.push_back(II);
+      }
+    }
+  }
+
+  // Rewrite gather/scatter to form strided load/store if possible.
+  for (auto *II : Gathers)
+    Changed |= tryCreateStridedLoadStore(
+        II, II->getType(), II->getArgOperand(0), II->getArgOperand(1));
+  for (auto *II : Scatters)
+    Changed |=
+        tryCreateStridedLoadStore(II, II->getArgOperand(0)->getType(),
+                                  II->getArgOperand(1), II->getArgOperand(2));
+
+  // Remove any dead phis.
+  while (!MaybeDeadPHIs.empty()) {
+    if (auto *Phi = dyn_cast_or_null<PHINode>(MaybeDeadPHIs.pop_back_val()))
+      RecursivelyDeleteDeadPHINode(Phi);
+  }
+
+  return Changed;
+}
diff --git a/contrib/libs/llvm16/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/contrib/libs/llvm16/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
new file mode 100644
index 0000000000..28244728f6
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -0,0 +1,2946 @@
+//===-- RISCVISelDAGToDAG.cpp - A dag to dag inst selector for RISCV ------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines an instruction selector for the RISCV target.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RISCVISelDAGToDAG.h"
+#include "MCTargetDesc/RISCVMCTargetDesc.h"
+#include "MCTargetDesc/RISCVMatInt.h"
+#include "RISCVISelLowering.h"
+#include "RISCVMachineFunctionInfo.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/IR/IntrinsicsRISCV.h"
+#include "llvm/Support/Alignment.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include <optional>
+
+using namespace llvm;
+
+#define DEBUG_TYPE "riscv-isel"
+#define PASS_NAME "RISCV DAG->DAG Pattern Instruction Selection"
+
+namespace llvm::RISCV {
+#define GET_RISCVVSSEGTable_IMPL
+#define GET_RISCVVLSEGTable_IMPL
+#define GET_RISCVVLXSEGTable_IMPL
+#define GET_RISCVVSXSEGTable_IMPL
+#define GET_RISCVVLETable_IMPL
+#define GET_RISCVVSETable_IMPL
+#define GET_RISCVVLXTable_IMPL
+#define GET_RISCVVSXTable_IMPL
+#define GET_RISCVMaskedPseudosTable_IMPL
+#include "RISCVGenSearchableTables.inc"
+} // namespace llvm::RISCV
+
+// Return the index of \p Node's last operand that is neither a Glue nor a
+// Chain (MVT::Other) operand. A Glue operand, when present, is last and is
+// skipped before an optional trailing Chain.
+static unsigned getLastNonGlueOrChainOpIdx(const SDNode *Node) {
+  assert(Node->getNumOperands() > 0 && "Node with no operands");
+  unsigned Idx = Node->getNumOperands() - 1;
+  if (Node->getOperand(Idx).getValueType() == MVT::Glue)
+    Idx -= 1;
+  if (Node->getOperand(Idx).getValueType() == MVT::Other)
+    Idx -= 1;
+  return Idx;
+}
+
+// Return the operand index of the vector policy operand of \p Node. The
+// instruction must carry one (asserted via its TSFlags); the policy operand
+// is the last operand before any chain/glue.
+static unsigned getVecPolicyOpIdx(const SDNode *Node, const MCInstrDesc &MCID) {
+  assert(RISCVII::hasVecPolicyOp(MCID.TSFlags));
+  (void)MCID; // Only used by the assert above.
+  return getLastNonGlueOrChainOpIdx(Node);
+}
+
+// Pre-isel DAG cleanup: walk all nodes bottom-up and rewrite SPLAT_VECTOR
+// and SPLAT_VECTOR_SPLIT_I64_VL into forms the pattern tables can match,
+// then delete any nodes the replacements left dead.
+void RISCVDAGToDAGISel::PreprocessISelDAG() {
+  SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();
+
+  bool MadeChange = false;
+  while (Position != CurDAG->allnodes_begin()) {
+    SDNode *N = &*--Position;
+    if (N->use_empty())
+      continue;
+
+    SDValue Result;
+    switch (N->getOpcode()) {
+    case ISD::SPLAT_VECTOR: {
+      // Convert integer SPLAT_VECTOR to VMV_V_X_VL and floating-point
+      // SPLAT_VECTOR to VFMV_V_F_VL to reduce isel burden.
+      MVT VT = N->getSimpleValueType(0);
+      unsigned Opc =
+          VT.isInteger() ? RISCVISD::VMV_V_X_VL : RISCVISD::VFMV_V_F_VL;
+      SDLoc DL(N);
+      SDValue VL = CurDAG->getRegister(RISCV::X0, Subtarget->getXLenVT());
+      Result = CurDAG->getNode(Opc, DL, VT, CurDAG->getUNDEF(VT),
+                               N->getOperand(0), VL);
+      break;
+    }
+    case RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL: {
+      // Lower SPLAT_VECTOR_SPLIT_I64 to two scalar stores and a stride 0 vector
+      // load. Done after lowering and combining so that we have a chance to
+      // optimize this to VMV_V_X_VL when the upper bits aren't needed.
+      assert(N->getNumOperands() == 4 && "Unexpected number of operands");
+      MVT VT = N->getSimpleValueType(0);
+      SDValue Passthru = N->getOperand(0);
+      SDValue Lo = N->getOperand(1);
+      SDValue Hi = N->getOperand(2);
+      SDValue VL = N->getOperand(3);
+      assert(VT.getVectorElementType() == MVT::i64 && VT.isScalableVector() &&
+             Lo.getValueType() == MVT::i32 && Hi.getValueType() == MVT::i32 &&
+             "Unexpected VTs!");
+      MachineFunction &MF = CurDAG->getMachineFunction();
+      RISCVMachineFunctionInfo *FuncInfo =
+          MF.getInfo<RISCVMachineFunctionInfo>();
+      SDLoc DL(N);
+
+      // We use the same frame index we use for moving two i32s into 64-bit FPR.
+      // This is an analogous operation.
+      int FI = FuncInfo->getMoveF64FrameIndex(MF);
+      MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
+      const TargetLowering &TLI = CurDAG->getTargetLoweringInfo();
+      SDValue StackSlot =
+          CurDAG->getFrameIndex(FI, TLI.getPointerTy(CurDAG->getDataLayout()));
+
+      SDValue Chain = CurDAG->getEntryNode();
+      Lo = CurDAG->getStore(Chain, DL, Lo, StackSlot, MPI, Align(8));
+
+      SDValue OffsetSlot =
+          CurDAG->getMemBasePlusOffset(StackSlot, TypeSize::Fixed(4), DL);
+      Hi = CurDAG->getStore(Chain, DL, Hi, OffsetSlot, MPI.getWithOffset(4),
+                            Align(8));
+
+      Chain = CurDAG->getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi);
+
+      // Reload the 64-bit value with a stride-0 (X0 stride) vlse, splatting
+      // the stored doubleword across the vector.
+      SDVTList VTs = CurDAG->getVTList({VT, MVT::Other});
+      SDValue IntID =
+          CurDAG->getTargetConstant(Intrinsic::riscv_vlse, DL, MVT::i64);
+      SDValue Ops[] = {Chain,
+                       IntID,
+                       Passthru,
+                       StackSlot,
+                       CurDAG->getRegister(RISCV::X0, MVT::i64),
+                       VL};
+
+      Result = CurDAG->getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
+                                           MVT::i64, MPI, Align(8),
+                                           MachineMemOperand::MOLoad);
+      break;
+    }
+    }
+
+    if (Result) {
+      LLVM_DEBUG(dbgs() << "RISCV DAG preprocessing replacing:\nOld:    ");
+      LLVM_DEBUG(N->dump(CurDAG));
+      LLVM_DEBUG(dbgs() << "\nNew: ");
+      LLVM_DEBUG(Result->dump(CurDAG));
+      LLVM_DEBUG(dbgs() << "\n");
+
+      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
+      MadeChange = true;
+    }
+  }
+
+  if (MadeChange)
+    CurDAG->RemoveDeadNodes();
+}
+
+// Post-isel peepholes: run the sext.w and masked-RVV peepholes over every
+// live machine node, then the VV merge fold, cleaning up dead nodes at the
+// end. The Dummy handle keeps the root alive across node replacement.
+void RISCVDAGToDAGISel::PostprocessISelDAG() {
+  HandleSDNode Dummy(CurDAG->getRoot());
+  SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();
+
+  bool MadeChange = false;
+  while (Position != CurDAG->allnodes_begin()) {
+    SDNode *N = &*--Position;
+    // Skip dead nodes and any non-machine opcodes.
+    if (N->use_empty() || !N->isMachineOpcode())
+      continue;
+
+    MadeChange |= doPeepholeSExtW(N);
+    MadeChange |= doPeepholeMaskedRVV(N);
+  }
+
+  CurDAG->setRoot(Dummy.getValue());
+
+  MadeChange |= doPeepholeMergeVVMFold();
+
+  if (MadeChange)
+    CurDAG->RemoveDeadNodes();
+}
+
+// Materialize a constant from a precomputed RISCVMatInt instruction
+// sequence \p Seq, chaining each instruction's result into the next as its
+// source register (starting from X0). Returns the final node.
+static SDNode *selectImmSeq(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT,
+                            RISCVMatInt::InstSeq &Seq) {
+  SDNode *Result = nullptr;
+  SDValue SrcReg = CurDAG->getRegister(RISCV::X0, VT);
+  for (RISCVMatInt::Inst &Inst : Seq) {
+    SDValue SDImm = CurDAG->getTargetConstant(Inst.getImm(), DL, VT);
+    // Each entry tells us which operand shape its opcode takes.
+    switch (Inst.getOpndKind()) {
+    case RISCVMatInt::Imm:
+      Result = CurDAG->getMachineNode(Inst.getOpcode(), DL, VT, SDImm);
+      break;
+    case RISCVMatInt::RegX0:
+      Result = CurDAG->getMachineNode(Inst.getOpcode(), DL, VT, SrcReg,
+                                      CurDAG->getRegister(RISCV::X0, VT));
+      break;
+    case RISCVMatInt::RegReg:
+      Result = CurDAG->getMachineNode(Inst.getOpcode(), DL, VT, SrcReg, SrcReg);
+      break;
+    case RISCVMatInt::RegImm:
+      Result = CurDAG->getMachineNode(Inst.getOpcode(), DL, VT, SrcReg, SDImm);
+      break;
+    }
+
+    // Only the first instruction has X0 as its source.
+    SrcReg = SDValue(Result, 0);
+  }
+
+  return Result;
+}
+
+// Materialize the constant \p Imm: compute the optimal instruction sequence
+// for the subtarget's features and emit it via selectImmSeq.
+static SDNode *selectImm(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT,
+                         int64_t Imm, const RISCVSubtarget &Subtarget) {
+  RISCVMatInt::InstSeq Seq =
+      RISCVMatInt::generateInstSeq(Imm, Subtarget.getFeatureBits());
+
+  return selectImmSeq(CurDAG, DL, VT, Seq);
+}
+
+// Combine \p NF vector registers \p Regs into a single tuple value via a
+// REG_SEQUENCE node, choosing the tuple register class and first subregister
+// index from \p LMUL. Used to build segment load/store operands.
+static SDValue createTuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
+                           unsigned NF, RISCVII::VLMUL LMUL) {
+  static const unsigned M1TupleRegClassIDs[] = {
+      RISCV::VRN2M1RegClassID, RISCV::VRN3M1RegClassID, RISCV::VRN4M1RegClassID,
+      RISCV::VRN5M1RegClassID, RISCV::VRN6M1RegClassID, RISCV::VRN7M1RegClassID,
+      RISCV::VRN8M1RegClassID};
+  static const unsigned M2TupleRegClassIDs[] = {RISCV::VRN2M2RegClassID,
+                                                RISCV::VRN3M2RegClassID,
+                                                RISCV::VRN4M2RegClassID};
+
+  assert(Regs.size() >= 2 && Regs.size() <= 8);
+
+  unsigned RegClassID;
+  unsigned SubReg0;
+  switch (LMUL) {
+  default:
+    llvm_unreachable("Invalid LMUL.");
+  case RISCVII::VLMUL::LMUL_F8:
+  case RISCVII::VLMUL::LMUL_F4:
+  case RISCVII::VLMUL::LMUL_F2:
+  case RISCVII::VLMUL::LMUL_1:
+    // The static_asserts guarantee the subreg indices are consecutive, so
+    // SubReg0 + I below addresses element I of the tuple.
+    static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
+                  "Unexpected subreg numbering");
+    SubReg0 = RISCV::sub_vrm1_0;
+    RegClassID = M1TupleRegClassIDs[NF - 2];
+    break;
+  case RISCVII::VLMUL::LMUL_2:
+    static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
+                  "Unexpected subreg numbering");
+    SubReg0 = RISCV::sub_vrm2_0;
+    RegClassID = M2TupleRegClassIDs[NF - 2];
+    break;
+  case RISCVII::VLMUL::LMUL_4:
+    static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
+                  "Unexpected subreg numbering");
+    SubReg0 = RISCV::sub_vrm4_0;
+    RegClassID = RISCV::VRN2M4RegClassID;
+    break;
+  }
+
+  SDLoc DL(Regs[0]);
+  SmallVector<SDValue, 8> Ops;
+
+  // REG_SEQUENCE operands: regclass, then (value, subreg-index) pairs.
+  Ops.push_back(CurDAG.getTargetConstant(RegClassID, DL, MVT::i32));
+
+  for (unsigned I = 0; I < Regs.size(); ++I) {
+    Ops.push_back(Regs[I]);
+    Ops.push_back(CurDAG.getTargetConstant(SubReg0 + I, DL, MVT::i32));
+  }
+  SDNode *N =
+      CurDAG.getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
+  return SDValue(N, 0);
+}
+
+// Append the common trailing operands of an RVV load/store pseudo to
+// \p Operands, consuming \p Node's operands starting at \p CurOp: base
+// pointer, optional stride/index, optional mask (copied into V0), VL, SEW,
+// policy (masked loads only), then chain and glue. If \p IndexVT is
+// non-null it receives the type of the stride/index operand.
+void RISCVDAGToDAGISel::addVectorLoadStoreOperands(
+    SDNode *Node, unsigned Log2SEW, const SDLoc &DL, unsigned CurOp,
+    bool IsMasked, bool IsStridedOrIndexed, SmallVectorImpl<SDValue> &Operands,
+    bool IsLoad, MVT *IndexVT) {
+  SDValue Chain = Node->getOperand(0);
+  SDValue Glue;
+
+  Operands.push_back(Node->getOperand(CurOp++)); // Base pointer.
+
+  if (IsStridedOrIndexed) {
+    Operands.push_back(Node->getOperand(CurOp++)); // Index.
+    if (IndexVT)
+      *IndexVT = Operands.back()->getSimpleValueType(0);
+  }
+
+  if (IsMasked) {
+    // Mask needs to be copied to V0.
+    SDValue Mask = Node->getOperand(CurOp++);
+    Chain = CurDAG->getCopyToReg(Chain, DL, RISCV::V0, Mask, SDValue());
+    Glue = Chain.getValue(1);
+    Operands.push_back(CurDAG->getRegister(RISCV::V0, Mask.getValueType()));
+  }
+  SDValue VL;
+  selectVLOp(Node->getOperand(CurOp++), VL);
+  Operands.push_back(VL);
+
+  MVT XLenVT = Subtarget->getXLenVT();
+  SDValue SEWOp = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
+  Operands.push_back(SEWOp);
+
+  // Masked load has the tail policy argument.
+  if (IsMasked && IsLoad) {
+    // Policy must be a constant.
+    uint64_t Policy = Node->getConstantOperandVal(CurOp++);
+    SDValue PolicyOp = CurDAG->getTargetConstant(Policy, DL, XLenVT);
+    Operands.push_back(PolicyOp);
+  }
+
+  Operands.push_back(Chain); // Chain.
+  if (Glue)
+    Operands.push_back(Glue);
+}
+
+// Return true if every value in \p Values is an UNDEF node.
+static bool isAllUndef(ArrayRef<SDValue> Values) {
+  for (SDValue V : Values)
+    if (!V->isUndef())
+      return false;
+  return true;
+}
+
+// Select a (possibly masked/strided) segment load intrinsic into the
+// matching VLSEG pseudo. Builds a merge tuple when any passthru register is
+// defined, then extracts each of the NF result registers back out of the
+// pseudo's tuple result.
+void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, bool IsMasked,
+                                    bool IsStrided) {
+  SDLoc DL(Node);
+  unsigned NF = Node->getNumValues() - 1;
+  MVT VT = Node->getSimpleValueType(0);
+  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
+  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+
+  unsigned CurOp = 2;
+  SmallVector<SDValue, 8> Operands;
+
+  SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
+                               Node->op_begin() + CurOp + NF);
+  // Tail-undisturbed form is needed when masked or any passthru is defined.
+  bool IsTU = IsMasked || !isAllUndef(Regs);
+  if (IsTU) {
+    SDValue Merge = createTuple(*CurDAG, Regs, NF, LMUL);
+    Operands.push_back(Merge);
+  }
+  CurOp += NF;
+
+  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
+                             Operands, /*IsLoad=*/true);
+
+  const RISCV::VLSEGPseudo *P =
+      RISCV::getVLSEGPseudo(NF, IsMasked, IsTU, IsStrided, /*FF*/ false, Log2SEW,
+                            static_cast<unsigned>(LMUL));
+  MachineSDNode *Load =
+      CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);
+
+  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
+    CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
+
+  // Split the tuple result back into NF per-field values.
+  SDValue SuperReg = SDValue(Load, 0);
+  for (unsigned I = 0; I < NF; ++I) {
+    unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
+    ReplaceUses(SDValue(Node, I),
+                CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
+  }
+
+  ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
+  CurDAG->RemoveDeadNode(Node);
+}
+
+// Select a fault-only-first segment load into its VLSEG<N>FF pseudo. Like
+// selectVLSEG, but the pseudo additionally produces the updated VL, which is
+// wired to the node's second-to-last result.
+void RISCVDAGToDAGISel::selectVLSEGFF(SDNode *Node, bool IsMasked) {
+  SDLoc DL(Node);
+  unsigned NF = Node->getNumValues() - 2; // Do not count VL and Chain.
+  MVT VT = Node->getSimpleValueType(0);
+  MVT XLenVT = Subtarget->getXLenVT();
+  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
+  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+
+  unsigned CurOp = 2;
+  SmallVector<SDValue, 7> Operands;
+
+  SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
+                               Node->op_begin() + CurOp + NF);
+  // Tail-undisturbed form is needed when masked or any passthru is defined.
+  bool IsTU = IsMasked || !isAllUndef(Regs);
+  if (IsTU) {
+    SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
+    Operands.push_back(MaskedOff);
+  }
+  CurOp += NF;
+
+  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
+                             /*IsStridedOrIndexed*/ false, Operands,
+                             /*IsLoad=*/true);
+
+  const RISCV::VLSEGPseudo *P =
+      RISCV::getVLSEGPseudo(NF, IsMasked, IsTU, /*Strided*/ false, /*FF*/ true,
+                            Log2SEW, static_cast<unsigned>(LMUL));
+  MachineSDNode *Load = CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped,
+                                               XLenVT, MVT::Other, Operands);
+
+  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
+    CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
+
+  // Split the tuple result back into NF per-field values.
+  SDValue SuperReg = SDValue(Load, 0);
+  for (unsigned I = 0; I < NF; ++I) {
+    unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
+    ReplaceUses(SDValue(Node, I),
+                CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
+  }
+
+  ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));     // VL
+  ReplaceUses(SDValue(Node, NF + 1), SDValue(Load, 2)); // Chain
+  CurDAG->RemoveDeadNode(Node);
+}
+
+// Select an indexed (ordered or unordered) segment load into its VLXSEG
+// pseudo. The index vector's EEW/LMUL select the pseudo variant; EEW=64
+// indices are rejected on RV32 per the V extension.
+void RISCVDAGToDAGISel::selectVLXSEG(SDNode *Node, bool IsMasked,
+                                     bool IsOrdered) {
+  SDLoc DL(Node);
+  unsigned NF = Node->getNumValues() - 1;
+  MVT VT = Node->getSimpleValueType(0);
+  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
+  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+
+  unsigned CurOp = 2;
+  SmallVector<SDValue, 8> Operands;
+
+  SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
+                               Node->op_begin() + CurOp + NF);
+  // Tail-undisturbed form is needed when masked or any passthru is defined.
+  bool IsTU = IsMasked || !isAllUndef(Regs);
+  if (IsTU) {
+    SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
+    Operands.push_back(MaskedOff);
+  }
+  CurOp += NF;
+
+  MVT IndexVT;
+  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
+                             /*IsStridedOrIndexed*/ true, Operands,
+                             /*IsLoad=*/true, &IndexVT);
+
+  assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
+         "Element count mismatch");
+
+  RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
+  unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
+  if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
+    report_fatal_error("The V extension does not support EEW=64 for index "
+                       "values when XLEN=32");
+  }
+  const RISCV::VLXSEGPseudo *P = RISCV::getVLXSEGPseudo(
+      NF, IsMasked, IsTU, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
+      static_cast<unsigned>(IndexLMUL));
+  MachineSDNode *Load =
+      CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);
+
+  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
+    CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
+
+  // Split the tuple result back into NF per-field values.
+  SDValue SuperReg = SDValue(Load, 0);
+  for (unsigned I = 0; I < NF; ++I) {
+    unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
+    ReplaceUses(SDValue(Node, I),
+                CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
+  }
+
+  ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
+  CurDAG->RemoveDeadNode(Node);
+}
+
+// Select a (possibly masked/strided) segment store into its VSSEG pseudo.
+// NF is recovered from the operand count by subtracting the fixed operands
+// (chain, intrinsic id, ptr, VL) and the optional stride/mask.
+void RISCVDAGToDAGISel::selectVSSEG(SDNode *Node, bool IsMasked,
+                                    bool IsStrided) {
+  SDLoc DL(Node);
+  unsigned NF = Node->getNumOperands() - 4;
+  if (IsStrided)
+    NF--;
+  if (IsMasked)
+    NF--;
+  MVT VT = Node->getOperand(2)->getSimpleValueType(0);
+  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
+  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+  SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
+  SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);
+
+  SmallVector<SDValue, 8> Operands;
+  Operands.push_back(StoreVal);
+  unsigned CurOp = 2 + NF;
+
+  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
+                             Operands);
+
+  const RISCV::VSSEGPseudo *P = RISCV::getVSSEGPseudo(
+      NF, IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
+  MachineSDNode *Store =
+      CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);
+
+  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
+    CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
+
+  ReplaceNode(Node, Store);
+}
+
+// Select an indexed (ordered or unordered) segment store into its VSXSEG
+// pseudo. As in selectVLXSEG, the index EEW/LMUL pick the variant, and
+// EEW=64 indices are rejected on RV32.
+void RISCVDAGToDAGISel::selectVSXSEG(SDNode *Node, bool IsMasked,
+                                     bool IsOrdered) {
+  SDLoc DL(Node);
+  unsigned NF = Node->getNumOperands() - 5;
+  if (IsMasked)
+    --NF;
+  MVT VT = Node->getOperand(2)->getSimpleValueType(0);
+  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
+  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+  SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
+  SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);
+
+  SmallVector<SDValue, 8> Operands;
+  Operands.push_back(StoreVal);
+  unsigned CurOp = 2 + NF;
+
+  MVT IndexVT;
+  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
+                             /*IsStridedOrIndexed*/ true, Operands,
+                             /*IsLoad=*/false, &IndexVT);
+
+  assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
+         "Element count mismatch");
+
+  RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
+  unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
+  if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
+    report_fatal_error("The V extension does not support EEW=64 for index "
+                       "values when XLEN=32");
+  }
+  const RISCV::VSXSEGPseudo *P = RISCV::getVSXSEGPseudo(
+      NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
+      static_cast<unsigned>(IndexLMUL));
+  MachineSDNode *Store =
+      CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);
+
+  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
+    CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
+
+  ReplaceNode(Node, Store);
+}
+
+// Select a vsetvli/vsetvlimax intrinsic into PseudoVSETVLI / PseudoVSETVLIX0
+// / PseudoVSETIVLI. The SEW and LMUL operands are folded into a single
+// encoded vtype immediate; a small constant AVL uses the immediate form.
+void RISCVDAGToDAGISel::selectVSETVLI(SDNode *Node) {
+  if (!Subtarget->hasVInstructions())
+    return;
+
+  assert((Node->getOpcode() == ISD::INTRINSIC_W_CHAIN ||
+          Node->getOpcode() == ISD::INTRINSIC_WO_CHAIN) &&
+         "Unexpected opcode");
+
+  SDLoc DL(Node);
+  MVT XLenVT = Subtarget->getXLenVT();
+
+  // Chained intrinsics carry the chain as operand 0, shifting everything.
+  bool HasChain = Node->getOpcode() == ISD::INTRINSIC_W_CHAIN;
+  unsigned IntNoOffset = HasChain ? 1 : 0;
+  unsigned IntNo = Node->getConstantOperandVal(IntNoOffset);
+
+  assert((IntNo == Intrinsic::riscv_vsetvli ||
+          IntNo == Intrinsic::riscv_vsetvlimax ||
+          IntNo == Intrinsic::riscv_vsetvli_opt ||
+          IntNo == Intrinsic::riscv_vsetvlimax_opt) &&
+         "Unexpected vsetvli intrinsic");
+
+  // The max variants take no AVL operand, so SEW/LMUL sit one slot earlier.
+  bool VLMax = IntNo == Intrinsic::riscv_vsetvlimax ||
+               IntNo == Intrinsic::riscv_vsetvlimax_opt;
+  unsigned Offset = IntNoOffset + (VLMax ? 1 : 2);
+
+  assert(Node->getNumOperands() == Offset + 2 &&
+         "Unexpected number of operands");
+
+  unsigned SEW =
+      RISCVVType::decodeVSEW(Node->getConstantOperandVal(Offset) & 0x7);
+  RISCVII::VLMUL VLMul = static_cast<RISCVII::VLMUL>(
+      Node->getConstantOperandVal(Offset + 1) & 0x7);
+
+  unsigned VTypeI = RISCVVType::encodeVTYPE(VLMul, SEW, /*TailAgnostic*/ true,
+                                            /*MaskAgnostic*/ false);
+  SDValue VTypeIOp = CurDAG->getTargetConstant(VTypeI, DL, XLenVT);
+
+  SmallVector<EVT, 2> VTs = {XLenVT};
+  if (HasChain)
+    VTs.push_back(MVT::Other);
+
+  SDValue VLOperand;
+  unsigned Opcode = RISCV::PseudoVSETVLI;
+  if (VLMax) {
+    // vsetvlimax: AVL is X0, which requests the maximum VL.
+    VLOperand = CurDAG->getRegister(RISCV::X0, XLenVT);
+    Opcode = RISCV::PseudoVSETVLIX0;
+  } else {
+    VLOperand = Node->getOperand(IntNoOffset + 1);
+
+    // A constant AVL that fits in 5 bits can use the immediate form.
+    if (auto *C = dyn_cast<ConstantSDNode>(VLOperand)) {
+      uint64_t AVL = C->getZExtValue();
+      if (isUInt<5>(AVL)) {
+        SDValue VLImm = CurDAG->getTargetConstant(AVL, DL, XLenVT);
+        SmallVector<SDValue, 3> Ops = {VLImm, VTypeIOp};
+        if (HasChain)
+          Ops.push_back(Node->getOperand(0));
+        ReplaceNode(
+            Node, CurDAG->getMachineNode(RISCV::PseudoVSETIVLI, DL, VTs, Ops));
+        return;
+      }
+    }
+  }
+
+  SmallVector<SDValue, 3> Ops = {VLOperand, VTypeIOp};
+  if (HasChain)
+    Ops.push_back(Node->getOperand(0));
+
+  ReplaceNode(Node, CurDAG->getMachineNode(Opcode, DL, VTs, Ops));
+}
+
+// Try to rewrite (x << C1) op C2 as (x op (C2 >> C1)) << C1 so the logic
+// immediate fits ANDI/ORI/XORI. May peek through a sext_inreg-from-i32 and
+// finish with SLLIW instead of SLLI. Returns true if the node was replaced.
+bool RISCVDAGToDAGISel::tryShrinkShlLogicImm(SDNode *Node) {
+  MVT VT = Node->getSimpleValueType(0);
+  unsigned Opcode = Node->getOpcode();
+  assert((Opcode == ISD::AND || Opcode == ISD::OR || Opcode == ISD::XOR) &&
+         "Unexpected opcode");
+  SDLoc DL(Node);
+
+  // For operations of the form (x << C1) op C2, check if we can use
+  // ANDI/ORI/XORI by transforming it into (x op (C2>>C1)) << C1.
+  SDValue N0 = Node->getOperand(0);
+  SDValue N1 = Node->getOperand(1);
+
+  ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N1);
+  if (!Cst)
+    return false;
+
+  int64_t Val = Cst->getSExtValue();
+
+  // Check if immediate can already use ANDI/ORI/XORI.
+  if (isInt<12>(Val))
+    return false;
+
+  SDValue Shift = N0;
+
+  // If Val is simm32 and we have a sext_inreg from i32, then the binop
+  // produces at least 33 sign bits. We can peek through the sext_inreg and use
+  // a SLLIW at the end.
+  bool SignExt = false;
+  if (isInt<32>(Val) && N0.getOpcode() == ISD::SIGN_EXTEND_INREG &&
+      N0.hasOneUse() && cast<VTSDNode>(N0.getOperand(1))->getVT() == MVT::i32) {
+    SignExt = true;
+    Shift = N0.getOperand(0);
+  }
+
+  if (Shift.getOpcode() != ISD::SHL || !Shift.hasOneUse())
+    return false;
+
+  ConstantSDNode *ShlCst = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
+  if (!ShlCst)
+    return false;
+
+  uint64_t ShAmt = ShlCst->getZExtValue();
+
+  // Make sure that we don't change the operation by removing bits.
+  // This only matters for OR and XOR, AND is unaffected.
+  uint64_t RemovedBitsMask = maskTrailingOnes<uint64_t>(ShAmt);
+  if (Opcode != ISD::AND && (Val & RemovedBitsMask) != 0)
+    return false;
+
+  int64_t ShiftedVal = Val >> ShAmt;
+  if (!isInt<12>(ShiftedVal))
+    return false;
+
+  // If we peeked through a sext_inreg, make sure the shift is valid for SLLIW.
+  if (SignExt && ShAmt >= 32)
+    return false;
+
+  // Ok, we can reorder to get a smaller immediate.
+  unsigned BinOpc;
+  switch (Opcode) {
+  default: llvm_unreachable("Unexpected opcode");
+  case ISD::AND: BinOpc = RISCV::ANDI; break;
+  case ISD::OR:  BinOpc = RISCV::ORI;  break;
+  case ISD::XOR: BinOpc = RISCV::XORI; break;
+  }
+
+  unsigned ShOpc = SignExt ? RISCV::SLLIW : RISCV::SLLI;
+
+  SDNode *BinOp =
+      CurDAG->getMachineNode(BinOpc, DL, VT, Shift.getOperand(0),
+                             CurDAG->getTargetConstant(ShiftedVal, DL, VT));
+  SDNode *SLLI =
+      CurDAG->getMachineNode(ShOpc, DL, VT, SDValue(BinOp, 0),
+                             CurDAG->getTargetConstant(ShAmt, DL, VT));
+  ReplaceNode(Node, SLLI);
+  return true;
+}
+
+void RISCVDAGToDAGISel::Select(SDNode *Node) {
+ // If we have a custom node, we have already selected.
+ if (Node->isMachineOpcode()) {
+ LLVM_DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << "\n");
+ Node->setNodeId(-1);
+ return;
+ }
+
+ // Instruction Selection not handled by the auto-generated tablegen selection
+ // should be handled here.
+ unsigned Opcode = Node->getOpcode();
+ MVT XLenVT = Subtarget->getXLenVT();
+ SDLoc DL(Node);
+ MVT VT = Node->getSimpleValueType(0);
+
+ switch (Opcode) {
+ case ISD::Constant: {
+ auto *ConstNode = cast<ConstantSDNode>(Node);
+ if (VT == XLenVT && ConstNode->isZero()) {
+ SDValue New =
+ CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL, RISCV::X0, XLenVT);
+ ReplaceNode(Node, New.getNode());
+ return;
+ }
+ int64_t Imm = ConstNode->getSExtValue();
+ // If the upper XLen-16 bits are not used, try to convert this to a simm12
+ // by sign extending bit 15.
+ if (isUInt<16>(Imm) && isInt<12>(SignExtend64<16>(Imm)) &&
+ hasAllHUsers(Node))
+ Imm = SignExtend64<16>(Imm);
+ // If the upper 32-bits are not used try to convert this into a simm32 by
+ // sign extending bit 32.
+ if (!isInt<32>(Imm) && isUInt<32>(Imm) && hasAllWUsers(Node))
+ Imm = SignExtend64<32>(Imm);
+
+ ReplaceNode(Node, selectImm(CurDAG, DL, VT, Imm, *Subtarget));
+ return;
+ }
+ case ISD::SHL: {
+ auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
+ if (!N1C)
+ break;
+ SDValue N0 = Node->getOperand(0);
+ if (N0.getOpcode() != ISD::AND || !N0.hasOneUse() ||
+ !isa<ConstantSDNode>(N0.getOperand(1)))
+ break;
+ unsigned ShAmt = N1C->getZExtValue();
+ uint64_t Mask = N0.getConstantOperandVal(1);
+
+ // Optimize (shl (and X, C2), C) -> (slli (srliw X, C3), C3+C) where C2 has
+ // 32 leading zeros and C3 trailing zeros.
+ if (ShAmt <= 32 && isShiftedMask_64(Mask)) {
+ unsigned XLen = Subtarget->getXLen();
+ unsigned LeadingZeros = XLen - llvm::bit_width(Mask);
+ unsigned TrailingZeros = countTrailingZeros(Mask);
+ if (TrailingZeros > 0 && LeadingZeros == 32) {
+ SDNode *SRLIW = CurDAG->getMachineNode(
+ RISCV::SRLIW, DL, VT, N0->getOperand(0),
+ CurDAG->getTargetConstant(TrailingZeros, DL, VT));
+ SDNode *SLLI = CurDAG->getMachineNode(
+ RISCV::SLLI, DL, VT, SDValue(SRLIW, 0),
+ CurDAG->getTargetConstant(TrailingZeros + ShAmt, DL, VT));
+ ReplaceNode(Node, SLLI);
+ return;
+ }
+ }
+ break;
+ }
+ case ISD::SRL: {
+ auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
+ if (!N1C)
+ break;
+ SDValue N0 = Node->getOperand(0);
+ if (N0.getOpcode() != ISD::AND || !isa<ConstantSDNode>(N0.getOperand(1)))
+ break;
+ unsigned ShAmt = N1C->getZExtValue();
+ uint64_t Mask = N0.getConstantOperandVal(1);
+
+ // Optimize (srl (and X, C2), C) -> (slli (srliw X, C3), C3-C) where C2 has
+ // 32 leading zeros and C3 trailing zeros.
+ if (isShiftedMask_64(Mask) && N0.hasOneUse()) {
+ unsigned XLen = Subtarget->getXLen();
+ unsigned LeadingZeros = XLen - llvm::bit_width(Mask);
+ unsigned TrailingZeros = countTrailingZeros(Mask);
+ if (LeadingZeros == 32 && TrailingZeros > ShAmt) {
+ SDNode *SRLIW = CurDAG->getMachineNode(
+ RISCV::SRLIW, DL, VT, N0->getOperand(0),
+ CurDAG->getTargetConstant(TrailingZeros, DL, VT));
+ SDNode *SLLI = CurDAG->getMachineNode(
+ RISCV::SLLI, DL, VT, SDValue(SRLIW, 0),
+ CurDAG->getTargetConstant(TrailingZeros - ShAmt, DL, VT));
+ ReplaceNode(Node, SLLI);
+ return;
+ }
+ }
+
+ // Optimize (srl (and X, C2), C) ->
+ // (srli (slli X, (XLen-C3), (XLen-C3) + C)
+ // Where C2 is a mask with C3 trailing ones.
+ // Taking into account that the C2 may have had lower bits unset by
+ // SimplifyDemandedBits. This avoids materializing the C2 immediate.
+ // This pattern occurs when type legalizing right shifts for types with
+ // less than XLen bits.
+ Mask |= maskTrailingOnes<uint64_t>(ShAmt);
+ if (!isMask_64(Mask))
+ break;
+ unsigned TrailingOnes = countTrailingOnes(Mask);
+ if (ShAmt >= TrailingOnes)
+ break;
+ // If the mask has 32 trailing ones, use SRLIW.
+ if (TrailingOnes == 32) {
+ SDNode *SRLIW =
+ CurDAG->getMachineNode(RISCV::SRLIW, DL, VT, N0->getOperand(0),
+ CurDAG->getTargetConstant(ShAmt, DL, VT));
+ ReplaceNode(Node, SRLIW);
+ return;
+ }
+
+ // Only do the remaining transforms if the shift has one use.
+ if (!N0.hasOneUse())
+ break;
+
+ // If C2 is (1 << ShAmt) use bexti if possible.
+ if (Subtarget->hasStdExtZbs() && ShAmt + 1 == TrailingOnes) {
+ SDNode *BEXTI =
+ CurDAG->getMachineNode(RISCV::BEXTI, DL, VT, N0->getOperand(0),
+ CurDAG->getTargetConstant(ShAmt, DL, VT));
+ ReplaceNode(Node, BEXTI);
+ return;
+ }
+ unsigned LShAmt = Subtarget->getXLen() - TrailingOnes;
+ SDNode *SLLI =
+ CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0->getOperand(0),
+ CurDAG->getTargetConstant(LShAmt, DL, VT));
+ SDNode *SRLI = CurDAG->getMachineNode(
+ RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
+ CurDAG->getTargetConstant(LShAmt + ShAmt, DL, VT));
+ ReplaceNode(Node, SRLI);
+ return;
+ }
+ case ISD::SRA: {
+ // Optimize (sra (sext_inreg X, i16), C) ->
+ // (srai (slli X, (XLen-16), (XLen-16) + C)
+ // And (sra (sext_inreg X, i8), C) ->
+ // (srai (slli X, (XLen-8), (XLen-8) + C)
+ // This can occur when Zbb is enabled, which makes sext_inreg i16/i8 legal.
+ // This transform matches the code we get without Zbb. The shifts are more
+ // compressible, and this can help expose CSE opportunities in the sdiv by
+ // constant optimization.
+ auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
+ if (!N1C)
+ break;
+ SDValue N0 = Node->getOperand(0);
+ if (N0.getOpcode() != ISD::SIGN_EXTEND_INREG || !N0.hasOneUse())
+ break;
+ unsigned ShAmt = N1C->getZExtValue();
+ unsigned ExtSize =
+ cast<VTSDNode>(N0.getOperand(1))->getVT().getSizeInBits();
+ // ExtSize of 32 should use sraiw via tablegen pattern.
+ if (ExtSize >= 32 || ShAmt >= ExtSize)
+ break;
+ unsigned LShAmt = Subtarget->getXLen() - ExtSize;
+ SDNode *SLLI =
+ CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0->getOperand(0),
+ CurDAG->getTargetConstant(LShAmt, DL, VT));
+ SDNode *SRAI = CurDAG->getMachineNode(
+ RISCV::SRAI, DL, VT, SDValue(SLLI, 0),
+ CurDAG->getTargetConstant(LShAmt + ShAmt, DL, VT));
+ ReplaceNode(Node, SRAI);
+ return;
+ }
+ case ISD::OR:
+ case ISD::XOR:
+ if (tryShrinkShlLogicImm(Node))
+ return;
+
+ break;
+ case ISD::AND: {
+ auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
+ if (!N1C)
+ break;
+
+ SDValue N0 = Node->getOperand(0);
+
+ bool LeftShift = N0.getOpcode() == ISD::SHL;
+ if (LeftShift || N0.getOpcode() == ISD::SRL) {
+ auto *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
+ if (!C)
+ break;
+ unsigned C2 = C->getZExtValue();
+ unsigned XLen = Subtarget->getXLen();
+ assert((C2 > 0 && C2 < XLen) && "Unexpected shift amount!");
+
+ uint64_t C1 = N1C->getZExtValue();
+
+ // Keep track of whether this is a c.andi. If we can't use c.andi, the
+ // shift pair might offer more compression opportunities.
+ // TODO: We could check for C extension here, but we don't have many lit
+ // tests with the C extension enabled so not checking gets better
+ // coverage.
+ // TODO: What if ANDI faster than shift?
+ bool IsCANDI = isInt<6>(N1C->getSExtValue());
+
+ // Clear irrelevant bits in the mask.
+ if (LeftShift)
+ C1 &= maskTrailingZeros<uint64_t>(C2);
+ else
+ C1 &= maskTrailingOnes<uint64_t>(XLen - C2);
+
+ // Some transforms should only be done if the shift has a single use or
+ // the AND would become (srli (slli X, 32), 32)
+ bool OneUseOrZExtW = N0.hasOneUse() || C1 == UINT64_C(0xFFFFFFFF);
+
+ SDValue X = N0.getOperand(0);
+
+ // Turn (and (srl x, c2) c1) -> (srli (slli x, c3-c2), c3) if c1 is a mask
+ // with c3 leading zeros.
+ if (!LeftShift && isMask_64(C1)) {
+ unsigned Leading = XLen - llvm::bit_width(C1);
+ if (C2 < Leading) {
+ // If the number of leading zeros is C2+32 this can be SRLIW.
+ if (C2 + 32 == Leading) {
+ SDNode *SRLIW = CurDAG->getMachineNode(
+ RISCV::SRLIW, DL, VT, X, CurDAG->getTargetConstant(C2, DL, VT));
+ ReplaceNode(Node, SRLIW);
+ return;
+ }
+
+ // (and (srl (sexti32 Y), c2), c1) -> (srliw (sraiw Y, 31), c3 - 32)
+ // if c1 is a mask with c3 leading zeros and c2 >= 32 and c3-c2==1.
+ //
+ // This pattern occurs when (i32 (srl (sra 31), c3 - 32)) is type
+ // legalized and goes through DAG combine.
+ if (C2 >= 32 && (Leading - C2) == 1 && N0.hasOneUse() &&
+ X.getOpcode() == ISD::SIGN_EXTEND_INREG &&
+ cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32) {
+ SDNode *SRAIW =
+ CurDAG->getMachineNode(RISCV::SRAIW, DL, VT, X.getOperand(0),
+ CurDAG->getTargetConstant(31, DL, VT));
+ SDNode *SRLIW = CurDAG->getMachineNode(
+ RISCV::SRLIW, DL, VT, SDValue(SRAIW, 0),
+ CurDAG->getTargetConstant(Leading - 32, DL, VT));
+ ReplaceNode(Node, SRLIW);
+ return;
+ }
+
+ // (srli (slli x, c3-c2), c3).
+ // Skip if we could use (zext.w (sraiw X, C2)).
+ bool Skip = Subtarget->hasStdExtZba() && Leading == 32 &&
+ X.getOpcode() == ISD::SIGN_EXTEND_INREG &&
+ cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32;
+ // Also Skip if we can use bexti.
+ Skip |= Subtarget->hasStdExtZbs() && Leading == XLen - 1;
+ if (OneUseOrZExtW && !Skip) {
+ SDNode *SLLI = CurDAG->getMachineNode(
+ RISCV::SLLI, DL, VT, X,
+ CurDAG->getTargetConstant(Leading - C2, DL, VT));
+ SDNode *SRLI = CurDAG->getMachineNode(
+ RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
+ CurDAG->getTargetConstant(Leading, DL, VT));
+ ReplaceNode(Node, SRLI);
+ return;
+ }
+ }
+ }
+
+ // Turn (and (shl x, c2), c1) -> (srli (slli c2+c3), c3) if c1 is a mask
+ // shifted by c2 bits with c3 leading zeros.
+ if (LeftShift && isShiftedMask_64(C1)) {
+ unsigned Leading = XLen - llvm::bit_width(C1);
+
+ if (C2 + Leading < XLen &&
+ C1 == (maskTrailingOnes<uint64_t>(XLen - (C2 + Leading)) << C2)) {
+ // Use slli.uw when possible.
+ if ((XLen - (C2 + Leading)) == 32 && Subtarget->hasStdExtZba()) {
+ SDNode *SLLI_UW =
+ CurDAG->getMachineNode(RISCV::SLLI_UW, DL, VT, X,
+ CurDAG->getTargetConstant(C2, DL, VT));
+ ReplaceNode(Node, SLLI_UW);
+ return;
+ }
+
+ // (srli (slli c2+c3), c3)
+ if (OneUseOrZExtW && !IsCANDI) {
+ SDNode *SLLI = CurDAG->getMachineNode(
+ RISCV::SLLI, DL, VT, X,
+ CurDAG->getTargetConstant(C2 + Leading, DL, VT));
+ SDNode *SRLI = CurDAG->getMachineNode(
+ RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
+ CurDAG->getTargetConstant(Leading, DL, VT));
+ ReplaceNode(Node, SRLI);
+ return;
+ }
+ }
+ }
+
+ // Turn (and (shr x, c2), c1) -> (slli (srli x, c2+c3), c3) if c1 is a
+ // shifted mask with c2 leading zeros and c3 trailing zeros.
+ if (!LeftShift && isShiftedMask_64(C1)) {
+ unsigned Leading = XLen - llvm::bit_width(C1);
+ unsigned Trailing = countTrailingZeros(C1);
+ if (Leading == C2 && C2 + Trailing < XLen && OneUseOrZExtW &&
+ !IsCANDI) {
+ unsigned SrliOpc = RISCV::SRLI;
+ // If the input is zexti32 we should use SRLIW.
+ if (X.getOpcode() == ISD::AND &&
+ isa<ConstantSDNode>(X.getOperand(1)) &&
+ X.getConstantOperandVal(1) == UINT64_C(0xFFFFFFFF)) {
+ SrliOpc = RISCV::SRLIW;
+ X = X.getOperand(0);
+ }
+ SDNode *SRLI = CurDAG->getMachineNode(
+ SrliOpc, DL, VT, X,
+ CurDAG->getTargetConstant(C2 + Trailing, DL, VT));
+ SDNode *SLLI = CurDAG->getMachineNode(
+ RISCV::SLLI, DL, VT, SDValue(SRLI, 0),
+ CurDAG->getTargetConstant(Trailing, DL, VT));
+ ReplaceNode(Node, SLLI);
+ return;
+ }
+ // If the leading zero count is C2+32, we can use SRLIW instead of SRLI.
+ if (Leading > 32 && (Leading - 32) == C2 && C2 + Trailing < 32 &&
+ OneUseOrZExtW && !IsCANDI) {
+ SDNode *SRLIW = CurDAG->getMachineNode(
+ RISCV::SRLIW, DL, VT, X,
+ CurDAG->getTargetConstant(C2 + Trailing, DL, VT));
+ SDNode *SLLI = CurDAG->getMachineNode(
+ RISCV::SLLI, DL, VT, SDValue(SRLIW, 0),
+ CurDAG->getTargetConstant(Trailing, DL, VT));
+ ReplaceNode(Node, SLLI);
+ return;
+ }
+ }
+
+ // Turn (and (shl x, c2), c1) -> (slli (srli x, c3-c2), c3) if c1 is a
+ // shifted mask with no leading zeros and c3 trailing zeros.
+ if (LeftShift && isShiftedMask_64(C1)) {
+ unsigned Leading = XLen - llvm::bit_width(C1);
+ unsigned Trailing = countTrailingZeros(C1);
+ if (Leading == 0 && C2 < Trailing && OneUseOrZExtW && !IsCANDI) {
+ SDNode *SRLI = CurDAG->getMachineNode(
+ RISCV::SRLI, DL, VT, X,
+ CurDAG->getTargetConstant(Trailing - C2, DL, VT));
+ SDNode *SLLI = CurDAG->getMachineNode(
+ RISCV::SLLI, DL, VT, SDValue(SRLI, 0),
+ CurDAG->getTargetConstant(Trailing, DL, VT));
+ ReplaceNode(Node, SLLI);
+ return;
+ }
+ // If we have (32-C2) leading zeros, we can use SRLIW instead of SRLI.
+ if (C2 < Trailing && Leading + C2 == 32 && OneUseOrZExtW && !IsCANDI) {
+ SDNode *SRLIW = CurDAG->getMachineNode(
+ RISCV::SRLIW, DL, VT, X,
+ CurDAG->getTargetConstant(Trailing - C2, DL, VT));
+ SDNode *SLLI = CurDAG->getMachineNode(
+ RISCV::SLLI, DL, VT, SDValue(SRLIW, 0),
+ CurDAG->getTargetConstant(Trailing, DL, VT));
+ ReplaceNode(Node, SLLI);
+ return;
+ }
+ }
+ }
+
+ if (tryShrinkShlLogicImm(Node))
+ return;
+
+ break;
+ }
+ case ISD::MUL: {
+ // Special case for calculating (mul (and X, C2), C1) where the full product
+ // fits in XLen bits. We can shift X left by the number of leading zeros in
+ // C2 and shift C1 left by XLen-lzcnt(C2). This will ensure the final
+ // product has XLen trailing zeros, putting it in the output of MULHU. This
+ // can avoid materializing a constant in a register for C2.
+
+ // RHS should be a constant.
+ auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
+ if (!N1C || !N1C->hasOneUse())
+ break;
+
+ // LHS should be an AND with constant.
+ SDValue N0 = Node->getOperand(0);
+ if (N0.getOpcode() != ISD::AND || !isa<ConstantSDNode>(N0.getOperand(1)))
+ break;
+
+ uint64_t C2 = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
+
+ // Constant should be a mask.
+ if (!isMask_64(C2))
+ break;
+
+ // If this can be an ANDI, ZEXT.H or ZEXT.W, don't do this if the ANDI/ZEXT
+ // has multiple users or the constant is a simm12. This prevents inserting
+ // a shift and still have uses of the AND/ZEXT. Shifting a simm12 will
+ // likely make it more costly to materialize. Otherwise, using a SLLI
+ // might allow it to be compressed.
+ bool IsANDIOrZExt =
+ isInt<12>(C2) ||
+ (C2 == UINT64_C(0xFFFF) && Subtarget->hasStdExtZbb()) ||
+ (C2 == UINT64_C(0xFFFFFFFF) && Subtarget->hasStdExtZba());
+ if (IsANDIOrZExt && (isInt<12>(N1C->getSExtValue()) || !N0.hasOneUse()))
+ break;
+
+ // We need to shift left the AND input and C1 by a total of XLen bits.
+
+ // How far left do we need to shift the AND input?
+ unsigned XLen = Subtarget->getXLen();
+ unsigned LeadingZeros = XLen - llvm::bit_width(C2);
+
+ // The constant gets shifted by the remaining amount unless that would
+ // shift bits out.
+ uint64_t C1 = N1C->getZExtValue();
+ unsigned ConstantShift = XLen - LeadingZeros;
+ if (ConstantShift > (XLen - llvm::bit_width(C1)))
+ break;
+
+ uint64_t ShiftedC1 = C1 << ConstantShift;
+ // If this RV32, we need to sign extend the constant.
+ if (XLen == 32)
+ ShiftedC1 = SignExtend64<32>(ShiftedC1);
+
+ // Create (mulhu (slli X, lzcnt(C2)), C1 << (XLen - lzcnt(C2))).
+ SDNode *Imm = selectImm(CurDAG, DL, VT, ShiftedC1, *Subtarget);
+ SDNode *SLLI =
+ CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0.getOperand(0),
+ CurDAG->getTargetConstant(LeadingZeros, DL, VT));
+ SDNode *MULHU = CurDAG->getMachineNode(RISCV::MULHU, DL, VT,
+ SDValue(SLLI, 0), SDValue(Imm, 0));
+ ReplaceNode(Node, MULHU);
+ return;
+ }
+ case ISD::INTRINSIC_WO_CHAIN: {
+ unsigned IntNo = Node->getConstantOperandVal(0);
+ switch (IntNo) {
+ // By default we do not custom select any intrinsic.
+ default:
+ break;
+ case Intrinsic::riscv_vmsgeu:
+ case Intrinsic::riscv_vmsge: {
+ SDValue Src1 = Node->getOperand(1);
+ SDValue Src2 = Node->getOperand(2);
+ bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu;
+ bool IsCmpUnsignedZero = false;
+ // Only custom select scalar second operand.
+ if (Src2.getValueType() != XLenVT)
+ break;
+ // Small constants are handled with patterns.
+ if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
+ int64_t CVal = C->getSExtValue();
+ if (CVal >= -15 && CVal <= 16) {
+ if (!IsUnsigned || CVal != 0)
+ break;
+ IsCmpUnsignedZero = true;
+ }
+ }
+ MVT Src1VT = Src1.getSimpleValueType();
+ unsigned VMSLTOpcode, VMNANDOpcode, VMSetOpcode;
+ switch (RISCVTargetLowering::getLMUL(Src1VT)) {
+ default:
+ llvm_unreachable("Unexpected LMUL!");
+#define CASE_VMSLT_VMNAND_VMSET_OPCODES(lmulenum, suffix, suffix_b) \
+ case RISCVII::VLMUL::lmulenum: \
+ VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix \
+ : RISCV::PseudoVMSLT_VX_##suffix; \
+ VMNANDOpcode = RISCV::PseudoVMNAND_MM_##suffix; \
+ VMSetOpcode = RISCV::PseudoVMSET_M_##suffix_b; \
+ break;
+ CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F8, MF8, B1)
+ CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F4, MF4, B2)
+ CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F2, MF2, B4)
+ CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_1, M1, B8)
+ CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_2, M2, B16)
+ CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_4, M4, B32)
+ CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_8, M8, B64)
+#undef CASE_VMSLT_VMNAND_VMSET_OPCODES
+ }
+ SDValue SEW = CurDAG->getTargetConstant(
+ Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
+ SDValue VL;
+ selectVLOp(Node->getOperand(3), VL);
+
+ // If vmsgeu with 0 immediate, expand it to vmset.
+ if (IsCmpUnsignedZero) {
+ ReplaceNode(Node, CurDAG->getMachineNode(VMSetOpcode, DL, VT, VL, SEW));
+ return;
+ }
+
+ // Expand to
+ // vmslt{u}.vx vd, va, x; vmnand.mm vd, vd, vd
+ SDValue Cmp = SDValue(
+ CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
+ 0);
+ ReplaceNode(Node, CurDAG->getMachineNode(VMNANDOpcode, DL, VT,
+ {Cmp, Cmp, VL, SEW}));
+ return;
+ }
+ case Intrinsic::riscv_vmsgeu_mask:
+ case Intrinsic::riscv_vmsge_mask: {
+ SDValue Src1 = Node->getOperand(2);
+ SDValue Src2 = Node->getOperand(3);
+ bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu_mask;
+ bool IsCmpUnsignedZero = false;
+ // Only custom select scalar second operand.
+ if (Src2.getValueType() != XLenVT)
+ break;
+ // Small constants are handled with patterns.
+ if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
+ int64_t CVal = C->getSExtValue();
+ if (CVal >= -15 && CVal <= 16) {
+ if (!IsUnsigned || CVal != 0)
+ break;
+ IsCmpUnsignedZero = true;
+ }
+ }
+ MVT Src1VT = Src1.getSimpleValueType();
+ unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOpcode,
+ VMOROpcode;
+ switch (RISCVTargetLowering::getLMUL(Src1VT)) {
+ default:
+ llvm_unreachable("Unexpected LMUL!");
+#define CASE_VMSLT_OPCODES(lmulenum, suffix, suffix_b) \
+ case RISCVII::VLMUL::lmulenum: \
+ VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix \
+ : RISCV::PseudoVMSLT_VX_##suffix; \
+ VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix##_MASK \
+ : RISCV::PseudoVMSLT_VX_##suffix##_MASK; \
+ break;
+ CASE_VMSLT_OPCODES(LMUL_F8, MF8, B1)
+ CASE_VMSLT_OPCODES(LMUL_F4, MF4, B2)
+ CASE_VMSLT_OPCODES(LMUL_F2, MF2, B4)
+ CASE_VMSLT_OPCODES(LMUL_1, M1, B8)
+ CASE_VMSLT_OPCODES(LMUL_2, M2, B16)
+ CASE_VMSLT_OPCODES(LMUL_4, M4, B32)
+ CASE_VMSLT_OPCODES(LMUL_8, M8, B64)
+#undef CASE_VMSLT_OPCODES
+ }
+ // Mask operations use the LMUL from the mask type.
+ switch (RISCVTargetLowering::getLMUL(VT)) {
+ default:
+ llvm_unreachable("Unexpected LMUL!");
+#define CASE_VMXOR_VMANDN_VMOR_OPCODES(lmulenum, suffix) \
+ case RISCVII::VLMUL::lmulenum: \
+ VMXOROpcode = RISCV::PseudoVMXOR_MM_##suffix; \
+ VMANDNOpcode = RISCV::PseudoVMANDN_MM_##suffix; \
+ VMOROpcode = RISCV::PseudoVMOR_MM_##suffix; \
+ break;
+ CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F8, MF8)
+ CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F4, MF4)
+ CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F2, MF2)
+ CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_1, M1)
+ CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_2, M2)
+ CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_4, M4)
+ CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_8, M8)
+#undef CASE_VMXOR_VMANDN_VMOR_OPCODES
+ }
+ SDValue SEW = CurDAG->getTargetConstant(
+ Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
+ SDValue MaskSEW = CurDAG->getTargetConstant(0, DL, XLenVT);
+ SDValue VL;
+ selectVLOp(Node->getOperand(5), VL);
+ SDValue MaskedOff = Node->getOperand(1);
+ SDValue Mask = Node->getOperand(4);
+
+ // If vmsgeu_mask with 0 immediate, expand it to vmor mask, maskedoff.
+ if (IsCmpUnsignedZero) {
+ // We don't need vmor if the MaskedOff and the Mask are the same
+ // value.
+ if (Mask == MaskedOff) {
+ ReplaceUses(Node, Mask.getNode());
+ return;
+ }
+ ReplaceNode(Node,
+ CurDAG->getMachineNode(VMOROpcode, DL, VT,
+ {Mask, MaskedOff, VL, MaskSEW}));
+ return;
+ }
+
+ // If the MaskedOff value and the Mask are the same value use
+ // vmslt{u}.vx vt, va, x; vmandn.mm vd, vd, vt
+ // This avoids needing to copy v0 to vd before starting the next sequence.
+ if (Mask == MaskedOff) {
+ SDValue Cmp = SDValue(
+ CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
+ 0);
+ ReplaceNode(Node, CurDAG->getMachineNode(VMANDNOpcode, DL, VT,
+ {Mask, Cmp, VL, MaskSEW}));
+ return;
+ }
+
+ // Mask needs to be copied to V0.
+ SDValue Chain = CurDAG->getCopyToReg(CurDAG->getEntryNode(), DL,
+ RISCV::V0, Mask, SDValue());
+ SDValue Glue = Chain.getValue(1);
+ SDValue V0 = CurDAG->getRegister(RISCV::V0, VT);
+
+ // Otherwise use
+ // vmslt{u}.vx vd, va, x, v0.t; vmxor.mm vd, vd, v0
+ // The result is mask undisturbed.
+ // We use the same instructions to emulate mask agnostic behavior, because
+ // the agnostic result can be either undisturbed or all 1.
+ SDValue Cmp = SDValue(
+ CurDAG->getMachineNode(VMSLTMaskOpcode, DL, VT,
+ {MaskedOff, Src1, Src2, V0, VL, SEW, Glue}),
+ 0);
+ // vmxor.mm vd, vd, v0 is used to update active value.
+ ReplaceNode(Node, CurDAG->getMachineNode(VMXOROpcode, DL, VT,
+ {Cmp, Mask, VL, MaskSEW}));
+ return;
+ }
+ case Intrinsic::riscv_vsetvli_opt:
+ case Intrinsic::riscv_vsetvlimax_opt:
+ return selectVSETVLI(Node);
+ }
+ break;
+ }
+ case ISD::INTRINSIC_W_CHAIN: {
+ unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
+ switch (IntNo) {
+ // By default we do not custom select any intrinsic.
+ default:
+ break;
+ case Intrinsic::riscv_vsetvli:
+ case Intrinsic::riscv_vsetvlimax:
+ return selectVSETVLI(Node);
+ case Intrinsic::riscv_vlseg2:
+ case Intrinsic::riscv_vlseg3:
+ case Intrinsic::riscv_vlseg4:
+ case Intrinsic::riscv_vlseg5:
+ case Intrinsic::riscv_vlseg6:
+ case Intrinsic::riscv_vlseg7:
+ case Intrinsic::riscv_vlseg8: {
+ selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
+ return;
+ }
+ case Intrinsic::riscv_vlseg2_mask:
+ case Intrinsic::riscv_vlseg3_mask:
+ case Intrinsic::riscv_vlseg4_mask:
+ case Intrinsic::riscv_vlseg5_mask:
+ case Intrinsic::riscv_vlseg6_mask:
+ case Intrinsic::riscv_vlseg7_mask:
+ case Intrinsic::riscv_vlseg8_mask: {
+ selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
+ return;
+ }
+ case Intrinsic::riscv_vlsseg2:
+ case Intrinsic::riscv_vlsseg3:
+ case Intrinsic::riscv_vlsseg4:
+ case Intrinsic::riscv_vlsseg5:
+ case Intrinsic::riscv_vlsseg6:
+ case Intrinsic::riscv_vlsseg7:
+ case Intrinsic::riscv_vlsseg8: {
+ selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
+ return;
+ }
+ case Intrinsic::riscv_vlsseg2_mask:
+ case Intrinsic::riscv_vlsseg3_mask:
+ case Intrinsic::riscv_vlsseg4_mask:
+ case Intrinsic::riscv_vlsseg5_mask:
+ case Intrinsic::riscv_vlsseg6_mask:
+ case Intrinsic::riscv_vlsseg7_mask:
+ case Intrinsic::riscv_vlsseg8_mask: {
+ selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
+ return;
+ }
+ case Intrinsic::riscv_vloxseg2:
+ case Intrinsic::riscv_vloxseg3:
+ case Intrinsic::riscv_vloxseg4:
+ case Intrinsic::riscv_vloxseg5:
+ case Intrinsic::riscv_vloxseg6:
+ case Intrinsic::riscv_vloxseg7:
+ case Intrinsic::riscv_vloxseg8:
+ selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
+ return;
+ case Intrinsic::riscv_vluxseg2:
+ case Intrinsic::riscv_vluxseg3:
+ case Intrinsic::riscv_vluxseg4:
+ case Intrinsic::riscv_vluxseg5:
+ case Intrinsic::riscv_vluxseg6:
+ case Intrinsic::riscv_vluxseg7:
+ case Intrinsic::riscv_vluxseg8:
+ selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
+ return;
+ case Intrinsic::riscv_vloxseg2_mask:
+ case Intrinsic::riscv_vloxseg3_mask:
+ case Intrinsic::riscv_vloxseg4_mask:
+ case Intrinsic::riscv_vloxseg5_mask:
+ case Intrinsic::riscv_vloxseg6_mask:
+ case Intrinsic::riscv_vloxseg7_mask:
+ case Intrinsic::riscv_vloxseg8_mask:
+ selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
+ return;
+ case Intrinsic::riscv_vluxseg2_mask:
+ case Intrinsic::riscv_vluxseg3_mask:
+ case Intrinsic::riscv_vluxseg4_mask:
+ case Intrinsic::riscv_vluxseg5_mask:
+ case Intrinsic::riscv_vluxseg6_mask:
+ case Intrinsic::riscv_vluxseg7_mask:
+ case Intrinsic::riscv_vluxseg8_mask:
+ selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
+ return;
+ case Intrinsic::riscv_vlseg8ff:
+ case Intrinsic::riscv_vlseg7ff:
+ case Intrinsic::riscv_vlseg6ff:
+ case Intrinsic::riscv_vlseg5ff:
+ case Intrinsic::riscv_vlseg4ff:
+ case Intrinsic::riscv_vlseg3ff:
+ case Intrinsic::riscv_vlseg2ff: {
+ selectVLSEGFF(Node, /*IsMasked*/ false);
+ return;
+ }
+ case Intrinsic::riscv_vlseg8ff_mask:
+ case Intrinsic::riscv_vlseg7ff_mask:
+ case Intrinsic::riscv_vlseg6ff_mask:
+ case Intrinsic::riscv_vlseg5ff_mask:
+ case Intrinsic::riscv_vlseg4ff_mask:
+ case Intrinsic::riscv_vlseg3ff_mask:
+ case Intrinsic::riscv_vlseg2ff_mask: {
+ selectVLSEGFF(Node, /*IsMasked*/ true);
+ return;
+ }
+ case Intrinsic::riscv_vloxei:
+ case Intrinsic::riscv_vloxei_mask:
+ case Intrinsic::riscv_vluxei:
+ case Intrinsic::riscv_vluxei_mask: {
+ bool IsMasked = IntNo == Intrinsic::riscv_vloxei_mask ||
+ IntNo == Intrinsic::riscv_vluxei_mask;
+ bool IsOrdered = IntNo == Intrinsic::riscv_vloxei ||
+ IntNo == Intrinsic::riscv_vloxei_mask;
+
+ MVT VT = Node->getSimpleValueType(0);
+ unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
+
+ unsigned CurOp = 2;
+ // Masked intrinsic only have TU version pseduo instructions.
+ bool IsTU = IsMasked || !Node->getOperand(CurOp).isUndef();
+ SmallVector<SDValue, 8> Operands;
+ if (IsTU)
+ Operands.push_back(Node->getOperand(CurOp++));
+ else
+ // Skip the undef passthru operand for nomask TA version pseudo
+ CurOp++;
+
+ MVT IndexVT;
+ addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
+ /*IsStridedOrIndexed*/ true, Operands,
+ /*IsLoad=*/true, &IndexVT);
+
+ assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
+ "Element count mismatch");
+
+ RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+ RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
+ unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
+ if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
+ report_fatal_error("The V extension does not support EEW=64 for index "
+ "values when XLEN=32");
+ }
+ const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo(
+ IsMasked, IsTU, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
+ static_cast<unsigned>(IndexLMUL));
+ MachineSDNode *Load =
+ CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
+
+ if (auto *MemOp = dyn_cast<MemSDNode>(Node))
+ CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
+
+ ReplaceNode(Node, Load);
+ return;
+ }
+ case Intrinsic::riscv_vlm:
+ case Intrinsic::riscv_vle:
+ case Intrinsic::riscv_vle_mask:
+ case Intrinsic::riscv_vlse:
+ case Intrinsic::riscv_vlse_mask: {
+ bool IsMasked = IntNo == Intrinsic::riscv_vle_mask ||
+ IntNo == Intrinsic::riscv_vlse_mask;
+ bool IsStrided =
+ IntNo == Intrinsic::riscv_vlse || IntNo == Intrinsic::riscv_vlse_mask;
+
+ MVT VT = Node->getSimpleValueType(0);
+ unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
+
+ unsigned CurOp = 2;
+ // The riscv_vlm intrinsic are always tail agnostic and no passthru operand.
+ bool HasPassthruOperand = IntNo != Intrinsic::riscv_vlm;
+ // Masked intrinsic only have TU version pseduo instructions.
+ bool IsTU = HasPassthruOperand &&
+ (IsMasked || !Node->getOperand(CurOp).isUndef());
+ SmallVector<SDValue, 8> Operands;
+ if (IsTU)
+ Operands.push_back(Node->getOperand(CurOp++));
+ else if (HasPassthruOperand)
+ // Skip the undef passthru operand for nomask TA version pseudo
+ CurOp++;
+
+ addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
+ Operands, /*IsLoad=*/true);
+
+ RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+ const RISCV::VLEPseudo *P =
+ RISCV::getVLEPseudo(IsMasked, IsTU, IsStrided, /*FF*/ false, Log2SEW,
+ static_cast<unsigned>(LMUL));
+ MachineSDNode *Load =
+ CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
+
+ if (auto *MemOp = dyn_cast<MemSDNode>(Node))
+ CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
+
+ ReplaceNode(Node, Load);
+ return;
+ }
+ case Intrinsic::riscv_vleff:
+ case Intrinsic::riscv_vleff_mask: {
+ bool IsMasked = IntNo == Intrinsic::riscv_vleff_mask;
+
+ MVT VT = Node->getSimpleValueType(0);
+ unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
+
+ unsigned CurOp = 2;
+ // Masked intrinsic only have TU version pseduo instructions.
+ bool IsTU = IsMasked || !Node->getOperand(CurOp).isUndef();
+ SmallVector<SDValue, 7> Operands;
+ if (IsTU)
+ Operands.push_back(Node->getOperand(CurOp++));
+ else
+ // Skip the undef passthru operand for nomask TA version pseudo
+ CurOp++;
+
+ addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
+ /*IsStridedOrIndexed*/ false, Operands,
+ /*IsLoad=*/true);
+
+ RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+ const RISCV::VLEPseudo *P =
+ RISCV::getVLEPseudo(IsMasked, IsTU, /*Strided*/ false, /*FF*/ true,
+ Log2SEW, static_cast<unsigned>(LMUL));
+ MachineSDNode *Load = CurDAG->getMachineNode(
+ P->Pseudo, DL, Node->getVTList(), Operands);
+ if (auto *MemOp = dyn_cast<MemSDNode>(Node))
+ CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
+
+ ReplaceNode(Node, Load);
+ return;
+ }
+ }
+ break;
+ }
+ case ISD::INTRINSIC_VOID: {
+ unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
+ switch (IntNo) {
+ case Intrinsic::riscv_vsseg2:
+ case Intrinsic::riscv_vsseg3:
+ case Intrinsic::riscv_vsseg4:
+ case Intrinsic::riscv_vsseg5:
+ case Intrinsic::riscv_vsseg6:
+ case Intrinsic::riscv_vsseg7:
+ case Intrinsic::riscv_vsseg8: {
+ selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
+ return;
+ }
+ case Intrinsic::riscv_vsseg2_mask:
+ case Intrinsic::riscv_vsseg3_mask:
+ case Intrinsic::riscv_vsseg4_mask:
+ case Intrinsic::riscv_vsseg5_mask:
+ case Intrinsic::riscv_vsseg6_mask:
+ case Intrinsic::riscv_vsseg7_mask:
+ case Intrinsic::riscv_vsseg8_mask: {
+ selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
+ return;
+ }
+ case Intrinsic::riscv_vssseg2:
+ case Intrinsic::riscv_vssseg3:
+ case Intrinsic::riscv_vssseg4:
+ case Intrinsic::riscv_vssseg5:
+ case Intrinsic::riscv_vssseg6:
+ case Intrinsic::riscv_vssseg7:
+ case Intrinsic::riscv_vssseg8: {
+ selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
+ return;
+ }
+ case Intrinsic::riscv_vssseg2_mask:
+ case Intrinsic::riscv_vssseg3_mask:
+ case Intrinsic::riscv_vssseg4_mask:
+ case Intrinsic::riscv_vssseg5_mask:
+ case Intrinsic::riscv_vssseg6_mask:
+ case Intrinsic::riscv_vssseg7_mask:
+ case Intrinsic::riscv_vssseg8_mask: {
+ selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
+ return;
+ }
+ case Intrinsic::riscv_vsoxseg2:
+ case Intrinsic::riscv_vsoxseg3:
+ case Intrinsic::riscv_vsoxseg4:
+ case Intrinsic::riscv_vsoxseg5:
+ case Intrinsic::riscv_vsoxseg6:
+ case Intrinsic::riscv_vsoxseg7:
+ case Intrinsic::riscv_vsoxseg8:
+ selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
+ return;
+ case Intrinsic::riscv_vsuxseg2:
+ case Intrinsic::riscv_vsuxseg3:
+ case Intrinsic::riscv_vsuxseg4:
+ case Intrinsic::riscv_vsuxseg5:
+ case Intrinsic::riscv_vsuxseg6:
+ case Intrinsic::riscv_vsuxseg7:
+ case Intrinsic::riscv_vsuxseg8:
+ selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
+ return;
+ case Intrinsic::riscv_vsoxseg2_mask:
+ case Intrinsic::riscv_vsoxseg3_mask:
+ case Intrinsic::riscv_vsoxseg4_mask:
+ case Intrinsic::riscv_vsoxseg5_mask:
+ case Intrinsic::riscv_vsoxseg6_mask:
+ case Intrinsic::riscv_vsoxseg7_mask:
+ case Intrinsic::riscv_vsoxseg8_mask:
+ selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
+ return;
+ case Intrinsic::riscv_vsuxseg2_mask:
+ case Intrinsic::riscv_vsuxseg3_mask:
+ case Intrinsic::riscv_vsuxseg4_mask:
+ case Intrinsic::riscv_vsuxseg5_mask:
+ case Intrinsic::riscv_vsuxseg6_mask:
+ case Intrinsic::riscv_vsuxseg7_mask:
+ case Intrinsic::riscv_vsuxseg8_mask:
+ selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
+ return;
+ case Intrinsic::riscv_vsoxei:
+ case Intrinsic::riscv_vsoxei_mask:
+ case Intrinsic::riscv_vsuxei:
+ case Intrinsic::riscv_vsuxei_mask: {
+ bool IsMasked = IntNo == Intrinsic::riscv_vsoxei_mask ||
+ IntNo == Intrinsic::riscv_vsuxei_mask;
+ bool IsOrdered = IntNo == Intrinsic::riscv_vsoxei ||
+ IntNo == Intrinsic::riscv_vsoxei_mask;
+
+ MVT VT = Node->getOperand(2)->getSimpleValueType(0);
+ unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
+
+ unsigned CurOp = 2;
+ SmallVector<SDValue, 8> Operands;
+ Operands.push_back(Node->getOperand(CurOp++)); // Store value.
+
+ MVT IndexVT;
+ addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
+ /*IsStridedOrIndexed*/ true, Operands,
+ /*IsLoad=*/false, &IndexVT);
+
+ assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
+ "Element count mismatch");
+
+ RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+ RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
+ unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
+ if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
+ report_fatal_error("The V extension does not support EEW=64 for index "
+ "values when XLEN=32");
+ }
+ const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo(
+ IsMasked, /*TU*/ false, IsOrdered, IndexLog2EEW,
+ static_cast<unsigned>(LMUL), static_cast<unsigned>(IndexLMUL));
+ MachineSDNode *Store =
+ CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
+
+ if (auto *MemOp = dyn_cast<MemSDNode>(Node))
+ CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
+
+ ReplaceNode(Node, Store);
+ return;
+ }
+ case Intrinsic::riscv_vsm:
+ case Intrinsic::riscv_vse:
+ case Intrinsic::riscv_vse_mask:
+ case Intrinsic::riscv_vsse:
+ case Intrinsic::riscv_vsse_mask: {
+ bool IsMasked = IntNo == Intrinsic::riscv_vse_mask ||
+ IntNo == Intrinsic::riscv_vsse_mask;
+ bool IsStrided =
+ IntNo == Intrinsic::riscv_vsse || IntNo == Intrinsic::riscv_vsse_mask;
+
+ MVT VT = Node->getOperand(2)->getSimpleValueType(0);
+ unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
+
+ unsigned CurOp = 2;
+ SmallVector<SDValue, 8> Operands;
+ Operands.push_back(Node->getOperand(CurOp++)); // Store value.
+
+ addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
+ Operands);
+
+ RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+ const RISCV::VSEPseudo *P = RISCV::getVSEPseudo(
+ IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
+ MachineSDNode *Store =
+ CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
+ if (auto *MemOp = dyn_cast<MemSDNode>(Node))
+ CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
+
+ ReplaceNode(Node, Store);
+ return;
+ }
+ }
+ break;
+ }
+ case ISD::BITCAST: {
+ MVT SrcVT = Node->getOperand(0).getSimpleValueType();
+ // Just drop bitcasts between vectors if both are fixed or both are
+ // scalable.
+ if ((VT.isScalableVector() && SrcVT.isScalableVector()) ||
+ (VT.isFixedLengthVector() && SrcVT.isFixedLengthVector())) {
+ ReplaceUses(SDValue(Node, 0), Node->getOperand(0));
+ CurDAG->RemoveDeadNode(Node);
+ return;
+ }
+ break;
+ }
+ case ISD::INSERT_SUBVECTOR: {
+ SDValue V = Node->getOperand(0);
+ SDValue SubV = Node->getOperand(1);
+ SDLoc DL(SubV);
+ auto Idx = Node->getConstantOperandVal(2);
+ MVT SubVecVT = SubV.getSimpleValueType();
+
+ const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
+ MVT SubVecContainerVT = SubVecVT;
+ // Establish the correct scalable-vector types for any fixed-length type.
+ if (SubVecVT.isFixedLengthVector())
+ SubVecContainerVT = TLI.getContainerForFixedLengthVector(SubVecVT);
+ if (VT.isFixedLengthVector())
+ VT = TLI.getContainerForFixedLengthVector(VT);
+
+ const auto *TRI = Subtarget->getRegisterInfo();
+ unsigned SubRegIdx;
+ std::tie(SubRegIdx, Idx) =
+ RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
+ VT, SubVecContainerVT, Idx, TRI);
+
+ // If the Idx hasn't been completely eliminated then this is a subvector
+ // insert which doesn't naturally align to a vector register. These must
+ // be handled using instructions to manipulate the vector registers.
+ if (Idx != 0)
+ break;
+
+ RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecContainerVT);
+ bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
+ SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
+ SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
+ (void)IsSubVecPartReg; // Silence unused variable warning without asserts.
+ assert((!IsSubVecPartReg || V.isUndef()) &&
+ "Expecting lowering to have created legal INSERT_SUBVECTORs when "
+ "the subvector is smaller than a full-sized register");
+
+ // If we haven't set a SubRegIdx, then we must be going between
+ // equally-sized LMUL groups (e.g. VR -> VR). This can be done as a copy.
+ if (SubRegIdx == RISCV::NoSubRegister) {
+ unsigned InRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(VT);
+ assert(RISCVTargetLowering::getRegClassIDForVecVT(SubVecContainerVT) ==
+ InRegClassID &&
+ "Unexpected subvector extraction");
+ SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
+ SDNode *NewNode = CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
+ DL, VT, SubV, RC);
+ ReplaceNode(Node, NewNode);
+ return;
+ }
+
+ SDValue Insert = CurDAG->getTargetInsertSubreg(SubRegIdx, DL, VT, V, SubV);
+ ReplaceNode(Node, Insert.getNode());
+ return;
+ }
+ case ISD::EXTRACT_SUBVECTOR: {
+ SDValue V = Node->getOperand(0);
+ auto Idx = Node->getConstantOperandVal(1);
+ MVT InVT = V.getSimpleValueType();
+ SDLoc DL(V);
+
+ const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
+ MVT SubVecContainerVT = VT;
+ // Establish the correct scalable-vector types for any fixed-length type.
+ if (VT.isFixedLengthVector())
+ SubVecContainerVT = TLI.getContainerForFixedLengthVector(VT);
+ if (InVT.isFixedLengthVector())
+ InVT = TLI.getContainerForFixedLengthVector(InVT);
+
+ const auto *TRI = Subtarget->getRegisterInfo();
+ unsigned SubRegIdx;
+ std::tie(SubRegIdx, Idx) =
+ RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
+ InVT, SubVecContainerVT, Idx, TRI);
+
+ // If the Idx hasn't been completely eliminated then this is a subvector
+ // extract which doesn't naturally align to a vector register. These must
+ // be handled using instructions to manipulate the vector registers.
+ if (Idx != 0)
+ break;
+
+ // If we haven't set a SubRegIdx, then we must be going between
+ // equally-sized LMUL types (e.g. VR -> VR). This can be done as a copy.
+ if (SubRegIdx == RISCV::NoSubRegister) {
+ unsigned InRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(InVT);
+ assert(RISCVTargetLowering::getRegClassIDForVecVT(SubVecContainerVT) ==
+ InRegClassID &&
+ "Unexpected subvector extraction");
+ SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
+ SDNode *NewNode =
+ CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, DL, VT, V, RC);
+ ReplaceNode(Node, NewNode);
+ return;
+ }
+
+ SDValue Extract = CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, V);
+ ReplaceNode(Node, Extract.getNode());
+ return;
+ }
+ case RISCVISD::VMV_S_X_VL:
+ case RISCVISD::VFMV_S_F_VL:
+ case RISCVISD::VMV_V_X_VL:
+ case RISCVISD::VFMV_V_F_VL: {
+ // Only if we have optimized zero-stride vector load.
+ if (!Subtarget->hasOptimizedZeroStrideLoad())
+ break;
+
+ // Try to match splat of a scalar load to a strided load with stride of x0.
+ bool IsScalarMove = Node->getOpcode() == RISCVISD::VMV_S_X_VL ||
+ Node->getOpcode() == RISCVISD::VFMV_S_F_VL;
+ if (!Node->getOperand(0).isUndef())
+ break;
+ SDValue Src = Node->getOperand(1);
+ auto *Ld = dyn_cast<LoadSDNode>(Src);
+ if (!Ld)
+ break;
+ EVT MemVT = Ld->getMemoryVT();
+ // The memory VT should be the same size as the element type.
+ if (MemVT.getStoreSize() != VT.getVectorElementType().getStoreSize())
+ break;
+ if (!IsProfitableToFold(Src, Node, Node) ||
+ !IsLegalToFold(Src, Node, Node, TM.getOptLevel()))
+ break;
+
+ SDValue VL;
+ if (IsScalarMove) {
+ // We could deal with more VL if we update the VSETVLI insert pass to
+ // avoid introducing more VSETVLI.
+ if (!isOneConstant(Node->getOperand(2)))
+ break;
+ selectVLOp(Node->getOperand(2), VL);
+ } else
+ selectVLOp(Node->getOperand(2), VL);
+
+ unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
+ SDValue SEW = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
+
+ SDValue Operands[] = {Ld->getBasePtr(),
+ CurDAG->getRegister(RISCV::X0, XLenVT), VL, SEW,
+ Ld->getChain()};
+
+ RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+ const RISCV::VLEPseudo *P = RISCV::getVLEPseudo(
+ /*IsMasked*/ false, /*IsTU*/ false, /*IsStrided*/ true, /*FF*/ false,
+ Log2SEW, static_cast<unsigned>(LMUL));
+ MachineSDNode *Load =
+ CurDAG->getMachineNode(P->Pseudo, DL, {VT, MVT::Other}, Operands);
+ // Update the chain.
+ ReplaceUses(Src.getValue(1), SDValue(Load, 1));
+ // Record the mem-refs
+ CurDAG->setNodeMemRefs(Load, {Ld->getMemOperand()});
+ // Replace the splat with the vlse.
+ ReplaceNode(Node, Load);
+ return;
+ }
+ }
+
+ // Select the default instruction.
+ SelectCode(Node);
+}
+
+bool RISCVDAGToDAGISel::SelectInlineAsmMemoryOperand(
+    const SDValue &Op, unsigned ConstraintID, std::vector<SDValue> &OutOps) {
+  // Both supported memory constraints ('m' and the RISC-V 'A') take a single
+  // plain address operand and need no special handling, so forward the
+  // operand unchanged. Returning false signals the operand was accepted.
+  if (ConstraintID == InlineAsm::Constraint_m ||
+      ConstraintID == InlineAsm::Constraint_A) {
+    OutOps.push_back(Op);
+    return false;
+  }
+
+  // Any other constraint is not handled here.
+  return true;
+}
+
+bool RISCVDAGToDAGISel::SelectAddrFrameIndex(SDValue Addr, SDValue &Base,
+                                             SDValue &Offset) {
+  // Match a bare frame index as (FI, 0); anything else is rejected.
+  auto *FI = dyn_cast<FrameIndexSDNode>(Addr);
+  if (!FI)
+    return false;
+
+  MVT XLenVT = Subtarget->getXLenVT();
+  Base = CurDAG->getTargetFrameIndex(FI->getIndex(), XLenVT);
+  Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), XLenVT);
+  return true;
+}
+
+// Select a frame index and an optional immediate offset from an ADD or OR.
+// On success Base is a target frame index and Offset a target constant.
+bool RISCVDAGToDAGISel::SelectFrameAddrRegImm(SDValue Addr, SDValue &Base,
+                                              SDValue &Offset) {
+  // A bare frame index selects as (FI, 0).
+  if (SelectAddrFrameIndex(Addr, Base, Offset))
+    return true;
+
+  if (!CurDAG->isBaseWithConstantOffset(Addr))
+    return false;
+
+  if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr.getOperand(0))) {
+    // The constant part must fit the 12-bit signed immediate field.
+    int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
+    if (isInt<12>(CVal)) {
+      Base = CurDAG->getTargetFrameIndex(FIN->getIndex(),
+                                         Subtarget->getXLenVT());
+      Offset = CurDAG->getTargetConstant(CVal, SDLoc(Addr),
+                                         Subtarget->getXLenVT());
+      return true;
+    }
+  }
+
+  return false;
+}
+
+// Fold constant addresses into (Base, simm12 Offset) form. Base becomes X0,
+// a single LUI, or the materialization sequence for the upper bits; Offset
+// holds the low 12 bits. Returns false if no profitable split exists.
+static bool selectConstantAddr(SelectionDAG *CurDAG, const SDLoc &DL,
+                               const MVT VT, const RISCVSubtarget *Subtarget,
+                               SDValue Addr, SDValue &Base, SDValue &Offset) {
+  if (!isa<ConstantSDNode>(Addr))
+    return false;
+
+  int64_t CVal = cast<ConstantSDNode>(Addr)->getSExtValue();
+
+  // If the constant is a simm12, we can fold the whole constant and use X0 as
+  // the base. If the constant can be materialized with LUI+simm12, use LUI as
+  // the base. We can't use generateInstSeq because it favors LUI+ADDIW.
+  int64_t Lo12 = SignExtend64<12>(CVal);
+  int64_t Hi = (uint64_t)CVal - (uint64_t)Lo12;
+  if (!Subtarget->is64Bit() || isInt<32>(Hi)) {
+    if (Hi) {
+      int64_t Hi20 = (Hi >> 12) & 0xfffff;
+      Base = SDValue(
+          CurDAG->getMachineNode(RISCV::LUI, DL, VT,
+                                 CurDAG->getTargetConstant(Hi20, DL, VT)),
+          0);
+    } else {
+      // Upper bits are all zero: X0 alone serves as the base.
+      Base = CurDAG->getRegister(RISCV::X0, VT);
+    }
+    Offset = CurDAG->getTargetConstant(Lo12, DL, VT);
+    return true;
+  }
+
+  // Ask how constant materialization would handle this constant.
+  RISCVMatInt::InstSeq Seq =
+      RISCVMatInt::generateInstSeq(CVal, Subtarget->getFeatureBits());
+
+  // If the last instruction would be an ADDI, we can fold its immediate and
+  // emit the rest of the sequence as the base.
+  if (Seq.back().getOpcode() != RISCV::ADDI)
+    return false;
+  Lo12 = Seq.back().getImm();
+
+  // Drop the last instruction.
+  Seq.pop_back();
+  assert(!Seq.empty() && "Expected more instructions in sequence");
+
+  Base = SDValue(selectImmSeq(CurDAG, DL, VT, Seq), 0);
+  Offset = CurDAG->getTargetConstant(Lo12, DL, VT);
+  return true;
+}
+
+// Is this ADD instruction only used as the base pointer of scalar loads and
+// stores?
+static bool isWorthFoldingAdd(SDValue Add) {
+  for (auto *Use : Add->uses()) {
+    // Every user must be a plain or atomic load/store.
+    if (Use->getOpcode() != ISD::LOAD && Use->getOpcode() != ISD::STORE &&
+        Use->getOpcode() != ISD::ATOMIC_LOAD &&
+        Use->getOpcode() != ISD::ATOMIC_STORE)
+      return false;
+    // Only scalar integer and f16/f32/f64 memory types are accepted.
+    EVT VT = cast<MemSDNode>(Use)->getMemoryVT();
+    if (!VT.isScalarInteger() && VT != MVT::f16 && VT != MVT::f32 &&
+        VT != MVT::f64)
+      return false;
+    // Don't allow stores of the value. It must be used as the address.
+    if (Use->getOpcode() == ISD::STORE &&
+        cast<StoreSDNode>(Use)->getValue() == Add)
+      return false;
+    if (Use->getOpcode() == ISD::ATOMIC_STORE &&
+        cast<AtomicSDNode>(Use)->getVal() == Add)
+      return false;
+  }
+
+  return true;
+}
+
+// Select a (Base register, simm12 Offset) pair for scalar memory addressing.
+// Always succeeds: the final fallback is (Addr, 0).
+bool RISCVDAGToDAGISel::SelectAddrRegImm(SDValue Addr, SDValue &Base,
+                                         SDValue &Offset) {
+  if (SelectAddrFrameIndex(Addr, Base, Offset))
+    return true;
+
+  SDLoc DL(Addr);
+  MVT VT = Addr.getSimpleValueType();
+
+  // (ADD_LO hi, lo) already matches the reg+imm shape exactly.
+  if (Addr.getOpcode() == RISCVISD::ADD_LO) {
+    Base = Addr.getOperand(0);
+    Offset = Addr.getOperand(1);
+    return true;
+  }
+
+  if (CurDAG->isBaseWithConstantOffset(Addr)) {
+    int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
+    if (isInt<12>(CVal)) {
+      Base = Addr.getOperand(0);
+      if (Base.getOpcode() == RISCVISD::ADD_LO) {
+        SDValue LoOperand = Base.getOperand(1);
+        if (auto *GA = dyn_cast<GlobalAddressSDNode>(LoOperand)) {
+          // If the Lo in (ADD_LO hi, lo) is a global variable's address
+          // (its low part, really), then we can rely on the alignment of that
+          // variable to provide a margin of safety before low part can overflow
+          // the 12 bits of the load/store offset. Check if CVal falls within
+          // that margin; if so (low part + CVal) can't overflow.
+          const DataLayout &DL = CurDAG->getDataLayout();
+          Align Alignment = commonAlignment(
+              GA->getGlobal()->getPointerAlignment(DL), GA->getOffset());
+          if (CVal == 0 || Alignment > CVal) {
+            int64_t CombinedOffset = CVal + GA->getOffset();
+            Base = Base.getOperand(0);
+            Offset = CurDAG->getTargetGlobalAddress(
+                GA->getGlobal(), SDLoc(LoOperand), LoOperand.getValueType(),
+                CombinedOffset, GA->getTargetFlags());
+            return true;
+          }
+        }
+      }
+
+      if (auto *FIN = dyn_cast<FrameIndexSDNode>(Base))
+        Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), VT);
+      Offset = CurDAG->getTargetConstant(CVal, DL, VT);
+      return true;
+    }
+  }
+
+  // Handle ADD with large immediates.
+  if (Addr.getOpcode() == ISD::ADD && isa<ConstantSDNode>(Addr.getOperand(1))) {
+    int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
+    assert(!isInt<12>(CVal) && "simm12 not already handled?");
+
+    // Handle immediates in the range [-4096,-2049] or [2048, 4094]. We can use
+    // an ADDI for part of the offset and fold the rest into the load/store.
+    // This mirrors the AddiPair PatFrag in RISCVInstrInfo.td.
+    if (isInt<12>(CVal / 2) && isInt<12>(CVal - CVal / 2)) {
+      int64_t Adj = CVal < 0 ? -2048 : 2047;
+      Base = SDValue(
+          CurDAG->getMachineNode(RISCV::ADDI, DL, VT, Addr.getOperand(0),
+                                 CurDAG->getTargetConstant(Adj, DL, VT)),
+          0);
+      Offset = CurDAG->getTargetConstant(CVal - Adj, DL, VT);
+      return true;
+    }
+
+    // For larger immediates, we might be able to save one instruction from
+    // constant materialization by folding the Lo12 bits of the immediate into
+    // the address. We should only do this if the ADD is only used by loads and
+    // stores that can fold the lo12 bits. Otherwise, the ADD will get iseled
+    // separately with the full materialized immediate creating extra
+    // instructions.
+    if (isWorthFoldingAdd(Addr) &&
+        selectConstantAddr(CurDAG, DL, VT, Subtarget, Addr.getOperand(1), Base,
+                           Offset)) {
+      // Insert an ADD instruction with the materialized Hi52 bits.
+      Base = SDValue(
+          CurDAG->getMachineNode(RISCV::ADD, DL, VT, Addr.getOperand(0), Base),
+          0);
+      return true;
+    }
+  }
+
+  if (selectConstantAddr(CurDAG, DL, VT, Subtarget, Addr, Base, Offset))
+    return true;
+
+  // Fallback: use the address as-is with a zero offset.
+  Base = Addr;
+  Offset = CurDAG->getTargetConstant(0, DL, VT);
+  return true;
+}
+
+// Select a shift-amount operand, looking through operations that cannot
+// change the low log2(ShiftWidth) bits the shift actually consumes.
+// Always returns true; ShAmt defaults to N itself when nothing folds.
+bool RISCVDAGToDAGISel::selectShiftMask(SDValue N, unsigned ShiftWidth,
+                                        SDValue &ShAmt) {
+  ShAmt = N;
+
+  // Shift instructions on RISCV only read the lower 5 or 6 bits of the shift
+  // amount. If there is an AND on the shift amount, we can bypass it if it
+  // doesn't affect any of those bits.
+  if (ShAmt.getOpcode() == ISD::AND && isa<ConstantSDNode>(ShAmt.getOperand(1))) {
+    const APInt &AndMask = ShAmt.getConstantOperandAPInt(1);
+
+    // Since the max shift amount is a power of 2 we can subtract 1 to make a
+    // mask that covers the bits needed to represent all shift amounts.
+    assert(isPowerOf2_32(ShiftWidth) && "Unexpected max shift amount!");
+    APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);
+
+    if (ShMask.isSubsetOf(AndMask)) {
+      ShAmt = ShAmt.getOperand(0);
+    } else {
+      // SimplifyDemandedBits may have optimized the mask so try restoring any
+      // bits that are known zero.
+      KnownBits Known = CurDAG->computeKnownBits(ShAmt.getOperand(0));
+      if (!ShMask.isSubsetOf(AndMask | Known.Zero))
+        return true;
+      ShAmt = ShAmt.getOperand(0);
+    }
+  }
+
+  if (ShAmt.getOpcode() == ISD::ADD &&
+      isa<ConstantSDNode>(ShAmt.getOperand(1))) {
+    uint64_t Imm = ShAmt.getConstantOperandVal(1);
+    // If we are shifting by X+N where N == 0 mod Size, then just shift by X
+    // to avoid the ADD.
+    if (Imm != 0 && Imm % ShiftWidth == 0) {
+      ShAmt = ShAmt.getOperand(0);
+      return true;
+    }
+  } else if (ShAmt.getOpcode() == ISD::SUB &&
+             isa<ConstantSDNode>(ShAmt.getOperand(0))) {
+    uint64_t Imm = ShAmt.getConstantOperandVal(0);
+    // If we are shifting by N-X where N == 0 mod Size, then just shift by -X to
+    // generate a NEG instead of a SUB of a constant.
+    if (Imm != 0 && Imm % ShiftWidth == 0) {
+      SDLoc DL(ShAmt);
+      EVT VT = ShAmt.getValueType();
+      SDValue Zero = CurDAG->getRegister(RISCV::X0, VT);
+      unsigned NegOpc = VT == MVT::i64 ? RISCV::SUBW : RISCV::SUB;
+      MachineSDNode *Neg = CurDAG->getMachineNode(NegOpc, DL, VT, Zero,
+                                                  ShAmt.getOperand(1));
+      ShAmt = SDValue(Neg, 0);
+      return true;
+    }
+    // If we are shifting by N-X where N == -1 mod Size, then just shift by ~X
+    // to generate a NOT instead of a SUB of a constant.
+    if (Imm % ShiftWidth == ShiftWidth - 1) {
+      SDLoc DL(ShAmt);
+      EVT VT = ShAmt.getValueType();
+      MachineSDNode *Not =
+          CurDAG->getMachineNode(RISCV::XORI, DL, VT, ShAmt.getOperand(1),
+                                 CurDAG->getTargetConstant(-1, DL, VT));
+      ShAmt = SDValue(Not, 0);
+      return true;
+    }
+  }
+
+  return true;
+}
+
+bool RISCVDAGToDAGISel::selectSExti32(SDValue N, SDValue &Val) {
+  // An explicit sign_extend_inreg from i32: strip it and use its source.
+  if (N.getOpcode() == ISD::SIGN_EXTEND_INREG &&
+      cast<VTSDNode>(N.getOperand(1))->getVT() == MVT::i32) {
+    Val = N.getOperand(0);
+    return true;
+  }
+
+  // Otherwise accept N itself when every bit above bit 31 is known to be a
+  // copy of the sign bit.
+  MVT VT = N.getSimpleValueType();
+  if (CurDAG->ComputeNumSignBits(N) <= (VT.getSizeInBits() - 32))
+    return false;
+
+  Val = N;
+  return true;
+}
+
+bool RISCVDAGToDAGISel::selectZExtBits(SDValue N, unsigned Bits, SDValue &Val) {
+  // An AND with a mask of exactly Bits trailing ones is an explicit zero
+  // extension: look through it.
+  if (N.getOpcode() == ISD::AND)
+    if (auto *Mask = dyn_cast<ConstantSDNode>(N.getOperand(1)))
+      if (Mask->getZExtValue() == maskTrailingOnes<uint64_t>(Bits)) {
+        Val = N.getOperand(0);
+        return true;
+      }
+
+  // Otherwise accept N itself when everything above bit Bits-1 is known zero.
+  MVT VT = N.getSimpleValueType();
+  if (!CurDAG->MaskedValueIsZero(
+          N, APInt::getBitsSetFrom(VT.getSizeInBits(), Bits)))
+    return false;
+
+  Val = N;
+  return true;
+}
+
+/// Look for various patterns that can be done with a SHL that can be folded
+/// into a SHXADD. \p ShAmt contains 1, 2, or 3 and is set based on which
+/// SHXADD we are trying to match.
+bool RISCVDAGToDAGISel::selectSHXADDOp(SDValue N, unsigned ShAmt,
+                                       SDValue &Val) {
+  // First form: the AND is the outermost node, (and (shl/srl y, c2), c1).
+  if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) {
+    SDValue N0 = N.getOperand(0);
+
+    bool LeftShift = N0.getOpcode() == ISD::SHL;
+    if ((LeftShift || N0.getOpcode() == ISD::SRL) &&
+        isa<ConstantSDNode>(N0.getOperand(1))) {
+      uint64_t Mask = N.getConstantOperandVal(1);
+      unsigned C2 = N0.getConstantOperandVal(1);
+
+      // Clear the bits of the mask that the shift already guarantees are
+      // zero, so the shifted-mask analysis below sees only meaningful bits.
+      unsigned XLen = Subtarget->getXLen();
+      if (LeftShift)
+        Mask &= maskTrailingZeros<uint64_t>(C2);
+      else
+        Mask &= maskTrailingOnes<uint64_t>(XLen - C2);
+
+      // Look for (and (shl y, c2), c1) where c1 is a shifted mask with no
+      // leading zeros and c3 trailing zeros. We can use an SRLI by c2+c3
+      // followed by a SHXADD with c3 for the X amount.
+      if (isShiftedMask_64(Mask)) {
+        unsigned Leading = XLen - llvm::bit_width(Mask);
+        unsigned Trailing = countTrailingZeros(Mask);
+        if (LeftShift && Leading == 0 && C2 < Trailing && Trailing == ShAmt) {
+          SDLoc DL(N);
+          EVT VT = N.getValueType();
+          Val = SDValue(CurDAG->getMachineNode(
+                            RISCV::SRLI, DL, VT, N0.getOperand(0),
+                            CurDAG->getTargetConstant(Trailing - C2, DL, VT)),
+                        0);
+          return true;
+        }
+        // Look for (and (shr y, c2), c1) where c1 is a shifted mask with c2
+        // leading zeros and c3 trailing zeros. We can use an SRLI by C3
+        // followed by a SHXADD using c3 for the X amount.
+        if (!LeftShift && Leading == C2 && Trailing == ShAmt) {
+          SDLoc DL(N);
+          EVT VT = N.getValueType();
+          Val = SDValue(
+              CurDAG->getMachineNode(
+                  RISCV::SRLI, DL, VT, N0.getOperand(0),
+                  CurDAG->getTargetConstant(Leading + Trailing, DL, VT)),
+              0);
+          return true;
+        }
+      }
+    }
+  }
+
+  // Second form: the shift is outermost, (shl/srl (and X, Mask), C1).
+  bool LeftShift = N.getOpcode() == ISD::SHL;
+  if ((LeftShift || N.getOpcode() == ISD::SRL) &&
+      isa<ConstantSDNode>(N.getOperand(1))) {
+    SDValue N0 = N.getOperand(0);
+    if (N0.getOpcode() == ISD::AND && N0.hasOneUse() &&
+        isa<ConstantSDNode>(N0.getOperand(1))) {
+      uint64_t Mask = N0.getConstantOperandVal(1);
+      if (isShiftedMask_64(Mask)) {
+        unsigned C1 = N.getConstantOperandVal(1);
+        unsigned XLen = Subtarget->getXLen();
+        unsigned Leading = XLen - llvm::bit_width(Mask);
+        unsigned Trailing = countTrailingZeros(Mask);
+        // Look for (shl (and X, Mask), C1) where Mask has 32 leading zeros and
+        // C3 trailing zeros. If C1+C3==ShAmt we can use SRLIW+SHXADD.
+        if (LeftShift && Leading == 32 && Trailing > 0 &&
+            (Trailing + C1) == ShAmt) {
+          SDLoc DL(N);
+          EVT VT = N.getValueType();
+          Val = SDValue(CurDAG->getMachineNode(
+                            RISCV::SRLIW, DL, VT, N0.getOperand(0),
+                            CurDAG->getTargetConstant(Trailing, DL, VT)),
+                        0);
+          return true;
+        }
+        // Look for (srl (and X, Mask), C1) where Mask has 32 leading zeros and
+        // C3 trailing zeros. If C3-C1==ShAmt we can use SRLIW+SHXADD.
+        if (!LeftShift && Leading == 32 && Trailing > C1 &&
+            (Trailing - C1) == ShAmt) {
+          SDLoc DL(N);
+          EVT VT = N.getValueType();
+          Val = SDValue(CurDAG->getMachineNode(
+                            RISCV::SRLIW, DL, VT, N0.getOperand(0),
+                            CurDAG->getTargetConstant(Trailing, DL, VT)),
+                        0);
+          return true;
+        }
+      }
+    }
+  }
+
+  return false;
+}
+
+/// Look for various patterns that can be done with a SHL that can be folded
+/// into a SHXADD_UW. \p ShAmt contains 1, 2, or 3 and is set based on which
+/// SHXADD_UW we are trying to match.
+bool RISCVDAGToDAGISel::selectSHXADD_UWOp(SDValue N, unsigned ShAmt,
+                                          SDValue &Val) {
+  // Both the AND and the inner SHL must be single-use so folding them into
+  // the new SLLI is safe.
+  if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1)) &&
+      N.hasOneUse()) {
+    SDValue N0 = N.getOperand(0);
+    if (N0.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N0.getOperand(1)) &&
+        N0.hasOneUse()) {
+      uint64_t Mask = N.getConstantOperandVal(1);
+      unsigned C2 = N0.getConstantOperandVal(1);
+
+      // The low C2 bits are already zero after the shift; drop them from the
+      // mask before classifying it.
+      Mask &= maskTrailingZeros<uint64_t>(C2);
+
+      // Look for (and (shl y, c2), c1) where c1 is a shifted mask with
+      // 32-ShAmt leading zeros and c2 trailing zeros. We can use SLLI by
+      // c2-ShAmt followed by SHXADD_UW with ShAmt for the X amount.
+      if (isShiftedMask_64(Mask)) {
+        unsigned Leading = countLeadingZeros(Mask);
+        unsigned Trailing = countTrailingZeros(Mask);
+        if (Leading == 32 - ShAmt && Trailing == C2 && Trailing > ShAmt) {
+          SDLoc DL(N);
+          EVT VT = N.getValueType();
+          Val = SDValue(CurDAG->getMachineNode(
+                            RISCV::SLLI, DL, VT, N0.getOperand(0),
+                            CurDAG->getTargetConstant(C2 - ShAmt, DL, VT)),
+                        0);
+          return true;
+        }
+      }
+    }
+  }
+
+  return false;
+}
+
+// Return true if all users of this SDNode* only consume the lower \p Bits.
+// This can be used to form W instructions for add/sub/mul/shl even when the
+// root isn't a sext_inreg. This can allow the ADDW/SUBW/MULW/SLLIW to CSE if
+// SimplifyDemandedBits has made it so some users see a sext_inreg and some
+// don't. The sext_inreg+add/sub/mul/shl will get selected, but still leave
+// the add/sub/mul/shl to become non-W instructions. By checking the users we
+// may be able to use a W instruction and CSE with the other instruction if
+// this has happened. We could try to detect that the CSE opportunity exists
+// before doing this, but that would be more complicated.
+bool RISCVDAGToDAGISel::hasAllNBitUsers(SDNode *Node, unsigned Bits,
+                                        const unsigned Depth) const {
+  assert((Node->getOpcode() == ISD::ADD || Node->getOpcode() == ISD::SUB ||
+          Node->getOpcode() == ISD::MUL || Node->getOpcode() == ISD::SHL ||
+          Node->getOpcode() == ISD::SRL || Node->getOpcode() == ISD::AND ||
+          Node->getOpcode() == ISD::OR || Node->getOpcode() == ISD::XOR ||
+          Node->getOpcode() == ISD::SIGN_EXTEND_INREG ||
+          isa<ConstantSDNode>(Node) || Depth != 0) &&
+         "Unexpected opcode");
+
+  // Bound the recursion through ANDI/ORI/SRLI etc. below.
+  if (Depth >= SelectionDAG::MaxRecursionDepth)
+    return false;
+
+  for (auto UI = Node->use_begin(), UE = Node->use_end(); UI != UE; ++UI) {
+    SDNode *User = *UI;
+    // Users of this node should have already been instruction selected
+    if (!User->isMachineOpcode())
+      return false;
+
+    // TODO: Add more opcodes?
+    switch (User->getMachineOpcode()) {
+    default:
+      // Conservatively assume an unknown user reads all bits.
+      return false;
+    case RISCV::ADDW:
+    case RISCV::ADDIW:
+    case RISCV::SUBW:
+    case RISCV::MULW:
+    case RISCV::SLLW:
+    case RISCV::SLLIW:
+    case RISCV::SRAW:
+    case RISCV::SRAIW:
+    case RISCV::SRLW:
+    case RISCV::SRLIW:
+    case RISCV::DIVW:
+    case RISCV::DIVUW:
+    case RISCV::REMW:
+    case RISCV::REMUW:
+    case RISCV::ROLW:
+    case RISCV::RORW:
+    case RISCV::RORIW:
+    case RISCV::CLZW:
+    case RISCV::CTZW:
+    case RISCV::CPOPW:
+    case RISCV::SLLI_UW:
+    case RISCV::FMV_W_X:
+    case RISCV::FCVT_H_W:
+    case RISCV::FCVT_H_WU:
+    case RISCV::FCVT_S_W:
+    case RISCV::FCVT_S_WU:
+    case RISCV::FCVT_D_W:
+    case RISCV::FCVT_D_WU:
+      if (Bits < 32)
+        return false;
+      break;
+    case RISCV::SLL:
+    case RISCV::SRA:
+    case RISCV::SRL:
+    case RISCV::ROL:
+    case RISCV::ROR:
+    case RISCV::BSET:
+    case RISCV::BCLR:
+    case RISCV::BINV:
+      // Shift amount operands only use log2(Xlen) bits.
+      if (UI.getOperandNo() != 1 || Bits < Log2_32(Subtarget->getXLen()))
+        return false;
+      break;
+    case RISCV::SLLI:
+      // SLLI only uses the lower (XLen - ShAmt) bits.
+      if (Bits < Subtarget->getXLen() - User->getConstantOperandVal(1))
+        return false;
+      break;
+    case RISCV::ANDI:
+      if (Bits >= (unsigned)llvm::bit_width(User->getConstantOperandVal(1)))
+        break;
+      goto RecCheck;
+    case RISCV::ORI: {
+      uint64_t Imm = cast<ConstantSDNode>(User->getOperand(1))->getSExtValue();
+      if (Bits >= (unsigned)llvm::bit_width<uint64_t>(~Imm))
+        break;
+      [[fallthrough]];
+    }
+    case RISCV::AND:
+    case RISCV::OR:
+    case RISCV::XOR:
+    case RISCV::XORI:
+    case RISCV::ANDN:
+    case RISCV::ORN:
+    case RISCV::XNOR:
+    case RISCV::SH1ADD:
+    case RISCV::SH2ADD:
+    case RISCV::SH3ADD:
+    RecCheck:
+      // These pass bits through unchanged, so recurse into their users.
+      if (hasAllNBitUsers(User, Bits, Depth + 1))
+        break;
+      return false;
+    case RISCV::SRLI: {
+      unsigned ShAmt = User->getConstantOperandVal(1);
+      // If we are shifting right by less than Bits, and users don't demand any
+      // bits that were shifted into [Bits-1:0], then we can consider this as an
+      // N-Bit user.
+      if (Bits > ShAmt && hasAllNBitUsers(User, Bits - ShAmt, Depth + 1))
+        break;
+      return false;
+    }
+    case RISCV::SEXT_B:
+    case RISCV::PACKH:
+      if (Bits < 8)
+        return false;
+      break;
+    case RISCV::SEXT_H:
+    case RISCV::FMV_H_X:
+    case RISCV::ZEXT_H_RV32:
+    case RISCV::ZEXT_H_RV64:
+    case RISCV::PACKW:
+      if (Bits < 16)
+        return false;
+      break;
+    case RISCV::PACK:
+      if (Bits < (Subtarget->getXLen() / 2))
+        return false;
+      break;
+    case RISCV::ADD_UW:
+    case RISCV::SH1ADD_UW:
+    case RISCV::SH2ADD_UW:
+    case RISCV::SH3ADD_UW:
+      // The first operand to add.uw/shXadd.uw is implicitly zero extended from
+      // 32 bits.
+      if (UI.getOperandNo() != 0 || Bits < 32)
+        return false;
+      break;
+    case RISCV::SB:
+      if (UI.getOperandNo() != 0 || Bits < 8)
+        return false;
+      break;
+    case RISCV::SH:
+      if (UI.getOperandNo() != 0 || Bits < 16)
+        return false;
+      break;
+    case RISCV::SW:
+      if (UI.getOperandNo() != 0 || Bits < 32)
+        return false;
+      break;
+    }
+  }
+
+  return true;
+}
+
+// Select VL as a 5 bit immediate or a value that will become a register. This
+// allows us to choose between VSETIVLI or VSETVLI later.
+bool RISCVDAGToDAGISel::selectVLOp(SDValue N, SDValue &VL) {
+  auto *C = dyn_cast<ConstantSDNode>(N);
+  if (C && isUInt<5>(C->getZExtValue())) {
+    // Small constant VL: keep it as an immediate for VSETIVLI.
+    VL = CurDAG->getTargetConstant(C->getZExtValue(), SDLoc(N),
+                                   N->getValueType(0));
+  } else if (C && C->isAllOnesValue()) {
+    // Treat all ones as VLMax.
+    VL = CurDAG->getTargetConstant(RISCV::VLMaxSentinel, SDLoc(N),
+                                   N->getValueType(0));
+  } else if (isa<RegisterSDNode>(N) &&
+             cast<RegisterSDNode>(N)->getReg() == RISCV::X0) {
+    // All our VL operands use an operand that allows GPRNoX0 or an immediate
+    // as the register class. Convert X0 to a special immediate to pass the
+    // MachineVerifier. This is recognized specially by the vsetvli insertion
+    // pass.
+    VL = CurDAG->getTargetConstant(RISCV::VLMaxSentinel, SDLoc(N),
+                                   N->getValueType(0));
+  } else {
+    VL = N;
+  }
+
+  return true;
+}
+
+bool RISCVDAGToDAGISel::selectVSplat(SDValue N, SDValue &SplatVal) {
+  // Match a VMV_V_X_VL splat with an undef passthru and return its scalar.
+  if (N.getOpcode() != RISCVISD::VMV_V_X_VL)
+    return false;
+  if (!N.getOperand(0).isUndef())
+    return false;
+  assert(N.getNumOperands() == 3 && "Unexpected number of operands");
+  SplatVal = N.getOperand(1);
+  return true;
+}
+
+// Predicate deciding whether a sign-extended splat immediate is acceptable.
+using ValidateFn = bool (*)(int64_t);
+
+// Common matcher for constant splats: requires an undef-passthru VMV_V_X_VL
+// of a constant, normalizes the immediate to the vector element type, and
+// accepts it only if ValidateImm approves.
+static bool selectVSplatSimmHelper(SDValue N, SDValue &SplatVal,
+                                   SelectionDAG &DAG,
+                                   const RISCVSubtarget &Subtarget,
+                                   ValidateFn ValidateImm) {
+  if (N.getOpcode() != RISCVISD::VMV_V_X_VL || !N.getOperand(0).isUndef() ||
+      !isa<ConstantSDNode>(N.getOperand(1)))
+    return false;
+  assert(N.getNumOperands() == 3 && "Unexpected number of operands");
+
+  int64_t SplatImm =
+      cast<ConstantSDNode>(N.getOperand(1))->getSExtValue();
+
+  // The semantics of RISCVISD::VMV_V_X_VL is that when the operand
+  // type is wider than the resulting vector element type: an implicit
+  // truncation first takes place. Therefore, perform a manual
+  // truncation/sign-extension in order to ignore any truncated bits and catch
+  // any zero-extended immediate.
+  // For example, we wish to match (i8 -1) -> (XLenVT 255) as a simm5 by first
+  // sign-extending to (XLenVT -1).
+  MVT XLenVT = Subtarget.getXLenVT();
+  assert(XLenVT == N.getOperand(1).getSimpleValueType() &&
+         "Unexpected splat operand type");
+  MVT EltVT = N.getSimpleValueType().getVectorElementType();
+  if (EltVT.bitsLT(XLenVT))
+    SplatImm = SignExtend64(SplatImm, EltVT.getSizeInBits());
+
+  if (!ValidateImm(SplatImm))
+    return false;
+
+  SplatVal = DAG.getTargetConstant(SplatImm, SDLoc(N), XLenVT);
+  return true;
+}
+
+bool RISCVDAGToDAGISel::selectVSplatSimm5(SDValue N, SDValue &SplatVal) {
+  // Accept any splat whose immediate fits a signed 5-bit field, [-16, 15].
+  auto IsSimm5 = [](int64_t Imm) { return isInt<5>(Imm); };
+  return selectVSplatSimmHelper(N, SplatVal, *CurDAG, *Subtarget, IsSimm5);
+}
+
+bool RISCVDAGToDAGISel::selectVSplatSimm5Plus1(SDValue N, SDValue &SplatVal) {
+  // Accept splat immediates in [-15, 16]: simm5 shifted up by one.
+  auto IsSimm5Plus1 = [](int64_t Imm) {
+    return (isInt<5>(Imm) && Imm != -16) || Imm == 16;
+  };
+  return selectVSplatSimmHelper(N, SplatVal, *CurDAG, *Subtarget,
+                                IsSimm5Plus1);
+}
+
+bool RISCVDAGToDAGISel::selectVSplatSimm5Plus1NonZero(SDValue N,
+                                                      SDValue &SplatVal) {
+  // Same range as simm5-plus-one ([-15, 16]) but additionally rejects zero.
+  auto IsNonZeroSimm5Plus1 = [](int64_t Imm) {
+    return Imm != 0 && ((isInt<5>(Imm) && Imm != -16) || Imm == 16);
+  };
+  return selectVSplatSimmHelper(N, SplatVal, *CurDAG, *Subtarget,
+                                IsNonZeroSimm5Plus1);
+}
+
+// Match a splat of an unsigned 5-bit immediate ([0, 31]) and return it as an
+// XLenVT target constant.
+bool RISCVDAGToDAGISel::selectVSplatUimm5(SDValue N, SDValue &SplatVal) {
+  if (N.getOpcode() != RISCVISD::VMV_V_X_VL || !N.getOperand(0).isUndef() ||
+      !isa<ConstantSDNode>(N.getOperand(1)))
+    return false;
+
+  int64_t SplatImm =
+      cast<ConstantSDNode>(N.getOperand(1))->getSExtValue();
+
+  // NOTE(review): unlike selectVSplatSimmHelper, the immediate is not
+  // re-truncated to the vector element type before this range check --
+  // confirm callers never depend on matching narrow-element splats here.
+  if (!isUInt<5>(SplatImm))
+    return false;
+
+  SplatVal =
+      CurDAG->getTargetConstant(SplatImm, SDLoc(N), Subtarget->getXLenVT());
+
+  return true;
+}
+
+bool RISCVDAGToDAGISel::selectRVVSimm5(SDValue N, unsigned Width,
+                                       SDValue &Imm) {
+  // Only constants are candidates.
+  auto *C = dyn_cast<ConstantSDNode>(N);
+  if (!C)
+    return false;
+
+  // Sign-extend from Width bits, then require the result to fit simm5.
+  int64_t ImmVal = SignExtend64(C->getSExtValue(), Width);
+  if (!isInt<5>(ImmVal))
+    return false;
+
+  Imm = CurDAG->getTargetConstant(ImmVal, SDLoc(N), Subtarget->getXLenVT());
+  return true;
+}
+
+// Try to remove sext.w if the input is a W instruction or can be made into
+// a W instruction cheaply. Returns true if N was replaced.
+bool RISCVDAGToDAGISel::doPeepholeSExtW(SDNode *N) {
+  // Look for the sext.w pattern, addiw rd, rs1, 0.
+  if (N->getMachineOpcode() != RISCV::ADDIW ||
+      !isNullConstant(N->getOperand(1)))
+    return false;
+
+  SDValue N0 = N->getOperand(0);
+  if (!N0.isMachineOpcode())
+    return false;
+
+  switch (N0.getMachineOpcode()) {
+  default:
+    break;
+  case RISCV::ADD:
+  case RISCV::ADDI:
+  case RISCV::SUB:
+  case RISCV::MUL:
+  case RISCV::SLLI: {
+    // Convert sext.w+add/sub/mul to their W instructions. This will create
+    // a new independent instruction. This improves latency.
+    unsigned Opc;
+    switch (N0.getMachineOpcode()) {
+    default:
+      llvm_unreachable("Unexpected opcode!");
+    case RISCV::ADD: Opc = RISCV::ADDW; break;
+    case RISCV::ADDI: Opc = RISCV::ADDIW; break;
+    case RISCV::SUB: Opc = RISCV::SUBW; break;
+    case RISCV::MUL: Opc = RISCV::MULW; break;
+    case RISCV::SLLI: Opc = RISCV::SLLIW; break;
+    }
+
+    SDValue N00 = N0.getOperand(0);
+    SDValue N01 = N0.getOperand(1);
+
+    // Shift amount needs to be uimm5.
+    if (N0.getMachineOpcode() == RISCV::SLLI &&
+        !isUInt<5>(cast<ConstantSDNode>(N01)->getSExtValue()))
+      break;
+
+    SDNode *Result =
+        CurDAG->getMachineNode(Opc, SDLoc(N), N->getValueType(0),
+                               N00, N01);
+    ReplaceUses(N, Result);
+    return true;
+  }
+  case RISCV::ADDW:
+  case RISCV::ADDIW:
+  case RISCV::SUBW:
+  case RISCV::MULW:
+  case RISCV::SLLIW:
+  case RISCV::PACKW:
+    // Result is already sign extended just remove the sext.w.
+    // NOTE: We only handle the nodes that are selected with hasAllWUsers.
+    ReplaceUses(N, N0.getNode());
+    return true;
+  }
+
+  return false;
+}
+
+// Return true if we can make sure mask of N is all-ones mask.
+// The mask operand at MaskOpIdx must be the physical register V0, and the
+// glued CopyToReg defining V0 must copy from a VMSET pseudo.
+static bool usesAllOnesMask(SDNode *N, unsigned MaskOpIdx) {
+  // Check that we're using V0 as a mask register.
+  if (!isa<RegisterSDNode>(N->getOperand(MaskOpIdx)) ||
+      cast<RegisterSDNode>(N->getOperand(MaskOpIdx))->getReg() != RISCV::V0)
+    return false;
+
+  // The glued user defines V0.
+  const auto *Glued = N->getGluedNode();
+
+  if (!Glued || Glued->getOpcode() != ISD::CopyToReg)
+    return false;
+
+  // Check that we're defining V0 as a mask register.
+  if (!isa<RegisterSDNode>(Glued->getOperand(1)) ||
+      cast<RegisterSDNode>(Glued->getOperand(1))->getReg() != RISCV::V0)
+    return false;
+
+  // Check the instruction defining V0; it needs to be a VMSET pseudo.
+  SDValue MaskSetter = Glued->getOperand(2);
+
+  // VMSET exists per mask-element width; accept any of them.
+  const auto IsVMSet = [](unsigned Opc) {
+    return Opc == RISCV::PseudoVMSET_M_B1 || Opc == RISCV::PseudoVMSET_M_B16 ||
+           Opc == RISCV::PseudoVMSET_M_B2 || Opc == RISCV::PseudoVMSET_M_B32 ||
+           Opc == RISCV::PseudoVMSET_M_B4 || Opc == RISCV::PseudoVMSET_M_B64 ||
+           Opc == RISCV::PseudoVMSET_M_B8;
+  };
+
+  // TODO: Check that the VMSET is the expected bitwidth? The pseudo has
+  // undefined behaviour if it's the wrong bitwidth, so we could choose to
+  // assume that it's all-ones? Same applies to its VL.
+  return MaskSetter->isMachineOpcode() &&
+         IsVMSet(MaskSetter.getMachineOpcode());
+}
+
+// Optimize masked RVV pseudo instructions with a known all-ones mask to their
+// corresponding "unmasked" pseudo versions. The mask we're interested in will
+// take the form of a V0 physical register operand, with a glued
+// register-setting instruction.
+// Returns true if N was replaced by an unmasked pseudo.
+bool RISCVDAGToDAGISel::doPeepholeMaskedRVV(SDNode *N) {
+  // Only pseudos listed in the tablegen-generated masked-pseudo table can be
+  // converted.
+  const RISCV::RISCVMaskedPseudoInfo *I =
+      RISCV::getMaskedPseudoInfo(N->getMachineOpcode());
+  if (!I)
+    return false;
+
+  unsigned MaskOpIdx = I->MaskOpIdx;
+
+  if (!usesAllOnesMask(N, MaskOpIdx))
+    return false;
+
+  // Retrieve the tail policy operand index, if any.
+  std::optional<unsigned> TailPolicyOpIdx;
+  const RISCVInstrInfo &TII = *Subtarget->getInstrInfo();
+  const MCInstrDesc &MaskedMCID = TII.get(N->getMachineOpcode());
+
+  bool IsTA = true;
+  if (RISCVII::hasVecPolicyOp(MaskedMCID.TSFlags)) {
+    TailPolicyOpIdx = getVecPolicyOpIdx(N, MaskedMCID);
+    if (!(N->getConstantOperandVal(*TailPolicyOpIdx) &
+          RISCVII::TAIL_AGNOSTIC)) {
+      // Keep the true-masked instruction when there is no unmasked TU
+      // instruction
+      if (I->UnmaskedTUPseudo == I->MaskedPseudo && !N->getOperand(0).isUndef())
+        return false;
+      // We can't use TA if the tie-operand is not IMPLICIT_DEF
+      if (!N->getOperand(0).isUndef())
+        IsTA = false;
+    }
+  }
+
+  unsigned Opc = IsTA ? I->UnmaskedPseudo : I->UnmaskedTUPseudo;
+
+  // Check that we're dropping the mask operand and any policy operand
+  // when we transform to this unmasked pseudo. Additionally, if this instruction
+  // is tail agnostic, the unmasked instruction should not have a merge op.
+  uint64_t TSFlags = TII.get(Opc).TSFlags;
+  assert((IsTA != RISCVII::hasMergeOp(TSFlags)) &&
+         RISCVII::hasDummyMaskOp(TSFlags) &&
+         !RISCVII::hasVecPolicyOp(TSFlags) &&
+         "Unexpected pseudo to transform to");
+  (void)TSFlags;
+
+  SmallVector<SDValue, 8> Ops;
+  // Skip the merge operand at index 0 if IsTA
+  for (unsigned I = IsTA, E = N->getNumOperands(); I != E; I++) {
+    // Skip the mask, the policy, and the Glue.
+    SDValue Op = N->getOperand(I);
+    if (I == MaskOpIdx || I == TailPolicyOpIdx ||
+        Op.getValueType() == MVT::Glue)
+      continue;
+    Ops.push_back(Op);
+  }
+
+  // Transitively apply any node glued to our new node.
+  const auto *Glued = N->getGluedNode();
+  if (auto *TGlued = Glued->getGluedNode())
+    Ops.push_back(SDValue(TGlued, TGlued->getNumValues() - 1));
+
+  SDNode *Result = CurDAG->getMachineNode(Opc, SDLoc(N), N->getVTList(), Ops);
+  // Preserve fast-math / no-exception style flags on the replacement.
+  Result->setFlags(N->getFlags());
+  ReplaceUses(N, Result);
+
+  return true;
+}
+
+// Try to fold VMERGE_VVM with unmasked intrinsic to masked intrinsic. The
+// peephole only deals with VMERGE_VVM which is TU and has false operand same as
+// its true operand now. E.g. (VMERGE_VVM_M1_TU False, False, (VADD_M1 ...),
+// ...) -> (VADD_VV_M1_MASK)
+bool RISCVDAGToDAGISel::performCombineVMergeAndVOps(SDNode *N, bool IsTA) {
+  // The TA form of vmerge has no merge operand, so its false/true/mask/VL
+  // operands sit one position earlier than in the TU form.
+  unsigned Offset = IsTA ? 0 : 1;
+  uint64_t Policy = IsTA ? RISCVII::TAIL_AGNOSTIC : /*TUMU*/ 0;
+
+  SDValue False = N->getOperand(0 + Offset);
+  SDValue True = N->getOperand(1 + Offset);
+  SDValue Mask = N->getOperand(2 + Offset);
+  SDValue VL = N->getOperand(3 + Offset);
+
+  assert(True.getResNo() == 0 &&
+         "Expect True is the first output of an instruction.");
+
+  // Need N is the exactly one using True.
+  if (!True.hasOneUse())
+    return false;
+
+  if (!True.isMachineOpcode())
+    return false;
+
+  unsigned TrueOpc = True.getMachineOpcode();
+
+  // Skip if True has merge operand.
+  // TODO: Deal with True having same merge operand with N.
+  if (RISCVII::hasMergeOp(TII->get(TrueOpc).TSFlags))
+    return false;
+
+  // Skip if True has side effect.
+  // TODO: Support vleff and vlsegff.
+  if (TII->get(TrueOpc).hasUnmodeledSideEffects())
+    return false;
+
+  // Only deal with True when True is unmasked intrinsic now.
+  const RISCV::RISCVMaskedPseudoInfo *Info =
+      RISCV::lookupMaskedIntrinsicByUnmaskedTA(TrueOpc);
+
+  if (!Info)
+    return false;
+
+  // The last operand of unmasked intrinsic should be sew or chain.
+  bool HasChainOp =
+      True.getOperand(True.getNumOperands() - 1).getValueType() == MVT::Other;
+
+  if (HasChainOp) {
+    // Avoid creating cycles in the DAG. We must ensure that none of the other
+    // operands depend on True through its Chain.
+    SmallVector<const SDNode *, 4> LoopWorklist;
+    SmallPtrSet<const SDNode *, 16> Visited;
+    LoopWorklist.push_back(False.getNode());
+    LoopWorklist.push_back(Mask.getNode());
+    LoopWorklist.push_back(VL.getNode());
+    if (SDNode *Glued = N->getGluedNode())
+      LoopWorklist.push_back(Glued);
+    if (SDNode::hasPredecessorHelper(True.getNode(), Visited, LoopWorklist))
+      return false;
+  }
+
+  // Need True has same VL with N.
+  // Unmasked pseudo operand layout ends with [..., VL, SEW, (Chain)].
+  unsigned TrueVLIndex = True.getNumOperands() - HasChainOp - 2;
+  SDValue TrueVL = True.getOperand(TrueVLIndex);
+
+  auto IsNoFPExcept = [this](SDValue N) {
+    return !this->mayRaiseFPException(N.getNode()) ||
+           N->getFlags().hasNoFPExcept();
+  };
+
+  // Allow the peephole for non-exception True with VLMAX vector length, since
+  // all the values after VL of N are dependent on Merge. VLMAX should be
+  // lowered to (XLenVT -1).
+  if (TrueVL != VL && !(IsNoFPExcept(True) && isAllOnesConstant(TrueVL)))
+    return false;
+
+  SDLoc DL(N);
+  unsigned MaskedOpc = Info->MaskedPseudo;
+  assert(RISCVII::hasVecPolicyOp(TII->get(MaskedOpc).TSFlags) &&
+         "Expected instructions with mask have policy operand.");
+  assert(RISCVII::hasMergeOp(TII->get(MaskedOpc).TSFlags) &&
+         "Expected instructions with mask have merge operand.");
+
+  // Build the masked pseudo operand list:
+  //   merge(False), True's operands up to VL, mask, VL, SEW, policy.
+  SmallVector<SDValue, 8> Ops;
+  Ops.push_back(False);
+  Ops.append(True->op_begin(), True->op_begin() + TrueVLIndex);
+  Ops.append({Mask, VL, /* SEW */ True.getOperand(TrueVLIndex + 1)});
+  Ops.push_back(CurDAG->getTargetConstant(Policy, DL, Subtarget->getXLenVT()));
+
+  // Result node should have chain operand of True.
+  if (HasChainOp)
+    Ops.push_back(True.getOperand(True.getNumOperands() - 1));
+
+  // Result node should take over glued node of N.
+  if (N->getGluedNode())
+    Ops.push_back(N->getOperand(N->getNumOperands() - 1));
+
+  SDNode *Result =
+      CurDAG->getMachineNode(MaskedOpc, DL, True->getVTList(), Ops);
+  Result->setFlags(True->getFlags());
+
+  // Replace vmerge.vvm node by Result.
+  ReplaceUses(SDValue(N, 0), SDValue(Result, 0));
+
+  // Replace another value of True. E.g. chain and VL.
+  for (unsigned Idx = 1; Idx < True->getNumValues(); ++Idx)
+    ReplaceUses(True.getValue(Idx), SDValue(Result, Idx));
+
+  // Try to transform Result to unmasked intrinsic.
+  doPeepholeMaskedRVV(Result);
+  return true;
+}
+
+// Transform (VMERGE_VVM_<LMUL>_TU false, false, true, allones, vl, sew) to
+// (VADD_VI_<LMUL>_TU false, true, 0, vl, sew). It may decrease uses of VMSET.
+// The caller guarantees N is one of the VMERGE_VVM_*_TU pseudos; any other
+// opcode is unreachable.
+bool RISCVDAGToDAGISel::performVMergeToVAdd(SDNode *N) {
+  // Pick the VADD_VI pseudo with the same LMUL as the vmerge.
+  unsigned NewOpc;
+  switch (N->getMachineOpcode()) {
+  default:
+    llvm_unreachable("Expected VMERGE_VVM_<LMUL>_TU instruction.");
+  case RISCV::PseudoVMERGE_VVM_MF8_TU:
+    NewOpc = RISCV::PseudoVADD_VI_MF8_TU;
+    break;
+  case RISCV::PseudoVMERGE_VVM_MF4_TU:
+    NewOpc = RISCV::PseudoVADD_VI_MF4_TU;
+    break;
+  case RISCV::PseudoVMERGE_VVM_MF2_TU:
+    NewOpc = RISCV::PseudoVADD_VI_MF2_TU;
+    break;
+  case RISCV::PseudoVMERGE_VVM_M1_TU:
+    NewOpc = RISCV::PseudoVADD_VI_M1_TU;
+    break;
+  case RISCV::PseudoVMERGE_VVM_M2_TU:
+    NewOpc = RISCV::PseudoVADD_VI_M2_TU;
+    break;
+  case RISCV::PseudoVMERGE_VVM_M4_TU:
+    NewOpc = RISCV::PseudoVADD_VI_M4_TU;
+    break;
+  case RISCV::PseudoVMERGE_VVM_M8_TU:
+    NewOpc = RISCV::PseudoVADD_VI_M8_TU;
+    break;
+  }
+
+  // The transform is only valid for an all-ones mask.
+  if (!usesAllOnesMask(N, /* MaskOpIdx */ 3))
+    return false;
+
+  SDLoc DL(N);
+  EVT VT = N->getValueType(0);
+  // Operands: merge(false), true, imm 0, vl, sew.
+  SDValue Ops[] = {N->getOperand(1), N->getOperand(2),
+                   CurDAG->getTargetConstant(0, DL, Subtarget->getXLenVT()),
+                   N->getOperand(4), N->getOperand(5)};
+  SDNode *Result = CurDAG->getMachineNode(NewOpc, DL, VT, Ops);
+  ReplaceUses(N, Result);
+  return true;
+}
+
+// Walk all nodes (in reverse allnodes order) looking for vmerge.vvm pseudos
+// and try the two vmerge folds above. Returns true if any node was changed.
+bool RISCVDAGToDAGISel::doPeepholeMergeVVMFold() {
+  bool MadeChange = false;
+  SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();
+
+  // Iterate backwards so replacements don't invalidate nodes we have yet to
+  // visit.
+  while (Position != CurDAG->allnodes_begin()) {
+    SDNode *N = &*--Position;
+    if (N->use_empty() || !N->isMachineOpcode())
+      continue;
+
+    auto IsVMergeTU = [](unsigned Opcode) {
+      return Opcode == RISCV::PseudoVMERGE_VVM_MF8_TU ||
+             Opcode == RISCV::PseudoVMERGE_VVM_MF4_TU ||
+             Opcode == RISCV::PseudoVMERGE_VVM_MF2_TU ||
+             Opcode == RISCV::PseudoVMERGE_VVM_M1_TU ||
+             Opcode == RISCV::PseudoVMERGE_VVM_M2_TU ||
+             Opcode == RISCV::PseudoVMERGE_VVM_M4_TU ||
+             Opcode == RISCV::PseudoVMERGE_VVM_M8_TU;
+    };
+
+    auto IsVMergeTA = [](unsigned Opcode) {
+      return Opcode == RISCV::PseudoVMERGE_VVM_MF8 ||
+             Opcode == RISCV::PseudoVMERGE_VVM_MF4 ||
+             Opcode == RISCV::PseudoVMERGE_VVM_MF2 ||
+             Opcode == RISCV::PseudoVMERGE_VVM_M1 ||
+             Opcode == RISCV::PseudoVMERGE_VVM_M2 ||
+             Opcode == RISCV::PseudoVMERGE_VVM_M4 ||
+             Opcode == RISCV::PseudoVMERGE_VVM_M8;
+    };
+
+    unsigned Opc = N->getMachineOpcode();
+    // The following optimizations require that the merge operand of N is same
+    // as the false operand of N.
+    if ((IsVMergeTU(Opc) && N->getOperand(0) == N->getOperand(1)) ||
+        IsVMergeTA(Opc))
+      MadeChange |= performCombineVMergeAndVOps(N, IsVMergeTA(Opc));
+    if (IsVMergeTU(Opc) && N->getOperand(0) == N->getOperand(1))
+      MadeChange |= performVMergeToVAdd(N);
+  }
+  return MadeChange;
+}
+
+// This pass converts a legalized DAG into a RISCV-specific DAG, ready
+// for instruction scheduling.
+FunctionPass *llvm::createRISCVISelDag(RISCVTargetMachine &TM,
+                                       CodeGenOpt::Level OptLevel) {
+  return new RISCVDAGToDAGISel(TM, OptLevel);
+}
+
+// Pass identification: the pass framework keys on the address of ID, not its
+// value.
+char RISCVDAGToDAGISel::ID = 0;
+
+INITIALIZE_PASS(RISCVDAGToDAGISel, DEBUG_TYPE, PASS_NAME, false, false)
diff --git a/contrib/libs/llvm16/lib/Target/RISCV/RISCVISelDAGToDAG.h b/contrib/libs/llvm16/lib/Target/RISCV/RISCVISelDAGToDAG.h
new file mode 100644
index 0000000000..17205b8ba3
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/RISCV/RISCVISelDAGToDAG.h
@@ -0,0 +1,235 @@
+//===---- RISCVISelDAGToDAG.h - A dag to dag inst selector for RISCV ------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines an instruction selector for the RISCV target.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_RISCV_RISCVISELDAGTODAG_H
+#define LLVM_LIB_TARGET_RISCV_RISCVISELDAGTODAG_H
+
+#include "RISCV.h"
+#include "RISCVTargetMachine.h"
+#include "llvm/CodeGen/SelectionDAGISel.h"
+#include "llvm/Support/KnownBits.h"
+
+// RISCV-specific code to select RISCV machine instructions for
+// SelectionDAG operations.
+namespace llvm {
+// Instruction selector that turns a legalized SelectionDAG into RISC-V
+// machine instructions. Most select* methods are ComplexPattern hooks
+// referenced from the tablegen patterns included below.
+class RISCVDAGToDAGISel : public SelectionDAGISel {
+  // Cached per-function subtarget, set in runOnMachineFunction.
+  const RISCVSubtarget *Subtarget = nullptr;
+
+public:
+  static char ID;
+
+  RISCVDAGToDAGISel() = delete;
+
+  explicit RISCVDAGToDAGISel(RISCVTargetMachine &TargetMachine,
+                             CodeGenOpt::Level OptLevel)
+      : SelectionDAGISel(ID, TargetMachine, OptLevel) {}
+
+  bool runOnMachineFunction(MachineFunction &MF) override {
+    Subtarget = &MF.getSubtarget<RISCVSubtarget>();
+    return SelectionDAGISel::runOnMachineFunction(MF);
+  }
+
+  void PreprocessISelDAG() override;
+  void PostprocessISelDAG() override;
+
+  void Select(SDNode *Node) override;
+
+  bool SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID,
+                                    std::vector<SDValue> &OutOps) override;
+
+  // Addressing-mode matchers.
+  bool SelectAddrFrameIndex(SDValue Addr, SDValue &Base, SDValue &Offset);
+  bool SelectFrameAddrRegImm(SDValue Addr, SDValue &Base, SDValue &Offset);
+  bool SelectAddrRegImm(SDValue Addr, SDValue &Base, SDValue &Offset);
+
+  bool tryShrinkShlLogicImm(SDNode *Node);
+
+  // Shift-amount matchers; the XLen/32 wrappers fix the shift width.
+  bool selectShiftMask(SDValue N, unsigned ShiftWidth, SDValue &ShAmt);
+  bool selectShiftMaskXLen(SDValue N, SDValue &ShAmt) {
+    return selectShiftMask(N, Subtarget->getXLen(), ShAmt);
+  }
+  bool selectShiftMask32(SDValue N, SDValue &ShAmt) {
+    return selectShiftMask(N, 32, ShAmt);
+  }
+
+  bool selectSExti32(SDValue N, SDValue &Val);
+  bool selectZExtBits(SDValue N, unsigned Bits, SDValue &Val);
+  template <unsigned Bits> bool selectZExtBits(SDValue N, SDValue &Val) {
+    return selectZExtBits(N, Bits, Val);
+  }
+
+  bool selectSHXADDOp(SDValue N, unsigned ShAmt, SDValue &Val);
+  template <unsigned ShAmt> bool selectSHXADDOp(SDValue N, SDValue &Val) {
+    return selectSHXADDOp(N, ShAmt, Val);
+  }
+
+  bool selectSHXADD_UWOp(SDValue N, unsigned ShAmt, SDValue &Val);
+  template <unsigned ShAmt> bool selectSHXADD_UWOp(SDValue N, SDValue &Val) {
+    return selectSHXADD_UWOp(N, ShAmt, Val);
+  }
+
+  // True if every user of Node only reads the low Bits bits of its result.
+  bool hasAllNBitUsers(SDNode *Node, unsigned Bits,
+                       const unsigned Depth = 0) const;
+  bool hasAllHUsers(SDNode *Node) const { return hasAllNBitUsers(Node, 16); }
+  bool hasAllWUsers(SDNode *Node) const { return hasAllNBitUsers(Node, 32); }
+
+  bool selectVLOp(SDValue N, SDValue &VL);
+
+  // Vector splat matchers (see RISCVISelDAGToDAG.cpp for semantics).
+  bool selectVSplat(SDValue N, SDValue &SplatVal);
+  bool selectVSplatSimm5(SDValue N, SDValue &SplatVal);
+  bool selectVSplatUimm5(SDValue N, SDValue &SplatVal);
+  bool selectVSplatSimm5Plus1(SDValue N, SDValue &SplatVal);
+  bool selectVSplatSimm5Plus1NonZero(SDValue N, SDValue &SplatVal);
+
+  bool selectRVVSimm5(SDValue N, unsigned Width, SDValue &Imm);
+  template <unsigned Width> bool selectRVVSimm5(SDValue N, SDValue &Imm) {
+    return selectRVVSimm5(N, Width, Imm);
+  }
+
+  void addVectorLoadStoreOperands(SDNode *Node, unsigned SEWImm,
+                                  const SDLoc &DL, unsigned CurOp,
+                                  bool IsMasked, bool IsStridedOrIndexed,
+                                  SmallVectorImpl<SDValue> &Operands,
+                                  bool IsLoad = false, MVT *IndexVT = nullptr);
+
+  // Selection helpers for the RVV segment load/store intrinsics.
+  void selectVLSEG(SDNode *Node, bool IsMasked, bool IsStrided);
+  void selectVLSEGFF(SDNode *Node, bool IsMasked);
+  void selectVLXSEG(SDNode *Node, bool IsMasked, bool IsOrdered);
+  void selectVSSEG(SDNode *Node, bool IsMasked, bool IsStrided);
+  void selectVSXSEG(SDNode *Node, bool IsMasked, bool IsOrdered);
+
+  void selectVSETVLI(SDNode *Node);
+
+  // Return the RISC-V condition code that matches the given DAG integer
+  // condition code. The CondCode must be one of those supported by the RISC-V
+  // ISA (see translateSetCCForBranch).
+  static RISCVCC::CondCode getRISCVCCForIntCC(ISD::CondCode CC) {
+    switch (CC) {
+    default:
+      llvm_unreachable("Unsupported CondCode");
+    case ISD::SETEQ:
+      return RISCVCC::COND_EQ;
+    case ISD::SETNE:
+      return RISCVCC::COND_NE;
+    case ISD::SETLT:
+      return RISCVCC::COND_LT;
+    case ISD::SETGE:
+      return RISCVCC::COND_GE;
+    case ISD::SETULT:
+      return RISCVCC::COND_LTU;
+    case ISD::SETUGE:
+      return RISCVCC::COND_GEU;
+    }
+  }
+
+// Include the pieces autogenerated from the target description.
+#include "RISCVGenDAGISel.inc"
+
+private:
+  // Post-selection peepholes (defined in RISCVISelDAGToDAG.cpp).
+  bool doPeepholeSExtW(SDNode *Node);
+  bool doPeepholeMaskedRVV(SDNode *Node);
+  bool doPeepholeMergeVVMFold();
+  bool performVMergeToVAdd(SDNode *N);
+  bool performCombineVMergeAndVOps(SDNode *N, bool IsTA);
+};
+
+namespace RISCV {
+// Entry types for the tablegen-generated searchable tables declared via the
+// GET_*_DECL macros below. Field widths are packed to keep each entry small.
+
+// Segment load (vlseg) pseudo table entry.
+struct VLSEGPseudo {
+  uint16_t NF : 4;
+  uint16_t Masked : 1;
+  uint16_t IsTU : 1;
+  uint16_t Strided : 1;
+  uint16_t FF : 1;
+  uint16_t Log2SEW : 3;
+  uint16_t LMUL : 3;
+  uint16_t Pseudo;
+};
+
+// Indexed segment load (vlxseg) pseudo table entry.
+struct VLXSEGPseudo {
+  uint16_t NF : 4;
+  uint16_t Masked : 1;
+  uint16_t IsTU : 1;
+  uint16_t Ordered : 1;
+  uint16_t Log2SEW : 3;
+  uint16_t LMUL : 3;
+  uint16_t IndexLMUL : 3;
+  uint16_t Pseudo;
+};
+
+// Segment store (vsseg) pseudo table entry.
+struct VSSEGPseudo {
+  uint16_t NF : 4;
+  uint16_t Masked : 1;
+  uint16_t Strided : 1;
+  uint16_t Log2SEW : 3;
+  uint16_t LMUL : 3;
+  uint16_t Pseudo;
+};
+
+// Indexed segment store (vsxseg) pseudo table entry.
+struct VSXSEGPseudo {
+  uint16_t NF : 4;
+  uint16_t Masked : 1;
+  uint16_t Ordered : 1;
+  uint16_t Log2SEW : 3;
+  uint16_t LMUL : 3;
+  uint16_t IndexLMUL : 3;
+  uint16_t Pseudo;
+};
+
+// Unit-stride/strided vector load pseudo table entry.
+struct VLEPseudo {
+  uint16_t Masked : 1;
+  uint16_t IsTU : 1;
+  uint16_t Strided : 1;
+  uint16_t FF : 1;
+  uint16_t Log2SEW : 3;
+  uint16_t LMUL : 3;
+  uint16_t Pseudo;
+};
+
+// Unit-stride/strided vector store pseudo table entry.
+struct VSEPseudo {
+  uint16_t Masked :1;
+  uint16_t Strided : 1;
+  uint16_t Log2SEW : 3;
+  uint16_t LMUL : 3;
+  uint16_t Pseudo;
+};
+
+// Indexed vector load/store pseudo table entry (shared layout).
+struct VLX_VSXPseudo {
+  uint16_t Masked : 1;
+  uint16_t IsTU : 1;
+  uint16_t Ordered : 1;
+  uint16_t Log2SEW : 3;
+  uint16_t LMUL : 3;
+  uint16_t IndexLMUL : 3;
+  uint16_t Pseudo;
+};
+
+// Maps a masked pseudo to its unmasked (TA and TU) counterparts; MaskOpIdx is
+// the operand index of the V0 mask in the masked form.
+struct RISCVMaskedPseudoInfo {
+  uint16_t MaskedPseudo;
+  uint16_t UnmaskedPseudo;
+  uint16_t UnmaskedTUPseudo;
+  uint8_t MaskOpIdx;
+};
+
+#define GET_RISCVVSSEGTable_DECL
+#define GET_RISCVVLSEGTable_DECL
+#define GET_RISCVVLXSEGTable_DECL
+#define GET_RISCVVSXSEGTable_DECL
+#define GET_RISCVVLETable_DECL
+#define GET_RISCVVSETable_DECL
+#define GET_RISCVVLXTable_DECL
+#define GET_RISCVVSXTable_DECL
+#define GET_RISCVMaskedPseudosTable_DECL
+#include "RISCVGenSearchableTables.inc"
+} // namespace RISCV
+
+} // namespace llvm
+
+#endif
diff --git a/contrib/libs/llvm16/lib/Target/RISCV/RISCVISelLowering.cpp b/contrib/libs/llvm16/lib/Target/RISCV/RISCVISelLowering.cpp
new file mode 100644
index 0000000000..6eea169f89
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -0,0 +1,14257 @@
+//===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation --------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the interfaces that RISCV uses to lower LLVM code into a
+// selection DAG.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RISCVISelLowering.h"
+#include "MCTargetDesc/RISCVMatInt.h"
+#include "RISCV.h"
+#include "RISCVMachineFunctionInfo.h"
+#include "RISCVRegisterInfo.h"
+#include "RISCVSubtarget.h"
+#include "RISCVTargetMachine.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/MemoryLocation.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineJumpTableInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
+#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/IR/DiagnosticPrinter.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/IntrinsicsRISCV.h"
+#include "llvm/IR/PatternMatch.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/KnownBits.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include <optional>
+
+using namespace llvm;
+
+#define DEBUG_TYPE "riscv-lower"
+
+STATISTIC(NumTailCalls, "Number of tail calls");
+
+// Tuning knob: cap on the web of instructions considered for VW expansion.
+static cl::opt<unsigned> ExtensionMaxWebSize(
+    DEBUG_TYPE "-ext-max-web-size", cl::Hidden,
+    cl::desc("Give the maximum size (in number of nodes) of the web of "
+             "instructions that we will consider for VW expansion"),
+    cl::init(18));
+
+// Tuning knob: whether VW_W ops may be formed from splat constants.
+static cl::opt<bool>
+    AllowSplatInVW_W(DEBUG_TYPE "-form-vw-w-with-splat", cl::Hidden,
+                     cl::desc("Allow the formation of VW_W operations (e.g., "
+                              "VWADD_W) with splat constants"),
+                     cl::init(false));
+
+// Tuning knob: divisor-repetition threshold for the reciprocal transform.
+static cl::opt<unsigned> NumRepeatedDivisors(
+    DEBUG_TYPE "-fp-repeated-divisors", cl::Hidden,
+    cl::desc("Set the minimum number of repetitions of a divisor to allow "
+             "transformation to multiplications by the reciprocal"),
+    cl::init(2));
+
+RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
+ const RISCVSubtarget &STI)
+ : TargetLowering(TM), Subtarget(STI) {
+
+ if (Subtarget.isRV32E())
+ report_fatal_error("Codegen not yet implemented for RV32E");
+
+ RISCVABI::ABI ABI = Subtarget.getTargetABI();
+ assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");
+
+ if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) &&
+ !Subtarget.hasStdExtF()) {
+ errs() << "Hard-float 'f' ABI can't be used for a target that "
+ "doesn't support the F instruction set extension (ignoring "
+ "target-abi)\n";
+ ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
+ } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) &&
+ !Subtarget.hasStdExtD()) {
+ errs() << "Hard-float 'd' ABI can't be used for a target that "
+ "doesn't support the D instruction set extension (ignoring "
+ "target-abi)\n";
+ ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
+ }
+
+ switch (ABI) {
+ default:
+ report_fatal_error("Don't know how to lower this ABI");
+ case RISCVABI::ABI_ILP32:
+ case RISCVABI::ABI_ILP32F:
+ case RISCVABI::ABI_ILP32D:
+ case RISCVABI::ABI_LP64:
+ case RISCVABI::ABI_LP64F:
+ case RISCVABI::ABI_LP64D:
+ break;
+ }
+
+ MVT XLenVT = Subtarget.getXLenVT();
+
+ // Set up the register classes.
+ addRegisterClass(XLenVT, &RISCV::GPRRegClass);
+
+ if (Subtarget.hasStdExtZfhOrZfhmin())
+ addRegisterClass(MVT::f16, &RISCV::FPR16RegClass);
+ if (Subtarget.hasStdExtF())
+ addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
+ if (Subtarget.hasStdExtD())
+ addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);
+
+ static const MVT::SimpleValueType BoolVecVTs[] = {
+ MVT::nxv1i1, MVT::nxv2i1, MVT::nxv4i1, MVT::nxv8i1,
+ MVT::nxv16i1, MVT::nxv32i1, MVT::nxv64i1};
+ static const MVT::SimpleValueType IntVecVTs[] = {
+ MVT::nxv1i8, MVT::nxv2i8, MVT::nxv4i8, MVT::nxv8i8, MVT::nxv16i8,
+ MVT::nxv32i8, MVT::nxv64i8, MVT::nxv1i16, MVT::nxv2i16, MVT::nxv4i16,
+ MVT::nxv8i16, MVT::nxv16i16, MVT::nxv32i16, MVT::nxv1i32, MVT::nxv2i32,
+ MVT::nxv4i32, MVT::nxv8i32, MVT::nxv16i32, MVT::nxv1i64, MVT::nxv2i64,
+ MVT::nxv4i64, MVT::nxv8i64};
+ static const MVT::SimpleValueType F16VecVTs[] = {
+ MVT::nxv1f16, MVT::nxv2f16, MVT::nxv4f16,
+ MVT::nxv8f16, MVT::nxv16f16, MVT::nxv32f16};
+ static const MVT::SimpleValueType F32VecVTs[] = {
+ MVT::nxv1f32, MVT::nxv2f32, MVT::nxv4f32, MVT::nxv8f32, MVT::nxv16f32};
+ static const MVT::SimpleValueType F64VecVTs[] = {
+ MVT::nxv1f64, MVT::nxv2f64, MVT::nxv4f64, MVT::nxv8f64};
+
+ if (Subtarget.hasVInstructions()) {
+ auto addRegClassForRVV = [this](MVT VT) {
+ // Disable the smallest fractional LMUL types if ELEN is less than
+ // RVVBitsPerBlock.
+ unsigned MinElts = RISCV::RVVBitsPerBlock / Subtarget.getELEN();
+ if (VT.getVectorMinNumElements() < MinElts)
+ return;
+
+ unsigned Size = VT.getSizeInBits().getKnownMinValue();
+ const TargetRegisterClass *RC;
+ if (Size <= RISCV::RVVBitsPerBlock)
+ RC = &RISCV::VRRegClass;
+ else if (Size == 2 * RISCV::RVVBitsPerBlock)
+ RC = &RISCV::VRM2RegClass;
+ else if (Size == 4 * RISCV::RVVBitsPerBlock)
+ RC = &RISCV::VRM4RegClass;
+ else if (Size == 8 * RISCV::RVVBitsPerBlock)
+ RC = &RISCV::VRM8RegClass;
+ else
+ llvm_unreachable("Unexpected size");
+
+ addRegisterClass(VT, RC);
+ };
+
+ for (MVT VT : BoolVecVTs)
+ addRegClassForRVV(VT);
+ for (MVT VT : IntVecVTs) {
+ if (VT.getVectorElementType() == MVT::i64 &&
+ !Subtarget.hasVInstructionsI64())
+ continue;
+ addRegClassForRVV(VT);
+ }
+
+ if (Subtarget.hasVInstructionsF16())
+ for (MVT VT : F16VecVTs)
+ addRegClassForRVV(VT);
+
+ if (Subtarget.hasVInstructionsF32())
+ for (MVT VT : F32VecVTs)
+ addRegClassForRVV(VT);
+
+ if (Subtarget.hasVInstructionsF64())
+ for (MVT VT : F64VecVTs)
+ addRegClassForRVV(VT);
+
+ if (Subtarget.useRVVForFixedLengthVectors()) {
+ auto addRegClassForFixedVectors = [this](MVT VT) {
+ MVT ContainerVT = getContainerForFixedLengthVector(VT);
+ unsigned RCID = getRegClassIDForVecVT(ContainerVT);
+ const RISCVRegisterInfo &TRI = *Subtarget.getRegisterInfo();
+ addRegisterClass(VT, TRI.getRegClass(RCID));
+ };
+ for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
+ if (useRVVForFixedLengthVectorVT(VT))
+ addRegClassForFixedVectors(VT);
+
+ for (MVT VT : MVT::fp_fixedlen_vector_valuetypes())
+ if (useRVVForFixedLengthVectorVT(VT))
+ addRegClassForFixedVectors(VT);
+ }
+ }
+
+ // Compute derived properties from the register classes.
+ computeRegisterProperties(STI.getRegisterInfo());
+
+ setStackPointerRegisterToSaveRestore(RISCV::X2);
+
+ setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, XLenVT,
+ MVT::i1, Promote);
+ // DAGCombiner can call isLoadExtLegal for types that aren't legal.
+ setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, MVT::i32,
+ MVT::i1, Promote);
+
+ // TODO: add all necessary setOperationAction calls.
+ setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);
+
+ setOperationAction(ISD::BR_JT, MVT::Other, Expand);
+ setOperationAction(ISD::BR_CC, XLenVT, Expand);
+ setOperationAction(ISD::BRCOND, MVT::Other, Custom);
+ setOperationAction(ISD::SELECT_CC, XLenVT, Expand);
+
+ setCondCodeAction(ISD::SETLE, XLenVT, Expand);
+ setCondCodeAction(ISD::SETGT, XLenVT, Custom);
+ setCondCodeAction(ISD::SETGE, XLenVT, Expand);
+ setCondCodeAction(ISD::SETULE, XLenVT, Expand);
+ setCondCodeAction(ISD::SETUGT, XLenVT, Custom);
+ setCondCodeAction(ISD::SETUGE, XLenVT, Expand);
+
+ setOperationAction({ISD::STACKSAVE, ISD::STACKRESTORE}, MVT::Other, Expand);
+
+ setOperationAction(ISD::VASTART, MVT::Other, Custom);
+ setOperationAction({ISD::VAARG, ISD::VACOPY, ISD::VAEND}, MVT::Other, Expand);
+
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
+
+ setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);
+
+ if (!Subtarget.hasStdExtZbb())
+ setOperationAction(ISD::SIGN_EXTEND_INREG, {MVT::i8, MVT::i16}, Expand);
+
+ if (Subtarget.is64Bit()) {
+ setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);
+
+ setOperationAction(ISD::LOAD, MVT::i32, Custom);
+
+ setOperationAction({ISD::ADD, ISD::SUB, ISD::SHL, ISD::SRA, ISD::SRL},
+ MVT::i32, Custom);
+
+ setOperationAction({ISD::UADDO, ISD::USUBO, ISD::UADDSAT, ISD::USUBSAT},
+ MVT::i32, Custom);
+ } else {
+ setLibcallName(
+ {RTLIB::SHL_I128, RTLIB::SRL_I128, RTLIB::SRA_I128, RTLIB::MUL_I128},
+ nullptr);
+ setLibcallName(RTLIB::MULO_I64, nullptr);
+ }
+
+ if (!Subtarget.hasStdExtM() && !Subtarget.hasStdExtZmmul()) {
+ setOperationAction({ISD::MUL, ISD::MULHS, ISD::MULHU}, XLenVT, Expand);
+ } else {
+ if (Subtarget.is64Bit()) {
+ setOperationAction(ISD::MUL, {MVT::i32, MVT::i128}, Custom);
+ } else {
+ setOperationAction(ISD::MUL, MVT::i64, Custom);
+ }
+ }
+
+ if (!Subtarget.hasStdExtM()) {
+ setOperationAction({ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM},
+ XLenVT, Expand);
+ } else {
+ if (Subtarget.is64Bit()) {
+ setOperationAction({ISD::SDIV, ISD::UDIV, ISD::UREM},
+ {MVT::i8, MVT::i16, MVT::i32}, Custom);
+ }
+ }
+
+ setOperationAction(
+ {ISD::SDIVREM, ISD::UDIVREM, ISD::SMUL_LOHI, ISD::UMUL_LOHI}, XLenVT,
+ Expand);
+
+ setOperationAction({ISD::SHL_PARTS, ISD::SRL_PARTS, ISD::SRA_PARTS}, XLenVT,
+ Custom);
+
+ if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb()) {
+ if (Subtarget.is64Bit())
+ setOperationAction({ISD::ROTL, ISD::ROTR}, MVT::i32, Custom);
+ } else {
+ setOperationAction({ISD::ROTL, ISD::ROTR}, XLenVT, Expand);
+ }
+
+ // With Zbb we have an XLen rev8 instruction, but not GREVI. So we'll
+ // pattern match it directly in isel.
+ setOperationAction(ISD::BSWAP, XLenVT,
+ (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb())
+ ? Legal
+ : Expand);
+ // Zbkb can use rev8+brev8 to implement bitreverse.
+ setOperationAction(ISD::BITREVERSE, XLenVT,
+ Subtarget.hasStdExtZbkb() ? Custom : Expand);
+
+ if (Subtarget.hasStdExtZbb()) {
+ setOperationAction({ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}, XLenVT,
+ Legal);
+
+ if (Subtarget.is64Bit())
+ setOperationAction(
+ {ISD::CTTZ, ISD::CTTZ_ZERO_UNDEF, ISD::CTLZ, ISD::CTLZ_ZERO_UNDEF},
+ MVT::i32, Custom);
+ } else {
+ setOperationAction({ISD::CTTZ, ISD::CTLZ, ISD::CTPOP}, XLenVT, Expand);
+ }
+
+ if (Subtarget.is64Bit())
+ setOperationAction(ISD::ABS, MVT::i32, Custom);
+
+ if (!Subtarget.hasVendorXVentanaCondOps())
+ setOperationAction(ISD::SELECT, XLenVT, Custom);
+
+ static const unsigned FPLegalNodeTypes[] = {
+ ISD::FMINNUM, ISD::FMAXNUM, ISD::LRINT,
+ ISD::LLRINT, ISD::LROUND, ISD::LLROUND,
+ ISD::STRICT_LRINT, ISD::STRICT_LLRINT, ISD::STRICT_LROUND,
+ ISD::STRICT_LLROUND, ISD::STRICT_FMA, ISD::STRICT_FADD,
+ ISD::STRICT_FSUB, ISD::STRICT_FMUL, ISD::STRICT_FDIV,
+ ISD::STRICT_FSQRT, ISD::STRICT_FSETCC, ISD::STRICT_FSETCCS};
+
+ static const ISD::CondCode FPCCToExpand[] = {
+ ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
+ ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
+ ISD::SETGE, ISD::SETNE, ISD::SETO, ISD::SETUO};
+
+ static const unsigned FPOpToExpand[] = {
+ ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW,
+ ISD::FREM, ISD::FP16_TO_FP, ISD::FP_TO_FP16};
+
+ static const unsigned FPRndMode[] = {
+ ISD::FCEIL, ISD::FFLOOR, ISD::FTRUNC, ISD::FRINT, ISD::FROUND,
+ ISD::FROUNDEVEN};
+
+ if (Subtarget.hasStdExtZfhOrZfhmin())
+ setOperationAction(ISD::BITCAST, MVT::i16, Custom);
+
+ if (Subtarget.hasStdExtZfhOrZfhmin()) {
+ if (Subtarget.hasStdExtZfh()) {
+ setOperationAction(FPLegalNodeTypes, MVT::f16, Legal);
+ setOperationAction(FPRndMode, MVT::f16, Custom);
+ setOperationAction(ISD::SELECT, MVT::f16, Custom);
+ } else {
+ static const unsigned ZfhminPromoteOps[] = {
+ ISD::FMINNUM, ISD::FMAXNUM, ISD::FADD,
+ ISD::FSUB, ISD::FMUL, ISD::FMA,
+ ISD::FDIV, ISD::FSQRT, ISD::FABS,
+ ISD::FNEG, ISD::STRICT_FMA, ISD::STRICT_FADD,
+ ISD::STRICT_FSUB, ISD::STRICT_FMUL, ISD::STRICT_FDIV,
+ ISD::STRICT_FSQRT, ISD::STRICT_FSETCC, ISD::STRICT_FSETCCS,
+ ISD::SETCC, ISD::FCEIL, ISD::FFLOOR,
+ ISD::FTRUNC, ISD::FRINT, ISD::FROUND,
+ ISD::FROUNDEVEN, ISD::SELECT};
+
+ setOperationAction(ZfhminPromoteOps, MVT::f16, Promote);
+ setOperationAction({ISD::STRICT_LRINT, ISD::STRICT_LLRINT,
+ ISD::STRICT_LROUND, ISD::STRICT_LLROUND},
+ MVT::f16, Legal);
+ // FIXME: Need to promote f16 FCOPYSIGN to f32, but the
+ // DAGCombiner::visitFP_ROUND probably needs improvements first.
+ setOperationAction(ISD::FCOPYSIGN, MVT::f16, Expand);
+ }
+
+ setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Legal);
+ setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Legal);
+ setCondCodeAction(FPCCToExpand, MVT::f16, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
+ setOperationAction(ISD::BR_CC, MVT::f16, Expand);
+
+ setOperationAction({ISD::FREM, ISD::FNEARBYINT, ISD::FPOW, ISD::FPOWI,
+ ISD::FCOS, ISD::FSIN, ISD::FSINCOS, ISD::FEXP,
+ ISD::FEXP2, ISD::FLOG, ISD::FLOG2, ISD::FLOG10},
+ MVT::f16, Promote);
+
+ // FIXME: Need to promote f16 STRICT_* to f32 libcalls, but we don't have
+ // complete support for all operations in LegalizeDAG.
+ setOperationAction({ISD::STRICT_FCEIL, ISD::STRICT_FFLOOR,
+ ISD::STRICT_FNEARBYINT, ISD::STRICT_FRINT,
+ ISD::STRICT_FROUND, ISD::STRICT_FROUNDEVEN,
+ ISD::STRICT_FTRUNC},
+ MVT::f16, Promote);
+
+ // We need to custom promote this.
+ if (Subtarget.is64Bit())
+ setOperationAction(ISD::FPOWI, MVT::i32, Custom);
+ }
+
+ if (Subtarget.hasStdExtF()) {
+ setOperationAction(FPLegalNodeTypes, MVT::f32, Legal);
+ setOperationAction(FPRndMode, MVT::f32, Custom);
+ setCondCodeAction(FPCCToExpand, MVT::f32, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
+ setOperationAction(ISD::SELECT, MVT::f32, Custom);
+ setOperationAction(ISD::BR_CC, MVT::f32, Expand);
+ setOperationAction(FPOpToExpand, MVT::f32, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
+ setTruncStoreAction(MVT::f32, MVT::f16, Expand);
+ }
+
+ if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
+ setOperationAction(ISD::BITCAST, MVT::i32, Custom);
+
+ if (Subtarget.hasStdExtD()) {
+ setOperationAction(FPLegalNodeTypes, MVT::f64, Legal);
+ if (Subtarget.is64Bit()) {
+ setOperationAction(FPRndMode, MVT::f64, Custom);
+ }
+ setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
+ setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal);
+ setCondCodeAction(FPCCToExpand, MVT::f64, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
+ setOperationAction(ISD::SELECT, MVT::f64, Custom);
+ setOperationAction(ISD::BR_CC, MVT::f64, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
+ setTruncStoreAction(MVT::f64, MVT::f32, Expand);
+ setOperationAction(FPOpToExpand, MVT::f64, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
+ setTruncStoreAction(MVT::f64, MVT::f16, Expand);
+ }
+
+ if (Subtarget.is64Bit())
+ setOperationAction({ISD::FP_TO_UINT, ISD::FP_TO_SINT,
+ ISD::STRICT_FP_TO_UINT, ISD::STRICT_FP_TO_SINT},
+ MVT::i32, Custom);
+
+ if (Subtarget.hasStdExtF()) {
+ setOperationAction({ISD::FP_TO_UINT_SAT, ISD::FP_TO_SINT_SAT}, XLenVT,
+ Custom);
+
+ setOperationAction({ISD::STRICT_FP_TO_UINT, ISD::STRICT_FP_TO_SINT,
+ ISD::STRICT_UINT_TO_FP, ISD::STRICT_SINT_TO_FP},
+ XLenVT, Legal);
+
+ setOperationAction(ISD::GET_ROUNDING, XLenVT, Custom);
+ setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom);
+ }
+
+ setOperationAction({ISD::GlobalAddress, ISD::BlockAddress, ISD::ConstantPool,
+ ISD::JumpTable},
+ XLenVT, Custom);
+
+ setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);
+
+ if (Subtarget.is64Bit())
+ setOperationAction(ISD::Constant, MVT::i64, Custom);
+
+ // TODO: On M-mode only targets, the cycle[h] CSR may not be present.
+ // Unfortunately this can't be determined just from the ISA naming string.
+ setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
+ Subtarget.is64Bit() ? Legal : Custom);
+
+ setOperationAction({ISD::TRAP, ISD::DEBUGTRAP}, MVT::Other, Legal);
+ setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
+ if (Subtarget.is64Bit())
+ setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom);
+
+ if (Subtarget.hasStdExtA()) {
+ setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
+ setMinCmpXchgSizeInBits(32);
+ } else if (Subtarget.hasForcedAtomics()) {
+ setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
+ } else {
+ setMaxAtomicSizeInBitsSupported(0);
+ }
+
+ setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
+
+ setBooleanContents(ZeroOrOneBooleanContent);
+
+ if (Subtarget.hasVInstructions()) {
+ setBooleanVectorContents(ZeroOrOneBooleanContent);
+
+ setOperationAction(ISD::VSCALE, XLenVT, Custom);
+
+ // RVV intrinsics may have illegal operands.
+ // We also need to custom legalize vmv.x.s.
+ setOperationAction({ISD::INTRINSIC_WO_CHAIN, ISD::INTRINSIC_W_CHAIN},
+ {MVT::i8, MVT::i16}, Custom);
+ if (Subtarget.is64Bit())
+ setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom);
+ else
+ setOperationAction({ISD::INTRINSIC_WO_CHAIN, ISD::INTRINSIC_W_CHAIN},
+ MVT::i64, Custom);
+
+ setOperationAction({ISD::INTRINSIC_W_CHAIN, ISD::INTRINSIC_VOID},
+ MVT::Other, Custom);
+
+ static const unsigned IntegerVPOps[] = {
+ ISD::VP_ADD, ISD::VP_SUB, ISD::VP_MUL,
+ ISD::VP_SDIV, ISD::VP_UDIV, ISD::VP_SREM,
+ ISD::VP_UREM, ISD::VP_AND, ISD::VP_OR,
+ ISD::VP_XOR, ISD::VP_ASHR, ISD::VP_LSHR,
+ ISD::VP_SHL, ISD::VP_REDUCE_ADD, ISD::VP_REDUCE_AND,
+ ISD::VP_REDUCE_OR, ISD::VP_REDUCE_XOR, ISD::VP_REDUCE_SMAX,
+ ISD::VP_REDUCE_SMIN, ISD::VP_REDUCE_UMAX, ISD::VP_REDUCE_UMIN,
+ ISD::VP_MERGE, ISD::VP_SELECT, ISD::VP_FP_TO_SINT,
+ ISD::VP_FP_TO_UINT, ISD::VP_SETCC, ISD::VP_SIGN_EXTEND,
+ ISD::VP_ZERO_EXTEND, ISD::VP_TRUNCATE, ISD::VP_SMIN,
+ ISD::VP_SMAX, ISD::VP_UMIN, ISD::VP_UMAX,
+ ISD::VP_ABS};
+
+ static const unsigned FloatingPointVPOps[] = {
+ ISD::VP_FADD, ISD::VP_FSUB, ISD::VP_FMUL,
+ ISD::VP_FDIV, ISD::VP_FNEG, ISD::VP_FABS,
+ ISD::VP_FMA, ISD::VP_REDUCE_FADD, ISD::VP_REDUCE_SEQ_FADD,
+ ISD::VP_REDUCE_FMIN, ISD::VP_REDUCE_FMAX, ISD::VP_MERGE,
+ ISD::VP_SELECT, ISD::VP_SINT_TO_FP, ISD::VP_UINT_TO_FP,
+ ISD::VP_SETCC, ISD::VP_FP_ROUND, ISD::VP_FP_EXTEND,
+ ISD::VP_SQRT, ISD::VP_FMINNUM, ISD::VP_FMAXNUM,
+ ISD::VP_FCEIL, ISD::VP_FFLOOR, ISD::VP_FROUND,
+ ISD::VP_FROUNDEVEN, ISD::VP_FCOPYSIGN, ISD::VP_FROUNDTOZERO,
+ ISD::VP_FRINT, ISD::VP_FNEARBYINT};
+
+ static const unsigned IntegerVecReduceOps[] = {
+ ISD::VECREDUCE_ADD, ISD::VECREDUCE_AND, ISD::VECREDUCE_OR,
+ ISD::VECREDUCE_XOR, ISD::VECREDUCE_SMAX, ISD::VECREDUCE_SMIN,
+ ISD::VECREDUCE_UMAX, ISD::VECREDUCE_UMIN};
+
+ static const unsigned FloatingPointVecReduceOps[] = {
+ ISD::VECREDUCE_FADD, ISD::VECREDUCE_SEQ_FADD, ISD::VECREDUCE_FMIN,
+ ISD::VECREDUCE_FMAX};
+
+ if (!Subtarget.is64Bit()) {
+ // We must custom-lower certain vXi64 operations on RV32 due to the vector
+ // element type being illegal.
+ setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT},
+ MVT::i64, Custom);
+
+ setOperationAction(IntegerVecReduceOps, MVT::i64, Custom);
+
+ setOperationAction({ISD::VP_REDUCE_ADD, ISD::VP_REDUCE_AND,
+ ISD::VP_REDUCE_OR, ISD::VP_REDUCE_XOR,
+ ISD::VP_REDUCE_SMAX, ISD::VP_REDUCE_SMIN,
+ ISD::VP_REDUCE_UMAX, ISD::VP_REDUCE_UMIN},
+ MVT::i64, Custom);
+ }
+
+ for (MVT VT : BoolVecVTs) {
+ if (!isTypeLegal(VT))
+ continue;
+
+ setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
+
+ // Mask VTs are custom-expanded into a series of standard nodes
+ setOperationAction({ISD::TRUNCATE, ISD::CONCAT_VECTORS,
+ ISD::INSERT_SUBVECTOR, ISD::EXTRACT_SUBVECTOR},
+ VT, Custom);
+
+ setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT}, VT,
+ Custom);
+
+ setOperationAction(ISD::SELECT, VT, Custom);
+ setOperationAction(
+ {ISD::SELECT_CC, ISD::VSELECT, ISD::VP_MERGE, ISD::VP_SELECT}, VT,
+ Expand);
+
+ setOperationAction({ISD::VP_AND, ISD::VP_OR, ISD::VP_XOR}, VT, Custom);
+
+ setOperationAction(
+ {ISD::VECREDUCE_AND, ISD::VECREDUCE_OR, ISD::VECREDUCE_XOR}, VT,
+ Custom);
+
+ setOperationAction(
+ {ISD::VP_REDUCE_AND, ISD::VP_REDUCE_OR, ISD::VP_REDUCE_XOR}, VT,
+ Custom);
+
+ // RVV has native int->float & float->int conversions where the
+ // element type sizes are within one power-of-two of each other. Any
+ // wider distances between type sizes have to be lowered as sequences
+ // which progressively narrow the gap in stages.
+ setOperationAction(
+ {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT},
+ VT, Custom);
+ setOperationAction({ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT}, VT,
+ Custom);
+
+ // Expand all extending loads to types larger than this, and truncating
+ // stores from types larger than this.
+ for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
+ setTruncStoreAction(OtherVT, VT, Expand);
+ setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, OtherVT,
+ VT, Expand);
+ }
+
+ setOperationAction({ISD::VP_FP_TO_SINT, ISD::VP_FP_TO_UINT,
+ ISD::VP_TRUNCATE, ISD::VP_SETCC},
+ VT, Custom);
+ setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);
+
+ setOperationPromotedToType(
+ ISD::VECTOR_SPLICE, VT,
+ MVT::getVectorVT(MVT::i8, VT.getVectorElementCount()));
+ }
+
+ for (MVT VT : IntVecVTs) {
+ if (!isTypeLegal(VT))
+ continue;
+
+ setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
+ setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
+
+ // Vectors implement MULHS/MULHU.
+ setOperationAction({ISD::SMUL_LOHI, ISD::UMUL_LOHI}, VT, Expand);
+
+ // nxvXi64 MULHS/MULHU requires the V extension instead of Zve64*.
+ if (VT.getVectorElementType() == MVT::i64 && !Subtarget.hasStdExtV())
+ setOperationAction({ISD::MULHU, ISD::MULHS}, VT, Expand);
+
+ setOperationAction({ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}, VT,
+ Legal);
+
+ setOperationAction({ISD::ROTL, ISD::ROTR}, VT, Expand);
+
+ setOperationAction({ISD::CTTZ, ISD::CTLZ, ISD::CTPOP}, VT, Expand);
+
+ setOperationAction(ISD::BSWAP, VT, Expand);
+ setOperationAction({ISD::VP_BSWAP, ISD::VP_BITREVERSE}, VT, Expand);
+ setOperationAction({ISD::VP_FSHL, ISD::VP_FSHR}, VT, Expand);
+ setOperationAction({ISD::VP_CTLZ, ISD::VP_CTLZ_ZERO_UNDEF, ISD::VP_CTTZ,
+ ISD::VP_CTTZ_ZERO_UNDEF, ISD::VP_CTPOP},
+ VT, Expand);
+
+ // Custom-lower extensions and truncations from/to mask types.
+ setOperationAction({ISD::ANY_EXTEND, ISD::SIGN_EXTEND, ISD::ZERO_EXTEND},
+ VT, Custom);
+
+ // RVV has native int->float & float->int conversions where the
+ // element type sizes are within one power-of-two of each other. Any
+ // wider distances between type sizes have to be lowered as sequences
+ // which progressively narrow the gap in stages.
+ setOperationAction(
+ {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT},
+ VT, Custom);
+ setOperationAction({ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT}, VT,
+ Custom);
+
+ setOperationAction(
+ {ISD::SADDSAT, ISD::UADDSAT, ISD::SSUBSAT, ISD::USUBSAT}, VT, Legal);
+
+ // Integer VTs are lowered as a series of "RISCVISD::TRUNCATE_VECTOR_VL"
+ // nodes which truncate by one power of two at a time.
+ setOperationAction(ISD::TRUNCATE, VT, Custom);
+
+ // Custom-lower insert/extract operations to simplify patterns.
+ setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT}, VT,
+ Custom);
+
+ // Custom-lower reduction operations to set up the corresponding custom
+ // nodes' operands.
+ setOperationAction(IntegerVecReduceOps, VT, Custom);
+
+ setOperationAction(IntegerVPOps, VT, Custom);
+
+ setOperationAction({ISD::LOAD, ISD::STORE}, VT, Custom);
+
+ setOperationAction({ISD::MLOAD, ISD::MSTORE, ISD::MGATHER, ISD::MSCATTER},
+ VT, Custom);
+
+ setOperationAction(
+ {ISD::VP_LOAD, ISD::VP_STORE, ISD::EXPERIMENTAL_VP_STRIDED_LOAD,
+ ISD::EXPERIMENTAL_VP_STRIDED_STORE, ISD::VP_GATHER, ISD::VP_SCATTER},
+ VT, Custom);
+
+ setOperationAction(
+ {ISD::CONCAT_VECTORS, ISD::INSERT_SUBVECTOR, ISD::EXTRACT_SUBVECTOR},
+ VT, Custom);
+
+ setOperationAction(ISD::SELECT, VT, Custom);
+ setOperationAction(ISD::SELECT_CC, VT, Expand);
+
+ setOperationAction({ISD::STEP_VECTOR, ISD::VECTOR_REVERSE}, VT, Custom);
+
+ for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
+ setTruncStoreAction(VT, OtherVT, Expand);
+ setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, OtherVT,
+ VT, Expand);
+ }
+
+ // Splice
+ setOperationAction(ISD::VECTOR_SPLICE, VT, Custom);
+
+ // Lower CTLZ_ZERO_UNDEF and CTTZ_ZERO_UNDEF if element of VT in the range
+ // of f32.
+ EVT FloatVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
+ if (isTypeLegal(FloatVT)) {
+ setOperationAction(
+ {ISD::CTLZ, ISD::CTLZ_ZERO_UNDEF, ISD::CTTZ_ZERO_UNDEF}, VT,
+ Custom);
+ }
+ }
+
+ // Expand various CCs to best match the RVV ISA, which natively supports UNE
+ // but no other unordered comparisons, and supports all ordered comparisons
+ // except ONE. Additionally, we expand GT,OGT,GE,OGE for optimization
+ // purposes; they are expanded to their swapped-operand CCs (LT,OLT,LE,OLE),
+ // and we pattern-match those back to the "original", swapping operands once
+ // more. This way we catch both operations and both "vf" and "fv" forms with
+ // fewer patterns.
+ static const ISD::CondCode VFPCCToExpand[] = {
+ ISD::SETO, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
+ ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUO,
+ ISD::SETGT, ISD::SETOGT, ISD::SETGE, ISD::SETOGE,
+ };
+
+ // Sets common operation actions on RVV floating-point vector types.
+ const auto SetCommonVFPActions = [&](MVT VT) {
+ setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
+ // RVV has native FP_ROUND & FP_EXTEND conversions where the element type
+ // sizes are within one power-of-two of each other. Therefore conversions
+ // between vXf16 and vXf64 must be lowered as sequences which convert via
+ // vXf32.
+ setOperationAction({ISD::FP_ROUND, ISD::FP_EXTEND}, VT, Custom);
+ // Custom-lower insert/extract operations to simplify patterns.
+ setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT}, VT,
+ Custom);
+ // Expand various condition codes (explained above).
+ setCondCodeAction(VFPCCToExpand, VT, Expand);
+
+ setOperationAction({ISD::FMINNUM, ISD::FMAXNUM}, VT, Legal);
+
+ setOperationAction(
+ {ISD::FTRUNC, ISD::FCEIL, ISD::FFLOOR, ISD::FROUND, ISD::FROUNDEVEN},
+ VT, Custom);
+
+ setOperationAction(FloatingPointVecReduceOps, VT, Custom);
+
+ // Expand FP operations that need libcalls.
+ setOperationAction(ISD::FREM, VT, Expand);
+ setOperationAction(ISD::FPOW, VT, Expand);
+ setOperationAction(ISD::FCOS, VT, Expand);
+ setOperationAction(ISD::FSIN, VT, Expand);
+ setOperationAction(ISD::FSINCOS, VT, Expand);
+ setOperationAction(ISD::FEXP, VT, Expand);
+ setOperationAction(ISD::FEXP2, VT, Expand);
+ setOperationAction(ISD::FLOG, VT, Expand);
+ setOperationAction(ISD::FLOG2, VT, Expand);
+ setOperationAction(ISD::FLOG10, VT, Expand);
+ setOperationAction(ISD::FRINT, VT, Expand);
+ setOperationAction(ISD::FNEARBYINT, VT, Expand);
+
+ setOperationAction(ISD::FCOPYSIGN, VT, Legal);
+
+ setOperationAction({ISD::LOAD, ISD::STORE}, VT, Custom);
+
+ setOperationAction({ISD::MLOAD, ISD::MSTORE, ISD::MGATHER, ISD::MSCATTER},
+ VT, Custom);
+
+ setOperationAction(
+ {ISD::VP_LOAD, ISD::VP_STORE, ISD::EXPERIMENTAL_VP_STRIDED_LOAD,
+ ISD::EXPERIMENTAL_VP_STRIDED_STORE, ISD::VP_GATHER, ISD::VP_SCATTER},
+ VT, Custom);
+
+ setOperationAction(ISD::SELECT, VT, Custom);
+ setOperationAction(ISD::SELECT_CC, VT, Expand);
+
+ setOperationAction(
+ {ISD::CONCAT_VECTORS, ISD::INSERT_SUBVECTOR, ISD::EXTRACT_SUBVECTOR},
+ VT, Custom);
+
+ setOperationAction({ISD::VECTOR_REVERSE, ISD::VECTOR_SPLICE}, VT, Custom);
+
+ setOperationAction(FloatingPointVPOps, VT, Custom);
+ };
+
+ // Sets common extload/truncstore actions on RVV floating-point vector
+ // types.
+ const auto SetCommonVFPExtLoadTruncStoreActions =
+ [&](MVT VT, ArrayRef<MVT::SimpleValueType> SmallerVTs) {
+ for (auto SmallVT : SmallerVTs) {
+ setTruncStoreAction(VT, SmallVT, Expand);
+ setLoadExtAction(ISD::EXTLOAD, VT, SmallVT, Expand);
+ }
+ };
+
+ if (Subtarget.hasVInstructionsF16()) {
+ for (MVT VT : F16VecVTs) {
+ if (!isTypeLegal(VT))
+ continue;
+ SetCommonVFPActions(VT);
+ }
+ }
+
+ if (Subtarget.hasVInstructionsF32()) {
+ for (MVT VT : F32VecVTs) {
+ if (!isTypeLegal(VT))
+ continue;
+ SetCommonVFPActions(VT);
+ SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
+ }
+ }
+
+ if (Subtarget.hasVInstructionsF64()) {
+ for (MVT VT : F64VecVTs) {
+ if (!isTypeLegal(VT))
+ continue;
+ SetCommonVFPActions(VT);
+ SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
+ SetCommonVFPExtLoadTruncStoreActions(VT, F32VecVTs);
+ }
+ }
+
+ if (Subtarget.useRVVForFixedLengthVectors()) {
+ for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
+ if (!useRVVForFixedLengthVectorVT(VT))
+ continue;
+
+ // By default everything must be expanded.
+ for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
+ setOperationAction(Op, VT, Expand);
+ for (MVT OtherVT : MVT::integer_fixedlen_vector_valuetypes()) {
+ setTruncStoreAction(VT, OtherVT, Expand);
+ setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD},
+ OtherVT, VT, Expand);
+ }
+
+ // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
+ setOperationAction({ISD::INSERT_SUBVECTOR, ISD::EXTRACT_SUBVECTOR}, VT,
+ Custom);
+
+ setOperationAction({ISD::BUILD_VECTOR, ISD::CONCAT_VECTORS}, VT,
+ Custom);
+
+ setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT},
+ VT, Custom);
+
+ setOperationAction({ISD::LOAD, ISD::STORE}, VT, Custom);
+
+ setOperationAction(ISD::SETCC, VT, Custom);
+
+ setOperationAction(ISD::SELECT, VT, Custom);
+
+ setOperationAction(ISD::TRUNCATE, VT, Custom);
+
+ setOperationAction(ISD::BITCAST, VT, Custom);
+
+ setOperationAction(
+ {ISD::VECREDUCE_AND, ISD::VECREDUCE_OR, ISD::VECREDUCE_XOR}, VT,
+ Custom);
+
+ setOperationAction(
+ {ISD::VP_REDUCE_AND, ISD::VP_REDUCE_OR, ISD::VP_REDUCE_XOR}, VT,
+ Custom);
+
+ setOperationAction({ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT,
+ ISD::FP_TO_UINT},
+ VT, Custom);
+ setOperationAction({ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT}, VT,
+ Custom);
+
+ // Operations below are different for between masks and other vectors.
+ if (VT.getVectorElementType() == MVT::i1) {
+ setOperationAction({ISD::VP_AND, ISD::VP_OR, ISD::VP_XOR, ISD::AND,
+ ISD::OR, ISD::XOR},
+ VT, Custom);
+
+ setOperationAction({ISD::VP_FP_TO_SINT, ISD::VP_FP_TO_UINT,
+ ISD::VP_SETCC, ISD::VP_TRUNCATE},
+ VT, Custom);
+ continue;
+ }
+
+ // Make SPLAT_VECTOR Legal so DAGCombine will convert splat vectors to
+ // it before type legalization for i64 vectors on RV32. It will then be
+ // type legalized to SPLAT_VECTOR_PARTS which we need to Custom handle.
+ // FIXME: Use SPLAT_VECTOR for all types? DAGCombine probably needs
+ // improvements first.
+ if (!Subtarget.is64Bit() && VT.getVectorElementType() == MVT::i64) {
+ setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
+ setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
+ }
+
+ setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
+
+ setOperationAction(
+ {ISD::MLOAD, ISD::MSTORE, ISD::MGATHER, ISD::MSCATTER}, VT, Custom);
+
+ setOperationAction({ISD::VP_LOAD, ISD::VP_STORE,
+ ISD::EXPERIMENTAL_VP_STRIDED_LOAD,
+ ISD::EXPERIMENTAL_VP_STRIDED_STORE, ISD::VP_GATHER,
+ ISD::VP_SCATTER},
+ VT, Custom);
+
+ setOperationAction({ISD::ADD, ISD::MUL, ISD::SUB, ISD::AND, ISD::OR,
+ ISD::XOR, ISD::SDIV, ISD::SREM, ISD::UDIV,
+ ISD::UREM, ISD::SHL, ISD::SRA, ISD::SRL},
+ VT, Custom);
+
+ setOperationAction(
+ {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX, ISD::ABS}, VT, Custom);
+
+ // vXi64 MULHS/MULHU requires the V extension instead of Zve64*.
+ if (VT.getVectorElementType() != MVT::i64 || Subtarget.hasStdExtV())
+ setOperationAction({ISD::MULHS, ISD::MULHU}, VT, Custom);
+
+ setOperationAction(
+ {ISD::SADDSAT, ISD::UADDSAT, ISD::SSUBSAT, ISD::USUBSAT}, VT,
+ Custom);
+
+ setOperationAction(ISD::VSELECT, VT, Custom);
+ setOperationAction(ISD::SELECT_CC, VT, Expand);
+
+ setOperationAction(
+ {ISD::ANY_EXTEND, ISD::SIGN_EXTEND, ISD::ZERO_EXTEND}, VT, Custom);
+
+ // Custom-lower reduction operations to set up the corresponding custom
+ // nodes' operands.
+ setOperationAction({ISD::VECREDUCE_ADD, ISD::VECREDUCE_SMAX,
+ ISD::VECREDUCE_SMIN, ISD::VECREDUCE_UMAX,
+ ISD::VECREDUCE_UMIN},
+ VT, Custom);
+
+ setOperationAction(IntegerVPOps, VT, Custom);
+
+ // Lower CTLZ_ZERO_UNDEF and CTTZ_ZERO_UNDEF if element of VT in the
+ // range of f32.
+ EVT FloatVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
+ if (isTypeLegal(FloatVT))
+ setOperationAction(
+ {ISD::CTLZ, ISD::CTLZ_ZERO_UNDEF, ISD::CTTZ_ZERO_UNDEF}, VT,
+ Custom);
+ }
+
+ for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
+ if (!useRVVForFixedLengthVectorVT(VT))
+ continue;
+
+ // By default everything must be expanded.
+ for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
+ setOperationAction(Op, VT, Expand);
+ for (MVT OtherVT : MVT::fp_fixedlen_vector_valuetypes()) {
+ setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
+ setTruncStoreAction(VT, OtherVT, Expand);
+ }
+
+ // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
+ setOperationAction({ISD::INSERT_SUBVECTOR, ISD::EXTRACT_SUBVECTOR}, VT,
+ Custom);
+
+ setOperationAction({ISD::BUILD_VECTOR, ISD::CONCAT_VECTORS,
+ ISD::VECTOR_SHUFFLE, ISD::INSERT_VECTOR_ELT,
+ ISD::EXTRACT_VECTOR_ELT},
+ VT, Custom);
+
+ setOperationAction({ISD::LOAD, ISD::STORE, ISD::MLOAD, ISD::MSTORE,
+ ISD::MGATHER, ISD::MSCATTER},
+ VT, Custom);
+
+ setOperationAction({ISD::VP_LOAD, ISD::VP_STORE,
+ ISD::EXPERIMENTAL_VP_STRIDED_LOAD,
+ ISD::EXPERIMENTAL_VP_STRIDED_STORE, ISD::VP_GATHER,
+ ISD::VP_SCATTER},
+ VT, Custom);
+
+ setOperationAction({ISD::FADD, ISD::FSUB, ISD::FMUL, ISD::FDIV,
+ ISD::FNEG, ISD::FABS, ISD::FCOPYSIGN, ISD::FSQRT,
+ ISD::FMA, ISD::FMINNUM, ISD::FMAXNUM},
+ VT, Custom);
+
+ setOperationAction({ISD::FP_ROUND, ISD::FP_EXTEND}, VT, Custom);
+
+ setOperationAction({ISD::FTRUNC, ISD::FCEIL, ISD::FFLOOR, ISD::FROUND,
+ ISD::FROUNDEVEN},
+ VT, Custom);
+
+ setCondCodeAction(VFPCCToExpand, VT, Expand);
+
+ setOperationAction({ISD::VSELECT, ISD::SELECT}, VT, Custom);
+ setOperationAction(ISD::SELECT_CC, VT, Expand);
+
+ setOperationAction(ISD::BITCAST, VT, Custom);
+
+ setOperationAction(FloatingPointVecReduceOps, VT, Custom);
+
+ setOperationAction(FloatingPointVPOps, VT, Custom);
+ }
+
+ // Custom-legalize bitcasts from fixed-length vectors to scalar types.
+ setOperationAction(ISD::BITCAST, {MVT::i8, MVT::i16, MVT::i32, MVT::i64},
+ Custom);
+ if (Subtarget.hasStdExtZfhOrZfhmin())
+ setOperationAction(ISD::BITCAST, MVT::f16, Custom);
+ if (Subtarget.hasStdExtF())
+ setOperationAction(ISD::BITCAST, MVT::f32, Custom);
+ if (Subtarget.hasStdExtD())
+ setOperationAction(ISD::BITCAST, MVT::f64, Custom);
+ }
+ }
+
+ if (Subtarget.hasForcedAtomics()) {
+ // Set atomic rmw/cas operations to expand to force __sync libcalls.
+ setOperationAction(
+ {ISD::ATOMIC_CMP_SWAP, ISD::ATOMIC_SWAP, ISD::ATOMIC_LOAD_ADD,
+ ISD::ATOMIC_LOAD_SUB, ISD::ATOMIC_LOAD_AND, ISD::ATOMIC_LOAD_OR,
+ ISD::ATOMIC_LOAD_XOR, ISD::ATOMIC_LOAD_NAND, ISD::ATOMIC_LOAD_MIN,
+ ISD::ATOMIC_LOAD_MAX, ISD::ATOMIC_LOAD_UMIN, ISD::ATOMIC_LOAD_UMAX},
+ XLenVT, Expand);
+ }
+
+ // Function alignments.
+ const Align FunctionAlignment(Subtarget.hasStdExtCOrZca() ? 2 : 4);
+ setMinFunctionAlignment(FunctionAlignment);
+ setPrefFunctionAlignment(FunctionAlignment);
+
+ setMinimumJumpTableEntries(5);
+
+ // Jumps are expensive, compared to logic
+ setJumpIsExpensive();
+
+ setTargetDAGCombine({ISD::INTRINSIC_WO_CHAIN, ISD::ADD, ISD::SUB, ISD::AND,
+ ISD::OR, ISD::XOR, ISD::SETCC, ISD::SELECT});
+ if (Subtarget.is64Bit())
+ setTargetDAGCombine(ISD::SRA);
+
+ if (Subtarget.hasStdExtF())
+ setTargetDAGCombine({ISD::FADD, ISD::FMAXNUM, ISD::FMINNUM});
+
+ if (Subtarget.hasStdExtZbb())
+ setTargetDAGCombine({ISD::UMAX, ISD::UMIN, ISD::SMAX, ISD::SMIN});
+
+ if (Subtarget.hasStdExtZbs() && Subtarget.is64Bit())
+ setTargetDAGCombine(ISD::TRUNCATE);
+
+ if (Subtarget.hasStdExtZbkb())
+ setTargetDAGCombine(ISD::BITREVERSE);
+ if (Subtarget.hasStdExtZfhOrZfhmin())
+ setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
+ if (Subtarget.hasStdExtF())
+ setTargetDAGCombine({ISD::ZERO_EXTEND, ISD::FP_TO_SINT, ISD::FP_TO_UINT,
+ ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT});
+ if (Subtarget.hasVInstructions())
+ setTargetDAGCombine({ISD::FCOPYSIGN, ISD::MGATHER, ISD::MSCATTER,
+ ISD::VP_GATHER, ISD::VP_SCATTER, ISD::SRA, ISD::SRL,
+ ISD::SHL, ISD::STORE, ISD::SPLAT_VECTOR});
+ if (Subtarget.useRVVForFixedLengthVectors())
+ setTargetDAGCombine(ISD::BITCAST);
+
+ setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
+ setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
+}
+
+// Return the result type of a SETCC on operands of type VT: a pointer-sized
+// integer for scalar compares, an i1 mask vector for vectors lowered through
+// RVV, and an integer-element vector of the same lane count otherwise.
+EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL,
+                                            LLVMContext &Context,
+                                            EVT VT) const {
+  // Scalar comparisons produce an XLEN/pointer-sized integer.
+  if (!VT.isVector())
+    return getPointerTy(DL);
+
+  // Vectors legalized with the V extension yield a mask vector: one i1 lane
+  // per input element.
+  bool LowersViaRVV =
+      Subtarget.hasVInstructions() &&
+      (VT.isScalableVector() || Subtarget.useRVVForFixedLengthVectors());
+  if (LowersViaRVV)
+    return EVT::getVectorVT(Context, MVT::i1, VT.getVectorElementCount());
+
+  return VT.changeVectorElementTypeToInteger();
+}
+
+// The explicit vector length (EVL) operand of VP intrinsics is XLEN wide on
+// RISC-V, matching the width of the AVL value consumed by vsetvli.
+MVT RISCVTargetLowering::getVPExplicitVectorLengthTy() const {
+  return Subtarget.getXLenVT();
+}
+
+// Describe the memory behaviour of RISC-V target intrinsics so the generic
+// SelectionDAG machinery can attach a MachineMemOperand to them. Fills Info
+// and returns true for intrinsics that access memory; returns false for all
+// others.
+bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
+                                             const CallInst &I,
+                                             MachineFunction &MF,
+                                             unsigned Intrinsic) const {
+  auto &DL = I.getModule()->getDataLayout();
+  switch (Intrinsic) {
+  default:
+    return false;
+  // Masked atomics: a 32-bit read-modify-write through the pointer in
+  // argument 0, marked volatile so it is never reordered or elided.
+  case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
+  case Intrinsic::riscv_masked_atomicrmw_add_i32:
+  case Intrinsic::riscv_masked_atomicrmw_sub_i32:
+  case Intrinsic::riscv_masked_atomicrmw_nand_i32:
+  case Intrinsic::riscv_masked_atomicrmw_max_i32:
+  case Intrinsic::riscv_masked_atomicrmw_min_i32:
+  case Intrinsic::riscv_masked_atomicrmw_umax_i32:
+  case Intrinsic::riscv_masked_atomicrmw_umin_i32:
+  case Intrinsic::riscv_masked_cmpxchg_i32:
+    Info.opc = ISD::INTRINSIC_W_CHAIN;
+    Info.memVT = MVT::i32;
+    Info.ptrVal = I.getArgOperand(0);
+    Info.offset = 0;
+    Info.align = Align(4);
+    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
+                 MachineMemOperand::MOVolatile;
+    return true;
+  // Masked strided load: base pointer is argument 1. The total size is
+  // unknown since the stride and active length are runtime values; only the
+  // scalar element type/alignment can be reported.
+  case Intrinsic::riscv_masked_strided_load:
+    Info.opc = ISD::INTRINSIC_W_CHAIN;
+    Info.ptrVal = I.getArgOperand(1);
+    Info.memVT = getValueType(DL, I.getType()->getScalarType());
+    Info.align = Align(DL.getTypeSizeInBits(I.getType()->getScalarType()) / 8);
+    Info.size = MemoryLocation::UnknownSize;
+    Info.flags |= MachineMemOperand::MOLoad;
+    return true;
+  // Masked strided store: data to store is argument 0, base pointer is
+  // argument 1.
+  case Intrinsic::riscv_masked_strided_store:
+    Info.opc = ISD::INTRINSIC_VOID;
+    Info.ptrVal = I.getArgOperand(1);
+    Info.memVT =
+        getValueType(DL, I.getArgOperand(0)->getType()->getScalarType());
+    Info.align = Align(
+        DL.getTypeSizeInBits(I.getArgOperand(0)->getType()->getScalarType()) /
+        8);
+    Info.size = MemoryLocation::UnknownSize;
+    Info.flags |= MachineMemOperand::MOStore;
+    return true;
+  // Segment loads return a struct of vectors; describe the access via the
+  // scalar element type of the first struct member. Total size depends on
+  // the runtime vector length, hence unknown.
+  case Intrinsic::riscv_seg2_load:
+  case Intrinsic::riscv_seg3_load:
+  case Intrinsic::riscv_seg4_load:
+  case Intrinsic::riscv_seg5_load:
+  case Intrinsic::riscv_seg6_load:
+  case Intrinsic::riscv_seg7_load:
+  case Intrinsic::riscv_seg8_load:
+    Info.opc = ISD::INTRINSIC_W_CHAIN;
+    Info.ptrVal = I.getArgOperand(0);
+    Info.memVT =
+        getValueType(DL, I.getType()->getStructElementType(0)->getScalarType());
+    Info.align =
+        Align(DL.getTypeSizeInBits(
+                  I.getType()->getStructElementType(0)->getScalarType()) /
+              8);
+    Info.size = MemoryLocation::UnknownSize;
+    Info.flags |= MachineMemOperand::MOLoad;
+    return true;
+  }
+}
+
+// RISC-V loads/stores address memory as reg + signed 12-bit immediate only:
+// no global base, no scaled index register, and RVV memory operations take a
+// bare register with neither scale nor offset.
+bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
+                                                const AddrMode &AM, Type *Ty,
+                                                unsigned AS,
+                                                Instruction *I) const {
+  // No global is ever allowed as a base.
+  if (AM.BaseGV)
+    return false;
+
+  // RVV instructions only support register addressing.
+  if (Subtarget.hasVInstructions() && isa<VectorType>(Ty))
+    return AM.HasBaseReg && AM.Scale == 0 && !AM.BaseOffs;
+
+  // Require a 12-bit signed offset.
+  if (!isInt<12>(AM.BaseOffs))
+    return false;
+
+  // "r+i" (or just "i") is fine.
+  if (AM.Scale == 0)
+    return true;
+
+  // A unit scale is only acceptable when it stands in for the base register
+  // itself, i.e. "r+i" spelled with Scale=1. "r+r" and "r+r+i" have no
+  // encoding, and neither does any larger scale.
+  return AM.Scale == 1 && !AM.HasBaseReg;
+}
+
+// Compare immediates are legal when they fit the signed 12-bit I-type
+// immediate field.
+bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
+  return isInt<12>(Imm);
+}
+
+// Add immediates are legal when they fit the signed 12-bit I-type immediate
+// field (ADDI/ADDIW).
+bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
+  return isInt<12>(Imm);
+}
+
+// On RV32, 64-bit integers are split into their high and low parts and held
+// in two different registers, so the trunc is free since the low register can
+// just be used.
+// FIXME: Should we consider i64->i32 free on RV64 to match the EVT version of
+// isTruncateFree?
+bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
+  // Only the RV32 case is handled here; both types must be plain integers.
+  if (Subtarget.is64Bit())
+    return false;
+  if (!SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
+    return false;
+  return SrcTy->getPrimitiveSizeInBits() == 64 &&
+         DstTy->getPrimitiveSizeInBits() == 32;
+}
+
+bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
+  // We consider i64->i32 free on RV64 since we have good selection of W
+  // instructions that make promoting operations back to i64 free in many
+  // cases.
+  bool BothScalarInteger = !SrcVT.isVector() && !DstVT.isVector() &&
+                           SrcVT.isInteger() && DstVT.isInteger();
+  if (!BothScalarInteger)
+    return false;
+  return SrcVT.getSizeInBits() == 64 && DstVT.getSizeInBits() == 32;
+}
+
+bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
+  // Zexts are free if they can be combined with a load.
+  // Don't advertise i32->i64 zextload as being free for RV64. It interacts
+  // poorly with type legalization of compares preferring sext.
+  if (auto *Load = dyn_cast<LoadSDNode>(Val)) {
+    EVT MemVT = Load->getMemoryVT();
+    bool NarrowEnough = MemVT == MVT::i8 || MemVT == MVT::i16;
+    ISD::LoadExtType ExtKind = Load->getExtensionType();
+    if (NarrowEnough &&
+        (ExtKind == ISD::NON_EXTLOAD || ExtKind == ISD::ZEXTLOAD))
+      return true;
+  }
+
+  // Otherwise defer to the generic implementation.
+  return TargetLowering::isZExtFree(Val, VT2);
+}
+
+// On RV64, i32 values are kept sign-extended in registers, so extending
+// i32 -> i64 with a sign extension is cheaper than a zero extension.
+bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
+  return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
+}
+
+// Prefer sign-extending 32-bit constants on RV64, matching the convention
+// that i32 values live sign-extended in 64-bit registers.
+bool RISCVTargetLowering::signExtendConstant(const ConstantInt *CI) const {
+  return Subtarget.is64Bit() && CI->getType()->isIntegerTy(32);
+}
+
+// With Zbb, count-trailing-zeros is cheap enough to speculate past a branch.
+bool RISCVTargetLowering::isCheapToSpeculateCttz(Type *Ty) const {
+  return Subtarget.hasStdExtZbb();
+}
+
+// With Zbb, count-leading-zeros is cheap enough to speculate past a branch.
+bool RISCVTargetLowering::isCheapToSpeculateCtlz(Type *Ty) const {
+  return Subtarget.hasStdExtZbb();
+}
+
+bool RISCVTargetLowering::isMaskAndCmp0FoldingBeneficial(
+    const Instruction &AndI) const {
+  // We expect to be able to match a bit extraction instruction if the Zbs
+  // extension is supported and the mask is a power of two. However, we
+  // conservatively return false if the mask would fit in an ANDI instruction,
+  // on the basis that it's possible the sinking+duplication of the AND in
+  // CodeGenPrepare triggered by this hook wouldn't decrease the instruction
+  // count and would increase code size (e.g. ANDI+BNEZ => BEXTI+BNEZ).
+  if (!Subtarget.hasStdExtZbs())
+    return false;
+  auto *MaskC = dyn_cast<ConstantInt>(AndI.getOperand(1));
+  if (!MaskC)
+    return false;
+  const APInt &MaskVal = MaskC->getValue();
+  return MaskVal.isPowerOf2() && !MaskVal.isSignedIntN(12);
+}
+
+bool RISCVTargetLowering::hasAndNotCompare(SDValue Y) const {
+  // FIXME: Support vectors once we have tests.
+  if (Y.getValueType().isVector())
+    return false;
+
+  // Only profitable when Y is not a constant, and only when an and-not style
+  // instruction is actually available (Zbb or Zbkb).
+  if (isa<ConstantSDNode>(Y))
+    return false;
+  return Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb();
+}
+
+bool RISCVTargetLowering::hasBitTest(SDValue X, SDValue Y) const {
+  // Zbs provides BEXT[_I], which can be used with SEQZ/SNEZ as a bit test.
+  if (Subtarget.hasStdExtZbs())
+    return X.getValueType().isScalarInteger();
+  // We can use ANDI+SEQZ/SNEZ as a bit test. Y contains the bit position.
+  if (auto *BitPos = dyn_cast<ConstantSDNode>(Y))
+    return BitPos->getAPIntValue().ule(10);
+  return false;
+}
+
+bool RISCVTargetLowering::shouldFoldSelectWithIdentityConstant(unsigned Opcode,
+                                                               EVT VT) const {
+  // Only enable for RVV: the type must be a vector, the V extension must be
+  // present, and fixed-length vectors must be legal (i.e. mapped onto RVV).
+  return VT.isVector() && Subtarget.hasVInstructions() &&
+         (!VT.isFixedLengthVector() || isTypeLegal(VT));
+}
+
+bool RISCVTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
+ Type *Ty) const {
+ assert(Ty->isIntegerTy());
+
+ unsigned BitSize = Ty->getIntegerBitWidth();
+ if (BitSize > Subtarget.getXLen())
+ return false;
+
+ // Fast path, assume 32-bit immediates are cheap.
+ int64_t Val = Imm.getSExtValue();
+ if (isInt<32>(Val))
+ return true;
+
+ // A constant pool entry may be more aligned thant he load we're trying to
+ // replace. If we don't support unaligned scalar mem, prefer the constant
+ // pool.
+ // TODO: Can the caller pass down the alignment?
+ if (!Subtarget.enableUnalignedScalarMem())
+ return true;
+
+ // Prefer to keep the load if it would require many instructions.
+ // This uses the same threshold we use for constant pools but doesn't
+ // check useConstantPoolForLargeInts.
+ // TODO: Should we keep the load only when we're definitely going to emit a
+ // constant pool?
+
+ RISCVMatInt::InstSeq Seq =
+ RISCVMatInt::generateInstSeq(Val, Subtarget.getFeatureBits());
+ return Seq.size() <= Subtarget.getMaxBuildIntsCost();
+}
+
+bool RISCVTargetLowering::
+ shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
+ SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
+ unsigned OldShiftOpcode, unsigned NewShiftOpcode,
+ SelectionDAG &DAG) const {
+ // One interesting pattern that we'd want to form is 'bit extract':
+ // ((1 >> Y) & 1) ==/!= 0
+ // But we also need to be careful not to try to reverse that fold.
+
+ // Is this '((1 >> Y) & 1)'?
+ if (XC && OldShiftOpcode == ISD::SRL && XC->isOne())
+ return false; // Keep the 'bit extract' pattern.
+
+ // Will this be '((1 >> Y) & 1)' after the transform?
+ if (NewShiftOpcode == ISD::SRL && CC->isOne())
+ return true; // Do form the 'bit extract' pattern.
+
+ // If 'X' is a constant, and we transform, then we will immediately
+ // try to undo the fold, thus causing endless combine loop.
+ // So only do the transform if X is not a constant. This matches the default
+ // implementation of this function.
+ return !XC;
+}
+
+// Return true if operand \p Operand of an instruction with IR opcode
+// \p Opcode can be replaced by a scalar splat (i.e. the operation has a
+// scalar-operand RVV form). Non-commutative shifts/divides/remainders only
+// allow the right-hand operand.
+bool RISCVTargetLowering::canSplatOperand(unsigned Opcode, int Operand) const {
+  switch (Opcode) {
+  case Instruction::Add:
+  case Instruction::Sub:
+  case Instruction::Mul:
+  case Instruction::And:
+  case Instruction::Or:
+  case Instruction::Xor:
+  case Instruction::FAdd:
+  case Instruction::FSub:
+  case Instruction::FMul:
+  case Instruction::FDiv:
+  case Instruction::ICmp:
+  case Instruction::FCmp:
+    return true;
+  case Instruction::Shl:
+  case Instruction::LShr:
+  case Instruction::AShr:
+  case Instruction::UDiv:
+  case Instruction::SDiv:
+  case Instruction::URem:
+  case Instruction::SRem:
+    return Operand == 1;
+  default:
+    return false;
+  }
+}
+
+
+// As above, but for a concrete instruction. Requires a vector result type and
+// V-extension support, and additionally recognizes FMA and VP intrinsics.
+bool RISCVTargetLowering::canSplatOperand(Instruction *I, int Operand) const {
+  if (!I->getType()->isVectorTy() || !Subtarget.hasVInstructions())
+    return false;
+
+  if (canSplatOperand(I->getOpcode(), Operand))
+    return true;
+
+  auto *II = dyn_cast<IntrinsicInst>(I);
+  if (!II)
+    return false;
+
+  switch (II->getIntrinsicID()) {
+  case Intrinsic::fma:
+  case Intrinsic::vp_fma:
+    return Operand == 0 || Operand == 1;
+  case Intrinsic::vp_shl:
+  case Intrinsic::vp_lshr:
+  case Intrinsic::vp_ashr:
+  case Intrinsic::vp_udiv:
+  case Intrinsic::vp_sdiv:
+  case Intrinsic::vp_urem:
+  case Intrinsic::vp_srem:
+    return Operand == 1;
+  // These intrinsics are commutative.
+  case Intrinsic::vp_add:
+  case Intrinsic::vp_mul:
+  case Intrinsic::vp_and:
+  case Intrinsic::vp_or:
+  case Intrinsic::vp_xor:
+  case Intrinsic::vp_fadd:
+  case Intrinsic::vp_fmul:
+  // These intrinsics have 'vr' versions.
+  case Intrinsic::vp_sub:
+  case Intrinsic::vp_fsub:
+  case Intrinsic::vp_fdiv:
+    return Operand == 0 || Operand == 1;
+  default:
+    return false;
+  }
+}
+
+/// Check if sinking \p I's operands to I's basic block is profitable, because
+/// the operands can be folded into a target instruction, e.g.
+/// splats of scalars can fold into vector instructions.
+bool RISCVTargetLowering::shouldSinkOperands(
+    Instruction *I, SmallVectorImpl<Use *> &Ops) const {
+  using namespace llvm::PatternMatch;
+
+  // Only RVV vector code benefits from sinking splat operands.
+  if (!I->getType()->isVectorTy() || !Subtarget.hasVInstructions())
+    return false;
+
+  for (auto OpIdx : enumerate(I->operands())) {
+    // Skip operand positions that cannot take a scalar splat.
+    if (!canSplatOperand(I, OpIdx.index()))
+      continue;
+
+    Instruction *Op = dyn_cast<Instruction>(OpIdx.value().get());
+    // Make sure we are not already sinking this operand
+    if (!Op || any_of(Ops, [&](Use *U) { return U->get() == Op; }))
+      continue;
+
+    // We are looking for a splat that can be sunk.
+    if (!match(Op, m_Shuffle(m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()),
+                             m_Undef(), m_ZeroMask())))
+      continue;
+
+    // All uses of the shuffle should be sunk to avoid duplicating it across gpr
+    // and vector registers
+    for (Use &U : Op->uses()) {
+      Instruction *Insn = cast<Instruction>(U.getUser());
+      if (!canSplatOperand(Insn, U.getOperandNo()))
+        return false;
+    }
+
+    // Sink both the insertelement feeding the splat shuffle and I's use of
+    // the shuffle itself.
+    Ops.push_back(&Op->getOperandUse(0));
+    Ops.push_back(&OpIdx.value());
+  }
+  return true;
+}
+
+// Prefer scalarizing a binop extracted from a vector when the vector form is
+// not legal but the scalar form is.
+bool RISCVTargetLowering::shouldScalarizeBinop(SDValue VecOp) const {
+  unsigned Opc = VecOp.getOpcode();
+
+  // Assume target opcodes can't be scalarized.
+  // TODO - do we have any exceptions?
+  if (Opc >= ISD::BUILTIN_OP_END)
+    return false;
+
+  // If the vector op is not supported, try to convert to scalar.
+  EVT VecVT = VecOp.getValueType();
+  if (!isOperationLegalOrCustomOrPromote(Opc, VecVT))
+    return true;
+
+  // If the vector op is supported, but the scalar op is not, the transform may
+  // not be worthwhile.
+  EVT ScalarVT = VecVT.getScalarType();
+  return isOperationLegalOrCustomOrPromote(Opc, ScalarVT);
+}
+
+bool RISCVTargetLowering::isOffsetFoldingLegal(
+    const GlobalAddressSDNode *GA) const {
+  // In order to maximise the opportunity for common subexpression elimination,
+  // keep a separate ADD node for the global address offset instead of folding
+  // it in the global address node. Later peephole optimisations may choose to
+  // fold it back in when profitable.
+  return false;
+}
+
+// Only zero is treated as a legal FP immediate, and only for FP types whose
+// corresponding extension (Zfh/Zfhmin, F, D) is available.
+bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
+                                       bool ForCodeSize) const {
+  if (VT == MVT::f16 && !Subtarget.hasStdExtZfhOrZfhmin())
+    return false;
+  if (VT == MVT::f32 && !Subtarget.hasStdExtF())
+    return false;
+  if (VT == MVT::f64 && !Subtarget.hasStdExtD())
+    return false;
+  return Imm.isZero();
+}
+
+// TODO: This is very conservative.
+bool RISCVTargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
+                                                  unsigned Index) const {
+  if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
+    return false;
+
+  // Only support extracting a fixed from a fixed vector for now.
+  if (ResVT.isScalableVector() || SrcVT.isScalableVector())
+    return false;
+
+  unsigned ResElts = ResVT.getVectorNumElements();
+  unsigned SrcElts = SrcVT.getVectorNumElements();
+
+  // Conservatively only handle extracting half of a vector.
+  // TODO: Relax this.
+  if ((ResElts * 2) != SrcElts)
+    return false;
+
+  // The smallest type we can slide is i8.
+  // TODO: We can extract index 0 from a mask vector without a slide.
+  if (ResVT.getVectorElementType() == MVT::i1)
+    return false;
+
+  // Slide can support arbitrary index, but we only treat vslidedown.vi as
+  // cheap.
+  if (Index >= 32)
+    return false;
+
+  // TODO: We can do arbitrary slidedowns, but for now only support extracting
+  // the upper half of a vector until we have more test coverage.
+  return Index == 0 || Index == ResElts;
+}
+
+// Bitwise logic on FP values can be done in FP registers for types whose FP
+// extension is available, so no FP<->int moves are needed.
+bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
+  return (VT == MVT::f16 && Subtarget.hasStdExtZfhOrZfhmin()) ||
+         (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
+         (VT == MVT::f64 && Subtarget.hasStdExtD());
+}
+
+// Calling-convention register type: pass f16 in an f32 register when F is
+// present but Zfh/Zfhmin is not (no native half-precision registers).
+MVT RISCVTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
+                                                       CallingConv::ID CC,
+                                                       EVT VT) const {
+  // Use f32 to pass f16 if it is legal and Zfh/Zfhmin is not enabled.
+  // We might still end up using a GPR but that will be decided based on ABI.
+  if (VT == MVT::f16 && Subtarget.hasStdExtF() &&
+      !Subtarget.hasStdExtZfhOrZfhmin())
+    return MVT::f32;
+
+  return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
+}
+
+// Must agree with getRegisterTypeForCallingConv above: the promoted f16
+// occupies exactly one register.
+unsigned RISCVTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
+                                                            CallingConv::ID CC,
+                                                            EVT VT) const {
+  // Use f32 to pass f16 if it is legal and Zfh/Zfhmin is not enabled.
+  // We might still end up using a GPR but that will be decided based on ABI.
+  if (VT == MVT::f16 && Subtarget.hasStdExtF() &&
+      !Subtarget.hasStdExtZfhOrZfhmin())
+    return 1;
+
+  return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
+}
+
+// Changes the condition code and swaps operands if necessary, so the SetCC
+// operation matches one of the comparisons supported directly by branches
+// in the RISC-V ISA. May adjust compares to favor compare with 0 over compare
+// with 1/-1.
+static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS,
+                                    ISD::CondCode &CC, SelectionDAG &DAG) {
+  // If this is a single bit test that can't be handled by ANDI, shift the
+  // bit to be tested to the MSB and perform a signed compare with 0.
+  if (isIntEqualitySetCC(CC) && isNullConstant(RHS) &&
+      LHS.getOpcode() == ISD::AND && LHS.hasOneUse() &&
+      isa<ConstantSDNode>(LHS.getOperand(1))) {
+    uint64_t Mask = LHS.getConstantOperandVal(1);
+    if ((isPowerOf2_64(Mask) || isMask_64(Mask)) && !isInt<12>(Mask)) {
+      unsigned ShAmt = 0;
+      if (isPowerOf2_64(Mask)) {
+        // Single-bit mask: move the bit to the sign position and turn the
+        // EQ/NE-with-zero into a sign test (>= 0 / < 0).
+        CC = CC == ISD::SETEQ ? ISD::SETGE : ISD::SETLT;
+        ShAmt = LHS.getValueSizeInBits() - 1 - Log2_64(Mask);
+      } else {
+        // Low-bits mask: shift the bits above the mask out the top.
+        ShAmt = LHS.getValueSizeInBits() - llvm::bit_width(Mask);
+      }
+
+      LHS = LHS.getOperand(0);
+      if (ShAmt != 0)
+        LHS = DAG.getNode(ISD::SHL, DL, LHS.getValueType(), LHS,
+                          DAG.getConstant(ShAmt, DL, LHS.getValueType()));
+      return;
+    }
+  }
+
+  if (auto *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
+    int64_t C = RHSC->getSExtValue();
+    switch (CC) {
+    default: break;
+    case ISD::SETGT:
+      // Convert X > -1 to X >= 0.
+      if (C == -1) {
+        RHS = DAG.getConstant(0, DL, RHS.getValueType());
+        CC = ISD::SETGE;
+        return;
+      }
+      break;
+    case ISD::SETLT:
+      // Convert X < 1 to 0 <= X.
+      if (C == 1) {
+        RHS = LHS;
+        LHS = DAG.getConstant(0, DL, RHS.getValueType());
+        CC = ISD::SETGE;
+        return;
+      }
+      break;
+    }
+  }
+
+  // Canonicalize the remaining codes to their swapped-operand forms.
+  switch (CC) {
+  default:
+    break;
+  case ISD::SETGT:
+  case ISD::SETLE:
+  case ISD::SETUGT:
+  case ISD::SETULE:
+    CC = ISD::getSetCCSwappedOperands(CC);
+    std::swap(LHS, RHS);
+    break;
+  }
+}
+
+// Map a scalable vector type to its RVV register grouping (LMUL) based on its
+// known-minimum size in bits relative to a 64-bit block (RVVBitsPerBlock).
+RISCVII::VLMUL RISCVTargetLowering::getLMUL(MVT VT) {
+  assert(VT.isScalableVector() && "Expecting a scalable vector type");
+  unsigned KnownSize = VT.getSizeInBits().getKnownMinValue();
+  // Scale i1 mask vectors so they index the same switch as byte-or-wider
+  // element vectors.
+  if (VT.getVectorElementType() == MVT::i1)
+    KnownSize *= 8;
+
+  switch (KnownSize) {
+  default:
+    llvm_unreachable("Invalid LMUL.");
+  case 8:
+    return RISCVII::VLMUL::LMUL_F8;
+  case 16:
+    return RISCVII::VLMUL::LMUL_F4;
+  case 32:
+    return RISCVII::VLMUL::LMUL_F2;
+  case 64:
+    return RISCVII::VLMUL::LMUL_1;
+  case 128:
+    return RISCVII::VLMUL::LMUL_2;
+  case 256:
+    return RISCVII::VLMUL::LMUL_4;
+  case 512:
+    return RISCVII::VLMUL::LMUL_8;
+  }
+}
+
+// Register class for a given LMUL: fractional and LMUL=1 share VR; larger
+// groupings use the VRM2/VRM4/VRM8 tuple classes.
+unsigned RISCVTargetLowering::getRegClassIDForLMUL(RISCVII::VLMUL LMul) {
+  switch (LMul) {
+  default:
+    llvm_unreachable("Invalid LMUL.");
+  case RISCVII::VLMUL::LMUL_F8:
+  case RISCVII::VLMUL::LMUL_F4:
+  case RISCVII::VLMUL::LMUL_F2:
+  case RISCVII::VLMUL::LMUL_1:
+    return RISCV::VRRegClassID;
+  case RISCVII::VLMUL::LMUL_2:
+    return RISCV::VRM2RegClassID;
+  case RISCVII::VLMUL::LMUL_4:
+    return RISCV::VRM4RegClassID;
+  case RISCVII::VLMUL::LMUL_8:
+    return RISCV::VRM8RegClassID;
+  }
+}
+
+// Subregister index of the Index'th VT-sized piece within a larger register
+// group; relies on the sub_vrm* indices being numbered consecutively.
+unsigned RISCVTargetLowering::getSubregIndexByMVT(MVT VT, unsigned Index) {
+  RISCVII::VLMUL LMUL = getLMUL(VT);
+  if (LMUL == RISCVII::VLMUL::LMUL_F8 ||
+      LMUL == RISCVII::VLMUL::LMUL_F4 ||
+      LMUL == RISCVII::VLMUL::LMUL_F2 ||
+      LMUL == RISCVII::VLMUL::LMUL_1) {
+    static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
+                  "Unexpected subreg numbering");
+    return RISCV::sub_vrm1_0 + Index;
+  }
+  if (LMUL == RISCVII::VLMUL::LMUL_2) {
+    static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
+                  "Unexpected subreg numbering");
+    return RISCV::sub_vrm2_0 + Index;
+  }
+  if (LMUL == RISCVII::VLMUL::LMUL_4) {
+    static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
+                  "Unexpected subreg numbering");
+    return RISCV::sub_vrm4_0 + Index;
+  }
+  llvm_unreachable("Invalid vector type.");
+}
+
+// Mask vectors always live in a single VR register regardless of length.
+unsigned RISCVTargetLowering::getRegClassIDForVecVT(MVT VT) {
+  if (VT.getVectorElementType() == MVT::i1)
+    return RISCV::VRRegClassID;
+  return getRegClassIDForLMUL(getLMUL(VT));
+}
+
+// Attempt to decompose a subvector insert/extract between VecVT and
+// SubVecVT via subregister indices. Returns the subregister index that
+// can perform the subvector insert/extract with the given element index, as
+// well as the index corresponding to any leftover subvectors that must be
+// further inserted/extracted within the register class for SubVecVT.
+std::pair<unsigned, unsigned>
+RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
+    MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx,
+    const RISCVRegisterInfo *TRI) {
+  static_assert((RISCV::VRM8RegClassID > RISCV::VRM4RegClassID &&
+                 RISCV::VRM4RegClassID > RISCV::VRM2RegClassID &&
+                 RISCV::VRM2RegClassID > RISCV::VRRegClassID),
+                "Register classes not ordered");
+  unsigned VecRegClassID = getRegClassIDForVecVT(VecVT);
+  unsigned SubRegClassID = getRegClassIDForVecVT(SubVecVT);
+  // Try to compose a subregister index that takes us from the incoming
+  // LMUL>1 register class down to the outgoing one. At each step we half
+  // the LMUL:
+  //   nxv16i32@12 -> nxv2i32: sub_vrm4_1_then_sub_vrm2_1_then_sub_vrm1_0
+  // Note that this is not guaranteed to find a subregister index, such as
+  // when we are extracting from one VR type to another.
+  unsigned SubRegIdx = RISCV::NoSubRegister;
+  for (const unsigned RCID :
+       {RISCV::VRM4RegClassID, RISCV::VRM2RegClassID, RISCV::VRRegClassID})
+    if (VecRegClassID > RCID && SubRegClassID <= RCID) {
+      // Halve the vector and decide whether the wanted index lands in the
+      // low or high half; descend into that half.
+      VecVT = VecVT.getHalfNumVectorElementsVT();
+      bool IsHi =
+          InsertExtractIdx >= VecVT.getVectorElementCount().getKnownMinValue();
+      SubRegIdx = TRI->composeSubRegIndices(SubRegIdx,
+                                            getSubregIndexByMVT(VecVT, IsHi));
+      if (IsHi)
+        InsertExtractIdx -= VecVT.getVectorElementCount().getKnownMinValue();
+    }
+  return {SubRegIdx, InsertExtractIdx};
+}
+
+// Permit combining of mask vectors as BUILD_VECTOR never expands to scalar
+// stores for those types.
+bool RISCVTargetLowering::mergeStoresAfterLegalization(EVT VT) const {
+  return !Subtarget.useRVVForFixedLengthVectors() ||
+         (VT.isFixedLengthVector() && VT.getVectorElementType() == MVT::i1);
+}
+
+// Whether a scalar type can be an element of an RVV vector on this subtarget:
+// pointers and i8/i16/i32 always; i64 and FP types only when the relevant
+// vector-subextension is present.
+bool RISCVTargetLowering::isLegalElementTypeForRVV(Type *ScalarTy) const {
+  if (ScalarTy->isPointerTy())
+    return true;
+
+  if (ScalarTy->isIntegerTy(8) || ScalarTy->isIntegerTy(16) ||
+      ScalarTy->isIntegerTy(32))
+    return true;
+
+  if (ScalarTy->isIntegerTy(64))
+    return Subtarget.hasVInstructionsI64();
+
+  if (ScalarTy->isHalfTy())
+    return Subtarget.hasVInstructionsF16();
+  if (ScalarTy->isFloatTy())
+    return Subtarget.hasVInstructionsF32();
+  if (ScalarTy->isDoubleTy())
+    return Subtarget.hasVInstructionsF64();
+
+  return false;
+}
+
+// Number of repeated FP divisors before a reciprocal is formed; taken from
+// the precomputed NumRepeatedDivisors setting.
+unsigned RISCVTargetLowering::combineRepeatedFPDivisors() const {
+  return NumRepeatedDivisors;
+}
+
+// Extract the VL operand of a RISCV vector intrinsic node, using the
+// per-intrinsic operand index recorded in RISCVVIntrinsicsTable. Returns an
+// empty SDValue for intrinsics not in the table.
+static SDValue getVLOperand(SDValue Op) {
+  assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
+          Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
+         "Unexpected opcode");
+  bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
+  unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
+  const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
+      RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
+  if (!II)
+    return SDValue();
+  // Skip the chain (if present) and the intrinsic-ID operand.
+  return Op.getOperand(II->VLOperand + 1 + HasChain);
+}
+
+// Decide whether a fixed-length vector type should be lowered using RVV
+// scalable containers on this subtarget.
+static bool useRVVForFixedLengthVectorVT(MVT VT,
+                                         const RISCVSubtarget &Subtarget) {
+  assert(VT.isFixedLengthVector() && "Expected a fixed length vector type!");
+  if (!Subtarget.useRVVForFixedLengthVectors())
+    return false;
+
+  // We only support a set of vector types with a consistent maximum fixed size
+  // across all supported vector element types to avoid legalization issues.
+  // Therefore -- since the largest is v1024i8/v512i16/etc -- the largest
+  // fixed-length vector type we support is 1024 bytes.
+  if (VT.getFixedSizeInBits() > 1024 * 8)
+    return false;
+
+  unsigned MinVLen = Subtarget.getRealMinVLen();
+
+  MVT EltVT = VT.getVectorElementType();
+
+  // Don't use RVV for vectors we cannot scalarize if required.
+  switch (EltVT.SimpleTy) {
+  // i1 is supported but has different rules.
+  default:
+    return false;
+  case MVT::i1:
+    // Masks can only use a single register.
+    if (VT.getVectorNumElements() > MinVLen)
+      return false;
+    MinVLen /= 8;
+    break;
+  case MVT::i8:
+  case MVT::i16:
+  case MVT::i32:
+    break;
+  case MVT::i64:
+    if (!Subtarget.hasVInstructionsI64())
+      return false;
+    break;
+  case MVT::f16:
+    if (!Subtarget.hasVInstructionsF16())
+      return false;
+    break;
+  case MVT::f32:
+    if (!Subtarget.hasVInstructionsF32())
+      return false;
+    break;
+  case MVT::f64:
+    if (!Subtarget.hasVInstructionsF64())
+      return false;
+    break;
+  }
+
+  // Reject elements larger than ELEN.
+  if (EltVT.getSizeInBits() > Subtarget.getELEN())
+    return false;
+
+  unsigned LMul = divideCeil(VT.getSizeInBits(), MinVLen);
+  // Don't use RVV for types that don't fit.
+  if (LMul > Subtarget.getMaxLMULForFixedLengthVectors())
+    return false;
+
+  // TODO: Perhaps an artificial restriction, but worth having whilst getting
+  // the base fixed length RVV support in place.
+  if (!VT.isPow2VectorType())
+    return false;
+
+  return true;
+}
+
+// Member-function wrapper over the static helper above.
+bool RISCVTargetLowering::useRVVForFixedLengthVectorVT(MVT VT) const {
+  return ::useRVVForFixedLengthVectorVT(VT, Subtarget);
+}
+
+// Return the largest legal scalable vector type that matches VT's element type.
+static MVT getContainerForFixedLengthVector(const TargetLowering &TLI, MVT VT,
+                                            const RISCVSubtarget &Subtarget) {
+  // This may be called before legal types are setup.
+  assert(((VT.isFixedLengthVector() && TLI.isTypeLegal(VT)) ||
+          useRVVForFixedLengthVectorVT(VT, Subtarget)) &&
+         "Expected legal fixed length vector!");
+
+  unsigned MinVLen = Subtarget.getRealMinVLen();
+  unsigned MaxELen = Subtarget.getELEN();
+
+  MVT EltVT = VT.getVectorElementType();
+  switch (EltVT.SimpleTy) {
+  default:
+    llvm_unreachable("unexpected element type for RVV container");
+  case MVT::i1:
+  case MVT::i8:
+  case MVT::i16:
+  case MVT::i32:
+  case MVT::i64:
+  case MVT::f16:
+  case MVT::f32:
+  case MVT::f64: {
+    // We prefer to use LMUL=1 for VLEN sized types. Use fractional lmuls for
+    // narrower types. The smallest fractional LMUL we support is 8/ELEN. Within
+    // each fractional LMUL we support SEW between 8 and LMUL*ELEN.
+    unsigned NumElts =
+        (VT.getVectorNumElements() * RISCV::RVVBitsPerBlock) / MinVLen;
+    NumElts = std::max(NumElts, RISCV::RVVBitsPerBlock / MaxELen);
+    assert(isPowerOf2_32(NumElts) && "Expected power of 2 NumElts");
+    return MVT::getScalableVectorVT(EltVT, NumElts);
+  }
+  }
+}
+
+// Convenience overload taking a SelectionDAG.
+static MVT getContainerForFixedLengthVector(SelectionDAG &DAG, MVT VT,
+                                            const RISCVSubtarget &Subtarget) {
+  return getContainerForFixedLengthVector(DAG.getTargetLoweringInfo(), VT,
+                                          Subtarget);
+}
+
+// Public member-function entry point.
+MVT RISCVTargetLowering::getContainerForFixedLengthVector(MVT VT) const {
+  return ::getContainerForFixedLengthVector(*this, VT, getSubtarget());
+}
+
+// Grow V to consume an entire RVV register, by inserting the fixed-length
+// value at index 0 of an undef scalable vector.
+static SDValue convertToScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
+                                       const RISCVSubtarget &Subtarget) {
+  assert(VT.isScalableVector() &&
+         "Expected to convert into a scalable vector!");
+  assert(V.getValueType().isFixedLengthVector() &&
+         "Expected a fixed length vector operand!");
+  SDLoc DL(V);
+  SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
+  return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V, Zero);
+}
+
+// Shrink V so it's just big enough to maintain a VT's worth of data, by
+// extracting the fixed-length prefix of the scalable vector.
+static SDValue convertFromScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
+                                         const RISCVSubtarget &Subtarget) {
+  assert(VT.isFixedLengthVector() &&
+         "Expected to convert into a fixed length vector!");
+  assert(V.getValueType().isScalableVector() &&
+         "Expected a scalable vector operand!");
+  SDLoc DL(V);
+  SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
+  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, Zero);
+}
+
+/// Return the type of the mask type suitable for masking the provided
+/// vector type. This is simply an i1 element type vector of the same
+/// (possibly scalable) length.
+static MVT getMaskTypeFor(MVT VecVT) {
+  assert(VecVT.isVector());
+  ElementCount EC = VecVT.getVectorElementCount();
+  return MVT::getVectorVT(MVT::i1, EC);
+}
+
+/// Creates an all ones mask suitable for masking a vector of type VecTy with
+/// vector length VL.
+static SDValue getAllOnesMask(MVT VecVT, SDValue VL, SDLoc DL,
+                              SelectionDAG &DAG) {
+  MVT MaskVT = getMaskTypeFor(VecVT);
+  return DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
+}
+
+// Materialize a constant element count as an XLen-typed VL operand.
+static SDValue getVLOp(uint64_t NumElts, SDLoc DL, SelectionDAG &DAG,
+                       const RISCVSubtarget &Subtarget) {
+  return DAG.getConstant(NumElts, DL, Subtarget.getXLenVT());
+}
+
+// Build the default {mask, VL} pair for an operation on ContainerVT covering
+// a known constant element count.
+static std::pair<SDValue, SDValue>
+getDefaultVLOps(uint64_t NumElts, MVT ContainerVT, SDLoc DL, SelectionDAG &DAG,
+                const RISCVSubtarget &Subtarget) {
+  assert(ContainerVT.isScalableVector() && "Expecting scalable container type");
+  SDValue VL = getVLOp(NumElts, DL, DAG, Subtarget);
+  SDValue Mask = getAllOnesMask(ContainerVT, VL, DL, DAG);
+  return {Mask, VL};
+}
+
+// Gets the two common "VL" operands: an all-ones mask and the vector length.
+// VecVT is a vector type, either fixed-length or scalable, and ContainerVT is
+// the vector type that the fixed-length vector is contained in. Otherwise if
+// VecVT is scalable, then ContainerVT should be the same as VecVT.
+static std::pair<SDValue, SDValue>
+getDefaultVLOps(MVT VecVT, MVT ContainerVT, SDLoc DL, SelectionDAG &DAG,
+                const RISCVSubtarget &Subtarget) {
+  if (VecVT.isFixedLengthVector())
+    return getDefaultVLOps(VecVT.getVectorNumElements(), ContainerVT, DL, DAG,
+                           Subtarget);
+  assert(ContainerVT.isScalableVector() && "Expecting scalable container type");
+  MVT XLenVT = Subtarget.getXLenVT();
+  // An X0 VL operand denotes VLMAX.
+  SDValue VL = DAG.getRegister(RISCV::X0, XLenVT);
+  SDValue Mask = getAllOnesMask(ContainerVT, VL, DL, DAG);
+  return {Mask, VL};
+}
+
+// As above but assuming the given type is a scalable vector type.
+static std::pair<SDValue, SDValue>
+getDefaultScalableVLOps(MVT VecVT, SDLoc DL, SelectionDAG &DAG,
+                        const RISCVSubtarget &Subtarget) {
+  assert(VecVT.isScalableVector() && "Expecting a scalable vector");
+  return getDefaultVLOps(VecVT, VecVT, DL, DAG, Subtarget);
+}
+
+// The state of RVV BUILD_VECTOR and VECTOR_SHUFFLE lowering is that very few
+// of either is (currently) supported. This can get us into an infinite loop
+// where we try to lower a BUILD_VECTOR as a VECTOR_SHUFFLE as a BUILD_VECTOR
+// as a ..., etc.
+// Until either (or both) of these can reliably lower any node, reporting that
+// we don't want to expand BUILD_VECTORs via VECTOR_SHUFFLEs at least breaks
+// the infinite loop. Note that this lowers BUILD_VECTOR through the stack,
+// which is not desirable.
+bool RISCVTargetLowering::shouldExpandBuildVectorWithShuffles(
+    EVT VT, unsigned DefinedValues) const {
+  return false;
+}
+
+// Lower FP_TO_{S,U}INT_SAT for both scalars and vectors.
+static SDValue lowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG,
+                                  const RISCVSubtarget &Subtarget) {
+  // RISCV FP-to-int conversions saturate to the destination register size, but
+  // don't produce 0 for nan. We can use a conversion instruction and fix the
+  // nan case with a compare and a select.
+  SDValue Src = Op.getOperand(0);
+
+  MVT DstVT = Op.getSimpleValueType();
+  EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
+
+  bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT_SAT;
+
+  if (!DstVT.isVector()) {
+    // In absence of Zfh, promote f16 to f32, then saturate the result.
+    if (Src.getSimpleValueType() == MVT::f16 && !Subtarget.hasStdExtZfh()) {
+      Src = DAG.getNode(ISD::FP_EXTEND, SDLoc(Op), MVT::f32, Src);
+    }
+
+    unsigned Opc;
+    if (SatVT == DstVT)
+      Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
+    else if (DstVT == MVT::i64 && SatVT == MVT::i32)
+      Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
+    else
+      return SDValue();
+    // FIXME: Support other SatVTs by clamping before or after the conversion.
+
+    SDLoc DL(Op);
+    SDValue FpToInt = DAG.getNode(
+        Opc, DL, DstVT, Src,
+        DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, Subtarget.getXLenVT()));
+
+    // The unsigned 32-bit result must be zero-extended into the i64 result.
+    if (Opc == RISCVISD::FCVT_WU_RV64)
+      FpToInt = DAG.getZeroExtendInReg(FpToInt, DL, MVT::i32);
+
+    // Select 0 when the source compares unordered with itself (i.e. is NaN).
+    SDValue ZeroInt = DAG.getConstant(0, DL, DstVT);
+    return DAG.getSelectCC(DL, Src, Src, ZeroInt, FpToInt,
+                           ISD::CondCode::SETUO);
+  }
+
+  // Vectors.
+
+  MVT DstEltVT = DstVT.getVectorElementType();
+  MVT SrcVT = Src.getSimpleValueType();
+  MVT SrcEltVT = SrcVT.getVectorElementType();
+  unsigned SrcEltSize = SrcEltVT.getSizeInBits();
+  unsigned DstEltSize = DstEltVT.getSizeInBits();
+
+  // Only handle saturating to the destination type.
+  if (SatVT != DstEltVT)
+    return SDValue();
+
+  // FIXME: Don't support narrowing by more than 1 steps for now.
+  if (SrcEltSize > (2 * DstEltSize))
+    return SDValue();
+
+  MVT DstContainerVT = DstVT;
+  MVT SrcContainerVT = SrcVT;
+  if (DstVT.isFixedLengthVector()) {
+    DstContainerVT = getContainerForFixedLengthVector(DAG, DstVT, Subtarget);
+    SrcContainerVT = getContainerForFixedLengthVector(DAG, SrcVT, Subtarget);
+    assert(DstContainerVT.getVectorElementCount() ==
+               SrcContainerVT.getVectorElementCount() &&
+           "Expected same element count");
+    Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
+  }
+
+  SDLoc DL(Op);
+
+  auto [Mask, VL] = getDefaultVLOps(DstVT, DstContainerVT, DL, DAG, Subtarget);
+
+  // Src != Src is true exactly for NaN lanes.
+  SDValue IsNan = DAG.getNode(RISCVISD::SETCC_VL, DL, Mask.getValueType(),
+                              {Src, Src, DAG.getCondCode(ISD::SETNE),
+                               DAG.getUNDEF(Mask.getValueType()), Mask, VL});
+
+  // Need to widen by more than 1 step, promote the FP type, then do a widening
+  // convert.
+  if (DstEltSize > (2 * SrcEltSize)) {
+    assert(SrcContainerVT.getVectorElementType() == MVT::f16 && "Unexpected VT!");
+    MVT InterVT = SrcContainerVT.changeVectorElementType(MVT::f32);
+    Src = DAG.getNode(RISCVISD::FP_EXTEND_VL, DL, InterVT, Src, Mask, VL);
+  }
+
+  unsigned RVVOpc =
+      IsSigned ? RISCVISD::VFCVT_RTZ_X_F_VL : RISCVISD::VFCVT_RTZ_XU_F_VL;
+  SDValue Res = DAG.getNode(RVVOpc, DL, DstContainerVT, Src, Mask, VL);
+
+  // Force NaN lanes to zero, as required by the _SAT semantics.
+  SDValue SplatZero = DAG.getNode(
+      RISCVISD::VMV_V_X_VL, DL, DstContainerVT, DAG.getUNDEF(DstContainerVT),
+      DAG.getConstant(0, DL, Subtarget.getXLenVT()), VL);
+  Res = DAG.getNode(RISCVISD::VSELECT_VL, DL, DstContainerVT, IsNan, SplatZero,
+                    Res, VL);
+
+  if (DstVT.isFixedLengthVector())
+    Res = convertFromScalableVector(DstVT, Res, DAG, Subtarget);
+
+  return Res;
+}
+
+// Map a rounding-style ISD opcode (and its VP variant) to the corresponding
+// RISCV static rounding mode; returns Invalid for anything else.
+static RISCVFPRndMode::RoundingMode matchRoundingOp(unsigned Opc) {
+  switch (Opc) {
+  case ISD::FROUNDEVEN:
+  case ISD::VP_FROUNDEVEN:
+    return RISCVFPRndMode::RNE;
+  case ISD::FTRUNC:
+  case ISD::VP_FROUNDTOZERO:
+    return RISCVFPRndMode::RTZ;
+  case ISD::FFLOOR:
+  case ISD::VP_FFLOOR:
+    return RISCVFPRndMode::RDN;
+  case ISD::FCEIL:
+  case ISD::VP_FCEIL:
+    return RISCVFPRndMode::RUP;
+  case ISD::FROUND:
+  case ISD::VP_FROUND:
+    return RISCVFPRndMode::RMM;
+  case ISD::FRINT:
+    return RISCVFPRndMode::DYN;
+  }
+
+  return RISCVFPRndMode::Invalid;
+}
+
+// Expand vector FTRUNC, FCEIL, FFLOOR, FROUND, VP_FCEIL, VP_FFLOOR, VP_FROUND
+// VP_FROUNDEVEN, VP_FROUNDTOZERO, VP_FRINT and VP_FNEARBYINT by converting to
+// the integer domain and back. Taking care to avoid converting values that are
+// nan or already correct.
+static SDValue
+lowerVectorFTRUNC_FCEIL_FFLOOR_FROUND(SDValue Op, SelectionDAG &DAG,
+                                      const RISCVSubtarget &Subtarget) {
+  MVT VT = Op.getSimpleValueType();
+  assert(VT.isVector() && "Unexpected type");
+
+  SDLoc DL(Op);
+
+  SDValue Src = Op.getOperand(0);
+
+  MVT ContainerVT = VT;
+  if (VT.isFixedLengthVector()) {
+    ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
+    Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
+  }
+
+  // VP ops carry an explicit mask and VL; otherwise use the defaults.
+  SDValue Mask, VL;
+  if (Op->isVPOpcode()) {
+    Mask = Op.getOperand(1);
+    VL = Op.getOperand(2);
+  } else {
+    std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
+  }
+
+  // Freeze the source since we are increasing the number of uses.
+  Src = DAG.getFreeze(Src);
+
+  // We do the conversion on the absolute value and fix the sign at the end.
+  SDValue Abs = DAG.getNode(RISCVISD::FABS_VL, DL, ContainerVT, Src, Mask, VL);
+
+  // Determine the largest integer that can be represented exactly. This and
+  // values larger than it don't have any fractional bits so don't need to
+  // be converted.
+  const fltSemantics &FltSem = DAG.EVTToAPFloatSemantics(ContainerVT);
+  unsigned Precision = APFloat::semanticsPrecision(FltSem);
+  APFloat MaxVal = APFloat(FltSem);
+  MaxVal.convertFromAPInt(APInt::getOneBitSet(Precision, Precision - 1),
+                          /*IsSigned*/ false, APFloat::rmNearestTiesToEven);
+  SDValue MaxValNode =
+      DAG.getConstantFP(MaxVal, DL, ContainerVT.getVectorElementType());
+  SDValue MaxValSplat = DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, ContainerVT,
+                                    DAG.getUNDEF(ContainerVT), MaxValNode, VL);
+
+  // If abs(Src) was larger than MaxVal or nan, keep it.
+  MVT SetccVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
+  Mask =
+      DAG.getNode(RISCVISD::SETCC_VL, DL, SetccVT,
+                  {Abs, MaxValSplat, DAG.getCondCode(ISD::SETOLT),
+                   Mask, Mask, VL});
+
+  // Truncate to integer and convert back to FP.
+  MVT IntVT = ContainerVT.changeVectorElementTypeToInteger();
+  MVT XLenVT = Subtarget.getXLenVT();
+  SDValue Truncated;
+
+  switch (Op.getOpcode()) {
+  default:
+    llvm_unreachable("Unexpected opcode");
+  case ISD::FCEIL:
+  case ISD::VP_FCEIL:
+  case ISD::FFLOOR:
+  case ISD::VP_FFLOOR:
+  case ISD::FROUND:
+  case ISD::FROUNDEVEN:
+  case ISD::VP_FROUND:
+  case ISD::VP_FROUNDEVEN:
+  case ISD::VP_FROUNDTOZERO: {
+    // Static rounding mode selects the appropriate rounding behavior.
+    RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Op.getOpcode());
+    assert(FRM != RISCVFPRndMode::Invalid);
+    Truncated = DAG.getNode(RISCVISD::VFCVT_RM_X_F_VL, DL, IntVT, Src, Mask,
+                            DAG.getTargetConstant(FRM, DL, XLenVT), VL);
+    break;
+  }
+  case ISD::FTRUNC:
+    Truncated = DAG.getNode(RISCVISD::VFCVT_RTZ_X_F_VL, DL, IntVT, Src,
+                            Mask, VL);
+    break;
+  case ISD::VP_FRINT:
+    Truncated = DAG.getNode(RISCVISD::VFCVT_X_F_VL, DL, IntVT, Src, Mask, VL);
+    break;
+  case ISD::VP_FNEARBYINT:
+    Truncated = DAG.getNode(RISCVISD::VFROUND_NOEXCEPT_VL, DL, ContainerVT, Src,
+                            Mask, VL);
+    break;
+  }
+
+  // VFROUND_NOEXCEPT_VL includes SINT_TO_FP_VL.
+  if (Op.getOpcode() != ISD::VP_FNEARBYINT)
+    Truncated = DAG.getNode(RISCVISD::SINT_TO_FP_VL, DL, ContainerVT, Truncated,
+                            Mask, VL);
+
+  // Restore the original sign so that -0.0 is preserved.
+  Truncated = DAG.getNode(RISCVISD::FCOPYSIGN_VL, DL, ContainerVT, Truncated,
+                          Src, Src, Mask, VL);
+
+  if (!VT.isFixedLengthVector())
+    return Truncated;
+
+  return convertFromScalableVector(VT, Truncated, DAG, Subtarget);
+}
+
+// Scalar counterpart of the routine above: lower FTRUNC/FCEIL/FFLOOR/FROUND
+// via the RISCVISD::FROUND node; defers vectors to the vector lowering and
+// bails out when optimizing for size.
+static SDValue
+lowerFTRUNC_FCEIL_FFLOOR_FROUND(SDValue Op, SelectionDAG &DAG,
+                                const RISCVSubtarget &Subtarget) {
+  MVT VT = Op.getSimpleValueType();
+  if (VT.isVector())
+    return lowerVectorFTRUNC_FCEIL_FFLOOR_FROUND(Op, DAG, Subtarget);
+
+  if (DAG.shouldOptForSize())
+    return SDValue();
+
+  SDLoc DL(Op);
+  SDValue Src = Op.getOperand(0);
+
+  // Create an integer the size of the mantissa with the MSB set. This and all
+  // values larger than it don't have any fractional bits so don't need to be
+  // converted.
+  const fltSemantics &FltSem = DAG.EVTToAPFloatSemantics(VT);
+  unsigned Precision = APFloat::semanticsPrecision(FltSem);
+  APFloat MaxVal = APFloat(FltSem);
+  MaxVal.convertFromAPInt(APInt::getOneBitSet(Precision, Precision - 1),
+                          /*IsSigned*/ false, APFloat::rmNearestTiesToEven);
+  SDValue MaxValNode = DAG.getConstantFP(MaxVal, DL, VT);
+
+  RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Op.getOpcode());
+  return DAG.getNode(RISCVISD::FROUND, DL, VT, Src, MaxValNode,
+                     DAG.getTargetConstant(FRM, DL, Subtarget.getXLenVT()));
+}
+
+// Describes an arithmetic sequence X, X+S, X+2*S, ... where the step S is
+// StepNumerator/StepDenominator and X is Addend.
+struct VIDSequence {
+  int64_t StepNumerator;    // numerator of the per-element step
+  unsigned StepDenominator; // positive denominator of the step
+  int64_t Addend;           // start value of the sequence
+};
+
+// If \p APF is an exact integer representable in \p BitWidth bits, return its
+// zero-extended bit pattern; otherwise return std::nullopt.
+static std::optional<uint64_t> getExactInteger(const APFloat &APF,
+                                               uint32_t BitWidth) {
+  APSInt ValInt(BitWidth, !APF.isNegative());
+  // We use an arbitrary rounding mode here. If a floating-point is an exact
+  // integer (e.g., 1.0), the rounding mode does not affect the output value. If
+  // the rounding mode changes the output value, then it is not an exact
+  // integer.
+  RoundingMode ArbitraryRM = RoundingMode::TowardZero;
+  bool IsExact;
+  // If it is out of signed integer range, it will return an invalid operation.
+  // If it is not an exact integer, IsExact is false.
+  if ((APF.convertToInteger(ValInt, ArbitraryRM, &IsExact) ==
+       APFloatBase::opInvalidOp) ||
+      !IsExact)
+    return std::nullopt;
+  return ValInt.extractBitsAsZExtValue(BitWidth, 0);
+}
+
+// Try to match an arithmetic-sequence BUILD_VECTOR [X,X+S,X+2*S,...,X+(N-1)*S]
+// to the (non-zero) step S and start value X. This can be then lowered as the
+// RVV sequence (VID * S) + X, for example.
+// The step S is represented as an integer numerator divided by a positive
+// denominator. Note that the implementation currently only identifies
+// sequences in which either the numerator is +/- 1 or the denominator is 1. It
+// cannot detect 2/3, for example.
+// Note that this method will also match potentially unappealing index
+// sequences, like <i32 0, i32 50939494>, however it is left to the caller to
+// determine whether this is worth generating code for.
+static std::optional<VIDSequence> isSimpleVIDSequence(SDValue Op) {
+ unsigned NumElts = Op.getNumOperands();
+ assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unexpected BUILD_VECTOR");
+ bool IsInteger = Op.getValueType().isInteger();
+
+ std::optional<unsigned> SeqStepDenom;
+ std::optional<int64_t> SeqStepNum, SeqAddend;
+ std::optional<std::pair<uint64_t, unsigned>> PrevElt;
+ unsigned EltSizeInBits = Op.getValueType().getScalarSizeInBits();
+ for (unsigned Idx = 0; Idx < NumElts; Idx++) {
+ // Assume undef elements match the sequence; we just have to be careful
+ // when interpolating across them.
+ if (Op.getOperand(Idx).isUndef())
+ continue;
+
+ uint64_t Val;
+ if (IsInteger) {
+ // The BUILD_VECTOR must be all constants.
+ if (!isa<ConstantSDNode>(Op.getOperand(Idx)))
+ return std::nullopt;
+ Val = Op.getConstantOperandVal(Idx) &
+ maskTrailingOnes<uint64_t>(EltSizeInBits);
+ } else {
+ // The BUILD_VECTOR must be all constants.
+ if (!isa<ConstantFPSDNode>(Op.getOperand(Idx)))
+ return std::nullopt;
+ if (auto ExactInteger = getExactInteger(
+ cast<ConstantFPSDNode>(Op.getOperand(Idx))->getValueAPF(),
+ EltSizeInBits))
+ Val = *ExactInteger;
+ else
+ return std::nullopt;
+ }
+
+ if (PrevElt) {
+ // Calculate the step since the last non-undef element, and ensure
+ // it's consistent across the entire sequence.
+ unsigned IdxDiff = Idx - PrevElt->second;
+ int64_t ValDiff = SignExtend64(Val - PrevElt->first, EltSizeInBits);
+
+ // A zero-value value difference means that we're somewhere in the middle
+ // of a fractional step, e.g. <0,0,0*,0,1,1,1,1>. Wait until we notice a
+ // step change before evaluating the sequence.
+ if (ValDiff == 0)
+ continue;
+
+ int64_t Remainder = ValDiff % IdxDiff;
+ // Normalize the step if it's greater than 1.
+ if (Remainder != ValDiff) {
+ // The difference must cleanly divide the element span.
+ if (Remainder != 0)
+ return std::nullopt;
+ ValDiff /= IdxDiff;
+ IdxDiff = 1;
+ }
+
+ if (!SeqStepNum)
+ SeqStepNum = ValDiff;
+ else if (ValDiff != SeqStepNum)
+ return std::nullopt;
+
+ if (!SeqStepDenom)
+ SeqStepDenom = IdxDiff;
+ else if (IdxDiff != *SeqStepDenom)
+ return std::nullopt;
+ }
+
+ // Record this non-undef element for later.
+ if (!PrevElt || PrevElt->first != Val)
+ PrevElt = std::make_pair(Val, Idx);
+ }
+
+ // We need to have logged a step for this to count as a legal index sequence.
+ if (!SeqStepNum || !SeqStepDenom)
+ return std::nullopt;
+
+ // Loop back through the sequence and validate elements we might have skipped
+ // while waiting for a valid step. While doing this, log any sequence addend.
+ for (unsigned Idx = 0; Idx < NumElts; Idx++) {
+ if (Op.getOperand(Idx).isUndef())
+ continue;
+ uint64_t Val;
+ if (IsInteger) {
+ Val = Op.getConstantOperandVal(Idx) &
+ maskTrailingOnes<uint64_t>(EltSizeInBits);
+ } else {
+ Val = *getExactInteger(
+ cast<ConstantFPSDNode>(Op.getOperand(Idx))->getValueAPF(),
+ EltSizeInBits);
+ }
+ uint64_t ExpectedVal =
+ (int64_t)(Idx * (uint64_t)*SeqStepNum) / *SeqStepDenom;
+ int64_t Addend = SignExtend64(Val - ExpectedVal, EltSizeInBits);
+ if (!SeqAddend)
+ SeqAddend = Addend;
+ else if (Addend != SeqAddend)
+ return std::nullopt;
+ }
+
+ assert(SeqAddend && "Must have an addend if we have a step");
+
+ return VIDSequence{*SeqStepNum, *SeqStepDenom, *SeqAddend};
+}
+
+// Match a splatted value (SPLAT_VECTOR/BUILD_VECTOR) of an EXTRACT_VECTOR_ELT
+// and lower it as a VRGATHER_VX_VL from the source vector.
+static SDValue matchSplatAsGather(SDValue SplatVal, MVT VT, const SDLoc &DL,
+ SelectionDAG &DAG,
+ const RISCVSubtarget &Subtarget) {
+ if (SplatVal.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
+ return SDValue();
+ SDValue Vec = SplatVal.getOperand(0);
+ // Only perform this optimization on vectors of the same size for simplicity.
+ // Don't perform this optimization for i1 vectors.
+ // FIXME: Support i1 vectors, maybe by promoting to i8?
+ if (Vec.getValueType() != VT || VT.getVectorElementType() == MVT::i1)
+ return SDValue();
+ SDValue Idx = SplatVal.getOperand(1);
+ // The index must be a legal type.
+ if (Idx.getValueType() != Subtarget.getXLenVT())
+ return SDValue();
+
+ MVT ContainerVT = VT;
+ if (VT.isFixedLengthVector()) {
+ ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
+ Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
+ }
+
+ auto [Mask, VL] = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
+
+ SDValue Gather = DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, Vec,
+ Idx, DAG.getUNDEF(ContainerVT), Mask, VL);
+
+ if (!VT.isFixedLengthVector())
+ return Gather;
+
+ return convertFromScalableVector(VT, Gather, DAG, Subtarget);
+}
+
+static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
+ const RISCVSubtarget &Subtarget) {
+ MVT VT = Op.getSimpleValueType();
+ assert(VT.isFixedLengthVector() && "Unexpected vector!");
+
+ MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
+
+ SDLoc DL(Op);
+ auto [Mask, VL] = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
+
+ MVT XLenVT = Subtarget.getXLenVT();
+ unsigned NumElts = Op.getNumOperands();
+
+ if (VT.getVectorElementType() == MVT::i1) {
+ if (ISD::isBuildVectorAllZeros(Op.getNode())) {
+ SDValue VMClr = DAG.getNode(RISCVISD::VMCLR_VL, DL, ContainerVT, VL);
+ return convertFromScalableVector(VT, VMClr, DAG, Subtarget);
+ }
+
+ if (ISD::isBuildVectorAllOnes(Op.getNode())) {
+ SDValue VMSet = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
+ return convertFromScalableVector(VT, VMSet, DAG, Subtarget);
+ }
+
+ // Lower constant mask BUILD_VECTORs via an integer vector type, in
+ // scalar integer chunks whose bit-width depends on the number of mask
+ // bits and XLEN.
+ // First, determine the most appropriate scalar integer type to use. This
+ // is at most XLenVT, but may be shrunk to a smaller vector element type
+ // according to the size of the final vector - use i8 chunks rather than
+ // XLenVT if we're producing a v8i1. This results in more consistent
+ // codegen across RV32 and RV64.
+ unsigned NumViaIntegerBits =
+ std::min(std::max(NumElts, 8u), Subtarget.getXLen());
+ NumViaIntegerBits = std::min(NumViaIntegerBits, Subtarget.getELEN());
+ if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
+ // If we have to use more than one INSERT_VECTOR_ELT then this
+ // optimization is likely to increase code size; avoid peforming it in
+ // such a case. We can use a load from a constant pool in this case.
+ if (DAG.shouldOptForSize() && NumElts > NumViaIntegerBits)
+ return SDValue();
+ // Now we can create our integer vector type. Note that it may be larger
+ // than the resulting mask type: v4i1 would use v1i8 as its integer type.
+ MVT IntegerViaVecVT =
+ MVT::getVectorVT(MVT::getIntegerVT(NumViaIntegerBits),
+ divideCeil(NumElts, NumViaIntegerBits));
+
+ uint64_t Bits = 0;
+ unsigned BitPos = 0, IntegerEltIdx = 0;
+ SDValue Vec = DAG.getUNDEF(IntegerViaVecVT);
+
+ for (unsigned I = 0; I < NumElts; I++, BitPos++) {
+ // Once we accumulate enough bits to fill our scalar type, insert into
+ // our vector and clear our accumulated data.
+ if (I != 0 && I % NumViaIntegerBits == 0) {
+ if (NumViaIntegerBits <= 32)
+ Bits = SignExtend64<32>(Bits);
+ SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
+ Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec,
+ Elt, DAG.getConstant(IntegerEltIdx, DL, XLenVT));
+ Bits = 0;
+ BitPos = 0;
+ IntegerEltIdx++;
+ }
+ SDValue V = Op.getOperand(I);
+ bool BitValue = !V.isUndef() && cast<ConstantSDNode>(V)->getZExtValue();
+ Bits |= ((uint64_t)BitValue << BitPos);
+ }
+
+ // Insert the (remaining) scalar value into position in our integer
+ // vector type.
+ if (NumViaIntegerBits <= 32)
+ Bits = SignExtend64<32>(Bits);
+ SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
+ Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec, Elt,
+ DAG.getConstant(IntegerEltIdx, DL, XLenVT));
+
+ if (NumElts < NumViaIntegerBits) {
+ // If we're producing a smaller vector than our minimum legal integer
+ // type, bitcast to the equivalent (known-legal) mask type, and extract
+ // our final mask.
+ assert(IntegerViaVecVT == MVT::v1i8 && "Unexpected mask vector type");
+ Vec = DAG.getBitcast(MVT::v8i1, Vec);
+ Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Vec,
+ DAG.getConstant(0, DL, XLenVT));
+ } else {
+ // Else we must have produced an integer type with the same size as the
+ // mask type; bitcast for the final result.
+ assert(VT.getSizeInBits() == IntegerViaVecVT.getSizeInBits());
+ Vec = DAG.getBitcast(VT, Vec);
+ }
+
+ return Vec;
+ }
+
+ // A BUILD_VECTOR can be lowered as a SETCC. For each fixed-length mask
+ // vector type, we have a legal equivalently-sized i8 type, so we can use
+ // that.
+ MVT WideVecVT = VT.changeVectorElementType(MVT::i8);
+ SDValue VecZero = DAG.getConstant(0, DL, WideVecVT);
+
+ SDValue WideVec;
+ if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
+ // For a splat, perform a scalar truncate before creating the wider
+ // vector.
+ assert(Splat.getValueType() == XLenVT &&
+ "Unexpected type for i1 splat value");
+ Splat = DAG.getNode(ISD::AND, DL, XLenVT, Splat,
+ DAG.getConstant(1, DL, XLenVT));
+ WideVec = DAG.getSplatBuildVector(WideVecVT, DL, Splat);
+ } else {
+ SmallVector<SDValue, 8> Ops(Op->op_values());
+ WideVec = DAG.getBuildVector(WideVecVT, DL, Ops);
+ SDValue VecOne = DAG.getConstant(1, DL, WideVecVT);
+ WideVec = DAG.getNode(ISD::AND, DL, WideVecVT, WideVec, VecOne);
+ }
+
+ return DAG.getSetCC(DL, VT, WideVec, VecZero, ISD::SETNE);
+ }
+
+ if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
+ if (auto Gather = matchSplatAsGather(Splat, VT, DL, DAG, Subtarget))
+ return Gather;
+ unsigned Opc = VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL
+ : RISCVISD::VMV_V_X_VL;
+ Splat =
+ DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Splat, VL);
+ return convertFromScalableVector(VT, Splat, DAG, Subtarget);
+ }
+
+ // Try and match index sequences, which we can lower to the vid instruction
+ // with optional modifications. An all-undef vector is matched by
+ // getSplatValue, above.
+ if (auto SimpleVID = isSimpleVIDSequence(Op)) {
+ int64_t StepNumerator = SimpleVID->StepNumerator;
+ unsigned StepDenominator = SimpleVID->StepDenominator;
+ int64_t Addend = SimpleVID->Addend;
+
+ assert(StepNumerator != 0 && "Invalid step");
+ bool Negate = false;
+ int64_t SplatStepVal = StepNumerator;
+ unsigned StepOpcode = ISD::MUL;
+ if (StepNumerator != 1) {
+ if (isPowerOf2_64(std::abs(StepNumerator))) {
+ Negate = StepNumerator < 0;
+ StepOpcode = ISD::SHL;
+ SplatStepVal = Log2_64(std::abs(StepNumerator));
+ }
+ }
+
+ // Only emit VIDs with suitably-small steps/addends. We use imm5 is a
+ // threshold since it's the immediate value many RVV instructions accept.
+ // There is no vmul.vi instruction so ensure multiply constant can fit in
+ // a single addi instruction.
+ if (((StepOpcode == ISD::MUL && isInt<12>(SplatStepVal)) ||
+ (StepOpcode == ISD::SHL && isUInt<5>(SplatStepVal))) &&
+ isPowerOf2_32(StepDenominator) &&
+ (SplatStepVal >= 0 || StepDenominator == 1) && isInt<5>(Addend)) {
+ MVT VIDVT =
+ VT.isFloatingPoint() ? VT.changeVectorElementTypeToInteger() : VT;
+ MVT VIDContainerVT =
+ getContainerForFixedLengthVector(DAG, VIDVT, Subtarget);
+ SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, VIDContainerVT, Mask, VL);
+ // Convert right out of the scalable type so we can use standard ISD
+ // nodes for the rest of the computation. If we used scalable types with
+ // these, we'd lose the fixed-length vector info and generate worse
+ // vsetvli code.
+ VID = convertFromScalableVector(VIDVT, VID, DAG, Subtarget);
+ if ((StepOpcode == ISD::MUL && SplatStepVal != 1) ||
+ (StepOpcode == ISD::SHL && SplatStepVal != 0)) {
+ SDValue SplatStep = DAG.getSplatBuildVector(
+ VIDVT, DL, DAG.getConstant(SplatStepVal, DL, XLenVT));
+ VID = DAG.getNode(StepOpcode, DL, VIDVT, VID, SplatStep);
+ }
+ if (StepDenominator != 1) {
+ SDValue SplatStep = DAG.getSplatBuildVector(
+ VIDVT, DL, DAG.getConstant(Log2_64(StepDenominator), DL, XLenVT));
+ VID = DAG.getNode(ISD::SRL, DL, VIDVT, VID, SplatStep);
+ }
+ if (Addend != 0 || Negate) {
+ SDValue SplatAddend = DAG.getSplatBuildVector(
+ VIDVT, DL, DAG.getConstant(Addend, DL, XLenVT));
+ VID = DAG.getNode(Negate ? ISD::SUB : ISD::ADD, DL, VIDVT, SplatAddend,
+ VID);
+ }
+ if (VT.isFloatingPoint()) {
+ // TODO: Use vfwcvt to reduce register pressure.
+ VID = DAG.getNode(ISD::SINT_TO_FP, DL, VT, VID);
+ }
+ return VID;
+ }
+ }
+
+ // Attempt to detect "hidden" splats, which only reveal themselves as splats
+ // when re-interpreted as a vector with a larger element type. For example,
+ // v4i16 = build_vector i16 0, i16 1, i16 0, i16 1
+ // could be instead splat as
+ // v2i32 = build_vector i32 0x00010000, i32 0x00010000
+ // TODO: This optimization could also work on non-constant splats, but it
+ // would require bit-manipulation instructions to construct the splat value.
+ SmallVector<SDValue> Sequence;
+ unsigned EltBitSize = VT.getScalarSizeInBits();
+ const auto *BV = cast<BuildVectorSDNode>(Op);
+ if (VT.isInteger() && EltBitSize < 64 &&
+ ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
+ BV->getRepeatedSequence(Sequence) &&
+ (Sequence.size() * EltBitSize) <= 64) {
+ unsigned SeqLen = Sequence.size();
+ MVT ViaIntVT = MVT::getIntegerVT(EltBitSize * SeqLen);
+ MVT ViaVecVT = MVT::getVectorVT(ViaIntVT, NumElts / SeqLen);
+ assert((ViaIntVT == MVT::i16 || ViaIntVT == MVT::i32 ||
+ ViaIntVT == MVT::i64) &&
+ "Unexpected sequence type");
+
+ unsigned EltIdx = 0;
+ uint64_t EltMask = maskTrailingOnes<uint64_t>(EltBitSize);
+ uint64_t SplatValue = 0;
+ // Construct the amalgamated value which can be splatted as this larger
+ // vector type.
+ for (const auto &SeqV : Sequence) {
+ if (!SeqV.isUndef())
+ SplatValue |= ((cast<ConstantSDNode>(SeqV)->getZExtValue() & EltMask)
+ << (EltIdx * EltBitSize));
+ EltIdx++;
+ }
+
+ // On RV64, sign-extend from 32 to 64 bits where possible in order to
+ // achieve better constant materializion.
+ if (Subtarget.is64Bit() && ViaIntVT == MVT::i32)
+ SplatValue = SignExtend64<32>(SplatValue);
+
+ // Since we can't introduce illegal i64 types at this stage, we can only
+ // perform an i64 splat on RV32 if it is its own sign-extended value. That
+ // way we can use RVV instructions to splat.
+ assert((ViaIntVT.bitsLE(XLenVT) ||
+ (!Subtarget.is64Bit() && ViaIntVT == MVT::i64)) &&
+ "Unexpected bitcast sequence");
+ if (ViaIntVT.bitsLE(XLenVT) || isInt<32>(SplatValue)) {
+ SDValue ViaVL =
+ DAG.getConstant(ViaVecVT.getVectorNumElements(), DL, XLenVT);
+ MVT ViaContainerVT =
+ getContainerForFixedLengthVector(DAG, ViaVecVT, Subtarget);
+ SDValue Splat =
+ DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ViaContainerVT,
+ DAG.getUNDEF(ViaContainerVT),
+ DAG.getConstant(SplatValue, DL, XLenVT), ViaVL);
+ Splat = convertFromScalableVector(ViaVecVT, Splat, DAG, Subtarget);
+ return DAG.getBitcast(VT, Splat);
+ }
+ }
+
+ // Try and optimize BUILD_VECTORs with "dominant values" - these are values
+ // which constitute a large proportion of the elements. In such cases we can
+ // splat a vector with the dominant element and make up the shortfall with
+ // INSERT_VECTOR_ELTs.
+ // Note that this includes vectors of 2 elements by association. The
+ // upper-most element is the "dominant" one, allowing us to use a splat to
+ // "insert" the upper element, and an insert of the lower element at position
+ // 0, which improves codegen.
+ SDValue DominantValue;
+ unsigned MostCommonCount = 0;
+ DenseMap<SDValue, unsigned> ValueCounts;
+ unsigned NumUndefElts =
+ count_if(Op->op_values(), [](const SDValue &V) { return V.isUndef(); });
+
+ // Track the number of scalar loads we know we'd be inserting, estimated as
+ // any non-zero floating-point constant. Other kinds of element are either
+ // already in registers or are materialized on demand. The threshold at which
+ // a vector load is more desirable than several scalar materializion and
+ // vector-insertion instructions is not known.
+ unsigned NumScalarLoads = 0;
+
+ for (SDValue V : Op->op_values()) {
+ if (V.isUndef())
+ continue;
+
+ ValueCounts.insert(std::make_pair(V, 0));
+ unsigned &Count = ValueCounts[V];
+
+ if (auto *CFP = dyn_cast<ConstantFPSDNode>(V))
+ NumScalarLoads += !CFP->isExactlyValue(+0.0);
+
+ // Is this value dominant? In case of a tie, prefer the highest element as
+ // it's cheaper to insert near the beginning of a vector than it is at the
+ // end.
+ if (++Count >= MostCommonCount) {
+ DominantValue = V;
+ MostCommonCount = Count;
+ }
+ }
+
+ assert(DominantValue && "Not expecting an all-undef BUILD_VECTOR");
+ unsigned NumDefElts = NumElts - NumUndefElts;
+ unsigned DominantValueCountThreshold = NumDefElts <= 2 ? 0 : NumDefElts - 2;
+
+ // Don't perform this optimization when optimizing for size, since
+ // materializing elements and inserting them tends to cause code bloat.
+ if (!DAG.shouldOptForSize() && NumScalarLoads < NumElts &&
+ ((MostCommonCount > DominantValueCountThreshold) ||
+ (ValueCounts.size() <= Log2_32(NumDefElts)))) {
+ // Start by splatting the most common element.
+ SDValue Vec = DAG.getSplatBuildVector(VT, DL, DominantValue);
+
+ DenseSet<SDValue> Processed{DominantValue};
+ MVT SelMaskTy = VT.changeVectorElementType(MVT::i1);
+ for (const auto &OpIdx : enumerate(Op->ops())) {
+ const SDValue &V = OpIdx.value();
+ if (V.isUndef() || !Processed.insert(V).second)
+ continue;
+ if (ValueCounts[V] == 1) {
+ Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Vec, V,
+ DAG.getConstant(OpIdx.index(), DL, XLenVT));
+ } else {
+ // Blend in all instances of this value using a VSELECT, using a
+ // mask where each bit signals whether that element is the one
+ // we're after.
+ SmallVector<SDValue> Ops;
+ transform(Op->op_values(), std::back_inserter(Ops), [&](SDValue V1) {
+ return DAG.getConstant(V == V1, DL, XLenVT);
+ });
+ Vec = DAG.getNode(ISD::VSELECT, DL, VT,
+ DAG.getBuildVector(SelMaskTy, DL, Ops),
+ DAG.getSplatBuildVector(VT, DL, V), Vec);
+ }
+ }
+
+ return Vec;
+ }
+
+ return SDValue();
+}
+
+static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru,
+ SDValue Lo, SDValue Hi, SDValue VL,
+ SelectionDAG &DAG) {
+ if (!Passthru)
+ Passthru = DAG.getUNDEF(VT);
+ if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
+ int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
+ int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
+ // If Hi constant is all the same sign bit as Lo, lower this as a custom
+ // node in order to try and match RVV vector/scalar instructions.
+ if ((LoC >> 31) == HiC)
+ return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Passthru, Lo, VL);
+
+ // If vl is equal to XLEN_MAX and Hi constant is equal to Lo, we could use
+ // vmv.v.x whose EEW = 32 to lower it.
+ auto *Const = dyn_cast<ConstantSDNode>(VL);
+ if (LoC == HiC && Const && Const->isAllOnesValue()) {
+ MVT InterVT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
+ // TODO: if vl <= min(VLMAX), we can also do this. But we could not
+ // access the subtarget here now.
+ auto InterVec = DAG.getNode(
+ RISCVISD::VMV_V_X_VL, DL, InterVT, DAG.getUNDEF(InterVT), Lo,
+ DAG.getRegister(RISCV::X0, MVT::i32));
+ return DAG.getNode(ISD::BITCAST, DL, VT, InterVec);
+ }
+ }
+
+ // Fall back to a stack store and stride x0 vector load.
+ return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VT, Passthru, Lo,
+ Hi, VL);
+}
+
+// Called by type legalization to handle splat of i64 on RV32.
+// FIXME: We can optimize this when the type has sign or zero bits in one
+// of the halves.
+static SDValue splatSplitI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru,
+ SDValue Scalar, SDValue VL,
+ SelectionDAG &DAG) {
+ assert(Scalar.getValueType() == MVT::i64 && "Unexpected VT!");
+ SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
+ DAG.getConstant(0, DL, MVT::i32));
+ SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
+ DAG.getConstant(1, DL, MVT::i32));
+ return splatPartsI64WithVL(DL, VT, Passthru, Lo, Hi, VL, DAG);
+}
+
+// This function lowers a splat of a scalar operand Splat with the vector
+// length VL. It ensures the final sequence is type legal, which is useful when
+// lowering a splat after type legalization.
+static SDValue lowerScalarSplat(SDValue Passthru, SDValue Scalar, SDValue VL,
+ MVT VT, SDLoc DL, SelectionDAG &DAG,
+ const RISCVSubtarget &Subtarget) {
+ bool HasPassthru = Passthru && !Passthru.isUndef();
+ if (!HasPassthru && !Passthru)
+ Passthru = DAG.getUNDEF(VT);
+ if (VT.isFloatingPoint()) {
+ // If VL is 1, we could use vfmv.s.f.
+ if (isOneConstant(VL))
+ return DAG.getNode(RISCVISD::VFMV_S_F_VL, DL, VT, Passthru, Scalar, VL);
+ return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, VT, Passthru, Scalar, VL);
+ }
+
+ MVT XLenVT = Subtarget.getXLenVT();
+
+ // Simplest case is that the operand needs to be promoted to XLenVT.
+ if (Scalar.getValueType().bitsLE(XLenVT)) {
+ // If the operand is a constant, sign extend to increase our chances
+ // of being able to use a .vi instruction. ANY_EXTEND would become a
+ // a zero extend and the simm5 check in isel would fail.
+ // FIXME: Should we ignore the upper bits in isel instead?
+ unsigned ExtOpc =
+ isa<ConstantSDNode>(Scalar) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
+ Scalar = DAG.getNode(ExtOpc, DL, XLenVT, Scalar);
+ ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Scalar);
+ // If VL is 1 and the scalar value won't benefit from immediate, we could
+ // use vmv.s.x.
+ if (isOneConstant(VL) &&
+ (!Const || isNullConstant(Scalar) || !isInt<5>(Const->getSExtValue())))
+ return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, VT, Passthru, Scalar, VL);
+ return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Passthru, Scalar, VL);
+ }
+
+ assert(XLenVT == MVT::i32 && Scalar.getValueType() == MVT::i64 &&
+ "Unexpected scalar for splat lowering!");
+
+ if (isOneConstant(VL) && isNullConstant(Scalar))
+ return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, VT, Passthru,
+ DAG.getConstant(0, DL, XLenVT), VL);
+
+ // Otherwise use the more complicated splatting algorithm.
+ return splatSplitI64WithVL(DL, VT, Passthru, Scalar, VL, DAG);
+}
+
+static MVT getLMUL1VT(MVT VT) {
+ assert(VT.getVectorElementType().getSizeInBits() <= 64 &&
+ "Unexpected vector MVT");
+ return MVT::getScalableVectorVT(
+ VT.getVectorElementType(),
+ RISCV::RVVBitsPerBlock / VT.getVectorElementType().getSizeInBits());
+}
+
+// This function lowers an insert of a scalar operand Scalar into lane
+// 0 of the vector regardless of the value of VL. The contents of the
+// remaining lanes of the result vector are unspecified. VL is assumed
+// to be non-zero.
+static SDValue lowerScalarInsert(SDValue Scalar, SDValue VL,
+ MVT VT, SDLoc DL, SelectionDAG &DAG,
+ const RISCVSubtarget &Subtarget) {
+ const MVT XLenVT = Subtarget.getXLenVT();
+
+ SDValue Passthru = DAG.getUNDEF(VT);
+ if (VT.isFloatingPoint()) {
+ // TODO: Use vmv.v.i for appropriate constants
+ // Use M1 or smaller to avoid over constraining register allocation
+ const MVT M1VT = getLMUL1VT(VT);
+ auto InnerVT = VT.bitsLE(M1VT) ? VT : M1VT;
+ SDValue Result = DAG.getNode(RISCVISD::VFMV_S_F_VL, DL, InnerVT,
+ DAG.getUNDEF(InnerVT), Scalar, VL);
+ if (VT != InnerVT)
+ Result = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
+ DAG.getUNDEF(VT),
+ Result, DAG.getConstant(0, DL, XLenVT));
+ return Result;
+ }
+
+
+ // Avoid the tricky legalization cases by falling back to using the
+ // splat code which already handles it gracefully.
+ if (!Scalar.getValueType().bitsLE(XLenVT))
+ return lowerScalarSplat(DAG.getUNDEF(VT), Scalar,
+ DAG.getConstant(1, DL, XLenVT),
+ VT, DL, DAG, Subtarget);
+
+ // If the operand is a constant, sign extend to increase our chances
+ // of being able to use a .vi instruction. ANY_EXTEND would become a
+ // a zero extend and the simm5 check in isel would fail.
+ // FIXME: Should we ignore the upper bits in isel instead?
+ unsigned ExtOpc =
+ isa<ConstantSDNode>(Scalar) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
+ Scalar = DAG.getNode(ExtOpc, DL, XLenVT, Scalar);
+ // We use a vmv.v.i if possible. We limit this to LMUL1. LMUL2 or
+ // higher would involve overly constraining the register allocator for
+ // no purpose.
+ if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Scalar)) {
+ if (!isNullConstant(Scalar) && isInt<5>(Const->getSExtValue()) &&
+ VT.bitsLE(getLMUL1VT(VT)))
+ return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Passthru, Scalar, VL);
+ }
+ // Use M1 or smaller to avoid over constraining register allocation
+ const MVT M1VT = getLMUL1VT(VT);
+ auto InnerVT = VT.bitsLE(M1VT) ? VT : M1VT;
+ SDValue Result = DAG.getNode(RISCVISD::VMV_S_X_VL, DL, InnerVT,
+ DAG.getUNDEF(InnerVT), Scalar, VL);
+ if (VT != InnerVT)
+ Result = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
+ DAG.getUNDEF(VT),
+ Result, DAG.getConstant(0, DL, XLenVT));
+ return Result;
+
+}
+
+static bool isInterleaveShuffle(ArrayRef<int> Mask, MVT VT, bool &SwapSources,
+ const RISCVSubtarget &Subtarget) {
+ // We need to be able to widen elements to the next larger integer type.
+ if (VT.getScalarSizeInBits() >= Subtarget.getELEN())
+ return false;
+
+ int Size = Mask.size();
+ assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
+
+ int Srcs[] = {-1, -1};
+ for (int i = 0; i != Size; ++i) {
+ // Ignore undef elements.
+ if (Mask[i] < 0)
+ continue;
+
+ // Is this an even or odd element.
+ int Pol = i % 2;
+
+ // Ensure we consistently use the same source for this element polarity.
+ int Src = Mask[i] / Size;
+ if (Srcs[Pol] < 0)
+ Srcs[Pol] = Src;
+ if (Srcs[Pol] != Src)
+ return false;
+
+ // Make sure the element within the source is appropriate for this element
+ // in the destination.
+ int Elt = Mask[i] % Size;
+ if (Elt != i / 2)
+ return false;
+ }
+
+ // We need to find a source for each polarity and they can't be the same.
+ if (Srcs[0] < 0 || Srcs[1] < 0 || Srcs[0] == Srcs[1])
+ return false;
+
+ // Swap the sources if the second source was in the even polarity.
+ SwapSources = Srcs[0] > Srcs[1];
+
+ return true;
+}
+
+/// Match shuffles that concatenate two vectors, rotate the concatenation,
+/// and then extract the original number of elements from the rotated result.
+/// This is equivalent to vector.splice or X86's PALIGNR instruction. The
+/// returned rotation amount is for a rotate right, where elements move from
+/// higher elements to lower elements. \p LoSrc indicates the first source
+/// vector of the rotate or -1 for undef. \p HiSrc indicates the second vector
+/// of the rotate or -1 for undef. At least one of \p LoSrc and \p HiSrc will be
+/// 0 or 1 if a rotation is found.
+///
+/// NOTE: We talk about rotate to the right which matches how bit shift and
+/// rotate instructions are described where LSBs are on the right, but LLVM IR
+/// and the table below write vectors with the lowest elements on the left.
+static int isElementRotate(int &LoSrc, int &HiSrc, ArrayRef<int> Mask) {
+ int Size = Mask.size();
+
+ // We need to detect various ways of spelling a rotation:
+ // [11, 12, 13, 14, 15, 0, 1, 2]
+ // [-1, 12, 13, 14, -1, -1, 1, -1]
+ // [-1, -1, -1, -1, -1, -1, 1, 2]
+ // [ 3, 4, 5, 6, 7, 8, 9, 10]
+ // [-1, 4, 5, 6, -1, -1, 9, -1]
+ // [-1, 4, 5, 6, -1, -1, -1, -1]
+ int Rotation = 0;
+ LoSrc = -1;
+ HiSrc = -1;
+ for (int i = 0; i != Size; ++i) {
+ int M = Mask[i];
+ if (M < 0)
+ continue;
+
+ // Determine where a rotate vector would have started.
+ int StartIdx = i - (M % Size);
+ // The identity rotation isn't interesting, stop.
+ if (StartIdx == 0)
+ return -1;
+
+ // If we found the tail of a vector the rotation must be the missing
+ // front. If we found the head of a vector, it must be how much of the
+ // head.
+ int CandidateRotation = StartIdx < 0 ? -StartIdx : Size - StartIdx;
+
+ if (Rotation == 0)
+ Rotation = CandidateRotation;
+ else if (Rotation != CandidateRotation)
+ // The rotations don't match, so we can't match this mask.
+ return -1;
+
+ // Compute which value this mask is pointing at.
+ int MaskSrc = M < Size ? 0 : 1;
+
+ // Compute which of the two target values this index should be assigned to.
+ // This reflects whether the high elements are remaining or the low elemnts
+ // are remaining.
+ int &TargetSrc = StartIdx < 0 ? HiSrc : LoSrc;
+
+ // Either set up this value if we've not encountered it before, or check
+ // that it remains consistent.
+ if (TargetSrc < 0)
+ TargetSrc = MaskSrc;
+ else if (TargetSrc != MaskSrc)
+ // This may be a rotation, but it pulls from the inputs in some
+ // unsupported interleaving.
+ return -1;
+ }
+
+ // Check that we successfully analyzed the mask, and normalize the results.
+ assert(Rotation != 0 && "Failed to locate a viable rotation!");
+ assert((LoSrc >= 0 || HiSrc >= 0) &&
+ "Failed to find a rotated input vector!");
+
+ return Rotation;
+}
+
+// Lower the following shuffles to vnsrl.
+// t34: v8i8 = extract_subvector t11, Constant:i64<0>
+// t33: v8i8 = extract_subvector t11, Constant:i64<8>
+// a) t35: v8i8 = vector_shuffle<0,2,4,6,8,10,12,14> t34, t33
+// b) t35: v8i8 = vector_shuffle<1,3,5,7,9,11,13,15> t34, t33
+//
+// That is, a deinterleave that picks all even (a) or all odd (b) elements of
+// one source vector whose two halves were extracted into V1/V2. It is
+// implemented by viewing the source at twice the element width and doing a
+// narrowing shift-right (vnsrl) by 0 (even elements) or by the element size
+// (odd elements). Returns SDValue() when the pattern does not match.
+static SDValue lowerVECTOR_SHUFFLEAsVNSRL(const SDLoc &DL, MVT VT,
+ MVT ContainerVT, SDValue V1,
+ SDValue V2, SDValue TrueMask,
+ SDValue VL, ArrayRef<int> Mask,
+ const RISCVSubtarget &Subtarget,
+ SelectionDAG &DAG) {
+ // Need to be able to widen the vector.
+ if (VT.getScalarSizeInBits() >= Subtarget.getELEN())
+ return SDValue();
+
+ // Both inputs must be extracts.
+ if (V1.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
+ V2.getOpcode() != ISD::EXTRACT_SUBVECTOR)
+ return SDValue();
+
+ // Extracting from the same source.
+ SDValue Src = V1.getOperand(0);
+ if (Src != V2.getOperand(0))
+ return SDValue();
+
+ // Src needs to have twice the number of elements.
+ if (Src.getValueType().getVectorNumElements() != (Mask.size() * 2))
+ return SDValue();
+
+ // The extracts must extract the two halves of the source.
+ if (V1.getConstantOperandVal(1) != 0 ||
+ V2.getConstantOperandVal(1) != Mask.size())
+ return SDValue();
+
+ // First index must be the first even or odd element from V1.
+ if (Mask[0] != 0 && Mask[0] != 1)
+ return SDValue();
+
+ // The others must increase by 2 each time.
+ // TODO: Support undef elements?
+ for (unsigned i = 1; i != Mask.size(); ++i)
+ if (Mask[i] != Mask[i - 1] + 2)
+ return SDValue();
+
+ // Convert the source using a container type with twice the elements. Since
+ // source VT is legal and twice this VT, we know VT isn't LMUL=8 so it is
+ // safe to double.
+ MVT DoubleContainerVT =
+ MVT::getVectorVT(ContainerVT.getVectorElementType(),
+ ContainerVT.getVectorElementCount() * 2);
+ Src = convertToScalableVector(DoubleContainerVT, Src, DAG, Subtarget);
+
+ // Convert the vector to a wider integer type with the original element
+ // count. This also converts FP to int.
+ unsigned EltBits = ContainerVT.getScalarSizeInBits();
+ MVT WideIntEltVT = MVT::getIntegerVT(EltBits * 2);
+ MVT WideIntContainerVT =
+ MVT::getVectorVT(WideIntEltVT, ContainerVT.getVectorElementCount());
+ Src = DAG.getBitcast(WideIntContainerVT, Src);
+
+ // Convert to the integer version of the container type.
+ MVT IntEltVT = MVT::getIntegerVT(EltBits);
+ MVT IntContainerVT =
+ MVT::getVectorVT(IntEltVT, ContainerVT.getVectorElementCount());
+
+ // If we want even elements, then the shift amount is 0. Otherwise, shift by
+ // the original element size.
+ unsigned Shift = Mask[0] == 0 ? 0 : EltBits;
+ SDValue SplatShift = DAG.getNode(
+ RISCVISD::VMV_V_X_VL, DL, IntContainerVT, DAG.getUNDEF(ContainerVT),
+ DAG.getConstant(Shift, DL, Subtarget.getXLenVT()), VL);
+ SDValue Res =
+ DAG.getNode(RISCVISD::VNSRL_VL, DL, IntContainerVT, Src, SplatShift,
+ DAG.getUNDEF(IntContainerVT), TrueMask, VL);
+ // Cast back to FP if needed.
+ Res = DAG.getBitcast(ContainerVT, Res);
+
+ return convertFromScalableVector(VT, Res, DAG, Subtarget);
+}
+
+// Build a RISCVISD::VSLIDEDOWN_VL node with the given merge value, source,
+// element offset, mask and VL. If Merge is undef there are no elements to
+// preserve, so the policy is upgraded to tail/mask agnostic regardless of the
+// Policy argument, giving the hardware maximum freedom.
+static SDValue
+getVSlidedown(SelectionDAG &DAG, const RISCVSubtarget &Subtarget, SDLoc DL,
+ EVT VT, SDValue Merge, SDValue Op, SDValue Offset, SDValue Mask,
+ SDValue VL,
+ unsigned Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED) {
+ if (Merge.isUndef())
+ Policy = RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC;
+ SDValue PolicyOp = DAG.getTargetConstant(Policy, DL, Subtarget.getXLenVT());
+ SDValue Ops[] = {Merge, Op, Offset, Mask, VL, PolicyOp};
+ return DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, VT, Ops);
+}
+
+// Build a RISCVISD::VSLIDEUP_VL node with the given merge value, source,
+// element offset, mask and VL. Mirrors getVSlidedown: an undef Merge upgrades
+// the policy to tail/mask agnostic since no destination elements need to be
+// preserved.
+static SDValue
+getVSlideup(SelectionDAG &DAG, const RISCVSubtarget &Subtarget, SDLoc DL,
+ EVT VT, SDValue Merge, SDValue Op, SDValue Offset, SDValue Mask,
+ SDValue VL,
+ unsigned Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED) {
+ if (Merge.isUndef())
+ Policy = RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC;
+ SDValue PolicyOp = DAG.getTargetConstant(Policy, DL, Subtarget.getXLenVT());
+ SDValue Ops[] = {Merge, Op, Offset, Mask, VL, PolicyOp};
+ return DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, VT, Ops);
+}
+
+// Lower the following shuffle to vslidedown.
+// a)
+// t49: v8i8 = extract_subvector t13, Constant:i64<0>
+// t109: v8i8 = extract_subvector t13, Constant:i64<8>
+// t108: v8i8 = vector_shuffle<1,2,3,4,5,6,7,8> t49, t106
+// b)
+// t69: v16i16 = extract_subvector t68, Constant:i64<0>
+// t23: v8i16 = extract_subvector t69, Constant:i64<0>
+// t29: v4i16 = extract_subvector t23, Constant:i64<4>
+// t26: v8i16 = extract_subvector t69, Constant:i64<8>
+// t30: v4i16 = extract_subvector t26, Constant:i64<0>
+// t54: v4i16 = vector_shuffle<1,2,3,4> t29, t30
+//
+// Both operands are walked through (chains of) fixed-length
+// EXTRACT_SUBVECTORs back to a common ultimate source; if the mask, rebased
+// into that source's index space, is a contiguous run starting at a non-zero
+// index, the whole shuffle is a single slidedown of the source by that index.
+// Returns SDValue() when the pattern does not match.
+static SDValue lowerVECTOR_SHUFFLEAsVSlidedown(const SDLoc &DL, MVT VT,
+ SDValue V1, SDValue V2,
+ ArrayRef<int> Mask,
+ const RISCVSubtarget &Subtarget,
+ SelectionDAG &DAG) {
+ // Strip nested EXTRACT_SUBVECTORs, accumulating the total element offset
+ // into the outermost non-extract ancestor.
+ auto findNonEXTRACT_SUBVECTORParent =
+ [](SDValue Parent) -> std::pair<SDValue, uint64_t> {
+ uint64_t Offset = 0;
+ while (Parent.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
+ // EXTRACT_SUBVECTOR can be used to extract a fixed-width vector from
+ // a scalable vector. But we don't want to match the case.
+ Parent.getOperand(0).getSimpleValueType().isFixedLengthVector()) {
+ Offset += Parent.getConstantOperandVal(1);
+ Parent = Parent.getOperand(0);
+ }
+ return std::make_pair(Parent, Offset);
+ };
+
+ auto [V1Src, V1IndexOffset] = findNonEXTRACT_SUBVECTORParent(V1);
+ auto [V2Src, V2IndexOffset] = findNonEXTRACT_SUBVECTORParent(V2);
+
+ // Extracting from the same source.
+ SDValue Src = V1Src;
+ if (Src != V2Src)
+ return SDValue();
+
+ // Rebuild mask because Src may be from multiple EXTRACT_SUBVECTORs.
+ SmallVector<int, 16> NewMask(Mask);
+ for (size_t i = 0; i != NewMask.size(); ++i) {
+ if (NewMask[i] == -1)
+ continue;
+
+ if (static_cast<size_t>(NewMask[i]) < NewMask.size()) {
+ NewMask[i] = NewMask[i] + V1IndexOffset;
+ } else {
+ // Minus NewMask.size() is needed. Otherwise, the b case would be
+ // <5,6,7,12> instead of <5,6,7,8>.
+ NewMask[i] = NewMask[i] - NewMask.size() + V2IndexOffset;
+ }
+ }
+
+ // First index must be known and non-zero. It will be used as the slidedown
+ // amount.
+ if (NewMask[0] <= 0)
+ return SDValue();
+
+ // NewMask is also continuous.
+ for (unsigned i = 1; i != NewMask.size(); ++i)
+ if (NewMask[i - 1] + 1 != NewMask[i])
+ return SDValue();
+
+ MVT XLenVT = Subtarget.getXLenVT();
+ MVT SrcVT = Src.getSimpleValueType();
+ MVT ContainerVT = getContainerForFixedLengthVector(DAG, SrcVT, Subtarget);
+ auto [TrueMask, VL] = getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget);
+ // Slide the common source down by NewMask[0] and take the leading VT-sized
+ // part of the result.
+ SDValue Slidedown =
+ getVSlidedown(DAG, Subtarget, DL, ContainerVT, DAG.getUNDEF(ContainerVT),
+ convertToScalableVector(ContainerVT, Src, DAG, Subtarget),
+ DAG.getConstant(NewMask[0], DL, XLenVT), TrueMask, VL);
+ return DAG.getNode(
+ ISD::EXTRACT_SUBVECTOR, DL, VT,
+ convertFromScalableVector(SrcVT, Slidedown, DAG, Subtarget),
+ DAG.getConstant(0, DL, XLenVT));
+}
+
+// Custom-lower ISD::VECTOR_SHUFFLE on fixed-length RVV vectors. Strategies
+// are attempted in order:
+//   1. splat shuffles: strided load / scalar load + splat / vrgather.vx;
+//   2. slidedown of a single shared source (lowerVECTOR_SHUFFLEAsVSlidedown);
+//   3. element rotation via vslidedown + vslideup;
+//   4. even/odd deinterleave via vnsrl (lowerVECTOR_SHUFFLEAsVNSRL);
+//   5. two-source interleave via vwaddu.vx + vwmaccu.vx;
+//   6. element-wise select (vselect) when each lane keeps its index;
+//   7. fallback: one vrgather, optionally blended with a second masked
+//      vrgather when both sources are used.
+static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
+ const RISCVSubtarget &Subtarget) {
+ SDValue V1 = Op.getOperand(0);
+ SDValue V2 = Op.getOperand(1);
+ SDLoc DL(Op);
+ MVT XLenVT = Subtarget.getXLenVT();
+ MVT VT = Op.getSimpleValueType();
+ unsigned NumElts = VT.getVectorNumElements();
+ ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
+
+ MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
+
+ auto [TrueMask, VL] = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
+
+ if (SVN->isSplat()) {
+ const int Lane = SVN->getSplatIndex();
+ if (Lane >= 0) {
+ MVT SVT = VT.getVectorElementType();
+
+ // Turn splatted vector load into a strided load with an X0 stride.
+ SDValue V = V1;
+ // Peek through CONCAT_VECTORS as VectorCombine can concat a vector
+ // with undef.
+ // FIXME: Peek through INSERT_SUBVECTOR, EXTRACT_SUBVECTOR, bitcasts?
+ int Offset = Lane;
+ if (V.getOpcode() == ISD::CONCAT_VECTORS) {
+ int OpElements =
+ V.getOperand(0).getSimpleValueType().getVectorNumElements();
+ V = V.getOperand(Offset / OpElements);
+ Offset %= OpElements;
+ }
+
+ // We need to ensure the load isn't atomic or volatile.
+ if (ISD::isNormalLoad(V.getNode()) && cast<LoadSDNode>(V)->isSimple()) {
+ auto *Ld = cast<LoadSDNode>(V);
+ Offset *= SVT.getStoreSize();
+ SDValue NewAddr = DAG.getMemBasePlusOffset(Ld->getBasePtr(),
+ TypeSize::Fixed(Offset), DL);
+
+ // If this is SEW=64 on RV32, use a strided load with a stride of x0.
+ if (SVT.isInteger() && SVT.bitsGT(XLenVT)) {
+ SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
+ SDValue IntID =
+ DAG.getTargetConstant(Intrinsic::riscv_vlse, DL, XLenVT);
+ SDValue Ops[] = {Ld->getChain(),
+ IntID,
+ DAG.getUNDEF(ContainerVT),
+ NewAddr,
+ DAG.getRegister(RISCV::X0, XLenVT),
+ VL};
+ SDValue NewLoad = DAG.getMemIntrinsicNode(
+ ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, SVT,
+ DAG.getMachineFunction().getMachineMemOperand(
+ Ld->getMemOperand(), Offset, SVT.getStoreSize()));
+ DAG.makeEquivalentMemoryOrdering(Ld, NewLoad);
+ return convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
+ }
+
+ // Otherwise use a scalar load and splat. This will give the best
+ // opportunity to fold a splat into the operation. ISel can turn it into
+ // the x0 strided load if we aren't able to fold away the select.
+ if (SVT.isFloatingPoint())
+ V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
+ Ld->getPointerInfo().getWithOffset(Offset),
+ Ld->getOriginalAlign(),
+ Ld->getMemOperand()->getFlags());
+ else
+ V = DAG.getExtLoad(ISD::SEXTLOAD, DL, XLenVT, Ld->getChain(), NewAddr,
+ Ld->getPointerInfo().getWithOffset(Offset), SVT,
+ Ld->getOriginalAlign(),
+ Ld->getMemOperand()->getFlags());
+ DAG.makeEquivalentMemoryOrdering(Ld, V);
+
+ unsigned Opc =
+ VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
+ SDValue Splat =
+ DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), V, VL);
+ return convertFromScalableVector(VT, Splat, DAG, Subtarget);
+ }
+
+ // Not fed by a load: broadcast the splatted lane with vrgather.vx.
+ V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
+ assert(Lane < (int)NumElts && "Unexpected lane!");
+ SDValue Gather = DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT,
+ V1, DAG.getConstant(Lane, DL, XLenVT),
+ DAG.getUNDEF(ContainerVT), TrueMask, VL);
+ return convertFromScalableVector(VT, Gather, DAG, Subtarget);
+ }
+ }
+
+ ArrayRef<int> Mask = SVN->getMask();
+
+ if (SDValue V =
+ lowerVECTOR_SHUFFLEAsVSlidedown(DL, VT, V1, V2, Mask, Subtarget, DAG))
+ return V;
+
+ // Lower rotations to a SLIDEDOWN and a SLIDEUP. One of the source vectors may
+ // be undef which can be handled with a single SLIDEDOWN/UP.
+ int LoSrc, HiSrc;
+ int Rotation = isElementRotate(LoSrc, HiSrc, Mask);
+ if (Rotation > 0) {
+ SDValue LoV, HiV;
+ if (LoSrc >= 0) {
+ LoV = LoSrc == 0 ? V1 : V2;
+ LoV = convertToScalableVector(ContainerVT, LoV, DAG, Subtarget);
+ }
+ if (HiSrc >= 0) {
+ HiV = HiSrc == 0 ? V1 : V2;
+ HiV = convertToScalableVector(ContainerVT, HiV, DAG, Subtarget);
+ }
+
+ // We found a rotation. We need to slide HiV down by Rotation. Then we need
+ // to slide LoV up by (NumElts - Rotation).
+ unsigned InvRotate = NumElts - Rotation;
+
+ SDValue Res = DAG.getUNDEF(ContainerVT);
+ if (HiV) {
+ // If we are doing a SLIDEDOWN+SLIDEUP, reduce the VL for the SLIDEDOWN.
+ // FIXME: If we are only doing a SLIDEDOWN, don't reduce the VL as it
+ // causes multiple vsetvlis in some test cases such as lowering
+ // reduce.mul
+ SDValue DownVL = VL;
+ if (LoV)
+ DownVL = DAG.getConstant(InvRotate, DL, XLenVT);
+ Res = getVSlidedown(DAG, Subtarget, DL, ContainerVT, Res, HiV,
+ DAG.getConstant(Rotation, DL, XLenVT), TrueMask,
+ DownVL);
+ }
+ if (LoV)
+ Res = getVSlideup(DAG, Subtarget, DL, ContainerVT, Res, LoV,
+ DAG.getConstant(InvRotate, DL, XLenVT), TrueMask, VL,
+ RISCVII::TAIL_AGNOSTIC);
+
+ return convertFromScalableVector(VT, Res, DAG, Subtarget);
+ }
+
+ if (SDValue V = lowerVECTOR_SHUFFLEAsVNSRL(
+ DL, VT, ContainerVT, V1, V2, TrueMask, VL, Mask, Subtarget, DAG))
+ return V;
+
+ // Detect an interleave shuffle and lower to
+ // (vmaccu.vx (vwaddu.vx lohalf(V1), lohalf(V2)), lohalf(V2), (2^eltbits - 1))
+ bool SwapSources;
+ if (isInterleaveShuffle(Mask, VT, SwapSources, Subtarget)) {
+ // Swap sources if needed.
+ if (SwapSources)
+ std::swap(V1, V2);
+
+ // Extract the lower half of the vectors.
+ MVT HalfVT = VT.getHalfNumVectorElementsVT();
+ V1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
+ DAG.getConstant(0, DL, XLenVT));
+ V2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V2,
+ DAG.getConstant(0, DL, XLenVT));
+
+ // Double the element width and halve the number of elements in an int type.
+ unsigned EltBits = VT.getScalarSizeInBits();
+ MVT WideIntEltVT = MVT::getIntegerVT(EltBits * 2);
+ MVT WideIntVT =
+ MVT::getVectorVT(WideIntEltVT, VT.getVectorNumElements() / 2);
+ // Convert this to a scalable vector. We need to base this on the
+ // destination size to ensure there's always a type with a smaller LMUL.
+ MVT WideIntContainerVT =
+ getContainerForFixedLengthVector(DAG, WideIntVT, Subtarget);
+
+ // Convert sources to scalable vectors with the same element count as the
+ // larger type.
+ MVT HalfContainerVT = MVT::getVectorVT(
+ VT.getVectorElementType(), WideIntContainerVT.getVectorElementCount());
+ V1 = convertToScalableVector(HalfContainerVT, V1, DAG, Subtarget);
+ V2 = convertToScalableVector(HalfContainerVT, V2, DAG, Subtarget);
+
+ // Cast sources to integer.
+ MVT IntEltVT = MVT::getIntegerVT(EltBits);
+ MVT IntHalfVT =
+ MVT::getVectorVT(IntEltVT, HalfContainerVT.getVectorElementCount());
+ V1 = DAG.getBitcast(IntHalfVT, V1);
+ V2 = DAG.getBitcast(IntHalfVT, V2);
+
+ // Freeze V2 since we use it twice and we need to be sure that the add and
+ // multiply see the same value.
+ V2 = DAG.getFreeze(V2);
+
+ // Recreate TrueMask using the widened type's element count.
+ TrueMask = getAllOnesMask(HalfContainerVT, VL, DL, DAG);
+
+ // Widen V1 and V2 with 0s and add one copy of V2 to V1.
+ SDValue Add =
+ DAG.getNode(RISCVISD::VWADDU_VL, DL, WideIntContainerVT, V1, V2,
+ DAG.getUNDEF(WideIntContainerVT), TrueMask, VL);
+ // Create 2^eltbits - 1 copies of V2 by multiplying by the largest integer.
+ SDValue Multiplier = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntHalfVT,
+ DAG.getUNDEF(IntHalfVT),
+ DAG.getAllOnesConstant(DL, XLenVT), VL);
+ SDValue WidenMul =
+ DAG.getNode(RISCVISD::VWMULU_VL, DL, WideIntContainerVT, V2, Multiplier,
+ DAG.getUNDEF(WideIntContainerVT), TrueMask, VL);
+ // Add the new copies to our previous addition giving us 2^eltbits copies of
+ // V2. This is equivalent to shifting V2 left by eltbits. This should
+ // combine with the vwmulu.vv above to form vwmaccu.vv.
+ Add = DAG.getNode(RISCVISD::ADD_VL, DL, WideIntContainerVT, Add, WidenMul,
+ DAG.getUNDEF(WideIntContainerVT), TrueMask, VL);
+ // Cast back to ContainerVT. We need to re-create a new ContainerVT in case
+ // WideIntContainerVT is a larger fractional LMUL than implied by the fixed
+ // vector VT.
+ ContainerVT =
+ MVT::getVectorVT(VT.getVectorElementType(),
+ WideIntContainerVT.getVectorElementCount() * 2);
+ Add = DAG.getBitcast(ContainerVT, Add);
+ return convertFromScalableVector(VT, Add, DAG, Subtarget);
+ }
+
+ // Detect shuffles which can be re-expressed as vector selects; these are
+ // shuffles in which each element in the destination is taken from an element
+ // at the corresponding index in either source vectors.
+ bool IsSelect = all_of(enumerate(Mask), [&](const auto &MaskIdx) {
+ int MaskIndex = MaskIdx.value();
+ return MaskIndex < 0 || MaskIdx.index() == (unsigned)MaskIndex % NumElts;
+ });
+
+ assert(!V1.isUndef() && "Unexpected shuffle canonicalization");
+
+ SmallVector<SDValue> MaskVals;
+ // As a backup, shuffles can be lowered via a vrgather instruction, possibly
+ // merged with a second vrgather.
+ SmallVector<SDValue> GatherIndicesLHS, GatherIndicesRHS;
+
+ // By default we preserve the original operand order, and use a mask to
+ // select LHS as true and RHS as false. However, since RVV vector selects may
+ // feature splats but only on the LHS, we may choose to invert our mask and
+ // instead select between RHS and LHS.
+ bool SwapOps = DAG.isSplatValue(V2) && !DAG.isSplatValue(V1);
+ bool InvertMask = IsSelect == SwapOps;
+
+ // Keep a track of which non-undef indices are used by each LHS/RHS shuffle
+ // half.
+ DenseMap<int, unsigned> LHSIndexCounts, RHSIndexCounts;
+
+ // Now construct the mask that will be used by the vselect or blended
+ // vrgather operation. For vrgathers, construct the appropriate indices into
+ // each vector.
+ for (int MaskIndex : Mask) {
+ bool SelectMaskVal = (MaskIndex < (int)NumElts) ^ InvertMask;
+ MaskVals.push_back(DAG.getConstant(SelectMaskVal, DL, XLenVT));
+ if (!IsSelect) {
+ bool IsLHSOrUndefIndex = MaskIndex < (int)NumElts;
+ GatherIndicesLHS.push_back(IsLHSOrUndefIndex && MaskIndex >= 0
+ ? DAG.getConstant(MaskIndex, DL, XLenVT)
+ : DAG.getUNDEF(XLenVT));
+ GatherIndicesRHS.push_back(
+ IsLHSOrUndefIndex ? DAG.getUNDEF(XLenVT)
+ : DAG.getConstant(MaskIndex - NumElts, DL, XLenVT));
+ if (IsLHSOrUndefIndex && MaskIndex >= 0)
+ ++LHSIndexCounts[MaskIndex];
+ if (!IsLHSOrUndefIndex)
+ ++RHSIndexCounts[MaskIndex - NumElts];
+ }
+ }
+
+ if (SwapOps) {
+ std::swap(V1, V2);
+ std::swap(GatherIndicesLHS, GatherIndicesRHS);
+ }
+
+ assert(MaskVals.size() == NumElts && "Unexpected select-like shuffle");
+ MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
+ SDValue SelectMask = DAG.getBuildVector(MaskVT, DL, MaskVals);
+
+ if (IsSelect)
+ return DAG.getNode(ISD::VSELECT, DL, VT, SelectMask, V1, V2);
+
+ if (VT.getScalarSizeInBits() == 8 && VT.getVectorNumElements() > 256) {
+ // On such a large vector we're unable to use i8 as the index type.
+ // FIXME: We could promote the index to i16 and use vrgatherei16, but that
+ // may involve vector splitting if we're already at LMUL=8, or our
+ // user-supplied maximum fixed-length LMUL.
+ return SDValue();
+ }
+
+ unsigned GatherVXOpc = RISCVISD::VRGATHER_VX_VL;
+ unsigned GatherVVOpc = RISCVISD::VRGATHER_VV_VL;
+ MVT IndexVT = VT.changeTypeToInteger();
+ // Since we can't introduce illegal index types at this stage, use i16 and
+ // vrgatherei16 if the corresponding index type for plain vrgather is greater
+ // than XLenVT.
+ if (IndexVT.getScalarType().bitsGT(XLenVT)) {
+ GatherVVOpc = RISCVISD::VRGATHEREI16_VV_VL;
+ IndexVT = IndexVT.changeVectorElementType(MVT::i16);
+ }
+
+ MVT IndexContainerVT =
+ ContainerVT.changeVectorElementType(IndexVT.getScalarType());
+
+ SDValue Gather;
+ // TODO: This doesn't trigger for i64 vectors on RV32, since there we
+ // encounter a bitcasted BUILD_VECTOR with low/high i32 values.
+ if (SDValue SplatValue = DAG.getSplatValue(V1, /*LegalTypes*/ true)) {
+ Gather = lowerScalarSplat(SDValue(), SplatValue, VL, ContainerVT, DL, DAG,
+ Subtarget);
+ } else {
+ V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
+ // If only one index is used, we can use a "splat" vrgather.
+ // TODO: We can splat the most-common index and fix-up any stragglers, if
+ // that's beneficial.
+ if (LHSIndexCounts.size() == 1) {
+ int SplatIndex = LHSIndexCounts.begin()->getFirst();
+ Gather = DAG.getNode(GatherVXOpc, DL, ContainerVT, V1,
+ DAG.getConstant(SplatIndex, DL, XLenVT),
+ DAG.getUNDEF(ContainerVT), TrueMask, VL);
+ } else {
+ SDValue LHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesLHS);
+ LHSIndices =
+ convertToScalableVector(IndexContainerVT, LHSIndices, DAG, Subtarget);
+
+ Gather = DAG.getNode(GatherVVOpc, DL, ContainerVT, V1, LHSIndices,
+ DAG.getUNDEF(ContainerVT), TrueMask, VL);
+ }
+ }
+
+ // If a second vector operand is used by this shuffle, blend it in with an
+ // additional vrgather.
+ if (!V2.isUndef()) {
+ V2 = convertToScalableVector(ContainerVT, V2, DAG, Subtarget);
+
+ MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
+ SelectMask =
+ convertToScalableVector(MaskContainerVT, SelectMask, DAG, Subtarget);
+
+ // If only one index is used, we can use a "splat" vrgather.
+ // TODO: We can splat the most-common index and fix-up any stragglers, if
+ // that's beneficial.
+ if (RHSIndexCounts.size() == 1) {
+ int SplatIndex = RHSIndexCounts.begin()->getFirst();
+ Gather = DAG.getNode(GatherVXOpc, DL, ContainerVT, V2,
+ DAG.getConstant(SplatIndex, DL, XLenVT), Gather,
+ SelectMask, VL);
+ } else {
+ SDValue RHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesRHS);
+ RHSIndices =
+ convertToScalableVector(IndexContainerVT, RHSIndices, DAG, Subtarget);
+ Gather = DAG.getNode(GatherVVOpc, DL, ContainerVT, V2, RHSIndices, Gather,
+ SelectMask, VL);
+ }
+ }
+
+ return convertFromScalableVector(VT, Gather, DAG, Subtarget);
+}
+
+// Report which shuffle masks are cheap enough that DAGCombine should keep
+// them as shuffles: splats of any type, and (for legal types) element
+// rotations and interleaves, which the lowering above handles specially.
+bool RISCVTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
+ // Support splats for any type. These should type legalize well.
+ if (ShuffleVectorSDNode::isSplatMask(M.data(), VT))
+ return true;
+
+ // Only support legal VTs for other shuffles for now.
+ if (!isTypeLegal(VT))
+ return false;
+
+ MVT SVT = VT.getSimpleVT();
+
+ bool SwapSources;
+ int LoSrc, HiSrc;
+ return (isElementRotate(LoSrc, HiSrc, M) > 0) ||
+ isInterleaveShuffle(M, SVT, SwapSources, Subtarget);
+}
+
+// Lower CTLZ_ZERO_UNDEF or CTTZ_ZERO_UNDEF by converting to FP and extracting
+// the exponent. The unsigned->FP conversion encodes floor(log2(x)) in the
+// biased exponent field; shifting it to the LSB and un-biasing yields the
+// leading/trailing zero count without a dedicated count instruction.
+SDValue
+RISCVTargetLowering::lowerCTLZ_CTTZ_ZERO_UNDEF(SDValue Op,
+ SelectionDAG &DAG) const {
+ MVT VT = Op.getSimpleValueType();
+ unsigned EltSize = VT.getScalarSizeInBits();
+ SDValue Src = Op.getOperand(0);
+ SDLoc DL(Op);
+
+ // We choose FP type that can represent the value if possible. Otherwise, we
+ // use rounding to zero conversion for correct exponent of the result.
+ // TODO: Use f16 for i8 when possible?
+ MVT FloatEltVT = (EltSize >= 32) ? MVT::f64 : MVT::f32;
+ if (!isTypeLegal(MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount())))
+ FloatEltVT = MVT::f32;
+ MVT FloatVT = MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
+
+ // Legal types should have been checked in the RISCVTargetLowering
+ // constructor.
+ // TODO: Splitting may make sense in some cases.
+ assert(DAG.getTargetLoweringInfo().isTypeLegal(FloatVT) &&
+ "Expected legal float type!");
+
+ // For CTTZ_ZERO_UNDEF, we need to extract the lowest set bit using X & -X.
+ // The trailing zero count is equal to log2 of this single bit value.
+ if (Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF) {
+ SDValue Neg = DAG.getNegative(Src, DL, VT);
+ Src = DAG.getNode(ISD::AND, DL, VT, Src, Neg);
+ }
+
+ // We have a legal FP type, convert to it.
+ SDValue FloatVal;
+ if (FloatVT.bitsGT(VT)) {
+ FloatVal = DAG.getNode(ISD::UINT_TO_FP, DL, FloatVT, Src);
+ } else {
+ // Use RTZ to avoid rounding influencing exponent of FloatVal.
+ MVT ContainerVT = VT;
+ if (VT.isFixedLengthVector()) {
+ ContainerVT = getContainerForFixedLengthVector(VT);
+ Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
+ }
+
+ auto [Mask, VL] = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
+ SDValue RTZRM =
+ DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, Subtarget.getXLenVT());
+ MVT ContainerFloatVT =
+ MVT::getVectorVT(FloatEltVT, ContainerVT.getVectorElementCount());
+ FloatVal = DAG.getNode(RISCVISD::VFCVT_RM_F_XU_VL, DL, ContainerFloatVT,
+ Src, Mask, RTZRM, VL);
+ if (VT.isFixedLengthVector())
+ FloatVal = convertFromScalableVector(FloatVT, FloatVal, DAG, Subtarget);
+ }
+ // Bitcast to integer and shift the exponent to the LSB.
+ EVT IntVT = FloatVT.changeVectorElementTypeToInteger();
+ SDValue Bitcast = DAG.getBitcast(IntVT, FloatVal);
+ // IEEE-754: f64 has a 52-bit mantissa, f32 a 23-bit one.
+ unsigned ShiftAmt = FloatEltVT == MVT::f64 ? 52 : 23;
+ SDValue Exp = DAG.getNode(ISD::SRL, DL, IntVT, Bitcast,
+ DAG.getConstant(ShiftAmt, DL, IntVT));
+ // Restore back to original type. Truncation after SRL is to generate vnsrl.
+ if (IntVT.bitsLT(VT))
+ Exp = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Exp);
+ else if (IntVT.bitsGT(VT))
+ Exp = DAG.getNode(ISD::TRUNCATE, DL, VT, Exp);
+ // The exponent contains log2 of the value in biased form.
+ unsigned ExponentBias = FloatEltVT == MVT::f64 ? 1023 : 127;
+
+ // For trailing zeros, we just need to subtract the bias.
+ if (Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF)
+ return DAG.getNode(ISD::SUB, DL, VT, Exp,
+ DAG.getConstant(ExponentBias, DL, VT));
+
+ // For leading zeros, we need to remove the bias and convert from log2 to
+ // leading zeros. We can do this by subtracting from (Bias + (EltSize - 1)).
+ unsigned Adjust = ExponentBias + (EltSize - 1);
+ SDValue Res =
+ DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(Adjust, DL, VT), Exp);
+ // The above result with zero input equals to Adjust which is greater than
+ // EltSize. Hence, we can do min(Res, EltSize) for CTLZ.
+ if (Op.getOpcode() == ISD::CTLZ)
+ Res = DAG.getNode(ISD::UMIN, DL, VT, Res, DAG.getConstant(EltSize, DL, VT));
+ return Res;
+}
+
+// While RVV has alignment restrictions, we should always be able to load as a
+// legal equivalently-sized byte-typed vector instead. This method is
+// responsible for re-expressing a ISD::LOAD via a correctly-aligned type. If
+// the load is already correctly-aligned, it returns SDValue().
+SDValue RISCVTargetLowering::expandUnalignedRVVLoad(SDValue Op,
+ SelectionDAG &DAG) const {
+ auto *Load = cast<LoadSDNode>(Op);
+ assert(Load && Load->getMemoryVT().isVector() && "Expected vector load");
+
+ // Nothing to do if the target allows this access at its current alignment.
+ if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
+ Load->getMemoryVT(),
+ *Load->getMemOperand()))
+ return SDValue();
+
+ SDLoc DL(Op);
+ MVT VT = Op.getSimpleValueType();
+ unsigned EltSizeBits = VT.getScalarSizeInBits();
+ assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
+ "Unexpected unaligned RVV load type");
+ // Same total bit width, but i8 elements: byte loads have no alignment
+ // restriction.
+ MVT NewVT =
+ MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
+ assert(NewVT.isValid() &&
+ "Expecting equally-sized RVV vector types to be legal");
+ SDValue L = DAG.getLoad(NewVT, DL, Load->getChain(), Load->getBasePtr(),
+ Load->getPointerInfo(), Load->getOriginalAlign(),
+ Load->getMemOperand()->getFlags());
+ // Return both the bitcast value and the new load's chain.
+ return DAG.getMergeValues({DAG.getBitcast(VT, L), L.getValue(1)}, DL);
+}
+
+// While RVV has alignment restrictions, we should always be able to store as a
+// legal equivalently-sized byte-typed vector instead. This method is
+// responsible for re-expressing a ISD::STORE via a correctly-aligned type. It
+// returns SDValue() if the store is already correctly aligned.
+SDValue RISCVTargetLowering::expandUnalignedRVVStore(SDValue Op,
+ SelectionDAG &DAG) const {
+ auto *Store = cast<StoreSDNode>(Op);
+ assert(Store && Store->getValue().getValueType().isVector() &&
+ "Expected vector store");
+
+ // Nothing to do if the target allows this access at its current alignment.
+ if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
+ Store->getMemoryVT(),
+ *Store->getMemOperand()))
+ return SDValue();
+
+ SDLoc DL(Op);
+ SDValue StoredVal = Store->getValue();
+ MVT VT = StoredVal.getSimpleValueType();
+ unsigned EltSizeBits = VT.getScalarSizeInBits();
+ assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
+ "Unexpected unaligned RVV store type");
+ // Same total bit width, but i8 elements: byte stores have no alignment
+ // restriction.
+ MVT NewVT =
+ MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
+ assert(NewVT.isValid() &&
+ "Expecting equally-sized RVV vector types to be legal");
+ StoredVal = DAG.getBitcast(NewVT, StoredVal);
+ return DAG.getStore(Store->getChain(), DL, StoredVal, Store->getBasePtr(),
+ Store->getPointerInfo(), Store->getOriginalAlign(),
+ Store->getMemOperand()->getFlags());
+}
+
+// Decide how to materialize an i64 constant: keep it as an ISD::Constant for
+// isel (return Op) when it is cheap enough, or return SDValue() to let the
+// default expansion place it in the constant pool.
+static SDValue lowerConstant(SDValue Op, SelectionDAG &DAG,
+ const RISCVSubtarget &Subtarget) {
+ assert(Op.getValueType() == MVT::i64 && "Unexpected VT");
+
+ int64_t Imm = cast<ConstantSDNode>(Op)->getSExtValue();
+
+ // All simm32 constants should be handled by isel.
+ // NOTE: The getMaxBuildIntsCost call below should return a value >= 2 making
+ // this check redundant, but small immediates are common so this check
+ // should have better compile time.
+ if (isInt<32>(Imm))
+ return Op;
+
+ // We only need to cost the immediate, if constant pool lowering is enabled.
+ if (!Subtarget.useConstantPoolForLargeInts())
+ return Op;
+
+ // Keep the constant in registers when its materialization sequence fits in
+ // the subtarget's instruction budget.
+ RISCVMatInt::InstSeq Seq =
+ RISCVMatInt::generateInstSeq(Imm, Subtarget.getFeatureBits());
+ if (Seq.size() <= Subtarget.getMaxBuildIntsCost())
+ return Op;
+
+ // Expand to a constant pool using the default expansion code.
+ return SDValue();
+}
+
+// Lower ISD::ATOMIC_FENCE. Operand 2 carries the synchronization scope;
+// a singlethread fence needs no hardware fence, only a compiler barrier.
+static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) {
+ SDLoc dl(Op);
+ SyncScope::ID FenceSSID =
+ static_cast<SyncScope::ID>(Op.getConstantOperandVal(2));
+
+ // singlethread fences only synchronize with signal handlers on the same
+ // thread and thus only need to preserve instruction order, not actually
+ // enforce memory ordering.
+ if (FenceSSID == SyncScope::SingleThread)
+ // MEMBARRIER is a compiler barrier; it codegens to a no-op.
+ return DAG.getNode(ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
+
+ // Other scopes: keep the fence and let isel emit a real fence instruction.
+ return Op;
+}
+
+SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
+ SelectionDAG &DAG) const {
+ switch (Op.getOpcode()) {
+ default:
+ report_fatal_error("unimplemented operand");
+ case ISD::ATOMIC_FENCE:
+ return LowerATOMIC_FENCE(Op, DAG);
+ case ISD::GlobalAddress:
+ return lowerGlobalAddress(Op, DAG);
+ case ISD::BlockAddress:
+ return lowerBlockAddress(Op, DAG);
+ case ISD::ConstantPool:
+ return lowerConstantPool(Op, DAG);
+ case ISD::JumpTable:
+ return lowerJumpTable(Op, DAG);
+ case ISD::GlobalTLSAddress:
+ return lowerGlobalTLSAddress(Op, DAG);
+ case ISD::Constant:
+ return lowerConstant(Op, DAG, Subtarget);
+ case ISD::SELECT:
+ return lowerSELECT(Op, DAG);
+ case ISD::BRCOND:
+ return lowerBRCOND(Op, DAG);
+ case ISD::VASTART:
+ return lowerVASTART(Op, DAG);
+ case ISD::FRAMEADDR:
+ return lowerFRAMEADDR(Op, DAG);
+ case ISD::RETURNADDR:
+ return lowerRETURNADDR(Op, DAG);
+ case ISD::SHL_PARTS:
+ return lowerShiftLeftParts(Op, DAG);
+ case ISD::SRA_PARTS:
+ return lowerShiftRightParts(Op, DAG, true);
+ case ISD::SRL_PARTS:
+ return lowerShiftRightParts(Op, DAG, false);
+ case ISD::BITCAST: {
+ SDLoc DL(Op);
+ EVT VT = Op.getValueType();
+ SDValue Op0 = Op.getOperand(0);
+ EVT Op0VT = Op0.getValueType();
+ MVT XLenVT = Subtarget.getXLenVT();
+ if (VT == MVT::f16 && Op0VT == MVT::i16 &&
+ Subtarget.hasStdExtZfhOrZfhmin()) {
+ SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Op0);
+ SDValue FPConv = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, NewOp0);
+ return FPConv;
+ }
+ if (VT == MVT::f32 && Op0VT == MVT::i32 && Subtarget.is64Bit() &&
+ Subtarget.hasStdExtF()) {
+ SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
+ SDValue FPConv =
+ DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
+ return FPConv;
+ }
+
+ // Consider other scalar<->scalar casts as legal if the types are legal.
+ // Otherwise expand them.
+ if (!VT.isVector() && !Op0VT.isVector()) {
+ if (isTypeLegal(VT) && isTypeLegal(Op0VT))
+ return Op;
+ return SDValue();
+ }
+
+ assert(!VT.isScalableVector() && !Op0VT.isScalableVector() &&
+ "Unexpected types");
+
+ if (VT.isFixedLengthVector()) {
+ // We can handle fixed length vector bitcasts with a simple replacement
+ // in isel.
+ if (Op0VT.isFixedLengthVector())
+ return Op;
+ // When bitcasting from scalar to fixed-length vector, insert the scalar
+ // into a one-element vector of the result type, and perform a vector
+ // bitcast.
+ if (!Op0VT.isVector()) {
+ EVT BVT = EVT::getVectorVT(*DAG.getContext(), Op0VT, 1);
+ if (!isTypeLegal(BVT))
+ return SDValue();
+ return DAG.getBitcast(VT, DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, BVT,
+ DAG.getUNDEF(BVT), Op0,
+ DAG.getConstant(0, DL, XLenVT)));
+ }
+ return SDValue();
+ }
+ // Custom-legalize bitcasts from fixed-length vector types to scalar types
+ // thus: bitcast the vector to a one-element vector type whose element type
+ // is the same as the result type, and extract the first element.
+ if (!VT.isVector() && Op0VT.isFixedLengthVector()) {
+ EVT BVT = EVT::getVectorVT(*DAG.getContext(), VT, 1);
+ if (!isTypeLegal(BVT))
+ return SDValue();
+ SDValue BVec = DAG.getBitcast(BVT, Op0);
+ return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
+ DAG.getConstant(0, DL, XLenVT));
+ }
+ return SDValue();
+ }
+ case ISD::INTRINSIC_WO_CHAIN:
+ return LowerINTRINSIC_WO_CHAIN(Op, DAG);
+ case ISD::INTRINSIC_W_CHAIN:
+ return LowerINTRINSIC_W_CHAIN(Op, DAG);
+ case ISD::INTRINSIC_VOID:
+ return LowerINTRINSIC_VOID(Op, DAG);
+ case ISD::BITREVERSE: {
+ MVT VT = Op.getSimpleValueType();
+ SDLoc DL(Op);
+ assert(Subtarget.hasStdExtZbkb() && "Unexpected custom legalization");
+ assert(Op.getOpcode() == ISD::BITREVERSE && "Unexpected opcode");
+ // Expand bitreverse to a bswap(rev8) followed by brev8.
+ SDValue BSwap = DAG.getNode(ISD::BSWAP, DL, VT, Op.getOperand(0));
+ return DAG.getNode(RISCVISD::BREV8, DL, VT, BSwap);
+ }
+ case ISD::TRUNCATE:
+ // Only custom-lower vector truncates
+ if (!Op.getSimpleValueType().isVector())
+ return Op;
+ return lowerVectorTruncLike(Op, DAG);
+ case ISD::ANY_EXTEND:
+ case ISD::ZERO_EXTEND:
+ if (Op.getOperand(0).getValueType().isVector() &&
+ Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
+ return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ 1);
+ return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VZEXT_VL);
+ case ISD::SIGN_EXTEND:
+ if (Op.getOperand(0).getValueType().isVector() &&
+ Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
+ return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ -1);
+ return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VSEXT_VL);
+ case ISD::SPLAT_VECTOR_PARTS:
+ return lowerSPLAT_VECTOR_PARTS(Op, DAG);
+ case ISD::INSERT_VECTOR_ELT:
+ return lowerINSERT_VECTOR_ELT(Op, DAG);
+ case ISD::EXTRACT_VECTOR_ELT:
+ return lowerEXTRACT_VECTOR_ELT(Op, DAG);
+ case ISD::VSCALE: {
+ MVT VT = Op.getSimpleValueType();
+ SDLoc DL(Op);
+ SDValue VLENB = DAG.getNode(RISCVISD::READ_VLENB, DL, VT);
+ // We define our scalable vector types for lmul=1 to use a 64 bit known
+ // minimum size. e.g. <vscale x 2 x i32>. VLENB is in bytes so we calculate
+ // vscale as VLENB / 8.
+ static_assert(RISCV::RVVBitsPerBlock == 64, "Unexpected bits per block!");
+ if (Subtarget.getRealMinVLen() < RISCV::RVVBitsPerBlock)
+ report_fatal_error("Support for VLEN==32 is incomplete.");
+ // We assume VLENB is a multiple of 8. We manually choose the best shift
+ // here because SimplifyDemandedBits isn't always able to simplify it.
+ uint64_t Val = Op.getConstantOperandVal(0);
+ if (isPowerOf2_64(Val)) {
+ uint64_t Log2 = Log2_64(Val);
+ if (Log2 < 3)
+ return DAG.getNode(ISD::SRL, DL, VT, VLENB,
+ DAG.getConstant(3 - Log2, DL, VT));
+ if (Log2 > 3)
+ return DAG.getNode(ISD::SHL, DL, VT, VLENB,
+ DAG.getConstant(Log2 - 3, DL, VT));
+ return VLENB;
+ }
+ // If the multiplier is a multiple of 8, scale it down to avoid needing
+ // to shift the VLENB value.
+ if ((Val % 8) == 0)
+ return DAG.getNode(ISD::MUL, DL, VT, VLENB,
+ DAG.getConstant(Val / 8, DL, VT));
+
+ SDValue VScale = DAG.getNode(ISD::SRL, DL, VT, VLENB,
+ DAG.getConstant(3, DL, VT));
+ return DAG.getNode(ISD::MUL, DL, VT, VScale, Op.getOperand(0));
+ }
+ case ISD::FPOWI: {
+ // Custom promote f16 powi with illegal i32 integer type on RV64. Once
+ // promoted this will be legalized into a libcall by LegalizeIntegerTypes.
+ if (Op.getValueType() == MVT::f16 && Subtarget.is64Bit() &&
+ Op.getOperand(1).getValueType() == MVT::i32) {
+ SDLoc DL(Op);
+ SDValue Op0 = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Op.getOperand(0));
+ SDValue Powi =
+ DAG.getNode(ISD::FPOWI, DL, MVT::f32, Op0, Op.getOperand(1));
+ return DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, Powi,
+ DAG.getIntPtrConstant(0, DL, /*isTarget=*/true));
+ }
+ return SDValue();
+ }
+ case ISD::FP_EXTEND:
+ case ISD::FP_ROUND:
+ if (!Op.getValueType().isVector())
+ return Op;
+ return lowerVectorFPExtendOrRoundLike(Op, DAG);
+ case ISD::FP_TO_SINT:
+ case ISD::FP_TO_UINT:
+ case ISD::SINT_TO_FP:
+ case ISD::UINT_TO_FP: {
+ // RVV can only do fp<->int conversions to types half/double the size as
+ // the source. We custom-lower any conversions that do two hops into
+ // sequences.
+ MVT VT = Op.getSimpleValueType();
+ if (!VT.isVector())
+ return Op;
+ SDLoc DL(Op);
+ SDValue Src = Op.getOperand(0);
+ MVT EltVT = VT.getVectorElementType();
+ MVT SrcVT = Src.getSimpleValueType();
+ MVT SrcEltVT = SrcVT.getVectorElementType();
+ unsigned EltSize = EltVT.getSizeInBits();
+ unsigned SrcEltSize = SrcEltVT.getSizeInBits();
+ assert(isPowerOf2_32(EltSize) && isPowerOf2_32(SrcEltSize) &&
+ "Unexpected vector element types");
+
+ bool IsInt2FP = SrcEltVT.isInteger();
+ // Widening conversions
+ if (EltSize > (2 * SrcEltSize)) {
+ if (IsInt2FP) {
+ // Do a regular integer sign/zero extension then convert to float.
+ MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(EltSize / 2),
+ VT.getVectorElementCount());
+ unsigned ExtOpcode = Op.getOpcode() == ISD::UINT_TO_FP
+ ? ISD::ZERO_EXTEND
+ : ISD::SIGN_EXTEND;
+ SDValue Ext = DAG.getNode(ExtOpcode, DL, IVecVT, Src);
+ return DAG.getNode(Op.getOpcode(), DL, VT, Ext);
+ }
+ // FP2Int
+ assert(SrcEltVT == MVT::f16 && "Unexpected FP_TO_[US]INT lowering");
+ // Do one doubling fp_extend then complete the operation by converting
+ // to int.
+ MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
+ SDValue FExt = DAG.getFPExtendOrRound(Src, DL, InterimFVT);
+ return DAG.getNode(Op.getOpcode(), DL, VT, FExt);
+ }
+
+ // Narrowing conversions
+ if (SrcEltSize > (2 * EltSize)) {
+ if (IsInt2FP) {
+ // One narrowing int_to_fp, then an fp_round.
+ assert(EltVT == MVT::f16 && "Unexpected [US]_TO_FP lowering");
+ MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
+ SDValue Int2FP = DAG.getNode(Op.getOpcode(), DL, InterimFVT, Src);
+ return DAG.getFPExtendOrRound(Int2FP, DL, VT);
+ }
+ // FP2Int
+ // One narrowing fp_to_int, then truncate the integer. If the float isn't
+ // representable by the integer, the result is poison.
+ MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(SrcEltSize / 2),
+ VT.getVectorElementCount());
+ SDValue FP2Int = DAG.getNode(Op.getOpcode(), DL, IVecVT, Src);
+ return DAG.getNode(ISD::TRUNCATE, DL, VT, FP2Int);
+ }
+
+ // Scalable vectors can exit here. Patterns will handle equally-sized
+ // conversions halving/doubling ones.
+ if (!VT.isFixedLengthVector())
+ return Op;
+
+ // For fixed-length vectors we lower to a custom "VL" node.
+ unsigned RVVOpc = 0;
+ switch (Op.getOpcode()) {
+ default:
+ llvm_unreachable("Impossible opcode");
+ case ISD::FP_TO_SINT:
+ RVVOpc = RISCVISD::VFCVT_RTZ_X_F_VL;
+ break;
+ case ISD::FP_TO_UINT:
+ RVVOpc = RISCVISD::VFCVT_RTZ_XU_F_VL;
+ break;
+ case ISD::SINT_TO_FP:
+ RVVOpc = RISCVISD::SINT_TO_FP_VL;
+ break;
+ case ISD::UINT_TO_FP:
+ RVVOpc = RISCVISD::UINT_TO_FP_VL;
+ break;
+ }
+
+ MVT ContainerVT = getContainerForFixedLengthVector(VT);
+ MVT SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
+ assert(ContainerVT.getVectorElementCount() == SrcContainerVT.getVectorElementCount() &&
+ "Expected same element count");
+
+ auto [Mask, VL] = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
+
+ Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
+ Src = DAG.getNode(RVVOpc, DL, ContainerVT, Src, Mask, VL);
+ return convertFromScalableVector(VT, Src, DAG, Subtarget);
+ }
+ case ISD::FP_TO_SINT_SAT:
+ case ISD::FP_TO_UINT_SAT:
+ return lowerFP_TO_INT_SAT(Op, DAG, Subtarget);
+ case ISD::FTRUNC:
+ case ISD::FCEIL:
+ case ISD::FFLOOR:
+ case ISD::FRINT:
+ case ISD::FROUND:
+ case ISD::FROUNDEVEN:
+ return lowerFTRUNC_FCEIL_FFLOOR_FROUND(Op, DAG, Subtarget);
+ case ISD::VECREDUCE_ADD:
+ case ISD::VECREDUCE_UMAX:
+ case ISD::VECREDUCE_SMAX:
+ case ISD::VECREDUCE_UMIN:
+ case ISD::VECREDUCE_SMIN:
+ return lowerVECREDUCE(Op, DAG);
+ case ISD::VECREDUCE_AND:
+ case ISD::VECREDUCE_OR:
+ case ISD::VECREDUCE_XOR:
+ if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
+ return lowerVectorMaskVecReduction(Op, DAG, /*IsVP*/ false);
+ return lowerVECREDUCE(Op, DAG);
+ case ISD::VECREDUCE_FADD:
+ case ISD::VECREDUCE_SEQ_FADD:
+ case ISD::VECREDUCE_FMIN:
+ case ISD::VECREDUCE_FMAX:
+ return lowerFPVECREDUCE(Op, DAG);
+ case ISD::VP_REDUCE_ADD:
+ case ISD::VP_REDUCE_UMAX:
+ case ISD::VP_REDUCE_SMAX:
+ case ISD::VP_REDUCE_UMIN:
+ case ISD::VP_REDUCE_SMIN:
+ case ISD::VP_REDUCE_FADD:
+ case ISD::VP_REDUCE_SEQ_FADD:
+ case ISD::VP_REDUCE_FMIN:
+ case ISD::VP_REDUCE_FMAX:
+ return lowerVPREDUCE(Op, DAG);
+ case ISD::VP_REDUCE_AND:
+ case ISD::VP_REDUCE_OR:
+ case ISD::VP_REDUCE_XOR:
+ if (Op.getOperand(1).getValueType().getVectorElementType() == MVT::i1)
+ return lowerVectorMaskVecReduction(Op, DAG, /*IsVP*/ true);
+ return lowerVPREDUCE(Op, DAG);
+ case ISD::INSERT_SUBVECTOR:
+ return lowerINSERT_SUBVECTOR(Op, DAG);
+ case ISD::EXTRACT_SUBVECTOR:
+ return lowerEXTRACT_SUBVECTOR(Op, DAG);
+ case ISD::STEP_VECTOR:
+ return lowerSTEP_VECTOR(Op, DAG);
+ case ISD::VECTOR_REVERSE:
+ return lowerVECTOR_REVERSE(Op, DAG);
+ case ISD::VECTOR_SPLICE:
+ return lowerVECTOR_SPLICE(Op, DAG);
+ case ISD::BUILD_VECTOR:
+ return lowerBUILD_VECTOR(Op, DAG, Subtarget);
+ case ISD::SPLAT_VECTOR:
+ if (Op.getValueType().getVectorElementType() == MVT::i1)
+ return lowerVectorMaskSplat(Op, DAG);
+ return SDValue();
+ case ISD::VECTOR_SHUFFLE:
+ return lowerVECTOR_SHUFFLE(Op, DAG, Subtarget);
+ case ISD::CONCAT_VECTORS: {
+ // Split CONCAT_VECTORS into a series of INSERT_SUBVECTOR nodes. This is
+ // better than going through the stack, as the default expansion does.
+ SDLoc DL(Op);
+ MVT VT = Op.getSimpleValueType();
+ unsigned NumOpElts =
+ Op.getOperand(0).getSimpleValueType().getVectorMinNumElements();
+ SDValue Vec = DAG.getUNDEF(VT);
+ for (const auto &OpIdx : enumerate(Op->ops())) {
+ SDValue SubVec = OpIdx.value();
+ // Don't insert undef subvectors.
+ if (SubVec.isUndef())
+ continue;
+ Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Vec, SubVec,
+ DAG.getIntPtrConstant(OpIdx.index() * NumOpElts, DL));
+ }
+ return Vec;
+ }
+ case ISD::LOAD:
+ if (auto V = expandUnalignedRVVLoad(Op, DAG))
+ return V;
+ if (Op.getValueType().isFixedLengthVector())
+ return lowerFixedLengthVectorLoadToRVV(Op, DAG);
+ return Op;
+ case ISD::STORE:
+ if (auto V = expandUnalignedRVVStore(Op, DAG))
+ return V;
+ if (Op.getOperand(1).getValueType().isFixedLengthVector())
+ return lowerFixedLengthVectorStoreToRVV(Op, DAG);
+ return Op;
+ case ISD::MLOAD:
+ case ISD::VP_LOAD:
+ return lowerMaskedLoad(Op, DAG);
+ case ISD::MSTORE:
+ case ISD::VP_STORE:
+ return lowerMaskedStore(Op, DAG);
+ case ISD::SELECT_CC: {
+ // This occurs because we custom legalize SETGT and SETUGT for setcc. That
+ // causes LegalizeDAG to think we need to custom legalize select_cc. Expand
+ // into separate SETCC+SELECT_CC just like LegalizeDAG.
+ SDValue Tmp1 = Op.getOperand(0);
+ SDValue Tmp2 = Op.getOperand(1);
+ SDValue True = Op.getOperand(2);
+ SDValue False = Op.getOperand(3);
+ EVT VT = Op.getValueType();
+ SDValue CC = Op.getOperand(4);
+ EVT CmpVT = Tmp1.getValueType();
+ EVT CCVT =
+ getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), CmpVT);
+ SDLoc DL(Op);
+ SDValue Cond =
+ DAG.getNode(ISD::SETCC, DL, CCVT, Tmp1, Tmp2, CC, Op->getFlags());
+ return DAG.getSelect(DL, VT, Cond, True, False);
+ }
+ case ISD::SETCC: {
+ MVT OpVT = Op.getOperand(0).getSimpleValueType();
+ if (OpVT.isScalarInteger()) {
+ MVT VT = Op.getSimpleValueType();
+ SDValue LHS = Op.getOperand(0);
+ SDValue RHS = Op.getOperand(1);
+ ISD::CondCode CCVal = cast<CondCodeSDNode>(Op.getOperand(2))->get();
+ assert((CCVal == ISD::SETGT || CCVal == ISD::SETUGT) &&
+ "Unexpected CondCode");
+
+ SDLoc DL(Op);
+
+ // If the RHS is a constant in the range [-2049, 0) or (0, 2046], we can
+ // convert this to the equivalent of (set(u)ge X, C+1) by using
+ // (xori (slti(u) X, C+1), 1). This avoids materializing a small constant
+ // in a register.
+ if (isa<ConstantSDNode>(RHS)) {
+ int64_t Imm = cast<ConstantSDNode>(RHS)->getSExtValue();
+ if (Imm != 0 && isInt<12>((uint64_t)Imm + 1)) {
+ // X > -1 should have been replaced with false.
+ assert((CCVal != ISD::SETUGT || Imm != -1) &&
+ "Missing canonicalization");
+ // Using getSetCCSwappedOperands will convert SET(U)GT->SET(U)LT.
+ CCVal = ISD::getSetCCSwappedOperands(CCVal);
+ SDValue SetCC = DAG.getSetCC(
+ DL, VT, LHS, DAG.getConstant(Imm + 1, DL, OpVT), CCVal);
+ return DAG.getLogicalNOT(DL, SetCC, VT);
+ }
+ }
+
+ // Not a constant we could handle, swap the operands and condition code to
+ // SETLT/SETULT.
+ CCVal = ISD::getSetCCSwappedOperands(CCVal);
+ return DAG.getSetCC(DL, VT, RHS, LHS, CCVal);
+ }
+
+ return lowerFixedLengthVectorSetccToRVV(Op, DAG);
+ }
+ case ISD::ADD:
+ return lowerToScalableOp(Op, DAG, RISCVISD::ADD_VL, /*HasMergeOp*/ true);
+ case ISD::SUB:
+ return lowerToScalableOp(Op, DAG, RISCVISD::SUB_VL, /*HasMergeOp*/ true);
+ case ISD::MUL:
+ return lowerToScalableOp(Op, DAG, RISCVISD::MUL_VL, /*HasMergeOp*/ true);
+ case ISD::MULHS:
+ return lowerToScalableOp(Op, DAG, RISCVISD::MULHS_VL, /*HasMergeOp*/ true);
+ case ISD::MULHU:
+ return lowerToScalableOp(Op, DAG, RISCVISD::MULHU_VL, /*HasMergeOp*/ true);
+ case ISD::AND:
+ return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMAND_VL,
+ RISCVISD::AND_VL);
+ case ISD::OR:
+ return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMOR_VL,
+ RISCVISD::OR_VL);
+ case ISD::XOR:
+ return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMXOR_VL,
+ RISCVISD::XOR_VL);
+ case ISD::SDIV:
+ return lowerToScalableOp(Op, DAG, RISCVISD::SDIV_VL, /*HasMergeOp*/ true);
+ case ISD::SREM:
+ return lowerToScalableOp(Op, DAG, RISCVISD::SREM_VL, /*HasMergeOp*/ true);
+ case ISD::UDIV:
+ return lowerToScalableOp(Op, DAG, RISCVISD::UDIV_VL, /*HasMergeOp*/ true);
+ case ISD::UREM:
+ return lowerToScalableOp(Op, DAG, RISCVISD::UREM_VL, /*HasMergeOp*/ true);
+ case ISD::SHL:
+ case ISD::SRA:
+ case ISD::SRL:
+ if (Op.getSimpleValueType().isFixedLengthVector())
+ return lowerFixedLengthVectorShiftToRVV(Op, DAG);
+ // This can be called for an i32 shift amount that needs to be promoted.
+ assert(Op.getOperand(1).getValueType() == MVT::i32 && Subtarget.is64Bit() &&
+ "Unexpected custom legalisation");
+ return SDValue();
+ case ISD::SADDSAT:
+ return lowerToScalableOp(Op, DAG, RISCVISD::SADDSAT_VL,
+ /*HasMergeOp*/ true);
+ case ISD::UADDSAT:
+ return lowerToScalableOp(Op, DAG, RISCVISD::UADDSAT_VL,
+ /*HasMergeOp*/ true);
+ case ISD::SSUBSAT:
+ return lowerToScalableOp(Op, DAG, RISCVISD::SSUBSAT_VL,
+ /*HasMergeOp*/ true);
+ case ISD::USUBSAT:
+ return lowerToScalableOp(Op, DAG, RISCVISD::USUBSAT_VL,
+ /*HasMergeOp*/ true);
+ case ISD::FADD:
+ return lowerToScalableOp(Op, DAG, RISCVISD::FADD_VL, /*HasMergeOp*/ true);
+ case ISD::FSUB:
+ return lowerToScalableOp(Op, DAG, RISCVISD::FSUB_VL, /*HasMergeOp*/ true);
+ case ISD::FMUL:
+ return lowerToScalableOp(Op, DAG, RISCVISD::FMUL_VL, /*HasMergeOp*/ true);
+ case ISD::FDIV:
+ return lowerToScalableOp(Op, DAG, RISCVISD::FDIV_VL, /*HasMergeOp*/ true);
+ case ISD::FNEG:
+ return lowerToScalableOp(Op, DAG, RISCVISD::FNEG_VL);
+ case ISD::FABS:
+ return lowerToScalableOp(Op, DAG, RISCVISD::FABS_VL);
+ case ISD::FSQRT:
+ return lowerToScalableOp(Op, DAG, RISCVISD::FSQRT_VL);
+ case ISD::FMA:
+ return lowerToScalableOp(Op, DAG, RISCVISD::VFMADD_VL);
+ case ISD::SMIN:
+ return lowerToScalableOp(Op, DAG, RISCVISD::SMIN_VL, /*HasMergeOp*/ true);
+ case ISD::SMAX:
+ return lowerToScalableOp(Op, DAG, RISCVISD::SMAX_VL, /*HasMergeOp*/ true);
+ case ISD::UMIN:
+ return lowerToScalableOp(Op, DAG, RISCVISD::UMIN_VL, /*HasMergeOp*/ true);
+ case ISD::UMAX:
+ return lowerToScalableOp(Op, DAG, RISCVISD::UMAX_VL, /*HasMergeOp*/ true);
+ case ISD::FMINNUM:
+ return lowerToScalableOp(Op, DAG, RISCVISD::FMINNUM_VL,
+ /*HasMergeOp*/ true);
+ case ISD::FMAXNUM:
+ return lowerToScalableOp(Op, DAG, RISCVISD::FMAXNUM_VL,
+ /*HasMergeOp*/ true);
+ case ISD::ABS:
+ case ISD::VP_ABS:
+ return lowerABS(Op, DAG);
+ case ISD::CTLZ:
+ case ISD::CTLZ_ZERO_UNDEF:
+ case ISD::CTTZ_ZERO_UNDEF:
+ return lowerCTLZ_CTTZ_ZERO_UNDEF(Op, DAG);
+ case ISD::VSELECT:
+ return lowerFixedLengthVectorSelectToRVV(Op, DAG);
+ case ISD::FCOPYSIGN:
+ return lowerFixedLengthVectorFCOPYSIGNToRVV(Op, DAG);
+ case ISD::MGATHER:
+ case ISD::VP_GATHER:
+ return lowerMaskedGather(Op, DAG);
+ case ISD::MSCATTER:
+ case ISD::VP_SCATTER:
+ return lowerMaskedScatter(Op, DAG);
+ case ISD::GET_ROUNDING:
+ return lowerGET_ROUNDING(Op, DAG);
+ case ISD::SET_ROUNDING:
+ return lowerSET_ROUNDING(Op, DAG);
+ case ISD::EH_DWARF_CFA:
+ return lowerEH_DWARF_CFA(Op, DAG);
+ case ISD::VP_SELECT:
+ return lowerVPOp(Op, DAG, RISCVISD::VSELECT_VL);
+ case ISD::VP_MERGE:
+ return lowerVPOp(Op, DAG, RISCVISD::VP_MERGE_VL);
+ case ISD::VP_ADD:
+ return lowerVPOp(Op, DAG, RISCVISD::ADD_VL, /*HasMergeOp*/ true);
+ case ISD::VP_SUB:
+ return lowerVPOp(Op, DAG, RISCVISD::SUB_VL, /*HasMergeOp*/ true);
+ case ISD::VP_MUL:
+ return lowerVPOp(Op, DAG, RISCVISD::MUL_VL, /*HasMergeOp*/ true);
+ case ISD::VP_SDIV:
+ return lowerVPOp(Op, DAG, RISCVISD::SDIV_VL, /*HasMergeOp*/ true);
+ case ISD::VP_UDIV:
+ return lowerVPOp(Op, DAG, RISCVISD::UDIV_VL, /*HasMergeOp*/ true);
+ case ISD::VP_SREM:
+ return lowerVPOp(Op, DAG, RISCVISD::SREM_VL, /*HasMergeOp*/ true);
+ case ISD::VP_UREM:
+ return lowerVPOp(Op, DAG, RISCVISD::UREM_VL, /*HasMergeOp*/ true);
+ case ISD::VP_AND:
+ return lowerLogicVPOp(Op, DAG, RISCVISD::VMAND_VL, RISCVISD::AND_VL);
+ case ISD::VP_OR:
+ return lowerLogicVPOp(Op, DAG, RISCVISD::VMOR_VL, RISCVISD::OR_VL);
+ case ISD::VP_XOR:
+ return lowerLogicVPOp(Op, DAG, RISCVISD::VMXOR_VL, RISCVISD::XOR_VL);
+ case ISD::VP_ASHR:
+ return lowerVPOp(Op, DAG, RISCVISD::SRA_VL, /*HasMergeOp*/ true);
+ case ISD::VP_LSHR:
+ return lowerVPOp(Op, DAG, RISCVISD::SRL_VL, /*HasMergeOp*/ true);
+ case ISD::VP_SHL:
+ return lowerVPOp(Op, DAG, RISCVISD::SHL_VL, /*HasMergeOp*/ true);
+ case ISD::VP_FADD:
+ return lowerVPOp(Op, DAG, RISCVISD::FADD_VL, /*HasMergeOp*/ true);
+ case ISD::VP_FSUB:
+ return lowerVPOp(Op, DAG, RISCVISD::FSUB_VL, /*HasMergeOp*/ true);
+ case ISD::VP_FMUL:
+ return lowerVPOp(Op, DAG, RISCVISD::FMUL_VL, /*HasMergeOp*/ true);
+ case ISD::VP_FDIV:
+ return lowerVPOp(Op, DAG, RISCVISD::FDIV_VL, /*HasMergeOp*/ true);
+ case ISD::VP_FNEG:
+ return lowerVPOp(Op, DAG, RISCVISD::FNEG_VL);
+ case ISD::VP_FABS:
+ return lowerVPOp(Op, DAG, RISCVISD::FABS_VL);
+ case ISD::VP_SQRT:
+ return lowerVPOp(Op, DAG, RISCVISD::FSQRT_VL);
+ case ISD::VP_FMA:
+ return lowerVPOp(Op, DAG, RISCVISD::VFMADD_VL);
+ case ISD::VP_FMINNUM:
+ return lowerVPOp(Op, DAG, RISCVISD::FMINNUM_VL, /*HasMergeOp*/ true);
+ case ISD::VP_FMAXNUM:
+ return lowerVPOp(Op, DAG, RISCVISD::FMAXNUM_VL, /*HasMergeOp*/ true);
+ case ISD::VP_FCOPYSIGN:
+ return lowerVPOp(Op, DAG, RISCVISD::FCOPYSIGN_VL, /*HasMergeOp*/ true);
+ case ISD::VP_SIGN_EXTEND:
+ case ISD::VP_ZERO_EXTEND:
+ if (Op.getOperand(0).getSimpleValueType().getVectorElementType() == MVT::i1)
+ return lowerVPExtMaskOp(Op, DAG);
+ return lowerVPOp(Op, DAG,
+ Op.getOpcode() == ISD::VP_SIGN_EXTEND
+ ? RISCVISD::VSEXT_VL
+ : RISCVISD::VZEXT_VL);
+ case ISD::VP_TRUNCATE:
+ return lowerVectorTruncLike(Op, DAG);
+ case ISD::VP_FP_EXTEND:
+ case ISD::VP_FP_ROUND:
+ return lowerVectorFPExtendOrRoundLike(Op, DAG);
+ case ISD::VP_FP_TO_SINT:
+ return lowerVPFPIntConvOp(Op, DAG, RISCVISD::VFCVT_RTZ_X_F_VL);
+ case ISD::VP_FP_TO_UINT:
+ return lowerVPFPIntConvOp(Op, DAG, RISCVISD::VFCVT_RTZ_XU_F_VL);
+ case ISD::VP_SINT_TO_FP:
+ return lowerVPFPIntConvOp(Op, DAG, RISCVISD::SINT_TO_FP_VL);
+ case ISD::VP_UINT_TO_FP:
+ return lowerVPFPIntConvOp(Op, DAG, RISCVISD::UINT_TO_FP_VL);
+ case ISD::VP_SETCC:
+ if (Op.getOperand(0).getSimpleValueType().getVectorElementType() == MVT::i1)
+ return lowerVPSetCCMaskOp(Op, DAG);
+ return lowerVPOp(Op, DAG, RISCVISD::SETCC_VL, /*HasMergeOp*/ true);
+ case ISD::VP_SMIN:
+ return lowerVPOp(Op, DAG, RISCVISD::SMIN_VL, /*HasMergeOp*/ true);
+ case ISD::VP_SMAX:
+ return lowerVPOp(Op, DAG, RISCVISD::SMAX_VL, /*HasMergeOp*/ true);
+ case ISD::VP_UMIN:
+ return lowerVPOp(Op, DAG, RISCVISD::UMIN_VL, /*HasMergeOp*/ true);
+ case ISD::VP_UMAX:
+ return lowerVPOp(Op, DAG, RISCVISD::UMAX_VL, /*HasMergeOp*/ true);
+ case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
+ return lowerVPStridedLoad(Op, DAG);
+ case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
+ return lowerVPStridedStore(Op, DAG);
+ case ISD::VP_FCEIL:
+ case ISD::VP_FFLOOR:
+ case ISD::VP_FRINT:
+ case ISD::VP_FNEARBYINT:
+ case ISD::VP_FROUND:
+ case ISD::VP_FROUNDEVEN:
+ case ISD::VP_FROUNDTOZERO:
+ return lowerVectorFTRUNC_FCEIL_FFLOOR_FROUND(Op, DAG, Subtarget);
+ }
+}
+
+// Build the target-specific node for a global address with the given
+// relocation flags. The offset is always zero here; lowerGlobalAddress
+// asserts that global nodes reaching lowering carry no offset.
+static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
+                             SelectionDAG &DAG, unsigned Flags) {
+  const GlobalValue *GV = N->getGlobal();
+  return DAG.getTargetGlobalAddress(GV, DL, Ty, /*offset=*/0, Flags);
+}
+
+// Build the target-specific node for a block address, preserving the
+// node's offset and applying the given relocation flags. `DL` is unused
+// but kept so all getTargetNode overloads share one signature for the
+// generic getAddr() template.
+static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
+                             SelectionDAG &DAG, unsigned Flags) {
+  const BlockAddress *BA = N->getBlockAddress();
+  return DAG.getTargetBlockAddress(BA, Ty, N->getOffset(), Flags);
+}
+
+// Build the target-specific node for a constant-pool entry, carrying over
+// its alignment and offset and applying the given relocation flags.
+static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
+                             SelectionDAG &DAG, unsigned Flags) {
+  const Constant *CPV = N->getConstVal();
+  return DAG.getTargetConstantPool(CPV, Ty, N->getAlign(), N->getOffset(),
+                                   Flags);
+}
+
+// Build the target-specific node for a jump table with the given
+// relocation flags.
+static SDValue getTargetNode(JumpTableSDNode *N, SDLoc DL, EVT Ty,
+                             SelectionDAG &DAG, unsigned Flags) {
+  int Index = N->getIndex();
+  return DAG.getTargetJumpTable(Index, Ty, Flags);
+}
+
+// Compute the address of a global-like node N (global, block address,
+// constant pool, or jump table) as an SDValue of pointer type. The
+// addressing sequence is chosen from PIC mode, the HWASAN tagged-globals
+// setting, and the code model.
+//
+// \param N       the address node being lowered.
+// \param IsLocal true when the symbol is known DSO-local, allowing direct
+//                PC-relative addressing instead of a GOT load.
+template <class NodeTy>
+SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
+                                     bool IsLocal) const {
+  SDLoc DL(N);
+  EVT Ty = getPointerTy(DAG.getDataLayout());
+
+  // When HWASAN is used and tagging of global variables is enabled
+  // they should be accessed via the GOT, since the tagged address of a global
+  // is incompatible with existing code models. This also applies to non-pic
+  // mode.
+  if (isPositionIndependent() || Subtarget.allowTaggedGlobals()) {
+    SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
+    // Tagged globals take the GOT path below even when they are local.
+    if (IsLocal && !Subtarget.allowTaggedGlobals())
+      // Use PC-relative addressing to access the symbol. This generates the
+      // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
+      // %pcrel_lo(auipc)).
+      return DAG.getNode(RISCVISD::LLA, DL, Ty, Addr);
+
+    // Use PC-relative addressing to access the GOT for this symbol, then load
+    // the address from the GOT. This generates the pattern (PseudoLA sym),
+    // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
+    MachineFunction &MF = DAG.getMachineFunction();
+    // The GOT slot itself never changes, so the load is marked
+    // dereferenceable and invariant.
+    MachineMemOperand *MemOp = MF.getMachineMemOperand(
+        MachinePointerInfo::getGOT(MF),
+        MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
+            MachineMemOperand::MOInvariant,
+        LLT(Ty.getSimpleVT()), Align(Ty.getFixedSizeInBits() / 8));
+    SDValue Load =
+        DAG.getMemIntrinsicNode(RISCVISD::LA, DL, DAG.getVTList(Ty, MVT::Other),
+                                {DAG.getEntryNode(), Addr}, Ty, MemOp);
+    return Load;
+  }
+
+  switch (getTargetMachine().getCodeModel()) {
+  default:
+    report_fatal_error("Unsupported code model for lowering");
+  case CodeModel::Small: {
+    // Generate a sequence for accessing addresses within the first 2 GiB of
+    // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
+    SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI);
+    SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO);
+    SDValue MNHi = DAG.getNode(RISCVISD::HI, DL, Ty, AddrHi);
+    return DAG.getNode(RISCVISD::ADD_LO, DL, Ty, MNHi, AddrLo);
+  }
+  case CodeModel::Medium: {
+    // Generate a sequence for accessing addresses within any 2GiB range within
+    // the address space. This generates the pattern (PseudoLLA sym), which
+    // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
+    SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
+    return DAG.getNode(RISCVISD::LLA, DL, Ty, Addr);
+  }
+  }
+}
+
+SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
+                                                SelectionDAG &DAG) const {
+  // Global addresses are materialized via the shared getAddr() helper;
+  // DSO-local globals may use the cheaper direct addressing sequence.
+  auto *GA = cast<GlobalAddressSDNode>(Op);
+  assert(GA->getOffset() == 0 && "unexpected offset in global node");
+  bool IsLocal = GA->getGlobal()->isDSOLocal();
+  return getAddr(GA, DAG, IsLocal);
+}
+
+SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
+                                               SelectionDAG &DAG) const {
+  // Block addresses go through the shared getAddr() helper.
+  auto *BA = cast<BlockAddressSDNode>(Op);
+  return getAddr(BA, DAG);
+}
+
+SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
+                                               SelectionDAG &DAG) const {
+  // Constant-pool entries go through the shared getAddr() helper.
+  auto *CP = cast<ConstantPoolSDNode>(Op);
+  return getAddr(CP, DAG);
+}
+
+SDValue RISCVTargetLowering::lowerJumpTable(SDValue Op,
+                                            SelectionDAG &DAG) const {
+  // Jump tables go through the shared getAddr() helper.
+  auto *JT = cast<JumpTableSDNode>(Op);
+  return getAddr(JT, DAG);
+}
+
+// Lower a TLS global address for the static TLS models: initial-exec when
+// UseGOT is true (address loaded from the GOT, then thread pointer added),
+// local-exec otherwise (address computed directly as tp-relative offsets).
+// The thread pointer lives in x4 (tp) in both sequences.
+SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
+                                              SelectionDAG &DAG,
+                                              bool UseGOT) const {
+  SDLoc DL(N);
+  EVT Ty = getPointerTy(DAG.getDataLayout());
+  const GlobalValue *GV = N->getGlobal();
+  MVT XLenVT = Subtarget.getXLenVT();
+
+  if (UseGOT) {
+    // Use PC-relative addressing to access the GOT for this TLS symbol, then
+    // load the address from the GOT and add the thread pointer. This generates
+    // the pattern (PseudoLA_TLS_IE sym), which expands to
+    // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)).
+    SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
+    MachineFunction &MF = DAG.getMachineFunction();
+    // The GOT slot is constant for the program's lifetime, so the load is
+    // marked dereferenceable and invariant.
+    MachineMemOperand *MemOp = MF.getMachineMemOperand(
+        MachinePointerInfo::getGOT(MF),
+        MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
+            MachineMemOperand::MOInvariant,
+        LLT(Ty.getSimpleVT()), Align(Ty.getFixedSizeInBits() / 8));
+    SDValue Load = DAG.getMemIntrinsicNode(
+        RISCVISD::LA_TLS_IE, DL, DAG.getVTList(Ty, MVT::Other),
+        {DAG.getEntryNode(), Addr}, Ty, MemOp);
+
+    // Add the thread pointer.
+    SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
+    return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg);
+  }
+
+  // Generate a sequence for accessing the address relative to the thread
+  // pointer, with the appropriate adjustment for the thread pointer offset.
+  // This generates the pattern
+  // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym))
+  SDValue AddrHi =
+      DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI);
+  SDValue AddrAdd =
+      DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD);
+  SDValue AddrLo =
+      DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO);
+
+  SDValue MNHi = DAG.getNode(RISCVISD::HI, DL, Ty, AddrHi);
+  SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
+  SDValue MNAdd =
+      DAG.getNode(RISCVISD::ADD_TPREL, DL, Ty, MNHi, TPReg, AddrAdd);
+  return DAG.getNode(RISCVISD::ADD_LO, DL, Ty, MNAdd, AddrLo);
+}
+
+// Lower a TLS global address for the dynamic TLS models (local-dynamic and
+// general-dynamic): materialize the GOT slot address for the symbol and
+// call __tls_get_addr on it.
+SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
+                                               SelectionDAG &DAG) const {
+  SDLoc DL(N);
+  EVT Ty = getPointerTy(DAG.getDataLayout());
+  // The libcall argument/return is a pointer-sized integer.
+  IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits());
+  const GlobalValue *GV = N->getGlobal();
+
+  // Use a PC-relative addressing mode to access the global dynamic GOT address.
+  // This generates the pattern (PseudoLA_TLS_GD sym), which expands to
+  // (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)).
+  SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
+  SDValue Load = DAG.getNode(RISCVISD::LA_TLS_GD, DL, Ty, Addr);
+
+  // Prepare argument list to generate call.
+  ArgListTy Args;
+  ArgListEntry Entry;
+  Entry.Node = Load;
+  Entry.Ty = CallTy;
+  Args.push_back(Entry);
+
+  // Setup call to __tls_get_addr.
+  TargetLowering::CallLoweringInfo CLI(DAG);
+  CLI.setDebugLoc(DL)
+      .setChain(DAG.getEntryNode())
+      .setLibCallee(CallingConv::C, CallTy,
+                    DAG.getExternalSymbol("__tls_get_addr", Ty),
+                    std::move(Args));
+
+  return LowerCallTo(CLI).first;
+}
+
+// Dispatch a TLS global address to the lowering routine for its TLS model.
+SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
+                                                   SelectionDAG &DAG) const {
+  auto *GA = cast<GlobalAddressSDNode>(Op);
+  assert(GA->getOffset() == 0 && "unexpected offset in global node");
+
+  const Function &Fn = DAG.getMachineFunction().getFunction();
+  if (Fn.getCallingConv() == CallingConv::GHC)
+    report_fatal_error("In GHC calling convention TLS is not supported");
+
+  switch (getTargetMachine().getTLSModel(GA->getGlobal())) {
+  case TLSModel::LocalExec:
+    return getStaticTLSAddr(GA, DAG, /*UseGOT=*/false);
+  case TLSModel::InitialExec:
+    return getStaticTLSAddr(GA, DAG, /*UseGOT=*/true);
+  case TLSModel::LocalDynamic:
+  case TLSModel::GeneralDynamic:
+    return getDynamicTLSAddr(GA, DAG);
+  }
+  llvm_unreachable("covered switch over TLS model");
+}
+
+// Lower ISD::SELECT. Vector selects become VSELECT with the condition
+// splatted to an i1 vector. Scalar selects with an all-ones/all-zeros arm
+// are folded into bitwise operations (when the short-forward-branch
+// optimization is unavailable); everything else is lowered to
+// RISCVISD::SELECT_CC, merging an XLenVT SETCC condition into the node so
+// the integer compare+branch instructions can be used.
+SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
+  SDValue CondV = Op.getOperand(0);
+  SDValue TrueV = Op.getOperand(1);
+  SDValue FalseV = Op.getOperand(2);
+  SDLoc DL(Op);
+  MVT VT = Op.getSimpleValueType();
+  MVT XLenVT = Subtarget.getXLenVT();
+
+  // Lower vector SELECTs to VSELECTs by splatting the condition.
+  if (VT.isVector()) {
+    MVT SplatCondVT = VT.changeVectorElementType(MVT::i1);
+    SDValue CondSplat = DAG.getSplat(SplatCondVT, DL, CondV);
+    return DAG.getNode(ISD::VSELECT, DL, VT, CondSplat, TrueV, FalseV);
+  }
+
+  // NOTE(review): these bitwise folds are skipped when short forward
+  // branches are available, presumably because the branch form is then
+  // preferable — confirm against the subtarget tuning.
+  if (!Subtarget.hasShortForwardBranchOpt()) {
+    // (select c, -1, y) -> -c | y
+    if (isAllOnesConstant(TrueV)) {
+      SDValue Neg = DAG.getNegative(CondV, DL, VT);
+      return DAG.getNode(ISD::OR, DL, VT, Neg, FalseV);
+    }
+    // (select c, y, -1) -> (c-1) | y
+    if (isAllOnesConstant(FalseV)) {
+      SDValue Neg = DAG.getNode(ISD::ADD, DL, VT, CondV,
+                                DAG.getAllOnesConstant(DL, VT));
+      return DAG.getNode(ISD::OR, DL, VT, Neg, TrueV);
+    }
+
+    // (select c, 0, y) -> (c-1) & y
+    if (isNullConstant(TrueV)) {
+      SDValue Neg = DAG.getNode(ISD::ADD, DL, VT, CondV,
+                                DAG.getAllOnesConstant(DL, VT));
+      return DAG.getNode(ISD::AND, DL, VT, Neg, FalseV);
+    }
+    // (select c, y, 0) -> -c & y
+    if (isNullConstant(FalseV)) {
+      SDValue Neg = DAG.getNegative(CondV, DL, VT);
+      return DAG.getNode(ISD::AND, DL, VT, Neg, TrueV);
+    }
+  }
+
+  // If the condition is not an integer SETCC which operates on XLenVT, we need
+  // to emit a RISCVISD::SELECT_CC comparing the condition to zero. i.e.:
+  // (select condv, truev, falsev)
+  // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
+  if (CondV.getOpcode() != ISD::SETCC ||
+      CondV.getOperand(0).getSimpleValueType() != XLenVT) {
+    SDValue Zero = DAG.getConstant(0, DL, XLenVT);
+    SDValue SetNE = DAG.getCondCode(ISD::SETNE);
+
+    SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};
+
+    return DAG.getNode(RISCVISD::SELECT_CC, DL, VT, Ops);
+  }
+
+  // If the CondV is the output of a SETCC node which operates on XLenVT inputs,
+  // then merge the SETCC node into the lowered RISCVISD::SELECT_CC to take
+  // advantage of the integer compare+branch instructions. i.e.:
+  // (select (setcc lhs, rhs, cc), truev, falsev)
+  // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
+  SDValue LHS = CondV.getOperand(0);
+  SDValue RHS = CondV.getOperand(1);
+  ISD::CondCode CCVal = cast<CondCodeSDNode>(CondV.getOperand(2))->get();
+
+  // Special case for a select of 2 constants that have a difference of 1.
+  // Normally this is done by DAGCombine, but if the select is introduced by
+  // type legalization or op legalization, we miss it. Restricting to SETLT
+  // case for now because that is what signed saturating add/sub need.
+  // FIXME: We don't need the condition to be SETLT or even a SETCC,
+  // but we would probably want to swap the true/false values if the condition
+  // is SETGE/SETLE to avoid an XORI.
+  if (isa<ConstantSDNode>(TrueV) && isa<ConstantSDNode>(FalseV) &&
+      CCVal == ISD::SETLT) {
+    const APInt &TrueVal = cast<ConstantSDNode>(TrueV)->getAPIntValue();
+    const APInt &FalseVal = cast<ConstantSDNode>(FalseV)->getAPIntValue();
+    if (TrueVal - 1 == FalseVal)
+      return DAG.getNode(ISD::ADD, DL, VT, CondV, FalseV);
+    if (TrueVal + 1 == FalseVal)
+      return DAG.getNode(ISD::SUB, DL, VT, FalseV, CondV);
+  }
+
+  // May canonicalize LHS/RHS/CCVal in place for the branch instructions.
+  translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
+  // 1 < x ? x : 1 -> 0 < x ? x : 1
+  if (isOneConstant(LHS) && (CCVal == ISD::SETLT || CCVal == ISD::SETULT) &&
+      RHS == TrueV && LHS == FalseV) {
+    LHS = DAG.getConstant(0, DL, VT);
+    // 0 <u x is the same as x != 0.
+    if (CCVal == ISD::SETULT) {
+      std::swap(LHS, RHS);
+      CCVal = ISD::SETNE;
+    }
+  }
+
+  // x <s -1 ? x : -1 -> x <s 0 ? x : -1
+  if (isAllOnesConstant(RHS) && CCVal == ISD::SETLT && LHS == TrueV &&
+      RHS == FalseV) {
+    RHS = DAG.getConstant(0, DL, VT);
+  }
+
+  SDValue TargetCC = DAG.getCondCode(CCVal);
+
+  if (isa<ConstantSDNode>(TrueV) && !isa<ConstantSDNode>(FalseV)) {
+    // (select (setcc lhs, rhs, CC), constant, falsev)
+    // -> (select (setcc lhs, rhs, InverseCC), falsev, constant)
+    std::swap(TrueV, FalseV);
+    TargetCC = DAG.getCondCode(ISD::getSetCCInverse(CCVal, LHS.getValueType()));
+  }
+
+  SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
+  return DAG.getNode(RISCVISD::SELECT_CC, DL, VT, Ops);
+}
+
+// Lower ISD::BRCOND to RISCVISD::BR_CC, folding an XLenVT SETCC condition
+// directly into the branch when possible.
+SDValue RISCVTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  SDValue Chain = Op.getOperand(0);
+  SDValue Cond = Op.getOperand(1);
+  SDValue Dest = Op.getOperand(2);
+  MVT XLenVT = Subtarget.getXLenVT();
+
+  // Merge an XLenVT SETCC into the branch so the integer compare+branch
+  // instructions can be used.
+  if (Cond.getOpcode() == ISD::SETCC &&
+      Cond.getOperand(0).getValueType() == XLenVT) {
+    SDValue LHS = Cond.getOperand(0);
+    SDValue RHS = Cond.getOperand(1);
+    ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
+    // May canonicalize LHS/RHS/CC in place for the branch instructions.
+    translateSetCCForBranch(DL, LHS, RHS, CC, DAG);
+    return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Chain, LHS,
+                       RHS, DAG.getCondCode(CC), Dest);
+  }
+
+  // Otherwise branch on (cond != 0).
+  return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Chain, Cond,
+                     DAG.getConstant(0, DL, XLenVT),
+                     DAG.getCondCode(ISD::SETNE), Dest);
+}
+
+// Lower ISD::VASTART by storing the frame index of the vararg save area
+// (recorded in RISCVMachineFunctionInfo) through the va_list pointer operand.
+SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
+  MachineFunction &MF = DAG.getMachineFunction();
+  RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
+
+  SDLoc DL(Op);
+  SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
+                                 getPointerTy(MF.getDataLayout()));
+
+  // vastart just stores the address of the VarArgsFrameIndex slot into the
+  // memory location argument.
+  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
+  return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
+                      MachinePointerInfo(SV));
+}
+
+// Lower ISD::FRAMEADDR. Depth 0 is a copy from the frame register; deeper
+// frames are reached by repeatedly loading the saved frame pointer, which is
+// found at offset -2*XLEN bytes from the current frame address.
+SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
+                                            SelectionDAG &DAG) const {
+  const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
+  MachineFunction &MF = DAG.getMachineFunction();
+  MachineFrameInfo &MFI = MF.getFrameInfo();
+  MFI.setFrameAddressIsTaken(true);
+  Register FrameReg = RI.getFrameRegister(MF);
+  int XLenInBytes = Subtarget.getXLen() / 8;
+
+  EVT VT = Op.getValueType();
+  SDLoc DL(Op);
+  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
+  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+  // Walk up Depth frames by chasing the saved frame pointers.
+  while (Depth--) {
+    int Offset = -(XLenInBytes * 2);
+    SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
+                              DAG.getIntPtrConstant(Offset, DL));
+    FrameAddr =
+        DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
+  }
+  return FrameAddr;
+}
+
+// Lower ISD::RETURNADDR. Depth 0 reads the return-address register as an
+// implicit live-in; deeper frames load the saved return address found at
+// offset -XLEN bytes from the corresponding frame address.
+SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
+                                             SelectionDAG &DAG) const {
+  const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
+  MachineFunction &MF = DAG.getMachineFunction();
+  MachineFrameInfo &MFI = MF.getFrameInfo();
+  MFI.setReturnAddressIsTaken(true);
+  MVT XLenVT = Subtarget.getXLenVT();
+  int XLenInBytes = Subtarget.getXLen() / 8;
+
+  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
+    return SDValue();
+
+  EVT VT = Op.getValueType();
+  SDLoc DL(Op);
+  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+  if (Depth) {
+    // Reuse FRAMEADDR lowering to find the right frame, then load the return
+    // address stored just below it.
+    int Off = -XLenInBytes;
+    SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
+    SDValue Offset = DAG.getConstant(Off, DL, VT);
+    return DAG.getLoad(VT, DL, DAG.getEntryNode(),
+                       DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
+                       MachinePointerInfo());
+  }
+
+  // Return the value of the return address register, marking it an implicit
+  // live-in.
+  Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
+  return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
+}
+
+// Lower ISD::SHL_PARTS: shift a {Lo, Hi} pair of XLEN-sized values left by
+// Shamt, which may be >= XLEN. Both expansion arms below are computed and the
+// result is selected on (Shamt - XLEN < 0).
+SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
+                                                 SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  SDValue Lo = Op.getOperand(0);
+  SDValue Hi = Op.getOperand(1);
+  SDValue Shamt = Op.getOperand(2);
+  EVT VT = Lo.getValueType();
+
+  // if Shamt-XLEN < 0: // Shamt < XLEN
+  //   Lo = Lo << Shamt
+  //   Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 ^ Shamt))
+  // else:
+  //   Lo = 0
+  //   Hi = Lo << (Shamt-XLEN)
+
+  SDValue Zero = DAG.getConstant(0, DL, VT);
+  SDValue One = DAG.getConstant(1, DL, VT);
+  SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
+  SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
+  SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
+  SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
+
+  SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
+  // The (Lo >>u 1) >>u (XLEN-1 - Shamt) form keeps the shift amount in
+  // [0, XLEN-1] so it is well-defined even when Shamt == 0.
+  SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
+  SDValue ShiftRightLo =
+      DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt);
+  SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
+  SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
+  SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen);
+
+  SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
+
+  Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
+  Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
+
+  SDValue Parts[2] = {Lo, Hi};
+  return DAG.getMergeValues(Parts, DL);
+}
+
+// Lower ISD::SRA_PARTS/SRL_PARTS: shift a {Lo, Hi} pair of XLEN-sized values
+// right by Shamt, which may be >= XLEN. IsSRA selects arithmetic vs. logical;
+// the result is selected on (Shamt - XLEN < 0).
+SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
+                                                  bool IsSRA) const {
+  SDLoc DL(Op);
+  SDValue Lo = Op.getOperand(0);
+  SDValue Hi = Op.getOperand(1);
+  SDValue Shamt = Op.getOperand(2);
+  EVT VT = Lo.getValueType();
+
+  // SRA expansion:
+  //   if Shamt-XLEN < 0: // Shamt < XLEN
+  //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (ShAmt ^ XLEN-1))
+  //     Hi = Hi >>s Shamt
+  //   else:
+  //     Lo = Hi >>s (Shamt-XLEN);
+  //     Hi = Hi >>s (XLEN-1)
+  //
+  // SRL expansion:
+  //   if Shamt-XLEN < 0: // Shamt < XLEN
+  //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (ShAmt ^ XLEN-1))
+  //     Hi = Hi >>u Shamt
+  //   else:
+  //     Lo = Hi >>u (Shamt-XLEN);
+  //     Hi = 0;
+
+  unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;
+
+  SDValue Zero = DAG.getConstant(0, DL, VT);
+  SDValue One = DAG.getConstant(1, DL, VT);
+  SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
+  SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
+  SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
+  SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
+
+  SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
+  // The (Hi << 1) << (XLEN-1 - Shamt) form keeps the shift amount in
+  // [0, XLEN-1] so it is well-defined even when Shamt == 0.
+  SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
+  SDValue ShiftLeftHi =
+      DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt);
+  SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
+  SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
+  SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen);
+  SDValue HiFalse =
+      IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero;
+
+  SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
+
+  Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
+  Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
+
+  SDValue Parts[2] = {Lo, Hi};
+  return DAG.getMergeValues(Parts, DL);
+}
+
+// Lower splats of i1 types to SETCC. For each mask vector type, we have a
+// legal equivalently-sized i8 type, so we can use that as a go-between.
+SDValue RISCVTargetLowering::lowerVectorMaskSplat(SDValue Op,
+                                                  SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  MVT VT = Op.getSimpleValueType();
+  SDValue SplatVal = Op.getOperand(0);
+  // All-zeros or all-ones splats are handled specially.
+  if (ISD::isConstantSplatVectorAllOnes(Op.getNode())) {
+    SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
+    return DAG.getNode(RISCVISD::VMSET_VL, DL, VT, VL);
+  }
+  if (ISD::isConstantSplatVectorAllZeros(Op.getNode())) {
+    SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
+    return DAG.getNode(RISCVISD::VMCLR_VL, DL, VT, VL);
+  }
+  MVT XLenVT = Subtarget.getXLenVT();
+  assert(SplatVal.getValueType() == XLenVT &&
+         "Unexpected type for i1 splat value");
+  // Otherwise mask the scalar down to its low bit, splat it into an i8
+  // vector, and compare against zero to produce the mask vector.
+  MVT InterVT = VT.changeVectorElementType(MVT::i8);
+  SplatVal = DAG.getNode(ISD::AND, DL, XLenVT, SplatVal,
+                         DAG.getConstant(1, DL, XLenVT));
+  SDValue LHS = DAG.getSplatVector(InterVT, DL, SplatVal);
+  SDValue Zero = DAG.getConstant(0, DL, InterVT);
+  return DAG.getSetCC(DL, VT, LHS, Zero, ISD::SETNE);
+}
+
+// Custom-lower a SPLAT_VECTOR_PARTS where XLEN<SEW, as the SEW element type is
+// illegal (currently only vXi64 RV32). The 64-bit splat value arrives as a
+// {Lo, Hi} pair of i32 operands.
+// FIXME: We could also catch non-constant sign-extended i32 values and lower
+// them to VMV_V_X_VL.
+SDValue RISCVTargetLowering::lowerSPLAT_VECTOR_PARTS(SDValue Op,
+                                                     SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  MVT VecVT = Op.getSimpleValueType();
+  assert(!Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64 &&
+         "Unexpected SPLAT_VECTOR_PARTS lowering");
+
+  assert(Op.getNumOperands() == 2 && "Unexpected number of operands!");
+  SDValue Lo = Op.getOperand(0);
+  SDValue Hi = Op.getOperand(1);
+
+  // Fixed-length vectors are splatted in their canonical scalable container
+  // and converted back.
+  if (VecVT.isFixedLengthVector()) {
+    MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
+    SDLoc DL(Op);
+    auto VL = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).second;
+
+    SDValue Res =
+        splatPartsI64WithVL(DL, ContainerVT, SDValue(), Lo, Hi, VL, DAG);
+    return convertFromScalableVector(VecVT, Res, DAG, Subtarget);
+  }
+
+  if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
+    int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
+    int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
+    // If Hi constant is all the same sign bit as Lo, lower this as a custom
+    // node in order to try and match RVV vector/scalar instructions.
+    if ((LoC >> 31) == HiC)
+      return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT),
+                         Lo, DAG.getRegister(RISCV::X0, MVT::i32));
+  }
+
+  // Detect cases where Hi is (SRA Lo, 31) which means Hi is Lo sign extended.
+  if (Hi.getOpcode() == ISD::SRA && Hi.getOperand(0) == Lo &&
+      isa<ConstantSDNode>(Hi.getOperand(1)) &&
+      Hi.getConstantOperandVal(1) == 31)
+    return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT), Lo,
+                       DAG.getRegister(RISCV::X0, MVT::i32));
+
+  // Fall back to use a stack store and stride x0 vector load. Use X0 as VL.
+  return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VecVT,
+                     DAG.getUNDEF(VecVT), Lo, Hi,
+                     DAG.getRegister(RISCV::X0, MVT::i32));
+}
+
+// Custom-lower extensions from mask vectors by using a vselect either with 1
+// for zero/any-extension or -1 for sign-extension:
+//   (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0)
+// Note that any-extension is lowered identically to zero-extension.
+SDValue RISCVTargetLowering::lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
+                                                int64_t ExtTrueVal) const {
+  SDLoc DL(Op);
+  MVT VecVT = Op.getSimpleValueType();
+  SDValue Src = Op.getOperand(0);
+  // Only custom-lower extensions from mask types
+  assert(Src.getValueType().isVector() &&
+         Src.getValueType().getVectorElementType() == MVT::i1);
+
+  // Scalable vectors can use a plain VSELECT between splatted constants.
+  if (VecVT.isScalableVector()) {
+    SDValue SplatZero = DAG.getConstant(0, DL, VecVT);
+    SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, VecVT);
+    return DAG.getNode(ISD::VSELECT, DL, VecVT, Src, SplatTrueVal, SplatZero);
+  }
+
+  // Fixed-length vectors: operate on the scalable container with VL-predicated
+  // splats and a VSELECT_VL, then convert back.
+  MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
+  MVT I1ContainerVT =
+      MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
+
+  SDValue CC = convertToScalableVector(I1ContainerVT, Src, DAG, Subtarget);
+
+  SDValue VL = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).second;
+
+  MVT XLenVT = Subtarget.getXLenVT();
+  SDValue SplatZero = DAG.getConstant(0, DL, XLenVT);
+  SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, XLenVT);
+
+  SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
+                          DAG.getUNDEF(ContainerVT), SplatZero, VL);
+  SplatTrueVal = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
+                             DAG.getUNDEF(ContainerVT), SplatTrueVal, VL);
+  SDValue Select = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC,
+                               SplatTrueVal, SplatZero, VL);
+
+  return convertFromScalableVector(VecVT, Select, DAG, Subtarget);
+}
+
+// Custom-lower extensions of fixed-length vectors by converting the source to
+// a scalable container, emitting the given VL-predicated extend opcode, and
+// converting the result back to the fixed-length type.
+SDValue RISCVTargetLowering::lowerFixedLengthVectorExtendToRVV(
+    SDValue Op, SelectionDAG &DAG, unsigned ExtendOpc) const {
+  MVT ExtVT = Op.getSimpleValueType();
+  // Only custom-lower extensions from fixed-length vector types.
+  if (!ExtVT.isFixedLengthVector())
+    return Op;
+  MVT VT = Op.getOperand(0).getSimpleValueType();
+  // Grab the canonical container type for the extended type. Infer the smaller
+  // type from that to ensure the same number of vector elements, as we know
+  // the LMUL will be sufficient to hold the smaller type.
+  MVT ContainerExtVT = getContainerForFixedLengthVector(ExtVT);
+  // Get the extended container type manually to ensure the same number of
+  // vector elements between source and dest.
+  MVT ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
+                                     ContainerExtVT.getVectorElementCount());
+
+  SDValue Op1 =
+      convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
+
+  SDLoc DL(Op);
+  auto [Mask, VL] = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
+
+  SDValue Ext = DAG.getNode(ExtendOpc, DL, ContainerExtVT, Op1, Mask, VL);
+
+  return convertFromScalableVector(ExtVT, Ext, DAG, Subtarget);
+}
+
+// Custom-lower truncations from vectors to mask vectors by using a mask and a
+// setcc operation:
+//   (vXi1 = trunc vXiN vec) -> (vXi1 = setcc (and vec, 1), 0, ne)
+// Handles both ISD::TRUNCATE and ISD::VP_TRUNCATE.
+SDValue RISCVTargetLowering::lowerVectorMaskTruncLike(SDValue Op,
+                                                      SelectionDAG &DAG) const {
+  bool IsVPTrunc = Op.getOpcode() == ISD::VP_TRUNCATE;
+  SDLoc DL(Op);
+  EVT MaskVT = Op.getValueType();
+  // Only expect to custom-lower truncations to mask types
+  assert(MaskVT.isVector() && MaskVT.getVectorElementType() == MVT::i1 &&
+         "Unexpected type for vector mask lowering");
+  SDValue Src = Op.getOperand(0);
+  MVT VecVT = Src.getSimpleValueType();
+  SDValue Mask, VL;
+  // VP truncates carry their own mask and VL operands.
+  if (IsVPTrunc) {
+    Mask = Op.getOperand(1);
+    VL = Op.getOperand(2);
+  }
+  // If this is a fixed vector, we need to convert it to a scalable vector.
+  MVT ContainerVT = VecVT;
+
+  if (VecVT.isFixedLengthVector()) {
+    ContainerVT = getContainerForFixedLengthVector(VecVT);
+    Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
+    if (IsVPTrunc) {
+      MVT MaskContainerVT =
+          getContainerForFixedLengthVector(Mask.getSimpleValueType());
+      Mask = convertToScalableVector(MaskContainerVT, Mask, DAG, Subtarget);
+    }
+  }
+
+  // Non-VP truncates use the default all-ones mask and natural VL.
+  if (!IsVPTrunc) {
+    std::tie(Mask, VL) =
+        getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
+  }
+
+  SDValue SplatOne = DAG.getConstant(1, DL, Subtarget.getXLenVT());
+  SDValue SplatZero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
+
+  SplatOne = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
+                         DAG.getUNDEF(ContainerVT), SplatOne, VL);
+  SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
+                          DAG.getUNDEF(ContainerVT), SplatZero, VL);
+
+  // Emit (and vec, 1) != 0 as VL-predicated AND_VL + SETCC_VL.
+  MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
+  SDValue Trunc = DAG.getNode(RISCVISD::AND_VL, DL, ContainerVT, Src, SplatOne,
+                              DAG.getUNDEF(ContainerVT), Mask, VL);
+  Trunc = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskContainerVT,
+                      {Trunc, SplatZero, DAG.getCondCode(ISD::SETNE),
+                       DAG.getUNDEF(MaskContainerVT), Mask, VL});
+  if (MaskVT.isFixedLengthVector())
+    Trunc = convertFromScalableVector(MaskVT, Trunc, DAG, Subtarget);
+  return Trunc;
+}
+
+// Lower ISD::TRUNCATE / ISD::VP_TRUNCATE of vectors. Mask-typed results are
+// delegated to lowerVectorMaskTruncLike; all other truncates are narrowed one
+// power-of-two element width at a time with TRUNCATE_VECTOR_VL.
+SDValue RISCVTargetLowering::lowerVectorTruncLike(SDValue Op,
+                                                  SelectionDAG &DAG) const {
+  bool IsVPTrunc = Op.getOpcode() == ISD::VP_TRUNCATE;
+  SDLoc DL(Op);
+
+  MVT VT = Op.getSimpleValueType();
+  // Only custom-lower vector truncates
+  assert(VT.isVector() && "Unexpected type for vector truncate lowering");
+
+  // Truncates to mask types are handled differently
+  if (VT.getVectorElementType() == MVT::i1)
+    return lowerVectorMaskTruncLike(Op, DAG);
+
+  // RVV only has truncates which operate from SEW*2->SEW, so lower arbitrary
+  // truncates as a series of "RISCVISD::TRUNCATE_VECTOR_VL" nodes which
+  // truncate by one power of two at a time.
+  MVT DstEltVT = VT.getVectorElementType();
+
+  SDValue Src = Op.getOperand(0);
+  MVT SrcVT = Src.getSimpleValueType();
+  MVT SrcEltVT = SrcVT.getVectorElementType();
+
+  assert(DstEltVT.bitsLT(SrcEltVT) && isPowerOf2_64(DstEltVT.getSizeInBits()) &&
+         isPowerOf2_64(SrcEltVT.getSizeInBits()) &&
+         "Unexpected vector truncate lowering");
+
+  MVT ContainerVT = SrcVT;
+  SDValue Mask, VL;
+  // VP truncates carry their own mask and VL operands.
+  if (IsVPTrunc) {
+    Mask = Op.getOperand(1);
+    VL = Op.getOperand(2);
+  }
+  if (SrcVT.isFixedLengthVector()) {
+    ContainerVT = getContainerForFixedLengthVector(SrcVT);
+    Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
+    if (IsVPTrunc) {
+      MVT MaskVT = getMaskTypeFor(ContainerVT);
+      Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
+    }
+  }
+
+  SDValue Result = Src;
+  if (!IsVPTrunc) {
+    std::tie(Mask, VL) =
+        getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget);
+  }
+
+  // Halve the element width repeatedly until the destination width is reached.
+  LLVMContext &Context = *DAG.getContext();
+  const ElementCount Count = ContainerVT.getVectorElementCount();
+  do {
+    SrcEltVT = MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2);
+    EVT ResultVT = EVT::getVectorVT(Context, SrcEltVT, Count);
+    Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, ResultVT, Result,
+                         Mask, VL);
+  } while (SrcEltVT != DstEltVT);
+
+  if (SrcVT.isFixedLengthVector())
+    Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
+
+  return Result;
+}
+
+// Lower vector FP_EXTEND/FP_ROUND and their VP forms. Conversions of a single
+// element-width step are emitted directly; f64<->f16 conversions go through an
+// intermediate f32 step (using round-to-odd for the narrowing direction).
+SDValue
+RISCVTargetLowering::lowerVectorFPExtendOrRoundLike(SDValue Op,
+                                                    SelectionDAG &DAG) const {
+  bool IsVP =
+      Op.getOpcode() == ISD::VP_FP_ROUND || Op.getOpcode() == ISD::VP_FP_EXTEND;
+  bool IsExtend =
+      Op.getOpcode() == ISD::VP_FP_EXTEND || Op.getOpcode() == ISD::FP_EXTEND;
+  // RVV can only do truncate fp to types half the size as the source. We
+  // custom-lower f64->f16 rounds via RVV's round-to-odd float
+  // conversion instruction.
+  SDLoc DL(Op);
+  MVT VT = Op.getSimpleValueType();
+
+  assert(VT.isVector() && "Unexpected type for vector truncate lowering");
+
+  SDValue Src = Op.getOperand(0);
+  MVT SrcVT = Src.getSimpleValueType();
+
+  // "Direct" conversions change element width by exactly one step; everything
+  // else (f16<->f64) needs the intermediate f32 conversion below.
+  bool IsDirectExtend = IsExtend && (VT.getVectorElementType() != MVT::f64 ||
+                                     SrcVT.getVectorElementType() != MVT::f16);
+  bool IsDirectTrunc = !IsExtend && (VT.getVectorElementType() != MVT::f16 ||
+                                     SrcVT.getVectorElementType() != MVT::f64);
+
+  bool IsDirectConv = IsDirectExtend || IsDirectTrunc;
+
+  // Prepare any fixed-length vector operands.
+  MVT ContainerVT = VT;
+  SDValue Mask, VL;
+  if (IsVP) {
+    Mask = Op.getOperand(1);
+    VL = Op.getOperand(2);
+  }
+  if (VT.isFixedLengthVector()) {
+    MVT SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
+    ContainerVT =
+        SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
+    Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
+    if (IsVP) {
+      MVT MaskVT = getMaskTypeFor(ContainerVT);
+      Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
+    }
+  }
+
+  if (!IsVP)
+    std::tie(Mask, VL) =
+        getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget);
+
+  unsigned ConvOpc = IsExtend ? RISCVISD::FP_EXTEND_VL : RISCVISD::FP_ROUND_VL;
+
+  if (IsDirectConv) {
+    Src = DAG.getNode(ConvOpc, DL, ContainerVT, Src, Mask, VL);
+    if (VT.isFixedLengthVector())
+      Src = convertFromScalableVector(VT, Src, DAG, Subtarget);
+    return Src;
+  }
+
+  // Two-step conversion through f32; narrowing uses round-to-odd for the
+  // first step to avoid double rounding.
+  unsigned InterConvOpc =
+      IsExtend ? RISCVISD::FP_EXTEND_VL : RISCVISD::VFNCVT_ROD_VL;
+
+  MVT InterVT = ContainerVT.changeVectorElementType(MVT::f32);
+  SDValue IntermediateConv =
+      DAG.getNode(InterConvOpc, DL, InterVT, Src, Mask, VL);
+  SDValue Result =
+      DAG.getNode(ConvOpc, DL, ContainerVT, IntermediateConv, Mask, VL);
+  if (VT.isFixedLengthVector())
+    return convertFromScalableVector(VT, Result, DAG, Subtarget);
+  return Result;
+}
+
+// Custom-legalize INSERT_VECTOR_ELT so that the value is inserted into the
+// first position of a vector, and that vector is slid up to the insert index.
+// By limiting the active vector length to index+1 and merging with the
+// original vector (with an undisturbed tail policy for elements >= VL), we
+// achieve the desired result of leaving all elements untouched except the one
+// at VL-1, which is replaced with the desired value.
+SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
+                                                    SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  MVT VecVT = Op.getSimpleValueType();
+  SDValue Vec = Op.getOperand(0);
+  SDValue Val = Op.getOperand(1);
+  SDValue Idx = Op.getOperand(2);
+
+  if (VecVT.getVectorElementType() == MVT::i1) {
+    // FIXME: For now we just promote to an i8 vector and insert into that,
+    // but this is probably not optimal.
+    MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
+    Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
+    Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, WideVT, Vec, Val, Idx);
+    return DAG.getNode(ISD::TRUNCATE, DL, VecVT, Vec);
+  }
+
+  MVT ContainerVT = VecVT;
+  // If the operand is a fixed-length vector, convert to a scalable one.
+  if (VecVT.isFixedLengthVector()) {
+    ContainerVT = getContainerForFixedLengthVector(VecVT);
+    Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
+  }
+
+  MVT XLenVT = Subtarget.getXLenVT();
+
+  SDValue Zero = DAG.getConstant(0, DL, XLenVT);
+  // i64 scalars on RV32 cannot be moved into a vector element directly.
+  bool IsLegalInsert = Subtarget.is64Bit() || Val.getValueType() != MVT::i64;
+  // Even i64-element vectors on RV32 can be lowered without scalar
+  // legalization if the most-significant 32 bits of the value are not affected
+  // by the sign-extension of the lower 32 bits.
+  // TODO: We could also catch sign extensions of a 32-bit value.
+  if (!IsLegalInsert && isa<ConstantSDNode>(Val)) {
+    const auto *CVal = cast<ConstantSDNode>(Val);
+    if (isInt<32>(CVal->getSExtValue())) {
+      IsLegalInsert = true;
+      Val = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
+    }
+  }
+
+  auto [Mask, VL] = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
+
+  SDValue ValInVec;
+
+  if (IsLegalInsert) {
+    unsigned Opc =
+        VecVT.isFloatingPoint() ? RISCVISD::VFMV_S_F_VL : RISCVISD::VMV_S_X_VL;
+    // Index 0 insertion needs no slide at all.
+    if (isNullConstant(Idx)) {
+      Vec = DAG.getNode(Opc, DL, ContainerVT, Vec, Val, VL);
+      if (!VecVT.isFixedLengthVector())
+        return Vec;
+      return convertFromScalableVector(VecVT, Vec, DAG, Subtarget);
+    }
+    ValInVec = lowerScalarInsert(Val, VL, ContainerVT, DL, DAG, Subtarget);
+  } else {
+    // On RV32, i64-element vectors must be specially handled to place the
+    // value at element 0, by using two vslide1down instructions in sequence on
+    // the i32 split lo/hi value. Use an equivalently-sized i32 vector for
+    // this.
+    SDValue One = DAG.getConstant(1, DL, XLenVT);
+    SDValue ValLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, Zero);
+    SDValue ValHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, One);
+    MVT I32ContainerVT =
+        MVT::getVectorVT(MVT::i32, ContainerVT.getVectorElementCount() * 2);
+    SDValue I32Mask =
+        getDefaultScalableVLOps(I32ContainerVT, DL, DAG, Subtarget).first;
+    // Limit the active VL to two.
+    SDValue InsertI64VL = DAG.getConstant(2, DL, XLenVT);
+    // If the Idx is 0 we can insert directly into the vector.
+    if (isNullConstant(Idx)) {
+      // First slide in the lo value, then the hi in above it. We use slide1down
+      // to avoid the register group overlap constraint of vslide1up.
+      ValInVec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32ContainerVT,
+                             Vec, Vec, ValLo, I32Mask, InsertI64VL);
+      // If the source vector is undef don't pass along the tail elements from
+      // the previous slide1down.
+      SDValue Tail = Vec.isUndef() ? Vec : ValInVec;
+      ValInVec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32ContainerVT,
+                             Tail, ValInVec, ValHi, I32Mask, InsertI64VL);
+      // Bitcast back to the right container type.
+      ValInVec = DAG.getBitcast(ContainerVT, ValInVec);
+
+      if (!VecVT.isFixedLengthVector())
+        return ValInVec;
+      return convertFromScalableVector(VecVT, ValInVec, DAG, Subtarget);
+    }
+
+    // First slide in the lo value, then the hi in above it. We use slide1down
+    // to avoid the register group overlap constraint of vslide1up.
+    ValInVec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32ContainerVT,
+                           DAG.getUNDEF(I32ContainerVT),
+                           DAG.getUNDEF(I32ContainerVT), ValLo,
+                           I32Mask, InsertI64VL);
+    ValInVec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32ContainerVT,
+                           DAG.getUNDEF(I32ContainerVT), ValInVec, ValHi,
+                           I32Mask, InsertI64VL);
+    // Bitcast back to the right container type.
+    ValInVec = DAG.getBitcast(ContainerVT, ValInVec);
+  }
+
+  // Now that the value is in a vector, slide it into position.
+  SDValue InsertVL =
+      DAG.getNode(ISD::ADD, DL, XLenVT, Idx, DAG.getConstant(1, DL, XLenVT));
+
+  // Use tail agnostic policy if Idx is the last index of Vec.
+  unsigned Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED;
+  if (VecVT.isFixedLengthVector() && isa<ConstantSDNode>(Idx) &&
+      cast<ConstantSDNode>(Idx)->getZExtValue() + 1 ==
+          VecVT.getVectorNumElements())
+    Policy = RISCVII::TAIL_AGNOSTIC;
+  SDValue Slideup = getVSlideup(DAG, Subtarget, DL, ContainerVT, Vec, ValInVec,
+                                Idx, Mask, InsertVL, Policy);
+  if (!VecVT.isFixedLengthVector())
+    return Slideup;
+  return convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
+}
+
+// Custom-lower EXTRACT_VECTOR_ELT operations to slide the vector down, then
+// extract the first element: (extractelt (slidedown vec, idx), 0). For integer
+// types this is done using VMV_X_S to allow us to glean information about the
+// sign bits of the result.
+SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
+                                                     SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  SDValue Idx = Op.getOperand(1);
+  SDValue Vec = Op.getOperand(0);
+  EVT EltVT = Op.getValueType();
+  MVT VecVT = Vec.getSimpleValueType();
+  MVT XLenVT = Subtarget.getXLenVT();
+
+  if (VecVT.getVectorElementType() == MVT::i1) {
+    // Use vfirst.m to extract the first bit.
+    if (isNullConstant(Idx)) {
+      MVT ContainerVT = VecVT;
+      if (VecVT.isFixedLengthVector()) {
+        ContainerVT = getContainerForFixedLengthVector(VecVT);
+        Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
+      }
+      auto [Mask, VL] = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
+      // vfirst.m returns 0 iff element 0 of the mask is set.
+      SDValue Vfirst =
+          DAG.getNode(RISCVISD::VFIRST_VL, DL, XLenVT, Vec, Mask, VL);
+      return DAG.getSetCC(DL, XLenVT, Vfirst, DAG.getConstant(0, DL, XLenVT),
+                          ISD::SETEQ);
+    }
+    if (VecVT.isFixedLengthVector()) {
+      unsigned NumElts = VecVT.getVectorNumElements();
+      // For larger fixed-length masks, bitcast to a vector of wider integer
+      // elements and extract the bit from a GPR instead of widening to i8.
+      if (NumElts >= 8) {
+        MVT WideEltVT;
+        unsigned WidenVecLen;
+        SDValue ExtractElementIdx;
+        SDValue ExtractBitIdx;
+        unsigned MaxEEW = Subtarget.getELEN();
+        MVT LargestEltVT = MVT::getIntegerVT(
+            std::min(MaxEEW, unsigned(XLenVT.getSizeInBits())));
+        if (NumElts <= LargestEltVT.getSizeInBits()) {
+          assert(isPowerOf2_32(NumElts) &&
+                 "the number of elements should be power of 2");
+          WideEltVT = MVT::getIntegerVT(NumElts);
+          WidenVecLen = 1;
+          ExtractElementIdx = DAG.getConstant(0, DL, XLenVT);
+          ExtractBitIdx = Idx;
+        } else {
+          WideEltVT = LargestEltVT;
+          WidenVecLen = NumElts / WideEltVT.getSizeInBits();
+          // extract element index = index / element width
+          ExtractElementIdx = DAG.getNode(
+              ISD::SRL, DL, XLenVT, Idx,
+              DAG.getConstant(Log2_64(WideEltVT.getSizeInBits()), DL, XLenVT));
+          // mask bit index = index % element width
+          ExtractBitIdx = DAG.getNode(
+              ISD::AND, DL, XLenVT, Idx,
+              DAG.getConstant(WideEltVT.getSizeInBits() - 1, DL, XLenVT));
+        }
+        MVT WideVT = MVT::getVectorVT(WideEltVT, WidenVecLen);
+        Vec = DAG.getNode(ISD::BITCAST, DL, WideVT, Vec);
+        SDValue ExtractElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, XLenVT,
+                                         Vec, ExtractElementIdx);
+        // Extract the bit from GPR.
+        SDValue ShiftRight =
+            DAG.getNode(ISD::SRL, DL, XLenVT, ExtractElt, ExtractBitIdx);
+        return DAG.getNode(ISD::AND, DL, XLenVT, ShiftRight,
+                           DAG.getConstant(1, DL, XLenVT));
+      }
+    }
+    // Otherwise, promote to an i8 vector and extract from that.
+    MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
+    Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
+    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec, Idx);
+  }
+
+  // If this is a fixed vector, we need to convert it to a scalable vector.
+  MVT ContainerVT = VecVT;
+  if (VecVT.isFixedLengthVector()) {
+    ContainerVT = getContainerForFixedLengthVector(VecVT);
+    Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
+  }
+
+  // If the index is 0, the vector is already in the right position.
+  if (!isNullConstant(Idx)) {
+    // Use a VL of 1 to avoid processing more elements than we need.
+    auto [Mask, VL] = getDefaultVLOps(1, ContainerVT, DL, DAG, Subtarget);
+    Vec = getVSlidedown(DAG, Subtarget, DL, ContainerVT,
+                        DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
+  }
+
+  if (!EltVT.isInteger()) {
+    // Floating-point extracts are handled in TableGen.
+    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec,
+                       DAG.getConstant(0, DL, XLenVT));
+  }
+
+  // Read element 0 into a GPR and truncate to the requested element type.
+  SDValue Elt0 = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
+  return DAG.getNode(ISD::TRUNCATE, DL, EltVT, Elt0);
+}
+
+// Some RVV intrinsics may claim that they want an integer operand to be
+// promoted or expanded.
+static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG,
+ const RISCVSubtarget &Subtarget) {
+ assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
+ Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
+ "Unexpected opcode");
+
+ if (!Subtarget.hasVInstructions())
+ return SDValue();
+
+ bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
+ unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
+ SDLoc DL(Op);
+
+ const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
+ RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
+ if (!II || !II->hasScalarOperand())
+ return SDValue();
+
+ unsigned SplatOp = II->ScalarOperand + 1 + HasChain;
+ assert(SplatOp < Op.getNumOperands());
+
+ SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
+ SDValue &ScalarOp = Operands[SplatOp];
+ MVT OpVT = ScalarOp.getSimpleValueType();
+ MVT XLenVT = Subtarget.getXLenVT();
+
+ // If this isn't a scalar, or its type is XLenVT we're done.
+ if (!OpVT.isScalarInteger() || OpVT == XLenVT)
+ return SDValue();
+
+ // Simplest case is that the operand needs to be promoted to XLenVT.
+ if (OpVT.bitsLT(XLenVT)) {
+ // If the operand is a constant, sign extend to increase our chances
+ // of being able to use a .vi instruction. ANY_EXTEND would become a
+ // a zero extend and the simm5 check in isel would fail.
+ // FIXME: Should we ignore the upper bits in isel instead?
+ unsigned ExtOpc =
+ isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
+ ScalarOp = DAG.getNode(ExtOpc, DL, XLenVT, ScalarOp);
+ return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
+ }
+
+ // Use the previous operand to get the vXi64 VT. The result might be a mask
+ // VT for compares. Using the previous operand assumes that the previous
+ // operand will never have a smaller element size than a scalar operand and
+ // that a widening operation never uses SEW=64.
+ // NOTE: If this fails the below assert, we can probably just find the
+ // element count from any operand or result and use it to construct the VT.
+ assert(II->ScalarOperand > 0 && "Unexpected splat operand!");
+ MVT VT = Op.getOperand(SplatOp - 1).getSimpleValueType();
+
+ // The more complex case is when the scalar is larger than XLenVT.
+ assert(XLenVT == MVT::i32 && OpVT == MVT::i64 &&
+ VT.getVectorElementType() == MVT::i64 && "Unexpected VTs!");
+
+ // If this is a sign-extended 32-bit value, we can truncate it and rely on the
+ // instruction to sign-extend since SEW>XLEN.
+ if (DAG.ComputeNumSignBits(ScalarOp) > 32) {
+ ScalarOp = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, ScalarOp);
+ return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
+ }
+
+ switch (IntNo) {
+ case Intrinsic::riscv_vslide1up:
+ case Intrinsic::riscv_vslide1down:
+ case Intrinsic::riscv_vslide1up_mask:
+ case Intrinsic::riscv_vslide1down_mask: {
+ // We need to special case these when the scalar is larger than XLen.
+ unsigned NumOps = Op.getNumOperands();
+ bool IsMasked = NumOps == 7;
+
+ // Convert the vector source to the equivalent nxvXi32 vector.
+ MVT I32VT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
+ SDValue Vec = DAG.getBitcast(I32VT, Operands[2]);
+
+ SDValue ScalarLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, ScalarOp,
+ DAG.getConstant(0, DL, XLenVT));
+ SDValue ScalarHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, ScalarOp,
+ DAG.getConstant(1, DL, XLenVT));
+
+ // Double the VL since we halved SEW.
+ SDValue AVL = getVLOperand(Op);
+ SDValue I32VL;
+
+ // Optimize for constant AVL
+ if (isa<ConstantSDNode>(AVL)) {
+ unsigned EltSize = VT.getScalarSizeInBits();
+ unsigned MinSize = VT.getSizeInBits().getKnownMinValue();
+
+ unsigned VectorBitsMax = Subtarget.getRealMaxVLen();
+ unsigned MaxVLMAX =
+ RISCVTargetLowering::computeVLMAX(VectorBitsMax, EltSize, MinSize);
+
+ unsigned VectorBitsMin = Subtarget.getRealMinVLen();
+ unsigned MinVLMAX =
+ RISCVTargetLowering::computeVLMAX(VectorBitsMin, EltSize, MinSize);
+
+ uint64_t AVLInt = cast<ConstantSDNode>(AVL)->getZExtValue();
+ if (AVLInt <= MinVLMAX) {
+ I32VL = DAG.getConstant(2 * AVLInt, DL, XLenVT);
+ } else if (AVLInt >= 2 * MaxVLMAX) {
+ // Just set vl to VLMAX in this situation
+ RISCVII::VLMUL Lmul = RISCVTargetLowering::getLMUL(I32VT);
+ SDValue LMUL = DAG.getConstant(Lmul, DL, XLenVT);
+ unsigned Sew = RISCVVType::encodeSEW(I32VT.getScalarSizeInBits());
+ SDValue SEW = DAG.getConstant(Sew, DL, XLenVT);
+ SDValue SETVLMAX = DAG.getTargetConstant(
+ Intrinsic::riscv_vsetvlimax_opt, DL, MVT::i32);
+ I32VL = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, XLenVT, SETVLMAX, SEW,
+ LMUL);
+ } else {
+ // For AVL between (MinVLMAX, 2 * MaxVLMAX), the actual working vl
+ // is related to the hardware implementation.
+ // So let the following code handle
+ }
+ }
+ if (!I32VL) {
+ RISCVII::VLMUL Lmul = RISCVTargetLowering::getLMUL(VT);
+ SDValue LMUL = DAG.getConstant(Lmul, DL, XLenVT);
+ unsigned Sew = RISCVVType::encodeSEW(VT.getScalarSizeInBits());
+ SDValue SEW = DAG.getConstant(Sew, DL, XLenVT);
+ SDValue SETVL =
+ DAG.getTargetConstant(Intrinsic::riscv_vsetvli_opt, DL, MVT::i32);
+ // Using vsetvli instruction to get actually used length which related to
+ // the hardware implementation
+ SDValue VL = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, XLenVT, SETVL, AVL,
+ SEW, LMUL);
+ I32VL =
+ DAG.getNode(ISD::SHL, DL, XLenVT, VL, DAG.getConstant(1, DL, XLenVT));
+ }
+
+ SDValue I32Mask = getAllOnesMask(I32VT, I32VL, DL, DAG);
+
+ // Shift the two scalar parts in using SEW=32 slide1up/slide1down
+ // instructions.
+ SDValue Passthru;
+ if (IsMasked)
+ Passthru = DAG.getUNDEF(I32VT);
+ else
+ Passthru = DAG.getBitcast(I32VT, Operands[1]);
+
+ if (IntNo == Intrinsic::riscv_vslide1up ||
+ IntNo == Intrinsic::riscv_vslide1up_mask) {
+ Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Passthru, Vec,
+ ScalarHi, I32Mask, I32VL);
+ Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Passthru, Vec,
+ ScalarLo, I32Mask, I32VL);
+ } else {
+ Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Passthru, Vec,
+ ScalarLo, I32Mask, I32VL);
+ Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Passthru, Vec,
+ ScalarHi, I32Mask, I32VL);
+ }
+
+ // Convert back to nxvXi64.
+ Vec = DAG.getBitcast(VT, Vec);
+
+ if (!IsMasked)
+ return Vec;
+ // Apply mask after the operation.
+ SDValue Mask = Operands[NumOps - 3];
+ SDValue MaskedOff = Operands[1];
+ // Assume Policy operand is the last operand.
+ uint64_t Policy =
+ cast<ConstantSDNode>(Operands[NumOps - 1])->getZExtValue();
+ // We don't need to select maskedoff if it's undef.
+ if (MaskedOff.isUndef())
+ return Vec;
+ // TAMU
+ if (Policy == RISCVII::TAIL_AGNOSTIC)
+ return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, Mask, Vec, MaskedOff,
+ AVL);
+ // TUMA or TUMU: Currently we always emit tumu policy regardless of tuma.
+ // It's fine because vmerge does not care mask policy.
+ return DAG.getNode(RISCVISD::VP_MERGE_VL, DL, VT, Mask, Vec, MaskedOff,
+ AVL);
+ }
+ }
+
+ // We need to convert the scalar to a splat vector.
+ SDValue VL = getVLOperand(Op);
+ assert(VL.getValueType() == XLenVT);
+ ScalarOp = splatSplitI64WithVL(DL, VT, SDValue(), ScalarOp, VL, DAG);
+ return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
+}
+
+// Custom-lower ISD::INTRINSIC_WO_CHAIN nodes. A handful of scalar/bitmanip
+// and vector intrinsics are expanded directly here; everything else falls
+// through to lowerVectorIntrinsicScalars, which handles the generic
+// "scalar operand wider than XLEN" splitting for RVV intrinsics.
+SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
+                                                     SelectionDAG &DAG) const {
+  unsigned IntNo = Op.getConstantOperandVal(0);
+  SDLoc DL(Op);
+  MVT XLenVT = Subtarget.getXLenVT();
+
+  switch (IntNo) {
+  default:
+    break; // Don't custom lower most intrinsics.
+  case Intrinsic::thread_pointer: {
+    // The thread pointer is the dedicated x4 (tp) register.
+    EVT PtrVT = getPointerTy(DAG.getDataLayout());
+    return DAG.getRegister(RISCV::X4, PtrVT);
+  }
+  case Intrinsic::riscv_orc_b:
+  case Intrinsic::riscv_brev8: {
+    // Zbb/Zbkb bit-manipulation: map 1:1 onto the target nodes.
+    unsigned Opc =
+        IntNo == Intrinsic::riscv_brev8 ? RISCVISD::BREV8 : RISCVISD::ORC_B;
+    return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1));
+  }
+  case Intrinsic::riscv_zip:
+  case Intrinsic::riscv_unzip: {
+    unsigned Opc =
+        IntNo == Intrinsic::riscv_zip ? RISCVISD::ZIP : RISCVISD::UNZIP;
+    return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1));
+  }
+  case Intrinsic::riscv_vmv_x_s:
+    assert(Op.getValueType() == XLenVT && "Unexpected VT!");
+    return DAG.getNode(RISCVISD::VMV_X_S, DL, Op.getValueType(),
+                       Op.getOperand(1));
+  case Intrinsic::riscv_vfmv_f_s:
+    // Reading FP element 0 is just an extract of lane 0.
+    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, Op.getValueType(),
+                       Op.getOperand(1), DAG.getConstant(0, DL, XLenVT));
+  case Intrinsic::riscv_vmv_v_x:
+    return lowerScalarSplat(Op.getOperand(1), Op.getOperand(2),
+                            Op.getOperand(3), Op.getSimpleValueType(), DL, DAG,
+                            Subtarget);
+  case Intrinsic::riscv_vfmv_v_f:
+    return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, Op.getValueType(),
+                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
+  case Intrinsic::riscv_vmv_s_x: {
+    SDValue Scalar = Op.getOperand(2);
+
+    // Easy case: the scalar fits in one GPR; any-extend it to XLenVT and
+    // emit the VL node directly.
+    if (Scalar.getValueType().bitsLE(XLenVT)) {
+      Scalar = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Scalar);
+      return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, Op.getValueType(),
+                         Op.getOperand(1), Scalar, Op.getOperand(3));
+    }
+
+    assert(Scalar.getValueType() == MVT::i64 && "Unexpected scalar VT!");
+
+    // This is an i64 value that lives in two scalar registers. We have to
+    // insert this in a convoluted way. First we build vXi64 splat containing
+    // the two values that we assemble using some bit math. Next we'll use
+    // vid.v and vmseq to build a mask with bit 0 set. Then we'll use that mask
+    // to merge element 0 from our splat into the source vector.
+    // FIXME: This is probably not the best way to do this, but it is
+    // consistent with INSERT_VECTOR_ELT lowering so it is a good starting
+    // point.
+    //   sw lo, (a0)
+    //   sw hi, 4(a0)
+    //   vlse vX, (a0)
+    //
+    //   vid.v      vVid
+    //   vmseq.vx   mMask, vVid, 0
+    //   vmerge.vvm vDest, vSrc, vVal, mMask
+    MVT VT = Op.getSimpleValueType();
+    SDValue Vec = Op.getOperand(1);
+    SDValue VL = getVLOperand(Op);
+
+    SDValue SplattedVal = splatSplitI64WithVL(DL, VT, SDValue(), Scalar, VL, DAG);
+    // If the destination is undef, the splat alone is the answer.
+    if (Op.getOperand(1).isUndef())
+      return SplattedVal;
+    SDValue SplattedIdx =
+        DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
+                    DAG.getConstant(0, DL, MVT::i32), VL);
+
+    MVT MaskVT = getMaskTypeFor(VT);
+    SDValue Mask = getAllOnesMask(VT, VL, DL, DAG);
+    SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
+    SDValue SelectCond =
+        DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT,
+                    {VID, SplattedIdx, DAG.getCondCode(ISD::SETEQ),
+                     DAG.getUNDEF(MaskVT), Mask, VL});
+    return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, SelectCond, SplattedVal,
+                       Vec, VL);
+  }
+  }
+
+  return lowerVectorIntrinsicScalars(Op, DAG, Subtarget);
+}
+
+// Custom-lower ISD::INTRINSIC_W_CHAIN nodes (intrinsics that carry a chain,
+// i.e. memory-touching ones). Strided loads and segment loads are expanded
+// here; everything else defers to lowerVectorIntrinsicScalars.
+SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
+                                                    SelectionDAG &DAG) const {
+  unsigned IntNo = Op.getConstantOperandVal(1);
+  switch (IntNo) {
+  default:
+    break;
+  case Intrinsic::riscv_masked_strided_load: {
+    SDLoc DL(Op);
+    MVT XLenVT = Subtarget.getXLenVT();
+
+    // If the mask is known to be all ones, optimize to an unmasked intrinsic;
+    // the selection of the masked intrinsics doesn't do this for us.
+    SDValue Mask = Op.getOperand(5);
+    bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
+
+    MVT VT = Op->getSimpleValueType(0);
+    MVT ContainerVT = VT;
+    if (VT.isFixedLengthVector())
+      ContainerVT = getContainerForFixedLengthVector(VT);
+
+    SDValue PassThru = Op.getOperand(2);
+    if (!IsUnmasked) {
+      MVT MaskVT = getMaskTypeFor(ContainerVT);
+      if (VT.isFixedLengthVector()) {
+        Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
+        PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
+      }
+    }
+
+    auto *Load = cast<MemIntrinsicSDNode>(Op);
+    SDValue VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
+    SDValue Ptr = Op.getOperand(3);
+    SDValue Stride = Op.getOperand(4);
+    SDValue Result, Chain;
+
+    // A zero-stride unmasked load reads one element and broadcasts it; emit
+    // a scalar load plus splat instead of a vector memory op.
+    // TODO: We restrict this to unmasked loads currently in consideration of
+    // the complexity of handling all falses masks.
+    if (IsUnmasked && isNullConstant(Stride)) {
+      MVT ScalarVT = ContainerVT.getVectorElementType();
+      SDValue ScalarLoad =
+          DAG.getExtLoad(ISD::ZEXTLOAD, DL, XLenVT, Load->getChain(), Ptr,
+                         ScalarVT, Load->getMemOperand());
+      Chain = ScalarLoad.getValue(1);
+      Result = lowerScalarSplat(SDValue(), ScalarLoad, VL, ContainerVT, DL, DAG,
+                                Subtarget);
+    } else {
+      // General case: lower to the (masked) vlse intrinsic.
+      SDValue IntID = DAG.getTargetConstant(
+          IsUnmasked ? Intrinsic::riscv_vlse : Intrinsic::riscv_vlse_mask, DL,
+          XLenVT);
+
+      SmallVector<SDValue, 8> Ops{Load->getChain(), IntID};
+      if (IsUnmasked)
+        Ops.push_back(DAG.getUNDEF(ContainerVT));
+      else
+        Ops.push_back(PassThru);
+      Ops.push_back(Ptr);
+      Ops.push_back(Stride);
+      if (!IsUnmasked)
+        Ops.push_back(Mask);
+      Ops.push_back(VL);
+      if (!IsUnmasked) {
+        SDValue Policy =
+            DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT);
+        Ops.push_back(Policy);
+      }
+
+      SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
+      Result =
+          DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
+                                  Load->getMemoryVT(), Load->getMemOperand());
+      Chain = Result.getValue(1);
+    }
+    if (VT.isFixedLengthVector())
+      Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
+    return DAG.getMergeValues({Result, Chain}, DL);
+  }
+  case Intrinsic::riscv_seg2_load:
+  case Intrinsic::riscv_seg3_load:
+  case Intrinsic::riscv_seg4_load:
+  case Intrinsic::riscv_seg5_load:
+  case Intrinsic::riscv_seg6_load:
+  case Intrinsic::riscv_seg7_load:
+  case Intrinsic::riscv_seg8_load: {
+    SDLoc DL(Op);
+    static const Intrinsic::ID VlsegInts[7] = {
+        Intrinsic::riscv_vlseg2, Intrinsic::riscv_vlseg3,
+        Intrinsic::riscv_vlseg4, Intrinsic::riscv_vlseg5,
+        Intrinsic::riscv_vlseg6, Intrinsic::riscv_vlseg7,
+        Intrinsic::riscv_vlseg8};
+    // NF = number of result vectors; the last value of the node is the chain.
+    unsigned NF = Op->getNumValues() - 1;
+    assert(NF >= 2 && NF <= 8 && "Unexpected seg number");
+    MVT XLenVT = Subtarget.getXLenVT();
+    MVT VT = Op->getSimpleValueType(0);
+    MVT ContainerVT = getContainerForFixedLengthVector(VT);
+
+    SDValue VL = getVLOp(VT.getVectorNumElements(), DL, DAG, Subtarget);
+    SDValue IntID = DAG.getTargetConstant(VlsegInts[NF - 2], DL, XLenVT);
+    auto *Load = cast<MemIntrinsicSDNode>(Op);
+    SmallVector<EVT, 9> ContainerVTs(NF, ContainerVT);
+    ContainerVTs.push_back(MVT::Other);
+    SDVTList VTs = DAG.getVTList(ContainerVTs);
+    SmallVector<SDValue, 12> Ops = {Load->getChain(), IntID};
+    Ops.insert(Ops.end(), NF, DAG.getUNDEF(ContainerVT));
+    Ops.push_back(Op.getOperand(2));
+    Ops.push_back(VL);
+    SDValue Result =
+        DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
+                                Load->getMemoryVT(), Load->getMemOperand());
+    // Convert each scalable result back to the requested fixed-length VT.
+    SmallVector<SDValue, 9> Results;
+    for (unsigned int RetIdx = 0; RetIdx < NF; RetIdx++)
+      Results.push_back(convertFromScalableVector(VT, Result.getValue(RetIdx),
+                                                  DAG, Subtarget));
+    Results.push_back(Result.getValue(NF));
+    return DAG.getMergeValues(Results, DL);
+  }
+  }
+
+  return lowerVectorIntrinsicScalars(Op, DAG, Subtarget);
+}
+
+// Custom-lower ISD::INTRINSIC_VOID nodes (chain-only intrinsics with no
+// results). Currently only the masked strided store is expanded, to the
+// (masked) vsse intrinsic; all others return SDValue() for default handling.
+SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
+                                                 SelectionDAG &DAG) const {
+  unsigned IntNo = Op.getConstantOperandVal(1);
+  switch (IntNo) {
+  default:
+    break;
+  case Intrinsic::riscv_masked_strided_store: {
+    SDLoc DL(Op);
+    MVT XLenVT = Subtarget.getXLenVT();
+
+    // If the mask is known to be all ones, optimize to an unmasked intrinsic;
+    // the selection of the masked intrinsics doesn't do this for us.
+    SDValue Mask = Op.getOperand(5);
+    bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
+
+    SDValue Val = Op.getOperand(2);
+    MVT VT = Val.getSimpleValueType();
+    MVT ContainerVT = VT;
+    if (VT.isFixedLengthVector()) {
+      ContainerVT = getContainerForFixedLengthVector(VT);
+      Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
+    }
+    if (!IsUnmasked) {
+      MVT MaskVT = getMaskTypeFor(ContainerVT);
+      if (VT.isFixedLengthVector())
+        Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
+    }
+
+    SDValue VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
+
+    SDValue IntID = DAG.getTargetConstant(
+        IsUnmasked ? Intrinsic::riscv_vsse : Intrinsic::riscv_vsse_mask, DL,
+        XLenVT);
+
+    auto *Store = cast<MemIntrinsicSDNode>(Op);
+    SmallVector<SDValue, 8> Ops{Store->getChain(), IntID};
+    Ops.push_back(Val);
+    Ops.push_back(Op.getOperand(3)); // Ptr
+    Ops.push_back(Op.getOperand(4)); // Stride
+    if (!IsUnmasked)
+      Ops.push_back(Mask);
+    Ops.push_back(VL);
+
+    return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, Store->getVTList(),
+                                   Ops, Store->getMemoryVT(),
+                                   Store->getMemOperand());
+  }
+  }
+
+  return SDValue();
+}
+
+// Map a generic integer ISD::VECREDUCE_* opcode to the corresponding
+// VL-predicated RISCVISD reduction node. Unhandled opcodes are a
+// programming error (unreachable).
+static unsigned getRVVReductionOp(unsigned ISDOpcode) {
+  switch (ISDOpcode) {
+  default:
+    llvm_unreachable("Unhandled reduction");
+  case ISD::VECREDUCE_ADD:
+    return RISCVISD::VECREDUCE_ADD_VL;
+  case ISD::VECREDUCE_UMAX:
+    return RISCVISD::VECREDUCE_UMAX_VL;
+  case ISD::VECREDUCE_SMAX:
+    return RISCVISD::VECREDUCE_SMAX_VL;
+  case ISD::VECREDUCE_UMIN:
+    return RISCVISD::VECREDUCE_UMIN_VL;
+  case ISD::VECREDUCE_SMIN:
+    return RISCVISD::VECREDUCE_SMIN_VL;
+  case ISD::VECREDUCE_AND:
+    return RISCVISD::VECREDUCE_AND_VL;
+  case ISD::VECREDUCE_OR:
+    return RISCVISD::VECREDUCE_OR_VL;
+  case ISD::VECREDUCE_XOR:
+    return RISCVISD::VECREDUCE_XOR_VL;
+  }
+}
+
+// Lower AND/OR/XOR reductions of i1 (mask) vectors, both the plain
+// VECREDUCE form and the VP (vector-predicated) form. All three are
+// implemented via vcpop (population count of the mask) plus a scalar
+// compare:
+//   AND -> vcpop(~x) == 0,  OR -> vcpop(x) != 0,  XOR -> (vcpop(x) & 1) != 0
+// For the VP form the start value is folded in with the scalar BaseOpc.
+SDValue RISCVTargetLowering::lowerVectorMaskVecReduction(SDValue Op,
+                                                         SelectionDAG &DAG,
+                                                         bool IsVP) const {
+  SDLoc DL(Op);
+  SDValue Vec = Op.getOperand(IsVP ? 1 : 0);
+  MVT VecVT = Vec.getSimpleValueType();
+  assert((Op.getOpcode() == ISD::VECREDUCE_AND ||
+          Op.getOpcode() == ISD::VECREDUCE_OR ||
+          Op.getOpcode() == ISD::VECREDUCE_XOR ||
+          Op.getOpcode() == ISD::VP_REDUCE_AND ||
+          Op.getOpcode() == ISD::VP_REDUCE_OR ||
+          Op.getOpcode() == ISD::VP_REDUCE_XOR) &&
+         "Unexpected reduction lowering");
+
+  MVT XLenVT = Subtarget.getXLenVT();
+  assert(Op.getValueType() == XLenVT &&
+         "Expected reduction output to be legalized to XLenVT");
+
+  MVT ContainerVT = VecVT;
+  if (VecVT.isFixedLengthVector()) {
+    ContainerVT = getContainerForFixedLengthVector(VecVT);
+    Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
+  }
+
+  SDValue Mask, VL;
+  if (IsVP) {
+    Mask = Op.getOperand(2);
+    VL = Op.getOperand(3);
+  } else {
+    std::tie(Mask, VL) =
+        getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
+  }
+
+  // BaseOpc is the scalar opcode used below (VP path only) to combine the
+  // reduction result with the start value.
+  unsigned BaseOpc;
+  ISD::CondCode CC;
+  SDValue Zero = DAG.getConstant(0, DL, XLenVT);
+
+  switch (Op.getOpcode()) {
+  default:
+    llvm_unreachable("Unhandled reduction");
+  case ISD::VECREDUCE_AND:
+  case ISD::VP_REDUCE_AND: {
+    // vcpop ~x == 0
+    SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
+    Vec = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Vec, TrueMask, VL);
+    Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
+    CC = ISD::SETEQ;
+    BaseOpc = ISD::AND;
+    break;
+  }
+  case ISD::VECREDUCE_OR:
+  case ISD::VP_REDUCE_OR:
+    // vcpop x != 0
+    Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
+    CC = ISD::SETNE;
+    BaseOpc = ISD::OR;
+    break;
+  case ISD::VECREDUCE_XOR:
+  case ISD::VP_REDUCE_XOR: {
+    // ((vcpop x) & 1) != 0
+    SDValue One = DAG.getConstant(1, DL, XLenVT);
+    Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
+    Vec = DAG.getNode(ISD::AND, DL, XLenVT, Vec, One);
+    CC = ISD::SETNE;
+    BaseOpc = ISD::XOR;
+    break;
+  }
+  }
+
+  SDValue SetCC = DAG.getSetCC(DL, XLenVT, Vec, Zero, CC);
+
+  if (!IsVP)
+    return SetCC;
+
+  // Now include the start value in the operation.
+  // Note that we must return the start value when no elements are operated
+  // upon. The vcpop instructions we've emitted in each case above will return
+  // 0 for an inactive vector, and so we've already received the neutral value:
+  // AND gives us (0 == 0) -> 1 and OR/XOR give us (0 != 0) -> 0. Therefore we
+  // can simply include the start value.
+  return DAG.getNode(BaseOpc, DL, XLenVT, SetCC, Op.getOperand(0));
+}
+
+// Return true if AVL is statically known to be non-zero: either the X0
+// register (which as an AVL encodes VLMAX) or a constant >= 1. Used to
+// decide whether a reduction can reuse its AVL for the scalar insert.
+static bool hasNonZeroAVL(SDValue AVL) {
+  auto *RegisterAVL = dyn_cast<RegisterSDNode>(AVL);
+  auto *ImmAVL = dyn_cast<ConstantSDNode>(AVL);
+  return (RegisterAVL && RegisterAVL->getReg() == RISCV::X0) ||
+         (ImmAVL && ImmAVL->getZExtValue() >= 1);
+}
+
+/// Helper to lower a reduction sequence of the form:
+///   scalar = reduce_op vec, scalar_start
+/// Inserts StartValue into an LMUL<=1 vector, widens it to the LMUL1 type
+/// the RVV reduction instructions require, emits the VL-predicated RVVOpcode
+/// reduction, and extracts element 0 as the ResVT scalar result.
+static SDValue lowerReductionSeq(unsigned RVVOpcode, MVT ResVT,
+                                 SDValue StartValue, SDValue Vec, SDValue Mask,
+                                 SDValue VL, SDLoc DL, SelectionDAG &DAG,
+                                 const RISCVSubtarget &Subtarget) {
+  const MVT VecVT = Vec.getSimpleValueType();
+  const MVT M1VT = getLMUL1VT(VecVT);
+  const MVT XLenVT = Subtarget.getXLenVT();
+  const bool NonZeroAVL = hasNonZeroAVL(VL);
+
+  // The reduction needs an LMUL1 input; do the splat at either LMUL1
+  // or the original VT if fractional.
+  auto InnerVT = VecVT.bitsLE(M1VT) ? VecVT : M1VT;
+  // We reuse the VL of the reduction to reduce vsetvli toggles if we can
+  // prove it is non-zero. For the AVL=0 case, we need the scalar to
+  // be the result of the reduction operation.
+  auto InnerVL = NonZeroAVL ? VL : DAG.getConstant(1, DL, XLenVT);
+  SDValue InitialValue = lowerScalarInsert(StartValue, InnerVL, InnerVT, DL,
+                                           DAG, Subtarget);
+  if (M1VT != InnerVT)
+    InitialValue = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, M1VT,
+                               DAG.getUNDEF(M1VT),
+                               InitialValue, DAG.getConstant(0, DL, XLenVT));
+  // With a possibly-zero AVL, keep InitialValue as the passthru so the
+  // start value survives an empty reduction.
+  SDValue PassThru = NonZeroAVL ? DAG.getUNDEF(M1VT) : InitialValue;
+  SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, PassThru, Vec,
+                                  InitialValue, Mask, VL);
+  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Reduction,
+                     DAG.getConstant(0, DL, XLenVT));
+}
+
+// Lower an integer ISD::VECREDUCE_* node. Over-wide vectors are first split
+// manually (combining halves with the scalar base opcode) until the type is
+// legal, then the reduction is emitted via lowerReductionSeq with the base
+// opcode's neutral element as the start value.
+SDValue RISCVTargetLowering::lowerVECREDUCE(SDValue Op,
+                                            SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  SDValue Vec = Op.getOperand(0);
+  EVT VecEVT = Vec.getValueType();
+
+  unsigned BaseOpc = ISD::getVecReduceBaseOpcode(Op.getOpcode());
+
+  // Due to ordering in legalize types we may have a vector type that needs to
+  // be split. Do that manually so we can get down to a legal type.
+  while (getTypeAction(*DAG.getContext(), VecEVT) ==
+         TargetLowering::TypeSplitVector) {
+    auto [Lo, Hi] = DAG.SplitVector(Vec, DL);
+    VecEVT = Lo.getValueType();
+    Vec = DAG.getNode(BaseOpc, DL, VecEVT, Lo, Hi);
+  }
+
+  // TODO: The type may need to be widened rather than split. Or widened before
+  // it can be split.
+  if (!isTypeLegal(VecEVT))
+    return SDValue();
+
+  MVT VecVT = VecEVT.getSimpleVT();
+  MVT VecEltVT = VecVT.getVectorElementType();
+  unsigned RVVOpcode = getRVVReductionOp(Op.getOpcode());
+
+  MVT ContainerVT = VecVT;
+  if (VecVT.isFixedLengthVector()) {
+    ContainerVT = getContainerForFixedLengthVector(VecVT);
+    Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
+  }
+
+  auto [Mask, VL] = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
+
+  SDValue NeutralElem =
+      DAG.getNeutralElement(BaseOpc, DL, VecEltVT, SDNodeFlags());
+  return lowerReductionSeq(RVVOpcode, Op.getSimpleValueType(), NeutralElem, Vec,
+                           Mask, VL, DL, DAG, Subtarget);
+}
+
+// Given a reduction op, this function returns the matching reduction opcode,
+// the vector SDValue and the scalar SDValue required to lower this to a
+// RISCVISD node. Note SEQ_FADD takes its start scalar in operand 0 and the
+// vector in operand 1, unlike the other reductions.
+static std::tuple<unsigned, SDValue, SDValue>
+getRVVFPReductionOpAndOperands(SDValue Op, SelectionDAG &DAG, EVT EltVT) {
+  SDLoc DL(Op);
+  auto Flags = Op->getFlags();
+  unsigned Opcode = Op.getOpcode();
+  unsigned BaseOpcode = ISD::getVecReduceBaseOpcode(Opcode);
+  switch (Opcode) {
+  default:
+    llvm_unreachable("Unhandled reduction");
+  case ISD::VECREDUCE_FADD: {
+    // Use positive zero if we can. It is cheaper to materialize.
+    SDValue Zero =
+        DAG.getConstantFP(Flags.hasNoSignedZeros() ? 0.0 : -0.0, DL, EltVT);
+    return std::make_tuple(RISCVISD::VECREDUCE_FADD_VL, Op.getOperand(0), Zero);
+  }
+  case ISD::VECREDUCE_SEQ_FADD:
+    return std::make_tuple(RISCVISD::VECREDUCE_SEQ_FADD_VL, Op.getOperand(1),
+                           Op.getOperand(0));
+  case ISD::VECREDUCE_FMIN:
+    return std::make_tuple(RISCVISD::VECREDUCE_FMIN_VL, Op.getOperand(0),
+                           DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
+  case ISD::VECREDUCE_FMAX:
+    return std::make_tuple(RISCVISD::VECREDUCE_FMAX_VL, Op.getOperand(0),
+                           DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
+  }
+}
+
+// Lower a floating-point VECREDUCE_* node: pick the RVV opcode, vector and
+// start scalar via getRVVFPReductionOpAndOperands, then emit the common
+// reduction sequence.
+SDValue RISCVTargetLowering::lowerFPVECREDUCE(SDValue Op,
+                                              SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  MVT VecEltVT = Op.getSimpleValueType();
+
+  unsigned RVVOpcode;
+  SDValue VectorVal, ScalarVal;
+  std::tie(RVVOpcode, VectorVal, ScalarVal) =
+      getRVVFPReductionOpAndOperands(Op, DAG, VecEltVT);
+  MVT VecVT = VectorVal.getSimpleValueType();
+
+  MVT ContainerVT = VecVT;
+  if (VecVT.isFixedLengthVector()) {
+    ContainerVT = getContainerForFixedLengthVector(VecVT);
+    VectorVal = convertToScalableVector(ContainerVT, VectorVal, DAG, Subtarget);
+  }
+
+  auto [Mask, VL] = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
+  return lowerReductionSeq(RVVOpcode, Op.getSimpleValueType(), ScalarVal,
+                           VectorVal, Mask, VL, DL, DAG, Subtarget);
+}
+
+// Map a vector-predicated ISD::VP_REDUCE_* opcode (integer and FP) to the
+// corresponding VL-predicated RISCVISD reduction node.
+static unsigned getRVVVPReductionOp(unsigned ISDOpcode) {
+  switch (ISDOpcode) {
+  default:
+    llvm_unreachable("Unhandled reduction");
+  case ISD::VP_REDUCE_ADD:
+    return RISCVISD::VECREDUCE_ADD_VL;
+  case ISD::VP_REDUCE_UMAX:
+    return RISCVISD::VECREDUCE_UMAX_VL;
+  case ISD::VP_REDUCE_SMAX:
+    return RISCVISD::VECREDUCE_SMAX_VL;
+  case ISD::VP_REDUCE_UMIN:
+    return RISCVISD::VECREDUCE_UMIN_VL;
+  case ISD::VP_REDUCE_SMIN:
+    return RISCVISD::VECREDUCE_SMIN_VL;
+  case ISD::VP_REDUCE_AND:
+    return RISCVISD::VECREDUCE_AND_VL;
+  case ISD::VP_REDUCE_OR:
+    return RISCVISD::VECREDUCE_OR_VL;
+  case ISD::VP_REDUCE_XOR:
+    return RISCVISD::VECREDUCE_XOR_VL;
+  case ISD::VP_REDUCE_FADD:
+    return RISCVISD::VECREDUCE_FADD_VL;
+  case ISD::VP_REDUCE_SEQ_FADD:
+    return RISCVISD::VECREDUCE_SEQ_FADD_VL;
+  case ISD::VP_REDUCE_FMAX:
+    return RISCVISD::VECREDUCE_FMAX_VL;
+  case ISD::VP_REDUCE_FMIN:
+    return RISCVISD::VECREDUCE_FMIN_VL;
+  }
+}
+
+// Lower a vector-predicated reduction (ISD::VP_REDUCE_*). Operand layout:
+// (start, vec, mask, evl). The explicit mask/EVL are passed straight through
+// to the common reduction sequence.
+SDValue RISCVTargetLowering::lowerVPREDUCE(SDValue Op,
+                                           SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  SDValue Vec = Op.getOperand(1);
+  EVT VecEVT = Vec.getValueType();
+
+  // TODO: The type may need to be widened rather than split. Or widened before
+  // it can be split.
+  if (!isTypeLegal(VecEVT))
+    return SDValue();
+
+  MVT VecVT = VecEVT.getSimpleVT();
+  unsigned RVVOpcode = getRVVVPReductionOp(Op.getOpcode());
+
+  if (VecVT.isFixedLengthVector()) {
+    auto ContainerVT = getContainerForFixedLengthVector(VecVT);
+    Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
+  }
+
+  SDValue VL = Op.getOperand(3);
+  SDValue Mask = Op.getOperand(2);
+  return lowerReductionSeq(RVVOpcode, Op.getSimpleValueType(), Op.getOperand(0),
+                           Vec, Mask, VL, DL, DAG, Subtarget);
+}
+
+// Lower ISD::INSERT_SUBVECTOR for RVV. Three regimes:
+//  * i1 (mask) subvectors are first re-expressed as i8 vectors (or, failing
+//    that, zero-extended to i8, inserted, and compared back down);
+//  * fixed-length subvectors are slid up into the (containerized) vector;
+//  * scalable subvectors use subregister decomposition, with a LMUL=1
+//    extract/vslideup/insert dance when the index is not register-aligned.
+SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
+                                                   SelectionDAG &DAG) const {
+  SDValue Vec = Op.getOperand(0);
+  SDValue SubVec = Op.getOperand(1);
+  MVT VecVT = Vec.getSimpleValueType();
+  MVT SubVecVT = SubVec.getSimpleValueType();
+
+  SDLoc DL(Op);
+  MVT XLenVT = Subtarget.getXLenVT();
+  unsigned OrigIdx = Op.getConstantOperandVal(2);
+  const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
+
+  // We don't have the ability to slide mask vectors up indexed by their i1
+  // elements; the smallest we can do is i8. Often we are able to bitcast to
+  // equivalent i8 vectors. Note that when inserting a fixed-length vector
+  // into a scalable one, we might not necessarily have enough scalable
+  // elements to safely divide by 8: nxv1i1 = insert nxv1i1, v4i1 is valid.
+  if (SubVecVT.getVectorElementType() == MVT::i1 &&
+      (OrigIdx != 0 || !Vec.isUndef())) {
+    if (VecVT.getVectorMinNumElements() >= 8 &&
+        SubVecVT.getVectorMinNumElements() >= 8) {
+      assert(OrigIdx % 8 == 0 && "Invalid index");
+      assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
+             SubVecVT.getVectorMinNumElements() % 8 == 0 &&
+             "Unexpected mask vector lowering");
+      OrigIdx /= 8;
+      SubVecVT =
+          MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
+                           SubVecVT.isScalableVector());
+      VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
+                               VecVT.isScalableVector());
+      Vec = DAG.getBitcast(VecVT, Vec);
+      SubVec = DAG.getBitcast(SubVecVT, SubVec);
+    } else {
+      // We can't slide this mask vector up indexed by its i1 elements.
+      // This poses a problem when we wish to insert a scalable vector which
+      // can't be re-expressed as a larger type. Just choose the slow path and
+      // extend to a larger type, then truncate back down.
+      MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
+      MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
+      Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
+      SubVec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtSubVecVT, SubVec);
+      Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ExtVecVT, Vec, SubVec,
+                        Op.getOperand(2));
+      SDValue SplatZero = DAG.getConstant(0, DL, ExtVecVT);
+      // Truncate back to i1 by comparing the extended result against zero.
+      return DAG.getSetCC(DL, VecVT, Vec, SplatZero, ISD::SETNE);
+    }
+  }
+
+  // If the subvector vector is a fixed-length type, we cannot use subregister
+  // manipulation to simplify the codegen; we don't know which register of a
+  // LMUL group contains the specific subvector as we only know the minimum
+  // register size. Therefore we must slide the vector group up the full
+  // amount.
+  if (SubVecVT.isFixedLengthVector()) {
+    if (OrigIdx == 0 && Vec.isUndef() && !VecVT.isFixedLengthVector())
+      return Op;
+    MVT ContainerVT = VecVT;
+    if (VecVT.isFixedLengthVector()) {
+      ContainerVT = getContainerForFixedLengthVector(VecVT);
+      Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
+    }
+    SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT,
+                         DAG.getUNDEF(ContainerVT), SubVec,
+                         DAG.getConstant(0, DL, XLenVT));
+    if (OrigIdx == 0 && Vec.isUndef() && VecVT.isFixedLengthVector()) {
+      SubVec = convertFromScalableVector(VecVT, SubVec, DAG, Subtarget);
+      return DAG.getBitcast(Op.getValueType(), SubVec);
+    }
+    SDValue Mask =
+        getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
+    // Set the vector length to only the number of elements we care about. Note
+    // that for slideup this includes the offset.
+    SDValue VL =
+        getVLOp(OrigIdx + SubVecVT.getVectorNumElements(), DL, DAG, Subtarget);
+    SDValue SlideupAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
+
+    // Use tail agnostic policy if OrigIdx is the last index of Vec.
+    unsigned Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED;
+    if (VecVT.isFixedLengthVector() &&
+        OrigIdx + 1 == VecVT.getVectorNumElements())
+      Policy = RISCVII::TAIL_AGNOSTIC;
+    SDValue Slideup = getVSlideup(DAG, Subtarget, DL, ContainerVT, Vec, SubVec,
+                                  SlideupAmt, Mask, VL, Policy);
+    if (VecVT.isFixedLengthVector())
+      Slideup = convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
+    return DAG.getBitcast(Op.getValueType(), Slideup);
+  }
+
+  // Scalable-into-scalable: decompose the index into a subregister index
+  // plus a remainder within that subregister.
+  unsigned SubRegIdx, RemIdx;
+  std::tie(SubRegIdx, RemIdx) =
+      RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
+          VecVT, SubVecVT, OrigIdx, TRI);
+
+  RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecVT);
+  bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
+                         SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
+                         SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
+
+  // 1. If the Idx has been completely eliminated and this subvector's size is
+  // a vector register or a multiple thereof, or the surrounding elements are
+  // undef, then this is a subvector insert which naturally aligns to a vector
+  // register. These can easily be handled using subregister manipulation.
+  // 2. If the subvector is smaller than a vector register, then the insertion
+  // must preserve the undisturbed elements of the register. We do this by
+  // lowering to an EXTRACT_SUBVECTOR grabbing the nearest LMUL=1 vector type
+  // (which resolves to a subregister copy), performing a VSLIDEUP to place the
+  // subvector within the vector register, and an INSERT_SUBVECTOR of that
+  // LMUL=1 type back into the larger vector (resolving to another subregister
+  // operation). See below for how our VSLIDEUP works. We go via a LMUL=1 type
+  // to avoid allocating a large register group to hold our subvector.
+  if (RemIdx == 0 && (!IsSubVecPartReg || Vec.isUndef()))
+    return Op;
+
+  // VSLIDEUP works by leaving elements 0<i<OFFSET undisturbed, elements
+  // OFFSET<=i<VL set to the "subvector" and vl<=i<VLMAX set to the tail policy
+  // (in our case undisturbed). This means we can set up a subvector insertion
+  // where OFFSET is the insertion offset, and the VL is the OFFSET plus the
+  // size of the subvector.
+  MVT InterSubVT = VecVT;
+  SDValue AlignedExtract = Vec;
+  unsigned AlignedIdx = OrigIdx - RemIdx;
+  if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
+    InterSubVT = getLMUL1VT(VecVT);
+    // Extract a subvector equal to the nearest full vector register type. This
+    // should resolve to a EXTRACT_SUBREG instruction.
+    AlignedExtract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
+                                 DAG.getConstant(AlignedIdx, DL, XLenVT));
+  }
+
+  SDValue SlideupAmt = DAG.getConstant(RemIdx, DL, XLenVT);
+  // For scalable vectors this must be further multiplied by vscale.
+  SlideupAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlideupAmt);
+
+  auto [Mask, VL] = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
+
+  // Construct the vector length corresponding to RemIdx + length(SubVecVT).
+  VL = DAG.getConstant(SubVecVT.getVectorMinNumElements(), DL, XLenVT);
+  VL = DAG.getNode(ISD::VSCALE, DL, XLenVT, VL);
+  VL = DAG.getNode(ISD::ADD, DL, XLenVT, SlideupAmt, VL);
+
+  SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InterSubVT,
+                       DAG.getUNDEF(InterSubVT), SubVec,
+                       DAG.getConstant(0, DL, XLenVT));
+
+  SDValue Slideup = getVSlideup(DAG, Subtarget, DL, InterSubVT, AlignedExtract,
+                                SubVec, SlideupAmt, Mask, VL);
+
+  // If required, insert this subvector back into the correct vector register.
+  // This should resolve to an INSERT_SUBREG instruction.
+  if (VecVT.bitsGT(InterSubVT))
+    Slideup = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, Vec, Slideup,
+                          DAG.getConstant(AlignedIdx, DL, XLenVT));
+
+  // We might have bitcast from a mask type: cast back to the original type if
+  // required.
+  return DAG.getBitcast(Op.getSimpleValueType(), Slideup);
+}
+
+// Lower ISD::EXTRACT_SUBVECTOR of a (possibly fixed-length) vector. Mask
+// (i1) subvectors are first re-expressed as i8 vectors where the element
+// counts allow it (or zero-extended and compared back down otherwise),
+// since slides cannot be indexed by i1 elements. Fixed-length extracts
+// slide the whole register group down by the full index; scalable extracts
+// decompose into subregister manipulation plus a vslidedown of any
+// remaining offset.
+SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
+                                                    SelectionDAG &DAG) const {
+  SDValue Vec = Op.getOperand(0);
+  MVT SubVecVT = Op.getSimpleValueType();
+  MVT VecVT = Vec.getSimpleValueType();
+
+  SDLoc DL(Op);
+  MVT XLenVT = Subtarget.getXLenVT();
+  unsigned OrigIdx = Op.getConstantOperandVal(1);
+  const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
+
+  // We don't have the ability to slide mask vectors down indexed by their i1
+  // elements; the smallest we can do is i8. Often we are able to bitcast to
+  // equivalent i8 vectors. Note that when extracting a fixed-length vector
+  // from a scalable one, we might not necessarily have enough scalable
+  // elements to safely divide by 8: v8i1 = extract nxv1i1 is valid.
+  if (SubVecVT.getVectorElementType() == MVT::i1 && OrigIdx != 0) {
+    if (VecVT.getVectorMinNumElements() >= 8 &&
+        SubVecVT.getVectorMinNumElements() >= 8) {
+      assert(OrigIdx % 8 == 0 && "Invalid index");
+      assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
+             SubVecVT.getVectorMinNumElements() % 8 == 0 &&
+             "Unexpected mask vector lowering");
+      OrigIdx /= 8;
+      SubVecVT =
+          MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
+                           SubVecVT.isScalableVector());
+      VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
+                               VecVT.isScalableVector());
+      Vec = DAG.getBitcast(VecVT, Vec);
+    } else {
+      // We can't slide this mask vector down, indexed by its i1 elements.
+      // This poses a problem when we wish to extract a scalable vector which
+      // can't be re-expressed as a larger type. Just choose the slow path and
+      // extend to a larger type, then truncate back down.
+      // TODO: We could probably improve this when extracting certain fixed
+      // from fixed, where we can extract as i8 and shift the correct element
+      // right to reach the desired subvector?
+      MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
+      MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
+      Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
+      Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtSubVecVT, Vec,
+                        Op.getOperand(1));
+      SDValue SplatZero = DAG.getConstant(0, DL, ExtSubVecVT);
+      return DAG.getSetCC(DL, SubVecVT, Vec, SplatZero, ISD::SETNE);
+    }
+  }
+
+  // If the subvector vector is a fixed-length type, we cannot use subregister
+  // manipulation to simplify the codegen; we don't know which register of a
+  // LMUL group contains the specific subvector as we only know the minimum
+  // register size. Therefore we must slide the vector group down the full
+  // amount.
+  if (SubVecVT.isFixedLengthVector()) {
+    // With an index of 0 this is a cast-like subvector, which can be performed
+    // with subregister operations.
+    if (OrigIdx == 0)
+      return Op;
+    MVT ContainerVT = VecVT;
+    if (VecVT.isFixedLengthVector()) {
+      ContainerVT = getContainerForFixedLengthVector(VecVT);
+      Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
+    }
+    SDValue Mask =
+        getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
+    // Set the vector length to only the number of elements we care about. This
+    // avoids sliding down elements we're going to discard straight away.
+    SDValue VL = getVLOp(SubVecVT.getVectorNumElements(), DL, DAG, Subtarget);
+    SDValue SlidedownAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
+    SDValue Slidedown =
+        getVSlidedown(DAG, Subtarget, DL, ContainerVT,
+                      DAG.getUNDEF(ContainerVT), Vec, SlidedownAmt, Mask, VL);
+    // Now we can use a cast-like subvector extract to get the result.
+    Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
+                            DAG.getConstant(0, DL, XLenVT));
+    return DAG.getBitcast(Op.getValueType(), Slidedown);
+  }
+
+  // Decompose the scalable extract into a subregister index and a remaining
+  // element offset within that subregister.
+  unsigned SubRegIdx, RemIdx;
+  std::tie(SubRegIdx, RemIdx) =
+      RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
+          VecVT, SubVecVT, OrigIdx, TRI);
+
+  // If the Idx has been completely eliminated then this is a subvector extract
+  // which naturally aligns to a vector register. These can easily be handled
+  // using subregister manipulation.
+  if (RemIdx == 0)
+    return Op;
+
+  // Else we must shift our vector register directly to extract the subvector.
+  // Do this using VSLIDEDOWN.
+
+  // If the vector type is an LMUL-group type, extract a subvector equal to the
+  // nearest full vector register type. This should resolve to a EXTRACT_SUBREG
+  // instruction.
+  MVT InterSubVT = VecVT;
+  if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
+    InterSubVT = getLMUL1VT(VecVT);
+    Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
+                      DAG.getConstant(OrigIdx - RemIdx, DL, XLenVT));
+  }
+
+  // Slide this vector register down by the desired number of elements in order
+  // to place the desired subvector starting at element 0.
+  SDValue SlidedownAmt = DAG.getConstant(RemIdx, DL, XLenVT);
+  // For scalable vectors this must be further multiplied by vscale.
+  SlidedownAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlidedownAmt);
+
+  auto [Mask, VL] = getDefaultScalableVLOps(InterSubVT, DL, DAG, Subtarget);
+  SDValue Slidedown =
+      getVSlidedown(DAG, Subtarget, DL, InterSubVT, DAG.getUNDEF(InterSubVT),
+                    Vec, SlidedownAmt, Mask, VL);
+
+  // Now the vector is in the right position, extract our final subvector. This
+  // should resolve to a COPY.
+  Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
+                          DAG.getConstant(0, DL, XLenVT));
+
+  // We might have bitcast from a mask type: cast back to the original type if
+  // required.
+  return DAG.getBitcast(Op.getSimpleValueType(), Slidedown);
+}
+
+// Lower step_vector to the vid instruction. Any non-identity step value must
+// be accounted for by manual expansion: a power-of-two step becomes a shift
+// of the vid result, any other step a multiply by a splatted step constant.
+SDValue RISCVTargetLowering::lowerSTEP_VECTOR(SDValue Op,
+                                              SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  MVT VT = Op.getSimpleValueType();
+  assert(VT.isScalableVector() && "Expected scalable vector");
+  MVT XLenVT = Subtarget.getXLenVT();
+  auto [Mask, VL] = getDefaultScalableVLOps(VT, DL, DAG, Subtarget);
+  SDValue StepVec = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
+  uint64_t StepValImm = Op.getConstantOperandVal(0);
+  if (StepValImm != 1) {
+    if (isPowerOf2_64(StepValImm)) {
+      // Splat log2(step) and shift left instead of multiplying.
+      SDValue StepVal =
+          DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
+                      DAG.getConstant(Log2_64(StepValImm), DL, XLenVT), VL);
+      StepVec = DAG.getNode(ISD::SHL, DL, VT, StepVec, StepVal);
+    } else {
+      SDValue StepVal = lowerScalarSplat(
+          SDValue(), DAG.getConstant(StepValImm, DL, VT.getVectorElementType()),
+          VL, VT, DL, DAG, Subtarget);
+      StepVec = DAG.getNode(ISD::MUL, DL, VT, StepVec, StepVal);
+    }
+  }
+  return StepVec;
+}
+
+// Implement vector_reverse using vrgather.vv with indices determined by
+// subtracting the id of each element from (VLMAX-1). This will convert
+// the indices like so:
+// (0, 1,..., VLMAX-2, VLMAX-1) -> (VLMAX-1, VLMAX-2,..., 1, 0).
+// TODO: This code assumes VLMAX <= 65536 for LMUL=8 SEW=16.
+SDValue RISCVTargetLowering::lowerVECTOR_REVERSE(SDValue Op,
+                                                 SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  MVT VecVT = Op.getSimpleValueType();
+  // i1 masks cannot be gathered directly: widen to i8, reverse, and truncate
+  // back to a mask.
+  if (VecVT.getVectorElementType() == MVT::i1) {
+    MVT WidenVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
+    SDValue Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, WidenVT, Op.getOperand(0));
+    SDValue Op2 = DAG.getNode(ISD::VECTOR_REVERSE, DL, WidenVT, Op1);
+    return DAG.getNode(ISD::TRUNCATE, DL, VecVT, Op2);
+  }
+  unsigned EltSize = VecVT.getScalarSizeInBits();
+  unsigned MinSize = VecVT.getSizeInBits().getKnownMinValue();
+  unsigned VectorBitsMax = Subtarget.getRealMaxVLen();
+  unsigned MaxVLMAX =
+      RISCVTargetLowering::computeVLMAX(VectorBitsMax, EltSize, MinSize);
+
+  unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL;
+  MVT IntVT = VecVT.changeVectorElementTypeToInteger();
+
+  // If this is SEW=8 and VLMAX is potentially more than 256, we need
+  // to use vrgatherei16.vv.
+  // TODO: It's also possible to use vrgatherei16.vv for other types to
+  // decrease register width for the index calculation.
+  if (MaxVLMAX > 256 && EltSize == 8) {
+    // If this is LMUL=8, we have to split before can use vrgatherei16.vv.
+    // Reverse each half, then reassemble them in reverse order.
+    // NOTE: It's also possible that after splitting that VLMAX no longer
+    // requires vrgatherei16.vv.
+    if (MinSize == (8 * RISCV::RVVBitsPerBlock)) {
+      auto [Lo, Hi] = DAG.SplitVectorOperand(Op.getNode(), 0);
+      auto [LoVT, HiVT] = DAG.GetSplitDestVTs(VecVT);
+      Lo = DAG.getNode(ISD::VECTOR_REVERSE, DL, LoVT, Lo);
+      Hi = DAG.getNode(ISD::VECTOR_REVERSE, DL, HiVT, Hi);
+      // Reassemble the low and high pieces reversed.
+      // FIXME: This is a CONCAT_VECTORS.
+      SDValue Res =
+          DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, DAG.getUNDEF(VecVT), Hi,
+                      DAG.getIntPtrConstant(0, DL));
+      return DAG.getNode(
+          ISD::INSERT_SUBVECTOR, DL, VecVT, Res, Lo,
+          DAG.getIntPtrConstant(LoVT.getVectorMinNumElements(), DL));
+    }
+
+    // Just promote the int type to i16 which will double the LMUL.
+    IntVT = MVT::getVectorVT(MVT::i16, VecVT.getVectorElementCount());
+    GatherOpc = RISCVISD::VRGATHEREI16_VV_VL;
+  }
+
+  MVT XLenVT = Subtarget.getXLenVT();
+  auto [Mask, VL] = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
+
+  // Calculate VLMAX-1 for the desired SEW.
+  unsigned MinElts = VecVT.getVectorMinNumElements();
+  SDValue VLMax = DAG.getNode(ISD::VSCALE, DL, XLenVT,
+                              getVLOp(MinElts, DL, DAG, Subtarget));
+  SDValue VLMinus1 =
+      DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, DAG.getConstant(1, DL, XLenVT));
+
+  // Splat VLMAX-1 taking care to handle SEW==64 on RV32. A plain splat_vector
+  // of an i64 scalar is illegal on RV32, so use VMV_V_X_VL (which
+  // sign-extends from XLen) with a VLMAX vector length instead.
+  bool IsRV32E64 =
+      !Subtarget.is64Bit() && IntVT.getVectorElementType() == MVT::i64;
+  SDValue SplatVL;
+  if (!IsRV32E64)
+    SplatVL = DAG.getSplatVector(IntVT, DL, VLMinus1);
+  else
+    SplatVL = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntVT, DAG.getUNDEF(IntVT),
+                          VLMinus1, DAG.getRegister(RISCV::X0, XLenVT));
+
+  // Indices = splat(VLMAX-1) - vid, i.e. the element order reversed.
+  SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, IntVT, Mask, VL);
+  SDValue Indices = DAG.getNode(RISCVISD::SUB_VL, DL, IntVT, SplatVL, VID,
+                                DAG.getUNDEF(IntVT), Mask, VL);
+
+  return DAG.getNode(GatherOpc, DL, VecVT, Op.getOperand(0), Indices,
+                     DAG.getUNDEF(VecVT), Mask, VL);
+}
+
+// Lower ISD::VECTOR_SPLICE: slide V1 down by the splice offset, then slide
+// V2 up into the vacated tail, producing the concatenation of the trailing
+// elements of V1 and the leading elements of V2.
+SDValue RISCVTargetLowering::lowerVECTOR_SPLICE(SDValue Op,
+                                                SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  SDValue V1 = Op.getOperand(0);
+  SDValue V2 = Op.getOperand(1);
+  MVT XLenVT = Subtarget.getXLenVT();
+  MVT VecVT = Op.getSimpleValueType();
+
+  unsigned MinElts = VecVT.getVectorMinNumElements();
+  SDValue VLMax = DAG.getNode(ISD::VSCALE, DL, XLenVT,
+                              getVLOp(MinElts, DL, DAG, Subtarget));
+
+  // A non-negative offset counts elements from the start of V1; a negative
+  // offset counts back from the end. Either way the down- and up-slide
+  // amounts sum to VLMAX.
+  int64_t ImmValue = cast<ConstantSDNode>(Op.getOperand(2))->getSExtValue();
+  SDValue DownOffset, UpOffset;
+  if (ImmValue >= 0) {
+    // The operand is a TargetConstant, we need to rebuild it as a regular
+    // constant.
+    DownOffset = DAG.getConstant(ImmValue, DL, XLenVT);
+    UpOffset = DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, DownOffset);
+  } else {
+    // The operand is a TargetConstant, we need to rebuild it as a regular
+    // constant rather than negating the original operand.
+    UpOffset = DAG.getConstant(-ImmValue, DL, XLenVT);
+    DownOffset = DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, UpOffset);
+  }
+
+  SDValue TrueMask = getAllOnesMask(VecVT, VLMax, DL, DAG);
+
+  // The slidedown is limited to UpOffset elements so the slideup can merge
+  // V2's elements into the tail.
+  SDValue SlideDown =
+      getVSlidedown(DAG, Subtarget, DL, VecVT, DAG.getUNDEF(VecVT), V1,
+                    DownOffset, TrueMask, UpOffset);
+  return getVSlideup(DAG, Subtarget, DL, VecVT, SlideDown, V2, UpOffset,
+                     TrueMask, DAG.getRegister(RISCV::X0, XLenVT),
+                     RISCVII::TAIL_AGNOSTIC);
+}
+
+// Lower a fixed-length vector load to a VL-limited riscv_vle (or riscv_vlm
+// for i1 masks) intrinsic on the scalable container type, then convert the
+// result back to the fixed-length type.
+SDValue
+RISCVTargetLowering::lowerFixedLengthVectorLoadToRVV(SDValue Op,
+                                                     SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  auto *Load = cast<LoadSDNode>(Op);
+
+  assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
+                                        Load->getMemoryVT(),
+                                        *Load->getMemOperand()) &&
+         "Expecting a correctly-aligned load");
+
+  MVT VT = Op.getSimpleValueType();
+  MVT XLenVT = Subtarget.getXLenVT();
+  MVT ContainerVT = getContainerForFixedLengthVector(VT);
+
+  // Limit VL to the fixed-length element count so no extra memory is read.
+  SDValue VL = getVLOp(VT.getVectorNumElements(), DL, DAG, Subtarget);
+
+  bool IsMaskOp = VT.getVectorElementType() == MVT::i1;
+  SDValue IntID = DAG.getTargetConstant(
+      IsMaskOp ? Intrinsic::riscv_vlm : Intrinsic::riscv_vle, DL, XLenVT);
+  SmallVector<SDValue, 4> Ops{Load->getChain(), IntID};
+  // vlm has no passthru operand; vle takes one (undef here).
+  if (!IsMaskOp)
+    Ops.push_back(DAG.getUNDEF(ContainerVT));
+  Ops.push_back(Load->getBasePtr());
+  Ops.push_back(VL);
+  SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
+  SDValue NewLoad =
+      DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
+                              Load->getMemoryVT(), Load->getMemOperand());
+
+  SDValue Result = convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
+  return DAG.getMergeValues({Result, NewLoad.getValue(1)}, DL);
+}
+
+// Lower a fixed-length vector store to a VL-limited riscv_vse (or riscv_vsm
+// for i1 masks) intrinsic on the scalable container type.
+SDValue
+RISCVTargetLowering::lowerFixedLengthVectorStoreToRVV(SDValue Op,
+                                                      SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  auto *Store = cast<StoreSDNode>(Op);
+
+  assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
+                                        Store->getMemoryVT(),
+                                        *Store->getMemOperand()) &&
+         "Expecting a correctly-aligned store");
+
+  SDValue StoreVal = Store->getValue();
+  MVT VT = StoreVal.getSimpleValueType();
+  MVT XLenVT = Subtarget.getXLenVT();
+
+  // If the size is less than a byte, we need to pad with zeros to make a byte.
+  if (VT.getVectorElementType() == MVT::i1 && VT.getVectorNumElements() < 8) {
+    VT = MVT::v8i1;
+    StoreVal = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
+                           DAG.getConstant(0, DL, VT), StoreVal,
+                           DAG.getIntPtrConstant(0, DL));
+  }
+
+  MVT ContainerVT = getContainerForFixedLengthVector(VT);
+
+  // Limit VL to the fixed-length element count so no extra memory is written.
+  SDValue VL = getVLOp(VT.getVectorNumElements(), DL, DAG, Subtarget);
+
+  SDValue NewValue =
+      convertToScalableVector(ContainerVT, StoreVal, DAG, Subtarget);
+
+  bool IsMaskOp = VT.getVectorElementType() == MVT::i1;
+  SDValue IntID = DAG.getTargetConstant(
+      IsMaskOp ? Intrinsic::riscv_vsm : Intrinsic::riscv_vse, DL, XLenVT);
+  return DAG.getMemIntrinsicNode(
+      ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other),
+      {Store->getChain(), IntID, NewValue, Store->getBasePtr(), VL},
+      Store->getMemoryVT(), Store->getMemOperand());
+}
+
+// Lower ISD::MLOAD and ISD::VP_LOAD to a riscv_vle / riscv_vle_mask
+// intrinsic. An all-ones mask selects the unmasked form; a VP load supplies
+// its own EVL, while a masked load uses the default VL for the type.
+SDValue RISCVTargetLowering::lowerMaskedLoad(SDValue Op,
+                                             SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  MVT VT = Op.getSimpleValueType();
+
+  const auto *MemSD = cast<MemSDNode>(Op);
+  EVT MemVT = MemSD->getMemoryVT();
+  MachineMemOperand *MMO = MemSD->getMemOperand();
+  SDValue Chain = MemSD->getChain();
+  SDValue BasePtr = MemSD->getBasePtr();
+
+  SDValue Mask, PassThru, VL;
+  if (const auto *VPLoad = dyn_cast<VPLoadSDNode>(Op)) {
+    Mask = VPLoad->getMask();
+    PassThru = DAG.getUNDEF(VT);
+    VL = VPLoad->getVectorLength();
+  } else {
+    const auto *MLoad = cast<MaskedLoadSDNode>(Op);
+    Mask = MLoad->getMask();
+    PassThru = MLoad->getPassThru();
+  }
+
+  bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
+
+  MVT XLenVT = Subtarget.getXLenVT();
+
+  MVT ContainerVT = VT;
+  if (VT.isFixedLengthVector()) {
+    ContainerVT = getContainerForFixedLengthVector(VT);
+    PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
+    if (!IsUnmasked) {
+      MVT MaskVT = getMaskTypeFor(ContainerVT);
+      Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
+    }
+  }
+
+  // MLOAD has no EVL operand, so VL is still empty here: use the default.
+  if (!VL)
+    VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
+
+  // Build the intrinsic operand list; the masked form additionally takes the
+  // mask and a trailing policy operand.
+  unsigned IntID =
+      IsUnmasked ? Intrinsic::riscv_vle : Intrinsic::riscv_vle_mask;
+  SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
+  if (IsUnmasked)
+    Ops.push_back(DAG.getUNDEF(ContainerVT));
+  else
+    Ops.push_back(PassThru);
+  Ops.push_back(BasePtr);
+  if (!IsUnmasked)
+    Ops.push_back(Mask);
+  Ops.push_back(VL);
+  if (!IsUnmasked)
+    Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT));
+
+  SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
+
+  SDValue Result =
+      DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MemVT, MMO);
+  Chain = Result.getValue(1);
+
+  if (VT.isFixedLengthVector())
+    Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
+
+  return DAG.getMergeValues({Result, Chain}, DL);
+}
+
+// Lower ISD::MSTORE and ISD::VP_STORE to a riscv_vse / riscv_vse_mask
+// intrinsic. An all-ones mask selects the unmasked form; a VP store supplies
+// its own EVL, while a masked store uses the default VL for the type.
+SDValue RISCVTargetLowering::lowerMaskedStore(SDValue Op,
+                                              SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+
+  const auto *MemSD = cast<MemSDNode>(Op);
+  EVT MemVT = MemSD->getMemoryVT();
+  MachineMemOperand *MMO = MemSD->getMemOperand();
+  SDValue Chain = MemSD->getChain();
+  SDValue BasePtr = MemSD->getBasePtr();
+  SDValue Val, Mask, VL;
+
+  if (const auto *VPStore = dyn_cast<VPStoreSDNode>(Op)) {
+    Val = VPStore->getValue();
+    Mask = VPStore->getMask();
+    VL = VPStore->getVectorLength();
+  } else {
+    const auto *MStore = cast<MaskedStoreSDNode>(Op);
+    Val = MStore->getValue();
+    Mask = MStore->getMask();
+  }
+
+  bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
+
+  MVT VT = Val.getSimpleValueType();
+  MVT XLenVT = Subtarget.getXLenVT();
+
+  MVT ContainerVT = VT;
+  if (VT.isFixedLengthVector()) {
+    ContainerVT = getContainerForFixedLengthVector(VT);
+
+    Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
+    if (!IsUnmasked) {
+      MVT MaskVT = getMaskTypeFor(ContainerVT);
+      Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
+    }
+  }
+
+  // MSTORE has no EVL operand, so VL is still empty here: use the default.
+  if (!VL)
+    VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
+
+  unsigned IntID =
+      IsUnmasked ? Intrinsic::riscv_vse : Intrinsic::riscv_vse_mask;
+  SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
+  Ops.push_back(Val);
+  Ops.push_back(BasePtr);
+  if (!IsUnmasked)
+    Ops.push_back(Mask);
+  Ops.push_back(VL);
+
+  return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL,
+                                 DAG.getVTList(MVT::Other), Ops, MemVT, MMO);
+}
+
+// Lower a fixed-length vector setcc by promoting both sides to their
+// scalable container type, emitting a VL-predicated SETCC_VL whose i1
+// result is then converted back to the fixed-length result type.
+SDValue
+RISCVTargetLowering::lowerFixedLengthVectorSetccToRVV(SDValue Op,
+                                                      SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  MVT ResVT = Op.getSimpleValueType();
+  MVT SrcVT = Op.getOperand(0).getSimpleValueType();
+  MVT ContainerVT = getContainerForFixedLengthVector(SrcVT);
+  MVT MaskVT = getMaskTypeFor(ContainerVT);
+
+  SDValue LHS =
+      convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
+  SDValue RHS =
+      convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
+
+  auto [Mask, VL] = getDefaultVLOps(ResVT.getVectorNumElements(), ContainerVT,
+                                    DL, DAG, Subtarget);
+
+  SDValue Cmp =
+      DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT,
+                  {LHS, RHS, Op.getOperand(2), DAG.getUNDEF(MaskVT), Mask, VL});
+
+  return convertFromScalableVector(ResVT, Cmp, DAG, Subtarget);
+}
+
+// Dispatch a fixed-length logic op either to the mask-register form (i1
+// element types take neither merge nor mask operands) or to the plain
+// vector form (which takes a merge operand).
+SDValue RISCVTargetLowering::lowerFixedLengthVectorLogicOpToRVV(
+    SDValue Op, SelectionDAG &DAG, unsigned MaskOpc, unsigned VecOpc) const {
+  const bool IsMaskOp =
+      Op.getSimpleValueType().getVectorElementType() == MVT::i1;
+  if (IsMaskOp)
+    return lowerToScalableOp(Op, DAG, MaskOpc, /*HasMergeOp*/ false,
+                             /*HasMask*/ false);
+  return lowerToScalableOp(Op, DAG, VecOpc, /*HasMergeOp*/ true);
+}
+
+// Map a generic vector shift opcode onto its VL-predicated RVV counterpart
+// and defer to the common fixed-length -> scalable lowering.
+SDValue
+RISCVTargetLowering::lowerFixedLengthVectorShiftToRVV(SDValue Op,
+                                                      SelectionDAG &DAG) const {
+  unsigned VLOpc;
+  switch (Op.getOpcode()) {
+  case ISD::SHL:
+    VLOpc = RISCVISD::SHL_VL;
+    break;
+  case ISD::SRA:
+    VLOpc = RISCVISD::SRA_VL;
+    break;
+  case ISD::SRL:
+    VLOpc = RISCVISD::SRL_VL;
+    break;
+  default:
+    llvm_unreachable("Unexpected opcode!");
+  }
+
+  return lowerToScalableOp(Op, DAG, VLOpc, /*HasMergeOp*/ true);
+}
+
+// Lower vector ABS (and VP_ABS) as smax(x, 0 - x).
+SDValue RISCVTargetLowering::lowerABS(SDValue Op, SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  MVT VT = Op.getSimpleValueType();
+  SDValue Src = Op.getOperand(0);
+
+  assert((Op.getOpcode() == ISD::VP_ABS || VT.isFixedLengthVector()) &&
+         "Unexpected type for ISD::ABS");
+
+  const bool IsFixed = VT.isFixedLengthVector();
+  MVT ContainerVT = VT;
+  if (IsFixed) {
+    ContainerVT = getContainerForFixedLengthVector(VT);
+    Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
+  }
+
+  // VP_ABS carries its own mask and EVL; plain ABS uses the defaults.
+  SDValue Mask, VL;
+  if (Op->getOpcode() == ISD::VP_ABS) {
+    Mask = Op->getOperand(1);
+    VL = Op->getOperand(2);
+  } else {
+    std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
+  }
+
+  SDValue Zero = DAG.getNode(
+      RISCVISD::VMV_V_X_VL, DL, ContainerVT, DAG.getUNDEF(ContainerVT),
+      DAG.getConstant(0, DL, Subtarget.getXLenVT()), VL);
+  SDValue Neg = DAG.getNode(RISCVISD::SUB_VL, DL, ContainerVT, Zero, Src,
+                            DAG.getUNDEF(ContainerVT), Mask, VL);
+  SDValue Res = DAG.getNode(RISCVISD::SMAX_VL, DL, ContainerVT, Src, Neg,
+                            DAG.getUNDEF(ContainerVT), Mask, VL);
+
+  return IsFixed ? convertFromScalableVector(VT, Res, DAG, Subtarget) : Res;
+}
+
+// Lower a fixed-length FCOPYSIGN by promoting both operands to the scalable
+// container type, emitting FCOPYSIGN_VL, and converting the result back.
+SDValue RISCVTargetLowering::lowerFixedLengthVectorFCOPYSIGNToRVV(
+    SDValue Op, SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  MVT VT = Op.getSimpleValueType();
+  assert(Op.getOperand(0).getValueType() == Op.getOperand(1).getValueType() &&
+         "Can only handle COPYSIGN with matching types.");
+
+  MVT ContainerVT = getContainerForFixedLengthVector(VT);
+  SDValue Mag =
+      convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
+  SDValue Sign =
+      convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
+
+  auto [Mask, VL] = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
+
+  SDValue Res = DAG.getNode(RISCVISD::FCOPYSIGN_VL, DL, ContainerVT, Mag,
+                            Sign, DAG.getUNDEF(ContainerVT), Mask, VL);
+
+  return convertFromScalableVector(VT, Res, DAG, Subtarget);
+}
+
+// Lower a fixed-length vselect: the i1 condition and both data operands are
+// promoted to scalable container types, VSELECT_VL is emitted with the
+// default VL, and the result is converted back to the fixed-length type.
+SDValue RISCVTargetLowering::lowerFixedLengthVectorSelectToRVV(
+    SDValue Op, SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  MVT VT = Op.getSimpleValueType();
+  MVT ContainerVT = getContainerForFixedLengthVector(VT);
+  MVT CondVT =
+      MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
+
+  SDValue Cond =
+      convertToScalableVector(CondVT, Op.getOperand(0), DAG, Subtarget);
+  SDValue TrueV =
+      convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
+  SDValue FalseV =
+      convertToScalableVector(ContainerVT, Op.getOperand(2), DAG, Subtarget);
+
+  SDValue VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
+
+  SDValue Sel = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, Cond,
+                            TrueV, FalseV, VL);
+
+  return convertFromScalableVector(VT, Sel, DAG, Subtarget);
+}
+
+// Convert a fixed-length vector operation to its scalable RVV *_VL form:
+// vector operands are "cast" to the container type, then an undef merge
+// operand and/or mask is appended as requested, followed by the VL operand.
+SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op, SelectionDAG &DAG,
+                                               unsigned NewOpc, bool HasMergeOp,
+                                               bool HasMask) const {
+  MVT VT = Op.getSimpleValueType();
+  MVT ContainerVT = getContainerForFixedLengthVector(VT);
+
+  // Create list of operands by converting existing ones to scalable types.
+  SmallVector<SDValue, 6> Ops;
+  for (const SDValue &V : Op->op_values()) {
+    assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
+
+    // Pass through non-vector operands.
+    if (!V.getValueType().isVector()) {
+      Ops.push_back(V);
+      continue;
+    }
+
+    // "cast" fixed length vector to a scalable vector.
+    assert(useRVVForFixedLengthVectorVT(V.getSimpleValueType()) &&
+           "Only fixed length vectors are supported!");
+    Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
+  }
+
+  SDLoc DL(Op);
+  auto [Mask, VL] = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
+  if (HasMergeOp)
+    Ops.push_back(DAG.getUNDEF(ContainerVT));
+  if (HasMask)
+    Ops.push_back(Mask);
+  Ops.push_back(VL);
+
+  SDValue ScalableRes =
+      DAG.getNode(NewOpc, DL, ContainerVT, Ops, Op->getFlags());
+  return convertFromScalableVector(VT, ScalableRes, DAG, Subtarget);
+}
+
+// Lower a VP_* ISD node to the corresponding RISCVISD::*_VL node:
+// * Operands of each node are assumed to be in the same order.
+// * The EVL operand is promoted from i32 to i64 on RV64.
+// * Fixed-length vectors are converted to their scalable-vector container
+//   types.
+// * When HasMergeOp is set, an undef merge operand is inserted immediately
+//   before the VP mask operand.
+SDValue RISCVTargetLowering::lowerVPOp(SDValue Op, SelectionDAG &DAG,
+                                       unsigned RISCVISDOpc,
+                                       bool HasMergeOp) const {
+  SDLoc DL(Op);
+  MVT VT = Op.getSimpleValueType();
+  SmallVector<SDValue, 4> Ops;
+
+  MVT ContainerVT = VT;
+  if (VT.isFixedLengthVector())
+    ContainerVT = getContainerForFixedLengthVector(VT);
+
+  for (const auto &OpIdx : enumerate(Op->ops())) {
+    SDValue V = OpIdx.value();
+    assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
+    // Add dummy merge value before the mask.
+    if (HasMergeOp && *ISD::getVPMaskIdx(Op.getOpcode()) == OpIdx.index())
+      Ops.push_back(DAG.getUNDEF(ContainerVT));
+    // Pass through operands which aren't fixed-length vectors.
+    if (!V.getValueType().isFixedLengthVector()) {
+      Ops.push_back(V);
+      continue;
+    }
+    // "cast" fixed length vector to a scalable vector. Note each operand may
+    // have its own container type (e.g. the mask operand's i1 container).
+    MVT OpVT = V.getSimpleValueType();
+    MVT ContainerVT = getContainerForFixedLengthVector(OpVT);
+    assert(useRVVForFixedLengthVectorVT(OpVT) &&
+           "Only fixed length vectors are supported!");
+    Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
+  }
+
+  if (!VT.isFixedLengthVector())
+    return DAG.getNode(RISCVISDOpc, DL, VT, Ops, Op->getFlags());
+
+  SDValue VPOp = DAG.getNode(RISCVISDOpc, DL, ContainerVT, Ops, Op->getFlags());
+
+  return convertFromScalableVector(VT, VPOp, DAG, Subtarget);
+}
+
+// Lower VP_ZERO_EXTEND / VP_SIGN_EXTEND from an i1 mask: select between a
+// splat of the extended "true" value (1 for zext, -1 for sext) and a splat
+// of zero.
+SDValue RISCVTargetLowering::lowerVPExtMaskOp(SDValue Op,
+                                              SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  MVT VT = Op.getSimpleValueType();
+
+  SDValue Src = Op.getOperand(0);
+  // NOTE: Mask is dropped.
+  SDValue VL = Op.getOperand(2);
+
+  const bool IsFixed = VT.isFixedLengthVector();
+  MVT ContainerVT = VT;
+  if (IsFixed) {
+    ContainerVT = getContainerForFixedLengthVector(VT);
+    MVT MaskVT =
+        MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
+    Src = convertToScalableVector(MaskVT, Src, DAG, Subtarget);
+  }
+
+  MVT XLenVT = Subtarget.getXLenVT();
+  SDValue TrueVal = DAG.getConstant(
+      Op.getOpcode() == ISD::VP_ZERO_EXTEND ? 1 : -1, DL, XLenVT);
+  SDValue TrueSplat = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
+                                  DAG.getUNDEF(ContainerVT), TrueVal, VL);
+  SDValue ZeroSplat =
+      DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
+                  DAG.getUNDEF(ContainerVT), DAG.getConstant(0, DL, XLenVT),
+                  VL);
+
+  SDValue Res = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, Src,
+                            TrueSplat, ZeroSplat, VL);
+  return IsFixed ? convertFromScalableVector(VT, Res, DAG, Subtarget) : Res;
+}
+
+// Lower a VP setcc whose operands are mask (i1) vectors. On i1 values every
+// comparison reduces to an identity over mask-register and/xor operations
+// (vmxor/vmand), so no vector compare instruction is needed.
+SDValue RISCVTargetLowering::lowerVPSetCCMaskOp(SDValue Op,
+                                                SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  MVT VT = Op.getSimpleValueType();
+
+  SDValue Op1 = Op.getOperand(0);
+  SDValue Op2 = Op.getOperand(1);
+  ISD::CondCode Condition = cast<CondCodeSDNode>(Op.getOperand(2))->get();
+  // NOTE: Mask is dropped.
+  SDValue VL = Op.getOperand(4);
+
+  MVT ContainerVT = VT;
+  if (VT.isFixedLengthVector()) {
+    ContainerVT = getContainerForFixedLengthVector(VT);
+    Op1 = convertToScalableVector(ContainerVT, Op1, DAG, Subtarget);
+    Op2 = convertToScalableVector(ContainerVT, Op2, DAG, Subtarget);
+  }
+
+  SDValue Result;
+  // All-ones mask, used to complement a mask via xor.
+  SDValue AllOneMask = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
+
+  switch (Condition) {
+  default:
+    break;
+  // X != Y  --> (X^Y)
+  case ISD::SETNE:
+    Result = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op1, Op2, VL);
+    break;
+  // X == Y  --> ~(X^Y)
+  case ISD::SETEQ: {
+    SDValue Temp =
+        DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op1, Op2, VL);
+    Result =
+        DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Temp, AllOneMask, VL);
+    break;
+  }
+  // X >s Y  --> X == 0 & Y == 1  -->  ~X & Y
+  // X <u Y  --> X == 0 & Y == 1  -->  ~X & Y
+  case ISD::SETGT:
+  case ISD::SETULT: {
+    SDValue Temp =
+        DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op1, AllOneMask, VL);
+    Result = DAG.getNode(RISCVISD::VMAND_VL, DL, ContainerVT, Temp, Op2, VL);
+    break;
+  }
+  // X <s Y  --> X == 1 & Y == 0  -->  ~Y & X
+  // X >u Y  --> X == 1 & Y == 0  -->  ~Y & X
+  case ISD::SETLT:
+  case ISD::SETUGT: {
+    SDValue Temp =
+        DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op2, AllOneMask, VL);
+    Result = DAG.getNode(RISCVISD::VMAND_VL, DL, ContainerVT, Op1, Temp, VL);
+    break;
+  }
+  // X >=s Y  --> X == 0 | Y == 1  -->  ~X | Y
+  // X <=u Y  --> X == 0 | Y == 1  -->  ~X | Y
+  case ISD::SETGE:
+  case ISD::SETULE: {
+    SDValue Temp =
+        DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op1, AllOneMask, VL);
+    Result = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Temp, Op2, VL);
+    break;
+  }
+  // X <=s Y  --> X == 1 | Y == 0  -->  ~Y | X
+  // X >=u Y  --> X == 1 | Y == 0  -->  ~Y | X
+  case ISD::SETLE:
+  case ISD::SETUGE: {
+    SDValue Temp =
+        DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op2, AllOneMask, VL);
+    Result = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Temp, Op1, VL);
+    break;
+  }
+  }
+
+  if (!VT.isFixedLengthVector())
+    return Result;
+  return convertFromScalableVector(VT, Result, DAG, Subtarget);
+}
+
+// Lower Floating-Point/Integer Type-Convert VP SDNodes.
+// Handles single-width, widening (pre-extending narrow integer or f16
+// sources when the destination is more than twice as wide) and narrowing
+// (post-rounding / post-truncating the result) conversions; i1 sources and
+// destinations are expanded via select and setcc respectively.
+SDValue RISCVTargetLowering::lowerVPFPIntConvOp(SDValue Op, SelectionDAG &DAG,
+                                                unsigned RISCVISDOpc) const {
+  SDLoc DL(Op);
+
+  SDValue Src = Op.getOperand(0);
+  SDValue Mask = Op.getOperand(1);
+  SDValue VL = Op.getOperand(2);
+
+  MVT DstVT = Op.getSimpleValueType();
+  MVT SrcVT = Src.getSimpleValueType();
+  if (DstVT.isFixedLengthVector()) {
+    DstVT = getContainerForFixedLengthVector(DstVT);
+    SrcVT = getContainerForFixedLengthVector(SrcVT);
+    Src = convertToScalableVector(SrcVT, Src, DAG, Subtarget);
+    MVT MaskVT = getMaskTypeFor(DstVT);
+    Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
+  }
+
+  unsigned DstEltSize = DstVT.getScalarSizeInBits();
+  unsigned SrcEltSize = SrcVT.getScalarSizeInBits();
+
+  SDValue Result;
+  if (DstEltSize >= SrcEltSize) { // Single-width and widening conversion.
+    if (SrcVT.isInteger()) {
+      assert(DstVT.isFloatingPoint() && "Wrong input/output vector types");
+
+      // A signed conversion sign-extends a narrow source; unsigned
+      // zero-extends.
+      unsigned RISCVISDExtOpc = RISCVISDOpc == RISCVISD::SINT_TO_FP_VL
+                                    ? RISCVISD::VSEXT_VL
+                                    : RISCVISD::VZEXT_VL;
+
+      // Do we need to do any pre-widening before converting?
+      if (SrcEltSize == 1) {
+        // Expand an i1 source to 0 / extended-true via a select first.
+        MVT IntVT = DstVT.changeVectorElementTypeToInteger();
+        MVT XLenVT = Subtarget.getXLenVT();
+        SDValue Zero = DAG.getConstant(0, DL, XLenVT);
+        SDValue ZeroSplat = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntVT,
+                                        DAG.getUNDEF(IntVT), Zero, VL);
+        SDValue One = DAG.getConstant(
+            RISCVISDExtOpc == RISCVISD::VZEXT_VL ? 1 : -1, DL, XLenVT);
+        SDValue OneSplat = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntVT,
+                                       DAG.getUNDEF(IntVT), One, VL);
+        Src = DAG.getNode(RISCVISD::VSELECT_VL, DL, IntVT, Src, OneSplat,
+                          ZeroSplat, VL);
+      } else if (DstEltSize > (2 * SrcEltSize)) {
+        // Widen before converting.
+        MVT IntVT = MVT::getVectorVT(MVT::getIntegerVT(DstEltSize / 2),
+                                     DstVT.getVectorElementCount());
+        Src = DAG.getNode(RISCVISDExtOpc, DL, IntVT, Src, Mask, VL);
+      }
+
+      Result = DAG.getNode(RISCVISDOpc, DL, DstVT, Src, Mask, VL);
+    } else {
+      assert(SrcVT.isFloatingPoint() && DstVT.isInteger() &&
+             "Wrong input/output vector types");
+
+      // Convert f16 to f32 then convert f32 to i64.
+      if (DstEltSize > (2 * SrcEltSize)) {
+        assert(SrcVT.getVectorElementType() == MVT::f16 && "Unexpected type!");
+        MVT InterimFVT =
+            MVT::getVectorVT(MVT::f32, DstVT.getVectorElementCount());
+        Src =
+            DAG.getNode(RISCVISD::FP_EXTEND_VL, DL, InterimFVT, Src, Mask, VL);
+      }
+
+      Result = DAG.getNode(RISCVISDOpc, DL, DstVT, Src, Mask, VL);
+    }
+  } else { // Narrowing + Conversion
+    if (SrcVT.isInteger()) {
+      assert(DstVT.isFloatingPoint() && "Wrong input/output vector types");
+      // First do a narrowing convert to an FP type half the size, then round
+      // the FP type to a small FP type if needed.
+
+      MVT InterimFVT = DstVT;
+      if (SrcEltSize > (2 * DstEltSize)) {
+        assert(SrcEltSize == (4 * DstEltSize) && "Unexpected types!");
+        assert(DstVT.getVectorElementType() == MVT::f16 && "Unexpected type!");
+        InterimFVT = MVT::getVectorVT(MVT::f32, DstVT.getVectorElementCount());
+      }
+
+      Result = DAG.getNode(RISCVISDOpc, DL, InterimFVT, Src, Mask, VL);
+
+      if (InterimFVT != DstVT) {
+        Src = Result;
+        Result = DAG.getNode(RISCVISD::FP_ROUND_VL, DL, DstVT, Src, Mask, VL);
+      }
+    } else {
+      assert(SrcVT.isFloatingPoint() && DstVT.isInteger() &&
+             "Wrong input/output vector types");
+      // First do a narrowing conversion to an integer half the size, then
+      // truncate if needed.
+
+      if (DstEltSize == 1) {
+        // First convert to the same size integer, then convert to mask using
+        // setcc.
+        assert(SrcEltSize >= 16 && "Unexpected FP type!");
+        MVT InterimIVT = MVT::getVectorVT(MVT::getIntegerVT(SrcEltSize),
+                                          DstVT.getVectorElementCount());
+        Result = DAG.getNode(RISCVISDOpc, DL, InterimIVT, Src, Mask, VL);
+
+        // Compare the integer result to 0. The integer should be 0 or 1/-1,
+        // otherwise the conversion was undefined.
+        MVT XLenVT = Subtarget.getXLenVT();
+        SDValue SplatZero = DAG.getConstant(0, DL, XLenVT);
+        SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, InterimIVT,
+                                DAG.getUNDEF(InterimIVT), SplatZero, VL);
+        Result = DAG.getNode(RISCVISD::SETCC_VL, DL, DstVT,
+                             {Result, SplatZero, DAG.getCondCode(ISD::SETNE),
+                              DAG.getUNDEF(DstVT), Mask, VL});
+      } else {
+        MVT InterimIVT = MVT::getVectorVT(MVT::getIntegerVT(SrcEltSize / 2),
+                                          DstVT.getVectorElementCount());
+
+        Result = DAG.getNode(RISCVISDOpc, DL, InterimIVT, Src, Mask, VL);
+
+        // Narrow in halving steps until the destination width is reached.
+        while (InterimIVT != DstVT) {
+          SrcEltSize /= 2;
+          Src = Result;
+          InterimIVT = MVT::getVectorVT(MVT::getIntegerVT(SrcEltSize / 2),
+                                        DstVT.getVectorElementCount());
+          Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, InterimIVT,
+                               Src, Mask, VL);
+        }
+      }
+    }
+  }
+
+  MVT VT = Op.getSimpleValueType();
+  if (!VT.isFixedLengthVector())
+    return Result;
+  return convertFromScalableVector(VT, Result, DAG, Subtarget);
+}
+
+// Lower a VP logic op: non-mask element types take the generic VP path with
+// a merge operand; i1 vectors use the mask-register form directly.
+SDValue RISCVTargetLowering::lowerLogicVPOp(SDValue Op, SelectionDAG &DAG,
+                                            unsigned MaskOpc,
+                                            unsigned VecOpc) const {
+  MVT VT = Op.getSimpleValueType();
+  if (VT.getVectorElementType() != MVT::i1)
+    return lowerVPOp(Op, DAG, VecOpc, true);
+
+  // It is safe to drop the mask parameter as masked-off elements are undef.
+  SDLoc DL(Op);
+  SDValue LHS = Op->getOperand(0);
+  SDValue RHS = Op->getOperand(1);
+  SDValue VL = Op->getOperand(3);
+
+  MVT ContainerVT = VT;
+  if (VT.isFixedLengthVector()) {
+    ContainerVT = getContainerForFixedLengthVector(VT);
+    LHS = convertToScalableVector(ContainerVT, LHS, DAG, Subtarget);
+    RHS = convertToScalableVector(ContainerVT, RHS, DAG, Subtarget);
+  }
+
+  SDValue Res = DAG.getNode(MaskOpc, DL, ContainerVT, LHS, RHS, VL);
+  if (VT.isFixedLengthVector())
+    return convertFromScalableVector(VT, Res, DAG, Subtarget);
+  return Res;
+}
+
+// Lower a VP strided load to a riscv_vlse / riscv_vlse_mask intrinsic node.
+// An all-ones mask selects the unmasked intrinsic form.
+SDValue RISCVTargetLowering::lowerVPStridedLoad(SDValue Op,
+                                                SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  MVT XLenVT = Subtarget.getXLenVT();
+  MVT VT = Op.getSimpleValueType();
+  MVT ContainerVT = VT;
+  if (VT.isFixedLengthVector())
+    ContainerVT = getContainerForFixedLengthVector(VT);
+
+  SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
+
+  auto *VPNode = cast<VPStridedLoadSDNode>(Op);
+  // Check if the mask is known to be all ones
+  SDValue Mask = VPNode->getMask();
+  bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
+
+  SDValue IntID = DAG.getTargetConstant(IsUnmasked ? Intrinsic::riscv_vlse
+                                            : Intrinsic::riscv_vlse_mask,
+                                        DL, XLenVT);
+  // Passthru operand is undef; the VP load carries no merge value.
+  SmallVector<SDValue, 8> Ops{VPNode->getChain(), IntID,
+                              DAG.getUNDEF(ContainerVT), VPNode->getBasePtr(),
+                              VPNode->getStride()};
+  if (!IsUnmasked) {
+    if (VT.isFixedLengthVector()) {
+      MVT MaskVT = ContainerVT.changeVectorElementType(MVT::i1);
+      Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
+    }
+    Ops.push_back(Mask);
+  }
+  Ops.push_back(VPNode->getVectorLength());
+  if (!IsUnmasked) {
+    // Only the masked intrinsic takes an explicit tail-policy operand.
+    SDValue Policy = DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT);
+    Ops.push_back(Policy);
+  }
+
+  SDValue Result =
+      DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
+                              VPNode->getMemoryVT(), VPNode->getMemOperand());
+  SDValue Chain = Result.getValue(1);
+
+  if (VT.isFixedLengthVector())
+    Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
+
+  return DAG.getMergeValues({Result, Chain}, DL);
+}
+
+// Lower a VP strided store to a riscv_vsse / riscv_vsse_mask intrinsic node.
+// An all-ones mask selects the unmasked intrinsic form.
+SDValue RISCVTargetLowering::lowerVPStridedStore(SDValue Op,
+                                                 SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  MVT XLenVT = Subtarget.getXLenVT();
+
+  auto *VPNode = cast<VPStridedStoreSDNode>(Op);
+  SDValue StoreVal = VPNode->getValue();
+  MVT VT = StoreVal.getSimpleValueType();
+  // Fixed-length store values are widened to an equivalent scalable
+  // container type before selection.
+  MVT ContainerVT = VT;
+  if (VT.isFixedLengthVector()) {
+    ContainerVT = getContainerForFixedLengthVector(VT);
+    StoreVal = convertToScalableVector(ContainerVT, StoreVal, DAG, Subtarget);
+  }
+
+  // Check if the mask is known to be all ones
+  SDValue Mask = VPNode->getMask();
+  bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
+
+  SDValue IntID = DAG.getTargetConstant(IsUnmasked ? Intrinsic::riscv_vsse
+                                            : Intrinsic::riscv_vsse_mask,
+                                        DL, XLenVT);
+  SmallVector<SDValue, 8> Ops{VPNode->getChain(), IntID, StoreVal,
+                              VPNode->getBasePtr(), VPNode->getStride()};
+  if (!IsUnmasked) {
+    if (VT.isFixedLengthVector()) {
+      MVT MaskVT = ContainerVT.changeVectorElementType(MVT::i1);
+      Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
+    }
+    Ops.push_back(Mask);
+  }
+  Ops.push_back(VPNode->getVectorLength());
+
+  // Stores produce only a chain, so emit an INTRINSIC_VOID node.
+  return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, VPNode->getVTList(),
+                                 Ops, VPNode->getMemoryVT(),
+                                 VPNode->getMemOperand());
+}
+
+// Custom lower MGATHER/VP_GATHER to a legalized form for RVV. It will then be
+// matched to a RVV indexed load. The RVV indexed load instructions only
+// support the "unsigned unscaled" addressing mode; indices are implicitly
+// zero-extended or truncated to XLEN and are treated as byte offsets. Any
+// signed or scaled indexing is extended to the XLEN value type and scaled
+// accordingly.
+SDValue RISCVTargetLowering::lowerMaskedGather(SDValue Op,
+                                               SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  MVT VT = Op.getSimpleValueType();
+
+  const auto *MemSD = cast<MemSDNode>(Op.getNode());
+  EVT MemVT = MemSD->getMemoryVT();
+  MachineMemOperand *MMO = MemSD->getMemOperand();
+  SDValue Chain = MemSD->getChain();
+  SDValue BasePtr = MemSD->getBasePtr();
+
+  ISD::LoadExtType LoadExtType;
+  SDValue Index, Mask, PassThru, VL;
+
+  // Collect the operands from whichever gather flavour this node is.
+  if (auto *VPGN = dyn_cast<VPGatherSDNode>(Op.getNode())) {
+    Index = VPGN->getIndex();
+    Mask = VPGN->getMask();
+    PassThru = DAG.getUNDEF(VT);
+    VL = VPGN->getVectorLength();
+    // VP doesn't support extending loads.
+    LoadExtType = ISD::NON_EXTLOAD;
+  } else {
+    // Else it must be a MGATHER.
+    auto *MGN = cast<MaskedGatherSDNode>(Op.getNode());
+    Index = MGN->getIndex();
+    Mask = MGN->getMask();
+    PassThru = MGN->getPassThru();
+    LoadExtType = MGN->getExtensionType();
+  }
+
+  MVT IndexVT = Index.getSimpleValueType();
+  MVT XLenVT = Subtarget.getXLenVT();
+
+  assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
+         "Unexpected VTs!");
+  assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
+  // Targets have to explicitly opt-in for extending vector loads.
+  assert(LoadExtType == ISD::NON_EXTLOAD &&
+         "Unexpected extending MGATHER/VP_GATHER");
+  (void)LoadExtType;
+
+  // If the mask is known to be all ones, optimize to an unmasked intrinsic;
+  // the selection of the masked intrinsics doesn't do this for us.
+  bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
+
+  // Fixed-length operands are converted into an equivalent scalable
+  // container type; the result is converted back at the end.
+  MVT ContainerVT = VT;
+  if (VT.isFixedLengthVector()) {
+    ContainerVT = getContainerForFixedLengthVector(VT);
+    IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
+                               ContainerVT.getVectorElementCount());
+
+    Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
+
+    if (!IsUnmasked) {
+      MVT MaskVT = getMaskTypeFor(ContainerVT);
+      Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
+      PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
+    }
+  }
+
+  // MGATHER has no VL operand; use the default VL for the container type.
+  if (!VL)
+    VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
+
+  // On RV32, truncate i64 index elements down to XLEN; indices are treated
+  // as XLEN-sized byte offsets by the indexed-load instructions.
+  if (XLenVT == MVT::i32 && IndexVT.getVectorElementType().bitsGT(XLenVT)) {
+    IndexVT = IndexVT.changeVectorElementType(XLenVT);
+    SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, Mask.getValueType(),
+                                   VL);
+    Index = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, IndexVT, Index,
+                        TrueMask, VL);
+  }
+
+  // Build the riscv_vluxei[_mask] intrinsic operand list.
+  unsigned IntID =
+      IsUnmasked ? Intrinsic::riscv_vluxei : Intrinsic::riscv_vluxei_mask;
+  SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
+  if (IsUnmasked)
+    Ops.push_back(DAG.getUNDEF(ContainerVT));
+  else
+    Ops.push_back(PassThru);
+  Ops.push_back(BasePtr);
+  Ops.push_back(Index);
+  if (!IsUnmasked)
+    Ops.push_back(Mask);
+  Ops.push_back(VL);
+  if (!IsUnmasked)
+    Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT));
+
+  SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
+  SDValue Result =
+      DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MemVT, MMO);
+  Chain = Result.getValue(1);
+
+  if (VT.isFixedLengthVector())
+    Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
+
+  return DAG.getMergeValues({Result, Chain}, DL);
+}
+
+// Custom lower MSCATTER/VP_SCATTER to a legalized form for RVV. It will then be
+// matched to a RVV indexed store. The RVV indexed store instructions only
+// support the "unsigned unscaled" addressing mode; indices are implicitly
+// zero-extended or truncated to XLEN and are treated as byte offsets. Any
+// signed or scaled indexing is extended to the XLEN value type and scaled
+// accordingly.
+SDValue RISCVTargetLowering::lowerMaskedScatter(SDValue Op,
+                                                SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  const auto *MemSD = cast<MemSDNode>(Op.getNode());
+  EVT MemVT = MemSD->getMemoryVT();
+  MachineMemOperand *MMO = MemSD->getMemOperand();
+  SDValue Chain = MemSD->getChain();
+  SDValue BasePtr = MemSD->getBasePtr();
+
+  bool IsTruncatingStore = false;
+  SDValue Index, Mask, Val, VL;
+
+  // Collect the operands from whichever scatter flavour this node is.
+  if (auto *VPSN = dyn_cast<VPScatterSDNode>(Op.getNode())) {
+    Index = VPSN->getIndex();
+    Mask = VPSN->getMask();
+    Val = VPSN->getValue();
+    VL = VPSN->getVectorLength();
+    // VP doesn't support truncating stores.
+    IsTruncatingStore = false;
+  } else {
+    // Else it must be a MSCATTER.
+    auto *MSN = cast<MaskedScatterSDNode>(Op.getNode());
+    Index = MSN->getIndex();
+    Mask = MSN->getMask();
+    Val = MSN->getValue();
+    IsTruncatingStore = MSN->isTruncatingStore();
+  }
+
+  MVT VT = Val.getSimpleValueType();
+  MVT IndexVT = Index.getSimpleValueType();
+  MVT XLenVT = Subtarget.getXLenVT();
+
+  assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
+         "Unexpected VTs!");
+  assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
+  // Targets have to explicitly opt-in for extending vector loads and
+  // truncating vector stores.
+  assert(!IsTruncatingStore && "Unexpected truncating MSCATTER/VP_SCATTER");
+  (void)IsTruncatingStore;
+
+  // If the mask is known to be all ones, optimize to an unmasked intrinsic;
+  // the selection of the masked intrinsics doesn't do this for us.
+  bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
+
+  // Fixed-length operands are converted into an equivalent scalable
+  // container type before selection.
+  MVT ContainerVT = VT;
+  if (VT.isFixedLengthVector()) {
+    ContainerVT = getContainerForFixedLengthVector(VT);
+    IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
+                               ContainerVT.getVectorElementCount());
+
+    Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
+    Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
+
+    if (!IsUnmasked) {
+      MVT MaskVT = getMaskTypeFor(ContainerVT);
+      Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
+    }
+  }
+
+  // MSCATTER has no VL operand; use the default VL for the container type.
+  if (!VL)
+    VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
+
+  // On RV32, truncate i64 index elements down to XLEN; indices are treated
+  // as XLEN-sized byte offsets by the indexed-store instructions.
+  if (XLenVT == MVT::i32 && IndexVT.getVectorElementType().bitsGT(XLenVT)) {
+    IndexVT = IndexVT.changeVectorElementType(XLenVT);
+    SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, Mask.getValueType(),
+                                   VL);
+    Index = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, IndexVT, Index,
+                        TrueMask, VL);
+  }
+
+  // Build the riscv_vsoxei[_mask] intrinsic operand list.
+  unsigned IntID =
+      IsUnmasked ? Intrinsic::riscv_vsoxei : Intrinsic::riscv_vsoxei_mask;
+  SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
+  Ops.push_back(Val);
+  Ops.push_back(BasePtr);
+  Ops.push_back(Index);
+  if (!IsUnmasked)
+    Ops.push_back(Mask);
+  Ops.push_back(VL);
+
+  return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL,
+                                 DAG.getVTList(MVT::Other), Ops, MemVT, MMO);
+}
+
+// Lower GET_ROUNDING by reading the FRM CSR and remapping the RISCV rounding
+// mode to the generic FLT_ROUNDS encoding with a packed lookup table.
+SDValue RISCVTargetLowering::lowerGET_ROUNDING(SDValue Op,
+                                               SelectionDAG &DAG) const {
+  const MVT XLenVT = Subtarget.getXLenVT();
+  SDLoc DL(Op);
+  SDValue Chain = Op->getOperand(0);
+  SDValue SysRegNo = DAG.getTargetConstant(
+      RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
+  SDVTList VTs = DAG.getVTList(XLenVT, MVT::Other);
+  SDValue RM = DAG.getNode(RISCVISD::READ_CSR, DL, VTs, Chain, SysRegNo);
+
+  // Encoding used for rounding mode in RISCV differs from that used in
+  // FLT_ROUNDS. To convert it the RISCV rounding mode is used as an index in a
+  // table, which consists of a sequence of 4-bit fields, each representing
+  // corresponding FLT_ROUNDS mode.
+  static const int Table =
+      (int(RoundingMode::NearestTiesToEven) << 4 * RISCVFPRndMode::RNE) |
+      (int(RoundingMode::TowardZero) << 4 * RISCVFPRndMode::RTZ) |
+      (int(RoundingMode::TowardNegative) << 4 * RISCVFPRndMode::RDN) |
+      (int(RoundingMode::TowardPositive) << 4 * RISCVFPRndMode::RUP) |
+      (int(RoundingMode::NearestTiesToAway) << 4 * RISCVFPRndMode::RMM);
+
+  // Shift = RM * 4 selects the 4-bit field; mask with 7 keeps the 3
+  // significant bits of the FLT_ROUNDS value.
+  SDValue Shift =
+      DAG.getNode(ISD::SHL, DL, XLenVT, RM, DAG.getConstant(2, DL, XLenVT));
+  SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
+                                DAG.getConstant(Table, DL, XLenVT), Shift);
+  SDValue Masked = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
+                               DAG.getConstant(7, DL, XLenVT));
+
+  return DAG.getMergeValues({Masked, Chain}, DL);
+}
+
+// Lower SET_ROUNDING by remapping the generic FLT_ROUNDS rounding mode to
+// the RISCV encoding with a packed lookup table and writing it to FRM.
+SDValue RISCVTargetLowering::lowerSET_ROUNDING(SDValue Op,
+                                               SelectionDAG &DAG) const {
+  const MVT XLenVT = Subtarget.getXLenVT();
+  SDLoc DL(Op);
+  SDValue Chain = Op->getOperand(0);
+  SDValue RMValue = Op->getOperand(1);
+  SDValue SysRegNo = DAG.getTargetConstant(
+      RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
+
+  // Encoding used for rounding mode in RISCV differs from that used in
+  // FLT_ROUNDS. To convert it the C rounding mode is used as an index in
+  // a table, which consists of a sequence of 4-bit fields, each representing
+  // corresponding RISCV mode.
+  static const unsigned Table =
+      (RISCVFPRndMode::RNE << 4 * int(RoundingMode::NearestTiesToEven)) |
+      (RISCVFPRndMode::RTZ << 4 * int(RoundingMode::TowardZero)) |
+      (RISCVFPRndMode::RDN << 4 * int(RoundingMode::TowardNegative)) |
+      (RISCVFPRndMode::RUP << 4 * int(RoundingMode::TowardPositive)) |
+      (RISCVFPRndMode::RMM << 4 * int(RoundingMode::NearestTiesToAway));
+
+  // Shift = RMValue * 4 selects the 4-bit field; mask with 7 keeps the 3
+  // significant bits of the RISCV rounding-mode value.
+  SDValue Shift = DAG.getNode(ISD::SHL, DL, XLenVT, RMValue,
+                              DAG.getConstant(2, DL, XLenVT));
+  SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
+                                DAG.getConstant(Table, DL, XLenVT), Shift);
+  RMValue = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
+                        DAG.getConstant(0x7, DL, XLenVT));
+  return DAG.getNode(RISCVISD::WRITE_CSR, DL, MVT::Other, Chain, SysRegNo,
+                     RMValue);
+}
+
+// Lower EH_DWARF_CFA to a pointer-sized fixed stack object at offset 0
+// (8 bytes on RV64, 4 on RV32) and return its frame index.
+SDValue RISCVTargetLowering::lowerEH_DWARF_CFA(SDValue Op,
+                                               SelectionDAG &DAG) const {
+  MachineFunction &MF = DAG.getMachineFunction();
+
+  bool isRISCV64 = Subtarget.is64Bit();
+  EVT PtrVT = getPointerTy(DAG.getDataLayout());
+
+  int FI = MF.getFrameInfo().CreateFixedObject(isRISCV64 ? 8 : 4, 0, false);
+  return DAG.getFrameIndex(FI, PtrVT);
+}
+
+// Returns the opcode of the target-specific SDNode that implements the 32-bit
+// form of the given Opcode. Only opcodes with a *W form are handled; any
+// other opcode is a programming error (llvm_unreachable).
+static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
+  switch (Opcode) {
+  default:
+    llvm_unreachable("Unexpected opcode");
+  case ISD::SHL:
+    return RISCVISD::SLLW;
+  case ISD::SRA:
+    return RISCVISD::SRAW;
+  case ISD::SRL:
+    return RISCVISD::SRLW;
+  case ISD::SDIV:
+    return RISCVISD::DIVW;
+  case ISD::UDIV:
+    return RISCVISD::DIVUW;
+  case ISD::UREM:
+    return RISCVISD::REMUW;
+  case ISD::ROTL:
+    return RISCVISD::ROLW;
+  case ISD::ROTR:
+    return RISCVISD::RORW;
+  }
+}
+
+// Converts the given i8/i16/i32 operation to a target-specific SelectionDAG
+// node. Because i8/i16/i32 isn't a legal type for RV64, these operations would
+// otherwise be promoted to i64, making it difficult to select the
+// SLLW/DIVUW/.../*W later one because the fact the operation was originally of
+// type i8/i16/i32 is lost.
+// ExtOpc controls how the operands are widened to i64 (default ANY_EXTEND;
+// callers pass SIGN/ZERO_EXTEND when the W instruction reads the upper bits).
+static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG,
+                                   unsigned ExtOpc = ISD::ANY_EXTEND) {
+  SDLoc DL(N);
+  RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
+  SDValue NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
+  SDValue NewOp1 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(1));
+  SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
+  // ReplaceNodeResults requires we maintain the same type for the return value.
+  return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
+}
+
+// Converts the given 32-bit operation to a i64 operation with signed extension
+// semantic to reduce the signed extension instructions.
+// The operands are any-extended, the op is performed in i64, and the result
+// is sign-extended from bit 31 before truncating back to i32.
+static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) {
+  SDLoc DL(N);
+  SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
+  SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
+  SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1);
+  SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
+                               DAG.getValueType(MVT::i32));
+  return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
+}
+
+void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
+ SmallVectorImpl<SDValue> &Results,
+ SelectionDAG &DAG) const {
+ SDLoc DL(N);
+ switch (N->getOpcode()) {
+ default:
+ llvm_unreachable("Don't know how to custom type legalize this operation!");
+ case ISD::STRICT_FP_TO_SINT:
+ case ISD::STRICT_FP_TO_UINT:
+ case ISD::FP_TO_SINT:
+ case ISD::FP_TO_UINT: {
+ assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
+ "Unexpected custom legalisation");
+ bool IsStrict = N->isStrictFPOpcode();
+ bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT ||
+ N->getOpcode() == ISD::STRICT_FP_TO_SINT;
+ SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0);
+ if (getTypeAction(*DAG.getContext(), Op0.getValueType()) !=
+ TargetLowering::TypeSoftenFloat) {
+ if (!isTypeLegal(Op0.getValueType()))
+ return;
+ if (IsStrict) {
+ SDValue Chain = N->getOperand(0);
+        // In absence of Zfh, promote f16 to f32, then convert.
+ if (Op0.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfh()) {
+ Op0 = DAG.getNode(ISD::STRICT_FP_EXTEND, DL, {MVT::f32, MVT::Other},
+ {Chain, Op0});
+ Chain = Op0.getValue(1);
+ }
+ unsigned Opc = IsSigned ? RISCVISD::STRICT_FCVT_W_RV64
+ : RISCVISD::STRICT_FCVT_WU_RV64;
+ SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other);
+ SDValue Res = DAG.getNode(
+ Opc, DL, VTs, Chain, Op0,
+ DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, MVT::i64));
+ Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
+ Results.push_back(Res.getValue(1));
+ return;
+ }
+      // In absence of Zfh, promote f16 to f32, then convert.
+ if (Op0.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfh())
+ Op0 = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Op0);
+
+ unsigned Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
+ SDValue Res =
+ DAG.getNode(Opc, DL, MVT::i64, Op0,
+ DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, MVT::i64));
+ Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
+ return;
+ }
+ // If the FP type needs to be softened, emit a library call using the 'si'
+ // version. If we left it to default legalization we'd end up with 'di'. If
+ // the FP type doesn't need to be softened just let generic type
+ // legalization promote the result type.
+ RTLIB::Libcall LC;
+ if (IsSigned)
+ LC = RTLIB::getFPTOSINT(Op0.getValueType(), N->getValueType(0));
+ else
+ LC = RTLIB::getFPTOUINT(Op0.getValueType(), N->getValueType(0));
+ MakeLibCallOptions CallOptions;
+ EVT OpVT = Op0.getValueType();
+ CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true);
+ SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
+ SDValue Result;
+ std::tie(Result, Chain) =
+ makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain);
+ Results.push_back(Result);
+ if (IsStrict)
+ Results.push_back(Chain);
+ break;
+ }
+ case ISD::READCYCLECOUNTER: {
+ assert(!Subtarget.is64Bit() &&
+ "READCYCLECOUNTER only has custom type legalization on riscv32");
+
+ SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
+ SDValue RCW =
+ DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0));
+
+ Results.push_back(
+ DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1)));
+ Results.push_back(RCW.getValue(2));
+ break;
+ }
+ case ISD::LOAD: {
+ if (!ISD::isNON_EXTLoad(N))
+ return;
+
+ // Use a SEXTLOAD instead of the default EXTLOAD. Similar to the
+ // sext_inreg we emit for ADD/SUB/MUL/SLLI.
+ LoadSDNode *Ld = cast<LoadSDNode>(N);
+
+ SDLoc dl(N);
+ SDValue Res = DAG.getExtLoad(ISD::SEXTLOAD, dl, MVT::i64, Ld->getChain(),
+ Ld->getBasePtr(), Ld->getMemoryVT(),
+ Ld->getMemOperand());
+ Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Res));
+ Results.push_back(Res.getValue(1));
+ return;
+ }
+ case ISD::MUL: {
+ unsigned Size = N->getSimpleValueType(0).getSizeInBits();
+ unsigned XLen = Subtarget.getXLen();
+ // This multiply needs to be expanded, try to use MULHSU+MUL if possible.
+ if (Size > XLen) {
+ assert(Size == (XLen * 2) && "Unexpected custom legalisation");
+ SDValue LHS = N->getOperand(0);
+ SDValue RHS = N->getOperand(1);
+ APInt HighMask = APInt::getHighBitsSet(Size, XLen);
+
+ bool LHSIsU = DAG.MaskedValueIsZero(LHS, HighMask);
+ bool RHSIsU = DAG.MaskedValueIsZero(RHS, HighMask);
+ // We need exactly one side to be unsigned.
+ if (LHSIsU == RHSIsU)
+ return;
+
+ auto MakeMULPair = [&](SDValue S, SDValue U) {
+ MVT XLenVT = Subtarget.getXLenVT();
+ S = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, S);
+ U = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, U);
+ SDValue Lo = DAG.getNode(ISD::MUL, DL, XLenVT, S, U);
+ SDValue Hi = DAG.getNode(RISCVISD::MULHSU, DL, XLenVT, S, U);
+ return DAG.getNode(ISD::BUILD_PAIR, DL, N->getValueType(0), Lo, Hi);
+ };
+
+ bool LHSIsS = DAG.ComputeNumSignBits(LHS) > XLen;
+ bool RHSIsS = DAG.ComputeNumSignBits(RHS) > XLen;
+
+ // The other operand should be signed, but still prefer MULH when
+ // possible.
+ if (RHSIsU && LHSIsS && !RHSIsS)
+ Results.push_back(MakeMULPair(LHS, RHS));
+ else if (LHSIsU && RHSIsS && !LHSIsS)
+ Results.push_back(MakeMULPair(RHS, LHS));
+
+ return;
+ }
+ [[fallthrough]];
+ }
+ case ISD::ADD:
+ case ISD::SUB:
+ assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
+ "Unexpected custom legalisation");
+ Results.push_back(customLegalizeToWOpWithSExt(N, DAG));
+ break;
+ case ISD::SHL:
+ case ISD::SRA:
+ case ISD::SRL:
+ assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
+ "Unexpected custom legalisation");
+ if (N->getOperand(1).getOpcode() != ISD::Constant) {
+ // If we can use a BSET instruction, allow default promotion to apply.
+ if (N->getOpcode() == ISD::SHL && Subtarget.hasStdExtZbs() &&
+ isOneConstant(N->getOperand(0)))
+ break;
+ Results.push_back(customLegalizeToWOp(N, DAG));
+ break;
+ }
+
+ // Custom legalize ISD::SHL by placing a SIGN_EXTEND_INREG after. This is
+ // similar to customLegalizeToWOpWithSExt, but we must zero_extend the
+ // shift amount.
+ if (N->getOpcode() == ISD::SHL) {
+ SDLoc DL(N);
+ SDValue NewOp0 =
+ DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
+ SDValue NewOp1 =
+ DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1));
+ SDValue NewWOp = DAG.getNode(ISD::SHL, DL, MVT::i64, NewOp0, NewOp1);
+ SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
+ DAG.getValueType(MVT::i32));
+ Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
+ }
+
+ break;
+ case ISD::ROTL:
+ case ISD::ROTR:
+ assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
+ "Unexpected custom legalisation");
+ Results.push_back(customLegalizeToWOp(N, DAG));
+ break;
+ case ISD::CTTZ:
+ case ISD::CTTZ_ZERO_UNDEF:
+ case ISD::CTLZ:
+ case ISD::CTLZ_ZERO_UNDEF: {
+ assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
+ "Unexpected custom legalisation");
+
+ SDValue NewOp0 =
+ DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
+ bool IsCTZ =
+ N->getOpcode() == ISD::CTTZ || N->getOpcode() == ISD::CTTZ_ZERO_UNDEF;
+ unsigned Opc = IsCTZ ? RISCVISD::CTZW : RISCVISD::CLZW;
+ SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp0);
+ Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
+ return;
+ }
+ case ISD::SDIV:
+ case ISD::UDIV:
+ case ISD::UREM: {
+ MVT VT = N->getSimpleValueType(0);
+ assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) &&
+ Subtarget.is64Bit() && Subtarget.hasStdExtM() &&
+ "Unexpected custom legalisation");
+ // Don't promote division/remainder by constant since we should expand those
+ // to multiply by magic constant.
+ AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
+ if (N->getOperand(1).getOpcode() == ISD::Constant &&
+ !isIntDivCheap(N->getValueType(0), Attr))
+ return;
+
+ // If the input is i32, use ANY_EXTEND since the W instructions don't read
+ // the upper 32 bits. For other types we need to sign or zero extend
+ // based on the opcode.
+ unsigned ExtOpc = ISD::ANY_EXTEND;
+ if (VT != MVT::i32)
+ ExtOpc = N->getOpcode() == ISD::SDIV ? ISD::SIGN_EXTEND
+ : ISD::ZERO_EXTEND;
+
+ Results.push_back(customLegalizeToWOp(N, DAG, ExtOpc));
+ break;
+ }
+ case ISD::UADDO:
+ case ISD::USUBO: {
+ assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
+ "Unexpected custom legalisation");
+ bool IsAdd = N->getOpcode() == ISD::UADDO;
+ // Create an ADDW or SUBW.
+ SDValue LHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
+ SDValue RHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
+ SDValue Res =
+ DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, DL, MVT::i64, LHS, RHS);
+ Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Res,
+ DAG.getValueType(MVT::i32));
+
+ SDValue Overflow;
+ if (IsAdd && isOneConstant(RHS)) {
+ // Special case uaddo X, 1 overflowed if the addition result is 0.
+ // The general case (X + C) < C is not necessarily beneficial. Although we
+ // reduce the live range of X, we may introduce the materialization of
+ // constant C, especially when the setcc result is used by branch. We have
+ // no compare with constant and branch instructions.
+ Overflow = DAG.getSetCC(DL, N->getValueType(1), Res,
+ DAG.getConstant(0, DL, MVT::i64), ISD::SETEQ);
+ } else {
+ // Sign extend the LHS and perform an unsigned compare with the ADDW
+ // result. Since the inputs are sign extended from i32, this is equivalent
+ // to comparing the lower 32 bits.
+ LHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
+ Overflow = DAG.getSetCC(DL, N->getValueType(1), Res, LHS,
+ IsAdd ? ISD::SETULT : ISD::SETUGT);
+ }
+
+ Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
+ Results.push_back(Overflow);
+ return;
+ }
+ case ISD::UADDSAT:
+ case ISD::USUBSAT: {
+ assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
+ "Unexpected custom legalisation");
+ if (Subtarget.hasStdExtZbb()) {
+ // With Zbb we can sign extend and let LegalizeDAG use minu/maxu. Using
+ // sign extend allows overflow of the lower 32 bits to be detected on
+ // the promoted size.
+ SDValue LHS =
+ DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
+ SDValue RHS =
+ DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(1));
+ SDValue Res = DAG.getNode(N->getOpcode(), DL, MVT::i64, LHS, RHS);
+ Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
+ return;
+ }
+
+ // Without Zbb, expand to UADDO/USUBO+select which will trigger our custom
+ // promotion for UADDO/USUBO.
+ Results.push_back(expandAddSubSat(N, DAG));
+ return;
+ }
+ case ISD::ABS: {
+ assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
+ "Unexpected custom legalisation");
+
+ if (Subtarget.hasStdExtZbb()) {
+ // Emit a special ABSW node that will be expanded to NEGW+MAX at isel.
+ // This allows us to remember that the result is sign extended. Expanding
+ // to NEGW+MAX here requires a Freeze which breaks ComputeNumSignBits.
+ SDValue Src = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64,
+ N->getOperand(0));
+ SDValue Abs = DAG.getNode(RISCVISD::ABSW, DL, MVT::i64, Src);
+ Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Abs));
+ return;
+ }
+
+ // Expand abs to Y = (sraiw X, 31); subw(xor(X, Y), Y)
+ SDValue Src = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
+
+    // Freeze the source so we can increase its use count.
+ Src = DAG.getFreeze(Src);
+
+ // Copy sign bit to all bits using the sraiw pattern.
+ SDValue SignFill = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Src,
+ DAG.getValueType(MVT::i32));
+ SignFill = DAG.getNode(ISD::SRA, DL, MVT::i64, SignFill,
+ DAG.getConstant(31, DL, MVT::i64));
+
+ SDValue NewRes = DAG.getNode(ISD::XOR, DL, MVT::i64, Src, SignFill);
+ NewRes = DAG.getNode(ISD::SUB, DL, MVT::i64, NewRes, SignFill);
+
+ // NOTE: The result is only required to be anyextended, but sext is
+ // consistent with type legalization of sub.
+ NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewRes,
+ DAG.getValueType(MVT::i32));
+ Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
+ return;
+ }
+ case ISD::BITCAST: {
+ EVT VT = N->getValueType(0);
+ assert(VT.isInteger() && !VT.isVector() && "Unexpected VT!");
+ SDValue Op0 = N->getOperand(0);
+ EVT Op0VT = Op0.getValueType();
+ MVT XLenVT = Subtarget.getXLenVT();
+ if (VT == MVT::i16 && Op0VT == MVT::f16 &&
+ Subtarget.hasStdExtZfhOrZfhmin()) {
+ SDValue FPConv = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, XLenVT, Op0);
+ Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FPConv));
+ } else if (VT == MVT::i32 && Op0VT == MVT::f32 && Subtarget.is64Bit() &&
+ Subtarget.hasStdExtF()) {
+ SDValue FPConv =
+ DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0);
+ Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv));
+ } else if (!VT.isVector() && Op0VT.isFixedLengthVector() &&
+ isTypeLegal(Op0VT)) {
+ // Custom-legalize bitcasts from fixed-length vector types to illegal
+ // scalar types in order to improve codegen. Bitcast the vector to a
+ // one-element vector type whose element type is the same as the result
+ // type, and extract the first element.
+ EVT BVT = EVT::getVectorVT(*DAG.getContext(), VT, 1);
+ if (isTypeLegal(BVT)) {
+ SDValue BVec = DAG.getBitcast(BVT, Op0);
+ Results.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
+ DAG.getConstant(0, DL, XLenVT)));
+ }
+ }
+ break;
+ }
+ case RISCVISD::BREV8: {
+ MVT VT = N->getSimpleValueType(0);
+ MVT XLenVT = Subtarget.getXLenVT();
+ assert((VT == MVT::i16 || (VT == MVT::i32 && Subtarget.is64Bit())) &&
+ "Unexpected custom legalisation");
+ assert(Subtarget.hasStdExtZbkb() && "Unexpected extension");
+ SDValue NewOp = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, N->getOperand(0));
+ SDValue NewRes = DAG.getNode(N->getOpcode(), DL, XLenVT, NewOp);
+ // ReplaceNodeResults requires we maintain the same type for the return
+ // value.
+ Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, NewRes));
+ break;
+ }
+ case ISD::EXTRACT_VECTOR_ELT: {
+ // Custom-legalize an EXTRACT_VECTOR_ELT where XLEN<SEW, as the SEW element
+ // type is illegal (currently only vXi64 RV32).
+ // With vmv.x.s, when SEW > XLEN, only the least-significant XLEN bits are
+ // transferred to the destination register. We issue two of these from the
+ // upper- and lower- halves of the SEW-bit vector element, slid down to the
+ // first element.
+ SDValue Vec = N->getOperand(0);
+ SDValue Idx = N->getOperand(1);
+
+ // The vector type hasn't been legalized yet so we can't issue target
+ // specific nodes if it needs legalization.
+ // FIXME: We would manually legalize if it's important.
+ if (!isTypeLegal(Vec.getValueType()))
+ return;
+
+ MVT VecVT = Vec.getSimpleValueType();
+
+ assert(!Subtarget.is64Bit() && N->getValueType(0) == MVT::i64 &&
+ VecVT.getVectorElementType() == MVT::i64 &&
+ "Unexpected EXTRACT_VECTOR_ELT legalization");
+
+ // If this is a fixed vector, we need to convert it to a scalable vector.
+ MVT ContainerVT = VecVT;
+ if (VecVT.isFixedLengthVector()) {
+ ContainerVT = getContainerForFixedLengthVector(VecVT);
+ Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
+ }
+
+ MVT XLenVT = Subtarget.getXLenVT();
+
+ // Use a VL of 1 to avoid processing more elements than we need.
+ auto [Mask, VL] = getDefaultVLOps(1, ContainerVT, DL, DAG, Subtarget);
+
+ // Unless the index is known to be 0, we must slide the vector down to get
+ // the desired element into index 0.
+ if (!isNullConstant(Idx)) {
+ Vec = getVSlidedown(DAG, Subtarget, DL, ContainerVT,
+ DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
+ }
+
+ // Extract the lower XLEN bits of the correct vector element.
+ SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
+
+ // To extract the upper XLEN bits of the vector element, shift the first
+ // element right by 32 bits and re-extract the lower XLEN bits.
+ SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
+ DAG.getUNDEF(ContainerVT),
+ DAG.getConstant(32, DL, XLenVT), VL);
+ SDValue LShr32 =
+ DAG.getNode(RISCVISD::SRL_VL, DL, ContainerVT, Vec, ThirtyTwoV,
+ DAG.getUNDEF(ContainerVT), Mask, VL);
+
+ SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
+
+ Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
+ break;
+ }
+ case ISD::INTRINSIC_WO_CHAIN: {
+ unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
+ switch (IntNo) {
+ default:
+ llvm_unreachable(
+ "Don't know how to custom type legalize this intrinsic!");
+ case Intrinsic::riscv_orc_b: {
+ SDValue NewOp =
+ DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
+ SDValue Res = DAG.getNode(RISCVISD::ORC_B, DL, MVT::i64, NewOp);
+ Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
+ return;
+ }
+ case Intrinsic::riscv_vmv_x_s: {
+ EVT VT = N->getValueType(0);
+ MVT XLenVT = Subtarget.getXLenVT();
+ if (VT.bitsLT(XLenVT)) {
+ // Simple case just extract using vmv.x.s and truncate.
+ SDValue Extract = DAG.getNode(RISCVISD::VMV_X_S, DL,
+ Subtarget.getXLenVT(), N->getOperand(1));
+ Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Extract));
+ return;
+ }
+
+ assert(VT == MVT::i64 && !Subtarget.is64Bit() &&
+ "Unexpected custom legalization");
+
+ // We need to do the move in two steps.
+ SDValue Vec = N->getOperand(1);
+ MVT VecVT = Vec.getSimpleValueType();
+
+ // First extract the lower XLEN bits of the element.
+ SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
+
+ // To extract the upper XLEN bits of the vector element, shift the first
+ // element right by 32 bits and re-extract the lower XLEN bits.
+ auto [Mask, VL] = getDefaultVLOps(1, VecVT, DL, DAG, Subtarget);
+
+ SDValue ThirtyTwoV =
+ DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT),
+ DAG.getConstant(32, DL, XLenVT), VL);
+ SDValue LShr32 = DAG.getNode(RISCVISD::SRL_VL, DL, VecVT, Vec, ThirtyTwoV,
+ DAG.getUNDEF(VecVT), Mask, VL);
+ SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
+
+ Results.push_back(
+ DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
+ break;
+ }
+ }
+ break;
+ }
+ case ISD::VECREDUCE_ADD:
+ case ISD::VECREDUCE_AND:
+ case ISD::VECREDUCE_OR:
+ case ISD::VECREDUCE_XOR:
+ case ISD::VECREDUCE_SMAX:
+ case ISD::VECREDUCE_UMAX:
+ case ISD::VECREDUCE_SMIN:
+ case ISD::VECREDUCE_UMIN:
+ if (SDValue V = lowerVECREDUCE(SDValue(N, 0), DAG))
+ Results.push_back(V);
+ break;
+ case ISD::VP_REDUCE_ADD:
+ case ISD::VP_REDUCE_AND:
+ case ISD::VP_REDUCE_OR:
+ case ISD::VP_REDUCE_XOR:
+ case ISD::VP_REDUCE_SMAX:
+ case ISD::VP_REDUCE_UMAX:
+ case ISD::VP_REDUCE_SMIN:
+ case ISD::VP_REDUCE_UMIN:
+ if (SDValue V = lowerVPREDUCE(SDValue(N, 0), DAG))
+ Results.push_back(V);
+ break;
+ case ISD::GET_ROUNDING: {
+ SDVTList VTs = DAG.getVTList(Subtarget.getXLenVT(), MVT::Other);
+ SDValue Res = DAG.getNode(ISD::GET_ROUNDING, DL, VTs, N->getOperand(0));
+ Results.push_back(Res.getValue(0));
+ Results.push_back(Res.getValue(1));
+ break;
+ }
+ }
+}
+
+// Try to fold (<bop> x, (reduction.<bop> vec, start)) into
+// (reduction.<bop> vec, (<bop> x, start)) — i.e. fuse the scalar binary op
+// into the start value of a matching RVV reduction so the scalar instruction
+// disappears. Returns the replacement node, or SDValue() on no match.
+static SDValue combineBinOpToReduce(SDNode *N, SelectionDAG &DAG,
+                                    const RISCVSubtarget &Subtarget) {
+  // Map a scalar binary opcode to the corresponding RVV VL-based reduction.
+  auto BinOpToRVVReduce = [](unsigned Opc) {
+    switch (Opc) {
+    default:
+      llvm_unreachable("Unhandled binary to transfrom reduction");
+    case ISD::ADD:
+      return RISCVISD::VECREDUCE_ADD_VL;
+    case ISD::UMAX:
+      return RISCVISD::VECREDUCE_UMAX_VL;
+    case ISD::SMAX:
+      return RISCVISD::VECREDUCE_SMAX_VL;
+    case ISD::UMIN:
+      return RISCVISD::VECREDUCE_UMIN_VL;
+    case ISD::SMIN:
+      return RISCVISD::VECREDUCE_SMIN_VL;
+    case ISD::AND:
+      return RISCVISD::VECREDUCE_AND_VL;
+    case ISD::OR:
+      return RISCVISD::VECREDUCE_OR_VL;
+    case ISD::XOR:
+      return RISCVISD::VECREDUCE_XOR_VL;
+    case ISD::FADD:
+      return RISCVISD::VECREDUCE_FADD_VL;
+    case ISD::FMAXNUM:
+      return RISCVISD::VECREDUCE_FMAX_VL;
+    case ISD::FMINNUM:
+      return RISCVISD::VECREDUCE_FMIN_VL;
+    }
+  };
+
+  // True iff V is (extract_vector_elt (<matching reduction> ...), 0).
+  auto IsReduction = [&BinOpToRVVReduce](SDValue V, unsigned Opc) {
+    return V.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
+           isNullConstant(V.getOperand(1)) &&
+           V.getOperand(0).getOpcode() == BinOpToRVVReduce(Opc);
+  };
+
+  // Find which operand of N is the extracted reduction result.
+  unsigned Opc = N->getOpcode();
+  unsigned ReduceIdx;
+  if (IsReduction(N->getOperand(0), Opc))
+    ReduceIdx = 0;
+  else if (IsReduction(N->getOperand(1), Opc))
+    ReduceIdx = 1;
+  else
+    return SDValue();
+
+  // Skip if FADD disallows reassociation but the combiner needs.
+  if (Opc == ISD::FADD && !N->getFlags().hasAllowReassociation())
+    return SDValue();
+
+  SDValue Extract = N->getOperand(ReduceIdx);
+  SDValue Reduce = Extract.getOperand(0);
+  if (!Reduce.hasOneUse())
+    return SDValue();
+
+  // Operand 2 of the VL reduction is its start/merge scalar vector; look
+  // through an undef-based INSERT_SUBVECTOR wrapper if present.
+  SDValue ScalarV = Reduce.getOperand(2);
+  EVT ScalarVT = ScalarV.getValueType();
+  if (ScalarV.getOpcode() == ISD::INSERT_SUBVECTOR &&
+      ScalarV.getOperand(0)->isUndef())
+    ScalarV = ScalarV.getOperand(1);
+
+  // Make sure that ScalarV is a splat with VL=1.
+  if (ScalarV.getOpcode() != RISCVISD::VFMV_S_F_VL &&
+      ScalarV.getOpcode() != RISCVISD::VMV_S_X_VL &&
+      ScalarV.getOpcode() != RISCVISD::VMV_V_X_VL)
+    return SDValue();
+
+  if (!hasNonZeroAVL(ScalarV.getOperand(2)))
+    return SDValue();
+
+  // Check the scalar of ScalarV is neutral element
+  // TODO: Deal with value other than neutral element.
+  if (!isNeutralConstant(N->getOpcode(), N->getFlags(), ScalarV.getOperand(1),
+                         0))
+    return SDValue();
+
+  if (!ScalarV.hasOneUse())
+    return SDValue();
+
+  // The other operand of N becomes the reduction's new start value.
+  SDValue NewStart = N->getOperand(1 - ReduceIdx);
+
+  SDLoc DL(N);
+  SDValue NewScalarV =
+      lowerScalarInsert(NewStart, ScalarV.getOperand(2),
+                        ScalarV.getSimpleValueType(), DL, DAG, Subtarget);
+
+  // If we looked through an INSERT_SUBVECTOR we need to restore it.
+  if (ScalarVT != ScalarV.getValueType())
+    NewScalarV =
+        DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ScalarVT, DAG.getUNDEF(ScalarVT),
+                    NewScalarV, DAG.getConstant(0, DL, Subtarget.getXLenVT()));
+
+  // Rebuild the reduction with the new start and re-extract element 0.
+  SDValue NewReduce =
+      DAG.getNode(Reduce.getOpcode(), DL, Reduce.getValueType(),
+                  Reduce.getOperand(0), Reduce.getOperand(1), NewScalarV,
+                  Reduce.getOperand(3), Reduce.getOperand(4));
+  return DAG.getNode(Extract.getOpcode(), DL, Extract.getValueType(), NewReduce,
+                     Extract.getOperand(1));
+}
+
+// Optimize (add (shl x, c0), (shl y, c1)) ->
+//          (SLLI (SH*ADD x, y), c0), if c1-c0 equals to [1|2|3].
+// This exposes a Zba sh1add/sh2add/sh3add by factoring out the smaller shift.
+// Returns SDValue() when Zba is unavailable or the pattern does not apply.
+static SDValue transformAddShlImm(SDNode *N, SelectionDAG &DAG,
+                                  const RISCVSubtarget &Subtarget) {
+  // Perform this optimization only in the zba extension.
+  if (!Subtarget.hasStdExtZba())
+    return SDValue();
+
+  // Skip for vector types and larger types.
+  EVT VT = N->getValueType(0);
+  if (VT.isVector() || VT.getSizeInBits() > Subtarget.getXLen())
+    return SDValue();
+
+  // The two operand nodes must be SHL and have no other use.
+  SDValue N0 = N->getOperand(0);
+  SDValue N1 = N->getOperand(1);
+  if (N0->getOpcode() != ISD::SHL || N1->getOpcode() != ISD::SHL ||
+      !N0->hasOneUse() || !N1->hasOneUse())
+    return SDValue();
+
+  // Check c0 and c1.
+  auto *N0C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
+  auto *N1C = dyn_cast<ConstantSDNode>(N1->getOperand(1));
+  if (!N0C || !N1C)
+    return SDValue();
+  int64_t C0 = N0C->getSExtValue();
+  int64_t C1 = N1C->getSExtValue();
+  if (C0 <= 0 || C1 <= 0)
+    return SDValue();
+
+  // Skip if SH1ADD/SH2ADD/SH3ADD are not applicable.
+  int64_t Bits = std::min(C0, C1);
+  int64_t Diff = std::abs(C0 - C1);
+  if (Diff != 1 && Diff != 2 && Diff != 3)
+    return SDValue();
+
+  // Build nodes: NS is the operand with the smaller shift, NL the larger;
+  // (shl NL, Diff) + NS matches the SH*ADD form, then shift by Bits.
+  SDLoc DL(N);
+  SDValue NS = (C0 < C1) ? N0->getOperand(0) : N1->getOperand(0);
+  SDValue NL = (C0 > C1) ? N0->getOperand(0) : N1->getOperand(0);
+  SDValue NA0 =
+      DAG.getNode(ISD::SHL, DL, VT, NL, DAG.getConstant(Diff, DL, VT));
+  SDValue NA1 = DAG.getNode(ISD::ADD, DL, VT, NA0, NS);
+  return DAG.getNode(ISD::SHL, DL, VT, NA1, DAG.getConstant(Bits, DL, VT));
+}
+
+// Combine a constant select operand into its use:
+//
+// (and (select cond, -1, c), x)
+//   -> (select cond, x, (and x, c))  [AllOnes=1]
+// (or  (select cond, 0, c), x)
+//   -> (select cond, x, (or x, c))  [AllOnes=0]
+// (xor (select cond, 0, c), x)
+//   -> (select cond, x, (xor x, c))  [AllOnes=0]
+// (add (select cond, 0, c), x)
+//   -> (select cond, x, (add x, c))  [AllOnes=0]
+// (sub x, (select cond, 0, c))
+//   -> (select cond, x, (sub x, c))  [AllOnes=0]
+//
+// Slct is the select-like operand of N, OtherOp the other operand; AllOnes
+// selects whether the identity constant we look for is -1 or 0. Only enabled
+// with the short-forward-branch optimization. Returns SDValue() on no match.
+static SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
+                                   SelectionDAG &DAG, bool AllOnes,
+                                   const RISCVSubtarget &Subtarget) {
+  EVT VT = N->getValueType(0);
+
+  // Skip vectors.
+  if (VT.isVector())
+    return SDValue();
+
+  if (!Subtarget.hasShortForwardBranchOpt() ||
+      (Slct.getOpcode() != ISD::SELECT &&
+       Slct.getOpcode() != RISCVISD::SELECT_CC) ||
+      !Slct.hasOneUse())
+    return SDValue();
+
+  auto isZeroOrAllOnes = [](SDValue N, bool AllOnes) {
+    return AllOnes ? isAllOnesConstant(N) : isNullConstant(N);
+  };
+
+  // RISCVISD::SELECT_CC carries (lhs, rhs, cc) before its true/false values.
+  bool SwapSelectOps;
+  unsigned OpOffset = Slct.getOpcode() == RISCVISD::SELECT_CC ? 2 : 0;
+  SDValue TrueVal = Slct.getOperand(1 + OpOffset);
+  SDValue FalseVal = Slct.getOperand(2 + OpOffset);
+  SDValue NonConstantVal;
+  if (isZeroOrAllOnes(TrueVal, AllOnes)) {
+    SwapSelectOps = false;
+    NonConstantVal = FalseVal;
+  } else if (isZeroOrAllOnes(FalseVal, AllOnes)) {
+    SwapSelectOps = true;
+    NonConstantVal = TrueVal;
+  } else
+    return SDValue();
+
+  // Slct is now known to be the desired identity constant when CC is true.
+  TrueVal = OtherOp;
+  FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT, OtherOp, NonConstantVal);
+  // Unless SwapSelectOps says the condition should be false.
+  if (SwapSelectOps)
+    std::swap(TrueVal, FalseVal);
+
+  if (Slct.getOpcode() == RISCVISD::SELECT_CC)
+    return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), VT,
+                       {Slct.getOperand(0), Slct.getOperand(1),
+                        Slct.getOperand(2), TrueVal, FalseVal});
+
+  return DAG.getNode(ISD::SELECT, SDLoc(N), VT,
+                     {Slct.getOperand(0), TrueVal, FalseVal});
+}
+
+// Attempt combineSelectAndUse on each operand of a commutative operator N.
+// Tries (select, other) first, then the swapped pairing; returns the first
+// successful combine or SDValue() if neither matches.
+static SDValue combineSelectAndUseCommutative(SDNode *N, SelectionDAG &DAG,
+                                              bool AllOnes,
+                                              const RISCVSubtarget &Subtarget) {
+  SDValue N0 = N->getOperand(0);
+  SDValue N1 = N->getOperand(1);
+  if (SDValue Result = combineSelectAndUse(N, N0, N1, DAG, AllOnes, Subtarget))
+    return Result;
+  if (SDValue Result = combineSelectAndUse(N, N1, N0, DAG, AllOnes, Subtarget))
+    return Result;
+  return SDValue();
+}
+
+// Transform (add (mul x, c0), c1) ->
+//           (add (mul (add x, c1/c0), c0), c1%c0).
+// if c1/c0 and c1%c0 are simm12, while c1 is not. A special corner case
+// that should be excluded is when c0*(c1/c0) is simm12, which will lead
+// to an infinite loop in DAGCombine if transformed.
+// Or transform (add (mul x, c0), c1) ->
+//              (add (mul (add x, c1/c0+1), c0), c1%c0-c0),
+// if c1/c0+1 and c1%c0-c0 are simm12, while c1 is not. A special corner
+// case that should be excluded is when c0*(c1/c0+1) is simm12, which will
+// lead to an infinite loop in DAGCombine if transformed.
+// Or transform (add (mul x, c0), c1) ->
+//              (add (mul (add x, c1/c0-1), c0), c1%c0+c0),
+// if c1/c0-1 and c1%c0+c0 are simm12, while c1 is not. A special corner
+// case that should be excluded is when c0*(c1/c0-1) is simm12, which will
+// lead to an infinite loop in DAGCombine if transformed.
+// Or transform (add (mul x, c0), c1) ->
+//              (mul (add x, c1/c0), c0).
+// if c1%c0 is zero, and c1/c0 is simm12 while c1 is not.
+// The goal is to fold a large immediate c1 into addi-encodable pieces.
+static SDValue transformAddImmMulImm(SDNode *N, SelectionDAG &DAG,
+                                     const RISCVSubtarget &Subtarget) {
+  // Skip for vector types and larger types.
+  EVT VT = N->getValueType(0);
+  if (VT.isVector() || VT.getSizeInBits() > Subtarget.getXLen())
+    return SDValue();
+  // The first operand node must be a MUL and has no other use.
+  SDValue N0 = N->getOperand(0);
+  if (!N0->hasOneUse() || N0->getOpcode() != ISD::MUL)
+    return SDValue();
+  // Check if c0 and c1 match above conditions.
+  auto *N0C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
+  auto *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1));
+  if (!N0C || !N1C)
+    return SDValue();
+  // If N0C has multiple uses it's possible one of the cases in
+  // DAGCombiner::isMulAddWithConstProfitable will be true, which would result
+  // in an infinite loop.
+  if (!N0C->hasOneUse())
+    return SDValue();
+  int64_t C0 = N0C->getSExtValue();
+  int64_t C1 = N1C->getSExtValue();
+  int64_t CA, CB;
+  // C0 of -1/0/1 or an already-simm12 C1 would not profit (or would divide
+  // by zero below).
+  if (C0 == -1 || C0 == 0 || C0 == 1 || isInt<12>(C1))
+    return SDValue();
+  // Search for proper CA (non-zero) and CB that both are simm12.
+  if ((C1 / C0) != 0 && isInt<12>(C1 / C0) && isInt<12>(C1 % C0) &&
+      !isInt<12>(C0 * (C1 / C0))) {
+    CA = C1 / C0;
+    CB = C1 % C0;
+  } else if ((C1 / C0 + 1) != 0 && isInt<12>(C1 / C0 + 1) &&
+             isInt<12>(C1 % C0 - C0) && !isInt<12>(C0 * (C1 / C0 + 1))) {
+    CA = C1 / C0 + 1;
+    CB = C1 % C0 - C0;
+  } else if ((C1 / C0 - 1) != 0 && isInt<12>(C1 / C0 - 1) &&
+             isInt<12>(C1 % C0 + C0) && !isInt<12>(C0 * (C1 / C0 - 1))) {
+    CA = C1 / C0 - 1;
+    CB = C1 % C0 + C0;
+  } else
+    return SDValue();
+  // Build new nodes (add (mul (add x, c1/c0), c0), c1%c0).
+  SDLoc DL(N);
+  SDValue New0 = DAG.getNode(ISD::ADD, DL, VT, N0->getOperand(0),
+                             DAG.getConstant(CA, DL, VT));
+  SDValue New1 =
+      DAG.getNode(ISD::MUL, DL, VT, New0, DAG.getConstant(C0, DL, VT));
+  return DAG.getNode(ISD::ADD, DL, VT, New1, DAG.getConstant(CB, DL, VT));
+}
+
+// DAG combine entry point for ISD::ADD: tries the imm*mul, shl+shl (Zba),
+// and binop-into-reduction rewrites in turn, then the select-operand fold.
+static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG,
+                                 const RISCVSubtarget &Subtarget) {
+  if (SDValue V = transformAddImmMulImm(N, DAG, Subtarget))
+    return V;
+  if (SDValue V = transformAddShlImm(N, DAG, Subtarget))
+    return V;
+  if (SDValue V = combineBinOpToReduce(N, DAG, Subtarget))
+    return V;
+  // fold (add (select lhs, rhs, cc, 0, y), x) ->
+  //      (select lhs, rhs, cc, x, (add x, y))
+  return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false, Subtarget);
+}
+
+// Try to turn a sub boolean RHS and constant LHS into an addi.
+// (sub C, bool) == (add !bool-ish, C-1), which saves materializing C when
+// C-1 fits in a 12-bit ADDI immediate. Returns SDValue() on no match.
+static SDValue combineSubOfBoolean(SDNode *N, SelectionDAG &DAG) {
+  SDValue N0 = N->getOperand(0);
+  SDValue N1 = N->getOperand(1);
+  EVT VT = N->getValueType(0);
+  SDLoc DL(N);
+
+  // Require a constant LHS.
+  auto *N0C = dyn_cast<ConstantSDNode>(N0);
+  if (!N0C)
+    return SDValue();
+
+  // All our optimizations involve subtracting 1 from the immediate and forming
+  // an ADDI. Make sure the new immediate is valid for an ADDI.
+  APInt ImmValMinus1 = N0C->getAPIntValue() - 1;
+  if (!ImmValMinus1.isSignedIntN(12))
+    return SDValue();
+
+  SDValue NewLHS;
+  if (N1.getOpcode() == ISD::SETCC && N1.hasOneUse()) {
+    // (sub constant, (setcc x, y, eq/neq)) ->
+    // (add (setcc x, y, neq/eq), constant - 1)
+    ISD::CondCode CCVal = cast<CondCodeSDNode>(N1.getOperand(2))->get();
+    EVT SetCCOpVT = N1.getOperand(0).getValueType();
+    if (!isIntEqualitySetCC(CCVal) || !SetCCOpVT.isInteger())
+      return SDValue();
+    CCVal = ISD::getSetCCInverse(CCVal, SetCCOpVT);
+    NewLHS =
+        DAG.getSetCC(SDLoc(N1), VT, N1.getOperand(0), N1.getOperand(1), CCVal);
+  } else if (N1.getOpcode() == ISD::XOR && isOneConstant(N1.getOperand(1)) &&
+             N1.getOperand(0).getOpcode() == ISD::SETCC) {
+    // (sub C, (xor (setcc), 1)) -> (add (setcc), C-1).
+    // Since setcc returns a bool the xor is equivalent to 1-setcc.
+    NewLHS = N1.getOperand(0);
+  } else
+    return SDValue();
+
+  SDValue NewRHS = DAG.getConstant(ImmValMinus1, DL, VT);
+  return DAG.getNode(ISD::ADD, DL, VT, NewLHS, NewRHS);
+}
+
+// DAG combine entry point for ISD::SUB: first the boolean-RHS rewrite, then
+// the select-operand fold (SUB is not commutative, so only one pairing).
+static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG,
+                                 const RISCVSubtarget &Subtarget) {
+  if (SDValue V = combineSubOfBoolean(N, DAG))
+    return V;
+
+  // fold (sub x, (select lhs, rhs, cc, 0, y)) ->
+  //      (select lhs, rhs, cc, x, (sub x, y))
+  SDValue N0 = N->getOperand(0);
+  SDValue N1 = N->getOperand(1);
+  return combineSelectAndUse(N, N1, N0, DAG, /*AllOnes*/ false, Subtarget);
+}
+
+// Apply DeMorgan's law to (and/or (xor X, 1), (xor Y, 1)) if X and Y are 0/1.
+// Legalizing setcc can introduce xors like this. Doing this transform reduces
+// the number of xors and may allow the xor to fold into a branch condition.
+// Produces (xor (or/and X, Y), 1). Returns SDValue() on no match.
+static SDValue combineDeMorganOfBoolean(SDNode *N, SelectionDAG &DAG) {
+  SDValue N0 = N->getOperand(0);
+  SDValue N1 = N->getOperand(1);
+  bool IsAnd = N->getOpcode() == ISD::AND;
+
+  if (N0.getOpcode() != ISD::XOR || N1.getOpcode() != ISD::XOR)
+    return SDValue();
+
+  if (!N0.hasOneUse() || !N1.hasOneUse())
+    return SDValue();
+
+  SDValue N01 = N0.getOperand(1);
+  SDValue N11 = N1.getOperand(1);
+
+  // For AND, SimplifyDemandedBits may have turned one of the (xor X, 1) into
+  // (xor X, -1) based on the upper bits of the other operand being 0. If the
+  // operation is And, allow one of the Xors to use -1.
+  if (isOneConstant(N01)) {
+    if (!isOneConstant(N11) && !(IsAnd && isAllOnesConstant(N11)))
+      return SDValue();
+  } else if (isOneConstant(N11)) {
+    // N01 and N11 being 1 was already handled. Handle N11==1 and N01==-1.
+    if (!(IsAnd && isAllOnesConstant(N01)))
+      return SDValue();
+  } else
+    return SDValue();
+
+  EVT VT = N->getValueType(0);
+
+  SDValue N00 = N0.getOperand(0);
+  SDValue N10 = N1.getOperand(0);
+
+  // The LHS of the xors needs to be 0/1.
+  APInt Mask = APInt::getBitsSetFrom(VT.getSizeInBits(), 1);
+  if (!DAG.MaskedValueIsZero(N00, Mask) || !DAG.MaskedValueIsZero(N10, Mask))
+    return SDValue();
+
+  // Invert the opcode and insert a new xor.
+  SDLoc DL(N);
+  unsigned Opc = IsAnd ? ISD::OR : ISD::AND;
+  SDValue Logic = DAG.getNode(Opc, DL, VT, N00, N10);
+  return DAG.getNode(ISD::XOR, DL, VT, Logic, DAG.getConstant(1, DL, VT));
+}
+
+// DAG combine entry point for ISD::TRUNCATE on RV64 with Zbs: keeps a
+// variable-shift-then-truncate-to-i1 in a BEXT-recognizable form.
+static SDValue performTRUNCATECombine(SDNode *N, SelectionDAG &DAG,
+                                      const RISCVSubtarget &Subtarget) {
+  SDValue N0 = N->getOperand(0);
+  EVT VT = N->getValueType(0);
+
+  // Pre-promote (i1 (truncate (srl X, Y))) on RV64 with Zbs without zero
+  // extending X. This is safe since we only need the LSB after the shift and
+  // shift amounts larger than 31 would produce poison. If we wait until
+  // type legalization, we'll create RISCVISD::SRLW and we can't recover it
+  // to use a BEXT instruction.
+  if (Subtarget.is64Bit() && Subtarget.hasStdExtZbs() && VT == MVT::i1 &&
+      N0.getValueType() == MVT::i32 && N0.getOpcode() == ISD::SRL &&
+      !isa<ConstantSDNode>(N0.getOperand(1)) && N0.hasOneUse()) {
+    SDLoc DL(N0);
+    SDValue Op0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N0.getOperand(0));
+    SDValue Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N0.getOperand(1));
+    SDValue Srl = DAG.getNode(ISD::SRL, DL, MVT::i64, Op0, Op1);
+    return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Srl);
+  }
+
+  return SDValue();
+}
+
+namespace {
+// Helper class contains information about comparison operation.
+// The first two operands of this operation are compared values and the
+// last one is the operation.
+// Compared values are stored in Ops.
+// Comparison operation is stored in CCode.
+class CmpOpInfo {
+  static unsigned constexpr Size = 2u;
+
+  // Type for storing operands of compare operation.
+  using OpsArray = std::array<SDValue, Size>;
+  OpsArray Ops;
+
+  using const_iterator = OpsArray::const_iterator;
+  const_iterator begin() const { return Ops.begin(); }
+  const_iterator end() const { return Ops.end(); }
+
+  // Condition code of the comparison (e.g. SETLT).
+  ISD::CondCode CCode;
+
+  // Indices into Ops of the operand shared with the paired comparison
+  // (CommonPos) and the operand unique to this comparison (DifferPos).
+  // Initialized to Size (invalid) until establishCorrespondence() runs.
+  unsigned CommonPos{Size};
+  unsigned DifferPos{Size};
+
+  // Sets CommonPos and DifferPos based on incoming position
+  // of common operand CPos.
+  void setPositions(const_iterator CPos) {
+    assert(CPos != Ops.end() && "Common operand has to be in OpsArray.\n");
+    CommonPos = CPos == Ops.begin() ? 0 : 1;
+    DifferPos = 1 - CommonPos;
+    assert((DifferPos == 0 || DifferPos == 1) &&
+           "Positions can be only 0 or 1.");
+  }
+
+  // Private constructor of comparison info based on comparison operator.
+  // It is private because a CmpOpInfo is only meaningful relative to another
+  // comparison operator. Therefore, infos about comparison operations
+  // have to be collected simultaneously via CmpOpInfo::getInfoAbout().
+  CmpOpInfo(const SDValue &CmpOp)
+      : Ops{CmpOp.getOperand(0), CmpOp.getOperand(1)},
+        CCode{cast<CondCodeSDNode>(CmpOp.getOperand(2))->get()} {}
+
+  // Finds common operand of Op1 and Op2 and finishes filling CmpOpInfos.
+  // Returns true if common operand is found. Otherwise - false.
+  static bool establishCorrespondence(CmpOpInfo &Op1, CmpOpInfo &Op2) {
+    const auto CommonOpIt1 =
+        std::find_first_of(Op1.begin(), Op1.end(), Op2.begin(), Op2.end());
+    if (CommonOpIt1 == Op1.end())
+      return false;
+
+    const auto CommonOpIt2 = std::find(Op2.begin(), Op2.end(), *CommonOpIt1);
+    assert(CommonOpIt2 != Op2.end() &&
+           "Cannot find common operand in the second comparison operation.");
+
+    Op1.setPositions(CommonOpIt1);
+    Op2.setPositions(CommonOpIt2);
+
+    return true;
+  }
+
+public:
+  CmpOpInfo(const CmpOpInfo &) = default;
+  CmpOpInfo(CmpOpInfo &&) = default;
+
+  SDValue const &operator[](unsigned Pos) const {
+    assert(Pos < Size && "Out of range\n");
+    return Ops[Pos];
+  }
+
+  // Creates infos about comparison operations CmpOp0 and CmpOp1.
+  // If there is no common operand returns None. Otherwise, returns
+  // correspondence info about comparison operations.
+  static std::optional<std::pair<CmpOpInfo, CmpOpInfo>>
+  getInfoAbout(SDValue const &CmpOp0, SDValue const &CmpOp1) {
+    CmpOpInfo Op0{CmpOp0};
+    CmpOpInfo Op1{CmpOp1};
+    if (!establishCorrespondence(Op0, Op1))
+      return std::nullopt;
+    return std::make_pair(Op0, Op1);
+  }
+
+  // Returns position of common operand.
+  unsigned getCPos() const { return CommonPos; }
+
+  // Returns position of differ operand.
+  unsigned getDPos() const { return DifferPos; }
+
+  // Returns common operand.
+  SDValue const &getCOp() const { return operator[](CommonPos); }
+
+  // Returns differ operand.
+  SDValue const &getDOp() const { return operator[](DifferPos); }
+
+  // Returns the condition code of the comparison operation.
+  ISD::CondCode getCondCode() const { return CCode; }
+};
+} // namespace
+
+// Verifies conditions to apply an optimization.
+// Returns Reference comparison code and three operands A, B, C.
+// Conditions for optimization:
+//   One operand of the comparisons has to be common.
+//   This operand is written to C.
+//   The two other operands are different. They are written to A and B.
+//   Comparisons have to be similar with respect to common operand C.
+//     e.g. A < C; C > B are similar
+//      but A < C; B > C are not.
+// Reference comparison code is the comparison code if
+// common operand is right placed.
+//   e.g. C > A will be swapped to A < C.
+static std::optional<std::tuple<ISD::CondCode, SDValue, SDValue, SDValue>>
+verifyCompareConds(SDNode *N, SelectionDAG &DAG) {
+  LLVM_DEBUG(
+      dbgs() << "Checking conditions for comparison operation combining.\n";);
+
+  SDValue V0 = N->getOperand(0);
+  SDValue V1 = N->getOperand(1);
+  assert(V0.getValueType() == V1.getValueType() &&
+         "Operations must have the same value type.");
+
+  // Condition 1. Operations have to be used only in logic operation.
+  if (!V0.hasOneUse() || !V1.hasOneUse())
+    return std::nullopt;
+
+  // Condition 2. Operands have to be comparison operations.
+  if (V0.getOpcode() != ISD::SETCC || V1.getOpcode() != ISD::SETCC)
+    return std::nullopt;
+
+  // Condition 3.1. Operations only with integers.
+  if (!V0.getOperand(0).getValueType().isInteger())
+    return std::nullopt;
+
+  const auto ComparisonInfo = CmpOpInfo::getInfoAbout(V0, V1);
+  // Condition 3.2. Common operand has to be in comparison.
+  if (!ComparisonInfo)
+    return std::nullopt;
+
+  const auto [Op0, Op1] = ComparisonInfo.value();
+
+  LLVM_DEBUG(dbgs() << "Shared operands are on positions: " << Op0.getCPos()
+                    << " and " << Op1.getCPos() << '\n';);
+  // If common operand at the first position then swap operation to convert to
+  // strict pattern. Common operand has to be right hand side.
+  ISD::CondCode RefCond = Op0.getCondCode();
+  ISD::CondCode AssistCode = Op1.getCondCode();
+  if (!Op0.getCPos())
+    RefCond = ISD::getSetCCSwappedOperands(RefCond);
+  if (!Op1.getCPos())
+    AssistCode = ISD::getSetCCSwappedOperands(AssistCode);
+  LLVM_DEBUG(dbgs() << "Reference condition is: " << RefCond << '\n';);
+  // If there are different comparison operations then do not perform an
+  // optimization. a < c; c < b -> will be changed to b > c.
+  if (RefCond != AssistCode)
+    return std::nullopt;
+
+  // Conditions can be only similar to Less or Greater. (>, >=, <, <=)
+  // Applying this mask to the operation will determine Less and Greater
+  // operations.
+  const unsigned CmpMask = 0b110;
+  const unsigned MaskedOpcode = CmpMask & RefCond;
+  // If masking gave 0b110, then this is an operation NE, O or TRUE.
+  if (MaskedOpcode == CmpMask)
+    return std::nullopt;
+  // If masking gave 00000, then this is an operation E, O or FALSE.
+  if (MaskedOpcode == 0)
+    return std::nullopt;
+  // Everything else is similar to Less or Greater.
+
+  SDValue A = Op0.getDOp();
+  SDValue B = Op1.getDOp();
+  SDValue C = Op0.getCOp();
+
+  LLVM_DEBUG(
+      dbgs() << "The conditions for combining comparisons are satisfied.\n";);
+  return std::make_tuple(RefCond, A, B, C);
+}
+
+// Chooses the MIN/MAX selection opcode for the combined comparison:
+// signedness picks the row; (IsAnd XOR IsGreaterOp) picks MIN vs MAX.
+static ISD::NodeType getSelectionCode(bool IsUnsigned, bool IsAnd,
+                                      bool IsGreaterOp) {
+  // Codes of selection operation. The first index selects signed or unsigned,
+  // the second index selects MIN/MAX.
+  static constexpr ISD::NodeType SelectionCodes[2][2] = {
+      {ISD::SMIN, ISD::SMAX}, {ISD::UMIN, ISD::UMAX}};
+  const bool ChooseSelCode = IsAnd ^ IsGreaterOp;
+  return SelectionCodes[IsUnsigned][ChooseSelCode];
+}
+
+// Combines two comparison operation and logic operation to one selection
+// operation(min, max) and logic operation. Returns new constructed Node if
+// conditions for optimization are satisfied. Requires Zbb (which provides
+// scalar min/max instructions); N must be an AND or OR of two setccs.
+static SDValue combineCmpOp(SDNode *N, SelectionDAG &DAG,
+                            const RISCVSubtarget &Subtarget) {
+  if (!Subtarget.hasStdExtZbb())
+    return SDValue();
+
+  const unsigned BitOpcode = N->getOpcode();
+  assert((BitOpcode == ISD::AND || BitOpcode == ISD::OR) &&
+         "This optimization can be used only with AND/OR operations");
+
+  const auto Props = verifyCompareConds(N, DAG);
+  // If conditions are invalidated then do not perform an optimization.
+  if (!Props)
+    return SDValue();
+
+  const auto [RefOpcode, A, B, C] = Props.value();
+  const EVT CmpOpVT = A.getValueType();
+
+  const bool IsGreaterOp = RefOpcode & 0b10;
+  const bool IsUnsigned = ISD::isUnsignedIntSetCC(RefOpcode);
+  assert((IsUnsigned || ISD::isSignedIntSetCC(RefOpcode)) &&
+         "Operation neither with signed or unsigned integers.");
+
+  const bool IsAnd = BitOpcode == ISD::AND;
+  const ISD::NodeType PickCode =
+      getSelectionCode(IsUnsigned, IsAnd, IsGreaterOp);
+
+  // (setcc A, C) <bitop> (setcc B, C) --> setcc (min/max A, B), C
+  SDLoc DL(N);
+  SDValue Pick = DAG.getNode(PickCode, DL, CmpOpVT, A, B);
+  SDValue Cmp =
+      DAG.getSetCC(DL, N->getOperand(0).getValueType(), Pick, C, RefOpcode);
+
+  return Cmp;
+}
+
+// DAG combine entry point for ISD::AND: BEXT pre-promotion, setcc-pair to
+// min/max, binop-into-reduction, DeMorgan, then the select-operand fold.
+static SDValue performANDCombine(SDNode *N,
+                                 TargetLowering::DAGCombinerInfo &DCI,
+                                 const RISCVSubtarget &Subtarget) {
+  SelectionDAG &DAG = DCI.DAG;
+
+  SDValue N0 = N->getOperand(0);
+  // Pre-promote (i32 (and (srl X, Y), 1)) on RV64 with Zbs without zero
+  // extending X. This is safe since we only need the LSB after the shift and
+  // shift amounts larger than 31 would produce poison. If we wait until
+  // type legalization, we'll create RISCVISD::SRLW and we can't recover it
+  // to use a BEXT instruction.
+  if (Subtarget.is64Bit() && Subtarget.hasStdExtZbs() &&
+      N->getValueType(0) == MVT::i32 && isOneConstant(N->getOperand(1)) &&
+      N0.getOpcode() == ISD::SRL && !isa<ConstantSDNode>(N0.getOperand(1)) &&
+      N0.hasOneUse()) {
+    SDLoc DL(N);
+    SDValue Op0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N0.getOperand(0));
+    SDValue Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N0.getOperand(1));
+    SDValue Srl = DAG.getNode(ISD::SRL, DL, MVT::i64, Op0, Op1);
+    SDValue And = DAG.getNode(ISD::AND, DL, MVT::i64, Srl,
+                              DAG.getConstant(1, DL, MVT::i64));
+    return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, And);
+  }
+
+  if (SDValue V = combineCmpOp(N, DAG, Subtarget))
+    return V;
+
+  if (SDValue V = combineBinOpToReduce(N, DAG, Subtarget))
+    return V;
+
+  if (DCI.isAfterLegalizeDAG())
+    if (SDValue V = combineDeMorganOfBoolean(N, DAG))
+      return V;
+
+  // fold (and (select lhs, rhs, cc, -1, y), x) ->
+  //      (select lhs, rhs, cc, x, (and x, y))
+  return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ true, Subtarget);
+}
+
+// DAG combine entry point for ISD::OR: setcc-pair to min/max,
+// binop-into-reduction, DeMorgan, then the select-operand fold.
+static SDValue performORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
+                                const RISCVSubtarget &Subtarget) {
+  SelectionDAG &DAG = DCI.DAG;
+
+  if (SDValue V = combineCmpOp(N, DAG, Subtarget))
+    return V;
+
+  if (SDValue V = combineBinOpToReduce(N, DAG, Subtarget))
+    return V;
+
+  if (DCI.isAfterLegalizeDAG())
+    if (SDValue V = combineDeMorganOfBoolean(N, DAG))
+      return V;
+
+  // fold (or (select cond, 0, y), x) ->
+  //      (select cond, x, (or x, y))
+  return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false, Subtarget);
+}
+
+// DAG combine entry point for ISD::XOR: rotate rewrite for inverted
+// single-bit masks, binop-into-reduction, then the select-operand fold.
+static SDValue performXORCombine(SDNode *N, SelectionDAG &DAG,
+                                 const RISCVSubtarget &Subtarget) {
+  SDValue N0 = N->getOperand(0);
+  SDValue N1 = N->getOperand(1);
+
+  // fold (xor (sllw 1, x), -1) -> (rolw ~1, x)
+  // NOTE: Assumes ROL being legal means ROLW is legal.
+  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+  if (N0.getOpcode() == RISCVISD::SLLW &&
+      isAllOnesConstant(N1) && isOneConstant(N0.getOperand(0)) &&
+      TLI.isOperationLegal(ISD::ROTL, MVT::i64)) {
+    SDLoc DL(N);
+    return DAG.getNode(RISCVISD::ROLW, DL, MVT::i64,
+                       DAG.getConstant(~1, DL, MVT::i64), N0.getOperand(1));
+  }
+
+  if (SDValue V = combineBinOpToReduce(N, DAG, Subtarget))
+    return V;
+  // fold (xor (select cond, 0, y), x) ->
+  //      (select cond, x, (xor x, y))
+  return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false, Subtarget);
+}
+
+// Replace (seteq (i64 (and X, 0xffffffff)), C1) with
+// (seteq (i64 (sext_inreg (X, i32)), C1')) where C1' is C1 sign extended from
+// bit 31. Same for setne. C1' may be cheaper to materialize and the sext_inreg
+// can become a sext.w instead of a shift pair.
+static SDValue performSETCCCombine(SDNode *N, SelectionDAG &DAG,
+                                   const RISCVSubtarget &Subtarget) {
+  SDValue N0 = N->getOperand(0);
+  SDValue N1 = N->getOperand(1);
+  EVT VT = N->getValueType(0);
+  EVT OpVT = N0.getValueType();
+
+  // Only applies to 64-bit comparisons on RV64.
+  if (OpVT != MVT::i64 || !Subtarget.is64Bit())
+    return SDValue();
+
+  // RHS needs to be a constant.
+  auto *N1C = dyn_cast<ConstantSDNode>(N1);
+  if (!N1C)
+    return SDValue();
+
+  // LHS needs to be (and X, 0xffffffff).
+  if (N0.getOpcode() != ISD::AND || !N0.hasOneUse() ||
+      !isa<ConstantSDNode>(N0.getOperand(1)) ||
+      N0.getConstantOperandVal(1) != UINT64_C(0xffffffff))
+    return SDValue();
+
+  // Looking for an equality compare.
+  ISD::CondCode Cond = cast<CondCodeSDNode>(N->getOperand(2))->get();
+  if (!isIntEqualitySetCC(Cond))
+    return SDValue();
+
+  // Don't do this if the sign bit is provably zero, it will be turned back into
+  // an AND.
+  APInt SignMask = APInt::getOneBitSet(64, 31);
+  if (DAG.MaskedValueIsZero(N0.getOperand(0), SignMask))
+    return SDValue();
+
+  const APInt &C1 = N1C->getAPIntValue();
+
+  SDLoc dl(N);
+  // If the constant is larger than 2^32 - 1 it is impossible for both sides
+  // to be equal.
+  if (C1.getActiveBits() > 32)
+    return DAG.getBoolConstant(Cond == ISD::SETNE, dl, VT, OpVT);
+
+  SDValue SExtOp = DAG.getNode(ISD::SIGN_EXTEND_INREG, N, OpVT,
+                               N0.getOperand(0), DAG.getValueType(MVT::i32));
+  return DAG.getSetCC(dl, VT, SExtOp, DAG.getConstant(C1.trunc(32).sext(64),
+                                                      dl, OpVT), Cond);
+}
+
+// DAG combine entry point for ISD::SIGN_EXTEND_INREG: a sign extension of a
+// half-precision FP move can use the sign-extending move directly.
+static SDValue
+performSIGN_EXTEND_INREGCombine(SDNode *N, SelectionDAG &DAG,
+                                const RISCVSubtarget &Subtarget) {
+  SDValue Src = N->getOperand(0);
+  EVT VT = N->getValueType(0);
+
+  // Fold (sext_inreg (fmv_x_anyexth X), i16) -> (fmv_x_signexth X)
+  if (Src.getOpcode() == RISCVISD::FMV_X_ANYEXTH &&
+      cast<VTSDNode>(N->getOperand(1))->getVT().bitsGE(MVT::i16))
+    return DAG.getNode(RISCVISD::FMV_X_SIGNEXTH, SDLoc(N), VT,
+                       Src.getOperand(0));
+
+  return SDValue();
+}
+
+namespace {
+// Forward declaration of the structure holding the necessary information to
+// apply a combine.
+struct CombineResult;
+
+/// Helper class for folding sign/zero extensions.
+/// In particular, this class is used for the following combines:
+/// add_vl -> vwadd(u) | vwadd(u)_w
+/// sub_vl -> vwsub(u) | vwsub(u)_w
+/// mul_vl -> vwmul(u) | vwmul_su
+///
+/// An object of this class represents an operand of the operation we want to
+/// combine.
+/// E.g., when trying to combine `mul_vl a, b`, we will have one instance of
+/// NodeExtensionHelper for `a` and one for `b`.
+///
+/// This class abstracts away how the extension is materialized and
+/// how its Mask, VL, number of users affect the combines.
+///
+/// In particular:
+/// - VWADD_W is conceptually == add(op0, sext(op1))
+/// - VWADDU_W == add(op0, zext(op1))
+/// - VWSUB_W == sub(op0, sext(op1))
+/// - VWSUBU_W == sub(op0, zext(op1))
+///
+/// And VMV_V_X_VL, depending on the value, is conceptually equivalent to
+/// zext|sext(smaller_value).
+struct NodeExtensionHelper {
+  /// Records if this operand is like being zero extended.
+  bool SupportsZExt;
+  /// Records if this operand is like being sign extended.
+  /// Note: SupportsZExt and SupportsSExt are not mutually exclusive. For
+  /// instance, a splat constant (e.g., 3), would support being both sign and
+  /// zero extended.
+  bool SupportsSExt;
+  /// This boolean captures whether we care if this operand would still be
+  /// around after the folding happens.
+  bool EnforceOneUse;
+  /// Records if this operand's mask needs to match the mask of the operation
+  /// that it will fold into.
+  bool CheckMask;
+  /// Value of the Mask for this operand.
+  /// It may be SDValue().
+  SDValue Mask;
+  /// Value of the vector length operand.
+  /// It may be SDValue().
+  SDValue VL;
+  /// Original value that this NodeExtensionHelper represents.
+  SDValue OrigOperand;
+
+  /// Get the value feeding the extension or the value itself.
+  /// E.g., for zext(a), this would return a.
+  SDValue getSource() const {
+    switch (OrigOperand.getOpcode()) {
+    case RISCVISD::VSEXT_VL:
+    case RISCVISD::VZEXT_VL:
+      return OrigOperand.getOperand(0);
+    default:
+      return OrigOperand;
+    }
+  }
+
+  /// Check if this instance represents a splat.
+  bool isSplat() const {
+    return OrigOperand.getOpcode() == RISCVISD::VMV_V_X_VL;
+  }
+
+  /// Get or create a value that can feed \p Root with the given extension \p
+  /// SExt. If \p SExt is None, this returns the source of this operand.
+  /// \see ::getSource().
+  SDValue getOrCreateExtendedOp(const SDNode *Root, SelectionDAG &DAG,
+                                std::optional<bool> SExt) const {
+    if (!SExt.has_value())
+      return OrigOperand;
+
+    MVT NarrowVT = getNarrowType(Root);
+
+    SDValue Source = getSource();
+    if (Source.getValueType() == NarrowVT)
+      return Source;
+
+    unsigned ExtOpc = *SExt ? RISCVISD::VSEXT_VL : RISCVISD::VZEXT_VL;
+
+    // If we need an extension, we should be changing the type.
+    SDLoc DL(Root);
+    auto [Mask, VL] = getMaskAndVL(Root);
+    switch (OrigOperand.getOpcode()) {
+    case RISCVISD::VSEXT_VL:
+    case RISCVISD::VZEXT_VL:
+      return DAG.getNode(ExtOpc, DL, NarrowVT, Source, Mask, VL);
+    case RISCVISD::VMV_V_X_VL:
+      // Re-splat the scalar at the narrow type (undef passthru, same VL).
+      return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, NarrowVT,
+                         DAG.getUNDEF(NarrowVT), Source.getOperand(1), VL);
+    default:
+      // Other opcodes can only come from the original LHS of VW(ADD|SUB)_W_VL
+      // and that operand should already have the right NarrowVT so no
+      // extension should be required at this point.
+      llvm_unreachable("Unsupported opcode");
+    }
+  }
+
+  /// Helper function to get the narrow type for \p Root.
+  /// The narrow type is the type of \p Root where we divided the size of each
+  /// element by 2. E.g., if Root's type <2xi16> -> narrow type <2xi8>.
+  /// \pre The size of the type of the elements of Root must be a multiple of 2
+  /// and be greater than 16.
+  static MVT getNarrowType(const SDNode *Root) {
+    MVT VT = Root->getSimpleValueType(0);
+
+    // Determine the narrow size.
+    unsigned NarrowSize = VT.getScalarSizeInBits() / 2;
+    assert(NarrowSize >= 8 && "Trying to extend something we can't represent");
+    MVT NarrowVT = MVT::getVectorVT(MVT::getIntegerVT(NarrowSize),
+                                    VT.getVectorElementCount());
+    return NarrowVT;
+  }
+
+  /// Return the opcode required to materialize the folding of the sign
+  /// extensions (\p IsSExt == true) or zero extensions (IsSExt == false) for
+  /// both operands for \p Opcode.
+  /// Put differently, get the opcode to materialize:
+  /// - IsSExt == true: \p Opcode(sext(a), sext(b)) -> newOpcode(a, b)
+  /// - IsSExt == false: \p Opcode(zext(a), zext(b)) -> newOpcode(a, b)
+  /// \pre \p Opcode represents a supported root (\see ::isSupportedRoot()).
+  static unsigned getSameExtensionOpcode(unsigned Opcode, bool IsSExt) {
+    switch (Opcode) {
+    case RISCVISD::ADD_VL:
+    case RISCVISD::VWADD_W_VL:
+    case RISCVISD::VWADDU_W_VL:
+      return IsSExt ? RISCVISD::VWADD_VL : RISCVISD::VWADDU_VL;
+    case RISCVISD::MUL_VL:
+      return IsSExt ? RISCVISD::VWMUL_VL : RISCVISD::VWMULU_VL;
+    case RISCVISD::SUB_VL:
+    case RISCVISD::VWSUB_W_VL:
+    case RISCVISD::VWSUBU_W_VL:
+      return IsSExt ? RISCVISD::VWSUB_VL : RISCVISD::VWSUBU_VL;
+    default:
+      llvm_unreachable("Unexpected opcode");
+    }
+  }
+
+  /// Get the opcode to materialize \p Opcode(sext(a), zext(b)) ->
+  /// newOpcode(a, b).
+  static unsigned getSUOpcode(unsigned Opcode) {
+    assert(Opcode == RISCVISD::MUL_VL && "SU is only supported for MUL");
+    return RISCVISD::VWMULSU_VL;
+  }
+
+  /// Get the opcode to materialize \p Opcode(a, s|zext(b)) ->
+  /// newOpcode(a, b).
+  static unsigned getWOpcode(unsigned Opcode, bool IsSExt) {
+    switch (Opcode) {
+    case RISCVISD::ADD_VL:
+      return IsSExt ? RISCVISD::VWADD_W_VL : RISCVISD::VWADDU_W_VL;
+    case RISCVISD::SUB_VL:
+      return IsSExt ? RISCVISD::VWSUB_W_VL : RISCVISD::VWSUBU_W_VL;
+    default:
+      llvm_unreachable("Unexpected opcode");
+    }
+  }
+
+  using CombineToTry = std::function<std::optional<CombineResult>(
+      SDNode * /*Root*/, const NodeExtensionHelper & /*LHS*/,
+      const NodeExtensionHelper & /*RHS*/)>;
+
+  /// Check if this node needs to be fully folded or extended for all users.
+  bool needToPromoteOtherUsers() const { return EnforceOneUse; }
+
+  /// Helper method to set the various fields of this struct based on the
+  /// type of \p Root.
+  void fillUpExtensionSupport(SDNode *Root, SelectionDAG &DAG) {
+    SupportsZExt = false;
+    SupportsSExt = false;
+    EnforceOneUse = true;
+    CheckMask = true;
+    switch (OrigOperand.getOpcode()) {
+    case RISCVISD::VZEXT_VL:
+      SupportsZExt = true;
+      Mask = OrigOperand.getOperand(1);
+      VL = OrigOperand.getOperand(2);
+      break;
+    case RISCVISD::VSEXT_VL:
+      SupportsSExt = true;
+      Mask = OrigOperand.getOperand(1);
+      VL = OrigOperand.getOperand(2);
+      break;
+    case RISCVISD::VMV_V_X_VL: {
+      // Historically, we didn't care about splat values not disappearing during
+      // combines.
+      EnforceOneUse = false;
+      CheckMask = false;
+      VL = OrigOperand.getOperand(2);
+
+      // The operand is a splat of a scalar.
+
+      // The passthru must be undef for tail agnostic.
+      if (!OrigOperand.getOperand(0).isUndef())
+        break;
+
+      // Get the scalar value.
+      SDValue Op = OrigOperand.getOperand(1);
+
+      // See if we have enough sign bits or zero bits in the scalar to use a
+      // widening opcode by splatting to smaller element size.
+      MVT VT = Root->getSimpleValueType(0);
+      unsigned EltBits = VT.getScalarSizeInBits();
+      unsigned ScalarBits = Op.getValueSizeInBits();
+      // Make sure we're getting all element bits from the scalar register.
+      // FIXME: Support implicit sign extension of vmv.v.x?
+      if (ScalarBits < EltBits)
+        break;
+
+      unsigned NarrowSize = VT.getScalarSizeInBits() / 2;
+      // If the narrow type cannot be expressed with a legal VMV,
+      // this is not a valid candidate.
+      if (NarrowSize < 8)
+        break;
+
+      if (DAG.ComputeMaxSignificantBits(Op) <= NarrowSize)
+        SupportsSExt = true;
+      if (DAG.MaskedValueIsZero(Op,
+                                APInt::getBitsSetFrom(ScalarBits, NarrowSize)))
+        SupportsZExt = true;
+      break;
+    }
+    default:
+      // Unknown producer: conservatively keep the "no extension" defaults.
+      break;
+    }
+  }
+
+  /// Check if \p Root supports any extension folding combines.
+  static bool isSupportedRoot(const SDNode *Root) {
+    switch (Root->getOpcode()) {
+    case RISCVISD::ADD_VL:
+    case RISCVISD::MUL_VL:
+    case RISCVISD::VWADD_W_VL:
+    case RISCVISD::VWADDU_W_VL:
+    case RISCVISD::SUB_VL:
+    case RISCVISD::VWSUB_W_VL:
+    case RISCVISD::VWSUBU_W_VL:
+      return true;
+    default:
+      return false;
+    }
+  }
+
+  /// Build a NodeExtensionHelper for \p Root.getOperand(\p OperandIdx).
+  NodeExtensionHelper(SDNode *Root, unsigned OperandIdx, SelectionDAG &DAG) {
+    assert(isSupportedRoot(Root) && "Trying to build an helper with an "
+                                    "unsupported root");
+    assert(OperandIdx < 2 && "Requesting something else than LHS or RHS");
+    OrigOperand = Root->getOperand(OperandIdx);
+
+    unsigned Opc = Root->getOpcode();
+    switch (Opc) {
+    // We consider VW<ADD|SUB>(U)_W(LHS, RHS) as if they were
+    // <ADD|SUB>(LHS, S|ZEXT(RHS))
+    case RISCVISD::VWADD_W_VL:
+    case RISCVISD::VWADDU_W_VL:
+    case RISCVISD::VWSUB_W_VL:
+    case RISCVISD::VWSUBU_W_VL:
+      if (OperandIdx == 1) {
+        SupportsZExt =
+            Opc == RISCVISD::VWADDU_W_VL || Opc == RISCVISD::VWSUBU_W_VL;
+        SupportsSExt = !SupportsZExt;
+        std::tie(Mask, VL) = getMaskAndVL(Root);
+        CheckMask = true;
+        // There's no existing extension here, so we don't have to worry about
+        // making sure it gets removed.
+        EnforceOneUse = false;
+        break;
+      }
+      [[fallthrough]];
+    default:
+      fillUpExtensionSupport(Root, DAG);
+      break;
+    }
+  }
+
+  /// Check if this operand is compatible with the given vector length \p VL.
+  bool isVLCompatible(SDValue VL) const {
+    return this->VL != SDValue() && this->VL == VL;
+  }
+
+  /// Check if this operand is compatible with the given \p Mask.
+  bool isMaskCompatible(SDValue Mask) const {
+    return !CheckMask || (this->Mask != SDValue() && this->Mask == Mask);
+  }
+
+  /// Helper function to get the Mask and VL from \p Root.
+  static std::pair<SDValue, SDValue> getMaskAndVL(const SDNode *Root) {
+    assert(isSupportedRoot(Root) && "Unexpected root");
+    return std::make_pair(Root->getOperand(3), Root->getOperand(4));
+  }
+
+  /// Check if the Mask and VL of this operand are compatible with \p Root.
+  bool areVLAndMaskCompatible(const SDNode *Root) const {
+    auto [Mask, VL] = getMaskAndVL(Root);
+    return isMaskCompatible(Mask) && isVLCompatible(VL);
+  }
+
+  /// Helper function to check if \p N is commutative with respect to the
+  /// foldings that are supported by this class.
+  static bool isCommutative(const SDNode *N) {
+    switch (N->getOpcode()) {
+    case RISCVISD::ADD_VL:
+    case RISCVISD::MUL_VL:
+    case RISCVISD::VWADD_W_VL:
+    case RISCVISD::VWADDU_W_VL:
+      return true;
+    case RISCVISD::SUB_VL:
+    case RISCVISD::VWSUB_W_VL:
+    case RISCVISD::VWSUBU_W_VL:
+      return false;
+    default:
+      llvm_unreachable("Unexpected opcode");
+    }
+  }
+
+  /// Get a list of combine to try for folding extensions in \p Root.
+  /// Note that each returned CombineToTry function doesn't actually modify
+  /// anything. Instead they produce an optional CombineResult that if not None,
+  /// need to be materialized for the combine to be applied.
+  /// \see CombineResult::materialize.
+  /// If the related CombineToTry function returns std::nullopt, that means the
+  /// combine didn't match.
+  static SmallVector<CombineToTry> getSupportedFoldings(const SDNode *Root);
+};
+
+/// Helper structure that holds all the necessary information to materialize a
+/// combine that does some extension folding.
+struct CombineResult {
+  /// Opcode to be generated when materializing the combine.
+  unsigned TargetOpcode;
+  /// No value means no extension is needed. If extension is needed, the value
+  /// indicates if it needs to be sign extended.
+  std::optional<bool> SExtLHS;
+  std::optional<bool> SExtRHS;
+  /// Root of the combine.
+  SDNode *Root;
+  /// LHS of the TargetOpcode.
+  NodeExtensionHelper LHS;
+  /// RHS of the TargetOpcode.
+  NodeExtensionHelper RHS;
+
+  CombineResult(unsigned TargetOpcode, SDNode *Root,
+                const NodeExtensionHelper &LHS, std::optional<bool> SExtLHS,
+                const NodeExtensionHelper &RHS, std::optional<bool> SExtRHS)
+      : TargetOpcode(TargetOpcode), SExtLHS(SExtLHS), SExtRHS(SExtRHS),
+        Root(Root), LHS(LHS), RHS(RHS) {}
+
+  /// Return a value that uses TargetOpcode and that can be used to replace
+  /// Root.
+  /// The actual replacement is *not* done in that method.
+  SDValue materialize(SelectionDAG &DAG) const {
+    SDValue Mask, VL, Merge;
+    std::tie(Mask, VL) = NodeExtensionHelper::getMaskAndVL(Root);
+    // All supported roots carry their merge operand at index 2 (mask and VL
+    // sit at indices 3 and 4, see getMaskAndVL).
+    Merge = Root->getOperand(2);
+    return DAG.getNode(TargetOpcode, SDLoc(Root), Root->getValueType(0),
+                       LHS.getOrCreateExtendedOp(Root, DAG, SExtLHS),
+                       RHS.getOrCreateExtendedOp(Root, DAG, SExtRHS), Merge,
+                       Mask, VL);
+  }
+};
+
+/// Check if \p Root follows a pattern Root(ext(LHS), ext(RHS))
+/// where `ext` is the same for both LHS and RHS (i.e., both are sext or both
+/// are zext) and LHS and RHS can be folded into Root.
+/// AllowSExt and AllowZExt define which form `ext` can take in this pattern.
+///
+/// \note If the pattern can match with both zext and sext, the returned
+/// CombineResult will feature the zext result.
+///
+/// \returns std::nullopt if the pattern doesn't match or a CombineResult that
+/// can be used to apply the pattern.
+static std::optional<CombineResult>
+canFoldToVWWithSameExtensionImpl(SDNode *Root, const NodeExtensionHelper &LHS,
+                                 const NodeExtensionHelper &RHS, bool AllowSExt,
+                                 bool AllowZExt) {
+  assert((AllowSExt || AllowZExt) && "Forgot to set what you want?");
+  if (!LHS.areVLAndMaskCompatible(Root) || !RHS.areVLAndMaskCompatible(Root))
+    return std::nullopt;
+  // Zero extension is tried first, hence the zext preference noted above.
+  if (AllowZExt && LHS.SupportsZExt && RHS.SupportsZExt)
+    return CombineResult(NodeExtensionHelper::getSameExtensionOpcode(
+                             Root->getOpcode(), /*IsSExt=*/false),
+                         Root, LHS, /*SExtLHS=*/false, RHS,
+                         /*SExtRHS=*/false);
+  if (AllowSExt && LHS.SupportsSExt && RHS.SupportsSExt)
+    return CombineResult(NodeExtensionHelper::getSameExtensionOpcode(
+                             Root->getOpcode(), /*IsSExt=*/true),
+                         Root, LHS, /*SExtLHS=*/true, RHS,
+                         /*SExtRHS=*/true);
+  return std::nullopt;
+}
+
+/// Check if \p Root follows a pattern Root(ext(LHS), ext(RHS))
+/// where `ext` is the same for both LHS and RHS (i.e., both are sext or both
+/// are zext) and LHS and RHS can be folded into Root.
+/// When both forms match, the zext combine is preferred
+/// (\see ::canFoldToVWWithSameExtensionImpl).
+///
+/// \returns std::nullopt if the pattern doesn't match or a CombineResult that
+/// can be used to apply the pattern.
+static std::optional<CombineResult>
+canFoldToVWWithSameExtension(SDNode *Root, const NodeExtensionHelper &LHS,
+                             const NodeExtensionHelper &RHS) {
+  return canFoldToVWWithSameExtensionImpl(Root, LHS, RHS, /*AllowSExt=*/true,
+                                          /*AllowZExt=*/true);
+}
+
+/// Check if \p Root follows a pattern Root(LHS, ext(RHS)).
+/// Only RHS is folded (SExtLHS stays std::nullopt, so materialization uses
+/// LHS unmodified), which is why only RHS's mask/VL are checked against Root.
+///
+/// \returns std::nullopt if the pattern doesn't match or a CombineResult that
+/// can be used to apply the pattern.
+static std::optional<CombineResult>
+canFoldToVW_W(SDNode *Root, const NodeExtensionHelper &LHS,
+              const NodeExtensionHelper &RHS) {
+  if (!RHS.areVLAndMaskCompatible(Root))
+    return std::nullopt;
+
+  // FIXME: Is it useful to form a vwadd.wx or vwsub.wx if it removes a scalar
+  // sext/zext?
+  // Control this behavior behind an option (AllowSplatInVW_W) for testing
+  // purposes.
+  if (RHS.SupportsZExt && (!RHS.isSplat() || AllowSplatInVW_W))
+    return CombineResult(
+        NodeExtensionHelper::getWOpcode(Root->getOpcode(), /*IsSExt=*/false),
+        Root, LHS, /*SExtLHS=*/std::nullopt, RHS, /*SExtRHS=*/false);
+  if (RHS.SupportsSExt && (!RHS.isSplat() || AllowSplatInVW_W))
+    return CombineResult(
+        NodeExtensionHelper::getWOpcode(Root->getOpcode(), /*IsSExt=*/true),
+        Root, LHS, /*SExtLHS=*/std::nullopt, RHS, /*SExtRHS=*/true);
+  return std::nullopt;
+}
+
+/// Check if \p Root follows a pattern Root(sext(LHS), sext(RHS)),
+/// i.e., the sign-extension-only flavor of the same-extension fold.
+///
+/// \returns std::nullopt if the pattern doesn't match or a CombineResult that
+/// can be used to apply the pattern.
+static std::optional<CombineResult>
+canFoldToVWWithSEXT(SDNode *Root, const NodeExtensionHelper &LHS,
+                    const NodeExtensionHelper &RHS) {
+  return canFoldToVWWithSameExtensionImpl(Root, LHS, RHS, /*AllowSExt=*/true,
+                                          /*AllowZExt=*/false);
+}
+
+/// Check if \p Root follows a pattern Root(zext(LHS), zext(RHS)),
+/// i.e., the zero-extension-only flavor of the same-extension fold.
+///
+/// \returns std::nullopt if the pattern doesn't match or a CombineResult that
+/// can be used to apply the pattern.
+static std::optional<CombineResult>
+canFoldToVWWithZEXT(SDNode *Root, const NodeExtensionHelper &LHS,
+                    const NodeExtensionHelper &RHS) {
+  return canFoldToVWWithSameExtensionImpl(Root, LHS, RHS, /*AllowSExt=*/false,
+                                          /*AllowZExt=*/true);
+}
+
+/// Check if \p Root follows a pattern Root(sext(LHS), zext(RHS)).
+/// \pre Effectively requires \p Root to be a MUL_VL, since getSUOpcode
+/// asserts that SU is only supported for MUL (-> VWMULSU_VL).
+///
+/// \returns std::nullopt if the pattern doesn't match or a CombineResult that
+/// can be used to apply the pattern.
+static std::optional<CombineResult>
+canFoldToVW_SU(SDNode *Root, const NodeExtensionHelper &LHS,
+               const NodeExtensionHelper &RHS) {
+  if (!LHS.SupportsSExt || !RHS.SupportsZExt)
+    return std::nullopt;
+  if (!LHS.areVLAndMaskCompatible(Root) || !RHS.areVLAndMaskCompatible(Root))
+    return std::nullopt;
+  return CombineResult(NodeExtensionHelper::getSUOpcode(Root->getOpcode()),
+                       Root, LHS, /*SExtLHS=*/true, RHS, /*SExtRHS=*/false);
+}
+
+// Out-of-line definition: the list of candidate foldings to try is keyed on
+// the root's opcode (declared in NodeExtensionHelper).
+SmallVector<NodeExtensionHelper::CombineToTry>
+NodeExtensionHelper::getSupportedFoldings(const SDNode *Root) {
+  SmallVector<CombineToTry> Strategies;
+  switch (Root->getOpcode()) {
+  case RISCVISD::ADD_VL:
+  case RISCVISD::SUB_VL:
+    // add|sub -> vwadd(u)|vwsub(u)
+    Strategies.push_back(canFoldToVWWithSameExtension);
+    // add|sub -> vwadd(u)_w|vwsub(u)_w
+    Strategies.push_back(canFoldToVW_W);
+    break;
+  case RISCVISD::MUL_VL:
+    // mul -> vwmul(u)
+    Strategies.push_back(canFoldToVWWithSameExtension);
+    // mul -> vwmulsu
+    Strategies.push_back(canFoldToVW_SU);
+    break;
+  case RISCVISD::VWADD_W_VL:
+  case RISCVISD::VWSUB_W_VL:
+    // vwadd_w|vwsub_w -> vwadd|vwsub
+    Strategies.push_back(canFoldToVWWithSEXT);
+    break;
+  case RISCVISD::VWADDU_W_VL:
+  case RISCVISD::VWSUBU_W_VL:
+    // vwaddu_w|vwsubu_w -> vwaddu|vwsubu
+    Strategies.push_back(canFoldToVWWithZEXT);
+    break;
+  default:
+    llvm_unreachable("Unexpected opcode");
+  }
+  return Strategies;
+}
+} // End anonymous namespace.
+
+/// Combine a binary operation to its equivalent VW or VW_W form.
+/// The supported combines are:
+/// add_vl -> vwadd(u) | vwadd(u)_w
+/// sub_vl -> vwsub(u) | vwsub(u)_w
+/// mul_vl -> vwmul(u) | vwmul_su
+/// vwadd_w(u) -> vwadd(u)
+/// vwsub_w(u) -> vwsub(u)
+static SDValue
+combineBinOp_VLToVWBinOp_VL(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
+  SelectionDAG &DAG = DCI.DAG;
+
+  assert(NodeExtensionHelper::isSupportedRoot(N) &&
+         "Shouldn't have called this method");
+  SmallVector<SDNode *> Worklist;
+  SmallSet<SDNode *, 8> Inserted;
+  Worklist.push_back(N);
+  Inserted.insert(N);
+  SmallVector<CombineResult> CombinesToApply;
+
+  while (!Worklist.empty()) {
+    SDNode *Root = Worklist.pop_back_val();
+    if (!NodeExtensionHelper::isSupportedRoot(Root))
+      return SDValue();
+
+    // Combine the node currently being visited (Root), not the original input
+    // node N: the nodes pushed on the worklist by AppendUsersIfNeeded must
+    // themselves be combined, otherwise we would only ever record (duplicate)
+    // combines for N and leave the other users of the extensions behind.
+    NodeExtensionHelper LHS(Root, 0, DAG);
+    NodeExtensionHelper RHS(Root, 1, DAG);
+    auto AppendUsersIfNeeded = [&Worklist,
+                                &Inserted](const NodeExtensionHelper &Op) {
+      if (Op.needToPromoteOtherUsers()) {
+        for (SDNode *TheUse : Op.OrigOperand->uses()) {
+          if (Inserted.insert(TheUse).second)
+            Worklist.push_back(TheUse);
+        }
+      }
+    };
+
+    // Control the compile time by limiting the number of node we look at in
+    // total.
+    if (Inserted.size() > ExtensionMaxWebSize)
+      return SDValue();
+
+    SmallVector<NodeExtensionHelper::CombineToTry> FoldingStrategies =
+        NodeExtensionHelper::getSupportedFoldings(Root);
+
+    assert(!FoldingStrategies.empty() && "Nothing to be folded");
+    bool Matched = false;
+    for (int Attempt = 0;
+         (Attempt != 1 + NodeExtensionHelper::isCommutative(Root)) && !Matched;
+         ++Attempt) {
+
+      for (NodeExtensionHelper::CombineToTry FoldingStrategy :
+           FoldingStrategies) {
+        std::optional<CombineResult> Res = FoldingStrategy(Root, LHS, RHS);
+        if (Res) {
+          Matched = true;
+          CombinesToApply.push_back(*Res);
+          // All the inputs that are extended need to be folded, otherwise
+          // we would be leaving the old input (since it may still be used),
+          // and the new one.
+          if (Res->SExtLHS.has_value())
+            AppendUsersIfNeeded(LHS);
+          if (Res->SExtRHS.has_value())
+            AppendUsersIfNeeded(RHS);
+          break;
+        }
+      }
+      std::swap(LHS, RHS);
+    }
+    // Right now we do an all or nothing approach.
+    if (!Matched)
+      return SDValue();
+  }
+  // Store the value for the replacement of the input node separately.
+  SDValue InputRootReplacement;
+  // We do the RAUW after we materialize all the combines, because some replaced
+  // nodes may be feeding some of the yet-to-be-replaced nodes. Put differently,
+  // some of these nodes may appear in the NodeExtensionHelpers of some of the
+  // yet-to-be-visited CombinesToApply roots.
+  SmallVector<std::pair<SDValue, SDValue>> ValuesToReplace;
+  ValuesToReplace.reserve(CombinesToApply.size());
+  for (CombineResult Res : CombinesToApply) {
+    SDValue NewValue = Res.materialize(DAG);
+    if (!InputRootReplacement) {
+      assert(Res.Root == N &&
+             "First element is expected to be the current node");
+      InputRootReplacement = NewValue;
+    } else {
+      ValuesToReplace.emplace_back(SDValue(Res.Root, 0), NewValue);
+    }
+  }
+  for (std::pair<SDValue, SDValue> OldNewValues : ValuesToReplace) {
+    DAG.ReplaceAllUsesOfValueWith(OldNewValues.first, OldNewValues.second);
+    DCI.AddToWorklist(OldNewValues.second.getNode());
+  }
+  return InputRootReplacement;
+}
+
+// Fold
+// (fp_to_int (froundeven X)) -> fcvt X, rne
+// (fp_to_int (ftrunc X)) -> fcvt X, rtz
+// (fp_to_int (ffloor X)) -> fcvt X, rdn
+// (fp_to_int (fceil X)) -> fcvt X, rup
+// (fp_to_int (fround X)) -> fcvt X, rmm
+static SDValue performFP_TO_INTCombine(SDNode *N,
+                                       TargetLowering::DAGCombinerInfo &DCI,
+                                       const RISCVSubtarget &Subtarget) {
+  SelectionDAG &DAG = DCI.DAG;
+  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+  MVT XLenVT = Subtarget.getXLenVT();
+
+  SDValue Src = N->getOperand(0);
+
+  // Ensure the FP type is legal.
+  if (!TLI.isTypeLegal(Src.getValueType()))
+    return SDValue();
+
+  // Don't do this for f16 with Zfhmin and not Zfh.
+  // (NOTE(review): presumably because Zfhmin lacks the f16 fcvt-to-int
+  // instructions used below — confirm against the ISA spec.)
+  if (Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfh())
+    return SDValue();
+
+  RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Src.getOpcode());
+  if (FRM == RISCVFPRndMode::Invalid)
+    return SDValue();
+
+  SDLoc DL(N);
+  bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;
+  EVT VT = N->getValueType(0);
+
+  if (VT.isVector() && TLI.isTypeLegal(VT)) {
+    MVT SrcVT = Src.getSimpleValueType();
+    MVT SrcContainerVT = SrcVT;
+    MVT ContainerVT = VT.getSimpleVT();
+    SDValue XVal = Src.getOperand(0);
+
+    // For widening and narrowing conversions we just combine it into a
+    // VFCVT_..._VL node, as there are no specific VFWCVT/VFNCVT VL nodes. They
+    // end up getting lowered to their appropriate pseudo instructions based on
+    // their operand types.
+    if (VT.getScalarSizeInBits() > SrcVT.getScalarSizeInBits() * 2 ||
+        VT.getScalarSizeInBits() * 2 < SrcVT.getScalarSizeInBits())
+      return SDValue();
+
+    // Make fixed-length vectors scalable first
+    if (SrcVT.isFixedLengthVector()) {
+      SrcContainerVT = getContainerForFixedLengthVector(DAG, SrcVT, Subtarget);
+      XVal = convertToScalableVector(SrcContainerVT, XVal, DAG, Subtarget);
+      ContainerVT =
+          getContainerForFixedLengthVector(DAG, ContainerVT, Subtarget);
+    }
+
+    auto [Mask, VL] =
+        getDefaultVLOps(SrcVT, SrcContainerVT, DL, DAG, Subtarget);
+
+    SDValue FpToInt;
+    if (FRM == RISCVFPRndMode::RTZ) {
+      // Use the dedicated trunc static rounding mode if we're truncating so we
+      // don't need to generate calls to fsrmi/fsrm
+      unsigned Opc =
+          IsSigned ? RISCVISD::VFCVT_RTZ_X_F_VL : RISCVISD::VFCVT_RTZ_XU_F_VL;
+      FpToInt = DAG.getNode(Opc, DL, ContainerVT, XVal, Mask, VL);
+    } else {
+      unsigned Opc =
+          IsSigned ? RISCVISD::VFCVT_RM_X_F_VL : RISCVISD::VFCVT_RM_XU_F_VL;
+      FpToInt = DAG.getNode(Opc, DL, ContainerVT, XVal, Mask,
+                            DAG.getTargetConstant(FRM, DL, XLenVT), VL);
+    }
+
+    // If converted from fixed-length to scalable, convert back
+    if (VT.isFixedLengthVector())
+      FpToInt = convertFromScalableVector(VT, FpToInt, DAG, Subtarget);
+
+    return FpToInt;
+  }
+
+  // Only handle XLen or i32 types. Other types narrower than XLen will
+  // eventually be legalized to XLenVT.
+  if (VT != MVT::i32 && VT != XLenVT)
+    return SDValue();
+
+  unsigned Opc;
+  if (VT == XLenVT)
+    Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
+  else
+    Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
+
+  SDValue FpToInt = DAG.getNode(Opc, DL, XLenVT, Src.getOperand(0),
+                                DAG.getTargetConstant(FRM, DL, XLenVT));
+  return DAG.getNode(ISD::TRUNCATE, DL, VT, FpToInt);
+}
+
+// Fold
+// (fp_to_int_sat (froundeven X)) -> (select X == nan, 0, (fcvt X, rne))
+// (fp_to_int_sat (ftrunc X)) -> (select X == nan, 0, (fcvt X, rtz))
+// (fp_to_int_sat (ffloor X)) -> (select X == nan, 0, (fcvt X, rdn))
+// (fp_to_int_sat (fceil X)) -> (select X == nan, 0, (fcvt X, rup))
+// (fp_to_int_sat (fround X)) -> (select X == nan, 0, (fcvt X, rmm))
+static SDValue performFP_TO_INT_SATCombine(SDNode *N,
+                                       TargetLowering::DAGCombinerInfo &DCI,
+                                       const RISCVSubtarget &Subtarget) {
+  SelectionDAG &DAG = DCI.DAG;
+  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+  MVT XLenVT = Subtarget.getXLenVT();
+
+  // Only handle XLen types. Other types narrower than XLen will eventually be
+  // legalized to XLenVT.
+  EVT DstVT = N->getValueType(0);
+  if (DstVT != XLenVT)
+    return SDValue();
+
+  SDValue Src = N->getOperand(0);
+
+  // Ensure the FP type is also legal.
+  if (!TLI.isTypeLegal(Src.getValueType()))
+    return SDValue();
+
+  // Don't do this for f16 with Zfhmin and not Zfh.
+  if (Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfh())
+    return SDValue();
+
+  EVT SatVT = cast<VTSDNode>(N->getOperand(1))->getVT();
+
+  RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Src.getOpcode());
+  if (FRM == RISCVFPRndMode::Invalid)
+    return SDValue();
+
+  bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT_SAT;
+
+  unsigned Opc;
+  if (SatVT == DstVT)
+    Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
+  else if (DstVT == MVT::i64 && SatVT == MVT::i32)
+    Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
+  else
+    return SDValue();
+  // FIXME: Support other SatVTs by clamping before or after the conversion.
+
+  // Strip the rounding operation; the fcvt's static rounding mode (FRM)
+  // performs it as part of the conversion.
+  Src = Src.getOperand(0);
+
+  SDLoc DL(N);
+  SDValue FpToInt = DAG.getNode(Opc, DL, XLenVT, Src,
+                                DAG.getTargetConstant(FRM, DL, XLenVT));
+
+  // fcvt.wu.* sign extends bit 31 on RV64. FP_TO_UINT_SAT expects to zero
+  // extend.
+  if (Opc == RISCVISD::FCVT_WU_RV64)
+    FpToInt = DAG.getZeroExtendInReg(FpToInt, DL, MVT::i32);
+
+  // RISCV FP-to-int conversions saturate to the destination register size, but
+  // don't produce 0 for nan.
+  SDValue ZeroInt = DAG.getConstant(0, DL, DstVT);
+  return DAG.getSelectCC(DL, Src, Src, ZeroInt, FpToInt, ISD::CondCode::SETUO);
+}
+
+// Combine (bitreverse (bswap X)) to the BREV8 GREVI encoding if the type is
+// smaller than XLenVT.
+static SDValue performBITREVERSECombine(SDNode *N, SelectionDAG &DAG,
+                                        const RISCVSubtarget &Subtarget) {
+  assert(Subtarget.hasStdExtZbkb() && "Unexpected extension");
+
+  SDValue Src = N->getOperand(0);
+  if (Src.getOpcode() != ISD::BSWAP)
+    return SDValue();
+
+  // bitreverse(bswap X) reverses the bits within each byte, which is what
+  // brev8 does; only power-of-2 scalar types narrower than XLen qualify.
+  EVT VT = N->getValueType(0);
+  if (!VT.isScalarInteger() || VT.getSizeInBits() >= Subtarget.getXLen() ||
+      !isPowerOf2_32(VT.getSizeInBits()))
+    return SDValue();
+
+  SDLoc DL(N);
+  return DAG.getNode(RISCVISD::BREV8, DL, VT, Src.getOperand(0));
+}
+
+// Convert from one FMA opcode to another based on whether we are negating the
+// multiply result and/or the accumulator.
+// \p NegMul negates the product, \p NegAcc negates the addend; at least one
+// of them must be set.
+// NOTE: Only supports RVV operations with VL.
+static unsigned negateFMAOpcode(unsigned Opcode, bool NegMul, bool NegAcc) {
+  assert((NegMul || NegAcc) && "Not negating anything?");
+
+  // Negating the multiply result changes ADD<->SUB and toggles 'N'.
+  if (NegMul) {
+    // clang-format off
+    switch (Opcode) {
+    default: llvm_unreachable("Unexpected opcode");
+    case RISCVISD::VFMADD_VL:  Opcode = RISCVISD::VFNMSUB_VL; break;
+    case RISCVISD::VFNMSUB_VL: Opcode = RISCVISD::VFMADD_VL;  break;
+    case RISCVISD::VFNMADD_VL: Opcode = RISCVISD::VFMSUB_VL;  break;
+    case RISCVISD::VFMSUB_VL:  Opcode = RISCVISD::VFNMADD_VL; break;
+    }
+    // clang-format on
+  }
+
+  // Negating the accumulator changes ADD<->SUB.
+  if (NegAcc) {
+    // clang-format off
+    switch (Opcode) {
+    default: llvm_unreachable("Unexpected opcode");
+    case RISCVISD::VFMADD_VL:  Opcode = RISCVISD::VFMSUB_VL;  break;
+    case RISCVISD::VFMSUB_VL:  Opcode = RISCVISD::VFMADD_VL;  break;
+    case RISCVISD::VFNMADD_VL: Opcode = RISCVISD::VFNMSUB_VL; break;
+    case RISCVISD::VFNMSUB_VL: Opcode = RISCVISD::VFNMADD_VL; break;
+    }
+    // clang-format on
+  }
+
+  return Opcode;
+}
+
+// Combine SRA-by-constant patterns on RV64 into forms that select to cheaper
+// (often compressible) instruction sequences. See the pattern comments below.
+static SDValue performSRACombine(SDNode *N, SelectionDAG &DAG,
+                                 const RISCVSubtarget &Subtarget) {
+  assert(N->getOpcode() == ISD::SRA && "Unexpected opcode");
+
+  if (N->getValueType(0) != MVT::i64 || !Subtarget.is64Bit())
+    return SDValue();
+
+  if (!isa<ConstantSDNode>(N->getOperand(1)))
+    return SDValue();
+  // ShAmt == 32 is still useful: it yields a plain sext_inreg below.
+  uint64_t ShAmt = N->getConstantOperandVal(1);
+  if (ShAmt > 32)
+    return SDValue();
+
+  SDValue N0 = N->getOperand(0);
+
+  // Combine (sra (sext_inreg (shl X, C1), i32), C2) ->
+  // (sra (shl X, C1+32), C2+32) so it gets selected as SLLI+SRAI instead of
+  // SLLIW+SRAIW. SLLI+SRAI have compressed forms.
+  if (ShAmt < 32 &&
+      N0.getOpcode() == ISD::SIGN_EXTEND_INREG && N0.hasOneUse() &&
+      cast<VTSDNode>(N0.getOperand(1))->getVT() == MVT::i32 &&
+      N0.getOperand(0).getOpcode() == ISD::SHL && N0.getOperand(0).hasOneUse() &&
+      isa<ConstantSDNode>(N0.getOperand(0).getOperand(1))) {
+    uint64_t LShAmt = N0.getOperand(0).getConstantOperandVal(1);
+    if (LShAmt < 32) {
+      SDLoc ShlDL(N0.getOperand(0));
+      SDValue Shl = DAG.getNode(ISD::SHL, ShlDL, MVT::i64,
+                                N0.getOperand(0).getOperand(0),
+                                DAG.getConstant(LShAmt + 32, ShlDL, MVT::i64));
+      SDLoc DL(N);
+      return DAG.getNode(ISD::SRA, DL, MVT::i64, Shl,
+                         DAG.getConstant(ShAmt + 32, DL, MVT::i64));
+    }
+  }
+
+  // Combine (sra (shl X, 32), 32 - C) -> (shl (sext_inreg X, i32), C)
+  // FIXME: Should this be a generic combine? There's a similar combine on X86.
+  //
+  // Also try these folds where an add or sub is in the middle.
+  // (sra (add (shl X, 32), C1), 32 - C) -> (shl (sext_inreg (add X, C1), C)
+  // (sra (sub C1, (shl X, 32)), 32 - C) -> (shl (sext_inreg (sub C1, X), C)
+  SDValue Shl;
+  ConstantSDNode *AddC = nullptr;
+
+  // We might have an ADD or SUB between the SRA and SHL.
+  bool IsAdd = N0.getOpcode() == ISD::ADD;
+  if ((IsAdd || N0.getOpcode() == ISD::SUB)) {
+    // Other operand needs to be a constant we can modify.
+    AddC = dyn_cast<ConstantSDNode>(N0.getOperand(IsAdd ? 1 : 0));
+    if (!AddC)
+      return SDValue();
+
+    // AddC needs to have at least 32 trailing zeros.
+    if (AddC->getAPIntValue().countTrailingZeros() < 32)
+      return SDValue();
+
+    // All users should be a shift by constant less than or equal to 32. This
+    // ensures we'll do this optimization for each of them to produce an
+    // add/sub+sext_inreg they can all share.
+    for (SDNode *U : N0->uses()) {
+      if (U->getOpcode() != ISD::SRA ||
+          !isa<ConstantSDNode>(U->getOperand(1)) ||
+          cast<ConstantSDNode>(U->getOperand(1))->getZExtValue() > 32)
+        return SDValue();
+    }
+
+    Shl = N0.getOperand(IsAdd ? 0 : 1);
+  } else {
+    // Not an ADD or SUB.
+    Shl = N0;
+  }
+
+  // Look for a shift left by 32.
+  if (Shl.getOpcode() != ISD::SHL || !isa<ConstantSDNode>(Shl.getOperand(1)) ||
+      Shl.getConstantOperandVal(1) != 32)
+    return SDValue();
+
+  // If we didn't look through an add/sub, then the shl should have one use.
+  // If we did look through an add/sub, the sext_inreg we create is free so
+  // we're only creating 2 new instructions. It's enough to only remove the
+  // original sra+add/sub.
+  if (!AddC && !Shl.hasOneUse())
+    return SDValue();
+
+  SDLoc DL(N);
+  SDValue In = Shl.getOperand(0);
+
+  // If we looked through an ADD or SUB, we need to rebuild it with the shifted
+  // constant.
+  if (AddC) {
+    SDValue ShiftedAddC =
+        DAG.getConstant(AddC->getAPIntValue().lshr(32), DL, MVT::i64);
+    if (IsAdd)
+      In = DAG.getNode(ISD::ADD, DL, MVT::i64, In, ShiftedAddC);
+    else
+      In = DAG.getNode(ISD::SUB, DL, MVT::i64, ShiftedAddC, In);
+  }
+
+  SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, In,
+                             DAG.getValueType(MVT::i32));
+  if (ShAmt == 32)
+    return SExt;
+
+  return DAG.getNode(
+      ISD::SHL, DL, MVT::i64, SExt,
+      DAG.getConstant(32 - ShAmt, DL, MVT::i64));
+}
+
+// Invert (and/or (set cc X, Y), (xor Z, 1)) to (or/and (set !cc X, Y), Z) if
+// the result is used as the condition of a br_cc or select_cc we can invert,
+// inverting the setcc is free, and Z is 0/1. Caller will invert the
+// br_cc/select_cc.
+static SDValue tryDemorganOfBooleanCondition(SDValue Cond, SelectionDAG &DAG) {
+  bool IsAnd = Cond.getOpcode() == ISD::AND;
+  if (!IsAnd && Cond.getOpcode() != ISD::OR)
+    return SDValue();
+
+  if (!Cond.hasOneUse())
+    return SDValue();
+
+  SDValue Setcc = Cond.getOperand(0);
+  SDValue Xor = Cond.getOperand(1);
+  // Canonicalize setcc to LHS.
+  if (Setcc.getOpcode() != ISD::SETCC)
+    std::swap(Setcc, Xor);
+  // LHS should be a setcc and RHS should be an xor.
+  if (Setcc.getOpcode() != ISD::SETCC || !Setcc.hasOneUse() ||
+      Xor.getOpcode() != ISD::XOR || !Xor.hasOneUse())
+    return SDValue();
+
+  // If the condition is an And, SimplifyDemandedBits may have changed
+  // (xor Z, 1) to (not Z).
+  SDValue Xor1 = Xor.getOperand(1);
+  if (!isOneConstant(Xor1) && !(IsAnd && isAllOnesConstant(Xor1)))
+    return SDValue();
+
+  EVT VT = Cond.getValueType();
+  SDValue Xor0 = Xor.getOperand(0);
+
+  // The LHS of the xor needs to be 0/1.
+  APInt Mask = APInt::getBitsSetFrom(VT.getSizeInBits(), 1);
+  if (!DAG.MaskedValueIsZero(Xor0, Mask))
+    return SDValue();
+
+  // We can only invert integer setccs.
+  EVT SetCCOpVT = Setcc.getOperand(0).getValueType();
+  if (!SetCCOpVT.isScalarInteger())
+    return SDValue();
+
+  ISD::CondCode CCVal = cast<CondCodeSDNode>(Setcc.getOperand(2))->get();
+  if (ISD::isIntEqualitySetCC(CCVal)) {
+    CCVal = ISD::getSetCCInverse(CCVal, SetCCOpVT);
+    Setcc = DAG.getSetCC(SDLoc(Setcc), VT, Setcc.getOperand(0),
+                         Setcc.getOperand(1), CCVal);
+  } else if (CCVal == ISD::SETLT && isNullConstant(Setcc.getOperand(0))) {
+    // Invert (setlt 0, X) by converting to (setlt X, 1).
+    Setcc = DAG.getSetCC(SDLoc(Setcc), VT, Setcc.getOperand(1),
+                         DAG.getConstant(1, SDLoc(Setcc), VT), CCVal);
+  } else if (CCVal == ISD::SETLT && isOneConstant(Setcc.getOperand(1))) {
+    // Invert (setlt X, 1) by converting to (setlt 0, X).
+    Setcc = DAG.getSetCC(SDLoc(Setcc), VT,
+                         DAG.getConstant(0, SDLoc(Setcc), VT),
+                         Setcc.getOperand(0), CCVal);
+  } else
+    return SDValue();
+
+  // De Morgan: swap AND<->OR around the (now inverted) setcc.
+  unsigned Opc = IsAnd ? ISD::OR : ISD::AND;
+  return DAG.getNode(Opc, SDLoc(Cond), VT, Setcc, Xor.getOperand(0));
+}
+
// Perform common combines for BR_CC and SELECT_CC conditions. On success the
// (LHS, RHS, CC) triple is updated in place and true is returned so the
// caller can rebuild the node; otherwise false is returned and the operands
// are left untouched.
static bool combine_CC(SDValue &LHS, SDValue &RHS, SDValue &CC, const SDLoc &DL,
                       SelectionDAG &DAG, const RISCVSubtarget &Subtarget) {
  ISD::CondCode CCVal = cast<CondCodeSDNode>(CC)->get();

  // Since an arithmetic right shift always preserves the sign bit, the
  // shift can be omitted when comparing against zero.
  // Fold setlt (sra X, N), 0 -> setlt X, 0 and
  // setge (sra X, N), 0 -> setge X, 0
  if (auto *RHSConst = dyn_cast<ConstantSDNode>(RHS.getNode())) {
    if ((CCVal == ISD::SETGE || CCVal == ISD::SETLT) &&
        LHS.getOpcode() == ISD::SRA && RHSConst->isZero()) {
      LHS = LHS.getOperand(0);
      return true;
    }
  }

  // The remaining folds only apply to equality comparisons.
  if (!ISD::isIntEqualitySetCC(CCVal))
    return false;

  // Fold ((setlt X, Y), 0, ne) -> (X, Y, lt)
  // Sometimes the setcc is introduced after br_cc/select_cc has been formed.
  if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
      LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
    // If we're looking for eq 0 instead of ne 0, we need to invert the
    // condition.
    bool Invert = CCVal == ISD::SETEQ;
    CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
    if (Invert)
      CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());

    RHS = LHS.getOperand(1);
    LHS = LHS.getOperand(0);
    // Canonicalize the operands/condition for a RISC-V branch.
    translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);

    CC = DAG.getCondCode(CCVal);
    return true;
  }

  // Fold ((xor X, Y), 0, eq/ne) -> (X, Y, eq/ne)
  if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS)) {
    RHS = LHS.getOperand(1);
    LHS = LHS.getOperand(0);
    return true;
  }

  // Fold ((srl (and X, 1<<C), C), 0, eq/ne) -> ((shl X, XLen-1-C), 0, ge/lt)
  // i.e. test a single bit by moving it into the sign-bit position.
  if (isNullConstant(RHS) && LHS.getOpcode() == ISD::SRL && LHS.hasOneUse() &&
      LHS.getOperand(1).getOpcode() == ISD::Constant) {
    SDValue LHS0 = LHS.getOperand(0);
    if (LHS0.getOpcode() == ISD::AND &&
        LHS0.getOperand(1).getOpcode() == ISD::Constant) {
      uint64_t Mask = LHS0.getConstantOperandVal(1);
      uint64_t ShAmt = LHS.getConstantOperandVal(1);
      if (isPowerOf2_64(Mask) && Log2_64(Mask) == ShAmt) {
        CCVal = CCVal == ISD::SETEQ ? ISD::SETGE : ISD::SETLT;
        CC = DAG.getCondCode(CCVal);

        ShAmt = LHS.getValueSizeInBits() - 1 - ShAmt;
        LHS = LHS0.getOperand(0);
        if (ShAmt != 0)
          LHS =
              DAG.getNode(ISD::SHL, DL, LHS.getValueType(), LHS0.getOperand(0),
                          DAG.getConstant(ShAmt, DL, LHS.getValueType()));
        return true;
      }
    }
  }

  // (X, 1, setne) -> (X, 0, seteq) if we can prove X is 0/1.
  // This can occur when legalizing some floating point comparisons.
  APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
  if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
    CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
    CC = DAG.getCondCode(CCVal);
    RHS = DAG.getConstant(0, DL, LHS.getValueType());
    return true;
  }

  if (isNullConstant(RHS)) {
    if (SDValue NewCond = tryDemorganOfBooleanCondition(LHS, DAG)) {
      // tryDemorganOfBooleanCondition inverted the boolean; compensate by
      // inverting the comparison against zero.
      CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
      CC = DAG.getCondCode(CCVal);
      LHS = NewCond;
      return true;
    }
  }

  return false;
}
+
// Fold
// (select C, (add Y, X), Y) -> (add Y, (select C, X, 0)).
// (select C, (sub Y, X), Y) -> (sub Y, (select C, X, 0)).
// (select C, (or Y, X), Y)  -> (or Y, (select C, X, 0)).
// (select C, (xor Y, X), Y) -> (xor Y, (select C, X, 0)).
// \p Swapped indicates the caller passed the select's true/false operands in
// exchanged order (i.e. the op is really in the false arm), so the operands
// of the inner select created here are swapped to compensate.
static SDValue tryFoldSelectIntoOp(SDNode *N, SelectionDAG &DAG,
                                   SDValue TrueVal, SDValue FalseVal,
                                   bool Swapped) {
  // SUB is the only handled opcode that is not commutative, so for it the
  // repeated value must be the first operand.
  bool Commutative = true;
  switch (TrueVal.getOpcode()) {
  default:
    return SDValue();
  case ISD::SUB:
    Commutative = false;
    break;
  case ISD::ADD:
  case ISD::OR:
  case ISD::XOR:
    break;
  }

  // Only fold when the arithmetic op has no other users, and skip constant
  // arms (those are better served by other select combines).
  if (!TrueVal.hasOneUse() || isa<ConstantSDNode>(FalseVal))
    return SDValue();

  // Find which operand of the op is the repeated value Y.
  unsigned OpToFold;
  if (FalseVal == TrueVal.getOperand(0))
    OpToFold = 0;
  else if (Commutative && FalseVal == TrueVal.getOperand(1))
    OpToFold = 1;
  else
    return SDValue();

  EVT VT = N->getValueType(0);
  SDLoc DL(N);
  SDValue Zero = DAG.getConstant(0, DL, VT);
  SDValue OtherOp = TrueVal.getOperand(1 - OpToFold);

  // 0 is the identity for add/sub/or/xor, so selecting it disables the op.
  if (Swapped)
    std::swap(OtherOp, Zero);
  SDValue NewSel = DAG.getSelect(DL, VT, N->getOperand(0), OtherOp, Zero);
  return DAG.getNode(TrueVal.getOpcode(), DL, VT, FalseVal, NewSel);
}
+
+static SDValue performSELECTCombine(SDNode *N, SelectionDAG &DAG,
+ const RISCVSubtarget &Subtarget) {
+ if (Subtarget.hasShortForwardBranchOpt())
+ return SDValue();
+
+ // Only support XLenVT.
+ if (N->getValueType(0) != Subtarget.getXLenVT())
+ return SDValue();
+
+ SDValue TrueVal = N->getOperand(1);
+ SDValue FalseVal = N->getOperand(2);
+ if (SDValue V = tryFoldSelectIntoOp(N, DAG, TrueVal, FalseVal, /*Swapped*/false))
+ return V;
+ return tryFoldSelectIntoOp(N, DAG, FalseVal, TrueVal, /*Swapped*/true);
+}
+
// Hook invoked by the generic DAGCombiner for each node. Dispatches to the
// RISC-V specific combines based on the node's opcode; returns the
// replacement value, or an empty SDValue to signal "no change".
SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  // Helper to call SimplifyDemandedBits on an operand of N where only some low
  // bits are demanded. N will be added to the Worklist if it was not deleted.
  // Caller should return SDValue(N, 0) if this returns true.
  auto SimplifyDemandedLowBitsHelper = [&](unsigned OpNo, unsigned LowBits) {
    SDValue Op = N->getOperand(OpNo);
    APInt Mask = APInt::getLowBitsSet(Op.getValueSizeInBits(), LowBits);
    if (!SimplifyDemandedBits(Op, Mask, DCI))
      return false;

    if (N->getOpcode() != ISD::DELETED_NODE)
      DCI.AddToWorklist(N);
    return true;
  };

  switch (N->getOpcode()) {
  default:
    break;
  case RISCVISD::SplitF64: {
    SDValue Op0 = N->getOperand(0);
    // If the input to SplitF64 is just BuildPairF64 then the operation is
    // redundant. Instead, use BuildPairF64's operands directly.
    if (Op0->getOpcode() == RISCVISD::BuildPairF64)
      return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));

    if (Op0->isUndef()) {
      SDValue Lo = DAG.getUNDEF(MVT::i32);
      SDValue Hi = DAG.getUNDEF(MVT::i32);
      return DCI.CombineTo(N, Lo, Hi);
    }

    SDLoc DL(N);

    // It's cheaper to materialise two 32-bit integers than to load a double
    // from the constant pool and transfer it to integer registers through the
    // stack.
    if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) {
      APInt V = C->getValueAPF().bitcastToAPInt();
      SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32);
      SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32);
      return DCI.CombineTo(N, Lo, Hi);
    }

    // This is a target-specific version of a DAGCombine performed in
    // DAGCombiner::visitBITCAST. It performs the equivalent of:
    // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
    // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
    if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
        !Op0.getNode()->hasOneUse())
      break;
    SDValue NewSplitF64 =
        DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32),
                    Op0.getOperand(0));
    SDValue Lo = NewSplitF64.getValue(0);
    SDValue Hi = NewSplitF64.getValue(1);
    APInt SignBit = APInt::getSignMask(32);
    if (Op0.getOpcode() == ISD::FNEG) {
      SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi,
                                  DAG.getConstant(SignBit, DL, MVT::i32));
      return DCI.CombineTo(N, Lo, NewHi);
    }
    assert(Op0.getOpcode() == ISD::FABS);
    SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi,
                                DAG.getConstant(~SignBit, DL, MVT::i32));
    return DCI.CombineTo(N, Lo, NewHi);
  }
  case RISCVISD::SLLW:
  case RISCVISD::SRAW:
  case RISCVISD::SRLW:
  case RISCVISD::RORW:
  case RISCVISD::ROLW: {
    // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
    if (SimplifyDemandedLowBitsHelper(0, 32) ||
        SimplifyDemandedLowBitsHelper(1, 5))
      return SDValue(N, 0);

    break;
  }
  case RISCVISD::CLZW:
  case RISCVISD::CTZW: {
    // Only the lower 32 bits of the first operand are read
    if (SimplifyDemandedLowBitsHelper(0, 32))
      return SDValue(N, 0);
    break;
  }
  case RISCVISD::FMV_X_ANYEXTH:
  case RISCVISD::FMV_X_ANYEXTW_RV64: {
    SDLoc DL(N);
    SDValue Op0 = N->getOperand(0);
    MVT VT = N->getSimpleValueType(0);
    // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
    // conversion is unnecessary and can be replaced with the FMV_W_X_RV64
    // operand. Similar for FMV_X_ANYEXTH and FMV_H_X.
    if ((N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 &&
         Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) ||
        (N->getOpcode() == RISCVISD::FMV_X_ANYEXTH &&
         Op0->getOpcode() == RISCVISD::FMV_H_X)) {
      assert(Op0.getOperand(0).getValueType() == VT &&
             "Unexpected value type!");
      return Op0.getOperand(0);
    }

    // This is a target-specific version of a DAGCombine performed in
    // DAGCombiner::visitBITCAST. It performs the equivalent of:
    // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
    // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
    if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
        !Op0.getNode()->hasOneUse())
      break;
    SDValue NewFMV = DAG.getNode(N->getOpcode(), DL, VT, Op0.getOperand(0));
    unsigned FPBits = N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 ? 32 : 16;
    APInt SignBit = APInt::getSignMask(FPBits).sext(VT.getSizeInBits());
    if (Op0.getOpcode() == ISD::FNEG)
      return DAG.getNode(ISD::XOR, DL, VT, NewFMV,
                         DAG.getConstant(SignBit, DL, VT));

    assert(Op0.getOpcode() == ISD::FABS);
    return DAG.getNode(ISD::AND, DL, VT, NewFMV,
                       DAG.getConstant(~SignBit, DL, VT));
  }
  case ISD::ADD:
    return performADDCombine(N, DAG, Subtarget);
  case ISD::SUB:
    return performSUBCombine(N, DAG, Subtarget);
  case ISD::AND:
    return performANDCombine(N, DCI, Subtarget);
  case ISD::OR:
    return performORCombine(N, DCI, Subtarget);
  case ISD::XOR:
    return performXORCombine(N, DAG, Subtarget);
  case ISD::FADD:
  case ISD::UMAX:
  case ISD::UMIN:
  case ISD::SMAX:
  case ISD::SMIN:
  case ISD::FMAXNUM:
  case ISD::FMINNUM:
    return combineBinOpToReduce(N, DAG, Subtarget);
  case ISD::SETCC:
    return performSETCCCombine(N, DAG, Subtarget);
  case ISD::SIGN_EXTEND_INREG:
    return performSIGN_EXTEND_INREGCombine(N, DAG, Subtarget);
  case ISD::ZERO_EXTEND:
    // Fold (zero_extend (fp_to_uint X)) to prevent forming fcvt+zexti32 during
    // type legalization. This is safe because fp_to_uint produces poison if
    // it overflows.
    if (N->getValueType(0) == MVT::i64 && Subtarget.is64Bit()) {
      SDValue Src = N->getOperand(0);
      if (Src.getOpcode() == ISD::FP_TO_UINT &&
          isTypeLegal(Src.getOperand(0).getValueType()))
        return DAG.getNode(ISD::FP_TO_UINT, SDLoc(N), MVT::i64,
                           Src.getOperand(0));
      if (Src.getOpcode() == ISD::STRICT_FP_TO_UINT && Src.hasOneUse() &&
          isTypeLegal(Src.getOperand(1).getValueType())) {
        SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other);
        SDValue Res = DAG.getNode(ISD::STRICT_FP_TO_UINT, SDLoc(N), VTs,
                                  Src.getOperand(0), Src.getOperand(1));
        DCI.CombineTo(N, Res);
        // Rewire the strict op's chain users before deleting it.
        DAG.ReplaceAllUsesOfValueWith(Src.getValue(1), Res.getValue(1));
        DCI.recursivelyDeleteUnusedNodes(Src.getNode());
        return SDValue(N, 0); // Return N so it doesn't get rechecked.
      }
    }
    return SDValue();
  case ISD::TRUNCATE:
    return performTRUNCATECombine(N, DAG, Subtarget);
  case ISD::SELECT:
    return performSELECTCombine(N, DAG, Subtarget);
  case RISCVISD::SELECT_CC: {
    // Transform
    SDValue LHS = N->getOperand(0);
    SDValue RHS = N->getOperand(1);
    SDValue CC = N->getOperand(2);
    ISD::CondCode CCVal = cast<CondCodeSDNode>(CC)->get();
    SDValue TrueV = N->getOperand(3);
    SDValue FalseV = N->getOperand(4);
    SDLoc DL(N);
    EVT VT = N->getValueType(0);

    // If the True and False values are the same, we don't need a select_cc.
    if (TrueV == FalseV)
      return TrueV;

    // (select (x < 0), y, z)  -> x >> (XLEN - 1) & (y - z) + z
    // (select (x >= 0), y, z) -> x >> (XLEN - 1) & (z - y) + y
    if (!Subtarget.hasShortForwardBranchOpt() && isa<ConstantSDNode>(TrueV) &&
        isa<ConstantSDNode>(FalseV) && isNullConstant(RHS) &&
        (CCVal == ISD::CondCode::SETLT || CCVal == ISD::CondCode::SETGE)) {
      if (CCVal == ISD::CondCode::SETGE)
        std::swap(TrueV, FalseV);

      int64_t TrueSImm = cast<ConstantSDNode>(TrueV)->getSExtValue();
      int64_t FalseSImm = cast<ConstantSDNode>(FalseV)->getSExtValue();
      // Only handle simm12, if it is not in this range, it can be considered as
      // register.
      if (isInt<12>(TrueSImm) && isInt<12>(FalseSImm) &&
          isInt<12>(TrueSImm - FalseSImm)) {
        SDValue SRA =
            DAG.getNode(ISD::SRA, DL, VT, LHS,
                        DAG.getConstant(Subtarget.getXLen() - 1, DL, VT));
        SDValue AND =
            DAG.getNode(ISD::AND, DL, VT, SRA,
                        DAG.getConstant(TrueSImm - FalseSImm, DL, VT));
        return DAG.getNode(ISD::ADD, DL, VT, AND, FalseV);
      }

      // Undo the canonicalizing swap before falling through.
      if (CCVal == ISD::CondCode::SETGE)
        std::swap(TrueV, FalseV);
    }

    if (combine_CC(LHS, RHS, CC, DL, DAG, Subtarget))
      return DAG.getNode(RISCVISD::SELECT_CC, DL, N->getValueType(0),
                         {LHS, RHS, CC, TrueV, FalseV});

    if (!Subtarget.hasShortForwardBranchOpt()) {
      // (select c, -1, y) -> -c | y
      if (isAllOnesConstant(TrueV)) {
        SDValue C = DAG.getSetCC(DL, VT, LHS, RHS, CCVal);
        SDValue Neg = DAG.getNegative(C, DL, VT);
        return DAG.getNode(ISD::OR, DL, VT, Neg, FalseV);
      }
      // (select c, y, -1) -> -!c | y
      if (isAllOnesConstant(FalseV)) {
        SDValue C =
            DAG.getSetCC(DL, VT, LHS, RHS, ISD::getSetCCInverse(CCVal, VT));
        SDValue Neg = DAG.getNegative(C, DL, VT);
        return DAG.getNode(ISD::OR, DL, VT, Neg, TrueV);
      }

      // (select c, 0, y) -> -!c & y
      if (isNullConstant(TrueV)) {
        SDValue C =
            DAG.getSetCC(DL, VT, LHS, RHS, ISD::getSetCCInverse(CCVal, VT));
        SDValue Neg = DAG.getNegative(C, DL, VT);
        return DAG.getNode(ISD::AND, DL, VT, Neg, FalseV);
      }
      // (select c, y, 0) -> -c & y
      if (isNullConstant(FalseV)) {
        SDValue C = DAG.getSetCC(DL, VT, LHS, RHS, CCVal);
        SDValue Neg = DAG.getNegative(C, DL, VT);
        return DAG.getNode(ISD::AND, DL, VT, Neg, TrueV);
      }
    }

    return SDValue();
  }
  case RISCVISD::BR_CC: {
    SDValue LHS = N->getOperand(1);
    SDValue RHS = N->getOperand(2);
    SDValue CC = N->getOperand(3);
    SDLoc DL(N);

    if (combine_CC(LHS, RHS, CC, DL, DAG, Subtarget))
      return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
                         N->getOperand(0), LHS, RHS, CC, N->getOperand(4));

    return SDValue();
  }
  case ISD::BITREVERSE:
    return performBITREVERSECombine(N, DAG, Subtarget);
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
    return performFP_TO_INTCombine(N, DCI, Subtarget);
  case ISD::FP_TO_SINT_SAT:
  case ISD::FP_TO_UINT_SAT:
    return performFP_TO_INT_SATCombine(N, DCI, Subtarget);
  case ISD::FCOPYSIGN: {
    EVT VT = N->getValueType(0);
    if (!VT.isVector())
      break;
    // There is a form of VFSGNJ which injects the negated sign of its second
    // operand. Try and bubble any FNEG up after the extend/round to produce
    // this optimized pattern. Avoid modifying cases where FP_ROUND and
    // TRUNC=1.
    SDValue In2 = N->getOperand(1);
    // Avoid cases where the extend/round has multiple uses, as duplicating
    // those is typically more expensive than removing a fneg.
    if (!In2.hasOneUse())
      break;
    if (In2.getOpcode() != ISD::FP_EXTEND &&
        (In2.getOpcode() != ISD::FP_ROUND || In2.getConstantOperandVal(1) != 0))
      break;
    In2 = In2.getOperand(0);
    if (In2.getOpcode() != ISD::FNEG)
      break;
    SDLoc DL(N);
    SDValue NewFPExtRound = DAG.getFPExtendOrRound(In2.getOperand(0), DL, VT);
    return DAG.getNode(ISD::FCOPYSIGN, DL, VT, N->getOperand(0),
                       DAG.getNode(ISD::FNEG, DL, VT, NewFPExtRound));
  }
  case ISD::MGATHER:
  case ISD::MSCATTER:
  case ISD::VP_GATHER:
  case ISD::VP_SCATTER: {
    if (!DCI.isBeforeLegalize())
      break;
    SDValue Index, ScaleOp;
    bool IsIndexSigned = false;
    if (const auto *VPGSN = dyn_cast<VPGatherScatterSDNode>(N)) {
      Index = VPGSN->getIndex();
      ScaleOp = VPGSN->getScale();
      IsIndexSigned = VPGSN->isIndexSigned();
      assert(!VPGSN->isIndexScaled() &&
             "Scaled gather/scatter should not be formed");
    } else {
      const auto *MGSN = cast<MaskedGatherScatterSDNode>(N);
      Index = MGSN->getIndex();
      ScaleOp = MGSN->getScale();
      IsIndexSigned = MGSN->isIndexSigned();
      assert(!MGSN->isIndexScaled() &&
             "Scaled gather/scatter should not be formed");

    }
    EVT IndexVT = Index.getValueType();
    MVT XLenVT = Subtarget.getXLenVT();
    // RISCV indexed loads only support the "unsigned unscaled" addressing
    // mode, so anything else must be manually legalized.
    bool NeedsIdxLegalization =
        (IsIndexSigned && IndexVT.getVectorElementType().bitsLT(XLenVT));
    if (!NeedsIdxLegalization)
      break;

    SDLoc DL(N);

    // Any index legalization should first promote to XLenVT, so we don't lose
    // bits when scaling. This may create an illegal index type so we let
    // LLVM's legalization take care of the splitting.
    // FIXME: LLVM can't split VP_GATHER or VP_SCATTER yet.
    if (IndexVT.getVectorElementType().bitsLT(XLenVT)) {
      IndexVT = IndexVT.changeVectorElementType(XLenVT);
      Index = DAG.getNode(IsIndexSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
                          DL, IndexVT, Index);
    }

    ISD::MemIndexType NewIndexTy = ISD::UNSIGNED_SCALED;
    if (const auto *VPGN = dyn_cast<VPGatherSDNode>(N))
      return DAG.getGatherVP(N->getVTList(), VPGN->getMemoryVT(), DL,
                             {VPGN->getChain(), VPGN->getBasePtr(), Index,
                              ScaleOp, VPGN->getMask(),
                              VPGN->getVectorLength()},
                             VPGN->getMemOperand(), NewIndexTy);
    if (const auto *VPSN = dyn_cast<VPScatterSDNode>(N))
      return DAG.getScatterVP(N->getVTList(), VPSN->getMemoryVT(), DL,
                              {VPSN->getChain(), VPSN->getValue(),
                               VPSN->getBasePtr(), Index, ScaleOp,
                               VPSN->getMask(), VPSN->getVectorLength()},
                              VPSN->getMemOperand(), NewIndexTy);
    if (const auto *MGN = dyn_cast<MaskedGatherSDNode>(N))
      return DAG.getMaskedGather(
          N->getVTList(), MGN->getMemoryVT(), DL,
          {MGN->getChain(), MGN->getPassThru(), MGN->getMask(),
           MGN->getBasePtr(), Index, ScaleOp},
          MGN->getMemOperand(), NewIndexTy, MGN->getExtensionType());
    const auto *MSN = cast<MaskedScatterSDNode>(N);
    return DAG.getMaskedScatter(
        N->getVTList(), MSN->getMemoryVT(), DL,
        {MSN->getChain(), MSN->getValue(), MSN->getMask(), MSN->getBasePtr(),
         Index, ScaleOp},
        MSN->getMemOperand(), NewIndexTy, MSN->isTruncatingStore());
  }
  case RISCVISD::SRA_VL:
  case RISCVISD::SRL_VL:
  case RISCVISD::SHL_VL: {
    SDValue ShAmt = N->getOperand(1);
    if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
      // We don't need the upper 32 bits of a 64-bit element for a shift amount.
      SDLoc DL(N);
      SDValue VL = N->getOperand(3);
      EVT VT = N->getValueType(0);
      ShAmt = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
                          ShAmt.getOperand(1), VL);
      return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt,
                         N->getOperand(2), N->getOperand(3), N->getOperand(4));
    }
    break;
  }
  case ISD::SRA:
    if (SDValue V = performSRACombine(N, DAG, Subtarget))
      return V;
    // Fall through to share the splat-shift-amount handling below.
    [[fallthrough]];
  case ISD::SRL:
  case ISD::SHL: {
    SDValue ShAmt = N->getOperand(1);
    if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
      // We don't need the upper 32 bits of a 64-bit element for a shift amount.
      SDLoc DL(N);
      EVT VT = N->getValueType(0);
      ShAmt = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
                          ShAmt.getOperand(1),
                          DAG.getRegister(RISCV::X0, Subtarget.getXLenVT()));
      return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt);
    }
    break;
  }
  case RISCVISD::ADD_VL:
  case RISCVISD::SUB_VL:
  case RISCVISD::VWADD_W_VL:
  case RISCVISD::VWADDU_W_VL:
  case RISCVISD::VWSUB_W_VL:
  case RISCVISD::VWSUBU_W_VL:
  case RISCVISD::MUL_VL:
    return combineBinOp_VLToVWBinOp_VL(N, DCI);
  case RISCVISD::VFMADD_VL:
  case RISCVISD::VFNMADD_VL:
  case RISCVISD::VFMSUB_VL:
  case RISCVISD::VFNMSUB_VL: {
    // Fold FNEG_VL into FMA opcodes.
    SDValue A = N->getOperand(0);
    SDValue B = N->getOperand(1);
    SDValue C = N->getOperand(2);
    SDValue Mask = N->getOperand(3);
    SDValue VL = N->getOperand(4);

    // Only strip an FNEG_VL whose mask and VL match the FMA's.
    auto invertIfNegative = [&Mask, &VL](SDValue &V) {
      if (V.getOpcode() == RISCVISD::FNEG_VL && V.getOperand(1) == Mask &&
          V.getOperand(2) == VL) {
        // Return the negated input.
        V = V.getOperand(0);
        return true;
      }

      return false;
    };

    bool NegA = invertIfNegative(A);
    bool NegB = invertIfNegative(B);
    bool NegC = invertIfNegative(C);

    // If no operands are negated, we're done.
    if (!NegA && !NegB && !NegC)
      return SDValue();

    unsigned NewOpcode = negateFMAOpcode(N->getOpcode(), NegA != NegB, NegC);
    return DAG.getNode(NewOpcode, SDLoc(N), N->getValueType(0), A, B, C, Mask,
                       VL);
  }
  case ISD::STORE: {
    auto *Store = cast<StoreSDNode>(N);
    SDValue Val = Store->getValue();
    // Combine store of vmv.x.s/vfmv.f.s to vse with VL of 1.
    // vfmv.f.s is represented as extract element from 0. Match it late to avoid
    // any illegal types.
    if (Val.getOpcode() == RISCVISD::VMV_X_S ||
        (DCI.isAfterLegalizeDAG() &&
         Val.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
         isNullConstant(Val.getOperand(1)))) {
      SDValue Src = Val.getOperand(0);
      MVT VecVT = Src.getSimpleValueType();
      EVT MemVT = Store->getMemoryVT();
      // VecVT should be scalable and memory VT should match the element type.
      if (VecVT.isScalableVector() &&
          MemVT == VecVT.getVectorElementType()) {
        SDLoc DL(N);
        MVT MaskVT = getMaskTypeFor(VecVT);
        return DAG.getStoreVP(
            Store->getChain(), DL, Src, Store->getBasePtr(), Store->getOffset(),
            DAG.getConstant(1, DL, MaskVT),
            DAG.getConstant(1, DL, Subtarget.getXLenVT()), MemVT,
            Store->getMemOperand(), Store->getAddressingMode(),
            Store->isTruncatingStore(), /*IsCompress*/ false);
      }
    }

    break;
  }
  case ISD::SPLAT_VECTOR: {
    EVT VT = N->getValueType(0);
    // Only perform this combine on legal MVT types.
    if (!isTypeLegal(VT))
      break;
    if (auto Gather = matchSplatAsGather(N->getOperand(0), VT.getSimpleVT(), N,
                                         DAG, Subtarget))
      return Gather;
    break;
  }
  case RISCVISD::VMV_V_X_VL: {
    // Tail agnostic VMV.V.X only demands the vector element bitwidth from the
    // scalar input.
    unsigned ScalarSize = N->getOperand(1).getValueSizeInBits();
    unsigned EltWidth = N->getValueType(0).getScalarSizeInBits();
    if (ScalarSize > EltWidth && N->getOperand(0).isUndef())
      if (SimplifyDemandedLowBitsHelper(1, EltWidth))
        return SDValue(N, 0);

    break;
  }
  case RISCVISD::VFMV_S_F_VL: {
    SDValue Src = N->getOperand(1);
    // Try to remove vector->scalar->vector if the scalar->vector is inserting
    // into an undef vector.
    // TODO: Could use a vslide or vmv.v.v for non-undef.
    if (N->getOperand(0).isUndef() &&
        Src.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
        isNullConstant(Src.getOperand(1)) &&
        Src.getOperand(0).getValueType().isScalableVector()) {
      EVT VT = N->getValueType(0);
      EVT SrcVT = Src.getOperand(0).getValueType();
      assert(SrcVT.getVectorElementType() == VT.getVectorElementType());
      // Widths match, just return the original vector.
      if (SrcVT == VT)
        return Src.getOperand(0);
      // TODO: Use insert_subvector/extract_subvector to change widen/narrow?
    }
    break;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntNo = N->getConstantOperandVal(0);
    // All paths of this inner switch return, so control never reaches the
    // following BITCAST case.
    switch (IntNo) {
    // By default we do not combine any intrinsic.
    default:
      return SDValue();
    case Intrinsic::riscv_vcpop:
    case Intrinsic::riscv_vcpop_mask:
    case Intrinsic::riscv_vfirst:
    case Intrinsic::riscv_vfirst_mask: {
      SDValue VL = N->getOperand(2);
      if (IntNo == Intrinsic::riscv_vcpop_mask ||
          IntNo == Intrinsic::riscv_vfirst_mask)
        VL = N->getOperand(3);
      if (!isNullConstant(VL))
        return SDValue();
      // If VL is 0, vcpop -> li 0, vfirst -> li -1.
      SDLoc DL(N);
      EVT VT = N->getValueType(0);
      if (IntNo == Intrinsic::riscv_vfirst ||
          IntNo == Intrinsic::riscv_vfirst_mask)
        return DAG.getConstant(-1, DL, VT);
      return DAG.getConstant(0, DL, VT);
    }
    }
  }
  case ISD::BITCAST: {
    assert(Subtarget.useRVVForFixedLengthVectors());
    SDValue N0 = N->getOperand(0);
    EVT VT = N->getValueType(0);
    EVT SrcVT = N0.getValueType();
    // If this is a bitcast between a MVT::v4i1/v2i1/v1i1 and an illegal integer
    // type, widen both sides to avoid a trip through memory.
    if ((SrcVT == MVT::v1i1 || SrcVT == MVT::v2i1 || SrcVT == MVT::v4i1) &&
        VT.isScalarInteger()) {
      unsigned NumConcats = 8 / SrcVT.getVectorNumElements();
      SmallVector<SDValue, 4> Ops(NumConcats, DAG.getUNDEF(SrcVT));
      Ops[0] = N0;
      SDLoc DL(N);
      N0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8i1, Ops);
      N0 = DAG.getBitcast(MVT::i8, N0);
      return DAG.getNode(ISD::TRUNCATE, DL, VT, N0);
    }

    return SDValue();
  }
  }

  return SDValue();
}
+
// Decide whether DAGCombiner may commute a shift with an add/or, i.e.
// transform (shift (OP x, c1), c2) into (OP (shift x, c2), c1 << c2).
// Returns true (allow) unless materialising `c1 << c2` is more expensive
// than materialising `c1`.
bool RISCVTargetLowering::isDesirableToCommuteWithShift(
    const SDNode *N, CombineLevel Level) const {
  assert((N->getOpcode() == ISD::SHL || N->getOpcode() == ISD::SRA ||
          N->getOpcode() == ISD::SRL) &&
         "Expected shift op");

  // The following folds are only desirable if `(OP _, c1 << c2)` can be
  // materialised in fewer instructions than `(OP _, c1)`:
  //
  // (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
  // (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
  SDValue N0 = N->getOperand(0);
  EVT Ty = N0.getValueType();
  if (Ty.isScalarInteger() &&
      (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
    auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
    auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (C1 && C2) {
      const APInt &C1Int = C1->getAPIntValue();
      APInt ShiftedC1Int = C1Int << C2->getAPIntValue();

      // We can materialise `c1 << c2` into an add immediate, so it's "free",
      // and the combine should happen, to potentially allow further combines
      // later.
      if (ShiftedC1Int.getMinSignedBits() <= 64 &&
          isLegalAddImmediate(ShiftedC1Int.getSExtValue()))
        return true;

      // We can materialise `c1` in an add immediate, so it's "free", and the
      // combine should be prevented.
      if (C1Int.getMinSignedBits() <= 64 &&
          isLegalAddImmediate(C1Int.getSExtValue()))
        return false;

      // Neither constant will fit into an immediate, so find materialisation
      // costs.
      int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(),
                                              Subtarget.getFeatureBits(),
                                              /*CompressionCost*/true);
      int ShiftedC1Cost = RISCVMatInt::getIntMatCost(
          ShiftedC1Int, Ty.getSizeInBits(), Subtarget.getFeatureBits(),
          /*CompressionCost*/true);

      // Materialising `c1` is cheaper than materialising `c1 << c2`, so the
      // combine should be prevented.
      if (C1Cost < ShiftedC1Cost)
        return false;
    }
  }
  return true;
}
+
// Try to replace the constant operand of an AND/OR/XOR with one that is
// cheaper to materialise on RISC-V by toggling only non-demanded bits:
// keep simm12 constants as-is, prefer the 0xffff/0xffffffff zext masks for
// AND, otherwise aim for a sign-extendable negative immediate.
bool RISCVTargetLowering::targetShrinkDemandedConstant(
    SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
    TargetLoweringOpt &TLO) const {
  // Delay this optimization as late as possible.
  if (!TLO.LegalOps)
    return false;

  EVT VT = Op.getValueType();
  if (VT.isVector())
    return false;

  unsigned Opcode = Op.getOpcode();
  if (Opcode != ISD::AND && Opcode != ISD::OR && Opcode != ISD::XOR)
    return false;

  ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
  if (!C)
    return false;

  const APInt &Mask = C->getAPIntValue();

  // Clear all non-demanded bits initially.
  APInt ShrunkMask = Mask & DemandedBits;

  // Try to make a smaller immediate by setting undemanded bits.

  APInt ExpandedMask = Mask | ~DemandedBits;

  // A candidate mask is legal if it keeps every demanded set bit of the
  // original and only adds bits that are not demanded.
  auto IsLegalMask = [ShrunkMask, ExpandedMask](const APInt &Mask) -> bool {
    return ShrunkMask.isSubsetOf(Mask) && Mask.isSubsetOf(ExpandedMask);
  };
  // Replace the constant with NewMask (no-op if it already equals Mask).
  auto UseMask = [Mask, Op, &TLO](const APInt &NewMask) -> bool {
    if (NewMask == Mask)
      return true;
    SDLoc DL(Op);
    SDValue NewC = TLO.DAG.getConstant(NewMask, DL, Op.getValueType());
    SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), DL, Op.getValueType(),
                                    Op.getOperand(0), NewC);
    return TLO.CombineTo(Op, NewOp);
  };

  // If the shrunk mask fits in sign extended 12 bits, let the target
  // independent code apply it.
  if (ShrunkMask.isSignedIntN(12))
    return false;

  // And has a few special cases for zext.
  if (Opcode == ISD::AND) {
    // Preserve (and X, 0xffff), if zext.h exists use zext.h,
    // otherwise use SLLI + SRLI.
    APInt NewMask = APInt(Mask.getBitWidth(), 0xffff);
    if (IsLegalMask(NewMask))
      return UseMask(NewMask);

    // Try to preserve (and X, 0xffffffff), the (zext_inreg X, i32) pattern.
    if (VT == MVT::i64) {
      APInt NewMask = APInt(64, 0xffffffff);
      if (IsLegalMask(NewMask))
        return UseMask(NewMask);
    }
  }

  // For the remaining optimizations, we need to be able to make a negative
  // number through a combination of mask and undemanded bits.
  if (!ExpandedMask.isNegative())
    return false;

  // What is the fewest number of bits we need to represent the negative number.
  unsigned MinSignedBits = ExpandedMask.getMinSignedBits();

  // Try to make a 12 bit negative immediate. If that fails try to make a 32
  // bit negative immediate unless the shrunk immediate already fits in 32 bits.
  // If we can't create a simm12, we shouldn't change opaque constants.
  APInt NewMask = ShrunkMask;
  if (MinSignedBits <= 12)
    NewMask.setBitsFrom(11);
  else if (!C->isOpaque() && MinSignedBits <= 32 && !ShrunkMask.isSignedIntN(32))
    NewMask.setBitsFrom(31);
  else
    return false;

  // Check that our new mask is a subset of the demanded mask.
  assert(IsLegalMask(NewMask));
  return UseMask(NewMask);
}
+
// Apply the generalized bit-reverse (GREV) or generalized OR-combine (GORC)
// butterfly network to \p x. Each set bit of \p ShAmt enables one stage that
// exchanges (GREV) or ORs together (IsGORC) adjacent groups of 1, 2, 4, 8,
// 16 or 32 bits. A control value of 7 corresponds to brev8 / orc.b.
static uint64_t computeGREVOrGORC(uint64_t x, unsigned ShAmt, bool IsGORC) {
  static const uint64_t GREVMasks[] = {
      0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
      0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL};

  // Walk the six stages; the group size handled by each stage doubles.
  unsigned Shift = 1;
  for (uint64_t Mask : GREVMasks) {
    if (ShAmt & Shift) {
      uint64_t Swapped = ((x & Mask) << Shift) | ((x >> Shift) & Mask);
      // GORC keeps the original bits in addition to the swapped ones.
      x = IsGORC ? (x | Swapped) : Swapped;
    }
    Shift <<= 1;
  }

  return x;
}
+
// Compute known-zero/known-one bits for RISC-V-specific SelectionDAG nodes
// (and the RISC-V intrinsics handled below) so the generic combiner can
// reason about them. Unhandled opcodes leave Known fully reset, i.e.
// "nothing known".
void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                        KnownBits &Known,
                                                        const APInt &DemandedElts,
                                                        const SelectionDAG &DAG,
                                                        unsigned Depth) const {
  unsigned BitWidth = Known.getBitWidth();
  unsigned Opc = Op.getOpcode();
  assert((Opc >= ISD::BUILTIN_OP_END ||
          Opc == ISD::INTRINSIC_WO_CHAIN ||
          Opc == ISD::INTRINSIC_W_CHAIN ||
          Opc == ISD::INTRINSIC_VOID) &&
         "Should use MaskedValueIsZero if you don't know whether Op"
         " is a target node!");

  Known.resetAll();
  switch (Opc) {
  default: break;
  case RISCVISD::SELECT_CC: {
    // Operands 3 and 4 are the two values being selected between.
    Known = DAG.computeKnownBits(Op.getOperand(4), Depth + 1);
    // If we don't know any bits, early out.
    if (Known.isUnknown())
      break;
    KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(3), Depth + 1);

    // Only known if known in both the LHS and RHS.
    Known = KnownBits::commonBits(Known, Known2);
    break;
  }
  case RISCVISD::REMUW: {
    KnownBits Known2;
    Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    // We only care about the lower 32 bits.
    Known = KnownBits::urem(Known.trunc(32), Known2.trunc(32));
    // Restore the original width by sign extending.
    Known = Known.sext(BitWidth);
    break;
  }
  case RISCVISD::DIVUW: {
    KnownBits Known2;
    Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    // We only care about the lower 32 bits.
    Known = KnownBits::udiv(Known.trunc(32), Known2.trunc(32));
    // Restore the original width by sign extending.
    Known = Known.sext(BitWidth);
    break;
  }
  case RISCVISD::CTZW: {
    // The result is the trailing-zero count of the low 32 bits, so it fits
    // in bit_width(max possible count) bits; everything above is zero.
    KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
    unsigned PossibleTZ = Known2.trunc(32).countMaxTrailingZeros();
    unsigned LowBits = llvm::bit_width(PossibleTZ);
    Known.Zero.setBitsFrom(LowBits);
    break;
  }
  case RISCVISD::CLZW: {
    // Same reasoning as CTZW, but for leading zeros of the low 32 bits.
    KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
    unsigned PossibleLZ = Known2.trunc(32).countMaxLeadingZeros();
    unsigned LowBits = llvm::bit_width(PossibleLZ);
    Known.Zero.setBitsFrom(LowBits);
    break;
  }
  case RISCVISD::BREV8:
  case RISCVISD::ORC_B: {
    // FIXME: This is based on the non-ratified Zbp GREV and GORC where a
    // control value of 7 is equivalent to brev8 and orc.b.
    // NOTE(review): getZExtValue() assumes the value fits in 64 bits
    // (XLen <= 64) — confirm if wider types can ever reach here.
    Known = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
    bool IsGORC = Op.getOpcode() == RISCVISD::ORC_B;
    // To compute zeros, we need to invert the value and invert it back after.
    Known.Zero =
        ~computeGREVOrGORC(~Known.Zero.getZExtValue(), 7, IsGORC);
    Known.One = computeGREVOrGORC(Known.One.getZExtValue(), 7, IsGORC);
    break;
  }
  case RISCVISD::READ_VLENB: {
    // We can use the minimum and maximum VLEN values to bound VLENB. We
    // know VLEN must be a power of two.
    const unsigned MinVLenB = Subtarget.getRealMinVLen() / 8;
    const unsigned MaxVLenB = Subtarget.getRealMaxVLen() / 8;
    assert(MinVLenB > 0 && "READ_VLENB without vector extension enabled?");
    Known.Zero.setLowBits(Log2_32(MinVLenB));
    Known.Zero.setBitsFrom(Log2_32(MaxVLenB)+1);
    // When min == max, VLENB is a compile-time constant power of two.
    if (MaxVLenB == MinVLenB)
      Known.One.setBit(Log2_32(MinVLenB));
    break;
  }
  case ISD::INTRINSIC_W_CHAIN:
  case ISD::INTRINSIC_WO_CHAIN: {
    // Chained intrinsics carry the intrinsic id in operand 1 (0 is the chain).
    unsigned IntNo =
        Op.getConstantOperandVal(Opc == ISD::INTRINSIC_WO_CHAIN ? 0 : 1);
    switch (IntNo) {
    default:
      // We can't do anything for most intrinsics.
      break;
    case Intrinsic::riscv_vsetvli:
    case Intrinsic::riscv_vsetvlimax:
    case Intrinsic::riscv_vsetvli_opt:
    case Intrinsic::riscv_vsetvlimax_opt:
      // Assume that VL output is positive and would fit in an int32_t.
      // TODO: VLEN might be capped at 16 bits in a future V spec update.
      if (BitWidth >= 32)
        Known.Zero.setBitsFrom(31);
      break;
    }
    break;
  }
  }
}
+
// Return a lower bound on the number of sign bits of RISC-V-specific target
// nodes. The conservative fallback for anything unhandled is 1 (only the
// sign bit itself is known to be a sign bit).
unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
    SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
    unsigned Depth) const {
  switch (Op.getOpcode()) {
  default:
    break;
  case RISCVISD::SELECT_CC: {
    // The result has at least as many sign bits as the lesser of the two
    // selected values (operands 3 and 4).
    unsigned Tmp =
        DAG.ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth + 1);
    if (Tmp == 1) return 1;  // Early out.
    unsigned Tmp2 =
        DAG.ComputeNumSignBits(Op.getOperand(4), DemandedElts, Depth + 1);
    return std::min(Tmp, Tmp2);
  }
  case RISCVISD::ABSW: {
    // We expand this at isel to negw+max. The result will have 33 sign bits
    // if the input has at least 33 sign bits.
    unsigned Tmp =
        DAG.ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
    if (Tmp < 33) return 1;
    return 33;
  }
  case RISCVISD::SLLW:
  case RISCVISD::SRAW:
  case RISCVISD::SRLW:
  case RISCVISD::DIVW:
  case RISCVISD::DIVUW:
  case RISCVISD::REMUW:
  case RISCVISD::ROLW:
  case RISCVISD::RORW:
  case RISCVISD::FCVT_W_RV64:
  case RISCVISD::FCVT_WU_RV64:
  case RISCVISD::STRICT_FCVT_W_RV64:
  case RISCVISD::STRICT_FCVT_WU_RV64:
    // TODO: As the result is sign-extended, this is conservatively correct. A
    // more precise answer could be calculated for SRAW depending on known
    // bits in the shift amount.
    return 33;
  case RISCVISD::VMV_X_S: {
    // The number of sign bits of the scalar result is computed by obtaining the
    // element type of the input vector operand, subtracting its width from the
    // XLEN, and then adding one (sign bit within the element type). If the
    // element type is wider than XLen, the least-significant XLEN bits are
    // taken.
    unsigned XLen = Subtarget.getXLen();
    unsigned EltBits = Op.getOperand(0).getScalarValueSizeInBits();
    if (EltBits <= XLen)
      return XLen - EltBits + 1;
    break;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    unsigned IntNo = Op.getConstantOperandVal(1);
    switch (IntNo) {
    default:
      break;
    case Intrinsic::riscv_masked_atomicrmw_xchg_i64:
    case Intrinsic::riscv_masked_atomicrmw_add_i64:
    case Intrinsic::riscv_masked_atomicrmw_sub_i64:
    case Intrinsic::riscv_masked_atomicrmw_nand_i64:
    case Intrinsic::riscv_masked_atomicrmw_max_i64:
    case Intrinsic::riscv_masked_atomicrmw_min_i64:
    case Intrinsic::riscv_masked_atomicrmw_umax_i64:
    case Intrinsic::riscv_masked_atomicrmw_umin_i64:
    case Intrinsic::riscv_masked_cmpxchg_i64:
      // riscv_masked_{atomicrmw_*,cmpxchg} intrinsics represent an emulated
      // narrow atomic operation. These are implemented using atomic
      // operations at the minimum supported atomicrmw/cmpxchg width whose
      // result is then sign extended to XLEN. With +A, the minimum width is
      // 32 for both 64 and 32.
      assert(Subtarget.getXLen() == 64);
      assert(getMinCmpXchgSizeInBits() == 32);
      assert(Subtarget.hasStdExtA());
      return 33;
    }
  }
  }

  return 1;
}
+
// If this load reads directly from a constant pool — addressed either via a
// single LLA or via an ADD_LO(HI(cp), cp) %hi/%lo pair with no offset —
// return the Constant being loaded; otherwise return nullptr.
const Constant *
RISCVTargetLowering::getTargetConstantFromLoad(LoadSDNode *Ld) const {
  assert(Ld && "Unexpected null LoadSDNode");
  // Reject extending/indexed/atomic loads; only plain loads are handled.
  if (!ISD::isNormalLoad(Ld))
    return nullptr;

  SDValue Ptr = Ld->getBasePtr();

  // Only constant pools with no offset are supported.
  auto GetSupportedConstantPool = [](SDValue Ptr) -> ConstantPoolSDNode * {
    auto *CNode = dyn_cast<ConstantPoolSDNode>(Ptr);
    if (!CNode || CNode->isMachineConstantPoolEntry() ||
        CNode->getOffset() != 0)
      return nullptr;

    return CNode;
  };

  // Simple case, LLA.
  // NOTE(review): this dyn_casts the LLA node itself (opcode RISCVISD::LLA)
  // to ConstantPoolSDNode rather than its wrapped address operand; as
  // written the cast looks like it can never succeed, so this path would
  // always return nullptr. Confirm whether Ptr.getOperand(0) was intended.
  if (Ptr.getOpcode() == RISCVISD::LLA) {
    auto *CNode = GetSupportedConstantPool(Ptr);
    if (!CNode || CNode->getTargetFlags() != 0)
      return nullptr;

    return CNode->getConstVal();
  }

  // Look for a HI and ADD_LO pair.
  if (Ptr.getOpcode() != RISCVISD::ADD_LO ||
      Ptr.getOperand(0).getOpcode() != RISCVISD::HI)
    return nullptr;

  auto *CNodeLo = GetSupportedConstantPool(Ptr.getOperand(1));
  auto *CNodeHi = GetSupportedConstantPool(Ptr.getOperand(0).getOperand(0));

  // Both halves must be tagged with the matching %lo/%hi relocation flags.
  if (!CNodeLo || CNodeLo->getTargetFlags() != RISCVII::MO_LO ||
      !CNodeHi || CNodeHi->getTargetFlags() != RISCVII::MO_HI)
    return nullptr;

  // And both halves must refer to the same constant-pool entry.
  if (CNodeLo->getConstVal() != CNodeHi->getConstVal())
    return nullptr;

  return CNodeLo->getConstVal();
}
+
// Expand the ReadCycleWide pseudo on RV32: the 64-bit cycle CSR is read as
// two 32-bit halves (CYCLEH, CYCLE, CYCLEH) inside a loop that retries if
// the high half changed between the two CYCLEH reads. Returns the block
// that the instructions following the pseudo were moved into.
static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI,
                                                  MachineBasicBlock *BB) {
  assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction");

  // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves.
  // Should the count have wrapped while it was being read, we need to try
  // again.
  // ...
  // read:
  // rdcycleh x3 # load high word of cycle
  // rdcycle  x2 # load low word of cycle
  // rdcycleh x4 # load high word of cycle
  // bne x3, x4, read # check if high word reads match, otherwise try again
  // ...

  MachineFunction &MF = *BB->getParent();
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = ++BB->getIterator();

  // Loop block holding the three CSR reads and the retry branch.
  MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
  MF.insert(It, LoopMBB);

  MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB);
  MF.insert(It, DoneMBB);

  // Transfer the remainder of BB and its successor edges to DoneMBB.
  DoneMBB->splice(DoneMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  DoneMBB->transferSuccessorsAndUpdatePHIs(BB);

  BB->addSuccessor(LoopMBB);

  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
  Register LoReg = MI.getOperand(0).getReg();
  Register HiReg = MI.getOperand(1).getReg();
  DebugLoc DL = MI.getDebugLoc();

  // CSRRS rd, csr, x0 is a plain CSR read (no bits set).
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg)
      .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
      .addReg(RISCV::X0);
  BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg)
      .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding)
      .addReg(RISCV::X0);
  BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg)
      .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
      .addReg(RISCV::X0);

  // Retry if the two high-word reads disagree (counter wrapped mid-read).
  BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
      .addReg(HiReg)
      .addReg(ReadAgainReg)
      .addMBB(LoopMBB);

  LoopMBB->addSuccessor(LoopMBB);
  LoopMBB->addSuccessor(DoneMBB);

  MI.eraseFromParent();

  return DoneMBB;
}
+
// Expand SplitF64Pseudo: split an FPR64 value into two GPRs by spilling the
// FPR64 to a stack slot and reloading the two 32-bit halves with LW.
// Operand 0/1 are the lo/hi destination GPRs, operand 2 the FPR64 source.
static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
                                             MachineBasicBlock *BB) {
  assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");

  MachineFunction &MF = *BB->getParent();
  DebugLoc DL = MI.getDebugLoc();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
  Register LoReg = MI.getOperand(0).getReg();
  Register HiReg = MI.getOperand(1).getReg();
  Register SrcReg = MI.getOperand(2).getReg();
  const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass;
  // A single frame index is shared by all F64 moves in this function.
  int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);

  TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
                          RI, Register());
  MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
  MachineMemOperand *MMOLo =
      MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 4, Align(8));
  MachineMemOperand *MMOHi = MF.getMachineMemOperand(
      MPI.getWithOffset(4), MachineMemOperand::MOLoad, 4, Align(8));
  // Reload the low word at offset 0 and the high word at offset 4.
  BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
      .addFrameIndex(FI)
      .addImm(0)
      .addMemOperand(MMOLo);
  BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
      .addFrameIndex(FI)
      .addImm(4)
      .addMemOperand(MMOHi);
  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}
+
// Expand BuildPairF64Pseudo: build an FPR64 value from two GPRs by storing
// both 32-bit halves to a stack slot with SW and reloading the slot as an
// FPR64. Operand 0 is the FPR64 destination, operands 1/2 the lo/hi GPRs.
static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
                                                 MachineBasicBlock *BB) {
  assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo &&
         "Unexpected instruction");

  MachineFunction &MF = *BB->getParent();
  DebugLoc DL = MI.getDebugLoc();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
  Register DstReg = MI.getOperand(0).getReg();
  Register LoReg = MI.getOperand(1).getReg();
  Register HiReg = MI.getOperand(2).getReg();
  const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
  // A single frame index is shared by all F64 moves in this function.
  int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);

  MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
  MachineMemOperand *MMOLo =
      MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Align(8));
  MachineMemOperand *MMOHi = MF.getMachineMemOperand(
      MPI.getWithOffset(4), MachineMemOperand::MOStore, 4, Align(8));
  // Store the low word at offset 0 and the high word at offset 4.
  BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
      .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
      .addFrameIndex(FI)
      .addImm(0)
      .addMemOperand(MMOLo);
  BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
      .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
      .addFrameIndex(FI)
      .addImm(4)
      .addMemOperand(MMOHi);
  TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI, Register());
  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}
+
+static bool isSelectPseudo(MachineInstr &MI) {
+ switch (MI.getOpcode()) {
+ default:
+ return false;
+ case RISCV::Select_GPR_Using_CC_GPR:
+ case RISCV::Select_FPR16_Using_CC_GPR:
+ case RISCV::Select_FPR32_Using_CC_GPR:
+ case RISCV::Select_FPR64_Using_CC_GPR:
+ return true;
+ }
+}
+
// Expand a PseudoQuietFLT/FLE: perform the relational compare (RelOpcode)
// with FFLAGS saved and restored around it, so the flags it sets are
// discarded, then issue a dummy FEQ (EqOpcode, writing x0) which raises the
// invalid exception only for signaling NaNs — giving quiet-compare
// semantics overall.
static MachineBasicBlock *emitQuietFCMP(MachineInstr &MI, MachineBasicBlock *BB,
                                        unsigned RelOpcode, unsigned EqOpcode,
                                        const RISCVSubtarget &Subtarget) {
  DebugLoc DL = MI.getDebugLoc();
  Register DstReg = MI.getOperand(0).getReg();
  Register Src1Reg = MI.getOperand(1).getReg();
  Register Src2Reg = MI.getOperand(2).getReg();
  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
  Register SavedFFlags = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();

  // Save the current FFLAGS.
  BuildMI(*BB, MI, DL, TII.get(RISCV::ReadFFLAGS), SavedFFlags);

  auto MIB = BuildMI(*BB, MI, DL, TII.get(RelOpcode), DstReg)
                 .addReg(Src1Reg)
                 .addReg(Src2Reg);
  // Propagate the no-FP-exception flag from the pseudo to the expansion.
  if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept))
    MIB->setFlag(MachineInstr::MIFlag::NoFPExcept);

  // Restore the FFLAGS.
  BuildMI(*BB, MI, DL, TII.get(RISCV::WriteFFLAGS))
      .addReg(SavedFFlags, RegState::Kill);

  // Issue a dummy FEQ opcode to raise exception for signaling NaNs.
  auto MIB2 = BuildMI(*BB, MI, DL, TII.get(EqOpcode), RISCV::X0)
                  .addReg(Src1Reg, getKillRegState(MI.getOperand(1).isKill()))
                  .addReg(Src2Reg, getKillRegState(MI.getOperand(2).isKill()));
  if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept))
    MIB2->setFlag(MachineInstr::MIFlag::NoFPExcept);

  // Erase the pseudoinstruction.
  MI.eraseFromParent();
  return BB;
}
+
// Lower two nested Select_FPRX_ pseudos (Second's false operand is First's
// result) in a single step, producing one three-way PHI instead of two
// chained diamonds. Returns the sink block containing the merged PHI.
static MachineBasicBlock *
EmitLoweredCascadedSelect(MachineInstr &First, MachineInstr &Second,
                          MachineBasicBlock *ThisMBB,
                          const RISCVSubtarget &Subtarget) {
  // Select_FPRX_ (rs1, rs2, imm, rs4, (Select_FPRX_ rs1, rs2, imm, rs4, rs5)
  // Without this, custom-inserter would have generated:
  //
  //   A
  //   | \
  //   |  B
  //   | /
  //   C
  //   | \
  //   |  D
  //   | /
  //   E
  //
  // A: X = ...; Y = ...
  // B: empty
  // C: Z = PHI [X, A], [Y, B]
  // D: empty
  // E: PHI [X, C], [Z, D]
  //
  // If we lower both Select_FPRX_ in a single step, we can instead generate:
  //
  //   A
  //   | \
  //   |  C
  //   | /|
  //   |/ |
  //   |  |
  //   |  D
  //   | /
  //   E
  //
  // A: X = ...; Y = ...
  // D: empty
  // E: PHI [X, A], [X, C], [Y, D]

  const RISCVInstrInfo &TII = *Subtarget.getInstrInfo();
  const DebugLoc &DL = First.getDebugLoc();
  const BasicBlock *LLVM_BB = ThisMBB->getBasicBlock();
  MachineFunction *F = ThisMBB->getParent();
  MachineBasicBlock *FirstMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *SecondMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineFunction::iterator It = ++ThisMBB->getIterator();
  F->insert(It, FirstMBB);
  F->insert(It, SecondMBB);
  F->insert(It, SinkMBB);

  // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
  SinkMBB->splice(SinkMBB->begin(), ThisMBB,
                  std::next(MachineBasicBlock::iterator(First)),
                  ThisMBB->end());
  SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);

  // Fallthrough block for ThisMBB.
  ThisMBB->addSuccessor(FirstMBB);
  // Fallthrough block for FirstMBB.
  FirstMBB->addSuccessor(SecondMBB);
  ThisMBB->addSuccessor(SinkMBB);
  FirstMBB->addSuccessor(SinkMBB);
  // This is fallthrough.
  SecondMBB->addSuccessor(SinkMBB);

  // Operands 1/2/3 of a Select pseudo are the branch LHS, RHS and condcode.
  auto FirstCC = static_cast<RISCVCC::CondCode>(First.getOperand(3).getImm());
  Register FLHS = First.getOperand(1).getReg();
  Register FRHS = First.getOperand(2).getReg();
  // Insert appropriate branch.
  BuildMI(FirstMBB, DL, TII.getBrCond(FirstCC))
      .addReg(FLHS)
      .addReg(FRHS)
      .addMBB(SinkMBB);

  Register SLHS = Second.getOperand(1).getReg();
  Register SRHS = Second.getOperand(2).getReg();
  Register Op1Reg4 = First.getOperand(4).getReg();
  Register Op1Reg5 = First.getOperand(5).getReg();

  auto SecondCC = static_cast<RISCVCC::CondCode>(Second.getOperand(3).getImm());
  // Insert appropriate branch.
  BuildMI(ThisMBB, DL, TII.getBrCond(SecondCC))
      .addReg(SLHS)
      .addReg(SRHS)
      .addMBB(SinkMBB);

  Register DestReg = Second.getOperand(0).getReg();
  Register Op2Reg4 = Second.getOperand(4).getReg();
  // Three-way PHI merging Second's true value and First's true/false values.
  BuildMI(*SinkMBB, SinkMBB->begin(), DL, TII.get(RISCV::PHI), DestReg)
      .addReg(Op2Reg4)
      .addMBB(ThisMBB)
      .addReg(Op1Reg4)
      .addMBB(FirstMBB)
      .addReg(Op1Reg5)
      .addMBB(SecondMBB);

  // Now remove the Select_FPRX_s.
  First.eraseFromParent();
  Second.eraseFromParent();
  return SinkMBB;
}
+
// Expand one or more Select_*_Using_CC_GPR pseudos into a compare-and-branch
// triangle, sharing the control flow across a run of selects with identical
// conditions. Returns the tail block that now holds the PHIs and the rest of
// the original block.
static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
                                           MachineBasicBlock *BB,
                                           const RISCVSubtarget &Subtarget) {
  // To "insert" Select_* instructions, we actually have to insert the triangle
  // control-flow pattern. The incoming instructions know the destination vreg
  // to set, the condition code register to branch on, the true/false values to
  // select between, and the condcode to use to select the appropriate branch.
  //
  // We produce the following control flow:
  //     HeadMBB
  //     |  \
  //     |  IfFalseMBB
  //     | /
  //    TailMBB
  //
  // When we find a sequence of selects we attempt to optimize their emission
  // by sharing the control flow. Currently we only handle cases where we have
  // multiple selects with the exact same condition (same LHS, RHS and CC).
  // The selects may be interleaved with other instructions if the other
  // instructions meet some requirements we deem safe:
  // - They are not pseudo instructions.
  // - They are debug instructions. Otherwise,
  // - They do not have side-effects, do not access memory and their inputs do
  //   not depend on the results of the select pseudo-instructions.
  // The TrueV/FalseV operands of the selects cannot depend on the result of
  // previous selects in the sequence.
  // These conditions could be further relaxed. See the X86 target for a
  // related approach and more information.
  //
  // Select_FPRX_ (rs1, rs2, imm, rs4, (Select_FPRX_ rs1, rs2, imm, rs4, rs5))
  // is checked here and handled by a separate function -
  // EmitLoweredCascadedSelect.
  Register LHS = MI.getOperand(1).getReg();
  Register RHS = MI.getOperand(2).getReg();
  auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm());

  SmallVector<MachineInstr *, 4> SelectDebugValues;
  SmallSet<Register, 4> SelectDests;
  SelectDests.insert(MI.getOperand(0).getReg());

  MachineInstr *LastSelectPseudo = &MI;
  // Detect the cascaded-select pattern: the next (non-debug) instruction is
  // the same FP select opcode whose false operand kills this select's result.
  auto Next = next_nodbg(MI.getIterator(), BB->instr_end());
  if (MI.getOpcode() != RISCV::Select_GPR_Using_CC_GPR && Next != BB->end() &&
      Next->getOpcode() == MI.getOpcode() &&
      Next->getOperand(5).getReg() == MI.getOperand(0).getReg() &&
      Next->getOperand(5).isKill()) {
    return EmitLoweredCascadedSelect(MI, *Next, BB, Subtarget);
  }

  // Scan forward collecting all selects that can share this branch.
  for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI);
       SequenceMBBI != E; ++SequenceMBBI) {
    if (SequenceMBBI->isDebugInstr())
      continue;
    if (isSelectPseudo(*SequenceMBBI)) {
      if (SequenceMBBI->getOperand(1).getReg() != LHS ||
          SequenceMBBI->getOperand(2).getReg() != RHS ||
          SequenceMBBI->getOperand(3).getImm() != CC ||
          SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
          SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
        break;
      LastSelectPseudo = &*SequenceMBBI;
      SequenceMBBI->collectDebugValues(SelectDebugValues);
      SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
      continue;
    }
    if (SequenceMBBI->hasUnmodeledSideEffects() ||
        SequenceMBBI->mayLoadOrStore() ||
        SequenceMBBI->usesCustomInsertionHook())
      break;
    if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) {
          return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
        }))
      break;
  }

  const RISCVInstrInfo &TII = *Subtarget.getInstrInfo();
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  DebugLoc DL = MI.getDebugLoc();
  MachineFunction::iterator I = ++BB->getIterator();

  MachineBasicBlock *HeadMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);

  F->insert(I, IfFalseMBB);
  F->insert(I, TailMBB);

  // Transfer debug instructions associated with the selects to TailMBB.
  for (MachineInstr *DebugInstr : SelectDebugValues) {
    TailMBB->push_back(DebugInstr->removeFromParent());
  }

  // Move all instructions after the sequence to TailMBB.
  TailMBB->splice(TailMBB->end(), HeadMBB,
                  std::next(LastSelectPseudo->getIterator()), HeadMBB->end());
  // Update machine-CFG edges by transferring all successors of the current
  // block to the new block which will contain the Phi nodes for the selects.
  TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
  // Set the successors for HeadMBB.
  HeadMBB->addSuccessor(IfFalseMBB);
  HeadMBB->addSuccessor(TailMBB);

  // Insert appropriate branch.
  BuildMI(HeadMBB, DL, TII.getBrCond(CC))
      .addReg(LHS)
      .addReg(RHS)
      .addMBB(TailMBB);

  // IfFalseMBB just falls through to TailMBB.
  IfFalseMBB->addSuccessor(TailMBB);

  // Create PHIs for all of the select pseudo-instructions.
  auto SelectMBBI = MI.getIterator();
  auto SelectEnd = std::next(LastSelectPseudo->getIterator());
  auto InsertionPoint = TailMBB->begin();
  while (SelectMBBI != SelectEnd) {
    auto Next = std::next(SelectMBBI);
    if (isSelectPseudo(*SelectMBBI)) {
      // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
      BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
              TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
          .addReg(SelectMBBI->getOperand(4).getReg())
          .addMBB(HeadMBB)
          .addReg(SelectMBBI->getOperand(5).getReg())
          .addMBB(IfFalseMBB);
      SelectMBBI->eraseFromParent();
    }
    SelectMBBI = Next;
  }

  // We introduced PHIs, so the function no longer satisfies NoPHIs.
  F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
  return TailMBB;
}
+
// Expand a PseudoVFCVT*_RM_* pseudo: swap FRM to the statically encoded
// rounding mode (immediate operand 4), emit the corresponding VFCVT opcode
// without the FRM operand, forwarding all remaining operands, then restore
// the original FRM value.
static MachineBasicBlock *
emitVFCVT_RM_MASK(MachineInstr &MI, MachineBasicBlock *BB, unsigned Opcode) {
  DebugLoc DL = MI.getDebugLoc();

  const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();

  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
  Register SavedFRM = MRI.createVirtualRegister(&RISCV::GPRRegClass);

  // Update FRM and save the old value.
  BuildMI(*BB, MI, DL, TII.get(RISCV::SwapFRMImm), SavedFRM)
      .addImm(MI.getOperand(4).getImm());

  // Emit an VFCVT without the FRM operand.
  assert(MI.getNumOperands() == 8);
  auto MIB = BuildMI(*BB, MI, DL, TII.get(Opcode))
                 .add(MI.getOperand(0))
                 .add(MI.getOperand(1))
                 .add(MI.getOperand(2))
                 .add(MI.getOperand(3))
                 .add(MI.getOperand(5))   // Skip operand 4: the FRM immediate.
                 .add(MI.getOperand(6))
                 .add(MI.getOperand(7));
  // Propagate the no-FP-exception flag from the pseudo to the expansion.
  if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept))
    MIB->setFlag(MachineInstr::MIFlag::NoFPExcept);

  // Restore FRM.
  BuildMI(*BB, MI, DL, TII.get(RISCV::WriteFRM))
      .addReg(SavedFRM, RegState::Kill);

  // Erase the pseudoinstruction.
  MI.eraseFromParent();
  return BB;
}
+
// Expand a PseudoVFROUND_NOEXCEPT pseudo: convert float->int (CVTXOpc) into
// a temporary, then int->float (CVTFOpc) back into the destination, with
// FFLAGS saved before and restored after so the round-trip leaves no
// visible FP exception flags.
static MachineBasicBlock *emitVFROUND_NOEXCEPT_MASK(MachineInstr &MI,
                                                    MachineBasicBlock *BB,
                                                    unsigned CVTXOpc,
                                                    unsigned CVTFOpc) {
  DebugLoc DL = MI.getDebugLoc();

  const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();

  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
  Register SavedFFLAGS = MRI.createVirtualRegister(&RISCV::GPRRegClass);

  // Save the old value of FFLAGS.
  BuildMI(*BB, MI, DL, TII.get(RISCV::ReadFFLAGS), SavedFFLAGS);

  assert(MI.getNumOperands() == 7);

  // Emit a VFCVT_X_F
  const TargetRegisterInfo *TRI =
      BB->getParent()->getSubtarget().getRegisterInfo();
  // The temporary inherits the register class constrained by operand 0.
  const TargetRegisterClass *RC = MI.getRegClassConstraint(0, &TII, TRI);
  Register Tmp = MRI.createVirtualRegister(RC);
  BuildMI(*BB, MI, DL, TII.get(CVTXOpc), Tmp)
      .add(MI.getOperand(1))
      .add(MI.getOperand(2))
      .add(MI.getOperand(3))
      .add(MI.getOperand(4))
      .add(MI.getOperand(5))
      .add(MI.getOperand(6));

  // Emit a VFCVT_F_X
  BuildMI(*BB, MI, DL, TII.get(CVTFOpc))
      .add(MI.getOperand(0))
      .add(MI.getOperand(1))
      .addReg(Tmp)              // Source is the intermediate integer value.
      .add(MI.getOperand(3))
      .add(MI.getOperand(4))
      .add(MI.getOperand(5))
      .add(MI.getOperand(6));

  // Restore FFLAGS.
  BuildMI(*BB, MI, DL, TII.get(RISCV::WriteFFLAGS))
      .addReg(SavedFFLAGS, RegState::Kill);

  // Erase the pseudoinstruction.
  MI.eraseFromParent();
  return BB;
}
+
// Expand PseudoFROUND_{H,S,D}: round SrcReg to an integral FP value using
// the rounding mode in immediate operand 3, but only when |Src| < MaxReg
// (operand 2); otherwise the input is already integral (or NaN/inf) and
// passes through unchanged. Builds a compare+branch diamond
// (MBB -> CvtMBB -> DoneMBB) whose PHI merges the converted and original
// values, and uses FSGNJ to restore the source's sign on the result.
// Returns DoneMBB.
static MachineBasicBlock *emitFROUND(MachineInstr &MI, MachineBasicBlock *MBB,
                                     const RISCVSubtarget &Subtarget) {
  // Select the per-width opcodes and register class.
  unsigned CmpOpc, F2IOpc, I2FOpc, FSGNJOpc, FSGNJXOpc;
  const TargetRegisterClass *RC;
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Unexpected opcode");
  case RISCV::PseudoFROUND_H:
    CmpOpc = RISCV::FLT_H;
    F2IOpc = RISCV::FCVT_W_H;
    I2FOpc = RISCV::FCVT_H_W;
    FSGNJOpc = RISCV::FSGNJ_H;
    FSGNJXOpc = RISCV::FSGNJX_H;
    RC = &RISCV::FPR16RegClass;
    break;
  case RISCV::PseudoFROUND_S:
    CmpOpc = RISCV::FLT_S;
    F2IOpc = RISCV::FCVT_W_S;
    I2FOpc = RISCV::FCVT_S_W;
    FSGNJOpc = RISCV::FSGNJ_S;
    FSGNJXOpc = RISCV::FSGNJX_S;
    RC = &RISCV::FPR32RegClass;
    break;
  case RISCV::PseudoFROUND_D:
    assert(Subtarget.is64Bit() && "Expected 64-bit GPR.");
    CmpOpc = RISCV::FLT_D;
    F2IOpc = RISCV::FCVT_L_D;
    I2FOpc = RISCV::FCVT_D_L;
    FSGNJOpc = RISCV::FSGNJ_D;
    FSGNJXOpc = RISCV::FSGNJX_D;
    RC = &RISCV::FPR64RegClass;
    break;
  }

  const BasicBlock *BB = MBB->getBasicBlock();
  DebugLoc DL = MI.getDebugLoc();
  MachineFunction::iterator I = ++MBB->getIterator();

  MachineFunction *F = MBB->getParent();
  MachineBasicBlock *CvtMBB = F->CreateMachineBasicBlock(BB);
  MachineBasicBlock *DoneMBB = F->CreateMachineBasicBlock(BB);

  F->insert(I, CvtMBB);
  F->insert(I, DoneMBB);
  // Move all instructions after the sequence to DoneMBB.
  DoneMBB->splice(DoneMBB->end(), MBB, MachineBasicBlock::iterator(MI),
                  MBB->end());
  // Update machine-CFG edges by transferring all successors of the current
  // block to the new block which will contain the Phi nodes for the selects.
  DoneMBB->transferSuccessorsAndUpdatePHIs(MBB);
  // Set the successors for MBB.
  MBB->addSuccessor(CvtMBB);
  MBB->addSuccessor(DoneMBB);

  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();
  Register MaxReg = MI.getOperand(2).getReg();
  int64_t FRM = MI.getOperand(3).getImm();

  const RISCVInstrInfo &TII = *Subtarget.getInstrInfo();
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

  // FabsReg = |SrcReg| (FSGNJX with the same source clears the sign bit).
  Register FabsReg = MRI.createVirtualRegister(RC);
  BuildMI(MBB, DL, TII.get(FSGNJXOpc), FabsReg).addReg(SrcReg).addReg(SrcReg);

  // Compare the FP value to the max value.
  Register CmpReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  auto MIB =
      BuildMI(MBB, DL, TII.get(CmpOpc), CmpReg).addReg(FabsReg).addReg(MaxReg);
  if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept))
    MIB->setFlag(MachineInstr::MIFlag::NoFPExcept);

  // Insert branch.
  // Skip the conversion when |Src| >= Max (CmpReg == 0).
  BuildMI(MBB, DL, TII.get(RISCV::BEQ))
      .addReg(CmpReg)
      .addReg(RISCV::X0)
      .addMBB(DoneMBB);

  CvtMBB->addSuccessor(DoneMBB);

  // Convert to integer.
  Register F2IReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  MIB = BuildMI(CvtMBB, DL, TII.get(F2IOpc), F2IReg).addReg(SrcReg).addImm(FRM);
  if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept))
    MIB->setFlag(MachineInstr::MIFlag::NoFPExcept);

  // Convert back to FP.
  Register I2FReg = MRI.createVirtualRegister(RC);
  MIB = BuildMI(CvtMBB, DL, TII.get(I2FOpc), I2FReg).addReg(F2IReg).addImm(FRM);
  if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept))
    MIB->setFlag(MachineInstr::MIFlag::NoFPExcept);

  // Restore the sign bit.
  Register CvtReg = MRI.createVirtualRegister(RC);
  BuildMI(CvtMBB, DL, TII.get(FSGNJOpc), CvtReg).addReg(I2FReg).addReg(SrcReg);

  // Merge the results.
  BuildMI(*DoneMBB, DoneMBB->begin(), DL, TII.get(RISCV::PHI), DstReg)
      .addReg(SrcReg)
      .addMBB(MBB)
      .addReg(CvtReg)
      .addMBB(CvtMBB);

  MI.eraseFromParent();
  return DoneMBB;
}
+
+MachineBasicBlock *
+RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
+ MachineBasicBlock *BB) const {
+ switch (MI.getOpcode()) {
+ default:
+ llvm_unreachable("Unexpected instr type to insert");
+ case RISCV::ReadCycleWide:
+ assert(!Subtarget.is64Bit() &&
+ "ReadCycleWrite is only to be used on riscv32");
+ return emitReadCycleWidePseudo(MI, BB);
+ case RISCV::Select_GPR_Using_CC_GPR:
+ case RISCV::Select_FPR16_Using_CC_GPR:
+ case RISCV::Select_FPR32_Using_CC_GPR:
+ case RISCV::Select_FPR64_Using_CC_GPR:
+ return emitSelectPseudo(MI, BB, Subtarget);
+ case RISCV::BuildPairF64Pseudo:
+ return emitBuildPairF64Pseudo(MI, BB);
+ case RISCV::SplitF64Pseudo:
+ return emitSplitF64Pseudo(MI, BB);
+ case RISCV::PseudoQuietFLE_H:
+ return emitQuietFCMP(MI, BB, RISCV::FLE_H, RISCV::FEQ_H, Subtarget);
+ case RISCV::PseudoQuietFLT_H:
+ return emitQuietFCMP(MI, BB, RISCV::FLT_H, RISCV::FEQ_H, Subtarget);
+ case RISCV::PseudoQuietFLE_S:
+ return emitQuietFCMP(MI, BB, RISCV::FLE_S, RISCV::FEQ_S, Subtarget);
+ case RISCV::PseudoQuietFLT_S:
+ return emitQuietFCMP(MI, BB, RISCV::FLT_S, RISCV::FEQ_S, Subtarget);
+ case RISCV::PseudoQuietFLE_D:
+ return emitQuietFCMP(MI, BB, RISCV::FLE_D, RISCV::FEQ_D, Subtarget);
+ case RISCV::PseudoQuietFLT_D:
+ return emitQuietFCMP(MI, BB, RISCV::FLT_D, RISCV::FEQ_D, Subtarget);
+
+ // =========================================================================
+ // VFCVT
+ // =========================================================================
+
+ case RISCV::PseudoVFCVT_RM_X_F_V_M1_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFCVT_X_F_V_M1_MASK);
+ case RISCV::PseudoVFCVT_RM_X_F_V_M2_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFCVT_X_F_V_M2_MASK);
+ case RISCV::PseudoVFCVT_RM_X_F_V_M4_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFCVT_X_F_V_M4_MASK);
+ case RISCV::PseudoVFCVT_RM_X_F_V_M8_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFCVT_X_F_V_M8_MASK);
+ case RISCV::PseudoVFCVT_RM_X_F_V_MF2_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFCVT_X_F_V_MF2_MASK);
+ case RISCV::PseudoVFCVT_RM_X_F_V_MF4_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFCVT_X_F_V_MF4_MASK);
+
+ case RISCV::PseudoVFCVT_RM_XU_F_V_M1_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFCVT_XU_F_V_M1_MASK);
+ case RISCV::PseudoVFCVT_RM_XU_F_V_M2_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFCVT_XU_F_V_M2_MASK);
+ case RISCV::PseudoVFCVT_RM_XU_F_V_M4_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFCVT_XU_F_V_M4_MASK);
+ case RISCV::PseudoVFCVT_RM_XU_F_V_M8_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFCVT_XU_F_V_M8_MASK);
+ case RISCV::PseudoVFCVT_RM_XU_F_V_MF2_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFCVT_XU_F_V_MF2_MASK);
+ case RISCV::PseudoVFCVT_RM_XU_F_V_MF4_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFCVT_XU_F_V_MF4_MASK);
+
+ case RISCV::PseudoVFCVT_RM_F_XU_V_M1_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFCVT_F_XU_V_M1_MASK);
+ case RISCV::PseudoVFCVT_RM_F_XU_V_M2_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFCVT_F_XU_V_M2_MASK);
+ case RISCV::PseudoVFCVT_RM_F_XU_V_M4_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFCVT_F_XU_V_M4_MASK);
+ case RISCV::PseudoVFCVT_RM_F_XU_V_M8_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFCVT_F_XU_V_M8_MASK);
+ case RISCV::PseudoVFCVT_RM_F_XU_V_MF2_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFCVT_F_XU_V_MF2_MASK);
+ case RISCV::PseudoVFCVT_RM_F_XU_V_MF4_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFCVT_F_XU_V_MF4_MASK);
+
+ case RISCV::PseudoVFCVT_RM_F_X_V_M1_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFCVT_F_X_V_M1_MASK);
+ case RISCV::PseudoVFCVT_RM_F_X_V_M2_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFCVT_F_X_V_M2_MASK);
+ case RISCV::PseudoVFCVT_RM_F_X_V_M4_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFCVT_F_X_V_M4_MASK);
+ case RISCV::PseudoVFCVT_RM_F_X_V_M8_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFCVT_F_X_V_M8_MASK);
+ case RISCV::PseudoVFCVT_RM_F_X_V_MF2_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFCVT_F_X_V_MF2_MASK);
+ case RISCV::PseudoVFCVT_RM_F_X_V_MF4_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFCVT_F_X_V_MF4_MASK);
+
+ // =========================================================================
+ // VFWCVT
+ // =========================================================================
+
+ case RISCV::PseudoVFWCVT_RM_XU_F_V_M1_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFWCVT_X_F_V_M1_MASK);
+ case RISCV::PseudoVFWCVT_RM_XU_F_V_M2_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFWCVT_X_F_V_M2_MASK);
+ case RISCV::PseudoVFWCVT_RM_XU_F_V_M4_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFWCVT_X_F_V_M4_MASK);
+ case RISCV::PseudoVFWCVT_RM_XU_F_V_MF2_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFWCVT_X_F_V_MF2_MASK);
+ case RISCV::PseudoVFWCVT_RM_XU_F_V_MF4_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFWCVT_X_F_V_MF4_MASK);
+
+ case RISCV::PseudoVFWCVT_RM_X_F_V_M1_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFWCVT_X_F_V_M1_MASK);
+ case RISCV::PseudoVFWCVT_RM_X_F_V_M2_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFWCVT_X_F_V_M2_MASK);
+ case RISCV::PseudoVFWCVT_RM_X_F_V_M4_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFWCVT_X_F_V_M4_MASK);
+ case RISCV::PseudoVFWCVT_RM_X_F_V_MF2_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFWCVT_X_F_V_MF2_MASK);
+ case RISCV::PseudoVFWCVT_RM_X_F_V_MF4_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFWCVT_X_F_V_MF4_MASK);
+
+ case RISCV::PseudoVFWCVT_RM_F_XU_V_M1_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFWCVT_F_XU_V_M1_MASK);
+ case RISCV::PseudoVFWCVT_RM_F_XU_V_M2_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFWCVT_F_XU_V_M2_MASK);
+ case RISCV::PseudoVFWCVT_RM_F_XU_V_M4_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFWCVT_F_XU_V_M4_MASK);
+ case RISCV::PseudoVFWCVT_RM_F_XU_V_MF2_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFWCVT_F_XU_V_MF2_MASK);
+ case RISCV::PseudoVFWCVT_RM_F_XU_V_MF4_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFWCVT_F_XU_V_MF4_MASK);
+ case RISCV::PseudoVFWCVT_RM_F_XU_V_MF8_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFWCVT_F_XU_V_MF8_MASK);
+
+ case RISCV::PseudoVFWCVT_RM_F_X_V_M1_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFWCVT_F_XU_V_M1_MASK);
+ case RISCV::PseudoVFWCVT_RM_F_X_V_M2_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFWCVT_F_XU_V_M2_MASK);
+ case RISCV::PseudoVFWCVT_RM_F_X_V_M4_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFWCVT_F_XU_V_M4_MASK);
+ case RISCV::PseudoVFWCVT_RM_F_X_V_MF2_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFWCVT_F_XU_V_MF2_MASK);
+ case RISCV::PseudoVFWCVT_RM_F_X_V_MF4_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFWCVT_F_XU_V_MF4_MASK);
+ case RISCV::PseudoVFWCVT_RM_F_X_V_MF8_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFWCVT_F_XU_V_MF8_MASK);
+
+ // =========================================================================
+ // VFNCVT
+ // =========================================================================
+
+ case RISCV::PseudoVFNCVT_RM_XU_F_W_M1_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFNCVT_X_F_W_M1_MASK);
+ case RISCV::PseudoVFNCVT_RM_XU_F_W_M2_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFNCVT_X_F_W_M2_MASK);
+ case RISCV::PseudoVFNCVT_RM_XU_F_W_M4_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFNCVT_X_F_W_M4_MASK);
+ case RISCV::PseudoVFNCVT_RM_XU_F_W_MF2_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFNCVT_X_F_W_MF2_MASK);
+ case RISCV::PseudoVFNCVT_RM_XU_F_W_MF4_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFNCVT_X_F_W_MF4_MASK);
+ case RISCV::PseudoVFNCVT_RM_XU_F_W_MF8_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFNCVT_XU_F_W_MF8_MASK);
+
+ case RISCV::PseudoVFNCVT_RM_X_F_W_M1_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFNCVT_X_F_W_M1_MASK);
+ case RISCV::PseudoVFNCVT_RM_X_F_W_M2_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFNCVT_X_F_W_M2_MASK);
+ case RISCV::PseudoVFNCVT_RM_X_F_W_M4_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFNCVT_X_F_W_M4_MASK);
+ case RISCV::PseudoVFNCVT_RM_X_F_W_MF2_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFNCVT_X_F_W_MF2_MASK);
+ case RISCV::PseudoVFNCVT_RM_X_F_W_MF4_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFNCVT_X_F_W_MF4_MASK);
+ case RISCV::PseudoVFNCVT_RM_X_F_W_MF8_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFNCVT_X_F_W_MF8_MASK);
+
+ case RISCV::PseudoVFNCVT_RM_F_XU_W_M1_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFNCVT_F_XU_W_M1_MASK);
+ case RISCV::PseudoVFNCVT_RM_F_XU_W_M2_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFNCVT_F_XU_W_M2_MASK);
+ case RISCV::PseudoVFNCVT_RM_F_XU_W_M4_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFNCVT_F_XU_W_M4_MASK);
+ case RISCV::PseudoVFNCVT_RM_F_XU_W_MF2_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFNCVT_F_XU_W_MF2_MASK);
+ case RISCV::PseudoVFNCVT_RM_F_XU_W_MF4_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFNCVT_F_XU_W_MF4_MASK);
+
+ case RISCV::PseudoVFNCVT_RM_F_X_W_M1_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFNCVT_F_XU_W_M1_MASK);
+ case RISCV::PseudoVFNCVT_RM_F_X_W_M2_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFNCVT_F_XU_W_M2_MASK);
+ case RISCV::PseudoVFNCVT_RM_F_X_W_M4_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFNCVT_F_XU_W_M4_MASK);
+ case RISCV::PseudoVFNCVT_RM_F_X_W_MF2_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFNCVT_F_XU_W_MF2_MASK);
+ case RISCV::PseudoVFNCVT_RM_F_X_W_MF4_MASK:
+ return emitVFCVT_RM_MASK(MI, BB, RISCV::PseudoVFNCVT_F_XU_W_MF4_MASK);
+
+ case RISCV::PseudoVFROUND_NOEXCEPT_V_M1_MASK:
+ return emitVFROUND_NOEXCEPT_MASK(MI, BB, RISCV::PseudoVFCVT_X_F_V_M1_MASK,
+ RISCV::PseudoVFCVT_F_X_V_M1_MASK);
+ case RISCV::PseudoVFROUND_NOEXCEPT_V_M2_MASK:
+ return emitVFROUND_NOEXCEPT_MASK(MI, BB, RISCV::PseudoVFCVT_X_F_V_M2_MASK,
+ RISCV::PseudoVFCVT_F_X_V_M2_MASK);
+ case RISCV::PseudoVFROUND_NOEXCEPT_V_M4_MASK:
+ return emitVFROUND_NOEXCEPT_MASK(MI, BB, RISCV::PseudoVFCVT_X_F_V_M4_MASK,
+ RISCV::PseudoVFCVT_F_X_V_M4_MASK);
+ case RISCV::PseudoVFROUND_NOEXCEPT_V_M8_MASK:
+ return emitVFROUND_NOEXCEPT_MASK(MI, BB, RISCV::PseudoVFCVT_X_F_V_M8_MASK,
+ RISCV::PseudoVFCVT_F_X_V_M8_MASK);
+ case RISCV::PseudoVFROUND_NOEXCEPT_V_MF2_MASK:
+ return emitVFROUND_NOEXCEPT_MASK(MI, BB, RISCV::PseudoVFCVT_X_F_V_MF2_MASK,
+ RISCV::PseudoVFCVT_F_X_V_MF2_MASK);
+ case RISCV::PseudoVFROUND_NOEXCEPT_V_MF4_MASK:
+ return emitVFROUND_NOEXCEPT_MASK(MI, BB, RISCV::PseudoVFCVT_X_F_V_MF4_MASK,
+ RISCV::PseudoVFCVT_F_X_V_MF4_MASK);
+ case RISCV::PseudoFROUND_H:
+ case RISCV::PseudoFROUND_S:
+ case RISCV::PseudoFROUND_D:
+ return emitFROUND(MI, BB, Subtarget);
+ }
+}
+
+// Post-ISel hook: make instructions that use the dynamic rounding mode
+// carry an implicit read of the FRM control register, so later passes see
+// the dependency and do not reorder them across FRM writes.
+void RISCVTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
+                                                        SDNode *Node) const {
+  // Add FRM dependency to any instructions with dynamic rounding mode.
+  unsigned Opc = MI.getOpcode();
+  // Instructions without a named 'frm' operand are unaffected.
+  auto Idx = RISCV::getNamedOperandIdx(Opc, RISCV::OpName::frm);
+  if (Idx < 0)
+    return;
+  // A static rounding mode needs no FRM read.
+  if (MI.getOperand(Idx).getImm() != RISCVFPRndMode::DYN)
+    return;
+  // If the instruction already reads FRM, don't add another read.
+  if (MI.readsRegister(RISCV::FRM))
+    return;
+  MI.addOperand(
+      MachineOperand::CreateReg(RISCV::FRM, /*isDef*/ false, /*isImp*/ true));
+}
+
+// Calling Convention Implementation.
+// The expectations for frontend ABI lowering vary from target to target.
+// Ideally, an LLVM frontend would be able to avoid worrying about many ABI
+// details, but this is a longer term goal. For now, we simply try to keep the
+// role of the frontend as simple and well-defined as possible. The rules can
+// be summarised as:
+// * Never split up large scalar arguments. We handle them here.
+// * If a hardfloat calling convention is being used, and the struct may be
+// passed in a pair of registers (fp+fp, int+fp), and both registers are
+// available, then pass as two separate arguments. If either the GPRs or FPRs
+// are exhausted, then pass according to the rule below.
+// * If a struct could never be passed in registers or directly in a stack
+// slot (as it is larger than 2*XLEN and the floating point rules don't
+// apply), then pass it using a pointer with the byval attribute.
+// * If a struct is less than 2*XLEN, then coerce to either a two-element
+// word-sized array or a 2*XLEN scalar (depending on alignment).
+// * The frontend can determine whether a struct is returned by reference or
+// not based on its size and fields. If it will be returned by reference, the
+// frontend must modify the prototype so a pointer with the sret annotation is
+// passed as the first argument. This is not necessary for large scalar
+// returns.
+// * Struct return values and varargs should be coerced to structs containing
+// register-size fields in the same situations they would be for fixed
+// arguments.
+
+// Argument GPRs a0-a7 of the standard RISC-V integer calling convention.
+static const MCPhysReg ArgGPRs[] = {
+    RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
+    RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
+};
+// Argument FPRs fa0-fa7, viewed as half/float/double subregisters.
+static const MCPhysReg ArgFPR16s[] = {
+    RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H,
+    RISCV::F14_H, RISCV::F15_H, RISCV::F16_H, RISCV::F17_H
+};
+static const MCPhysReg ArgFPR32s[] = {
+    RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F,
+    RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F
+};
+static const MCPhysReg ArgFPR64s[] = {
+    RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D,
+    RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D
+};
+// Vector argument registers v8-v23, plus their LMUL=2/4/8 groupings.
+// This is an interim calling convention and it may be changed in the future.
+static const MCPhysReg ArgVRs[] = {
+    RISCV::V8,  RISCV::V9,  RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13,
+    RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19,
+    RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23};
+static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2,  RISCV::V10M2, RISCV::V12M2,
+                                     RISCV::V14M2, RISCV::V16M2, RISCV::V18M2,
+                                     RISCV::V20M2, RISCV::V22M2};
+static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4,
+                                     RISCV::V20M4};
+static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8};
+
+// Pass a 2*XLEN argument that has been split into two XLEN values through
+// registers or the stack as necessary.
+//
+// VA1/ArgFlags1 describe the first (already pending) half; ValNo2/ValVT2/
+// LocVT2/ArgFlags2 describe the second half. Returns false (success) in all
+// paths; both halves always receive a location.
+static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
+                                ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
+                                MVT ValVT2, MVT LocVT2,
+                                ISD::ArgFlagsTy ArgFlags2) {
+  unsigned XLenInBytes = XLen / 8;
+  if (Register Reg = State.AllocateReg(ArgGPRs)) {
+    // At least one half can be passed via register.
+    State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
+                                     VA1.getLocVT(), CCValAssign::Full));
+  } else {
+    // Both halves must be passed on the stack, with proper alignment.
+    // The first slot honours the original argument's alignment if larger.
+    Align StackAlign =
+        std::max(Align(XLenInBytes), ArgFlags1.getNonZeroOrigAlign());
+    State.addLoc(
+        CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
+                            State.AllocateStack(XLenInBytes, StackAlign),
+                            VA1.getLocVT(), CCValAssign::Full));
+    State.addLoc(CCValAssign::getMem(
+        ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
+        LocVT2, CCValAssign::Full));
+    return false;
+  }
+
+  if (Register Reg = State.AllocateReg(ArgGPRs)) {
+    // The second half can also be passed via register.
+    State.addLoc(
+        CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
+  } else {
+    // The second half is passed via the stack, without additional alignment.
+    State.addLoc(CCValAssign::getMem(
+        ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
+        LocVT2, CCValAssign::Full));
+  }
+
+  return false;
+}
+
+// Allocate a vector argument register of the appropriate LMUL grouping for
+// ValVT, or 0 when the group is exhausted. The first i1 mask argument (if
+// any, identified by FirstMaskArgument) is pinned to V0.
+static unsigned allocateRVVReg(MVT ValVT, unsigned ValNo,
+                               std::optional<unsigned> FirstMaskArgument,
+                               CCState &State, const RISCVTargetLowering &TLI) {
+  const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
+  if (RC == &RISCV::VRRegClass) {
+    // Assign the first mask argument to V0.
+    // This is an interim calling convention and it may be changed in the
+    // future.
+    if (FirstMaskArgument && ValNo == *FirstMaskArgument)
+      return State.AllocateReg(RISCV::V0);
+    return State.AllocateReg(ArgVRs);
+  }
+  if (RC == &RISCV::VRM2RegClass)
+    return State.AllocateReg(ArgVRM2s);
+  if (RC == &RISCV::VRM4RegClass)
+    return State.AllocateReg(ArgVRM4s);
+  if (RC == &RISCV::VRM8RegClass)
+    return State.AllocateReg(ArgVRM8s);
+  llvm_unreachable("Unhandled register class for ValueType");
+}
+
+// Implements the RISC-V calling convention. Returns true upon failure.
+static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
+ MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
+ bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
+ std::optional<unsigned> FirstMaskArgument) {
+ unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
+ assert(XLen == 32 || XLen == 64);
+ MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
+
+ // Static chain parameter must not be passed in normal argument registers,
+ // so we assign t2 for it as done in GCC's __builtin_call_with_static_chain
+ if (ArgFlags.isNest()) {
+ if (unsigned Reg = State.AllocateReg(RISCV::X7)) {
+ State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+ return false;
+ }
+ }
+
+ // Any return value split in to more than two values can't be returned
+ // directly. Vectors are returned via the available vector registers.
+ if (!LocVT.isVector() && IsRet && ValNo > 1)
+ return true;
+
+ // UseGPRForF16_F32 if targeting one of the soft-float ABIs, if passing a
+ // variadic argument, or if no F16/F32 argument registers are available.
+ bool UseGPRForF16_F32 = true;
+ // UseGPRForF64 if targeting soft-float ABIs or an FLEN=32 ABI, if passing a
+ // variadic argument, or if no F64 argument registers are available.
+ bool UseGPRForF64 = true;
+
+ switch (ABI) {
+ default:
+ llvm_unreachable("Unexpected ABI");
+ case RISCVABI::ABI_ILP32:
+ case RISCVABI::ABI_LP64:
+ break;
+ case RISCVABI::ABI_ILP32F:
+ case RISCVABI::ABI_LP64F:
+ UseGPRForF16_F32 = !IsFixed;
+ break;
+ case RISCVABI::ABI_ILP32D:
+ case RISCVABI::ABI_LP64D:
+ UseGPRForF16_F32 = !IsFixed;
+ UseGPRForF64 = !IsFixed;
+ break;
+ }
+
+ // FPR16, FPR32, and FPR64 alias each other.
+ if (State.getFirstUnallocated(ArgFPR32s) == std::size(ArgFPR32s)) {
+ UseGPRForF16_F32 = true;
+ UseGPRForF64 = true;
+ }
+
+ // From this point on, rely on UseGPRForF16_F32, UseGPRForF64 and
+ // similar local variables rather than directly checking against the target
+ // ABI.
+
+ if (UseGPRForF16_F32 && (ValVT == MVT::f16 || ValVT == MVT::f32)) {
+ LocVT = XLenVT;
+ LocInfo = CCValAssign::BCvt;
+ } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
+ LocVT = MVT::i64;
+ LocInfo = CCValAssign::BCvt;
+ }
+
+ // If this is a variadic argument, the RISC-V calling convention requires
+ // that it is assigned an 'even' or 'aligned' register if it has 8-byte
+ // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
+ // be used regardless of whether the original argument was split during
+ // legalisation or not. The argument will not be passed by registers if the
+ // original type is larger than 2*XLEN, so the register alignment rule does
+ // not apply.
+ unsigned TwoXLenInBytes = (2 * XLen) / 8;
+ if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
+ DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
+ unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
+ // Skip 'odd' register if necessary.
+ if (RegIdx != std::size(ArgGPRs) && RegIdx % 2 == 1)
+ State.AllocateReg(ArgGPRs);
+ }
+
+ SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
+ SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
+ State.getPendingArgFlags();
+
+ assert(PendingLocs.size() == PendingArgFlags.size() &&
+ "PendingLocs and PendingArgFlags out of sync");
+
+ // Handle passing f64 on RV32D with a soft float ABI or when floating point
+ // registers are exhausted.
+ if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
+ assert(!ArgFlags.isSplit() && PendingLocs.empty() &&
+ "Can't lower f64 if it is split");
+ // Depending on available argument GPRS, f64 may be passed in a pair of
+ // GPRs, split between a GPR and the stack, or passed completely on the
+ // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
+ // cases.
+ Register Reg = State.AllocateReg(ArgGPRs);
+ LocVT = MVT::i32;
+ if (!Reg) {
+ unsigned StackOffset = State.AllocateStack(8, Align(8));
+ State.addLoc(
+ CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
+ return false;
+ }
+ if (!State.AllocateReg(ArgGPRs))
+ State.AllocateStack(4, Align(4));
+ State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+ return false;
+ }
+
+ // Fixed-length vectors are located in the corresponding scalable-vector
+ // container types.
+ if (ValVT.isFixedLengthVector())
+ LocVT = TLI.getContainerForFixedLengthVector(LocVT);
+
+ // Split arguments might be passed indirectly, so keep track of the pending
+ // values. Split vectors are passed via a mix of registers and indirectly, so
+ // treat them as we would any other argument.
+ if (ValVT.isScalarInteger() && (ArgFlags.isSplit() || !PendingLocs.empty())) {
+ LocVT = XLenVT;
+ LocInfo = CCValAssign::Indirect;
+ PendingLocs.push_back(
+ CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
+ PendingArgFlags.push_back(ArgFlags);
+ if (!ArgFlags.isSplitEnd()) {
+ return false;
+ }
+ }
+
+ // If the split argument only had two elements, it should be passed directly
+ // in registers or on the stack.
+ if (ValVT.isScalarInteger() && ArgFlags.isSplitEnd() &&
+ PendingLocs.size() <= 2) {
+ assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
+ // Apply the normal calling convention rules to the first half of the
+ // split argument.
+ CCValAssign VA = PendingLocs[0];
+ ISD::ArgFlagsTy AF = PendingArgFlags[0];
+ PendingLocs.clear();
+ PendingArgFlags.clear();
+ return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT,
+ ArgFlags);
+ }
+
+ // Allocate to a register if possible, or else a stack slot.
+ Register Reg;
+ unsigned StoreSizeBytes = XLen / 8;
+ Align StackAlign = Align(XLen / 8);
+
+ if (ValVT == MVT::f16 && !UseGPRForF16_F32)
+ Reg = State.AllocateReg(ArgFPR16s);
+ else if (ValVT == MVT::f32 && !UseGPRForF16_F32)
+ Reg = State.AllocateReg(ArgFPR32s);
+ else if (ValVT == MVT::f64 && !UseGPRForF64)
+ Reg = State.AllocateReg(ArgFPR64s);
+ else if (ValVT.isVector()) {
+ Reg = allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI);
+ if (!Reg) {
+ // For return values, the vector must be passed fully via registers or
+ // via the stack.
+ // FIXME: The proposed vector ABI only mandates v8-v15 for return values,
+ // but we're using all of them.
+ if (IsRet)
+ return true;
+ // Try using a GPR to pass the address
+ if ((Reg = State.AllocateReg(ArgGPRs))) {
+ LocVT = XLenVT;
+ LocInfo = CCValAssign::Indirect;
+ } else if (ValVT.isScalableVector()) {
+ LocVT = XLenVT;
+ LocInfo = CCValAssign::Indirect;
+ } else {
+ // Pass fixed-length vectors on the stack.
+ LocVT = ValVT;
+ StoreSizeBytes = ValVT.getStoreSize();
+ // Align vectors to their element sizes, being careful for vXi1
+ // vectors.
+ StackAlign = MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
+ }
+ }
+ } else {
+ Reg = State.AllocateReg(ArgGPRs);
+ }
+
+ unsigned StackOffset =
+ Reg ? 0 : State.AllocateStack(StoreSizeBytes, StackAlign);
+
+ // If we reach this point and PendingLocs is non-empty, we must be at the
+ // end of a split argument that must be passed indirectly.
+ if (!PendingLocs.empty()) {
+ assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
+ assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
+
+ for (auto &It : PendingLocs) {
+ if (Reg)
+ It.convertToReg(Reg);
+ else
+ It.convertToMem(StackOffset);
+ State.addLoc(It);
+ }
+ PendingLocs.clear();
+ PendingArgFlags.clear();
+ return false;
+ }
+
+ assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT ||
+ (TLI.getSubtarget().hasVInstructions() && ValVT.isVector())) &&
+ "Expected an XLenVT or vector types at this stage");
+
+ if (Reg) {
+ State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+ return false;
+ }
+
+ // When a floating-point value is passed on the stack, no bit-conversion is
+ // needed.
+ if (ValVT.isFloatingPoint()) {
+ LocVT = ValVT;
+ LocInfo = CCValAssign::Full;
+ }
+ State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
+ return false;
+}
+
+// Return the index of the first vector-of-i1 (mask) argument, if any.
+// allocateRVVReg pins that argument to V0. Works over both ISD::InputArg
+// and ISD::OutputArg lists (anything with a .VT member).
+template <typename ArgTy>
+static std::optional<unsigned> preAssignMask(const ArgTy &Args) {
+  for (const auto &ArgIdx : enumerate(Args)) {
+    MVT ArgVT = ArgIdx.value().VT;
+    if (ArgVT.isVector() && ArgVT.getVectorElementType() == MVT::i1)
+      return ArgIdx.index();
+  }
+  return std::nullopt;
+}
+
+// Run the calling-convention assignment function Fn over every incoming
+// value in Ins, populating CCInfo. IsRet selects return-value analysis,
+// in which case the original IR type is the function's return type.
+void RISCVTargetLowering::analyzeInputArgs(
+    MachineFunction &MF, CCState &CCInfo,
+    const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet,
+    RISCVCCAssignFn Fn) const {
+  unsigned NumArgs = Ins.size();
+  FunctionType *FType = MF.getFunction().getFunctionType();
+
+  std::optional<unsigned> FirstMaskArgument;
+  if (Subtarget.hasVInstructions())
+    FirstMaskArgument = preAssignMask(Ins);
+
+  for (unsigned i = 0; i != NumArgs; ++i) {
+    MVT ArgVT = Ins[i].VT;
+    ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
+
+    // Recover the IR type so CC_RISCV can apply type-size based rules.
+    Type *ArgTy = nullptr;
+    if (IsRet)
+      ArgTy = FType->getReturnType();
+    else if (Ins[i].isOrigArg())
+      ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
+
+    RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
+    if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
+           ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, *this,
+           FirstMaskArgument)) {
+      LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
+                        << EVT(ArgVT).getEVTString() << '\n');
+      llvm_unreachable(nullptr);
+    }
+  }
+}
+
+// Counterpart of analyzeInputArgs for outgoing values. CLI (when non-null)
+// supplies the original IR argument types for calls; it is null when
+// analysing return values.
+void RISCVTargetLowering::analyzeOutputArgs(
+    MachineFunction &MF, CCState &CCInfo,
+    const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
+    CallLoweringInfo *CLI, RISCVCCAssignFn Fn) const {
+  unsigned NumArgs = Outs.size();
+
+  std::optional<unsigned> FirstMaskArgument;
+  if (Subtarget.hasVInstructions())
+    FirstMaskArgument = preAssignMask(Outs);
+
+  for (unsigned i = 0; i != NumArgs; i++) {
+    MVT ArgVT = Outs[i].VT;
+    ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
+    Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
+
+    RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
+    if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
+           ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this,
+           FirstMaskArgument)) {
+      LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
+                        << EVT(ArgVT).getEVTString() << "\n");
+      llvm_unreachable(nullptr);
+    }
+  }
+}
+
+// Convert Val to a ValVT. Should not be called for CCValAssign::Indirect
+// values.
+static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
+                                   const CCValAssign &VA, const SDLoc &DL,
+                                   const RISCVSubtarget &Subtarget) {
+  switch (VA.getLocInfo()) {
+  default:
+    llvm_unreachable("Unexpected CCValAssign::LocInfo");
+  case CCValAssign::Full:
+    // Extract a fixed-length vector from its scalable container type.
+    if (VA.getValVT().isFixedLengthVector() && VA.getLocVT().isScalableVector())
+      Val = convertFromScalableVector(VA.getValVT(), Val, DAG, Subtarget);
+    break;
+  case CCValAssign::BCvt:
+    // f16/f32 travelled through a GPR: use the FP move nodes that model the
+    // NaN-boxing semantics; everything else is a plain bitcast.
+    if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
+      Val = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, Val);
+    else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
+      Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
+    else
+      Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
+    break;
+  }
+  return Val;
+}
+
+// The caller is responsible for loading the full value if the argument is
+// passed with CCValAssign::Indirect.
+//
+// Copies the incoming physical register into a fresh virtual register and
+// converts the loc type back to the value type (unless Indirect, where the
+// raw pointer value is returned).
+static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
+                                const CCValAssign &VA, const SDLoc &DL,
+                                const ISD::InputArg &In,
+                                const RISCVTargetLowering &TLI) {
+  MachineFunction &MF = DAG.getMachineFunction();
+  MachineRegisterInfo &RegInfo = MF.getRegInfo();
+  EVT LocVT = VA.getLocVT();
+  SDValue Val;
+  const TargetRegisterClass *RC = TLI.getRegClassFor(LocVT.getSimpleVT());
+  Register VReg = RegInfo.createVirtualRegister(RC);
+  RegInfo.addLiveIn(VA.getLocReg(), VReg);
+  Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
+
+  // If input is sign extended from 32 bits, note it for the SExtWRemoval pass.
+  if (In.isOrigArg()) {
+    Argument *OrigArg = MF.getFunction().getArg(In.getOrigArgIndex());
+    if (OrigArg->getType()->isIntegerTy()) {
+      unsigned BitWidth = OrigArg->getType()->getIntegerBitWidth();
+      // An input zero extended from i31 can also be considered sign extended.
+      if ((BitWidth <= 32 && In.Flags.isSExt()) ||
+          (BitWidth < 32 && In.Flags.isZExt())) {
+        RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
+        RVFI->addSExt32Register(VReg);
+      }
+    }
+  }
+
+  if (VA.getLocInfo() == CCValAssign::Indirect)
+    return Val;
+
+  return convertLocVTToValVT(DAG, Val, VA, DL, TLI.getSubtarget());
+}
+
+// Inverse of convertLocVTToValVT: convert a value to the type it occupies
+// in its assigned location (register/stack) before being passed/returned.
+static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
+                                   const CCValAssign &VA, const SDLoc &DL,
+                                   const RISCVSubtarget &Subtarget) {
+  EVT LocVT = VA.getLocVT();
+
+  switch (VA.getLocInfo()) {
+  default:
+    llvm_unreachable("Unexpected CCValAssign::LocInfo");
+  case CCValAssign::Full:
+    // Insert a fixed-length vector into its scalable container type.
+    if (VA.getValVT().isFixedLengthVector() && LocVT.isScalableVector())
+      Val = convertToScalableVector(LocVT, Val, DAG, Subtarget);
+    break;
+  case CCValAssign::BCvt:
+    // f16/f32 headed for a GPR: use the FP move nodes rather than a bitcast
+    // where the widths differ (NaN-boxing); otherwise plain bitcast.
+    if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
+      Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, VA.getLocVT(), Val);
+    else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
+      Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val);
+    else
+      Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
+    break;
+  }
+  return Val;
+}
+
+// The caller is responsible for loading the full value if the argument is
+// passed with CCValAssign::Indirect.
+//
+// Creates a fixed stack object at the argument's stack offset and loads the
+// value from it.
+static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
+                                const CCValAssign &VA, const SDLoc &DL) {
+  MachineFunction &MF = DAG.getMachineFunction();
+  MachineFrameInfo &MFI = MF.getFrameInfo();
+  EVT LocVT = VA.getLocVT();
+  EVT ValVT = VA.getValVT();
+  EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
+  if (ValVT.isScalableVector()) {
+    // When the value is a scalable vector, we save the pointer which points to
+    // the scalable vector value in the stack. The ValVT will be the pointer
+    // type, instead of the scalable vector type.
+    ValVT = LocVT;
+  }
+  int FI = MFI.CreateFixedObject(ValVT.getStoreSize(), VA.getLocMemOffset(),
+                                 /*IsImmutable=*/true);
+  SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
+  SDValue Val;
+
+  // All handled LocInfo kinds load the full width; the switch exists to
+  // reject unexpected LocInfo values loudly.
+  ISD::LoadExtType ExtType;
+  switch (VA.getLocInfo()) {
+  default:
+    llvm_unreachable("Unexpected CCValAssign::LocInfo");
+  case CCValAssign::Full:
+  case CCValAssign::Indirect:
+  case CCValAssign::BCvt:
+    ExtType = ISD::NON_EXTLOAD;
+    break;
+  }
+  Val = DAG.getExtLoad(
+      ExtType, DL, LocVT, Chain, FIN,
+      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
+  return Val;
+}
+
+// Reassemble an f64 argument on RV32 when it was lowered as two i32 halves
+// (soft-float ABI or exhausted FPRs). Handles all three placements produced
+// by CC_RISCV: fully on the stack, split GPR+stack (low half in a7), or a
+// GPR pair.
+static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
+                                       const CCValAssign &VA, const SDLoc &DL) {
+  assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
+         "Unexpected VA");
+  MachineFunction &MF = DAG.getMachineFunction();
+  MachineFrameInfo &MFI = MF.getFrameInfo();
+  MachineRegisterInfo &RegInfo = MF.getRegInfo();
+
+  if (VA.isMemLoc()) {
+    // f64 is passed on the stack.
+    int FI =
+        MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*IsImmutable=*/true);
+    SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
+    return DAG.getLoad(MVT::f64, DL, Chain, FIN,
+                       MachinePointerInfo::getFixedStack(MF, FI));
+  }
+
+  assert(VA.isRegLoc() && "Expected register VA assignment");
+
+  Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
+  RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
+  SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
+  SDValue Hi;
+  if (VA.getLocReg() == RISCV::X17) {
+    // Second half of f64 is passed on the stack.
+    // X17 (a7) is the last argument GPR, so the high half had no register.
+    int FI = MFI.CreateFixedObject(4, 0, /*IsImmutable=*/true);
+    SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
+    Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
+                     MachinePointerInfo::getFixedStack(MF, FI));
+  } else {
+    // Second half of f64 is passed in another GPR.
+    Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
+    RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
+    Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
+  }
+  return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
+}
+
+// FastCC has less than 1% performance improvement for some particular
+// benchmark. But theoretically, it may have benefit for some cases.
+//
+// Fast calling convention: widens the register pools with temporaries
+// (t2-t6 for GPRs, ft0-ft7/ft8-ft11 for FPRs) before spilling to the
+// stack. Returns true when no rule matched (failure).
+static bool CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
+                            unsigned ValNo, MVT ValVT, MVT LocVT,
+                            CCValAssign::LocInfo LocInfo,
+                            ISD::ArgFlagsTy ArgFlags, CCState &State,
+                            bool IsFixed, bool IsRet, Type *OrigTy,
+                            const RISCVTargetLowering &TLI,
+                            std::optional<unsigned> FirstMaskArgument) {
+
+  // X5 and X6 might be used for save-restore libcall.
+  static const MCPhysReg GPRList[] = {
+      RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14,
+      RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7,  RISCV::X28,
+      RISCV::X29, RISCV::X30, RISCV::X31};
+
+  if (LocVT == MVT::i32 || LocVT == MVT::i64) {
+    if (unsigned Reg = State.AllocateReg(GPRList)) {
+      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+      return false;
+    }
+  }
+
+  if (LocVT == MVT::f16) {
+    static const MCPhysReg FPR16List[] = {
+        RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
+        RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H,  RISCV::F1_H,
+        RISCV::F2_H,  RISCV::F3_H,  RISCV::F4_H,  RISCV::F5_H,  RISCV::F6_H,
+        RISCV::F7_H,  RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
+    if (unsigned Reg = State.AllocateReg(FPR16List)) {
+      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+      return false;
+    }
+  }
+
+  if (LocVT == MVT::f32) {
+    static const MCPhysReg FPR32List[] = {
+        RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
+        RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F,  RISCV::F1_F,
+        RISCV::F2_F,  RISCV::F3_F,  RISCV::F4_F,  RISCV::F5_F,  RISCV::F6_F,
+        RISCV::F7_F,  RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
+    if (unsigned Reg = State.AllocateReg(FPR32List)) {
+      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+      return false;
+    }
+  }
+
+  if (LocVT == MVT::f64) {
+    static const MCPhysReg FPR64List[] = {
+        RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
+        RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D,  RISCV::F1_D,
+        RISCV::F2_D,  RISCV::F3_D,  RISCV::F4_D,  RISCV::F5_D,  RISCV::F6_D,
+        RISCV::F7_D,  RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
+    if (unsigned Reg = State.AllocateReg(FPR64List)) {
+      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+      return false;
+    }
+  }
+
+  // Registers exhausted: spill scalars to the stack by size.
+  if (LocVT == MVT::i32 || LocVT == MVT::f32) {
+    unsigned Offset4 = State.AllocateStack(4, Align(4));
+    State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
+    return false;
+  }
+
+  if (LocVT == MVT::i64 || LocVT == MVT::f64) {
+    unsigned Offset5 = State.AllocateStack(8, Align(8));
+    State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
+    return false;
+  }
+
+  if (LocVT.isVector()) {
+    if (unsigned Reg =
+            allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI)) {
+      // Fixed-length vectors are located in the corresponding scalable-vector
+      // container types.
+      if (ValVT.isFixedLengthVector())
+        LocVT = TLI.getContainerForFixedLengthVector(LocVT);
+      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+    } else {
+      // Try and pass the address via a "fast" GPR.
+      if (unsigned GPRReg = State.AllocateReg(GPRList)) {
+        LocInfo = CCValAssign::Indirect;
+        LocVT = TLI.getSubtarget().getXLenVT();
+        State.addLoc(CCValAssign::getReg(ValNo, ValVT, GPRReg, LocVT, LocInfo));
+      } else if (ValVT.isFixedLengthVector()) {
+        auto StackAlign =
+            MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
+        unsigned StackOffset =
+            State.AllocateStack(ValVT.getStoreSize(), StackAlign);
+        State.addLoc(
+            CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
+      } else {
+        // Can't pass scalable vectors on the stack.
+        return true;
+      }
+    }
+
+    return false;
+  }
+
+  return true; // CC didn't match.
+}
+
+// Calling convention for GHC (the Glasgow Haskell Compiler): every argument
+// is pinned to a dedicated STG machine register and nothing is ever spilled
+// to the stack. Running out of registers is a fatal error.
+static bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
+                         CCValAssign::LocInfo LocInfo,
+                         ISD::ArgFlagsTy ArgFlags, CCState &State) {
+
+  if (ArgFlags.isNest()) {
+    report_fatal_error(
+        "Attribute 'nest' is not supported in GHC calling convention");
+  }
+
+  // Record a register assignment for this value; returns true on success.
+  auto AssignToReg = [&](ArrayRef<MCPhysReg> RegList) {
+    unsigned AssignedReg = State.AllocateReg(RegList);
+    if (!AssignedReg)
+      return false;
+    State.addLoc(
+        CCValAssign::getReg(ValNo, ValVT, AssignedReg, LocVT, LocInfo));
+    return true;
+  };
+
+  if (LocVT == MVT::i32 || LocVT == MVT::i64) {
+    // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim
+    //                        s1   s2  s3  s4  s5  s6  s7  s8  s9  s10 s11
+    static const MCPhysReg GPRList[] = {
+        RISCV::X9,  RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22,
+        RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27};
+    if (AssignToReg(GPRList))
+      return false;
+  }
+
+  if (LocVT == MVT::f32) {
+    // Pass in STG registers: F1, ..., F6
+    //                        fs0 ... fs5
+    static const MCPhysReg FPR32List[] = {RISCV::F8_F,  RISCV::F9_F,
+                                          RISCV::F18_F, RISCV::F19_F,
+                                          RISCV::F20_F, RISCV::F21_F};
+    if (AssignToReg(FPR32List))
+      return false;
+  }
+
+  if (LocVT == MVT::f64) {
+    // Pass in STG registers: D1, ..., D6
+    //                        fs6 ... fs11
+    static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
+                                          RISCV::F24_D, RISCV::F25_D,
+                                          RISCV::F26_D, RISCV::F27_D};
+    if (AssignToReg(FPR64List))
+      return false;
+  }
+
+  // No stack fallback for GHC; report_fatal_error does not return.
+  report_fatal_error("No registers left in GHC calling convention");
+  return true;
+}
+
+// Transform physical registers into virtual registers.
+//
+// Lowers the incoming formal arguments described by \p Ins: each argument is
+// assigned a register or stack location by the calling convention, loaded or
+// copied into the DAG, and appended to \p InVals. For vararg functions the
+// unallocated argument registers a0-a7 are spilled to a save area so va_start
+// can find them. Returns the (possibly updated) entry-node chain.
+SDValue RISCVTargetLowering::LowerFormalArguments(
+    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
+    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
+    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
+
+  MachineFunction &MF = DAG.getMachineFunction();
+
+  switch (CallConv) {
+  default:
+    report_fatal_error("Unsupported calling convention");
+  case CallingConv::C:
+  case CallingConv::Fast:
+    break;
+  case CallingConv::GHC:
+    // GHC pins arguments to FP registers (see CC_RISCV_GHC), so the F and D
+    // extensions are mandatory.
+    if (!MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtF] ||
+        !MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtD])
+      report_fatal_error(
+        "GHC calling convention requires the F and D instruction set extensions");
+  }
+
+  const Function &Func = MF.getFunction();
+  if (Func.hasFnAttribute("interrupt")) {
+    if (!Func.arg_empty())
+      report_fatal_error(
+        "Functions with the interrupt attribute cannot have arguments!");
+
+    StringRef Kind =
+      MF.getFunction().getFnAttribute("interrupt").getValueAsString();
+
+    if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
+      report_fatal_error(
+        "Function interrupt attribute argument not supported!");
+  }
+
+  EVT PtrVT = getPointerTy(DAG.getDataLayout());
+  MVT XLenVT = Subtarget.getXLenVT();
+  unsigned XLenInBytes = Subtarget.getXLen() / 8;
+  // Used with vargs to accumulate store chains.
+  std::vector<SDValue> OutChains;
+
+  // Assign locations to all of the incoming arguments.
+  SmallVector<CCValAssign, 16> ArgLocs;
+  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
+
+  if (CallConv == CallingConv::GHC)
+    CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_GHC);
+  else
+    analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false,
+                     CallConv == CallingConv::Fast ? CC_RISCV_FastCC
+                                                   : CC_RISCV);
+
+  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+    CCValAssign &VA = ArgLocs[i];
+    SDValue ArgValue;
+    // Passing f64 on RV32D with a soft float ABI must be handled as a special
+    // case.
+    if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
+      ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
+    else if (VA.isRegLoc())
+      ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, Ins[i], *this);
+    else
+      ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
+
+    if (VA.getLocInfo() == CCValAssign::Indirect) {
+      // If the original argument was split and passed by reference (e.g. i128
+      // on RV32), we need to load all parts of it here (using the same
+      // address). Vectors may be partly split to registers and partly to the
+      // stack, in which case the base address is partly offset and subsequent
+      // stores are relative to that.
+      InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
+                                   MachinePointerInfo()));
+      unsigned ArgIndex = Ins[i].OrigArgIndex;
+      unsigned ArgPartOffset = Ins[i].PartOffset;
+      assert(VA.getValVT().isVector() || ArgPartOffset == 0);
+      // Consume every subsequent part that belongs to the same IR argument.
+      while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
+        CCValAssign &PartVA = ArgLocs[i + 1];
+        unsigned PartOffset = Ins[i + 1].PartOffset - ArgPartOffset;
+        SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
+        // Scalable-vector offsets are in vscale units, so scale at runtime.
+        if (PartVA.getValVT().isScalableVector())
+          Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
+        SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue, Offset);
+        InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
+                                     MachinePointerInfo()));
+        ++i;
+      }
+      continue;
+    }
+    InVals.push_back(ArgValue);
+  }
+
+  if (any_of(ArgLocs,
+             [](CCValAssign &VA) { return VA.getLocVT().isScalableVector(); }))
+    MF.getInfo<RISCVMachineFunctionInfo>()->setIsVectorCall();
+
+  if (IsVarArg) {
+    ArrayRef<MCPhysReg> ArgRegs = ArrayRef(ArgGPRs);
+    unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
+    const TargetRegisterClass *RC = &RISCV::GPRRegClass;
+    MachineFrameInfo &MFI = MF.getFrameInfo();
+    MachineRegisterInfo &RegInfo = MF.getRegInfo();
+    RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
+
+    // Offset of the first variable argument from stack pointer, and size of
+    // the vararg save area. For now, the varargs save area is either zero or
+    // large enough to hold a0-a7.
+    int VaArgOffset, VarArgsSaveSize;
+
+    // If all registers are allocated, then all varargs must be passed on the
+    // stack and we don't need to save any argregs.
+    if (ArgRegs.size() == Idx) {
+      VaArgOffset = CCInfo.getNextStackOffset();
+      VarArgsSaveSize = 0;
+    } else {
+      VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
+      VaArgOffset = -VarArgsSaveSize;
+    }
+
+    // Record the frame index of the first variable argument
+    // which is a value necessary to VASTART.
+    int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
+    RVFI->setVarArgsFrameIndex(FI);
+
+    // If saving an odd number of registers then create an extra stack slot to
+    // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
+    // offsets to even-numbered registered remain 2*XLEN-aligned.
+    if (Idx % 2) {
+      MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
+      VarArgsSaveSize += XLenInBytes;
+    }
+
+    // Copy the integer registers that may have been used for passing varargs
+    // to the vararg save area.
+    for (unsigned I = Idx; I < ArgRegs.size();
+         ++I, VaArgOffset += XLenInBytes) {
+      const Register Reg = RegInfo.createVirtualRegister(RC);
+      RegInfo.addLiveIn(ArgRegs[I], Reg);
+      SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
+      FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
+      SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
+      SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
+                                   MachinePointerInfo::getFixedStack(MF, FI));
+      // Clear the IR Value so later passes don't treat these spills as
+      // aliasing a user variable.
+      cast<StoreSDNode>(Store.getNode())
+          ->getMemOperand()
+          ->setValue((Value *)nullptr);
+      OutChains.push_back(Store);
+    }
+    RVFI->setVarArgsSaveSize(VarArgsSaveSize);
+  }
+
+  // All stores are grouped in one node to allow the matching between
+  // the size of Ins and InVals. This only happens for vararg functions.
+  if (!OutChains.empty()) {
+    OutChains.push_back(Chain);
+    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
+  }
+
+  return Chain;
+}
+
+/// isEligibleForTailCallOptimization - Check whether the call is eligible
+/// for tail call optimization.
+/// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
+bool RISCVTargetLowering::isEligibleForTailCallOptimization(
+    CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
+    const SmallVector<CCValAssign, 16> &ArgLocs) const {
+
+  const Function &CallerFn = MF.getFunction();
+  CallingConv::ID CallerCC = CallerFn.getCallingConv();
+  CallingConv::ID CalleeCC = CLI.CallConv;
+
+  // Exception-handling functions need a special set of instructions to
+  // indicate a return to the hardware. Tail-calling another function would
+  // probably break this.
+  // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
+  // should be expanded as new function attributes are introduced.
+  if (CallerFn.hasFnAttribute("interrupt"))
+    return false;
+
+  // Do not tail call opt if the stack is used to pass parameters.
+  if (CCInfo.getNextStackOffset() != 0)
+    return false;
+
+  // Values larger than 2*XLEN (e.g. fp128, i128) are passed indirectly and
+  // may require stack space even when getNextStackOffset() is zero, so an
+  // explicit check for CCValAssign::Indirect locations is needed as well.
+  if (llvm::any_of(ArgLocs, [](const CCValAssign &Loc) {
+        return Loc.getLocInfo() == CCValAssign::Indirect;
+      }))
+    return false;
+
+  // Struct-return semantics on either side of the call are incompatible with
+  // tail calling.
+  bool CalleeUsesSRet = !CLI.Outs.empty() && CLI.Outs[0].Flags.isSRet();
+  if (CallerFn.hasStructRetAttr() || CalleeUsesSRet)
+    return false;
+
+  // Branching to an externally-weak symbol is implementation-defined; we
+  // cannot rely on the linker replacing the tail call with a return, so
+  // such callees must not be tail-called.
+  if (auto *GA = dyn_cast<GlobalAddressSDNode>(CLI.Callee))
+    if (GA->getGlobal()->hasExternalWeakLinkage())
+      return false;
+
+  // The callee has to preserve all registers the caller needs to preserve.
+  const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
+  if (CalleeCC != CallerCC) {
+    const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
+    const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
+    if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
+      return false;
+  }
+
+  // Byval parameters hand the function a pointer directly into the stack area
+  // we want to reuse during a tail call. Working around this *is* possible
+  // but less efficient and uglier in LowerCall.
+  if (llvm::any_of(CLI.Outs, [](const ISD::OutputArg &Arg) {
+        return Arg.Flags.isByVal();
+      }))
+    return false;
+
+  return true;
+}
+
+// Preferred alignment of VT's corresponding IR type in the module's data
+// layout.
+static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG) {
+  Type *IRTy = VT.getTypeForEVT(*DAG.getContext());
+  return DAG.getDataLayout().getPrefTypeAlign(IRTy);
+}
+
+// Lower a call to a callseq_start + CALL + callseq_end chain, and add input
+// and output parameter nodes.
+//
+// Lowers an outgoing call: assigns argument locations, copies byval and
+// indirect arguments into stack temporaries, emits the register copies and
+// stack stores, builds the CALL (or TAIL) node, and finally copies the
+// returned values out of their physregs into \p InVals. Returns the chain
+// after the callseq_end (or the TAIL node for a tail call).
+SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
+                                       SmallVectorImpl<SDValue> &InVals) const {
+  SelectionDAG &DAG = CLI.DAG;
+  SDLoc &DL = CLI.DL;
+  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
+  SDValue Chain = CLI.Chain;
+  SDValue Callee = CLI.Callee;
+  bool &IsTailCall = CLI.IsTailCall;
+  CallingConv::ID CallConv = CLI.CallConv;
+  bool IsVarArg = CLI.IsVarArg;
+  EVT PtrVT = getPointerTy(DAG.getDataLayout());
+  MVT XLenVT = Subtarget.getXLenVT();
+
+  MachineFunction &MF = DAG.getMachineFunction();
+
+  // Analyze the operands of the call, assigning locations to each operand.
+  SmallVector<CCValAssign, 16> ArgLocs;
+  CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
+
+  if (CallConv == CallingConv::GHC)
+    ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_GHC);
+  else
+    analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI,
+                      CallConv == CallingConv::Fast ? CC_RISCV_FastCC
+                                                    : CC_RISCV);
+
+  // Check if it's really possible to do a tail call.
+  if (IsTailCall)
+    IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
+
+  if (IsTailCall)
+    ++NumTailCalls;
+  else if (CLI.CB && CLI.CB->isMustTailCall())
+    report_fatal_error("failed to perform tail call elimination on a call "
+                       "site marked musttail");
+
+  // Get a count of how many bytes are to be pushed on the stack.
+  unsigned NumBytes = ArgCCInfo.getNextStackOffset();
+
+  // Create local copies for byval args.
+  SmallVector<SDValue, 8> ByValArgs;
+  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
+    ISD::ArgFlagsTy Flags = Outs[i].Flags;
+    if (!Flags.isByVal())
+      continue;
+
+    SDValue Arg = OutVals[i];
+    unsigned Size = Flags.getByValSize();
+    Align Alignment = Flags.getNonZeroByValAlign();
+
+    int FI =
+        MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false);
+    SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
+    SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);
+
+    Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
+                          /*IsVolatile=*/false,
+                          /*AlwaysInline=*/false, IsTailCall,
+                          MachinePointerInfo(), MachinePointerInfo());
+    ByValArgs.push_back(FIPtr);
+  }
+
+  if (!IsTailCall)
+    Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
+
+  // Copy argument values to their designated locations.
+  SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
+  SmallVector<SDValue, 8> MemOpChains;
+  SDValue StackPtr;
+  for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
+    CCValAssign &VA = ArgLocs[i];
+    SDValue ArgValue = OutVals[i];
+    ISD::ArgFlagsTy Flags = Outs[i].Flags;
+
+    // Handle passing f64 on RV32D with a soft float ABI as a special case.
+    bool IsF64OnRV32DSoftABI =
+        VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
+    if (IsF64OnRV32DSoftABI && VA.isRegLoc()) {
+      SDValue SplitF64 = DAG.getNode(
+          RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
+      SDValue Lo = SplitF64.getValue(0);
+      SDValue Hi = SplitF64.getValue(1);
+
+      Register RegLo = VA.getLocReg();
+      RegsToPass.push_back(std::make_pair(RegLo, Lo));
+
+      if (RegLo == RISCV::X17) {
+        // Second half of f64 is passed on the stack.
+        // Work out the address of the stack slot.
+        if (!StackPtr.getNode())
+          StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
+        // Emit the store.
+        MemOpChains.push_back(
+            DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo()));
+      } else {
+        // Second half of f64 is passed in another GPR.
+        assert(RegLo < RISCV::X31 && "Invalid register pair");
+        Register RegHigh = RegLo + 1;
+        RegsToPass.push_back(std::make_pair(RegHigh, Hi));
+      }
+      continue;
+    }
+
+    // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way
+    // as any other MemLoc.
+
+    // Promote the value if needed.
+    // For now, only handle fully promoted and indirect arguments.
+    if (VA.getLocInfo() == CCValAssign::Indirect) {
+      // Store the argument in a stack slot and pass its address.
+      Align StackAlign =
+          std::max(getPrefTypeAlign(Outs[i].ArgVT, DAG),
+                   getPrefTypeAlign(ArgValue.getValueType(), DAG));
+      TypeSize StoredSize = ArgValue.getValueType().getStoreSize();
+      // If the original argument was split (e.g. i128), we need
+      // to store the required parts of it here (and pass just one address).
+      // Vectors may be partly split to registers and partly to the stack, in
+      // which case the base address is partly offset and subsequent stores are
+      // relative to that.
+      unsigned ArgIndex = Outs[i].OrigArgIndex;
+      unsigned ArgPartOffset = Outs[i].PartOffset;
+      assert(VA.getValVT().isVector() || ArgPartOffset == 0);
+      // Calculate the total size to store. We don't have access to what we're
+      // actually storing other than performing the loop and collecting the
+      // info.
+      SmallVector<std::pair<SDValue, SDValue>> Parts;
+      while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
+        SDValue PartValue = OutVals[i + 1];
+        unsigned PartOffset = Outs[i + 1].PartOffset - ArgPartOffset;
+        SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
+        EVT PartVT = PartValue.getValueType();
+        // Scalable-vector offsets are in vscale units, so scale at runtime.
+        if (PartVT.isScalableVector())
+          Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
+        StoredSize += PartVT.getStoreSize();
+        StackAlign = std::max(StackAlign, getPrefTypeAlign(PartVT, DAG));
+        Parts.push_back(std::make_pair(PartValue, Offset));
+        ++i;
+      }
+      SDValue SpillSlot = DAG.CreateStackTemporary(StoredSize, StackAlign);
+      int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
+      MemOpChains.push_back(
+          DAG.getStore(Chain, DL, ArgValue, SpillSlot,
+                       MachinePointerInfo::getFixedStack(MF, FI)));
+      for (const auto &Part : Parts) {
+        SDValue PartValue = Part.first;
+        SDValue PartOffset = Part.second;
+        SDValue Address =
+            DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot, PartOffset);
+        MemOpChains.push_back(
+            DAG.getStore(Chain, DL, PartValue, Address,
+                         MachinePointerInfo::getFixedStack(MF, FI)));
+      }
+      ArgValue = SpillSlot;
+    } else {
+      ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL, Subtarget);
+    }
+
+    // Use local copy if it is a byval arg.
+    if (Flags.isByVal())
+      ArgValue = ByValArgs[j++];
+
+    if (VA.isRegLoc()) {
+      // Queue up the argument copies and emit them at the end.
+      RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
+    } else {
+      assert(VA.isMemLoc() && "Argument not register or memory");
+      assert(!IsTailCall && "Tail call not allowed if stack is used "
+                            "for passing parameters");
+
+      // Work out the address of the stack slot.
+      if (!StackPtr.getNode())
+        StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
+      SDValue Address =
+          DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
+                      DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
+
+      // Emit the store.
+      MemOpChains.push_back(
+          DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
+    }
+  }
+
+  // Join the stores, which are independent of one another.
+  if (!MemOpChains.empty())
+    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
+
+  SDValue Glue;
+
+  // Build a sequence of copy-to-reg nodes, chained and glued together.
+  for (auto &Reg : RegsToPass) {
+    Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
+    Glue = Chain.getValue(1);
+  }
+
+  // Validate that none of the argument registers have been marked as
+  // reserved, if so report an error. Do the same for the return address if this
+  // is not a tailcall.
+  validateCCReservedRegs(RegsToPass, MF);
+  if (!IsTailCall &&
+      MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1))
+    MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
+        MF.getFunction(),
+        "Return address register required, but has been reserved."});
+
+  // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a
+  // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't
+  // split it and then direct call can be matched by PseudoCALL.
+  if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
+    const GlobalValue *GV = S->getGlobal();
+
+    unsigned OpFlags = RISCVII::MO_CALL;
+    if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV))
+      OpFlags = RISCVII::MO_PLT;
+
+    Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
+  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
+    unsigned OpFlags = RISCVII::MO_CALL;
+
+    if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(),
+                                                 nullptr))
+      OpFlags = RISCVII::MO_PLT;
+
+    Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags);
+  }
+
+  // The first call operand is the chain and the second is the target address.
+  SmallVector<SDValue, 8> Ops;
+  Ops.push_back(Chain);
+  Ops.push_back(Callee);
+
+  // Add argument registers to the end of the list so that they are
+  // known live into the call.
+  for (auto &Reg : RegsToPass)
+    Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
+
+  if (!IsTailCall) {
+    // Add a register mask operand representing the call-preserved registers.
+    const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
+    const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
+    assert(Mask && "Missing call preserved mask for calling convention");
+    Ops.push_back(DAG.getRegisterMask(Mask));
+  }
+
+  // Glue the call to the argument copies, if any.
+  if (Glue.getNode())
+    Ops.push_back(Glue);
+
+  // Emit the call.
+  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
+
+  if (IsTailCall) {
+    // Tail calls replace the return; no callseq_end or result copies follow.
+    MF.getFrameInfo().setHasTailCall();
+    return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops);
+  }
+
+  Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
+  DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
+  Glue = Chain.getValue(1);
+
+  // Mark the end of the call, which is glued to the call itself.
+  Chain = DAG.getCALLSEQ_END(Chain, NumBytes, 0, Glue, DL);
+  Glue = Chain.getValue(1);
+
+  // Assign locations to each value returned by this call.
+  SmallVector<CCValAssign, 16> RVLocs;
+  CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
+  analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true, CC_RISCV);
+
+  // Copy all of the result registers out of their specified physreg.
+  for (auto &VA : RVLocs) {
+    // Copy the value out
+    SDValue RetValue =
+        DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
+    // Glue the RetValue to the end of the call sequence
+    Chain = RetValue.getValue(1);
+    Glue = RetValue.getValue(2);
+
+    // f64 returned on RV32D soft-float ABI arrives in a GPR pair; reassemble.
+    if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
+      assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment");
+      SDValue RetValue2 =
+          DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue);
+      Chain = RetValue2.getValue(1);
+      Glue = RetValue2.getValue(2);
+      RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
+                             RetValue2);
+    }
+
+    RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL, Subtarget);
+
+    InVals.push_back(RetValue);
+  }
+
+  return Chain;
+}
+
+// Return true if every value in Outs can be assigned a return location by
+// CC_RISCV, i.e. the return can be lowered without demoting to sret.
+bool RISCVTargetLowering::CanLowerReturn(
+    CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
+    const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
+  SmallVector<CCValAssign, 16> RVLocs;
+  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
+
+  std::optional<unsigned> FirstMaskArgument;
+  if (Subtarget.hasVInstructions())
+    FirstMaskArgument = preAssignMask(Outs);
+
+  const RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
+  for (unsigned Idx = 0, End = Outs.size(); Idx != End; ++Idx) {
+    const MVT VT = Outs[Idx].VT;
+    // CC_RISCV returns true when it fails to assign a location.
+    if (CC_RISCV(MF.getDataLayout(), ABI, Idx, VT, VT, CCValAssign::Full,
+                 Outs[Idx].Flags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true,
+                 nullptr, *this, FirstMaskArgument))
+      return false;
+  }
+  return true;
+}
+
+// Lower an IR 'ret' into copies to the return registers followed by the
+// appropriate RISCVISD return node (ret/uret/sret/mret). f64 returned on
+// RV32D with a soft-float ABI is split across a GPR pair.
+SDValue
+RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
+                                 bool IsVarArg,
+                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
+                                 const SmallVectorImpl<SDValue> &OutVals,
+                                 const SDLoc &DL, SelectionDAG &DAG) const {
+  MachineFunction &MF = DAG.getMachineFunction();
+  const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
+
+  // Stores the assignment of the return value to a location.
+  SmallVector<CCValAssign, 16> RVLocs;
+
+  // Info about the registers and stack slot.
+  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
+                 *DAG.getContext());
+
+  analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
+                    nullptr, CC_RISCV);
+
+  if (CallConv == CallingConv::GHC && !RVLocs.empty())
+    report_fatal_error("GHC functions return void only");
+
+  SDValue Glue;
+  SmallVector<SDValue, 4> RetOps(1, Chain);
+
+  // Copy the result values into the output registers.
+  for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
+    SDValue Val = OutVals[i];
+    CCValAssign &VA = RVLocs[i];
+    assert(VA.isRegLoc() && "Can only return in registers!");
+
+    if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
+      // Handle returning f64 on RV32D with a soft float ABI.
+      assert(VA.isRegLoc() && "Expected return via registers");
+      SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL,
+                                     DAG.getVTList(MVT::i32, MVT::i32), Val);
+      SDValue Lo = SplitF64.getValue(0);
+      SDValue Hi = SplitF64.getValue(1);
+      Register RegLo = VA.getLocReg();
+      assert(RegLo < RISCV::X31 && "Invalid register pair");
+      Register RegHi = RegLo + 1;
+
+      if (STI.isRegisterReservedByUser(RegLo) ||
+          STI.isRegisterReservedByUser(RegHi))
+        MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
+            MF.getFunction(),
+            "Return value register required, but has been reserved."});
+
+      Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue);
+      Glue = Chain.getValue(1);
+      RetOps.push_back(DAG.getRegister(RegLo, MVT::i32));
+      Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue);
+      Glue = Chain.getValue(1);
+      RetOps.push_back(DAG.getRegister(RegHi, MVT::i32));
+    } else {
+      // Handle a 'normal' return.
+      Val = convertValVTToLocVT(DAG, Val, VA, DL, Subtarget);
+      Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);
+
+      if (STI.isRegisterReservedByUser(VA.getLocReg()))
+        MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
+            MF.getFunction(),
+            "Return value register required, but has been reserved."});
+
+      // Guarantee that all emitted copies are stuck together.
+      Glue = Chain.getValue(1);
+      RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
+    }
+  }
+
+  RetOps[0] = Chain; // Update chain.
+
+  // Add the glue node if we have it.
+  if (Glue.getNode()) {
+    RetOps.push_back(Glue);
+  }
+
+  if (any_of(RVLocs,
+             [](CCValAssign &VA) { return VA.getLocVT().isScalableVector(); }))
+    MF.getInfo<RISCVMachineFunctionInfo>()->setIsVectorCall();
+
+  unsigned RetOpc = RISCVISD::RET_FLAG;
+  // Interrupt service routines use different return instructions.
+  const Function &Func = DAG.getMachineFunction().getFunction();
+  if (Func.hasFnAttribute("interrupt")) {
+    if (!Func.getReturnType()->isVoidTy())
+      report_fatal_error(
+          "Functions with the interrupt attribute must have void return type!");
+
+    MachineFunction &MF = DAG.getMachineFunction();
+    StringRef Kind =
+        MF.getFunction().getFnAttribute("interrupt").getValueAsString();
+
+    if (Kind == "user")
+      RetOpc = RISCVISD::URET_FLAG;
+    else if (Kind == "supervisor")
+      RetOpc = RISCVISD::SRET_FLAG;
+    else
+      RetOpc = RISCVISD::MRET_FLAG;
+  }
+
+  return DAG.getNode(RetOpc, DL, MVT::Other, RetOps);
+}
+
+// Diagnose (without aborting) the use of any argument register the user has
+// reserved; a reserved register cannot be used to pass a value.
+void RISCVTargetLowering::validateCCReservedRegs(
+    const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
+    MachineFunction &MF) const {
+  const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
+  const Function &F = MF.getFunction();
+
+  for (const auto &RegAndVal : Regs) {
+    if (STI.isRegisterReservedByUser(RegAndVal.first)) {
+      // Emit a single diagnostic for the first offending register.
+      F.getContext().diagnose(DiagnosticInfoUnsupported{
+          F, "Argument register required, but has been reserved."});
+      return;
+    }
+  }
+}
+
+// Check if the result of the node is only used as a return value, as
+// otherwise we can't perform a tail-call. On success, Chain is set to the
+// operand chain of the consuming CopyToReg.
+bool RISCVTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
+  // The node must produce exactly one value with exactly one user.
+  if (N->getNumValues() != 1 || !N->hasNUsesOfValue(1, 0))
+    return false;
+
+  // That single user must be a CopyToReg.
+  // TODO: Handle additional opcodes in order to support tail-calling libcalls
+  // with soft float ABIs.
+  SDNode *CopyNode = *N->use_begin();
+  if (CopyNode->getOpcode() != ISD::CopyToReg)
+    return false;
+
+  // If the ISD::CopyToReg has a glue operand, we conservatively assume it
+  // isn't safe to perform a tail call.
+  if (CopyNode->getOperand(CopyNode->getNumOperands() - 1).getValueType() ==
+      MVT::Glue)
+    return false;
+
+  // The copy must feed only RISCVISD::RET_FLAG nodes — and at least one.
+  bool FoundRet = false;
+  for (SDNode *User : CopyNode->uses()) {
+    if (User->getOpcode() != RISCVISD::RET_FLAG)
+      return false;
+    FoundRet = true;
+  }
+  if (!FoundRet)
+    return false;
+
+  Chain = CopyNode->getOperand(0);
+  return true;
+}
+
+// A call marked 'tail' in the IR may be emitted as a machine tail call; the
+// remaining legality checks are performed later in LowerCall via
+// isEligibleForTailCallOptimization.
+bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
+  return CI->isTailCall();
+}
+
+// Return the textual name of a RISCVISD target node for DAG dumps/debugging,
+// or nullptr for opcodes that are not RISCV-specific.
+const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
+#define NODE_NAME_CASE(NODE)                                                   \
+  case RISCVISD::NODE:                                                         \
+    return "RISCVISD::" #NODE;
+  // clang-format off
+  switch ((RISCVISD::NodeType)Opcode) {
+  case RISCVISD::FIRST_NUMBER:
+    // Sentinel value, not a real node.
+    break;
+  NODE_NAME_CASE(RET_FLAG)
+  NODE_NAME_CASE(URET_FLAG)
+  NODE_NAME_CASE(SRET_FLAG)
+  NODE_NAME_CASE(MRET_FLAG)
+  NODE_NAME_CASE(CALL)
+  NODE_NAME_CASE(SELECT_CC)
+  NODE_NAME_CASE(BR_CC)
+  NODE_NAME_CASE(BuildPairF64)
+  NODE_NAME_CASE(SplitF64)
+  NODE_NAME_CASE(TAIL)
+  NODE_NAME_CASE(ADD_LO)
+  NODE_NAME_CASE(HI)
+  NODE_NAME_CASE(LLA)
+  NODE_NAME_CASE(ADD_TPREL)
+  NODE_NAME_CASE(LA)
+  NODE_NAME_CASE(LA_TLS_IE)
+  NODE_NAME_CASE(LA_TLS_GD)
+  NODE_NAME_CASE(MULHSU)
+  NODE_NAME_CASE(SLLW)
+  NODE_NAME_CASE(SRAW)
+  NODE_NAME_CASE(SRLW)
+  NODE_NAME_CASE(DIVW)
+  NODE_NAME_CASE(DIVUW)
+  NODE_NAME_CASE(REMUW)
+  NODE_NAME_CASE(ROLW)
+  NODE_NAME_CASE(RORW)
+  NODE_NAME_CASE(CLZW)
+  NODE_NAME_CASE(CTZW)
+  NODE_NAME_CASE(ABSW)
+  NODE_NAME_CASE(FMV_H_X)
+  NODE_NAME_CASE(FMV_X_ANYEXTH)
+  NODE_NAME_CASE(FMV_X_SIGNEXTH)
+  NODE_NAME_CASE(FMV_W_X_RV64)
+  NODE_NAME_CASE(FMV_X_ANYEXTW_RV64)
+  NODE_NAME_CASE(FCVT_X)
+  NODE_NAME_CASE(FCVT_XU)
+  NODE_NAME_CASE(FCVT_W_RV64)
+  NODE_NAME_CASE(FCVT_WU_RV64)
+  NODE_NAME_CASE(STRICT_FCVT_W_RV64)
+  NODE_NAME_CASE(STRICT_FCVT_WU_RV64)
+  NODE_NAME_CASE(FROUND)
+  NODE_NAME_CASE(READ_CYCLE_WIDE)
+  NODE_NAME_CASE(BREV8)
+  NODE_NAME_CASE(ORC_B)
+  NODE_NAME_CASE(ZIP)
+  NODE_NAME_CASE(UNZIP)
+  NODE_NAME_CASE(VMV_V_X_VL)
+  NODE_NAME_CASE(VFMV_V_F_VL)
+  NODE_NAME_CASE(VMV_X_S)
+  NODE_NAME_CASE(VMV_S_X_VL)
+  NODE_NAME_CASE(VFMV_S_F_VL)
+  NODE_NAME_CASE(SPLAT_VECTOR_SPLIT_I64_VL)
+  NODE_NAME_CASE(READ_VLENB)
+  NODE_NAME_CASE(TRUNCATE_VECTOR_VL)
+  NODE_NAME_CASE(VSLIDEUP_VL)
+  NODE_NAME_CASE(VSLIDE1UP_VL)
+  NODE_NAME_CASE(VSLIDEDOWN_VL)
+  NODE_NAME_CASE(VSLIDE1DOWN_VL)
+  NODE_NAME_CASE(VID_VL)
+  NODE_NAME_CASE(VFNCVT_ROD_VL)
+  NODE_NAME_CASE(VECREDUCE_ADD_VL)
+  NODE_NAME_CASE(VECREDUCE_UMAX_VL)
+  NODE_NAME_CASE(VECREDUCE_SMAX_VL)
+  NODE_NAME_CASE(VECREDUCE_UMIN_VL)
+  NODE_NAME_CASE(VECREDUCE_SMIN_VL)
+  NODE_NAME_CASE(VECREDUCE_AND_VL)
+  NODE_NAME_CASE(VECREDUCE_OR_VL)
+  NODE_NAME_CASE(VECREDUCE_XOR_VL)
+  NODE_NAME_CASE(VECREDUCE_FADD_VL)
+  NODE_NAME_CASE(VECREDUCE_SEQ_FADD_VL)
+  NODE_NAME_CASE(VECREDUCE_FMIN_VL)
+  NODE_NAME_CASE(VECREDUCE_FMAX_VL)
+  NODE_NAME_CASE(ADD_VL)
+  NODE_NAME_CASE(AND_VL)
+  NODE_NAME_CASE(MUL_VL)
+  NODE_NAME_CASE(OR_VL)
+  NODE_NAME_CASE(SDIV_VL)
+  NODE_NAME_CASE(SHL_VL)
+  NODE_NAME_CASE(SREM_VL)
+  NODE_NAME_CASE(SRA_VL)
+  NODE_NAME_CASE(SRL_VL)
+  NODE_NAME_CASE(SUB_VL)
+  NODE_NAME_CASE(UDIV_VL)
+  NODE_NAME_CASE(UREM_VL)
+  NODE_NAME_CASE(XOR_VL)
+  NODE_NAME_CASE(SADDSAT_VL)
+  NODE_NAME_CASE(UADDSAT_VL)
+  NODE_NAME_CASE(SSUBSAT_VL)
+  NODE_NAME_CASE(USUBSAT_VL)
+  NODE_NAME_CASE(FADD_VL)
+  NODE_NAME_CASE(FSUB_VL)
+  NODE_NAME_CASE(FMUL_VL)
+  NODE_NAME_CASE(FDIV_VL)
+  NODE_NAME_CASE(FNEG_VL)
+  NODE_NAME_CASE(FABS_VL)
+  NODE_NAME_CASE(FSQRT_VL)
+  NODE_NAME_CASE(VFMADD_VL)
+  NODE_NAME_CASE(VFNMADD_VL)
+  NODE_NAME_CASE(VFMSUB_VL)
+  NODE_NAME_CASE(VFNMSUB_VL)
+  NODE_NAME_CASE(FCOPYSIGN_VL)
+  NODE_NAME_CASE(SMIN_VL)
+  NODE_NAME_CASE(SMAX_VL)
+  NODE_NAME_CASE(UMIN_VL)
+  NODE_NAME_CASE(UMAX_VL)
+  NODE_NAME_CASE(FMINNUM_VL)
+  NODE_NAME_CASE(FMAXNUM_VL)
+  NODE_NAME_CASE(MULHS_VL)
+  NODE_NAME_CASE(MULHU_VL)
+  NODE_NAME_CASE(VFCVT_RTZ_X_F_VL)
+  NODE_NAME_CASE(VFCVT_RTZ_XU_F_VL)
+  NODE_NAME_CASE(VFCVT_RM_X_F_VL)
+  NODE_NAME_CASE(VFCVT_RM_XU_F_VL)
+  NODE_NAME_CASE(VFCVT_X_F_VL)
+  NODE_NAME_CASE(VFCVT_XU_F_VL)
+  NODE_NAME_CASE(VFROUND_NOEXCEPT_VL)
+  NODE_NAME_CASE(SINT_TO_FP_VL)
+  NODE_NAME_CASE(UINT_TO_FP_VL)
+  NODE_NAME_CASE(VFCVT_RM_F_XU_VL)
+  NODE_NAME_CASE(VFCVT_RM_F_X_VL)
+  NODE_NAME_CASE(FP_EXTEND_VL)
+  NODE_NAME_CASE(FP_ROUND_VL)
+  NODE_NAME_CASE(VWMUL_VL)
+  NODE_NAME_CASE(VWMULU_VL)
+  NODE_NAME_CASE(VWMULSU_VL)
+  NODE_NAME_CASE(VWADD_VL)
+  NODE_NAME_CASE(VWADDU_VL)
+  NODE_NAME_CASE(VWSUB_VL)
+  NODE_NAME_CASE(VWSUBU_VL)
+  NODE_NAME_CASE(VWADD_W_VL)
+  NODE_NAME_CASE(VWADDU_W_VL)
+  NODE_NAME_CASE(VWSUB_W_VL)
+  NODE_NAME_CASE(VWSUBU_W_VL)
+  NODE_NAME_CASE(VNSRL_VL)
+  NODE_NAME_CASE(SETCC_VL)
+  NODE_NAME_CASE(VSELECT_VL)
+  NODE_NAME_CASE(VP_MERGE_VL)
+  NODE_NAME_CASE(VMAND_VL)
+  NODE_NAME_CASE(VMOR_VL)
+  NODE_NAME_CASE(VMXOR_VL)
+  NODE_NAME_CASE(VMCLR_VL)
+  NODE_NAME_CASE(VMSET_VL)
+  NODE_NAME_CASE(VRGATHER_VX_VL)
+  NODE_NAME_CASE(VRGATHER_VV_VL)
+  NODE_NAME_CASE(VRGATHEREI16_VV_VL)
+  NODE_NAME_CASE(VSEXT_VL)
+  NODE_NAME_CASE(VZEXT_VL)
+  NODE_NAME_CASE(VCPOP_VL)
+  NODE_NAME_CASE(VFIRST_VL)
+  NODE_NAME_CASE(READ_CSR)
+  NODE_NAME_CASE(WRITE_CSR)
+  NODE_NAME_CASE(SWAP_CSR)
+  }
+  // clang-format on
+  return nullptr;
+#undef NODE_NAME_CASE
+}
+
+/// getConstraintType - Given a constraint letter, return the type of
+/// constraint it is for this target.
+///
+/// Single-letter constraints classified here:
+///   'f'           - an FP register operand (register class).
+///   'I', 'J', 'K' - immediate constraints (validated later in
+///                   LowerAsmOperandForConstraint: simm12, zero, uimm5).
+///   'A'           - a memory operand (address held in a register).
+///   'S'           - a symbolic address, classified as C_Other.
+/// The multi-letter constraints "vr" and "vm" select vector register
+/// classes. Everything else defers to the generic TargetLowering logic.
+RISCVTargetLowering::ConstraintType
+RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
+ if (Constraint.size() == 1) {
+ switch (Constraint[0]) {
+ default:
+ break;
+ case 'f':
+ return C_RegisterClass;
+ case 'I':
+ case 'J':
+ case 'K':
+ return C_Immediate;
+ case 'A':
+ return C_Memory;
+ case 'S': // A symbolic address
+ return C_Other;
+ }
+ } else {
+ if (Constraint == "vr" || Constraint == "vm")
+ return C_RegisterClass;
+ }
+ return TargetLowering::getConstraintType(Constraint);
+}
+
+/// Map an inline-asm register constraint to a (register, register-class)
+/// pair for RISC-V.
+///
+/// Handled in order:
+///  * single-letter class constraints: 'r' (GPR, scalar types only) and
+///    'f' (the FP class matching the requested VT and available
+///    extensions);
+///  * the vector class constraints "vr" (first vector class the type is
+///    legal for) and "vm" (the V0 mask class);
+///  * explicit GPRs named by their ABI aliases (e.g. "{sp}", "{a0}");
+///  * explicit FP registers by architectural ("{f10}") or ABI ("{fa0}")
+///    name, widened to the largest legal FP register class;
+///  * explicit vector registers "{v0}"..."{v31}", widened to a matching
+///    super-register class when the type needs a register group;
+/// and finally the generic TargetLowering lookup, with any Zfinx GPRF*
+/// class it returns remapped to the plain GPR class.
+std::pair<unsigned, const TargetRegisterClass *>
+RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
+ StringRef Constraint,
+ MVT VT) const {
+ // First, see if this is a constraint that directly corresponds to a
+ // RISCV register class.
+ if (Constraint.size() == 1) {
+ switch (Constraint[0]) {
+ case 'r':
+ // TODO: Support fixed vectors up to XLen for P extension?
+ if (VT.isVector())
+ break;
+ return std::make_pair(0U, &RISCV::GPRRegClass);
+ case 'f':
+ if (Subtarget.hasStdExtZfhOrZfhmin() && VT == MVT::f16)
+ return std::make_pair(0U, &RISCV::FPR16RegClass);
+ if (Subtarget.hasStdExtF() && VT == MVT::f32)
+ return std::make_pair(0U, &RISCV::FPR32RegClass);
+ if (Subtarget.hasStdExtD() && VT == MVT::f64)
+ return std::make_pair(0U, &RISCV::FPR64RegClass);
+ break;
+ default:
+ break;
+ }
+ } else if (Constraint == "vr") {
+ for (const auto *RC : {&RISCV::VRRegClass, &RISCV::VRM2RegClass,
+ &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
+ if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy))
+ return std::make_pair(0U, RC);
+ }
+ } else if (Constraint == "vm") {
+ if (TRI->isTypeLegalForClass(RISCV::VMV0RegClass, VT.SimpleTy))
+ return std::make_pair(0U, &RISCV::VMV0RegClass);
+ }
+
+ // Clang will correctly decode the usage of register name aliases into their
+ // official names. However, other frontends like `rustc` do not. This allows
+ // users of these frontends to use the ABI names for registers in LLVM-style
+ // register constraints.
+ unsigned XRegFromAlias = StringSwitch<unsigned>(Constraint.lower())
+ .Case("{zero}", RISCV::X0)
+ .Case("{ra}", RISCV::X1)
+ .Case("{sp}", RISCV::X2)
+ .Case("{gp}", RISCV::X3)
+ .Case("{tp}", RISCV::X4)
+ .Case("{t0}", RISCV::X5)
+ .Case("{t1}", RISCV::X6)
+ .Case("{t2}", RISCV::X7)
+ .Cases("{s0}", "{fp}", RISCV::X8)
+ .Case("{s1}", RISCV::X9)
+ .Case("{a0}", RISCV::X10)
+ .Case("{a1}", RISCV::X11)
+ .Case("{a2}", RISCV::X12)
+ .Case("{a3}", RISCV::X13)
+ .Case("{a4}", RISCV::X14)
+ .Case("{a5}", RISCV::X15)
+ .Case("{a6}", RISCV::X16)
+ .Case("{a7}", RISCV::X17)
+ .Case("{s2}", RISCV::X18)
+ .Case("{s3}", RISCV::X19)
+ .Case("{s4}", RISCV::X20)
+ .Case("{s5}", RISCV::X21)
+ .Case("{s6}", RISCV::X22)
+ .Case("{s7}", RISCV::X23)
+ .Case("{s8}", RISCV::X24)
+ .Case("{s9}", RISCV::X25)
+ .Case("{s10}", RISCV::X26)
+ .Case("{s11}", RISCV::X27)
+ .Case("{t3}", RISCV::X28)
+ .Case("{t4}", RISCV::X29)
+ .Case("{t5}", RISCV::X30)
+ .Case("{t6}", RISCV::X31)
+ .Default(RISCV::NoRegister);
+ if (XRegFromAlias != RISCV::NoRegister)
+ return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass);
+
+ // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the
+ // TableGen record rather than the AsmName to choose registers for InlineAsm
+ // constraints, plus we want to match those names to the widest floating point
+ // register type available, manually select floating point registers here.
+ //
+ // The second case is the ABI name of the register, so that frontends can also
+ // use the ABI names in register constraint lists.
+ if (Subtarget.hasStdExtF()) {
+ unsigned FReg = StringSwitch<unsigned>(Constraint.lower())
+ .Cases("{f0}", "{ft0}", RISCV::F0_F)
+ .Cases("{f1}", "{ft1}", RISCV::F1_F)
+ .Cases("{f2}", "{ft2}", RISCV::F2_F)
+ .Cases("{f3}", "{ft3}", RISCV::F3_F)
+ .Cases("{f4}", "{ft4}", RISCV::F4_F)
+ .Cases("{f5}", "{ft5}", RISCV::F5_F)
+ .Cases("{f6}", "{ft6}", RISCV::F6_F)
+ .Cases("{f7}", "{ft7}", RISCV::F7_F)
+ .Cases("{f8}", "{fs0}", RISCV::F8_F)
+ .Cases("{f9}", "{fs1}", RISCV::F9_F)
+ .Cases("{f10}", "{fa0}", RISCV::F10_F)
+ .Cases("{f11}", "{fa1}", RISCV::F11_F)
+ .Cases("{f12}", "{fa2}", RISCV::F12_F)
+ .Cases("{f13}", "{fa3}", RISCV::F13_F)
+ .Cases("{f14}", "{fa4}", RISCV::F14_F)
+ .Cases("{f15}", "{fa5}", RISCV::F15_F)
+ .Cases("{f16}", "{fa6}", RISCV::F16_F)
+ .Cases("{f17}", "{fa7}", RISCV::F17_F)
+ .Cases("{f18}", "{fs2}", RISCV::F18_F)
+ .Cases("{f19}", "{fs3}", RISCV::F19_F)
+ .Cases("{f20}", "{fs4}", RISCV::F20_F)
+ .Cases("{f21}", "{fs5}", RISCV::F21_F)
+ .Cases("{f22}", "{fs6}", RISCV::F22_F)
+ .Cases("{f23}", "{fs7}", RISCV::F23_F)
+ .Cases("{f24}", "{fs8}", RISCV::F24_F)
+ .Cases("{f25}", "{fs9}", RISCV::F25_F)
+ .Cases("{f26}", "{fs10}", RISCV::F26_F)
+ .Cases("{f27}", "{fs11}", RISCV::F27_F)
+ .Cases("{f28}", "{ft8}", RISCV::F28_F)
+ .Cases("{f29}", "{ft9}", RISCV::F29_F)
+ .Cases("{f30}", "{ft10}", RISCV::F30_F)
+ .Cases("{f31}", "{ft11}", RISCV::F31_F)
+ .Default(RISCV::NoRegister);
+ if (FReg != RISCV::NoRegister) {
+ assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg");
+ // Prefer the widest legal FP class: D (f64), then F (f32), then Zfh
+ // (f16). The matched name is an _F register; shift its number into the
+ // register range of the chosen class.
+ if (Subtarget.hasStdExtD() && (VT == MVT::f64 || VT == MVT::Other)) {
+ unsigned RegNo = FReg - RISCV::F0_F;
+ unsigned DReg = RISCV::F0_D + RegNo;
+ return std::make_pair(DReg, &RISCV::FPR64RegClass);
+ }
+ if (VT == MVT::f32 || VT == MVT::Other)
+ return std::make_pair(FReg, &RISCV::FPR32RegClass);
+ if (Subtarget.hasStdExtZfhOrZfhmin() && VT == MVT::f16) {
+ unsigned RegNo = FReg - RISCV::F0_F;
+ unsigned HReg = RISCV::F0_H + RegNo;
+ return std::make_pair(HReg, &RISCV::FPR16RegClass);
+ }
+ }
+ }
+
+ if (Subtarget.hasVInstructions()) {
+ Register VReg = StringSwitch<Register>(Constraint.lower())
+ .Case("{v0}", RISCV::V0)
+ .Case("{v1}", RISCV::V1)
+ .Case("{v2}", RISCV::V2)
+ .Case("{v3}", RISCV::V3)
+ .Case("{v4}", RISCV::V4)
+ .Case("{v5}", RISCV::V5)
+ .Case("{v6}", RISCV::V6)
+ .Case("{v7}", RISCV::V7)
+ .Case("{v8}", RISCV::V8)
+ .Case("{v9}", RISCV::V9)
+ .Case("{v10}", RISCV::V10)
+ .Case("{v11}", RISCV::V11)
+ .Case("{v12}", RISCV::V12)
+ .Case("{v13}", RISCV::V13)
+ .Case("{v14}", RISCV::V14)
+ .Case("{v15}", RISCV::V15)
+ .Case("{v16}", RISCV::V16)
+ .Case("{v17}", RISCV::V17)
+ .Case("{v18}", RISCV::V18)
+ .Case("{v19}", RISCV::V19)
+ .Case("{v20}", RISCV::V20)
+ .Case("{v21}", RISCV::V21)
+ .Case("{v22}", RISCV::V22)
+ .Case("{v23}", RISCV::V23)
+ .Case("{v24}", RISCV::V24)
+ .Case("{v25}", RISCV::V25)
+ .Case("{v26}", RISCV::V26)
+ .Case("{v27}", RISCV::V27)
+ .Case("{v28}", RISCV::V28)
+ .Case("{v29}", RISCV::V29)
+ .Case("{v30}", RISCV::V30)
+ .Case("{v31}", RISCV::V31)
+ .Default(RISCV::NoRegister);
+ if (VReg != RISCV::NoRegister) {
+ // Try mask and single-register classes first; otherwise widen the
+ // named register to the register group (LMUL>1) class the type needs.
+ if (TRI->isTypeLegalForClass(RISCV::VMRegClass, VT.SimpleTy))
+ return std::make_pair(VReg, &RISCV::VMRegClass);
+ if (TRI->isTypeLegalForClass(RISCV::VRRegClass, VT.SimpleTy))
+ return std::make_pair(VReg, &RISCV::VRRegClass);
+ for (const auto *RC :
+ {&RISCV::VRM2RegClass, &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
+ if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy)) {
+ VReg = TRI->getMatchingSuperReg(VReg, RISCV::sub_vrm1_0, RC);
+ return std::make_pair(VReg, RC);
+ }
+ }
+ }
+ }
+
+ std::pair<Register, const TargetRegisterClass *> Res =
+ TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
+
+ // If we picked one of the Zfinx register classes, remap it to the GPR class.
+ // FIXME: When Zfinx is supported in CodeGen this will need to take the
+ // Subtarget into account.
+ if (Res.second == &RISCV::GPRF16RegClass ||
+ Res.second == &RISCV::GPRF32RegClass ||
+ Res.second == &RISCV::GPRF64RegClass)
+ return std::make_pair(Res.first, &RISCV::GPRRegClass);
+
+ return Res;
+}
+
+/// Translate the single-letter memory constraint 'A' (an address held in a
+/// register) to its InlineAsm constraint id; all other constraint codes are
+/// deferred to the generic TargetLowering implementation.
+unsigned
+RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
+ // Currently only support length 1 constraints.
+ if (ConstraintCode.size() == 1) {
+ switch (ConstraintCode[0]) {
+ case 'A':
+ return InlineAsm::Constraint_A;
+ default:
+ break;
+ }
+ }
+
+ return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
+}
+
+/// Validate and lower an inline-asm operand for a RISC-V immediate or
+/// symbolic constraint, appending the lowered operand to \p Ops on success.
+///
+///   'I' - 12-bit signed immediate.
+///   'J' - the integer constant zero.
+///   'K' - 5-bit unsigned immediate.
+///   'S' - a global or block address, lowered to its target node.
+///
+/// If validation fails nothing is appended (an empty Ops signals the caller
+/// that the operand did not match); unknown constraints defer to the base
+/// class.
+void RISCVTargetLowering::LowerAsmOperandForConstraint(
+ SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
+ SelectionDAG &DAG) const {
+ // Currently only support length 1 constraints.
+ if (Constraint.length() == 1) {
+ switch (Constraint[0]) {
+ case 'I':
+ // Validate & create a 12-bit signed immediate operand.
+ if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
+ uint64_t CVal = C->getSExtValue();
+ if (isInt<12>(CVal))
+ Ops.push_back(
+ DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
+ }
+ return;
+ case 'J':
+ // Validate & create an integer zero operand.
+ if (auto *C = dyn_cast<ConstantSDNode>(Op))
+ if (C->getZExtValue() == 0)
+ Ops.push_back(
+ DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT()));
+ return;
+ case 'K':
+ // Validate & create a 5-bit unsigned immediate operand.
+ if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
+ uint64_t CVal = C->getZExtValue();
+ if (isUInt<5>(CVal))
+ Ops.push_back(
+ DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
+ }
+ return;
+ case 'S':
+ if (const auto *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
+ Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
+ GA->getValueType(0)));
+ } else if (const auto *BA = dyn_cast<BlockAddressSDNode>(Op)) {
+ Ops.push_back(DAG.getTargetBlockAddress(BA->getBlockAddress(),
+ BA->getValueType(0)));
+ }
+ return;
+ default:
+ break;
+ }
+ }
+ TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
+}
+
+/// Emit the fence required *before* an atomic load or store, per the fence
+/// insertion scheme this backend uses: a seq_cst load gets a leading fence
+/// with its own ordering, and a release-or-stronger store gets a leading
+/// release fence. Returns nullptr when no leading fence is needed.
+Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
+ Instruction *Inst,
+ AtomicOrdering Ord) const {
+ if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
+ return Builder.CreateFence(Ord);
+ if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
+ return Builder.CreateFence(AtomicOrdering::Release);
+ return nullptr;
+}
+
+/// Emit the fence required *after* an atomic operation: an
+/// acquire-or-stronger load gets a trailing acquire fence. Stores need no
+/// trailing fence under this scheme, so nullptr is returned for them.
+Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
+ Instruction *Inst,
+ AtomicOrdering Ord) const {
+ if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
+ return Builder.CreateFence(AtomicOrdering::Acquire);
+ return nullptr;
+}
+
+/// Decide how an atomicrmw instruction should be expanded:
+///  * FP operations and UIncWrap/UDecWrap -> compare-exchange loop;
+///  * forced-atomics subtargets -> no expansion (use __sync libcalls);
+///  * 8/16-bit integer RMW -> the target's masked intrinsics;
+///  * everything else -> native, no expansion.
+TargetLowering::AtomicExpansionKind
+RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
+ // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating
+ // point operations can't be used in an lr/sc sequence without breaking the
+ // forward-progress guarantee.
+ if (AI->isFloatingPointOperation() ||
+ AI->getOperation() == AtomicRMWInst::UIncWrap ||
+ AI->getOperation() == AtomicRMWInst::UDecWrap)
+ return AtomicExpansionKind::CmpXChg;
+
+ // Don't expand forced atomics, we want to have __sync libcalls instead.
+ if (Subtarget.hasForcedAtomics())
+ return AtomicExpansionKind::None;
+
+ unsigned Size = AI->getType()->getPrimitiveSizeInBits();
+ if (Size == 8 || Size == 16)
+ return AtomicExpansionKind::MaskedIntrinsic;
+ return AtomicExpansionKind::None;
+}
+
+/// Map an AtomicRMW binary opcode to the corresponding riscv_masked_atomicrmw
+/// intrinsic for the given XLen (32 or 64). Aborts on any opcode that has no
+/// masked form, and on an XLen other than 32/64.
+static Intrinsic::ID
+getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
+ if (XLen == 32) {
+ switch (BinOp) {
+ default:
+ llvm_unreachable("Unexpected AtomicRMW BinOp");
+ case AtomicRMWInst::Xchg:
+ return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
+ case AtomicRMWInst::Add:
+ return Intrinsic::riscv_masked_atomicrmw_add_i32;
+ case AtomicRMWInst::Sub:
+ return Intrinsic::riscv_masked_atomicrmw_sub_i32;
+ case AtomicRMWInst::Nand:
+ return Intrinsic::riscv_masked_atomicrmw_nand_i32;
+ case AtomicRMWInst::Max:
+ return Intrinsic::riscv_masked_atomicrmw_max_i32;
+ case AtomicRMWInst::Min:
+ return Intrinsic::riscv_masked_atomicrmw_min_i32;
+ case AtomicRMWInst::UMax:
+ return Intrinsic::riscv_masked_atomicrmw_umax_i32;
+ case AtomicRMWInst::UMin:
+ return Intrinsic::riscv_masked_atomicrmw_umin_i32;
+ }
+ }
+
+ if (XLen == 64) {
+ switch (BinOp) {
+ default:
+ llvm_unreachable("Unexpected AtomicRMW BinOp");
+ case AtomicRMWInst::Xchg:
+ return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
+ case AtomicRMWInst::Add:
+ return Intrinsic::riscv_masked_atomicrmw_add_i64;
+ case AtomicRMWInst::Sub:
+ return Intrinsic::riscv_masked_atomicrmw_sub_i64;
+ case AtomicRMWInst::Nand:
+ return Intrinsic::riscv_masked_atomicrmw_nand_i64;
+ case AtomicRMWInst::Max:
+ return Intrinsic::riscv_masked_atomicrmw_max_i64;
+ case AtomicRMWInst::Min:
+ return Intrinsic::riscv_masked_atomicrmw_min_i64;
+ case AtomicRMWInst::UMax:
+ return Intrinsic::riscv_masked_atomicrmw_umax_i64;
+ case AtomicRMWInst::UMin:
+ return Intrinsic::riscv_masked_atomicrmw_umin_i64;
+ }
+ }
+
+ llvm_unreachable("Unexpected XLen\n");
+}
+
+/// Emit a call to the masked atomicrmw intrinsic that implements a sub-word
+/// (8/16-bit) RMW as an LR/SC loop over the containing aligned word.
+///
+/// On RV64 the i32-typed operands are sign-extended to i64 for the i64
+/// intrinsic and the result is truncated back afterwards. Signed min/max
+/// additionally receive a shift amount (XLen - ShiftAmt - ValWidth) the
+/// loop uses to sign-extend the loaded field before comparing.
+Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
+ IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
+ Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
+ unsigned XLen = Subtarget.getXLen();
+ Value *Ordering =
+ Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
+ Type *Tys[] = {AlignedAddr->getType()};
+ Function *LrwOpScwLoop = Intrinsic::getDeclaration(
+ AI->getModule(),
+ getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);
+
+ if (XLen == 64) {
+ Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty());
+ Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
+ ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty());
+ }
+
+ Value *Result;
+
+ // Must pass the shift amount needed to sign extend the loaded value prior
+ // to performing a signed comparison for min/max. ShiftAmt is the number of
+ // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
+ // is the number of bits to left+right shift the value in order to
+ // sign-extend.
+ if (AI->getOperation() == AtomicRMWInst::Min ||
+ AI->getOperation() == AtomicRMWInst::Max) {
+ const DataLayout &DL = AI->getModule()->getDataLayout();
+ unsigned ValWidth =
+ DL.getTypeStoreSizeInBits(AI->getValOperand()->getType());
+ Value *SextShamt =
+ Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt);
+ Result = Builder.CreateCall(LrwOpScwLoop,
+ {AlignedAddr, Incr, Mask, SextShamt, Ordering});
+ } else {
+ Result =
+ Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
+ }
+
+ if (XLen == 64)
+ Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
+ return Result;
+}
+
+/// Decide how cmpxchg should be expanded: none for forced-atomics
+/// subtargets (so __sync libcalls are used), masked intrinsic for 8/16-bit
+/// operands, and native otherwise.
+TargetLowering::AtomicExpansionKind
+RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
+ AtomicCmpXchgInst *CI) const {
+ // Don't expand forced atomics, we want to have __sync libcalls instead.
+ if (Subtarget.hasForcedAtomics())
+ return AtomicExpansionKind::None;
+
+ unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
+ if (Size == 8 || Size == 16)
+ return AtomicExpansionKind::MaskedIntrinsic;
+ return AtomicExpansionKind::None;
+}
+
+/// Emit a call to the riscv_masked_cmpxchg intrinsic implementing a
+/// sub-word compare-exchange on the containing aligned word. As with the
+/// RMW path, RV64 sign-extends the i32 operands to i64 for the i64
+/// intrinsic and truncates the result back to i32.
+Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
+ IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
+ Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
+ unsigned XLen = Subtarget.getXLen();
+ Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
+ Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
+ if (XLen == 64) {
+ CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
+ NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
+ Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
+ CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
+ }
+ Type *Tys[] = {AlignedAddr->getType()};
+ Function *MaskedCmpXchg =
+ Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
+ Value *Result = Builder.CreateCall(
+ MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
+ if (XLen == 64)
+ Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
+ return Result;
+}
+
+/// Never strip extends from gather/scatter indices on this target.
+bool RISCVTargetLowering::shouldRemoveExtendFromGSIndex(EVT IndexVT,
+ EVT DataVT) const {
+ return false;
+}
+
+/// Allow forming saturating FP-to-int conversions only when the integer
+/// operation is legal/custom for VT and the source FP type is supported by
+/// the subtarget's FP extensions (Zfh/Zfhmin for f16, F for f32, D for f64).
+bool RISCVTargetLowering::shouldConvertFpToSat(unsigned Op, EVT FPVT,
+ EVT VT) const {
+ if (!isOperationLegalOrCustom(Op, VT) || !FPVT.isSimple())
+ return false;
+
+ switch (FPVT.getSimpleVT().SimpleTy) {
+ case MVT::f16:
+ return Subtarget.hasStdExtZfhOrZfhmin();
+ case MVT::f32:
+ return Subtarget.hasStdExtF();
+ case MVT::f64:
+ return Subtarget.hasStdExtD();
+ default:
+ return false;
+ }
+}
+
+/// Pick the jump-table entry encoding. On non-PIC RV64 with the small code
+/// model, use the custom 4-byte entries (EK_Custom32) produced by
+/// LowerCustomJumpTableEntry below; otherwise use the default encoding.
+unsigned RISCVTargetLowering::getJumpTableEncoding() const {
+ // If we are using the small code model, we can reduce size of jump table
+ // entry to 4 bytes.
+ if (Subtarget.is64Bit() && !isPositionIndependent() &&
+ getTargetMachine().getCodeModel() == CodeModel::Small) {
+ return MachineJumpTableInfo::EK_Custom32;
+ }
+ return TargetLowering::getJumpTableEncoding();
+}
+
+/// Emit one EK_Custom32 jump-table entry: a plain reference to the target
+/// basic block's symbol. Only valid under the configuration checked by
+/// getJumpTableEncoding (non-PIC RV64, small code model).
+const MCExpr *RISCVTargetLowering::LowerCustomJumpTableEntry(
+ const MachineJumpTableInfo *MJTI, const MachineBasicBlock *MBB,
+ unsigned uid, MCContext &Ctx) const {
+ assert(Subtarget.is64Bit() && !isPositionIndependent() &&
+ getTargetMachine().getCodeModel() == CodeModel::Small);
+ return MCSymbolRefExpr::create(MBB->getSymbol(), Ctx);
+}
+
+/// Report that vscale is always a power of two on this target (see the
+/// reasoning in the body); asserts guard the assumptions it rests on.
+bool RISCVTargetLowering::isVScaleKnownToBeAPowerOfTwo() const {
+ // We define vscale to be VLEN/RVVBitsPerBlock. VLEN is always a power
+ // of two >= 64, and RVVBitsPerBlock is 64. Thus, vscale must be
+ // a power of two as well.
+ // FIXME: This doesn't work for zve32, but that's already broken
+ // elsewhere for the same reason.
+ assert(Subtarget.getRealMinVLen() >= 64 && "zve32* unsupported");
+ static_assert(RISCV::RVVBitsPerBlock == 64,
+ "RVVBitsPerBlock changed, audit needed");
+ return true;
+}
+
+/// Return true when a fused multiply-add is preferable to separate
+/// fmul/fadd for VT, i.e. when the subtarget has FMA-capable support for
+/// the scalar element type (vector f16 additionally requires the vector
+/// f16 feature rather than scalar Zfh).
+bool RISCVTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
+ EVT VT) const {
+ EVT SVT = VT.getScalarType();
+
+ if (!SVT.isSimple())
+ return false;
+
+ switch (SVT.getSimpleVT().SimpleTy) {
+ case MVT::f16:
+ return VT.isVector() ? Subtarget.hasVInstructionsF16()
+ : Subtarget.hasStdExtZfh();
+ case MVT::f32:
+ return Subtarget.hasStdExtF();
+ case MVT::f64:
+ return Subtarget.hasStdExtD();
+ default:
+ break;
+ }
+
+ return false;
+}
+
+/// Register holding the exception pointer during EH: X10 (ABI name a0 per
+/// the alias table in getRegForInlineAsmConstraint).
+Register RISCVTargetLowering::getExceptionPointerRegister(
+ const Constant *PersonalityFn) const {
+ return RISCV::X10;
+}
+
+/// Register holding the exception selector during EH: X11 (ABI name a1).
+Register RISCVTargetLowering::getExceptionSelectorRegister(
+ const Constant *PersonalityFn) const {
+ return RISCV::X11;
+}
+
+/// Whether a libcall argument/return of the given type should be widened.
+/// f32 under the LP64 ABI is exempted to suppress unnecessary extensions.
+bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
+ // Return false to suppress the unnecessary extensions if the LibCall
+ // arguments or return value is f32 type for LP64 ABI.
+ RISCVABI::ABI ABI = Subtarget.getTargetABI();
+ if (ABI == RISCVABI::ABI_LP64 && (Type == MVT::f32))
+ return false;
+
+ return true;
+}
+
+/// Whether a libcall value of this type must be *sign*-extended: i32 on
+/// RV64 always is (matching the register convention for 32-bit values);
+/// otherwise follow the value's own signedness.
+bool RISCVTargetLowering::shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
+ if (Subtarget.is64Bit() && Type == MVT::i32)
+ return true;
+
+ return IsSigned;
+}
+
+/// Return true when a MUL by the constant \p C is worth decomposing into
+/// shift/add(/sub) sequences for scalar integer VT. The individual
+/// patterns and the interaction with the M/Zmmul multiply extensions are
+/// explained inline below.
+bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
+ SDValue C) const {
+ // Check integral scalar types.
+ const bool HasExtMOrZmmul =
+ Subtarget.hasStdExtM() || Subtarget.hasStdExtZmmul();
+ if (VT.isScalarInteger()) {
+ // Omit the optimization if the sub target has the M extension and the data
+ // size exceeds XLen.
+ if (HasExtMOrZmmul && VT.getSizeInBits() > Subtarget.getXLen())
+ return false;
+ if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
+ // Break the MUL to a SLLI and an ADD/SUB.
+ const APInt &Imm = ConstNode->getAPIntValue();
+ if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
+ (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
+ return true;
+ // Optimize the MUL to (SH*ADD x, (SLLI x, bits)) if Imm is not simm12.
+ if (Subtarget.hasStdExtZba() && !Imm.isSignedIntN(12) &&
+ ((Imm - 2).isPowerOf2() || (Imm - 4).isPowerOf2() ||
+ (Imm - 8).isPowerOf2()))
+ return true;
+ // Omit the following optimization if the sub target has the M extension
+ // and the data size >= XLen.
+ if (HasExtMOrZmmul && VT.getSizeInBits() >= Subtarget.getXLen())
+ return false;
+ // Break the MUL to two SLLI instructions and an ADD/SUB, if Imm needs
+ // a pair of LUI/ADDI.
+ if (!Imm.isSignedIntN(12) && Imm.countTrailingZeros() < 12) {
+ APInt ImmS = Imm.ashr(Imm.countTrailingZeros());
+ if ((ImmS + 1).isPowerOf2() || (ImmS - 1).isPowerOf2() ||
+ (1 - ImmS).isPowerOf2())
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+/// Whether folding (add x, c1) * c2 into a single mul-add is profitable.
+/// Declines only the scalar case where c1 fits in simm12 but c1*c2 does
+/// not (losing the cheap immediate); vectors, wide types, and everything
+/// else are left to the DAGCombiner.
+bool RISCVTargetLowering::isMulAddWithConstProfitable(SDValue AddNode,
+ SDValue ConstNode) const {
+ // Let the DAGCombiner decide for vectors.
+ EVT VT = AddNode.getValueType();
+ if (VT.isVector())
+ return true;
+
+ // Let the DAGCombiner decide for larger types.
+ if (VT.getScalarSizeInBits() > Subtarget.getXLen())
+ return true;
+
+ // It is worse if c1 is simm12 while c1*c2 is not.
+ ConstantSDNode *C1Node = cast<ConstantSDNode>(AddNode.getOperand(1));
+ ConstantSDNode *C2Node = cast<ConstantSDNode>(ConstNode);
+ const APInt &C1 = C1Node->getAPIntValue();
+ const APInt &C2 = C2Node->getAPIntValue();
+ if (C1.isSignedIntN(12) && !(C1 * C2).isSignedIntN(12))
+ return false;
+
+ // Default to true and let the DAGCombiner decide.
+ return true;
+}
+
+/// Report whether a misaligned access of VT is allowed, optionally setting
+/// *Fast. Scalars follow the subtarget's unaligned-scalar-mem setting
+/// (reported as slow); vectors are allowed — and fast — down to element
+/// alignment, which every vector implementation must support.
+bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
+ EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
+ unsigned *Fast) const {
+ if (!VT.isVector()) {
+ if (Fast)
+ *Fast = 0;
+ return Subtarget.enableUnalignedScalarMem();
+ }
+
+ // All vector implementations must support element alignment
+ EVT ElemVT = VT.getVectorElementType();
+ if (Alignment >= ElemVT.getStoreSize()) {
+ if (Fast)
+ *Fast = 1;
+ return true;
+ }
+
+ return false;
+}
+
+/// Target hook for splitting Val into NumParts registers of PartVT.
+/// Returns true when a custom split was performed (Parts filled in),
+/// false to fall back to the generic splitting.
+///
+/// Two custom cases:
+///  * ABI copy of f16 into an f32 register: NaN-box by padding the upper
+///    16 bits with ones;
+///  * scalable vector into a wider scalable vector part whose bit size is
+///    a multiple of the value's: insert_subvector at index 0 (bitcasting
+///    through a same-element-type VT if the element types differ).
+bool RISCVTargetLowering::splitValueIntoRegisterParts(
+ SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
+ unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {
+ bool IsABIRegCopy = CC.has_value();
+ EVT ValueVT = Val.getValueType();
+ if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
+ // Cast the f16 to i16, extend to i32, pad with ones to make a float nan,
+ // and cast to f32.
+ Val = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Val);
+ Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Val);
+ Val = DAG.getNode(ISD::OR, DL, MVT::i32, Val,
+ DAG.getConstant(0xFFFF0000, DL, MVT::i32));
+ Val = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Val);
+ Parts[0] = Val;
+ return true;
+ }
+
+ if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
+ LLVMContext &Context = *DAG.getContext();
+ EVT ValueEltVT = ValueVT.getVectorElementType();
+ EVT PartEltVT = PartVT.getVectorElementType();
+ unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinValue();
+ unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinValue();
+ if (PartVTBitSize % ValueVTBitSize == 0) {
+ assert(PartVTBitSize >= ValueVTBitSize);
+ // If the element types are different, bitcast to the same element type of
+ // PartVT first.
+ // Give an example here, we want copy a <vscale x 1 x i8> value to
+ // <vscale x 4 x i16>.
+ // We need to convert <vscale x 1 x i8> to <vscale x 8 x i8> by insert
+ // subvector, then we can bitcast to <vscale x 4 x i16>.
+ if (ValueEltVT != PartEltVT) {
+ if (PartVTBitSize > ValueVTBitSize) {
+ unsigned Count = PartVTBitSize / ValueEltVT.getFixedSizeInBits();
+ assert(Count != 0 && "The number of element should not be zero.");
+ EVT SameEltTypeVT =
+ EVT::getVectorVT(Context, ValueEltVT, Count, /*IsScalable=*/true);
+ Val = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, SameEltTypeVT,
+ DAG.getUNDEF(SameEltTypeVT), Val,
+ DAG.getVectorIdxConstant(0, DL));
+ }
+ Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
+ } else {
+ Val =
+ DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
+ Val, DAG.getVectorIdxConstant(0, DL));
+ }
+ Parts[0] = Val;
+ return true;
+ }
+ }
+ return false;
+}
+
+/// Inverse of splitValueIntoRegisterParts: reassemble a value of ValueVT
+/// from register parts. Returns the joined value for the two custom cases
+/// (f16 unboxed from an f32 ABI register; scalable vector extracted from a
+/// wider scalable part), or an empty SDValue to use the generic join.
+SDValue RISCVTargetLowering::joinRegisterPartsIntoValue(
+ SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
+ MVT PartVT, EVT ValueVT, std::optional<CallingConv::ID> CC) const {
+ bool IsABIRegCopy = CC.has_value();
+ if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
+ SDValue Val = Parts[0];
+
+ // Cast the f32 to i32, truncate to i16, and cast back to f16.
+ Val = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Val);
+ Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Val);
+ Val = DAG.getNode(ISD::BITCAST, DL, MVT::f16, Val);
+ return Val;
+ }
+
+ if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
+ LLVMContext &Context = *DAG.getContext();
+ SDValue Val = Parts[0];
+ EVT ValueEltVT = ValueVT.getVectorElementType();
+ EVT PartEltVT = PartVT.getVectorElementType();
+ unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinValue();
+ unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinValue();
+ if (PartVTBitSize % ValueVTBitSize == 0) {
+ assert(PartVTBitSize >= ValueVTBitSize);
+ EVT SameEltTypeVT = ValueVT;
+ // If the element types are different, convert it to the same element type
+ // of PartVT.
+ // Give an example here, we want copy a <vscale x 1 x i8> value from
+ // <vscale x 4 x i16>.
+ // We need to convert <vscale x 4 x i16> to <vscale x 8 x i8> first,
+ // then we can extract <vscale x 1 x i8>.
+ if (ValueEltVT != PartEltVT) {
+ unsigned Count = PartVTBitSize / ValueEltVT.getFixedSizeInBits();
+ assert(Count != 0 && "The number of element should not be zero.");
+ SameEltTypeVT =
+ EVT::getVectorVT(Context, ValueEltVT, Count, /*IsScalable=*/true);
+ Val = DAG.getNode(ISD::BITCAST, DL, SameEltTypeVT, Val);
+ }
+ Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
+ DAG.getVectorIdxConstant(0, DL));
+ return Val;
+ }
+ }
+ return SDValue();
+}
+
+/// Treat scalar integer division as "cheap" only when the caller is
+/// minsize, where a real div instruction beats the expanded sequence.
+bool RISCVTargetLowering::isIntDivCheap(EVT VT, AttributeList Attr) const {
+ // When aggressively optimizing for code size, we prefer to use a div
+ // instruction, as it is usually smaller than the alternative sequence.
+ // TODO: Add vector division?
+ bool OptSize = Attr.hasFnAttr(Attribute::MinSize);
+ return OptSize && !VT.isVector();
+}
+
+/// Prefer keeping splats of zext/sext unscalarized so they can still match
+/// the widening vector instructions; scalarize everything else.
+bool RISCVTargetLowering::preferScalarizeSplat(unsigned Opc) const {
+ // Scalarize zero_ext and sign_ext might stop match to widening instruction in
+ // some situation.
+ if (Opc == ISD::ZERO_EXTEND || Opc == ISD::SIGN_EXTEND)
+ return false;
+ return true;
+}
+
+/// Build an IR pointer to thread-pointer + Offset by calling the
+/// llvm.thread.pointer intrinsic and GEPing Offset bytes from it.
+static Value *useTpOffset(IRBuilderBase &IRB, unsigned Offset) {
+ Module *M = IRB.GetInsertBlock()->getParent()->getParent();
+ Function *ThreadPointerFunc =
+ Intrinsic::getDeclaration(M, Intrinsic::thread_pointer);
+ return IRB.CreatePointerCast(
+ IRB.CreateConstGEP1_32(IRB.getInt8Ty(),
+ IRB.CreateCall(ThreadPointerFunc), Offset),
+ IRB.getInt8PtrTy()->getPointerTo(0));
+}
+
+/// Location of the stack-protector cookie: on Fuchsia it lives at a fixed
+/// TLS slot (tp - 0x10); elsewhere use the generic mechanism.
+Value *RISCVTargetLowering::getIRStackGuard(IRBuilderBase &IRB) const {
+ // Fuchsia provides a fixed TLS slot for the stack cookie.
+ // <zircon/tls.h> defines ZX_TLS_STACK_GUARD_OFFSET with this value.
+ if (Subtarget.isTargetFuchsia())
+ return useTpOffset(IRB, -0x10);
+
+ return TargetLowering::getIRStackGuard(IRB);
+}
+
+// Pull in MatchRegisterName/MatchRegisterAltName from the TableGen'erated
+// asm matcher for use by getRegisterByName below.
+#define GET_REGISTER_MATCHER
+#include "RISCVGenAsmMatcher.inc"
+
+/// Resolve a register name (ABI alias first, then architectural name) for
+/// llvm.read_register/llvm.write_register. Fatal-errors on unknown names
+/// and on registers that are neither globally reserved nor reserved by the
+/// user, since reading/writing an allocatable register this way is unsafe.
+Register
+RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
+ const MachineFunction &MF) const {
+ Register Reg = MatchRegisterAltName(RegName);
+ if (Reg == RISCV::NoRegister)
+ Reg = MatchRegisterName(RegName);
+ if (Reg == RISCV::NoRegister)
+ report_fatal_error(
+ Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
+ BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
+ if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg))
+ report_fatal_error(Twine("Trying to obtain non-reserved register \"" +
+ StringRef(RegName) + "\"."));
+ return Reg;
+}
+
+// Instantiate the TableGen'erated RVV intrinsics lookup table in its own
+// namespace; only the searchable-table implementation lives here.
+namespace llvm::RISCVVIntrinsicsTable {
+
+#define GET_RISCVVIntrinsicsTable_IMPL
+#include "RISCVGenSearchableTables.inc"
+
+} // namespace llvm::RISCVVIntrinsicsTable
diff --git a/contrib/libs/llvm16/lib/Target/RISCV/RISCVISelLowering.h b/contrib/libs/llvm16/lib/Target/RISCV/RISCVISelLowering.h
new file mode 100644
index 0000000000..5086e6c0bd
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/RISCV/RISCVISelLowering.h
@@ -0,0 +1,783 @@
+//===-- RISCVISelLowering.h - RISCV DAG Lowering Interface ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the interfaces that RISCV uses to lower LLVM code into a
+// selection DAG.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_RISCV_RISCVISELLOWERING_H
+#define LLVM_LIB_TARGET_RISCV_RISCVISELLOWERING_H
+
+#include "RISCV.h"
+#include "llvm/CodeGen/CallingConvLower.h"
+#include "llvm/CodeGen/SelectionDAG.h"
+#include "llvm/CodeGen/TargetLowering.h"
+#include "llvm/TargetParser/RISCVTargetParser.h"
+#include <optional>
+
+namespace llvm {
+class RISCVSubtarget;
+struct RISCVRegisterInfo;
+namespace RISCVISD {
+// Target-specific SelectionDAG node kinds for the RISC-V backend.
+enum NodeType : unsigned {
+  FIRST_NUMBER = ISD::BUILTIN_OP_END,
+  RET_FLAG,
+  URET_FLAG,
+  SRET_FLAG,
+  MRET_FLAG,
+  CALL,
+  /// Select with condition operator - This selects between a true value and
+  /// a false value (ops #3 and #4) based on the boolean result of comparing
+  /// the lhs and rhs (ops #0 and #1) of a conditional expression with the
+  /// condition code in op #2, a XLenVT constant from the ISD::CondCode enum.
+  /// The lhs and rhs are XLenVT integers. The true and false values can be
+  /// integer or floating point.
+  SELECT_CC,
+  BR_CC,
+  BuildPairF64,
+  SplitF64,
+  TAIL,
+
+  // Add the Lo 12 bits from an address. Selected to ADDI.
+  ADD_LO,
+  // Get the Hi 20 bits from an address. Selected to LUI.
+  HI,
+
+  // Represents an AUIPC+ADDI pair. Selected to PseudoLLA.
+  LLA,
+
+  // Selected as PseudoAddTPRel. Used to emit a TP-relative relocation.
+  ADD_TPREL,
+
+  // Load address.
+  LA_TLS_GD,
+
+  // Multiply high for signed x unsigned.
+  MULHSU,
+  // RV64I shifts, directly matching the semantics of the named RISC-V
+  // instructions.
+  SLLW,
+  SRAW,
+  SRLW,
+  // 32-bit operations from RV64M that can't be simply matched with a pattern
+  // at instruction selection time. These have undefined behavior for division
+  // by 0 or overflow (divw) like their target independent counterparts.
+  DIVW,
+  DIVUW,
+  REMUW,
+  // RV64IB rotates, directly matching the semantics of the named RISC-V
+  // instructions.
+  ROLW,
+  RORW,
+  // RV64IZbb bit counting instructions directly matching the semantics of the
+  // named RISC-V instructions.
+  CLZW,
+  CTZW,
+
+  // RV64IZbb absolute value for i32. Expanded to (max (negw X), X) during isel.
+  ABSW,
+
+  // FPR<->GPR transfer operations when the FPR is smaller than XLEN, needed as
+  // XLEN is the only legal integer width.
+  //
+  // FMV_H_X matches the semantics of the FMV.H.X.
+  // FMV_X_ANYEXTH is similar to FMV.X.H but has an any-extended result.
+  // FMV_X_SIGNEXTH is similar to FMV.X.H and has a sign-extended result.
+  // FMV_W_X_RV64 matches the semantics of the FMV.W.X.
+  // FMV_X_ANYEXTW_RV64 is similar to FMV.X.W but has an any-extended result.
+  //
+  // This is a more convenient semantic for producing dagcombines that remove
+  // unnecessary GPR->FPR->GPR moves.
+  FMV_H_X,
+  FMV_X_ANYEXTH,
+  FMV_X_SIGNEXTH,
+  FMV_W_X_RV64,
+  FMV_X_ANYEXTW_RV64,
+  // FP to XLen int conversions. Corresponds to fcvt.l(u).s/d/h on RV64 and
+  // fcvt.w(u).s/d/h on RV32. Unlike FP_TO_S/UINT these saturate out of
+  // range inputs. These are used for FP_TO_S/UINT_SAT lowering. Rounding mode
+  // is passed as a TargetConstant operand using the RISCVFPRndMode enum.
+  FCVT_X,
+  FCVT_XU,
+  // FP to 32 bit int conversions for RV64. These are used to keep track of the
+  // result being sign extended to 64 bit. These saturate out of range inputs.
+  // Used for FP_TO_S/UINT and FP_TO_S/UINT_SAT lowering. Rounding mode
+  // is passed as a TargetConstant operand using the RISCVFPRndMode enum.
+  FCVT_W_RV64,
+  FCVT_WU_RV64,
+
+  // Rounds an FP value to its corresponding integer in the same FP format.
+  // First operand is the value to round, the second operand is the largest
+  // integer that can be represented exactly in the FP format. This will be
+  // expanded into multiple instructions and basic blocks with a custom
+  // inserter.
+  FROUND,
+
+  // READ_CYCLE_WIDE - A read of the 64-bit cycle CSR on a 32-bit target
+  // (returns (Lo, Hi)). It takes a chain operand.
+  READ_CYCLE_WIDE,
+  // brev8, orc.b, zip, and unzip from Zbb and Zbkb. All operands are i32 or
+  // XLenVT.
+  BREV8,
+  ORC_B,
+  ZIP,
+  UNZIP,
+  // Vector Extension
+  // VMV_V_X_VL matches the semantics of vmv.v.x but includes an extra operand
+  // for the VL value to be used for the operation. The first operand is
+  // passthru operand.
+  VMV_V_X_VL,
+  // VFMV_V_F_VL matches the semantics of vfmv.v.f but includes an extra operand
+  // for the VL value to be used for the operation. The first operand is
+  // passthru operand.
+  VFMV_V_F_VL,
+  // VMV_X_S matches the semantics of vmv.x.s. The result is always XLenVT sign
+  // extended from the vector element size.
+  VMV_X_S,
+  // VMV_S_X_VL matches the semantics of vmv.s.x. It carries a VL operand.
+  VMV_S_X_VL,
+  // VFMV_S_F_VL matches the semantics of vfmv.s.f. It carries a VL operand.
+  VFMV_S_F_VL,
+  // Splats a 64-bit value that has been split into two i32 parts. This is
+  // expanded late to two scalar stores and a stride 0 vector load.
+  // The first operand is passthru operand.
+  SPLAT_VECTOR_SPLIT_I64_VL,
+  // Read VLENB CSR
+  READ_VLENB,
+  // Truncates a RVV integer vector by one power-of-two. Carries both an extra
+  // mask and VL operand.
+  TRUNCATE_VECTOR_VL,
+  // Matches the semantics of vslideup/vslidedown. The first operand is the
+  // pass-thru operand, the second is the source vector, the third is the
+  // XLenVT index (either constant or non-constant), the fourth is the mask
+  // and the fifth the VL.
+  VSLIDEUP_VL,
+  VSLIDEDOWN_VL,
+  // Matches the semantics of vslide1up/vslide1down. The first operand is
+  // passthru operand, the second is source vector, third is the XLenVT scalar
+  // value. The fourth and fifth operands are the mask and VL operands.
+  VSLIDE1UP_VL,
+  VSLIDE1DOWN_VL,
+  // Matches the semantics of the vid.v instruction, with a mask and VL
+  // operand.
+  VID_VL,
+  // Matches the semantics of the vfncvt.rod function (Convert double-width
+  // float to single-width float, rounding towards odd). Takes a double-width
+  // float vector and produces a single-width float vector. Also has a mask and
+  // VL operand.
+  VFNCVT_ROD_VL,
+  // These nodes match the semantics of the corresponding RVV vector reduction
+  // instructions. They produce a vector result which is the reduction
+  // performed over the second vector operand plus the first element of the
+  // third vector operand. The first operand is the pass-thru operand. The
+  // second operand is an unconstrained vector type, and the result, first, and
+  // third operand's types are expected to be the corresponding full-width
+  // LMUL=1 type for the second operand:
+  //   nxv8i8 = vecreduce_add nxv8i8, nxv32i8, nxv8i8
+  //   nxv2i32 = vecreduce_add nxv2i32, nxv8i32, nxv2i32
+  // The difference in types does introduce extra vsetvli instructions but
+  // similarly it reduces the number of registers consumed per reduction.
+  // Also has a mask and VL operand.
+  VECREDUCE_ADD_VL,
+  VECREDUCE_UMAX_VL,
+  VECREDUCE_SMAX_VL,
+  VECREDUCE_UMIN_VL,
+  VECREDUCE_SMIN_VL,
+  VECREDUCE_AND_VL,
+  VECREDUCE_OR_VL,
+  VECREDUCE_XOR_VL,
+  VECREDUCE_FADD_VL,
+  VECREDUCE_SEQ_FADD_VL,
+  VECREDUCE_FMIN_VL,
+  VECREDUCE_FMAX_VL,
+
+  // Vector binary ops with a merge as a third operand, a mask as a fourth
+  // operand, and VL as a fifth operand.
+  ADD_VL,
+  AND_VL,
+  MUL_VL,
+  OR_VL,
+  SDIV_VL,
+  SHL_VL,
+  SREM_VL,
+  SRA_VL,
+  SRL_VL,
+  SUB_VL,
+  UDIV_VL,
+  UREM_VL,
+  XOR_VL,
+  SMIN_VL,
+  SMAX_VL,
+  UMIN_VL,
+  UMAX_VL,
+
+  SADDSAT_VL,
+  UADDSAT_VL,
+  SSUBSAT_VL,
+  USUBSAT_VL,
+
+  MULHS_VL,
+  MULHU_VL,
+  FADD_VL,
+  FSUB_VL,
+  FMUL_VL,
+  FDIV_VL,
+  FMINNUM_VL,
+  FMAXNUM_VL,
+
+  // Vector unary ops with a mask as a second operand and VL as a third operand.
+  FNEG_VL,
+  FABS_VL,
+  FSQRT_VL,
+  FCOPYSIGN_VL, // Has a merge operand
+  VFCVT_RTZ_X_F_VL,
+  VFCVT_RTZ_XU_F_VL,
+  VFCVT_X_F_VL,
+  VFCVT_XU_F_VL,
+  VFROUND_NOEXCEPT_VL,
+  VFCVT_RM_X_F_VL,  // Has a rounding mode operand.
+  VFCVT_RM_XU_F_VL, // Has a rounding mode operand.
+  SINT_TO_FP_VL,
+  UINT_TO_FP_VL,
+  VFCVT_RM_F_X_VL,  // Has a rounding mode operand.
+  VFCVT_RM_F_XU_VL, // Has a rounding mode operand.
+  FP_ROUND_VL,
+  FP_EXTEND_VL,
+
+  // Vector FMA ops with a mask as a fourth operand and VL as a fifth operand.
+  VFMADD_VL,
+  VFNMADD_VL,
+  VFMSUB_VL,
+  VFNMSUB_VL,
+
+  // Widening instructions with a merge value a third operand, a mask as a
+  // fourth operand, and VL as a fifth operand.
+  VWMUL_VL,
+  VWMULU_VL,
+  VWMULSU_VL,
+  VWADD_VL,
+  VWADDU_VL,
+  VWSUB_VL,
+  VWSUBU_VL,
+  VWADD_W_VL,
+  VWADDU_W_VL,
+  VWSUB_W_VL,
+  VWSUBU_W_VL,
+
+  VNSRL_VL,
+
+  // Vector compare producing a mask. Fourth operand is input mask. Fifth
+  // operand is VL.
+  SETCC_VL,
+
+  // Vector select with an additional VL operand. This operation is unmasked.
+  VSELECT_VL,
+  // Vector select with operand #2 (the value when the condition is false) tied
+  // to the destination and an additional VL operand. This operation is
+  // unmasked.
+  VP_MERGE_VL,
+
+  // Mask binary operators.
+  VMAND_VL,
+  VMOR_VL,
+  VMXOR_VL,
+
+  // Set mask vector to all zeros or ones.
+  VMCLR_VL,
+  VMSET_VL,
+
+  // Matches the semantics of vrgather.vx and vrgather.vv with extra operands
+  // for passthru and VL. Operands are (src, index, mask, passthru, vl).
+  VRGATHER_VX_VL,
+  VRGATHER_VV_VL,
+  VRGATHEREI16_VV_VL,
+
+  // Vector sign/zero extend with additional mask & VL operands.
+  VSEXT_VL,
+  VZEXT_VL,
+
+  // vcpop.m with additional mask and VL operands.
+  VCPOP_VL,
+
+  // vfirst.m with additional mask and VL operands.
+  VFIRST_VL,
+
+  // Reads value of CSR.
+  // The first operand is a chain pointer. The second specifies address of the
+  // required CSR. Two results are produced, the read value and the new chain
+  // pointer.
+  READ_CSR,
+  // Write value to CSR.
+  // The first operand is a chain pointer, the second specifies address of the
+  // required CSR and the third is the value to write. The result is the new
+  // chain pointer.
+  WRITE_CSR,
+  // Read and write value of CSR.
+  // The first operand is a chain pointer, the second specifies address of the
+  // required CSR and the third is the value to write. Two results are produced,
+  // the value read before the modification and the new chain pointer.
+  SWAP_CSR,
+
+  // FP to 32 bit int conversions for RV64. These are used to keep track of the
+  // result being sign extended to 64 bit. These saturate out of range inputs.
+  STRICT_FCVT_W_RV64 = ISD::FIRST_TARGET_STRICTFP_OPCODE,
+  STRICT_FCVT_WU_RV64,
+
+  // WARNING: Do not add anything in the end unless you want the node to
+  // have memop! In fact, starting from FIRST_TARGET_MEMORY_OPCODE all
+  // opcodes will be thought as target memory ops!
+
+  // Load address.
+  LA = ISD::FIRST_TARGET_MEMORY_OPCODE,
+  LA_TLS_IE,
+};
+} // namespace RISCVISD
+
+// RISC-V implementation of TargetLowering: legalization preferences, custom
+// lowering hooks, calling-convention handling, atomic expansion choices, and
+// RVV-specific queries used during SelectionDAG lowering.
+class RISCVTargetLowering : public TargetLowering {
+  const RISCVSubtarget &Subtarget;
+
+public:
+  explicit RISCVTargetLowering(const TargetMachine &TM,
+                               const RISCVSubtarget &STI);
+
+  const RISCVSubtarget &getSubtarget() const { return Subtarget; }
+
+  bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
+                          MachineFunction &MF,
+                          unsigned Intrinsic) const override;
+  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
+                             unsigned AS,
+                             Instruction *I = nullptr) const override;
+  bool isLegalICmpImmediate(int64_t Imm) const override;
+  bool isLegalAddImmediate(int64_t Imm) const override;
+  bool isTruncateFree(Type *SrcTy, Type *DstTy) const override;
+  bool isTruncateFree(EVT SrcVT, EVT DstVT) const override;
+  bool isZExtFree(SDValue Val, EVT VT2) const override;
+  bool isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const override;
+  bool signExtendConstant(const ConstantInt *CI) const override;
+  bool isCheapToSpeculateCttz(Type *Ty) const override;
+  bool isCheapToSpeculateCtlz(Type *Ty) const override;
+  bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;
+  bool hasAndNotCompare(SDValue Y) const override;
+  bool hasBitTest(SDValue X, SDValue Y) const override;
+  bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
+      SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
+      unsigned OldShiftOpcode, unsigned NewShiftOpcode,
+      SelectionDAG &DAG) const override;
+  /// Return true if the (vector) instruction I will be lowered to an instruction
+  /// with a scalar splat operand for the given Operand number.
+  bool canSplatOperand(Instruction *I, int Operand) const;
+  /// Return true if a vector instruction will lower to a target instruction
+  /// able to splat the given operand.
+  bool canSplatOperand(unsigned Opcode, int Operand) const;
+  bool shouldSinkOperands(Instruction *I,
+                          SmallVectorImpl<Use *> &Ops) const override;
+  bool shouldScalarizeBinop(SDValue VecOp) const override;
+  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;
+  bool isFPImmLegal(const APFloat &Imm, EVT VT,
+                    bool ForCodeSize) const override;
+  bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
+                               unsigned Index) const override;
+
+  bool isIntDivCheap(EVT VT, AttributeList Attr) const override;
+
+  bool preferScalarizeSplat(unsigned Opc) const override;
+
+  bool softPromoteHalfType() const override { return true; }
+
+  /// Return the register type for a given MVT, ensuring vectors are treated
+  /// as a series of gpr sized integers.
+  MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC,
+                                    EVT VT) const override;
+
+  /// Return the number of registers for a given MVT, ensuring vectors are
+  /// treated as a series of gpr sized integers.
+  unsigned getNumRegistersForCallingConv(LLVMContext &Context,
+                                         CallingConv::ID CC,
+                                         EVT VT) const override;
+
+  bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode,
+                                            EVT VT) const override;
+
+  /// Return true if the given shuffle mask can be codegen'd directly, or if it
+  /// should be stack expanded.
+  bool isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const override;
+
+  bool hasBitPreservingFPLogic(EVT VT) const override;
+  bool
+  shouldExpandBuildVectorWithShuffles(EVT VT,
+                                      unsigned DefinedValues) const override;
+
+  // Provide custom lowering hooks for some operations.
+  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
+  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
+                          SelectionDAG &DAG) const override;
+
+  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
+
+  bool targetShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,
+                                    const APInt &DemandedElts,
+                                    TargetLoweringOpt &TLO) const override;
+
+  void computeKnownBitsForTargetNode(const SDValue Op,
+                                     KnownBits &Known,
+                                     const APInt &DemandedElts,
+                                     const SelectionDAG &DAG,
+                                     unsigned Depth) const override;
+  unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
+                                           const APInt &DemandedElts,
+                                           const SelectionDAG &DAG,
+                                           unsigned Depth) const override;
+
+  const Constant *getTargetConstantFromLoad(LoadSDNode *LD) const override;
+
+  // This method returns the name of a target specific DAG node.
+  const char *getTargetNodeName(unsigned Opcode) const override;
+
+  ConstraintType getConstraintType(StringRef Constraint) const override;
+
+  unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override;
+
+  std::pair<unsigned, const TargetRegisterClass *>
+  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
+                               StringRef Constraint, MVT VT) const override;
+
+  void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
+                                    std::vector<SDValue> &Ops,
+                                    SelectionDAG &DAG) const override;
+
+  MachineBasicBlock *
+  EmitInstrWithCustomInserter(MachineInstr &MI,
+                              MachineBasicBlock *BB) const override;
+
+  void AdjustInstrPostInstrSelection(MachineInstr &MI,
+                                     SDNode *Node) const override;
+
+  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
+                         EVT VT) const override;
+
+  bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
+    return VT.isScalarInteger();
+  }
+  bool convertSelectOfConstantsToMath(EVT VT) const override { return true; }
+
+  bool shouldInsertFencesForAtomic(const Instruction *I) const override {
+    return isa<LoadInst>(I) || isa<StoreInst>(I);
+  }
+  Instruction *emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst,
+                                AtomicOrdering Ord) const override;
+  Instruction *emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst,
+                                 AtomicOrdering Ord) const override;
+
+  bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
+                                  EVT VT) const override;
+
+  ISD::NodeType getExtendForAtomicOps() const override {
+    return ISD::SIGN_EXTEND;
+  }
+
+  ISD::NodeType getExtendForAtomicCmpSwapArg() const override {
+    return ISD::SIGN_EXTEND;
+  }
+
+  TargetLowering::ShiftLegalizationStrategy
+  preferredShiftLegalizationStrategy(SelectionDAG &DAG, SDNode *N,
+                                     unsigned ExpansionFactor) const override {
+    if (DAG.getMachineFunction().getFunction().hasMinSize())
+      return ShiftLegalizationStrategy::LowerToLibcall;
+    return TargetLowering::preferredShiftLegalizationStrategy(DAG, N,
+                                                              ExpansionFactor);
+  }
+
+  bool isDesirableToCommuteWithShift(const SDNode *N,
+                                     CombineLevel Level) const override;
+
+  /// If a physical register, this returns the register that receives the
+  /// exception address on entry to an EH pad.
+  Register
+  getExceptionPointerRegister(const Constant *PersonalityFn) const override;
+
+  /// If a physical register, this returns the register that receives the
+  /// exception typeid on entry to a landing pad.
+  Register
+  getExceptionSelectorRegister(const Constant *PersonalityFn) const override;
+
+  bool shouldExtendTypeInLibCall(EVT Type) const override;
+  bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const override;
+
+  /// Returns the register with the specified architectural or ABI name. This
+  /// method is necessary to lower the llvm.read_register.* and
+  /// llvm.write_register.* intrinsics. Allocatable registers must be reserved
+  /// with the clang -ffixed-xX flag for access to be allowed.
+  Register getRegisterByName(const char *RegName, LLT VT,
+                             const MachineFunction &MF) const override;
+
+  // Lower incoming arguments, copy physregs into vregs
+  SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
+                               bool IsVarArg,
+                               const SmallVectorImpl<ISD::InputArg> &Ins,
+                               const SDLoc &DL, SelectionDAG &DAG,
+                               SmallVectorImpl<SDValue> &InVals) const override;
+  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
+                      bool IsVarArg,
+                      const SmallVectorImpl<ISD::OutputArg> &Outs,
+                      LLVMContext &Context) const override;
+  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
+                      const SmallVectorImpl<ISD::OutputArg> &Outs,
+                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
+                      SelectionDAG &DAG) const override;
+  SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
+                    SmallVectorImpl<SDValue> &InVals) const override;
+
+  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
+                                         Type *Ty) const override;
+  bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
+  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
+  bool shouldConsiderGEPOffsetSplit() const override { return true; }
+
+  bool decomposeMulByConstant(LLVMContext &Context, EVT VT,
+                              SDValue C) const override;
+
+  bool isMulAddWithConstProfitable(SDValue AddNode,
+                                   SDValue ConstNode) const override;
+
+  TargetLowering::AtomicExpansionKind
+  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
+  Value *emitMaskedAtomicRMWIntrinsic(IRBuilderBase &Builder, AtomicRMWInst *AI,
+                                      Value *AlignedAddr, Value *Incr,
+                                      Value *Mask, Value *ShiftAmt,
+                                      AtomicOrdering Ord) const override;
+  TargetLowering::AtomicExpansionKind
+  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *CI) const override;
+  Value *emitMaskedAtomicCmpXchgIntrinsic(IRBuilderBase &Builder,
+                                          AtomicCmpXchgInst *CI,
+                                          Value *AlignedAddr, Value *CmpVal,
+                                          Value *NewVal, Value *Mask,
+                                          AtomicOrdering Ord) const override;
+
+  /// Returns true if the target allows unaligned memory accesses of the
+  /// specified type.
+  bool allowsMisalignedMemoryAccesses(
+      EVT VT, unsigned AddrSpace = 0, Align Alignment = Align(1),
+      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
+      unsigned *Fast = nullptr) const override;
+
+  bool splitValueIntoRegisterParts(
+      SelectionDAG & DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
+      unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC)
+      const override;
+
+  SDValue joinRegisterPartsIntoValue(
+      SelectionDAG & DAG, const SDLoc &DL, const SDValue *Parts,
+      unsigned NumParts, MVT PartVT, EVT ValueVT,
+      std::optional<CallingConv::ID> CC) const override;
+
+  static RISCVII::VLMUL getLMUL(MVT VT);
+  inline static unsigned computeVLMAX(unsigned VectorBits, unsigned EltSize,
+                                      unsigned MinSize) {
+    // Original equation:
+    //   VLMAX = (VectorBits / EltSize) * LMUL
+    //   where LMUL = MinSize / RISCV::RVVBitsPerBlock
+    // The following equations have been reordered to prevent loss of precision
+    // when calculating fractional LMUL.
+    return ((VectorBits / EltSize) * MinSize) / RISCV::RVVBitsPerBlock;
+  };
+  static unsigned getRegClassIDForLMUL(RISCVII::VLMUL LMul);
+  static unsigned getSubregIndexByMVT(MVT VT, unsigned Index);
+  static unsigned getRegClassIDForVecVT(MVT VT);
+  static std::pair<unsigned, unsigned>
+  decomposeSubvectorInsertExtractToSubRegs(MVT VecVT, MVT SubVecVT,
+                                           unsigned InsertExtractIdx,
+                                           const RISCVRegisterInfo *TRI);
+  MVT getContainerForFixedLengthVector(MVT VT) const;
+
+  bool shouldRemoveExtendFromGSIndex(EVT IndexVT, EVT DataVT) const override;
+
+  bool isLegalElementTypeForRVV(Type *ScalarTy) const;
+
+  bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const override;
+
+  unsigned getJumpTableEncoding() const override;
+
+  const MCExpr *LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
+                                          const MachineBasicBlock *MBB,
+                                          unsigned uid,
+                                          MCContext &Ctx) const override;
+
+  bool isVScaleKnownToBeAPowerOfTwo() const override;
+
+  bool isLegalScaleForGatherScatter(uint64_t Scale,
+                                    uint64_t ElemSize) const override {
+    // Scaled addressing not supported on indexed load/stores
+    return Scale == 1;
+  }
+
+  /// If the target has a standard location for the stack protector cookie,
+  /// returns the address of that location. Otherwise, returns nullptr.
+  Value *getIRStackGuard(IRBuilderBase &IRB) const override;
+
+private:
+  /// RISCVCCAssignFn - This target-specific function extends the default
+  /// CCValAssign with additional information used to lower RISC-V calling
+  /// conventions.
+  typedef bool RISCVCCAssignFn(const DataLayout &DL, RISCVABI::ABI,
+                               unsigned ValNo, MVT ValVT, MVT LocVT,
+                               CCValAssign::LocInfo LocInfo,
+                               ISD::ArgFlagsTy ArgFlags, CCState &State,
+                               bool IsFixed, bool IsRet, Type *OrigTy,
+                               const RISCVTargetLowering &TLI,
+                               std::optional<unsigned> FirstMaskArgument);
+
+  void analyzeInputArgs(MachineFunction &MF, CCState &CCInfo,
+                        const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet,
+                        RISCVCCAssignFn Fn) const;
+  void analyzeOutputArgs(MachineFunction &MF, CCState &CCInfo,
+                         const SmallVectorImpl<ISD::OutputArg> &Outs,
+                         bool IsRet, CallLoweringInfo *CLI,
+                         RISCVCCAssignFn Fn) const;
+
+  template <class NodeTy>
+  SDValue getAddr(NodeTy *N, SelectionDAG &DAG, bool IsLocal = true) const;
+  SDValue getStaticTLSAddr(GlobalAddressSDNode *N, SelectionDAG &DAG,
+                           bool UseGOT) const;
+  SDValue getDynamicTLSAddr(GlobalAddressSDNode *N, SelectionDAG &DAG) const;
+
+  SDValue lowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerSELECT(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerVASTART(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerShiftRightParts(SDValue Op, SelectionDAG &DAG, bool IsSRA) const;
+  SDValue lowerSPLAT_VECTOR_PARTS(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerVectorMaskSplat(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
+                             int64_t ExtTrueVal) const;
+  SDValue lowerVectorMaskTruncLike(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerVectorTruncLike(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerVectorFPExtendOrRoundLike(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
+  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
+  SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const;
+  SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerVPREDUCE(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerVECREDUCE(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerVectorMaskVecReduction(SDValue Op, SelectionDAG &DAG,
+                                      bool IsVP) const;
+  SDValue lowerFPVECREDUCE(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerSTEP_VECTOR(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerVECTOR_REVERSE(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerVECTOR_SPLICE(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerABS(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerMaskedLoad(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerMaskedStore(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerFixedLengthVectorFCOPYSIGNToRVV(SDValue Op,
+                                               SelectionDAG &DAG) const;
+  SDValue lowerMaskedGather(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerMaskedScatter(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerFixedLengthVectorLoadToRVV(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerFixedLengthVectorStoreToRVV(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerFixedLengthVectorSetccToRVV(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerFixedLengthVectorLogicOpToRVV(SDValue Op, SelectionDAG &DAG,
+                                             unsigned MaskOpc,
+                                             unsigned VecOpc) const;
+  SDValue lowerFixedLengthVectorShiftToRVV(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerFixedLengthVectorSelectToRVV(SDValue Op,
+                                            SelectionDAG &DAG) const;
+  SDValue lowerToScalableOp(SDValue Op, SelectionDAG &DAG, unsigned NewOpc,
+                            bool HasMergeOp = false, bool HasMask = true) const;
+  SDValue lowerVPOp(SDValue Op, SelectionDAG &DAG, unsigned RISCVISDOpc,
+                    bool HasMergeOp = false) const;
+  SDValue lowerLogicVPOp(SDValue Op, SelectionDAG &DAG, unsigned MaskOpc,
+                         unsigned VecOpc) const;
+  SDValue lowerVPExtMaskOp(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerVPSetCCMaskOp(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerVPFPIntConvOp(SDValue Op, SelectionDAG &DAG,
+                             unsigned RISCVISDOpc) const;
+  SDValue lowerVPStridedLoad(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerVPStridedStore(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerFixedLengthVectorExtendToRVV(SDValue Op, SelectionDAG &DAG,
+                                            unsigned ExtendOpc) const;
+  SDValue lowerGET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerSET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
+
+  SDValue lowerEH_DWARF_CFA(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerCTLZ_CTTZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) const;
+
+  SDValue expandUnalignedRVVLoad(SDValue Op, SelectionDAG &DAG) const;
+  SDValue expandUnalignedRVVStore(SDValue Op, SelectionDAG &DAG) const;
+
+  bool isEligibleForTailCallOptimization(
+      CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
+      const SmallVector<CCValAssign, 16> &ArgLocs) const;
+
+  /// Generate error diagnostics if any register used by CC has been marked
+  /// reserved.
+  void validateCCReservedRegs(
+      const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
+      MachineFunction &MF) const;
+
+  bool useRVVForFixedLengthVectorVT(MVT VT) const;
+
+  MVT getVPExplicitVectorLengthTy() const override;
+
+  /// RVV code generation for fixed length vectors does not lower all
+  /// BUILD_VECTORs. This makes BUILD_VECTOR legalisation a source of stores to
+  /// merge. However, merging them creates a BUILD_VECTOR that is just as
+  /// illegal as the original, thus leading to an infinite legalisation loop.
+  /// NOTE: Once BUILD_VECTOR can be custom lowered for all legal vector types,
+  /// this override can be removed.
+  bool mergeStoresAfterLegalization(EVT VT) const override;
+
+  /// Disable normalizing
+  /// select(N0&N1, X, Y) => select(N0, select(N1, X, Y), Y) and
+  /// select(N0|N1, X, Y) => select(N0, X, select(N1, X, Y))
+  /// RISCV doesn't have flags so it's better to perform the and/or in a GPR.
+  bool shouldNormalizeToSelectSequence(LLVMContext &, EVT) const override {
+    return false;
+  };
+
+  /// For available scheduling models FDIV + two independent FMULs are much
+  /// faster than two FDIVs.
+  unsigned combineRepeatedFPDivisors() const override;
+};
+namespace RISCVVIntrinsicsTable {
+
+// Per-intrinsic metadata generated by TableGen from IntrinsicsRISCV.td and
+// queried during lowering to locate the scalar and VL operand positions.
+struct RISCVVIntrinsicInfo {
+  unsigned IntrinsicID;
+  uint8_t ScalarOperand;
+  uint8_t VLOperand;
+  bool hasScalarOperand() const {
+    // 0xF is not valid. See NoScalarOperand in IntrinsicsRISCV.td.
+    return ScalarOperand != 0xF;
+  }
+  bool hasVLOperand() const {
+    // 0x1F is not valid. See NoVLOperand in IntrinsicsRISCV.td.
+    return VLOperand != 0x1F;
+  }
+};
+
+using namespace RISCV;
+
+#define GET_RISCVVIntrinsicsTable_DECL
+#include "RISCVGenSearchableTables.inc"
+
+} // end namespace RISCVVIntrinsicsTable
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/libs/llvm16/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/contrib/libs/llvm16/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
new file mode 100644
index 0000000000..115c962221
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -0,0 +1,1442 @@
+//===- RISCVInsertVSETVLI.cpp - Insert VSETVLI instructions ---------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a function pass that inserts VSETVLI instructions where
+// needed and expands the vl outputs of VLEFF/VLSEGFF to PseudoReadVL
+// instructions.
+//
+// This pass consists of 3 phases:
+//
+// Phase 1 collects how each basic block affects VL/VTYPE.
+//
+// Phase 2 uses the information from phase 1 to do a data flow analysis to
+// propagate the VL/VTYPE changes through the function. This gives us the
+// VL/VTYPE at the start of each basic block.
+//
+// Phase 3 inserts VSETVLI instructions in each basic block. Information from
+// phase 2 is used to prevent inserting a VSETVLI before the first vector
+// instruction in the block if possible.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RISCV.h"
+#include "RISCVSubtarget.h"
+#include "llvm/CodeGen/LiveIntervals.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include <queue>
+using namespace llvm;
+
+#define DEBUG_TYPE "riscv-insert-vsetvli"
+#define RISCV_INSERT_VSETVLI_NAME "RISCV Insert VSETVLI pass"
+
+static cl::opt<bool> DisableInsertVSETVLPHIOpt(
+ "riscv-disable-insert-vsetvl-phi-opt", cl::init(false), cl::Hidden,
+ cl::desc("Disable looking through phis when inserting vsetvlis."));
+
+static cl::opt<bool> UseStrictAsserts(
+ "riscv-insert-vsetvl-strict-asserts", cl::init(true), cl::Hidden,
+ cl::desc("Enable strict assertion checking for the dataflow algorithm"));
+
+namespace {
+
+/// Return the operand index of MI's VL operand, as recorded in its
+/// instruction descriptor.
+static unsigned getVLOpNum(const MachineInstr &MI) {
+  const auto &Desc = MI.getDesc();
+  return RISCVII::getVLOpNum(Desc);
+}
+
+/// Return the operand index of MI's SEW operand, as recorded in its
+/// instruction descriptor.
+static unsigned getSEWOpNum(const MachineInstr &MI) {
+  const auto &Desc = MI.getDesc();
+  return RISCVII::getSEWOpNum(Desc);
+}
+
+/// Return true if MI is one of the vector-configuration pseudos
+/// (vsetvli / vsetvli-x0 / vsetivli).
+static bool isVectorConfigInstr(const MachineInstr &MI) {
+  switch (MI.getOpcode()) {
+  case RISCV::PseudoVSETVLI:
+  case RISCV::PseudoVSETVLIX0:
+  case RISCV::PseudoVSETIVLI:
+    return true;
+  default:
+    return false;
+  }
+}
+
+/// Return true if this is 'vsetvli x0, x0, vtype' which preserves
+/// VL and only sets VTYPE.
+static bool isVLPreservingConfig(const MachineInstr &MI) {
+  if (MI.getOpcode() != RISCV::PseudoVSETVLIX0)
+    return false;
+  // The AVL source of PseudoVSETVLIX0 is always x0; whether VL is preserved
+  // depends solely on the destination also being x0.
+  assert(MI.getOperand(1).getReg() == RISCV::X0);
+  return MI.getOperand(0).getReg() == RISCV::X0;
+}
+
+/// Map an RVV pseudo opcode to the MC opcode it wraps, or 0 when the given
+/// opcode has no entry in the RVV pseudo table.
+static uint16_t getRVVMCOpcode(uint16_t RVVPseudoOpcode) {
+  const auto *Info = RISCVVPseudosTable::getPseudoInfo(RVVPseudoOpcode);
+  return Info ? Info->BaseInstr : 0;
+}
+
+/// Return true if MI is a scalar-to-vector element move
+/// (vmv.s.x or vfmv.s.f).
+static bool isScalarMoveInstr(const MachineInstr &MI) {
+  const uint16_t MCOpc = getRVVMCOpcode(MI.getOpcode());
+  return MCOpc == RISCV::VMV_S_X || MCOpc == RISCV::VFMV_S_F;
+}
+
+/// Get the EEW for a load or store instruction. Return std::nullopt if MI is
+/// not a load or store which ignores SEW.
+/// For these unit-stride/strided memory ops the effective element width is
+/// encoded directly in the opcode (e.g. vle32 => 32), so they do not depend
+/// on the dynamic SEW setting.
+static std::optional<unsigned> getEEWForLoadStore(const MachineInstr &MI) {
+  switch (getRVVMCOpcode(MI.getOpcode())) {
+  default:
+    return std::nullopt;
+  case RISCV::VLE8_V:
+  case RISCV::VLSE8_V:
+  case RISCV::VSE8_V:
+  case RISCV::VSSE8_V:
+    return 8;
+  case RISCV::VLE16_V:
+  case RISCV::VLSE16_V:
+  case RISCV::VSE16_V:
+  case RISCV::VSSE16_V:
+    return 16;
+  case RISCV::VLE32_V:
+  case RISCV::VLSE32_V:
+  case RISCV::VSE32_V:
+  case RISCV::VSSE32_V:
+    return 32;
+  case RISCV::VLE64_V:
+  case RISCV::VLSE64_V:
+  case RISCV::VSE64_V:
+  case RISCV::VSSE64_V:
+    return 64;
+  }
+}
+
+/// Return true if this is an operation on mask registers. Note that
+/// this includes both arithmetic/logical ops and load/store (vlm/vsm).
+static bool isMaskRegOp(const MachineInstr &MI) {
+  if (!RISCVII::hasSEWOp(MI.getDesc().TSFlags))
+    return false;
+  // Mask-register operations are encoded with log2(SEW) == 0.
+  return MI.getOperand(getSEWOpNum(MI)).getImm() == 0;
+}
+
+/// Which subfields of VL or VTYPE have values we need to preserve?
+struct DemandedFields {
+  // Some unknown property of VL is used. If demanded, must preserve entire
+  // value.
+  bool VLAny = false;
+  // Only zero vs non-zero is used. If demanded, can change non-zero values.
+  bool VLZeroness = false;
+  bool SEW = false;
+  bool LMUL = false;
+  bool SEWLMULRatio = false;
+  bool TailPolicy = false;
+  bool MaskPolicy = false;
+
+  // Return true if any part of VTYPE was used
+  bool usedVTYPE() const {
+    return SEW || LMUL || SEWLMULRatio || TailPolicy || MaskPolicy;
+  }
+
+  // Return true if any property of VL was used
+  // (const added for consistency with usedVTYPE and so it can be queried
+  // through const references.)
+  bool usedVL() const {
+    return VLAny || VLZeroness;
+  }
+
+  // Mark all VTYPE subfields and properties as demanded
+  void demandVTYPE() {
+    SEW = true;
+    LMUL = true;
+    SEWLMULRatio = true;
+    TailPolicy = true;
+    MaskPolicy = true;
+  }
+
+  // Mark all VL properties as demanded
+  void demandVL() {
+    VLAny = true;
+    VLZeroness = true;
+  }
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+  /// Support for debugging, callable in GDB: V->dump()
+  LLVM_DUMP_METHOD void dump() const {
+    print(dbgs());
+    dbgs() << "\n";
+  }
+
+  /// Implement operator<<.
+  void print(raw_ostream &OS) const {
+    OS << "{";
+    OS << "VLAny=" << VLAny << ", ";
+    OS << "VLZeroness=" << VLZeroness << ", ";
+    OS << "SEW=" << SEW << ", ";
+    OS << "LMUL=" << LMUL << ", ";
+    OS << "SEWLMULRatio=" << SEWLMULRatio << ", ";
+    OS << "TailPolicy=" << TailPolicy << ", ";
+    OS << "MaskPolicy=" << MaskPolicy;
+    OS << "}";
+  }
+#endif
+};
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+/// Stream a DemandedFields via its print method (debug builds only).
+LLVM_ATTRIBUTE_USED
+inline raw_ostream &operator<<(raw_ostream &OS, const DemandedFields &DF) {
+  DF.print(OS);
+  return OS;
+}
+#endif
+
+
+/// Return true if the two values of the VTYPE register provided are
+/// indistinguishable from the perspective of an instruction (or set of
+/// instructions) which use only the Used subfields and properties.
+static bool areCompatibleVTYPEs(uint64_t VType1,
+                                uint64_t VType2,
+                                const DemandedFields &Used) {
+  // Decode once up front; all the queries below are pure.
+  const unsigned SEW1 = RISCVVType::getSEW(VType1);
+  const unsigned SEW2 = RISCVVType::getSEW(VType2);
+  const auto LMUL1 = RISCVVType::getVLMUL(VType1);
+  const auto LMUL2 = RISCVVType::getVLMUL(VType2);
+
+  if (Used.SEW && SEW1 != SEW2)
+    return false;
+
+  if (Used.LMUL && LMUL1 != LMUL2)
+    return false;
+
+  if (Used.SEWLMULRatio &&
+      RISCVVType::getSEWLMULRatio(SEW1, LMUL1) !=
+          RISCVVType::getSEWLMULRatio(SEW2, LMUL2))
+    return false;
+
+  if (Used.TailPolicy &&
+      RISCVVType::isTailAgnostic(VType1) != RISCVVType::isTailAgnostic(VType2))
+    return false;
+
+  if (Used.MaskPolicy &&
+      RISCVVType::isMaskAgnostic(VType1) != RISCVVType::isMaskAgnostic(VType2))
+    return false;
+
+  return true;
+}
+
+/// Return the fields and properties demanded by the provided instruction.
+static DemandedFields getDemanded(const MachineInstr &MI) {
+  // Warning: This function has to work on both the lowered (i.e. post
+  // emitVSETVLIs) and pre-lowering forms. The main implication of this is
+  // that it can't use the value of a SEW, VL, or Policy operand as they might
+  // be stale after lowering.
+
+  // Most instructions don't use any of these subfields.
+  DemandedFields Res;
+  // Start conservative if registers are used
+  if (MI.isCall() || MI.isInlineAsm() || MI.readsRegister(RISCV::VL))
+    Res.demandVL();
+  if (MI.isCall() || MI.isInlineAsm() || MI.readsRegister(RISCV::VTYPE))
+    Res.demandVTYPE();
+  // Start conservative on the unlowered form too
+  uint64_t TSFlags = MI.getDesc().TSFlags;
+  if (RISCVII::hasSEWOp(TSFlags)) {
+    Res.demandVTYPE();
+    if (RISCVII::hasVLOp(TSFlags))
+      Res.demandVL();
+
+    // Behavior is independent of mask policy.
+    if (!RISCVII::usesMaskPolicy(TSFlags))
+      Res.MaskPolicy = false;
+  }
+
+  // Loads and stores with implicit EEW do not demand SEW or LMUL directly.
+  // They instead demand the ratio of the two which is used in computing
+  // EMUL, but which allows us the flexibility to change SEW and LMUL
+  // provided we don't change the ratio.
+  // Note: We assume that the instructions initial SEW is the EEW encoded
+  // in the opcode. This is asserted when constructing the VSETVLIInfo.
+  if (getEEWForLoadStore(MI)) {
+    Res.SEW = false;
+    Res.LMUL = false;
+  }
+
+  // Store instructions don't use the policy fields.
+  if (RISCVII::hasSEWOp(TSFlags) && MI.getNumExplicitDefs() == 0) {
+    Res.TailPolicy = false;
+    Res.MaskPolicy = false;
+  }
+
+  // If this is a mask reg operation, it only cares about VLMAX.
+  // TODO: Possible extensions to this logic
+  // * Probably ok if available VLMax is larger than demanded
+  // * The policy bits can probably be ignored.
+  if (isMaskRegOp(MI)) {
+    Res.SEW = false;
+    Res.LMUL = false;
+  }
+
+  // For vmv.s.x and vfmv.s.f, there are only two behaviors, VL = 0 and VL > 0.
+  if (isScalarMoveInstr(MI)) {
+    Res.LMUL = false;
+    Res.SEWLMULRatio = false;
+    Res.VLAny = false;
+  }
+
+  return Res;
+}
+
+/// Defines the abstract state with which the forward dataflow models the
+/// values of the VL and VTYPE registers after insertion.
+class VSETVLIInfo {
+  // AVL storage; which member is active is determined by State below.
+  union {
+    Register AVLReg;
+    unsigned AVLImm;
+  };
+
+  enum : uint8_t {
+    Uninitialized,
+    AVLIsReg,
+    AVLIsImm,
+    Unknown,
+  } State = Uninitialized;
+
+  // Fields from VTYPE.
+  RISCVII::VLMUL VLMul = RISCVII::LMUL_1;
+  uint8_t SEW = 0;
+  uint8_t TailAgnostic : 1;
+  uint8_t MaskAgnostic : 1;
+  // When set, only the SEW/LMUL *ratio* (i.e. VLMAX) of this state is
+  // meaningful; the individual SEW/LMUL/policy fields must not be consulted.
+  // Produced by intersect() when predecessors disagree on VTYPE but agree
+  // on AVL and VLMAX.
+  uint8_t SEWLMULRatioOnly : 1;
+
+public:
+  VSETVLIInfo()
+      : AVLImm(0), TailAgnostic(false), MaskAgnostic(false),
+        SEWLMULRatioOnly(false) {}
+
+  static VSETVLIInfo getUnknown() {
+    VSETVLIInfo Info;
+    Info.setUnknown();
+    return Info;
+  }
+
+  bool isValid() const { return State != Uninitialized; }
+  void setUnknown() { State = Unknown; }
+  bool isUnknown() const { return State == Unknown; }
+
+  void setAVLReg(Register Reg) {
+    AVLReg = Reg;
+    State = AVLIsReg;
+  }
+
+  void setAVLImm(unsigned Imm) {
+    AVLImm = Imm;
+    State = AVLIsImm;
+  }
+
+  bool hasAVLImm() const { return State == AVLIsImm; }
+  bool hasAVLReg() const { return State == AVLIsReg; }
+  Register getAVLReg() const {
+    assert(hasAVLReg());
+    return AVLReg;
+  }
+  unsigned getAVLImm() const {
+    assert(hasAVLImm());
+    return AVLImm;
+  }
+
+  unsigned getSEW() const { return SEW; }
+  RISCVII::VLMUL getVLMUL() const { return VLMul; }
+
+  // Return true when we can prove the AVL (and hence VL) is non-zero.
+  bool hasNonZeroAVL() const {
+    if (hasAVLImm())
+      return getAVLImm() > 0;
+    if (hasAVLReg())
+      // An AVL register of x0 requests VLMAX, which is non-zero by
+      // definition; any other register is unknown, so answer false.
+      return getAVLReg() == RISCV::X0;
+    return false;
+  }
+
+  bool hasEquallyZeroAVL(const VSETVLIInfo &Other) const {
+    if (hasSameAVL(Other))
+      return true;
+    return (hasNonZeroAVL() && Other.hasNonZeroAVL());
+  }
+
+  bool hasSameAVL(const VSETVLIInfo &Other) const {
+    if (hasAVLReg() && Other.hasAVLReg())
+      return getAVLReg() == Other.getAVLReg();
+
+    if (hasAVLImm() && Other.hasAVLImm())
+      return getAVLImm() == Other.getAVLImm();
+
+    return false;
+  }
+
+  void setVTYPE(unsigned VType) {
+    assert(isValid() && !isUnknown() &&
+           "Can't set VTYPE for uninitialized or unknown");
+    VLMul = RISCVVType::getVLMUL(VType);
+    SEW = RISCVVType::getSEW(VType);
+    TailAgnostic = RISCVVType::isTailAgnostic(VType);
+    MaskAgnostic = RISCVVType::isMaskAgnostic(VType);
+  }
+  void setVTYPE(RISCVII::VLMUL L, unsigned S, bool TA, bool MA) {
+    assert(isValid() && !isUnknown() &&
+           "Can't set VTYPE for uninitialized or unknown");
+    VLMul = L;
+    SEW = S;
+    TailAgnostic = TA;
+    MaskAgnostic = MA;
+  }
+
+  unsigned encodeVTYPE() const {
+    assert(isValid() && !isUnknown() && !SEWLMULRatioOnly &&
+           "Can't encode VTYPE for uninitialized or unknown");
+    return RISCVVType::encodeVTYPE(VLMul, SEW, TailAgnostic, MaskAgnostic);
+  }
+
+  bool hasSEWLMULRatioOnly() const { return SEWLMULRatioOnly; }
+
+  bool hasSameVTYPE(const VSETVLIInfo &Other) const {
+    assert(isValid() && Other.isValid() &&
+           "Can't compare invalid VSETVLIInfos");
+    assert(!isUnknown() && !Other.isUnknown() &&
+           "Can't compare VTYPE in unknown state");
+    assert(!SEWLMULRatioOnly && !Other.SEWLMULRatioOnly &&
+           "Can't compare when only LMUL/SEW ratio is valid.");
+    return std::tie(VLMul, SEW, TailAgnostic, MaskAgnostic) ==
+           std::tie(Other.VLMul, Other.SEW, Other.TailAgnostic,
+                    Other.MaskAgnostic);
+  }
+
+  unsigned getSEWLMULRatio() const {
+    assert(isValid() && !isUnknown() &&
+           "Can't use VTYPE for uninitialized or unknown");
+    return RISCVVType::getSEWLMULRatio(SEW, VLMul);
+  }
+
+  // Check if the VTYPE for these two VSETVLIInfos produce the same VLMAX.
+  // Note that having the same VLMAX ensures that both share the same
+  // function from AVL to VL; that is, they must produce the same VL value
+  // for any given AVL value.
+  bool hasSameVLMAX(const VSETVLIInfo &Other) const {
+    assert(isValid() && Other.isValid() &&
+           "Can't compare invalid VSETVLIInfos");
+    assert(!isUnknown() && !Other.isUnknown() &&
+           "Can't compare VTYPE in unknown state");
+    return getSEWLMULRatio() == Other.getSEWLMULRatio();
+  }
+
+  bool hasCompatibleVTYPE(const DemandedFields &Used,
+                          const VSETVLIInfo &Require) const {
+    return areCompatibleVTYPEs(encodeVTYPE(), Require.encodeVTYPE(), Used);
+  }
+
+  // Determine whether the vector instructions requirements represented by
+  // Require are compatible with the previous vsetvli instruction represented
+  // by this. MI is the instruction whose requirements we're considering.
+  bool isCompatible(const DemandedFields &Used, const VSETVLIInfo &Require) const {
+    assert(isValid() && Require.isValid() &&
+           "Can't compare invalid VSETVLIInfos");
+    assert(!Require.SEWLMULRatioOnly &&
+           "Expected a valid VTYPE for instruction!");
+    // Nothing is compatible with Unknown.
+    if (isUnknown() || Require.isUnknown())
+      return false;
+
+    // If only our VLMAX ratio is valid, then this isn't compatible.
+    if (SEWLMULRatioOnly)
+      return false;
+
+    // If the instruction doesn't need an AVLReg and the SEW matches, consider
+    // it compatible.
+    if (Require.hasAVLReg() && Require.AVLReg == RISCV::NoRegister)
+      if (SEW == Require.SEW)
+        return true;
+
+    if (Used.VLAny && !hasSameAVL(Require))
+      return false;
+
+    if (Used.VLZeroness && !hasEquallyZeroAVL(Require))
+      return false;
+
+    return areCompatibleVTYPEs(encodeVTYPE(), Require.encodeVTYPE(), Used);
+  }
+
+  bool operator==(const VSETVLIInfo &Other) const {
+    // Uninitialized is only equal to another Uninitialized.
+    if (!isValid())
+      return !Other.isValid();
+    if (!Other.isValid())
+      return !isValid();
+
+    // Unknown is only equal to another Unknown.
+    if (isUnknown())
+      return Other.isUnknown();
+    if (Other.isUnknown())
+      return isUnknown();
+
+    if (!hasSameAVL(Other))
+      return false;
+
+    // If the SEWLMULRatioOnly bits are different, then they aren't equal.
+    if (SEWLMULRatioOnly != Other.SEWLMULRatioOnly)
+      return false;
+
+    // If only the VLMAX is valid, check that it is the same.
+    if (SEWLMULRatioOnly)
+      return hasSameVLMAX(Other);
+
+    // If the full VTYPE is valid, check that it is the same.
+    return hasSameVTYPE(Other);
+  }
+
+  bool operator!=(const VSETVLIInfo &Other) const {
+    return !(*this == Other);
+  }
+
+  // Calculate the VSETVLIInfo visible to a block assuming this and Other are
+  // both predecessors.  This is the lattice meet operation of the dataflow.
+  VSETVLIInfo intersect(const VSETVLIInfo &Other) const {
+    // If the new value isn't valid, ignore it.
+    if (!Other.isValid())
+      return *this;
+
+    // If this value isn't valid, this must be the first predecessor, use it.
+    if (!isValid())
+      return Other;
+
+    // If either is unknown, the result is unknown.
+    if (isUnknown() || Other.isUnknown())
+      return VSETVLIInfo::getUnknown();
+
+    // If we have an exact, match return this.
+    if (*this == Other)
+      return *this;
+
+    // Not an exact match, but maybe the AVL and VLMAX are the same. If so,
+    // return an SEW/LMUL ratio only value.
+    if (hasSameAVL(Other) && hasSameVLMAX(Other)) {
+      VSETVLIInfo MergeInfo = *this;
+      MergeInfo.SEWLMULRatioOnly = true;
+      return MergeInfo;
+    }
+
+    // Otherwise the result is unknown.
+    return VSETVLIInfo::getUnknown();
+  }
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+  /// Support for debugging, callable in GDB: V->dump()
+  LLVM_DUMP_METHOD void dump() const {
+    print(dbgs());
+    dbgs() << "\n";
+  }
+
+  /// Implement operator<<.
+  /// @{
+  void print(raw_ostream &OS) const {
+    OS << "{";
+    if (!isValid())
+      OS << "Uninitialized";
+    if (isUnknown())
+      OS << "unknown";
+    if (hasAVLReg())
+      OS << "AVLReg=" << (unsigned)AVLReg;
+    if (hasAVLImm())
+      OS << "AVLImm=" << (unsigned)AVLImm;
+    OS << ", "
+       << "VLMul=" << (unsigned)VLMul << ", "
+       << "SEW=" << (unsigned)SEW << ", "
+       << "TailAgnostic=" << (bool)TailAgnostic << ", "
+       << "MaskAgnostic=" << (bool)MaskAgnostic << ", "
+       << "SEWLMULRatioOnly=" << (bool)SEWLMULRatioOnly << "}";
+  }
+#endif
+};
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+/// Stream a VSETVLIInfo via its print method (debug builds only).
+LLVM_ATTRIBUTE_USED
+inline raw_ostream &operator<<(raw_ostream &OS, const VSETVLIInfo &V) {
+  V.print(OS);
+  return OS;
+}
+#endif
+
+/// Per-basic-block dataflow state for the VL/VTYPE analysis.
+struct BlockData {
+  // The VSETVLIInfo that represents the net changes to the VL/VTYPE registers
+  // made by this block. Calculated in Phase 1.
+  VSETVLIInfo Change;
+
+  // The VSETVLIInfo that represents the VL/VTYPE settings on exit from this
+  // block. Calculated in Phase 2.
+  VSETVLIInfo Exit;
+
+  // The VSETVLIInfo that represents the VL/VTYPE settings from all predecessor
+  // blocks. Calculated in Phase 2, and used by Phase 3.
+  VSETVLIInfo Pred;
+
+  // Keeps track of whether the block is already in the queue.
+  bool InQueue = false;
+
+  BlockData() = default;
+};
+
+/// The machine-function pass driving the three phases described in the file
+/// header: collect per-block effects, propagate VL/VTYPE state across the
+/// CFG, then insert the vsetvli instructions.
+class RISCVInsertVSETVLI : public MachineFunctionPass {
+  const TargetInstrInfo *TII;
+  MachineRegisterInfo *MRI;
+
+  // Per-block dataflow state, indexed by MachineBasicBlock number.
+  std::vector<BlockData> BlockInfo;
+  // Blocks whose entry state may have changed and must be revisited.
+  std::queue<const MachineBasicBlock *> WorkList;
+
+public:
+  static char ID;
+
+  RISCVInsertVSETVLI() : MachineFunctionPass(ID) {
+    initializeRISCVInsertVSETVLIPass(*PassRegistry::getPassRegistry());
+  }
+  bool runOnMachineFunction(MachineFunction &MF) override;
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.setPreservesCFG();
+    MachineFunctionPass::getAnalysisUsage(AU);
+  }
+
+  StringRef getPassName() const override { return RISCV_INSERT_VSETVLI_NAME; }
+
+private:
+  bool needVSETVLI(const MachineInstr &MI, const VSETVLIInfo &Require,
+                   const VSETVLIInfo &CurInfo) const;
+  bool needVSETVLIPHI(const VSETVLIInfo &Require,
+                      const MachineBasicBlock &MBB) const;
+  void insertVSETVLI(MachineBasicBlock &MBB, MachineInstr &MI,
+                     const VSETVLIInfo &Info, const VSETVLIInfo &PrevInfo);
+  void insertVSETVLI(MachineBasicBlock &MBB,
+                     MachineBasicBlock::iterator InsertPt, DebugLoc DL,
+                     const VSETVLIInfo &Info, const VSETVLIInfo &PrevInfo);
+
+  void transferBefore(VSETVLIInfo &Info, const MachineInstr &MI);
+  void transferAfter(VSETVLIInfo &Info, const MachineInstr &MI);
+  bool computeVLVTYPEChanges(const MachineBasicBlock &MBB);
+  void computeIncomingVLVTYPE(const MachineBasicBlock &MBB);
+  void emitVSETVLIs(MachineBasicBlock &MBB);
+  void doLocalPostpass(MachineBasicBlock &MBB);
+  void doPRE(MachineBasicBlock &MBB);
+  void insertReadVL(MachineBasicBlock &MBB);
+};
+
+} // end anonymous namespace
+
+char RISCVInsertVSETVLI::ID = 0;
+
+// Register the pass with LLVM's pass registry.
+INITIALIZE_PASS(RISCVInsertVSETVLI, DEBUG_TYPE, RISCV_INSERT_VSETVLI_NAME,
+                false, false)
+
+/// Compute the VSETVLIInfo (AVL plus VTYPE fields) that vector instruction
+/// MI requests, derived from its operands and TSFlags.
+static VSETVLIInfo computeInfoForInstr(const MachineInstr &MI, uint64_t TSFlags,
+                                       const MachineRegisterInfo *MRI) {
+  VSETVLIInfo InstrInfo;
+
+  bool TailAgnostic, MaskAgnostic;
+  unsigned UseOpIdx;
+  if (MI.isRegTiedToUseOperand(0, &UseOpIdx)) {
+    // Start with undisturbed.
+    TailAgnostic = false;
+    MaskAgnostic = false;
+
+    // If there is a policy operand, use it.
+    if (RISCVII::hasVecPolicyOp(TSFlags)) {
+      const MachineOperand &Op = MI.getOperand(MI.getNumExplicitOperands() - 1);
+      uint64_t Policy = Op.getImm();
+      assert(Policy <= (RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC) &&
+             "Invalid Policy Value");
+      TailAgnostic = Policy & RISCVII::TAIL_AGNOSTIC;
+      MaskAgnostic = Policy & RISCVII::MASK_AGNOSTIC;
+    }
+
+    // If the tied operand is an IMPLICIT_DEF we can use TailAgnostic and
+    // MaskAgnostic.
+    const MachineOperand &UseMO = MI.getOperand(UseOpIdx);
+    MachineInstr *UseMI = MRI->getVRegDef(UseMO.getReg());
+    if (UseMI && UseMI->isImplicitDef()) {
+      TailAgnostic = true;
+      MaskAgnostic = true;
+    }
+    // Some pseudo instructions force a tail agnostic policy despite having a
+    // tied def.
+    if (RISCVII::doesForceTailAgnostic(TSFlags))
+      TailAgnostic = true;
+
+    if (!RISCVII::usesMaskPolicy(TSFlags))
+      MaskAgnostic = true;
+  } else {
+    // If there is no tied operand, there shouldn't be a policy operand.
+    assert(!RISCVII::hasVecPolicyOp(TSFlags) && "Unexpected policy operand");
+    // No tied operand use agnostic policies.
+    TailAgnostic = true;
+    MaskAgnostic = true;
+  }
+
+  RISCVII::VLMUL VLMul = RISCVII::getLMul(TSFlags);
+
+  unsigned Log2SEW = MI.getOperand(getSEWOpNum(MI)).getImm();
+  // A Log2SEW of 0 is an operation on mask registers only.
+  unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
+  assert(RISCVVType::isValidSEW(SEW) && "Unexpected SEW");
+
+  if (RISCVII::hasVLOp(TSFlags)) {
+    const MachineOperand &VLOp = MI.getOperand(getVLOpNum(MI));
+    if (VLOp.isImm()) {
+      int64_t Imm = VLOp.getImm();
+      // Convert the VLMax sentinel to the X0 register.
+      if (Imm == RISCV::VLMaxSentinel)
+        InstrInfo.setAVLReg(RISCV::X0);
+      else
+        InstrInfo.setAVLImm(Imm);
+    } else {
+      InstrInfo.setAVLReg(VLOp.getReg());
+    }
+  } else {
+    InstrInfo.setAVLReg(RISCV::NoRegister);
+  }
+#ifndef NDEBUG
+  if (std::optional<unsigned> EEW = getEEWForLoadStore(MI)) {
+    assert(SEW == EEW && "Initial SEW doesn't match expected EEW");
+  }
+#endif
+  InstrInfo.setVTYPE(VLMul, SEW, TailAgnostic, MaskAgnostic);
+
+  return InstrInfo;
+}
+
+// Convenience overload: insert the vsetvli immediately before MI, reusing
+// MI's debug location.
+void RISCVInsertVSETVLI::insertVSETVLI(MachineBasicBlock &MBB, MachineInstr &MI,
+                                       const VSETVLIInfo &Info,
+                                       const VSETVLIInfo &PrevInfo) {
+  insertVSETVLI(MBB, MachineBasicBlock::iterator(&MI), MI.getDebugLoc(), Info,
+                PrevInfo);
+}
+
+/// Materialize a vsetvli/vsetivli at InsertPt that establishes the state
+/// described by Info, choosing the cheapest encoding given the known
+/// preceding state PrevInfo.
+void RISCVInsertVSETVLI::insertVSETVLI(MachineBasicBlock &MBB,
+                     MachineBasicBlock::iterator InsertPt, DebugLoc DL,
+                     const VSETVLIInfo &Info, const VSETVLIInfo &PrevInfo) {
+
+  // Use X0, X0 form if the AVL is the same and the SEW+LMUL gives the same
+  // VLMAX.
+  if (PrevInfo.isValid() && !PrevInfo.isUnknown() &&
+      Info.hasSameAVL(PrevInfo) && Info.hasSameVLMAX(PrevInfo)) {
+    BuildMI(MBB, InsertPt, DL, TII->get(RISCV::PseudoVSETVLIX0))
+        .addReg(RISCV::X0, RegState::Define | RegState::Dead)
+        .addReg(RISCV::X0, RegState::Kill)
+        .addImm(Info.encodeVTYPE())
+        .addReg(RISCV::VL, RegState::Implicit);
+    return;
+  }
+
+  // Immediate AVLs fit in vsetivli directly.
+  if (Info.hasAVLImm()) {
+    BuildMI(MBB, InsertPt, DL, TII->get(RISCV::PseudoVSETIVLI))
+        .addReg(RISCV::X0, RegState::Define | RegState::Dead)
+        .addImm(Info.getAVLImm())
+        .addImm(Info.encodeVTYPE());
+    return;
+  }
+
+  Register AVLReg = Info.getAVLReg();
+  if (AVLReg == RISCV::NoRegister) {
+    // We can only use x0, x0 if there's no chance of the vtype change causing
+    // the previous vl to become invalid.
+    if (PrevInfo.isValid() && !PrevInfo.isUnknown() &&
+        Info.hasSameVLMAX(PrevInfo)) {
+      BuildMI(MBB, InsertPt, DL, TII->get(RISCV::PseudoVSETVLIX0))
+          .addReg(RISCV::X0, RegState::Define | RegState::Dead)
+          .addReg(RISCV::X0, RegState::Kill)
+          .addImm(Info.encodeVTYPE())
+          .addReg(RISCV::VL, RegState::Implicit);
+      return;
+    }
+    // Otherwise use an AVL of 0 to avoid depending on previous vl.
+    BuildMI(MBB, InsertPt, DL, TII->get(RISCV::PseudoVSETIVLI))
+        .addReg(RISCV::X0, RegState::Define | RegState::Dead)
+        .addImm(0)
+        .addImm(Info.encodeVTYPE());
+    return;
+  }
+
+  if (AVLReg.isVirtual())
+    MRI->constrainRegClass(AVLReg, &RISCV::GPRNoX0RegClass);
+
+  // Use X0 as the DestReg unless AVLReg is X0. We also need to change the
+  // opcode if the AVLReg is X0 as they have different register classes for
+  // the AVL operand.
+  Register DestReg = RISCV::X0;
+  unsigned Opcode = RISCV::PseudoVSETVLI;
+  if (AVLReg == RISCV::X0) {
+    DestReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
+    Opcode = RISCV::PseudoVSETVLIX0;
+  }
+  BuildMI(MBB, InsertPt, DL, TII->get(Opcode))
+      .addReg(DestReg, RegState::Define | RegState::Dead)
+      .addReg(AVLReg)
+      .addImm(Info.encodeVTYPE());
+}
+
+// Return a VSETVLIInfo representing the changes made by this VSETVLI or
+// VSETIVLI instruction.
+static VSETVLIInfo getInfoForVSETVLI(const MachineInstr &MI) {
+  VSETVLIInfo Result;
+
+  const unsigned Opc = MI.getOpcode();
+  if (Opc == RISCV::PseudoVSETIVLI) {
+    // vsetivli carries an immediate AVL.
+    Result.setAVLImm(MI.getOperand(1).getImm());
+  } else {
+    assert(Opc == RISCV::PseudoVSETVLI || Opc == RISCV::PseudoVSETVLIX0);
+    const Register AVLReg = MI.getOperand(1).getReg();
+    assert((AVLReg != RISCV::X0 || MI.getOperand(0).getReg() != RISCV::X0) &&
+           "Can't handle X0, X0 vsetvli yet");
+    Result.setAVLReg(AVLReg);
+  }
+  // Operand 2 is the encoded VTYPE immediate in all three forms.
+  Result.setVTYPE(MI.getOperand(2).getImm());
+
+  return Result;
+}
+
+/// Return true if a VSETVLI is required to transition from CurInfo to Require
+/// before MI.
+bool RISCVInsertVSETVLI::needVSETVLI(const MachineInstr &MI,
+                                     const VSETVLIInfo &Require,
+                                     const VSETVLIInfo &CurInfo) const {
+  assert(Require == computeInfoForInstr(MI, MI.getDesc().TSFlags, MRI));
+
+  // With no usable incoming state we must emit a vsetvli.
+  if (!CurInfo.isValid() || CurInfo.isUnknown() || CurInfo.hasSEWLMULRatioOnly())
+    return true;
+
+  DemandedFields Used = getDemanded(MI);
+
+  if (isScalarMoveInstr(MI)) {
+    // For vmv.s.x and vfmv.s.f, if writing to an implicit_def operand, we don't
+    // need to preserve any other bits and are thus compatible with any larger,
+    // etype and can disregard policy bits. Warning: It's tempting to try doing
+    // this for any tail agnostic operation, but we can't as TA requires
+    // tail lanes to either be the original value or -1. We are writing
+    // unknown bits to the lanes here.
+    auto *VRegDef = MRI->getVRegDef(MI.getOperand(1).getReg());
+    if (VRegDef && VRegDef->isImplicitDef() &&
+        CurInfo.getSEW() >= Require.getSEW()) {
+      Used.SEW = false;
+      Used.TailPolicy = false;
+    }
+  }
+
+  if (CurInfo.isCompatible(Used, Require))
+    return false;
+
+  // We didn't find a compatible value. If our AVL is a virtual register,
+  // it might be defined by a VSET(I)VLI. If it has the same VLMAX we need
+  // and the last VL/VTYPE we observed is the same, we don't need a
+  // VSETVLI here.
+  if (Require.hasAVLReg() && Require.getAVLReg().isVirtual() &&
+      CurInfo.hasCompatibleVTYPE(Used, Require)) {
+    if (MachineInstr *DefMI = MRI->getVRegDef(Require.getAVLReg())) {
+      if (isVectorConfigInstr(*DefMI)) {
+        VSETVLIInfo DefInfo = getInfoForVSETVLI(*DefMI);
+        if (DefInfo.hasSameAVL(CurInfo) && DefInfo.hasSameVLMAX(CurInfo))
+          return false;
+      }
+    }
+  }
+
+  return true;
+}
+
+// Given an incoming state reaching MI, modifies that state so that it is minimally
+// compatible with MI. The resulting state is guaranteed to be semantically legal
+// for MI, but may not be the state requested by MI.
+void RISCVInsertVSETVLI::transferBefore(VSETVLIInfo &Info, const MachineInstr &MI) {
+  uint64_t TSFlags = MI.getDesc().TSFlags;
+  // Non-vector instructions leave the state untouched here.
+  if (!RISCVII::hasSEWOp(TSFlags))
+    return;
+
+  const VSETVLIInfo NewInfo = computeInfoForInstr(MI, TSFlags, MRI);
+  if (Info.isValid() && !needVSETVLI(MI, NewInfo, Info))
+    return;
+
+  const VSETVLIInfo PrevInfo = Info;
+  Info = NewInfo;
+
+  if (!RISCVII::hasVLOp(TSFlags))
+    return;
+
+  // For vmv.s.x and vfmv.s.f, there are only two behaviors, VL = 0 and
+  // VL > 0. We can discard the user requested AVL and just use the last
+  // one if we can prove it equally zero. This removes a vsetvli entirely
+  // if the types match or allows use of cheaper avl preserving variant
+  // if VLMAX doesn't change. If VLMAX might change, we couldn't use
+  // the 'vsetvli x0, x0, vtype" variant, so we avoid the transform to
+  // prevent extending live range of an avl register operand.
+  // TODO: We can probably relax this for immediates.
+  if (isScalarMoveInstr(MI) && PrevInfo.isValid() &&
+      PrevInfo.hasEquallyZeroAVL(Info) &&
+      Info.hasSameVLMAX(PrevInfo)) {
+    if (PrevInfo.hasAVLImm())
+      Info.setAVLImm(PrevInfo.getAVLImm());
+    else
+      Info.setAVLReg(PrevInfo.getAVLReg());
+    return;
+  }
+
+  // If AVL is defined by a vsetvli with the same VLMAX, we can
+  // replace the AVL operand with the AVL of the defining vsetvli.
+  // We avoid general register AVLs to avoid extending live ranges
+  // without being sure we can kill the original source reg entirely.
+  if (!Info.hasAVLReg() || !Info.getAVLReg().isVirtual())
+    return;
+  MachineInstr *DefMI = MRI->getVRegDef(Info.getAVLReg());
+  if (!DefMI || !isVectorConfigInstr(*DefMI))
+    return;
+
+  VSETVLIInfo DefInfo = getInfoForVSETVLI(*DefMI);
+  if (DefInfo.hasSameVLMAX(Info) &&
+      (DefInfo.hasAVLImm() || DefInfo.getAVLReg() == RISCV::X0)) {
+    if (DefInfo.hasAVLImm())
+      Info.setAVLImm(DefInfo.getAVLImm());
+    else
+      Info.setAVLReg(DefInfo.getAVLReg());
+    return;
+  }
+}
+
+// Given a state with which we evaluated MI (see transferBefore above for why
+// this might be different than the state MI requested), modify the state to
+// reflect the changes MI might make.
+void RISCVInsertVSETVLI::transferAfter(VSETVLIInfo &Info, const MachineInstr &MI) {
+  // An explicit vsetvli/vsetivli fully determines the new state.
+  if (isVectorConfigInstr(MI)) {
+    Info = getInfoForVSETVLI(MI);
+    return;
+  }
+
+  if (RISCV::isFaultFirstLoad(MI)) {
+    // Update AVL to vl-output of the fault first load.
+    Info.setAVLReg(MI.getOperand(1).getReg());
+    return;
+  }
+
+  // If this is something that updates VL/VTYPE that we don't know about, set
+  // the state to unknown.
+  if (MI.isCall() || MI.isInlineAsm() || MI.modifiesRegister(RISCV::VL) ||
+      MI.modifiesRegister(RISCV::VTYPE))
+    Info = VSETVLIInfo::getUnknown();
+}
+
+// Phase 1: apply the per-instruction transfer functions across MBB to
+// compute the block's net Change state. Returns true if the block contains
+// any instruction that touches the vector configuration.
+bool RISCVInsertVSETVLI::computeVLVTYPEChanges(const MachineBasicBlock &MBB) {
+  BlockData &Data = BlockInfo[MBB.getNumber()];
+  Data.Change = Data.Pred;
+
+  bool SawVectorOp = false;
+  for (const MachineInstr &MI : MBB) {
+    transferBefore(Data.Change, MI);
+    SawVectorOp |=
+        isVectorConfigInstr(MI) || RISCVII::hasSEWOp(MI.getDesc().TSFlags);
+    transferAfter(Data.Change, MI);
+  }
+
+  return SawVectorOp;
+}
+
+/// Phase 2 worklist step: recompute MBB's entry state as the meet of its
+/// predecessors' exit states and, if the exit state changes as a result,
+/// queue the successors for revisiting.
+void RISCVInsertVSETVLI::computeIncomingVLVTYPE(const MachineBasicBlock &MBB) {
+
+  BlockData &BBInfo = BlockInfo[MBB.getNumber()];
+
+  BBInfo.InQueue = false;
+
+  // Start with the previous entry so that we keep the most conservative state
+  // we have ever found.
+  VSETVLIInfo InInfo = BBInfo.Pred;
+  if (MBB.pred_empty()) {
+    // There are no predecessors, so use the default starting status.
+    InInfo.setUnknown();
+  } else {
+    for (MachineBasicBlock *P : MBB.predecessors())
+      InInfo = InInfo.intersect(BlockInfo[P->getNumber()].Exit);
+  }
+
+  // If we don't have any valid predecessor value, wait until we do.
+  if (!InInfo.isValid())
+    return;
+
+  // If no change, no need to rerun block
+  if (InInfo == BBInfo.Pred)
+    return;
+
+  BBInfo.Pred = InInfo;
+  LLVM_DEBUG(dbgs() << "Entry state of " << printMBBReference(MBB)
+                    << " changed to " << BBInfo.Pred << "\n");
+
+  // Note: It's tempting to cache the state changes here, but due to the
+  // compatibility checks performed a blocks output state can change based on
+  // the input state. To cache, we'd have to add logic for finding
+  // never-compatible state changes.
+  computeVLVTYPEChanges(MBB);
+  VSETVLIInfo TmpStatus = BBInfo.Change;
+
+  // If the new exit value matches the old exit value, we don't need to revisit
+  // any blocks.
+  if (BBInfo.Exit == TmpStatus)
+    return;
+
+  BBInfo.Exit = TmpStatus;
+  LLVM_DEBUG(dbgs() << "Exit state of " << printMBBReference(MBB)
+                    << " changed to " << BBInfo.Exit << "\n");
+
+  // Add the successors to the work list so we can propagate the changed exit
+  // status.
+  for (MachineBasicBlock *S : MBB.successors())
+    if (!BlockInfo[S->getNumber()].InQueue) {
+      BlockInfo[S->getNumber()].InQueue = true;
+      WorkList.push(S);
+    }
+}
+
+// If we weren't able to prove a vsetvli was directly unneeded, it might still
+// be unneeded if the AVL is a phi node where all incoming values are VL
+// outputs from the last VSETVLI in their respective basic blocks.
+bool RISCVInsertVSETVLI::needVSETVLIPHI(const VSETVLIInfo &Require,
+                                        const MachineBasicBlock &MBB) const {
+  if (DisableInsertVSETVLPHIOpt)
+    return true;
+
+  if (!Require.hasAVLReg())
+    return true;
+
+  Register AVLReg = Require.getAVLReg();
+  if (!AVLReg.isVirtual())
+    return true;
+
+  // We need the AVL to be produced by a PHI node in this basic block.
+  MachineInstr *PHI = MRI->getVRegDef(AVLReg);
+  if (!PHI || PHI->getOpcode() != RISCV::PHI || PHI->getParent() != &MBB)
+    return true;
+
+  // PHI operands come in (value, predecessor-block) pairs starting at index 1.
+  for (unsigned PHIOp = 1, NumOps = PHI->getNumOperands(); PHIOp != NumOps;
+       PHIOp += 2) {
+    Register InReg = PHI->getOperand(PHIOp).getReg();
+    MachineBasicBlock *PBB = PHI->getOperand(PHIOp + 1).getMBB();
+    const BlockData &PBBInfo = BlockInfo[PBB->getNumber()];
+    // If the exit from the predecessor has the VTYPE we are looking for
+    // we might be able to avoid a VSETVLI.
+    if (PBBInfo.Exit.isUnknown() || !PBBInfo.Exit.hasSameVTYPE(Require))
+      return true;
+
+    // We need the PHI input to be the output of a VSET(I)VLI.
+    MachineInstr *DefMI = MRI->getVRegDef(InReg);
+    if (!DefMI || !isVectorConfigInstr(*DefMI))
+      return true;
+
+    // We found a VSET(I)VLI make sure it matches the output of the
+    // predecessor block.
+    VSETVLIInfo DefInfo = getInfoForVSETVLI(*DefMI);
+    if (!DefInfo.hasSameAVL(PBBInfo.Exit) ||
+        !DefInfo.hasSameVTYPE(PBBInfo.Exit))
+      return true;
+  }
+
+  // If all the incoming values to the PHI checked out, we don't need
+  // to insert a VSETVLI.
+  return false;
+}
+
+// Phase 3: walk the block with the abstract VL/VTYPE state and materialize a
+// vsetvli wherever an instruction's requirements differ from the current
+// state. Also rewrites vector instructions to carry implicit VL/VTYPE uses
+// instead of an explicit AVL register operand.
+void RISCVInsertVSETVLI::emitVSETVLIs(MachineBasicBlock &MBB) {
+ VSETVLIInfo CurInfo = BlockInfo[MBB.getNumber()].Pred;
+ // Track whether the prefix of the block we've scanned is transparent
+ // (meaning has not yet changed the abstract state).
+ bool PrefixTransparent = true;
+ for (MachineInstr &MI : MBB) {
+ const VSETVLIInfo PrevInfo = CurInfo;
+ transferBefore(CurInfo, MI);
+
+ // If this is an explicit VSETVLI or VSETIVLI, update our state.
+ if (isVectorConfigInstr(MI)) {
+ // Conservatively, mark the VL and VTYPE as live.
+ assert(MI.getOperand(3).getReg() == RISCV::VL &&
+ MI.getOperand(4).getReg() == RISCV::VTYPE &&
+ "Unexpected operands where VL and VTYPE should be");
+ MI.getOperand(3).setIsDead(false);
+ MI.getOperand(4).setIsDead(false);
+ PrefixTransparent = false;
+ }
+
+ uint64_t TSFlags = MI.getDesc().TSFlags;
+ if (RISCVII::hasSEWOp(TSFlags)) {
+ if (PrevInfo != CurInfo) {
+ // If this is the first implicit state change, and the state change
+ // requested can be proven to produce the same register contents, we
+ // can skip emitting the actual state change and continue as if we
+ // had since we know the GPR result of the implicit state change
+ // wouldn't be used and VL/VTYPE registers are correct. Note that
+ // we *do* need to model the state as if it changed as while the
+ // register contents are unchanged, the abstract model can change.
+ if (!PrefixTransparent || needVSETVLIPHI(CurInfo, MBB))
+ insertVSETVLI(MBB, MI, CurInfo, PrevInfo);
+ PrefixTransparent = false;
+ }
+
+ if (RISCVII::hasVLOp(TSFlags)) {
+ MachineOperand &VLOp = MI.getOperand(getVLOpNum(MI));
+ if (VLOp.isReg()) {
+ // Erase the AVL operand from the instruction.
+ VLOp.setReg(RISCV::NoRegister);
+ VLOp.setIsKill(false);
+ }
+ // The AVL is now carried by the VL register set up above; record the
+ // dependence as an implicit use.
+ MI.addOperand(MachineOperand::CreateReg(RISCV::VL, /*isDef*/ false,
+ /*isImp*/ true));
+ }
+ MI.addOperand(MachineOperand::CreateReg(RISCV::VTYPE, /*isDef*/ false,
+ /*isImp*/ true));
+ }
+
+ // Calls, inline asm, and anything clobbering VL/VTYPE end the transparent
+ // prefix: after them the concrete registers may not match the entry state.
+ if (MI.isCall() || MI.isInlineAsm() || MI.modifiesRegister(RISCV::VL) ||
+ MI.modifiesRegister(RISCV::VTYPE))
+ PrefixTransparent = false;
+
+ transferAfter(CurInfo, MI);
+ }
+
+ // If we reach the end of the block and our current info doesn't match the
+ // expected info, insert a vsetvli to correct.
+ if (!UseStrictAsserts) {
+ const VSETVLIInfo &ExitInfo = BlockInfo[MBB.getNumber()].Exit;
+ if (CurInfo.isValid() && ExitInfo.isValid() && !ExitInfo.isUnknown() &&
+ CurInfo != ExitInfo) {
+ // Note there's an implicit assumption here that terminators never use
+ // or modify VL or VTYPE. Also, fallthrough will return end().
+ auto InsertPt = MBB.getFirstInstrTerminator();
+ insertVSETVLI(MBB, InsertPt, MBB.findDebugLoc(InsertPt), ExitInfo,
+ CurInfo);
+ CurInfo = ExitInfo;
+ }
+ }
+
+ if (UseStrictAsserts && CurInfo.isValid()) {
+ const auto &Info = BlockInfo[MBB.getNumber()];
+ if (CurInfo != Info.Exit) {
+ LLVM_DEBUG(dbgs() << "in block " << printMBBReference(MBB) << "\n");
+ LLVM_DEBUG(dbgs() << " begin state: " << Info.Pred << "\n");
+ LLVM_DEBUG(dbgs() << " expected end state: " << Info.Exit << "\n");
+ LLVM_DEBUG(dbgs() << " actual end state: " << CurInfo << "\n");
+ }
+ assert(CurInfo == Info.Exit &&
+ "InsertVSETVLI dataflow invariant violated");
+ }
+}
+
+/// Return true if the VL value configured must be equal to the requested one.
+/// That is, the vsetvli cannot clamp VL below the requested AVL: either the
+/// AVL is X0 (VLMAX, always the same for a fixed configuration) or the
+/// immediate AVL provably fits within the minimum vector register length.
+static bool hasFixedResult(const VSETVLIInfo &Info, const RISCVSubtarget &ST) {
+ if (!Info.hasAVLImm())
+ // VLMAX is always the same value.
+ // TODO: Could extend to other registers by looking at the associated vreg
+ // def placement.
+ return RISCV::X0 == Info.getAVLReg();
+
+ unsigned AVL = Info.getAVLImm();
+ unsigned SEW = Info.getSEW();
+ unsigned AVLInBits = AVL * SEW;
+
+ unsigned LMul;
+ bool Fractional;
+ std::tie(LMul, Fractional) = RISCVVType::decodeVLMUL(Info.getVLMUL());
+
+ // Compare requested bits against the guaranteed register-group capacity:
+ // VLEN/LMul elements for fractional LMUL, VLEN*LMul otherwise.
+ if (Fractional)
+ return ST.getRealMinVLen() / LMul >= AVLInBits;
+ return ST.getRealMinVLen() * LMul >= AVLInBits;
+}
+
+/// Perform simple partial redundancy elimination of the VSETVLI instructions
+/// we're about to insert by looking for cases where we can PRE from the
+/// beginning of one block to the end of one of its predecessors. Specifically,
+/// this is geared to catch the common case of a fixed length vsetvl in a single
+/// block loop when it could execute once in the preheader instead.
+void RISCVInsertVSETVLI::doPRE(MachineBasicBlock &MBB) {
+ const MachineFunction &MF = *MBB.getParent();
+ const RISCVSubtarget &ST = MF.getSubtarget<RISCVSubtarget>();
+
+ // Only profitable when the merged entry state is currently unknown.
+ if (!BlockInfo[MBB.getNumber()].Pred.isUnknown())
+ return;
+
+ // Find the unique predecessor lacking the state (the PRE insertion point);
+ // all other predecessors must agree on a single available state.
+ MachineBasicBlock *UnavailablePred = nullptr;
+ VSETVLIInfo AvailableInfo;
+ for (MachineBasicBlock *P : MBB.predecessors()) {
+ const VSETVLIInfo &PredInfo = BlockInfo[P->getNumber()].Exit;
+ if (PredInfo.isUnknown()) {
+ if (UnavailablePred)
+ return;
+ UnavailablePred = P;
+ } else if (!AvailableInfo.isValid()) {
+ AvailableInfo = PredInfo;
+ } else if (AvailableInfo != PredInfo) {
+ return;
+ }
+ }
+
+ // Unreachable, single pred, or full redundancy. Note that FRE is handled by
+ // phase 3.
+ if (!UnavailablePred || !AvailableInfo.isValid())
+ return;
+
+ // Critical edge - TODO: consider splitting?
+ if (UnavailablePred->succ_size() != 1)
+ return;
+
+ // If VL can be less than AVL, then we can't reduce the frequency of exec.
+ if (!hasFixedResult(AvailableInfo, ST))
+ return;
+
+ // Does it actually let us remove an implicit transition in MBB?
+ bool Found = false;
+ for (auto &MI : MBB) {
+ if (isVectorConfigInstr(MI))
+ return;
+
+ const uint64_t TSFlags = MI.getDesc().TSFlags;
+ if (RISCVII::hasSEWOp(TSFlags)) {
+ if (AvailableInfo != computeInfoForInstr(MI, TSFlags, MRI))
+ return;
+ Found = true;
+ break;
+ }
+ }
+ if (!Found)
+ return;
+
+ // Finally, update both data flow state and insert the actual vsetvli.
+ // Doing both keeps the code in sync with the dataflow results, which
+ // is critical for correctness of phase 3.
+ auto OldInfo = BlockInfo[UnavailablePred->getNumber()].Exit;
+ LLVM_DEBUG(dbgs() << "PRE VSETVLI from " << MBB.getName() << " to "
+ << UnavailablePred->getName() << " with state "
+ << AvailableInfo << "\n");
+ BlockInfo[UnavailablePred->getNumber()].Exit = AvailableInfo;
+ BlockInfo[MBB.getNumber()].Pred = AvailableInfo;
+
+ // Note there's an implicit assumption here that terminators never use
+ // or modify VL or VTYPE. Also, fallthrough will return end().
+ auto InsertPt = UnavailablePred->getFirstInstrTerminator();
+ insertVSETVLI(*UnavailablePred, InsertPt,
+ UnavailablePred->findDebugLoc(InsertPt),
+ AvailableInfo, OldInfo);
+}
+
+// Accumulate the demanded fields of B into A (field-wise logical OR), so A
+// ends up demanding everything either input demanded.
+static void doUnion(DemandedFields &A, DemandedFields B) {
+ A.VLAny |= B.VLAny;
+ A.VLZeroness |= B.VLZeroness;
+ A.SEW |= B.SEW;
+ A.LMUL |= B.LMUL;
+ A.SEWLMULRatio |= B.SEWLMULRatio;
+ A.TailPolicy |= B.TailPolicy;
+ A.MaskPolicy |= B.MaskPolicy;
+}
+
+// Return true if the AVL operand is known non-zero: register AVLs count only
+// when the register is X0 (which selects VLMAX), immediates when non-zero.
+static bool isNonZeroAVL(const MachineOperand &MO) {
+ if (MO.isReg())
+ return RISCV::X0 == MO.getReg();
+ assert(MO.isImm());
+ return 0 != MO.getImm();
+}
+
+// Return true if we can mutate PrevMI to match MI without changing any the
+// fields which would be observed. Used is the set of VL/VTYPE fields demanded
+// by the instructions between PrevMI and MI.
+static bool canMutatePriorConfig(const MachineInstr &PrevMI,
+ const MachineInstr &MI,
+ const DemandedFields &Used) {
+ // If the VL values aren't equal, return false if either a) the former is
+ // demanded, or b) we can't rewrite the former to be the later for
+ // implementation reasons.
+ if (!isVLPreservingConfig(MI)) {
+ if (Used.VLAny)
+ return false;
+
+ // TODO: Requires more care in the mutation...
+ if (isVLPreservingConfig(PrevMI))
+ return false;
+
+ // We don't bother to handle the equally zero case here as it's largely
+ // uninteresting.
+ if (Used.VLZeroness &&
+ (!isNonZeroAVL(MI.getOperand(1)) ||
+ !isNonZeroAVL(PrevMI.getOperand(1))))
+ return false;
+
+ // TODO: Track whether the register is defined between
+ // PrevMI and MI.
+ if (MI.getOperand(1).isReg() &&
+ RISCV::X0 != MI.getOperand(1).getReg())
+ return false;
+
+ // TODO: We need to change the result register to allow this rewrite
+ // without the result forming a vl preserving vsetvli which is not
+ // a correct state merge.
+ if (PrevMI.getOperand(0).getReg() == RISCV::X0 &&
+ MI.getOperand(1).isReg())
+ return false;
+ }
+
+ // Both VTYPE operands must be immediates before we can compare them.
+ if (!PrevMI.getOperand(2).isImm() || !MI.getOperand(2).isImm())
+ return false;
+
+ auto PriorVType = PrevMI.getOperand(2).getImm();
+ auto VType = MI.getOperand(2).getImm();
+ return areCompatibleVTYPEs(PriorVType, VType, Used);
+}
+
+// Block-local cleanup after phase 3: walk the block backwards tracking which
+// VL/VTYPE fields are still demanded, deleting vsetvlis whose effects are
+// entirely shadowed and merging adjacent compatible configuration changes.
+void RISCVInsertVSETVLI::doLocalPostpass(MachineBasicBlock &MBB) {
+ MachineInstr *NextMI = nullptr;
+ // We can have arbitrary code in successors, so VL and VTYPE
+ // must be considered demanded.
+ DemandedFields Used;
+ Used.demandVL();
+ Used.demandVTYPE();
+ SmallVector<MachineInstr*> ToDelete;
+ for (MachineInstr &MI : make_range(MBB.rbegin(), MBB.rend())) {
+
+ if (!isVectorConfigInstr(MI)) {
+ doUnion(Used, getDemanded(MI));
+ continue;
+ }
+
+ // If the GPR result of the vsetvli has real (non-debug) uses, its VL
+ // output is observable and must be preserved.
+ Register VRegDef = MI.getOperand(0).getReg();
+ if (VRegDef != RISCV::X0 &&
+ !(VRegDef.isVirtual() && MRI->use_nodbg_empty(VRegDef)))
+ Used.demandVL();
+
+ if (NextMI) {
+ if (!Used.usedVL() && !Used.usedVTYPE()) {
+ // Nothing between MI and NextMI cares about the state MI sets up.
+ ToDelete.push_back(&MI);
+ // Leave NextMI unchanged
+ continue;
+ } else if (canMutatePriorConfig(MI, *NextMI, Used)) {
+ // Fold NextMI's configuration into MI and delete NextMI.
+ if (!isVLPreservingConfig(*NextMI)) {
+ if (NextMI->getOperand(1).isImm())
+ MI.getOperand(1).ChangeToImmediate(NextMI->getOperand(1).getImm());
+ else
+ MI.getOperand(1).ChangeToRegister(NextMI->getOperand(1).getReg(), false);
+ MI.setDesc(NextMI->getDesc());
+ }
+ MI.getOperand(2).setImm(NextMI->getOperand(2).getImm());
+ ToDelete.push_back(NextMI);
+ // fallthrough
+ }
+ }
+ NextMI = &MI;
+ Used = getDemanded(MI);
+ }
+
+ // Deletion is deferred so the reverse iteration above stays valid.
+ for (auto *MI : ToDelete)
+ MI->eraseFromParent();
+}
+
+// For fault-only-first loads (VLEFF/VLSEGFF), replace uses of their VL output
+// with a PseudoReadVL inserted right after the load, then discard the load's
+// own VL result by redirecting it to X0.
+void RISCVInsertVSETVLI::insertReadVL(MachineBasicBlock &MBB) {
+ // Pre-increment the iterator so inserting after MI doesn't disturb the walk.
+ for (auto I = MBB.begin(), E = MBB.end(); I != E;) {
+ MachineInstr &MI = *I++;
+ if (RISCV::isFaultFirstLoad(MI)) {
+ Register VLOutput = MI.getOperand(1).getReg();
+ // Only emit the read if something actually consumes the VL value.
+ if (!MRI->use_nodbg_empty(VLOutput))
+ BuildMI(MBB, I, MI.getDebugLoc(), TII->get(RISCV::PseudoReadVL),
+ VLOutput);
+ // We don't use the vl output of the VLEFF/VLSEGFF anymore.
+ MI.getOperand(1).setReg(RISCV::X0);
+ }
+ }
+}
+
+// Pass entry point. Runs the three dataflow phases (local transfer, global
+// fixpoint, emission), then the PRE, local-postpass, dead-destination, and
+// fault-first-load cleanups. Returns true if the function was modified.
+bool RISCVInsertVSETVLI::runOnMachineFunction(MachineFunction &MF) {
+ // Skip if the vector extension is not enabled.
+ const RISCVSubtarget &ST = MF.getSubtarget<RISCVSubtarget>();
+ if (!ST.hasVInstructions())
+ return false;
+
+ LLVM_DEBUG(dbgs() << "Entering InsertVSETVLI for " << MF.getName() << "\n");
+
+ TII = ST.getInstrInfo();
+ MRI = &MF.getRegInfo();
+
+ assert(BlockInfo.empty() && "Expect empty block infos");
+ BlockInfo.resize(MF.getNumBlockIDs());
+
+ bool HaveVectorOp = false;
+
+ // Phase 1 - determine how VL/VTYPE are affected by the each block.
+ for (const MachineBasicBlock &MBB : MF) {
+ HaveVectorOp |= computeVLVTYPEChanges(MBB);
+ // Initial exit state is whatever change we found in the block.
+ BlockData &BBInfo = BlockInfo[MBB.getNumber()];
+ BBInfo.Exit = BBInfo.Change;
+ LLVM_DEBUG(dbgs() << "Initial exit state of " << printMBBReference(MBB)
+ << " is " << BBInfo.Exit << "\n");
+
+ }
+
+ // If we didn't find any instructions that need VSETVLI, we're done.
+ if (!HaveVectorOp) {
+ BlockInfo.clear();
+ return false;
+ }
+
+ // Phase 2 - determine the exit VL/VTYPE from each block. We add all
+ // blocks to the list here, but will also add any that need to be revisited
+ // during Phase 2 processing.
+ for (const MachineBasicBlock &MBB : MF) {
+ WorkList.push(&MBB);
+ BlockInfo[MBB.getNumber()].InQueue = true;
+ }
+ while (!WorkList.empty()) {
+ const MachineBasicBlock &MBB = *WorkList.front();
+ WorkList.pop();
+ computeIncomingVLVTYPE(MBB);
+ }
+
+ // Perform partial redundancy elimination of vsetvli transitions.
+ for (MachineBasicBlock &MBB : MF)
+ doPRE(MBB);
+
+ // Phase 3 - add any vsetvli instructions needed in the block. Use the
+ // Phase 2 information to avoid adding vsetvlis before the first vector
+ // instruction in the block if the VL/VTYPE is satisfied by its
+ // predecessors.
+ for (MachineBasicBlock &MBB : MF)
+ emitVSETVLIs(MBB);
+
+ // Now that all vsetvlis are explicit, go through and do block local
+ // DSE and peephole based demanded fields based transforms. Note that
+ // this *must* be done outside the main dataflow so long as we allow
+ // any cross block analysis within the dataflow. We can't have both
+ // demanded fields based mutation and non-local analysis in the
+ // dataflow at the same time without introducing inconsistencies.
+ for (MachineBasicBlock &MBB : MF)
+ doLocalPostpass(MBB);
+
+ // Once we're fully done rewriting all the instructions, do a final pass
+ // through to check for VSETVLIs which write to an unused destination.
+ // For the non X0, X0 variant, we can replace the destination register
+ // with X0 to reduce register pressure. This is really a generic
+ // optimization which can be applied to any dead def (TODO: generalize).
+ for (MachineBasicBlock &MBB : MF) {
+ for (MachineInstr &MI : MBB) {
+ if (MI.getOpcode() == RISCV::PseudoVSETVLI ||
+ MI.getOpcode() == RISCV::PseudoVSETIVLI) {
+ Register VRegDef = MI.getOperand(0).getReg();
+ if (VRegDef != RISCV::X0 && MRI->use_nodbg_empty(VRegDef))
+ MI.getOperand(0).setReg(RISCV::X0);
+ }
+ }
+ }
+
+ // Insert PseudoReadVL after VLEFF/VLSEGFF and replace it with the vl output
+ // of VLEFF/VLSEGFF.
+ for (MachineBasicBlock &MBB : MF)
+ insertReadVL(MBB);
+
+ BlockInfo.clear();
+ return HaveVectorOp;
+}
+
+/// Returns an instance of the Insert VSETVLI pass.
+/// The caller (pass manager) takes ownership of the returned object.
+FunctionPass *llvm::createRISCVInsertVSETVLIPass() {
+ return new RISCVInsertVSETVLI();
+}
diff --git a/contrib/libs/llvm16/lib/Target/RISCV/RISCVInstrInfo.cpp b/contrib/libs/llvm16/lib/Target/RISCV/RISCVInstrInfo.cpp
new file mode 100644
index 0000000000..6494c9a2cd
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -0,0 +1,2869 @@
+//===-- RISCVInstrInfo.cpp - RISCV Instruction Information ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the RISCV implementation of the TargetInstrInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RISCVInstrInfo.h"
+#include "MCTargetDesc/RISCVMatInt.h"
+#include "RISCV.h"
+#include "RISCVMachineFunctionInfo.h"
+#include "RISCVSubtarget.h"
+#include "RISCVTargetMachine.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/MemoryLocation.h"
+#include "llvm/CodeGen/LiveIntervals.h"
+#include "llvm/CodeGen/LiveVariables.h"
+#include "llvm/CodeGen/MachineCombinerPattern.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/RegisterScavenging.h"
+#include "llvm/IR/DebugInfoMetadata.h"
+#include "llvm/MC/MCInstBuilder.h"
+#include "llvm/MC/TargetRegistry.h"
+#include "llvm/Support/ErrorHandling.h"
+
+using namespace llvm;
+
+#define GEN_CHECK_COMPRESS_INSTR
+#include "RISCVGenCompressInstEmitter.inc"
+
+#define GET_INSTRINFO_CTOR_DTOR
+#define GET_INSTRINFO_NAMED_OPS
+#include "RISCVGenInstrInfo.inc"
+
+// Debug/tuning knob: when set, copyPhysReg keeps vmv<N>r.v whole-register
+// moves instead of narrowing them to vmv.v.v (see isConvertibleToVMV_V_V).
+static cl::opt<bool> PreferWholeRegisterMove(
+ "riscv-prefer-whole-register-move", cl::init(false), cl::Hidden,
+ cl::desc("Prefer whole register move for vector registers."));
+
+// TableGen-generated lookup table mapping vector pseudo-instructions to their
+// properties.
+namespace llvm::RISCVVPseudosTable {
+
+using namespace RISCV;
+
+#define GET_RISCVVPseudosTable_IMPL
+#include "RISCVGenSearchableTables.inc"
+
+} // namespace llvm::RISCVVPseudosTable
+
+// Constructor: registers the call-frame setup/destroy pseudo opcodes with the
+// generated base class and keeps a reference to the subtarget.
+RISCVInstrInfo::RISCVInstrInfo(RISCVSubtarget &STI)
+ : RISCVGenInstrInfo(RISCV::ADJCALLSTACKDOWN, RISCV::ADJCALLSTACKUP),
+ STI(STI) {}
+
+// Return the canonical NOP: compressed c.nop when the C/Zca extension is
+// available, otherwise the standard `addi x0, x0, 0` encoding.
+MCInst RISCVInstrInfo::getNop() const {
+ if (STI.hasStdExtCOrZca())
+ return MCInstBuilder(RISCV::C_NOP);
+ return MCInstBuilder(RISCV::ADDI)
+ .addReg(RISCV::X0)
+ .addReg(RISCV::X0)
+ .addImm(0);
+}
+
+// If MI is a plain load (integer or FP) from a stack slot at offset 0,
+// return the loaded register and set FrameIndex; otherwise return 0.
+unsigned RISCVInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
+ int &FrameIndex) const {
+ switch (MI.getOpcode()) {
+ default:
+ return 0;
+ case RISCV::LB:
+ case RISCV::LBU:
+ case RISCV::LH:
+ case RISCV::LHU:
+ case RISCV::FLH:
+ case RISCV::LW:
+ case RISCV::FLW:
+ case RISCV::LWU:
+ case RISCV::LD:
+ case RISCV::FLD:
+ break;
+ }
+
+ // Operands: (dst, base, offset); only a frame-index base with zero offset
+ // qualifies as a direct stack-slot access.
+ if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
+ MI.getOperand(2).getImm() == 0) {
+ FrameIndex = MI.getOperand(1).getIndex();
+ return MI.getOperand(0).getReg();
+ }
+
+ return 0;
+}
+
+// If MI is a plain store (integer or FP) to a stack slot at offset 0,
+// return the stored register and set FrameIndex; otherwise return 0.
+unsigned RISCVInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
+ int &FrameIndex) const {
+ switch (MI.getOpcode()) {
+ default:
+ return 0;
+ case RISCV::SB:
+ case RISCV::SH:
+ case RISCV::SW:
+ case RISCV::FSH:
+ case RISCV::FSW:
+ case RISCV::SD:
+ case RISCV::FSD:
+ break;
+ }
+
+ // Operands: (src, base, offset); only a frame-index base with zero offset
+ // qualifies as a direct stack-slot access.
+ if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
+ MI.getOperand(2).getImm() == 0) {
+ FrameIndex = MI.getOperand(1).getIndex();
+ return MI.getOperand(0).getReg();
+ }
+
+ return 0;
+}
+
+// When copying a register tuple of NumRegs consecutive registers in forward
+// order, return true if the destination range overlaps the (higher-numbered)
+// source range, i.e. a forward copy would overwrite source registers before
+// reading them — in which case the copy must be emitted in reverse.
+static bool forwardCopyWillClobberTuple(unsigned DstReg, unsigned SrcReg,
+ unsigned NumRegs) {
+ return DstReg > SrcReg && (DstReg - SrcReg) < NumRegs;
+}
+
+// Decide whether a whole-register vector COPY ending at MBBI can be narrowed
+// to a vmv.v.v of the active VL elements. Scans backwards from the COPY to
+// find the producing instruction; on success sets DefMBBI to that definition
+// and returns true. Any vsetvli, call, inline asm, or VL-clobber encountered
+// on the way can force the conservative answer (keep the whole-register move).
+static bool isConvertibleToVMV_V_V(const RISCVSubtarget &STI,
+ const MachineBasicBlock &MBB,
+ MachineBasicBlock::const_iterator MBBI,
+ MachineBasicBlock::const_iterator &DefMBBI,
+ RISCVII::VLMUL LMul) {
+ if (PreferWholeRegisterMove)
+ return false;
+
+ assert(MBBI->getOpcode() == TargetOpcode::COPY &&
+ "Unexpected COPY instruction.");
+ Register SrcReg = MBBI->getOperand(1).getReg();
+ const TargetRegisterInfo *TRI = STI.getRegisterInfo();
+
+ bool FoundDef = false;
+ bool FirstVSetVLI = false;
+ unsigned FirstSEW = 0;
+ while (MBBI != MBB.begin()) {
+ --MBBI;
+ if (MBBI->isMetaInstruction())
+ continue;
+
+ if (MBBI->getOpcode() == RISCV::PseudoVSETVLI ||
+ MBBI->getOpcode() == RISCV::PseudoVSETVLIX0 ||
+ MBBI->getOpcode() == RISCV::PseudoVSETIVLI) {
+ // There is a vsetvli between COPY and source define instruction.
+ // vy = def_vop ... (producing instruction)
+ // ...
+ // vsetvli
+ // ...
+ // vx = COPY vy
+ if (!FoundDef) {
+ if (!FirstVSetVLI) {
+ FirstVSetVLI = true;
+ unsigned FirstVType = MBBI->getOperand(2).getImm();
+ RISCVII::VLMUL FirstLMul = RISCVVType::getVLMUL(FirstVType);
+ FirstSEW = RISCVVType::getSEW(FirstVType);
+ // The first encountered vsetvli must have the same lmul as the
+ // register class of COPY.
+ if (FirstLMul != LMul)
+ return false;
+ }
+ // Only permit `vsetvli x0, x0, vtype` between COPY and the source
+ // define instruction.
+ if (MBBI->getOperand(0).getReg() != RISCV::X0)
+ return false;
+ if (MBBI->getOperand(1).isImm())
+ return false;
+ if (MBBI->getOperand(1).getReg() != RISCV::X0)
+ return false;
+ continue;
+ }
+
+ // MBBI is the first vsetvli before the producing instruction.
+ unsigned VType = MBBI->getOperand(2).getImm();
+ // If there is a vsetvli between COPY and the producing instruction.
+ if (FirstVSetVLI) {
+ // If SEW is different, return false.
+ if (RISCVVType::getSEW(VType) != FirstSEW)
+ return false;
+ }
+
+ // If the vsetvli is tail undisturbed, keep the whole register move.
+ if (!RISCVVType::isTailAgnostic(VType))
+ return false;
+
+ // The checking is conservative. We only have register classes for
+ // LMUL = 1/2/4/8. We should be able to convert vmv1r.v to vmv.v.v
+ // for fractional LMUL operations. However, we could not use the vsetvli
+ // lmul for widening operations. The result of widening operation is
+ // 2 x LMUL.
+ return LMul == RISCVVType::getVLMUL(VType);
+ } else if (MBBI->isInlineAsm() || MBBI->isCall()) {
+ return false;
+ } else if (MBBI->getNumDefs()) {
+ // Check all the instructions which will change VL.
+ // For example, vleff has implicit def VL.
+ if (MBBI->modifiesRegister(RISCV::VL))
+ return false;
+
+ // Only converting whole register copies to vmv.v.v when the defining
+ // value appears in the explicit operands.
+ for (const MachineOperand &MO : MBBI->explicit_operands()) {
+ if (!MO.isReg() || !MO.isDef())
+ continue;
+ if (!FoundDef && TRI->isSubRegisterEq(MO.getReg(), SrcReg)) {
+ // We only permit the source of COPY has the same LMUL as the defined
+ // operand.
+ // There are cases we need to keep the whole register copy if the LMUL
+ // is different.
+ // For example,
+ // $x0 = PseudoVSETIVLI 4, 73 // vsetivli zero, 4, e16,m2,ta,m
+ // $v28m4 = PseudoVWADD_VV_M2 $v26m2, $v8m2
+ // # The COPY may be created by vlmul_trunc intrinsic.
+ // $v26m2 = COPY renamable $v28m2, implicit killed $v28m4
+ //
+ // After widening, the valid value will be 4 x e32 elements. If we
+ // convert the COPY to vmv.v.v, it will only copy 4 x e16 elements.
+ // FIXME: The COPY of subregister of Zvlsseg register will not be able
+ // to convert to vmv.v.[v|i] under the constraint.
+ if (MO.getReg() != SrcReg)
+ return false;
+
+ // In widening reduction instructions with LMUL_1 input vector case,
+ // only checking the LMUL is insufficient due to reduction result is
+ // always LMUL_1.
+ // For example,
+ // $x11 = PseudoVSETIVLI 1, 64 // vsetivli a1, 1, e8, m1, ta, mu
+ // $v8m1 = PseudoVWREDSUM_VS_M1 $v26, $v27
+ // $v26 = COPY killed renamable $v8
+ // After widening, The valid value will be 1 x e16 elements. If we
+ // convert the COPY to vmv.v.v, it will only copy 1 x e8 elements.
+ uint64_t TSFlags = MBBI->getDesc().TSFlags;
+ if (RISCVII::isRVVWideningReduction(TSFlags))
+ return false;
+
+ // If the producing instruction does not depend on vsetvli, do not
+ // convert COPY to vmv.v.v. For example, VL1R_V or PseudoVRELOAD.
+ if (!RISCVII::hasSEWOp(TSFlags) || !RISCVII::hasVLOp(TSFlags))
+ return false;
+
+ // Found the definition.
+ FoundDef = true;
+ DefMBBI = MBBI;
+ break;
+ }
+ }
+ }
+ }
+
+ // Reached the block entry without locating a suitable definition.
+ return false;
+}
+
+// Emit a physical register-to-register copy before MBBI. Handles GPR (via
+// addi), vector CSR reads (via csrrs), FPR (via fsgnj), single vector
+// register groups (vmv<N>r.v, possibly narrowed to vmv.v.v/vmv.v.i), and
+// segment (NF > 1) vector tuples copied sub-register by sub-register.
+void RISCVInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ const DebugLoc &DL, MCRegister DstReg,
+ MCRegister SrcReg, bool KillSrc) const {
+ if (RISCV::GPRRegClass.contains(DstReg, SrcReg)) {
+ BuildMI(MBB, MBBI, DL, get(RISCV::ADDI), DstReg)
+ .addReg(SrcReg, getKillRegState(KillSrc))
+ .addImm(0);
+ return;
+ }
+
+ // Handle copy from csr
+ if (RISCV::VCSRRegClass.contains(SrcReg) &&
+ RISCV::GPRRegClass.contains(DstReg)) {
+ const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
+ BuildMI(MBB, MBBI, DL, get(RISCV::CSRRS), DstReg)
+ .addImm(RISCVSysReg::lookupSysRegByName(TRI.getName(SrcReg))->Encoding)
+ .addReg(RISCV::X0);
+ return;
+ }
+
+ // FPR->FPR copies and VR->VR copies.
+ // Classify the register class to pick opcode, LMUL, and (for segment
+ // tuples) the number of fields NF and the sub-register stride.
+ unsigned Opc;
+ bool IsScalableVector = true;
+ unsigned NF = 1;
+ RISCVII::VLMUL LMul = RISCVII::LMUL_1;
+ unsigned SubRegIdx = RISCV::sub_vrm1_0;
+ if (RISCV::FPR16RegClass.contains(DstReg, SrcReg)) {
+ if (!STI.hasStdExtZfh() && STI.hasStdExtZfhmin()) {
+ // Zfhmin subset doesn't have FSGNJ_H, replaces FSGNJ_H with FSGNJ_S.
+ const TargetRegisterInfo *TRI = STI.getRegisterInfo();
+ DstReg = TRI->getMatchingSuperReg(DstReg, RISCV::sub_16,
+ &RISCV::FPR32RegClass);
+ SrcReg = TRI->getMatchingSuperReg(SrcReg, RISCV::sub_16,
+ &RISCV::FPR32RegClass);
+ Opc = RISCV::FSGNJ_S;
+ } else {
+ Opc = RISCV::FSGNJ_H;
+ }
+ IsScalableVector = false;
+ } else if (RISCV::FPR32RegClass.contains(DstReg, SrcReg)) {
+ Opc = RISCV::FSGNJ_S;
+ IsScalableVector = false;
+ } else if (RISCV::FPR64RegClass.contains(DstReg, SrcReg)) {
+ Opc = RISCV::FSGNJ_D;
+ IsScalableVector = false;
+ } else if (RISCV::VRRegClass.contains(DstReg, SrcReg)) {
+ Opc = RISCV::VMV1R_V;
+ LMul = RISCVII::LMUL_1;
+ } else if (RISCV::VRM2RegClass.contains(DstReg, SrcReg)) {
+ Opc = RISCV::VMV2R_V;
+ LMul = RISCVII::LMUL_2;
+ } else if (RISCV::VRM4RegClass.contains(DstReg, SrcReg)) {
+ Opc = RISCV::VMV4R_V;
+ LMul = RISCVII::LMUL_4;
+ } else if (RISCV::VRM8RegClass.contains(DstReg, SrcReg)) {
+ Opc = RISCV::VMV8R_V;
+ LMul = RISCVII::LMUL_8;
+ } else if (RISCV::VRN2M1RegClass.contains(DstReg, SrcReg)) {
+ Opc = RISCV::VMV1R_V;
+ SubRegIdx = RISCV::sub_vrm1_0;
+ NF = 2;
+ LMul = RISCVII::LMUL_1;
+ } else if (RISCV::VRN2M2RegClass.contains(DstReg, SrcReg)) {
+ Opc = RISCV::VMV2R_V;
+ SubRegIdx = RISCV::sub_vrm2_0;
+ NF = 2;
+ LMul = RISCVII::LMUL_2;
+ } else if (RISCV::VRN2M4RegClass.contains(DstReg, SrcReg)) {
+ Opc = RISCV::VMV4R_V;
+ SubRegIdx = RISCV::sub_vrm4_0;
+ NF = 2;
+ LMul = RISCVII::LMUL_4;
+ } else if (RISCV::VRN3M1RegClass.contains(DstReg, SrcReg)) {
+ Opc = RISCV::VMV1R_V;
+ SubRegIdx = RISCV::sub_vrm1_0;
+ NF = 3;
+ LMul = RISCVII::LMUL_1;
+ } else if (RISCV::VRN3M2RegClass.contains(DstReg, SrcReg)) {
+ Opc = RISCV::VMV2R_V;
+ SubRegIdx = RISCV::sub_vrm2_0;
+ NF = 3;
+ LMul = RISCVII::LMUL_2;
+ } else if (RISCV::VRN4M1RegClass.contains(DstReg, SrcReg)) {
+ Opc = RISCV::VMV1R_V;
+ SubRegIdx = RISCV::sub_vrm1_0;
+ NF = 4;
+ LMul = RISCVII::LMUL_1;
+ } else if (RISCV::VRN4M2RegClass.contains(DstReg, SrcReg)) {
+ Opc = RISCV::VMV2R_V;
+ SubRegIdx = RISCV::sub_vrm2_0;
+ NF = 4;
+ LMul = RISCVII::LMUL_2;
+ } else if (RISCV::VRN5M1RegClass.contains(DstReg, SrcReg)) {
+ Opc = RISCV::VMV1R_V;
+ SubRegIdx = RISCV::sub_vrm1_0;
+ NF = 5;
+ LMul = RISCVII::LMUL_1;
+ } else if (RISCV::VRN6M1RegClass.contains(DstReg, SrcReg)) {
+ Opc = RISCV::VMV1R_V;
+ SubRegIdx = RISCV::sub_vrm1_0;
+ NF = 6;
+ LMul = RISCVII::LMUL_1;
+ } else if (RISCV::VRN7M1RegClass.contains(DstReg, SrcReg)) {
+ Opc = RISCV::VMV1R_V;
+ SubRegIdx = RISCV::sub_vrm1_0;
+ NF = 7;
+ LMul = RISCVII::LMUL_1;
+ } else if (RISCV::VRN8M1RegClass.contains(DstReg, SrcReg)) {
+ Opc = RISCV::VMV1R_V;
+ SubRegIdx = RISCV::sub_vrm1_0;
+ NF = 8;
+ LMul = RISCVII::LMUL_1;
+ } else {
+ llvm_unreachable("Impossible reg-to-reg copy");
+ }
+
+ if (IsScalableVector) {
+ // Try to narrow the whole-register move to vmv.v.v (or vmv.v.i when the
+ // producer was itself a vmv.v.i) covering only the active VL elements.
+ bool UseVMV_V_V = false;
+ MachineBasicBlock::const_iterator DefMBBI;
+ unsigned VIOpc;
+ if (isConvertibleToVMV_V_V(STI, MBB, MBBI, DefMBBI, LMul)) {
+ UseVMV_V_V = true;
+ // We only need to handle LMUL = 1/2/4/8 here because we only define
+ // vector register classes for LMUL = 1/2/4/8.
+ switch (LMul) {
+ default:
+ llvm_unreachable("Impossible LMUL for vector register copy.");
+ case RISCVII::LMUL_1:
+ Opc = RISCV::PseudoVMV_V_V_M1;
+ VIOpc = RISCV::PseudoVMV_V_I_M1;
+ break;
+ case RISCVII::LMUL_2:
+ Opc = RISCV::PseudoVMV_V_V_M2;
+ VIOpc = RISCV::PseudoVMV_V_I_M2;
+ break;
+ case RISCVII::LMUL_4:
+ Opc = RISCV::PseudoVMV_V_V_M4;
+ VIOpc = RISCV::PseudoVMV_V_I_M4;
+ break;
+ case RISCVII::LMUL_8:
+ Opc = RISCV::PseudoVMV_V_V_M8;
+ VIOpc = RISCV::PseudoVMV_V_I_M8;
+ break;
+ }
+ }
+
+ bool UseVMV_V_I = false;
+ if (UseVMV_V_V && (DefMBBI->getOpcode() == VIOpc)) {
+ UseVMV_V_I = true;
+ Opc = VIOpc;
+ }
+
+ if (NF == 1) {
+ auto MIB = BuildMI(MBB, MBBI, DL, get(Opc), DstReg);
+ if (UseVMV_V_I)
+ MIB = MIB.add(DefMBBI->getOperand(1));
+ else
+ MIB = MIB.addReg(SrcReg, getKillRegState(KillSrc));
+ if (UseVMV_V_V) {
+ const MCInstrDesc &Desc = DefMBBI->getDesc();
+ MIB.add(DefMBBI->getOperand(RISCVII::getVLOpNum(Desc))); // AVL
+ MIB.add(DefMBBI->getOperand(RISCVII::getSEWOpNum(Desc))); // SEW
+ MIB.addReg(RISCV::VL, RegState::Implicit);
+ MIB.addReg(RISCV::VTYPE, RegState::Implicit);
+ }
+ } else {
+ const TargetRegisterInfo *TRI = STI.getRegisterInfo();
+
+ // Copy segment tuples field by field; walk backwards when a forward
+ // copy would clobber overlapping source registers.
+ int I = 0, End = NF, Incr = 1;
+ unsigned SrcEncoding = TRI->getEncodingValue(SrcReg);
+ unsigned DstEncoding = TRI->getEncodingValue(DstReg);
+ unsigned LMulVal;
+ bool Fractional;
+ std::tie(LMulVal, Fractional) = RISCVVType::decodeVLMUL(LMul);
+ assert(!Fractional && "It is impossible be fractional lmul here.");
+ if (forwardCopyWillClobberTuple(DstEncoding, SrcEncoding, NF * LMulVal)) {
+ I = NF - 1;
+ End = -1;
+ Incr = -1;
+ }
+
+ for (; I != End; I += Incr) {
+ auto MIB = BuildMI(MBB, MBBI, DL, get(Opc),
+ TRI->getSubReg(DstReg, SubRegIdx + I));
+ if (UseVMV_V_I)
+ MIB = MIB.add(DefMBBI->getOperand(1));
+ else
+ MIB = MIB.addReg(TRI->getSubReg(SrcReg, SubRegIdx + I),
+ getKillRegState(KillSrc));
+ if (UseVMV_V_V) {
+ const MCInstrDesc &Desc = DefMBBI->getDesc();
+ MIB.add(DefMBBI->getOperand(RISCVII::getVLOpNum(Desc))); // AVL
+ MIB.add(DefMBBI->getOperand(RISCVII::getSEWOpNum(Desc))); // SEW
+ MIB.addReg(RISCV::VL, RegState::Implicit);
+ MIB.addReg(RISCV::VTYPE, RegState::Implicit);
+ }
+ }
+ }
+ } else {
+ // FPR copy: fsgnj rd, rs, rs acts as a move.
+ BuildMI(MBB, MBBI, DL, get(Opc), DstReg)
+ .addReg(SrcReg, getKillRegState(KillSrc))
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ }
+}
+
+// Spill SrcReg to stack slot FI, choosing the store opcode from the register
+// class. Scalable vector classes use whole-register stores (or spill pseudos
+// for segment tuples) and mark the slot as a scalable-vector stack object.
+void RISCVInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ Register SrcReg, bool IsKill, int FI,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI,
+ Register VReg) const {
+ DebugLoc DL;
+ if (I != MBB.end())
+ DL = I->getDebugLoc();
+
+ MachineFunction *MF = MBB.getParent();
+ MachineFrameInfo &MFI = MF->getFrameInfo();
+
+ unsigned Opcode;
+ bool IsScalableVector = true;
+ if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
+ // GPR width decides SW (RV32) vs SD (RV64).
+ Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
+ RISCV::SW : RISCV::SD;
+ IsScalableVector = false;
+ } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
+ Opcode = RISCV::FSH;
+ IsScalableVector = false;
+ } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
+ Opcode = RISCV::FSW;
+ IsScalableVector = false;
+ } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
+ Opcode = RISCV::FSD;
+ IsScalableVector = false;
+ } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
+ Opcode = RISCV::VS1R_V;
+ } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
+ Opcode = RISCV::VS2R_V;
+ } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
+ Opcode = RISCV::VS4R_V;
+ } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
+ Opcode = RISCV::VS8R_V;
+ } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
+ Opcode = RISCV::PseudoVSPILL2_M1;
+ else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
+ Opcode = RISCV::PseudoVSPILL2_M2;
+ else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
+ Opcode = RISCV::PseudoVSPILL2_M4;
+ else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
+ Opcode = RISCV::PseudoVSPILL3_M1;
+ else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
+ Opcode = RISCV::PseudoVSPILL3_M2;
+ else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
+ Opcode = RISCV::PseudoVSPILL4_M1;
+ else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
+ Opcode = RISCV::PseudoVSPILL4_M2;
+ else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
+ Opcode = RISCV::PseudoVSPILL5_M1;
+ else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
+ Opcode = RISCV::PseudoVSPILL6_M1;
+ else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
+ Opcode = RISCV::PseudoVSPILL7_M1;
+ else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
+ Opcode = RISCV::PseudoVSPILL8_M1;
+ else
+ llvm_unreachable("Can't store this register to stack slot");
+
+ if (IsScalableVector) {
+ // Scalable spills: size is unknown at compile time, and the slot must be
+ // tagged so frame lowering allocates it in the scalable region.
+ MachineMemOperand *MMO = MF->getMachineMemOperand(
+ MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
+ MemoryLocation::UnknownSize, MFI.getObjectAlign(FI));
+
+ MFI.setStackID(FI, TargetStackID::ScalableVector);
+ BuildMI(MBB, I, DL, get(Opcode))
+ .addReg(SrcReg, getKillRegState(IsKill))
+ .addFrameIndex(FI)
+ .addMemOperand(MMO);
+ } else {
+ MachineMemOperand *MMO = MF->getMachineMemOperand(
+ MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
+ MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
+
+ BuildMI(MBB, I, DL, get(Opcode))
+ .addReg(SrcReg, getKillRegState(IsKill))
+ .addFrameIndex(FI)
+ .addImm(0)
+ .addMemOperand(MMO);
+ }
+}
+
+// Reload DstReg of register class RC from stack slot FI, inserting before I.
+// Scalar GPR/FPR classes use a single scalar load with a 0 immediate offset;
+// plain vector classes use whole-register loads (VL<n>RE8_V), and segment
+// (tuple) classes use PseudoVRELOAD* pseudos. Vector reloads mark the slot as
+// a scalable-vector stack object. VReg is part of the TargetInstrInfo
+// interface and is unused here.
+void RISCVInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
+                                          MachineBasicBlock::iterator I,
+                                          Register DstReg, int FI,
+                                          const TargetRegisterClass *RC,
+                                          const TargetRegisterInfo *TRI,
+                                          Register VReg) const {
+  // Borrow the debug location of the instruction we insert before, if any.
+  DebugLoc DL;
+  if (I != MBB.end())
+    DL = I->getDebugLoc();
+
+  MachineFunction *MF = MBB.getParent();
+  MachineFrameInfo &MFI = MF->getFrameInfo();
+
+  // Pick the load opcode from the register class; scalar classes clear
+  // IsScalableVector so the fixed-size emission path below is taken.
+  unsigned Opcode;
+  bool IsScalableVector = true;
+  if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
+    Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
+             RISCV::LW : RISCV::LD;
+    IsScalableVector = false;
+  } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
+    Opcode = RISCV::FLH;
+    IsScalableVector = false;
+  } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
+    Opcode = RISCV::FLW;
+    IsScalableVector = false;
+  } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
+    Opcode = RISCV::FLD;
+    IsScalableVector = false;
+  } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
+    Opcode = RISCV::VL1RE8_V;
+  } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
+    Opcode = RISCV::VL2RE8_V;
+  } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
+    Opcode = RISCV::VL4RE8_V;
+  } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
+    Opcode = RISCV::VL8RE8_V;
+  } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
+    Opcode = RISCV::PseudoVRELOAD2_M1;
+  else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
+    Opcode = RISCV::PseudoVRELOAD2_M2;
+  else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
+    Opcode = RISCV::PseudoVRELOAD2_M4;
+  else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
+    Opcode = RISCV::PseudoVRELOAD3_M1;
+  else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
+    Opcode = RISCV::PseudoVRELOAD3_M2;
+  else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
+    Opcode = RISCV::PseudoVRELOAD4_M1;
+  else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
+    Opcode = RISCV::PseudoVRELOAD4_M2;
+  else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
+    Opcode = RISCV::PseudoVRELOAD5_M1;
+  else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
+    Opcode = RISCV::PseudoVRELOAD6_M1;
+  else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
+    Opcode = RISCV::PseudoVRELOAD7_M1;
+  else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
+    Opcode = RISCV::PseudoVRELOAD8_M1;
+  else
+    llvm_unreachable("Can't load this register from stack slot");
+
+  if (IsScalableVector) {
+    // Scalable objects have no static size, so the memory operand uses
+    // UnknownSize, and the frame slot is retagged as scalable-vector.
+    MachineMemOperand *MMO = MF->getMachineMemOperand(
+        MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
+        MemoryLocation::UnknownSize, MFI.getObjectAlign(FI));
+
+    MFI.setStackID(FI, TargetStackID::ScalableVector);
+    BuildMI(MBB, I, DL, get(Opcode), DstReg)
+        .addFrameIndex(FI)
+        .addMemOperand(MMO);
+  } else {
+    MachineMemOperand *MMO = MF->getMachineMemOperand(
+        MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
+        MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
+
+    // Scalar loads carry an explicit immediate offset operand (0 here).
+    BuildMI(MBB, I, DL, get(Opcode), DstReg)
+        .addFrameIndex(FI)
+        .addImm(0)
+        .addMemOperand(MMO);
+  }
+}
+
+// Fold a reload from stack slot FrameIndex into a following sign/zero
+// extension instruction by emitting a single narrower extending load
+// (e.g. sext.w -> LW, zext.b -> LBU) instead of load + extend. Returns the
+// new load instruction, or nullptr when the fold does not apply.
+MachineInstr *RISCVInstrInfo::foldMemoryOperandImpl(
+    MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
+    MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS,
+    VirtRegMap *VRM) const {
+  const MachineFrameInfo &MFI = MF.getFrameInfo();
+
+  // The below optimizations narrow the load so they are only valid for little
+  // endian.
+  // TODO: Support big endian by adding an offset into the frame object?
+  if (MF.getDataLayout().isBigEndian())
+    return nullptr;
+
+  // Fold load from stack followed by sext.w into lw.
+  // TODO: Fold with sext.b, sext.h, zext.b, zext.h, zext.w?
+  // Only folding of the single source operand (operand 1) is supported.
+  if (Ops.size() != 1 || Ops[0] != 1)
+    return nullptr;
+
+  // Map the extension being performed to the matching extending-load opcode.
+  unsigned LoadOpc;
+  switch (MI.getOpcode()) {
+  default:
+    if (RISCV::isSEXT_W(MI)) {
+      LoadOpc = RISCV::LW;
+      break;
+    }
+    if (RISCV::isZEXT_W(MI)) {
+      LoadOpc = RISCV::LWU;
+      break;
+    }
+    if (RISCV::isZEXT_B(MI)) {
+      LoadOpc = RISCV::LBU;
+      break;
+    }
+    return nullptr;
+  case RISCV::SEXT_H:
+    LoadOpc = RISCV::LH;
+    break;
+  case RISCV::SEXT_B:
+    LoadOpc = RISCV::LB;
+    break;
+  case RISCV::ZEXT_H_RV32:
+  case RISCV::ZEXT_H_RV64:
+    LoadOpc = RISCV::LHU;
+    break;
+  }
+
+  MachineMemOperand *MMO = MF.getMachineMemOperand(
+      MachinePointerInfo::getFixedStack(MF, FrameIndex),
+      MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIndex),
+      MFI.getObjectAlign(FrameIndex));
+
+  // Replace the extend with a load straight into its destination register.
+  Register DstReg = MI.getOperand(0).getReg();
+  return BuildMI(*MI.getParent(), InsertPt, MI.getDebugLoc(), get(LoadOpc),
+                 DstReg)
+      .addFrameIndex(FrameIndex)
+      .addImm(0)
+      .addMemOperand(MMO);
+}
+
+// Materialize the immediate Val into DstReg before MBBI, using the
+// instruction sequence computed by RISCVMatInt::generateInstSeq. On RV32
+// only 32-bit values are supported (fatal error otherwise). The sequence is
+// chained: the first instruction sources X0, every later one sources the
+// partial result already in DstReg.
+void RISCVInstrInfo::movImm(MachineBasicBlock &MBB,
+                            MachineBasicBlock::iterator MBBI,
+                            const DebugLoc &DL, Register DstReg, uint64_t Val,
+                            MachineInstr::MIFlag Flag) const {
+  Register SrcReg = RISCV::X0;
+
+  if (!STI.is64Bit() && !isInt<32>(Val))
+    report_fatal_error("Should only materialize 32-bit constants for RV32");
+
+  RISCVMatInt::InstSeq Seq =
+      RISCVMatInt::generateInstSeq(Val, STI.getFeatureBits());
+  assert(!Seq.empty());
+
+  // Emit each step; the operand shape depends on the step's operand kind.
+  for (RISCVMatInt::Inst &Inst : Seq) {
+    switch (Inst.getOpndKind()) {
+    case RISCVMatInt::Imm:
+      BuildMI(MBB, MBBI, DL, get(Inst.getOpcode()), DstReg)
+          .addImm(Inst.getImm())
+          .setMIFlag(Flag);
+      break;
+    case RISCVMatInt::RegX0:
+      BuildMI(MBB, MBBI, DL, get(Inst.getOpcode()), DstReg)
+          .addReg(SrcReg, RegState::Kill)
+          .addReg(RISCV::X0)
+          .setMIFlag(Flag);
+      break;
+    case RISCVMatInt::RegReg:
+      BuildMI(MBB, MBBI, DL, get(Inst.getOpcode()), DstReg)
+          .addReg(SrcReg, RegState::Kill)
+          .addReg(SrcReg, RegState::Kill)
+          .setMIFlag(Flag);
+      break;
+    case RISCVMatInt::RegImm:
+      BuildMI(MBB, MBBI, DL, get(Inst.getOpcode()), DstReg)
+          .addReg(SrcReg, RegState::Kill)
+          .addImm(Inst.getImm())
+          .setMIFlag(Flag);
+      break;
+    }
+
+    // Only the first instruction has X0 as its source.
+    SrcReg = DstReg;
+  }
+}
+
+// Map a RISC-V conditional-branch opcode to its RISCVCC condition code;
+// any other opcode yields COND_INVALID.
+static RISCVCC::CondCode getCondFromBranchOpc(unsigned Opc) {
+  RISCVCC::CondCode Cond = RISCVCC::COND_INVALID;
+  switch (Opc) {
+  case RISCV::BEQ:
+    Cond = RISCVCC::COND_EQ;
+    break;
+  case RISCV::BNE:
+    Cond = RISCVCC::COND_NE;
+    break;
+  case RISCV::BLT:
+    Cond = RISCVCC::COND_LT;
+    break;
+  case RISCV::BGE:
+    Cond = RISCVCC::COND_GE;
+    break;
+  case RISCV::BLTU:
+    Cond = RISCVCC::COND_LTU;
+    break;
+  case RISCV::BGEU:
+    Cond = RISCVCC::COND_GEU;
+    break;
+  default:
+    break;
+  }
+  return Cond;
+}
+
+// The contents of values added to Cond are not examined outside of
+// RISCVInstrInfo, giving us flexibility in what to push to it. For RISCV, we
+// push BranchOpcode, Reg1, Reg2.
+// Decompose the conditional branch LastInst into its target block and a
+// 3-element condition vector: [condition-code imm, lhs reg, rhs reg].
+static void parseCondBranch(MachineInstr &LastInst, MachineBasicBlock *&Target,
+                            SmallVectorImpl<MachineOperand> &Cond) {
+  // Block ends with fall-through condbranch.
+  assert(LastInst.getDesc().isConditionalBranch() &&
+         "Unknown conditional branch");
+  // Operand layout for B<cc>: (rs1, rs2, target MBB).
+  Target = LastInst.getOperand(2).getMBB();
+  unsigned CC = getCondFromBranchOpc(LastInst.getOpcode());
+  Cond.push_back(MachineOperand::CreateImm(CC));
+  Cond.push_back(LastInst.getOperand(0));
+  Cond.push_back(LastInst.getOperand(1));
+}
+
+// Return the instruction description of the branch implementing condition
+// code CC.
+const MCInstrDesc &RISCVInstrInfo::getBrCond(RISCVCC::CondCode CC) const {
+  unsigned BrOpc;
+  switch (CC) {
+  case RISCVCC::COND_EQ:
+    BrOpc = RISCV::BEQ;
+    break;
+  case RISCVCC::COND_NE:
+    BrOpc = RISCV::BNE;
+    break;
+  case RISCVCC::COND_LT:
+    BrOpc = RISCV::BLT;
+    break;
+  case RISCVCC::COND_GE:
+    BrOpc = RISCV::BGE;
+    break;
+  case RISCVCC::COND_LTU:
+    BrOpc = RISCV::BLTU;
+    break;
+  case RISCVCC::COND_GEU:
+    BrOpc = RISCV::BGEU;
+    break;
+  default:
+    llvm_unreachable("Unknown condition code!");
+  }
+  return get(BrOpc);
+}
+
+// Return the condition code that tests the logical negation of CC
+// (eq<->ne, lt<->ge, ltu<->geu).
+RISCVCC::CondCode RISCVCC::getOppositeBranchCondition(RISCVCC::CondCode CC) {
+  if (CC == RISCVCC::COND_EQ)
+    return RISCVCC::COND_NE;
+  if (CC == RISCVCC::COND_NE)
+    return RISCVCC::COND_EQ;
+  if (CC == RISCVCC::COND_LT)
+    return RISCVCC::COND_GE;
+  if (CC == RISCVCC::COND_GE)
+    return RISCVCC::COND_LT;
+  if (CC == RISCVCC::COND_LTU)
+    return RISCVCC::COND_GEU;
+  if (CC == RISCVCC::COND_GEU)
+    return RISCVCC::COND_LTU;
+  llvm_unreachable("Unrecognized conditional branch");
+}
+
+// Standard TargetInstrInfo branch analysis: fill TBB/FBB/Cond from the
+// terminators of MBB. Returns false on success (fall-through, single
+// unconditional, single conditional, or conditional+unconditional pairs) and
+// true when the block's terminators cannot be analyzed. With AllowModify,
+// dead terminators after the first unconditional/indirect branch are erased.
+bool RISCVInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
+                                   MachineBasicBlock *&TBB,
+                                   MachineBasicBlock *&FBB,
+                                   SmallVectorImpl<MachineOperand> &Cond,
+                                   bool AllowModify) const {
+  TBB = FBB = nullptr;
+  Cond.clear();
+
+  // If the block has no terminators, it just falls into the block after it.
+  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
+  if (I == MBB.end() || !isUnpredicatedTerminator(*I))
+    return false;
+
+  // Count the number of terminators and find the first unconditional or
+  // indirect branch.
+  MachineBasicBlock::iterator FirstUncondOrIndirectBr = MBB.end();
+  int NumTerminators = 0;
+  for (auto J = I.getReverse(); J != MBB.rend() && isUnpredicatedTerminator(*J);
+       J++) {
+    NumTerminators++;
+    if (J->getDesc().isUnconditionalBranch() ||
+        J->getDesc().isIndirectBranch()) {
+      FirstUncondOrIndirectBr = J.getReverse();
+    }
+  }
+
+  // If AllowModify is true, we can erase any terminators after
+  // FirstUncondOrIndirectBR.
+  if (AllowModify && FirstUncondOrIndirectBr != MBB.end()) {
+    while (std::next(FirstUncondOrIndirectBr) != MBB.end()) {
+      std::next(FirstUncondOrIndirectBr)->eraseFromParent();
+      NumTerminators--;
+    }
+    I = FirstUncondOrIndirectBr;
+  }
+
+  // We can't handle blocks that end in an indirect branch.
+  if (I->getDesc().isIndirectBranch())
+    return true;
+
+  // We can't handle blocks with more than 2 terminators.
+  if (NumTerminators > 2)
+    return true;
+
+  // Handle a single unconditional branch.
+  if (NumTerminators == 1 && I->getDesc().isUnconditionalBranch()) {
+    TBB = getBranchDestBlock(*I);
+    return false;
+  }
+
+  // Handle a single conditional branch.
+  if (NumTerminators == 1 && I->getDesc().isConditionalBranch()) {
+    parseCondBranch(*I, TBB, Cond);
+    return false;
+  }
+
+  // Handle a conditional branch followed by an unconditional branch.
+  if (NumTerminators == 2 && std::prev(I)->getDesc().isConditionalBranch() &&
+      I->getDesc().isUnconditionalBranch()) {
+    parseCondBranch(*std::prev(I), TBB, Cond);
+    FBB = getBranchDestBlock(*I);
+    return false;
+  }
+
+  // Otherwise, we can't handle this.
+  return true;
+}
+
+// Remove up to two branch terminators (an unconditional and/or a preceding
+// conditional branch) from the end of MBB. Returns the number of
+// instructions removed; if BytesRemoved is non-null it accumulates their
+// encoded size.
+unsigned RISCVInstrInfo::removeBranch(MachineBasicBlock &MBB,
+                                      int *BytesRemoved) const {
+  if (BytesRemoved)
+    *BytesRemoved = 0;
+  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
+  if (I == MBB.end())
+    return 0;
+
+  if (!I->getDesc().isUnconditionalBranch() &&
+      !I->getDesc().isConditionalBranch())
+    return 0;
+
+  // Remove the branch.
+  if (BytesRemoved)
+    *BytesRemoved += getInstSizeInBytes(*I);
+  I->eraseFromParent();
+
+  // Re-scan from the (new) end of the block for a preceding conditional
+  // branch of a two-way branch sequence.
+  I = MBB.end();
+
+  if (I == MBB.begin())
+    return 1;
+  --I;
+  if (!I->getDesc().isConditionalBranch())
+    return 1;
+
+  // Remove the branch.
+  if (BytesRemoved)
+    *BytesRemoved += getInstSizeInBytes(*I);
+  I->eraseFromParent();
+  return 2;
+}
+
+// Inserts a branch into the end of the specific MachineBasicBlock, returning
+// the number of instructions inserted.
+// Cond is either empty (unconditional branch to TBB) or the 3-operand form
+// produced by parseCondBranch: [cc imm, rs1, rs2]. A non-null FBB requests a
+// two-way branch (conditional to TBB, unconditional to FBB). BytesAdded, if
+// non-null, accumulates the encoded size of the inserted instructions.
+unsigned RISCVInstrInfo::insertBranch(
+    MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
+    ArrayRef<MachineOperand> Cond, const DebugLoc &DL, int *BytesAdded) const {
+  if (BytesAdded)
+    *BytesAdded = 0;
+
+  // Shouldn't be a fall through.
+  assert(TBB && "insertBranch must not be told to insert a fallthrough");
+  assert((Cond.size() == 3 || Cond.size() == 0) &&
+         "RISCV branch conditions have two components!");
+
+  // Unconditional branch.
+  if (Cond.empty()) {
+    MachineInstr &MI = *BuildMI(&MBB, DL, get(RISCV::PseudoBR)).addMBB(TBB);
+    if (BytesAdded)
+      *BytesAdded += getInstSizeInBytes(MI);
+    return 1;
+  }
+
+  // Either a one or two-way conditional branch.
+  auto CC = static_cast<RISCVCC::CondCode>(Cond[0].getImm());
+  MachineInstr &CondMI =
+      *BuildMI(&MBB, DL, getBrCond(CC)).add(Cond[1]).add(Cond[2]).addMBB(TBB);
+  if (BytesAdded)
+    *BytesAdded += getInstSizeInBytes(CondMI);
+
+  // One-way conditional branch.
+  if (!FBB)
+    return 1;
+
+  // Two-way conditional branch.
+  MachineInstr &MI = *BuildMI(&MBB, DL, get(RISCV::PseudoBR)).addMBB(FBB);
+  if (BytesAdded)
+    *BytesAdded += getInstSizeInBytes(MI);
+  return 2;
+}
+
+// Expand an out-of-range branch: emit a PseudoJump (AUIPC+JALR) in the new
+// block MBB that reaches DestBB. The jump needs a scratch GPR; it is
+// scavenged if possible, otherwise s11 (X27) is spilled around the jump via
+// the branch-relaxation scratch frame slot and RestoreBB becomes the real
+// jump target so the register can be reloaded there first.
+void RISCVInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
+                                          MachineBasicBlock &DestBB,
+                                          MachineBasicBlock &RestoreBB,
+                                          const DebugLoc &DL, int64_t BrOffset,
+                                          RegScavenger *RS) const {
+  assert(RS && "RegScavenger required for long branching");
+  assert(MBB.empty() &&
+         "new block should be inserted for expanding unconditional branch");
+  assert(MBB.pred_size() == 1);
+  assert(RestoreBB.empty() &&
+         "restore block should be inserted for restoring clobbered registers");
+
+  MachineFunction *MF = MBB.getParent();
+  MachineRegisterInfo &MRI = MF->getRegInfo();
+  RISCVMachineFunctionInfo *RVFI = MF->getInfo<RISCVMachineFunctionInfo>();
+  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
+
+  if (!isInt<32>(BrOffset))
+    report_fatal_error(
+        "Branch offsets outside of the signed 32-bit range not supported");
+
+  // FIXME: A virtual register must be used initially, as the register
+  // scavenger won't work with empty blocks (SIInstrInfo::insertIndirectBranch
+  // uses the same workaround).
+  Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
+  auto II = MBB.end();
+  // We may also update the jump target to RestoreBB later.
+  MachineInstr &MI = *BuildMI(MBB, II, DL, get(RISCV::PseudoJump))
+                          .addReg(ScratchReg, RegState::Define | RegState::Dead)
+                          .addMBB(&DestBB, RISCVII::MO_CALL);
+
+  RS->enterBasicBlockEnd(MBB);
+  Register TmpGPR =
+      RS->scavengeRegisterBackwards(RISCV::GPRRegClass, MI.getIterator(),
+                                    /*RestoreAfter=*/false, /*SpAdj=*/0,
+                                    /*AllowSpill=*/false);
+  if (TmpGPR != RISCV::NoRegister)
+    RS->setRegUsed(TmpGPR);
+  else {
+    // The case when there is no scavenged register needs special handling.
+
+    // Pick s11 because it doesn't make a difference.
+    TmpGPR = RISCV::X27;
+
+    int FrameIndex = RVFI->getBranchRelaxationScratchFrameIndex();
+    if (FrameIndex == -1)
+      report_fatal_error("underestimated function size");
+
+    // Spill s11 before the jump; frame indices must be resolved immediately
+    // because frame lowering has already run.
+    storeRegToStackSlot(MBB, MI, TmpGPR, /*IsKill=*/true, FrameIndex,
+                        &RISCV::GPRRegClass, TRI, Register());
+    TRI->eliminateFrameIndex(std::prev(MI.getIterator()),
+                             /*SpAdj=*/0, /*FIOperandNum=*/1);
+
+    // Divert the jump through RestoreBB, which reloads s11 before falling
+    // through to the real destination.
+    MI.getOperand(1).setMBB(&RestoreBB);
+
+    loadRegFromStackSlot(RestoreBB, RestoreBB.end(), TmpGPR, FrameIndex,
+                         &RISCV::GPRRegClass, TRI, Register());
+    TRI->eliminateFrameIndex(RestoreBB.back(),
+                             /*SpAdj=*/0, /*FIOperandNum=*/1);
+  }
+
+  // Rewrite the placeholder vreg with the physical scratch register.
+  MRI.replaceRegWith(ScratchReg, TmpGPR);
+  MRI.clearVirtRegs();
+}
+
+// Negate the condition code stored in Cond[0] in place. RISC-V can always
+// reverse a branch condition, so this always reports success (false).
+bool RISCVInstrInfo::reverseBranchCondition(
+    SmallVectorImpl<MachineOperand> &Cond) const {
+  assert((Cond.size() == 3) && "Invalid branch condition!");
+  auto OldCC = static_cast<RISCVCC::CondCode>(Cond[0].getImm());
+  Cond[0].setImm(RISCVCC::getOppositeBranchCondition(OldCC));
+  return false;
+}
+
+// Return the basic block a branch instruction jumps to; by convention the
+// target is always the final explicit operand.
+MachineBasicBlock *
+RISCVInstrInfo::getBranchDestBlock(const MachineInstr &MI) const {
+  assert(MI.getDesc().isBranch() && "Unexpected opcode!");
+  return MI.getOperand(MI.getNumExplicitOperands() - 1).getMBB();
+}
+
+// Check whether BrOffset fits the displacement encoding of BranchOp.
+// Conditional branches carry a signed 13-bit offset, JAL/PseudoBR a signed
+// 21-bit one, and PseudoJump (AUIPC+JALR) a signed 32-bit range; the 0x800
+// rounding term accounts for the sign-extended lo12 part. We can't derive
+// this from RISCVII::FormMask since pseudos like PseudoBR have no real form.
+bool RISCVInstrInfo::isBranchOffsetInRange(unsigned BranchOp,
+                                           int64_t BrOffset) const {
+  switch (BranchOp) {
+  case RISCV::BEQ:
+  case RISCV::BNE:
+  case RISCV::BLT:
+  case RISCV::BGE:
+  case RISCV::BLTU:
+  case RISCV::BGEU:
+    return isIntN(13, BrOffset);
+  case RISCV::JAL:
+  case RISCV::PseudoBR:
+    return isIntN(21, BrOffset);
+  case RISCV::PseudoJump:
+    return isIntN(32, SignExtend64(BrOffset + 0x800, STI.getXLen()));
+  default:
+    llvm_unreachable("Unexpected opcode!");
+  }
+}
+
+// If the operation has a predicated pseudo instruction, return the pseudo
+// instruction opcode. Otherwise, return RISCV::INSTRUCTION_LIST_END.
+// TODO: Support more operations.
+unsigned getPredicatedOpcode(unsigned Opcode) {
+  // Each case returns directly; the original `break` statements after the
+  // returns were unreachable dead code and have been dropped.
+  switch (Opcode) {
+  case RISCV::ADD:   return RISCV::PseudoCCADD;
+  case RISCV::SUB:   return RISCV::PseudoCCSUB;
+  case RISCV::AND:   return RISCV::PseudoCCAND;
+  case RISCV::OR:    return RISCV::PseudoCCOR;
+  case RISCV::XOR:   return RISCV::PseudoCCXOR;
+
+  case RISCV::ADDW:  return RISCV::PseudoCCADDW;
+  case RISCV::SUBW:  return RISCV::PseudoCCSUBW;
+  }
+
+  return RISCV::INSTRUCTION_LIST_END;
+}
+
+/// Identify instructions that can be folded into a CCMOV instruction, and
+/// return the defining instruction.
+/// Reg must be virtual with a single non-debug use; its defining instruction
+/// must have a predicated pseudo form and no operands that would be unsafe
+/// to predicate (tied/def operands, frame indices, non-constant physregs).
+static MachineInstr *canFoldAsPredicatedOp(Register Reg,
+                                           const MachineRegisterInfo &MRI,
+                                           const TargetInstrInfo *TII) {
+  if (!Reg.isVirtual())
+    return nullptr;
+  if (!MRI.hasOneNonDBGUse(Reg))
+    return nullptr;
+  MachineInstr *MI = MRI.getVRegDef(Reg);
+  if (!MI)
+    return nullptr;
+  // Check if MI can be predicated and folded into the CCMOV.
+  if (getPredicatedOpcode(MI->getOpcode()) == RISCV::INSTRUCTION_LIST_END)
+    return nullptr;
+  // Check if MI has any other defs or physreg uses.
+  // Start at operand 1 to skip the destination def in operand 0.
+  for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) {
+    const MachineOperand &MO = MI->getOperand(i);
+    // Reject frame index operands, PEI can't handle the predicated pseudos.
+    if (MO.isFI() || MO.isCPI() || MO.isJTI())
+      return nullptr;
+    if (!MO.isReg())
+      continue;
+    // MI can't have any tied operands, that would conflict with predication.
+    if (MO.isTied())
+      return nullptr;
+    if (MO.isDef())
+      return nullptr;
+    // Allow constant physregs.
+    if (MO.getReg().isPhysical() && !MRI.isConstantPhysReg(MO.getReg()))
+      return nullptr;
+  }
+  bool DontMoveAcrossStores = true;
+  if (!MI->isSafeToMove(/* AliasAnalysis = */ nullptr, DontMoveAcrossStores))
+    return nullptr;
+  return MI;
+}
+
+// Describe a PseudoCCMOVGPR select for the early-if-conversion framework:
+// report the true/false operand indices and push the compare operands and
+// condition code onto Cond. Returns false (success); Optimizable is set only
+// when the subtarget supports the short-forward-branch optimization.
+bool RISCVInstrInfo::analyzeSelect(const MachineInstr &MI,
+                                   SmallVectorImpl<MachineOperand> &Cond,
+                                   unsigned &TrueOp, unsigned &FalseOp,
+                                   bool &Optimizable) const {
+  assert(MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
+         "Unknown select instruction");
+  // CCMOV operands:
+  // 0: Def.
+  // 1: LHS of compare.
+  // 2: RHS of compare.
+  // 3: Condition code.
+  // 4: False use.
+  // 5: True use.
+  TrueOp = 5;
+  FalseOp = 4;
+  Cond.push_back(MI.getOperand(1));
+  Cond.push_back(MI.getOperand(2));
+  Cond.push_back(MI.getOperand(3));
+  // We can only fold when we support short forward branch opt.
+  Optimizable = STI.hasShortForwardBranchOpt();
+  return false;
+}
+
+// Try to replace a PseudoCCMOVGPR with a predicated form of the instruction
+// defining one of its inputs (see getPredicatedOpcode). Prefers folding the
+// true-side def; if only the false side folds, the condition is inverted.
+// Returns the new predicated instruction (DefMI is erased, the caller erases
+// MI), or nullptr if no fold is possible.
+MachineInstr *
+RISCVInstrInfo::optimizeSelect(MachineInstr &MI,
+                               SmallPtrSetImpl<MachineInstr *> &SeenMIs,
+                               bool PreferFalse) const {
+  assert(MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
+         "Unknown select instruction");
+  if (!STI.hasShortForwardBranchOpt())
+    return nullptr;
+
+  MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
+  // Try the true use (operand 5) first; fall back to the false use and
+  // remember that the condition must be inverted in that case.
+  MachineInstr *DefMI =
+      canFoldAsPredicatedOp(MI.getOperand(5).getReg(), MRI, this);
+  bool Invert = !DefMI;
+  if (!DefMI)
+    DefMI = canFoldAsPredicatedOp(MI.getOperand(4).getReg(), MRI, this);
+  if (!DefMI)
+    return nullptr;
+
+  // Find new register class to use.
+  MachineOperand FalseReg = MI.getOperand(Invert ? 5 : 4);
+  Register DestReg = MI.getOperand(0).getReg();
+  const TargetRegisterClass *PreviousClass = MRI.getRegClass(FalseReg.getReg());
+  if (!MRI.constrainRegClass(DestReg, PreviousClass))
+    return nullptr;
+
+  unsigned PredOpc = getPredicatedOpcode(DefMI->getOpcode());
+  assert(PredOpc != RISCV::INSTRUCTION_LIST_END && "Unexpected opcode!");
+
+  // Create a new predicated version of DefMI.
+  MachineInstrBuilder NewMI =
+      BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(PredOpc), DestReg);
+
+  // Copy the condition portion.
+  NewMI.add(MI.getOperand(1));
+  NewMI.add(MI.getOperand(2));
+
+  // Add condition code, inverting if necessary.
+  auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm());
+  if (Invert)
+    CC = RISCVCC::getOppositeBranchCondition(CC);
+  NewMI.addImm(CC);
+
+  // Copy the false register.
+  NewMI.add(FalseReg);
+
+  // Copy all the DefMI operands.
+  const MCInstrDesc &DefDesc = DefMI->getDesc();
+  for (unsigned i = 1, e = DefDesc.getNumOperands(); i != e; ++i)
+    NewMI.add(DefMI->getOperand(i));
+
+  // Update SeenMIs set: register newly created MI and erase removed DefMI.
+  SeenMIs.insert(NewMI);
+  SeenMIs.erase(DefMI);
+
+  // If MI is inside a loop, and DefMI is outside the loop, then kill flags on
+  // DefMI would be invalid when tranferred inside the loop. Checking for a
+  // loop is expensive, but at least remove kill flags if they are in different
+  // BBs.
+  if (DefMI->getParent() != MI.getParent())
+    NewMI->clearKillInfo();
+
+  // The caller will erase MI, but not DefMI.
+  DefMI->eraseFromParent();
+  return NewMI;
+}
+
+// Return the encoded size of MI in bytes: 0 for meta instructions, an
+// estimate from the asm string for inline asm, 2 when the instruction is
+// compressible on this subtarget, otherwise the size from the MCInstrDesc.
+unsigned RISCVInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
+  if (MI.isMetaInstruction())
+    return 0;
+
+  unsigned Opcode = MI.getOpcode();
+
+  if (Opcode == TargetOpcode::INLINEASM ||
+      Opcode == TargetOpcode::INLINEASM_BR) {
+    const MachineFunction &MF = *MI.getParent()->getParent();
+    const auto &TM = static_cast<const RISCVTargetMachine &>(MF.getTarget());
+    return getInlineAsmLength(MI.getOperand(0).getSymbolName(),
+                              *TM.getMCAsmInfo());
+  }
+
+  // Only query compressibility when the instruction is attached to a
+  // function (isCompressibleInst needs that context).
+  if (MI.getParent() && MI.getParent()->getParent()) {
+    if (isCompressibleInst(MI, STI))
+      return 2;
+  }
+  return get(Opcode).getSize();
+}
+
+// Treat register-move-equivalent instructions as free: canonical FP moves
+// (fsgnj rd, rs, rs) and ADDI/ORI/XORI with x0 source or zero immediate.
+// Everything else defers to the generic MI flag.
+bool RISCVInstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const {
+  const unsigned Opcode = MI.getOpcode();
+  switch (Opcode) {
+  default:
+    break;
+  case RISCV::FSGNJ_D:
+  case RISCV::FSGNJ_S:
+  case RISCV::FSGNJ_H:
+    // The canonical floating-point move is fsgnj rd, rs, rs.
+    return MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
+           MI.getOperand(1).getReg() == MI.getOperand(2).getReg();
+  case RISCV::ADDI:
+  case RISCV::ORI:
+  case RISCV::XORI:
+    return (MI.getOperand(1).isReg() &&
+            MI.getOperand(1).getReg() == RISCV::X0) ||
+           (MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0);
+  }
+  return MI.isAsCheapAsAMove();
+}
+
+// Recognize instructions that act as plain register copies and return their
+// (dest, source) operand pair: explicit moves, ADDI rd, rs, 0, and canonical
+// fsgnj moves. Returns std::nullopt for anything else.
+std::optional<DestSourcePair>
+RISCVInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
+  if (MI.isMoveReg())
+    return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
+  switch (MI.getOpcode()) {
+  default:
+    break;
+  case RISCV::ADDI:
+    // Operand 1 can be a frameindex but callers expect registers
+    if (MI.getOperand(1).isReg() && MI.getOperand(2).isImm() &&
+        MI.getOperand(2).getImm() == 0)
+      return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
+    break;
+  case RISCV::FSGNJ_D:
+  case RISCV::FSGNJ_S:
+  case RISCV::FSGNJ_H:
+    // The canonical floating-point move is fsgnj rd, rs, rs.
+    if (MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
+        MI.getOperand(1).getReg() == MI.getOperand(2).getReg())
+      return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
+    break;
+  }
+  return std::nullopt;
+}
+
+// Propagate MI flags onto a pair of replacement instructions: only the
+// flags common to both original instructions are kept.
+void RISCVInstrInfo::setSpecialOperandAttr(MachineInstr &OldMI1,
+                                           MachineInstr &OldMI2,
+                                           MachineInstr &NewMI1,
+                                           MachineInstr &NewMI2) const {
+  const uint16_t CommonFlags = OldMI1.getFlags() & OldMI2.getFlags();
+  NewMI1.setFlags(CommonFlags);
+  NewMI2.setFlags(CommonFlags);
+}
+
+// After the MachineCombiner builds replacement instructions, copy the
+// rounding-mode (frm) operand from Root onto each new instruction, adding an
+// implicit FRM use when the mode is dynamic. If Root has no frm operand,
+// assert that none of the replacements needs one either.
+void RISCVInstrInfo::finalizeInsInstrs(
+    MachineInstr &Root, MachineCombinerPattern &P,
+    SmallVectorImpl<MachineInstr *> &InsInstrs) const {
+  int16_t FrmOpIdx =
+      RISCV::getNamedOperandIdx(Root.getOpcode(), RISCV::OpName::frm);
+  if (FrmOpIdx < 0) {
+    assert(all_of(InsInstrs,
+                  [](MachineInstr *MI) {
+                    return RISCV::getNamedOperandIdx(MI->getOpcode(),
+                                                     RISCV::OpName::frm) < 0;
+                  }) &&
+           "New instructions require FRM whereas the old one does not have it");
+    return;
+  }
+
+  const MachineOperand &FRM = Root.getOperand(FrmOpIdx);
+  MachineFunction &MF = *Root.getMF();
+
+  for (auto *NewMI : InsInstrs) {
+    // The frm operand must be the next operand to append.
+    assert(static_cast<unsigned>(RISCV::getNamedOperandIdx(
+               NewMI->getOpcode(), RISCV::OpName::frm)) ==
+               NewMI->getNumOperands() &&
+           "Instruction has unexpected number of operands");
+    MachineInstrBuilder MIB(MF, NewMI);
+    MIB.add(FRM);
+    if (FRM.getImm() == RISCVFPRndMode::DYN)
+      MIB.addUse(RISCV::FRM, RegState::Implicit);
+  }
+}
+
+// True when Opc is a scalar floating-point add (half/single/double).
+static bool isFADD(unsigned Opc) {
+  return Opc == RISCV::FADD_H || Opc == RISCV::FADD_S || Opc == RISCV::FADD_D;
+}
+
+// True when Opc is a scalar floating-point subtract (half/single/double).
+static bool isFSUB(unsigned Opc) {
+  return Opc == RISCV::FSUB_H || Opc == RISCV::FSUB_S || Opc == RISCV::FSUB_D;
+}
+
+// True when Opc is a scalar floating-point multiply (half/single/double).
+static bool isFMUL(unsigned Opc) {
+  return Opc == RISCV::FMUL_H || Opc == RISCV::FMUL_S || Opc == RISCV::FMUL_D;
+}
+
+// In addition to the generic reassociation checks, require that Inst and its
+// sibling either both lack an frm operand or use identical rounding modes;
+// reassociating across differing FP rounding modes would change results.
+bool RISCVInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
+                                            bool &Commuted) const {
+  if (!TargetInstrInfo::hasReassociableSibling(Inst, Commuted))
+    return false;
+
+  const MachineRegisterInfo &MRI = Inst.getMF()->getRegInfo();
+  // The sibling is the def of operand 1, or operand 2 if commuted.
+  unsigned OperandIdx = Commuted ? 2 : 1;
+  const MachineInstr &Sibling =
+      *MRI.getVRegDef(Inst.getOperand(OperandIdx).getReg());
+
+  int16_t InstFrmOpIdx =
+      RISCV::getNamedOperandIdx(Inst.getOpcode(), RISCV::OpName::frm);
+  int16_t SiblingFrmOpIdx =
+      RISCV::getNamedOperandIdx(Sibling.getOpcode(), RISCV::OpName::frm);
+
+  return (InstFrmOpIdx < 0 && SiblingFrmOpIdx < 0) ||
+         RISCV::hasEqualFRM(Inst, Sibling);
+}
+
+// Report whether Inst (or, with Invert, its inverse opcode) may be
+// reassociated/commuted by the MachineCombiner. FP add/mul additionally
+// require the reassoc and nsz fast-math flags.
+bool RISCVInstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst,
+                                                 bool Invert) const {
+  unsigned Opc = Inst.getOpcode();
+  if (Invert) {
+    auto InverseOpcode = getInverseOpcode(Opc);
+    if (!InverseOpcode)
+      return false;
+    Opc = *InverseOpcode;
+  }
+
+  if (isFADD(Opc) || isFMUL(Opc))
+    return Inst.getFlag(MachineInstr::MIFlag::FmReassoc) &&
+           Inst.getFlag(MachineInstr::MIFlag::FmNsz);
+
+  switch (Opc) {
+  default:
+    return false;
+  case RISCV::ADD:
+  case RISCV::ADDW:
+  case RISCV::AND:
+  case RISCV::OR:
+  case RISCV::XOR:
+    // From RISC-V ISA spec, if both the high and low bits of the same product
+    // are required, then the recommended code sequence is:
+    //
+    // MULH[[S]U] rdh, rs1, rs2
+    // MUL        rdl, rs1, rs2
+    // (source register specifiers must be in same order and rdh cannot be the
+    // same as rs1 or rs2)
+    //
+    // Microarchitectures can then fuse these into a single multiply operation
+    // instead of performing two separate multiplies.
+    // MachineCombiner may reassociate MUL operands and lose the fusion
+    // opportunity.
+  case RISCV::MUL:
+  case RISCV::MULW:
+  case RISCV::MIN:
+  case RISCV::MINU:
+  case RISCV::MAX:
+  case RISCV::MAXU:
+  case RISCV::FMIN_H:
+  case RISCV::FMIN_S:
+  case RISCV::FMIN_D:
+  case RISCV::FMAX_H:
+  case RISCV::FMAX_S:
+  case RISCV::FMAX_D:
+    return true;
+  }
+
+  // Unreachable: the switch above covers every path via its default case.
+  return false;
+}
+
+// Return the opcode computing the inverse operation (add<->sub) for use by
+// the MachineCombiner, or std::nullopt when no inverse exists.
+std::optional<unsigned>
+RISCVInstrInfo::getInverseOpcode(unsigned Opcode) const {
+  // Pairs of mutually inverse opcodes.
+  static const std::pair<unsigned, unsigned> InversePairs[] = {
+      {RISCV::FADD_H, RISCV::FSUB_H},
+      {RISCV::FADD_S, RISCV::FSUB_S},
+      {RISCV::FADD_D, RISCV::FSUB_D},
+      {RISCV::ADD, RISCV::SUB},
+      {RISCV::ADDW, RISCV::SUBW}};
+  for (const auto &Pair : InversePairs) {
+    if (Opcode == Pair.first)
+      return Pair.second;
+    if (Opcode == Pair.second)
+      return Pair.first;
+  }
+  return std::nullopt;
+}
+
+// Check whether the FMUL defining operand MO of Root can be fused with Root
+// into an FMA-style instruction: both must carry the contract fast-math
+// flag, share the same rounding mode, and live in the same basic block.
+static bool canCombineFPFusedMultiply(const MachineInstr &Root,
+                                      const MachineOperand &MO,
+                                      bool DoRegPressureReduce) {
+  if (!MO.isReg() || !MO.getReg().isVirtual())
+    return false;
+  const MachineRegisterInfo &MRI = Root.getMF()->getRegInfo();
+  MachineInstr *MI = MRI.getVRegDef(MO.getReg());
+  if (!MI || !isFMUL(MI->getOpcode()))
+    return false;
+
+  // Fusion requires the contract flag on both producer and consumer.
+  if (!Root.getFlag(MachineInstr::MIFlag::FmContract) ||
+      !MI->getFlag(MachineInstr::MIFlag::FmContract))
+    return false;
+
+  // Try combining even if fmul has more than one use as it eliminates
+  // dependency between fadd(fsub) and fmul. However, it can extend liveranges
+  // for fmul operands, so reject the transformation in register pressure
+  // reduction mode.
+  if (DoRegPressureReduce && !MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
+    return false;
+
+  // Do not combine instructions from different basic blocks.
+  if (Root.getParent() != MI->getParent())
+    return false;
+  return RISCV::hasEqualFRM(Root, *MI);
+}
+
+// Collect fused multiply-add/sub combiner patterns rooted at an FADD/FSUB.
+// An FMUL feeding operand 1 yields FMADD_AX/FMSUB; one feeding operand 2
+// yields FMADD_XA/FNMSUB. Returns true if any pattern was added.
+static bool
+getFPFusedMultiplyPatterns(MachineInstr &Root,
+                           SmallVectorImpl<MachineCombinerPattern> &Patterns,
+                           bool DoRegPressureReduce) {
+  unsigned Opc = Root.getOpcode();
+  bool IsFAdd = isFADD(Opc);
+  if (!IsFAdd && !isFSUB(Opc))
+    return false;
+  bool Added = false;
+  if (canCombineFPFusedMultiply(Root, Root.getOperand(1),
+                                DoRegPressureReduce)) {
+    Patterns.push_back(IsFAdd ? MachineCombinerPattern::FMADD_AX
+                              : MachineCombinerPattern::FMSUB);
+    Added = true;
+  }
+  if (canCombineFPFusedMultiply(Root, Root.getOperand(2),
+                                DoRegPressureReduce)) {
+    Patterns.push_back(IsFAdd ? MachineCombinerPattern::FMADD_XA
+                              : MachineCombinerPattern::FNMSUB);
+    Added = true;
+  }
+  return Added;
+}
+
+// Gather all floating-point combiner patterns for Root. Currently only the
+// fused multiply-add/sub patterns are recognized.
+static bool getFPPatterns(MachineInstr &Root,
+                          SmallVectorImpl<MachineCombinerPattern> &Patterns,
+                          bool DoRegPressureReduce) {
+  return getFPFusedMultiplyPatterns(Root, Patterns, DoRegPressureReduce);
+}
+
+// Entry point for the MachineCombiner: try RISC-V specific FP patterns
+// first, then fall back to the target-independent patterns.
+bool RISCVInstrInfo::getMachineCombinerPatterns(
+    MachineInstr &Root, SmallVectorImpl<MachineCombinerPattern> &Patterns,
+    bool DoRegPressureReduce) const {
+
+  if (getFPPatterns(Root, Patterns, DoRegPressureReduce))
+    return true;
+
+  return TargetInstrInfo::getMachineCombinerPatterns(Root, Patterns,
+                                                     DoRegPressureReduce);
+}
+
+// Choose the fused opcode replacing RootOpc: FADD_* becomes FMADD_*;
+// FSUB_* becomes FMSUB_* when the multiply feeds the minuend (FMSUB
+// pattern) and FNMSUB_* otherwise.
+static unsigned getFPFusedMultiplyOpcode(unsigned RootOpc,
+                                         MachineCombinerPattern Pattern) {
+  bool IsFMSub = Pattern == MachineCombinerPattern::FMSUB;
+  switch (RootOpc) {
+  case RISCV::FADD_H:
+    return RISCV::FMADD_H;
+  case RISCV::FADD_S:
+    return RISCV::FMADD_S;
+  case RISCV::FADD_D:
+    return RISCV::FMADD_D;
+  case RISCV::FSUB_H:
+    return IsFMSub ? RISCV::FMSUB_H : RISCV::FNMSUB_H;
+  case RISCV::FSUB_S:
+    return IsFMSub ? RISCV::FMSUB_S : RISCV::FNMSUB_S;
+  case RISCV::FSUB_D:
+    return IsFMSub ? RISCV::FMSUB_D : RISCV::FNMSUB_D;
+  default:
+    llvm_unreachable("Unexpected opcode");
+  }
+}
+
+// Return which operand of the root FADD/FSUB holds the addend (i.e. the
+// operand NOT produced by the fused multiply).
+static unsigned getAddendOperandIdx(MachineCombinerPattern Pattern) {
+  if (Pattern == MachineCombinerPattern::FMADD_AX ||
+      Pattern == MachineCombinerPattern::FMSUB)
+    return 2;
+  if (Pattern == MachineCombinerPattern::FMADD_XA ||
+      Pattern == MachineCombinerPattern::FNMSUB)
+    return 1;
+  llvm_unreachable("Unexpected pattern");
+}
+
+// Build the fused multiply-add/sub instruction replacing Root (FADD/FSUB)
+// and Prev (the FMUL feeding it). The new instruction goes to InsInstrs;
+// Root, and Prev when its result has no other users, go to DelInstrs.
+static void combineFPFusedMultiply(MachineInstr &Root, MachineInstr &Prev,
+                                   MachineCombinerPattern Pattern,
+                                   SmallVectorImpl<MachineInstr *> &InsInstrs,
+                                   SmallVectorImpl<MachineInstr *> &DelInstrs) {
+  MachineFunction *MF = Root.getMF();
+  MachineRegisterInfo &MRI = MF->getRegInfo();
+  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
+
+  MachineOperand &Mul1 = Prev.getOperand(1);
+  MachineOperand &Mul2 = Prev.getOperand(2);
+  MachineOperand &Dst = Root.getOperand(0);
+  MachineOperand &Addend = Root.getOperand(getAddendOperandIdx(Pattern));
+
+  Register DstReg = Dst.getReg();
+  unsigned FusedOpc = getFPFusedMultiplyOpcode(Root.getOpcode(), Pattern);
+  // Only MI flags present on both instructions survive on the fused one.
+  auto IntersectedFlags = Root.getFlags() & Prev.getFlags();
+  DebugLoc MergedLoc =
+      DILocation::getMergedLocation(Root.getDebugLoc(), Prev.getDebugLoc());
+
+  MachineInstrBuilder MIB =
+      BuildMI(*MF, MergedLoc, TII->get(FusedOpc), DstReg)
+          .addReg(Mul1.getReg(), getKillRegState(Mul1.isKill()))
+          .addReg(Mul2.getReg(), getKillRegState(Mul2.isKill()))
+          .addReg(Addend.getReg(), getKillRegState(Addend.isKill()))
+          .setMIFlags(IntersectedFlags);
+
+  // Mul operands are not killed anymore.
+  Mul1.setIsKill(false);
+  Mul2.setIsKill(false);
+
+  InsInstrs.push_back(MIB);
+  // Keep Prev alive if its product still has other users.
+  if (MRI.hasOneNonDBGUse(Prev.getOperand(0).getReg()))
+    DelInstrs.push_back(&Prev);
+  DelInstrs.push_back(&Root);
+}
+
+// Machine-combiner hook: materialize the alternative code sequence for a
+// matched pattern. FP fused-multiply patterns are handled here; everything
+// else is delegated to the target-independent implementation.
+void RISCVInstrInfo::genAlternativeCodeSequence(
+ MachineInstr &Root, MachineCombinerPattern Pattern,
+ SmallVectorImpl<MachineInstr *> &InsInstrs,
+ SmallVectorImpl<MachineInstr *> &DelInstrs,
+ DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
+ MachineRegisterInfo &MRI = Root.getMF()->getRegInfo();
+ switch (Pattern) {
+ default:
+ TargetInstrInfo::genAlternativeCodeSequence(Root, Pattern, InsInstrs,
+ DelInstrs, InstrIdxForVirtReg);
+ return;
+ case MachineCombinerPattern::FMADD_AX:
+ case MachineCombinerPattern::FMSUB: {
+ // The multiply defines operand 1 of Root for these patterns.
+ MachineInstr &Prev = *MRI.getVRegDef(Root.getOperand(1).getReg());
+ combineFPFusedMultiply(Root, Prev, Pattern, InsInstrs, DelInstrs);
+ return;
+ }
+ case MachineCombinerPattern::FMADD_XA:
+ case MachineCombinerPattern::FNMSUB: {
+ // The multiply defines operand 2 of Root for these patterns.
+ MachineInstr &Prev = *MRI.getVRegDef(Root.getOperand(2).getReg());
+ combineFPFusedMultiply(Root, Prev, Pattern, InsInstrs, DelInstrs);
+ return;
+ }
+ }
+}
+
+// Machine verifier hook: check RISC-V specific invariants of MI.
+// Validates the range of every target-defined immediate operand and, for
+// vector pseudos, the consistency of the merge/VL/SEW/policy operands that
+// the TSFlags claim are present. On failure, ErrInfo is set to a diagnostic
+// string and false is returned.
+bool RISCVInstrInfo::verifyInstruction(const MachineInstr &MI,
+ StringRef &ErrInfo) const {
+ MCInstrDesc const &Desc = MI.getDesc();
+
+ for (auto &OI : enumerate(Desc.operands())) {
+ unsigned OpType = OI.value().OperandType;
+ if (OpType >= RISCVOp::OPERAND_FIRST_RISCV_IMM &&
+ OpType <= RISCVOp::OPERAND_LAST_RISCV_IMM) {
+ const MachineOperand &MO = MI.getOperand(OI.index());
+ if (MO.isImm()) {
+ int64_t Imm = MO.getImm();
+ bool Ok;
+ switch (OpType) {
+ default:
+ llvm_unreachable("Unexpected operand type");
+
+ // clang-format off
+#define CASE_OPERAND_UIMM(NUM) \
+ case RISCVOp::OPERAND_UIMM##NUM: \
+ Ok = isUInt<NUM>(Imm); \
+ break;
+ CASE_OPERAND_UIMM(2)
+ CASE_OPERAND_UIMM(3)
+ CASE_OPERAND_UIMM(4)
+ CASE_OPERAND_UIMM(5)
+ CASE_OPERAND_UIMM(7)
+ case RISCVOp::OPERAND_UIMM7_LSB00:
+ Ok = isShiftedUInt<5, 2>(Imm);
+ break;
+ case RISCVOp::OPERAND_UIMM8_LSB00:
+ Ok = isShiftedUInt<6, 2>(Imm);
+ break;
+ case RISCVOp::OPERAND_UIMM8_LSB000:
+ Ok = isShiftedUInt<5, 3>(Imm);
+ break;
+ CASE_OPERAND_UIMM(12)
+ CASE_OPERAND_UIMM(20)
+ // clang-format on
+ case RISCVOp::OPERAND_SIMM10_LSB0000_NONZERO:
+ Ok = isShiftedInt<6, 4>(Imm) && (Imm != 0);
+ break;
+ case RISCVOp::OPERAND_ZERO:
+ Ok = Imm == 0;
+ break;
+ case RISCVOp::OPERAND_SIMM5:
+ Ok = isInt<5>(Imm);
+ break;
+ case RISCVOp::OPERAND_SIMM5_PLUS1:
+ // simm5 shifted by one: [-15, 16] with -16 excluded.
+ Ok = (isInt<5>(Imm) && Imm != -16) || Imm == 16;
+ break;
+ case RISCVOp::OPERAND_SIMM6:
+ Ok = isInt<6>(Imm);
+ break;
+ case RISCVOp::OPERAND_SIMM6_NONZERO:
+ Ok = Imm != 0 && isInt<6>(Imm);
+ break;
+ case RISCVOp::OPERAND_VTYPEI10:
+ Ok = isUInt<10>(Imm);
+ break;
+ case RISCVOp::OPERAND_VTYPEI11:
+ Ok = isUInt<11>(Imm);
+ break;
+ case RISCVOp::OPERAND_SIMM12:
+ Ok = isInt<12>(Imm);
+ break;
+ case RISCVOp::OPERAND_SIMM12_LSB00000:
+ Ok = isShiftedInt<7, 5>(Imm);
+ break;
+ case RISCVOp::OPERAND_UIMMLOG2XLEN:
+ // Shift amounts are XLEN-dependent: 6 bits on RV64, 5 on RV32.
+ Ok = STI.is64Bit() ? isUInt<6>(Imm) : isUInt<5>(Imm);
+ break;
+ case RISCVOp::OPERAND_UIMMLOG2XLEN_NONZERO:
+ Ok = STI.is64Bit() ? isUInt<6>(Imm) : isUInt<5>(Imm);
+ Ok = Ok && Imm != 0;
+ break;
+ case RISCVOp::OPERAND_UIMM_SHFL:
+ Ok = STI.is64Bit() ? isUInt<5>(Imm) : isUInt<4>(Imm);
+ break;
+ case RISCVOp::OPERAND_RVKRNUM:
+ Ok = Imm >= 0 && Imm <= 10;
+ break;
+ }
+ if (!Ok) {
+ ErrInfo = "Invalid immediate";
+ return false;
+ }
+ }
+ }
+ }
+
+ // Cross-check the vector-pseudo operands implied by the TSFlags.
+ const uint64_t TSFlags = Desc.TSFlags;
+ if (RISCVII::hasMergeOp(TSFlags)) {
+ unsigned OpIdx = RISCVII::getMergeOpNum(Desc);
+ // The merge operand must be the one tied to def operand 0.
+ if (MI.findTiedOperandIdx(0) != OpIdx) {
+ ErrInfo = "Merge op improperly tied";
+ return false;
+ }
+ }
+ if (RISCVII::hasVLOp(TSFlags)) {
+ const MachineOperand &Op = MI.getOperand(RISCVII::getVLOpNum(Desc));
+ if (!Op.isImm() && !Op.isReg()) {
+ ErrInfo = "Invalid operand type for VL operand";
+ return false;
+ }
+ if (Op.isReg() && Op.getReg() != RISCV::NoRegister) {
+ const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
+ auto *RC = MRI.getRegClass(Op.getReg());
+ if (!RISCV::GPRRegClass.hasSubClassEq(RC)) {
+ ErrInfo = "Invalid register class for VL operand";
+ return false;
+ }
+ }
+ if (!RISCVII::hasSEWOp(TSFlags)) {
+ ErrInfo = "VL operand w/o SEW operand?";
+ return false;
+ }
+ }
+ if (RISCVII::hasSEWOp(TSFlags)) {
+ unsigned OpIdx = RISCVII::getSEWOpNum(Desc);
+ uint64_t Log2SEW = MI.getOperand(OpIdx).getImm();
+ if (Log2SEW > 31) {
+ ErrInfo = "Unexpected SEW value";
+ return false;
+ }
+ // Log2SEW of 0 encodes the mask element type, treated as SEW=8.
+ unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
+ if (!RISCVVType::isValidSEW(SEW)) {
+ ErrInfo = "Unexpected SEW value";
+ return false;
+ }
+ }
+ if (RISCVII::hasVecPolicyOp(TSFlags)) {
+ unsigned OpIdx = RISCVII::getVecPolicyOpNum(Desc);
+ uint64_t Policy = MI.getOperand(OpIdx).getImm();
+ if (Policy > (RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC)) {
+ ErrInfo = "Invalid Policy Value";
+ return false;
+ }
+ if (!RISCVII::hasVLOp(TSFlags)) {
+ ErrInfo = "policy operand w/o VL operand?";
+ return false;
+ }
+
+ // VecPolicy operands can only exist on instructions with passthru/merge
+ // arguments. Note that not all arguments with passthru have vec policy
+ // operands- some instructions have implicit policies.
+ unsigned UseOpIdx;
+ if (!MI.isRegTiedToUseOperand(0, &UseOpIdx)) {
+ ErrInfo = "policy operand w/o tied operand?";
+ return false;
+ }
+ }
+
+ return true;
+}
+
+// Return true if get the base operand, byte offset of an instruction and the
+// memory width. Width is the size of memory that is being loaded/stored.
+// Only simple base+imm forms (reg base in operand 1, imm offset in operand 2,
+// exactly one memory operand) are recognized.
+bool RISCVInstrInfo::getMemOperandWithOffsetWidth(
+ const MachineInstr &LdSt, const MachineOperand *&BaseReg, int64_t &Offset,
+ unsigned &Width, const TargetRegisterInfo *TRI) const {
+ if (!LdSt.mayLoadOrStore())
+ return false;
+
+ // Here we assume the standard RISC-V ISA, which uses a base+offset
+ // addressing mode. You'll need to relax these conditions to support custom
+ // load/stores instructions.
+ if (LdSt.getNumExplicitOperands() != 3)
+ return false;
+ if (!LdSt.getOperand(1).isReg() || !LdSt.getOperand(2).isImm())
+ return false;
+
+ if (!LdSt.hasOneMemOperand())
+ return false;
+
+ Width = (*LdSt.memoperands_begin())->getSize();
+ BaseReg = &LdSt.getOperand(1);
+ Offset = LdSt.getOperand(2).getImm();
+ return true;
+}
+
+// Return true when two memory accesses provably do not overlap: same base
+// register and the lower access's [offset, offset+width) range ends at or
+// before the higher access's offset. Conservatively returns false otherwise.
+bool RISCVInstrInfo::areMemAccessesTriviallyDisjoint(
+ const MachineInstr &MIa, const MachineInstr &MIb) const {
+ assert(MIa.mayLoadOrStore() && "MIa must be a load or store.");
+ assert(MIb.mayLoadOrStore() && "MIb must be a load or store.");
+
+ // Side effects or ordered memory references (volatile/atomic) forbid any
+ // disjointness conclusion.
+ if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
+ MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
+ return false;
+
+ // Retrieve the base register, offset from the base register and width. Width
+ // is the size of memory that is being loaded/stored (e.g. 1, 2, 4). If
+ // base registers are identical, and the offset of a lower memory access +
+ // the width doesn't overlap the offset of a higher memory access,
+ // then the memory accesses are different.
+ const TargetRegisterInfo *TRI = STI.getRegisterInfo();
+ const MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr;
+ int64_t OffsetA = 0, OffsetB = 0;
+ unsigned int WidthA = 0, WidthB = 0;
+ if (getMemOperandWithOffsetWidth(MIa, BaseOpA, OffsetA, WidthA, TRI) &&
+ getMemOperandWithOffsetWidth(MIb, BaseOpB, OffsetB, WidthB, TRI)) {
+ if (BaseOpA->isIdenticalTo(*BaseOpB)) {
+ int LowOffset = std::min(OffsetA, OffsetB);
+ int HighOffset = std::max(OffsetA, OffsetB);
+ int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
+ if (LowOffset + LowWidth <= HighOffset)
+ return true;
+ }
+ }
+ return false;
+}
+
+// Split a target-flag word into its direct part (masked by
+// MO_DIRECT_FLAG_MASK) and the remaining bitmask part.
+std::pair<unsigned, unsigned>
+RISCVInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
+ const unsigned Mask = RISCVII::MO_DIRECT_FLAG_MASK;
+ return std::make_pair(TF & Mask, TF & ~Mask);
+}
+
+// Map each direct operand target flag to the name used when (de)serializing
+// MIR, e.g. "riscv-call" for MO_CALL.
+ArrayRef<std::pair<unsigned, const char *>>
+RISCVInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
+ using namespace RISCVII;
+ static const std::pair<unsigned, const char *> TargetFlags[] = {
+ {MO_CALL, "riscv-call"},
+ {MO_PLT, "riscv-plt"},
+ {MO_LO, "riscv-lo"},
+ {MO_HI, "riscv-hi"},
+ {MO_PCREL_LO, "riscv-pcrel-lo"},
+ {MO_PCREL_HI, "riscv-pcrel-hi"},
+ {MO_GOT_HI, "riscv-got-hi"},
+ {MO_TPREL_LO, "riscv-tprel-lo"},
+ {MO_TPREL_HI, "riscv-tprel-hi"},
+ {MO_TPREL_ADD, "riscv-tprel-add"},
+ {MO_TLS_GOT_HI, "riscv-tls-got-hi"},
+ {MO_TLS_GD_HI, "riscv-tls-gd-hi"}};
+ return ArrayRef(TargetFlags);
+}
+// Machine outliner hook: decide whether any code may be outlined from MF.
+bool RISCVInstrInfo::isFunctionSafeToOutlineFrom(
+ MachineFunction &MF, bool OutlineFromLinkOnceODRs) const {
+ const Function &F = MF.getFunction();
+
+ // Can F be deduplicated by the linker? If it can, don't outline from it.
+ if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
+ return false;
+
+ // Don't outline from functions with section markings; the program could
+ // expect that all the code is in the named section.
+ if (F.hasSection())
+ return false;
+
+ // It's safe to outline from MF.
+ return true;
+}
+
+// Machine outliner hook: per-basic-block safety check. Defers entirely to
+// the generic implementation.
+bool RISCVInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
+ unsigned &Flags) const {
+ // More accurate safety checking is done in getOutliningCandidateInfo.
+ return TargetInstrInfo::isMBBSafeToOutlineFrom(MBB, Flags);
+}
+
+// Enum values indicating how an outlined call should be constructed.
+// Only one strategy exists for RISC-V: call via t0 (X5).
+enum MachineOutlinerConstructionID {
+ MachineOutlinerDefault
+};
+
+// Outline aggressively only from functions marked minsize (-Oz).
+bool RISCVInstrInfo::shouldOutlineFromFunctionByDefault(
+ MachineFunction &MF) const {
+ return MF.getFunction().hasMinSize();
+}
+
+// Machine outliner hook: given all locations of a repeated sequence, filter
+// out the ones where the call cannot be set up, and compute the code-size
+// cost model (call overhead per candidate and frame overhead for the
+// outlined function itself).
+outliner::OutlinedFunction RISCVInstrInfo::getOutliningCandidateInfo(
+ std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {
+
+ // First we need to filter out candidates where the X5 register (IE t0) can't
+ // be used to setup the function call.
+ auto CannotInsertCall = [](outliner::Candidate &C) {
+ const TargetRegisterInfo *TRI = C.getMF()->getSubtarget().getRegisterInfo();
+ return !C.isAvailableAcrossAndOutOfSeq(RISCV::X5, *TRI);
+ };
+
+ llvm::erase_if(RepeatedSequenceLocs, CannotInsertCall);
+
+ // If the sequence doesn't have enough candidates left, then we're done.
+ if (RepeatedSequenceLocs.size() < 2)
+ return outliner::OutlinedFunction();
+
+ unsigned SequenceSize = 0;
+
+ // Sum the encoded size of the candidate sequence.
+ auto I = RepeatedSequenceLocs[0].front();
+ auto E = std::next(RepeatedSequenceLocs[0].back());
+ for (; I != E; ++I)
+ SequenceSize += getInstSizeInBytes(*I);
+
+ // call t0, function = 8 bytes.
+ unsigned CallOverhead = 8;
+ for (auto &C : RepeatedSequenceLocs)
+ C.setCallInfo(MachineOutlinerDefault, CallOverhead);
+
+ // jr t0 = 4 bytes, 2 bytes if compressed instructions are enabled.
+ unsigned FrameOverhead = 4;
+ if (RepeatedSequenceLocs[0]
+ .getMF()
+ ->getSubtarget<RISCVSubtarget>()
+ .hasStdExtCOrZca())
+ FrameOverhead = 2;
+
+ return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize,
+ FrameOverhead, MachineOutlinerDefault);
+}
+
+// Machine outliner hook: classify a single instruction as Legal (may be
+// outlined), Illegal (blocks outlining), or Invisible (ignored by the
+// outliner's cost/legality analysis).
+outliner::InstrType
+RISCVInstrInfo::getOutliningType(MachineBasicBlock::iterator &MBBI,
+ unsigned Flags) const {
+ MachineInstr &MI = *MBBI;
+ MachineBasicBlock *MBB = MI.getParent();
+ const TargetRegisterInfo *TRI =
+ MBB->getParent()->getSubtarget().getRegisterInfo();
+ const auto &F = MI.getMF()->getFunction();
+
+ // Positions generally can't safely be outlined.
+ if (MI.isPosition()) {
+ // We can manually strip out CFI instructions later.
+ if (MI.isCFIInstruction())
+ // If current function has exception handling code, we can't outline &
+ // strip these CFI instructions since it may break .eh_frame section
+ // needed in unwinding.
+ return F.needsUnwindTableEntry() ? outliner::InstrType::Illegal
+ : outliner::InstrType::Invisible;
+
+ return outliner::InstrType::Illegal;
+ }
+
+ // Don't trust the user to write safe inline assembly.
+ if (MI.isInlineAsm())
+ return outliner::InstrType::Illegal;
+
+ // We can't outline branches to other basic blocks.
+ if (MI.isTerminator() && !MBB->succ_empty())
+ return outliner::InstrType::Illegal;
+
+ // We need support for tail calls to outlined functions before return
+ // statements can be allowed.
+ if (MI.isReturn())
+ return outliner::InstrType::Illegal;
+
+ // Don't allow modifying the X5 register which we use for return addresses for
+ // these outlined functions.
+ if (MI.modifiesRegister(RISCV::X5, TRI) ||
+ MI.getDesc().hasImplicitDefOfPhysReg(RISCV::X5))
+ return outliner::InstrType::Illegal;
+
+ // Make sure the operands don't reference something unsafe.
+ for (const auto &MO : MI.operands()) {
+ if (MO.isMBB() || MO.isBlockAddress() || MO.isCPI() || MO.isJTI())
+ return outliner::InstrType::Illegal;
+
+ // pcrel-hi and pcrel-lo can't put in separate sections, filter that out
+ // if any possible.
+ if (MO.getTargetFlags() == RISCVII::MO_PCREL_LO &&
+ (MI.getMF()->getTarget().getFunctionSections() || F.hasComdat() ||
+ F.hasSection()))
+ return outliner::InstrType::Illegal;
+ }
+
+ // Don't allow instructions which won't be materialized to impact outlining
+ // analysis.
+ if (MI.isMetaInstruction())
+ return outliner::InstrType::Invisible;
+
+ return outliner::InstrType::Legal;
+}
+
+// Machine outliner hook: finish the body of the outlined function by
+// removing CFI instructions and appending the return-via-t0 sequence.
+void RISCVInstrInfo::buildOutlinedFrame(
+ MachineBasicBlock &MBB, MachineFunction &MF,
+ const outliner::OutlinedFunction &OF) const {
+
+ // Strip out any CFI instructions
+ // (restart the scan after each removal since the iterator is invalidated).
+ bool Changed = true;
+ while (Changed) {
+ Changed = false;
+ auto I = MBB.begin();
+ auto E = MBB.end();
+ for (; I != E; ++I) {
+ if (I->isCFIInstruction()) {
+ I->removeFromParent();
+ Changed = true;
+ break;
+ }
+ }
+ }
+
+ // X5 (t0) carries the return address into the outlined function.
+ MBB.addLiveIn(RISCV::X5);
+
+ // Add in a return instruction to the end of the outlined frame.
+ MBB.insert(MBB.end(), BuildMI(MF, DebugLoc(), get(RISCV::JALR))
+ .addReg(RISCV::X0, RegState::Define)
+ .addReg(RISCV::X5)
+ .addImm(0));
+}
+
+// Machine outliner hook: insert a call to the outlined function at It,
+// using X5 (t0) as the link register. Returns an iterator to the call.
+MachineBasicBlock::iterator RISCVInstrInfo::insertOutlinedCall(
+ Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It,
+ MachineFunction &MF, outliner::Candidate &C) const {
+
+ // Add in a call instruction to the outlined function at the given location.
+ It = MBB.insert(It,
+ BuildMI(MF, DebugLoc(), get(RISCV::PseudoCALLReg), RISCV::X5)
+ .addGlobalAddress(M.getNamedValue(MF.getName()), 0,
+ RISCVII::MO_CALL));
+ return It;
+}
+
+// MIR printer helper function to annotate Operands with a comment.
+// Decodes vtype immediates of vsetvli-style instructions, and SEW/policy
+// immediates of vector pseudos, into human-readable text (e.g. "e32", "ta, mu").
+std::string RISCVInstrInfo::createMIROperandComment(
+ const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx,
+ const TargetRegisterInfo *TRI) const {
+ // Print a generic comment for this operand if there is one.
+ std::string GenericComment =
+ TargetInstrInfo::createMIROperandComment(MI, Op, OpIdx, TRI);
+ if (!GenericComment.empty())
+ return GenericComment;
+
+ // If not, we must have an immediate operand.
+ if (!Op.isImm())
+ return std::string();
+
+ std::string Comment;
+ raw_string_ostream OS(Comment);
+
+ uint64_t TSFlags = MI.getDesc().TSFlags;
+
+ // Print the full VType operand of vsetvli/vsetivli instructions, and the SEW
+ // operand of vector codegen pseudos.
+ if ((MI.getOpcode() == RISCV::VSETVLI || MI.getOpcode() == RISCV::VSETIVLI ||
+ MI.getOpcode() == RISCV::PseudoVSETVLI ||
+ MI.getOpcode() == RISCV::PseudoVSETIVLI ||
+ MI.getOpcode() == RISCV::PseudoVSETVLIX0) &&
+ OpIdx == 2) {
+ unsigned Imm = MI.getOperand(OpIdx).getImm();
+ RISCVVType::printVType(Imm, OS);
+ } else if (RISCVII::hasSEWOp(TSFlags) &&
+ OpIdx == RISCVII::getSEWOpNum(MI.getDesc())) {
+ // Log2SEW of 0 encodes the mask element type, printed as e8.
+ unsigned Log2SEW = MI.getOperand(OpIdx).getImm();
+ unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
+ assert(RISCVVType::isValidSEW(SEW) && "Unexpected SEW");
+ OS << "e" << SEW;
+ } else if (RISCVII::hasVecPolicyOp(TSFlags) &&
+ OpIdx == RISCVII::getVecPolicyOpNum(MI.getDesc())) {
+ unsigned Policy = MI.getOperand(OpIdx).getImm();
+ assert(Policy <= (RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC) &&
+ "Invalid Policy Value");
+ OS << (Policy & RISCVII::TAIL_AGNOSTIC ? "ta" : "tu") << ", "
+ << (Policy & RISCVII::MASK_AGNOSTIC ? "ma" : "mu");
+ }
+
+ OS.flush();
+ return Comment;
+}
+
+// Helper macros that expand to runs of switch-case labels covering every
+// LMUL variant of a vector FMA pseudo (used by findCommutedOpIndices and
+// commuteInstructionImpl below). The _MF4/_MF2/_M1 variants start at larger
+// minimum LMULs, matching the element widths available for each splat type.
+// clang-format off
+#define CASE_VFMA_OPCODE_COMMON(OP, TYPE, LMUL) \
+ RISCV::PseudoV##OP##_##TYPE##_##LMUL
+
+#define CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE) \
+ CASE_VFMA_OPCODE_COMMON(OP, TYPE, M1): \
+ case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M2): \
+ case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M4): \
+ case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M8)
+
+#define CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE) \
+ CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF2): \
+ case CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE)
+
+#define CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE) \
+ CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF4): \
+ case CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE)
+
+#define CASE_VFMA_OPCODE_LMULS(OP, TYPE) \
+ CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF8): \
+ case CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE)
+
+#define CASE_VFMA_SPLATS(OP) \
+ CASE_VFMA_OPCODE_LMULS_MF4(OP, VF16): \
+ case CASE_VFMA_OPCODE_LMULS_MF2(OP, VF32): \
+ case CASE_VFMA_OPCODE_LMULS_M1(OP, VF64)
+// clang-format on
+
+// TargetInstrInfo hook: find a pair of commutable source operand indices for
+// MI, honoring any indices the caller has fixed (CommuteAnyOperandIndex
+// means "free to choose"). For vector FMA pseudos the choice is constrained
+// by which operand is tied and which swaps require an opcode change.
+bool RISCVInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
+ unsigned &SrcOpIdx1,
+ unsigned &SrcOpIdx2) const {
+ const MCInstrDesc &Desc = MI.getDesc();
+ if (!Desc.isCommutable())
+ return false;
+
+ switch (MI.getOpcode()) {
+ case RISCV::PseudoCCMOVGPR:
+ // Operands 4 and 5 are commutable.
+ return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 4, 5);
+ case CASE_VFMA_SPLATS(FMADD):
+ case CASE_VFMA_SPLATS(FMSUB):
+ case CASE_VFMA_SPLATS(FMACC):
+ case CASE_VFMA_SPLATS(FMSAC):
+ case CASE_VFMA_SPLATS(FNMADD):
+ case CASE_VFMA_SPLATS(FNMSUB):
+ case CASE_VFMA_SPLATS(FNMACC):
+ case CASE_VFMA_SPLATS(FNMSAC):
+ case CASE_VFMA_OPCODE_LMULS_MF4(FMACC, VV):
+ case CASE_VFMA_OPCODE_LMULS_MF4(FMSAC, VV):
+ case CASE_VFMA_OPCODE_LMULS_MF4(FNMACC, VV):
+ case CASE_VFMA_OPCODE_LMULS_MF4(FNMSAC, VV):
+ case CASE_VFMA_OPCODE_LMULS(MADD, VX):
+ case CASE_VFMA_OPCODE_LMULS(NMSUB, VX):
+ case CASE_VFMA_OPCODE_LMULS(MACC, VX):
+ case CASE_VFMA_OPCODE_LMULS(NMSAC, VX):
+ case CASE_VFMA_OPCODE_LMULS(MACC, VV):
+ case CASE_VFMA_OPCODE_LMULS(NMSAC, VV): {
+ // If the tail policy is undisturbed we can't commute.
+ assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags));
+ if ((MI.getOperand(MI.getNumExplicitOperands() - 1).getImm() & 1) == 0)
+ return false;
+
+ // For these instructions we can only swap operand 1 and operand 3 by
+ // changing the opcode.
+ unsigned CommutableOpIdx1 = 1;
+ unsigned CommutableOpIdx2 = 3;
+ if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
+ CommutableOpIdx2))
+ return false;
+ return true;
+ }
+ case CASE_VFMA_OPCODE_LMULS_MF4(FMADD, VV):
+ case CASE_VFMA_OPCODE_LMULS_MF4(FMSUB, VV):
+ case CASE_VFMA_OPCODE_LMULS_MF4(FNMADD, VV):
+ case CASE_VFMA_OPCODE_LMULS_MF4(FNMSUB, VV):
+ case CASE_VFMA_OPCODE_LMULS(MADD, VV):
+ case CASE_VFMA_OPCODE_LMULS(NMSUB, VV): {
+ // If the tail policy is undisturbed we can't commute.
+ assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags));
+ if ((MI.getOperand(MI.getNumExplicitOperands() - 1).getImm() & 1) == 0)
+ return false;
+
+ // For these instructions we have more freedom. We can commute with the
+ // other multiplicand or with the addend/subtrahend/minuend.
+
+ // Any fixed operand must be from source 1, 2 or 3.
+ if (SrcOpIdx1 != CommuteAnyOperandIndex && SrcOpIdx1 > 3)
+ return false;
+ if (SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx2 > 3)
+ return false;
+
+ // It both ops are fixed one must be the tied source.
+ if (SrcOpIdx1 != CommuteAnyOperandIndex &&
+ SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx1 != 1 && SrcOpIdx2 != 1)
+ return false;
+
+ // Look for two different register operands assumed to be commutable
+ // regardless of the FMA opcode. The FMA opcode is adjusted later if
+ // needed.
+ if (SrcOpIdx1 == CommuteAnyOperandIndex ||
+ SrcOpIdx2 == CommuteAnyOperandIndex) {
+ // At least one of operands to be commuted is not specified and
+ // this method is free to choose appropriate commutable operands.
+ unsigned CommutableOpIdx1 = SrcOpIdx1;
+ if (SrcOpIdx1 == SrcOpIdx2) {
+ // Both of operands are not fixed. Set one of commutable
+ // operands to the tied source.
+ CommutableOpIdx1 = 1;
+ } else if (SrcOpIdx1 == CommuteAnyOperandIndex) {
+ // Only one of the operands is not fixed.
+ CommutableOpIdx1 = SrcOpIdx2;
+ }
+
+ // CommutableOpIdx1 is well defined now. Let's choose another commutable
+ // operand and assign its index to CommutableOpIdx2.
+ unsigned CommutableOpIdx2;
+ if (CommutableOpIdx1 != 1) {
+ // If we haven't already used the tied source, we must use it now.
+ CommutableOpIdx2 = 1;
+ } else {
+ Register Op1Reg = MI.getOperand(CommutableOpIdx1).getReg();
+
+ // The commuted operands should have different registers.
+ // Otherwise, the commute transformation does not change anything and
+ // is useless. We use this as a hint to make our decision.
+ if (Op1Reg != MI.getOperand(2).getReg())
+ CommutableOpIdx2 = 2;
+ else
+ CommutableOpIdx2 = 3;
+ }
+
+ // Assign the found pair of commutable indices to SrcOpIdx1 and
+ // SrcOpIdx2 to return those values.
+ if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
+ CommutableOpIdx2))
+ return false;
+ }
+
+ return true;
+ }
+ }
+
+ return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
+}
+
+// Helper macros that expand to full case statements rewriting one vector FMA
+// pseudo opcode (OLDOP) into its operand-swapped equivalent (NEWOP) for every
+// LMUL variant; used inside commuteInstructionImpl's opcode-switch.
+#define CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL) \
+ case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL: \
+ Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL; \
+ break;
+
+#define CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE) \
+ CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1) \
+ CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2) \
+ CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4) \
+ CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8)
+
+#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE) \
+ CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2) \
+ CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE)
+
+#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE) \
+ CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4) \
+ CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE)
+
+#define CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE) \
+ CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF8) \
+ CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE)
+
+#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP) \
+ CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VF16) \
+ CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VF32) \
+ CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VF64)
+
+// TargetInstrInfo hook: perform the actual commute chosen by
+// findCommutedOpIndices. CCMOV commutes by inverting the condition code;
+// vector FMA pseudos may need an opcode change when the swap involves the
+// tied source (operand 1) and the addend (operand 3). If NewMI is set the
+// commute is applied to a clone instead of in place.
+MachineInstr *RISCVInstrInfo::commuteInstructionImpl(MachineInstr &MI,
+ bool NewMI,
+ unsigned OpIdx1,
+ unsigned OpIdx2) const {
+ auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & {
+ if (NewMI)
+ return *MI.getParent()->getParent()->CloneMachineInstr(&MI);
+ return MI;
+ };
+
+ switch (MI.getOpcode()) {
+ case RISCV::PseudoCCMOVGPR: {
+ // CCMOV can be commuted by inverting the condition.
+ auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm());
+ CC = RISCVCC::getOppositeBranchCondition(CC);
+ auto &WorkingMI = cloneIfNew(MI);
+ WorkingMI.getOperand(3).setImm(CC);
+ return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI*/ false,
+ OpIdx1, OpIdx2);
+ }
+ case CASE_VFMA_SPLATS(FMACC):
+ case CASE_VFMA_SPLATS(FMADD):
+ case CASE_VFMA_SPLATS(FMSAC):
+ case CASE_VFMA_SPLATS(FMSUB):
+ case CASE_VFMA_SPLATS(FNMACC):
+ case CASE_VFMA_SPLATS(FNMADD):
+ case CASE_VFMA_SPLATS(FNMSAC):
+ case CASE_VFMA_SPLATS(FNMSUB):
+ case CASE_VFMA_OPCODE_LMULS_MF4(FMACC, VV):
+ case CASE_VFMA_OPCODE_LMULS_MF4(FMSAC, VV):
+ case CASE_VFMA_OPCODE_LMULS_MF4(FNMACC, VV):
+ case CASE_VFMA_OPCODE_LMULS_MF4(FNMSAC, VV):
+ case CASE_VFMA_OPCODE_LMULS(MADD, VX):
+ case CASE_VFMA_OPCODE_LMULS(NMSUB, VX):
+ case CASE_VFMA_OPCODE_LMULS(MACC, VX):
+ case CASE_VFMA_OPCODE_LMULS(NMSAC, VX):
+ case CASE_VFMA_OPCODE_LMULS(MACC, VV):
+ case CASE_VFMA_OPCODE_LMULS(NMSAC, VV): {
+ // It only make sense to toggle these between clobbering the
+ // addend/subtrahend/minuend one of the multiplicands.
+ assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
+ assert((OpIdx1 == 3 || OpIdx2 == 3) && "Unexpected opcode index");
+ unsigned Opc;
+ switch (MI.getOpcode()) {
+ default:
+ llvm_unreachable("Unexpected opcode");
+ CASE_VFMA_CHANGE_OPCODE_SPLATS(FMACC, FMADD)
+ CASE_VFMA_CHANGE_OPCODE_SPLATS(FMADD, FMACC)
+ CASE_VFMA_CHANGE_OPCODE_SPLATS(FMSAC, FMSUB)
+ CASE_VFMA_CHANGE_OPCODE_SPLATS(FMSUB, FMSAC)
+ CASE_VFMA_CHANGE_OPCODE_SPLATS(FNMACC, FNMADD)
+ CASE_VFMA_CHANGE_OPCODE_SPLATS(FNMADD, FNMACC)
+ CASE_VFMA_CHANGE_OPCODE_SPLATS(FNMSAC, FNMSUB)
+ CASE_VFMA_CHANGE_OPCODE_SPLATS(FNMSUB, FNMSAC)
+ CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FMACC, FMADD, VV)
+ CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FMSAC, FMSUB, VV)
+ CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FNMACC, FNMADD, VV)
+ CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FNMSAC, FNMSUB, VV)
+ CASE_VFMA_CHANGE_OPCODE_LMULS(MACC, MADD, VX)
+ CASE_VFMA_CHANGE_OPCODE_LMULS(MADD, MACC, VX)
+ CASE_VFMA_CHANGE_OPCODE_LMULS(NMSAC, NMSUB, VX)
+ CASE_VFMA_CHANGE_OPCODE_LMULS(NMSUB, NMSAC, VX)
+ CASE_VFMA_CHANGE_OPCODE_LMULS(MACC, MADD, VV)
+ CASE_VFMA_CHANGE_OPCODE_LMULS(NMSAC, NMSUB, VV)
+ }
+
+ auto &WorkingMI = cloneIfNew(MI);
+ WorkingMI.setDesc(get(Opc));
+ return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
+ OpIdx1, OpIdx2);
+ }
+ case CASE_VFMA_OPCODE_LMULS_MF4(FMADD, VV):
+ case CASE_VFMA_OPCODE_LMULS_MF4(FMSUB, VV):
+ case CASE_VFMA_OPCODE_LMULS_MF4(FNMADD, VV):
+ case CASE_VFMA_OPCODE_LMULS_MF4(FNMSUB, VV):
+ case CASE_VFMA_OPCODE_LMULS(MADD, VV):
+ case CASE_VFMA_OPCODE_LMULS(NMSUB, VV): {
+ assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
+ // If one of the operands, is the addend we need to change opcode.
+ // Otherwise we're just swapping 2 of the multiplicands.
+ if (OpIdx1 == 3 || OpIdx2 == 3) {
+ unsigned Opc;
+ switch (MI.getOpcode()) {
+ default:
+ llvm_unreachable("Unexpected opcode");
+ CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FMADD, FMACC, VV)
+ CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FMSUB, FMSAC, VV)
+ CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FNMADD, FNMACC, VV)
+ CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FNMSUB, FNMSAC, VV)
+ CASE_VFMA_CHANGE_OPCODE_LMULS(MADD, MACC, VV)
+ CASE_VFMA_CHANGE_OPCODE_LMULS(NMSUB, NMSAC, VV)
+ }
+
+ auto &WorkingMI = cloneIfNew(MI);
+ WorkingMI.setDesc(get(Opc));
+ return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
+ OpIdx1, OpIdx2);
+ }
+ // Let the default code handle it.
+ break;
+ }
+ }
+
+ return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
+}
+
+// The VFMA case macros are local to the two functions above; undefine them.
+#undef CASE_VFMA_CHANGE_OPCODE_SPLATS
+#undef CASE_VFMA_CHANGE_OPCODE_LMULS
+#undef CASE_VFMA_CHANGE_OPCODE_COMMON
+#undef CASE_VFMA_SPLATS
+#undef CASE_VFMA_OPCODE_LMULS
+#undef CASE_VFMA_OPCODE_COMMON
+
+// Helper macros for the tied widening-op pseudos handled by
+// convertToThreeAddress: the first group expands to case labels over all
+// LMUL variants of a _TIED pseudo; the second rewrites a _TIED opcode to its
+// untied (three-address) form.
+// clang-format off
+#define CASE_WIDEOP_OPCODE_COMMON(OP, LMUL) \
+ RISCV::PseudoV##OP##_##LMUL##_TIED
+
+#define CASE_WIDEOP_OPCODE_LMULS_MF4(OP) \
+ CASE_WIDEOP_OPCODE_COMMON(OP, MF4): \
+ case CASE_WIDEOP_OPCODE_COMMON(OP, MF2): \
+ case CASE_WIDEOP_OPCODE_COMMON(OP, M1): \
+ case CASE_WIDEOP_OPCODE_COMMON(OP, M2): \
+ case CASE_WIDEOP_OPCODE_COMMON(OP, M4)
+
+#define CASE_WIDEOP_OPCODE_LMULS(OP) \
+ CASE_WIDEOP_OPCODE_COMMON(OP, MF8): \
+ case CASE_WIDEOP_OPCODE_LMULS_MF4(OP)
+// clang-format on
+
+#define CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL) \
+ case RISCV::PseudoV##OP##_##LMUL##_TIED: \
+ NewOpc = RISCV::PseudoV##OP##_##LMUL; \
+ break;
+
+#define CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP) \
+ CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4) \
+ CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2) \
+ CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1) \
+ CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2) \
+ CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4)
+
+#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP) \
+ CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF8) \
+ CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP)
+
+// TargetInstrInfo hook: convert a tied widening vector op pseudo (_TIED)
+// into its untied three-address form when the tail policy allows it,
+// updating LiveVariables/LiveIntervals if present. Returns the new
+// instruction, or nullptr when no conversion applies.
+MachineInstr *RISCVInstrInfo::convertToThreeAddress(MachineInstr &MI,
+ LiveVariables *LV,
+ LiveIntervals *LIS) const {
+ switch (MI.getOpcode()) {
+ default:
+ break;
+ case CASE_WIDEOP_OPCODE_LMULS_MF4(FWADD_WV):
+ case CASE_WIDEOP_OPCODE_LMULS_MF4(FWSUB_WV):
+ case CASE_WIDEOP_OPCODE_LMULS(WADD_WV):
+ case CASE_WIDEOP_OPCODE_LMULS(WADDU_WV):
+ case CASE_WIDEOP_OPCODE_LMULS(WSUB_WV):
+ case CASE_WIDEOP_OPCODE_LMULS(WSUBU_WV): {
+ // If the tail policy is undisturbed we can't convert.
+ assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags) &&
+ MI.getNumExplicitOperands() == 6);
+ if ((MI.getOperand(5).getImm() & 1) == 0)
+ return nullptr;
+
+ // clang-format off
+ unsigned NewOpc;
+ switch (MI.getOpcode()) {
+ default:
+ llvm_unreachable("Unexpected opcode");
+ CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(FWADD_WV)
+ CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(FWSUB_WV)
+ CASE_WIDEOP_CHANGE_OPCODE_LMULS(WADD_WV)
+ CASE_WIDEOP_CHANGE_OPCODE_LMULS(WADDU_WV)
+ CASE_WIDEOP_CHANGE_OPCODE_LMULS(WSUB_WV)
+ CASE_WIDEOP_CHANGE_OPCODE_LMULS(WSUBU_WV)
+ }
+ // clang-format on
+
+ // Rebuild with the untied opcode, copying the first five operands and
+ // any implicit operands verbatim.
+ MachineBasicBlock &MBB = *MI.getParent();
+ MachineInstrBuilder MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc))
+ .add(MI.getOperand(0))
+ .add(MI.getOperand(1))
+ .add(MI.getOperand(2))
+ .add(MI.getOperand(3))
+ .add(MI.getOperand(4));
+ MIB.copyImplicitOps(MI);
+
+ if (LV) {
+ unsigned NumOps = MI.getNumOperands();
+ for (unsigned I = 1; I < NumOps; ++I) {
+ MachineOperand &Op = MI.getOperand(I);
+ if (Op.isReg() && Op.isKill())
+ LV->replaceKillInstruction(Op.getReg(), MI, *MIB);
+ }
+ }
+
+ if (LIS) {
+ SlotIndex Idx = LIS->ReplaceMachineInstrInMaps(MI, *MIB);
+
+ if (MI.getOperand(0).isEarlyClobber()) {
+ // Use operand 1 was tied to early-clobber def operand 0, so its live
+ // interval could have ended at an early-clobber slot. Now they are not
+ // tied we need to update it to the normal register slot.
+ LiveInterval &LI = LIS->getInterval(MI.getOperand(1).getReg());
+ LiveRange::Segment *S = LI.getSegmentContaining(Idx);
+ if (S->end == Idx.getRegSlot(true))
+ S->end = Idx.getRegSlot();
+ }
+ }
+
+ return MIB;
+ }
+ }
+
+ return nullptr;
+}
+
+// The WIDEOP case macros are local to convertToThreeAddress; undefine them.
+#undef CASE_WIDEOP_CHANGE_OPCODE_LMULS
+#undef CASE_WIDEOP_CHANGE_OPCODE_COMMON
+#undef CASE_WIDEOP_OPCODE_LMULS
+#undef CASE_WIDEOP_OPCODE_COMMON
+
+// Emit code computing DestReg = VLENB * (Amount / 8), i.e. a stack amount
+// scaled by the vector register size. Uses a plain shift, Zba SHxADD+SLLI,
+// shift+add/sub, or a full MUL as a last resort (diagnosing the lack of
+// M/Zmmul when a multiply is unavoidable).
+void RISCVInstrInfo::getVLENFactoredAmount(MachineFunction &MF,
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator II,
+ const DebugLoc &DL, Register DestReg,
+ int64_t Amount,
+ MachineInstr::MIFlag Flag) const {
+ assert(Amount > 0 && "There is no need to get VLEN scaled value.");
+ assert(Amount % 8 == 0 &&
+ "Reserve the stack by the multiple of one vector size.");
+
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ int64_t NumOfVReg = Amount / 8;
+
+ // Start from VLENB (vector register length in bytes) and scale it.
+ BuildMI(MBB, II, DL, get(RISCV::PseudoReadVLENB), DestReg).setMIFlag(Flag);
+ assert(isInt<32>(NumOfVReg) &&
+ "Expect the number of vector registers within 32-bits.");
+ if (isPowerOf2_32(NumOfVReg)) {
+ // Power of two: a single shift (or nothing for a factor of 1).
+ uint32_t ShiftAmount = Log2_32(NumOfVReg);
+ if (ShiftAmount == 0)
+ return;
+ BuildMI(MBB, II, DL, get(RISCV::SLLI), DestReg)
+ .addReg(DestReg, RegState::Kill)
+ .addImm(ShiftAmount)
+ .setMIFlag(Flag);
+ } else if (STI.hasStdExtZba() &&
+ ((NumOfVReg % 3 == 0 && isPowerOf2_64(NumOfVReg / 3)) ||
+ (NumOfVReg % 5 == 0 && isPowerOf2_64(NumOfVReg / 5)) ||
+ (NumOfVReg % 9 == 0 && isPowerOf2_64(NumOfVReg / 9)))) {
+ // We can use Zba SHXADD+SLLI instructions for multiply in some cases.
+ unsigned Opc;
+ uint32_t ShiftAmount;
+ if (NumOfVReg % 9 == 0) {
+ Opc = RISCV::SH3ADD;
+ ShiftAmount = Log2_64(NumOfVReg / 9);
+ } else if (NumOfVReg % 5 == 0) {
+ Opc = RISCV::SH2ADD;
+ ShiftAmount = Log2_64(NumOfVReg / 5);
+ } else if (NumOfVReg % 3 == 0) {
+ Opc = RISCV::SH1ADD;
+ ShiftAmount = Log2_64(NumOfVReg / 3);
+ } else {
+ llvm_unreachable("Unexpected number of vregs");
+ }
+ if (ShiftAmount)
+ BuildMI(MBB, II, DL, get(RISCV::SLLI), DestReg)
+ .addReg(DestReg, RegState::Kill)
+ .addImm(ShiftAmount)
+ .setMIFlag(Flag);
+ BuildMI(MBB, II, DL, get(Opc), DestReg)
+ .addReg(DestReg, RegState::Kill)
+ .addReg(DestReg)
+ .setMIFlag(Flag);
+ } else if (isPowerOf2_32(NumOfVReg - 1)) {
+ // (2^k + 1) * x == (x << k) + x.
+ Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
+ uint32_t ShiftAmount = Log2_32(NumOfVReg - 1);
+ BuildMI(MBB, II, DL, get(RISCV::SLLI), ScaledRegister)
+ .addReg(DestReg)
+ .addImm(ShiftAmount)
+ .setMIFlag(Flag);
+ BuildMI(MBB, II, DL, get(RISCV::ADD), DestReg)
+ .addReg(ScaledRegister, RegState::Kill)
+ .addReg(DestReg, RegState::Kill)
+ .setMIFlag(Flag);
+ } else if (isPowerOf2_32(NumOfVReg + 1)) {
+ // (2^k - 1) * x == (x << k) - x.
+ Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
+ uint32_t ShiftAmount = Log2_32(NumOfVReg + 1);
+ BuildMI(MBB, II, DL, get(RISCV::SLLI), ScaledRegister)
+ .addReg(DestReg)
+ .addImm(ShiftAmount)
+ .setMIFlag(Flag);
+ BuildMI(MBB, II, DL, get(RISCV::SUB), DestReg)
+ .addReg(ScaledRegister, RegState::Kill)
+ .addReg(DestReg, RegState::Kill)
+ .setMIFlag(Flag);
+ } else {
+ // General case: materialize the factor and multiply.
+ Register N = MRI.createVirtualRegister(&RISCV::GPRRegClass);
+ movImm(MBB, II, DL, N, NumOfVReg, Flag);
+ if (!STI.hasStdExtM() && !STI.hasStdExtZmmul())
+ MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
+ MF.getFunction(),
+ "M- or Zmmul-extension must be enabled to calculate the vscaled size/"
+ "offset."});
+ BuildMI(MBB, II, DL, get(RISCV::MUL), DestReg)
+ .addReg(DestReg, RegState::Kill)
+ .addReg(N, RegState::Kill)
+ .setMIFlag(Flag);
+ }
+}
+
// Checks if all users only demand the lower \p OrigBits of the original
// instruction's result, i.e. whether the high XLEN-OrigBits bits are dead.
// Walks the def-use graph with a worklist of (instruction, demanded-bit-count)
// pairs; conservatively returns false on any user it cannot reason about.
// TODO: handle multiple interdependent transformations
bool RISCVInstrInfo::hasAllNBitUsers(const MachineInstr &OrigMI,
                                     const MachineRegisterInfo &MRI,
                                     unsigned OrigBits) const {

  // Visited prevents re-processing (and infinite loops through PHI cycles).
  SmallSet<std::pair<const MachineInstr *, unsigned>, 4> Visited;
  SmallVector<std::pair<const MachineInstr *, unsigned>, 4> Worklist;

  Worklist.push_back(std::make_pair(&OrigMI, OrigBits));

  while (!Worklist.empty()) {
    auto P = Worklist.pop_back_val();
    const MachineInstr *MI = P.first;
    unsigned Bits = P.second;

    if (!Visited.insert(P).second)
      continue;

    // Only handle instructions with one def.
    if (MI->getNumExplicitDefs() != 1)
      return false;

    for (auto &UserOp : MRI.use_operands(MI->getOperand(0).getReg())) {
      const MachineInstr *UserMI = UserOp.getParent();
      unsigned OpIdx = UserMI->getOperandNo(&UserOp);

      switch (UserMI->getOpcode()) {
      default:
        // Unknown user: conservatively assume it reads all bits.
        return false;

      // W-form ALU ops and 32-bit FP converts/moves only read the low 32
      // bits of their GPR inputs.
      case RISCV::ADDIW:
      case RISCV::ADDW:
      case RISCV::DIVUW:
      case RISCV::DIVW:
      case RISCV::MULW:
      case RISCV::REMUW:
      case RISCV::REMW:
      case RISCV::SLLIW:
      case RISCV::SLLW:
      case RISCV::SRAIW:
      case RISCV::SRAW:
      case RISCV::SRLIW:
      case RISCV::SRLW:
      case RISCV::SUBW:
      case RISCV::ROLW:
      case RISCV::RORW:
      case RISCV::RORIW:
      case RISCV::CLZW:
      case RISCV::CTZW:
      case RISCV::CPOPW:
      case RISCV::SLLI_UW:
      case RISCV::FMV_W_X:
      case RISCV::FCVT_H_W:
      case RISCV::FCVT_H_WU:
      case RISCV::FCVT_S_W:
      case RISCV::FCVT_S_WU:
      case RISCV::FCVT_D_W:
      case RISCV::FCVT_D_WU:
        if (Bits >= 32)
          break;
        return false;
      // Byte-width consumers.
      case RISCV::SEXT_B:
      case RISCV::PACKH:
        if (Bits >= 8)
          break;
        return false;
      // Halfword-width consumers.
      case RISCV::SEXT_H:
      case RISCV::FMV_H_X:
      case RISCV::ZEXT_H_RV32:
      case RISCV::ZEXT_H_RV64:
      case RISCV::PACKW:
        if (Bits >= 16)
          break;
        return false;

      case RISCV::PACK:
        // PACK reads only the low half of each source register.
        if (Bits >= (STI.getXLen() / 2))
          break;
        return false;

      case RISCV::SRLI: {
        // If we are shifting right by less than Bits, and users don't demand
        // any bits that were shifted into [Bits-1:0], then we can consider this
        // as an N-Bit user.
        unsigned ShAmt = UserMI->getOperand(2).getImm();
        if (Bits > ShAmt) {
          Worklist.push_back(std::make_pair(UserMI, Bits - ShAmt));
          break;
        }
        return false;
      }

      // SLLI overwrites the high input bits when the shift is large enough;
      // otherwise the low Bits of its output depend only on the low Bits of
      // its input, so propagate the demand to its users.
      case RISCV::SLLI:
        if (Bits >= (STI.getXLen() - UserMI->getOperand(2).getImm()))
          break;
        Worklist.push_back(std::make_pair(UserMI, Bits));
        break;
      case RISCV::ANDI: {
        // ANDI clears every bit above the mask's top set bit.
        uint64_t Imm = UserMI->getOperand(2).getImm();
        if (Bits >= (unsigned)llvm::bit_width(Imm))
          break;
        Worklist.push_back(std::make_pair(UserMI, Bits));
        break;
      }
      case RISCV::ORI: {
        // ORI forces to one every bit above the top clear bit of the mask.
        uint64_t Imm = UserMI->getOperand(2).getImm();
        if (Bits >= (unsigned)llvm::bit_width<uint64_t>(~Imm))
          break;
        Worklist.push_back(std::make_pair(UserMI, Bits));
        break;
      }

      case RISCV::SLL:
      case RISCV::BSET:
      case RISCV::BCLR:
      case RISCV::BINV:
        // Operand 2 is the shift amount which uses log2(xlen) bits.
        if (OpIdx == 2) {
          if (Bits >= Log2_32(STI.getXLen()))
            break;
          return false;
        }
        Worklist.push_back(std::make_pair(UserMI, Bits));
        break;

      case RISCV::SRA:
      case RISCV::SRL:
      case RISCV::ROL:
      case RISCV::ROR:
        // Operand 2 is the shift amount which uses log2(xlen) bits; the
        // shifted operand itself may expose high bits, so any other use
        // is conservatively rejected.
        if (OpIdx == 2 && Bits >= Log2_32(STI.getXLen()))
          break;
        return false;

      case RISCV::ADD_UW:
      case RISCV::SH1ADD_UW:
      case RISCV::SH2ADD_UW:
      case RISCV::SH3ADD_UW:
        // Operand 1 is implicitly zero extended.
        if (OpIdx == 1 && Bits >= 32)
          break;
        Worklist.push_back(std::make_pair(UserMI, Bits));
        break;

      case RISCV::BEXTI:
        // Only one bit is read; it must lie inside the known-live range.
        if (UserMI->getOperand(2).getImm() >= Bits)
          return false;
        break;

      case RISCV::SB:
        // The first argument is the value to store.
        if (OpIdx == 0 && Bits >= 8)
          break;
        return false;
      case RISCV::SH:
        // The first argument is the value to store.
        if (OpIdx == 0 && Bits >= 16)
          break;
        return false;
      case RISCV::SW:
        // The first argument is the value to store.
        if (OpIdx == 0 && Bits >= 32)
          break;
        return false;

      // For these, the lower word of the output depends only on the lower
      // word of the input, so propagate the same demand to their users.
      case RISCV::COPY:
      case RISCV::PHI:

      case RISCV::ADD:
      case RISCV::ADDI:
      case RISCV::AND:
      case RISCV::MUL:
      case RISCV::OR:
      case RISCV::SUB:
      case RISCV::XOR:
      case RISCV::XORI:

      case RISCV::ANDN:
      case RISCV::BREV8:
      case RISCV::CLMUL:
      case RISCV::ORC_B:
      case RISCV::ORN:
      case RISCV::SH1ADD:
      case RISCV::SH2ADD:
      case RISCV::SH3ADD:
      case RISCV::XNOR:
      case RISCV::BSETI:
      case RISCV::BCLRI:
      case RISCV::BINVI:
        Worklist.push_back(std::make_pair(UserMI, Bits));
        break;

      case RISCV::PseudoCCMOVGPR:
        // Either operand 4 or operand 5 is returned by this instruction. If
        // only the lower word of the result is used, then only the lower word
        // of operand 4 and 5 is used.
        if (OpIdx != 4 && OpIdx != 5)
          return false;
        Worklist.push_back(std::make_pair(UserMI, Bits));
        break;

      case RISCV::VT_MASKC:
      case RISCV::VT_MASKCN:
        // Only the data operand (1) passes bits through; the mask does not.
        if (OpIdx != 1)
          return false;
        Worklist.push_back(std::make_pair(UserMI, Bits));
        break;
      }
    }
  }

  return true;
}
+
+// Returns true if this is the sext.w pattern, addiw rd, rs1, 0.
+bool RISCV::isSEXT_W(const MachineInstr &MI) {
+ return MI.getOpcode() == RISCV::ADDIW && MI.getOperand(1).isReg() &&
+ MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0;
+}
+
+// Returns true if this is the zext.w pattern, adduw rd, rs1, x0.
+bool RISCV::isZEXT_W(const MachineInstr &MI) {
+ return MI.getOpcode() == RISCV::ADD_UW && MI.getOperand(1).isReg() &&
+ MI.getOperand(2).isReg() && MI.getOperand(2).getReg() == RISCV::X0;
+}
+
+// Returns true if this is the zext.b pattern, andi rd, rs1, 255.
+bool RISCV::isZEXT_B(const MachineInstr &MI) {
+ return MI.getOpcode() == RISCV::ANDI && MI.getOperand(1).isReg() &&
+ MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 255;
+}
+
// Returns true for whole-vector-register load/store opcodes (vs<N>r.v /
// vl<N>re<EEW>.v), which move N complete vector registers regardless of vtype.
static bool isRVVWholeLoadStore(unsigned Opcode) {
  switch (Opcode) {
  default:
    return false;
  case RISCV::VS1R_V:
  case RISCV::VS2R_V:
  case RISCV::VS4R_V:
  case RISCV::VS8R_V:
  case RISCV::VL1RE8_V:
  case RISCV::VL2RE8_V:
  case RISCV::VL4RE8_V:
  case RISCV::VL8RE8_V:
  case RISCV::VL1RE16_V:
  case RISCV::VL2RE16_V:
  case RISCV::VL4RE16_V:
  case RISCV::VL8RE16_V:
  case RISCV::VL1RE32_V:
  case RISCV::VL2RE32_V:
  case RISCV::VL4RE32_V:
  case RISCV::VL8RE32_V:
  case RISCV::VL1RE64_V:
  case RISCV::VL2RE64_V:
  case RISCV::VL4RE64_V:
  case RISCV::VL8RE64_V:
    return true;
  }
}
+
+bool RISCV::isRVVSpill(const MachineInstr &MI) {
+ // RVV lacks any support for immediate addressing for stack addresses, so be
+ // conservative.
+ unsigned Opcode = MI.getOpcode();
+ if (!RISCVVPseudosTable::getPseudoInfo(Opcode) &&
+ !isRVVWholeLoadStore(Opcode) && !isRVVSpillForZvlsseg(Opcode))
+ return false;
+ return true;
+}
+
// If Opcode is a Zvlsseg (segment load/store) spill or reload pseudo, returns
// the pair (NF, LMUL) — number of fields and register-group size — it operates
// on; std::nullopt for any other opcode.
std::optional<std::pair<unsigned, unsigned>>
RISCV::isRVVSpillForZvlsseg(unsigned Opcode) {
  switch (Opcode) {
  default:
    return std::nullopt;
  case RISCV::PseudoVSPILL2_M1:
  case RISCV::PseudoVRELOAD2_M1:
    return std::make_pair(2u, 1u);
  case RISCV::PseudoVSPILL2_M2:
  case RISCV::PseudoVRELOAD2_M2:
    return std::make_pair(2u, 2u);
  case RISCV::PseudoVSPILL2_M4:
  case RISCV::PseudoVRELOAD2_M4:
    return std::make_pair(2u, 4u);
  case RISCV::PseudoVSPILL3_M1:
  case RISCV::PseudoVRELOAD3_M1:
    return std::make_pair(3u, 1u);
  case RISCV::PseudoVSPILL3_M2:
  case RISCV::PseudoVRELOAD3_M2:
    return std::make_pair(3u, 2u);
  case RISCV::PseudoVSPILL4_M1:
  case RISCV::PseudoVRELOAD4_M1:
    return std::make_pair(4u, 1u);
  case RISCV::PseudoVSPILL4_M2:
  case RISCV::PseudoVRELOAD4_M2:
    return std::make_pair(4u, 2u);
  case RISCV::PseudoVSPILL5_M1:
  case RISCV::PseudoVRELOAD5_M1:
    return std::make_pair(5u, 1u);
  case RISCV::PseudoVSPILL6_M1:
  case RISCV::PseudoVRELOAD6_M1:
    return std::make_pair(6u, 1u);
  case RISCV::PseudoVSPILL7_M1:
  case RISCV::PseudoVRELOAD7_M1:
    return std::make_pair(7u, 1u);
  case RISCV::PseudoVSPILL8_M1:
  case RISCV::PseudoVRELOAD8_M1:
    return std::make_pair(8u, 1u);
  }
}
+
+bool RISCV::isFaultFirstLoad(const MachineInstr &MI) {
+ return MI.getNumExplicitDefs() == 2 && MI.modifiesRegister(RISCV::VL) &&
+ !MI.isInlineAsm();
+}
+
+bool RISCV::hasEqualFRM(const MachineInstr &MI1, const MachineInstr &MI2) {
+ int16_t MI1FrmOpIdx =
+ RISCV::getNamedOperandIdx(MI1.getOpcode(), RISCV::OpName::frm);
+ int16_t MI2FrmOpIdx =
+ RISCV::getNamedOperandIdx(MI2.getOpcode(), RISCV::OpName::frm);
+ if (MI1FrmOpIdx < 0 || MI2FrmOpIdx < 0)
+ return false;
+ MachineOperand FrmOp1 = MI1.getOperand(MI1FrmOpIdx);
+ MachineOperand FrmOp2 = MI2.getOperand(MI2FrmOpIdx);
+ return FrmOp1.getImm() == FrmOp2.getImm();
+}
diff --git a/contrib/libs/llvm16/lib/Target/RISCV/RISCVInstrInfo.h b/contrib/libs/llvm16/lib/Target/RISCV/RISCVInstrInfo.h
new file mode 100644
index 0000000000..c663af75a5
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/RISCV/RISCVInstrInfo.h
@@ -0,0 +1,279 @@
+//===-- RISCVInstrInfo.h - RISCV Instruction Information --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the RISCV implementation of the TargetInstrInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_RISCV_RISCVINSTRINFO_H
+#define LLVM_LIB_TARGET_RISCV_RISCVINSTRINFO_H
+
+#include "RISCVRegisterInfo.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
+#include "llvm/IR/DiagnosticInfo.h"
+
+#define GET_INSTRINFO_HEADER
+#define GET_INSTRINFO_OPERAND_ENUM
+#include "RISCVGenInstrInfo.inc"
+
+namespace llvm {
+
+class RISCVSubtarget;
+
namespace RISCVCC {

// Condition codes for RISCV conditional branches, mirroring the six branch
// instructions beq/bne/blt/bge/bltu/bgeu.
enum CondCode {
  COND_EQ,
  COND_NE,
  COND_LT,
  COND_GE,
  COND_LTU,
  COND_GEU,
  COND_INVALID // Sentinel: no recognized condition.
};

// Returns the condition code that tests the logically opposite relation.
CondCode getOppositeBranchCondition(CondCode);

} // end of namespace RISCVCC
+
// RISCV implementation of TargetInstrInfo: hooks for copying, spilling,
// branch analysis, outlining, and machine-combining, plus RISCV-specific
// helpers (immediate materialization, VLEN scaling, demanded-bits queries).
class RISCVInstrInfo : public RISCVGenInstrInfo {

public:
  explicit RISCVInstrInfo(RISCVSubtarget &STI);

  MCInst getNop() const override;
  // Returns the branch instruction description for condition code CC.
  const MCInstrDesc &getBrCond(RISCVCC::CondCode CC) const;

  unsigned isLoadFromStackSlot(const MachineInstr &MI,
                               int &FrameIndex) const override;
  unsigned isStoreToStackSlot(const MachineInstr &MI,
                              int &FrameIndex) const override;

  void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
                   const DebugLoc &DL, MCRegister DstReg, MCRegister SrcReg,
                   bool KillSrc) const override;

  void storeRegToStackSlot(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI, Register SrcReg,
                           bool IsKill, int FrameIndex,
                           const TargetRegisterClass *RC,
                           const TargetRegisterInfo *TRI,
                           Register VReg) const override;

  void loadRegFromStackSlot(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MBBI, Register DstReg,
                            int FrameIndex, const TargetRegisterClass *RC,
                            const TargetRegisterInfo *TRI,
                            Register VReg) const override;

  using TargetInstrInfo::foldMemoryOperandImpl;
  MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
                                      ArrayRef<unsigned> Ops,
                                      MachineBasicBlock::iterator InsertPt,
                                      int FrameIndex,
                                      LiveIntervals *LIS = nullptr,
                                      VirtRegMap *VRM = nullptr) const override;

  // Materializes the given integer Val into DstReg.
  void movImm(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
              const DebugLoc &DL, Register DstReg, uint64_t Val,
              MachineInstr::MIFlag Flag = MachineInstr::NoFlags) const;

  unsigned getInstSizeInBytes(const MachineInstr &MI) const override;

  // --- Branch analysis and rewriting hooks used by generic CFG passes. ---

  bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                     MachineBasicBlock *&FBB,
                     SmallVectorImpl<MachineOperand> &Cond,
                     bool AllowModify) const override;

  unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                        MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
                        const DebugLoc &dl,
                        int *BytesAdded = nullptr) const override;

  void insertIndirectBranch(MachineBasicBlock &MBB,
                            MachineBasicBlock &NewDestBB,
                            MachineBasicBlock &RestoreBB, const DebugLoc &DL,
                            int64_t BrOffset, RegScavenger *RS) const override;

  unsigned removeBranch(MachineBasicBlock &MBB,
                        int *BytesRemoved = nullptr) const override;

  bool
  reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;

  MachineBasicBlock *getBranchDestBlock(const MachineInstr &MI) const override;

  bool isBranchOffsetInRange(unsigned BranchOpc,
                             int64_t BrOffset) const override;

  bool analyzeSelect(const MachineInstr &MI,
                     SmallVectorImpl<MachineOperand> &Cond, unsigned &TrueOp,
                     unsigned &FalseOp, bool &Optimizable) const override;

  MachineInstr *optimizeSelect(MachineInstr &MI,
                               SmallPtrSetImpl<MachineInstr *> &SeenMIs,
                               bool) const override;

  bool isAsCheapAsAMove(const MachineInstr &MI) const override;

  std::optional<DestSourcePair>
  isCopyInstrImpl(const MachineInstr &MI) const override;

  bool verifyInstruction(const MachineInstr &MI,
                         StringRef &ErrInfo) const override;

  // Decomposes a load/store into base operand, offset, and access width.
  bool getMemOperandWithOffsetWidth(const MachineInstr &LdSt,
                                    const MachineOperand *&BaseOp,
                                    int64_t &Offset, unsigned &Width,
                                    const TargetRegisterInfo *TRI) const;

  bool areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
                                       const MachineInstr &MIb) const override;


  std::pair<unsigned, unsigned>
  decomposeMachineOperandsTargetFlags(unsigned TF) const override;

  ArrayRef<std::pair<unsigned, const char *>>
  getSerializableDirectMachineOperandTargetFlags() const override;

  // --- Machine outliner support. ---

  // Return true if the function can safely be outlined from.
  bool isFunctionSafeToOutlineFrom(MachineFunction &MF,
                                   bool OutlineFromLinkOnceODRs) const override;

  // Return true if MBB is safe to outline from, and return any target-specific
  // information in Flags.
  bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
                              unsigned &Flags) const override;

  bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const override;

  // Calculate target-specific information for a set of outlining candidates.
  outliner::OutlinedFunction getOutliningCandidateInfo(
      std::vector<outliner::Candidate> &RepeatedSequenceLocs) const override;

  // Return if/how a given MachineInstr should be outlined.
  outliner::InstrType getOutliningType(MachineBasicBlock::iterator &MBBI,
                                       unsigned Flags) const override;

  // Insert a custom frame for outlined functions.
  void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF,
                          const outliner::OutlinedFunction &OF) const override;

  // Insert a call to an outlined function into a given basic block.
  MachineBasicBlock::iterator
  insertOutlinedCall(Module &M, MachineBasicBlock &MBB,
                     MachineBasicBlock::iterator &It, MachineFunction &MF,
                     outliner::Candidate &C) const override;

  bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1,
                             unsigned &SrcOpIdx2) const override;
  MachineInstr *commuteInstructionImpl(MachineInstr &MI, bool NewMI,
                                       unsigned OpIdx1,
                                       unsigned OpIdx2) const override;

  MachineInstr *convertToThreeAddress(MachineInstr &MI, LiveVariables *LV,
                                      LiveIntervals *LIS) const override;

  // MIR printer helper function to annotate Operands with a comment.
  std::string
  createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op,
                          unsigned OpIdx,
                          const TargetRegisterInfo *TRI) const override;

  // Emits code computing DestReg = VLENB * (Amount / 8); used for scalable
  // vector stack offsets. See the definition for the chosen sequences.
  void getVLENFactoredAmount(
      MachineFunction &MF, MachineBasicBlock &MBB,
      MachineBasicBlock::iterator II, const DebugLoc &DL, Register DestReg,
      int64_t Amount, MachineInstr::MIFlag Flag = MachineInstr::NoFlags) const;

  // --- Machine combiner (reassociation/fusion) support. ---

  bool useMachineCombiner() const override { return true; }

  void setSpecialOperandAttr(MachineInstr &OldMI1, MachineInstr &OldMI2,
                             MachineInstr &NewMI1,
                             MachineInstr &NewMI2) const override;
  bool
  getMachineCombinerPatterns(MachineInstr &Root,
                             SmallVectorImpl<MachineCombinerPattern> &Patterns,
                             bool DoRegPressureReduce) const override;

  void
  finalizeInsInstrs(MachineInstr &Root, MachineCombinerPattern &P,
                    SmallVectorImpl<MachineInstr *> &InsInstrs) const override;

  void genAlternativeCodeSequence(
      MachineInstr &Root, MachineCombinerPattern Pattern,
      SmallVectorImpl<MachineInstr *> &InsInstrs,
      SmallVectorImpl<MachineInstr *> &DelInstrs,
      DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const override;

  bool hasReassociableSibling(const MachineInstr &Inst,
                              bool &Commuted) const override;

  bool isAssociativeAndCommutative(const MachineInstr &Inst,
                                   bool Invert) const override;

  std::optional<unsigned> getInverseOpcode(unsigned Opcode) const override;

  // Returns true if all uses of OrigMI only depend on the lower \p NBits bits
  // of its output.
  bool hasAllNBitUsers(const MachineInstr &MI, const MachineRegisterInfo &MRI,
                       unsigned NBits) const;
  // Returns true if all uses of OrigMI only depend on the lower word of its
  // output, so we can transform OrigMI to the corresponding W-version.
  bool hasAllWUsers(const MachineInstr &MI,
                    const MachineRegisterInfo &MRI) const {
    return hasAllNBitUsers(MI, MRI, 32);
  }

protected:
  const RISCVSubtarget &STI;
};
+
namespace RISCV {

// Returns true if this is the sext.w pattern, addiw rd, rs1, 0.
bool isSEXT_W(const MachineInstr &MI);
// Returns true if this is the zext.w pattern, add.uw rd, rs1, x0.
bool isZEXT_W(const MachineInstr &MI);
// Returns true if this is the zext.b pattern, andi rd, rs1, 255.
bool isZEXT_B(const MachineInstr &MI);

// Returns true if the given MI is an RVV instruction opcode for which we may
// expect to see a FrameIndex operand.
bool isRVVSpill(const MachineInstr &MI);

// If Opcode is a Zvlsseg spill/reload pseudo, returns its (NF, LMUL) pair;
// std::nullopt otherwise.
std::optional<std::pair<unsigned, unsigned>>
isRVVSpillForZvlsseg(unsigned Opcode);

// True for fault-only-first vector loads (recognized by their extra VL def).
bool isFaultFirstLoad(const MachineInstr &MI);

// Implemented in RISCVGenInstrInfo.inc
int16_t getNamedOperandIdx(uint16_t Opcode, uint16_t NamedIndex);

// Return true if both input instructions have equal rounding mode. If at least
// one of the instructions does not have rounding mode, false will be returned.
bool hasEqualFRM(const MachineInstr &MI1, const MachineInstr &MI2);

// Special immediate for AVL operand of V pseudo instructions to indicate VLMax.
static constexpr int64_t VLMaxSentinel = -1LL;

} // namespace RISCV
+
namespace RISCVVPseudosTable {

// TableGen-erated mapping from an RVV pseudo opcode to the real instruction
// it is lowered to (see lowerRISCVVMachineInstrToMCInst).
struct PseudoInfo {
  uint16_t Pseudo;    // Pseudo-instruction opcode (lookup key).
  uint16_t BaseInstr; // Corresponding base MC opcode.
};

#define GET_RISCVVPseudosTable_DECL
#include "RISCVGenSearchableTables.inc"

} // end namespace RISCVVPseudosTable
+
+} // end namespace llvm
+#endif
diff --git a/contrib/libs/llvm16/lib/Target/RISCV/RISCVMCInstLower.cpp b/contrib/libs/llvm16/lib/Target/RISCV/RISCVMCInstLower.cpp
new file mode 100644
index 0000000000..281918259c
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/RISCV/RISCVMCInstLower.cpp
@@ -0,0 +1,257 @@
+//===-- RISCVMCInstLower.cpp - Convert RISCV MachineInstr to an MCInst ------=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains code to lower RISCV MachineInstrs to their corresponding
+// MCInst records.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RISCV.h"
+#include "RISCVSubtarget.h"
+#include "MCTargetDesc/RISCVMCExpr.h"
+#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
// Lowers a symbolic MachineOperand (global, block address, jump table, …) to
// an MCOperand expression: symbol ref, plus an optional addend, wrapped in a
// RISCVMCExpr when the operand's target flag selects a relocation variant
// (%hi, %lo, %pcrel_hi, TLS forms, …).
static MCOperand lowerSymbolOperand(const MachineOperand &MO, MCSymbol *Sym,
                                    const AsmPrinter &AP) {
  MCContext &Ctx = AP.OutContext;
  RISCVMCExpr::VariantKind Kind;

  // Translate the machine-operand target flag to the MC-level variant kind.
  switch (MO.getTargetFlags()) {
  default:
    llvm_unreachable("Unknown target flag on GV operand");
  case RISCVII::MO_None:
    Kind = RISCVMCExpr::VK_RISCV_None;
    break;
  case RISCVII::MO_CALL:
    Kind = RISCVMCExpr::VK_RISCV_CALL;
    break;
  case RISCVII::MO_PLT:
    Kind = RISCVMCExpr::VK_RISCV_CALL_PLT;
    break;
  case RISCVII::MO_LO:
    Kind = RISCVMCExpr::VK_RISCV_LO;
    break;
  case RISCVII::MO_HI:
    Kind = RISCVMCExpr::VK_RISCV_HI;
    break;
  case RISCVII::MO_PCREL_LO:
    Kind = RISCVMCExpr::VK_RISCV_PCREL_LO;
    break;
  case RISCVII::MO_PCREL_HI:
    Kind = RISCVMCExpr::VK_RISCV_PCREL_HI;
    break;
  case RISCVII::MO_GOT_HI:
    Kind = RISCVMCExpr::VK_RISCV_GOT_HI;
    break;
  case RISCVII::MO_TPREL_LO:
    Kind = RISCVMCExpr::VK_RISCV_TPREL_LO;
    break;
  case RISCVII::MO_TPREL_HI:
    Kind = RISCVMCExpr::VK_RISCV_TPREL_HI;
    break;
  case RISCVII::MO_TPREL_ADD:
    Kind = RISCVMCExpr::VK_RISCV_TPREL_ADD;
    break;
  case RISCVII::MO_TLS_GOT_HI:
    Kind = RISCVMCExpr::VK_RISCV_TLS_GOT_HI;
    break;
  case RISCVII::MO_TLS_GD_HI:
    Kind = RISCVMCExpr::VK_RISCV_TLS_GD_HI;
    break;
  }

  const MCExpr *ME =
      MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_None, Ctx);

  // Fold a non-zero addend into the expression; jump tables and basic blocks
  // never carry one.
  if (!MO.isJTI() && !MO.isMBB() && MO.getOffset())
    ME = MCBinaryExpr::createAdd(
        ME, MCConstantExpr::create(MO.getOffset(), Ctx), Ctx);

  if (Kind != RISCVMCExpr::VK_RISCV_None)
    ME = RISCVMCExpr::create(ME, Kind, Ctx);
  return MCOperand::createExpr(ME);
}
+
// Lowers a single MachineOperand into MCOp. Returns false when the operand
// should be dropped from the emitted MCInst (implicit registers, regmasks);
// true when MCOp was filled in.
bool llvm::lowerRISCVMachineOperandToMCOperand(const MachineOperand &MO,
                                               MCOperand &MCOp,
                                               const AsmPrinter &AP) {
  switch (MO.getType()) {
  default:
    report_fatal_error("LowerRISCVMachineInstrToMCInst: unknown operand type");
  case MachineOperand::MO_Register:
    // Ignore all implicit register operands.
    if (MO.isImplicit())
      return false;
    MCOp = MCOperand::createReg(MO.getReg());
    break;
  case MachineOperand::MO_RegisterMask:
    // Regmasks are like implicit defs.
    return false;
  case MachineOperand::MO_Immediate:
    MCOp = MCOperand::createImm(MO.getImm());
    break;
  // All symbolic operand kinds funnel through lowerSymbolOperand; only the
  // way the MCSymbol is obtained differs.
  case MachineOperand::MO_MachineBasicBlock:
    MCOp = lowerSymbolOperand(MO, MO.getMBB()->getSymbol(), AP);
    break;
  case MachineOperand::MO_GlobalAddress:
    MCOp = lowerSymbolOperand(MO, AP.getSymbolPreferLocal(*MO.getGlobal()), AP);
    break;
  case MachineOperand::MO_BlockAddress:
    MCOp = lowerSymbolOperand(
        MO, AP.GetBlockAddressSymbol(MO.getBlockAddress()), AP);
    break;
  case MachineOperand::MO_ExternalSymbol:
    MCOp = lowerSymbolOperand(
        MO, AP.GetExternalSymbolSymbol(MO.getSymbolName()), AP);
    break;
  case MachineOperand::MO_ConstantPoolIndex:
    MCOp = lowerSymbolOperand(MO, AP.GetCPISymbol(MO.getIndex()), AP);
    break;
  case MachineOperand::MO_JumpTableIndex:
    MCOp = lowerSymbolOperand(MO, AP.GetJTISymbol(MO.getIndex()), AP);
    break;
  case MachineOperand::MO_MCSymbol:
    MCOp = lowerSymbolOperand(MO, MO.getMCSymbol(), AP);
    break;
  }
  return true;
}
+
// Lowers an RVV pseudo instruction to its real base MCInst: swaps in the base
// opcode from the pseudo table, drops codegen-only operands (SEW/VL/policy,
// merge, the VL def of fault-first loads), normalizes register-class quirks,
// and appends the dummy mask operand where needed. Returns false if MI is not
// an RVV pseudo (leaving OutMI for the generic path).
static bool lowerRISCVVMachineInstrToMCInst(const MachineInstr *MI,
                                            MCInst &OutMI) {
  const RISCVVPseudosTable::PseudoInfo *RVV =
      RISCVVPseudosTable::getPseudoInfo(MI->getOpcode());
  if (!RVV)
    return false;

  OutMI.setOpcode(RVV->BaseInstr);

  const MachineBasicBlock *MBB = MI->getParent();
  assert(MBB && "MI expected to be in a basic block");
  const MachineFunction *MF = MBB->getParent();
  assert(MF && "MBB expected to be in a machine function");

  const TargetRegisterInfo *TRI =
      MF->getSubtarget<RISCVSubtarget>().getRegisterInfo();

  assert(TRI && "TargetRegisterInfo expected");

  uint64_t TSFlags = MI->getDesc().TSFlags;
  unsigned NumOps = MI->getNumExplicitOperands();

  // Skip policy, VL and SEW operands which are the last operands if present.
  if (RISCVII::hasVecPolicyOp(TSFlags))
    --NumOps;
  if (RISCVII::hasVLOp(TSFlags))
    --NumOps;
  if (RISCVII::hasSEWOp(TSFlags))
    --NumOps;

  bool hasVLOutput = RISCV::isFaultFirstLoad(*MI);
  for (unsigned OpNo = 0; OpNo != NumOps; ++OpNo) {
    const MachineOperand &MO = MI->getOperand(OpNo);
    // Skip vl output. It should be the second output.
    if (hasVLOutput && OpNo == 1)
      continue;

    // Skip merge op. It should be the first operand after the result.
    if (RISCVII::hasMergeOp(TSFlags) && OpNo == 1U + hasVLOutput) {
      assert(MI->getNumExplicitDefs() == 1U + hasVLOutput);
      continue;
    }

    MCOperand MCOp;
    switch (MO.getType()) {
    default:
      llvm_unreachable("Unknown operand type");
    case MachineOperand::MO_Register: {
      Register Reg = MO.getReg();

      if (RISCV::VRM2RegClass.contains(Reg) ||
          RISCV::VRM4RegClass.contains(Reg) ||
          RISCV::VRM8RegClass.contains(Reg)) {
        // Register groups (LMUL > 1) are encoded by their first register.
        Reg = TRI->getSubReg(Reg, RISCV::sub_vrm1_0);
        assert(Reg && "Subregister does not exist");
      } else if (RISCV::FPR16RegClass.contains(Reg)) {
        // f16 values live in the low half of an f32 register at the MC level.
        Reg = TRI->getMatchingSuperReg(Reg, RISCV::sub_16, &RISCV::FPR32RegClass);
        assert(Reg && "Subregister does not exist");
      } else if (RISCV::FPR64RegClass.contains(Reg)) {
        Reg = TRI->getSubReg(Reg, RISCV::sub_32);
        assert(Reg && "Superregister does not exist");
      }

      MCOp = MCOperand::createReg(Reg);
      break;
    }
    case MachineOperand::MO_Immediate:
      MCOp = MCOperand::createImm(MO.getImm());
      break;
    }
    OutMI.addOperand(MCOp);
  }

  // Unmasked pseudo instructions need to append dummy mask operand to
  // V instructions. All V instructions are modeled as the masked version.
  if (RISCVII::hasDummyMaskOp(TSFlags))
    OutMI.addOperand(MCOperand::createReg(RISCV::NoRegister));

  return true;
}
+
// Top-level MachineInstr -> MCInst lowering. RVV pseudos are handled by
// lowerRISCVVMachineInstrToMCInst; everything else is lowered operand by
// operand, with a few pseudos rewritten afterwards (PseudoReadVLENB/VL become
// CSR reads). NOTE(review): the return value appears to mean "fully handled
// here, caller should not emit OutMI" — confirm against the RISCVAsmPrinter
// call site, which is outside this file.
bool llvm::lowerRISCVMachineInstrToMCInst(const MachineInstr *MI, MCInst &OutMI,
                                          AsmPrinter &AP) {
  if (lowerRISCVVMachineInstrToMCInst(MI, OutMI))
    return false;

  OutMI.setOpcode(MI->getOpcode());

  for (const MachineOperand &MO : MI->operands()) {
    MCOperand MCOp;
    if (lowerRISCVMachineOperandToMCOperand(MO, MCOp, AP))
      OutMI.addOperand(MCOp);
  }

  switch (OutMI.getOpcode()) {
  case TargetOpcode::PATCHABLE_FUNCTION_ENTER: {
    // Emit the requested number of nops directly instead of an instruction.
    const Function &F = MI->getParent()->getParent()->getFunction();
    if (F.hasFnAttribute("patchable-function-entry")) {
      unsigned Num;
      if (F.getFnAttribute("patchable-function-entry")
              .getValueAsString()
              .getAsInteger(10, Num))
        return false;
      AP.emitNops(Num);
      return true;
    }
    break;
  }
  // Reading VLENB/VL is done with a csrrs from the corresponding CSR with
  // rs1 = x0 (plain read, no bits set).
  case RISCV::PseudoReadVLENB:
    OutMI.setOpcode(RISCV::CSRRS);
    OutMI.addOperand(MCOperand::createImm(
        RISCVSysReg::lookupSysRegByName("VLENB")->Encoding));
    OutMI.addOperand(MCOperand::createReg(RISCV::X0));
    break;
  case RISCV::PseudoReadVL:
    OutMI.setOpcode(RISCV::CSRRS);
    OutMI.addOperand(
        MCOperand::createImm(RISCVSysReg::lookupSysRegByName("VL")->Encoding));
    OutMI.addOperand(MCOperand::createReg(RISCV::X0));
    break;
  }
  return false;
}
diff --git a/contrib/libs/llvm16/lib/Target/RISCV/RISCVMachineFunctionInfo.cpp b/contrib/libs/llvm16/lib/Target/RISCV/RISCVMachineFunctionInfo.cpp
new file mode 100644
index 0000000000..d79c4d4a02
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/RISCV/RISCVMachineFunctionInfo.cpp
@@ -0,0 +1,45 @@
+//=- RISCVMachineFunctionInfo.cpp - RISCV machine function info ---*- C++ -*-=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares RISCV-specific per-machine-function information.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RISCVMachineFunctionInfo.h"
+
+using namespace llvm;
+
// Builds the YAML mirror from the live, in-memory machine function info.
yaml::RISCVMachineFunctionInfo::RISCVMachineFunctionInfo(
    const llvm::RISCVMachineFunctionInfo &MFI)
    : VarArgsFrameIndex(MFI.getVarArgsFrameIndex()),
      VarArgsSaveSize(MFI.getVarArgsSaveSize()) {}
+
// Clones this function info into DestMF. Src2DstMBB is unused because this
// info holds no basic-block pointers that would need remapping.
MachineFunctionInfo *RISCVMachineFunctionInfo::clone(
    BumpPtrAllocator &Allocator, MachineFunction &DestMF,
    const DenseMap<MachineBasicBlock *, MachineBasicBlock *> &Src2DstMBB)
    const {
  return DestMF.cloneInfo<RISCVMachineFunctionInfo>(*this);
}
+
// YAML (de)serialization entry point; delegates to the MappingTraits
// specialization declared in the header.
void yaml::RISCVMachineFunctionInfo::mappingImpl(yaml::IO &YamlIO) {
  MappingTraits<RISCVMachineFunctionInfo>::mapping(YamlIO, *this);
}
+
// Copies fields parsed from MIR YAML back into the live function info.
void RISCVMachineFunctionInfo::initializeBaseYamlFields(
    const yaml::RISCVMachineFunctionInfo &YamlMFI) {
  VarArgsFrameIndex = YamlMFI.VarArgsFrameIndex;
  VarArgsSaveSize = YamlMFI.VarArgsSaveSize;
}
+
// Records Reg in the per-function list of registers tracked as sign-extended
// 32-bit values (queried via isSExt32Register).
void RISCVMachineFunctionInfo::addSExt32Register(Register Reg) {
  SExt32Registers.push_back(Reg);
}
+
// Returns true if Reg was previously recorded via addSExt32Register.
// Linear scan; the list is expected to stay small.
bool RISCVMachineFunctionInfo::isSExt32Register(Register Reg) const {
  return is_contained(SExt32Registers, Reg);
}
diff --git a/contrib/libs/llvm16/lib/Target/RISCV/RISCVMachineFunctionInfo.h b/contrib/libs/llvm16/lib/Target/RISCV/RISCVMachineFunctionInfo.h
new file mode 100644
index 0000000000..2744072568
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/RISCV/RISCVMachineFunctionInfo.h
@@ -0,0 +1,136 @@
+//=- RISCVMachineFunctionInfo.h - RISCV machine function info -----*- C++ -*-=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares RISCV-specific per-machine-function information.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_RISCV_RISCVMACHINEFUNCTIONINFO_H
+#define LLVM_LIB_TARGET_RISCV_RISCVMACHINEFUNCTIONINFO_H
+
+#include "RISCVSubtarget.h"
+#include "llvm/CodeGen/MIRYamlMapping.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+
+namespace llvm {
+
+class RISCVMachineFunctionInfo;
+
+namespace yaml {
+struct RISCVMachineFunctionInfo final : public yaml::MachineFunctionInfo {
+ int VarArgsFrameIndex;
+ int VarArgsSaveSize;
+
+ RISCVMachineFunctionInfo() = default;
+ RISCVMachineFunctionInfo(const llvm::RISCVMachineFunctionInfo &MFI);
+
+ void mappingImpl(yaml::IO &YamlIO) override;
+ ~RISCVMachineFunctionInfo() = default;
+};
+
+template <> struct MappingTraits<RISCVMachineFunctionInfo> {
+ static void mapping(IO &YamlIO, RISCVMachineFunctionInfo &MFI) {
+ YamlIO.mapOptional("varArgsFrameIndex", MFI.VarArgsFrameIndex);
+ YamlIO.mapOptional("varArgsSaveSize", MFI.VarArgsSaveSize);
+ }
+};
+} // end namespace yaml
+
+/// RISCVMachineFunctionInfo - This class is derived from MachineFunctionInfo
+/// and contains private RISCV-specific information for each MachineFunction.
+class RISCVMachineFunctionInfo : public MachineFunctionInfo {
+private:
+ /// FrameIndex for start of varargs area
+ int VarArgsFrameIndex = 0;
+ /// Size of the save area used for varargs
+ int VarArgsSaveSize = 0;
+ /// FrameIndex used for transferring values between 64-bit FPRs and a pair
+ /// of 32-bit GPRs via the stack.
+ int MoveF64FrameIndex = -1;
+ /// FrameIndex of the spill slot for the scratch register in BranchRelaxation.
+ int BranchRelaxationScratchFrameIndex = -1;
+ /// Size of any opaque stack adjustment due to save/restore libcalls.
+ unsigned LibCallStackSize = 0;
+ /// Size of RVV stack.
+ uint64_t RVVStackSize = 0;
+ /// Alignment of RVV stack.
+ Align RVVStackAlign;
+ /// Padding required to keep RVV stack aligned within the main stack.
+ uint64_t RVVPadding = 0;
+ /// Size of stack frame to save callee saved registers
+ unsigned CalleeSavedStackSize = 0;
+ /// Is there any vector argument or return?
+ bool IsVectorCall = false;
+
+ /// Registers that have been sign extended from i32.
+ SmallVector<Register, 8> SExt32Registers;
+
+public:
+ RISCVMachineFunctionInfo(const Function &F, const TargetSubtargetInfo *STI) {}
+
+ MachineFunctionInfo *
+ clone(BumpPtrAllocator &Allocator, MachineFunction &DestMF,
+ const DenseMap<MachineBasicBlock *, MachineBasicBlock *> &Src2DstMBB)
+ const override;
+
+ int getVarArgsFrameIndex() const { return VarArgsFrameIndex; }
+ void setVarArgsFrameIndex(int Index) { VarArgsFrameIndex = Index; }
+
+ unsigned getVarArgsSaveSize() const { return VarArgsSaveSize; }
+ void setVarArgsSaveSize(int Size) { VarArgsSaveSize = Size; }
+
+ int getMoveF64FrameIndex(MachineFunction &MF) {
+ if (MoveF64FrameIndex == -1)
+ MoveF64FrameIndex =
+ MF.getFrameInfo().CreateStackObject(8, Align(8), false);
+ return MoveF64FrameIndex;
+ }
+
+ int getBranchRelaxationScratchFrameIndex() const {
+ return BranchRelaxationScratchFrameIndex;
+ }
+ void setBranchRelaxationScratchFrameIndex(int Index) {
+ BranchRelaxationScratchFrameIndex = Index;
+ }
+
+ unsigned getLibCallStackSize() const { return LibCallStackSize; }
+ void setLibCallStackSize(unsigned Size) { LibCallStackSize = Size; }
+
+ bool useSaveRestoreLibCalls(const MachineFunction &MF) const {
+ // We cannot use fixed locations for the callee saved spill slots if the
+ // function uses a varargs save area, or is an interrupt handler.
+ return MF.getSubtarget<RISCVSubtarget>().enableSaveRestore() &&
+ VarArgsSaveSize == 0 && !MF.getFrameInfo().hasTailCall() &&
+ !MF.getFunction().hasFnAttribute("interrupt");
+ }
+
+ uint64_t getRVVStackSize() const { return RVVStackSize; }
+ void setRVVStackSize(uint64_t Size) { RVVStackSize = Size; }
+
+ Align getRVVStackAlign() const { return RVVStackAlign; }
+ void setRVVStackAlign(Align StackAlign) { RVVStackAlign = StackAlign; }
+
+ uint64_t getRVVPadding() const { return RVVPadding; }
+ void setRVVPadding(uint64_t Padding) { RVVPadding = Padding; }
+
+ unsigned getCalleeSavedStackSize() const { return CalleeSavedStackSize; }
+ void setCalleeSavedStackSize(unsigned Size) { CalleeSavedStackSize = Size; }
+
+ void initializeBaseYamlFields(const yaml::RISCVMachineFunctionInfo &YamlMFI);
+
+ void addSExt32Register(Register Reg);
+ bool isSExt32Register(Register Reg) const;
+
+ bool isVectorCall() const { return IsVectorCall; }
+ void setIsVectorCall() { IsVectorCall = true; }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_LIB_TARGET_RISCV_RISCVMACHINEFUNCTIONINFO_H
diff --git a/contrib/libs/llvm16/lib/Target/RISCV/RISCVMacroFusion.cpp b/contrib/libs/llvm16/lib/Target/RISCV/RISCVMacroFusion.cpp
new file mode 100644
index 0000000000..232f202f18
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/RISCV/RISCVMacroFusion.cpp
@@ -0,0 +1,73 @@
+//===- RISCVMacroFusion.cpp - RISCV Macro Fusion --------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This file contains the RISCV implementation of the DAG scheduling
+/// mutation to pair instructions back to back.
+//
+//===----------------------------------------------------------------------===//
+//
+#include "RISCVMacroFusion.h"
+#include "RISCVSubtarget.h"
+#include "llvm/CodeGen/MacroFusion.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
+
+using namespace llvm;
+
+// Fuse LUI followed by ADDI or ADDIW.
+// rd = imm[31:0] which decomposes to
+// lui rd, imm[31:12]
+// addi(w) rd, rd, imm[11:0]
+static bool isLUIADDI(const MachineInstr *FirstMI,
+ const MachineInstr &SecondMI) {
+ if (SecondMI.getOpcode() != RISCV::ADDI &&
+ SecondMI.getOpcode() != RISCV::ADDIW)
+ return false;
+
+ // Assume the 1st instr to be a wildcard if it is unspecified.
+ if (!FirstMI)
+ return true;
+
+ if (FirstMI->getOpcode() != RISCV::LUI)
+ return false;
+
+ // The first operand of ADDI might be a frame index.
+ if (!SecondMI.getOperand(1).isReg())
+ return false;
+
+ Register FirstDest = FirstMI->getOperand(0).getReg();
+
+ // Destination of LUI should be the ADDI(W) source register.
+ if (SecondMI.getOperand(1).getReg() != FirstDest)
+ return false;
+
+ // If the input is virtual make sure this is the only user.
+ if (FirstDest.isVirtual()) {
+ auto &MRI = SecondMI.getMF()->getRegInfo();
+ return MRI.hasOneNonDBGUse(FirstDest);
+ }
+
+ // If the FirstMI destination is non-virtual, it should match the SecondMI
+ // destination.
+ return SecondMI.getOperand(0).getReg() == FirstDest;
+}
+
+static bool shouldScheduleAdjacent(const TargetInstrInfo &TII,
+ const TargetSubtargetInfo &TSI,
+ const MachineInstr *FirstMI,
+ const MachineInstr &SecondMI) {
+ const RISCVSubtarget &ST = static_cast<const RISCVSubtarget &>(TSI);
+
+ if (ST.hasLUIADDIFusion() && isLUIADDI(FirstMI, SecondMI))
+ return true;
+
+ return false;
+}
+
+std::unique_ptr<ScheduleDAGMutation> llvm::createRISCVMacroFusionDAGMutation() {
+ return createMacroFusionDAGMutation(shouldScheduleAdjacent);
+}
diff --git a/contrib/libs/llvm16/lib/Target/RISCV/RISCVMacroFusion.h b/contrib/libs/llvm16/lib/Target/RISCV/RISCVMacroFusion.h
new file mode 100644
index 0000000000..c238dacc37
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/RISCV/RISCVMacroFusion.h
@@ -0,0 +1,28 @@
+//===- RISCVMacroFusion.h - RISCV Macro Fusion ----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This file contains the RISCV definition of the DAG scheduling mutation
+/// to pair instructions back to back.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_RISCV_RISCVMACROFUSION_H
+#define LLVM_LIB_TARGET_RISCV_RISCVMACROFUSION_H
+
+#include "llvm/CodeGen/MachineScheduler.h"
+
+namespace llvm {
+
+/// Note that you have to add:
+/// DAG.addMutation(createRISCVMacroFusionDAGMutation());
+/// to RISCVPassConfig::createMachineScheduler() to have an effect.
+std::unique_ptr<ScheduleDAGMutation> createRISCVMacroFusionDAGMutation();
+
+} // namespace llvm
+
+#endif
diff --git a/contrib/libs/llvm16/lib/Target/RISCV/RISCVMakeCompressible.cpp b/contrib/libs/llvm16/lib/Target/RISCV/RISCVMakeCompressible.cpp
new file mode 100644
index 0000000000..39d0a201c6
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/RISCV/RISCVMakeCompressible.cpp
@@ -0,0 +1,391 @@
+//===-- RISCVMakeCompressible.cpp - Make more instructions compressible ---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass searches for instructions that are prevented from being compressed
+// by one of the following:
+//
+// 1. The use of a single uncompressed register.
+// 2. A base register + offset where the offset is too large to be compressed
+// and the base register may or may not be compressed.
+//
+//
+// For case 1, if a compressed register is available, then the uncompressed
+// register is copied to the compressed register and its uses are replaced.
+//
+// For example, storing zero uses the uncompressible zero register:
+// sw zero, 0(a0) # if zero
+// sw zero, 8(a0) # if zero
+// sw zero, 4(a0) # if zero
+// sw zero, 24(a0) # if zero
+//
+// If a compressed register (e.g. a1) is available, the above can be transformed
+// to the following to improve code size:
+// li a1, 0
+// c.sw a1, 0(a0)
+// c.sw a1, 8(a0)
+// c.sw a1, 4(a0)
+// c.sw a1, 24(a0)
+//
+//
+// For case 2, if a compressed register is available, then the original base
+// is copied and adjusted such that:
+//
+// new_base_register = base_register + adjustment
+// base_register + large_offset = new_base_register + small_offset
+//
+// For example, the following offsets are too large for c.sw:
+// lui a2, 983065
+// sw a1, -236(a2)
+// sw a1, -240(a2)
+// sw a1, -244(a2)
+// sw a1, -248(a2)
+// sw a1, -252(a2)
+// sw a0, -256(a2)
+//
+// If a compressed register is available (e.g. a3), a new base could be created
+// such that the addresses can accessed with a compressible offset, thus
+// improving code size:
+// lui a2, 983065
+// addi a3, a2, -256
+// c.sw a1, 20(a3)
+// c.sw a1, 16(a3)
+// c.sw a1, 12(a3)
+// c.sw a1, 8(a3)
+// c.sw a1, 4(a3)
+// c.sw a0, 0(a3)
+//
+//
+// This optimization is only applied if there are enough uses of the copied
+// register for code size to be reduced.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RISCV.h"
+#include "RISCVSubtarget.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/RegisterScavenging.h"
+#include "llvm/MC/TargetRegistry.h"
+#include "llvm/Support/Debug.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "riscv-make-compressible"
+#define RISCV_COMPRESS_INSTRS_NAME "RISCV Make Compressible"
+
+namespace {
+
+struct RISCVMakeCompressibleOpt : public MachineFunctionPass {
+ static char ID;
+
+ bool runOnMachineFunction(MachineFunction &Fn) override;
+
+ RISCVMakeCompressibleOpt() : MachineFunctionPass(ID) {
+ initializeRISCVMakeCompressibleOptPass(*PassRegistry::getPassRegistry());
+ }
+
+ StringRef getPassName() const override { return RISCV_COMPRESS_INSTRS_NAME; }
+};
+} // namespace
+
+char RISCVMakeCompressibleOpt::ID = 0;
+INITIALIZE_PASS(RISCVMakeCompressibleOpt, "riscv-make-compressible",
+ RISCV_COMPRESS_INSTRS_NAME, false, false)
+
+// Return log2(widthInBytes) of load/store done by Opcode.
+static unsigned log2LdstWidth(unsigned Opcode) {
+ switch (Opcode) {
+ default:
+ llvm_unreachable("Unexpected opcode");
+ case RISCV::LW:
+ case RISCV::SW:
+ case RISCV::FLW:
+ case RISCV::FSW:
+ return 2;
+ case RISCV::LD:
+ case RISCV::SD:
+ case RISCV::FLD:
+ case RISCV::FSD:
+ return 3;
+ }
+}
+
+// Return a mask for the offset bits of a non-stack-pointer based compressed
+// load/store.
+static uint8_t compressedLDSTOffsetMask(unsigned Opcode) {
+ return 0x1f << log2LdstWidth(Opcode);
+}
+
+// Return true if Offset fits within a compressed stack-pointer based
+// load/store.
+static bool compressibleSPOffset(int64_t Offset, unsigned Opcode) {
+ return log2LdstWidth(Opcode) == 2 ? isShiftedUInt<6, 2>(Offset)
+ : isShiftedUInt<6, 3>(Offset);
+}
+
+// Given an offset for a load/store, return the adjustment required to the base
+// register such that the address can be accessed with a compressible offset.
+// This will return 0 if the offset is already compressible.
+static int64_t getBaseAdjustForCompression(int64_t Offset, unsigned Opcode) {
+ // Return the excess bits that do not fit in a compressible offset.
+ return Offset & ~compressedLDSTOffsetMask(Opcode);
+}
+
+// Return true if Reg is in a compressed register class.
+static bool isCompressedReg(Register Reg) {
+ return RISCV::GPRCRegClass.contains(Reg) ||
+ RISCV::FPR32CRegClass.contains(Reg) ||
+ RISCV::FPR64CRegClass.contains(Reg);
+}
+
+// Return true if MI is a load for which there exists a compressed version.
+static bool isCompressibleLoad(const MachineInstr &MI) {
+ const RISCVSubtarget &STI = MI.getMF()->getSubtarget<RISCVSubtarget>();
+ const unsigned Opcode = MI.getOpcode();
+
+ return Opcode == RISCV::LW || (!STI.is64Bit() && Opcode == RISCV::FLW) ||
+ Opcode == RISCV::LD || Opcode == RISCV::FLD;
+}
+
+// Return true if MI is a store for which there exists a compressed version.
+static bool isCompressibleStore(const MachineInstr &MI) {
+ const RISCVSubtarget &STI = MI.getMF()->getSubtarget<RISCVSubtarget>();
+ const unsigned Opcode = MI.getOpcode();
+
+ return Opcode == RISCV::SW || (!STI.is64Bit() && Opcode == RISCV::FSW) ||
+ Opcode == RISCV::SD || Opcode == RISCV::FSD;
+}
+
+// Find a single register and/or large offset which, if compressible, would
+// allow the given instruction to be compressed.
+//
+// Possible return values:
+//
+// {Reg, 0} - Uncompressed Reg needs replacing with a compressed
+// register.
+// {Reg, N} - Reg needs replacing with a compressed register and
+// N needs adding to the new register. (Reg may be
+// compressed or uncompressed).
+// {RISCV::NoRegister, 0} - No suitable optimization found for this
+// instruction.
+static RegImmPair getRegImmPairPreventingCompression(const MachineInstr &MI) {
+ const unsigned Opcode = MI.getOpcode();
+
+ if (isCompressibleLoad(MI) || isCompressibleStore(MI)) {
+ const MachineOperand &MOImm = MI.getOperand(2);
+ if (!MOImm.isImm())
+ return RegImmPair(RISCV::NoRegister, 0);
+
+ int64_t Offset = MOImm.getImm();
+ int64_t NewBaseAdjust = getBaseAdjustForCompression(Offset, Opcode);
+ Register Base = MI.getOperand(1).getReg();
+
+ // Memory accesses via the stack pointer do not have a requirement for
+ // either of the registers to be compressible and can take a larger offset.
+ if (RISCV::SPRegClass.contains(Base)) {
+ if (!compressibleSPOffset(Offset, Opcode) && NewBaseAdjust)
+ return RegImmPair(Base, NewBaseAdjust);
+ } else {
+ Register SrcDest = MI.getOperand(0).getReg();
+ bool SrcDestCompressed = isCompressedReg(SrcDest);
+ bool BaseCompressed = isCompressedReg(Base);
+
+ // If only Base and/or offset prevent compression, then return Base and
+ // any adjustment required to make the offset compressible.
+ if ((!BaseCompressed || NewBaseAdjust) && SrcDestCompressed)
+ return RegImmPair(Base, NewBaseAdjust);
+
+ // For loads, we can only change the base register since dest is defined
+ // rather than used.
+ //
+ // For stores, we can change SrcDest (and Base if SrcDest == Base) but
+ // cannot resolve an uncompressible offset in this case.
+ if (isCompressibleStore(MI)) {
+ if (!SrcDestCompressed && (BaseCompressed || SrcDest == Base) &&
+ !NewBaseAdjust)
+ return RegImmPair(SrcDest, NewBaseAdjust);
+ }
+ }
+ }
+ return RegImmPair(RISCV::NoRegister, 0);
+}
+
+// Check all uses after FirstMI of the given register, keeping a vector of
+// instructions that would be compressible if the given register (and offset if
+// applicable) were compressible.
+//
+// If there are enough uses for this optimization to improve code size and a
+// compressed register is available, return that compressed register.
+static Register analyzeCompressibleUses(MachineInstr &FirstMI,
+ RegImmPair RegImm,
+ SmallVectorImpl<MachineInstr *> &MIs) {
+ MachineBasicBlock &MBB = *FirstMI.getParent();
+ const TargetRegisterInfo *TRI =
+ MBB.getParent()->getSubtarget().getRegisterInfo();
+
+ RegScavenger RS;
+ RS.enterBasicBlock(MBB);
+
+ for (MachineBasicBlock::instr_iterator I = FirstMI.getIterator(),
+ E = MBB.instr_end();
+ I != E; ++I) {
+ MachineInstr &MI = *I;
+
+ // Determine if this is an instruction which would benefit from using the
+ // new register.
+ RegImmPair CandidateRegImm = getRegImmPairPreventingCompression(MI);
+ if (CandidateRegImm.Reg == RegImm.Reg &&
+ CandidateRegImm.Imm == RegImm.Imm) {
+ // Advance tracking since the value in the new register must be live for
+ // this instruction too.
+ RS.forward(I);
+
+ MIs.push_back(&MI);
+ }
+
+ // If RegImm.Reg is modified by this instruction, then we cannot optimize
+ // past this instruction. If the register is already compressed, then it may
+ // possible to optimize a large offset in the current instruction - this
+ // will have been detected by the preceeding call to
+ // getRegImmPairPreventingCompression.
+ if (MI.modifiesRegister(RegImm.Reg, TRI))
+ break;
+ }
+
+ // Adjusting the base costs one new uncompressed addi and therefore three uses
+ // are required for a code size reduction. If no base adjustment is required,
+ // then copying the register costs one new c.mv (or c.li Rd, 0 for "copying"
+ // the zero register) and therefore two uses are required for a code size
+ // reduction.
+ if (MIs.size() < 2 || (RegImm.Imm != 0 && MIs.size() < 3))
+ return RISCV::NoRegister;
+
+ // Find a compressible register which will be available from the first
+ // instruction we care about to the last.
+ const TargetRegisterClass *RCToScavenge;
+
+ // Work out the compressed register class from which to scavenge.
+ if (RISCV::GPRRegClass.contains(RegImm.Reg))
+ RCToScavenge = &RISCV::GPRCRegClass;
+ else if (RISCV::FPR32RegClass.contains(RegImm.Reg))
+ RCToScavenge = &RISCV::FPR32CRegClass;
+ else if (RISCV::FPR64RegClass.contains(RegImm.Reg))
+ RCToScavenge = &RISCV::FPR64CRegClass;
+ else
+ return RISCV::NoRegister;
+
+ return RS.scavengeRegisterBackwards(*RCToScavenge, FirstMI.getIterator(),
+ /*RestoreAfter=*/false, /*SPAdj=*/0,
+ /*AllowSpill=*/false);
+}
+
+// Update uses of the old register in the given instruction to the new register.
+static void updateOperands(MachineInstr &MI, RegImmPair OldRegImm,
+ Register NewReg) {
+ unsigned Opcode = MI.getOpcode();
+
+ // If this pass is extended to support more instructions, the check for
+ // definedness may need to be strengthened.
+ assert((isCompressibleLoad(MI) || isCompressibleStore(MI)) &&
+ "Unsupported instruction for this optimization.");
+
+ int SkipN = 0;
+
+ // Skip the first (value) operand to a store instruction (except if the store
+ // offset is zero) in order to avoid an incorrect transformation.
+ // e.g. sd a0, 808(a0) to addi a2, a0, 768; sd a2, 40(a2)
+ if (isCompressibleStore(MI) && OldRegImm.Imm != 0)
+ SkipN = 1;
+
+ // Update registers
+ for (MachineOperand &MO : drop_begin(MI.operands(), SkipN))
+ if (MO.isReg() && MO.getReg() == OldRegImm.Reg) {
+ // Do not update operands that define the old register.
+ //
+ // The new register was scavenged for the range of instructions that are
+ // being updated, therefore it should not be defined within this range
+ // except possibly in the final instruction.
+ if (MO.isDef()) {
+ assert(isCompressibleLoad(MI));
+ continue;
+ }
+ // Update reg
+ MO.setReg(NewReg);
+ }
+
+ // Update offset
+ MachineOperand &MOImm = MI.getOperand(2);
+ int64_t NewOffset = MOImm.getImm() & compressedLDSTOffsetMask(Opcode);
+ MOImm.setImm(NewOffset);
+}
+
+bool RISCVMakeCompressibleOpt::runOnMachineFunction(MachineFunction &Fn) {
+ // This is a size optimization.
+ if (skipFunction(Fn.getFunction()) || !Fn.getFunction().hasMinSize())
+ return false;
+
+ const RISCVSubtarget &STI = Fn.getSubtarget<RISCVSubtarget>();
+ const RISCVInstrInfo &TII = *STI.getInstrInfo();
+
+ // This optimization only makes sense if compressed instructions are emitted.
+ // FIXME: Support Zca, Zcf, Zcd granularity.
+ if (!STI.hasStdExtC())
+ return false;
+
+ for (MachineBasicBlock &MBB : Fn) {
+ LLVM_DEBUG(dbgs() << "MBB: " << MBB.getName() << "\n");
+ for (MachineInstr &MI : MBB) {
+ // Determine if this instruction would otherwise be compressed if not for
+ // an uncompressible register or offset.
+ RegImmPair RegImm = getRegImmPairPreventingCompression(MI);
+ if (!RegImm.Reg && RegImm.Imm == 0)
+ continue;
+
+ // Determine if there is a set of instructions for which replacing this
+ // register with a compressed register (and compressible offset if
+ // applicable) is possible and will allow compression.
+ SmallVector<MachineInstr *, 8> MIs;
+ Register NewReg = analyzeCompressibleUses(MI, RegImm, MIs);
+ if (!NewReg)
+ continue;
+
+ // Create the appropriate copy and/or offset.
+ if (RISCV::GPRRegClass.contains(RegImm.Reg)) {
+ assert(isInt<12>(RegImm.Imm));
+ BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(RISCV::ADDI), NewReg)
+ .addReg(RegImm.Reg)
+ .addImm(RegImm.Imm);
+ } else {
+ // If we are looking at replacing an FPR register we don't expect to
+ // have any offset. The only compressible FP instructions with an offset
+ // are loads and stores, for which the offset applies to the GPR operand
+ // not the FPR operand.
+ assert(RegImm.Imm == 0);
+ unsigned Opcode = RISCV::FPR32RegClass.contains(RegImm.Reg)
+ ? RISCV::FSGNJ_S
+ : RISCV::FSGNJ_D;
+ BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(Opcode), NewReg)
+ .addReg(RegImm.Reg)
+ .addReg(RegImm.Reg);
+ }
+
+ // Update the set of instructions to use the compressed register and
+ // compressible offset instead. These instructions should now be
+ // compressible.
+ // TODO: Update all uses if RegImm.Imm == 0? Not just those that are
+ // expected to become compressible.
+ for (MachineInstr *UpdateMI : MIs)
+ updateOperands(*UpdateMI, RegImm, NewReg);
+ }
+ }
+ return true;
+}
+
+/// Returns an instance of the Make Compressible Optimization pass.
+FunctionPass *llvm::createRISCVMakeCompressibleOptPass() {
+ return new RISCVMakeCompressibleOpt();
+}
diff --git a/contrib/libs/llvm16/lib/Target/RISCV/RISCVMergeBaseOffset.cpp b/contrib/libs/llvm16/lib/Target/RISCV/RISCVMergeBaseOffset.cpp
new file mode 100644
index 0000000000..bd1074da70
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/RISCV/RISCVMergeBaseOffset.cpp
@@ -0,0 +1,455 @@
+//===----- RISCVMergeBaseOffset.cpp - Optimise address calculations ------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Merge the offset of address calculation into the offset field
+// of instructions in a global address lowering sequence.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RISCV.h"
+#include "RISCVTargetMachine.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/MC/TargetRegistry.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Target/TargetOptions.h"
+#include <optional>
+#include <set>
+using namespace llvm;
+
+#define DEBUG_TYPE "riscv-merge-base-offset"
+#define RISCV_MERGE_BASE_OFFSET_NAME "RISCV Merge Base Offset"
+namespace {
+
+struct RISCVMergeBaseOffsetOpt : public MachineFunctionPass {
+private:
+ const RISCVSubtarget *ST = nullptr;
+
+public:
+ static char ID;
+ bool runOnMachineFunction(MachineFunction &Fn) override;
+ bool detectFoldable(MachineInstr &Hi, MachineInstr *&Lo);
+
+ bool detectAndFoldOffset(MachineInstr &Hi, MachineInstr &Lo);
+ void foldOffset(MachineInstr &Hi, MachineInstr &Lo, MachineInstr &Tail,
+ int64_t Offset);
+ bool foldLargeOffset(MachineInstr &Hi, MachineInstr &Lo,
+ MachineInstr &TailAdd, Register GSReg);
+ bool foldShiftedOffset(MachineInstr &Hi, MachineInstr &Lo,
+ MachineInstr &TailShXAdd, Register GSReg);
+
+ bool foldIntoMemoryOps(MachineInstr &Hi, MachineInstr &Lo);
+
+ RISCVMergeBaseOffsetOpt() : MachineFunctionPass(ID) {}
+
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::IsSSA);
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ StringRef getPassName() const override {
+ return RISCV_MERGE_BASE_OFFSET_NAME;
+ }
+
+private:
+ MachineRegisterInfo *MRI;
+};
+} // end anonymous namespace
+
+char RISCVMergeBaseOffsetOpt::ID = 0;
+INITIALIZE_PASS(RISCVMergeBaseOffsetOpt, DEBUG_TYPE,
+ RISCV_MERGE_BASE_OFFSET_NAME, false, false)
+
+// Detect either of the patterns:
+//
+// 1. (medlow pattern):
+// lui vreg1, %hi(s)
+// addi vreg2, vreg1, %lo(s)
+//
+// 2. (medany pattern):
+// .Lpcrel_hi1:
+// auipc vreg1, %pcrel_hi(s)
+// addi vreg2, vreg1, %pcrel_lo(.Lpcrel_hi1)
+//
+// The pattern is only accepted if:
+// 1) The first instruction has only one use, which is the ADDI.
+// 2) The address operands have the appropriate type, reflecting the
+// lowering of a global address or constant pool using medlow or medany.
+// 3) The offset value in the Global Address or Constant Pool is 0.
+bool RISCVMergeBaseOffsetOpt::detectFoldable(MachineInstr &Hi,
+ MachineInstr *&Lo) {
+ if (Hi.getOpcode() != RISCV::LUI && Hi.getOpcode() != RISCV::AUIPC)
+ return false;
+
+ const MachineOperand &HiOp1 = Hi.getOperand(1);
+ unsigned ExpectedFlags =
+ Hi.getOpcode() == RISCV::AUIPC ? RISCVII::MO_PCREL_HI : RISCVII::MO_HI;
+ if (HiOp1.getTargetFlags() != ExpectedFlags)
+ return false;
+
+ if (!(HiOp1.isGlobal() || HiOp1.isCPI()) || HiOp1.getOffset() != 0)
+ return false;
+
+ Register HiDestReg = Hi.getOperand(0).getReg();
+ if (!MRI->hasOneUse(HiDestReg))
+ return false;
+
+ Lo = &*MRI->use_instr_begin(HiDestReg);
+ if (Lo->getOpcode() != RISCV::ADDI)
+ return false;
+
+ const MachineOperand &LoOp2 = Lo->getOperand(2);
+ if (Hi.getOpcode() == RISCV::LUI) {
+ if (LoOp2.getTargetFlags() != RISCVII::MO_LO ||
+ !(LoOp2.isGlobal() || LoOp2.isCPI()) || LoOp2.getOffset() != 0)
+ return false;
+ } else {
+ assert(Hi.getOpcode() == RISCV::AUIPC);
+ if (LoOp2.getTargetFlags() != RISCVII::MO_PCREL_LO ||
+ LoOp2.getType() != MachineOperand::MO_MCSymbol)
+ return false;
+ }
+
+ if (HiOp1.isGlobal()) {
+ LLVM_DEBUG(dbgs() << " Found lowered global address: "
+ << *HiOp1.getGlobal() << "\n");
+ } else {
+ assert(HiOp1.isCPI());
+ LLVM_DEBUG(dbgs() << " Found lowered constant pool: " << HiOp1.getIndex()
+ << "\n");
+ }
+
+ return true;
+}
+
+// Update the offset in Hi and Lo instructions.
+// Delete the tail instruction and update all the uses to use the
+// output from Lo.
+void RISCVMergeBaseOffsetOpt::foldOffset(MachineInstr &Hi, MachineInstr &Lo,
+ MachineInstr &Tail, int64_t Offset) {
+ assert(isInt<32>(Offset) && "Unexpected offset");
+ // Put the offset back in Hi and the Lo
+ Hi.getOperand(1).setOffset(Offset);
+ if (Hi.getOpcode() != RISCV::AUIPC)
+ Lo.getOperand(2).setOffset(Offset);
+ // Delete the tail instruction.
+ MRI->replaceRegWith(Tail.getOperand(0).getReg(), Lo.getOperand(0).getReg());
+ Tail.eraseFromParent();
+ LLVM_DEBUG(dbgs() << " Merged offset " << Offset << " into base.\n"
+ << " " << Hi << " " << Lo;);
+}
+
+// Detect patterns for large offsets that are passed into an ADD instruction.
+// If the pattern is found, updates the offset in Hi and Lo instructions
+// and deletes TailAdd and the instructions that produced the offset.
+//
+// Base address lowering is of the form:
+// Hi: lui vreg1, %hi(s)
+// Lo: addi vreg2, vreg1, %lo(s)
+// / \
+// / \
+// / \
+// / The large offset can be of two forms: \
+// 1) Offset that has non zero bits in lower 2) Offset that has non zero
+// 12 bits and upper 20 bits bits in upper 20 bits only
+// OffseLUI: lui vreg3, 4
+// OffsetTail: addi voff, vreg3, 188 OffsetTail: lui voff, 128
+// \ /
+// \ /
+// \ /
+// \ /
+// TailAdd: add vreg4, vreg2, voff
+bool RISCVMergeBaseOffsetOpt::foldLargeOffset(MachineInstr &Hi,
+ MachineInstr &Lo,
+ MachineInstr &TailAdd,
+ Register GAReg) {
+ assert((TailAdd.getOpcode() == RISCV::ADD) && "Expected ADD instruction!");
+ Register Rs = TailAdd.getOperand(1).getReg();
+ Register Rt = TailAdd.getOperand(2).getReg();
+ Register Reg = Rs == GAReg ? Rt : Rs;
+
+ // Can't fold if the register has more than one use.
+ if (!MRI->hasOneUse(Reg))
+ return false;
+ // This can point to an ADDI(W) or a LUI:
+ MachineInstr &OffsetTail = *MRI->getVRegDef(Reg);
+ if (OffsetTail.getOpcode() == RISCV::ADDI ||
+ OffsetTail.getOpcode() == RISCV::ADDIW) {
+ // The offset value has non zero bits in both %hi and %lo parts.
+ // Detect an ADDI that feeds from a LUI instruction.
+ MachineOperand &AddiImmOp = OffsetTail.getOperand(2);
+ if (AddiImmOp.getTargetFlags() != RISCVII::MO_None)
+ return false;
+ int64_t OffLo = AddiImmOp.getImm();
+ MachineInstr &OffsetLui =
+ *MRI->getVRegDef(OffsetTail.getOperand(1).getReg());
+ MachineOperand &LuiImmOp = OffsetLui.getOperand(1);
+ if (OffsetLui.getOpcode() != RISCV::LUI ||
+ LuiImmOp.getTargetFlags() != RISCVII::MO_None ||
+ !MRI->hasOneUse(OffsetLui.getOperand(0).getReg()))
+ return false;
+ int64_t Offset = SignExtend64<32>(LuiImmOp.getImm() << 12);
+ Offset += OffLo;
+ // RV32 ignores the upper 32 bits. ADDIW sign extends the result.
+ if (!ST->is64Bit() || OffsetTail.getOpcode() == RISCV::ADDIW)
+ Offset = SignExtend64<32>(Offset);
+ // We can only fold simm32 offsets.
+ if (!isInt<32>(Offset))
+ return false;
+ LLVM_DEBUG(dbgs() << " Offset Instrs: " << OffsetTail
+ << " " << OffsetLui);
+ foldOffset(Hi, Lo, TailAdd, Offset);
+ OffsetTail.eraseFromParent();
+ OffsetLui.eraseFromParent();
+ return true;
+ } else if (OffsetTail.getOpcode() == RISCV::LUI) {
+ // The offset value has all zero bits in the lower 12 bits. Only LUI
+ // exists.
+ LLVM_DEBUG(dbgs() << " Offset Instr: " << OffsetTail);
+ int64_t Offset = SignExtend64<32>(OffsetTail.getOperand(1).getImm() << 12);
+ foldOffset(Hi, Lo, TailAdd, Offset);
+ OffsetTail.eraseFromParent();
+ return true;
+ }
+ return false;
+}
+
+// Detect patterns for offsets that are passed into a SHXADD instruction.
+// The offset has 1, 2, or 3 trailing zeros and fits in simm13, simm14, simm15.
+// The constant is created with addi voff, x0, C, and shXadd is used to
+// fill insert the trailing zeros and do the addition.
+// If the pattern is found, updates the offset in Hi and Lo instructions
+// and deletes TailShXAdd and the instructions that produced the offset.
+//
+// Hi: lui vreg1, %hi(s)
+// Lo: addi vreg2, vreg1, %lo(s)
+// OffsetTail: addi voff, x0, C
+// TailAdd: shXadd vreg4, voff, vreg2
+bool RISCVMergeBaseOffsetOpt::foldShiftedOffset(MachineInstr &Hi,
+ MachineInstr &Lo,
+ MachineInstr &TailShXAdd,
+ Register GAReg) {
+ assert((TailShXAdd.getOpcode() == RISCV::SH1ADD ||
+ TailShXAdd.getOpcode() == RISCV::SH2ADD ||
+ TailShXAdd.getOpcode() == RISCV::SH3ADD) &&
+ "Expected SHXADD instruction!");
+
+ // The first source is the shifted operand.
+ Register Rs1 = TailShXAdd.getOperand(1).getReg();
+
+ if (GAReg != TailShXAdd.getOperand(2).getReg())
+ return false;
+
+ // Can't fold if the register has more than one use.
+ if (!MRI->hasOneUse(Rs1))
+ return false;
+ // This can point to an ADDI X0, C.
+ MachineInstr &OffsetTail = *MRI->getVRegDef(Rs1);
+ if (OffsetTail.getOpcode() != RISCV::ADDI)
+ return false;
+ if (!OffsetTail.getOperand(1).isReg() ||
+ OffsetTail.getOperand(1).getReg() != RISCV::X0 ||
+ !OffsetTail.getOperand(2).isImm())
+ return false;
+
+ int64_t Offset = OffsetTail.getOperand(2).getImm();
+ assert(isInt<12>(Offset) && "Unexpected offset");
+
+ unsigned ShAmt;
+ switch (TailShXAdd.getOpcode()) {
+ default: llvm_unreachable("Unexpected opcode");
+ case RISCV::SH1ADD: ShAmt = 1; break;
+ case RISCV::SH2ADD: ShAmt = 2; break;
+ case RISCV::SH3ADD: ShAmt = 3; break;
+ }
+
+ Offset = (uint64_t)Offset << ShAmt;
+
+ LLVM_DEBUG(dbgs() << " Offset Instr: " << OffsetTail);
+ foldOffset(Hi, Lo, TailShXAdd, Offset);
+ OffsetTail.eraseFromParent();
+ return true;
+}
+
+bool RISCVMergeBaseOffsetOpt::detectAndFoldOffset(MachineInstr &Hi,
+ MachineInstr &Lo) {
+ Register DestReg = Lo.getOperand(0).getReg();
+
+ // Look for arithmetic instructions we can get an offset from.
+ // We might be able to remove the arithmetic instructions by folding the
+ // offset into the LUI+ADDI.
+ if (!MRI->hasOneUse(DestReg))
+ return false;
+
+ // Lo has only one use.
+ MachineInstr &Tail = *MRI->use_instr_begin(DestReg);
+ switch (Tail.getOpcode()) {
+ default:
+ LLVM_DEBUG(dbgs() << "Don't know how to get offset from this instr:"
+ << Tail);
+ break;
+ case RISCV::ADDI: {
+ // Offset is simply an immediate operand.
+ int64_t Offset = Tail.getOperand(2).getImm();
+
+ // We might have two ADDIs in a row.
+ Register TailDestReg = Tail.getOperand(0).getReg();
+ if (MRI->hasOneUse(TailDestReg)) {
+ MachineInstr &TailTail = *MRI->use_instr_begin(TailDestReg);
+ if (TailTail.getOpcode() == RISCV::ADDI) {
+ Offset += TailTail.getOperand(2).getImm();
+ LLVM_DEBUG(dbgs() << " Offset Instrs: " << Tail << TailTail);
+ foldOffset(Hi, Lo, TailTail, Offset);
+ Tail.eraseFromParent();
+ return true;
+ }
+ }
+
+ LLVM_DEBUG(dbgs() << " Offset Instr: " << Tail);
+ foldOffset(Hi, Lo, Tail, Offset);
+ return true;
+ }
+ case RISCV::ADD:
+ // The offset is too large to fit in the immediate field of ADDI.
+ // This can be in two forms:
+ // 1) LUI hi_Offset followed by:
+ // ADDI lo_offset
+ // This happens in case the offset has non zero bits in
+ // both hi 20 and lo 12 bits.
+ // 2) LUI (offset20)
+ // This happens in case the lower 12 bits of the offset are zeros.
+ return foldLargeOffset(Hi, Lo, Tail, DestReg);
+ case RISCV::SH1ADD:
+ case RISCV::SH2ADD:
+ case RISCV::SH3ADD:
+ // The offset is too large to fit in the immediate field of ADDI.
+ // It may be encoded as (SH2ADD (ADDI X0, C), DestReg) or
+ // (SH3ADD (ADDI X0, C), DestReg).
+ return foldShiftedOffset(Hi, Lo, Tail, DestReg);
+ }
+
+ return false;
+}
+
+bool RISCVMergeBaseOffsetOpt::foldIntoMemoryOps(MachineInstr &Hi,
+ MachineInstr &Lo) {
+ Register DestReg = Lo.getOperand(0).getReg();
+
+ // If all the uses are memory ops with the same offset, we can transform:
+ //
+ // 1. (medlow pattern):
+ // Hi: lui vreg1, %hi(foo) ---> lui vreg1, %hi(foo+8)
+ // Lo: addi vreg2, vreg1, %lo(foo) ---> lw vreg3, lo(foo+8)(vreg1)
+ // Tail: lw vreg3, 8(vreg2)
+ //
+ // 2. (medany pattern):
+ // Hi: 1:auipc vreg1, %pcrel_hi(s) ---> auipc vreg1, %pcrel_hi(foo+8)
+ // Lo: addi vreg2, vreg1, %pcrel_lo(1b) ---> lw vreg3, %pcrel_lo(1b)(vreg1)
+ // Tail: lw vreg3, 8(vreg2)
+
+ std::optional<int64_t> CommonOffset;
+ for (const MachineInstr &UseMI : MRI->use_instructions(DestReg)) {
+ switch (UseMI.getOpcode()) {
+ default:
+ LLVM_DEBUG(dbgs() << "Not a load or store instruction: " << UseMI);
+ return false;
+ case RISCV::LB:
+ case RISCV::LH:
+ case RISCV::LW:
+ case RISCV::LBU:
+ case RISCV::LHU:
+ case RISCV::LWU:
+ case RISCV::LD:
+ case RISCV::FLH:
+ case RISCV::FLW:
+ case RISCV::FLD:
+ case RISCV::SB:
+ case RISCV::SH:
+ case RISCV::SW:
+ case RISCV::SD:
+ case RISCV::FSH:
+ case RISCV::FSW:
+ case RISCV::FSD: {
+ if (UseMI.getOperand(1).isFI())
+ return false;
+ // Register defined by Lo should not be the value register.
+ if (DestReg == UseMI.getOperand(0).getReg())
+ return false;
+ assert(DestReg == UseMI.getOperand(1).getReg() &&
+ "Expected base address use");
+ // All load/store instructions must use the same offset.
+ int64_t Offset = UseMI.getOperand(2).getImm();
+ if (CommonOffset && Offset != CommonOffset)
+ return false;
+ CommonOffset = Offset;
+ }
+ }
+ }
+
+ // We found a common offset.
+ // Update the offsets in global address lowering.
+ // We may have already folded some arithmetic so we need to add to any
+ // existing offset.
+ int64_t NewOffset = Hi.getOperand(1).getOffset() + *CommonOffset;
+ // RV32 ignores the upper 32 bits.
+ if (!ST->is64Bit())
+ NewOffset = SignExtend64<32>(NewOffset);
+ // We can only fold simm32 offsets.
+ if (!isInt<32>(NewOffset))
+ return false;
+
+ Hi.getOperand(1).setOffset(NewOffset);
+ MachineOperand &ImmOp = Lo.getOperand(2);
+ if (Hi.getOpcode() != RISCV::AUIPC)
+ ImmOp.setOffset(NewOffset);
+
+ // Update the immediate in the load/store instructions to add the offset.
+ for (MachineInstr &UseMI :
+ llvm::make_early_inc_range(MRI->use_instructions(DestReg))) {
+ UseMI.removeOperand(2);
+ UseMI.addOperand(ImmOp);
+ // Update the base reg in the Tail instruction to feed from LUI.
+ // Output of Hi is only used in Lo, no need to use MRI->replaceRegWith().
+ UseMI.getOperand(1).setReg(Hi.getOperand(0).getReg());
+ }
+
+ Lo.eraseFromParent();
+ return true;
+}
+
+bool RISCVMergeBaseOffsetOpt::runOnMachineFunction(MachineFunction &Fn) {
+ if (skipFunction(Fn.getFunction()))
+ return false;
+
+ ST = &Fn.getSubtarget<RISCVSubtarget>();
+
+ bool MadeChange = false;
+ MRI = &Fn.getRegInfo();
+ for (MachineBasicBlock &MBB : Fn) {
+ LLVM_DEBUG(dbgs() << "MBB: " << MBB.getName() << "\n");
+ for (MachineInstr &Hi : MBB) {
+ MachineInstr *Lo = nullptr;
+ if (!detectFoldable(Hi, Lo))
+ continue;
+ MadeChange |= detectAndFoldOffset(Hi, *Lo);
+ MadeChange |= foldIntoMemoryOps(Hi, *Lo);
+ }
+ }
+
+ return MadeChange;
+}
+
+/// Returns an instance of the Merge Base Offset Optimization pass.
+FunctionPass *llvm::createRISCVMergeBaseOffsetOptPass() {
+ return new RISCVMergeBaseOffsetOpt();
+}
diff --git a/contrib/libs/llvm16/lib/Target/RISCV/RISCVRedundantCopyElimination.cpp b/contrib/libs/llvm16/lib/Target/RISCV/RISCVRedundantCopyElimination.cpp
new file mode 100644
index 0000000000..c7cc21aa51
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/RISCV/RISCVRedundantCopyElimination.cpp
@@ -0,0 +1,181 @@
+//=- RISCVRedundantCopyElimination.cpp - Remove useless copy for RISCV ------=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass removes unnecessary zero copies in BBs that are targets of
+// beqz/bnez instructions. For instance, the copy instruction in the code below
+// can be removed because the beqz jumps to BB#2 when a0 is zero.
+// BB#1:
+// beqz %a0, <BB#2>
+// BB#2:
+// %a0 = COPY %x0
+// This pass should be run after register allocation.
+//
+// This pass is based on the earliest versions of
+// AArch64RedundantCopyElimination.
+//
+// FIXME: Support compares with constants other than zero? This is harder to
+// do on RISC-V since branches can't have immediates.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RISCV.h"
+#include "RISCVInstrInfo.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Support/Debug.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "riscv-copyelim"
+
+STATISTIC(NumCopiesRemoved, "Number of copies removed.");
+
+namespace {
+class RISCVRedundantCopyElimination : public MachineFunctionPass {
+ const MachineRegisterInfo *MRI;
+ const TargetRegisterInfo *TRI;
+ const TargetInstrInfo *TII;
+
+public:
+ static char ID;
+ RISCVRedundantCopyElimination() : MachineFunctionPass(ID) {
+ initializeRISCVRedundantCopyEliminationPass(
+ *PassRegistry::getPassRegistry());
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::NoVRegs);
+ }
+
+ StringRef getPassName() const override {
+ return "RISCV Redundant Copy Elimination";
+ }
+
+private:
+ bool optimizeBlock(MachineBasicBlock &MBB);
+};
+
+} // end anonymous namespace
+
+char RISCVRedundantCopyElimination::ID = 0;
+
+INITIALIZE_PASS(RISCVRedundantCopyElimination, "riscv-copyelim",
+ "RISCV redundant copy elimination pass", false, false)
+
+static bool
+guaranteesZeroRegInBlock(MachineBasicBlock &MBB,
+ const SmallVectorImpl<MachineOperand> &Cond,
+ MachineBasicBlock *TBB) {
+ assert(Cond.size() == 3 && "Unexpected number of operands");
+ assert(TBB != nullptr && "Expected branch target basic block");
+ auto CC = static_cast<RISCVCC::CondCode>(Cond[0].getImm());
+ if (CC == RISCVCC::COND_EQ && Cond[2].getReg() == RISCV::X0 && TBB == &MBB)
+ return true;
+ if (CC == RISCVCC::COND_NE && Cond[2].getReg() == RISCV::X0 && TBB != &MBB)
+ return true;
+ return false;
+}
+
+bool RISCVRedundantCopyElimination::optimizeBlock(MachineBasicBlock &MBB) {
+ // Check if the current basic block has a single predecessor.
+ if (MBB.pred_size() != 1)
+ return false;
+
+ // Check if the predecessor has two successors, implying the block ends in a
+ // conditional branch.
+ MachineBasicBlock *PredMBB = *MBB.pred_begin();
+ if (PredMBB->succ_size() != 2)
+ return false;
+
+ MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
+ SmallVector<MachineOperand, 3> Cond;
+ if (TII->analyzeBranch(*PredMBB, TBB, FBB, Cond, /*AllowModify*/ false) ||
+ Cond.empty())
+ return false;
+
+ // Is this a branch with X0?
+ if (!guaranteesZeroRegInBlock(MBB, Cond, TBB))
+ return false;
+
+ Register TargetReg = Cond[1].getReg();
+ if (!TargetReg)
+ return false;
+
+ bool Changed = false;
+ MachineBasicBlock::iterator LastChange = MBB.begin();
+ // Remove redundant Copy instructions unless TargetReg is modified.
+ for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;) {
+ MachineInstr *MI = &*I;
+ ++I;
+ if (MI->isCopy() && MI->getOperand(0).isReg() &&
+ MI->getOperand(1).isReg()) {
+ Register DefReg = MI->getOperand(0).getReg();
+ Register SrcReg = MI->getOperand(1).getReg();
+
+ if (SrcReg == RISCV::X0 && !MRI->isReserved(DefReg) &&
+ TargetReg == DefReg) {
+ LLVM_DEBUG(dbgs() << "Remove redundant Copy : ");
+ LLVM_DEBUG(MI->print(dbgs()));
+
+ MI->eraseFromParent();
+ Changed = true;
+ LastChange = I;
+ ++NumCopiesRemoved;
+ continue;
+ }
+ }
+
+ if (MI->modifiesRegister(TargetReg, TRI))
+ break;
+ }
+
+ if (!Changed)
+ return false;
+
+ MachineBasicBlock::iterator CondBr = PredMBB->getFirstTerminator();
+ assert((CondBr->getOpcode() == RISCV::BEQ ||
+ CondBr->getOpcode() == RISCV::BNE) &&
+ "Unexpected opcode");
+ assert(CondBr->getOperand(0).getReg() == TargetReg && "Unexpected register");
+
+ // Otherwise, we have to fixup the use-def chain, starting with the
+ // BEQ/BNE. Conservatively mark as much as we can live.
+ CondBr->clearRegisterKills(TargetReg, TRI);
+
+ // Add newly used reg to the block's live-in list if it isn't there already.
+ if (!MBB.isLiveIn(TargetReg))
+ MBB.addLiveIn(TargetReg);
+
+ // Clear any kills of TargetReg between CondBr and the last removed COPY.
+ for (MachineInstr &MMI : make_range(MBB.begin(), LastChange))
+ MMI.clearRegisterKills(TargetReg, TRI);
+
+ return true;
+}
+
+bool RISCVRedundantCopyElimination::runOnMachineFunction(MachineFunction &MF) {
+ if (skipFunction(MF.getFunction()))
+ return false;
+
+ TII = MF.getSubtarget().getInstrInfo();
+ TRI = MF.getSubtarget().getRegisterInfo();
+ MRI = &MF.getRegInfo();
+
+ bool Changed = false;
+ for (MachineBasicBlock &MBB : MF)
+ Changed |= optimizeBlock(MBB);
+
+ return Changed;
+}
+
+FunctionPass *llvm::createRISCVRedundantCopyEliminationPass() {
+ return new RISCVRedundantCopyElimination();
+}
diff --git a/contrib/libs/llvm16/lib/Target/RISCV/RISCVRegisterInfo.cpp b/contrib/libs/llvm16/lib/Target/RISCV/RISCVRegisterInfo.cpp
new file mode 100644
index 0000000000..927845aa23
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/RISCV/RISCVRegisterInfo.cpp
@@ -0,0 +1,786 @@
+//===-- RISCVRegisterInfo.cpp - RISCV Register Information ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the RISCV implementation of the TargetRegisterInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RISCVRegisterInfo.h"
+#include "RISCV.h"
+#include "RISCVMachineFunctionInfo.h"
+#include "RISCVSubtarget.h"
+#include "llvm/BinaryFormat/Dwarf.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/RegisterScavenging.h"
+#include "llvm/CodeGen/TargetFrameLowering.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
+#include "llvm/IR/DebugInfoMetadata.h"
+#include "llvm/Support/ErrorHandling.h"
+
+#define GET_REGINFO_TARGET_DESC
+#include "RISCVGenRegisterInfo.inc"
+
+using namespace llvm;
+
+static cl::opt<bool>
+ DisableRegAllocHints("riscv-disable-regalloc-hints", cl::Hidden,
+ cl::init(false),
+ cl::desc("Disable two address hints for register "
+ "allocation"));
+
+static_assert(RISCV::X1 == RISCV::X0 + 1, "Register list not consecutive");
+static_assert(RISCV::X31 == RISCV::X0 + 31, "Register list not consecutive");
+static_assert(RISCV::F1_H == RISCV::F0_H + 1, "Register list not consecutive");
+static_assert(RISCV::F31_H == RISCV::F0_H + 31,
+ "Register list not consecutive");
+static_assert(RISCV::F1_F == RISCV::F0_F + 1, "Register list not consecutive");
+static_assert(RISCV::F31_F == RISCV::F0_F + 31,
+ "Register list not consecutive");
+static_assert(RISCV::F1_D == RISCV::F0_D + 1, "Register list not consecutive");
+static_assert(RISCV::F31_D == RISCV::F0_D + 31,
+ "Register list not consecutive");
+static_assert(RISCV::V1 == RISCV::V0 + 1, "Register list not consecutive");
+static_assert(RISCV::V31 == RISCV::V0 + 31, "Register list not consecutive");
+
+RISCVRegisterInfo::RISCVRegisterInfo(unsigned HwMode)
+ : RISCVGenRegisterInfo(RISCV::X1, /*DwarfFlavour*/0, /*EHFlavor*/0,
+ /*PC*/0, HwMode) {}
+
+const MCPhysReg *
+RISCVRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
+ auto &Subtarget = MF->getSubtarget<RISCVSubtarget>();
+ if (MF->getFunction().getCallingConv() == CallingConv::GHC)
+ return CSR_NoRegs_SaveList;
+ if (MF->getFunction().hasFnAttribute("interrupt")) {
+ if (Subtarget.hasStdExtD())
+ return CSR_XLEN_F64_Interrupt_SaveList;
+ if (Subtarget.hasStdExtF())
+ return CSR_XLEN_F32_Interrupt_SaveList;
+ return CSR_Interrupt_SaveList;
+ }
+
+ switch (Subtarget.getTargetABI()) {
+ default:
+ llvm_unreachable("Unrecognized ABI");
+ case RISCVABI::ABI_ILP32:
+ case RISCVABI::ABI_LP64:
+ return CSR_ILP32_LP64_SaveList;
+ case RISCVABI::ABI_ILP32F:
+ case RISCVABI::ABI_LP64F:
+ return CSR_ILP32F_LP64F_SaveList;
+ case RISCVABI::ABI_ILP32D:
+ case RISCVABI::ABI_LP64D:
+ return CSR_ILP32D_LP64D_SaveList;
+ }
+}
+
+BitVector RISCVRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
+ const RISCVFrameLowering *TFI = getFrameLowering(MF);
+ BitVector Reserved(getNumRegs());
+
+ // Mark any registers requested to be reserved as such
+ for (size_t Reg = 0; Reg < getNumRegs(); Reg++) {
+ if (MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(Reg))
+ markSuperRegs(Reserved, Reg);
+ }
+
+ // Use markSuperRegs to ensure any register aliases are also reserved
+ markSuperRegs(Reserved, RISCV::X0); // zero
+ markSuperRegs(Reserved, RISCV::X2); // sp
+ markSuperRegs(Reserved, RISCV::X3); // gp
+ markSuperRegs(Reserved, RISCV::X4); // tp
+ if (TFI->hasFP(MF))
+ markSuperRegs(Reserved, RISCV::X8); // fp
+ // Reserve the base register if we need to realign the stack and allocate
+ // variable-sized objects at runtime.
+ if (TFI->hasBP(MF))
+ markSuperRegs(Reserved, RISCVABI::getBPReg()); // bp
+
+ // V registers for code generation. We handle them manually.
+ markSuperRegs(Reserved, RISCV::VL);
+ markSuperRegs(Reserved, RISCV::VTYPE);
+ markSuperRegs(Reserved, RISCV::VXSAT);
+ markSuperRegs(Reserved, RISCV::VXRM);
+ markSuperRegs(Reserved, RISCV::VLENB); // vlenb (constant)
+
+ // Floating point environment registers.
+ markSuperRegs(Reserved, RISCV::FRM);
+ markSuperRegs(Reserved, RISCV::FFLAGS);
+
+ assert(checkAllSuperRegsMarked(Reserved));
+ return Reserved;
+}
+
+bool RISCVRegisterInfo::isAsmClobberable(const MachineFunction &MF,
+ MCRegister PhysReg) const {
+ return !MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(PhysReg);
+}
+
+const uint32_t *RISCVRegisterInfo::getNoPreservedMask() const {
+ return CSR_NoRegs_RegMask;
+}
+
+// Frame indexes representing locations of CSRs which are given a fixed location
+// by save/restore libcalls.
+static const std::pair<unsigned, int> FixedCSRFIMap[] = {
+ {/*ra*/ RISCV::X1, -1},
+ {/*s0*/ RISCV::X8, -2},
+ {/*s1*/ RISCV::X9, -3},
+ {/*s2*/ RISCV::X18, -4},
+ {/*s3*/ RISCV::X19, -5},
+ {/*s4*/ RISCV::X20, -6},
+ {/*s5*/ RISCV::X21, -7},
+ {/*s6*/ RISCV::X22, -8},
+ {/*s7*/ RISCV::X23, -9},
+ {/*s8*/ RISCV::X24, -10},
+ {/*s9*/ RISCV::X25, -11},
+ {/*s10*/ RISCV::X26, -12},
+ {/*s11*/ RISCV::X27, -13}
+};
+
+bool RISCVRegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
+ Register Reg,
+ int &FrameIdx) const {
+ const auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
+ if (!RVFI->useSaveRestoreLibCalls(MF))
+ return false;
+
+ const auto *FII =
+ llvm::find_if(FixedCSRFIMap, [&](auto P) { return P.first == Reg; });
+ if (FII == std::end(FixedCSRFIMap))
+ return false;
+
+ FrameIdx = FII->second;
+ return true;
+}
+
+void RISCVRegisterInfo::adjustReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator II,
+ const DebugLoc &DL, Register DestReg,
+ Register SrcReg, StackOffset Offset,
+ MachineInstr::MIFlag Flag,
+ MaybeAlign RequiredAlign) const {
+
+ if (DestReg == SrcReg && !Offset.getFixed() && !Offset.getScalable())
+ return;
+
+ MachineFunction &MF = *MBB.getParent();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ const RISCVSubtarget &ST = MF.getSubtarget<RISCVSubtarget>();
+ const RISCVInstrInfo *TII = ST.getInstrInfo();
+
+ bool KillSrcReg = false;
+
+ if (Offset.getScalable()) {
+ unsigned ScalableAdjOpc = RISCV::ADD;
+ int64_t ScalableValue = Offset.getScalable();
+ if (ScalableValue < 0) {
+ ScalableValue = -ScalableValue;
+ ScalableAdjOpc = RISCV::SUB;
+ }
+ // Get vlenb and multiply vlen with the number of vector registers.
+ Register ScratchReg = DestReg;
+ if (DestReg == SrcReg)
+ ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
+ TII->getVLENFactoredAmount(MF, MBB, II, DL, ScratchReg, ScalableValue, Flag);
+ BuildMI(MBB, II, DL, TII->get(ScalableAdjOpc), DestReg)
+ .addReg(SrcReg).addReg(ScratchReg, RegState::Kill)
+ .setMIFlag(Flag);
+ SrcReg = DestReg;
+ KillSrcReg = true;
+ }
+
+ int64_t Val = Offset.getFixed();
+ if (DestReg == SrcReg && Val == 0)
+ return;
+
+ const uint64_t Align = RequiredAlign.valueOrOne().value();
+
+ if (isInt<12>(Val)) {
+ BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrcReg))
+ .addImm(Val)
+ .setMIFlag(Flag);
+ return;
+ }
+
+ // Try to split the offset across two ADDIs. We need to keep the intermediate
+ // result aligned after each ADDI. We need to determine the maximum value we
+ // can put in each ADDI. In the negative direction, we can use -2048 which is
+ // always sufficiently aligned. In the positive direction, we need to find the
+ // largest 12-bit immediate that is aligned. Exclude -4096 since it can be
+ // created with LUI.
+ assert(Align < 2048 && "Required alignment too large");
+ int64_t MaxPosAdjStep = 2048 - Align;
+ if (Val > -4096 && Val <= (2 * MaxPosAdjStep)) {
+ int64_t FirstAdj = Val < 0 ? -2048 : MaxPosAdjStep;
+ Val -= FirstAdj;
+ BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrcReg))
+ .addImm(FirstAdj)
+ .setMIFlag(Flag);
+ BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), DestReg)
+ .addReg(DestReg, RegState::Kill)
+ .addImm(Val)
+ .setMIFlag(Flag);
+ return;
+ }
+
+ unsigned Opc = RISCV::ADD;
+ if (Val < 0) {
+ Val = -Val;
+ Opc = RISCV::SUB;
+ }
+
+ Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
+ TII->movImm(MBB, II, DL, ScratchReg, Val, Flag);
+ BuildMI(MBB, II, DL, TII->get(Opc), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrcReg))
+ .addReg(ScratchReg, RegState::Kill)
+ .setMIFlag(Flag);
+}
+
+// Split a VSPILLx_Mx pseudo into multiple whole register stores separated by
+// LMUL*VLENB bytes.
+void RISCVRegisterInfo::lowerVSPILL(MachineBasicBlock::iterator II) const {
+ DebugLoc DL = II->getDebugLoc();
+ MachineBasicBlock &MBB = *II->getParent();
+ MachineFunction &MF = *MBB.getParent();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
+ const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
+
+ auto ZvlssegInfo = RISCV::isRVVSpillForZvlsseg(II->getOpcode());
+ unsigned NF = ZvlssegInfo->first;
+ unsigned LMUL = ZvlssegInfo->second;
+ assert(NF * LMUL <= 8 && "Invalid NF/LMUL combinations.");
+ unsigned Opcode, SubRegIdx;
+ switch (LMUL) {
+ default:
+ llvm_unreachable("LMUL must be 1, 2, or 4.");
+ case 1:
+ Opcode = RISCV::VS1R_V;
+ SubRegIdx = RISCV::sub_vrm1_0;
+ break;
+ case 2:
+ Opcode = RISCV::VS2R_V;
+ SubRegIdx = RISCV::sub_vrm2_0;
+ break;
+ case 4:
+ Opcode = RISCV::VS4R_V;
+ SubRegIdx = RISCV::sub_vrm4_0;
+ break;
+ }
+ static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
+ "Unexpected subreg numbering");
+ static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
+ "Unexpected subreg numbering");
+ static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
+ "Unexpected subreg numbering");
+
+ Register VL = MRI.createVirtualRegister(&RISCV::GPRRegClass);
+ BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENB), VL);
+ uint32_t ShiftAmount = Log2_32(LMUL);
+ if (ShiftAmount != 0)
+ BuildMI(MBB, II, DL, TII->get(RISCV::SLLI), VL)
+ .addReg(VL)
+ .addImm(ShiftAmount);
+
+ Register SrcReg = II->getOperand(0).getReg();
+ Register Base = II->getOperand(1).getReg();
+ bool IsBaseKill = II->getOperand(1).isKill();
+ Register NewBase = MRI.createVirtualRegister(&RISCV::GPRRegClass);
+ for (unsigned I = 0; I < NF; ++I) {
+ // Adding implicit-use of super register to describe we are using part of
+ // super register, that prevents machine verifier complaining when part of
+ // subreg is undef, see comment in MachineVerifier::checkLiveness for more
+ // detail.
+ BuildMI(MBB, II, DL, TII->get(Opcode))
+ .addReg(TRI->getSubReg(SrcReg, SubRegIdx + I))
+ .addReg(Base, getKillRegState(I == NF - 1))
+ .addMemOperand(*(II->memoperands_begin()))
+ .addReg(SrcReg, RegState::Implicit);
+ if (I != NF - 1)
+ BuildMI(MBB, II, DL, TII->get(RISCV::ADD), NewBase)
+ .addReg(Base, getKillRegState(I != 0 || IsBaseKill))
+ .addReg(VL, getKillRegState(I == NF - 2));
+ Base = NewBase;
+ }
+ II->eraseFromParent();
+}
+
+// Split a VSPILLx_Mx pseudo into multiple whole register loads separated by
+// LMUL*VLENB bytes.
+void RISCVRegisterInfo::lowerVRELOAD(MachineBasicBlock::iterator II) const {
+ DebugLoc DL = II->getDebugLoc();
+ MachineBasicBlock &MBB = *II->getParent();
+ MachineFunction &MF = *MBB.getParent();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
+ const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
+
+ auto ZvlssegInfo = RISCV::isRVVSpillForZvlsseg(II->getOpcode());
+ unsigned NF = ZvlssegInfo->first;
+ unsigned LMUL = ZvlssegInfo->second;
+ assert(NF * LMUL <= 8 && "Invalid NF/LMUL combinations.");
+ unsigned Opcode, SubRegIdx;
+ switch (LMUL) {
+ default:
+ llvm_unreachable("LMUL must be 1, 2, or 4.");
+ case 1:
+ Opcode = RISCV::VL1RE8_V;
+ SubRegIdx = RISCV::sub_vrm1_0;
+ break;
+ case 2:
+ Opcode = RISCV::VL2RE8_V;
+ SubRegIdx = RISCV::sub_vrm2_0;
+ break;
+ case 4:
+ Opcode = RISCV::VL4RE8_V;
+ SubRegIdx = RISCV::sub_vrm4_0;
+ break;
+ }
+ static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
+ "Unexpected subreg numbering");
+ static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
+ "Unexpected subreg numbering");
+ static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
+ "Unexpected subreg numbering");
+
+ Register VL = MRI.createVirtualRegister(&RISCV::GPRRegClass);
+ BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENB), VL);
+ uint32_t ShiftAmount = Log2_32(LMUL);
+ if (ShiftAmount != 0)
+ BuildMI(MBB, II, DL, TII->get(RISCV::SLLI), VL)
+ .addReg(VL)
+ .addImm(ShiftAmount);
+
+ Register DestReg = II->getOperand(0).getReg();
+ Register Base = II->getOperand(1).getReg();
+ bool IsBaseKill = II->getOperand(1).isKill();
+ Register NewBase = MRI.createVirtualRegister(&RISCV::GPRRegClass);
+ for (unsigned I = 0; I < NF; ++I) {
+ BuildMI(MBB, II, DL, TII->get(Opcode),
+ TRI->getSubReg(DestReg, SubRegIdx + I))
+ .addReg(Base, getKillRegState(I == NF - 1))
+ .addMemOperand(*(II->memoperands_begin()));
+ if (I != NF - 1)
+ BuildMI(MBB, II, DL, TII->get(RISCV::ADD), NewBase)
+ .addReg(Base, getKillRegState(I != 0 || IsBaseKill))
+ .addReg(VL, getKillRegState(I == NF - 2));
+ Base = NewBase;
+ }
+ II->eraseFromParent();
+}
+
+bool RISCVRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
+ int SPAdj, unsigned FIOperandNum,
+ RegScavenger *RS) const {
+ assert(SPAdj == 0 && "Unexpected non-zero SPAdj value");
+
+ MachineInstr &MI = *II;
+ MachineFunction &MF = *MI.getParent()->getParent();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ const RISCVSubtarget &ST = MF.getSubtarget<RISCVSubtarget>();
+ DebugLoc DL = MI.getDebugLoc();
+
+ int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
+ Register FrameReg;
+ StackOffset Offset =
+ getFrameLowering(MF)->getFrameIndexReference(MF, FrameIndex, FrameReg);
+ bool IsRVVSpill = RISCV::isRVVSpill(MI);
+ if (!IsRVVSpill)
+ Offset += StackOffset::getFixed(MI.getOperand(FIOperandNum + 1).getImm());
+
+ if (Offset.getScalable() &&
+ ST.getRealMinVLen() == ST.getRealMaxVLen()) {
+ // For an exact VLEN value, scalable offsets become constant and thus
+ // can be converted entirely into fixed offsets.
+ int64_t FixedValue = Offset.getFixed();
+ int64_t ScalableValue = Offset.getScalable();
+ assert(ScalableValue % 8 == 0 &&
+ "Scalable offset is not a multiple of a single vector size.");
+ int64_t NumOfVReg = ScalableValue / 8;
+ int64_t VLENB = ST.getRealMinVLen() / 8;
+ Offset = StackOffset::getFixed(FixedValue + NumOfVReg * VLENB);
+ }
+
+ if (!isInt<32>(Offset.getFixed())) {
+ report_fatal_error(
+ "Frame offsets outside of the signed 32-bit range not supported");
+ }
+
+ if (!IsRVVSpill) {
+ if (MI.getOpcode() == RISCV::ADDI && !isInt<12>(Offset.getFixed())) {
+ // We chose to emit the canonical immediate sequence rather than folding
+ // the offset into the using add under the theory that doing so doesn't
+ // save dynamic instruction count and some target may fuse the canonical
+ // 32 bit immediate sequence. We still need to clear the portion of the
+ // offset encoded in the immediate.
+ MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
+ } else {
+ // We can encode an add with 12 bit signed immediate in the immediate
+ // operand of our user instruction. As a result, the remaining
+ // offset can by construction, at worst, a LUI and a ADD.
+ int64_t Val = Offset.getFixed();
+ int64_t Lo12 = SignExtend64<12>(Val);
+ MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Lo12);
+ Offset = StackOffset::get((uint64_t)Val - (uint64_t)Lo12,
+ Offset.getScalable());
+ }
+ }
+
+ if (Offset.getScalable() || Offset.getFixed()) {
+ Register DestReg;
+ if (MI.getOpcode() == RISCV::ADDI)
+ DestReg = MI.getOperand(0).getReg();
+ else
+ DestReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
+ adjustReg(*II->getParent(), II, DL, DestReg, FrameReg, Offset,
+ MachineInstr::NoFlags, std::nullopt);
+ MI.getOperand(FIOperandNum).ChangeToRegister(DestReg, /*IsDef*/false,
+ /*IsImp*/false,
+ /*IsKill*/true);
+ } else {
+ MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, /*IsDef*/false,
+ /*IsImp*/false,
+ /*IsKill*/false);
+ }
+
+ // If after materializing the adjustment, we have a pointless ADDI, remove it
+ if (MI.getOpcode() == RISCV::ADDI &&
+ MI.getOperand(0).getReg() == MI.getOperand(1).getReg() &&
+ MI.getOperand(2).getImm() == 0) {
+ MI.eraseFromParent();
+ return true;
+ }
+
+ // Handle spill/fill of synthetic register classes for segment operations to
+ // ensure correctness in the edge case one gets spilled. There are many
+ // possible optimizations here, but given the extreme rarity of such spills,
+ // we prefer simplicity of implementation for now.
+ switch (MI.getOpcode()) {
+ case RISCV::PseudoVSPILL2_M1:
+ case RISCV::PseudoVSPILL2_M2:
+ case RISCV::PseudoVSPILL2_M4:
+ case RISCV::PseudoVSPILL3_M1:
+ case RISCV::PseudoVSPILL3_M2:
+ case RISCV::PseudoVSPILL4_M1:
+ case RISCV::PseudoVSPILL4_M2:
+ case RISCV::PseudoVSPILL5_M1:
+ case RISCV::PseudoVSPILL6_M1:
+ case RISCV::PseudoVSPILL7_M1:
+ case RISCV::PseudoVSPILL8_M1:
+ lowerVSPILL(II);
+ return true;
+ case RISCV::PseudoVRELOAD2_M1:
+ case RISCV::PseudoVRELOAD2_M2:
+ case RISCV::PseudoVRELOAD2_M4:
+ case RISCV::PseudoVRELOAD3_M1:
+ case RISCV::PseudoVRELOAD3_M2:
+ case RISCV::PseudoVRELOAD4_M1:
+ case RISCV::PseudoVRELOAD4_M2:
+ case RISCV::PseudoVRELOAD5_M1:
+ case RISCV::PseudoVRELOAD6_M1:
+ case RISCV::PseudoVRELOAD7_M1:
+ case RISCV::PseudoVRELOAD8_M1:
+ lowerVRELOAD(II);
+ return true;
+ }
+
+ return false;
+}
+
+bool RISCVRegisterInfo::requiresVirtualBaseRegisters(
+ const MachineFunction &MF) const {
+ return true;
+}
+
// Returns true if the instruction's frame index reference would be better
// served by a base register other than FP or SP.
// Used by LocalStackSlotAllocation pass to determine which frame index
// references it should create new base registers for.
bool RISCVRegisterInfo::needsFrameBaseReg(MachineInstr *MI,
                                          int64_t Offset) const {
  // Locate the frame-index operand. The assert only fires in +Asserts builds;
  // callers are expected to pass an instruction that really has one.
  unsigned FIOperandNum = 0;
  for (; !MI->getOperand(FIOperandNum).isFI(); FIOperandNum++)
    assert(FIOperandNum < MI->getNumOperands() &&
           "Instr doesn't have FrameIndex operand");

  // For RISC-V, The machine instructions that include a FrameIndex operand
  // are load/store, ADDI instructions.
  unsigned MIFrm = RISCVII::getFormat(MI->getDesc().TSFlags);
  if (MIFrm != RISCVII::InstFormatI && MIFrm != RISCVII::InstFormatS)
    return false;
  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  if (!MI->mayLoad() && !MI->mayStore())
    return false;

  const MachineFunction &MF = *MI->getMF();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const RISCVFrameLowering *TFI = getFrameLowering(MF);
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned CalleeSavedSize = 0;
  // Fold in the immediate already encoded on the instruction.
  Offset += getFrameIndexInstrOffset(MI, FIOperandNum);

  // Estimate the stack size used to store callee saved registers(
  // excludes reserved registers).
  BitVector ReservedRegs = getReservedRegs(MF);
  for (const MCPhysReg *R = MRI.getCalleeSavedRegs(); MCPhysReg Reg = *R; ++R) {
    if (!ReservedRegs.test(Reg))
      CalleeSavedSize += getSpillSize(*getMinimalPhysRegClass(Reg));
  }

  // FP-relative accesses reach the slot "below" the CSR area, hence the
  // subtraction when estimating the worst-case FP offset.
  int64_t MaxFPOffset = Offset - CalleeSavedSize;
  if (TFI->hasFP(MF) && !shouldRealignStack(MF))
    return !isFrameOffsetLegal(MI, RISCV::X8, MaxFPOffset);

  // Assume 128 bytes spill slots size to estimate the maximum possible
  // offset relative to the stack pointer.
  // FIXME: The 128 is copied from ARM. We should run some statistics and pick a
  // real one for RISC-V.
  int64_t MaxSPOffset = Offset + 128;
  MaxSPOffset += MFI.getLocalFrameSize();
  return !isFrameOffsetLegal(MI, RISCV::X2, MaxSPOffset);
}
+
+// Determine whether a given base register plus offset immediate is
+// encodable to resolve a frame index.
+bool RISCVRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
+ Register BaseReg,
+ int64_t Offset) const {
+ unsigned FIOperandNum = 0;
+ while (!MI->getOperand(FIOperandNum).isFI()) {
+ FIOperandNum++;
+ assert(FIOperandNum < MI->getNumOperands() &&
+ "Instr does not have a FrameIndex operand!");
+ }
+
+ Offset += getFrameIndexInstrOffset(MI, FIOperandNum);
+ return isInt<12>(Offset);
+}
+
+// Insert defining instruction(s) for a pointer to FrameIdx before
+// insertion point I.
+// Return materialized frame pointer.
+Register RISCVRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
+ int FrameIdx,
+ int64_t Offset) const {
+ MachineBasicBlock::iterator MBBI = MBB->begin();
+ DebugLoc DL;
+ if (MBBI != MBB->end())
+ DL = MBBI->getDebugLoc();
+ MachineFunction *MF = MBB->getParent();
+ MachineRegisterInfo &MFI = MF->getRegInfo();
+ const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
+
+ Register BaseReg = MFI.createVirtualRegister(&RISCV::GPRRegClass);
+ BuildMI(*MBB, MBBI, DL, TII->get(RISCV::ADDI), BaseReg)
+ .addFrameIndex(FrameIdx)
+ .addImm(Offset);
+ return BaseReg;
+}
+
+// Resolve a frame index operand of an instruction to reference the
+// indicated base register plus offset instead.
+void RISCVRegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
+ int64_t Offset) const {
+ unsigned FIOperandNum = 0;
+ while (!MI.getOperand(FIOperandNum).isFI()) {
+ FIOperandNum++;
+ assert(FIOperandNum < MI.getNumOperands() &&
+ "Instr does not have a FrameIndex operand!");
+ }
+
+ Offset += getFrameIndexInstrOffset(&MI, FIOperandNum);
+ // FrameIndex Operands are always represented as a
+ // register followed by an immediate.
+ MI.getOperand(FIOperandNum).ChangeToRegister(BaseReg, false);
+ MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
+}
+
// Get the offset from the referenced frame index in the instruction,
// if there is one.
// In both the I and S instruction formats the immediate operand directly
// follows the frame-index operand, so Idx+1 is read unconditionally.
int64_t RISCVRegisterInfo::getFrameIndexInstrOffset(const MachineInstr *MI,
                                                    int Idx) const {
  assert((RISCVII::getFormat(MI->getDesc().TSFlags) == RISCVII::InstFormatI ||
          RISCVII::getFormat(MI->getDesc().TSFlags) == RISCVII::InstFormatS) &&
         "The MI must be I or S format.");
  assert(MI->getOperand(Idx).isFI() && "The Idx'th operand of MI is not a "
                                       "FrameIndex operand");
  return MI->getOperand(Idx + 1).getImm();
}
+
+Register RISCVRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
+ const TargetFrameLowering *TFI = getFrameLowering(MF);
+ return TFI->hasFP(MF) ? RISCV::X8 : RISCV::X2;
+}
+
+const uint32_t *
+RISCVRegisterInfo::getCallPreservedMask(const MachineFunction & MF,
+ CallingConv::ID CC) const {
+ auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();
+
+ if (CC == CallingConv::GHC)
+ return CSR_NoRegs_RegMask;
+ switch (Subtarget.getTargetABI()) {
+ default:
+ llvm_unreachable("Unrecognized ABI");
+ case RISCVABI::ABI_ILP32:
+ case RISCVABI::ABI_LP64:
+ return CSR_ILP32_LP64_RegMask;
+ case RISCVABI::ABI_ILP32F:
+ case RISCVABI::ABI_LP64F:
+ return CSR_ILP32F_LP64F_RegMask;
+ case RISCVABI::ABI_ILP32D:
+ case RISCVABI::ABI_LP64D:
+ return CSR_ILP32D_LP64D_RegMask;
+ }
+}
+
+const TargetRegisterClass *
+RISCVRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
+ const MachineFunction &) const {
+ if (RC == &RISCV::VMV0RegClass)
+ return &RISCV::VRRegClass;
+ return RC;
+}
+
+void RISCVRegisterInfo::getOffsetOpcodes(const StackOffset &Offset,
+ SmallVectorImpl<uint64_t> &Ops) const {
+ // VLENB is the length of a vector register in bytes. We use <vscale x 8 x i8>
+ // to represent one vector register. The dwarf offset is
+ // VLENB * scalable_offset / 8.
+ assert(Offset.getScalable() % 8 == 0 && "Invalid frame offset");
+
+ // Add fixed-sized offset using existing DIExpression interface.
+ DIExpression::appendOffset(Ops, Offset.getFixed());
+
+ unsigned VLENB = getDwarfRegNum(RISCV::VLENB, true);
+ int64_t VLENBSized = Offset.getScalable() / 8;
+ if (VLENBSized > 0) {
+ Ops.push_back(dwarf::DW_OP_constu);
+ Ops.push_back(VLENBSized);
+ Ops.append({dwarf::DW_OP_bregx, VLENB, 0ULL});
+ Ops.push_back(dwarf::DW_OP_mul);
+ Ops.push_back(dwarf::DW_OP_plus);
+ } else if (VLENBSized < 0) {
+ Ops.push_back(dwarf::DW_OP_constu);
+ Ops.push_back(-VLENBSized);
+ Ops.append({dwarf::DW_OP_bregx, VLENB, 0ULL});
+ Ops.push_back(dwarf::DW_OP_mul);
+ Ops.push_back(dwarf::DW_OP_minus);
+ }
+}
+
+unsigned
+RISCVRegisterInfo::getRegisterCostTableIndex(const MachineFunction &MF) const {
+ return MF.getSubtarget<RISCVSubtarget>().hasStdExtCOrZca() ? 1 : 0;
+}
+
// Add two address hints to improve chances of being able to use a compressed
// instruction.
// Returns the base implementation's result unchanged; this override only
// appends extra hints (after any copy hints) for two-address-style reuse that
// would let the instruction be encoded in its 16-bit compressed form.
bool RISCVRegisterInfo::getRegAllocationHints(
    Register VirtReg, ArrayRef<MCPhysReg> Order,
    SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF,
    const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo *MRI = &MF.getRegInfo();

  bool BaseImplRetVal = TargetRegisterInfo::getRegAllocationHints(
      VirtReg, Order, Hints, MF, VRM, Matrix);

  // Without a virt-reg map there is nothing to resolve hints against.
  if (!VRM || DisableRegAllocHints)
    return BaseImplRetVal;

  // Add any two address hints after any copy hints.
  SmallSet<Register, 4> TwoAddrHints;

  // Record MO's (possibly VRM-resolved) physical register as a candidate hint,
  // provided it is allocatable and, when NeedGPRC, within the GPRC class.
  auto tryAddHint = [&](const MachineOperand &VRRegMO, const MachineOperand &MO,
                        bool NeedGPRC) -> void {
    Register Reg = MO.getReg();
    Register PhysReg = Reg.isPhysical() ? Reg : Register(VRM->getPhys(Reg));
    if (PhysReg && (!NeedGPRC || RISCV::GPRCRegClass.contains(PhysReg))) {
      assert(!MO.getSubReg() && !VRRegMO.getSubReg() && "Unexpected subreg!");
      if (!MRI->isReserved(PhysReg) && !is_contained(Hints, PhysReg))
        TwoAddrHints.insert(PhysReg);
    }
  };

  // This is all of the compressible binary instructions. If an instruction
  // needs GPRC register class operands \p NeedGPRC will be set to true.
  auto isCompressible = [](const MachineInstr &MI, bool &NeedGPRC) {
    NeedGPRC = false;
    switch (MI.getOpcode()) {
    default:
      return false;
    case RISCV::AND:
    case RISCV::OR:
    case RISCV::XOR:
    case RISCV::SUB:
    case RISCV::ADDW:
    case RISCV::SUBW:
      NeedGPRC = true;
      return true;
    case RISCV::ANDI:
      // c.andi additionally requires a 6-bit signed immediate.
      NeedGPRC = true;
      return MI.getOperand(2).isImm() && isInt<6>(MI.getOperand(2).getImm());
    case RISCV::SRAI:
    case RISCV::SRLI:
      NeedGPRC = true;
      return true;
    case RISCV::ADD:
    case RISCV::SLLI:
      return true;
    case RISCV::ADDI:
    case RISCV::ADDIW:
      // c.addi/c.addiw also require a 6-bit signed immediate.
      return MI.getOperand(2).isImm() && isInt<6>(MI.getOperand(2).getImm());
    }
  };

  // Returns true if this operand is compressible. For non-registers it always
  // returns true. Immediate range was already checked in isCompressible.
  // For registers, it checks if the register is a GPRC register. reg-reg
  // instructions that require GPRC need all register operands to be GPRC.
  auto isCompressibleOpnd = [&](const MachineOperand &MO) {
    if (!MO.isReg())
      return true;
    Register Reg = MO.getReg();
    Register PhysReg = Reg.isPhysical() ? Reg : Register(VRM->getPhys(Reg));
    return PhysReg && RISCV::GPRCRegClass.contains(PhysReg);
  };

  // For every use/def of VirtReg in a compressible instruction, hint the
  // register that would make dest == source (the compressed encodings are
  // destructive two-address forms).
  for (auto &MO : MRI->reg_nodbg_operands(VirtReg)) {
    const MachineInstr &MI = *MO.getParent();
    unsigned OpIdx = MI.getOperandNo(&MO);
    bool NeedGPRC;
    if (isCompressible(MI, NeedGPRC)) {
      if (OpIdx == 0 && MI.getOperand(1).isReg()) {
        if (!NeedGPRC || isCompressibleOpnd(MI.getOperand(2)))
          tryAddHint(MO, MI.getOperand(1), NeedGPRC);
        if (MI.isCommutable() && MI.getOperand(2).isReg() &&
            (!NeedGPRC || isCompressibleOpnd(MI.getOperand(1))))
          tryAddHint(MO, MI.getOperand(2), NeedGPRC);
      } else if (OpIdx == 1 &&
                 (!NeedGPRC || isCompressibleOpnd(MI.getOperand(2)))) {
        tryAddHint(MO, MI.getOperand(0), NeedGPRC);
      } else if (MI.isCommutable() && OpIdx == 2 &&
                 (!NeedGPRC || isCompressibleOpnd(MI.getOperand(1)))) {
        tryAddHint(MO, MI.getOperand(0), NeedGPRC);
      }
    }
  }

  // Emit the collected hints in allocation-order priority.
  for (MCPhysReg OrderReg : Order)
    if (TwoAddrHints.count(OrderReg))
      Hints.push_back(OrderReg);

  return BaseImplRetVal;
}
diff --git a/contrib/libs/llvm16/lib/Target/RISCV/RISCVRegisterInfo.h b/contrib/libs/llvm16/lib/Target/RISCV/RISCVRegisterInfo.h
new file mode 100644
index 0000000000..57a7256735
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/RISCV/RISCVRegisterInfo.h
@@ -0,0 +1,105 @@
+//===-- RISCVRegisterInfo.h - RISCV Register Information Impl ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the RISCV implementation of the TargetRegisterInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_RISCV_RISCVREGISTERINFO_H
+#define LLVM_LIB_TARGET_RISCV_RISCVREGISTERINFO_H
+
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+
+#define GET_REGINFO_HEADER
+#include "RISCVGenRegisterInfo.inc"
+
+namespace llvm {
+
// RISC-V implementation of TargetRegisterInfo. Most hooks are overrides of
// the generic register-info interface; the TableGen-generated base provides
// the register descriptions themselves.
struct RISCVRegisterInfo : public RISCVGenRegisterInfo {

  RISCVRegisterInfo(unsigned HwMode);

  // Callee-preserved mask/register queries, keyed off the target ABI.
  const uint32_t *getCallPreservedMask(const MachineFunction &MF,
                                       CallingConv::ID) const override;

  const MCPhysReg *getCalleeSavedRegs(const MachineFunction *MF) const override;

  BitVector getReservedRegs(const MachineFunction &MF) const override;
  bool isAsmClobberable(const MachineFunction &MF,
                        MCRegister PhysReg) const override;

  const uint32_t *getNoPreservedMask() const override;

  bool hasReservedSpillSlot(const MachineFunction &MF, Register Reg,
                            int &FrameIdx) const override;

  // Update DestReg to have the value SrcReg plus an offset. This is
  // used during frame layout, and we may need to ensure that if we
  // split the offset internally that the DestReg is always aligned,
  // assuming that source reg was.
  void adjustReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator II,
                 const DebugLoc &DL, Register DestReg, Register SrcReg,
                 StackOffset Offset, MachineInstr::MIFlag Flag,
                 MaybeAlign RequiredAlign) const;

  bool eliminateFrameIndex(MachineBasicBlock::iterator MI, int SPAdj,
                           unsigned FIOperandNum,
                           RegScavenger *RS = nullptr) const override;

  // Virtual-base-register support for the LocalStackSlotAllocation pass.
  bool requiresVirtualBaseRegisters(const MachineFunction &MF) const override;

  bool needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const override;

  bool isFrameOffsetLegal(const MachineInstr *MI, Register BaseReg,
                          int64_t Offset) const override;

  Register materializeFrameBaseRegister(MachineBasicBlock *MBB, int FrameIdx,
                                        int64_t Offset) const override;

  void resolveFrameIndex(MachineInstr &MI, Register BaseReg,
                         int64_t Offset) const override;

  int64_t getFrameIndexInstrOffset(const MachineInstr *MI,
                                   int Idx) const override;

  // Expansion of the vector spill/reload pseudos (PseudoVSPILL*/PseudoVRELOAD*).
  void lowerVSPILL(MachineBasicBlock::iterator II) const;
  void lowerVRELOAD(MachineBasicBlock::iterator II) const;

  Register getFrameRegister(const MachineFunction &MF) const override;

  bool requiresRegisterScavenging(const MachineFunction &MF) const override {
    return true;
  }

  bool requiresFrameIndexScavenging(const MachineFunction &MF) const override {
    return true;
  }

  // Pointers always live in general-purpose registers.
  const TargetRegisterClass *
  getPointerRegClass(const MachineFunction &MF,
                     unsigned Kind = 0) const override {
    return &RISCV::GPRRegClass;
  }

  const TargetRegisterClass *
  getLargestLegalSuperClass(const TargetRegisterClass *RC,
                            const MachineFunction &) const override;

  // DWARF expression emission for (possibly scalable) stack offsets.
  void getOffsetOpcodes(const StackOffset &Offset,
                        SmallVectorImpl<uint64_t> &Ops) const override;

  unsigned getRegisterCostTableIndex(const MachineFunction &MF) const override;

  // Extra allocation hints that favor compressed-encoding register reuse.
  bool getRegAllocationHints(Register VirtReg, ArrayRef<MCPhysReg> Order,
                             SmallVectorImpl<MCPhysReg> &Hints,
                             const MachineFunction &MF, const VirtRegMap *VRM,
                             const LiveRegMatrix *Matrix) const override;
};
+}
+
+#endif
diff --git a/contrib/libs/llvm16/lib/Target/RISCV/RISCVSExtWRemoval.cpp b/contrib/libs/llvm16/lib/Target/RISCV/RISCVSExtWRemoval.cpp
new file mode 100644
index 0000000000..a26a3f2411
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/RISCV/RISCVSExtWRemoval.cpp
@@ -0,0 +1,377 @@
+//===-------------- RISCVSExtWRemoval.cpp - MI sext.w Removal -------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===---------------------------------------------------------------------===//
+//
+// This pass removes unneeded sext.w instructions at the MI level. Either
+// because the sign extended bits aren't consumed or because the input was
+// already sign extended by an earlier instruction.
+//
+//===---------------------------------------------------------------------===//
+
+#include "RISCV.h"
+#include "RISCVMachineFunctionInfo.h"
+#include "RISCVSubtarget.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "riscv-sextw-removal"
+
// Pass statistics, reported under -stats.
STATISTIC(NumRemovedSExtW, "Number of removed sign-extensions");
STATISTIC(NumTransformedToWInstrs,
          "Number of instructions transformed to W-ops");

// Escape hatch to disable the whole pass for debugging/triage.
static cl::opt<bool> DisableSExtWRemoval("riscv-disable-sextw-removal",
                                         cl::desc("Disable removal of sext.w"),
                                         cl::init(false), cl::Hidden);
+namespace {
+
// Machine-function pass that removes redundant sext.w (ADDIW rd, rs, 0)
// instructions; see the file header for the two conditions under which a
// sext.w is redundant.
class RISCVSExtWRemoval : public MachineFunctionPass {
public:
  static char ID;

  RISCVSExtWRemoval() : MachineFunctionPass(ID) {
    initializeRISCVSExtWRemovalPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  // The pass only deletes/retargets instructions inside blocks.
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  StringRef getPassName() const override { return "RISCV sext.w Removal"; }
};
+
+} // end anonymous namespace
+
// Pass identity and registration with the LLVM pass registry.
char RISCVSExtWRemoval::ID = 0;
INITIALIZE_PASS(RISCVSExtWRemoval, DEBUG_TYPE, "RISCV sext.w Removal", false,
                false)
+
// Factory used when the RISC-V target builds its codegen pipeline.
FunctionPass *llvm::createRISCVSExtWRemovalPass() {
  return new RISCVSExtWRemoval();
}
+
// This function returns true if the machine instruction always outputs a value
// where bits 63:32 match bit 31.
// Note: MRI is currently unused here; it is kept so the signature matches the
// call site in isSignExtendedW.
static bool isSignExtendingOpW(const MachineInstr &MI,
                               const MachineRegisterInfo &MRI) {
  uint64_t TSFlags = MI.getDesc().TSFlags;

  // Instructions that can be determined from opcode are marked in tablegen.
  if (TSFlags & RISCVII::IsSignExtendingOpWMask)
    return true;

  // Special cases that require checking operands.
  switch (MI.getOpcode()) {
  // shifting right sufficiently makes the value 32-bit sign-extended
  case RISCV::SRAI:
    return MI.getOperand(2).getImm() >= 32;
  case RISCV::SRLI:
    // SRLI needs strictly more than 32: a 32-bit shift leaves bit 31 set from
    // bit 63, which is not guaranteed to match the new bit 31.
    return MI.getOperand(2).getImm() > 32;
  // The LI pattern ADDI rd, X0, imm is sign extended.
  case RISCV::ADDI:
    return MI.getOperand(1).isReg() && MI.getOperand(1).getReg() == RISCV::X0;
  // An ANDI with an 11 bit immediate will zero bits 63:11.
  case RISCV::ANDI:
    return isUInt<11>(MI.getOperand(2).getImm());
  // An ORI with an >11 bit immediate (negative 12-bit) will set bits 63:11.
  case RISCV::ORI:
    return !isUInt<11>(MI.getOperand(2).getImm());
  // Copying from X0 produces zero.
  case RISCV::COPY:
    return MI.getOperand(1).getReg() == RISCV::X0;
  }

  return false;
}
+
// Returns true when every definition reaching SrcReg is (or propagates) a
// 32-bit sign-extended value. Walks the def chain with a worklist; opcodes
// that would become sign-extending if rewritten to their W form are collected
// in FixableDef for the caller to convert.
static bool isSignExtendedW(Register SrcReg, const MachineRegisterInfo &MRI,
                            const RISCVInstrInfo &TII,
                            SmallPtrSetImpl<MachineInstr *> &FixableDef) {

  SmallPtrSet<const MachineInstr *, 4> Visited;
  SmallVector<MachineInstr *, 4> Worklist;

  // Queue the defining instruction of a virtual register; returns false when
  // the chain leaves SSA-visible territory (physical reg or no def).
  auto AddRegDefToWorkList = [&](Register SrcReg) {
    if (!SrcReg.isVirtual())
      return false;
    MachineInstr *SrcMI = MRI.getVRegDef(SrcReg);
    if (!SrcMI)
      return false;
    // Add SrcMI to the worklist.
    Worklist.push_back(SrcMI);
    return true;
  };

  if (!AddRegDefToWorkList(SrcReg))
    return false;

  while (!Worklist.empty()) {
    MachineInstr *MI = Worklist.pop_back_val();

    // If we already visited this instruction, we don't need to check it again.
    if (!Visited.insert(MI).second)
      continue;

    // If this is a sign extending operation we don't need to look any further.
    if (isSignExtendingOpW(*MI, MRI))
      continue;

    // Is this an instruction that propagates sign extend?
    switch (MI->getOpcode()) {
    default:
      // Unknown opcode, give up.
      return false;
    case RISCV::COPY: {
      const MachineFunction *MF = MI->getMF();
      const RISCVMachineFunctionInfo *RVFI =
          MF->getInfo<RISCVMachineFunctionInfo>();

      // If this is the entry block and the register is livein, see if we know
      // it is sign extended.
      if (MI->getParent() == &MF->front()) {
        Register VReg = MI->getOperand(0).getReg();
        if (MF->getRegInfo().isLiveIn(VReg) && RVFI->isSExt32Register(VReg))
          continue;
      }

      Register CopySrcReg = MI->getOperand(1).getReg();
      if (CopySrcReg == RISCV::X10) {
        // For a method return value, we check the ZExt/SExt flags in attribute.
        // We assume the following code sequence for method call.
        // PseudoCALL @bar, ...
        // ADJCALLSTACKUP 0, 0, implicit-def dead $x2, implicit $x2
        // %0:gpr = COPY $x10
        //
        // We use the PseudoCall to look up the IR function being called to find
        // its return attributes.
        const MachineBasicBlock *MBB = MI->getParent();
        auto II = MI->getIterator();
        if (II == MBB->instr_begin() ||
            (--II)->getOpcode() != RISCV::ADJCALLSTACKUP)
          return false;

        const MachineInstr &CallMI = *(--II);
        if (!CallMI.isCall() || !CallMI.getOperand(0).isGlobal())
          return false;

        auto *CalleeFn =
            dyn_cast_if_present<Function>(CallMI.getOperand(0).getGlobal());
        if (!CalleeFn)
          return false;

        auto *IntTy = dyn_cast<IntegerType>(CalleeFn->getReturnType());
        if (!IntTy)
          return false;

        const AttributeSet &Attrs = CalleeFn->getAttributes().getRetAttrs();
        unsigned BitWidth = IntTy->getBitWidth();
        // signext on a <=32-bit return, or zeroext on a <32-bit return,
        // guarantees bits 63:32 mirror bit 31.
        if ((BitWidth <= 32 && Attrs.hasAttribute(Attribute::SExt)) ||
            (BitWidth < 32 && Attrs.hasAttribute(Attribute::ZExt)))
          continue;
      }

      if (!AddRegDefToWorkList(CopySrcReg))
        return false;

      break;
    }

    // For these, we just need to check if the 1st operand is sign extended.
    case RISCV::BCLRI:
    case RISCV::BINVI:
    case RISCV::BSETI:
      // Touching bit 31 or above can change the upper half.
      if (MI->getOperand(2).getImm() >= 31)
        return false;
      [[fallthrough]];
    case RISCV::REM:
    case RISCV::ANDI:
    case RISCV::ORI:
    case RISCV::XORI:
      // |Remainder| is always <= |Dividend|. If D is 32-bit, then so is R.
      // DIV doesn't work because of the edge case 0xf..f 8000 0000 / (long)-1
      // Logical operations use a sign extended 12-bit immediate.
      if (!AddRegDefToWorkList(MI->getOperand(1).getReg()))
        return false;

      break;
    case RISCV::PseudoCCADDW:
    case RISCV::PseudoCCSUBW:
      // Returns operand 4 or an ADDW/SUBW of operands 5 and 6. We only need to
      // check if operand 4 is sign extended.
      if (!AddRegDefToWorkList(MI->getOperand(4).getReg()))
        return false;
      break;
    case RISCV::REMU:
    case RISCV::AND:
    case RISCV::OR:
    case RISCV::XOR:
    case RISCV::ANDN:
    case RISCV::ORN:
    case RISCV::XNOR:
    case RISCV::MAX:
    case RISCV::MAXU:
    case RISCV::MIN:
    case RISCV::MINU:
    case RISCV::PseudoCCMOVGPR:
    case RISCV::PseudoCCAND:
    case RISCV::PseudoCCOR:
    case RISCV::PseudoCCXOR:
    case RISCV::PHI: {
      // If all incoming values are sign-extended, the output of AND, OR, XOR,
      // MIN, MAX, or PHI is also sign-extended.

      // The input registers for PHI are operand 1, 3, ...
      // The input registers for PseudoCCMOVGPR are 4 and 5.
      // The input registers for PseudoCCAND/OR/XOR are 4, 5, and 6.
      // The input registers for others are operand 1 and 2.
      unsigned B = 1, E = 3, D = 1;
      switch (MI->getOpcode()) {
      case RISCV::PHI:
        E = MI->getNumOperands();
        D = 2;
        break;
      case RISCV::PseudoCCMOVGPR:
        B = 4;
        E = 6;
        break;
      case RISCV::PseudoCCAND:
      case RISCV::PseudoCCOR:
      case RISCV::PseudoCCXOR:
        B = 4;
        E = 7;
        break;
      }

      for (unsigned I = B; I != E; I += D) {
        if (!MI->getOperand(I).isReg())
          return false;

        if (!AddRegDefToWorkList(MI->getOperand(I).getReg()))
          return false;
      }

      break;
    }

    case RISCV::VT_MASKC:
    case RISCV::VT_MASKCN:
      // Instructions return zero or operand 1. Result is sign extended if
      // operand 1 is sign extended.
      if (!AddRegDefToWorkList(MI->getOperand(1).getReg()))
        return false;
      break;

    // With these opcode, we can "fix" them with the W-version
    // if we know all users of the result only rely on bits 31:0
    case RISCV::SLLI:
      // SLLIW reads the lowest 5 bits, while SLLI reads lowest 6 bits
      if (MI->getOperand(2).getImm() >= 32)
        return false;
      [[fallthrough]];
    case RISCV::ADDI:
    case RISCV::ADD:
    case RISCV::LD:
    case RISCV::LWU:
    case RISCV::MUL:
    case RISCV::SUB:
      if (TII.hasAllWUsers(*MI, MRI)) {
        FixableDef.insert(MI);
        break;
      }
      return false;
    }
  }

  // If we get here, then every node we visited produces a sign extended value
  // or propagated sign extended values. So the result must be sign extended.
  return true;
}
+
+static unsigned getWOp(unsigned Opcode) {
+ switch (Opcode) {
+ case RISCV::ADDI:
+ return RISCV::ADDIW;
+ case RISCV::ADD:
+ return RISCV::ADDW;
+ case RISCV::LD:
+ case RISCV::LWU:
+ return RISCV::LW;
+ case RISCV::MUL:
+ return RISCV::MULW;
+ case RISCV::SLLI:
+ return RISCV::SLLIW;
+ case RISCV::SUB:
+ return RISCV::SUBW;
+ default:
+ llvm_unreachable("Unexpected opcode for replacement with W variant");
+ }
+}
+
// Scan every block for sext.w (ADDIW rd, rs, 0) instructions and delete the
// redundant ones, rewriting fixable defs to their W forms when that is what
// makes the sext.w redundant. RV64-only.
bool RISCVSExtWRemoval::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()) || DisableSExtWRemoval)
    return false;

  MachineRegisterInfo &MRI = MF.getRegInfo();
  const RISCVSubtarget &ST = MF.getSubtarget<RISCVSubtarget>();
  const RISCVInstrInfo &TII = *ST.getInstrInfo();

  if (!ST.is64Bit())
    return false;

  bool MadeChange = false;

  for (MachineBasicBlock &MBB : MF) {
    // Advance the iterator before possibly erasing MI below.
    for (auto I = MBB.begin(), IE = MBB.end(); I != IE;) {
      MachineInstr *MI = &*I++;

      // We're looking for the sext.w pattern ADDIW rd, rs1, 0.
      if (!RISCV::isSEXT_W(*MI))
        continue;

      Register SrcReg = MI->getOperand(1).getReg();

      SmallPtrSet<MachineInstr *, 4> FixableDefs;

      // If all users only use the lower bits, this sext.w is redundant.
      // Or if all definitions reaching MI sign-extend their output,
      // then sext.w is redundant.
      if (!TII.hasAllWUsers(*MI, MRI) &&
          !isSignExtendedW(SrcReg, MRI, TII, FixableDefs))
        continue;

      Register DstReg = MI->getOperand(0).getReg();
      // Replacing DstReg with SrcReg requires the source to satisfy the
      // destination's register class constraints.
      if (!MRI.constrainRegClass(SrcReg, MRI.getRegClass(DstReg)))
        continue;

      // Convert Fixable instructions to their W versions.
      for (MachineInstr *Fixable : FixableDefs) {
        LLVM_DEBUG(dbgs() << "Replacing " << *Fixable);
        Fixable->setDesc(TII.get(getWOp(Fixable->getOpcode())));
        // The W form wraps at 32 bits, so the 64-bit wrap/exactness flags
        // no longer hold.
        Fixable->clearFlag(MachineInstr::MIFlag::NoSWrap);
        Fixable->clearFlag(MachineInstr::MIFlag::NoUWrap);
        Fixable->clearFlag(MachineInstr::MIFlag::IsExact);
        LLVM_DEBUG(dbgs() << " with " << *Fixable);
        ++NumTransformedToWInstrs;
      }

      LLVM_DEBUG(dbgs() << "Removing redundant sign-extension\n");
      MRI.replaceRegWith(DstReg, SrcReg);
      MRI.clearKillFlags(SrcReg);
      MI->eraseFromParent();
      ++NumRemovedSExtW;
      MadeChange = true;
    }
  }

  return MadeChange;
}
diff --git a/contrib/libs/llvm16/lib/Target/RISCV/RISCVStripWSuffix.cpp b/contrib/libs/llvm16/lib/Target/RISCV/RISCVStripWSuffix.cpp
new file mode 100644
index 0000000000..14ab9c2dd6
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/RISCV/RISCVStripWSuffix.cpp
@@ -0,0 +1,87 @@
+//===-------------- RISCVStripWSuffix.cpp - -w Suffix Removal -------------===//
+//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===---------------------------------------------------------------------===//
+//
+// This pass removes the -w suffix from each addiw and slliw instructions
+// whenever all users are dependent only on the lower word of the result of the
+// instruction. We do this only for addiw and slliw because the -w forms are
+// less compressible.
+//
+//===---------------------------------------------------------------------===//
+
+#include "RISCV.h"
+#include "RISCVMachineFunctionInfo.h"
+
+using namespace llvm;
+
// Escape hatch to disable the whole pass for debugging/triage.
static cl::opt<bool> DisableStripWSuffix("riscv-disable-strip-w-suffix",
                                         cl::desc("Disable strip W suffix"),
                                         cl::init(false), cl::Hidden);
+
+namespace {
+
// Machine-function pass that rewrites ADDW/SLLIW to ADD/SLLI when only the
// low 32 bits of the result are used; see the file header for rationale.
class RISCVStripWSuffix : public MachineFunctionPass {
public:
  static char ID;

  RISCVStripWSuffix() : MachineFunctionPass(ID) {
    initializeRISCVStripWSuffixPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  // Only instruction descriptors are rewritten; the CFG is untouched.
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  StringRef getPassName() const override { return "RISCV Strip W Suffix"; }
};
+
+} // end anonymous namespace
+
// Pass identity and registration with the LLVM pass registry.
char RISCVStripWSuffix::ID = 0;
INITIALIZE_PASS(RISCVStripWSuffix, "riscv-strip-w-suffix",
                "RISCV Strip W Suffix", false, false)
+
// Factory used when the RISC-V target builds its codegen pipeline.
FunctionPass *llvm::createRISCVStripWSuffixPass() {
  return new RISCVStripWSuffix();
}
+
+bool RISCVStripWSuffix::runOnMachineFunction(MachineFunction &MF) {
+ if (skipFunction(MF.getFunction()) || DisableStripWSuffix)
+ return false;
+
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ const RISCVSubtarget &ST = MF.getSubtarget<RISCVSubtarget>();
+ const RISCVInstrInfo &TII = *ST.getInstrInfo();
+
+ if (!ST.is64Bit())
+ return false;
+
+ bool MadeChange = false;
+ for (MachineBasicBlock &MBB : MF) {
+ for (auto I = MBB.begin(), IE = MBB.end(); I != IE; ++I) {
+ MachineInstr &MI = *I;
+
+ switch (MI.getOpcode()) {
+ case RISCV::ADDW:
+ case RISCV::SLLIW:
+ if (TII.hasAllWUsers(MI, MRI)) {
+ unsigned Opc =
+ MI.getOpcode() == RISCV::ADDW ? RISCV::ADD : RISCV::SLLI;
+ MI.setDesc(TII.get(Opc));
+ MadeChange = true;
+ }
+ break;
+ }
+ }
+ }
+
+ return MadeChange;
+}
diff --git a/contrib/libs/llvm16/lib/Target/RISCV/RISCVSubtarget.cpp b/contrib/libs/llvm16/lib/Target/RISCV/RISCVSubtarget.cpp
new file mode 100644
index 0000000000..d0bb4634d0
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/RISCV/RISCVSubtarget.cpp
@@ -0,0 +1,180 @@
+//===-- RISCVSubtarget.cpp - RISCV Subtarget Information ------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the RISCV specific subclass of TargetSubtargetInfo.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RISCVSubtarget.h"
+#include "RISCV.h"
+#include "RISCVFrameLowering.h"
+#include "RISCVMacroFusion.h"
+#include "RISCVTargetMachine.h"
+#include "GISel/RISCVCallLowering.h"
+#include "GISel/RISCVLegalizerInfo.h"
+#include "GISel/RISCVRegisterBankInfo.h"
+#include "llvm/MC/TargetRegistry.h"
+#include "llvm/Support/ErrorHandling.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "riscv-subtarget"
+
+#define GET_SUBTARGETINFO_TARGET_DESC
+#define GET_SUBTARGETINFO_CTOR
+#include "RISCVGenSubtargetInfo.inc"
+
// Opt-in flag for subregister liveness tracking (off by default; see
// enableSubRegLiveness()).
static cl::opt<bool> EnableSubRegLiveness("riscv-enable-subreg-liveness",
                                          cl::init(false), cl::Hidden);

static cl::opt<unsigned> RVVVectorLMULMax(
    "riscv-v-fixed-length-vector-lmul-max",
    cl::desc("The maximum LMUL value to use for fixed length vectors. "
             "Fractional LMUL values are not supported."),
    cl::init(8), cl::Hidden);

static cl::opt<bool> RISCVDisableUsingConstantPoolForLargeInts(
    "riscv-disable-using-constant-pool-for-large-ints",
    cl::desc("Disable using constant pool for large integers."),
    cl::init(false), cl::Hidden);

// 0 means "derive from the scheduling model"; see getMaxBuildIntsCost().
static cl::opt<unsigned> RISCVMaxBuildIntsCost(
    "riscv-max-build-ints-cost",
    cl::desc("The maximum cost used for building integers."), cl::init(0),
    cl::Hidden);
+
// Out-of-line virtual method to pin the vtable to this translation unit.
void RISCVSubtarget::anchor() {}
+
+RISCVSubtarget &
+RISCVSubtarget::initializeSubtargetDependencies(const Triple &TT, StringRef CPU,
+ StringRef TuneCPU, StringRef FS,
+ StringRef ABIName) {
+ // Determine default and user-specified characteristics
+ bool Is64Bit = TT.isArch64Bit();
+ if (CPU.empty() || CPU == "generic")
+ CPU = Is64Bit ? "generic-rv64" : "generic-rv32";
+
+ if (TuneCPU.empty())
+ TuneCPU = CPU;
+
+ ParseSubtargetFeatures(CPU, TuneCPU, FS);
+ if (Is64Bit) {
+ XLenVT = MVT::i64;
+ XLen = 64;
+ }
+
+ TargetABI = RISCVABI::computeTargetABI(TT, getFeatureBits(), ABIName);
+ RISCVFeatures::validate(TT, getFeatureBits());
+ return *this;
+}
+
// Construct the subtarget. NOTE: the member-initializer order is load-bearing:
// initializeSubtargetDependencies() is invoked inside FrameLowering's
// initializer so feature/ABI state is resolved before InstrInfo/RegInfo/TLInfo
// are constructed.
RISCVSubtarget::RISCVSubtarget(const Triple &TT, StringRef CPU,
                               StringRef TuneCPU, StringRef FS,
                               StringRef ABIName, unsigned RVVVectorBitsMin,
                               unsigned RVVVectorBitsMax,
                               const TargetMachine &TM)
    : RISCVGenSubtargetInfo(TT, CPU, TuneCPU, FS),
      RVVVectorBitsMin(RVVVectorBitsMin), RVVVectorBitsMax(RVVVectorBitsMax),
      FrameLowering(
          initializeSubtargetDependencies(TT, CPU, TuneCPU, FS, ABIName)),
      InstrInfo(*this), RegInfo(getHwMode()), TLInfo(TM, *this) {
  // Some platforms reserve x18 (the platform/shadow-call-stack register).
  if (RISCV::isX18ReservedByDefault(TT))
    UserReservedRegister.set(RISCV::X18);

  // Set up the GlobalISel support objects.
  CallLoweringInfo.reset(new RISCVCallLowering(*getTargetLowering()));
  Legalizer.reset(new RISCVLegalizerInfo(*this));

  auto *RBI = new RISCVRegisterBankInfo(*getRegisterInfo());
  RegBankInfo.reset(RBI);
  InstSelector.reset(createRISCVInstructionSelector(
      *static_cast<const RISCVTargetMachine *>(&TM), *this, *RBI));
}
+
// GlobalISel accessor: call-lowering implementation built in the constructor.
const CallLowering *RISCVSubtarget::getCallLowering() const {
  return CallLoweringInfo.get();
}
+
// GlobalISel accessor: instruction selector built in the constructor.
InstructionSelector *RISCVSubtarget::getInstructionSelector() const {
  return InstSelector.get();
}
+
// GlobalISel accessor: legalizer built in the constructor.
const LegalizerInfo *RISCVSubtarget::getLegalizerInfo() const {
  return Legalizer.get();
}
+
// GlobalISel accessor: register-bank info built in the constructor.
const RegisterBankInfo *RISCVSubtarget::getRegBankInfo() const {
  return RegBankInfo.get();
}
+
// Large-integer materialization may go through the constant pool unless
// disabled on the command line.
bool RISCVSubtarget::useConstantPoolForLargeInts() const {
  return !RISCVDisableUsingConstantPoolForLargeInts;
}
+
+unsigned RISCVSubtarget::getMaxBuildIntsCost() const {
+ // Loading integer from constant pool needs two instructions (the reason why
+ // the minimum cost is 2): an address calculation instruction and a load
+ // instruction. Usually, address calculation and instructions used for
+ // building integers (addi, slli, etc.) can be done in one cycle, so here we
+ // set the default cost to (LoadLatency + 1) if no threshold is provided.
+ return RISCVMaxBuildIntsCost == 0
+ ? getSchedModel().LoadLatency + 1
+ : std::max<unsigned>(2, RISCVMaxBuildIntsCost);
+}
+
// Upper bound (in bits) on the RVV register length, as supplied via
// -riscv-v-vector-bits-max; 0 means "no upper bound".
unsigned RISCVSubtarget::getMaxRVVVectorSizeInBits() const {
  assert(hasVInstructions() &&
         "Tried to get vector length without Zve or V extension support!");

  // ZvlLen specifies the minimum required vlen. The upper bound provided by
  // riscv-v-vector-bits-max should be no less than it.
  if (RVVVectorBitsMax != 0 && RVVVectorBitsMax < ZvlLen)
    report_fatal_error("riscv-v-vector-bits-max specified is lower "
                       "than the Zvl*b limitation");

  return RVVVectorBitsMax;
}
+
// Lower bound (in bits) on the RVV register length. -1U ("unset sentinel")
// falls back to the Zvl*b-implied minimum; 0 disables fixed-length RVV use.
unsigned RISCVSubtarget::getMinRVVVectorSizeInBits() const {
  assert(hasVInstructions() &&
         "Tried to get vector length without Zve or V extension support!");

  if (RVVVectorBitsMin == -1U)
    return ZvlLen;

  // ZvlLen specifies the minimum required vlen. The lower bound provided by
  // riscv-v-vector-bits-min should be no less than it.
  if (RVVVectorBitsMin != 0 && RVVVectorBitsMin < ZvlLen)
    report_fatal_error("riscv-v-vector-bits-min specified is lower "
                       "than the Zvl*b limitation");

  return RVVVectorBitsMin;
}
+
// Largest LMUL grouping usable when lowering fixed-length vectors, clamped to
// [1, 8] and rounded down to a power of two (the assert already guarantees
// the cl::opt value is valid; the clamp is belt-and-braces).
unsigned RISCVSubtarget::getMaxLMULForFixedLengthVectors() const {
  assert(hasVInstructions() &&
         "Tried to get vector length without Zve or V extension support!");
  assert(RVVVectorLMULMax <= 8 && isPowerOf2_32(RVVVectorLMULMax) &&
         "V extension requires a LMUL to be at most 8 and a power of 2!");
  return PowerOf2Floor(
      std::max<unsigned>(std::min<unsigned>(RVVVectorLMULMax, 8), 1));
}
+
// Fixed-length vectors lower to RVV only when vector instructions exist and a
// nonzero minimum VLEN is known (see getMinRVVVectorSizeInBits()).
bool RISCVSubtarget::useRVVForFixedLengthVectors() const {
  return hasVInstructions() && getMinRVVVectorSizeInBits() != 0;
}
+
// Subregister liveness is opt-in via -riscv-enable-subreg-liveness.
bool RISCVSubtarget::enableSubRegLiveness() const {
  // FIXME: Enable subregister liveness by default for RVV to better handle
  // LMUL>1 and segment load/store.
  return EnableSubRegLiveness;
}
+
// Register the RISC-V macro-fusion DAG mutation with the post-RA scheduler.
void RISCVSubtarget::getPostRAMutations(
    std::vector<std::unique_ptr<ScheduleDAGMutation>> &Mutations) const {
  Mutations.push_back(createRISCVMacroFusionDAGMutation());
}
diff --git a/contrib/libs/llvm16/lib/Target/RISCV/RISCVSubtarget.h b/contrib/libs/llvm16/lib/Target/RISCV/RISCVSubtarget.h
new file mode 100644
index 0000000000..290c7b03ea
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/RISCV/RISCVSubtarget.h
@@ -0,0 +1,197 @@
+//===-- RISCVSubtarget.h - Define Subtarget for the RISCV -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the RISCV specific subclass of TargetSubtargetInfo.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_RISCV_RISCVSUBTARGET_H
+#define LLVM_LIB_TARGET_RISCV_RISCVSUBTARGET_H
+
+#include "MCTargetDesc/RISCVBaseInfo.h"
+#include "RISCVFrameLowering.h"
+#include "RISCVISelLowering.h"
+#include "RISCVInstrInfo.h"
+#include "llvm/CodeGen/GlobalISel/CallLowering.h"
+#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
+#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
+#include "llvm/CodeGen/RegisterBankInfo.h"
+#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
+#include "llvm/CodeGen/TargetSubtargetInfo.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/Target/TargetMachine.h"
+
+#define GET_SUBTARGETINFO_HEADER
+#include "RISCVGenSubtargetInfo.inc"
+
+namespace llvm {
+class StringRef;
+
+class RISCVSubtarget : public RISCVGenSubtargetInfo {
+public:
+ enum RISCVProcFamilyEnum : uint8_t {
+ Others,
+ SiFive7,
+ };
+
+private:
+ virtual void anchor();
+
+ RISCVProcFamilyEnum RISCVProcFamily = Others;
+
+#define GET_SUBTARGETINFO_MACRO(ATTRIBUTE, DEFAULT, GETTER) \
+ bool ATTRIBUTE = DEFAULT;
+#include "RISCVGenSubtargetInfo.inc"
+
+ unsigned XLen = 32;
+ unsigned ZvlLen = 0;
+ MVT XLenVT = MVT::i32;
+ unsigned RVVVectorBitsMin;
+ unsigned RVVVectorBitsMax;
+ uint8_t MaxInterleaveFactor = 2;
+ RISCVABI::ABI TargetABI = RISCVABI::ABI_Unknown;
+ std::bitset<RISCV::NUM_TARGET_REGS> UserReservedRegister;
+ RISCVFrameLowering FrameLowering;
+ RISCVInstrInfo InstrInfo;
+ RISCVRegisterInfo RegInfo;
+ RISCVTargetLowering TLInfo;
+ SelectionDAGTargetInfo TSInfo;
+
+ /// Initializes using the passed in CPU and feature strings so that we can
+ /// use initializer lists for subtarget initialization.
+ RISCVSubtarget &initializeSubtargetDependencies(const Triple &TT,
+ StringRef CPU,
+ StringRef TuneCPU,
+ StringRef FS,
+ StringRef ABIName);
+
+public:
+ // Initializes the data members to match that of the specified triple.
+ RISCVSubtarget(const Triple &TT, StringRef CPU, StringRef TuneCPU,
+ StringRef FS, StringRef ABIName, unsigned RVVVectorBitsMin,
+ unsigned RVVVectorLMULMax, const TargetMachine &TM);
+
+ // Parses features string setting specified subtarget options. The
+ // definition of this function is auto-generated by tblgen.
+ void ParseSubtargetFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS);
+
+ const RISCVFrameLowering *getFrameLowering() const override {
+ return &FrameLowering;
+ }
+ const RISCVInstrInfo *getInstrInfo() const override { return &InstrInfo; }
+ const RISCVRegisterInfo *getRegisterInfo() const override {
+ return &RegInfo;
+ }
+ const RISCVTargetLowering *getTargetLowering() const override {
+ return &TLInfo;
+ }
+ const SelectionDAGTargetInfo *getSelectionDAGInfo() const override {
+ return &TSInfo;
+ }
+ bool enableMachineScheduler() const override { return true; }
+
+ /// Returns RISCV processor family.
+ /// Avoid this function! CPU specifics should be kept local to this class
+ /// and preferably modeled with SubtargetFeatures or properties in
+ /// initializeProperties().
+ RISCVProcFamilyEnum getProcFamily() const { return RISCVProcFamily; }
+
+#define GET_SUBTARGETINFO_MACRO(ATTRIBUTE, DEFAULT, GETTER) \
+ bool GETTER() const { return ATTRIBUTE; }
+#include "RISCVGenSubtargetInfo.inc"
+
+ bool hasStdExtCOrZca() const { return HasStdExtC || HasStdExtZca; }
+ bool hasStdExtZvl() const { return ZvlLen != 0; }
+ bool hasStdExtZfhOrZfhmin() const { return HasStdExtZfh || HasStdExtZfhmin; }
+ bool is64Bit() const { return HasRV64; }
+ MVT getXLenVT() const { return XLenVT; }
+ unsigned getXLen() const { return XLen; }
+ unsigned getFLen() const {
+ if (HasStdExtD)
+ return 64;
+
+ if (HasStdExtF)
+ return 32;
+
+ return 0;
+ }
+ unsigned getELEN() const {
+ assert(hasVInstructions() && "Expected V extension");
+ return hasVInstructionsI64() ? 64 : 32;
+ }
+ unsigned getRealMinVLen() const {
+ unsigned VLen = getMinRVVVectorSizeInBits();
+ return VLen == 0 ? ZvlLen : VLen;
+ }
+ unsigned getRealMaxVLen() const {
+ unsigned VLen = getMaxRVVVectorSizeInBits();
+ return VLen == 0 ? 65536 : VLen;
+ }
+ RISCVABI::ABI getTargetABI() const { return TargetABI; }
+ bool isRegisterReservedByUser(Register i) const {
+ assert(i < RISCV::NUM_TARGET_REGS && "Register out of range");
+ return UserReservedRegister[i];
+ }
+
+ bool hasMacroFusion() const { return hasLUIADDIFusion(); }
+
+ // Vector codegen related methods.
+ bool hasVInstructions() const { return HasStdExtZve32x; }
+ bool hasVInstructionsI64() const { return HasStdExtZve64x; }
+ bool hasVInstructionsF16() const {
+ return HasStdExtZvfh && hasStdExtZfhOrZfhmin();
+ }
+ // FIXME: Consider Zfinx in the future
+ bool hasVInstructionsF32() const { return HasStdExtZve32f && HasStdExtF; }
+ // FIXME: Consider Zdinx in the future
+ bool hasVInstructionsF64() const { return HasStdExtZve64d && HasStdExtD; }
+ // F16 and F64 both require F32.
+ bool hasVInstructionsAnyF() const { return hasVInstructionsF32(); }
+ unsigned getMaxInterleaveFactor() const {
+ return hasVInstructions() ? MaxInterleaveFactor : 1;
+ }
+
+protected:
+ // GlobalISel related APIs.
+ std::unique_ptr<CallLowering> CallLoweringInfo;
+ std::unique_ptr<InstructionSelector> InstSelector;
+ std::unique_ptr<LegalizerInfo> Legalizer;
+ std::unique_ptr<RegisterBankInfo> RegBankInfo;
+
+ // Return the known range for the bit length of RVV data registers as set
+ // at the command line. A value of 0 means nothing is known about that particular
+ // limit beyond what's implied by the architecture.
+ // NOTE: Please use getRealMinVLen and getRealMaxVLen instead!
+ unsigned getMaxRVVVectorSizeInBits() const;
+ unsigned getMinRVVVectorSizeInBits() const;
+
+public:
+ const CallLowering *getCallLowering() const override;
+ InstructionSelector *getInstructionSelector() const override;
+ const LegalizerInfo *getLegalizerInfo() const override;
+ const RegisterBankInfo *getRegBankInfo() const override;
+
+ bool isTargetFuchsia() const { return getTargetTriple().isOSFuchsia(); }
+
+ bool useConstantPoolForLargeInts() const;
+
+ // Maximum cost used for building integers, integers will be put into constant
+ // pool if exceeded.
+ unsigned getMaxBuildIntsCost() const;
+
+ unsigned getMaxLMULForFixedLengthVectors() const;
+ bool useRVVForFixedLengthVectors() const;
+
+ bool enableSubRegLiveness() const override;
+
+ void getPostRAMutations(std::vector<std::unique_ptr<ScheduleDAGMutation>>
+ &Mutations) const override;
+};
+} // End llvm namespace
+
+#endif
diff --git a/contrib/libs/llvm16/lib/Target/RISCV/RISCVTargetMachine.cpp b/contrib/libs/llvm16/lib/Target/RISCV/RISCVTargetMachine.cpp
new file mode 100644
index 0000000000..cc88140666
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/RISCV/RISCVTargetMachine.cpp
@@ -0,0 +1,382 @@
+//===-- RISCVTargetMachine.cpp - Define TargetMachine for RISCV -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implements the info about RISCV target spec.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RISCVTargetMachine.h"
+#include "MCTargetDesc/RISCVBaseInfo.h"
+#include "RISCV.h"
+#include "RISCVMachineFunctionInfo.h"
+#include "RISCVMacroFusion.h"
+#include "RISCVTargetObjectFile.h"
+#include "RISCVTargetTransformInfo.h"
+#include "TargetInfo/RISCVTargetInfo.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
+#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
+#include "llvm/CodeGen/GlobalISel/Legalizer.h"
+#include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
+#include "llvm/CodeGen/MIRParser/MIParser.h"
+#include "llvm/CodeGen/MIRYamlMapping.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
+#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/MC/TargetRegistry.h"
+#include "llvm/Support/FormattedStream.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/Transforms/IPO.h"
+#include <optional>
+using namespace llvm;
+
+static cl::opt<bool> EnableRedundantCopyElimination(
+ "riscv-enable-copyelim",
+ cl::desc("Enable the redundant copy elimination pass"), cl::init(true),
+ cl::Hidden);
+
+// FIXME: Unify control over GlobalMerge.
+static cl::opt<cl::boolOrDefault>
+ EnableGlobalMerge("riscv-enable-global-merge", cl::Hidden,
+ cl::desc("Enable the global merge pass"));
+
+static cl::opt<bool>
+ EnableMachineCombiner("riscv-enable-machine-combiner",
+ cl::desc("Enable the machine combiner pass"),
+ cl::init(true), cl::Hidden);
+
+static cl::opt<unsigned> RVVVectorBitsMaxOpt(
+ "riscv-v-vector-bits-max",
+ cl::desc("Assume V extension vector registers are at most this big, "
+ "with zero meaning no maximum size is assumed."),
+ cl::init(0), cl::Hidden);
+
+static cl::opt<int> RVVVectorBitsMinOpt(
+ "riscv-v-vector-bits-min",
+ cl::desc("Assume V extension vector registers are at least this big, "
+ "with zero meaning no minimum size is assumed. A value of -1 "
+ "means use Zvl*b extension. This is primarily used to enable "
+ "autovectorization with fixed width vectors."),
+ cl::init(-1), cl::Hidden);
+
+extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeRISCVTarget() {
+ RegisterTargetMachine<RISCVTargetMachine> X(getTheRISCV32Target());
+ RegisterTargetMachine<RISCVTargetMachine> Y(getTheRISCV64Target());
+ auto *PR = PassRegistry::getPassRegistry();
+ initializeGlobalISel(*PR);
+ initializeRISCVMakeCompressibleOptPass(*PR);
+ initializeRISCVGatherScatterLoweringPass(*PR);
+ initializeRISCVCodeGenPreparePass(*PR);
+ initializeRISCVMergeBaseOffsetOptPass(*PR);
+ initializeRISCVSExtWRemovalPass(*PR);
+ initializeRISCVStripWSuffixPass(*PR);
+ initializeRISCVPreRAExpandPseudoPass(*PR);
+ initializeRISCVExpandPseudoPass(*PR);
+ initializeRISCVInsertVSETVLIPass(*PR);
+ initializeRISCVDAGToDAGISelPass(*PR);
+}
+
+static StringRef computeDataLayout(const Triple &TT) {
+ if (TT.isArch64Bit())
+ return "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128";
+ assert(TT.isArch32Bit() && "only RV32 and RV64 are currently supported");
+ return "e-m:e-p:32:32-i64:64-n32-S128";
+}
+
+static Reloc::Model getEffectiveRelocModel(const Triple &TT,
+ std::optional<Reloc::Model> RM) {
+ return RM.value_or(Reloc::Static);
+}
+
+RISCVTargetMachine::RISCVTargetMachine(const Target &T, const Triple &TT,
+ StringRef CPU, StringRef FS,
+ const TargetOptions &Options,
+ std::optional<Reloc::Model> RM,
+ std::optional<CodeModel::Model> CM,
+ CodeGenOpt::Level OL, bool JIT)
+ : LLVMTargetMachine(T, computeDataLayout(TT), TT, CPU, FS, Options,
+ getEffectiveRelocModel(TT, RM),
+ getEffectiveCodeModel(CM, CodeModel::Small), OL),
+ TLOF(std::make_unique<RISCVELFTargetObjectFile>()) {
+ initAsmInfo();
+
+ // RISC-V supports the MachineOutliner.
+ setMachineOutliner(true);
+ setSupportsDefaultOutlining(true);
+}
+
+const RISCVSubtarget *
+RISCVTargetMachine::getSubtargetImpl(const Function &F) const {
+ Attribute CPUAttr = F.getFnAttribute("target-cpu");
+ Attribute TuneAttr = F.getFnAttribute("tune-cpu");
+ Attribute FSAttr = F.getFnAttribute("target-features");
+
+ std::string CPU =
+ CPUAttr.isValid() ? CPUAttr.getValueAsString().str() : TargetCPU;
+ std::string TuneCPU =
+ TuneAttr.isValid() ? TuneAttr.getValueAsString().str() : CPU;
+ std::string FS =
+ FSAttr.isValid() ? FSAttr.getValueAsString().str() : TargetFS;
+
+ unsigned RVVBitsMin = RVVVectorBitsMinOpt;
+ unsigned RVVBitsMax = RVVVectorBitsMaxOpt;
+
+ Attribute VScaleRangeAttr = F.getFnAttribute(Attribute::VScaleRange);
+ if (VScaleRangeAttr.isValid()) {
+ if (!RVVVectorBitsMinOpt.getNumOccurrences())
+ RVVBitsMin = VScaleRangeAttr.getVScaleRangeMin() * RISCV::RVVBitsPerBlock;
+ std::optional<unsigned> VScaleMax = VScaleRangeAttr.getVScaleRangeMax();
+ if (VScaleMax.has_value() && !RVVVectorBitsMaxOpt.getNumOccurrences())
+ RVVBitsMax = *VScaleMax * RISCV::RVVBitsPerBlock;
+ }
+
+ if (RVVBitsMin != -1U) {
+ // FIXME: Change to >= 32 when VLEN = 32 is supported.
+ assert((RVVBitsMin == 0 || (RVVBitsMin >= 64 && RVVBitsMin <= 65536 &&
+ isPowerOf2_32(RVVBitsMin))) &&
+ "V or Zve* extension requires vector length to be in the range of "
+ "64 to 65536 and a power 2!");
+ assert((RVVBitsMax >= RVVBitsMin || RVVBitsMax == 0) &&
+ "Minimum V extension vector length should not be larger than its "
+ "maximum!");
+ }
+ assert((RVVBitsMax == 0 || (RVVBitsMax >= 64 && RVVBitsMax <= 65536 &&
+ isPowerOf2_32(RVVBitsMax))) &&
+ "V or Zve* extension requires vector length to be in the range of "
+ "64 to 65536 and a power 2!");
+
+ if (RVVBitsMin != -1U) {
+ if (RVVBitsMax != 0) {
+ RVVBitsMin = std::min(RVVBitsMin, RVVBitsMax);
+ RVVBitsMax = std::max(RVVBitsMin, RVVBitsMax);
+ }
+
+ RVVBitsMin =
+ PowerOf2Floor((RVVBitsMin < 64 || RVVBitsMin > 65536) ? 0 : RVVBitsMin);
+ }
+ RVVBitsMax =
+ PowerOf2Floor((RVVBitsMax < 64 || RVVBitsMax > 65536) ? 0 : RVVBitsMax);
+
+ SmallString<512> Key;
+ Key += "RVVMin";
+ Key += std::to_string(RVVBitsMin);
+ Key += "RVVMax";
+ Key += std::to_string(RVVBitsMax);
+ Key += CPU;
+ Key += TuneCPU;
+ Key += FS;
+ auto &I = SubtargetMap[Key];
+ if (!I) {
+ // This needs to be done before we create a new subtarget since any
+ // creation will depend on the TM and the code generation flags on the
+ // function that reside in TargetOptions.
+ resetTargetOptions(F);
+ auto ABIName = Options.MCOptions.getABIName();
+ if (const MDString *ModuleTargetABI = dyn_cast_or_null<MDString>(
+ F.getParent()->getModuleFlag("target-abi"))) {
+ auto TargetABI = RISCVABI::getTargetABI(ABIName);
+ if (TargetABI != RISCVABI::ABI_Unknown &&
+ ModuleTargetABI->getString() != ABIName) {
+ report_fatal_error("-target-abi option != target-abi module flag");
+ }
+ ABIName = ModuleTargetABI->getString();
+ }
+ I = std::make_unique<RISCVSubtarget>(
+ TargetTriple, CPU, TuneCPU, FS, ABIName, RVVBitsMin, RVVBitsMax, *this);
+ }
+ return I.get();
+}
+
+MachineFunctionInfo *RISCVTargetMachine::createMachineFunctionInfo(
+ BumpPtrAllocator &Allocator, const Function &F,
+ const TargetSubtargetInfo *STI) const {
+ return RISCVMachineFunctionInfo::create<RISCVMachineFunctionInfo>(Allocator,
+ F, STI);
+}
+
+TargetTransformInfo
+RISCVTargetMachine::getTargetTransformInfo(const Function &F) const {
+ return TargetTransformInfo(RISCVTTIImpl(this, F));
+}
+
+// A RISC-V hart has a single byte-addressable address space of 2^XLEN bytes
+// for all memory accesses, so it is reasonable to assume that an
+// implementation has no-op address space casts. If an implementation makes a
+// change to this, they can override it here.
+bool RISCVTargetMachine::isNoopAddrSpaceCast(unsigned SrcAS,
+ unsigned DstAS) const {
+ return true;
+}
+
+namespace {
+class RISCVPassConfig : public TargetPassConfig {
+public:
+ RISCVPassConfig(RISCVTargetMachine &TM, PassManagerBase &PM)
+ : TargetPassConfig(TM, PM) {}
+
+ RISCVTargetMachine &getRISCVTargetMachine() const {
+ return getTM<RISCVTargetMachine>();
+ }
+
+ ScheduleDAGInstrs *
+ createMachineScheduler(MachineSchedContext *C) const override {
+ const RISCVSubtarget &ST = C->MF->getSubtarget<RISCVSubtarget>();
+ if (ST.hasMacroFusion()) {
+ ScheduleDAGMILive *DAG = createGenericSchedLive(C);
+ DAG->addMutation(createRISCVMacroFusionDAGMutation());
+ return DAG;
+ }
+ return nullptr;
+ }
+
+ ScheduleDAGInstrs *
+ createPostMachineScheduler(MachineSchedContext *C) const override {
+ const RISCVSubtarget &ST = C->MF->getSubtarget<RISCVSubtarget>();
+ if (ST.hasMacroFusion()) {
+ ScheduleDAGMI *DAG = createGenericSchedPostRA(C);
+ DAG->addMutation(createRISCVMacroFusionDAGMutation());
+ return DAG;
+ }
+ return nullptr;
+ }
+
+ void addIRPasses() override;
+ bool addPreISel() override;
+ bool addInstSelector() override;
+ bool addIRTranslator() override;
+ bool addLegalizeMachineIR() override;
+ bool addRegBankSelect() override;
+ bool addGlobalInstructionSelect() override;
+ void addPreEmitPass() override;
+ void addPreEmitPass2() override;
+ void addPreSched2() override;
+ void addMachineSSAOptimization() override;
+ void addPreRegAlloc() override;
+ void addPostRegAlloc() override;
+};
+} // namespace
+
+TargetPassConfig *RISCVTargetMachine::createPassConfig(PassManagerBase &PM) {
+ return new RISCVPassConfig(*this, PM);
+}
+
+void RISCVPassConfig::addIRPasses() {
+ addPass(createAtomicExpandPass());
+
+ if (getOptLevel() != CodeGenOpt::None)
+ addPass(createRISCVGatherScatterLoweringPass());
+
+ if (getOptLevel() != CodeGenOpt::None)
+ addPass(createRISCVCodeGenPreparePass());
+
+ TargetPassConfig::addIRPasses();
+}
+
+bool RISCVPassConfig::addPreISel() {
+ if (TM->getOptLevel() != CodeGenOpt::None) {
+ // Add a barrier before instruction selection so that we will not get
+ // deleted block address after enabling default outlining. See D99707 for
+ // more details.
+ addPass(createBarrierNoopPass());
+ }
+
+ if (EnableGlobalMerge == cl::BOU_TRUE) {
+ addPass(createGlobalMergePass(TM, /* MaxOffset */ 2047,
+ /* OnlyOptimizeForSize */ false,
+ /* MergeExternalByDefault */ true));
+ }
+
+ return false;
+}
+
+bool RISCVPassConfig::addInstSelector() {
+ addPass(createRISCVISelDag(getRISCVTargetMachine(), getOptLevel()));
+
+ return false;
+}
+
+bool RISCVPassConfig::addIRTranslator() {
+ addPass(new IRTranslator(getOptLevel()));
+ return false;
+}
+
+bool RISCVPassConfig::addLegalizeMachineIR() {
+ addPass(new Legalizer());
+ return false;
+}
+
+bool RISCVPassConfig::addRegBankSelect() {
+ addPass(new RegBankSelect());
+ return false;
+}
+
+bool RISCVPassConfig::addGlobalInstructionSelect() {
+ addPass(new InstructionSelect(getOptLevel()));
+ return false;
+}
+
+void RISCVPassConfig::addPreSched2() {}
+
+void RISCVPassConfig::addPreEmitPass() {
+ addPass(&BranchRelaxationPassID);
+ addPass(createRISCVMakeCompressibleOptPass());
+}
+
+void RISCVPassConfig::addPreEmitPass2() {
+ addPass(createRISCVExpandPseudoPass());
+ // Schedule the expansion of AMOs at the last possible moment, avoiding the
+ // possibility for other passes to break the requirements for forward
+ // progress in the LR/SC block.
+ addPass(createRISCVExpandAtomicPseudoPass());
+}
+
+void RISCVPassConfig::addMachineSSAOptimization() {
+ TargetPassConfig::addMachineSSAOptimization();
+ if (EnableMachineCombiner)
+ addPass(&MachineCombinerID);
+
+ if (TM->getTargetTriple().getArch() == Triple::riscv64) {
+ addPass(createRISCVSExtWRemovalPass());
+ addPass(createRISCVStripWSuffixPass());
+ }
+}
+
+void RISCVPassConfig::addPreRegAlloc() {
+ addPass(createRISCVPreRAExpandPseudoPass());
+ if (TM->getOptLevel() != CodeGenOpt::None)
+ addPass(createRISCVMergeBaseOffsetOptPass());
+ addPass(createRISCVInsertVSETVLIPass());
+}
+
+void RISCVPassConfig::addPostRegAlloc() {
+ if (TM->getOptLevel() != CodeGenOpt::None && EnableRedundantCopyElimination)
+ addPass(createRISCVRedundantCopyEliminationPass());
+}
+
+yaml::MachineFunctionInfo *
+RISCVTargetMachine::createDefaultFuncInfoYAML() const {
+ return new yaml::RISCVMachineFunctionInfo();
+}
+
+yaml::MachineFunctionInfo *
+RISCVTargetMachine::convertFuncInfoToYAML(const MachineFunction &MF) const {
+ const auto *MFI = MF.getInfo<RISCVMachineFunctionInfo>();
+ return new yaml::RISCVMachineFunctionInfo(*MFI);
+}
+
+bool RISCVTargetMachine::parseMachineFunctionInfo(
+ const yaml::MachineFunctionInfo &MFI, PerFunctionMIParsingState &PFS,
+ SMDiagnostic &Error, SMRange &SourceRange) const {
+ const auto &YamlMFI =
+ static_cast<const yaml::RISCVMachineFunctionInfo &>(MFI);
+ PFS.MF.getInfo<RISCVMachineFunctionInfo>()->initializeBaseYamlFields(YamlMFI);
+ return false;
+}
diff --git a/contrib/libs/llvm16/lib/Target/RISCV/RISCVTargetMachine.h b/contrib/libs/llvm16/lib/Target/RISCV/RISCVTargetMachine.h
new file mode 100644
index 0000000000..9d3e6e9895
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/RISCV/RISCVTargetMachine.h
@@ -0,0 +1,65 @@
+//===-- RISCVTargetMachine.h - Define TargetMachine for RISCV ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the RISCV specific subclass of TargetMachine.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_RISCV_RISCVTARGETMACHINE_H
+#define LLVM_LIB_TARGET_RISCV_RISCVTARGETMACHINE_H
+
+#include "MCTargetDesc/RISCVMCTargetDesc.h"
+#include "RISCVSubtarget.h"
+#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/Target/TargetMachine.h"
+#include <optional>
+
+namespace llvm {
+class RISCVTargetMachine : public LLVMTargetMachine {
+ std::unique_ptr<TargetLoweringObjectFile> TLOF;
+ mutable StringMap<std::unique_ptr<RISCVSubtarget>> SubtargetMap;
+
+public:
+ RISCVTargetMachine(const Target &T, const Triple &TT, StringRef CPU,
+ StringRef FS, const TargetOptions &Options,
+ std::optional<Reloc::Model> RM,
+ std::optional<CodeModel::Model> CM, CodeGenOpt::Level OL,
+ bool JIT);
+
+ const RISCVSubtarget *getSubtargetImpl(const Function &F) const override;
+ // DO NOT IMPLEMENT: There is no such thing as a valid default subtarget,
+ // subtargets are per-function entities based on the target-specific
+ // attributes of each function.
+ const RISCVSubtarget *getSubtargetImpl() const = delete;
+
+ TargetPassConfig *createPassConfig(PassManagerBase &PM) override;
+
+ TargetLoweringObjectFile *getObjFileLowering() const override {
+ return TLOF.get();
+ }
+
+ MachineFunctionInfo *
+ createMachineFunctionInfo(BumpPtrAllocator &Allocator, const Function &F,
+ const TargetSubtargetInfo *STI) const override;
+
+ TargetTransformInfo getTargetTransformInfo(const Function &F) const override;
+
+ bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DstAS) const override;
+
+ yaml::MachineFunctionInfo *createDefaultFuncInfoYAML() const override;
+ yaml::MachineFunctionInfo *
+ convertFuncInfoToYAML(const MachineFunction &MF) const override;
+ bool parseMachineFunctionInfo(const yaml::MachineFunctionInfo &,
+ PerFunctionMIParsingState &PFS,
+ SMDiagnostic &Error,
+ SMRange &SourceRange) const override;
+};
+} // namespace llvm
+
+#endif
diff --git a/contrib/libs/llvm16/lib/Target/RISCV/RISCVTargetObjectFile.cpp b/contrib/libs/llvm16/lib/Target/RISCV/RISCVTargetObjectFile.cpp
new file mode 100644
index 0000000000..5208371493
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/RISCV/RISCVTargetObjectFile.cpp
@@ -0,0 +1,115 @@
+//===-- RISCVTargetObjectFile.cpp - RISCV Object Info -----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "RISCVTargetObjectFile.h"
+#include "RISCVTargetMachine.h"
+#include "llvm/BinaryFormat/ELF.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCSectionELF.h"
+
+using namespace llvm;
+
+void RISCVELFTargetObjectFile::Initialize(MCContext &Ctx,
+ const TargetMachine &TM) {
+ TargetLoweringObjectFileELF::Initialize(Ctx, TM);
+
+ SmallDataSection = getContext().getELFSection(
+ ".sdata", ELF::SHT_PROGBITS, ELF::SHF_WRITE | ELF::SHF_ALLOC);
+ SmallBSSSection = getContext().getELFSection(".sbss", ELF::SHT_NOBITS,
+ ELF::SHF_WRITE | ELF::SHF_ALLOC);
+}
+
+// A address must be loaded from a small section if its size is less than the
+// small section size threshold. Data in this section could be addressed by
+// using gp_rel operator.
+bool RISCVELFTargetObjectFile::isInSmallSection(uint64_t Size) const {
+ // gcc has traditionally not treated zero-sized objects as small data, so this
+ // is effectively part of the ABI.
+ return Size > 0 && Size <= SSThreshold;
+}
+
+// Return true if this global address should be placed into small data/bss
+// section.
+bool RISCVELFTargetObjectFile::isGlobalInSmallSection(
+ const GlobalObject *GO, const TargetMachine &TM) const {
+ // Only global variables, not functions.
+ const GlobalVariable *GVA = dyn_cast<GlobalVariable>(GO);
+ if (!GVA)
+ return false;
+
+ // If the variable has an explicit section, it is placed in that section.
+ if (GVA->hasSection()) {
+ StringRef Section = GVA->getSection();
+
+ // Explicitly placing any variable in the small data section overrides
+ // the global -G value.
+ if (Section == ".sdata" || Section == ".sbss")
+ return true;
+
+ // Otherwise reject putting the variable to small section if it has an
+ // explicit section name.
+ return false;
+ }
+
+ if (((GVA->hasExternalLinkage() && GVA->isDeclaration()) ||
+ GVA->hasCommonLinkage()))
+ return false;
+
+ Type *Ty = GVA->getValueType();
+ // It is possible that the type of the global is unsized, i.e. a declaration
+ // of a extern struct. In this case don't presume it is in the small data
+ // section. This happens e.g. when building the FreeBSD kernel.
+ if (!Ty->isSized())
+ return false;
+
+ return isInSmallSection(
+ GVA->getParent()->getDataLayout().getTypeAllocSize(Ty));
+}
+
+MCSection *RISCVELFTargetObjectFile::SelectSectionForGlobal(
+ const GlobalObject *GO, SectionKind Kind, const TargetMachine &TM) const {
+ // Handle Small Section classification here.
+ if (Kind.isBSS() && isGlobalInSmallSection(GO, TM))
+ return SmallBSSSection;
+ if (Kind.isData() && isGlobalInSmallSection(GO, TM))
+ return SmallDataSection;
+
+ // Otherwise, we work the same as ELF.
+ return TargetLoweringObjectFileELF::SelectSectionForGlobal(GO, Kind, TM);
+}
+
+void RISCVELFTargetObjectFile::getModuleMetadata(Module &M) {
+ TargetLoweringObjectFileELF::getModuleMetadata(M);
+ SmallVector<Module::ModuleFlagEntry, 8> ModuleFlags;
+ M.getModuleFlagsMetadata(ModuleFlags);
+
+ for (const auto &MFE : ModuleFlags) {
+ StringRef Key = MFE.Key->getString();
+ if (Key == "SmallDataLimit") {
+ SSThreshold = mdconst::extract<ConstantInt>(MFE.Val)->getZExtValue();
+ break;
+ }
+ }
+}
+
+/// Return true if this constant should be placed into small data section.
+bool RISCVELFTargetObjectFile::isConstantInSmallSection(
+ const DataLayout &DL, const Constant *CN) const {
+ return isInSmallSection(DL.getTypeAllocSize(CN->getType()));
+}
+
+MCSection *RISCVELFTargetObjectFile::getSectionForConstant(
+ const DataLayout &DL, SectionKind Kind, const Constant *C,
+ Align &Alignment) const {
+ if (isConstantInSmallSection(DL, C))
+ return SmallDataSection;
+
+ // Otherwise, we work the same as ELF.
+ return TargetLoweringObjectFileELF::getSectionForConstant(DL, Kind, C,
+ Alignment);
+}
diff --git a/contrib/libs/llvm16/lib/Target/RISCV/RISCVTargetObjectFile.h b/contrib/libs/llvm16/lib/Target/RISCV/RISCVTargetObjectFile.h
new file mode 100644
index 0000000000..830a7d813c
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/RISCV/RISCVTargetObjectFile.h
@@ -0,0 +1,47 @@
+//===-- RISCVTargetObjectFile.h - RISCV Object Info -*- C++ ---------*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_RISCV_RISCVTARGETOBJECTFILE_H
+#define LLVM_LIB_TARGET_RISCV_RISCVTARGETOBJECTFILE_H
+
+#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
+
+namespace llvm {
+
+/// This implementation is used for RISCV ELF targets.
+class RISCVELFTargetObjectFile : public TargetLoweringObjectFileELF {
+ MCSection *SmallDataSection;
+ MCSection *SmallBSSSection;
+ unsigned SSThreshold = 8;
+
+public:
+ void Initialize(MCContext &Ctx, const TargetMachine &TM) override;
+
+ /// Return true if this global address should be placed into small data/bss
+ /// section.
+ bool isGlobalInSmallSection(const GlobalObject *GO,
+ const TargetMachine &TM) const;
+
+ MCSection *SelectSectionForGlobal(const GlobalObject *GO, SectionKind Kind,
+ const TargetMachine &TM) const override;
+
+ /// Return true if this constant should be placed into small data section.
+ bool isConstantInSmallSection(const DataLayout &DL, const Constant *CN) const;
+
+ MCSection *getSectionForConstant(const DataLayout &DL, SectionKind Kind,
+ const Constant *C,
+ Align &Alignment) const override;
+
+ void getModuleMetadata(Module &M) override;
+
+ bool isInSmallSection(uint64_t Size) const;
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/libs/llvm16/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/contrib/libs/llvm16/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
new file mode 100644
index 0000000000..585eb9a19c
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -0,0 +1,1486 @@
+//===-- RISCVTargetTransformInfo.cpp - RISC-V specific TTI ----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "RISCVTargetTransformInfo.h"
+#include "MCTargetDesc/RISCVMatInt.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/CodeGen/BasicTTIImpl.h"
+#include "llvm/CodeGen/CostTable.h"
+#include "llvm/CodeGen/TargetLowering.h"
+#include <cmath>
+#include <optional>
+using namespace llvm;
+
+#define DEBUG_TYPE "riscvtti"
+
+static cl::opt<unsigned> RVVRegisterWidthLMUL(
+ "riscv-v-register-bit-width-lmul",
+ cl::desc(
+ "The LMUL to use for getRegisterBitWidth queries. Affects LMUL used "
+ "by autovectorized code. Fractional LMULs are not supported."),
+ cl::init(1), cl::Hidden);
+
+static cl::opt<unsigned> SLPMaxVF(
+ "riscv-v-slp-max-vf",
+ cl::desc(
+ "Result used for getMaximumVF query which is used exclusively by "
+ "SLP vectorizer. Defaults to 1 which disables SLP."),
+ cl::init(1), cl::Hidden);
+
+InstructionCost RISCVTTIImpl::getLMULCost(MVT VT) {
+  // TODO: We assume a reciprocal throughput of 1 for LMUL_1 here; it is
+  // implementation-defined.
+ if (!VT.isVector())
+ return InstructionCost::getInvalid();
+ unsigned Cost;
+ if (VT.isScalableVector()) {
+ unsigned LMul;
+ bool Fractional;
+ std::tie(LMul, Fractional) =
+ RISCVVType::decodeVLMUL(RISCVTargetLowering::getLMUL(VT));
+ if (Fractional)
+ Cost = 1;
+ else
+ Cost = LMul;
+ } else {
+ Cost = VT.getSizeInBits() / ST->getRealMinVLen();
+ }
+ return std::max<unsigned>(Cost, 1);
+}
+
+InstructionCost RISCVTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
+ TTI::TargetCostKind CostKind) {
+ assert(Ty->isIntegerTy() &&
+ "getIntImmCost can only estimate cost of materialising integers");
+
+ // We have a Zero register, so 0 is always free.
+ if (Imm == 0)
+ return TTI::TCC_Free;
+
+ // Otherwise, we check how many instructions it will take to materialise.
+ const DataLayout &DL = getDataLayout();
+ return RISCVMatInt::getIntMatCost(Imm, DL.getTypeSizeInBits(Ty),
+ getST()->getFeatureBits());
+}
+
+// Look for patterns of shift followed by AND that can be turned into a pair of
+// shifts. We won't need to materialize an immediate for the AND so these can
+// be considered free.
+static bool canUseShiftPair(Instruction *Inst, const APInt &Imm) {
+ uint64_t Mask = Imm.getZExtValue();
+ auto *BO = dyn_cast<BinaryOperator>(Inst->getOperand(0));
+ if (!BO || !BO->hasOneUse())
+ return false;
+
+ if (BO->getOpcode() != Instruction::Shl)
+ return false;
+
+ if (!isa<ConstantInt>(BO->getOperand(1)))
+ return false;
+
+ unsigned ShAmt = cast<ConstantInt>(BO->getOperand(1))->getZExtValue();
+ // (and (shl x, c2), c1) will be matched to (srli (slli x, c2+c3), c3) if c1
+ // is a mask shifted by c2 bits with c3 leading zeros.
+ if (isShiftedMask_64(Mask)) {
+ unsigned Trailing = countTrailingZeros(Mask);
+ if (ShAmt == Trailing)
+ return true;
+ }
+
+ return false;
+}
+
+InstructionCost RISCVTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
+ const APInt &Imm, Type *Ty,
+ TTI::TargetCostKind CostKind,
+ Instruction *Inst) {
+ assert(Ty->isIntegerTy() &&
+ "getIntImmCost can only estimate cost of materialising integers");
+
+ // We have a Zero register, so 0 is always free.
+ if (Imm == 0)
+ return TTI::TCC_Free;
+
+ // Some instructions in RISC-V can take a 12-bit immediate. Some of these are
+ // commutative, in others the immediate comes from a specific argument index.
+ bool Takes12BitImm = false;
+ unsigned ImmArgIdx = ~0U;
+
+ switch (Opcode) {
+ case Instruction::GetElementPtr:
+ // Never hoist any arguments to a GetElementPtr. CodeGenPrepare will
+ // split up large offsets in GEP into better parts than ConstantHoisting
+ // can.
+ return TTI::TCC_Free;
+ case Instruction::And:
+ // zext.h
+ if (Imm == UINT64_C(0xffff) && ST->hasStdExtZbb())
+ return TTI::TCC_Free;
+ // zext.w
+ if (Imm == UINT64_C(0xffffffff) && ST->hasStdExtZba())
+ return TTI::TCC_Free;
+ // bclri
+ if (ST->hasStdExtZbs() && (~Imm).isPowerOf2())
+ return TTI::TCC_Free;
+ if (Inst && Idx == 1 && Imm.getBitWidth() <= ST->getXLen() &&
+ canUseShiftPair(Inst, Imm))
+ return TTI::TCC_Free;
+ Takes12BitImm = true;
+ break;
+ case Instruction::Add:
+ Takes12BitImm = true;
+ break;
+ case Instruction::Or:
+ case Instruction::Xor:
+ // bseti/binvi
+ if (ST->hasStdExtZbs() && Imm.isPowerOf2())
+ return TTI::TCC_Free;
+ Takes12BitImm = true;
+ break;
+ case Instruction::Mul:
+ // Negated power of 2 is a shift and a negate.
+ if (Imm.isNegatedPowerOf2())
+ return TTI::TCC_Free;
+ // FIXME: There is no MULI instruction.
+ Takes12BitImm = true;
+ break;
+ case Instruction::Sub:
+ case Instruction::Shl:
+ case Instruction::LShr:
+ case Instruction::AShr:
+ Takes12BitImm = true;
+ ImmArgIdx = 1;
+ break;
+ default:
+ break;
+ }
+
+ if (Takes12BitImm) {
+ // Check immediate is the correct argument...
+ if (Instruction::isCommutative(Opcode) || Idx == ImmArgIdx) {
+ // ... and fits into the 12-bit immediate.
+ if (Imm.getMinSignedBits() <= 64 &&
+ getTLI()->isLegalAddImmediate(Imm.getSExtValue())) {
+ return TTI::TCC_Free;
+ }
+ }
+
+ // Otherwise, use the full materialisation cost.
+ return getIntImmCost(Imm, Ty, CostKind);
+ }
+
+ // By default, prevent hoisting.
+ return TTI::TCC_Free;
+}
+
+InstructionCost
+RISCVTTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
+ const APInt &Imm, Type *Ty,
+ TTI::TargetCostKind CostKind) {
+ // Prevent hoisting in unknown cases.
+ return TTI::TCC_Free;
+}
+
+TargetTransformInfo::PopcntSupportKind
+RISCVTTIImpl::getPopcntSupport(unsigned TyWidth) {
+ assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
+ return ST->hasStdExtZbb() ? TTI::PSK_FastHardware : TTI::PSK_Software;
+}
+
+bool RISCVTTIImpl::shouldExpandReduction(const IntrinsicInst *II) const {
+ // Currently, the ExpandReductions pass can't expand scalable-vector
+ // reductions, but we still request expansion as RVV doesn't support certain
+ // reductions and the SelectionDAG can't legalize them either.
+ switch (II->getIntrinsicID()) {
+ default:
+ return false;
+ // These reductions have no equivalent in RVV
+ case Intrinsic::vector_reduce_mul:
+ case Intrinsic::vector_reduce_fmul:
+ return true;
+ }
+}
+
+std::optional<unsigned> RISCVTTIImpl::getMaxVScale() const {
+ if (ST->hasVInstructions())
+ return ST->getRealMaxVLen() / RISCV::RVVBitsPerBlock;
+ return BaseT::getMaxVScale();
+}
+
+std::optional<unsigned> RISCVTTIImpl::getVScaleForTuning() const {
+ if (ST->hasVInstructions())
+ if (unsigned MinVLen = ST->getRealMinVLen();
+ MinVLen >= RISCV::RVVBitsPerBlock)
+ return MinVLen / RISCV::RVVBitsPerBlock;
+ return BaseT::getVScaleForTuning();
+}
+
+TypeSize
+RISCVTTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
+ unsigned LMUL = PowerOf2Floor(
+ std::max<unsigned>(std::min<unsigned>(RVVRegisterWidthLMUL, 8), 1));
+ switch (K) {
+ case TargetTransformInfo::RGK_Scalar:
+ return TypeSize::getFixed(ST->getXLen());
+ case TargetTransformInfo::RGK_FixedWidthVector:
+ return TypeSize::getFixed(
+ ST->useRVVForFixedLengthVectors() ? LMUL * ST->getRealMinVLen() : 0);
+ case TargetTransformInfo::RGK_ScalableVector:
+ return TypeSize::getScalable(
+ (ST->hasVInstructions() &&
+ ST->getRealMinVLen() >= RISCV::RVVBitsPerBlock)
+ ? LMUL * RISCV::RVVBitsPerBlock
+ : 0);
+ }
+
+ llvm_unreachable("Unsupported register kind");
+}
+
+InstructionCost RISCVTTIImpl::getSpliceCost(VectorType *Tp, int Index) {
+ std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Tp);
+
+ unsigned Cost = 2; // vslidedown+vslideup.
+ // TODO: Multiplying by LT.first implies this legalizes into multiple copies
+ // of similar code, but I think we expand through memory.
+ return Cost * LT.first * getLMULCost(LT.second);
+}
+
+InstructionCost RISCVTTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
+ VectorType *Tp, ArrayRef<int> Mask,
+ TTI::TargetCostKind CostKind,
+ int Index, VectorType *SubTp,
+ ArrayRef<const Value *> Args) {
+ if (isa<ScalableVectorType>(Tp)) {
+ std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Tp);
+ switch (Kind) {
+ default:
+ // Fallthrough to generic handling.
+ // TODO: Most of these cases will return getInvalid in generic code, and
+ // must be implemented here.
+ break;
+ case TTI::SK_Broadcast: {
+ return LT.first * 1;
+ }
+ case TTI::SK_Splice:
+ return getSpliceCost(Tp, Index);
+ case TTI::SK_Reverse:
+ // Most of the cost here is producing the vrgather index register
+ // Example sequence:
+ // csrr a0, vlenb
+ // srli a0, a0, 3
+ // addi a0, a0, -1
+ // vsetvli a1, zero, e8, mf8, ta, mu (ignored)
+ // vid.v v9
+ // vrsub.vx v10, v9, a0
+ // vrgather.vv v9, v8, v10
+ if (Tp->getElementType()->isIntegerTy(1))
+      // Mask operation additionally requires an extend and a truncate
+ return LT.first * 9;
+ return LT.first * 6;
+ }
+ }
+
+ if (isa<FixedVectorType>(Tp) && Kind == TargetTransformInfo::SK_Broadcast) {
+ std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Tp);
+ bool HasScalar = (Args.size() > 0) && (Operator::getOpcode(Args[0]) ==
+ Instruction::InsertElement);
+ if (LT.second.getScalarSizeInBits() == 1) {
+ if (HasScalar) {
+ // Example sequence:
+ // andi a0, a0, 1
+ // vsetivli zero, 2, e8, mf8, ta, ma (ignored)
+ // vmv.v.x v8, a0
+ // vmsne.vi v0, v8, 0
+ return LT.first * getLMULCost(LT.second) * 3;
+ }
+ // Example sequence:
+ // vsetivli zero, 2, e8, mf8, ta, mu (ignored)
+ // vmv.v.i v8, 0
+ // vmerge.vim v8, v8, 1, v0
+ // vmv.x.s a0, v8
+ // andi a0, a0, 1
+ // vmv.v.x v8, a0
+ // vmsne.vi v0, v8, 0
+
+ return LT.first * getLMULCost(LT.second) * 6;
+ }
+
+ if (HasScalar) {
+ // Example sequence:
+ // vmv.v.x v8, a0
+ return LT.first * getLMULCost(LT.second);
+ }
+
+ // Example sequence:
+ // vrgather.vi v9, v8, 0
+ // TODO: vrgather could be slower than vmv.v.x. It is
+ // implementation-dependent.
+ return LT.first * getLMULCost(LT.second);
+ }
+
+ return BaseT::getShuffleCost(Kind, Tp, Mask, CostKind, Index, SubTp);
+}
+
+InstructionCost
+RISCVTTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
+ unsigned AddressSpace,
+ TTI::TargetCostKind CostKind) {
+ if (!isLegalMaskedLoadStore(Src, Alignment) ||
+ CostKind != TTI::TCK_RecipThroughput)
+ return BaseT::getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
+ CostKind);
+
+ return getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, CostKind);
+}
+
+InstructionCost RISCVTTIImpl::getGatherScatterOpCost(
+ unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
+ Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) {
+ if (CostKind != TTI::TCK_RecipThroughput)
+ return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
+ Alignment, CostKind, I);
+
+ if ((Opcode == Instruction::Load &&
+ !isLegalMaskedGather(DataTy, Align(Alignment))) ||
+ (Opcode == Instruction::Store &&
+ !isLegalMaskedScatter(DataTy, Align(Alignment))))
+ return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
+ Alignment, CostKind, I);
+
+ // Cost is proportional to the number of memory operations implied. For
+ // scalable vectors, we use an estimate on that number since we don't
+ // know exactly what VL will be.
+ auto &VTy = *cast<VectorType>(DataTy);
+ InstructionCost MemOpCost =
+ getMemoryOpCost(Opcode, VTy.getElementType(), Alignment, 0, CostKind,
+ {TTI::OK_AnyValue, TTI::OP_None}, I);
+ unsigned NumLoads = getEstimatedVLFor(&VTy);
+ return NumLoads * MemOpCost;
+}
+
+// Currently, these represent both throughput and codesize costs
+// for the respective intrinsics. The costs in this table are simply
+// instruction counts with the following adjustments made:
+// * One vsetvli is considered free.
+static const CostTblEntry VectorIntrinsicCostTable[]{
+ {Intrinsic::floor, MVT::v2f32, 9},
+ {Intrinsic::floor, MVT::v4f32, 9},
+ {Intrinsic::floor, MVT::v8f32, 9},
+ {Intrinsic::floor, MVT::v16f32, 9},
+ {Intrinsic::floor, MVT::nxv1f32, 9},
+ {Intrinsic::floor, MVT::nxv2f32, 9},
+ {Intrinsic::floor, MVT::nxv4f32, 9},
+ {Intrinsic::floor, MVT::nxv8f32, 9},
+ {Intrinsic::floor, MVT::nxv16f32, 9},
+ {Intrinsic::floor, MVT::v2f64, 9},
+ {Intrinsic::floor, MVT::v4f64, 9},
+ {Intrinsic::floor, MVT::v8f64, 9},
+ {Intrinsic::floor, MVT::v16f64, 9},
+ {Intrinsic::floor, MVT::nxv1f64, 9},
+ {Intrinsic::floor, MVT::nxv2f64, 9},
+ {Intrinsic::floor, MVT::nxv4f64, 9},
+ {Intrinsic::floor, MVT::nxv8f64, 9},
+ {Intrinsic::ceil, MVT::v2f32, 9},
+ {Intrinsic::ceil, MVT::v4f32, 9},
+ {Intrinsic::ceil, MVT::v8f32, 9},
+ {Intrinsic::ceil, MVT::v16f32, 9},
+ {Intrinsic::ceil, MVT::nxv1f32, 9},
+ {Intrinsic::ceil, MVT::nxv2f32, 9},
+ {Intrinsic::ceil, MVT::nxv4f32, 9},
+ {Intrinsic::ceil, MVT::nxv8f32, 9},
+ {Intrinsic::ceil, MVT::nxv16f32, 9},
+ {Intrinsic::ceil, MVT::v2f64, 9},
+ {Intrinsic::ceil, MVT::v4f64, 9},
+ {Intrinsic::ceil, MVT::v8f64, 9},
+ {Intrinsic::ceil, MVT::v16f64, 9},
+ {Intrinsic::ceil, MVT::nxv1f64, 9},
+ {Intrinsic::ceil, MVT::nxv2f64, 9},
+ {Intrinsic::ceil, MVT::nxv4f64, 9},
+ {Intrinsic::ceil, MVT::nxv8f64, 9},
+ {Intrinsic::trunc, MVT::v2f32, 7},
+ {Intrinsic::trunc, MVT::v4f32, 7},
+ {Intrinsic::trunc, MVT::v8f32, 7},
+ {Intrinsic::trunc, MVT::v16f32, 7},
+ {Intrinsic::trunc, MVT::nxv1f32, 7},
+ {Intrinsic::trunc, MVT::nxv2f32, 7},
+ {Intrinsic::trunc, MVT::nxv4f32, 7},
+ {Intrinsic::trunc, MVT::nxv8f32, 7},
+ {Intrinsic::trunc, MVT::nxv16f32, 7},
+ {Intrinsic::trunc, MVT::v2f64, 7},
+ {Intrinsic::trunc, MVT::v4f64, 7},
+ {Intrinsic::trunc, MVT::v8f64, 7},
+ {Intrinsic::trunc, MVT::v16f64, 7},
+ {Intrinsic::trunc, MVT::nxv1f64, 7},
+ {Intrinsic::trunc, MVT::nxv2f64, 7},
+ {Intrinsic::trunc, MVT::nxv4f64, 7},
+ {Intrinsic::trunc, MVT::nxv8f64, 7},
+ {Intrinsic::round, MVT::v2f32, 9},
+ {Intrinsic::round, MVT::v4f32, 9},
+ {Intrinsic::round, MVT::v8f32, 9},
+ {Intrinsic::round, MVT::v16f32, 9},
+ {Intrinsic::round, MVT::nxv1f32, 9},
+ {Intrinsic::round, MVT::nxv2f32, 9},
+ {Intrinsic::round, MVT::nxv4f32, 9},
+ {Intrinsic::round, MVT::nxv8f32, 9},
+ {Intrinsic::round, MVT::nxv16f32, 9},
+ {Intrinsic::round, MVT::v2f64, 9},
+ {Intrinsic::round, MVT::v4f64, 9},
+ {Intrinsic::round, MVT::v8f64, 9},
+ {Intrinsic::round, MVT::v16f64, 9},
+ {Intrinsic::round, MVT::nxv1f64, 9},
+ {Intrinsic::round, MVT::nxv2f64, 9},
+ {Intrinsic::round, MVT::nxv4f64, 9},
+ {Intrinsic::round, MVT::nxv8f64, 9},
+ {Intrinsic::roundeven, MVT::v2f32, 9},
+ {Intrinsic::roundeven, MVT::v4f32, 9},
+ {Intrinsic::roundeven, MVT::v8f32, 9},
+ {Intrinsic::roundeven, MVT::v16f32, 9},
+ {Intrinsic::roundeven, MVT::nxv1f32, 9},
+ {Intrinsic::roundeven, MVT::nxv2f32, 9},
+ {Intrinsic::roundeven, MVT::nxv4f32, 9},
+ {Intrinsic::roundeven, MVT::nxv8f32, 9},
+ {Intrinsic::roundeven, MVT::nxv16f32, 9},
+ {Intrinsic::roundeven, MVT::v2f64, 9},
+ {Intrinsic::roundeven, MVT::v4f64, 9},
+ {Intrinsic::roundeven, MVT::v8f64, 9},
+ {Intrinsic::roundeven, MVT::v16f64, 9},
+ {Intrinsic::roundeven, MVT::nxv1f64, 9},
+ {Intrinsic::roundeven, MVT::nxv2f64, 9},
+ {Intrinsic::roundeven, MVT::nxv4f64, 9},
+ {Intrinsic::roundeven, MVT::nxv8f64, 9},
+ {Intrinsic::bswap, MVT::v2i16, 3},
+ {Intrinsic::bswap, MVT::v4i16, 3},
+ {Intrinsic::bswap, MVT::v8i16, 3},
+ {Intrinsic::bswap, MVT::v16i16, 3},
+ {Intrinsic::bswap, MVT::nxv1i16, 3},
+ {Intrinsic::bswap, MVT::nxv2i16, 3},
+ {Intrinsic::bswap, MVT::nxv4i16, 3},
+ {Intrinsic::bswap, MVT::nxv8i16, 3},
+ {Intrinsic::bswap, MVT::nxv16i16, 3},
+ {Intrinsic::bswap, MVT::v2i32, 12},
+ {Intrinsic::bswap, MVT::v4i32, 12},
+ {Intrinsic::bswap, MVT::v8i32, 12},
+ {Intrinsic::bswap, MVT::v16i32, 12},
+ {Intrinsic::bswap, MVT::nxv1i32, 12},
+ {Intrinsic::bswap, MVT::nxv2i32, 12},
+ {Intrinsic::bswap, MVT::nxv4i32, 12},
+ {Intrinsic::bswap, MVT::nxv8i32, 12},
+ {Intrinsic::bswap, MVT::nxv16i32, 12},
+ {Intrinsic::bswap, MVT::v2i64, 31},
+ {Intrinsic::bswap, MVT::v4i64, 31},
+ {Intrinsic::bswap, MVT::v8i64, 31},
+ {Intrinsic::bswap, MVT::v16i64, 31},
+ {Intrinsic::bswap, MVT::nxv1i64, 31},
+ {Intrinsic::bswap, MVT::nxv2i64, 31},
+ {Intrinsic::bswap, MVT::nxv4i64, 31},
+ {Intrinsic::bswap, MVT::nxv8i64, 31},
+ {Intrinsic::vp_bswap, MVT::v2i16, 3},
+ {Intrinsic::vp_bswap, MVT::v4i16, 3},
+ {Intrinsic::vp_bswap, MVT::v8i16, 3},
+ {Intrinsic::vp_bswap, MVT::v16i16, 3},
+ {Intrinsic::vp_bswap, MVT::nxv1i16, 3},
+ {Intrinsic::vp_bswap, MVT::nxv2i16, 3},
+ {Intrinsic::vp_bswap, MVT::nxv4i16, 3},
+ {Intrinsic::vp_bswap, MVT::nxv8i16, 3},
+ {Intrinsic::vp_bswap, MVT::nxv16i16, 3},
+ {Intrinsic::vp_bswap, MVT::v2i32, 12},
+ {Intrinsic::vp_bswap, MVT::v4i32, 12},
+ {Intrinsic::vp_bswap, MVT::v8i32, 12},
+ {Intrinsic::vp_bswap, MVT::v16i32, 12},
+ {Intrinsic::vp_bswap, MVT::nxv1i32, 12},
+ {Intrinsic::vp_bswap, MVT::nxv2i32, 12},
+ {Intrinsic::vp_bswap, MVT::nxv4i32, 12},
+ {Intrinsic::vp_bswap, MVT::nxv8i32, 12},
+ {Intrinsic::vp_bswap, MVT::nxv16i32, 12},
+ {Intrinsic::vp_bswap, MVT::v2i64, 31},
+ {Intrinsic::vp_bswap, MVT::v4i64, 31},
+ {Intrinsic::vp_bswap, MVT::v8i64, 31},
+ {Intrinsic::vp_bswap, MVT::v16i64, 31},
+ {Intrinsic::vp_bswap, MVT::nxv1i64, 31},
+ {Intrinsic::vp_bswap, MVT::nxv2i64, 31},
+ {Intrinsic::vp_bswap, MVT::nxv4i64, 31},
+ {Intrinsic::vp_bswap, MVT::nxv8i64, 31},
+ {Intrinsic::vp_fshl, MVT::v2i8, 7},
+ {Intrinsic::vp_fshl, MVT::v4i8, 7},
+ {Intrinsic::vp_fshl, MVT::v8i8, 7},
+ {Intrinsic::vp_fshl, MVT::v16i8, 7},
+ {Intrinsic::vp_fshl, MVT::nxv1i8, 7},
+ {Intrinsic::vp_fshl, MVT::nxv2i8, 7},
+ {Intrinsic::vp_fshl, MVT::nxv4i8, 7},
+ {Intrinsic::vp_fshl, MVT::nxv8i8, 7},
+ {Intrinsic::vp_fshl, MVT::nxv16i8, 7},
+ {Intrinsic::vp_fshl, MVT::nxv32i8, 7},
+ {Intrinsic::vp_fshl, MVT::nxv64i8, 7},
+ {Intrinsic::vp_fshl, MVT::v2i16, 7},
+ {Intrinsic::vp_fshl, MVT::v4i16, 7},
+ {Intrinsic::vp_fshl, MVT::v8i16, 7},
+ {Intrinsic::vp_fshl, MVT::v16i16, 7},
+ {Intrinsic::vp_fshl, MVT::nxv1i16, 7},
+ {Intrinsic::vp_fshl, MVT::nxv2i16, 7},
+ {Intrinsic::vp_fshl, MVT::nxv4i16, 7},
+ {Intrinsic::vp_fshl, MVT::nxv8i16, 7},
+ {Intrinsic::vp_fshl, MVT::nxv16i16, 7},
+ {Intrinsic::vp_fshl, MVT::nxv32i16, 7},
+ {Intrinsic::vp_fshl, MVT::v2i32, 7},
+ {Intrinsic::vp_fshl, MVT::v4i32, 7},
+ {Intrinsic::vp_fshl, MVT::v8i32, 7},
+ {Intrinsic::vp_fshl, MVT::v16i32, 7},
+ {Intrinsic::vp_fshl, MVT::nxv1i32, 7},
+ {Intrinsic::vp_fshl, MVT::nxv2i32, 7},
+ {Intrinsic::vp_fshl, MVT::nxv4i32, 7},
+ {Intrinsic::vp_fshl, MVT::nxv8i32, 7},
+ {Intrinsic::vp_fshl, MVT::nxv16i32, 7},
+ {Intrinsic::vp_fshl, MVT::v2i64, 7},
+ {Intrinsic::vp_fshl, MVT::v4i64, 7},
+ {Intrinsic::vp_fshl, MVT::v8i64, 7},
+ {Intrinsic::vp_fshl, MVT::v16i64, 7},
+ {Intrinsic::vp_fshl, MVT::nxv1i64, 7},
+ {Intrinsic::vp_fshl, MVT::nxv2i64, 7},
+ {Intrinsic::vp_fshl, MVT::nxv4i64, 7},
+ {Intrinsic::vp_fshl, MVT::nxv8i64, 7},
+ {Intrinsic::vp_fshr, MVT::v2i8, 7},
+ {Intrinsic::vp_fshr, MVT::v4i8, 7},
+ {Intrinsic::vp_fshr, MVT::v8i8, 7},
+ {Intrinsic::vp_fshr, MVT::v16i8, 7},
+ {Intrinsic::vp_fshr, MVT::nxv1i8, 7},
+ {Intrinsic::vp_fshr, MVT::nxv2i8, 7},
+ {Intrinsic::vp_fshr, MVT::nxv4i8, 7},
+ {Intrinsic::vp_fshr, MVT::nxv8i8, 7},
+ {Intrinsic::vp_fshr, MVT::nxv16i8, 7},
+ {Intrinsic::vp_fshr, MVT::nxv32i8, 7},
+ {Intrinsic::vp_fshr, MVT::nxv64i8, 7},
+ {Intrinsic::vp_fshr, MVT::v2i16, 7},
+ {Intrinsic::vp_fshr, MVT::v4i16, 7},
+ {Intrinsic::vp_fshr, MVT::v8i16, 7},
+ {Intrinsic::vp_fshr, MVT::v16i16, 7},
+ {Intrinsic::vp_fshr, MVT::nxv1i16, 7},
+ {Intrinsic::vp_fshr, MVT::nxv2i16, 7},
+ {Intrinsic::vp_fshr, MVT::nxv4i16, 7},
+ {Intrinsic::vp_fshr, MVT::nxv8i16, 7},
+ {Intrinsic::vp_fshr, MVT::nxv16i16, 7},
+ {Intrinsic::vp_fshr, MVT::nxv32i16, 7},
+ {Intrinsic::vp_fshr, MVT::v2i32, 7},
+ {Intrinsic::vp_fshr, MVT::v4i32, 7},
+ {Intrinsic::vp_fshr, MVT::v8i32, 7},
+ {Intrinsic::vp_fshr, MVT::v16i32, 7},
+ {Intrinsic::vp_fshr, MVT::nxv1i32, 7},
+ {Intrinsic::vp_fshr, MVT::nxv2i32, 7},
+ {Intrinsic::vp_fshr, MVT::nxv4i32, 7},
+ {Intrinsic::vp_fshr, MVT::nxv8i32, 7},
+ {Intrinsic::vp_fshr, MVT::nxv16i32, 7},
+ {Intrinsic::vp_fshr, MVT::v2i64, 7},
+ {Intrinsic::vp_fshr, MVT::v4i64, 7},
+ {Intrinsic::vp_fshr, MVT::v8i64, 7},
+ {Intrinsic::vp_fshr, MVT::v16i64, 7},
+ {Intrinsic::vp_fshr, MVT::nxv1i64, 7},
+ {Intrinsic::vp_fshr, MVT::nxv2i64, 7},
+ {Intrinsic::vp_fshr, MVT::nxv4i64, 7},
+ {Intrinsic::vp_fshr, MVT::nxv8i64, 7},
+ {Intrinsic::bitreverse, MVT::v2i8, 17},
+ {Intrinsic::bitreverse, MVT::v4i8, 17},
+ {Intrinsic::bitreverse, MVT::v8i8, 17},
+ {Intrinsic::bitreverse, MVT::v16i8, 17},
+ {Intrinsic::bitreverse, MVT::nxv1i8, 17},
+ {Intrinsic::bitreverse, MVT::nxv2i8, 17},
+ {Intrinsic::bitreverse, MVT::nxv4i8, 17},
+ {Intrinsic::bitreverse, MVT::nxv8i8, 17},
+ {Intrinsic::bitreverse, MVT::nxv16i8, 17},
+ {Intrinsic::bitreverse, MVT::v2i16, 24},
+ {Intrinsic::bitreverse, MVT::v4i16, 24},
+ {Intrinsic::bitreverse, MVT::v8i16, 24},
+ {Intrinsic::bitreverse, MVT::v16i16, 24},
+ {Intrinsic::bitreverse, MVT::nxv1i16, 24},
+ {Intrinsic::bitreverse, MVT::nxv2i16, 24},
+ {Intrinsic::bitreverse, MVT::nxv4i16, 24},
+ {Intrinsic::bitreverse, MVT::nxv8i16, 24},
+ {Intrinsic::bitreverse, MVT::nxv16i16, 24},
+ {Intrinsic::bitreverse, MVT::v2i32, 33},
+ {Intrinsic::bitreverse, MVT::v4i32, 33},
+ {Intrinsic::bitreverse, MVT::v8i32, 33},
+ {Intrinsic::bitreverse, MVT::v16i32, 33},
+ {Intrinsic::bitreverse, MVT::nxv1i32, 33},
+ {Intrinsic::bitreverse, MVT::nxv2i32, 33},
+ {Intrinsic::bitreverse, MVT::nxv4i32, 33},
+ {Intrinsic::bitreverse, MVT::nxv8i32, 33},
+ {Intrinsic::bitreverse, MVT::nxv16i32, 33},
+ {Intrinsic::bitreverse, MVT::v2i64, 52},
+ {Intrinsic::bitreverse, MVT::v4i64, 52},
+ {Intrinsic::bitreverse, MVT::v8i64, 52},
+ {Intrinsic::bitreverse, MVT::v16i64, 52},
+ {Intrinsic::bitreverse, MVT::nxv1i64, 52},
+ {Intrinsic::bitreverse, MVT::nxv2i64, 52},
+ {Intrinsic::bitreverse, MVT::nxv4i64, 52},
+ {Intrinsic::bitreverse, MVT::nxv8i64, 52},
+ {Intrinsic::vp_bitreverse, MVT::v2i8, 17},
+ {Intrinsic::vp_bitreverse, MVT::v4i8, 17},
+ {Intrinsic::vp_bitreverse, MVT::v8i8, 17},
+ {Intrinsic::vp_bitreverse, MVT::v16i8, 17},
+ {Intrinsic::vp_bitreverse, MVT::nxv1i8, 17},
+ {Intrinsic::vp_bitreverse, MVT::nxv2i8, 17},
+ {Intrinsic::vp_bitreverse, MVT::nxv4i8, 17},
+ {Intrinsic::vp_bitreverse, MVT::nxv8i8, 17},
+ {Intrinsic::vp_bitreverse, MVT::nxv16i8, 17},
+ {Intrinsic::vp_bitreverse, MVT::v2i16, 24},
+ {Intrinsic::vp_bitreverse, MVT::v4i16, 24},
+ {Intrinsic::vp_bitreverse, MVT::v8i16, 24},
+ {Intrinsic::vp_bitreverse, MVT::v16i16, 24},
+ {Intrinsic::vp_bitreverse, MVT::nxv1i16, 24},
+ {Intrinsic::vp_bitreverse, MVT::nxv2i16, 24},
+ {Intrinsic::vp_bitreverse, MVT::nxv4i16, 24},
+ {Intrinsic::vp_bitreverse, MVT::nxv8i16, 24},
+ {Intrinsic::vp_bitreverse, MVT::nxv16i16, 24},
+ {Intrinsic::vp_bitreverse, MVT::v2i32, 33},
+ {Intrinsic::vp_bitreverse, MVT::v4i32, 33},
+ {Intrinsic::vp_bitreverse, MVT::v8i32, 33},
+ {Intrinsic::vp_bitreverse, MVT::v16i32, 33},
+ {Intrinsic::vp_bitreverse, MVT::nxv1i32, 33},
+ {Intrinsic::vp_bitreverse, MVT::nxv2i32, 33},
+ {Intrinsic::vp_bitreverse, MVT::nxv4i32, 33},
+ {Intrinsic::vp_bitreverse, MVT::nxv8i32, 33},
+ {Intrinsic::vp_bitreverse, MVT::nxv16i32, 33},
+ {Intrinsic::vp_bitreverse, MVT::v2i64, 52},
+ {Intrinsic::vp_bitreverse, MVT::v4i64, 52},
+ {Intrinsic::vp_bitreverse, MVT::v8i64, 52},
+ {Intrinsic::vp_bitreverse, MVT::v16i64, 52},
+ {Intrinsic::vp_bitreverse, MVT::nxv1i64, 52},
+ {Intrinsic::vp_bitreverse, MVT::nxv2i64, 52},
+ {Intrinsic::vp_bitreverse, MVT::nxv4i64, 52},
+ {Intrinsic::vp_bitreverse, MVT::nxv8i64, 52},
+ {Intrinsic::ctpop, MVT::v2i8, 12},
+ {Intrinsic::ctpop, MVT::v4i8, 12},
+ {Intrinsic::ctpop, MVT::v8i8, 12},
+ {Intrinsic::ctpop, MVT::v16i8, 12},
+ {Intrinsic::ctpop, MVT::nxv1i8, 12},
+ {Intrinsic::ctpop, MVT::nxv2i8, 12},
+ {Intrinsic::ctpop, MVT::nxv4i8, 12},
+ {Intrinsic::ctpop, MVT::nxv8i8, 12},
+ {Intrinsic::ctpop, MVT::nxv16i8, 12},
+ {Intrinsic::ctpop, MVT::v2i16, 19},
+ {Intrinsic::ctpop, MVT::v4i16, 19},
+ {Intrinsic::ctpop, MVT::v8i16, 19},
+ {Intrinsic::ctpop, MVT::v16i16, 19},
+ {Intrinsic::ctpop, MVT::nxv1i16, 19},
+ {Intrinsic::ctpop, MVT::nxv2i16, 19},
+ {Intrinsic::ctpop, MVT::nxv4i16, 19},
+ {Intrinsic::ctpop, MVT::nxv8i16, 19},
+ {Intrinsic::ctpop, MVT::nxv16i16, 19},
+ {Intrinsic::ctpop, MVT::v2i32, 20},
+ {Intrinsic::ctpop, MVT::v4i32, 20},
+ {Intrinsic::ctpop, MVT::v8i32, 20},
+ {Intrinsic::ctpop, MVT::v16i32, 20},
+ {Intrinsic::ctpop, MVT::nxv1i32, 20},
+ {Intrinsic::ctpop, MVT::nxv2i32, 20},
+ {Intrinsic::ctpop, MVT::nxv4i32, 20},
+ {Intrinsic::ctpop, MVT::nxv8i32, 20},
+ {Intrinsic::ctpop, MVT::nxv16i32, 20},
+ {Intrinsic::ctpop, MVT::v2i64, 21},
+ {Intrinsic::ctpop, MVT::v4i64, 21},
+ {Intrinsic::ctpop, MVT::v8i64, 21},
+ {Intrinsic::ctpop, MVT::v16i64, 21},
+ {Intrinsic::ctpop, MVT::nxv1i64, 21},
+ {Intrinsic::ctpop, MVT::nxv2i64, 21},
+ {Intrinsic::ctpop, MVT::nxv4i64, 21},
+ {Intrinsic::ctpop, MVT::nxv8i64, 21},
+ {Intrinsic::vp_ctpop, MVT::v2i8, 12},
+ {Intrinsic::vp_ctpop, MVT::v4i8, 12},
+ {Intrinsic::vp_ctpop, MVT::v8i8, 12},
+ {Intrinsic::vp_ctpop, MVT::v16i8, 12},
+ {Intrinsic::vp_ctpop, MVT::nxv1i8, 12},
+ {Intrinsic::vp_ctpop, MVT::nxv2i8, 12},
+ {Intrinsic::vp_ctpop, MVT::nxv4i8, 12},
+ {Intrinsic::vp_ctpop, MVT::nxv8i8, 12},
+ {Intrinsic::vp_ctpop, MVT::nxv16i8, 12},
+ {Intrinsic::vp_ctpop, MVT::v2i16, 19},
+ {Intrinsic::vp_ctpop, MVT::v4i16, 19},
+ {Intrinsic::vp_ctpop, MVT::v8i16, 19},
+ {Intrinsic::vp_ctpop, MVT::v16i16, 19},
+ {Intrinsic::vp_ctpop, MVT::nxv1i16, 19},
+ {Intrinsic::vp_ctpop, MVT::nxv2i16, 19},
+ {Intrinsic::vp_ctpop, MVT::nxv4i16, 19},
+ {Intrinsic::vp_ctpop, MVT::nxv8i16, 19},
+ {Intrinsic::vp_ctpop, MVT::nxv16i16, 19},
+ {Intrinsic::vp_ctpop, MVT::v2i32, 20},
+ {Intrinsic::vp_ctpop, MVT::v4i32, 20},
+ {Intrinsic::vp_ctpop, MVT::v8i32, 20},
+ {Intrinsic::vp_ctpop, MVT::v16i32, 20},
+ {Intrinsic::vp_ctpop, MVT::nxv1i32, 20},
+ {Intrinsic::vp_ctpop, MVT::nxv2i32, 20},
+ {Intrinsic::vp_ctpop, MVT::nxv4i32, 20},
+ {Intrinsic::vp_ctpop, MVT::nxv8i32, 20},
+ {Intrinsic::vp_ctpop, MVT::nxv16i32, 20},
+ {Intrinsic::vp_ctpop, MVT::v2i64, 21},
+ {Intrinsic::vp_ctpop, MVT::v4i64, 21},
+ {Intrinsic::vp_ctpop, MVT::v8i64, 21},
+ {Intrinsic::vp_ctpop, MVT::v16i64, 21},
+ {Intrinsic::vp_ctpop, MVT::nxv1i64, 21},
+ {Intrinsic::vp_ctpop, MVT::nxv2i64, 21},
+ {Intrinsic::vp_ctpop, MVT::nxv4i64, 21},
+ {Intrinsic::vp_ctpop, MVT::nxv8i64, 21},
+ {Intrinsic::vp_ctlz, MVT::v2i8, 19},
+ {Intrinsic::vp_ctlz, MVT::v4i8, 19},
+ {Intrinsic::vp_ctlz, MVT::v8i8, 19},
+ {Intrinsic::vp_ctlz, MVT::v16i8, 19},
+ {Intrinsic::vp_ctlz, MVT::nxv1i8, 19},
+ {Intrinsic::vp_ctlz, MVT::nxv2i8, 19},
+ {Intrinsic::vp_ctlz, MVT::nxv4i8, 19},
+ {Intrinsic::vp_ctlz, MVT::nxv8i8, 19},
+ {Intrinsic::vp_ctlz, MVT::nxv16i8, 19},
+ {Intrinsic::vp_ctlz, MVT::nxv32i8, 19},
+ {Intrinsic::vp_ctlz, MVT::nxv64i8, 19},
+ {Intrinsic::vp_ctlz, MVT::v2i16, 28},
+ {Intrinsic::vp_ctlz, MVT::v4i16, 28},
+ {Intrinsic::vp_ctlz, MVT::v8i16, 28},
+ {Intrinsic::vp_ctlz, MVT::v16i16, 28},
+ {Intrinsic::vp_ctlz, MVT::nxv1i16, 28},
+ {Intrinsic::vp_ctlz, MVT::nxv2i16, 28},
+ {Intrinsic::vp_ctlz, MVT::nxv4i16, 28},
+ {Intrinsic::vp_ctlz, MVT::nxv8i16, 28},
+ {Intrinsic::vp_ctlz, MVT::nxv16i16, 28},
+ {Intrinsic::vp_ctlz, MVT::nxv32i16, 28},
+ {Intrinsic::vp_ctlz, MVT::v2i32, 31},
+ {Intrinsic::vp_ctlz, MVT::v4i32, 31},
+ {Intrinsic::vp_ctlz, MVT::v8i32, 31},
+ {Intrinsic::vp_ctlz, MVT::v16i32, 31},
+ {Intrinsic::vp_ctlz, MVT::nxv1i32, 31},
+ {Intrinsic::vp_ctlz, MVT::nxv2i32, 31},
+ {Intrinsic::vp_ctlz, MVT::nxv4i32, 31},
+ {Intrinsic::vp_ctlz, MVT::nxv8i32, 31},
+ {Intrinsic::vp_ctlz, MVT::nxv16i32, 31},
+ {Intrinsic::vp_ctlz, MVT::v2i64, 35},
+ {Intrinsic::vp_ctlz, MVT::v4i64, 35},
+ {Intrinsic::vp_ctlz, MVT::v8i64, 35},
+ {Intrinsic::vp_ctlz, MVT::v16i64, 35},
+ {Intrinsic::vp_ctlz, MVT::nxv1i64, 35},
+ {Intrinsic::vp_ctlz, MVT::nxv2i64, 35},
+ {Intrinsic::vp_ctlz, MVT::nxv4i64, 35},
+ {Intrinsic::vp_ctlz, MVT::nxv8i64, 35},
+ {Intrinsic::vp_cttz, MVT::v2i8, 16},
+ {Intrinsic::vp_cttz, MVT::v4i8, 16},
+ {Intrinsic::vp_cttz, MVT::v8i8, 16},
+ {Intrinsic::vp_cttz, MVT::v16i8, 16},
+ {Intrinsic::vp_cttz, MVT::nxv1i8, 16},
+ {Intrinsic::vp_cttz, MVT::nxv2i8, 16},
+ {Intrinsic::vp_cttz, MVT::nxv4i8, 16},
+ {Intrinsic::vp_cttz, MVT::nxv8i8, 16},
+ {Intrinsic::vp_cttz, MVT::nxv16i8, 16},
+ {Intrinsic::vp_cttz, MVT::nxv32i8, 16},
+ {Intrinsic::vp_cttz, MVT::nxv64i8, 16},
+ {Intrinsic::vp_cttz, MVT::v2i16, 23},
+ {Intrinsic::vp_cttz, MVT::v4i16, 23},
+ {Intrinsic::vp_cttz, MVT::v8i16, 23},
+ {Intrinsic::vp_cttz, MVT::v16i16, 23},
+ {Intrinsic::vp_cttz, MVT::nxv1i16, 23},
+ {Intrinsic::vp_cttz, MVT::nxv2i16, 23},
+ {Intrinsic::vp_cttz, MVT::nxv4i16, 23},
+ {Intrinsic::vp_cttz, MVT::nxv8i16, 23},
+ {Intrinsic::vp_cttz, MVT::nxv16i16, 23},
+ {Intrinsic::vp_cttz, MVT::nxv32i16, 23},
+ {Intrinsic::vp_cttz, MVT::v2i32, 24},
+ {Intrinsic::vp_cttz, MVT::v4i32, 24},
+ {Intrinsic::vp_cttz, MVT::v8i32, 24},
+ {Intrinsic::vp_cttz, MVT::v16i32, 24},
+ {Intrinsic::vp_cttz, MVT::nxv1i32, 24},
+ {Intrinsic::vp_cttz, MVT::nxv2i32, 24},
+ {Intrinsic::vp_cttz, MVT::nxv4i32, 24},
+ {Intrinsic::vp_cttz, MVT::nxv8i32, 24},
+ {Intrinsic::vp_cttz, MVT::nxv16i32, 24},
+ {Intrinsic::vp_cttz, MVT::v2i64, 25},
+ {Intrinsic::vp_cttz, MVT::v4i64, 25},
+ {Intrinsic::vp_cttz, MVT::v8i64, 25},
+ {Intrinsic::vp_cttz, MVT::v16i64, 25},
+ {Intrinsic::vp_cttz, MVT::nxv1i64, 25},
+ {Intrinsic::vp_cttz, MVT::nxv2i64, 25},
+ {Intrinsic::vp_cttz, MVT::nxv4i64, 25},
+ {Intrinsic::vp_cttz, MVT::nxv8i64, 25},
+};
+
+static unsigned getISDForVPIntrinsicID(Intrinsic::ID ID) {
+ switch (ID) {
+#define HELPER_MAP_VPID_TO_VPSD(VPID, VPSD) \
+ case Intrinsic::VPID: \
+ return ISD::VPSD;
+#include "llvm/IR/VPIntrinsics.def"
+#undef HELPER_MAP_VPID_TO_VPSD
+ }
+ return ISD::DELETED_NODE;
+}
+
+InstructionCost
+RISCVTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
+ TTI::TargetCostKind CostKind) {
+ auto *RetTy = ICA.getReturnType();
+ switch (ICA.getID()) {
+ case Intrinsic::ceil:
+ case Intrinsic::floor:
+ case Intrinsic::trunc:
+ case Intrinsic::rint:
+ case Intrinsic::round:
+ case Intrinsic::roundeven: {
+ // These all use the same code.
+ auto LT = getTypeLegalizationCost(RetTy);
+ if (!LT.second.isVector() && TLI->isOperationCustom(ISD::FCEIL, LT.second))
+ return LT.first * 8;
+ break;
+ }
+ case Intrinsic::umin:
+ case Intrinsic::umax:
+ case Intrinsic::smin:
+ case Intrinsic::smax: {
+ auto LT = getTypeLegalizationCost(RetTy);
+ if ((ST->hasVInstructions() && LT.second.isVector()) ||
+ (LT.second.isScalarInteger() && ST->hasStdExtZbb()))
+ return LT.first;
+ break;
+ }
+ case Intrinsic::sadd_sat:
+ case Intrinsic::ssub_sat:
+ case Intrinsic::uadd_sat:
+ case Intrinsic::usub_sat: {
+ auto LT = getTypeLegalizationCost(RetTy);
+ if (ST->hasVInstructions() && LT.second.isVector())
+ return LT.first;
+ break;
+ }
+ case Intrinsic::abs: {
+ auto LT = getTypeLegalizationCost(RetTy);
+ if (ST->hasVInstructions() && LT.second.isVector()) {
+ // vrsub.vi v10, v8, 0
+ // vmax.vv v8, v8, v10
+ return LT.first * 2;
+ }
+ break;
+ }
+ case Intrinsic::fabs:
+ case Intrinsic::sqrt: {
+ auto LT = getTypeLegalizationCost(RetTy);
+ if (ST->hasVInstructions() && LT.second.isVector())
+ return LT.first;
+ break;
+ }
+  // TODO: add more intrinsics
+ case Intrinsic::experimental_stepvector: {
+ unsigned Cost = 1; // vid
+ auto LT = getTypeLegalizationCost(RetTy);
+ return Cost + (LT.first - 1);
+ }
+ case Intrinsic::vp_rint: {
+ // RISC-V target uses at least 5 instructions to lower rounding intrinsics.
+ unsigned Cost = 5;
+ auto LT = getTypeLegalizationCost(RetTy);
+ if (TLI->isOperationCustom(ISD::VP_FRINT, LT.second))
+ return Cost * LT.first;
+ break;
+ }
+ case Intrinsic::vp_nearbyint: {
+    // Needs one more read and one more write of fflags than vp_rint.
+ unsigned Cost = 7;
+ auto LT = getTypeLegalizationCost(RetTy);
+ if (TLI->isOperationCustom(ISD::VP_FRINT, LT.second))
+ return Cost * LT.first;
+ break;
+ }
+ case Intrinsic::vp_ceil:
+ case Intrinsic::vp_floor:
+ case Intrinsic::vp_round:
+ case Intrinsic::vp_roundeven:
+ case Intrinsic::vp_roundtozero: {
+ // Rounding with static rounding mode needs two more instructions to
+ // swap/write FRM than vp_rint.
+ unsigned Cost = 7;
+ auto LT = getTypeLegalizationCost(RetTy);
+ unsigned VPISD = getISDForVPIntrinsicID(ICA.getID());
+ if (TLI->isOperationCustom(VPISD, LT.second))
+ return Cost * LT.first;
+ break;
+ }
+ }
+
+ if (ST->hasVInstructions() && RetTy->isVectorTy()) {
+ auto LT = getTypeLegalizationCost(RetTy);
+ if (const auto *Entry = CostTableLookup(VectorIntrinsicCostTable,
+ ICA.getID(), LT.second))
+ return LT.first * Entry->Cost;
+ }
+
+ return BaseT::getIntrinsicInstrCost(ICA, CostKind);
+}
+
+InstructionCost RISCVTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
+                                               Type *Src,
+                                               TTI::CastContextHint CCH,
+                                               TTI::TargetCostKind CostKind,
+                                               const Instruction *I) {
+  // Only vector-to-vector casts are costed here; anything else (and any case
+  // we cannot model precisely) is deferred to the base implementation.
+  if (isa<VectorType>(Dst) && isa<VectorType>(Src)) {
+    // FIXME: Need to compute legalizing cost for illegal types.
+    if (!isTypeLegal(Src) || !isTypeLegal(Dst))
+      return BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
+
+    // Skip if element size of Dst or Src is bigger than ELEN.
+    if (Src->getScalarSizeInBits() > ST->getELEN() ||
+        Dst->getScalarSizeInBits() > ST->getELEN())
+      return BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
+
+    int ISD = TLI->InstructionOpcodeToISD(Opcode);
+    assert(ISD && "Invalid opcode");
+
+    // Log2 difference of element widths: number of single-step widening or
+    // narrowing instructions needed between the two element sizes.
+    // FIXME: Need to consider vsetvli and lmul.
+    int PowDiff = (int)Log2_32(Dst->getScalarSizeInBits()) -
+                  (int)Log2_32(Src->getScalarSizeInBits());
+    switch (ISD) {
+    case ISD::SIGN_EXTEND:
+    case ISD::ZERO_EXTEND:
+      if (Src->getScalarSizeInBits() == 1) {
+        // We do not use vsext/vzext to extend from mask vector.
+        // Instead we use the following instructions to extend from mask vector:
+        // vmv.v.i v8, 0
+        // vmerge.vim v8, v8, -1, v0
+        return 2;
+      }
+      return 1;
+    case ISD::TRUNCATE:
+      if (Dst->getScalarSizeInBits() == 1) {
+        // We do not use several vncvt to truncate to mask vector. So we could
+        // not use PowDiff to calculate it.
+        // Instead we use the following instructions to truncate to mask vector:
+        // vand.vi v8, v8, 1
+        // vmsne.vi v0, v8, 0
+        return 2;
+      }
+      [[fallthrough]];
+    case ISD::FP_EXTEND:
+    case ISD::FP_ROUND:
+      // Counts of narrow/widen instructions.
+      return std::abs(PowDiff);
+    case ISD::FP_TO_SINT:
+    case ISD::FP_TO_UINT:
+    case ISD::SINT_TO_FP:
+    case ISD::UINT_TO_FP:
+      if (Src->getScalarSizeInBits() == 1 || Dst->getScalarSizeInBits() == 1) {
+        // The cost of convert from or to mask vector is different from other
+        // cases. We could not use PowDiff to calculate it.
+        // For mask vector to fp, we should use the following instructions:
+        // vmv.v.i v8, 0
+        // vmerge.vim v8, v8, -1, v0
+        // vfcvt.f.x.v v8, v8
+
+        // And for fp vector to mask, we use:
+        // vfncvt.rtz.x.f.w v9, v8
+        // vand.vi v8, v9, 1
+        // vmsne.vi v0, v8, 0
+        return 3;
+      }
+      if (std::abs(PowDiff) <= 1)
+        return 1;
+      // Backend could lower (v[sz]ext i8 to double) to vfcvt(v[sz]ext.f8 i8),
+      // so it only need two conversion.
+      if (Src->isIntOrIntVectorTy())
+        return 2;
+      // Counts of narrow/widen instructions.
+      return std::abs(PowDiff);
+    }
+  }
+  return BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
+}
+
+unsigned RISCVTTIImpl::getEstimatedVLFor(VectorType *Ty) {
+  if (isa<ScalableVectorType>(Ty)) {
+    const unsigned EltSize = DL.getTypeSizeInBits(Ty->getElementType());
+    const unsigned MinSize = DL.getTypeSizeInBits(Ty).getKnownMinValue();
+    // Estimate the runtime register width from the tuning vscale; this keeps
+    // the VL estimate consistent with getVScaleForTuning-based comparisons.
+    const unsigned VectorBits = *getVScaleForTuning() * RISCV::RVVBitsPerBlock;
+    return RISCVTargetLowering::computeVLMAX(VectorBits, EltSize, MinSize);
+  }
+  // Fixed-length vectors: VL is simply the element count.
+  return cast<FixedVectorType>(Ty)->getNumElements();
+}
+
+InstructionCost
+RISCVTTIImpl::getMinMaxReductionCost(VectorType *Ty, VectorType *CondTy,
+                                     bool IsUnsigned,
+                                     TTI::TargetCostKind CostKind) {
+  // Fixed vectors without RVV fixed-length support go to the base model.
+  if (isa<FixedVectorType>(Ty) && !ST->useRVVForFixedLengthVectors())
+    return BaseT::getMinMaxReductionCost(Ty, CondTy, IsUnsigned, CostKind);
+
+  // Skip if scalar size of Ty is bigger than ELEN.
+  if (Ty->getScalarSizeInBits() > ST->getELEN())
+    return BaseT::getMinMaxReductionCost(Ty, CondTy, IsUnsigned, CostKind);
+
+  std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
+  if (Ty->getElementType()->isIntegerTy(1))
+    // vcpop sequences, see vreduction-mask.ll. umax, smin actually only
+    // cost 2, but we don't have enough info here so we slightly over cost.
+    return (LT.first - 1) + 3;
+
+  // IR Reduction is composed by two vmv and one rvv reduction instruction.
+  InstructionCost BaseCost = 2;
+  unsigned VL = getEstimatedVLFor(Ty);
+  // log2(VL) models the tree-reduction depth on top of the split cost.
+  return (LT.first - 1) + BaseCost + Log2_32_Ceil(VL);
+}
+
+InstructionCost
+RISCVTTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
+                                         std::optional<FastMathFlags> FMF,
+                                         TTI::TargetCostKind CostKind) {
+  // Fixed vectors without RVV fixed-length support go to the base model.
+  if (isa<FixedVectorType>(Ty) && !ST->useRVVForFixedLengthVectors())
+    return BaseT::getArithmeticReductionCost(Opcode, Ty, FMF, CostKind);
+
+  // Skip if scalar size of Ty is bigger than ELEN.
+  if (Ty->getScalarSizeInBits() > ST->getELEN())
+    return BaseT::getArithmeticReductionCost(Opcode, Ty, FMF, CostKind);
+
+  int ISD = TLI->InstructionOpcodeToISD(Opcode);
+  assert(ISD && "Invalid opcode");
+
+  // Only these reduction opcodes are modeled here; others fall back.
+  if (ISD != ISD::ADD && ISD != ISD::OR && ISD != ISD::XOR && ISD != ISD::AND &&
+      ISD != ISD::FADD)
+    return BaseT::getArithmeticReductionCost(Opcode, Ty, FMF, CostKind);
+
+  std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
+  if (Ty->getElementType()->isIntegerTy(1))
+    // vcpop sequences, see vreduction-mask.ll
+    return (LT.first - 1) + (ISD == ISD::AND ? 3 : 2);
+
+  // IR Reduction is composed by two vmv and one rvv reduction instruction.
+  InstructionCost BaseCost = 2;
+  unsigned VL = getEstimatedVLFor(Ty);
+  // Ordered FP reductions are sequential (linear in VL); unordered ones can
+  // be done as a log2(VL)-deep tree.
+  if (TTI::requiresOrderedReduction(FMF))
+    return (LT.first - 1) + BaseCost + VL;
+  return (LT.first - 1) + BaseCost + Log2_32_Ceil(VL);
+}
+
+InstructionCost RISCVTTIImpl::getExtendedReductionCost(
+    unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *ValTy,
+    std::optional<FastMathFlags> FMF, TTI::TargetCostKind CostKind) {
+  // Fixed vectors without RVV fixed-length support go to the base model.
+  if (isa<FixedVectorType>(ValTy) && !ST->useRVVForFixedLengthVectors())
+    return BaseT::getExtendedReductionCost(Opcode, IsUnsigned, ResTy, ValTy,
+                                           FMF, CostKind);
+
+  // Skip if scalar size of ResTy is bigger than ELEN.
+  if (ResTy->getScalarSizeInBits() > ST->getELEN())
+    return BaseT::getExtendedReductionCost(Opcode, IsUnsigned, ResTy, ValTy,
+                                           FMF, CostKind);
+
+  // Only extending add reductions (integer and FP) are modeled here.
+  if (Opcode != Instruction::Add && Opcode != Instruction::FAdd)
+    return BaseT::getExtendedReductionCost(Opcode, IsUnsigned, ResTy, ValTy,
+                                           FMF, CostKind);
+
+  std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
+
+  // Only a single widening step (result exactly twice the legalized element
+  // width) is handled; anything else falls back to the base implementation.
+  if (ResTy->getScalarSizeInBits() != 2 * LT.second.getScalarSizeInBits())
+    return BaseT::getExtendedReductionCost(Opcode, IsUnsigned, ResTy, ValTy,
+                                           FMF, CostKind);
+
+  return (LT.first - 1) +
+         getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind);
+}
+
+InstructionCost RISCVTTIImpl::getStoreImmCost(Type *Ty,
+                                              TTI::OperandValueInfo OpInfo,
+                                              TTI::TargetCostKind CostKind) {
+  assert(OpInfo.isConstant() && "non constant operand?");
+  if (!isa<VectorType>(Ty))
+    // FIXME: We need to account for immediate materialization here, but doing
+    // a decent job requires more knowledge about the immediate than we
+    // currently have here.
+    return 0;
+
+  if (OpInfo.isUniform())
+    // vmv.x.i, vmv.v.x, or vfmv.v.f
+    // We ignore the cost of the scalar constant materialization to be consistent
+    // with how we treat scalar constants themselves just above.
+    return 1;
+
+  // Non-uniform constant vectors are materialized via a constant-pool load.
+  // Add a cost of address generation + the cost of the vector load. The
+  // address is expected to be a PC relative offset to a constant pool entry
+  // using auipc/addi.
+  return 2 + getMemoryOpCost(Instruction::Load, Ty, DL.getABITypeAlign(Ty),
+                             /*AddressSpace=*/0, CostKind);
+}
+
+
+InstructionCost RISCVTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
+                                              MaybeAlign Alignment,
+                                              unsigned AddressSpace,
+                                              TTI::TargetCostKind CostKind,
+                                              TTI::OperandValueInfo OpInfo,
+                                              const Instruction *I) {
+  InstructionCost Cost = 0;
+  // Stores of constants additionally pay for materializing the immediate,
+  // on top of the generic memory-operation cost from the base model.
+  if (Opcode == Instruction::Store && OpInfo.isConstant())
+    Cost += getStoreImmCost(Src, OpInfo, CostKind);
+  return Cost + BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
+                                       CostKind, OpInfo, I);
+}
+
+InstructionCost RISCVTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
+                                                 Type *CondTy,
+                                                 CmpInst::Predicate VecPred,
+                                                 TTI::TargetCostKind CostKind,
+                                                 const Instruction *I) {
+  // Only the reciprocal-throughput cost kind is modeled here.
+  if (CostKind != TTI::TCK_RecipThroughput)
+    return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
+                                     I);
+
+  if (isa<FixedVectorType>(ValTy) && !ST->useRVVForFixedLengthVectors())
+    return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
+                                     I);
+
+  // Skip if scalar size of ValTy is bigger than ELEN.
+  if (ValTy->isVectorTy() && ValTy->getScalarSizeInBits() > ST->getELEN())
+    return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
+                                     I);
+
+  if (Opcode == Instruction::Select && ValTy->isVectorTy()) {
+    std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
+    if (CondTy->isVectorTy()) {
+      if (ValTy->getScalarSizeInBits() == 1) {
+        // Mask-typed select needs mask-register logic ops:
+        // vmandn.mm v8, v8, v9
+        // vmand.mm v9, v0, v9
+        // vmor.mm v0, v9, v8
+        return LT.first * 3;
+      }
+      // vselect and max/min are supported natively.
+      return LT.first * 1;
+    }
+
+    if (ValTy->getScalarSizeInBits() == 1) {
+      // Scalar condition selecting mask vectors: splat + compare + mask logic.
+      // vmv.v.x v9, a0
+      // vmsne.vi v9, v9, 0
+      // vmandn.mm v8, v8, v9
+      // vmand.mm v9, v0, v9
+      // vmor.mm v0, v9, v8
+      return LT.first * 5;
+    }
+
+    // Scalar condition selecting data vectors: splat + compare + merge.
+    // vmv.v.x v10, a0
+    // vmsne.vi v0, v10, 0
+    // vmerge.vvm v8, v9, v8, v0
+    return LT.first * 3;
+  }
+
+  if ((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
+      ValTy->isVectorTy()) {
+    std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
+
+    // Support natively.
+    if (CmpInst::isIntPredicate(VecPred))
+      return LT.first * 1;
+
+    // If we do not support the input floating point vector type, use the base
+    // one which will calculate as:
+    // ScalarizeCost + Num * Cost for fixed vector,
+    // InvalidCost for scalable vector.
+    if ((ValTy->getScalarSizeInBits() == 16 && !ST->hasVInstructionsF16()) ||
+        (ValTy->getScalarSizeInBits() == 32 && !ST->hasVInstructionsF32()) ||
+        (ValTy->getScalarSizeInBits() == 64 && !ST->hasVInstructionsF64()))
+      return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
+                                       I);
+    switch (VecPred) {
+    // Support natively.
+    case CmpInst::FCMP_OEQ:
+    case CmpInst::FCMP_OGT:
+    case CmpInst::FCMP_OGE:
+    case CmpInst::FCMP_OLT:
+    case CmpInst::FCMP_OLE:
+    case CmpInst::FCMP_UNE:
+      return LT.first * 1;
+    // TODO: Other comparisons?
+    default:
+      break;
+    }
+  }
+
+  // TODO: Add cost for scalar type.
+
+  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
+}
+
+InstructionCost RISCVTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
+                                                 TTI::TargetCostKind CostKind,
+                                                 unsigned Index, Value *Op0,
+                                                 Value *Op1) {
+  assert(Val->isVectorTy() && "This must be a vector type");
+
+  // Only extract/insert element are costed here; other vector instructions
+  // go to the base implementation.
+  if (Opcode != Instruction::ExtractElement &&
+      Opcode != Instruction::InsertElement)
+    return BaseT::getVectorInstrCost(Opcode, Val, CostKind, Index, Op0, Op1);
+
+  // Legalize the type.
+  std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Val);
+
+  // This type is legalized to a scalar type.
+  if (!LT.second.isVector())
+    return 0;
+
+  // For unsupported scalable vector.
+  if (LT.second.isScalableVector() && !LT.first.isValid())
+    return LT.first;
+
+  if (!isTypeLegal(Val))
+    return BaseT::getVectorInstrCost(Opcode, Val, CostKind, Index, Op0, Op1);
+
+  // In RVV, we could use vslidedown + vmv.x.s to extract element from vector
+  // and vslideup + vmv.s.x to insert element to vector.
+  unsigned BaseCost = 1;
+  // When insertelement we should add the index with 1 as the input of vslideup.
+  unsigned SlideCost = Opcode == Instruction::InsertElement ? 2 : 1;
+
+  // Index == -1U means the index is unknown; otherwise refine the slide cost.
+  if (Index != -1U) {
+    // The type may be split. For fixed-width vectors we can normalize the
+    // index to the new type.
+    if (LT.second.isFixedLengthVector()) {
+      unsigned Width = LT.second.getVectorNumElements();
+      Index = Index % Width;
+    }
+
+    // We could extract/insert the first element without vslidedown/vslideup.
+    if (Index == 0)
+      SlideCost = 0;
+    else if (Opcode == Instruction::InsertElement)
+      SlideCost = 1; // With a constant index, we do not need to use addi.
+  }
+
+  // Mask vector extract/insert element is different from normal case.
+  if (Val->getScalarSizeInBits() == 1) {
+    // For extractelement, we need the following instructions:
+    // vmv.v.i v8, 0
+    // vmerge.vim v8, v8, 1, v0
+    // vsetivli zero, 1, e8, m2, ta, mu (not count)
+    // vslidedown.vx v8, v8, a0
+    // vmv.x.s a0, v8
+
+    // For insertelement, we need the following instructions:
+    // vsetvli a2, zero, e8, m1, ta, mu (not count)
+    // vmv.s.x v8, a0
+    // vmv.v.i v9, 0
+    // vmerge.vim v9, v9, 1, v0
+    // addi a0, a1, 1
+    // vsetvli zero, a0, e8, m1, tu, mu (not count)
+    // vslideup.vx v9, v8, a1
+    // vsetvli a0, zero, e8, m1, ta, mu (not count)
+    // vand.vi v8, v9, 1
+    // vmsne.vi v0, v8, 0
+
+    // TODO: should we count these special vsetvlis?
+    BaseCost = Opcode == Instruction::InsertElement ? 5 : 3;
+  }
+  // Extract i64 in the target that has XLEN=32 need more instruction.
+  if (Val->getScalarType()->isIntegerTy() &&
+      ST->getXLen() < Val->getScalarSizeInBits()) {
+    // For extractelement, we need the following instructions:
+    // vsetivli zero, 1, e64, m1, ta, mu (not count)
+    // vslidedown.vx v8, v8, a0
+    // vmv.x.s a0, v8
+    // li a1, 32
+    // vsrl.vx v8, v8, a1
+    // vmv.x.s a1, v8
+
+    // For insertelement, we need the following instructions:
+    // vsetivli zero, 2, e32, m4, ta, mu (not count)
+    // vmv.v.i v12, 0
+    // vslide1up.vx v16, v12, a1
+    // vslide1up.vx v12, v16, a0
+    // addi a0, a2, 1
+    // vsetvli zero, a0, e64, m4, tu, mu (not count)
+    // vslideup.vx v8, v12, a2
+
+    // TODO: should we count these special vsetvlis?
+    BaseCost = Opcode == Instruction::InsertElement ? 3 : 4;
+  }
+  return BaseCost + SlideCost;
+}
+
+InstructionCost RISCVTTIImpl::getArithmeticInstrCost(
+    unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
+    TTI::OperandValueInfo Op1Info, TTI::OperandValueInfo Op2Info,
+    ArrayRef<const Value *> Args, const Instruction *CxtI) {
+
+  // TODO: Handle more cost kinds.
+  if (CostKind != TTI::TCK_RecipThroughput)
+    return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, Op2Info,
+                                         Args, CxtI);
+
+  if (isa<FixedVectorType>(Ty) && !ST->useRVVForFixedLengthVectors())
+    return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, Op2Info,
+                                         Args, CxtI);
+
+  // Skip if scalar size of Ty is bigger than ELEN.
+  if (isa<VectorType>(Ty) && Ty->getScalarSizeInBits() > ST->getELEN())
+    return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, Op2Info,
+                                         Args, CxtI);
+
+  // Legalize the type.
+  std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
+
+  // TODO: Handle scalar type.
+  if (!LT.second.isVector())
+    return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, Op2Info,
+                                         Args, CxtI);
+
+
+  // Cost of materializing a constant operand of this instruction.
+  auto getConstantMatCost =
+    [&](unsigned Operand, TTI::OperandValueInfo OpInfo) -> InstructionCost {
+    if (OpInfo.isUniform() && TLI->canSplatOperand(Opcode, Operand))
+      // Two sub-cases:
+      // * Has a 5 bit immediate operand which can be splatted.
+      // * Has a larger immediate which must be materialized in scalar register
+      // We return 0 for both as we currently ignore the cost of materializing
+      // scalar constants in GPRs.
+      return 0;
+
+    // Add a cost of address generation + the cost of the vector load. The
+    // address is expected to be a PC relative offset to a constant pool entry
+    // using auipc/addi.
+    return 2 + getMemoryOpCost(Instruction::Load, Ty, DL.getABITypeAlign(Ty),
+                               /*AddressSpace=*/0, CostKind);
+  };
+
+  // Add the cost of materializing any constant vectors required.
+  InstructionCost ConstantMatCost = 0;
+  if (Op1Info.isConstant())
+    ConstantMatCost += getConstantMatCost(0, Op1Info);
+  if (Op2Info.isConstant())
+    ConstantMatCost += getConstantMatCost(1, Op2Info);
+
+  switch (TLI->InstructionOpcodeToISD(Opcode)) {
+  case ISD::ADD:
+  case ISD::SUB:
+  case ISD::AND:
+  case ISD::OR:
+  case ISD::XOR:
+  case ISD::SHL:
+  case ISD::SRL:
+  case ISD::SRA:
+  case ISD::MUL:
+  case ISD::MULHS:
+  case ISD::MULHU:
+  case ISD::FADD:
+  case ISD::FSUB:
+  case ISD::FMUL:
+  case ISD::FNEG: {
+    // Single-instruction ops: one vector op per legalized part, scaled by
+    // the LMUL cost of the legalized type.
+    return ConstantMatCost + getLMULCost(LT.second) * LT.first * 1;
+  }
+  default:
+    return ConstantMatCost +
+           BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, Op2Info,
+                                         Args, CxtI);
+  }
+}
+
+void RISCVTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
+                                           TTI::UnrollingPreferences &UP,
+                                           OptimizationRemarkEmitter *ORE) {
+  // TODO: More tuning on benchmarks and metrics with changes as needed
+  // would apply to all settings below to enable performance.
+
+
+  // Subtargets that keep the default unrolling behavior use the base policy.
+  if (ST->enableDefaultUnroll())
+    return BasicTTIImplBase::getUnrollingPreferences(L, SE, UP, ORE);
+
+  // Enable Upper bound unrolling universally, not dependent upon the
+  // conditions below.
+  UP.UpperBound = true;
+
+  // Disable loop unrolling for Oz and Os.
+  UP.OptSizeThreshold = 0;
+  UP.PartialOptSizeThreshold = 0;
+  if (L->getHeader()->getParent()->hasOptSize())
+    return;
+
+  SmallVector<BasicBlock *, 4> ExitingBlocks;
+  L->getExitingBlocks(ExitingBlocks);
+  LLVM_DEBUG(dbgs() << "Loop has:\n"
+                    << "Blocks: " << L->getNumBlocks() << "\n"
+                    << "Exit blocks: " << ExitingBlocks.size() << "\n");
+
+  // Only allow another exit other than the latch. This acts as an early exit
+  // as it mirrors the profitability calculation of the runtime unroller.
+  if (ExitingBlocks.size() > 2)
+    return;
+
+  // Limit the CFG of the loop body for targets with a branch predictor.
+  // Allowing 4 blocks permits if-then-else diamonds in the body.
+  if (L->getNumBlocks() > 4)
+    return;
+
+  // Don't unroll vectorized loops, including the remainder loop
+  if (getBooleanLoopAttribute(L, "llvm.loop.isvectorized"))
+    return;
+
+  // Scan the loop: don't unroll loops with calls as this could prevent
+  // inlining.
+  InstructionCost Cost = 0;
+  for (auto *BB : L->getBlocks()) {
+    for (auto &I : *BB) {
+      // Initial setting - Don't unroll loops containing vectorized
+      // instructions.
+      if (I.getType()->isVectorTy())
+        return;
+
+      if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
+        if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
+          // Intrinsic-like calls that never become real calls are fine.
+          if (!isLoweredToCall(F))
+            continue;
+        }
+        return;
+      }
+
+      SmallVector<const Value *> Operands(I.operand_values());
+      Cost += getInstructionCost(&I, Operands,
+                                 TargetTransformInfo::TCK_SizeAndLatency);
+    }
+  }
+
+  LLVM_DEBUG(dbgs() << "Cost of loop: " << Cost << "\n");
+
+  UP.Partial = true;
+  UP.Runtime = true;
+  UP.UnrollRemainder = true;
+  UP.UnrollAndJam = true;
+  UP.UnrollAndJamInnerLoopThreshold = 60;
+
+  // Force unrolling small loops can be very useful because of the branch
+  // taken cost of the backedge.
+  if (Cost < 12)
+    UP.Force = true;
+}
+
+void RISCVTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
+                                         TTI::PeelingPreferences &PP) {
+  // No RISC-V specific peeling policy; defer entirely to the base model.
+  BaseT::getPeelingPreferences(L, SE, PP);
+}
+
+unsigned RISCVTTIImpl::getRegUsageForType(Type *Ty) {
+  TypeSize Size = DL.getTypeSizeInBits(Ty);
+  if (Ty->isVectorTy()) {
+    // Scalable vectors occupy ceil(minbits / RVVBitsPerBlock) registers.
+    if (Size.isScalable() && ST->hasVInstructions())
+      return divideCeil(Size.getKnownMinValue(), RISCV::RVVBitsPerBlock);
+
+    // Fixed-length vectors on RVV: measured against the minimum VLEN.
+    if (ST->useRVVForFixedLengthVectors())
+      return divideCeil(Size, ST->getRealMinVLen());
+  }
+
+  return BaseT::getRegUsageForType(Ty);
+}
+
+unsigned RISCVTTIImpl::getMaximumVF(unsigned ElemWidth, unsigned Opcode) const {
+  // This interface is currently only used by SLP. Returning 1 (which is the
+  // default value for SLPMaxVF) disables SLP. We currently have a cost modeling
+  // problem w/ constant materialization which causes SLP to perform majorly
+  // unprofitable transformations.
+  // TODO: Figure out constant materialization cost modeling and remove.
+  return SLPMaxVF;
+}
+
+bool RISCVTTIImpl::isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
+                                 const TargetTransformInfo::LSRCost &C2) {
+  // RISCV specific here are "instruction number 1st priority".
+  // Lexicographic comparison: fewer instructions wins first, then fewer
+  // registers, and so on down the tuple.
+  return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost,
+                  C1.NumIVMuls, C1.NumBaseAdds,
+                  C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
+         std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost,
+                  C2.NumIVMuls, C2.NumBaseAdds,
+                  C2.ScaleCost, C2.ImmCost, C2.SetupCost);
+}
diff --git a/contrib/libs/llvm16/lib/Target/RISCV/RISCVTargetTransformInfo.h b/contrib/libs/llvm16/lib/Target/RISCV/RISCVTargetTransformInfo.h
new file mode 100644
index 0000000000..2bde679c18
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/RISCV/RISCVTargetTransformInfo.h
@@ -0,0 +1,346 @@
+//===- RISCVTargetTransformInfo.h - RISC-V specific TTI ---------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file defines a TargetTransformInfo::Concept conforming object specific
+/// to the RISC-V target machine. It uses the target's detailed information to
+/// provide more precise answers to certain TTI queries, while letting the
+/// target independent and default TTI implementations handle the rest.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_RISCV_RISCVTARGETTRANSFORMINFO_H
+#define LLVM_LIB_TARGET_RISCV_RISCVTARGETTRANSFORMINFO_H
+
+#include "RISCVSubtarget.h"
+#include "RISCVTargetMachine.h"
+#include "llvm/Analysis/IVDescriptors.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/CodeGen/BasicTTIImpl.h"
+#include "llvm/IR/Function.h"
+#include <optional>
+
+namespace llvm {
+
+class RISCVTTIImpl : public BasicTTIImplBase<RISCVTTIImpl> {
+  using BaseT = BasicTTIImplBase<RISCVTTIImpl>;
+  using TTI = TargetTransformInfo;
+
+  friend BaseT;
+
+  // Cached subtarget and lowering info; also exposed to BaseT via the
+  // getST()/getTLI() accessors below.
+  const RISCVSubtarget *ST;
+  const RISCVTargetLowering *TLI;
+
+  const RISCVSubtarget *getST() const { return ST; }
+  const RISCVTargetLowering *getTLI() const { return TLI; }
+
+  /// This function returns an estimate for VL to be used in VL based terms
+  /// of the cost model. For fixed length vectors, this is simply the
+  /// vector length. For scalable vectors, we return results consistent
+  /// with getVScaleForTuning under the assumption that clients are also
+  /// using that when comparing costs between scalar and vector representation.
+  /// This does unfortunately mean that we can both undershoot and overshot
+  /// the true cost significantly if getVScaleForTuning is wildly off for the
+  /// actual target hardware.
+  unsigned getEstimatedVLFor(VectorType *Ty);
+
+  /// Return the cost of LMUL. The larger the LMUL, the higher the cost.
+  InstructionCost getLMULCost(MVT VT);
+
+public:
+  explicit RISCVTTIImpl(const RISCVTargetMachine *TM, const Function &F)
+      : BaseT(TM, F.getParent()->getDataLayout()), ST(TM->getSubtargetImpl(F)),
+        TLI(ST->getTargetLowering()) {}
+
+  /// Return the cost of materializing an immediate for a value operand of
+  /// a store instruction.
+  InstructionCost getStoreImmCost(Type *VecTy, TTI::OperandValueInfo OpInfo,
+                                  TTI::TargetCostKind CostKind);
+
+  InstructionCost getIntImmCost(const APInt &Imm, Type *Ty,
+                                TTI::TargetCostKind CostKind);
+  InstructionCost getIntImmCostInst(unsigned Opcode, unsigned Idx,
+                                    const APInt &Imm, Type *Ty,
+                                    TTI::TargetCostKind CostKind,
+                                    Instruction *Inst = nullptr);
+  InstructionCost getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
+                                      const APInt &Imm, Type *Ty,
+                                      TTI::TargetCostKind CostKind);
+
+  TargetTransformInfo::PopcntSupportKind getPopcntSupport(unsigned TyWidth);
+
+  bool shouldExpandReduction(const IntrinsicInst *II) const;
+  // Scalable vectorization is available exactly when the V extension is.
+  bool supportsScalableVectors() const { return ST->hasVInstructions(); }
+  bool enableScalableVectorization() const { return ST->hasVInstructions(); }
+  PredicationStyle emitGetActiveLaneMask() const {
+    return ST->hasVInstructions() ? PredicationStyle::Data
+                                  : PredicationStyle::None;
+  }
+  std::optional<unsigned> getMaxVScale() const;
+  std::optional<unsigned> getVScaleForTuning() const;
+
+  TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const;
+
+  unsigned getRegUsageForType(Type *Ty);
+
+  unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const;
+
+  bool preferEpilogueVectorization() const {
+    // Epilogue vectorization is usually unprofitable - tail folding or
+    // a smaller VF would have been better. This a blunt hammer - we
+    // should re-examine this once vectorization is better tuned.
+    return false;
+  }
+
+  InstructionCost getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
+                                        Align Alignment, unsigned AddressSpace,
+                                        TTI::TargetCostKind CostKind);
+
+  void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
+                               TTI::UnrollingPreferences &UP,
+                               OptimizationRemarkEmitter *ORE);
+
+  void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
+                             TTI::PeelingPreferences &PP);
+
+  unsigned getMinVectorRegisterBitWidth() const {
+    // 16 is the narrowest fixed-length vector handled when RVV fixed-length
+    // lowering is enabled; 0 otherwise.
+    return ST->useRVVForFixedLengthVectors() ? 16 : 0;
+  }
+
+  InstructionCost getSpliceCost(VectorType *Tp, int Index);
+  InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp,
+                                 ArrayRef<int> Mask,
+                                 TTI::TargetCostKind CostKind, int Index,
+                                 VectorType *SubTp,
+                                 ArrayRef<const Value *> Args = std::nullopt);
+
+  InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
+                                        TTI::TargetCostKind CostKind);
+
+  InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
+                                         const Value *Ptr, bool VariableMask,
+                                         Align Alignment,
+                                         TTI::TargetCostKind CostKind,
+                                         const Instruction *I);
+
+  InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
+                                   TTI::CastContextHint CCH,
+                                   TTI::TargetCostKind CostKind,
+                                   const Instruction *I = nullptr);
+
+  InstructionCost getMinMaxReductionCost(VectorType *Ty, VectorType *CondTy,
+                                         bool IsUnsigned,
+                                         TTI::TargetCostKind CostKind);
+
+  InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
+                                             std::optional<FastMathFlags> FMF,
+                                             TTI::TargetCostKind CostKind);
+
+  InstructionCost getExtendedReductionCost(unsigned Opcode, bool IsUnsigned,
+                                           Type *ResTy, VectorType *ValTy,
+                                           std::optional<FastMathFlags> FMF,
+                                           TTI::TargetCostKind CostKind);
+
+  InstructionCost
+  getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment,
+                  unsigned AddressSpace, TTI::TargetCostKind CostKind,
+                  TTI::OperandValueInfo OpdInfo = {TTI::OK_AnyValue, TTI::OP_None},
+                  const Instruction *I = nullptr);
+
+  InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
+                                     CmpInst::Predicate VecPred,
+                                     TTI::TargetCostKind CostKind,
+                                     const Instruction *I = nullptr);
+
+  using BaseT::getVectorInstrCost;
+  InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val,
+                                     TTI::TargetCostKind CostKind,
+                                     unsigned Index, Value *Op0, Value *Op1);
+
+  InstructionCost getArithmeticInstrCost(
+      unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
+      TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None},
+      TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None},
+      ArrayRef<const Value *> Args = ArrayRef<const Value *>(),
+      const Instruction *CxtI = nullptr);
+
+  bool isElementTypeLegalForScalableVector(Type *Ty) const {
+    return TLI->isLegalElementTypeForRVV(Ty);
+  }
+
+  // Shared legality check for masked loads and stores.
+  bool isLegalMaskedLoadStore(Type *DataType, Align Alignment) {
+    if (!ST->hasVInstructions())
+      return false;
+
+    // Only support fixed vectors if we know the minimum vector size.
+    if (isa<FixedVectorType>(DataType) && !ST->useRVVForFixedLengthVectors())
+      return false;
+
+    // Don't allow elements larger than the ELEN.
+    // FIXME: How to limit for scalable vectors?
+    if (isa<FixedVectorType>(DataType) &&
+        DataType->getScalarSizeInBits() > ST->getELEN())
+      return false;
+
+    // Require at least element-sized alignment.
+    if (Alignment <
+        DL.getTypeStoreSize(DataType->getScalarType()).getFixedValue())
+      return false;
+
+    return TLI->isLegalElementTypeForRVV(DataType->getScalarType());
+  }
+
+  bool isLegalMaskedLoad(Type *DataType, Align Alignment) {
+    return isLegalMaskedLoadStore(DataType, Alignment);
+  }
+  bool isLegalMaskedStore(Type *DataType, Align Alignment) {
+    return isLegalMaskedLoadStore(DataType, Alignment);
+  }
+
+  // Shared legality check for masked gathers and scatters.
+  bool isLegalMaskedGatherScatter(Type *DataType, Align Alignment) {
+    if (!ST->hasVInstructions())
+      return false;
+
+    // Only support fixed vectors if we know the minimum vector size.
+    if (isa<FixedVectorType>(DataType) && !ST->useRVVForFixedLengthVectors())
+      return false;
+
+    // Don't allow elements larger than the ELEN.
+    // FIXME: How to limit for scalable vectors?
+    if (isa<FixedVectorType>(DataType) &&
+        DataType->getScalarSizeInBits() > ST->getELEN())
+      return false;
+
+    if (Alignment <
+        DL.getTypeStoreSize(DataType->getScalarType()).getFixedValue())
+      return false;
+
+    return TLI->isLegalElementTypeForRVV(DataType->getScalarType());
+  }
+
+  bool isLegalMaskedGather(Type *DataType, Align Alignment) {
+    return isLegalMaskedGatherScatter(DataType, Alignment);
+  }
+  bool isLegalMaskedScatter(Type *DataType, Align Alignment) {
+    return isLegalMaskedGatherScatter(DataType, Alignment);
+  }
+
+  bool forceScalarizeMaskedGather(VectorType *VTy, Align Alignment) {
+    // Scalarize masked gather for RV64 if EEW=64 indices aren't supported.
+    return ST->is64Bit() && !ST->hasVInstructionsI64();
+  }
+
+  bool forceScalarizeMaskedScatter(VectorType *VTy, Align Alignment) {
+    // Scalarize masked scatter for RV64 if EEW=64 indices aren't supported.
+    return ST->is64Bit() && !ST->hasVInstructionsI64();
+  }
+
+  /// \returns How the target needs this vector-predicated operation to be
+  /// transformed.
+  TargetTransformInfo::VPLegalization
+  getVPLegalizationStrategy(const VPIntrinsic &PI) const {
+    using VPLegalization = TargetTransformInfo::VPLegalization;
+    if (!ST->hasVInstructions() ||
+        (PI.getIntrinsicID() == Intrinsic::vp_reduce_mul &&
+         cast<VectorType>(PI.getArgOperand(1)->getType())
+                 ->getElementType()
+                 ->getIntegerBitWidth() != 1))
+      return VPLegalization(VPLegalization::Discard, VPLegalization::Convert);
+    return VPLegalization(VPLegalization::Legal, VPLegalization::Legal);
+  }
+
+  bool isLegalToVectorizeReduction(const RecurrenceDescriptor &RdxDesc,
+                                   ElementCount VF) const {
+    if (!VF.isScalable())
+      return true;
+
+    Type *Ty = RdxDesc.getRecurrenceType();
+    if (!TLI->isLegalElementTypeForRVV(Ty))
+      return false;
+
+    switch (RdxDesc.getRecurrenceKind()) {
+    case RecurKind::Add:
+    case RecurKind::FAdd:
+    case RecurKind::And:
+    case RecurKind::Or:
+    case RecurKind::Xor:
+    case RecurKind::SMin:
+    case RecurKind::SMax:
+    case RecurKind::UMin:
+    case RecurKind::UMax:
+    case RecurKind::FMin:
+    case RecurKind::FMax:
+    case RecurKind::SelectICmp:
+    case RecurKind::SelectFCmp:
+    case RecurKind::FMulAdd:
+      return true;
+    default:
+      return false;
+    }
+  }
+
+  unsigned getMaxInterleaveFactor(unsigned VF) {
+    // If the loop will not be vectorized, don't interleave the loop.
+    // Let regular unroll to unroll the loop.
+    return VF == 1 ? 1 : ST->getMaxInterleaveFactor();
+  }
+
+  // Register-class IDs used by the register-count/class queries below.
+  enum RISCVRegisterClass { GPRRC, FPRRC, VRRC };
+  unsigned getNumberOfRegisters(unsigned ClassID) const {
+    switch (ClassID) {
+    case RISCVRegisterClass::GPRRC:
+      // 31 = 32 GPR - x0 (zero register)
+      // FIXME: Should we exclude fixed registers like SP, TP or GP?
+      return 31;
+    case RISCVRegisterClass::FPRRC:
+      if (ST->hasStdExtF())
+        return 32;
+      return 0;
+    case RISCVRegisterClass::VRRC:
+      // Although there are 32 vector registers, v0 is special in that it is the
+      // only register that can be used to hold a mask.
+      // FIXME: Should we conservatively return 31 as the number of usable
+      // vector registers?
+      return ST->hasVInstructions() ? 32 : 0;
+    }
+    llvm_unreachable("unknown register class");
+  }
+
+  unsigned getRegisterClassForType(bool Vector, Type *Ty = nullptr) const {
+    if (Vector)
+      return RISCVRegisterClass::VRRC;
+    if (!Ty)
+      return RISCVRegisterClass::GPRRC;
+
+    // Scalar FP types map to FPR only when the matching extension exists.
+    Type *ScalarTy = Ty->getScalarType();
+    if ((ScalarTy->isHalfTy() && ST->hasStdExtZfhOrZfhmin()) ||
+        (ScalarTy->isFloatTy() && ST->hasStdExtF()) ||
+        (ScalarTy->isDoubleTy() && ST->hasStdExtD())) {
+      return RISCVRegisterClass::FPRRC;
+    }
+
+    return RISCVRegisterClass::GPRRC;
+  }
+
+  const char *getRegisterClassName(unsigned ClassID) const {
+    switch (ClassID) {
+    case RISCVRegisterClass::GPRRC:
+      return "RISCV::GPRRC";
+    case RISCVRegisterClass::FPRRC:
+      return "RISCV::FPRRC";
+    case RISCVRegisterClass::VRRC:
+      return "RISCV::VRRC";
+    }
+    llvm_unreachable("unknown register class");
+  }
+
+  bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
+                     const TargetTransformInfo::LSRCost &C2);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_LIB_TARGET_RISCV_RISCVTARGETTRANSFORMINFO_H
diff --git a/contrib/libs/llvm16/lib/Target/WebAssembly/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Target/WebAssembly/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..4ea09caa17
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/WebAssembly/.yandex_meta/licenses.list.txt
@@ -0,0 +1,13 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+
+====================COPYRIGHT====================
+ IRBuilder<> IRB(C);
+ SmallVector<Instruction *, 64> ToErase;
+ // Vector of %setjmpTable values
diff --git a/contrib/libs/llvm16/lib/Target/WebAssembly/AsmParser/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Target/WebAssembly/AsmParser/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/WebAssembly/AsmParser/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Target/WebAssembly/Disassembler/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Target/WebAssembly/Disassembler/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/WebAssembly/Disassembler/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Target/WebAssembly/MCTargetDesc/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Target/WebAssembly/MCTargetDesc/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/WebAssembly/MCTargetDesc/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Target/WebAssembly/TargetInfo/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Target/WebAssembly/TargetInfo/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/WebAssembly/TargetInfo/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Target/WebAssembly/Utils/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Target/WebAssembly/Utils/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/WebAssembly/Utils/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Target/X86/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Target/X86/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/X86/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Target/X86/AsmParser/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Target/X86/AsmParser/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/X86/AsmParser/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Target/X86/Disassembler/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Target/X86/Disassembler/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/X86/Disassembler/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Target/X86/MCA/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Target/X86/MCA/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/X86/MCA/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Target/X86/MCTargetDesc/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Target/X86/MCTargetDesc/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/X86/MCTargetDesc/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Target/X86/TargetInfo/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Target/X86/TargetInfo/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Target/X86/TargetInfo/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/TargetParser/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/TargetParser/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..d4eeea588a
--- /dev/null
+++ b/contrib/libs/llvm16/lib/TargetParser/.yandex_meta/licenses.list.txt
@@ -0,0 +1,579 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+
+====================File: LICENSE.TXT====================
+==============================================================================
+The LLVM Project is under the Apache License v2.0 with LLVM Exceptions:
+==============================================================================
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
+---- LLVM Exceptions to the Apache 2.0 License ----
+
+As an exception, if, as a result of your compiling your source code, portions
+of this Software are embedded into an Object form of such source code, you
+may redistribute such embedded portions in such Object form without complying
+with the conditions of Sections 4(a), 4(b) and 4(d) of the License.
+
+In addition, if you combine or link compiled forms of this Software with
+software that is licensed under the GPLv2 ("Combined Software") and if a
+court of competent jurisdiction determines that the patent provision (Section
+3), the indemnity provision (Section 9) or other Section of the License
+conflicts with the conditions of the GPLv2, you may retroactively and
+prospectively choose to deem waived or otherwise exclude such Section(s) of
+the License, but only in their entirety and only with respect to the Combined
+Software.
+
+==============================================================================
+Software from third parties included in the LLVM Project:
+==============================================================================
+The LLVM Project contains third party software which is under different license
+terms. All such code will be identified clearly using at least one of two
+mechanisms:
+1) It will be in a separate directory tree with its own `LICENSE.txt` or
+ `LICENSE` file at the top containing the specific license and restrictions
+ which apply to that software, or
+2) It will contain specific license and restriction terms at the top of every
+ file.
+
+==============================================================================
+Legacy LLVM License (https://llvm.org/docs/DeveloperPolicy.html#legacy):
+==============================================================================
+University of Illinois/NCSA
+Open Source License
+
+Copyright (c) 2003-2019 University of Illinois at Urbana-Champaign.
+All rights reserved.
+
+Developed by:
+
+ LLVM Team
+
+ University of Illinois at Urbana-Champaign
+
+ http://llvm.org
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal with
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimers.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimers in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the names of the LLVM Team, University of Illinois at
+ Urbana-Champaign, nor the names of its contributors may be used to
+ endorse or promote products derived from this Software without specific
+ prior written permission.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+SOFTWARE.
+
+
+
+====================File: include/llvm/Support/LICENSE.TXT====================
+LLVM System Interface Library
+-------------------------------------------------------------------------------
+
+Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+See https://llvm.org/LICENSE.txt for license information.
+SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+
+====================File: tools/polly/LICENSE.TXT====================
+==============================================================================
+The LLVM Project is under the Apache License v2.0 with LLVM Exceptions:
+==============================================================================
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
+---- LLVM Exceptions to the Apache 2.0 License ----
+
+As an exception, if, as a result of your compiling your source code, portions
+of this Software are embedded into an Object form of such source code, you
+may redistribute such embedded portions in such Object form without complying
+with the conditions of Sections 4(a), 4(b) and 4(d) of the License.
+
+In addition, if you combine or link compiled forms of this Software with
+software that is licensed under the GPLv2 ("Combined Software") and if a
+court of competent jurisdiction determines that the patent provision (Section
+3), the indemnity provision (Section 9) or other Section of the License
+conflicts with the conditions of the GPLv2, you may retroactively and
+prospectively choose to deem waived or otherwise exclude such Section(s) of
+the License, but only in their entirety and only with respect to the Combined
+Software.
+
+==============================================================================
+Software from third parties included in the LLVM Project:
+==============================================================================
+The LLVM Project contains third party software which is under different license
+terms. All such code will be identified clearly using at least one of two
+mechanisms:
+1) It will be in a separate directory tree with its own `LICENSE.txt` or
+ `LICENSE` file at the top containing the specific license and restrictions
+ which apply to that software, or
+2) It will contain specific license and restriction terms at the top of every
+ file.
+
+==============================================================================
+Legacy LLVM License (https://llvm.org/docs/DeveloperPolicy.html#legacy):
+==============================================================================
+University of Illinois/NCSA
+Open Source License
+
+Copyright (c) 2009-2019 Polly Team
+All rights reserved.
+
+Developed by:
+
+ Polly Team
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal with
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimers.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimers in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the names of the Polly Team, copyright holders, nor the names of
+ its contributors may be used to endorse or promote products derived from
+ this Software without specific prior written permission.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+SOFTWARE.
+
+
+====================NCSA====================
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
diff --git a/contrib/libs/llvm16/lib/TextAPI/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/TextAPI/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/TextAPI/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/ToolDrivers/llvm-dlltool/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/ToolDrivers/llvm-dlltool/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/ToolDrivers/llvm-dlltool/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/ToolDrivers/llvm-lib/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/ToolDrivers/llvm-lib/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/ToolDrivers/llvm-lib/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Transforms/AggressiveInstCombine/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Transforms/AggressiveInstCombine/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Transforms/AggressiveInstCombine/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Transforms/CFGuard/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Transforms/CFGuard/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Transforms/CFGuard/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Transforms/Coroutines/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Transforms/Coroutines/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..8b00e1d08d
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Transforms/Coroutines/.yandex_meta/licenses.list.txt
@@ -0,0 +1,13 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+
+====================COPYRIGHT====================
+ IRBuilder<> Builder(C);
+ StructType *FrameTy = Shape.FrameTy;
+ Value *FramePtr = Shape.FramePtr;
diff --git a/contrib/libs/llvm16/lib/Transforms/IPO/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Transforms/IPO/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Transforms/IPO/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Transforms/InstCombine/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Transforms/InstCombine/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..966b6740f5
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Transforms/InstCombine/.yandex_meta/licenses.list.txt
@@ -0,0 +1,13 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+
+====================COPYRIGHT====================
+ // icmp <pred> iK %E, trunc(C)
+ Value *Vec;
+ ArrayRef<int> Mask;
diff --git a/contrib/libs/llvm16/lib/Transforms/Instrumentation/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Transforms/Instrumentation/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..2ad499e351
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Transforms/Instrumentation/.yandex_meta/licenses.list.txt
@@ -0,0 +1,13 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+
+====================COPYRIGHT====================
+ GV->setComdat(C);
+ // COFF doesn't allow the comdat group leader to have private linkage, so
+ // upgrade private linkage to internal linkage to produce a symbol table
diff --git a/contrib/libs/llvm16/lib/Transforms/ObjCARC/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Transforms/ObjCARC/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Transforms/ObjCARC/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Transforms/Scalar/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Transforms/Scalar/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Transforms/Scalar/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/Transforms/Utils/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Transforms/Utils/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..8830d2c6fb
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Transforms/Utils/.yandex_meta/licenses.list.txt
@@ -0,0 +1,35 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+
+====================COPYRIGHT====================
+ Type *SizeTy = M.getDataLayout().getIntPtrType(C);
+ Type *SizePtrTy = SizeTy->getPointerTo();
+ GlobalVariable *GV = new GlobalVariable(M, SizeTy, /*isConstant=*/false,
+
+
+====================COPYRIGHT====================
+ PointerType *VoidStar = Type::getInt8PtrTy(C);
+ Type *AtExitFuncArgs[] = {VoidStar};
+ FunctionType *AtExitFuncTy =
+ FunctionType::get(Type::getVoidTy(C), AtExitFuncArgs,
+ /*isVarArg=*/false);
+
+
+====================COPYRIGHT====================
+ Type *DsoHandleTy = Type::getInt8Ty(C);
+ Constant *DsoHandle = M.getOrInsertGlobal("__dso_handle", DsoHandleTy, [&] {
+ auto *GV = new GlobalVariable(M, DsoHandleTy, /*isConstant=*/true,
+
+
+====================COPYRIGHT====================
+ if (C) {
+ ValueLatticeElement CV;
+ CV.markConstant(C);
+ mergeInValue(&I, CV);
+ return;
diff --git a/contrib/libs/llvm16/lib/Transforms/Vectorize/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/Transforms/Vectorize/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/Transforms/Vectorize/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/WindowsDriver/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/WindowsDriver/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/WindowsDriver/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/WindowsManifest/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/WindowsManifest/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/WindowsManifest/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/lib/XRay/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/lib/XRay/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/lib/XRay/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/provides.pbtxt b/contrib/libs/llvm16/provides.pbtxt
new file mode 100644
index 0000000000..6d6d4574ae
--- /dev/null
+++ b/contrib/libs/llvm16/provides.pbtxt
@@ -0,0 +1,192 @@
+p { i: "llvm" x: "LLVMAArch64AsmParser" ix: true peerdir: "contrib/libs/llvm16/lib/Target/AArch64/AsmParser" }
+p { i: "llvm" x: "LLVMAArch64CodeGen" ix: true peerdir: "contrib/libs/llvm16/lib/Target/AArch64" }
+p { i: "llvm" x: "LLVMAArch64Desc" ix: true peerdir: "contrib/libs/llvm16/lib/Target/AArch64/MCTargetDesc" }
+p { i: "llvm" x: "LLVMAArch64Disassembler" ix: true peerdir: "contrib/libs/llvm16/lib/Target/AArch64/Disassembler" }
+p { i: "llvm" x: "LLVMAArch64Info" ix: true peerdir: "contrib/libs/llvm16/lib/Target/AArch64/TargetInfo" }
+p { i: "llvm" x: "LLVMAArch64Utils" ix: true peerdir: "contrib/libs/llvm16/lib/Target/AArch64/Utils" }
+p { i: "llvm" x: "LLVMARMAsmParser" ix: true peerdir: "contrib/libs/llvm16/lib/Target/ARM/AsmParser" }
+p { i: "llvm" x: "LLVMARMCodeGen" ix: true peerdir: "contrib/libs/llvm16/lib/Target/ARM" }
+p { i: "llvm" x: "LLVMARMDesc" ix: true peerdir: "contrib/libs/llvm16/lib/Target/ARM/MCTargetDesc" }
+p { i: "llvm" x: "LLVMARMDisassembler" ix: true peerdir: "contrib/libs/llvm16/lib/Target/ARM/Disassembler" }
+p { i: "llvm" x: "LLVMARMInfo" ix: true peerdir: "contrib/libs/llvm16/lib/Target/ARM/TargetInfo" }
+p { i: "llvm" x: "LLVMARMUtils" ix: true peerdir: "contrib/libs/llvm16/lib/Target/ARM/Utils" }
+p { i: "llvm" x: "LLVMAggressiveInstCombine" ix: true peerdir: "contrib/libs/llvm16/lib/Transforms/AggressiveInstCombine" }
+p { i: "llvm" x: "LLVMAnalysis" ix: true peerdir: "contrib/libs/llvm16/lib/Analysis" }
+p { i: "llvm" x: "LLVMAsmParser" ix: true peerdir: "contrib/libs/llvm16/lib/AsmParser" }
+p { i: "llvm" x: "LLVMAsmPrinter" ix: true peerdir: "contrib/libs/llvm16/lib/CodeGen/AsmPrinter" }
+p { i: "llvm" x: "LLVMBPFAsmParser" ix: true peerdir: "contrib/libs/llvm16/lib/Target/BPF/AsmParser" }
+p { i: "llvm" x: "LLVMBPFCodeGen" ix: true peerdir: "contrib/libs/llvm16/lib/Target/BPF" }
+p { i: "llvm" x: "LLVMBPFDesc" ix: true peerdir: "contrib/libs/llvm16/lib/Target/BPF/MCTargetDesc" }
+p { i: "llvm" x: "LLVMBPFDisassembler" ix: true peerdir: "contrib/libs/llvm16/lib/Target/BPF/Disassembler" }
+p { i: "llvm" x: "LLVMBPFInfo" ix: true peerdir: "contrib/libs/llvm16/lib/Target/BPF/TargetInfo" }
+p { i: "llvm" x: "LLVMBinaryFormat" ix: true peerdir: "contrib/libs/llvm16/lib/BinaryFormat" }
+p { i: "llvm" x: "LLVMBitReader" ix: true peerdir: "contrib/libs/llvm16/lib/Bitcode/Reader" }
+p { i: "llvm" x: "LLVMBitWriter" ix: true peerdir: "contrib/libs/llvm16/lib/Bitcode/Writer" }
+p { i: "llvm" x: "LLVMBitstreamReader" ix: true peerdir: "contrib/libs/llvm16/lib/Bitstream/Reader" }
+p { i: "llvm" x: "LLVMCFGuard" ix: true peerdir: "contrib/libs/llvm16/lib/Transforms/CFGuard" }
+p { i: "llvm" x: "LLVMCFIVerify" ix: true peerdir: "contrib/libs/llvm16/tools/llvm-cfi-verify/lib" }
+p { i: "llvm" x: "LLVMCodeGen" ix: true peerdir: "contrib/libs/llvm16/lib/CodeGen" }
+p { i: "llvm" x: "LLVMCore" ix: true peerdir: "contrib/libs/llvm16/lib/IR" }
+p { i: "llvm" x: "LLVMCoroutines" ix: true peerdir: "contrib/libs/llvm16/lib/Transforms/Coroutines" }
+p { i: "llvm" x: "LLVMCoverage" ix: true peerdir: "contrib/libs/llvm16/lib/ProfileData/Coverage" }
+p { i: "llvm" x: "LLVMDWARFLinker" ix: true peerdir: "contrib/libs/llvm16/lib/DWARFLinker" }
+p { i: "llvm" x: "LLVMDWP" ix: true peerdir: "contrib/libs/llvm16/lib/DWP" }
+p { i: "llvm" x: "LLVMDebugInfoCodeView" ix: true peerdir: "contrib/libs/llvm16/lib/DebugInfo/CodeView" }
+p { i: "llvm" x: "LLVMDebugInfoDWARF" ix: true peerdir: "contrib/libs/llvm16/lib/DebugInfo/DWARF" }
+p { i: "llvm" x: "LLVMDebugInfoGSYM" ix: true peerdir: "contrib/libs/llvm16/lib/DebugInfo/GSYM" }
+p { i: "llvm" x: "LLVMDebugInfoMSF" ix: true peerdir: "contrib/libs/llvm16/lib/DebugInfo/MSF" }
+p { i: "llvm" x: "LLVMDebugInfoPDB" ix: true peerdir: "contrib/libs/llvm16/lib/DebugInfo/PDB" }
+p { i: "llvm" x: "LLVMDebuginfod" ix: true peerdir: "contrib/libs/llvm16/lib/Debuginfod" }
+p { i: "llvm" x: "LLVMDemangle" ix: true peerdir: "contrib/libs/llvm16/lib/Demangle" }
+p { i: "llvm" x: "LLVMDiff" ix: true peerdir: "contrib/libs/llvm16/tools/llvm-diff/lib" }
+p { i: "llvm" x: "LLVMDlltoolDriver" ix: true peerdir: "contrib/libs/llvm16/lib/ToolDrivers/llvm-dlltool" }
+p { i: "llvm" x: "LLVMExecutionEngine" ix: true peerdir: "contrib/libs/llvm16/lib/ExecutionEngine" }
+p { i: "llvm" x: "LLVMExegesis" ix: true peerdir: "contrib/libs/llvm16/tools/llvm-exegesis/lib" }
+p { i: "llvm" x: "LLVMExegesisAArch64" ix: true peerdir: "contrib/libs/llvm16/tools/llvm-exegesis/lib/AArch64" }
+p { i: "llvm" x: "LLVMExegesisPowerPC" ix: true peerdir: "contrib/libs/llvm16/tools/llvm-exegesis/lib/PowerPC" }
+p { i: "llvm" x: "LLVMExegesisX86" ix: true peerdir: "contrib/libs/llvm16/tools/llvm-exegesis/lib/X86" }
+p { i: "llvm" x: "LLVMExtensions" ix: true peerdir: "contrib/libs/llvm16/lib/Extensions" }
+p { i: "llvm" x: "LLVMFileCheck" ix: true peerdir: "contrib/libs/llvm16/lib/FileCheck" }
+p { i: "llvm" x: "LLVMFrontendHLSL" ix: true peerdir: "contrib/libs/llvm16/lib/Frontend/HLSL" }
+p { i: "llvm" x: "LLVMFrontendOpenACC" ix: true peerdir: "contrib/libs/llvm16/lib/Frontend/OpenACC" }
+p { i: "llvm" x: "LLVMFrontendOpenMP" ix: true peerdir: "contrib/libs/llvm16/lib/Frontend/OpenMP" }
+p { i: "llvm" x: "LLVMFuzzMutate" ix: true peerdir: "contrib/libs/llvm16/lib/FuzzMutate" }
+p { i: "llvm" x: "LLVMGlobalISel" ix: true peerdir: "contrib/libs/llvm16/lib/CodeGen/GlobalISel" }
+p { i: "llvm" x: "LLVMIRPrinter" ix: true peerdir: "contrib/libs/llvm16/lib/IRPrinter" }
+p { i: "llvm" x: "LLVMIRReader" ix: true peerdir: "contrib/libs/llvm16/lib/IRReader" }
+p { i: "llvm" x: "LLVMInstCombine" ix: true peerdir: "contrib/libs/llvm16/lib/Transforms/InstCombine" }
+p { i: "llvm" x: "LLVMInstrumentation" ix: true peerdir: "contrib/libs/llvm16/lib/Transforms/Instrumentation" }
+p { i: "llvm" x: "LLVMInterfaceStub" ix: true peerdir: "contrib/libs/llvm16/lib/InterfaceStub" }
+p { i: "llvm" x: "LLVMInterpreter" ix: true peerdir: "contrib/libs/llvm16/lib/ExecutionEngine/Interpreter" }
+p { i: "llvm" x: "LLVMJITLink" ix: true peerdir: "contrib/libs/llvm16/lib/ExecutionEngine/JITLink" }
+p { i: "llvm" x: "LLVMLTO" ix: true peerdir: "contrib/libs/llvm16/lib/LTO" }
+p { i: "llvm" x: "LLVMLibDriver" ix: true peerdir: "contrib/libs/llvm16/lib/ToolDrivers/llvm-lib" }
+p { i: "llvm" x: "LLVMLineEditor" ix: true peerdir: "contrib/libs/llvm16/lib/LineEditor" }
+p { i: "llvm" x: "LLVMLinker" ix: true peerdir: "contrib/libs/llvm16/lib/Linker" }
+p { i: "llvm" x: "LLVMLoongArchAsmParser" ix: true peerdir: "contrib/libs/llvm16/lib/Target/LoongArch/AsmParser" }
+p { i: "llvm" x: "LLVMLoongArchCodeGen" ix: true peerdir: "contrib/libs/llvm16/lib/Target/LoongArch" }
+p { i: "llvm" x: "LLVMLoongArchDesc" ix: true peerdir: "contrib/libs/llvm16/lib/Target/LoongArch/MCTargetDesc" }
+p { i: "llvm" x: "LLVMLoongArchDisassembler" ix: true peerdir: "contrib/libs/llvm16/lib/Target/LoongArch/Disassembler" }
+p { i: "llvm" x: "LLVMLoongArchInfo" ix: true peerdir: "contrib/libs/llvm16/lib/Target/LoongArch/TargetInfo" }
+p { i: "llvm" x: "LLVMMC" ix: true peerdir: "contrib/libs/llvm16/lib/MC" }
+p { i: "llvm" x: "LLVMMCA" ix: true peerdir: "contrib/libs/llvm16/lib/MCA" }
+p { i: "llvm" x: "LLVMMCDisassembler" ix: true peerdir: "contrib/libs/llvm16/lib/MC/MCDisassembler" }
+p { i: "llvm" x: "LLVMMCJIT" ix: true peerdir: "contrib/libs/llvm16/lib/ExecutionEngine/MCJIT" }
+p { i: "llvm" x: "LLVMMCParser" ix: true peerdir: "contrib/libs/llvm16/lib/MC/MCParser" }
+p { i: "llvm" x: "LLVMMIRParser" ix: true peerdir: "contrib/libs/llvm16/lib/CodeGen/MIRParser" }
+p { i: "llvm" x: "LLVMNVPTXCodeGen" ix: true peerdir: "contrib/libs/llvm16/lib/Target/NVPTX" }
+p { i: "llvm" x: "LLVMNVPTXDesc" ix: true peerdir: "contrib/libs/llvm16/lib/Target/NVPTX/MCTargetDesc" }
+p { i: "llvm" x: "LLVMNVPTXInfo" ix: true peerdir: "contrib/libs/llvm16/lib/Target/NVPTX/TargetInfo" }
+p { i: "llvm" x: "LLVMObjCARCOpts" ix: true peerdir: "contrib/libs/llvm16/lib/Transforms/ObjCARC" }
+p { i: "llvm" x: "LLVMObjCopy" ix: true peerdir: "contrib/libs/llvm16/lib/ObjCopy" }
+p { i: "llvm" x: "LLVMObject" ix: true peerdir: "contrib/libs/llvm16/lib/Object" }
+p { i: "llvm" x: "LLVMObjectYAML" ix: true peerdir: "contrib/libs/llvm16/lib/ObjectYAML" }
+p { i: "llvm" x: "LLVMOption" ix: true peerdir: "contrib/libs/llvm16/lib/Option" }
+p { i: "llvm" x: "LLVMOrcJIT" ix: true peerdir: "contrib/libs/llvm16/lib/ExecutionEngine/Orc" }
+p { i: "llvm" x: "LLVMOrcShared" ix: true peerdir: "contrib/libs/llvm16/lib/ExecutionEngine/Orc/Shared" }
+p { i: "llvm" x: "LLVMOrcTargetProcess" ix: true peerdir: "contrib/libs/llvm16/lib/ExecutionEngine/Orc/TargetProcess" }
+p { i: "llvm" x: "LLVMPasses" ix: true peerdir: "contrib/libs/llvm16/lib/Passes" }
+p { i: "llvm" x: "LLVMPerfJITEvents" ix: true peerdir: "contrib/libs/llvm16/lib/ExecutionEngine/PerfJITEvents" }
+p { i: "llvm" x: "LLVMPowerPCAsmParser" ix: true peerdir: "contrib/libs/llvm16/lib/Target/PowerPC/AsmParser" }
+p { i: "llvm" x: "LLVMPowerPCCodeGen" ix: true peerdir: "contrib/libs/llvm16/lib/Target/PowerPC" }
+p { i: "llvm" x: "LLVMPowerPCDesc" ix: true peerdir: "contrib/libs/llvm16/lib/Target/PowerPC/MCTargetDesc" }
+p { i: "llvm" x: "LLVMPowerPCDisassembler" ix: true peerdir: "contrib/libs/llvm16/lib/Target/PowerPC/Disassembler" }
+p { i: "llvm" x: "LLVMPowerPCInfo" ix: true peerdir: "contrib/libs/llvm16/lib/Target/PowerPC/TargetInfo" }
+p { i: "llvm" x: "LLVMProfileData" ix: true peerdir: "contrib/libs/llvm16/lib/ProfileData" }
+p { i: "llvm" x: "LLVMRemarks" ix: true peerdir: "contrib/libs/llvm16/lib/Remarks" }
+p { i: "llvm" x: "LLVMRuntimeDyld" ix: true peerdir: "contrib/libs/llvm16/lib/ExecutionEngine/RuntimeDyld" }
+p { i: "llvm" x: "LLVMScalarOpts" ix: true peerdir: "contrib/libs/llvm16/lib/Transforms/Scalar" }
+p { i: "llvm" x: "LLVMSelectionDAG" ix: true peerdir: "contrib/libs/llvm16/lib/CodeGen/SelectionDAG" }
+p { i: "llvm" x: "LLVMSupport" ix: true peerdir: "contrib/libs/llvm16/lib/Support" }
+p { i: "llvm" x: "LLVMSymbolize" ix: true peerdir: "contrib/libs/llvm16/lib/DebugInfo/Symbolize" }
+p { i: "llvm" x: "LLVMTableGen" ix: true peerdir: "contrib/libs/llvm16/lib/TableGen" }
+p { i: "llvm" x: "LLVMTableGenGlobalISel" ix: true peerdir: "contrib/libs/llvm16/utils/TableGen/GlobalISel" }
+p { i: "llvm" x: "LLVMTarget" ix: true peerdir: "contrib/libs/llvm16/lib/Target" }
+p { i: "llvm" x: "LLVMTargetParser" ix: true peerdir: "contrib/libs/llvm16/lib/TargetParser" }
+p { i: "llvm" x: "LLVMTextAPI" ix: true peerdir: "contrib/libs/llvm16/lib/TextAPI" }
+p { i: "llvm" x: "LLVMTransformUtils" ix: true peerdir: "contrib/libs/llvm16/lib/Transforms/Utils" }
+p { i: "llvm" x: "LLVMVectorize" ix: true peerdir: "contrib/libs/llvm16/lib/Transforms/Vectorize" }
+p { i: "llvm" x: "LLVMWebAssemblyAsmParser" ix: true peerdir: "contrib/libs/llvm16/lib/Target/WebAssembly/AsmParser" }
+p { i: "llvm" x: "LLVMWebAssemblyCodeGen" ix: true peerdir: "contrib/libs/llvm16/lib/Target/WebAssembly" }
+p { i: "llvm" x: "LLVMWebAssemblyDesc" ix: true peerdir: "contrib/libs/llvm16/lib/Target/WebAssembly/MCTargetDesc" }
+p { i: "llvm" x: "LLVMWebAssemblyDisassembler" ix: true peerdir: "contrib/libs/llvm16/lib/Target/WebAssembly/Disassembler" }
+p { i: "llvm" x: "LLVMWebAssemblyInfo" ix: true peerdir: "contrib/libs/llvm16/lib/Target/WebAssembly/TargetInfo" }
+p { i: "llvm" x: "LLVMWebAssemblyUtils" ix: true peerdir: "contrib/libs/llvm16/lib/Target/WebAssembly/Utils" }
+p { i: "llvm" x: "LLVMWindowsDriver" ix: true peerdir: "contrib/libs/llvm16/lib/WindowsDriver" }
+p { i: "llvm" x: "LLVMWindowsManifest" ix: true peerdir: "contrib/libs/llvm16/lib/WindowsManifest" }
+p { i: "llvm" x: "LLVMX86AsmParser" ix: true peerdir: "contrib/libs/llvm16/lib/Target/X86/AsmParser" }
+p { i: "llvm" x: "LLVMX86CodeGen" ix: true peerdir: "contrib/libs/llvm16/lib/Target/X86" }
+p { i: "llvm" x: "LLVMX86Desc" ix: true peerdir: "contrib/libs/llvm16/lib/Target/X86/MCTargetDesc" }
+p { i: "llvm" x: "LLVMX86Disassembler" ix: true peerdir: "contrib/libs/llvm16/lib/Target/X86/Disassembler" }
+p { i: "llvm" x: "LLVMX86Info" ix: true peerdir: "contrib/libs/llvm16/lib/Target/X86/TargetInfo" }
+p { i: "llvm" x: "LLVMX86TargetMCA" ix: true peerdir: "contrib/libs/llvm16/lib/Target/X86/MCA" }
+p { i: "llvm" x: "LLVMXRay" ix: true peerdir: "contrib/libs/llvm16/lib/XRay" }
+p { i: "llvm" x: "LLVMgold" ix: true peerdir: "contrib/libs/llvm16/tools/gold" }
+p { i: "llvm" x: "LLVMipo" ix: true peerdir: "contrib/libs/llvm16/lib/Transforms/IPO" }
+p { i: "llvm" x: "LTO" ix: true peerdir: "contrib/libs/llvm16/tools/lto" }
+p { i: "llvm" x: "Polly" ix: true peerdir: "contrib/libs/llvm16/tools/polly/lib" }
+p { i: "llvm" x: "PollyISL" ix: true peerdir: "contrib/libs/llvm16/tools/polly/lib/External/isl" }
+p { i: "llvm" x: "PollyPPCG" ix: true peerdir: "contrib/libs/llvm16/tools/polly/lib/External/ppcg" }
+p { i: "llvm" x: "Remarks" ix: true peerdir: "contrib/libs/llvm16/tools/remarks-shlib" }
+p { x: "bugpoint" peerdir: "contrib/libs/llvm16/tools/bugpoint" }
+p { x: "dsymutil" peerdir: "contrib/libs/llvm16/tools/dsymutil" }
+p { x: "llc" peerdir: "contrib/libs/llvm16/tools/llc" }
+p { x: "lli" peerdir: "contrib/libs/llvm16/tools/lli" }
+p { x: "lli-child-target" peerdir: "contrib/libs/llvm16/tools/lli/ChildTarget" }
+p { x: "llvm-ar" peerdir: "contrib/libs/llvm16/tools/llvm-ar" }
+p { x: "llvm-as" peerdir: "contrib/libs/llvm16/tools/llvm-as" }
+p { x: "llvm-bcanalyzer" peerdir: "contrib/libs/llvm16/tools/llvm-bcanalyzer" }
+p { x: "llvm-cat" peerdir: "contrib/libs/llvm16/tools/llvm-cat" }
+p { x: "llvm-cfi-verify" peerdir: "contrib/libs/llvm16/tools/llvm-cfi-verify" }
+p { x: "llvm-config" peerdir: "contrib/libs/llvm16/tools/llvm-config" }
+p { x: "llvm-cov" peerdir: "contrib/libs/llvm16/tools/llvm-cov" }
+p { x: "llvm-cvtres" peerdir: "contrib/libs/llvm16/tools/llvm-cvtres" }
+p { x: "llvm-cxxdump" peerdir: "contrib/libs/llvm16/tools/llvm-cxxdump" }
+p { x: "llvm-cxxfilt" peerdir: "contrib/libs/llvm16/tools/llvm-cxxfilt" }
+p { x: "llvm-cxxmap" peerdir: "contrib/libs/llvm16/tools/llvm-cxxmap" }
+p { x: "llvm-diff" peerdir: "contrib/libs/llvm16/tools/llvm-diff" }
+p { x: "llvm-dis" peerdir: "contrib/libs/llvm16/tools/llvm-dis" }
+p { x: "llvm-dwarfdump" peerdir: "contrib/libs/llvm16/tools/llvm-dwarfdump" }
+p { x: "llvm-dwp" peerdir: "contrib/libs/llvm16/tools/llvm-dwp" }
+p { x: "llvm-exegesis" peerdir: "contrib/libs/llvm16/tools/llvm-exegesis" }
+p { x: "llvm-extract" peerdir: "contrib/libs/llvm16/tools/llvm-extract" }
+p { x: "llvm-gsymutil" peerdir: "contrib/libs/llvm16/tools/llvm-gsymutil" }
+p { x: "llvm-ifs" peerdir: "contrib/libs/llvm16/tools/llvm-ifs" }
+p { x: "llvm-jitlink" peerdir: "contrib/libs/llvm16/tools/llvm-jitlink" }
+p { x: "llvm-jitlink-executor" peerdir: "contrib/libs/llvm16/tools/llvm-jitlink/llvm-jitlink-executor" }
+p { x: "llvm-libtool-darwin" peerdir: "contrib/libs/llvm16/tools/llvm-libtool-darwin" }
+p { x: "llvm-link" peerdir: "contrib/libs/llvm16/tools/llvm-link" }
+p { x: "llvm-lipo" peerdir: "contrib/libs/llvm16/tools/llvm-lipo" }
+p { x: "llvm-lto" peerdir: "contrib/libs/llvm16/tools/llvm-lto" }
+p { x: "llvm-lto2" peerdir: "contrib/libs/llvm16/tools/llvm-lto2" }
+p { x: "llvm-mc" peerdir: "contrib/libs/llvm16/tools/llvm-mc" }
+p { x: "llvm-mca" peerdir: "contrib/libs/llvm16/tools/llvm-mca" }
+p { x: "llvm-ml" peerdir: "contrib/libs/llvm16/tools/llvm-ml" }
+p { x: "llvm-modextract" peerdir: "contrib/libs/llvm16/tools/llvm-modextract" }
+p { x: "llvm-mt" peerdir: "contrib/libs/llvm16/tools/llvm-mt" }
+p { x: "llvm-nm" peerdir: "contrib/libs/llvm16/tools/llvm-nm" }
+p { x: "llvm-objcopy" peerdir: "contrib/libs/llvm16/tools/llvm-objcopy" }
+p { x: "llvm-objdump" peerdir: "contrib/libs/llvm16/tools/llvm-objdump" }
+p { x: "llvm-opt-report" peerdir: "contrib/libs/llvm16/tools/llvm-opt-report" }
+p { x: "llvm-pdbutil" peerdir: "contrib/libs/llvm16/tools/llvm-pdbutil" }
+p { x: "llvm-profdata" peerdir: "contrib/libs/llvm16/tools/llvm-profdata" }
+p { x: "llvm-profgen" peerdir: "contrib/libs/llvm16/tools/llvm-profgen" }
+p { x: "llvm-rc" peerdir: "contrib/libs/llvm16/tools/llvm-rc" }
+p { x: "llvm-readobj" peerdir: "contrib/libs/llvm16/tools/llvm-readobj" }
+p { x: "llvm-reduce" peerdir: "contrib/libs/llvm16/tools/llvm-reduce" }
+p { x: "llvm-rtdyld" peerdir: "contrib/libs/llvm16/tools/llvm-rtdyld" }
+p { x: "llvm-size" peerdir: "contrib/libs/llvm16/tools/llvm-size" }
+p { x: "llvm-split" peerdir: "contrib/libs/llvm16/tools/llvm-split" }
+p { x: "llvm-stress" peerdir: "contrib/libs/llvm16/tools/llvm-stress" }
+p { x: "llvm-strings" peerdir: "contrib/libs/llvm16/tools/llvm-strings" }
+p { x: "llvm-symbolizer" peerdir: "contrib/libs/llvm16/tools/llvm-symbolizer" }
+p { x: "llvm-tblgen" peerdir: "contrib/libs/llvm16/utils/TableGen" }
+p { x: "llvm-undname" peerdir: "contrib/libs/llvm16/tools/llvm-undname" }
+p { x: "llvm-xray" peerdir: "contrib/libs/llvm16/tools/llvm-xray" }
+p { x: "obj2yaml" peerdir: "contrib/libs/llvm16/tools/obj2yaml" }
+p { x: "opt" peerdir: "contrib/libs/llvm16/tools/opt" }
+p { x: "sancov" peerdir: "contrib/libs/llvm16/tools/sancov" }
+p { x: "sanstats" peerdir: "contrib/libs/llvm16/tools/sanstats" }
+p { x: "verify-uselistorder" peerdir: "contrib/libs/llvm16/tools/verify-uselistorder" }
+p { x: "yaml2obj" peerdir: "contrib/libs/llvm16/tools/yaml2obj" }
+p { i: "llvm/include" addincl: "contrib/libs/llvm16/include" addinclbuild: true }
diff --git a/contrib/libs/llvm16/tools/bugpoint/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/bugpoint/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/bugpoint/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/dsymutil/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/dsymutil/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/dsymutil/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/gold/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/gold/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/gold/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llc/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llc/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llc/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/lli/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/lli/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/lli/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/lli/ChildTarget/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/lli/ChildTarget/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/lli/ChildTarget/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-ar/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-ar/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-ar/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-as/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-as/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-as/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-bcanalyzer/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-bcanalyzer/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-bcanalyzer/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-cat/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-cat/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-cat/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-cfi-verify/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-cfi-verify/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-cfi-verify/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-cfi-verify/lib/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-cfi-verify/lib/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-cfi-verify/lib/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-config/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-config/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-config/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-cov/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-cov/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-cov/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-cvtres/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-cvtres/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-cvtres/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-cxxdump/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-cxxdump/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-cxxdump/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-cxxfilt/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-cxxfilt/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..201c37420a
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-cxxfilt/.yandex_meta/licenses.list.txt
@@ -0,0 +1,13 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+
+====================COPYRIGHT====================
+ static bool isChar6(char C) { return isAlnum(C) || C == '.' || C == '_'; }
+ static unsigned EncodeChar6(char C) {
+ if (C >= 'a' && C <= 'z') return C-'a';
diff --git a/contrib/libs/llvm16/tools/llvm-cxxmap/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-cxxmap/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-cxxmap/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-diff/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-diff/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-diff/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-diff/lib/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-diff/lib/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-diff/lib/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-dis/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-dis/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-dis/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-dwarfdump/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-dwarfdump/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-dwarfdump/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-dwp/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-dwp/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-dwp/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-exegesis/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-exegesis/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-exegesis/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-exegesis/lib/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-exegesis/lib/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-exegesis/lib/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-exegesis/lib/AArch64/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-exegesis/lib/AArch64/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-exegesis/lib/AArch64/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-exegesis/lib/PowerPC/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-exegesis/lib/PowerPC/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-exegesis/lib/PowerPC/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-exegesis/lib/X86/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-exegesis/lib/X86/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-exegesis/lib/X86/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-extract/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-extract/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-extract/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-gsymutil/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-gsymutil/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-gsymutil/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-ifs/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-ifs/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-ifs/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-jitlink/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-jitlink/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-jitlink/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-jitlink/llvm-jitlink-executor/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-jitlink/llvm-jitlink-executor/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-jitlink/llvm-jitlink-executor/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-libtool-darwin/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-libtool-darwin/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-libtool-darwin/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-link/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-link/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-link/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-lipo/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-lipo/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-lipo/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-lto/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-lto/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-lto/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-lto2/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-lto2/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-lto2/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-mc/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-mc/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-mc/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-mca/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-mca/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-mca/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-ml/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-ml/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-ml/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-modextract/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-modextract/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-modextract/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-mt/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-mt/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-mt/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-nm/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-nm/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-nm/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-objcopy/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-objcopy/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-objcopy/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-objdump/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-objdump/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-objdump/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-opt-report/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-opt-report/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-opt-report/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-pdbutil/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-pdbutil/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-pdbutil/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-profdata/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-profdata/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-profdata/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-profgen/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-profgen/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-profgen/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-rc/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-rc/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-rc/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-readobj/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-readobj/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-readobj/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-reduce/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-reduce/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-reduce/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-rtdyld/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-rtdyld/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-rtdyld/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-size/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-size/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-size/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-split/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-split/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-split/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-stress/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-stress/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-stress/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-strings/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-strings/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-strings/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-symbolizer/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-symbolizer/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-symbolizer/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-undname/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-undname/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-undname/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/llvm-xray/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/llvm-xray/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/llvm-xray/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/lto/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/lto/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/lto/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/lto/lto.exports b/contrib/libs/llvm16/tools/lto/lto.exports
new file mode 100644
index 0000000000..4164c3919a
--- /dev/null
+++ b/contrib/libs/llvm16/tools/lto/lto.exports
@@ -0,0 +1,77 @@
+lto_get_error_message
+lto_get_version
+lto_initialize_disassembler
+lto_module_create
+lto_module_create_from_fd
+lto_module_create_from_fd_at_offset
+lto_module_create_from_memory
+lto_module_create_from_memory_with_path
+lto_module_create_in_local_context
+lto_module_create_in_codegen_context
+lto_module_has_ctor_dtor
+lto_module_get_linkeropts
+lto_module_get_macho_cputype
+lto_module_get_num_symbols
+lto_module_get_symbol_attribute
+lto_module_get_symbol_name
+lto_module_get_target_triple
+lto_module_set_target_triple
+lto_module_is_object_file
+lto_module_is_object_file_for_target
+lto_module_is_object_file_in_memory
+lto_module_is_object_file_in_memory_for_target
+lto_module_has_objc_category
+lto_module_dispose
+lto_api_version
+lto_codegen_set_diagnostic_handler
+lto_codegen_add_module
+lto_codegen_set_module
+lto_codegen_add_must_preserve_symbol
+lto_codegen_compile
+lto_codegen_create
+lto_codegen_create_in_local_context
+lto_codegen_dispose
+lto_codegen_set_debug_model
+lto_codegen_set_pic_model
+lto_codegen_write_merged_modules
+lto_codegen_debug_options
+lto_codegen_debug_options_array
+lto_codegen_set_assembler_args
+lto_codegen_set_assembler_path
+lto_codegen_set_cpu
+lto_codegen_compile_to_file
+lto_codegen_optimize
+lto_codegen_compile_optimized
+lto_codegen_set_should_internalize
+lto_codegen_set_should_embed_uselists
+lto_set_debug_options
+thinlto_create_codegen
+thinlto_codegen_dispose
+thinlto_codegen_add_module
+thinlto_codegen_process
+thinlto_module_get_num_objects
+thinlto_module_get_object
+thinlto_codegen_set_pic_model
+thinlto_codegen_set_cache_dir
+thinlto_codegen_set_cache_pruning_interval
+thinlto_codegen_set_cache_entry_expiration
+thinlto_codegen_set_final_cache_size_relative_to_available_space
+thinlto_codegen_set_cache_size_bytes
+thinlto_codegen_set_cache_size_megabytes
+thinlto_codegen_set_cache_size_files
+thinlto_codegen_set_savetemps_dir
+thinlto_codegen_set_cpu
+thinlto_debug_options
+lto_module_is_thinlto
+thinlto_codegen_add_must_preserve_symbol
+thinlto_codegen_add_cross_referenced_symbol
+thinlto_codegen_set_codegen_only
+thinlto_codegen_disable_codegen
+thinlto_module_get_num_object_files
+thinlto_module_get_object_file
+thinlto_set_generated_objects_dir
+lto_input_create
+lto_input_dispose
+lto_input_get_num_dependent_libraries
+lto_input_get_dependent_library
+lto_runtime_lib_symbols_list
diff --git a/contrib/libs/llvm16/tools/obj2yaml/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/obj2yaml/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/obj2yaml/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/opt/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/opt/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/opt/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/polly/include/polly/Support/ISLOperators.h b/contrib/libs/llvm16/tools/polly/include/polly/Support/ISLOperators.h
new file mode 100644
index 0000000000..db6b42ccf2
--- /dev/null
+++ b/contrib/libs/llvm16/tools/polly/include/polly/Support/ISLOperators.h
@@ -0,0 +1,184 @@
+//===------ ISLOperators.h --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Operator overloads for isl C++ objects.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef POLLY_ISLOPERATORS_H
+#define POLLY_ISLOPERATORS_H
+
+#include "isl/isl-noexceptions.h"
+
+namespace polly {
+
+/// Addition
+/// @{
+inline isl::pw_aff operator+(isl::pw_aff Left, isl::pw_aff Right) {
+ return Left.add(Right);
+}
+
+inline isl::pw_aff operator+(isl::val ValLeft, isl::pw_aff Right) {
+ isl::pw_aff Left(Right.domain(), ValLeft);
+ return Left.add(Right);
+}
+
+inline isl::pw_aff operator+(isl::pw_aff Left, isl::val ValRight) {
+ isl::pw_aff Right(Left.domain(), ValRight);
+ return Left.add(Right);
+}
+
+inline isl::pw_aff operator+(long IntLeft, isl::pw_aff Right) {
+ isl::ctx Ctx = Right.ctx();
+ isl::val ValLeft(Ctx, IntLeft);
+ isl::pw_aff Left(Right.domain(), ValLeft);
+ return Left.add(Right);
+}
+
+inline isl::pw_aff operator+(isl::pw_aff Left, long IntRight) {
+ isl::ctx Ctx = Left.ctx();
+ isl::val ValRight(Ctx, IntRight);
+ isl::pw_aff Right(Left.domain(), ValRight);
+ return Left.add(Right);
+}
+/// @}
+
+/// Multiplication
+/// @{
+inline isl::pw_aff operator*(isl::pw_aff Left, isl::pw_aff Right) {
+ return Left.mul(Right);
+}
+
+inline isl::pw_aff operator*(isl::val ValLeft, isl::pw_aff Right) {
+ isl::pw_aff Left(Right.domain(), ValLeft);
+ return Left.mul(Right);
+}
+
+inline isl::pw_aff operator*(isl::pw_aff Left, isl::val ValRight) {
+ isl::pw_aff Right(Left.domain(), ValRight);
+ return Left.mul(Right);
+}
+
+inline isl::pw_aff operator*(long IntLeft, isl::pw_aff Right) {
+ isl::ctx Ctx = Right.ctx();
+ isl::val ValLeft(Ctx, IntLeft);
+ isl::pw_aff Left(Right.domain(), ValLeft);
+ return Left.mul(Right);
+}
+
+inline isl::pw_aff operator*(isl::pw_aff Left, long IntRight) {
+ isl::ctx Ctx = Left.ctx();
+ isl::val ValRight(Ctx, IntRight);
+ isl::pw_aff Right(Left.domain(), ValRight);
+ return Left.mul(Right);
+}
+/// @}
+
+/// Subtraction
+/// @{
+inline isl::pw_aff operator-(isl::pw_aff Left, isl::pw_aff Right) {
+ return Left.sub(Right);
+}
+
+inline isl::pw_aff operator-(isl::val ValLeft, isl::pw_aff Right) {
+ isl::pw_aff Left(Right.domain(), ValLeft);
+ return Left.sub(Right);
+}
+
+inline isl::pw_aff operator-(isl::pw_aff Left, isl::val ValRight) {
+ isl::pw_aff Right(Left.domain(), ValRight);
+ return Left.sub(Right);
+}
+
+inline isl::pw_aff operator-(long IntLeft, isl::pw_aff Right) {
+ isl::ctx Ctx = Right.ctx();
+ isl::val ValLeft(Ctx, IntLeft);
+ isl::pw_aff Left(Right.domain(), ValLeft);
+ return Left.sub(Right);
+}
+
+inline isl::pw_aff operator-(isl::pw_aff Left, long IntRight) {
+ isl::ctx Ctx = Left.ctx();
+ isl::val ValRight(Ctx, IntRight);
+ isl::pw_aff Right(Left.domain(), ValRight);
+ return Left.sub(Right);
+}
+/// @}
+
+/// Division
+///
+/// This division rounds towards zero. This follows the semantics of C/C++.
+///
+/// @{
+inline isl::pw_aff operator/(isl::pw_aff Left, isl::pw_aff Right) {
+ return Left.tdiv_q(Right);
+}
+
+inline isl::pw_aff operator/(isl::val ValLeft, isl::pw_aff Right) {
+ isl::pw_aff Left(Right.domain(), ValLeft);
+ return Left.tdiv_q(Right);
+}
+
+inline isl::pw_aff operator/(isl::pw_aff Left, isl::val ValRight) {
+ isl::pw_aff Right(Left.domain(), ValRight);
+ return Left.tdiv_q(Right);
+}
+
+inline isl::pw_aff operator/(long IntLeft, isl::pw_aff Right) {
+ isl::ctx Ctx = Right.ctx();
+ isl::val ValLeft(Ctx, IntLeft);
+ isl::pw_aff Left(Right.domain(), ValLeft);
+ return Left.tdiv_q(Right);
+}
+
+inline isl::pw_aff operator/(isl::pw_aff Left, long IntRight) {
+ isl::ctx Ctx = Left.ctx();
+ isl::val ValRight(Ctx, IntRight);
+ isl::pw_aff Right(Left.domain(), ValRight);
+ return Left.tdiv_q(Right);
+}
+/// @}
+
+/// Remainder
+///
+/// This is the remainder of a division which rounds towards zero. This follows
+/// the semantics of C/C++.
+///
+/// @{
+inline isl::pw_aff operator%(isl::pw_aff Left, isl::pw_aff Right) {
+ return Left.tdiv_r(Right);
+}
+
+inline isl::pw_aff operator%(isl::val ValLeft, isl::pw_aff Right) {
+ isl::pw_aff Left(Right.domain(), ValLeft);
+ return Left.tdiv_r(Right);
+}
+
+inline isl::pw_aff operator%(isl::pw_aff Left, isl::val ValRight) {
+ isl::pw_aff Right(Left.domain(), ValRight);
+ return Left.tdiv_r(Right);
+}
+
+inline isl::pw_aff operator%(long IntLeft, isl::pw_aff Right) {
+ isl::ctx Ctx = Right.ctx();
+ isl::val ValLeft(Ctx, IntLeft);
+ isl::pw_aff Left(Right.domain(), ValLeft);
+ return Left.tdiv_r(Right);
+}
+
+inline isl::pw_aff operator%(isl::pw_aff Left, long IntRight) {
+ isl::ctx Ctx = Left.ctx();
+ isl::val ValRight(Ctx, IntRight);
+ isl::pw_aff Right(Left.domain(), ValRight);
+ return Left.tdiv_r(Right);
+}
+/// @}
+
+} // namespace polly
+
+#endif // POLLY_ISLOPERATORS_H
diff --git a/contrib/libs/llvm16/tools/polly/lib/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/polly/lib/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..3aad0d7ee8
--- /dev/null
+++ b/contrib/libs/llvm16/tools/polly/lib/.yandex_meta/licenses.list.txt
@@ -0,0 +1,11 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+
+====================MIT====================
+License: MIT-STYLE
diff --git a/contrib/libs/llvm16/tools/polly/lib/External/isl/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/polly/lib/External/isl/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..fd530db688
--- /dev/null
+++ b/contrib/libs/llvm16/tools/polly/lib/External/isl/.yandex_meta/licenses.list.txt
@@ -0,0 +1,332 @@
+====================BSD-3-Clause====================
+ license I am using. This is basically the BSD license, so
+
+
+====================COPYRIGHT====================
+ Copyright (C) 2002-2007 Michael J. Fromberger, All Rights Reserved.
+
+
+====================COPYRIGHT====================
+ Copyright (c) 2012 Qualcomm Innovation Center, Inc. All rights reserved.
+
+
+====================COPYRIGHT====================
+ * Copyright 2005-2007 Universiteit Leiden
+ * Copyright 2008-2009 Katholieke Universiteit Leuven
+ * Copyright 2010 INRIA Saclay
+
+
+====================COPYRIGHT====================
+ * Copyright 2005-2007 Universiteit Leiden
+ * Copyright 2008-2009 Katholieke Universiteit Leuven
+ * Copyright 2010 INRIA Saclay
+ * Copyright 2012 Universiteit Leiden
+ * Copyright 2014 Ecole Normale Superieure
+
+
+====================COPYRIGHT====================
+ * Copyright 2006-2007 Universiteit Leiden
+ * Copyright 2008-2009 Katholieke Universiteit Leuven
+
+
+====================COPYRIGHT====================
+ * Copyright 2008-2009 Katholieke Universiteit Leuven
+
+
+====================COPYRIGHT====================
+ * Copyright 2008-2009 Katholieke Universiteit Leuven
+ * Copyright 2010 INRIA Saclay
+ * Copyright 2012 Ecole Normale Superieure
+
+
+====================COPYRIGHT====================
+ * Copyright 2008-2009 Katholieke Universiteit Leuven
+ * Copyright 2010 INRIA Saclay
+ * Copyright 2012-2013 Ecole Normale Superieure
+ * Copyright 2014 INRIA Rocquencourt
+ * Copyright 2016 INRIA Paris
+ * Copyright 2020 Cerebras Systems
+
+
+====================COPYRIGHT====================
+ * Copyright 2008-2009 Katholieke Universiteit Leuven
+ * Copyright 2010 INRIA Saclay
+ * Copyright 2012-2014 Ecole Normale Superieure
+ * Copyright 2014 INRIA Rocquencourt
+ * Copyright 2016 INRIA Paris
+ * Copyright 2016 Sven Verdoolaege
+ * Copyright 2018-2019 Cerebras Systems
+
+
+====================COPYRIGHT====================
+ * Copyright 2008-2009 Katholieke Universiteit Leuven
+ * Copyright 2010 INRIA Saclay
+ * Copyright 2016-2017 Sven Verdoolaege
+
+
+====================COPYRIGHT====================
+ * Copyright 2008-2009 Katholieke Universiteit Leuven
+ * Copyright 2012-2013 Ecole Normale Superieure
+ * Copyright 2014-2015 INRIA Rocquencourt
+ * Copyright 2016 Sven Verdoolaege
+
+
+====================COPYRIGHT====================
+ * Copyright 2010 INRIA Saclay
+ * Copyright 2013 Ecole Normale Superieure
+ * Copyright 2015 Sven Verdoolaege
+ * Copyright 2019 Cerebras Systems
+
+
+====================COPYRIGHT====================
+ * Copyright 2010-2011 INRIA Saclay
+ * Copyright 2012-2013 Ecole Normale Superieure
+
+
+====================COPYRIGHT====================
+ * Copyright 2011 INRIA Saclay
+ * Copyright 2011 Sven Verdoolaege
+ * Copyright 2012-2014 Ecole Normale Superieure
+ * Copyright 2014 INRIA Rocquencourt
+ * Copyright 2016 Sven Verdoolaege
+ * Copyright 2018,2020 Cerebras Systems
+ * Copyright 2021 Sven Verdoolaege
+
+
+====================COPYRIGHT====================
+ * Copyright 2011 INRIA Saclay
+ * Copyright 2012-2013 Ecole Normale Superieure
+ * Copyright 2016 Sven Verdoolaege
+
+
+====================COPYRIGHT====================
+ * Copyright 2012 Ecole Normale Superieure
+ * Copyright 2014 INRIA Rocquencourt
+ * Copyright 2019 Cerebras Systems
+
+
+====================COPYRIGHT====================
+ * Copyright 2012 Ecole Normale Superieure
+ * Copyright 2015-2016 Sven Verdoolaege
+
+
+====================COPYRIGHT====================
+ * Copyright 2012 Ecole Normale Superieure
+ * Copyright 2017 Sven Verdoolaege
+
+
+====================COPYRIGHT====================
+ * Copyright 2012,2014 Ecole Normale Superieure
+
+
+====================COPYRIGHT====================
+ * Copyright 2013 Ecole Normale Superieure
+
+
+====================COPYRIGHT====================
+ * Copyright 2013-2014 Ecole Normale Superieure
+ * Copyright 2014 INRIA Rocquencourt
+
+
+====================COPYRIGHT====================
+ * Copyright 2015 INRIA Paris-Rocquencourt
+
+
+====================COPYRIGHT====================
+ * Copyright 2018 Cerebras Systems
+
+
+====================COPYRIGHT====================
+ * Copyright 2018 Sven Verdoolaege
+ * Copyright 2019 Cerebras Systems
+
+
+====================COPYRIGHT====================
+IMath is Copyright © 2002-2009 Michael J. Fromberger
+You may use it subject to the following Licensing Terms:
+
+
+====================COPYRIGHT====================
+IMath is copyright &copy; 2002-2009 Michael J. Fromberger.
+
+
+====================File: tools/polly/lib/External/isl/AUTHORS====================
+isl was written by
+
+ Sven Verdoolaege
+2006-2007 Leiden Institute of Advanced Computer Science
+ Universiteit Leiden
+ Niels Bohrweg 1
+ 2333 CA Leiden
+ The Netherlands
+2008-2009 K.U.Leuven
+ Departement Computerwetenschappen
+ Celestijnenlaan 200A
+ B-3001 Leuven
+ Belgium
+2010-2011 INRIA Saclay - Ile-de-France
+ Parc Club Orsay Universite, ZAC des vignes, 4 rue Jacques Monod
+ 91893 Orsay
+ France
+2011-2012 consultant for Leiden Institute of Advanced Computer Science
+2012-2014 Ecole Normale Superieure
+ 45 rue d'Ulm, 75230 Paris
+ France
+2014-2015 INRIA Rocquencourt
+ Domaine de Voluceau - Rocquencourt, B.P. 105
+ 78153 Le Chesnay
+ France
+2015-2020 Polly Labs
+2018-2020 Cerebras Systems
+ 175 S San Antonio Rd
+ Los Altos, CA
+ USA
+
+Contributions by
+
+Mythri Alle
+Riyadh Baghdadi
+Serge Belyshev
+Albert Cohen
+Ray Donnelly
+Johannes Doerfert
+Andi Drebes
+Ron Estrin
+Clement Foyer
+Armin Groesslinger
+Tobias Grosser
+Frederik Harwath
+Alexandre Isoard
+Andreas Kloeckner
+Michael Kruse
+Manjunath Kudlur
+Alexander Matz
+Chielo Newctle
+Sebastian Pop
+Louis-Noel Pouchet
+Benoit Pradelle
+Uday Bondhugula
+Andreas Simbuerger
+Tianjiao Sun
+Malhar Thakkar
+Sergei Trofimovich
+Miheer Vaidya
+Sven van Haastregt
+Oleksandr Zinenko
+
+The merge sort implementation was written by Jeffrey Stedfast.
+
+
+====================MIT====================
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
+
+
+====================MIT====================
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+
+
+====================MIT====================
+ * Use of this software is governed by the MIT license
+
+
+====================MIT====================
+ * where it was posted in 2011 by Jeffrey Stedfast under the MIT license.
+ * The MIT license text is as follows:
+
+
+====================MIT====================
+> Permission is hereby granted, free of charge, to any person obtaining a copy
+> of this software and associated documentation files (the "Software"), to deal
+> in the Software without restriction, including without limitation the rights
+> to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+> copies of the Software, and to permit persons to whom the Software is
+> furnished to do so, subject to the following conditions:
+>
+> The above copyright notice and this permission notice shall be included in
+> all copies or substantial portions of the Software.
+>
+> THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+> IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+> FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+> AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+> LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+> OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+> SOFTWARE.
+
+
+====================MIT====================
+MIT License (MIT)
+
+
+====================MIT====================
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+====================MIT====================
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+====================MIT====================
+isl is released under the MIT license, but depends on the LGPL GMP
diff --git a/contrib/libs/llvm16/tools/polly/lib/External/isl/GIT_HEAD_ID b/contrib/libs/llvm16/tools/polly/lib/External/isl/GIT_HEAD_ID
new file mode 100644
index 0000000000..60beb8f894
--- /dev/null
+++ b/contrib/libs/llvm16/tools/polly/lib/External/isl/GIT_HEAD_ID
@@ -0,0 +1 @@
+isl-0.24-69-g54aac5ac
diff --git a/contrib/libs/llvm16/tools/polly/lib/External/ppcg/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/polly/lib/External/ppcg/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..e866e26661
--- /dev/null
+++ b/contrib/libs/llvm16/tools/polly/lib/External/ppcg/.yandex_meta/licenses.list.txt
@@ -0,0 +1,45 @@
+====================COPYRIGHT====================
+ * Copyright 2008-2009 Katholieke Universiteit Leuven
+ * Copyright 2010 INRIA Saclay
+ * Copyright 2012 Ecole Normale Superieure
+
+
+====================COPYRIGHT====================
+ * Copyright 2010 INRIA Saclay
+ * Copyright 2013 Ecole Normale Superieure
+ * Copyright 2015 Sven Verdoolaege
+ * Copyright 2019 Cerebras Systems
+
+
+====================COPYRIGHT====================
+ * Copyright 2010-2011 INRIA Saclay
+ * Copyright 2012-2013 Ecole Normale Superieure
+
+
+====================COPYRIGHT====================
+ * Copyright 2011 INRIA Saclay
+ * Copyright 2011 Sven Verdoolaege
+ * Copyright 2012-2014 Ecole Normale Superieure
+ * Copyright 2014 INRIA Rocquencourt
+ * Copyright 2016 Sven Verdoolaege
+ * Copyright 2018,2020 Cerebras Systems
+ * Copyright 2021 Sven Verdoolaege
+
+
+====================COPYRIGHT====================
+ * Copyright 2011 INRIA Saclay
+ * Copyright 2012-2013 Ecole Normale Superieure
+ * Copyright 2016 Sven Verdoolaege
+
+
+====================COPYRIGHT====================
+ * Copyright 2012 Ecole Normale Superieure
+ * Copyright 2015-2016 Sven Verdoolaege
+
+
+====================COPYRIGHT====================
+ * Copyright 2013 Ecole Normale Superieure
+
+
+====================MIT====================
+ * Use of this software is governed by the MIT license
diff --git a/contrib/libs/llvm16/tools/polly/lib/External/ppcg/GIT_HEAD_ID b/contrib/libs/llvm16/tools/polly/lib/External/ppcg/GIT_HEAD_ID
new file mode 100644
index 0000000000..642f492dd4
--- /dev/null
+++ b/contrib/libs/llvm16/tools/polly/lib/External/ppcg/GIT_HEAD_ID
@@ -0,0 +1 @@
+ppcg-0.07
diff --git a/contrib/libs/llvm16/tools/remarks-shlib/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/remarks-shlib/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/remarks-shlib/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/sancov/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/sancov/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/sancov/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/sanstats/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/sanstats/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/sanstats/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/verify-uselistorder/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/verify-uselistorder/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/verify-uselistorder/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/tools/yaml2obj/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/tools/yaml2obj/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/tools/yaml2obj/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/utils/DSAclean.py b/contrib/libs/llvm16/utils/DSAclean.py
new file mode 100755
index 0000000000..c5fb56b037
--- /dev/null
+++ b/contrib/libs/llvm16/utils/DSAclean.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+
+#changelog:
+#10/13/2005b: replaced the # in tmp(.#*)* with alphanumeric and _, this will then remove
+#nodes such as %tmp.1.i and %tmp._i.3
+#10/13/2005: exntended to remove variables of the form %tmp(.#)* rather than just
+#%tmp.#, i.e. it now will remove %tmp.12.3.15 etc, additionally fixed a spelling error in
+#the comments
+#10/12/2005: now it only removes nodes and edges for which the label is %tmp.# rather
+#than removing all lines for which the lable CONTAINS %tmp.#
+
+from __future__ import print_function
+
+import re
+import sys
+if( len(sys.argv) < 3 ):
+ print('usage is: ./DSAclean <dot_file_to_be_cleaned> <out_put_file>')
+ sys.exit(1)
+#get a file object
+input = open(sys.argv[1], 'r')
+output = open(sys.argv[2], 'w')
+#we'll get this one line at a time...while we could just put the whole thing in a string
+#it would kill old computers
+buffer = input.readline()
+while buffer != '':
+ if re.compile("label(\s*)=(\s*)\"\s%tmp(.\w*)*(\s*)\"").search(buffer):
+ #skip next line, write neither this line nor the next
+ buffer = input.readline()
+ else:
+ #this isn't a tmp Node, we can write it
+ output.write(buffer)
+ #prepare for the next iteration
+ buffer = input.readline()
+input.close()
+output.close()
diff --git a/contrib/libs/llvm16/utils/DSAextract.py b/contrib/libs/llvm16/utils/DSAextract.py
new file mode 100755
index 0000000000..1d93f1e30c
--- /dev/null
+++ b/contrib/libs/llvm16/utils/DSAextract.py
@@ -0,0 +1,113 @@
+#!/usr/bin/env python
+
+#this is a script to extract given named nodes from a dot file, with
+#the associated edges. An edge is kept iff for edge x -> y
+# x and y are both nodes specified to be kept.
+
+#known issues: if a line contains '->' and is not an edge line
+#problems will occur. If node labels do not begin with
+#Node this also will not work. Since this is designed to work
+#on DSA dot output and not general dot files this is ok.
+#If you want to use this on other files rename the node labels
+#to Node[.*] with a script or something. This also relies on
+#the length of a node name being 13 characters (as it is in all
+#DSA dot output files)
+
+#Note that the name of the node can be any substring of the actual
+#name in the dot file. Thus if you say specify COLLAPSED
+#as a parameter this script will pull out all COLLAPSED
+#nodes in the file
+
+#Specifying escape characters in the name like \n also will not work,
+#as Python
+#will make it \\n, I'm not really sure how to fix this
+
+#currently the script prints the names it is searching for
+#to STDOUT, so you can check to see if they are what you intend
+
+from __future__ import print_function
+
+import re
+import string
+import sys
+
+
+if len(sys.argv) < 3:
+ print('usage is ./DSAextract <dot_file_to_modify> \
+ <output_file> [list of nodes to extract]')
+
+#open the input file
+input = open(sys.argv[1], 'r')
+
+#construct a set of node names
+node_name_set = set()
+for name in sys.argv[3:]:
+ node_name_set |= set([name])
+
+#construct a list of compiled regular expressions from the
+#node_name_set
+regexp_list = []
+for name in node_name_set:
+ regexp_list.append(re.compile(name))
+
+#used to see what kind of line we are on
+nodeexp = re.compile('Node')
+#used to check to see if the current line is an edge line
+arrowexp = re.compile('->')
+
+node_set = set()
+
+#read the file one line at a time
+buffer = input.readline()
+while buffer != '':
+ #filter out the unnecessary checks on all the edge lines
+ if not arrowexp.search(buffer):
+ #check to see if this is a node we are looking for
+ for regexp in regexp_list:
+ #if this name is for the current node, add the dot variable name
+ #for the node (it will be Node(hex number)) to our set of nodes
+ if regexp.search(buffer):
+ node_set |= set([re.split('\s+',buffer,2)[1]])
+ break
+ buffer = input.readline()
+
+
+#test code
+#print '\n'
+
+print(node_name_set)
+
+#print node_set
+
+
+#open the output file
+output = open(sys.argv[2], 'w')
+#start the second pass over the file
+input = open(sys.argv[1], 'r')
+
+buffer = input.readline()
+while buffer != '':
+ #there are three types of lines we are looking for
+ #1) node lines, 2) edge lines 3) support lines (like page size, etc)
+
+ #is this an edge line?
+ #note that this is no completely robust, if a none edge line
+ #for some reason contains -> it will be missidentified
+ #hand edit the file if this happens
+ if arrowexp.search(buffer):
+ #check to make sure that both nodes are in the node list
+ #if they are print this to output
+ nodes = arrowexp.split(buffer)
+ nodes[0] = string.strip(nodes[0])
+ nodes[1] = string.strip(nodes[1])
+ if nodes[0][:13] in node_set and \
+ nodes[1][:13] in node_set:
+ output.write(buffer)
+ elif nodeexp.search(buffer): #this is a node line
+ node = re.split('\s+', buffer,2)[1]
+ if node in node_set:
+ output.write(buffer)
+ else: #this is a support line
+ output.write(buffer)
+ buffer = input.readline()
+
diff --git a/contrib/libs/llvm16/utils/TableGen/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/utils/TableGen/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/utils/TableGen/GlobalISel/.yandex_meta/licenses.list.txt b/contrib/libs/llvm16/utils/TableGen/GlobalISel/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..c62d353021
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/GlobalISel/.yandex_meta/licenses.list.txt
@@ -0,0 +1,7 @@
+====================Apache-2.0 WITH LLVM-exception====================
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+
+
+====================Apache-2.0 WITH LLVM-exception====================
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm16/utils/gdb-scripts/prettyprinters.py b/contrib/libs/llvm16/utils/gdb-scripts/prettyprinters.py
new file mode 100644
index 0000000000..1fdf4fe781
--- /dev/null
+++ b/contrib/libs/llvm16/utils/gdb-scripts/prettyprinters.py
@@ -0,0 +1,499 @@
+from __future__ import print_function
+import struct
+import sys
+
+import gdb.printing
+import gdb.types
+
+class Iterator:
+ def __iter__(self):
+ return self
+
+ if sys.version_info.major == 2:
+ def next(self):
+ return self.__next__()
+
+ def children(self):
+ return self
+
+class SmallStringPrinter:
+ """Print an llvm::SmallString object."""
+
+ def __init__(self, val):
+ self.val = val
+
+ def to_string(self):
+ data = self.val['BeginX'].cast(gdb.lookup_type('char').pointer())
+ length = self.val['Size']
+ return data.lazy_string(length=length)
+
+ def display_hint (self):
+ return 'string'
+
+class StringRefPrinter:
+ """Print an llvm::StringRef object."""
+
+ def __init__(self, val):
+ self.val = val
+
+ def to_string(self):
+ data = self.val['Data']
+ length = self.val['Length']
+ return data.lazy_string(length=length)
+
+ def display_hint(self):
+ return 'string'
+
+class SmallVectorPrinter(Iterator):
+ """Print an llvm::SmallVector object."""
+
+ def __init__(self, val):
+ self.val = val
+ t = val.type.template_argument(0).pointer()
+ self.begin = val['BeginX'].cast(t)
+ self.size = val['Size']
+ self.i = 0
+
+ def __next__(self):
+ if self.i == self.size:
+ raise StopIteration
+ ret = '[{}]'.format(self.i), (self.begin+self.i).dereference()
+ self.i += 1
+ return ret
+
+ def to_string(self):
+ return 'llvm::SmallVector of Size {}, Capacity {}'.format(self.size, self.val['Capacity'])
+
+ def display_hint (self):
+ return 'array'
+
+class ArrayRefPrinter:
+ """Print an llvm::ArrayRef object."""
+
+ class _iterator:
+ def __init__(self, begin, end):
+ self.cur = begin
+ self.end = end
+ self.count = 0
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ if self.cur == self.end:
+ raise StopIteration
+ count = self.count
+ self.count = self.count + 1
+ cur = self.cur
+ self.cur = self.cur + 1
+ return '[%d]' % count, cur.dereference()
+
+ if sys.version_info.major == 2:
+ next = __next__
+
+ def __init__(self, val):
+ self.val = val
+
+ def children(self):
+ data = self.val['Data']
+ return self._iterator(data, data + self.val['Length'])
+
+ def to_string(self):
+ return 'llvm::ArrayRef of length %d' % (self.val['Length'])
+
+ def display_hint (self):
+ return 'array'
+
+class ExpectedPrinter(Iterator):
+ """Print an llvm::Expected object."""
+
+ def __init__(self, val):
+ self.val = val
+
+ def __next__(self):
+ val = self.val
+ if val is None:
+ raise StopIteration
+ self.val = None
+ if val['HasError']:
+ return ('error', val['ErrorStorage'].address.cast(
+ gdb.lookup_type('llvm::ErrorInfoBase').pointer()).dereference())
+ return ('value', val['TStorage'].address.cast(
+ val.type.template_argument(0).pointer()).dereference())
+
+ def to_string(self):
+ return 'llvm::Expected{}'.format(' is error' if self.val['HasError'] else '')
+
+class OptionalPrinter(Iterator):
+ """Print an llvm::Optional object."""
+
+ def __init__(self, val):
+ self.val = val
+
+ def __next__(self):
+ val = self.val
+ if val is None:
+ raise StopIteration
+ self.val = None
+ if not val['Storage']['hasVal']:
+ raise StopIteration
+ return ('value', val['Storage']['val'])
+
+ def to_string(self):
+ return 'llvm::Optional{}'.format('' if self.val['Storage']['hasVal'] else ' is not initialized')
+
+class DenseMapPrinter:
+ "Print a DenseMap"
+
+ class _iterator:
+ def __init__(self, key_info_t, begin, end):
+ self.key_info_t = key_info_t
+ self.cur = begin
+ self.end = end
+ self.advancePastEmptyBuckets()
+ self.first = True
+
+ def __iter__(self):
+ return self
+
+ def advancePastEmptyBuckets(self):
+ # disabled until the comments below can be addressed
+ # keeping as notes/posterity/hints for future contributors
+ return
+ n = self.key_info_t.name
+ is_equal = gdb.parse_and_eval(n + '::isEqual')
+ empty = gdb.parse_and_eval(n + '::getEmptyKey()')
+ tombstone = gdb.parse_and_eval(n + '::getTombstoneKey()')
+ # the following is invalid, GDB fails with:
+ # Python Exception <class 'gdb.error'> Attempt to take address of value
+ # not located in memory.
+ # because isEqual took parameter (for the unsigned long key I was testing)
+ # by const ref, and GDB
+ # It's also not entirely general - we should be accessing the "getFirst()"
+ # member function, not the 'first' member variable, but I've yet to figure
+ # out how to find/call member functions (especially (const) overloaded
+ # ones) on a gdb.Value.
+ while self.cur != self.end and (is_equal(self.cur.dereference()['first'], empty) or is_equal(self.cur.dereference()['first'], tombstone)):
+ self.cur = self.cur + 1
+
+ def __next__(self):
+ if self.cur == self.end:
+ raise StopIteration
+ cur = self.cur
+ v = cur.dereference()['first' if self.first else 'second']
+ if not self.first:
+ self.cur = self.cur + 1
+ self.advancePastEmptyBuckets()
+ self.first = True
+ else:
+ self.first = False
+ return 'x', v
+
+ if sys.version_info.major == 2:
+ next = __next__
+
+ def __init__(self, val):
+ self.val = val
+
+ def children(self):
+ t = self.val.type.template_argument(3).pointer()
+ begin = self.val['Buckets'].cast(t)
+ end = (begin + self.val['NumBuckets']).cast(t)
+ return self._iterator(self.val.type.template_argument(2), begin, end)
+
+ def to_string(self):
+ return 'llvm::DenseMap with %d elements' % (self.val['NumEntries'])
+
+ def display_hint(self):
+ return 'map'
+
+class StringMapPrinter:
+ "Print a StringMap"
+
+ def __init__(self, val):
+ self.val = val
+
+ def children(self):
+ it = self.val['TheTable']
+ end = (it + self.val['NumBuckets'])
+ value_ty = self.val.type.template_argument(0)
+ entry_base_ty = gdb.lookup_type('llvm::StringMapEntryBase')
+ tombstone = gdb.parse_and_eval('llvm::StringMapImpl::TombstoneIntVal');
+
+ while it != end:
+ it_deref = it.dereference()
+ if it_deref == 0 or it_deref == tombstone:
+ it = it + 1
+ continue
+
+ entry_ptr = it_deref.cast(entry_base_ty.pointer())
+ entry = entry_ptr.dereference()
+
+ str_len = entry['keyLength']
+ value_ptr = (entry_ptr + 1).cast(value_ty.pointer())
+ str_data = (entry_ptr + 1).cast(gdb.lookup_type('uintptr_t')) + max(value_ty.sizeof, entry_base_ty.alignof)
+ str_data = str_data.cast(gdb.lookup_type('char').const().pointer())
+ string_ref = gdb.Value(struct.pack('PN', int(str_data), int(str_len)), gdb.lookup_type('llvm::StringRef'))
+ yield 'key', string_ref
+
+ value = value_ptr.dereference()
+ yield 'value', value
+
+ it = it + 1
+
+ def to_string(self):
+ return 'llvm::StringMap with %d elements' % (self.val['NumItems'])
+
+ def display_hint(self):
+ return 'map'
+
+class TwinePrinter:
+ "Print a Twine"
+
+ def __init__(self, val):
+ self._val = val
+
+ def display_hint(self):
+ return 'string'
+
+ def string_from_pretty_printer_lookup(self, val):
+ '''Lookup the default pretty-printer for val and use it.
+
+ If no pretty-printer is defined for the type of val, print an error and
+ return a placeholder string.'''
+
+ pp = gdb.default_visualizer(val)
+ if pp:
+ s = pp.to_string()
+
+ # The pretty-printer may return a LazyString instead of an actual Python
+ # string. Convert it to a Python string. However, GDB doesn't seem to
+ # register the LazyString type, so we can't check
+ # "type(s) == gdb.LazyString".
+ if 'LazyString' in type(s).__name__:
+ s = s.value().string()
+
+ else:
+ print(('No pretty printer for {} found. The resulting Twine ' +
+ 'representation will be incomplete.').format(val.type.name))
+ s = '(missing {})'.format(val.type.name)
+
+ return s
+
+ def is_twine_kind(self, kind, expected):
+ if not kind.endswith(expected):
+ return False
+ # apparently some GDB versions add the NodeKind:: namespace
+ # (happens for me on GDB 7.11)
+ return kind in ('llvm::Twine::' + expected,
+ 'llvm::Twine::NodeKind::' + expected)
+
+ def string_from_child(self, child, kind):
+ '''Return the string representation of the Twine::Child child.'''
+
+ if self.is_twine_kind(kind, 'EmptyKind') or self.is_twine_kind(kind, 'NullKind'):
+ return ''
+
+ if self.is_twine_kind(kind, 'TwineKind'):
+ return self.string_from_twine_object(child['twine'].dereference())
+
+ if self.is_twine_kind(kind, 'CStringKind'):
+ return child['cString'].string()
+
+ if self.is_twine_kind(kind, 'StdStringKind'):
+ val = child['stdString'].dereference()
+ return self.string_from_pretty_printer_lookup(val)
+
+ if self.is_twine_kind(kind, 'PtrAndLengthKind'):
+ val = child['ptrAndLength']
+ data = val['ptr']
+ length = val['length']
+ return data.string(length=length)
+
+ if self.is_twine_kind(kind, 'CharKind'):
+ return chr(child['character'])
+
+ if self.is_twine_kind(kind, 'DecUIKind'):
+ return str(child['decUI'])
+
+ if self.is_twine_kind(kind, 'DecIKind'):
+ return str(child['decI'])
+
+ if self.is_twine_kind(kind, 'DecULKind'):
+ return str(child['decUL'].dereference())
+
+ if self.is_twine_kind(kind, 'DecLKind'):
+ return str(child['decL'].dereference())
+
+ if self.is_twine_kind(kind, 'DecULLKind'):
+ return str(child['decULL'].dereference())
+
+ if self.is_twine_kind(kind, 'DecLLKind'):
+ return str(child['decLL'].dereference())
+
+ if self.is_twine_kind(kind, 'UHexKind'):
+ val = child['uHex'].dereference()
+ return hex(int(val))
+
+ print(('Unhandled NodeKind {} in Twine pretty-printer. The result will be '
+ 'incomplete.').format(kind))
+
+ return '(unhandled {})'.format(kind)
+
+ def string_from_twine_object(self, twine):
+ '''Return the string representation of the Twine object twine.'''
+
+ lhs = twine['LHS']
+ rhs = twine['RHS']
+
+ lhs_kind = str(twine['LHSKind'])
+ rhs_kind = str(twine['RHSKind'])
+
+ lhs_str = self.string_from_child(lhs, lhs_kind)
+ rhs_str = self.string_from_child(rhs, rhs_kind)
+
+ return lhs_str + rhs_str
+
+ def to_string(self):
+ return self.string_from_twine_object(self._val)
+
+ def display_hint(self):
+ return 'string'
+
+def get_pointer_int_pair(val):
+ """Get tuple from llvm::PointerIntPair."""
+ info_name = val.type.template_argument(4).strip_typedefs().name
+ # Note: this throws a gdb.error if the info type is not used (by means of a
+ # call to getPointer() or similar) in the current translation unit.
+ enum_type = gdb.lookup_type(info_name + '::MaskAndShiftConstants')
+ enum_dict = gdb.types.make_enum_dict(enum_type)
+ ptr_mask = enum_dict[info_name + '::PointerBitMask']
+ int_shift = enum_dict[info_name + '::IntShift']
+ int_mask = enum_dict[info_name + '::IntMask']
+ pair_union = val['Value']
+ pointer = (pair_union & ptr_mask)
+ value = ((pair_union >> int_shift) & int_mask)
+ return (pointer, value)
+
+class PointerIntPairPrinter:
+ """Print a PointerIntPair."""
+
+ def __init__(self, pointer, value):
+ self.pointer = pointer
+ self.value = value
+
+ def children(self):
+ yield ('pointer', self.pointer)
+ yield ('value', self.value)
+
+ def to_string(self):
+ return '(%s, %s)' % (self.pointer.type, self.value.type)
+
+def make_pointer_int_pair_printer(val):
+ """Factory for an llvm::PointerIntPair printer."""
+ try:
+ pointer, value = get_pointer_int_pair(val)
+ except gdb.error:
+ return None # If PointerIntPair cannot be analyzed, print as raw value.
+ pointer_type = val.type.template_argument(0)
+ value_type = val.type.template_argument(2)
+ return PointerIntPairPrinter(pointer.cast(pointer_type),
+ value.cast(value_type))
+
+class PointerUnionPrinter:
+ """Print a PointerUnion."""
+
+ def __init__(self, pointer):
+ self.pointer = pointer
+
+ def children(self):
+ yield ('pointer', self.pointer)
+
+ def to_string(self):
+ return "Containing %s" % self.pointer.type
+
+def make_pointer_union_printer(val):
+ """Factory for an llvm::PointerUnion printer."""
+ try:
+ pointer, value = get_pointer_int_pair(val['Val'])
+ except gdb.error:
+ return None # If PointerIntPair cannot be analyzed, print as raw value.
+ pointer_type = val.type.template_argument(int(value))
+ return PointerUnionPrinter(pointer.cast(pointer_type))
+
+class IlistNodePrinter:
+ """Print an llvm::ilist_node object."""
+
+ def __init__(self, val):
+ impl_type = val.type.fields()[0].type
+ base_type = impl_type.fields()[0].type
+ derived_type = val.type.template_argument(0)
+
+ def get_prev_and_sentinel(base):
+ # One of Prev and PrevAndSentinel exists. Depending on #defines used to
+      # compile LLVM, the base_type's template argument is either true or false.
+ if base_type.template_argument(0):
+ return get_pointer_int_pair(base['PrevAndSentinel'])
+ return base['Prev'], None
+
+ # Casts a base_type pointer to the appropriate derived type.
+ def cast_pointer(pointer):
+ sentinel = get_prev_and_sentinel(pointer.dereference())[1]
+ pointer = pointer.cast(impl_type.pointer())
+ if sentinel:
+ return pointer
+ return pointer.cast(derived_type.pointer())
+
+    # Repeated cast because val.type's base_type is ambiguous when using tags.
+ base = val.cast(impl_type).cast(base_type)
+ (prev, sentinel) = get_prev_and_sentinel(base)
+ prev = prev.cast(base_type.pointer())
+ self.prev = cast_pointer(prev)
+ self.next = cast_pointer(val['Next'])
+ self.sentinel = sentinel
+
+ def children(self):
+ if self.sentinel:
+ yield 'sentinel', 'yes'
+ yield 'prev', self.prev
+ yield 'next', self.next
+
+class IlistPrinter:
+ """Print an llvm::simple_ilist or llvm::iplist object."""
+
+ def __init__(self, val):
+ self.node_type = val.type.template_argument(0)
+ sentinel = val['Sentinel']
+ # First field is common base type of sentinel and ilist_node.
+ base_type = sentinel.type.fields()[0].type
+ self.sentinel = sentinel.address.cast(base_type.pointer())
+
+ def _pointers(self):
+ pointer = self.sentinel
+ while True:
+ pointer = pointer['Next'].cast(pointer.type)
+ if pointer == self.sentinel:
+ return
+ yield pointer.cast(self.node_type.pointer())
+
+ def children(self):
+ for k, v in enumerate(self._pointers()):
+ yield ('[%d]' % k, v.dereference())
+
+
+pp = gdb.printing.RegexpCollectionPrettyPrinter("LLVMSupport")
+pp.add_printer('llvm::SmallString', '^llvm::SmallString<.*>$', SmallStringPrinter)
+pp.add_printer('llvm::StringRef', '^llvm::StringRef$', StringRefPrinter)
+pp.add_printer('llvm::SmallVectorImpl', '^llvm::SmallVector(Impl)?<.*>$', SmallVectorPrinter)
+pp.add_printer('llvm::ArrayRef', '^llvm::(Mutable)?ArrayRef<.*>$', ArrayRefPrinter)
+pp.add_printer('llvm::Expected', '^llvm::Expected<.*>$', ExpectedPrinter)
+pp.add_printer('llvm::Optional', '^llvm::Optional<.*>$', OptionalPrinter)
+pp.add_printer('llvm::DenseMap', '^llvm::DenseMap<.*>$', DenseMapPrinter)
+pp.add_printer('llvm::StringMap', '^llvm::StringMap<.*>$', StringMapPrinter)
+pp.add_printer('llvm::Twine', '^llvm::Twine$', TwinePrinter)
+pp.add_printer('llvm::PointerIntPair', '^llvm::PointerIntPair<.*>$', make_pointer_int_pair_printer)
+pp.add_printer('llvm::PointerUnion', '^llvm::PointerUnion<.*>$', make_pointer_union_printer)
+pp.add_printer('llvm::ilist_node', '^llvm::ilist_node<.*>$', IlistNodePrinter)
+pp.add_printer('llvm::iplist', '^llvm::iplist<.*>$', IlistPrinter)
+pp.add_printer('llvm::simple_ilist', '^llvm::simple_ilist<.*>$', IlistPrinter)
+gdb.printing.register_pretty_printer(gdb.current_objfile(), pp)