author     vitalyisaev <vitalyisaev@yandex-team.com>   2023-06-29 10:00:50 +0300
committer  vitalyisaev <vitalyisaev@yandex-team.com>   2023-06-29 10:00:50 +0300
commit     6ffe9e53658409f212834330e13564e4952558f6 (patch)
tree       85b1e00183517648b228aafa7c8fb07f5276f419 /contrib/libs/llvm16/include/llvm/Frontend
parent     726057070f9c5a91fc10fde0d5024913d10f1ab9 (diff)
download   ydb-6ffe9e53658409f212834330e13564e4952558f6.tar.gz
YQ Connector: support managed ClickHouse
From dqrun one can now reach a connector instance running on the streaming stand and pull data out of a cloud ClickHouse instance.
Diffstat (limited to 'contrib/libs/llvm16/include/llvm/Frontend')
-rw-r--r--  contrib/libs/llvm16/include/llvm/Frontend/Directive/DirectiveBase.td    157
-rw-r--r--  contrib/libs/llvm16/include/llvm/Frontend/OpenACC/ACC.td                 639
-rw-r--r--  contrib/libs/llvm16/include/llvm/Frontend/OpenMP/OMP.td                 2103
-rw-r--r--  contrib/libs/llvm16/include/llvm/Frontend/OpenMP/OMPAssume.h              66
-rw-r--r--  contrib/libs/llvm16/include/llvm/Frontend/OpenMP/OMPConstants.h          288
-rw-r--r--  contrib/libs/llvm16/include/llvm/Frontend/OpenMP/OMPContext.h            221
-rw-r--r--  contrib/libs/llvm16/include/llvm/Frontend/OpenMP/OMPDeviceConstants.h     42
-rw-r--r--  contrib/libs/llvm16/include/llvm/Frontend/OpenMP/OMPGridValues.h         137
-rw-r--r--  contrib/libs/llvm16/include/llvm/Frontend/OpenMP/OMPIRBuilder.h         2416
-rw-r--r--  contrib/libs/llvm16/include/llvm/Frontend/OpenMP/OMPKinds.def           1322
10 files changed, 7391 insertions, 0 deletions
diff --git a/contrib/libs/llvm16/include/llvm/Frontend/Directive/DirectiveBase.td b/contrib/libs/llvm16/include/llvm/Frontend/Directive/DirectiveBase.td
new file mode 100644
index 0000000000..4269a966a9
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/Frontend/Directive/DirectiveBase.td
@@ -0,0 +1,157 @@
+//===-- DirectiveBase.td - Base directive definition file --*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the base definition file for directives and clauses.
+//
+//===----------------------------------------------------------------------===//
+
+
+// General information about the directive language.
+class DirectiveLanguage {
+ // Name of the directive language such as omp or acc.
+ string name = ?;
+
+ // The C++ namespace that code of this directive language should be placed
+  // into. This namespace is nested inside the llvm namespace.
+ //
+ // By default, uses the name of the directive language as the only namespace.
+ // To avoid placing in any namespace, use "". To specify nested namespaces,
+ // use "::" as the delimiter, e.g., given "A::B", ops will be placed in
+ // `namespace A { namespace B { <directives-clauses> } }`.
+ string cppNamespace = name;
+
+ // Optional prefix used for the generation of the enumerator in the Directive
+ // enum.
+ string directivePrefix = "";
+
+ // Optional prefix used for the generation of the enumerator in the Clause
+ // enum.
+ string clausePrefix = "";
+
+ // Make the enum values available in the namespace. This allows us to
+ // write something like Enum_X if we have a `using namespace cppNamespace`.
+ bit makeEnumAvailableInNamespace = false;
+
+ // Generate include and macro to enable LLVM BitmaskEnum.
+ bit enableBitmaskEnumInNamespace = false;
+
+  // Header file included in the generated implementation code. Usually the
+ // output file of the declaration code generation. Can be left blank.
+ string includeHeader = "";
+
+  // EnumSet class name used for clauses to generate the allowed clauses map.
+ string clauseEnumSetClass = "";
+
+ // Class holding the clauses in the flang parse-tree.
+ string flangClauseBaseClass = "";
+}
+
+// Information about values accepted by enum-like clauses.
+class ClauseVal<string n, int v, bit uv> {
+ // Name of the clause value.
+ string name = n;
+
+ // Integer value of the clause.
+ int value = v;
+
+  // Can the user specify this value?
+ bit isUserValue = uv;
+
+ // Set clause value used by default when unknown.
+ bit isDefault = false;
+}
+
+// Information about a specific clause.
+class Clause<string c> {
+ // Name of the clause.
+ string name = c;
+
+  // Define an alternative name returned by the
+  // get<LanguageName>ClauseName function.
+ string alternativeName = "";
+
+ // Define aliases used in the parser.
+ list<string> aliases = [];
+
+ // Optional class holding value of the clause in clang AST.
+ string clangClass = "";
+
+ // Optional class holding value of the clause in flang AST.
+ string flangClass = "";
+
+ // If set to true, value is optional. Not optional by default.
+ bit isValueOptional = false;
+
+ // Name of enum when there is a list of allowed clause values.
+ string enumClauseValue = "";
+
+  // List of allowed clause values.
+ list<ClauseVal> allowedClauseValues = [];
+
+ // If set to true, value class is part of a list. Single class by default.
+ bit isValueList = false;
+
+ // Define a default value such as "*".
+ string defaultValue = "";
+
+  // Is the clause implicit? If a clause is set as implicit, the default kind
+  // will be returned by get<LanguageName>ClauseKind instead of its own kind.
+ bit isImplicit = false;
+
+  // Set the clause used by default when unknown. The function returning the
+  // kind of enumeration will use this clause as the default.
+ bit isDefault = false;
+
+ // Prefix before the actual value. Used in the parser generation.
+ // `clause(prefix: value)`
+ string prefix = "";
+
+ // Set the prefix as optional.
+ // `clause([prefix]: value)`
+ bit isPrefixOptional = true;
+}
+
+// Hold information about clause validity by version.
+class VersionedClause<Clause c, int min = 1, int max = 0x7FFFFFFF> {
+ // Actual clause.
+ Clause clause = c;
+
+  // Minimum version number where this clause is valid.
+ int minVersion = min;
+
+ // Maximum version number where this clause is valid.
+ int maxVersion = max;
+}
+
+// Information about a specific directive.
+class Directive<string d> {
+  // Name of the directive. Can be a composite directive separated by
+  // whitespace.
+ string name = d;
+
+  // Define an alternative name returned by the get<LanguageName>DirectiveName
+ // function.
+ string alternativeName = "";
+
+ // Clauses cannot appear twice in the three allowed lists below. Also, since
+ // required implies allowed, the same clause cannot appear in both the
+ // allowedClauses and requiredClauses lists.
+
+ // List of allowed clauses for the directive.
+ list<VersionedClause> allowedClauses = [];
+
+ // List of clauses that are allowed to appear only once.
+ list<VersionedClause> allowedOnceClauses = [];
+
+ // List of clauses that are allowed but mutually exclusive.
+ list<VersionedClause> allowedExclusiveClauses = [];
+
+ // List of clauses that are required.
+ list<VersionedClause> requiredClauses = [];
+
+ // Set directive used by default when unknown.
+ bit isDefault = false;
+}
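
To make the base classes above concrete: a directive language definition instantiates DirectiveLanguage once, describes its clauses with Clause records (optionally backed by ClauseVal enumerations), and attaches them to Directive records through VersionedClause. The sketch below is illustrative only and is not part of the patch; the Foo/FOOD_/FOOC_ names are made up.

// Hypothetical directive language built on DirectiveBase.td.
def Foo : DirectiveLanguage {
  let name = "Foo";
  let cppNamespace = "foo";       // generated enums land in llvm::foo
  let directivePrefix = "FOOD_";  // Directive enumerators become FOOD_<name>
  let clausePrefix = "FOOC_";     // Clause enumerators become FOOC_<name>
}

// Enum-like clause: only user-visible values set the isUserValue bit.
def FOO_Mode_fast    : ClauseVal<"fast", 1, 1> {}
def FOO_Mode_unknown : ClauseVal<"unknown", 2, 0> { let isDefault = true; }
def FOOC_Mode : Clause<"mode"> {
  let enumClauseValue = "ModeKind";
  let allowedClauseValues = [FOO_Mode_fast, FOO_Mode_unknown];
}

// The mode clause may appear at most once, and only in versions 2 through 3.
def FOOD_Run : Directive<"run"> {
  let allowedOnceClauses = [VersionedClause<FOOC_Mode, 2, 3>];
}
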
diff --git a/contrib/libs/llvm16/include/llvm/Frontend/OpenACC/ACC.td b/contrib/libs/llvm16/include/llvm/Frontend/OpenACC/ACC.td
new file mode 100644
index 0000000000..e5f0632f59
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/Frontend/OpenACC/ACC.td
@@ -0,0 +1,639 @@
+//===-- ACC.td - OpenACC directive definition file ---------*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the definition file for OpenACC 3.1 directives and clauses.
+//
+//===----------------------------------------------------------------------===//
+
+include "llvm/Frontend/Directive/DirectiveBase.td"
+
+//===----------------------------------------------------------------------===//
+// Definition of general OpenACC information
+//===----------------------------------------------------------------------===//
+
+def OpenACC : DirectiveLanguage {
+ let name = "OpenACC";
+ let cppNamespace = "acc"; // final namespace will be llvm::acc
+ let directivePrefix = "ACCD_";
+ let clausePrefix = "ACCC_";
+ let makeEnumAvailableInNamespace = true;
+ let enableBitmaskEnumInNamespace = true;
+ let clauseEnumSetClass = "AccClauseSet";
+ let flangClauseBaseClass = "AccClause";
+}
+
+//===----------------------------------------------------------------------===//
+// Definition of OpenACC clauses
+//===----------------------------------------------------------------------===//
+
+// 2.16.1
+def ACCC_Async : Clause<"async"> {
+ let flangClass = "ScalarIntExpr";
+ let isValueOptional = true;
+}
+
+// 2.9.7
+def ACCC_Auto : Clause<"auto"> {}
+
+// 2.7.12
+def ACCC_Attach : Clause<"attach"> {
+ let flangClass = "AccObjectList";
+}
+
+// 2.15.1
+def ACCC_Bind : Clause<"bind"> {
+ let flangClass = "AccBindClause";
+}
+
+// 2.12
+def ACCC_Capture : Clause<"capture"> {
+}
+
+// 2.9.1
+def ACCC_Collapse : Clause<"collapse"> {
+ let flangClass = "ScalarIntConstantExpr";
+}
+
+// 2.7.6
+def ACCC_Copy : Clause<"copy"> {
+ let flangClass = "AccObjectList";
+ let aliases = ["present_or_copy", "pcopy"];
+}
+// 2.7.7
+def ACCC_Copyin : Clause<"copyin"> {
+ let flangClass = "AccObjectListWithModifier";
+ let aliases = ["present_or_copyin", "pcopyin"];
+}
+
+// 2.7.8
+def ACCC_Copyout : Clause<"copyout"> {
+ let flangClass = "AccObjectListWithModifier";
+ let aliases = ["present_or_copyout", "pcopyout"];
+}
+
+// 2.7.9
+def ACCC_Create : Clause<"create"> {
+ let flangClass = "AccObjectListWithModifier";
+ let aliases = ["present_or_create", "pcreate"];
+}
+
+// 2.5.15
+def ACC_Default_none : ClauseVal<"none", 1, 1> { let isDefault = 1; }
+def ACC_Default_present : ClauseVal<"present", 0, 1> {}
+
+def ACCC_Default : Clause<"default"> {
+ let flangClass = "AccDefaultClause";
+ let enumClauseValue = "DefaultValue";
+ let allowedClauseValues = [
+ ACC_Default_present,
+ ACC_Default_none
+ ];
+}
+
+// 2.14.3
+def ACCC_DefaultAsync : Clause<"default_async"> {
+ let flangClass = "ScalarIntExpr";
+}
+
+// 2.7.11
+def ACCC_Delete : Clause<"delete"> {
+ let flangClass = "AccObjectList";
+}
+
+// 2.7.13
+def ACCC_Detach : Clause<"detach"> {
+ let flangClass = "AccObjectList";
+}
+
+// 2.14.4
+def ACCC_Device : Clause<"device"> {
+ let flangClass = "AccObjectList";
+}
+
+// 2.14.1 - 2.14.2
+def ACCC_DeviceNum : Clause<"device_num"> {
+ let flangClass = "ScalarIntExpr";
+}
+
+// 2.7.4
+def ACCC_DevicePtr : Clause<"deviceptr"> {
+ let flangClass = "AccObjectList";
+}
+
+// 2.13.1
+def ACCC_DeviceResident : Clause<"device_resident"> {
+ let flangClass = "AccObjectList";
+}
+
+// 2.4
+def ACCC_DeviceType : Clause<"device_type"> {
+ let flangClass = "AccDeviceTypeExprList";
+ let defaultValue = "*";
+ let aliases = ["dtype"];
+}
+
+// 2.6.6
+def ACCC_Finalize : Clause<"finalize"> {}
+
+// 2.5.13
+def ACCC_FirstPrivate : Clause<"firstprivate"> {
+ let flangClass = "AccObjectList";
+}
+
+// 2.9.2
+def ACCC_Gang : Clause<"gang"> {
+ let flangClass = "AccGangArgument";
+ let isValueOptional = true;
+}
+
+// 2.14.4
+def ACCC_Host : Clause<"host"> {
+ let flangClass = "AccObjectList";
+}
+
+// 2.5.5
+def ACCC_If : Clause <"if"> {
+ let flangClass = "ScalarLogicalExpr";
+}
+
+// 2.14.4
+def ACCC_IfPresent : Clause<"if_present"> {}
+
+// 2.9.6
+def ACCC_Independent : Clause<"independent"> {}
+
+// 2.13.3
+def ACCC_Link : Clause<"link"> {
+ let flangClass = "AccObjectList";
+}
+
+// 2.7.10
+def ACCC_NoCreate : Clause<"no_create"> {
+ let flangClass = "AccObjectList";
+}
+
+// 2.15.1
+def ACCC_NoHost : Clause<"nohost"> {}
+
+// 2.5.9
+def ACCC_NumGangs : Clause<"num_gangs"> {
+ let flangClass = "ScalarIntExpr";
+}
+
+// 2.5.10
+def ACCC_NumWorkers : Clause<"num_workers"> {
+ let flangClass = "ScalarIntExpr";
+}
+
+// 2.7.5
+def ACCC_Present : Clause<"present"> {
+ let flangClass = "AccObjectList";
+}
+
+// 2.5.12
+def ACCC_Private : Clause<"private"> {
+ let flangClass = "AccObjectList";
+}
+
+// 2.9.8
+def ACCC_Tile : Clause <"tile"> {
+ let flangClass = "AccTileExprList";
+}
+
+// 2.8.1
+def ACCC_UseDevice : Clause <"use_device"> {
+ let flangClass = "AccObjectList";
+}
+
+// 2.12
+def ACCC_Read : Clause<"read"> {}
+
+// 2.5.14
+def ACCC_Reduction : Clause<"reduction"> {
+ let flangClass = "AccObjectListWithReduction";
+}
+
+// 2.5.6
+def ACCC_Self : Clause<"self"> {
+ let flangClass = "AccSelfClause";
+ let isValueOptional = true;
+}
+
+// 2.9.5
+def ACCC_Seq : Clause<"seq"> {}
+
+// 2.9.4
+def ACCC_Vector : Clause<"vector"> {
+ let flangClass = "ScalarIntExpr";
+ let isValueOptional = true;
+ let prefix = "length";
+}
+
+// 2.5.11
+def ACCC_VectorLength : Clause<"vector_length"> {
+ let flangClass = "ScalarIntExpr";
+}
+
+// 2.16.2
+def ACCC_Wait : Clause<"wait"> {
+ let flangClass = "AccWaitArgument";
+ let isValueOptional = true;
+}
+
+// 2.9.3
+def ACCC_Worker: Clause<"worker"> {
+ let flangClass = "ScalarIntExpr";
+ let isValueOptional = true;
+ let prefix = "num";
+}
+
+// 2.12
+def ACCC_Write : Clause<"write"> {}
+
+def ACCC_Unknown : Clause<"unknown"> {
+ let isDefault = true;
+}
+
+//===----------------------------------------------------------------------===//
+// Definition of OpenACC directives
+//===----------------------------------------------------------------------===//
+
+// 2.12
+def ACC_Atomic : Directive<"atomic"> {}
+
+// 2.6.5
+def ACC_Data : Directive<"data"> {
+ let allowedOnceClauses = [
+ VersionedClause<ACCC_If>,
+ VersionedClause<ACCC_Default>
+ ];
+ let requiredClauses = [
+ VersionedClause<ACCC_Attach>,
+ VersionedClause<ACCC_Copy>,
+ VersionedClause<ACCC_Copyin>,
+ VersionedClause<ACCC_Copyout>,
+ VersionedClause<ACCC_Create>,
+ VersionedClause<ACCC_Default>,
+ VersionedClause<ACCC_DevicePtr>,
+ VersionedClause<ACCC_NoCreate>,
+ VersionedClause<ACCC_Present>
+ ];
+}
+
+// 2.13
+def ACC_Declare : Directive<"declare"> {
+ let allowedClauses = [
+ VersionedClause<ACCC_Copy>,
+ VersionedClause<ACCC_Copyin>,
+ VersionedClause<ACCC_Copyout>,
+ VersionedClause<ACCC_Create>,
+ VersionedClause<ACCC_Present>,
+ VersionedClause<ACCC_DevicePtr>,
+ VersionedClause<ACCC_DeviceResident>,
+ VersionedClause<ACCC_Link>
+ ];
+}
+
+// 2.5.3
+def ACC_Kernels : Directive<"kernels"> {
+ let allowedClauses = [
+ VersionedClause<ACCC_Attach>,
+ VersionedClause<ACCC_Copy>,
+ VersionedClause<ACCC_Copyin>,
+ VersionedClause<ACCC_Copyout>,
+ VersionedClause<ACCC_Create>,
+ VersionedClause<ACCC_DeviceType>,
+ VersionedClause<ACCC_NoCreate>,
+ VersionedClause<ACCC_Present>,
+ VersionedClause<ACCC_DevicePtr>,
+ VersionedClause<ACCC_Wait>
+ ];
+ let allowedOnceClauses = [
+ VersionedClause<ACCC_Async>,
+ VersionedClause<ACCC_Default>,
+ VersionedClause<ACCC_If>,
+ VersionedClause<ACCC_NumGangs>,
+ VersionedClause<ACCC_NumWorkers>,
+ VersionedClause<ACCC_Self>,
+ VersionedClause<ACCC_VectorLength>
+ ];
+}
+
+// 2.5.1
+def ACC_Parallel : Directive<"parallel"> {
+ let allowedClauses = [
+ VersionedClause<ACCC_Attach>,
+ VersionedClause<ACCC_Copy>,
+ VersionedClause<ACCC_Copyin>,
+ VersionedClause<ACCC_Copyout>,
+ VersionedClause<ACCC_Create>,
+ VersionedClause<ACCC_DevicePtr>,
+ VersionedClause<ACCC_DeviceType>,
+ VersionedClause<ACCC_NoCreate>,
+ VersionedClause<ACCC_Present>,
+ VersionedClause<ACCC_Private>,
+ VersionedClause<ACCC_FirstPrivate>,
+ VersionedClause<ACCC_Wait>
+ ];
+ let allowedOnceClauses = [
+ VersionedClause<ACCC_Async>,
+ VersionedClause<ACCC_Default>,
+ VersionedClause<ACCC_If>,
+ VersionedClause<ACCC_NumGangs>,
+ VersionedClause<ACCC_NumWorkers>,
+ VersionedClause<ACCC_Reduction>,
+ VersionedClause<ACCC_Self>,
+ VersionedClause<ACCC_VectorLength>
+ ];
+}
+
+// 2.5.2
+def ACC_Serial : Directive<"serial"> {
+  // Spec line 950-951: the allowed clauses are as for the parallel construct,
+  // except that the num_gangs, num_workers, and vector_length clauses are not
+  // permitted.
+ let allowedClauses = [
+ VersionedClause<ACCC_Attach>,
+ VersionedClause<ACCC_Copy>,
+ VersionedClause<ACCC_Copyin>,
+ VersionedClause<ACCC_Copyout>,
+ VersionedClause<ACCC_Create>,
+ VersionedClause<ACCC_DevicePtr>,
+ VersionedClause<ACCC_DeviceType>,
+ VersionedClause<ACCC_NoCreate>,
+ VersionedClause<ACCC_Present>,
+ VersionedClause<ACCC_Private>,
+ VersionedClause<ACCC_FirstPrivate>,
+ VersionedClause<ACCC_Wait>
+ ];
+ let allowedOnceClauses = [
+ VersionedClause<ACCC_Async>,
+ VersionedClause<ACCC_Default>,
+ VersionedClause<ACCC_If>,
+ VersionedClause<ACCC_Reduction>,
+ VersionedClause<ACCC_Self>
+ ];
+}
+
+// 2.9
+def ACC_Loop : Directive<"loop"> {
+ let allowedClauses = [
+ VersionedClause<ACCC_DeviceType>,
+ VersionedClause<ACCC_Private>
+ ];
+ let allowedOnceClauses = [
+ VersionedClause<ACCC_Collapse>,
+ VersionedClause<ACCC_Gang>,
+ VersionedClause<ACCC_Reduction>,
+ VersionedClause<ACCC_Tile>,
+ VersionedClause<ACCC_Vector>,
+ VersionedClause<ACCC_Worker>
+ ];
+ let allowedExclusiveClauses = [
+ VersionedClause<ACCC_Auto>,
+ VersionedClause<ACCC_Independent>,
+ VersionedClause<ACCC_Seq>
+ ];
+}
+
+// 2.10
+def ACC_Cache : Directive<"cache"> {}
+
+// 2.14.1
+def ACC_Init : Directive<"init"> {
+ let allowedOnceClauses = [
+ VersionedClause<ACCC_DeviceNum>,
+ VersionedClause<ACCC_DeviceType>,
+ VersionedClause<ACCC_If>
+ ];
+}
+
+// 2.15.1
+def ACC_Routine : Directive<"routine"> {
+ let allowedOnceClauses = [
+ VersionedClause<ACCC_Bind>,
+ VersionedClause<ACCC_DeviceType>,
+ VersionedClause<ACCC_NoHost>
+ ];
+ let requiredClauses = [
+ VersionedClause<ACCC_Gang>,
+ VersionedClause<ACCC_Seq>,
+ VersionedClause<ACCC_Vector>,
+ VersionedClause<ACCC_Worker>
+ ];
+}
+
+// 2.14.3
+def ACC_Set : Directive<"set"> {
+ let allowedOnceClauses = [
+ VersionedClause<ACCC_DefaultAsync>,
+ VersionedClause<ACCC_DeviceNum>,
+ VersionedClause<ACCC_DeviceType>,
+ VersionedClause<ACCC_If>
+ ];
+ let requiredClauses = [
+    // The following three clauses are also in the allowedOnceClauses list due
+    // to restriction 2255: two instances of the same clause may not appear on
+    // the same directive.
+ VersionedClause<ACCC_DefaultAsync>,
+ VersionedClause<ACCC_DeviceNum>,
+ VersionedClause<ACCC_DeviceType>
+ ];
+}
+
+// 2.14.2
+def ACC_Shutdown : Directive<"shutdown"> {
+ let allowedOnceClauses = [
+ VersionedClause<ACCC_DeviceNum>,
+ VersionedClause<ACCC_DeviceType>,
+ VersionedClause<ACCC_If>
+ ];
+}
+
+// 2.14.4
+def ACC_Update : Directive<"update"> {
+ let allowedClauses = [
+ VersionedClause<ACCC_DeviceType>,
+ VersionedClause<ACCC_Wait>
+ ];
+ let allowedOnceClauses = [
+ VersionedClause<ACCC_Async>,
+ VersionedClause<ACCC_If>,
+ VersionedClause<ACCC_IfPresent>
+ ];
+ let requiredClauses = [
+ VersionedClause<ACCC_Device>,
+ VersionedClause<ACCC_Host>,
+ VersionedClause<ACCC_Self>
+ ];
+}
+
+// 2.16.3
+def ACC_Wait : Directive<"wait"> {
+ let allowedOnceClauses = [
+ VersionedClause<ACCC_Async>,
+ VersionedClause<ACCC_If>
+ ];
+}
+
+// 2.14.6
+def ACC_EnterData : Directive<"enter data"> {
+ let allowedClauses = [
+ VersionedClause<ACCC_Wait>
+ ];
+ let allowedOnceClauses = [
+ VersionedClause<ACCC_Async>,
+ VersionedClause<ACCC_If>
+ ];
+ let requiredClauses = [
+ VersionedClause<ACCC_Attach>,
+ VersionedClause<ACCC_Create>,
+ VersionedClause<ACCC_Copyin>
+ ];
+}
+
+// 2.14.7
+def ACC_ExitData : Directive<"exit data"> {
+ let allowedClauses = [
+ VersionedClause<ACCC_Wait>
+ ];
+ let allowedOnceClauses = [
+ VersionedClause<ACCC_Async>,
+ VersionedClause<ACCC_If>,
+ VersionedClause<ACCC_Finalize>
+ ];
+ let requiredClauses = [
+ VersionedClause<ACCC_Copyout>,
+ VersionedClause<ACCC_Delete>,
+ VersionedClause<ACCC_Detach>
+ ];
+}
+
+// 2.8
+def ACC_HostData : Directive<"host_data"> {
+ let allowedOnceClauses = [
+ VersionedClause<ACCC_If>,
+ VersionedClause<ACCC_IfPresent>
+ ];
+ let requiredClauses = [
+ VersionedClause<ACCC_UseDevice>
+ ];
+}
+
+// 2.11
+def ACC_KernelsLoop : Directive<"kernels loop"> {
+ let allowedClauses = [
+ VersionedClause<ACCC_Copy>,
+ VersionedClause<ACCC_Copyin>,
+ VersionedClause<ACCC_Copyout>,
+ VersionedClause<ACCC_Create>,
+ VersionedClause<ACCC_DeviceType>,
+ VersionedClause<ACCC_NoCreate>,
+ VersionedClause<ACCC_Present>,
+ VersionedClause<ACCC_Private>,
+ VersionedClause<ACCC_DevicePtr>,
+ VersionedClause<ACCC_Attach>,
+ VersionedClause<ACCC_Wait>
+ ];
+ let allowedOnceClauses = [
+ VersionedClause<ACCC_Async>,
+ VersionedClause<ACCC_Collapse>,
+ VersionedClause<ACCC_Default>,
+ VersionedClause<ACCC_Gang>,
+ VersionedClause<ACCC_If>,
+ VersionedClause<ACCC_NumGangs>,
+ VersionedClause<ACCC_NumWorkers>,
+ VersionedClause<ACCC_Reduction>,
+ VersionedClause<ACCC_Self>,
+ VersionedClause<ACCC_Tile>,
+ VersionedClause<ACCC_Vector>,
+ VersionedClause<ACCC_VectorLength>,
+ VersionedClause<ACCC_Worker>
+ ];
+ let allowedExclusiveClauses = [
+ VersionedClause<ACCC_Auto>,
+ VersionedClause<ACCC_Independent>,
+ VersionedClause<ACCC_Seq>
+ ];
+}
+
+// 2.11
+def ACC_ParallelLoop : Directive<"parallel loop"> {
+ let allowedClauses = [
+ VersionedClause<ACCC_Attach>,
+ VersionedClause<ACCC_Copy>,
+ VersionedClause<ACCC_Copyin>,
+ VersionedClause<ACCC_Copyout>,
+ VersionedClause<ACCC_Create>,
+ VersionedClause<ACCC_DevicePtr>,
+ VersionedClause<ACCC_DeviceType>,
+ VersionedClause<ACCC_FirstPrivate>,
+ VersionedClause<ACCC_NoCreate>,
+ VersionedClause<ACCC_Present>,
+ VersionedClause<ACCC_Private>,
+ VersionedClause<ACCC_Tile>,
+ VersionedClause<ACCC_Wait>
+ ];
+ let allowedOnceClauses = [
+ VersionedClause<ACCC_Async>,
+ VersionedClause<ACCC_Collapse>,
+ VersionedClause<ACCC_Default>,
+ VersionedClause<ACCC_Gang>,
+ VersionedClause<ACCC_If>,
+ VersionedClause<ACCC_NumGangs>,
+ VersionedClause<ACCC_NumWorkers>,
+ VersionedClause<ACCC_Reduction>,
+ VersionedClause<ACCC_Self>,
+ VersionedClause<ACCC_Vector>,
+ VersionedClause<ACCC_VectorLength>,
+ VersionedClause<ACCC_Worker>
+ ];
+ let allowedExclusiveClauses = [
+ VersionedClause<ACCC_Auto>,
+ VersionedClause<ACCC_Independent>,
+ VersionedClause<ACCC_Seq>
+ ];
+}
+
+// 2.11
+def ACC_SerialLoop : Directive<"serial loop"> {
+ let allowedClauses = [
+ VersionedClause<ACCC_Attach>,
+ VersionedClause<ACCC_Copy>,
+ VersionedClause<ACCC_Copyin>,
+ VersionedClause<ACCC_Copyout>,
+ VersionedClause<ACCC_Create>,
+ VersionedClause<ACCC_DevicePtr>,
+ VersionedClause<ACCC_DeviceType>,
+ VersionedClause<ACCC_FirstPrivate>,
+ VersionedClause<ACCC_NoCreate>,
+ VersionedClause<ACCC_Present>,
+ VersionedClause<ACCC_Private>,
+ VersionedClause<ACCC_Wait>
+ ];
+ let allowedOnceClauses = [
+ VersionedClause<ACCC_Async>,
+ VersionedClause<ACCC_Collapse>,
+ VersionedClause<ACCC_Default>,
+ VersionedClause<ACCC_Gang>,
+ VersionedClause<ACCC_If>,
+ VersionedClause<ACCC_Reduction>,
+ VersionedClause<ACCC_Self>,
+ VersionedClause<ACCC_Tile>,
+ VersionedClause<ACCC_Vector>,
+ VersionedClause<ACCC_Worker>
+ ];
+ let allowedExclusiveClauses = [
+ VersionedClause<ACCC_Auto>,
+ VersionedClause<ACCC_Independent>,
+ VersionedClause<ACCC_Seq>
+ ];
+}
+
+def ACC_Unknown : Directive<"unknown"> {
+ let isDefault = true;
+}
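
One detail worth noting before the OpenMP file: unlike ACC.td above, the OpenMP definitions below lean heavily on the version arguments of VersionedClause, encoding spec versions with the decimal point dropped (50 for OpenMP 5.0, 51 for 5.1, and so on). A minimal sketch of the pattern, using a clause defined below; the directive itself is hypothetical and not part of the patch:

// Illustrative only: the detach clause is accepted only from OpenMP 5.0 on.
def OMP_ExampleDirective : Directive<"example"> {
  let allowedClauses = [
    VersionedClause<OMPC_Detach, 50>
  ];
}
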
diff --git a/contrib/libs/llvm16/include/llvm/Frontend/OpenMP/OMP.td b/contrib/libs/llvm16/include/llvm/Frontend/OpenMP/OMP.td
new file mode 100644
index 0000000000..9f732e8c61
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/Frontend/OpenMP/OMP.td
@@ -0,0 +1,2103 @@
+//===-- OMP.td - OpenMP directive definition file ----------*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the definition file for OpenMP directives and clauses.
+//
+//===----------------------------------------------------------------------===//
+
+include "llvm/Frontend/Directive/DirectiveBase.td"
+
+//===----------------------------------------------------------------------===//
+// Definition of general OpenMP information
+//===----------------------------------------------------------------------===//
+
+def OpenMP : DirectiveLanguage {
+ let name = "OpenMP";
+ let cppNamespace = "omp"; // final namespace will be llvm::omp
+ let directivePrefix = "OMPD_";
+ let clausePrefix = "OMPC_";
+ let makeEnumAvailableInNamespace = true;
+ let enableBitmaskEnumInNamespace = true;
+ let clauseEnumSetClass = "OmpClauseSet";
+ let flangClauseBaseClass = "OmpClause";
+}
+
+//===----------------------------------------------------------------------===//
+// Definition of OpenMP clauses
+//===----------------------------------------------------------------------===//
+
+def OMPC_Allocator : Clause<"allocator"> {
+ let clangClass = "OMPAllocatorClause";
+ let flangClass = "ScalarIntExpr";
+}
+def OMPC_If : Clause<"if"> {
+ let clangClass = "OMPIfClause";
+ let flangClass = "OmpIfClause";
+}
+def OMPC_Final : Clause<"final"> {
+ let clangClass = "OMPFinalClause";
+ let flangClass = "ScalarLogicalExpr";
+}
+def OMPC_NumThreads : Clause<"num_threads"> {
+ let clangClass = "OMPNumThreadsClause";
+ let flangClass = "ScalarIntExpr";
+}
+def OMPC_SafeLen : Clause<"safelen"> {
+ let clangClass = "OMPSafelenClause";
+ let flangClass = "ScalarIntConstantExpr";
+}
+def OMPC_SimdLen : Clause<"simdlen"> {
+ let clangClass = "OMPSimdlenClause";
+ let flangClass = "ScalarIntConstantExpr";
+}
+def OMPC_Collapse : Clause<"collapse"> {
+ let clangClass = "OMPCollapseClause";
+ let flangClass = "ScalarIntConstantExpr";
+}
+def OMPC_Default : Clause<"default"> {
+ let clangClass = "OMPDefaultClause";
+ let flangClass = "OmpDefaultClause";
+}
+def OMPC_Private : Clause<"private"> {
+ let clangClass = "OMPPrivateClause";
+ let flangClass = "OmpObjectList";
+}
+def OMPC_Sizes: Clause<"sizes"> {
+ let clangClass = "OMPSizesClause";
+ let flangClass = "ScalarIntExpr";
+ let isValueList = true;
+}
+def OMPC_Full: Clause<"full"> {
+ let clangClass = "OMPFullClause";
+}
+def OMPC_Partial: Clause<"partial"> {
+ let clangClass = "OMPPartialClause";
+ let flangClass = "ScalarIntConstantExpr";
+ let isValueOptional = true;
+}
+def OMPC_FirstPrivate : Clause<"firstprivate"> {
+ let clangClass = "OMPFirstprivateClause";
+ let flangClass = "OmpObjectList";
+}
+def OMPC_LastPrivate : Clause<"lastprivate"> {
+ let clangClass = "OMPLastprivateClause";
+ let flangClass = "OmpObjectList";
+}
+def OMPC_Shared : Clause<"shared"> {
+ let clangClass = "OMPSharedClause";
+ let flangClass = "OmpObjectList";
+}
+def OMPC_Reduction : Clause<"reduction"> {
+ let clangClass = "OMPReductionClause";
+ let flangClass = "OmpReductionClause";
+}
+def OMPC_Linear : Clause<"linear"> {
+ let clangClass = "OMPLinearClause";
+ let flangClass = "OmpLinearClause";
+}
+def OMPC_Aligned : Clause<"aligned"> {
+ let clangClass = "OMPAlignedClause";
+ let flangClass = "OmpAlignedClause";
+}
+def OMPC_Copyin : Clause<"copyin"> {
+ let clangClass = "OMPCopyinClause";
+ let flangClass = "OmpObjectList";
+}
+def OMPC_CopyPrivate : Clause<"copyprivate"> {
+ let clangClass = "OMPCopyprivateClause";
+ let flangClass = "OmpObjectList";
+}
+def OMP_PROC_BIND_master : ClauseVal<"master",2,1> {}
+def OMP_PROC_BIND_close : ClauseVal<"close",3,1> {}
+def OMP_PROC_BIND_spread : ClauseVal<"spread",4,1> {}
+def OMP_PROC_BIND_primary : ClauseVal<"primary",5,1> {}
+def OMP_PROC_BIND_default : ClauseVal<"default",6,0> {}
+def OMP_PROC_BIND_unknown : ClauseVal<"unknown",7,0> { let isDefault = true; }
+def OMPC_ProcBind : Clause<"proc_bind"> {
+ let clangClass = "OMPProcBindClause";
+ let flangClass = "OmpProcBindClause";
+ let enumClauseValue = "ProcBindKind";
+ let allowedClauseValues = [
+ OMP_PROC_BIND_primary,
+ OMP_PROC_BIND_master,
+ OMP_PROC_BIND_close,
+ OMP_PROC_BIND_spread,
+ OMP_PROC_BIND_default,
+ OMP_PROC_BIND_unknown
+ ];
+}
+
+def OMP_SCHEDULE_Static : ClauseVal<"static", 2, 1> {}
+def OMP_SCHEDULE_Dynamic : ClauseVal<"dynamic", 3, 1> {}
+def OMP_SCHEDULE_Guided : ClauseVal<"guided", 4, 1> {}
+def OMP_SCHEDULE_Auto : ClauseVal<"auto", 5, 1> {}
+def OMP_SCHEDULE_Runtime : ClauseVal<"runtime", 6, 1> {}
+def OMP_SCHEDULE_Default : ClauseVal<"default", 7, 0> { let isDefault = 1; }
+
+def OMPC_Schedule : Clause<"schedule"> {
+ let clangClass = "OMPScheduleClause";
+ let flangClass = "OmpScheduleClause";
+ let enumClauseValue = "ScheduleKind";
+ let allowedClauseValues = [
+ OMP_SCHEDULE_Static,
+ OMP_SCHEDULE_Dynamic,
+ OMP_SCHEDULE_Guided,
+ OMP_SCHEDULE_Auto,
+ OMP_SCHEDULE_Runtime,
+ OMP_SCHEDULE_Default
+ ];
+}
+
+def OMP_MEMORY_ORDER_SeqCst : ClauseVal<"seq_cst", 1, 1> {}
+def OMP_MEMORY_ORDER_AcqRel : ClauseVal<"acq_rel", 2, 1> {}
+def OMP_MEMORY_ORDER_Acquire : ClauseVal<"acquire", 3, 1> {}
+def OMP_MEMORY_ORDER_Release : ClauseVal<"release", 4, 1> {}
+def OMP_MEMORY_ORDER_Relaxed : ClauseVal<"relaxed", 5, 1> {}
+def OMP_MEMORY_ORDER_Default : ClauseVal<"default", 6, 0> {
+ let isDefault = 1;
+}
+def OMPC_MemoryOrder : Clause<"memory_order"> {
+ let enumClauseValue = "MemoryOrderKind";
+ let allowedClauseValues = [
+ OMP_MEMORY_ORDER_SeqCst,
+ OMP_MEMORY_ORDER_AcqRel,
+ OMP_MEMORY_ORDER_Acquire,
+ OMP_MEMORY_ORDER_Release,
+ OMP_MEMORY_ORDER_Relaxed,
+ OMP_MEMORY_ORDER_Default
+ ];
+}
+
+def OMP_CANCELLATION_CONSTRUCT_Parallel : ClauseVal<"parallel", 1, 1> {}
+def OMP_CANCELLATION_CONSTRUCT_Loop : ClauseVal<"loop", 2, 1> {}
+def OMP_CANCELLATION_CONSTRUCT_Sections : ClauseVal<"sections", 3, 1> {}
+def OMP_CANCELLATION_CONSTRUCT_Taskgroup : ClauseVal<"taskgroup", 4, 1> {}
+def OMP_CANCELLATION_CONSTRUCT_None : ClauseVal<"none", 5, 0> {
+ let isDefault = 1;
+}
+
+def OMPC_CancellationConstructType : Clause<"cancellation_construct_type"> {
+ let enumClauseValue = "CancellationConstructType";
+ let allowedClauseValues = [
+ OMP_CANCELLATION_CONSTRUCT_Parallel,
+ OMP_CANCELLATION_CONSTRUCT_Loop,
+ OMP_CANCELLATION_CONSTRUCT_Sections,
+ OMP_CANCELLATION_CONSTRUCT_Taskgroup,
+ OMP_CANCELLATION_CONSTRUCT_None
+ ];
+}
+
+def OMPC_Ordered : Clause<"ordered"> {
+ let clangClass = "OMPOrderedClause";
+ let flangClass = "ScalarIntConstantExpr";
+ let isValueOptional = true;
+}
+def OMPC_NoWait : Clause<"nowait"> {
+ let clangClass = "OMPNowaitClause";
+}
+def OMPC_Untied : Clause<"untied"> { let clangClass = "OMPUntiedClause"; }
+def OMPC_Mergeable : Clause<"mergeable"> {
+ let clangClass = "OMPMergeableClause";
+}
+def OMPC_Read : Clause<"read"> { let clangClass = "OMPReadClause"; }
+def OMPC_Write : Clause<"write"> { let clangClass = "OMPWriteClause"; }
+def OMPC_Update : Clause<"update"> { let clangClass = "OMPUpdateClause"; }
+def OMPC_Capture : Clause<"capture"> { let clangClass = "OMPCaptureClause"; }
+def OMPC_Compare : Clause<"compare"> { let clangClass = "OMPCompareClause"; }
+def OMPC_SeqCst : Clause<"seq_cst"> { let clangClass = "OMPSeqCstClause"; }
+def OMPC_AcqRel : Clause<"acq_rel"> { let clangClass = "OMPAcqRelClause"; }
+def OMPC_Acquire : Clause<"acquire"> { let clangClass = "OMPAcquireClause"; }
+def OMPC_Release : Clause<"release"> { let clangClass = "OMPReleaseClause"; }
+def OMPC_Relaxed : Clause<"relaxed"> { let clangClass = "OMPRelaxedClause"; }
+def OMPC_Depend : Clause<"depend"> {
+ let clangClass = "OMPDependClause";
+ let flangClass = "OmpDependClause";
+}
+def OMPC_Device : Clause<"device"> {
+ let clangClass = "OMPDeviceClause";
+ let flangClass = "OmpDeviceClause";
+}
+def OMPC_Threads : Clause<"threads"> { let clangClass = "OMPThreadsClause"; }
+def OMPC_Simd : Clause<"simd"> { let clangClass = "OMPSIMDClause"; }
+def OMPC_Map : Clause<"map"> {
+ let clangClass = "OMPMapClause";
+ let flangClass = "OmpMapClause";
+}
+def OMPC_NumTeams : Clause<"num_teams"> {
+ let clangClass = "OMPNumTeamsClause";
+ let flangClass = "ScalarIntExpr";
+}
+def OMPC_ThreadLimit : Clause<"thread_limit"> {
+ let clangClass = "OMPThreadLimitClause";
+ let flangClass = "ScalarIntExpr";
+}
+def OMPC_Priority : Clause<"priority"> {
+ let clangClass = "OMPPriorityClause";
+ let flangClass = "ScalarIntExpr";
+}
+
+def OMP_GRAINSIZE_Strict : ClauseVal<"strict", 1, 1> {}
+def OMP_GRAINSIZE_Unknown : ClauseVal<"unknown", 2, 0> { let isDefault = 1; }
+
+def OMPC_GrainSize : Clause<"grainsize"> {
+ let clangClass = "OMPGrainsizeClause";
+ let flangClass = "ScalarIntExpr";
+ let enumClauseValue = "GrainsizeType";
+ let allowedClauseValues = [
+ OMP_GRAINSIZE_Strict,
+ OMP_GRAINSIZE_Unknown
+ ];
+}
+def OMPC_NoGroup : Clause<"nogroup"> {
+ let clangClass = "OMPNogroupClause";
+}
+
+def OMP_NUMTASKS_Strict : ClauseVal<"strict", 1, 1> {}
+def OMP_NUMTASKS_Unknown : ClauseVal<"unknown", 2, 0> { let isDefault = 1; }
+
+def OMPC_NumTasks : Clause<"num_tasks"> {
+ let clangClass = "OMPNumTasksClause";
+ let flangClass = "ScalarIntExpr";
+ let enumClauseValue = "NumTasksType";
+ let allowedClauseValues = [
+ OMP_NUMTASKS_Strict,
+ OMP_NUMTASKS_Unknown
+ ];
+}
+def OMPC_Hint : Clause<"hint"> {
+ let clangClass = "OMPHintClause";
+ let flangClass = "ConstantExpr";
+}
+def OMPC_DistSchedule : Clause<"dist_schedule"> {
+ let clangClass = "OMPDistScheduleClause";
+ let flangClass = "ScalarIntExpr";
+ let isValueOptional = true;
+}
+def OMPC_DefaultMap : Clause<"defaultmap"> {
+ let clangClass = "OMPDefaultmapClause";
+ let flangClass = "OmpDefaultmapClause";
+}
+def OMPC_To : Clause<"to"> {
+ let clangClass = "OMPToClause";
+ let flangClass = "OmpObjectList";
+}
+def OMPC_From : Clause<"from"> {
+ let clangClass = "OMPFromClause";
+ let flangClass = "OmpObjectList";
+}
+def OMPC_UseDevicePtr : Clause<"use_device_ptr"> {
+ let clangClass = "OMPUseDevicePtrClause";
+ let flangClass = "Name";
+ let isValueList = true;
+}
+def OMPC_IsDevicePtr : Clause<"is_device_ptr"> {
+ let clangClass = "OMPIsDevicePtrClause";
+ let flangClass = "Name";
+ let isValueList = true;
+}
+def OMPC_HasDeviceAddr : Clause<"has_device_addr"> {
+ let clangClass = "OMPHasDeviceAddrClause";
+ let flangClass = "Name";
+ let isValueList = true;
+}
+def OMPC_TaskReduction : Clause<"task_reduction"> {
+ let clangClass = "OMPTaskReductionClause";
+ let flangClass = "OmpReductionClause";
+}
+def OMPC_InReduction : Clause<"in_reduction"> {
+ let clangClass = "OMPInReductionClause";
+ let flangClass = "OmpInReductionClause";
+}
+def OMPC_UnifiedAddress : Clause<"unified_address"> {
+ let clangClass = "OMPUnifiedAddressClause";
+}
+def OMPC_UnifiedSharedMemory : Clause<"unified_shared_memory"> {
+ let clangClass = "OMPUnifiedSharedMemoryClause";
+}
+def OMPC_ReverseOffload : Clause<"reverse_offload"> {
+ let clangClass = "OMPReverseOffloadClause";
+}
+def OMPC_DynamicAllocators : Clause<"dynamic_allocators"> {
+ let clangClass = "OMPDynamicAllocatorsClause";
+}
+def OMPC_AtomicDefaultMemOrder : Clause<"atomic_default_mem_order"> {
+ let clangClass = "OMPAtomicDefaultMemOrderClause";
+ let flangClass = "OmpAtomicDefaultMemOrderClause";
+}
+def OMPC_At : Clause<"at"> {
+ let clangClass = "OMPAtClause";
+}
+def OMPC_Severity : Clause<"severity"> {
+ let clangClass = "OMPSeverityClause";
+}
+def OMPC_Message : Clause<"message"> {
+ let clangClass = "OMPMessageClause";
+}
+def OMPC_Allocate : Clause<"allocate"> {
+ let clangClass = "OMPAllocateClause";
+ let flangClass = "OmpAllocateClause";
+}
+def OMPC_NonTemporal : Clause<"nontemporal"> {
+ let clangClass = "OMPNontemporalClause";
+ let flangClass = "Name";
+ let isValueList = true;
+}
+
+def OMP_ORDER_concurrent : ClauseVal<"concurrent",1,1> {}
+def OMP_ORDER_unknown : ClauseVal<"unknown",2,0> { let isDefault = 1; }
+def OMPC_Order : Clause<"order"> {
+ let clangClass = "OMPOrderClause";
+ let enumClauseValue = "OrderKind";
+ let allowedClauseValues = [
+ OMP_ORDER_unknown,
+ OMP_ORDER_concurrent
+ ];
+}
+def OMPC_Init : Clause<"init"> {
+ let clangClass = "OMPInitClause";
+}
+def OMPC_Use : Clause<"use"> {
+ let clangClass = "OMPUseClause";
+}
+def OMPC_Destroy : Clause<"destroy"> {
+ let clangClass = "OMPDestroyClause";
+}
+def OMPC_Novariants : Clause<"novariants"> {
+ let clangClass = "OMPNovariantsClause";
+ let flangClass = "ScalarLogicalExpr";
+}
+def OMPC_Nocontext : Clause<"nocontext"> {
+ let clangClass = "OMPNocontextClause";
+ let flangClass = "ScalarLogicalExpr";
+}
+def OMPC_Detach : Clause<"detach"> {
+ let clangClass = "OMPDetachClause";
+}
+def OMPC_Inclusive : Clause<"inclusive"> {
+ let clangClass = "OMPInclusiveClause";
+}
+def OMPC_Exclusive : Clause<"exclusive"> {
+ let clangClass = "OMPExclusiveClause";
+}
+def OMPC_UsesAllocators : Clause<"uses_allocators"> {
+ let clangClass = "OMPUsesAllocatorsClause";
+}
+def OMPC_Affinity : Clause<"affinity"> {
+ let clangClass = "OMPAffinityClause";
+}
+def OMPC_UseDeviceAddr : Clause<"use_device_addr"> {
+ let clangClass = "OMPUseDeviceAddrClause";
+}
+def OMPC_Uniform : Clause<"uniform"> {
+ let flangClass = "Name";
+ let isValueList = true;
+}
+def OMPC_DeviceType : Clause<"device_type"> {}
+def OMPC_Match : Clause<"match"> {}
+def OMPC_AdjustArgs : Clause<"adjust_args"> { }
+def OMPC_AppendArgs : Clause<"append_args"> { }
+def OMPC_Depobj : Clause<"depobj"> {
+ let clangClass = "OMPDepobjClause";
+ let isImplicit = true;
+}
+def OMPC_Flush : Clause<"flush"> {
+ let clangClass = "OMPFlushClause";
+ let isImplicit = true;
+}
+def OMPC_ThreadPrivate : Clause<"threadprivate"> {
+ let alternativeName = "threadprivate or thread local";
+ let isImplicit = true;
+}
+def OMPC_Unknown : Clause<"unknown"> {
+ let isImplicit = true;
+ let isDefault = true;
+}
+def OMPC_Link : Clause<"link"> {
+ let flangClass = "OmpObjectList";
+}
+def OMPC_Indirect : Clause<"indirect"> {}
+def OMPC_Inbranch : Clause<"inbranch"> {}
+def OMPC_Notinbranch : Clause<"notinbranch"> {}
+def OMPC_Filter : Clause<"filter"> {
+ let clangClass = "OMPFilterClause";
+ let flangClass = "ScalarIntExpr";
+}
+def OMPC_Align : Clause<"align"> {
+ let clangClass = "OMPAlignClause";
+}
+def OMPC_When: Clause<"when"> {}
+
+def OMPC_Bind : Clause<"bind"> {
+ let clangClass = "OMPBindClause";
+}
+
+def OMPC_OMPX_DynCGroupMem : Clause<"ompx_dyn_cgroup_mem"> {
+ let clangClass = "OMPXDynCGroupMemClause";
+ let flangClass = "ScalarIntExpr";
+}
+
+//===----------------------------------------------------------------------===//
+// Definition of OpenMP directives
+//===----------------------------------------------------------------------===//
+
+def OMP_ThreadPrivate : Directive<"threadprivate"> {}
+def OMP_Parallel : Directive<"parallel"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_Shared>,
+ VersionedClause<OMPC_Reduction>,
+ VersionedClause<OMPC_Copyin>,
+ VersionedClause<OMPC_Allocate>
+ ];
+ let allowedOnceClauses = [
+ VersionedClause<OMPC_Default>,
+ VersionedClause<OMPC_If>,
+ VersionedClause<OMPC_NumThreads>,
+ VersionedClause<OMPC_ProcBind>,
+ ];
+}
+def OMP_Task : Directive<"task"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_Shared>,
+ VersionedClause<OMPC_Untied>,
+ VersionedClause<OMPC_Mergeable>,
+ VersionedClause<OMPC_Depend>,
+ VersionedClause<OMPC_InReduction>,
+ VersionedClause<OMPC_Allocate>,
+ VersionedClause<OMPC_Detach, 50>,
+ VersionedClause<OMPC_Affinity, 50>
+ ];
+ let allowedOnceClauses = [
+ VersionedClause<OMPC_Default>,
+ VersionedClause<OMPC_If>,
+ VersionedClause<OMPC_Final>,
+ VersionedClause<OMPC_Priority>
+ ];
+}
+def OMP_Simd : Directive<"simd"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_LastPrivate>,
+ VersionedClause<OMPC_Linear>,
+ VersionedClause<OMPC_Aligned>,
+ VersionedClause<OMPC_Reduction>,
+ VersionedClause<OMPC_Allocate>,
+ VersionedClause<OMPC_NonTemporal, 50>,
+ VersionedClause<OMPC_Order, 50>
+ ];
+ let allowedOnceClauses = [
+ VersionedClause<OMPC_Collapse>,
+ VersionedClause<OMPC_SafeLen>,
+ VersionedClause<OMPC_SimdLen>,
+ VersionedClause<OMPC_If, 50>,
+ ];
+}
+def OMP_Tile : Directive<"tile"> {
+ let allowedOnceClauses = [
+ VersionedClause<OMPC_Sizes, 51>,
+ ];
+}
+def OMP_Unroll : Directive<"unroll"> {
+ let allowedOnceClauses = [
+ VersionedClause<OMPC_Full, 51>,
+ VersionedClause<OMPC_Partial, 51>,
+ ];
+}
+def OMP_For : Directive<"for"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_LastPrivate>,
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_Reduction>,
+ VersionedClause<OMPC_Collapse>,
+ VersionedClause<OMPC_Schedule>,
+ VersionedClause<OMPC_Ordered>,
+ VersionedClause<OMPC_NoWait>,
+ VersionedClause<OMPC_Linear>,
+ VersionedClause<OMPC_Allocate>,
+ VersionedClause<OMPC_Order, 50>
+ ];
+}
+def OMP_Do : Directive<"do"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_LastPrivate>,
+ VersionedClause<OMPC_Linear>,
+ VersionedClause<OMPC_Reduction>
+ ];
+ let allowedOnceClauses = [
+ VersionedClause<OMPC_Schedule>,
+ VersionedClause<OMPC_Collapse>,
+ VersionedClause<OMPC_Ordered>,
+ VersionedClause<OMPC_NoWait>
+ ];
+}
+def OMP_Sections : Directive<"sections"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_LastPrivate>,
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_Reduction>,
+ VersionedClause<OMPC_NoWait>,
+ VersionedClause<OMPC_Allocate>
+ ];
+}
+def OMP_Section : Directive<"section"> {}
+def OMP_Single : Directive<"single"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_CopyPrivate>,
+ VersionedClause<OMPC_NoWait>,
+ VersionedClause<OMPC_Allocate>
+ ];
+}
+def OMP_Master : Directive<"master"> {}
+def OMP_Critical : Directive<"critical"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_Hint>
+ ];
+}
+def OMP_TaskYield : Directive<"taskyield"> {}
+def OMP_Barrier : Directive<"barrier"> {}
+def OMP_Error : Directive<"error"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_At, 51>,
+ VersionedClause<OMPC_Severity, 51>,
+ VersionedClause<OMPC_Message, 51>
+ ];
+}
+def OMP_TaskWait : Directive<"taskwait"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_Depend, 50>,
+ VersionedClause<OMPC_NoWait, 51>
+ ];
+}
+def OMP_TaskGroup : Directive<"taskgroup"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_TaskReduction, 50>,
+ VersionedClause<OMPC_Allocate, 50>
+ ];
+}
+def OMP_Flush : Directive<"flush"> {
+ let allowedOnceClauses = [
+ VersionedClause<OMPC_AcqRel, 50>,
+ VersionedClause<OMPC_Acquire, 50>,
+ VersionedClause<OMPC_Release, 50>,
+    // TODO: This should be `none` instead. Comment carried over from
+ // OMPKinds.def.
+ VersionedClause<OMPC_Flush>
+ ];
+}
+def OMP_Ordered : Directive<"ordered"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_Depend>
+ ];
+ let allowedOnceClauses = [
+ VersionedClause<OMPC_Threads>,
+ VersionedClause<OMPC_Simd>
+ ];
+}
+def OMP_Atomic : Directive<"atomic"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_Read>,
+ VersionedClause<OMPC_Write>,
+ VersionedClause<OMPC_Update>,
+ VersionedClause<OMPC_Capture>,
+ VersionedClause<OMPC_Compare, 51>
+ ];
+ let allowedOnceClauses = [
+ VersionedClause<OMPC_SeqCst>,
+ VersionedClause<OMPC_AcqRel, 50>,
+ VersionedClause<OMPC_Acquire, 50>,
+ VersionedClause<OMPC_Release, 50>,
+ VersionedClause<OMPC_Relaxed, 50>,
+ VersionedClause<OMPC_Hint, 50>
+ ];
+}
+def OMP_Target : Directive<"target"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_If>,
+ VersionedClause<OMPC_Map>,
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_Depend>,
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_IsDevicePtr>,
+ VersionedClause<OMPC_HasDeviceAddr, 51>,
+ VersionedClause<OMPC_Reduction>,
+ VersionedClause<OMPC_InReduction, 50>,
+ VersionedClause<OMPC_Allocate>,
+ VersionedClause<OMPC_UsesAllocators, 50>
+ ];
+ let allowedOnceClauses = [
+ VersionedClause<OMPC_Device>,
+ VersionedClause<OMPC_ThreadLimit, 51>,
+ VersionedClause<OMPC_DefaultMap>,
+ VersionedClause<OMPC_NoWait>,
+ VersionedClause<OMPC_OMPX_DynCGroupMem>,
+ ];
+}
+def OMP_Teams : Directive<"teams"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_Shared>,
+ VersionedClause<OMPC_Reduction>,
+ VersionedClause<OMPC_Allocate>
+ ];
+ let allowedOnceClauses = [
+ VersionedClause<OMPC_Default>,
+ VersionedClause<OMPC_NumTeams>,
+ VersionedClause<OMPC_ThreadLimit>
+ ];
+}
+def OMP_Cancel : Directive<"cancel"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_If>
+ ];
+}
+def OMP_Requires : Directive<"requires"> {
+ let allowedOnceClauses = [
+ VersionedClause<OMPC_UnifiedAddress>,
+ VersionedClause<OMPC_UnifiedSharedMemory>,
+    // OpenMP 5.2 spec: if an implementation does not support a requirement
+    // (reverse offload in this case), it should terminate with a
+    // compile-time error.
+    // Setting the supported version for reverse_offload to a distant future
+    // version 9.9 so that its partial support can be tested in the meantime.
+    //
+    // TODO: Correct this supported version number whenever the complete
+    // implementation of reverse_offload is available.
+ VersionedClause<OMPC_ReverseOffload, 99>,
+ VersionedClause<OMPC_DynamicAllocators>,
+ VersionedClause<OMPC_AtomicDefaultMemOrder>
+ ];
+}
+def OMP_Nothing : Directive<"nothing"> {}
+def OMP_TargetData : Directive<"target data"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_UseDevicePtr>,
+ VersionedClause<OMPC_UseDeviceAddr, 50>
+ ];
+ let allowedOnceClauses = [
+ VersionedClause<OMPC_Device>,
+ VersionedClause<OMPC_If>
+ ];
+ let requiredClauses = [
+ VersionedClause<OMPC_Map>
+ ];
+}
+def OMP_TargetEnterData : Directive<"target enter data"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_Depend>
+ ];
+ let allowedOnceClauses = [
+ VersionedClause<OMPC_If>,
+ VersionedClause<OMPC_Device>,
+ VersionedClause<OMPC_NoWait>
+ ];
+ let requiredClauses = [
+ VersionedClause<OMPC_Map>
+ ];
+}
+def OMP_TargetExitData : Directive<"target exit data"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_Depend>
+ ];
+ let allowedOnceClauses = [
+ VersionedClause<OMPC_Device>,
+ VersionedClause<OMPC_If>,
+ VersionedClause<OMPC_NoWait>
+ ];
+ let requiredClauses = [
+ VersionedClause<OMPC_Map>
+ ];
+}
+def OMP_TargetParallel : Directive<"target parallel"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_Map>,
+ VersionedClause<OMPC_NoWait>,
+ VersionedClause<OMPC_Depend>,
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_Default>,
+ VersionedClause<OMPC_Shared>,
+ VersionedClause<OMPC_Reduction>,
+ VersionedClause<OMPC_IsDevicePtr>,
+ VersionedClause<OMPC_HasDeviceAddr, 51>,
+ VersionedClause<OMPC_Allocate>,
+ VersionedClause<OMPC_UsesAllocators, 50>
+ ];
+ let allowedOnceClauses = [
+ VersionedClause<OMPC_DefaultMap>,
+ VersionedClause<OMPC_Device>,
+ VersionedClause<OMPC_If>,
+ VersionedClause<OMPC_NumThreads>,
+ VersionedClause<OMPC_ProcBind>,
+ VersionedClause<OMPC_OMPX_DynCGroupMem>,
+ ];
+}
+def OMP_TargetParallelFor : Directive<"target parallel for"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_If>,
+ VersionedClause<OMPC_Device>,
+ VersionedClause<OMPC_Map>,
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_LastPrivate>,
+ VersionedClause<OMPC_NoWait>,
+ VersionedClause<OMPC_Depend>,
+ VersionedClause<OMPC_DefaultMap>,
+ VersionedClause<OMPC_NumThreads>,
+ VersionedClause<OMPC_Default>,
+ VersionedClause<OMPC_ProcBind>,
+ VersionedClause<OMPC_Shared>,
+ VersionedClause<OMPC_Reduction>,
+ VersionedClause<OMPC_Collapse>,
+ VersionedClause<OMPC_Schedule>,
+ VersionedClause<OMPC_Ordered>,
+ VersionedClause<OMPC_Linear>,
+ VersionedClause<OMPC_IsDevicePtr>,
+ VersionedClause<OMPC_HasDeviceAddr, 51>,
+ VersionedClause<OMPC_Allocate>,
+ VersionedClause<OMPC_Order, 50>,
+ VersionedClause<OMPC_UsesAllocators, 50>
+ ];
+ let allowedOnceClauses = [
+ VersionedClause<OMPC_OMPX_DynCGroupMem>,
+ ];
+}
+def OMP_TargetParallelDo : Directive<"target parallel do"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_Map>,
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_LastPrivate>,
+ VersionedClause<OMPC_Depend>,
+ VersionedClause<OMPC_Shared>,
+ VersionedClause<OMPC_Reduction>,
+ VersionedClause<OMPC_Linear>,
+ VersionedClause<OMPC_IsDevicePtr>,
+ VersionedClause<OMPC_HasDeviceAddr, 51>,
+ VersionedClause<OMPC_Allocator>,
+ VersionedClause<OMPC_Order>,
+ VersionedClause<OMPC_UsesAllocators>,
+ VersionedClause<OMPC_Default>,
+ VersionedClause<OMPC_Copyin>
+ ];
+ let allowedOnceClauses = [
+ VersionedClause<OMPC_If>,
+ VersionedClause<OMPC_NumThreads>,
+ VersionedClause<OMPC_ProcBind>,
+ VersionedClause<OMPC_Device>,
+ VersionedClause<OMPC_DefaultMap>,
+ VersionedClause<OMPC_Schedule>,
+ VersionedClause<OMPC_Collapse>,
+ VersionedClause<OMPC_Ordered>,
+ VersionedClause<OMPC_NoWait>
+ ];
+}
+def OMP_TargetUpdate : Directive<"target update"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_To>,
+ VersionedClause<OMPC_From>,
+ VersionedClause<OMPC_Depend>
+ ];
+ let allowedOnceClauses = [
+ VersionedClause<OMPC_Device>,
+ VersionedClause<OMPC_If>,
+ VersionedClause<OMPC_NoWait>
+ ];
+}
+def OMP_ParallelFor : Directive<"parallel for"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_If>,
+ VersionedClause<OMPC_NumThreads>,
+ VersionedClause<OMPC_Default>,
+ VersionedClause<OMPC_ProcBind>,
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_Shared>,
+ VersionedClause<OMPC_Reduction>,
+ VersionedClause<OMPC_Copyin>,
+ VersionedClause<OMPC_LastPrivate>,
+ VersionedClause<OMPC_Collapse>,
+ VersionedClause<OMPC_Schedule>,
+ VersionedClause<OMPC_Ordered>,
+ VersionedClause<OMPC_Linear>,
+ VersionedClause<OMPC_Allocate>,
+ VersionedClause<OMPC_Order, 50>
+ ];
+}
+def OMP_ParallelDo : Directive<"parallel do"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_Default>,
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_Shared>,
+ VersionedClause<OMPC_Reduction>,
+ VersionedClause<OMPC_Copyin>,
+ VersionedClause<OMPC_LastPrivate>,
+ VersionedClause<OMPC_Linear>
+ ];
+ let allowedOnceClauses = [
+ VersionedClause<OMPC_If>,
+ VersionedClause<OMPC_NumThreads>,
+ VersionedClause<OMPC_ProcBind>,
+ VersionedClause<OMPC_Schedule>,
+ VersionedClause<OMPC_Ordered>,
+ VersionedClause<OMPC_Collapse>
+ ];
+}
+def OMP_ParallelForSimd : Directive<"parallel for simd"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_If>,
+ VersionedClause<OMPC_NumThreads>,
+ VersionedClause<OMPC_Default>,
+ VersionedClause<OMPC_ProcBind>,
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_Shared>,
+ VersionedClause<OMPC_Reduction>,
+ VersionedClause<OMPC_Copyin>,
+ VersionedClause<OMPC_LastPrivate>,
+ VersionedClause<OMPC_Collapse>,
+ VersionedClause<OMPC_Schedule>,
+ VersionedClause<OMPC_SafeLen>,
+ VersionedClause<OMPC_SimdLen>,
+ VersionedClause<OMPC_Linear>,
+ VersionedClause<OMPC_Aligned>,
+ VersionedClause<OMPC_Ordered>,
+ VersionedClause<OMPC_Allocate>,
+ VersionedClause<OMPC_NonTemporal, 50>,
+ VersionedClause<OMPC_Order, 50>
+ ];
+}
+def OMP_ParallelDoSimd : Directive<"parallel do simd"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_Default>,
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_Shared>,
+ VersionedClause<OMPC_Reduction>,
+ VersionedClause<OMPC_Copyin>,
+ VersionedClause<OMPC_LastPrivate>,
+ VersionedClause<OMPC_Linear>,
+ VersionedClause<OMPC_Aligned>,
+ VersionedClause<OMPC_Allocate>,
+ VersionedClause<OMPC_NonTemporal>,
+ VersionedClause<OMPC_Order>
+ ];
+ let allowedOnceClauses = [
+ VersionedClause<OMPC_If>,
+ VersionedClause<OMPC_NumThreads>,
+ VersionedClause<OMPC_ProcBind>,
+ VersionedClause<OMPC_Schedule>,
+ VersionedClause<OMPC_Ordered>,
+ VersionedClause<OMPC_Collapse>,
+ VersionedClause<OMPC_SafeLen>,
+ VersionedClause<OMPC_SimdLen>
+ ];
+}
+def OMP_ParallelMaster : Directive<"parallel master"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_If>,
+ VersionedClause<OMPC_NumThreads>,
+ VersionedClause<OMPC_Default>,
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_Shared>,
+ VersionedClause<OMPC_Copyin>,
+ VersionedClause<OMPC_Reduction>,
+ VersionedClause<OMPC_ProcBind>,
+ VersionedClause<OMPC_Allocate>
+ ];
+}
+def OMP_ParallelMasked : Directive<"parallel masked"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_If>,
+ VersionedClause<OMPC_NumThreads>,
+ VersionedClause<OMPC_Default>,
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_Shared>,
+ VersionedClause<OMPC_Copyin>,
+ VersionedClause<OMPC_Reduction>,
+ VersionedClause<OMPC_ProcBind>,
+ VersionedClause<OMPC_Allocate>,
+ VersionedClause<OMPC_Filter>
+ ];
+}
+def OMP_ParallelSections : Directive<"parallel sections"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_If>,
+ VersionedClause<OMPC_Default>,
+ VersionedClause<OMPC_ProcBind>,
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_Shared>,
+ VersionedClause<OMPC_Reduction>,
+ VersionedClause<OMPC_Copyin>,
+ VersionedClause<OMPC_LastPrivate>,
+ VersionedClause<OMPC_Allocate>
+ ];
+ let allowedOnceClauses = [
+ VersionedClause<OMPC_NumThreads>
+ ];
+}
+def OMP_ForSimd : Directive<"for simd"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_LastPrivate>,
+ VersionedClause<OMPC_Reduction>,
+ VersionedClause<OMPC_Schedule>,
+ VersionedClause<OMPC_Collapse>,
+ VersionedClause<OMPC_NoWait>,
+ VersionedClause<OMPC_SafeLen>,
+ VersionedClause<OMPC_SimdLen>,
+ VersionedClause<OMPC_Linear>,
+ VersionedClause<OMPC_Aligned>,
+ VersionedClause<OMPC_Ordered>,
+ VersionedClause<OMPC_Allocate>,
+ VersionedClause<OMPC_If, 50>,
+ VersionedClause<OMPC_NonTemporal, 50>,
+ VersionedClause<OMPC_Order, 50>,
+ ];
+}
+def OMP_DoSimd : Directive<"do simd"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_Aligned>,
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_LastPrivate>,
+ VersionedClause<OMPC_Linear>,
+ VersionedClause<OMPC_Reduction>
+ ];
+ let allowedOnceClauses = [
+ VersionedClause<OMPC_Schedule>,
+ VersionedClause<OMPC_Collapse>,
+ VersionedClause<OMPC_Ordered>,
+ VersionedClause<OMPC_SafeLen>,
+ VersionedClause<OMPC_SimdLen>,
+ VersionedClause<OMPC_NoWait>
+ ];
+}
+def OMP_CancellationPoint : Directive<"cancellation point"> {}
+def OMP_DeclareReduction : Directive<"declare reduction"> {}
+def OMP_DeclareMapper : Directive<"declare mapper"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_Map>
+ ];
+}
+def OMP_DeclareSimd : Directive<"declare simd"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_Linear>,
+ VersionedClause<OMPC_Aligned>,
+ VersionedClause<OMPC_Uniform>
+ ];
+ let allowedOnceClauses = [
+ VersionedClause<OMPC_SimdLen>
+ ];
+ let allowedExclusiveClauses = [
+ VersionedClause<OMPC_Inbranch>,
+ VersionedClause<OMPC_Notinbranch>
+ ];
+}
+def OMP_TaskLoop : Directive<"taskloop"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_Shared>,
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_LastPrivate>,
+ VersionedClause<OMPC_Untied>,
+ VersionedClause<OMPC_Mergeable>,
+ VersionedClause<OMPC_NoGroup>,
+ VersionedClause<OMPC_Reduction>,
+ VersionedClause<OMPC_InReduction>,
+ VersionedClause<OMPC_Allocate>
+ ];
+ let allowedOnceClauses = [
+ VersionedClause<OMPC_Default>,
+ VersionedClause<OMPC_If>,
+ VersionedClause<OMPC_Collapse>,
+ VersionedClause<OMPC_Final>,
+ VersionedClause<OMPC_Priority>,
+ ];
+ let allowedExclusiveClauses = [
+ VersionedClause<OMPC_GrainSize>,
+ VersionedClause<OMPC_NumTasks>
+ ];
+}
+def OMP_TaskLoopSimd : Directive<"taskloop simd"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_Aligned>,
+ VersionedClause<OMPC_Allocate>,
+ VersionedClause<OMPC_Default>,
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_InReduction>,
+ VersionedClause<OMPC_LastPrivate>,
+ VersionedClause<OMPC_Linear>,
+ VersionedClause<OMPC_Mergeable>,
+ VersionedClause<OMPC_NoGroup>,
+ VersionedClause<OMPC_NonTemporal, 50>,
+ VersionedClause<OMPC_Order, 50>,
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_Reduction>,
+ VersionedClause<OMPC_Shared>,
+ VersionedClause<OMPC_Untied>
+ ];
+ let allowedOnceClauses = [
+ VersionedClause<OMPC_If>,
+ VersionedClause<OMPC_Collapse>,
+ VersionedClause<OMPC_SafeLen>,
+ VersionedClause<OMPC_SimdLen>,
+ VersionedClause<OMPC_Final>,
+ VersionedClause<OMPC_Priority>
+ ];
+ let allowedExclusiveClauses = [
+ VersionedClause<OMPC_GrainSize>,
+ VersionedClause<OMPC_NumTasks>
+ ];
+}
+def OMP_Distribute : Directive<"distribute"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_LastPrivate>,
+ VersionedClause<OMPC_Allocate>
+ ];
+ let allowedOnceClauses = [
+ VersionedClause<OMPC_Collapse>,
+ VersionedClause<OMPC_DistSchedule>
+ ];
+}
+def OMP_BeginDeclareTarget : Directive<"begin declare target"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_To>,
+ VersionedClause<OMPC_Link>,
+ VersionedClause<OMPC_DeviceType>,
+ VersionedClause<OMPC_Indirect>
+ ];
+}
+def OMP_DeclareTarget : Directive<"declare target"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_To>,
+ VersionedClause<OMPC_Link>,
+ VersionedClause<OMPC_Indirect>
+ ];
+}
+def OMP_EndDeclareTarget : Directive<"end declare target"> {}
+def OMP_DistributeParallelFor : Directive<"distribute parallel for"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_LastPrivate>,
+ VersionedClause<OMPC_Collapse>,
+ VersionedClause<OMPC_DistSchedule>,
+ VersionedClause<OMPC_If>,
+ VersionedClause<OMPC_NumThreads>,
+ VersionedClause<OMPC_Default>,
+ VersionedClause<OMPC_ProcBind>,
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_Shared>,
+ VersionedClause<OMPC_Reduction>,
+ VersionedClause<OMPC_Copyin>,
+ VersionedClause<OMPC_Schedule>,
+ VersionedClause<OMPC_Allocate>,
+ VersionedClause<OMPC_Order, 50>
+ ];
+}
+def OMP_DistributeParallelDo : Directive<"distribute parallel do"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_LastPrivate>,
+ VersionedClause<OMPC_Allocate>,
+ VersionedClause<OMPC_Order>,
+ VersionedClause<OMPC_Default>,
+ VersionedClause<OMPC_Shared>,
+ VersionedClause<OMPC_Reduction>,
+ VersionedClause<OMPC_Copyin>,
+ VersionedClause<OMPC_Linear>
+ ];
+ let allowedOnceClauses = [
+ VersionedClause<OMPC_Collapse>,
+ VersionedClause<OMPC_DistSchedule>,
+ VersionedClause<OMPC_If>,
+ VersionedClause<OMPC_NumThreads>,
+ VersionedClause<OMPC_ProcBind>,
+ VersionedClause<OMPC_Schedule>,
+ VersionedClause<OMPC_Ordered>
+ ];
+}
+def OMP_DistributeParallelForSimd : Directive<"distribute parallel for simd"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_LastPrivate>,
+ VersionedClause<OMPC_Collapse>,
+ VersionedClause<OMPC_DistSchedule>,
+ VersionedClause<OMPC_If>,
+ VersionedClause<OMPC_NumThreads>,
+ VersionedClause<OMPC_Default>,
+ VersionedClause<OMPC_ProcBind>,
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_Shared>,
+ VersionedClause<OMPC_Reduction>,
+ VersionedClause<OMPC_Copyin>,
+ VersionedClause<OMPC_Schedule>,
+ VersionedClause<OMPC_Linear>,
+ VersionedClause<OMPC_Aligned>,
+ VersionedClause<OMPC_SafeLen>,
+ VersionedClause<OMPC_SimdLen>,
+ VersionedClause<OMPC_Allocate>,
+ VersionedClause<OMPC_NonTemporal, 50>,
+ VersionedClause<OMPC_Order, 50>
+ ];
+}
+def OMP_DistributeParallelDoSimd : Directive<"distribute parallel do simd"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_LastPrivate>,
+ VersionedClause<OMPC_Collapse>,
+ VersionedClause<OMPC_DistSchedule>,
+ VersionedClause<OMPC_If>,
+ VersionedClause<OMPC_NumThreads>,
+ VersionedClause<OMPC_Default>,
+ VersionedClause<OMPC_ProcBind>,
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_Shared>,
+ VersionedClause<OMPC_Reduction>,
+ VersionedClause<OMPC_Copyin>,
+ VersionedClause<OMPC_Schedule>,
+ VersionedClause<OMPC_Linear>,
+ VersionedClause<OMPC_Aligned>,
+ VersionedClause<OMPC_SafeLen>,
+ VersionedClause<OMPC_SimdLen>,
+ VersionedClause<OMPC_Allocate>,
+ VersionedClause<OMPC_NonTemporal>,
+ VersionedClause<OMPC_Order>
+ ];
+}
+def OMP_DistributeSimd : Directive<"distribute simd"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_Aligned>,
+ VersionedClause<OMPC_Allocate>,
+ VersionedClause<OMPC_Copyin>,
+ VersionedClause<OMPC_Default>,
+ VersionedClause<OMPC_Linear>,
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_LastPrivate>,
+ VersionedClause<OMPC_NonTemporal, 50>,
+ VersionedClause<OMPC_Order, 50>,
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_Reduction>
+ ];
+ let allowedOnceClauses = [
+ VersionedClause<OMPC_Collapse>,
+ VersionedClause<OMPC_DistSchedule>,
+ VersionedClause<OMPC_If, 50>,
+ VersionedClause<OMPC_NumThreads>,
+ VersionedClause<OMPC_Ordered>,
+ VersionedClause<OMPC_ProcBind>,
+ VersionedClause<OMPC_Schedule>,
+ VersionedClause<OMPC_SafeLen>,
+ VersionedClause<OMPC_SimdLen>
+ ];
+}
+
+def OMP_TargetParallelForSimd : Directive<"target parallel for simd"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_If>,
+ VersionedClause<OMPC_Device>,
+ VersionedClause<OMPC_Map>,
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_LastPrivate>,
+ VersionedClause<OMPC_NoWait>,
+ VersionedClause<OMPC_Depend>,
+ VersionedClause<OMPC_DefaultMap>,
+ VersionedClause<OMPC_NumThreads>,
+ VersionedClause<OMPC_Default>,
+ VersionedClause<OMPC_ProcBind>,
+ VersionedClause<OMPC_Shared>,
+ VersionedClause<OMPC_Reduction>,
+ VersionedClause<OMPC_Collapse>,
+ VersionedClause<OMPC_Schedule>,
+ VersionedClause<OMPC_Ordered>,
+ VersionedClause<OMPC_Linear>,
+ VersionedClause<OMPC_SafeLen>,
+ VersionedClause<OMPC_SimdLen>,
+ VersionedClause<OMPC_Aligned>,
+ VersionedClause<OMPC_IsDevicePtr>,
+ VersionedClause<OMPC_HasDeviceAddr, 51>,
+ VersionedClause<OMPC_Allocate>,
+ VersionedClause<OMPC_NonTemporal, 50>,
+ VersionedClause<OMPC_Order, 50>,
+ VersionedClause<OMPC_UsesAllocators, 50>
+ ];
+ let allowedOnceClauses = [
+ VersionedClause<OMPC_OMPX_DynCGroupMem>,
+ ];
+}
+def OMP_TargetParallelDoSimd : Directive<"target parallel do simd"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_If>,
+ VersionedClause<OMPC_Device>,
+ VersionedClause<OMPC_Map>,
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_LastPrivate>,
+ VersionedClause<OMPC_NoWait>,
+ VersionedClause<OMPC_Depend>,
+ VersionedClause<OMPC_DefaultMap>,
+ VersionedClause<OMPC_NumThreads>,
+ VersionedClause<OMPC_Default>,
+ VersionedClause<OMPC_ProcBind>,
+ VersionedClause<OMPC_Shared>,
+ VersionedClause<OMPC_Reduction>,
+ VersionedClause<OMPC_Collapse>,
+ VersionedClause<OMPC_Schedule>,
+ VersionedClause<OMPC_Ordered>,
+ VersionedClause<OMPC_Linear>,
+ VersionedClause<OMPC_SafeLen>,
+ VersionedClause<OMPC_SimdLen>,
+ VersionedClause<OMPC_Aligned>,
+ VersionedClause<OMPC_IsDevicePtr>,
+ VersionedClause<OMPC_HasDeviceAddr, 51>,
+ VersionedClause<OMPC_Allocate>,
+ VersionedClause<OMPC_NonTemporal>,
+ VersionedClause<OMPC_Order>,
+ VersionedClause<OMPC_UsesAllocators>
+ ];
+}
+def OMP_TargetSimd : Directive<"target simd"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_Aligned>,
+ VersionedClause<OMPC_Allocate>,
+ VersionedClause<OMPC_Depend>,
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_IsDevicePtr>,
+ VersionedClause<OMPC_HasDeviceAddr, 51>,
+ VersionedClause<OMPC_LastPrivate>,
+ VersionedClause<OMPC_Linear>,
+ VersionedClause<OMPC_Map>,
+ VersionedClause<OMPC_NonTemporal, 50>,
+ VersionedClause<OMPC_NoWait>,
+ VersionedClause<OMPC_Order, 50>,
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_Reduction>,
+ VersionedClause<OMPC_Shared>,
+ VersionedClause<OMPC_UsesAllocators, 50>
+ ];
+ let allowedOnceClauses = [
+ VersionedClause<OMPC_Collapse>,
+ VersionedClause<OMPC_SafeLen>,
+ VersionedClause<OMPC_SimdLen>,
+ VersionedClause<OMPC_If>,
+ VersionedClause<OMPC_NumThreads>,
+ VersionedClause<OMPC_ProcBind>,
+ VersionedClause<OMPC_Device>,
+ VersionedClause<OMPC_DefaultMap>,
+ VersionedClause<OMPC_Schedule>,
+ VersionedClause<OMPC_OMPX_DynCGroupMem>,
+ ];
+}
+def OMP_TeamsDistribute : Directive<"teams distribute"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_Default>,
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_Shared>,
+ VersionedClause<OMPC_Reduction>,
+ VersionedClause<OMPC_NumTeams>,
+ VersionedClause<OMPC_ThreadLimit>,
+ VersionedClause<OMPC_LastPrivate>,
+ VersionedClause<OMPC_Collapse>,
+ VersionedClause<OMPC_DistSchedule>,
+ VersionedClause<OMPC_Allocate>
+ ];
+}
+def OMP_TeamsDistributeSimd : Directive<"teams distribute simd"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_Aligned>,
+ VersionedClause<OMPC_Allocate>,
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_LastPrivate>,
+ VersionedClause<OMPC_Linear>,
+ VersionedClause<OMPC_NonTemporal, 50>,
+ VersionedClause<OMPC_Order, 50>,
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_Reduction>,
+ VersionedClause<OMPC_Shared>
+ ];
+ let allowedOnceClauses = [
+ VersionedClause<OMPC_Collapse>,
+ VersionedClause<OMPC_Default>,
+ VersionedClause<OMPC_DistSchedule>,
+ VersionedClause<OMPC_If, 50>,
+ VersionedClause<OMPC_NumTeams>,
+ VersionedClause<OMPC_SafeLen>,
+ VersionedClause<OMPC_SimdLen>,
+ VersionedClause<OMPC_ThreadLimit>
+ ];
+}
+
+def OMP_TeamsDistributeParallelForSimd :
+ Directive<"teams distribute parallel for simd"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_LastPrivate>,
+ VersionedClause<OMPC_Collapse>,
+ VersionedClause<OMPC_DistSchedule>,
+ VersionedClause<OMPC_If>,
+ VersionedClause<OMPC_NumThreads>,
+ VersionedClause<OMPC_Default>,
+ VersionedClause<OMPC_ProcBind>,
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_Shared>,
+ VersionedClause<OMPC_Reduction>,
+ VersionedClause<OMPC_Schedule>,
+ VersionedClause<OMPC_Linear>,
+ VersionedClause<OMPC_Aligned>,
+ VersionedClause<OMPC_SafeLen>,
+ VersionedClause<OMPC_SimdLen>,
+ VersionedClause<OMPC_NumTeams>,
+ VersionedClause<OMPC_ThreadLimit>,
+ VersionedClause<OMPC_Allocate>,
+ VersionedClause<OMPC_NonTemporal, 50>,
+ VersionedClause<OMPC_Order, 50>
+ ];
+}
+def OMP_TeamsDistributeParallelDoSimd :
+ Directive<"teams distribute parallel do simd"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_LastPrivate>,
+ VersionedClause<OMPC_Allocate>,
+ VersionedClause<OMPC_Shared>,
+ VersionedClause<OMPC_Reduction>,
+ VersionedClause<OMPC_Linear>,
+ VersionedClause<OMPC_Order>,
+ VersionedClause<OMPC_Aligned>,
+ VersionedClause<OMPC_NonTemporal>
+ ];
+ let allowedOnceClauses = [
+ VersionedClause<OMPC_Default>,
+ VersionedClause<OMPC_NumTeams>,
+ VersionedClause<OMPC_ThreadLimit>,
+ VersionedClause<OMPC_Collapse>,
+ VersionedClause<OMPC_DistSchedule>,
+ VersionedClause<OMPC_NumThreads>,
+ VersionedClause<OMPC_ProcBind>,
+ VersionedClause<OMPC_Schedule>,
+ VersionedClause<OMPC_SafeLen>,
+ VersionedClause<OMPC_SimdLen>,
+ VersionedClause<OMPC_If>,
+ ];
+}
+def OMP_TeamsDistributeParallelFor :
+ Directive<"teams distribute parallel for"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_LastPrivate>,
+ VersionedClause<OMPC_Collapse>,
+ VersionedClause<OMPC_DistSchedule>,
+ VersionedClause<OMPC_If>,
+ VersionedClause<OMPC_NumThreads>,
+ VersionedClause<OMPC_Default>,
+ VersionedClause<OMPC_ProcBind>,
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_Shared>,
+ VersionedClause<OMPC_Reduction>,
+ VersionedClause<OMPC_Schedule>,
+ VersionedClause<OMPC_NumTeams>,
+ VersionedClause<OMPC_ThreadLimit>,
+ VersionedClause<OMPC_Copyin>,
+ VersionedClause<OMPC_Allocate>,
+ VersionedClause<OMPC_Order, 50>
+ ];
+}
+def OMP_TeamsDistributeParallelDo :
+ Directive<"teams distribute parallel do"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_LastPrivate>,
+ VersionedClause<OMPC_Shared>,
+ VersionedClause<OMPC_Reduction>,
+ VersionedClause<OMPC_Allocate>,
+ VersionedClause<OMPC_Copyin>,
+ VersionedClause<OMPC_Linear>
+ ];
+  let allowedOnceClauses = [
+ VersionedClause<OMPC_NumTeams>,
+ VersionedClause<OMPC_ThreadLimit>,
+ VersionedClause<OMPC_Default>,
+ VersionedClause<OMPC_Collapse>,
+ VersionedClause<OMPC_DistSchedule>,
+ VersionedClause<OMPC_Ordered>,
+ VersionedClause<OMPC_Order>,
+ VersionedClause<OMPC_If>,
+ VersionedClause<OMPC_NumThreads>,
+ VersionedClause<OMPC_ProcBind>,
+ VersionedClause<OMPC_Schedule>
+ ];
+}
+def OMP_TargetTeams : Directive<"target teams"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_If>,
+ VersionedClause<OMPC_Map>,
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_Depend>,
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_IsDevicePtr>,
+ VersionedClause<OMPC_HasDeviceAddr, 51>,
+ VersionedClause<OMPC_Reduction>,
+ VersionedClause<OMPC_Allocate>,
+ VersionedClause<OMPC_UsesAllocators, 50>,
+ VersionedClause<OMPC_Shared>
+ ];
+
+ let allowedOnceClauses = [
+ VersionedClause<OMPC_Device>,
+ VersionedClause<OMPC_NoWait>,
+ VersionedClause<OMPC_DefaultMap>,
+ VersionedClause<OMPC_Default>,
+ VersionedClause<OMPC_NumTeams>,
+ VersionedClause<OMPC_ThreadLimit>,
+ VersionedClause<OMPC_OMPX_DynCGroupMem>,
+ ];
+}
+def OMP_TargetTeamsDistribute : Directive<"target teams distribute"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_If>,
+ VersionedClause<OMPC_Map>,
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_Depend>,
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_IsDevicePtr>,
+ VersionedClause<OMPC_HasDeviceAddr, 51>,
+ VersionedClause<OMPC_Reduction>,
+ VersionedClause<OMPC_Allocate>,
+ VersionedClause<OMPC_UsesAllocators, 50>,
+ VersionedClause<OMPC_Shared>,
+ VersionedClause<OMPC_LastPrivate>
+ ];
+ let allowedOnceClauses = [
+ VersionedClause<OMPC_Device>,
+ VersionedClause<OMPC_NoWait>,
+ VersionedClause<OMPC_DefaultMap>,
+ VersionedClause<OMPC_Default>,
+ VersionedClause<OMPC_NumTeams>,
+ VersionedClause<OMPC_ThreadLimit>,
+ VersionedClause<OMPC_Collapse>,
+ VersionedClause<OMPC_DistSchedule>,
+ VersionedClause<OMPC_OMPX_DynCGroupMem>,
+ ];
+}
+
+def OMP_TargetTeamsDistributeParallelFor :
+ Directive<"target teams distribute parallel for"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_If>,
+ VersionedClause<OMPC_Device>,
+ VersionedClause<OMPC_Map>,
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_NoWait>,
+ VersionedClause<OMPC_Depend>,
+ VersionedClause<OMPC_DefaultMap>,
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_IsDevicePtr>,
+ VersionedClause<OMPC_HasDeviceAddr, 51>,
+ VersionedClause<OMPC_Default>,
+ VersionedClause<OMPC_Shared>,
+ VersionedClause<OMPC_Reduction>,
+ VersionedClause<OMPC_NumTeams>,
+ VersionedClause<OMPC_ThreadLimit>,
+ VersionedClause<OMPC_LastPrivate>,
+ VersionedClause<OMPC_Collapse>,
+ VersionedClause<OMPC_DistSchedule>,
+ VersionedClause<OMPC_NumThreads>,
+ VersionedClause<OMPC_ProcBind>,
+ VersionedClause<OMPC_Schedule>,
+ VersionedClause<OMPC_Allocate>,
+ VersionedClause<OMPC_Order, 50>,
+ VersionedClause<OMPC_UsesAllocators, 50>
+ ];
+ let allowedOnceClauses = [
+ VersionedClause<OMPC_OMPX_DynCGroupMem>,
+ ];
+}
+def OMP_TargetTeamsDistributeParallelDo :
+ Directive<"target teams distribute parallel do"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_If>,
+ VersionedClause<OMPC_Map>,
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_Depend>,
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_IsDevicePtr>,
+ VersionedClause<OMPC_HasDeviceAddr, 51>,
+ VersionedClause<OMPC_Reduction>,
+ VersionedClause<OMPC_Allocate>,
+ VersionedClause<OMPC_UsesAllocators>,
+ VersionedClause<OMPC_Shared>,
+ VersionedClause<OMPC_LastPrivate>,
+ VersionedClause<OMPC_Copyin>,
+ VersionedClause<OMPC_Linear>,
+ VersionedClause<OMPC_Ordered>,
+ VersionedClause<OMPC_Order>
+ ];
+ let allowedOnceClauses = [
+ VersionedClause<OMPC_Device>,
+ VersionedClause<OMPC_DefaultMap>,
+ VersionedClause<OMPC_NoWait>,
+ VersionedClause<OMPC_Default>,
+ VersionedClause<OMPC_NumTeams>,
+ VersionedClause<OMPC_ThreadLimit>,
+ VersionedClause<OMPC_Collapse>,
+ VersionedClause<OMPC_DistSchedule>,
+ VersionedClause<OMPC_NumThreads>,
+ VersionedClause<OMPC_ProcBind>,
+ VersionedClause<OMPC_Schedule>,
+ ];
+}
+def OMP_TargetTeamsDistributeParallelForSimd :
+ Directive<"target teams distribute parallel for simd"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_If>,
+ VersionedClause<OMPC_Device>,
+ VersionedClause<OMPC_Map>,
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_NoWait>,
+ VersionedClause<OMPC_Depend>,
+ VersionedClause<OMPC_DefaultMap>,
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_IsDevicePtr>,
+ VersionedClause<OMPC_HasDeviceAddr, 51>,
+ VersionedClause<OMPC_Default>,
+ VersionedClause<OMPC_Shared>,
+ VersionedClause<OMPC_Reduction>,
+ VersionedClause<OMPC_NumTeams>,
+ VersionedClause<OMPC_ThreadLimit>,
+ VersionedClause<OMPC_LastPrivate>,
+ VersionedClause<OMPC_Collapse>,
+ VersionedClause<OMPC_DistSchedule>,
+ VersionedClause<OMPC_NumThreads>,
+ VersionedClause<OMPC_ProcBind>,
+ VersionedClause<OMPC_Schedule>,
+ VersionedClause<OMPC_Linear>,
+ VersionedClause<OMPC_Aligned>,
+ VersionedClause<OMPC_SafeLen>,
+ VersionedClause<OMPC_SimdLen>,
+ VersionedClause<OMPC_Allocate>,
+ VersionedClause<OMPC_NonTemporal, 50>,
+ VersionedClause<OMPC_Order, 50>,
+ VersionedClause<OMPC_UsesAllocators, 50>
+ ];
+ let allowedOnceClauses = [
+ VersionedClause<OMPC_OMPX_DynCGroupMem>,
+ ];
+}
+def OMP_TargetTeamsDistributeParallelDoSimd :
+ Directive<"target teams distribute parallel do simd"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_Map>,
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_Depend>,
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_IsDevicePtr>,
+ VersionedClause<OMPC_HasDeviceAddr, 51>,
+ VersionedClause<OMPC_Reduction>,
+ VersionedClause<OMPC_Allocate>,
+ VersionedClause<OMPC_UsesAllocators>,
+ VersionedClause<OMPC_Shared>,
+ VersionedClause<OMPC_LastPrivate>,
+ VersionedClause<OMPC_Copyin>,
+ VersionedClause<OMPC_Linear>,
+ VersionedClause<OMPC_Ordered>,
+ VersionedClause<OMPC_Order>,
+ VersionedClause<OMPC_Aligned>,
+ VersionedClause<OMPC_NonTemporal>
+ ];
+ let allowedOnceClauses = [
+ VersionedClause<OMPC_If>,
+ VersionedClause<OMPC_Device>,
+ VersionedClause<OMPC_NoWait>,
+ VersionedClause<OMPC_DefaultMap>,
+ VersionedClause<OMPC_Default>,
+ VersionedClause<OMPC_NumTeams>,
+ VersionedClause<OMPC_ThreadLimit>,
+ VersionedClause<OMPC_Collapse>,
+ VersionedClause<OMPC_DistSchedule>,
+ VersionedClause<OMPC_NumThreads>,
+ VersionedClause<OMPC_ProcBind>,
+ VersionedClause<OMPC_Schedule>,
+ VersionedClause<OMPC_SafeLen>,
+ VersionedClause<OMPC_SimdLen>
+ ];
+}
+def OMP_TargetTeamsDistributeSimd :
+ Directive<"target teams distribute simd"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_Aligned>,
+ VersionedClause<OMPC_Allocate>,
+ VersionedClause<OMPC_Depend>,
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_If>,
+ VersionedClause<OMPC_IsDevicePtr>,
+ VersionedClause<OMPC_HasDeviceAddr, 51>,
+ VersionedClause<OMPC_LastPrivate>,
+ VersionedClause<OMPC_Linear>,
+ VersionedClause<OMPC_Map>,
+ VersionedClause<OMPC_NonTemporal, 50>,
+ VersionedClause<OMPC_Order, 50>,
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_Reduction>,
+ VersionedClause<OMPC_Shared>,
+ VersionedClause<OMPC_UsesAllocators, 50>
+ ];
+ let allowedOnceClauses = [
+ VersionedClause<OMPC_Device>,
+ VersionedClause<OMPC_DefaultMap>,
+ VersionedClause<OMPC_NoWait>,
+ VersionedClause<OMPC_NumTeams>,
+ VersionedClause<OMPC_ThreadLimit>,
+ VersionedClause<OMPC_Collapse>,
+ VersionedClause<OMPC_DistSchedule>,
+ VersionedClause<OMPC_SafeLen>,
+ VersionedClause<OMPC_SimdLen>,
+ VersionedClause<OMPC_OMPX_DynCGroupMem>,
+ ];
+}
+def OMP_Allocate : Directive<"allocate"> {
+ let allowedOnceClauses = [
+ VersionedClause<OMPC_Allocator>,
+ VersionedClause<OMPC_Align, 51>
+ ];
+}
+def OMP_DeclareVariant : Directive<"declare variant"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_Match>
+ ];
+ let allowedExclusiveClauses = [
+ VersionedClause<OMPC_AdjustArgs, 51>,
+ VersionedClause<OMPC_AppendArgs, 51>
+ ];
+}
+def OMP_MasterTaskloop : Directive<"master taskloop"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_If>,
+ VersionedClause<OMPC_Shared>,
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_LastPrivate>,
+ VersionedClause<OMPC_Default>,
+ VersionedClause<OMPC_Collapse>,
+ VersionedClause<OMPC_Final>,
+ VersionedClause<OMPC_Untied>,
+ VersionedClause<OMPC_Mergeable>,
+ VersionedClause<OMPC_Priority>,
+ VersionedClause<OMPC_GrainSize>,
+ VersionedClause<OMPC_NoGroup>,
+ VersionedClause<OMPC_NumTasks>,
+ VersionedClause<OMPC_Reduction>,
+ VersionedClause<OMPC_InReduction>,
+ VersionedClause<OMPC_Allocate>
+ ];
+}
+def OMP_MaskedTaskloop : Directive<"masked taskloop"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_If>,
+ VersionedClause<OMPC_Shared>,
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_LastPrivate>,
+ VersionedClause<OMPC_Default>,
+ VersionedClause<OMPC_Collapse>,
+ VersionedClause<OMPC_Final>,
+ VersionedClause<OMPC_Untied>,
+ VersionedClause<OMPC_Mergeable>,
+ VersionedClause<OMPC_Priority>,
+ VersionedClause<OMPC_GrainSize>,
+ VersionedClause<OMPC_NoGroup>,
+ VersionedClause<OMPC_NumTasks>,
+ VersionedClause<OMPC_Reduction>,
+ VersionedClause<OMPC_InReduction>,
+ VersionedClause<OMPC_Allocate>,
+ VersionedClause<OMPC_Filter>
+ ];
+}
+def OMP_ParallelMasterTaskloop :
+ Directive<"parallel master taskloop"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_If>,
+ VersionedClause<OMPC_Shared>,
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_LastPrivate>,
+ VersionedClause<OMPC_Default>,
+ VersionedClause<OMPC_Collapse>,
+ VersionedClause<OMPC_Final>,
+ VersionedClause<OMPC_Untied>,
+ VersionedClause<OMPC_Mergeable>,
+ VersionedClause<OMPC_Priority>,
+ VersionedClause<OMPC_GrainSize>,
+ VersionedClause<OMPC_NoGroup>,
+ VersionedClause<OMPC_NumTasks>,
+ VersionedClause<OMPC_Reduction>,
+ VersionedClause<OMPC_Allocate>,
+ VersionedClause<OMPC_NumThreads>,
+ VersionedClause<OMPC_ProcBind>,
+ VersionedClause<OMPC_Copyin>
+ ];
+}
+def OMP_ParallelMaskedTaskloop :
+ Directive<"parallel masked taskloop"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_If>,
+ VersionedClause<OMPC_Shared>,
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_LastPrivate>,
+ VersionedClause<OMPC_Default>,
+ VersionedClause<OMPC_Collapse>,
+ VersionedClause<OMPC_Final>,
+ VersionedClause<OMPC_Untied>,
+ VersionedClause<OMPC_Mergeable>,
+ VersionedClause<OMPC_Priority>,
+ VersionedClause<OMPC_GrainSize>,
+ VersionedClause<OMPC_NoGroup>,
+ VersionedClause<OMPC_NumTasks>,
+ VersionedClause<OMPC_Reduction>,
+ VersionedClause<OMPC_Allocate>,
+ VersionedClause<OMPC_NumThreads>,
+ VersionedClause<OMPC_ProcBind>,
+ VersionedClause<OMPC_Copyin>,
+ VersionedClause<OMPC_Filter>
+ ];
+}
+def OMP_MasterTaskloopSimd : Directive<"master taskloop simd"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_If>,
+ VersionedClause<OMPC_Shared>,
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_LastPrivate>,
+ VersionedClause<OMPC_Default>,
+ VersionedClause<OMPC_Collapse>,
+ VersionedClause<OMPC_Final>,
+ VersionedClause<OMPC_Untied>,
+ VersionedClause<OMPC_Mergeable>,
+ VersionedClause<OMPC_Priority>,
+ VersionedClause<OMPC_Linear>,
+ VersionedClause<OMPC_Aligned>,
+ VersionedClause<OMPC_SafeLen>,
+ VersionedClause<OMPC_SimdLen>,
+ VersionedClause<OMPC_GrainSize>,
+ VersionedClause<OMPC_NoGroup>,
+ VersionedClause<OMPC_NumTasks>,
+ VersionedClause<OMPC_Reduction>,
+ VersionedClause<OMPC_InReduction>,
+ VersionedClause<OMPC_Allocate>,
+ VersionedClause<OMPC_NonTemporal, 50>,
+ VersionedClause<OMPC_Order, 50>
+ ];
+}
+def OMP_MaskedTaskloopSimd : Directive<"masked taskloop simd"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_If>,
+ VersionedClause<OMPC_Shared>,
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_LastPrivate>,
+ VersionedClause<OMPC_Default>,
+ VersionedClause<OMPC_Collapse>,
+ VersionedClause<OMPC_Final>,
+ VersionedClause<OMPC_Untied>,
+ VersionedClause<OMPC_Mergeable>,
+ VersionedClause<OMPC_Priority>,
+ VersionedClause<OMPC_Linear>,
+ VersionedClause<OMPC_Aligned>,
+ VersionedClause<OMPC_SafeLen>,
+ VersionedClause<OMPC_SimdLen>,
+ VersionedClause<OMPC_GrainSize>,
+ VersionedClause<OMPC_NoGroup>,
+ VersionedClause<OMPC_NumTasks>,
+ VersionedClause<OMPC_Reduction>,
+ VersionedClause<OMPC_InReduction>,
+ VersionedClause<OMPC_Allocate>,
+ VersionedClause<OMPC_NonTemporal, 50>,
+ VersionedClause<OMPC_Order, 50>,
+ VersionedClause<OMPC_Filter>
+ ];
+}
+def OMP_ParallelMasterTaskloopSimd :
+ Directive<"parallel master taskloop simd"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_If>,
+ VersionedClause<OMPC_Shared>,
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_LastPrivate>,
+ VersionedClause<OMPC_Default>,
+ VersionedClause<OMPC_Collapse>,
+ VersionedClause<OMPC_Final>,
+ VersionedClause<OMPC_Untied>,
+ VersionedClause<OMPC_Mergeable>,
+ VersionedClause<OMPC_Priority>,
+ VersionedClause<OMPC_GrainSize>,
+ VersionedClause<OMPC_NoGroup>,
+ VersionedClause<OMPC_NumTasks>,
+ VersionedClause<OMPC_Reduction>,
+ VersionedClause<OMPC_Allocate>,
+ VersionedClause<OMPC_NumThreads>,
+ VersionedClause<OMPC_ProcBind>,
+ VersionedClause<OMPC_Copyin>,
+ VersionedClause<OMPC_Linear>,
+ VersionedClause<OMPC_Aligned>,
+ VersionedClause<OMPC_SafeLen>,
+ VersionedClause<OMPC_SimdLen>,
+ VersionedClause<OMPC_NonTemporal, 50>,
+ VersionedClause<OMPC_Order, 50>
+ ];
+}
+def OMP_ParallelMaskedTaskloopSimd :
+ Directive<"parallel masked taskloop simd"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_If>,
+ VersionedClause<OMPC_Shared>,
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_LastPrivate>,
+ VersionedClause<OMPC_Default>,
+ VersionedClause<OMPC_Collapse>,
+ VersionedClause<OMPC_Final>,
+ VersionedClause<OMPC_Untied>,
+ VersionedClause<OMPC_Mergeable>,
+ VersionedClause<OMPC_Priority>,
+ VersionedClause<OMPC_GrainSize>,
+ VersionedClause<OMPC_NoGroup>,
+ VersionedClause<OMPC_NumTasks>,
+ VersionedClause<OMPC_Reduction>,
+ VersionedClause<OMPC_Allocate>,
+ VersionedClause<OMPC_NumThreads>,
+ VersionedClause<OMPC_ProcBind>,
+ VersionedClause<OMPC_Copyin>,
+ VersionedClause<OMPC_Linear>,
+ VersionedClause<OMPC_Aligned>,
+ VersionedClause<OMPC_SafeLen>,
+ VersionedClause<OMPC_SimdLen>,
+ VersionedClause<OMPC_NonTemporal, 50>,
+ VersionedClause<OMPC_Order, 50>,
+ VersionedClause<OMPC_Filter>
+ ];
+}
+def OMP_Depobj : Directive<"depobj"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_Depend, 50>,
+ VersionedClause<OMPC_Destroy, 50>,
+ VersionedClause<OMPC_Update, 50>,
+    // TODO This should be `none` instead. Comment carried over from
+ // OMPKinds.def.
+ VersionedClause<OMPC_Depobj, 50>
+ ];
+}
+def OMP_Scan : Directive<"scan"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_Inclusive, 50>,
+ VersionedClause<OMPC_Exclusive, 50>
+ ];
+}
+def OMP_Assumes : Directive<"assumes"> {}
+def OMP_BeginAssumes : Directive<"begin assumes"> {}
+def OMP_EndAssumes : Directive<"end assumes"> {}
+def OMP_BeginDeclareVariant : Directive<"begin declare variant"> {}
+def OMP_EndDeclareVariant : Directive<"end declare variant"> {}
+def OMP_ParallelWorkshare : Directive<"parallel workshare"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_Allocate>,
+ VersionedClause<OMPC_Copyin>,
+ VersionedClause<OMPC_Default>,
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_Reduction>,
+ VersionedClause<OMPC_Shared>
+ ];
+ let allowedOnceClauses = [
+ VersionedClause<OMPC_If>,
+ VersionedClause<OMPC_NumThreads>,
+ VersionedClause<OMPC_ProcBind>
+ ];
+}
+def OMP_Workshare : Directive<"workshare"> {}
+def OMP_EndDo : Directive<"end do"> {}
+def OMP_EndDoSimd : Directive<"end do simd"> {}
+def OMP_EndSections : Directive<"end sections"> {
+ let allowedOnceClauses = [
+ VersionedClause<OMPC_NoWait>
+ ];
+}
+def OMP_EndSingle : Directive<"end single"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_CopyPrivate>
+ ];
+ let allowedOnceClauses = [
+ VersionedClause<OMPC_NoWait>
+ ];
+}
+def OMP_EndWorkshare : Directive<"end workshare"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_NoWait>
+ ];
+}
+def OMP_interop : Directive<"interop"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_Device>,
+ VersionedClause<OMPC_Depend>,
+ VersionedClause<OMPC_Destroy>,
+ VersionedClause<OMPC_Init>,
+ VersionedClause<OMPC_NoWait>,
+ VersionedClause<OMPC_Use>,
+ ];
+}
+def OMP_dispatch : Directive<"dispatch"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_Device>,
+ VersionedClause<OMPC_IsDevicePtr>,
+ VersionedClause<OMPC_HasDeviceAddr, 51>,
+ VersionedClause<OMPC_NoWait>,
+ VersionedClause<OMPC_Depend>,
+ VersionedClause<OMPC_Novariants>,
+ VersionedClause<OMPC_Nocontext>
+ ];
+}
+def OMP_masked : Directive<"masked"> {
+ let allowedOnceClauses = [
+ VersionedClause<OMPC_Filter>
+ ];
+}
+def OMP_loop : Directive<"loop"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_LastPrivate>,
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_Reduction>,
+ ];
+ let allowedOnceClauses = [
+ VersionedClause<OMPC_Bind, 50>,
+ VersionedClause<OMPC_Collapse>,
+ VersionedClause<OMPC_Order>,
+ ];
+}
+def OMP_teams_loop : Directive<"teams loop"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_Allocate>,
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_LastPrivate>,
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_Reduction>,
+ VersionedClause<OMPC_Shared>,
+ ];
+ let allowedOnceClauses = [
+ VersionedClause<OMPC_Bind, 50>,
+ VersionedClause<OMPC_Collapse>,
+ VersionedClause<OMPC_Default>,
+ VersionedClause<OMPC_NumTeams>,
+ VersionedClause<OMPC_Order>,
+ VersionedClause<OMPC_ThreadLimit>,
+ ];
+}
+def OMP_target_teams_loop : Directive<"target teams loop"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_Allocate>,
+ VersionedClause<OMPC_Depend>,
+ VersionedClause<OMPC_DefaultMap>,
+ VersionedClause<OMPC_Device>,
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_IsDevicePtr>,
+ VersionedClause<OMPC_HasDeviceAddr, 51>,
+ VersionedClause<OMPC_LastPrivate>,
+ VersionedClause<OMPC_Map>,
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_Reduction>,
+ VersionedClause<OMPC_Shared>,
+ VersionedClause<OMPC_UsesAllocators, 50>
+ ];
+ let allowedOnceClauses = [
+ VersionedClause<OMPC_Bind, 50>,
+ VersionedClause<OMPC_Collapse>,
+ VersionedClause<OMPC_Default>,
+ VersionedClause<OMPC_If>,
+ VersionedClause<OMPC_NoWait>,
+ VersionedClause<OMPC_NumTeams>,
+ VersionedClause<OMPC_Order>,
+ VersionedClause<OMPC_ThreadLimit>,
+ VersionedClause<OMPC_OMPX_DynCGroupMem>,
+ ];
+}
+def OMP_parallel_loop : Directive<"parallel loop"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_Allocate>,
+ VersionedClause<OMPC_Copyin>,
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_LastPrivate>,
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_Reduction>,
+ VersionedClause<OMPC_Shared>,
+ ];
+ let allowedOnceClauses = [
+ VersionedClause<OMPC_Bind, 50>,
+ VersionedClause<OMPC_Collapse>,
+ VersionedClause<OMPC_Default>,
+ VersionedClause<OMPC_If>,
+ VersionedClause<OMPC_NumThreads>,
+ VersionedClause<OMPC_Order>,
+ VersionedClause<OMPC_ProcBind>,
+ ];
+}
+def OMP_target_parallel_loop : Directive<"target parallel loop"> {
+ let allowedClauses = [
+ VersionedClause<OMPC_Allocate>,
+ VersionedClause<OMPC_Copyin>,
+ VersionedClause<OMPC_Depend>,
+ VersionedClause<OMPC_Device>,
+ VersionedClause<OMPC_FirstPrivate>,
+ VersionedClause<OMPC_IsDevicePtr>,
+ VersionedClause<OMPC_HasDeviceAddr, 51>,
+ VersionedClause<OMPC_LastPrivate>,
+ VersionedClause<OMPC_Map>,
+ VersionedClause<OMPC_Private>,
+ VersionedClause<OMPC_Reduction>,
+ VersionedClause<OMPC_Shared>,
+ VersionedClause<OMPC_UsesAllocators, 50>,
+ ];
+ let allowedOnceClauses = [
+ VersionedClause<OMPC_Bind, 50>,
+ VersionedClause<OMPC_Collapse>,
+ VersionedClause<OMPC_Default>,
+ VersionedClause<OMPC_DefaultMap>,
+ VersionedClause<OMPC_If>,
+ VersionedClause<OMPC_NoWait>,
+ VersionedClause<OMPC_NumThreads>,
+ VersionedClause<OMPC_Order>,
+ VersionedClause<OMPC_ProcBind>,
+ VersionedClause<OMPC_OMPX_DynCGroupMem>,
+ ];
+}
+def OMP_Metadirective : Directive<"metadirective"> {
+ let allowedClauses = [VersionedClause<OMPC_When>];
+ let allowedOnceClauses = [VersionedClause<OMPC_Default>];
+}
+def OMP_Unknown : Directive<"unknown"> {
+ let isDefault = true;
+}
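+
+// Illustrative sketch (not part of the upstream file): a directive definition
+// pairs the directive's spelling with clause lists. VersionedClause<C, V>
+// admits clause C from OpenMP version V onwards (e.g., 50 = 5.0, 51 = 5.1),
+// and clauses in allowedOnceClauses may appear at most once on the directive.
+//
+// def OMP_Example : Directive<"example"> {
+//   let allowedOnceClauses = [VersionedClause<OMPC_If, 50>];
+// }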
diff --git a/contrib/libs/llvm16/include/llvm/Frontend/OpenMP/OMPAssume.h b/contrib/libs/llvm16/include/llvm/Frontend/OpenMP/OMPAssume.h
new file mode 100644
index 0000000000..701274c46d
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/Frontend/OpenMP/OMPAssume.h
@@ -0,0 +1,66 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- OpenMP/OMPAssume.h --- OpenMP assumption helper functions - C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file provides helper functions and classes to deal with OpenMP
+/// assumptions, e.g., as used by `[begin/end] assumes` and `assume`.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_FRONTEND_OPENMP_OMPASSUME_H
+#define LLVM_FRONTEND_OPENMP_OMPASSUME_H
+
+#include "llvm/ADT/StringRef.h"
+
+namespace llvm {
+
+namespace omp {
+
+/// Helper to describe assume clauses.
+struct AssumptionClauseMappingInfo {
+ /// The identifier describing the (beginning of the) clause.
+ llvm::StringLiteral Identifier;
+ /// Flag to determine if the identifier is a full name or the start of a name.
+ bool StartsWith;
+  /// Flag to determine if a directive list follows.
+ bool HasDirectiveList;
+ /// Flag to determine if an expression follows.
+ bool HasExpression;
+};
+
+/// All known assume clauses.
+static constexpr AssumptionClauseMappingInfo AssumptionClauseMappings[] = {
+#define OMP_ASSUME_CLAUSE(Identifier, StartsWith, HasDirectiveList, \
+ HasExpression) \
+ {Identifier, StartsWith, HasDirectiveList, HasExpression},
+#include "llvm/Frontend/OpenMP/OMPKinds.def"
+};
+
+inline std::string getAllAssumeClauseOptions() {
+ std::string S;
+ for (const AssumptionClauseMappingInfo &ACMI : AssumptionClauseMappings)
+ S += (S.empty() ? "'" : "', '") + ACMI.Identifier.str();
+ return S + "'";
+}
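+
+// Illustrative sketch (not part of the upstream header): the helper above
+// renders the known clause identifiers as a quoted, comma-separated list,
+// which is handy for diagnostics. The identifiers shown are assumed values.
+//
+//   std::string Opts = llvm::omp::getAllAssumeClauseOptions();
+//   // e.g., Opts == "'ext_', 'absent', 'contains', ..."
+//   llvm::errs() << "expected one of " << Opts << " as assume clause\n";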
+
+} // namespace omp
+
+} // namespace llvm
+
+#endif // LLVM_FRONTEND_OPENMP_OMPASSUME_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/Frontend/OpenMP/OMPConstants.h b/contrib/libs/llvm16/include/llvm/Frontend/OpenMP/OMPConstants.h
new file mode 100644
index 0000000000..5dd6b791f5
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/Frontend/OpenMP/OMPConstants.h
@@ -0,0 +1,288 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- OMPConstants.h - OpenMP related constants and helpers ------ C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines constants and helpers used when dealing with OpenMP.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_FRONTEND_OPENMP_OMPCONSTANTS_H
+#define LLVM_FRONTEND_OPENMP_OMPCONSTANTS_H
+
+#include "llvm/ADT/BitmaskEnum.h"
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Frontend/OpenMP/OMP.h.inc"
+
+namespace llvm {
+namespace omp {
+LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
+
+/// IDs for all Internal Control Variables (ICVs).
+enum class InternalControlVar {
+#define ICV_DATA_ENV(Enum, ...) Enum,
+#include "llvm/Frontend/OpenMP/OMPKinds.def"
+};
+
+#define ICV_DATA_ENV(Enum, ...) \
+ constexpr auto Enum = omp::InternalControlVar::Enum;
+#include "llvm/Frontend/OpenMP/OMPKinds.def"
+
+enum class ICVInitValue {
+#define ICV_INIT_VALUE(Enum, Name) Enum,
+#include "llvm/Frontend/OpenMP/OMPKinds.def"
+};
+
+#define ICV_INIT_VALUE(Enum, Name) \
+ constexpr auto Enum = omp::ICVInitValue::Enum;
+#include "llvm/Frontend/OpenMP/OMPKinds.def"
+
+/// IDs for all omp runtime library (RTL) functions.
+enum class RuntimeFunction {
+#define OMP_RTL(Enum, ...) Enum,
+#include "llvm/Frontend/OpenMP/OMPKinds.def"
+};
+
+#define OMP_RTL(Enum, ...) constexpr auto Enum = omp::RuntimeFunction::Enum;
+#include "llvm/Frontend/OpenMP/OMPKinds.def"
+
+/// IDs for the different default kinds.
+enum class DefaultKind {
+#define OMP_DEFAULT_KIND(Enum, Str) Enum,
+#include "llvm/Frontend/OpenMP/OMPKinds.def"
+};
+
+#define OMP_DEFAULT_KIND(Enum, ...) \
+ constexpr auto Enum = omp::DefaultKind::Enum;
+#include "llvm/Frontend/OpenMP/OMPKinds.def"
+
+/// IDs for all omp runtime library ident_t flag encodings (see
+/// their definition in openmp/runtime/src/kmp.h).
+enum class IdentFlag {
+#define OMP_IDENT_FLAG(Enum, Str, Value) Enum = Value,
+#include "llvm/Frontend/OpenMP/OMPKinds.def"
+ LLVM_MARK_AS_BITMASK_ENUM(0x7FFFFFFF)
+};
+
+#define OMP_IDENT_FLAG(Enum, ...) constexpr auto Enum = omp::IdentFlag::Enum;
+#include "llvm/Frontend/OpenMP/OMPKinds.def"
+
+/// \note This needs to be kept in sync with kmp.h enum sched_type.
+/// Todo: Update kmp.h to include this file, and remove the enums in kmp.h
+enum class OMPScheduleType {
+ // For typed comparisons, not a valid schedule
+ None = 0,
+
+ // Schedule algorithms
+ BaseStaticChunked = 1,
+ BaseStatic = 2,
+ BaseDynamicChunked = 3,
+ BaseGuidedChunked = 4,
+ BaseRuntime = 5,
+ BaseAuto = 6,
+ BaseTrapezoidal = 7,
+ BaseGreedy = 8,
+ BaseBalanced = 9,
+ BaseGuidedIterativeChunked = 10,
+ BaseGuidedAnalyticalChunked = 11,
+ BaseSteal = 12,
+
+ // with chunk adjustment (e.g., simd)
+ BaseStaticBalancedChunked = 13,
+ BaseGuidedSimd = 14,
+ BaseRuntimeSimd = 15,
+
+  // Static schedule algorithms for distribute
+ BaseDistributeChunked = 27,
+ BaseDistribute = 28,
+
+ // Modifier flags to be combined with schedule algorithms
+ ModifierUnordered = (1 << 5),
+ ModifierOrdered = (1 << 6),
+ ModifierNomerge = (1 << 7),
+ ModifierMonotonic = (1 << 29),
+ ModifierNonmonotonic = (1 << 30),
+
+ // Masks combining multiple flags
+ OrderingMask = ModifierUnordered | ModifierOrdered | ModifierNomerge,
+ MonotonicityMask = ModifierMonotonic | ModifierNonmonotonic,
+ ModifierMask = OrderingMask | MonotonicityMask,
+
+ // valid schedule type values, without monotonicity flags
+ UnorderedStaticChunked = BaseStaticChunked | ModifierUnordered, // 33
+ UnorderedStatic = BaseStatic | ModifierUnordered, // 34
+ UnorderedDynamicChunked = BaseDynamicChunked | ModifierUnordered, // 35
+ UnorderedGuidedChunked = BaseGuidedChunked | ModifierUnordered, // 36
+ UnorderedRuntime = BaseRuntime | ModifierUnordered, // 37
+ UnorderedAuto = BaseAuto | ModifierUnordered, // 38
+ UnorderedTrapezoidal = BaseTrapezoidal | ModifierUnordered, // 39
+ UnorderedGreedy = BaseGreedy | ModifierUnordered, // 40
+ UnorderedBalanced = BaseBalanced | ModifierUnordered, // 41
+ UnorderedGuidedIterativeChunked =
+ BaseGuidedIterativeChunked | ModifierUnordered, // 42
+ UnorderedGuidedAnalyticalChunked =
+ BaseGuidedAnalyticalChunked | ModifierUnordered, // 43
+ UnorderedSteal = BaseSteal | ModifierUnordered, // 44
+
+ UnorderedStaticBalancedChunked =
+ BaseStaticBalancedChunked | ModifierUnordered, // 45
+ UnorderedGuidedSimd = BaseGuidedSimd | ModifierUnordered, // 46
+ UnorderedRuntimeSimd = BaseRuntimeSimd | ModifierUnordered, // 47
+
+ OrderedStaticChunked = BaseStaticChunked | ModifierOrdered, // 65
+ OrderedStatic = BaseStatic | ModifierOrdered, // 66
+ OrderedDynamicChunked = BaseDynamicChunked | ModifierOrdered, // 67
+ OrderedGuidedChunked = BaseGuidedChunked | ModifierOrdered, // 68
+ OrderedRuntime = BaseRuntime | ModifierOrdered, // 69
+ OrderedAuto = BaseAuto | ModifierOrdered, // 70
+ OrderdTrapezoidal = BaseTrapezoidal | ModifierOrdered, // 71
+
+ OrderedDistributeChunked = BaseDistributeChunked | ModifierOrdered, // 91
+ OrderedDistribute = BaseDistribute | ModifierOrdered, // 92
+
+ NomergeUnorderedStaticChunked =
+ BaseStaticChunked | ModifierUnordered | ModifierNomerge, // 161
+ NomergeUnorderedStatic =
+ BaseStatic | ModifierUnordered | ModifierNomerge, // 162
+ NomergeUnorderedDynamicChunked =
+ BaseDynamicChunked | ModifierUnordered | ModifierNomerge, // 163
+ NomergeUnorderedGuidedChunked =
+ BaseGuidedChunked | ModifierUnordered | ModifierNomerge, // 164
+ NomergeUnorderedRuntime =
+ BaseRuntime | ModifierUnordered | ModifierNomerge, // 165
+ NomergeUnorderedAuto = BaseAuto | ModifierUnordered | ModifierNomerge, // 166
+ NomergeUnorderedTrapezoidal =
+ BaseTrapezoidal | ModifierUnordered | ModifierNomerge, // 167
+ NomergeUnorderedGreedy =
+ BaseGreedy | ModifierUnordered | ModifierNomerge, // 168
+ NomergeUnorderedBalanced =
+ BaseBalanced | ModifierUnordered | ModifierNomerge, // 169
+ NomergeUnorderedGuidedIterativeChunked =
+ BaseGuidedIterativeChunked | ModifierUnordered | ModifierNomerge, // 170
+ NomergeUnorderedGuidedAnalyticalChunked =
+ BaseGuidedAnalyticalChunked | ModifierUnordered | ModifierNomerge, // 171
+ NomergeUnorderedSteal =
+ BaseSteal | ModifierUnordered | ModifierNomerge, // 172
+
+ NomergeOrderedStaticChunked =
+ BaseStaticChunked | ModifierOrdered | ModifierNomerge, // 193
+ NomergeOrderedStatic = BaseStatic | ModifierOrdered | ModifierNomerge, // 194
+ NomergeOrderedDynamicChunked =
+ BaseDynamicChunked | ModifierOrdered | ModifierNomerge, // 195
+ NomergeOrderedGuidedChunked =
+ BaseGuidedChunked | ModifierOrdered | ModifierNomerge, // 196
+ NomergeOrderedRuntime =
+ BaseRuntime | ModifierOrdered | ModifierNomerge, // 197
+ NomergeOrderedAuto = BaseAuto | ModifierOrdered | ModifierNomerge, // 198
+ NomergeOrderedTrapezoidal =
+ BaseTrapezoidal | ModifierOrdered | ModifierNomerge, // 199
+
+ LLVM_MARK_AS_BITMASK_ENUM(/* LargestValue */ ModifierMask)
+};
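+
+// Illustrative sketch (not part of the upstream header): because of
+// LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE and the LLVM_MARK_AS_BITMASK_ENUM
+// marker above, schedule types compose with the usual bitwise operators:
+//
+//   OMPScheduleType S = OMPScheduleType::BaseDynamicChunked |
+//                       OMPScheduleType::ModifierNonmonotonic;
+//   // Strip the modifier bits to recover the base algorithm.
+//   OMPScheduleType Base = S & ~OMPScheduleType::ModifierMask;
+//   assert(Base == OMPScheduleType::BaseDynamicChunked);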
+
+/// Values for bit flags used to specify the mapping type for
+/// offloading.
+enum class OpenMPOffloadMappingFlags : uint64_t {
+ /// No flags
+ OMP_MAP_NONE = 0x0,
+ /// Allocate memory on the device and move data from host to device.
+ OMP_MAP_TO = 0x01,
+ /// Allocate memory on the device and move data from device to host.
+ OMP_MAP_FROM = 0x02,
+ /// Always perform the requested mapping action on the element, even
+ /// if it was already mapped before.
+ OMP_MAP_ALWAYS = 0x04,
+ /// Delete the element from the device environment, ignoring the
+ /// current reference count associated with the element.
+ OMP_MAP_DELETE = 0x08,
+ /// The element being mapped is a pointer-pointee pair; both the
+ /// pointer and the pointee should be mapped.
+ OMP_MAP_PTR_AND_OBJ = 0x10,
+  /// This flag signals that the base address of an entry should be
+ /// passed to the target kernel as an argument.
+ OMP_MAP_TARGET_PARAM = 0x20,
+ /// Signal that the runtime library has to return the device pointer
+ /// in the current position for the data being mapped. Used when we have the
+ /// use_device_ptr or use_device_addr clause.
+ OMP_MAP_RETURN_PARAM = 0x40,
+ /// This flag signals that the reference being passed is a pointer to
+ /// private data.
+ OMP_MAP_PRIVATE = 0x80,
+ /// Pass the element to the device by value.
+ OMP_MAP_LITERAL = 0x100,
+ /// Implicit map
+ OMP_MAP_IMPLICIT = 0x200,
+ /// Close is a hint to the runtime to allocate memory close to
+ /// the target device.
+ OMP_MAP_CLOSE = 0x400,
+ /// 0x800 is reserved for compatibility with XLC.
+ /// Produce a runtime error if the data is not already allocated.
+ OMP_MAP_PRESENT = 0x1000,
+ // Increment and decrement a separate reference counter so that the data
+ // cannot be unmapped within the associated region. Thus, this flag is
+ // intended to be used on 'target' and 'target data' directives because they
+ // are inherently structured. It is not intended to be used on 'target
+ // enter data' and 'target exit data' directives because they are inherently
+ // dynamic.
+ // This is an OpenMP extension for the sake of OpenACC support.
+ OMP_MAP_OMPX_HOLD = 0x2000,
+ /// Signal that the runtime library should use args as an array of
+ /// descriptor_dim pointers and use args_size as dims. Used when we have
+ /// non-contiguous list items in target update directive
+ OMP_MAP_NON_CONTIG = 0x100000000000,
+ /// The 16 MSBs of the flags indicate whether the entry is member of some
+ /// struct/class.
+ OMP_MAP_MEMBER_OF = 0xffff000000000000,
+ LLVM_MARK_AS_BITMASK_ENUM(/* LargestFlag = */ OMP_MAP_MEMBER_OF)
+};
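+
+// Illustrative sketch (not part of the upstream header): a `map(tofrom: x)`
+// entry that is also passed to the kernel as an argument would plausibly
+// combine the flags above as follows (the exact combination a compiler emits
+// depends on context):
+//
+//   OpenMPOffloadMappingFlags F =
+//       OpenMPOffloadMappingFlags::OMP_MAP_TO |
+//       OpenMPOffloadMappingFlags::OMP_MAP_FROM |
+//       OpenMPOffloadMappingFlags::OMP_MAP_TARGET_PARAM;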
+
+enum class AddressSpace : unsigned {
+ Generic = 0,
+ Global = 1,
+ Shared = 3,
+ Constant = 4,
+ Local = 5,
+};
+
+/// \note This needs to be kept in sync with interop.h enum kmp_interop_type_t.
+enum class OMPInteropType { Unknown, Target, TargetSync };
+
+/// Atomic compare operations. Currently OpenMP only supports ==, >, and <.
+enum class OMPAtomicCompareOp : unsigned { EQ, MIN, MAX };
+
+/// Fields ids in kmp_depend_info record.
+enum class RTLDependInfoFields { BaseAddr, Len, Flags };
+
+/// Dependence kind for RTL.
+enum class RTLDependenceKindTy {
+ DepUnknown = 0x0,
+ DepIn = 0x01,
+ DepInOut = 0x3,
+ DepMutexInOutSet = 0x4,
+ DepInOutSet = 0x8,
+ DepOmpAllMem = 0x80,
+};
+
+} // end namespace omp
+
+} // end namespace llvm
+
+#include "OMPDeviceConstants.h"
+
+#endif // LLVM_FRONTEND_OPENMP_OMPCONSTANTS_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/Frontend/OpenMP/OMPContext.h b/contrib/libs/llvm16/include/llvm/Frontend/OpenMP/OMPContext.h
new file mode 100644
index 0000000000..0f5483cf16
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/Frontend/OpenMP/OMPContext.h
@@ -0,0 +1,221 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- OpenMP/OMPContext.h ----- OpenMP context helper functions - C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file provides helper functions and classes to deal with OpenMP
+/// contexts as used by `[begin/end] declare variant` and `metadirective`.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_FRONTEND_OPENMP_OMPCONTEXT_H
+#define LLVM_FRONTEND_OPENMP_OMPCONTEXT_H
+
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/Frontend/OpenMP/OMPConstants.h"
+
+namespace llvm {
+class Triple;
+namespace omp {
+
+/// OpenMP Context related IDs and helpers
+///
+///{
+
+/// IDs for all OpenMP context selector trait sets (construct/device/...).
+enum class TraitSet {
+#define OMP_TRAIT_SET(Enum, ...) Enum,
+#include "llvm/Frontend/OpenMP/OMPKinds.def"
+};
+
+/// IDs for all OpenMP context trait selectors (device={kind/isa...}/...).
+enum class TraitSelector {
+#define OMP_TRAIT_SELECTOR(Enum, ...) Enum,
+#include "llvm/Frontend/OpenMP/OMPKinds.def"
+};
+
+/// IDs for all OpenMP context trait properties (host/gpu/bsc/llvm/...)
+enum class TraitProperty {
+#define OMP_TRAIT_PROPERTY(Enum, ...) Enum,
+#define OMP_LAST_TRAIT_PROPERTY(Enum) Last = Enum
+#include "llvm/Frontend/OpenMP/OMPKinds.def"
+};
+
+/// Parse \p Str and return the trait set it matches or TraitSet::invalid.
+TraitSet getOpenMPContextTraitSetKind(StringRef Str);
+
+/// Return the trait set for which \p Selector is a selector.
+TraitSet getOpenMPContextTraitSetForSelector(TraitSelector Selector);
+
+/// Return the trait set for which \p Property is a property.
+TraitSet getOpenMPContextTraitSetForProperty(TraitProperty Property);
+
+/// Return a textual representation of the trait set \p Kind.
+StringRef getOpenMPContextTraitSetName(TraitSet Kind);
+
+/// Parse \p Str and return the trait selector it matches or
+/// TraitSelector::invalid.
+TraitSelector getOpenMPContextTraitSelectorKind(StringRef Str);
+
+/// Return the trait selector for which \p Property is a property.
+TraitSelector getOpenMPContextTraitSelectorForProperty(TraitProperty Property);
+
+/// Return a textual representation of the trait selector \p Kind.
+StringRef getOpenMPContextTraitSelectorName(TraitSelector Kind);
+
+/// Parse \p Str and return the trait property it matches in the set \p Set and
+/// selector \p Selector or TraitProperty::invalid.
+TraitProperty getOpenMPContextTraitPropertyKind(TraitSet Set,
+ TraitSelector Selector,
+ StringRef Str);
+
+/// Return the trait property for a singleton selector \p Selector.
+TraitProperty getOpenMPContextTraitPropertyForSelector(TraitSelector Selector);
+
+/// Return a textual representation of the trait property \p Kind, which might
+/// be the raw string we parsed (\p RawString) if we do not translate the
+/// property into a (distinct) enum.
+StringRef getOpenMPContextTraitPropertyName(TraitProperty Kind,
+ StringRef RawString);
+
+/// Return a textual representation of the trait property \p Kind with selector
+/// and set name included.
+StringRef getOpenMPContextTraitPropertyFullName(TraitProperty Kind);
+
+/// Return a string listing all trait sets.
+std::string listOpenMPContextTraitSets();
+
+/// Return a string listing all trait selectors for \p Set.
+std::string listOpenMPContextTraitSelectors(TraitSet Set);
+
+/// Return a string listing all trait properties for \p Set and \p Selector.
+std::string listOpenMPContextTraitProperties(TraitSet Set,
+ TraitSelector Selector);
+///}
+
+/// Return true if \p Selector can be nested in \p Set. Also sets
+/// \p AllowsTraitScore and \p RequiresProperty to true/false if the user can
+/// specify a score for properties in \p Selector and if the \p Selector
+/// requires at least one property.
+bool isValidTraitSelectorForTraitSet(TraitSelector Selector, TraitSet Set,
+ bool &AllowsTraitScore,
+ bool &RequiresProperty);
+
+/// Return true if \p Property can be nested in \p Selector and \p Set.
+bool isValidTraitPropertyForTraitSetAndSelector(TraitProperty Property,
+ TraitSelector Selector,
+ TraitSet Set);
+
+/// Variant match information describes the required traits and how they are
+/// scored (via the ScoreMap). In addition, the required construct nesting is
+/// described as well.
+struct VariantMatchInfo {
+ /// Add the trait \p Property to the required trait set. \p RawString is the
+  /// string we parsed and derived \p Property from. If \p Score is not null,
+  /// it is recorded as well. If \p Property is in the `construct` set it is
+  /// recorded in-order in the ConstructTraits as well.
+ void addTrait(TraitProperty Property, StringRef RawString,
+ APInt *Score = nullptr) {
+ addTrait(getOpenMPContextTraitSetForProperty(Property), Property, RawString,
+ Score);
+ }
+ /// Add the trait \p Property which is in set \p Set to the required trait
+ /// set. \p RawString is the string we parsed and derived \p Property from. If
+  /// \p Score is not null, it is recorded as well. If \p Set is the `construct`
+ /// set it is recorded in-order in the ConstructTraits as well.
+ void addTrait(TraitSet Set, TraitProperty Property, StringRef RawString,
+ APInt *Score = nullptr) {
+ if (Score)
+ ScoreMap[Property] = *Score;
+
+ // Special handling for `device={isa(...)}` as we do not match the enum but
+ // the raw string.
+ if (Property == TraitProperty::device_isa___ANY)
+ ISATraits.push_back(RawString);
+
+ RequiredTraits.set(unsigned(Property));
+ if (Set == TraitSet::construct)
+ ConstructTraits.push_back(Property);
+ }
+
+ BitVector RequiredTraits = BitVector(unsigned(TraitProperty::Last) + 1);
+ SmallVector<StringRef, 8> ISATraits;
+ SmallVector<TraitProperty, 8> ConstructTraits;
+ SmallDenseMap<TraitProperty, APInt> ScoreMap;
+};
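+
+// Illustrative sketch (not part of the upstream header): recording the traits
+// required by `match(device={kind(gpu)})`. The enumerator name follows the
+// OMPKinds.def naming scheme and is assumed here.
+//
+//   VariantMatchInfo VMI;
+//   VMI.addTrait(TraitProperty::device_kind_gpu, /*RawString=*/"gpu");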
+
+/// The context for a source location is made up of active property traits,
+/// e.g., device={kind(host)}, and construct traits, which describe the nesting
+/// in OpenMP constructs at the location.
+struct OMPContext {
+ OMPContext(bool IsDeviceCompilation, Triple TargetTriple);
+ virtual ~OMPContext() = default;
+
+ void addTrait(TraitProperty Property) {
+ addTrait(getOpenMPContextTraitSetForProperty(Property), Property);
+ }
+ void addTrait(TraitSet Set, TraitProperty Property) {
+ ActiveTraits.set(unsigned(Property));
+ if (Set == TraitSet::construct)
+ ConstructTraits.push_back(Property);
+ }
+
+ /// Hook for users to check if an ISA trait matches. The trait is described as
+ /// the string that got parsed and it depends on the target and context if
+ /// this matches or not.
+ virtual bool matchesISATrait(StringRef) const { return false; }
+
+ BitVector ActiveTraits = BitVector(unsigned(TraitProperty::Last) + 1);
+ SmallVector<TraitProperty, 8> ConstructTraits;
+};
+
+/// Return true if \p VMI is applicable in \p Ctx, that is, all traits required
+/// by \p VMI are available in the OpenMP context \p Ctx. If \p DeviceSetOnly is
+/// true, only the device selector set, if present, is checked. Note that we
+/// still honor extension traits provided by the user.
+bool isVariantApplicableInContext(const VariantMatchInfo &VMI,
+ const OMPContext &Ctx,
+ bool DeviceSetOnly = false);
+
+/// Return the index (into \p VMIs) of the variant with the highest score
+/// from the ones applicable in \p Ctx. See llvm::isVariantApplicableInContext.
+int getBestVariantMatchForContext(const SmallVectorImpl<VariantMatchInfo> &VMIs,
+ const OMPContext &Ctx);
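+
+// Illustrative sketch (not part of the upstream header): ranking the variants
+// collected for a call site against the current compilation context.
+//
+//   OMPContext Ctx(/*IsDeviceCompilation=*/false, TargetTriple);
+//   int Best = getBestVariantMatchForContext(VMIs, Ctx);
+//   // Best is an index into VMIs (assumed: a negative value if none applies).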
+
+} // namespace omp
+
+template <> struct DenseMapInfo<omp::TraitProperty> {
+ static inline omp::TraitProperty getEmptyKey() {
+ return omp::TraitProperty(-1);
+ }
+ static inline omp::TraitProperty getTombstoneKey() {
+ return omp::TraitProperty(-2);
+ }
+ static unsigned getHashValue(omp::TraitProperty val) {
+ return std::hash<unsigned>{}(unsigned(val));
+ }
+ static bool isEqual(omp::TraitProperty LHS, omp::TraitProperty RHS) {
+ return LHS == RHS;
+ }
+};
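+
+// Illustrative sketch (not part of the upstream header): the specialization
+// above is what lets TraitProperty serve as a DenseMap key:
+//
+//   llvm::DenseMap<omp::TraitProperty, llvm::APInt> Scores;
+//   Scores[omp::TraitProperty::device_kind_gpu] = llvm::APInt(64, 100);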
+
+} // end namespace llvm
+#endif // LLVM_FRONTEND_OPENMP_OMPCONTEXT_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/Frontend/OpenMP/OMPDeviceConstants.h b/contrib/libs/llvm16/include/llvm/Frontend/OpenMP/OMPDeviceConstants.h
new file mode 100644
index 0000000000..d44ccce9fb
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/Frontend/OpenMP/OMPDeviceConstants.h
@@ -0,0 +1,42 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- OMPDeviceConstants.h - OpenMP device related constants ----- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines constants that will be used by both host and device
+/// compilation.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_FRONTEND_OPENMP_OMPDEVICECONSTANTS_H
+#define LLVM_FRONTEND_OPENMP_OMPDEVICECONSTANTS_H
+
+namespace llvm {
+namespace omp {
+
+enum OMPTgtExecModeFlags : unsigned char {
+ OMP_TGT_EXEC_MODE_GENERIC = 1 << 0,
+ OMP_TGT_EXEC_MODE_SPMD = 1 << 1,
+ OMP_TGT_EXEC_MODE_GENERIC_SPMD =
+ OMP_TGT_EXEC_MODE_GENERIC | OMP_TGT_EXEC_MODE_SPMD
+};
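+
+// Illustrative sketch (not part of the upstream header): the flags form a
+// bitset, so a generic-SPMD image satisfies both execution-mode checks:
+//
+//   unsigned char Mode = OMP_TGT_EXEC_MODE_GENERIC_SPMD;
+//   bool CanRunSPMD = (Mode & OMP_TGT_EXEC_MODE_SPMD) != 0;       // true
+//   bool CanRunGeneric = (Mode & OMP_TGT_EXEC_MODE_GENERIC) != 0; // true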
+
+} // end namespace omp
+} // end namespace llvm
+
+#endif // LLVM_FRONTEND_OPENMP_OMPDEVICECONSTANTS_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/Frontend/OpenMP/OMPGridValues.h b/contrib/libs/llvm16/include/llvm/Frontend/OpenMP/OMPGridValues.h
new file mode 100644
index 0000000000..6010068a68
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/Frontend/OpenMP/OMPGridValues.h
@@ -0,0 +1,137 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//====--- OMPGridValues.h - Target-specific GPU grid values ---*- C++ -*-====//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief Provides definitions for Target specific Grid Values
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_FRONTEND_OPENMP_OMPGRIDVALUES_H
+#define LLVM_FRONTEND_OPENMP_OMPGRIDVALUES_H
+
+namespace llvm {
+
+namespace omp {
+
+/// \brief Defines various target-specific GPU grid values that must be
+/// consistent between host RTL (plugin), device RTL, and clang.
+/// We can change grid values for a "fat" binary so that different
+/// passes get the correct values when generating code for a
+/// multi-target binary. Both amdgcn and nvptx values are stored in
+/// this file. In the future, should there be differences between GPUs
+/// of the same architecture, then simply make a different array and
+/// use the new array name.
+///
+/// Example usage in clang:
+/// const unsigned slot_size =
+/// ctx.GetTargetInfo().getGridValue().GV_Warp_Size;
+///
+/// Example usage in libomptarget/deviceRTLs:
+/// #include "llvm/Frontend/OpenMP/OMPGridValues.h"
+/// #ifdef __AMDGPU__
+/// #define GRIDVAL AMDGPUGridValues
+/// #else
+/// #define GRIDVAL NVPTXGridValues
+/// #endif
+/// ... Then use this reference for GV_Warp_Size in the deviceRTL source.
+/// llvm::omp::GRIDVAL().GV_Warp_Size
+///
+/// Example usage in libomptarget hsa plugin:
+/// #include "llvm/Frontend/OpenMP/OMPGridValues.h"
+/// #define GRIDVAL AMDGPUGridValues
+/// ... Then use this reference to access GV_Warp_Size in the hsa plugin.
+/// llvm::omp::GRIDVAL().GV_Warp_Size
+///
+/// Example usage in libomptarget cuda plugin:
+/// #include "llvm/Frontend/OpenMP/OMPGridValues.h"
+/// #define GRIDVAL NVPTXGridValues
+/// ... Then use this reference to access GV_Warp_Size in the cuda plugin.
+/// llvm::omp::GRIDVAL().GV_Warp_Size
+///
+
+struct GV {
+ /// The size reserved for data in a shared memory slot.
+ unsigned GV_Slot_Size;
+ /// The default value of maximum number of threads in a worker warp.
+ unsigned GV_Warp_Size;
+
+ constexpr unsigned warpSlotSize() const {
+ return GV_Warp_Size * GV_Slot_Size;
+ }
+
+  /// The maximum number of teams.
+  unsigned GV_Max_Teams;
+  /// The default number of teams in the absence of any other information.
+ unsigned GV_Default_Num_Teams;
+
+ // An alternative to the heavy data sharing infrastructure that uses global
+ // memory is one that uses device __shared__ memory. The amount of such space
+ // (in bytes) reserved by the OpenMP runtime is noted here.
+ unsigned GV_SimpleBufferSize;
+  /// The absolute maximum team size for a working group.
+  unsigned GV_Max_WG_Size;
+  /// The default maximum team size for a working group.
+  unsigned GV_Default_WG_Size;
+
+ constexpr unsigned maxWarpNumber() const {
+ return GV_Max_WG_Size / GV_Warp_Size;
+ }
+};
+
+/// For AMDGPU GPUs
+static constexpr GV AMDGPUGridValues64 = {
+ 256, // GV_Slot_Size
+ 64, // GV_Warp_Size
+ (1 << 16), // GV_Max_Teams
+ 440, // GV_Default_Num_Teams
+ 896, // GV_SimpleBufferSize
+ 1024, // GV_Max_WG_Size,
+ 256, // GV_Default_WG_Size
+};
+
+static constexpr GV AMDGPUGridValues32 = {
+ 256, // GV_Slot_Size
+ 32, // GV_Warp_Size
+ (1 << 16), // GV_Max_Teams
+ 440, // GV_Default_Num_Teams
+ 896, // GV_SimpleBufferSize
+ 1024, // GV_Max_WG_Size,
+ 256, // GV_Default_WG_Size
+};
+
+template <unsigned wavesize> constexpr const GV &getAMDGPUGridValues() {
+ static_assert(wavesize == 32 || wavesize == 64, "Unexpected wavesize");
+ return wavesize == 32 ? AMDGPUGridValues32 : AMDGPUGridValues64;
+}
+
+/// For Nvidia GPUs
+static constexpr GV NVPTXGridValues = {
+ 256, // GV_Slot_Size
+ 32, // GV_Warp_Size
+ (1 << 16), // GV_Max_Teams
+ 3200, // GV_Default_Num_Teams
+ 896, // GV_SimpleBufferSize
+ 1024, // GV_Max_WG_Size
+ 128, // GV_Default_WG_Size
+};
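+
+// Illustrative compile-time sanity checks over the tables above (a sketch,
+// not part of the upstream header):
+//
+//   static_assert(getAMDGPUGridValues<64>().GV_Warp_Size == 64, "");
+//   static_assert(getAMDGPUGridValues<64>().warpSlotSize() == 64 * 256, "");
+//   static_assert(NVPTXGridValues.maxWarpNumber() == 1024 / 32, "");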
+
+} // namespace omp
+} // namespace llvm
+
+#endif // LLVM_FRONTEND_OPENMP_OMPGRIDVALUES_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/Frontend/OpenMP/OMPIRBuilder.h b/contrib/libs/llvm16/include/llvm/Frontend/OpenMP/OMPIRBuilder.h
new file mode 100644
index 0000000000..cf04f060b5
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/Frontend/OpenMP/OMPIRBuilder.h
@@ -0,0 +1,2416 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- IR/OpenMPIRBuilder.h - OpenMP encoding builder for LLVM IR - C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the OpenMPIRBuilder class and helpers used as a convenient
+// way to create LLVM instructions for OpenMP directives.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
+#define LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
+
+#include "llvm/Analysis/MemorySSAUpdater.h"
+#include "llvm/Frontend/OpenMP/OMPConstants.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/Support/Allocator.h"
+#include <forward_list>
+#include <map>
+#include <optional>
+
+namespace llvm {
+class CanonicalLoopInfo;
+struct TargetRegionEntryInfo;
+class OffloadEntriesInfoManager;
+
+/// Move the instruction after an InsertPoint to the beginning of another
+/// BasicBlock.
+///
+/// The instructions after \p IP are moved to the beginning of \p New which must
+/// not have any PHINodes. If \p CreateBranch is true, a branch instruction to
+/// \p New will be added such that there is no semantic change. Otherwise, the
+/// \p IP insert block remains degenerate and it is up to the caller to insert a
+/// terminator.
+void spliceBB(IRBuilderBase::InsertPoint IP, BasicBlock *New,
+ bool CreateBranch);
+
+/// Splice a BasicBlock at an IRBuilder's current insertion point. Its new
+/// insert location will stick to after the instruction before the insertion
+/// point (instead of moving with the instruction the InsertPoint stores
+/// internally).
+void spliceBB(IRBuilder<> &Builder, BasicBlock *New, bool CreateBranch);
+
+/// Split a BasicBlock at an InsertPoint, even if the block is degenerate
+/// (missing the terminator).
+///
+/// llvm::SplitBasicBlock and BasicBlock::splitBasicBlock require a well-formed
+/// BasicBlock. \p Name is used for the new successor block. If \p CreateBranch
+/// is true, a branch to the new successor will be created such that
+/// semantically there is no change; otherwise the block of the insertion point
+/// remains degenerate and it is the caller's responsibility to insert a
+/// terminator. Returns the new successor block.
+BasicBlock *splitBB(IRBuilderBase::InsertPoint IP, bool CreateBranch,
+ llvm::Twine Name = {});
+
+/// Split a BasicBlock at \p Builder's insertion point, even if the block is
+/// degenerate (missing the terminator). Its new insert location will stick to
+/// after the instruction before the insertion point (instead of moving with the
+/// instruction the InsertPoint stores internally).
+BasicBlock *splitBB(IRBuilderBase &Builder, bool CreateBranch,
+ llvm::Twine Name = {});
+
+/// Split a BasicBlock at \p Builder's insertion point, even if the block is
+/// degenerate (missing the terminator). Its new insert location will stick to
+/// after the instruction before the insertion point (instead of moving with the
+/// instruction the InsertPoint stores internally).
+BasicBlock *splitBB(IRBuilder<> &Builder, bool CreateBranch, llvm::Twine Name);
+
+/// Like splitBB, but reuses the current block's name for the new name.
+BasicBlock *splitBBWithSuffix(IRBuilderBase &Builder, bool CreateBranch,
+ llvm::Twine Suffix = ".split");
+
+/// Captures attributes that affect generating LLVM-IR using the
+/// OpenMPIRBuilder and related classes. Note that not all attributes are
+/// required for all classes or functions. In some use cases the configuration
+/// is not necessary at all, because the only functions that are called
+/// are ones that are not dependent on the configuration.
+class OpenMPIRBuilderConfig {
+public:
+ /// Flag for specifying if the compilation is done for embedded device code
+ /// or host code.
+ std::optional<bool> IsEmbedded;
+
+ /// Flag for specifying if the compilation is done for an offloading target,
+ /// like GPU.
+ std::optional<bool> IsTargetCodegen;
+
+  /// Flag for specifying whether a requires unified_shared_memory
+ /// directive is present or not.
+ std::optional<bool> HasRequiresUnifiedSharedMemory;
+
+ // Flag for specifying if offloading is mandatory.
+ std::optional<bool> OpenMPOffloadMandatory;
+
+ /// First separator used between the initial two parts of a name.
+ std::optional<StringRef> FirstSeparator;
+  /// Separator used between all the remaining consecutive parts of a name.
+ std::optional<StringRef> Separator;
+
+ OpenMPIRBuilderConfig() {}
+ OpenMPIRBuilderConfig(bool IsEmbedded, bool IsTargetCodegen,
+ bool HasRequiresUnifiedSharedMemory,
+ bool OpenMPOffloadMandatory)
+ : IsEmbedded(IsEmbedded), IsTargetCodegen(IsTargetCodegen),
+ HasRequiresUnifiedSharedMemory(HasRequiresUnifiedSharedMemory),
+ OpenMPOffloadMandatory(OpenMPOffloadMandatory) {}
+
+  // Getter functions that assert if the required values are not present.
+ bool isEmbedded() const {
+ assert(IsEmbedded.has_value() && "IsEmbedded is not set");
+ return *IsEmbedded;
+ }
+
+ bool isTargetCodegen() const {
+ assert(IsTargetCodegen.has_value() && "IsTargetCodegen is not set");
+ return *IsTargetCodegen;
+ }
+
+ bool hasRequiresUnifiedSharedMemory() const {
+ assert(HasRequiresUnifiedSharedMemory.has_value() &&
+ "HasUnifiedSharedMemory is not set");
+ return *HasRequiresUnifiedSharedMemory;
+ }
+
+ bool openMPOffloadMandatory() const {
+ assert(OpenMPOffloadMandatory.has_value() &&
+ "OpenMPOffloadMandatory is not set");
+ return *OpenMPOffloadMandatory;
+ }
+  // Returns the FirstSeparator if set; otherwise, the default
+  // separator depending on isTargetCodegen().
+ StringRef firstSeparator() const {
+ if (FirstSeparator.has_value())
+ return *FirstSeparator;
+ if (isTargetCodegen())
+ return "_";
+ return ".";
+ }
+
+  // Returns the Separator if set; otherwise, the default
+  // separator depending on isTargetCodegen().
+ StringRef separator() const {
+ if (Separator.has_value())
+ return *Separator;
+ if (isTargetCodegen())
+ return "$";
+ return ".";
+ }
+
+ void setIsEmbedded(bool Value) { IsEmbedded = Value; }
+ void setIsTargetCodegen(bool Value) { IsTargetCodegen = Value; }
+ void setHasRequiresUnifiedSharedMemory(bool Value) {
+ HasRequiresUnifiedSharedMemory = Value;
+ }
+ void setFirstSeparator(StringRef FS) { FirstSeparator = FS; }
+ void setSeparator(StringRef S) { Separator = S; }
+};
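+
+// A minimal configuration sketch (illustrative): plain host compilation.
+// With IsTargetCodegen false, firstSeparator() and separator() both fall
+// back to ".".
+//
+//   OpenMPIRBuilderConfig Config(/*IsEmbedded=*/false,
+//                                /*IsTargetCodegen=*/false,
+//                                /*HasRequiresUnifiedSharedMemory=*/false,
+//                                /*OpenMPOffloadMandatory=*/false);
+//   assert(Config.firstSeparator() == "." && Config.separator() == ".");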
+
+/// An interface to create LLVM-IR for OpenMP directives.
+///
+/// Each OpenMP directive has a corresponding public generator method.
+class OpenMPIRBuilder {
+public:
+ /// Create a new OpenMPIRBuilder operating on the given module \p M. This will
+  /// not have an effect on \p M (see initialize).
+ OpenMPIRBuilder(Module &M) : M(M), Builder(M.getContext()) {}
+ ~OpenMPIRBuilder();
+
+  /// Initialize the internal state; this will put structure types and
+ /// potentially other helpers into the underlying module. Must be called
+ /// before any other method and only once!
+ void initialize();
+
+ void setConfig(OpenMPIRBuilderConfig C) { Config = C; }
+
+ /// Finalize the underlying module, e.g., by outlining regions.
+ /// \param Fn The function to be finalized. If not used,
+ /// all functions are finalized.
+ void finalize(Function *Fn = nullptr);
+
+ /// Add attributes known for \p FnID to \p Fn.
+ void addAttributes(omp::RuntimeFunction FnID, Function &Fn);
+
+ /// Type used throughout for insertion points.
+ using InsertPointTy = IRBuilder<>::InsertPoint;
+
+  /// Create a name using the platform-specific separators.
+  /// \param Parts Parts of the final name that need separation.
+  /// The created name has a first separator between the first and second part
+  /// and a second separator between all other parts.
+  /// E.g., with FirstSeparator "$" and Separator "." and
+  /// parts "p1", "p2", "p3", "p4",
+  /// the resulting name is "p1$p2.p3.p4".
+ /// The separators are retrieved from the OpenMPIRBuilderConfig.
+ std::string createPlatformSpecificName(ArrayRef<StringRef> Parts) const;
+
+ /// Callback type for variable finalization (think destructors).
+ ///
+ /// \param CodeGenIP is the insertion point at which the finalization code
+ /// should be placed.
+ ///
+ /// A finalize callback knows about all objects that need finalization, e.g.
+ /// destruction, when the scope of the currently generated construct is left
+ /// at the time, and location, the callback is invoked.
+ using FinalizeCallbackTy = std::function<void(InsertPointTy CodeGenIP)>;
+
+ struct FinalizationInfo {
+ /// The finalization callback provided by the last in-flight invocation of
+ /// createXXXX for the directive of kind DK.
+ FinalizeCallbackTy FiniCB;
+
+ /// The directive kind of the innermost directive that has an associated
+ /// region which might require finalization when it is left.
+ omp::Directive DK;
+
+ /// Flag to indicate if the directive is cancellable.
+ bool IsCancellable;
+ };
+
+ /// Push a finalization callback on the finalization stack.
+ ///
+ /// NOTE: Temporary solution until Clang CG is gone.
+ void pushFinalizationCB(const FinalizationInfo &FI) {
+ FinalizationStack.push_back(FI);
+ }
+
+ /// Pop the last finalization callback from the finalization stack.
+ ///
+ /// NOTE: Temporary solution until Clang CG is gone.
+ void popFinalizationCB() { FinalizationStack.pop_back(); }
+
+ /// Callback type for body (=inner region) code generation
+ ///
+ /// The callback takes code locations as arguments, each describing a
+ /// location where additional instructions can be inserted.
+ ///
+ /// The CodeGenIP may be in the middle of a basic block or point to the end of
+ /// it. The basic block may have a terminator or be degenerate. The callback
+ /// function may just insert instructions at that position, but also split the
+ /// block (without the Before argument of BasicBlock::splitBasicBlock such
+  /// that the identity of the split predecessor block is preserved) and insert
+ /// additional control flow, including branches that do not lead back to what
+ /// follows the CodeGenIP. Note that since the callback is allowed to split
+ /// the block, callers must assume that InsertPoints to positions in the
+ /// BasicBlock after CodeGenIP including CodeGenIP itself are invalidated. If
+  /// such InsertPoints need to be preserved, the caller can split the block
+  /// itself before calling the callback.
+ ///
+ /// AllocaIP and CodeGenIP must not point to the same position.
+ ///
+ /// \param AllocaIP is the insertion point at which new alloca instructions
+ /// should be placed. The BasicBlock it is pointing to must
+ /// not be split.
+ /// \param CodeGenIP is the insertion point at which the body code should be
+ /// placed.
+ using BodyGenCallbackTy =
+ function_ref<void(InsertPointTy AllocaIP, InsertPointTy CodeGenIP)>;
+
+  // This is created primarily for the sections construct, as llvm::function_ref
+  // (BodyGenCallbackTy) is not storable (as described in the comments of the
+  // function_ref class, function_ref contains a non-owning reference to the
+  // callable).
+ using StorableBodyGenCallbackTy =
+ std::function<void(InsertPointTy AllocaIP, InsertPointTy CodeGenIP)>;
+
+ /// Callback type for loop body code generation.
+ ///
+ /// \param CodeGenIP is the insertion point where the loop's body code must be
+ /// placed. This will be a dedicated BasicBlock with a
+ /// conditional branch from the loop condition check and
+ /// terminated with an unconditional branch to the loop
+ /// latch.
+ /// \param IndVar is the induction variable usable at the insertion point.
+ using LoopBodyGenCallbackTy =
+ function_ref<void(InsertPointTy CodeGenIP, Value *IndVar)>;
+
+ /// Callback type for variable privatization (think copy & default
+ /// constructor).
+ ///
+ /// \param AllocaIP is the insertion point at which new alloca instructions
+ /// should be placed.
+ /// \param CodeGenIP is the insertion point at which the privatization code
+ /// should be placed.
+ /// \param Original The value being copied/created, should not be used in the
+ /// generated IR.
+ /// \param Inner The equivalent of \p Original that should be used in the
+ /// generated IR; this is equal to \p Original if the value is
+ /// a pointer and can thus be passed directly, otherwise it is
+ /// an equivalent but different value.
+  /// \param ReplVal The replacement value, thus a copy or newly created version
+ /// of \p Inner.
+ ///
+ /// \returns The new insertion point where code generation continues and
+ /// \p ReplVal the replacement value.
+ using PrivatizeCallbackTy = function_ref<InsertPointTy(
+ InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Value &Original,
+ Value &Inner, Value *&ReplVal)>;
+
+  /// Description of an LLVM-IR insertion point (IP) and a debug/source location
+ /// (filename, line, column, ...).
+ struct LocationDescription {
+ LocationDescription(const IRBuilderBase &IRB)
+ : IP(IRB.saveIP()), DL(IRB.getCurrentDebugLocation()) {}
+ LocationDescription(const InsertPointTy &IP) : IP(IP) {}
+ LocationDescription(const InsertPointTy &IP, const DebugLoc &DL)
+ : IP(IP), DL(DL) {}
+ InsertPointTy IP;
+ DebugLoc DL;
+ };
+
+ /// Emitter methods for OpenMP directives.
+ ///
+ ///{
+
+ /// Generator for '#omp barrier'
+ ///
+ /// \param Loc The location where the barrier directive was encountered.
+ /// \param DK The kind of directive that caused the barrier.
+ /// \param ForceSimpleCall Flag to force a simple (=non-cancellation) barrier.
+ /// \param CheckCancelFlag Flag to indicate a cancel barrier return value
+ /// should be checked and acted upon.
+ ///
+ /// \returns The insertion point after the barrier.
+ InsertPointTy createBarrier(const LocationDescription &Loc, omp::Directive DK,
+ bool ForceSimpleCall = false,
+ bool CheckCancelFlag = true);
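+
+  // An illustrative end-to-end sketch (assuming an existing Module M and a
+  // Function F with an entry block; OMPBuilder, IRB and Loc are local names):
+  //
+  //   OpenMPIRBuilder OMPBuilder(M);
+  //   OMPBuilder.initialize();
+  //   IRBuilder<> IRB(&F.getEntryBlock());
+  //   OpenMPIRBuilder::LocationDescription Loc(IRB);
+  //   OMPBuilder.createBarrier(Loc, omp::Directive::OMPD_barrier);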
+
+ /// Generator for '#omp cancel'
+ ///
+ /// \param Loc The location where the directive was encountered.
+ /// \param IfCondition The evaluated 'if' clause expression, if any.
+  /// \param CanceledDirective The kind of directive that is canceled.
+  ///
+  /// \returns The insertion point after the cancellation.
+ InsertPointTy createCancel(const LocationDescription &Loc, Value *IfCondition,
+ omp::Directive CanceledDirective);
+
+ /// Generator for '#omp parallel'
+ ///
+ /// \param Loc The insert and source location description.
+ /// \param AllocaIP The insertion points to be used for alloca instructions.
+ /// \param BodyGenCB Callback that will generate the region code.
+ /// \param PrivCB Callback to copy a given variable (think copy constructor).
+ /// \param FiniCB Callback to finalize variable copies.
+ /// \param IfCondition The evaluated 'if' clause expression, if any.
+ /// \param NumThreads The evaluated 'num_threads' clause expression, if any.
+ /// \param ProcBind The value of the 'proc_bind' clause (see ProcBindKind).
+ /// \param IsCancellable Flag to indicate a cancellable parallel region.
+ ///
+ /// \returns The insertion position *after* the parallel.
+ IRBuilder<>::InsertPoint
+ createParallel(const LocationDescription &Loc, InsertPointTy AllocaIP,
+ BodyGenCallbackTy BodyGenCB, PrivatizeCallbackTy PrivCB,
+ FinalizeCallbackTy FiniCB, Value *IfCondition,
+ Value *NumThreads, omp::ProcBindKind ProcBind,
+ bool IsCancellable);
+
+ /// Generator for the control flow structure of an OpenMP canonical loop.
+ ///
+ /// This generator operates on the logical iteration space of the loop, i.e.
+  /// the caller only has to provide the trip count of the loop as defined by
+ /// base language semantics. The trip count is interpreted as an unsigned
+ /// integer. The induction variable passed to \p BodyGenCB will be of the same
+ /// type and run from 0 to \p TripCount - 1. It is up to the callback to
+ /// convert the logical iteration variable to the loop counter variable in the
+ /// loop body.
+ ///
+ /// \param Loc The insert and source location description. The insert
+ /// location can be between two instructions or the end of a
+ /// degenerate block (e.g. a BB under construction).
+ /// \param BodyGenCB Callback that will generate the loop body code.
+ /// \param TripCount Number of iterations the loop body is executed.
+ /// \param Name Base name used to derive BB and instruction names.
+ ///
+ /// \returns An object representing the created control flow structure which
+ /// can be used for loop-associated directives.
+ CanonicalLoopInfo *createCanonicalLoop(const LocationDescription &Loc,
+ LoopBodyGenCallbackTy BodyGenCB,
+ Value *TripCount,
+ const Twine &Name = "loop");
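+
+  // An illustrative sketch (TripCount is an existing integer Value*, and
+  // BodyFn a hypothetical callee taking the induction variable):
+  //
+  //   CanonicalLoopInfo *CLI = OMPBuilder.createCanonicalLoop(
+  //       Loc,
+  //       [&](InsertPointTy CodeGenIP, Value *IV) {
+  //         IRBuilder<> B(CodeGenIP.getBlock(), CodeGenIP.getPoint());
+  //         B.CreateCall(BodyFn, {IV});
+  //       },
+  //       TripCount);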
+
+ /// Generator for the control flow structure of an OpenMP canonical loop.
+ ///
+ /// Instead of a logical iteration space, this allows specifying user-defined
+ /// loop counter values using increment, upper- and lower bounds. To
+ /// disambiguate the terminology when counting downwards, instead of lower
+ /// bounds we use \p Start for the loop counter value in the first body
+ /// iteration.
+ ///
+ /// Consider the following limitations:
+ ///
+ /// * A loop counter space over all integer values of its bit-width cannot be
+  ///   represented. E.g., using uint8_t, a loop trip count of 256 cannot be
+  ///   stored in an 8-bit integer:
+ ///
+ /// DO I = 0, 255, 1
+ ///
+  /// * Unsigned wrapping is only supported when wrapping only "once", e.g.,
+  ///   effectively counting downwards:
+ ///
+ /// for (uint8_t i = 100u; i > 0; i += 127u)
+ ///
+ ///
+ /// TODO: May need to add additional parameters to represent:
+ ///
+ /// * Allow representing downcounting with unsigned integers.
+ ///
+ /// * Sign of the step and the comparison operator might disagree:
+ ///
+ /// for (int i = 0; i < 42; i -= 1u)
+ ///
+  ///
+ /// \param Loc The insert and source location description.
+ /// \param BodyGenCB Callback that will generate the loop body code.
+  /// \param Start     Value of the loop counter for the first iteration.
+ /// \param Stop Loop counter values past this will stop the loop.
+ /// \param Step Loop counter increment after each iteration; negative
+ /// means counting down.
+ /// \param IsSigned Whether Start, Stop and Step are signed integers.
+ /// \param InclusiveStop Whether \p Stop itself is a valid value for the loop
+ /// counter.
+ /// \param ComputeIP Insertion point for instructions computing the trip
+ /// count. Can be used to ensure the trip count is available
+ /// at the outermost loop of a loop nest. If not set,
+ /// defaults to the preheader of the generated loop.
+ /// \param Name Base name used to derive BB and instruction names.
+ ///
+ /// \returns An object representing the created control flow structure which
+ /// can be used for loop-associated directives.
+ CanonicalLoopInfo *createCanonicalLoop(const LocationDescription &Loc,
+ LoopBodyGenCallbackTy BodyGenCB,
+ Value *Start, Value *Stop, Value *Step,
+ bool IsSigned, bool InclusiveStop,
+ InsertPointTy ComputeIP = {},
+ const Twine &Name = "loop");
+
+ /// Collapse a loop nest into a single loop.
+ ///
+ /// Merges loops of a loop nest into a single CanonicalLoopNest representation
+ /// that has the same number of innermost loop iterations as the origin loop
+ /// nest. The induction variables of the input loops are derived from the
+ /// collapsed loop's induction variable. This is intended to be used to
+ /// implement OpenMP's collapse clause. Before applying a directive,
+ /// collapseLoops normalizes a loop nest to contain only a single loop and the
+ /// directive's implementation does not need to handle multiple loops itself.
+  /// This does not remove the need for directives to handle loop-nest-aware
+  /// features themselves, such as the ordered(<n>) clause or the simd
+  /// schedule-clause modifier of the worksharing-loop directive.
+ ///
+ /// Example:
+ /// \code
+ /// for (int i = 0; i < 7; ++i) // Canonical loop "i"
+ /// for (int j = 0; j < 9; ++j) // Canonical loop "j"
+ /// body(i, j);
+ /// \endcode
+ ///
+ /// After collapsing with Loops={i,j}, the loop is changed to
+ /// \code
+ /// for (int ij = 0; ij < 63; ++ij) {
+ /// int i = ij / 9;
+ /// int j = ij % 9;
+ /// body(i, j);
+ /// }
+ /// \endcode
+ ///
+ /// In the current implementation, the following limitations apply:
+ ///
+ /// * All input loops have an induction variable of the same type.
+ ///
+ /// * The collapsed loop will have the same trip count integer type as the
+ /// input loops. Therefore it is possible that the collapsed loop cannot
+ /// represent all iterations of the input loops. For instance, assuming a
+ /// 32 bit integer type, and two input loops both iterating 2^16 times, the
+  ///   theoretical trip count of the collapsed loop would be 2^32 iterations,
+  ///   which cannot be represented in a 32-bit integer. Behavior is undefined
+ /// in this case.
+ ///
+ /// * The trip counts of every input loop must be available at \p ComputeIP.
+ /// Non-rectangular loops are not yet supported.
+ ///
+ /// * At each nest level, code between a surrounding loop and its nested loop
+ /// is hoisted into the loop body, and such code will be executed more
+ /// often than before collapsing (or not at all if any inner loop iteration
+ /// has a trip count of 0). This is permitted by the OpenMP specification.
+ ///
+ /// \param DL Debug location for instructions added for collapsing,
+ /// such as instructions to compute/derive the input loop's
+ /// induction variables.
+ /// \param Loops Loops in the loop nest to collapse. Loops are specified
+ /// from outermost-to-innermost and every control flow of a
+ /// loop's body must pass through its directly nested loop.
+  /// \param ComputeIP Where additional instructions that compute the collapsed
+  ///                  trip count are inserted. If not set, defaults to before
+  ///                  the generated loop.
+ ///
+ /// \returns The CanonicalLoopInfo object representing the collapsed loop.
+ CanonicalLoopInfo *collapseLoops(DebugLoc DL,
+ ArrayRef<CanonicalLoopInfo *> Loops,
+ InsertPointTy ComputeIP);
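+
+  // Illustrative: collapsing the {i,j} nest from the example above, given
+  // CanonicalLoopInfo objects LoopI and LoopJ (outermost first):
+  //
+  //   CanonicalLoopInfo *Collapsed =
+  //       OMPBuilder.collapseLoops(DL, {LoopI, LoopJ}, /*ComputeIP=*/{});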
+
+private:
+ /// Modifies the canonical loop to be a statically-scheduled workshare loop.
+ ///
+ /// This takes a \p LoopInfo representing a canonical loop, such as the one
+ /// created by \p createCanonicalLoop and emits additional instructions to
+ /// turn it into a workshare loop. In particular, it calls to an OpenMP
+ /// runtime function in the preheader to obtain the loop bounds to be used in
+ /// the current thread, updates the relevant instructions in the canonical
+ /// loop and calls to an OpenMP runtime finalization function after the loop.
+ ///
+ /// \param DL Debug location for instructions added for the
+ /// workshare-loop construct itself.
+ /// \param CLI A descriptor of the canonical loop to workshare.
+ /// \param AllocaIP An insertion point for Alloca instructions usable in the
+ /// preheader of the loop.
+ /// \param NeedsBarrier Indicates whether a barrier must be inserted after
+ /// the loop.
+ ///
+ /// \returns Point where to insert code after the workshare construct.
+ InsertPointTy applyStaticWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
+ InsertPointTy AllocaIP,
+ bool NeedsBarrier);
+
+  /// Modifies the canonical loop to be a statically-scheduled workshare loop
+  /// with a user-specified chunk size.
+ ///
+ /// \param DL Debug location for instructions added for the
+ /// workshare-loop construct itself.
+ /// \param CLI A descriptor of the canonical loop to workshare.
+ /// \param AllocaIP An insertion point for Alloca instructions usable in
+ /// the preheader of the loop.
+ /// \param NeedsBarrier Indicates whether a barrier must be inserted after the
+ /// loop.
+ /// \param ChunkSize The user-specified chunk size.
+ ///
+ /// \returns Point where to insert code after the workshare construct.
+ InsertPointTy applyStaticChunkedWorkshareLoop(DebugLoc DL,
+ CanonicalLoopInfo *CLI,
+ InsertPointTy AllocaIP,
+ bool NeedsBarrier,
+ Value *ChunkSize);
+
+ /// Modifies the canonical loop to be a dynamically-scheduled workshare loop.
+ ///
+ /// This takes a \p LoopInfo representing a canonical loop, such as the one
+ /// created by \p createCanonicalLoop and emits additional instructions to
+  /// turn it into a workshare loop. In particular, it calls an OpenMP runtime
+  /// function in the preheader to obtain the loop bounds, and then updates the
+  /// loop counter in each iteration.
+ ///
+ /// \param DL Debug location for instructions added for the
+ /// workshare-loop construct itself.
+ /// \param CLI A descriptor of the canonical loop to workshare.
+ /// \param AllocaIP An insertion point for Alloca instructions usable in the
+ /// preheader of the loop.
+ /// \param SchedType Type of scheduling to be passed to the init function.
+  /// \param NeedsBarrier Indicates whether a barrier must be inserted after
+ /// the loop.
+ /// \param Chunk The size of loop chunk considered as a unit when
+ /// scheduling. If \p nullptr, defaults to 1.
+ ///
+ /// \returns Point where to insert code after the workshare construct.
+ InsertPointTy applyDynamicWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
+ InsertPointTy AllocaIP,
+ omp::OMPScheduleType SchedType,
+ bool NeedsBarrier,
+ Value *Chunk = nullptr);
+
+  /// Create an alternative version of the loop to support the if clause.
+  ///
+  /// An OpenMP if clause can require generating a second loop that is
+  /// executed when the if clause condition is not met. createIfVersion adds
+  /// a branch instruction to the copied loop when \p IfCond is not met.
+ ///
+ /// \param Loop Original loop which should be versioned.
+ /// \param IfCond Value which corresponds to if clause condition
+ /// \param VMap Value to value map to define relation between
+ /// original and copied loop values and loop blocks.
+ /// \param NamePrefix Optional name prefix for if.then if.else blocks.
+ void createIfVersion(CanonicalLoopInfo *Loop, Value *IfCond,
+ ValueToValueMapTy &VMap, const Twine &NamePrefix = "");
+
+public:
+ /// Modifies the canonical loop to be a workshare loop.
+ ///
+ /// This takes a \p LoopInfo representing a canonical loop, such as the one
+ /// created by \p createCanonicalLoop and emits additional instructions to
+ /// turn it into a workshare loop. In particular, it calls to an OpenMP
+ /// runtime function in the preheader to obtain the loop bounds to be used in
+ /// the current thread, updates the relevant instructions in the canonical
+ /// loop and calls to an OpenMP runtime finalization function after the loop.
+ ///
+ /// The concrete transformation is done by applyStaticWorkshareLoop,
+ /// applyStaticChunkedWorkshareLoop, or applyDynamicWorkshareLoop, depending
+ /// on the value of \p SchedKind and \p ChunkSize.
+ ///
+ /// \param DL Debug location for instructions added for the
+ /// workshare-loop construct itself.
+ /// \param CLI A descriptor of the canonical loop to workshare.
+ /// \param AllocaIP An insertion point for Alloca instructions usable in the
+ /// preheader of the loop.
+  /// \param NeedsBarrier Indicates whether a barrier must be inserted after
+ /// the loop.
+ /// \param SchedKind Scheduling algorithm to use.
+ /// \param ChunkSize The chunk size for the inner loop.
+ /// \param HasSimdModifier Whether the simd modifier is present in the
+ /// schedule clause.
+ /// \param HasMonotonicModifier Whether the monotonic modifier is present in
+ /// the schedule clause.
+ /// \param HasNonmonotonicModifier Whether the nonmonotonic modifier is
+ /// present in the schedule clause.
+ /// \param HasOrderedClause Whether the (parameterless) ordered clause is
+ /// present.
+ ///
+ /// \returns Point where to insert code after the workshare construct.
+ InsertPointTy applyWorkshareLoop(
+ DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP,
+ bool NeedsBarrier,
+ llvm::omp::ScheduleKind SchedKind = llvm::omp::OMP_SCHEDULE_Default,
+ Value *ChunkSize = nullptr, bool HasSimdModifier = false,
+ bool HasMonotonicModifier = false, bool HasNonmonotonicModifier = false,
+ bool HasOrderedClause = false);
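+
+  // Illustrative: statically schedule a canonical loop with a trailing
+  // barrier (CLI, DL and AllocaIP as set up by the caller):
+  //
+  //   OMPBuilder.applyWorkshareLoop(DL, CLI, AllocaIP,
+  //                                 /*NeedsBarrier=*/true,
+  //                                 omp::OMP_SCHEDULE_Static);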
+
+ /// Tile a loop nest.
+ ///
+ /// Tiles the loops of \p Loops by the tile sizes in \p TileSizes. Loops in
+  /// \p Loops must be perfectly nested, from outermost to innermost loop
+  /// (i.e. Loops.front() is the outermost loop). The trip count llvm::Value
+  /// of every loop and every tile size must be usable in the outermost
+ /// loop's preheader. This implies that the loop nest is rectangular.
+ ///
+ /// Example:
+ /// \code
+ /// for (int i = 0; i < 15; ++i) // Canonical loop "i"
+ /// for (int j = 0; j < 14; ++j) // Canonical loop "j"
+ /// body(i, j);
+ /// \endcode
+ ///
+ /// After tiling with Loops={i,j} and TileSizes={5,7}, the loop is changed to
+ /// \code
+ /// for (int i1 = 0; i1 < 3; ++i1)
+ /// for (int j1 = 0; j1 < 2; ++j1)
+ /// for (int i2 = 0; i2 < 5; ++i2)
+ /// for (int j2 = 0; j2 < 7; ++j2)
+  ///           body(i1*5+i2, j1*7+j2);
+ /// \endcode
+ ///
+  /// The returned vector contains the loops {i1,j1,i2,j2}. The loops i1 and j1
+  /// are referred to as the floor loops, and i2 and j2 as the tile loops.
+  /// Tiling also handles non-constant trip counts, non-constant tile sizes and
+  /// trip counts that are not multiples of the tile size. In the latter case
+  /// the tile loop
+ /// of the last floor-loop iteration will have fewer iterations than specified
+ /// as its tile size.
+ ///
+ ///
+ /// @param DL Debug location for instructions added by tiling, for
+ /// instance the floor- and tile trip count computation.
+ /// @param Loops Loops to tile. The CanonicalLoopInfo objects are
+  ///                  invalidated by this method, i.e. should not be used after
+ /// tiling.
+ /// @param TileSizes For each loop in \p Loops, the tile size for that
+  ///                  dimension.
+ ///
+ /// \returns A list of generated loops. Contains twice as many loops as the
+ /// input loop nest; the first half are the floor loops and the
+ /// second half are the tile loops.
+ std::vector<CanonicalLoopInfo *>
+ tileLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops,
+ ArrayRef<Value *> TileSizes);
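+
+  // Illustrative: tiling the {i,j} nest from the example above with sizes
+  // {5,7}; IVType names the loops' shared induction variable type:
+  //
+  //   Value *Five = ConstantInt::get(IVType, 5);
+  //   Value *Seven = ConstantInt::get(IVType, 7);
+  //   std::vector<CanonicalLoopInfo *> Tiled =
+  //       OMPBuilder.tileLoops(DL, {LoopI, LoopJ}, {Five, Seven});
+  //   // Tiled = {i1, j1, i2, j2}: floor loops first, then tile loops.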
+
+ /// Fully unroll a loop.
+ ///
+ /// Instead of unrolling the loop immediately (and duplicating its body
+ /// instructions), it is deferred to LLVM's LoopUnrollPass by adding loop
+ /// metadata.
+ ///
+ /// \param DL Debug location for instructions added by unrolling.
+ /// \param Loop The loop to unroll. The loop will be invalidated.
+ void unrollLoopFull(DebugLoc DL, CanonicalLoopInfo *Loop);
+
+ /// Fully or partially unroll a loop. How the loop is unrolled is determined
+ /// using LLVM's LoopUnrollPass.
+ ///
+ /// \param DL Debug location for instructions added by unrolling.
+ /// \param Loop The loop to unroll. The loop will be invalidated.
+ void unrollLoopHeuristic(DebugLoc DL, CanonicalLoopInfo *Loop);
+
+ /// Partially unroll a loop.
+ ///
+ /// The CanonicalLoopInfo of the unrolled loop for use with chained
+ /// loop-associated directive can be requested using \p UnrolledCLI. Not
+ /// needing the CanonicalLoopInfo allows more efficient code generation by
+ /// deferring the actual unrolling to the LoopUnrollPass using loop metadata.
+ /// A loop-associated directive applied to the unrolled loop needs to know the
+ /// new trip count which means that if using a heuristically determined unroll
+ /// factor (\p Factor == 0), that factor must be computed immediately. We are
+  /// using the same logic as the LoopUnrollPass to derive the unroll factor,
+ /// but which assumes that some canonicalization has taken place (e.g.
+ /// Mem2Reg, LICM, GVN, Inlining, etc.). That is, the heuristic will perform
+ /// better when the unrolled loop's CanonicalLoopInfo is not needed.
+ ///
+ /// \param DL Debug location for instructions added by unrolling.
+ /// \param Loop The loop to unroll. The loop will be invalidated.
+ /// \param Factor The factor to unroll the loop by. A factor of 0
+ /// indicates that a heuristic should be used to determine
+ /// the unroll-factor.
+ /// \param UnrolledCLI If non-null, receives the CanonicalLoopInfo of the
+ /// partially unrolled loop. Otherwise, uses loop metadata
+ /// to defer unrolling to the LoopUnrollPass.
+ void unrollLoopPartial(DebugLoc DL, CanonicalLoopInfo *Loop, int32_t Factor,
+ CanonicalLoopInfo **UnrolledCLI);
+
+ /// Add metadata to simd-ize a loop. If IfCond is not nullptr, the loop
+  /// is cloned. The metadata which prevents vectorization is added to the
+  /// cloned loop, which is executed when \p IfCond evaluates to false.
+ ///
+ /// \param Loop The loop to simd-ize.
+  /// \param AlignedVars The map which contains pairs of the pointer
+ /// and its corresponding alignment.
+ /// \param IfCond The value which corresponds to the if clause
+ /// condition.
+ /// \param Order The enum to map order clause.
+ /// \param Simdlen The Simdlen length to apply to the simd loop.
+ /// \param Safelen The Safelen length to apply to the simd loop.
+ void applySimd(CanonicalLoopInfo *Loop,
+ MapVector<Value *, Value *> AlignedVars, Value *IfCond,
+ omp::OrderKind Order, ConstantInt *Simdlen,
+ ConstantInt *Safelen);
+
+ /// Generator for '#omp flush'
+ ///
+  /// \param Loc The location where the flush directive was encountered.
+ void createFlush(const LocationDescription &Loc);
+
+ /// Generator for '#omp taskwait'
+ ///
+ /// \param Loc The location where the taskwait directive was encountered.
+ void createTaskwait(const LocationDescription &Loc);
+
+ /// Generator for '#omp taskyield'
+ ///
+ /// \param Loc The location where the taskyield directive was encountered.
+ void createTaskyield(const LocationDescription &Loc);
+
+ /// A struct to pack the relevant information for an OpenMP depend clause.
+ struct DependData {
+ omp::RTLDependenceKindTy DepKind = omp::RTLDependenceKindTy::DepUnknown;
+ Type *DepValueType;
+ Value *DepVal;
+ explicit DependData() = default;
+ DependData(omp::RTLDependenceKindTy DepKind, Type *DepValueType,
+ Value *DepVal)
+ : DepKind(DepKind), DepValueType(DepValueType), DepVal(DepVal) {}
+ };
+
+ /// Generator for `#omp task`
+ ///
+ /// \param Loc The location where the task construct was encountered.
+ /// \param AllocaIP The insertion point to be used for alloca instructions.
+ /// \param BodyGenCB Callback that will generate the region code.
+ /// \param Tied True if the task is tied, false if the task is untied.
+ /// \param Final i1 value which is `true` if the task is final, `false` if the
+ /// task is not final.
+ /// \param IfCondition i1 value. If it evaluates to `false`, an undeferred
+ /// task is generated, and the encountering thread must
+ /// suspend the current task region, for which execution
+ /// cannot be resumed until execution of the structured
+ /// block that is associated with the generated task is
+ /// completed.
+ InsertPointTy createTask(const LocationDescription &Loc,
+ InsertPointTy AllocaIP, BodyGenCallbackTy BodyGenCB,
+ bool Tied = true, Value *Final = nullptr,
+ Value *IfCondition = nullptr,
+ SmallVector<DependData> Dependencies = {});
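+
+  // Illustrative: a tied task with one 'in' dependence on a hypothetical
+  // i32 variable whose address is XAddr:
+  //
+  //   OpenMPIRBuilder::DependData Dep(omp::RTLDependenceKindTy::DepIn,
+  //                                   Builder.getInt32Ty(), XAddr);
+  //   OMPBuilder.createTask(Loc, AllocaIP, BodyGenCB, /*Tied=*/true,
+  //                         /*Final=*/nullptr, /*IfCondition=*/nullptr,
+  //                         {Dep});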
+
+ /// Generator for the taskgroup construct
+ ///
+ /// \param Loc The location where the taskgroup construct was encountered.
+ /// \param AllocaIP The insertion point to be used for alloca instructions.
+ /// \param BodyGenCB Callback that will generate the region code.
+ InsertPointTy createTaskgroup(const LocationDescription &Loc,
+ InsertPointTy AllocaIP,
+ BodyGenCallbackTy BodyGenCB);
+
+ /// Functions used to generate reductions. Such functions take two Values
+ /// representing LHS and RHS of the reduction, respectively, and a reference
+ /// to the value that is updated to refer to the reduction result.
+ using ReductionGenTy =
+ function_ref<InsertPointTy(InsertPointTy, Value *, Value *, Value *&)>;
+
+ /// Functions used to generate atomic reductions. Such functions take two
+ /// Values representing pointers to LHS and RHS of the reduction, as well as
+ /// the element type of these pointers. They are expected to atomically
+ /// update the LHS to the reduced value.
+ using AtomicReductionGenTy =
+ function_ref<InsertPointTy(InsertPointTy, Type *, Value *, Value *)>;
+
+ /// Information about an OpenMP reduction.
+ struct ReductionInfo {
+ ReductionInfo(Type *ElementType, Value *Variable, Value *PrivateVariable,
+ ReductionGenTy ReductionGen,
+ AtomicReductionGenTy AtomicReductionGen)
+ : ElementType(ElementType), Variable(Variable),
+ PrivateVariable(PrivateVariable), ReductionGen(ReductionGen),
+ AtomicReductionGen(AtomicReductionGen) {
+ assert(cast<PointerType>(Variable->getType())
+ ->isOpaqueOrPointeeTypeMatches(ElementType) && "Invalid elem type");
+ }
+
+ /// Reduction element type, must match pointee type of variable.
+ Type *ElementType;
+
+ /// Reduction variable of pointer type.
+ Value *Variable;
+
+ /// Thread-private partial reduction variable.
+ Value *PrivateVariable;
+
+ /// Callback for generating the reduction body. The IR produced by this will
+ /// be used to combine two values in a thread-safe context, e.g., under
+ /// lock or within the same thread, and therefore need not be atomic.
+ ReductionGenTy ReductionGen;
+
+ /// Callback for generating the atomic reduction body, may be null. The IR
+ /// produced by this will be used to atomically combine two values during
+ /// reduction. If null, the implementation will use the non-atomic version
+ /// along with the appropriate synchronization mechanisms.
+ AtomicReductionGenTy AtomicReductionGen;
+ };
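+
+  // Illustrative: a ReductionGen callback for an integer sum, combining LHS
+  // and RHS and reporting the combined value through Res:
+  //
+  //   auto SumGen = [](InsertPointTy IP, Value *LHS, Value *RHS,
+  //                    Value *&Res) -> InsertPointTy {
+  //     IRBuilder<> B(IP.getBlock(), IP.getPoint());
+  //     Res = B.CreateAdd(LHS, RHS, "red.add");
+  //     return B.saveIP();
+  //   };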
+
+ // TODO: provide atomic and non-atomic reduction generators for reduction
+ // operators defined by the OpenMP specification.
+
+ /// Generator for '#omp reduction'.
+ ///
+ /// Emits the IR instructing the runtime to perform the specific kind of
+ /// reductions. Expects reduction variables to have been privatized and
+ /// initialized to reduction-neutral values separately. Emits the calls to
+ /// runtime functions as well as the reduction function and the basic blocks
+ /// performing the reduction atomically and non-atomically.
+ ///
+ /// The code emitted for the following:
+ ///
+ /// \code
+ /// type var_1;
+ /// type var_2;
+ /// #pragma omp <directive> reduction(reduction-op:var_1,var_2)
+ /// /* body */;
+ /// \endcode
+ ///
+ /// corresponds to the following sketch.
+ ///
+ /// \code
+ /// void _outlined_par() {
+ /// // N is the number of different reductions.
+ /// void *red_array[] = {privatized_var_1, privatized_var_2, ...};
+ /// switch(__kmpc_reduce(..., N, /*size of data in red array*/, red_array,
+ /// _omp_reduction_func,
+ /// _gomp_critical_user.reduction.var)) {
+ /// case 1: {
+ /// var_1 = var_1 <reduction-op> privatized_var_1;
+ /// var_2 = var_2 <reduction-op> privatized_var_2;
+ /// // ...
+ /// __kmpc_end_reduce(...);
+ /// break;
+ /// }
+ /// case 2: {
+ /// _Atomic<ReductionOp>(var_1, privatized_var_1);
+ /// _Atomic<ReductionOp>(var_2, privatized_var_2);
+ /// // ...
+ /// break;
+ /// }
+ /// default: break;
+ /// }
+ /// }
+ ///
+ /// void _omp_reduction_func(void **lhs, void **rhs) {
+ /// *(type *)lhs[0] = *(type *)lhs[0] <reduction-op> *(type *)rhs[0];
+ /// *(type *)lhs[1] = *(type *)lhs[1] <reduction-op> *(type *)rhs[1];
+ /// // ...
+ /// }
+ /// \endcode
+ ///
+ /// \param Loc The location where the reduction was
+  ///                           encountered. Must be within the associated
+ /// directive and after the last local access to the
+ /// reduction variables.
+ /// \param AllocaIP An insertion point suitable for allocas usable
+ /// in reductions.
+ /// \param ReductionInfos A list of info on each reduction variable.
+ /// \param IsNoWait A flag set if the reduction is marked as nowait.
+ InsertPointTy createReductions(const LocationDescription &Loc,
+ InsertPointTy AllocaIP,
+ ArrayRef<ReductionInfo> ReductionInfos,
+ bool IsNoWait = false);
+
+ ///}
+
+ /// Return the insertion point used by the underlying IRBuilder.
+ InsertPointTy getInsertionPoint() { return Builder.saveIP(); }
+
+ /// Update the internal location to \p Loc.
+ bool updateToLocation(const LocationDescription &Loc) {
+ Builder.restoreIP(Loc.IP);
+ Builder.SetCurrentDebugLocation(Loc.DL);
+ return Loc.IP.getBlock() != nullptr;
+ }
+
+ /// Return the function declaration for the runtime function with \p FnID.
+ FunctionCallee getOrCreateRuntimeFunction(Module &M,
+ omp::RuntimeFunction FnID);
+
+ Function *getOrCreateRuntimeFunctionPtr(omp::RuntimeFunction FnID);
+
+ /// Return the (LLVM-IR) string describing the source location \p LocStr.
+ Constant *getOrCreateSrcLocStr(StringRef LocStr, uint32_t &SrcLocStrSize);
+
+ /// Return the (LLVM-IR) string describing the default source location.
+ Constant *getOrCreateDefaultSrcLocStr(uint32_t &SrcLocStrSize);
+
+ /// Return the (LLVM-IR) string describing the source location identified by
+ /// the arguments.
+ Constant *getOrCreateSrcLocStr(StringRef FunctionName, StringRef FileName,
+ unsigned Line, unsigned Column,
+ uint32_t &SrcLocStrSize);
+
+ /// Return the (LLVM-IR) string describing the DebugLoc \p DL. Use \p F as
+ /// fallback if \p DL does not specify the function name.
+ Constant *getOrCreateSrcLocStr(DebugLoc DL, uint32_t &SrcLocStrSize,
+ Function *F = nullptr);
+
+ /// Return the (LLVM-IR) string describing the source location \p Loc.
+ Constant *getOrCreateSrcLocStr(const LocationDescription &Loc,
+ uint32_t &SrcLocStrSize);
+
+ /// Return an ident_t* encoding the source location \p SrcLocStr and \p Flags.
+  /// TODO: Create an enum class for the Reserve2Flags.
+ Constant *getOrCreateIdent(Constant *SrcLocStr, uint32_t SrcLocStrSize,
+ omp::IdentFlag Flags = omp::IdentFlag(0),
+ unsigned Reserve2Flags = 0);
+
+ /// Create a hidden global flag \p Name in the module with initial value \p
+ /// Value.
+ GlobalValue *createGlobalFlag(unsigned Value, StringRef Name);
+
+ /// Create an offloading section struct used to register this global at
+ /// runtime.
+ ///
+  /// Type: struct __tgt_offload_entry {
+  ///   void    *addr;      // Pointer to the offload entry info
+  ///                       // (function or global).
+  ///   char    *name;      // Name of the function or global.
+  ///   size_t  size;       // Size of the entry info (0 if it is a function).
+  ///   int32_t flags;
+  ///   int32_t reserved;
+  /// };
+ ///
+ /// \param Addr The pointer to the global being registered.
+ /// \param Name The symbol name associated with the global.
+ /// \param Size The size in bytes of the global (0 for functions).
+ /// \param Flags Flags associated with the entry.
+ /// \param SectionName The section this entry will be placed at.
+ void emitOffloadingEntry(Constant *Addr, StringRef Name, uint64_t Size,
+ int32_t Flags,
+ StringRef SectionName = "omp_offloading_entries");
+
+ /// Generate control flow and cleanup for cancellation.
+ ///
+ /// \param CancelFlag Flag indicating if the cancellation is performed.
+  /// \param CanceledDirective The kind of directive that is canceled.
+ /// \param ExitCB Extra code to be generated in the exit block.
+ void emitCancelationCheckImpl(Value *CancelFlag,
+ omp::Directive CanceledDirective,
+ FinalizeCallbackTy ExitCB = {});
+
+ /// Generate a target region entry call.
+ ///
+ /// \param Loc The location at which the request originated and is fulfilled.
+ /// \param Return Return value of the created function returned by reference.
+ /// \param DeviceID Identifier for the device via the 'device' clause.
+  /// \param NumTeams Number of teams for the region via the 'num_teams' clause
+ /// or 0 if unspecified and -1 if there is no 'teams' clause.
+ /// \param NumThreads Number of threads via the 'thread_limit' clause.
+ /// \param HostPtr Pointer to the host-side pointer of the target kernel.
+ /// \param KernelArgs Array of arguments to the kernel.
+ InsertPointTy emitTargetKernel(const LocationDescription &Loc, Value *&Return,
+ Value *Ident, Value *DeviceID, Value *NumTeams,
+ Value *NumThreads, Value *HostPtr,
+ ArrayRef<Value *> KernelArgs);
+
+ /// Generate a barrier runtime call.
+ ///
+ /// \param Loc The location at which the request originated and is fulfilled.
+ /// \param DK The directive which caused the barrier
+ /// \param ForceSimpleCall Flag to force a simple (=non-cancellation) barrier.
+ /// \param CheckCancelFlag Flag to indicate a cancel barrier return value
+ /// should be checked and acted upon.
+ ///
+ /// \returns The insertion point after the barrier.
+ InsertPointTy emitBarrierImpl(const LocationDescription &Loc,
+ omp::Directive DK, bool ForceSimpleCall,
+ bool CheckCancelFlag);
+
+ /// Generate a flush runtime call.
+ ///
+ /// \param Loc The location at which the request originated and is fulfilled.
+ void emitFlush(const LocationDescription &Loc);
+
+ /// The finalization stack made up of finalize callbacks currently in-flight,
+ /// wrapped into FinalizationInfo objects that reference also the finalization
+ /// target block and the kind of cancellable directive.
+ SmallVector<FinalizationInfo, 8> FinalizationStack;
+
+ /// Return true if the last entry in the finalization stack is of kind \p DK
+ /// and cancellable.
+ bool isLastFinalizationInfoCancellable(omp::Directive DK) {
+ return !FinalizationStack.empty() &&
+ FinalizationStack.back().IsCancellable &&
+ FinalizationStack.back().DK == DK;
+ }
+
+ /// Generate a taskwait runtime call.
+ ///
+ /// \param Loc The location at which the request originated and is fulfilled.
+ void emitTaskwaitImpl(const LocationDescription &Loc);
+
+ /// Generate a taskyield runtime call.
+ ///
+ /// \param Loc The location at which the request originated and is fulfilled.
+ void emitTaskyieldImpl(const LocationDescription &Loc);
+
+ /// Return the current thread ID.
+ ///
+ /// \param Ident The ident (ident_t*) describing the query origin.
+ Value *getOrCreateThreadID(Value *Ident);
+
+ /// The OpenMPIRBuilder Configuration
+ OpenMPIRBuilderConfig Config;
+
+ /// The underlying LLVM-IR module
+ Module &M;
+
+ /// The LLVM-IR Builder used to create IR.
+ IRBuilder<> Builder;
+
+ /// Map to remember source location strings
+ StringMap<Constant *> SrcLocStrMap;
+
+ /// Map to remember existing ident_t*.
+ DenseMap<std::pair<Constant *, uint64_t>, Constant *> IdentMap;
+
+ /// Helper that contains information about regions we need to outline
+ /// during finalization.
+ struct OutlineInfo {
+ using PostOutlineCBTy = std::function<void(Function &)>;
+ PostOutlineCBTy PostOutlineCB;
+ BasicBlock *EntryBB, *ExitBB, *OuterAllocaBB;
+ SmallVector<Value *, 2> ExcludeArgsFromAggregate;
+
+ /// Collect all blocks in between EntryBB and ExitBB in both the given
+ /// vector and set.
+ void collectBlocks(SmallPtrSetImpl<BasicBlock *> &BlockSet,
+ SmallVectorImpl<BasicBlock *> &BlockVector);
+
+ /// Return the function that contains the region to be outlined.
+ Function *getFunction() const { return EntryBB->getParent(); }
+ };
+
+ /// Collection of regions that need to be outlined during finalization.
+ SmallVector<OutlineInfo, 16> OutlineInfos;
+
+ /// Collection of owned canonical loop objects that eventually need to be
+  /// freed.
+ std::forward_list<CanonicalLoopInfo> LoopInfos;
+
+ /// Add a new region that will be outlined later.
+ void addOutlineInfo(OutlineInfo &&OI) { OutlineInfos.emplace_back(OI); }
+
+ /// An ordered map of auto-generated variables to their unique names.
+ /// It stores variables with the following names: 1) ".gomp_critical_user_" +
+ /// <critical_section_name> + ".var" for "omp critical" directives; 2)
+ /// <mangled_name_for_global_var> + ".cache." for cache for threadprivate
+ /// variables.
+ StringMap<Constant*, BumpPtrAllocator> InternalVars;
+
+ /// Create the global variable holding the offload mappings information.
+ GlobalVariable *createOffloadMaptypes(SmallVectorImpl<uint64_t> &Mappings,
+ std::string VarName);
+
+ /// Create the global variable holding the offload names information.
+ GlobalVariable *
+ createOffloadMapnames(SmallVectorImpl<llvm::Constant *> &Names,
+ std::string VarName);
+
+ struct MapperAllocas {
+ AllocaInst *ArgsBase = nullptr;
+ AllocaInst *Args = nullptr;
+ AllocaInst *ArgSizes = nullptr;
+ };
+
+  /// Create the alloca instructions used in calls to mapper functions.
+ void createMapperAllocas(const LocationDescription &Loc,
+ InsertPointTy AllocaIP, unsigned NumOperands,
+ struct MapperAllocas &MapperAllocas);
+
+ /// Create the call for the target mapper function.
+ /// \param Loc The source location description.
+ /// \param MapperFunc Function to be called.
+ /// \param SrcLocInfo Source location information global.
+ /// \param MaptypesArg The argument types.
+ /// \param MapnamesArg The argument names.
+ /// \param MapperAllocas The AllocaInst used for the call.
+ /// \param DeviceID Device ID for the call.
+ /// \param NumOperands Number of operands in the call.
+ void emitMapperCall(const LocationDescription &Loc, Function *MapperFunc,
+ Value *SrcLocInfo, Value *MaptypesArg, Value *MapnamesArg,
+ struct MapperAllocas &MapperAllocas, int64_t DeviceID,
+ unsigned NumOperands);
+
+ /// Container for the arguments used to pass data to the runtime library.
+ struct TargetDataRTArgs {
+ explicit TargetDataRTArgs() {}
+ /// The array of base pointer passed to the runtime library.
+ Value *BasePointersArray = nullptr;
+ /// The array of section pointers passed to the runtime library.
+ Value *PointersArray = nullptr;
+ /// The array of sizes passed to the runtime library.
+ Value *SizesArray = nullptr;
+ /// The array of map types passed to the runtime library for the beginning
+ /// of the region or for the entire region if there are no separate map
+ /// types for the region end.
+ Value *MapTypesArray = nullptr;
+ /// The array of map types passed to the runtime library for the end of the
+ /// region, or nullptr if there are no separate map types for the region
+ /// end.
+ Value *MapTypesArrayEnd = nullptr;
+ /// The array of user-defined mappers passed to the runtime library.
+ Value *MappersArray = nullptr;
+ /// The array of original declaration names of mapped pointers sent to the
+  /// runtime library for debugging.
+ Value *MapNamesArray = nullptr;
+ };
+
+ /// Struct that keeps the information that should be kept throughout
+ /// a 'target data' region.
+ class TargetDataInfo {
+    /// Set to true if device pointer information has to be obtained.
+ bool RequiresDevicePointerInfo = false;
+ /// Set to true if Clang emits separate runtime calls for the beginning and
+ /// end of the region. These calls might have separate map type arrays.
+ bool SeparateBeginEndCalls = false;
+
+ public:
+ TargetDataRTArgs RTArgs;
+
+ /// Indicate whether any user-defined mapper exists.
+ bool HasMapper = false;
+ /// The total number of pointers passed to the runtime library.
+ unsigned NumberOfPtrs = 0u;
+
+ explicit TargetDataInfo() {}
+ explicit TargetDataInfo(bool RequiresDevicePointerInfo,
+ bool SeparateBeginEndCalls)
+ : RequiresDevicePointerInfo(RequiresDevicePointerInfo),
+ SeparateBeginEndCalls(SeparateBeginEndCalls) {}
+ /// Clear information about the data arrays.
+ void clearArrayInfo() {
+ RTArgs = TargetDataRTArgs();
+ HasMapper = false;
+ NumberOfPtrs = 0u;
+ }
+ /// Return true if the current target data information has valid arrays.
+ bool isValid() {
+ return RTArgs.BasePointersArray && RTArgs.PointersArray &&
+ RTArgs.SizesArray && RTArgs.MapTypesArray &&
+ (!HasMapper || RTArgs.MappersArray) && NumberOfPtrs;
+ }
+ bool requiresDevicePointerInfo() { return RequiresDevicePointerInfo; }
+ bool separateBeginEndCalls() { return SeparateBeginEndCalls; }
+ };
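+
+  // Illustrative: Clang-style target-data bookkeeping with separate begin
+  // and end runtime calls:
+  //
+  //   TargetDataInfo Info(/*RequiresDevicePointerInfo=*/true,
+  //                       /*SeparateBeginEndCalls=*/true);
+  //   Info.clearArrayInfo();
+  //   assert(!Info.isValid() && "no offloading arrays emitted yet");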
+
+ /// Emit the arguments to be passed to the runtime library based on the
+ /// arrays of base pointers, pointers, sizes, map types, and mappers. If
+ /// ForEndCall, emit map types to be passed for the end of the region instead
+ /// of the beginning.
+ void emitOffloadingArraysArgument(IRBuilderBase &Builder,
+ OpenMPIRBuilder::TargetDataRTArgs &RTArgs,
+ OpenMPIRBuilder::TargetDataInfo &Info,
+ bool EmitDebug = false,
+ bool ForEndCall = false);
+
+ /// Creates offloading entry for the provided entry ID \a ID, address \a
+ /// Addr, size \a Size, and flags \a Flags.
+ void createOffloadEntry(Constant *ID, Constant *Addr, uint64_t Size,
+ int32_t Flags, GlobalValue::LinkageTypes);
+
+ /// The kind of errors that can occur when emitting the offload entries and
+ /// metadata.
+ enum EmitMetadataErrorKind {
+ EMIT_MD_TARGET_REGION_ERROR,
+ EMIT_MD_DECLARE_TARGET_ERROR,
+ EMIT_MD_GLOBAL_VAR_LINK_ERROR
+ };
+
+  /// Callback function type for reporting errors during metadata emission.
+ using EmitMetadataErrorReportFunctionTy =
+ std::function<void(EmitMetadataErrorKind, TargetRegionEntryInfo)>;
+
+ // Emit the offloading entries and metadata so that the device codegen side
+ // can easily figure out what to emit. The produced metadata looks like
+ // this:
+ //
+ // !omp_offload.info = !{!1, ...}
+ //
+  // We only generate metadata for functions that contain target regions.
+ void createOffloadEntriesAndInfoMetadata(
+ OffloadEntriesInfoManager &OffloadEntriesInfoManager,
+ EmitMetadataErrorReportFunctionTy &ErrorReportFunction);
+
+public:
+ /// Generator for __kmpc_copyprivate
+ ///
+ /// \param Loc The source location description.
+ /// \param BufSize Number of elements in the buffer.
+ /// \param CpyBuf List of pointers to data to be copied.
+ /// \param CpyFn function to call for copying data.
+ /// \param DidIt flag variable; 1 for 'single' thread, 0 otherwise.
+ ///
+ /// \return The insertion position *after* the CopyPrivate call.
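+ ///
+ /// A usage sketch (illustrative; the operand values are assumed to have been
+ /// emitted already):
+ /// \code{.cpp}
+ ///   // Broadcast data from the thread that executed the 'single' region;
+ ///   // DidIt is 1 on that thread and 0 elsewhere.
+ ///   OMPBuilder.createCopyPrivate(Loc, BufSize, CpyBuf, CpyFn, DidIt);
+ /// \endcode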
+ InsertPointTy createCopyPrivate(const LocationDescription &Loc,
+ llvm::Value *BufSize, llvm::Value *CpyBuf,
+ llvm::Value *CpyFn, llvm::Value *DidIt);
+
+ /// Generator for '#omp single'
+ ///
+ /// \param Loc The source location description.
+ /// \param BodyGenCB Callback that will generate the region code.
+ /// \param FiniCB Callback to finalize variable copies.
+ /// \param IsNowait If false, a barrier is emitted.
+ /// \param DidIt Local variable used as a flag to indicate 'single' thread
+ ///
+ /// \returns The insertion position *after* the single call.
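+ ///
+ /// A usage sketch (illustrative; `OMPBuilder`, `Builder`, and `Loc` are
+ /// assumed to be a configured OpenMPIRBuilder, an IRBuilder, and a location
+ /// description):
+ /// \code{.cpp}
+ ///   auto BodyGenCB = [&](InsertPointTy AllocaIP, InsertPointTy CodeGenIP) {
+ ///     // Emit the body of the 'single' region at CodeGenIP.
+ ///   };
+ ///   auto FiniCB = [&](InsertPointTy CodeGenIP) {
+ ///     // Emit finalization code, e.g. for copy-out.
+ ///   };
+ ///   Builder.restoreIP(OMPBuilder.createSingle(Loc, BodyGenCB, FiniCB,
+ ///                                             /*IsNowait=*/false,
+ ///                                             /*DidIt=*/nullptr));
+ /// \endcode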
+ InsertPointTy createSingle(const LocationDescription &Loc,
+ BodyGenCallbackTy BodyGenCB,
+ FinalizeCallbackTy FiniCB, bool IsNowait,
+ llvm::Value *DidIt);
+
+ /// Generator for '#omp master'
+ ///
+ /// \param Loc The insert and source location description.
+ /// \param BodyGenCB Callback that will generate the region code.
+ /// \param FiniCB Callback to finalize variable copies.
+ ///
+ /// \returns The insertion position *after* the master.
+ InsertPointTy createMaster(const LocationDescription &Loc,
+ BodyGenCallbackTy BodyGenCB,
+ FinalizeCallbackTy FiniCB);
+
+ /// Generator for '#omp masked'
+ ///
+ /// \param Loc The insert and source location description.
+ /// \param BodyGenCB Callback that will generate the region code.
+ /// \param FiniCB Callback to finalize variable copies.
+ ///
+ /// \returns The insertion position *after* the masked.
+ InsertPointTy createMasked(const LocationDescription &Loc,
+ BodyGenCallbackTy BodyGenCB,
+ FinalizeCallbackTy FiniCB, Value *Filter);
+
+ /// Generator for '#omp critical'
+ ///
+ /// \param Loc The insert and source location description.
+ /// \param BodyGenCB Callback that will generate the region body code.
+ /// \param FiniCB Callback to finalize variable copies.
+ /// \param CriticalName Name of the lock used by the critical directive.
+ /// \param HintInst Hint instruction for the hint clause associated with the
+ /// critical directive.
+ ///
+ /// \returns The insertion position *after* the critical.
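+ ///
+ /// A usage sketch (illustrative; callbacks as for createSingle):
+ /// \code{.cpp}
+ ///   Builder.restoreIP(OMPBuilder.createCritical(
+ ///       Loc, BodyGenCB, FiniCB, /*CriticalName=*/"lock",
+ ///       /*HintInst=*/nullptr));
+ /// \endcode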
+ InsertPointTy createCritical(const LocationDescription &Loc,
+ BodyGenCallbackTy BodyGenCB,
+ FinalizeCallbackTy FiniCB,
+ StringRef CriticalName, Value *HintInst);
+
+ /// Generator for '#omp ordered depend (source | sink)'
+ ///
+ /// \param Loc The insert and source location description.
+ /// \param AllocaIP The insertion point to be used for alloca instructions.
+ /// \param NumLoops The number of loops in depend clause.
+ /// \param StoreValues The values to be stored in the dependence vector.
+ /// \param Name The name of alloca instruction.
+ /// \param IsDependSource If true, depend source; otherwise, depend sink.
+ ///
+ /// \return The insertion position *after* the ordered.
+ InsertPointTy createOrderedDepend(const LocationDescription &Loc,
+ InsertPointTy AllocaIP, unsigned NumLoops,
+ ArrayRef<llvm::Value *> StoreValues,
+ const Twine &Name, bool IsDependSource);
+
+ /// Generator for '#omp ordered [threads | simd]'
+ ///
+ /// \param Loc The insert and source location description.
+ /// \param BodyGenCB Callback that will generate the region code.
+ /// \param FiniCB Callback to finalize variable copies.
+ /// \param IsThreads If true, the construct has a 'threads' clause or no
+ /// clause; otherwise, it has a 'simd' clause.
+ ///
+ /// \returns The insertion position *after* the ordered.
+ InsertPointTy createOrderedThreadsSimd(const LocationDescription &Loc,
+ BodyGenCallbackTy BodyGenCB,
+ FinalizeCallbackTy FiniCB,
+ bool IsThreads);
+
+ /// Generator for '#omp sections'
+ ///
+ /// \param Loc The insert and source location description.
+ /// \param AllocaIP The insertion points to be used for alloca instructions.
+ /// \param SectionCBs Callbacks that will generate body of each section.
+ /// \param PrivCB Callback to copy a given variable (think copy constructor).
+ /// \param FiniCB Callback to finalize variable copies.
+ /// \param IsCancellable Flag to indicate a cancellable parallel region.
+ /// \param IsNowait If true, the barrier that ensures all sections are
+ /// executed before moving forward will not be generated.
+ /// \returns The insertion position *after* the sections.
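+ ///
+ /// A usage sketch (illustrative; `AllocaIP`, `PrivCB`, and `FiniCB` are
+ /// assumed to be in scope):
+ /// \code{.cpp}
+ ///   SmallVector<StorableBodyGenCallbackTy, 2> SectionCBs;
+ ///   SectionCBs.push_back([&](InsertPointTy AllocaIP,
+ ///                            InsertPointTy CodeGenIP) {
+ ///     // Emit the body of the first '#pragma omp section'.
+ ///   });
+ ///   // ... one callback per section ...
+ ///   Builder.restoreIP(OMPBuilder.createSections(
+ ///       Loc, AllocaIP, SectionCBs, PrivCB, FiniCB,
+ ///       /*IsCancellable=*/false, /*IsNowait=*/false));
+ /// \endcode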
+ InsertPointTy createSections(const LocationDescription &Loc,
+ InsertPointTy AllocaIP,
+ ArrayRef<StorableBodyGenCallbackTy> SectionCBs,
+ PrivatizeCallbackTy PrivCB,
+ FinalizeCallbackTy FiniCB, bool IsCancellable,
+ bool IsNowait);
+
+ /// Generator for '#omp section'
+ ///
+ /// \param Loc The insert and source location description.
+ /// \param BodyGenCB Callback that will generate the region body code.
+ /// \param FiniCB Callback to finalize variable copies.
+ /// \returns The insertion position *after* the section.
+ InsertPointTy createSection(const LocationDescription &Loc,
+ BodyGenCallbackTy BodyGenCB,
+ FinalizeCallbackTy FiniCB);
+
+ /// Generate conditional branch and relevant BasicBlocks through which private
+ /// threads copy the 'copyin' variables from the master copy to their
+ /// threadprivate copies.
+ ///
+ /// \param IP insertion block for the copyin conditional
+ /// \param MasterAddr a pointer to the master variable
+ /// \param PrivateAddr a pointer to the threadprivate variable
+ /// \param IntPtrTy Pointer size type
+ /// \param BranchtoEnd Create a branch between the copyin.not.master blocks
+ /// and the copy.in.end block
+ ///
+ /// \returns The insertion point where the copying operation is to be emitted.
+ InsertPointTy createCopyinClauseBlocks(InsertPointTy IP, Value *MasterAddr,
+ Value *PrivateAddr,
+ llvm::IntegerType *IntPtrTy,
+ bool BranchtoEnd = true);
+
+ /// Create a runtime call for __kmpc_alloc
+ ///
+ /// \param Loc The insert and source location description.
+ /// \param Size Size of allocated memory space
+ /// \param Allocator Allocator information instruction
+ /// \param Name Name of call Instruction for OMP_alloc
+ ///
+ /// \returns CallInst to the OMP_Alloc call
+ CallInst *createOMPAlloc(const LocationDescription &Loc, Value *Size,
+ Value *Allocator, std::string Name = "");
+
+ /// Create a runtime call for __kmpc_free
+ ///
+ /// \param Loc The insert and source location description.
+ /// \param Addr Address of memory space to be freed
+ /// \param Allocator Allocator information instruction
+ /// \param Name Name of call Instruction for OMP_Free
+ ///
+ /// \returns CallInst to the OMP_Free call
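+ ///
+ /// A paired alloc/free sketch (illustrative; `Allocator` is an assumed
+ /// allocator-handle value):
+ /// \code{.cpp}
+ ///   CallInst *Ptr = OMPBuilder.createOMPAlloc(
+ ///       Loc, Builder.getInt64(1024), Allocator, "buf");
+ ///   // ... use the allocation ...
+ ///   OMPBuilder.createOMPFree(Loc, Ptr, Allocator);
+ /// \endcode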
+ CallInst *createOMPFree(const LocationDescription &Loc, Value *Addr,
+ Value *Allocator, std::string Name = "");
+
+ /// Create a runtime call for __kmpc_threadprivate_cached
+ ///
+ /// \param Loc The insert and source location description.
+ /// \param Pointer pointer to data to be cached
+ /// \param Size size of data to be cached
+ /// \param Name Name of call Instruction for callinst
+ ///
+ /// \returns CallInst to the thread private cache call.
+ CallInst *createCachedThreadPrivate(const LocationDescription &Loc,
+ llvm::Value *Pointer,
+ llvm::ConstantInt *Size,
+ const llvm::Twine &Name = Twine(""));
+
+ /// Create a runtime call for __tgt_interop_init
+ ///
+ /// \param Loc The insert and source location description.
+ /// \param InteropVar variable to be allocated
+ /// \param InteropType type of interop operation
+ /// \param Device device to which offloading will occur
+ /// \param NumDependences number of dependence variables
+ /// \param DependenceAddress pointer to dependence variables
+ /// \param HaveNowaitClause does nowait clause exist
+ ///
+ /// \returns CallInst to the __tgt_interop_init call
+ CallInst *createOMPInteropInit(const LocationDescription &Loc,
+ Value *InteropVar,
+ omp::OMPInteropType InteropType, Value *Device,
+ Value *NumDependences,
+ Value *DependenceAddress,
+ bool HaveNowaitClause);
+
+ /// Create a runtime call for __tgt_interop_destroy
+ ///
+ /// \param Loc The insert and source location description.
+ /// \param InteropVar variable to be allocated
+ /// \param Device device to which offloading will occur
+ /// \param NumDependences number of dependence variables
+ /// \param DependenceAddress pointer to dependence variables
+ /// \param HaveNowaitClause does nowait clause exist
+ ///
+ /// \returns CallInst to the __tgt_interop_destroy call
+ CallInst *createOMPInteropDestroy(const LocationDescription &Loc,
+ Value *InteropVar, Value *Device,
+ Value *NumDependences,
+ Value *DependenceAddress,
+ bool HaveNowaitClause);
+
+ /// Create a runtime call for __tgt_interop_use
+ ///
+ /// \param Loc The insert and source location description.
+ /// \param InteropVar variable to be allocated
+ /// \param Device device to which offloading will occur
+ /// \param NumDependences number of dependence variables
+ /// \param DependenceAddress pointer to dependence variables
+ /// \param HaveNowaitClause does nowait clause exist
+ ///
+ /// \returns CallInst to the __tgt_interop_use call
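+ ///
+ /// A combined init/use/destroy sketch (illustrative; the operand values are
+ /// assumed to have been emitted already):
+ /// \code{.cpp}
+ ///   OMPBuilder.createOMPInteropInit(Loc, InteropVar,
+ ///                                   omp::OMPInteropType::Target, Device,
+ ///                                   NumDeps, DepAddr,
+ ///                                   /*HaveNowaitClause=*/false);
+ ///   OMPBuilder.createOMPInteropUse(Loc, InteropVar, Device, NumDeps,
+ ///                                  DepAddr, /*HaveNowaitClause=*/false);
+ ///   OMPBuilder.createOMPInteropDestroy(Loc, InteropVar, Device, NumDeps,
+ ///                                      DepAddr, /*HaveNowaitClause=*/false);
+ /// \endcode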
+ CallInst *createOMPInteropUse(const LocationDescription &Loc,
+ Value *InteropVar, Value *Device,
+ Value *NumDependences, Value *DependenceAddress,
+ bool HaveNowaitClause);
+
+ /// The `omp target` interface
+ ///
+ /// For more information about the usage of this interface,
+ /// \see openmp/libomptarget/deviceRTLs/common/include/target.h
+ ///
+ ///{
+
+ /// Create a runtime call for kmpc_target_init
+ ///
+ /// \param Loc The insert and source location description.
+ /// \param IsSPMD Flag to indicate if the kernel is an SPMD kernel or not.
+ InsertPointTy createTargetInit(const LocationDescription &Loc, bool IsSPMD);
+
+ /// Create a runtime call for kmpc_target_deinit
+ ///
+ /// \param Loc The insert and source location description.
+ /// \param IsSPMD Flag to indicate if the kernel is an SPMD kernel or not.
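+ ///
+ /// A device-side prologue/epilogue sketch for a generic (non-SPMD) kernel
+ /// (illustrative):
+ /// \code{.cpp}
+ ///   Builder.restoreIP(OMPBuilder.createTargetInit(Loc, /*IsSPMD=*/false));
+ ///   // ... emit the kernel body ...
+ ///   OMPBuilder.createTargetDeinit(Loc, /*IsSPMD=*/false);
+ /// \endcode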
+ void createTargetDeinit(const LocationDescription &Loc, bool IsSPMD);
+
+ ///}
+
+private:
+ // Sets the function attributes expected for the outlined function
+ void setOutlinedTargetRegionFunctionAttributes(Function *OutlinedFn,
+ int32_t NumTeams,
+ int32_t NumThreads);
+
+ // Creates the function ID/Address for the given outlined function.
+ // In the case of an embedded device function, the address of the function is
+ // used; in the case of a non-offload function, a constant is created.
+ Constant *createOutlinedFunctionID(Function *OutlinedFn,
+ StringRef EntryFnIDName);
+
+ // Creates the region entry address for the outlined function
+ Constant *createTargetRegionEntryAddr(Function *OutlinedFunction,
+ StringRef EntryFnName);
+
+public:
+ /// Functions used to generate a function with the given name.
+ using FunctionGenCallback = std::function<Function *(StringRef FunctionName)>;
+
+ /// Create a unique name for the entry function using the source location
+ /// information of the current target region. The name will be something like:
+ ///
+ /// __omp_offloading_DD_FFFF_PP_lBB[_CC]
+ ///
+ /// where DD_FFFF is an ID unique to the file (device and file IDs), PP is the
+ /// mangled name of the function that encloses the target region and BB is the
+ /// line number of the target region. CC is a count added when more than one
+ /// region is located at the same location.
+ ///
+ /// If this target outline function is not an offload entry, we don't need to
+ /// register it. This may happen if it is guarded by an if clause that is
+ /// false at compile time, or no target archs have been specified.
+ ///
+ /// The created target region ID is used by the runtime library to identify
+ /// the current target region, so it only has to be unique and not
+ /// necessarily point to anything. It could be the pointer to the outlined
+ /// function that implements the target region, but we aren't using that so
+ /// that the compiler doesn't need to keep that, and could therefore inline
+ /// the host function if proven worthwhile during optimization. On the other
+ /// hand, if emitting code for the device, the ID has to be the function
+ /// address so that it can be retrieved from the offloading entry and launched
+ /// by the runtime library. We also mark the outlined function to have
+ /// external linkage in case we are emitting code for the device, because
+ /// these functions will be entry points to the device.
+ ///
+ /// \param InfoManager The info manager keeping track of the offload entries
+ /// \param EntryInfo The entry information about the function
+ /// \param GenerateFunctionCallback The callback function to generate the code
+ /// \param NumTeams Default number of teams
+ /// \param NumThreads Default number of threads
+ /// \param IsOffloadEntry True if the outlined function is an offload entry
+ /// \param OutlinedFn [out] Pointer to the outlined function
+ /// \param OutlinedFnID [out] The ID (constant) of the outlined function
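+ ///
+ /// A usage sketch (illustrative; `InfoManager`, the ID/line values, and the
+ /// helper `buildOutlinedBody` are assumptions):
+ /// \code{.cpp}
+ ///   TargetRegionEntryInfo EntryInfo("parent_fn", DeviceID, FileID, Line);
+ ///   FunctionGenCallback GenCB = [&](StringRef Name) {
+ ///     return buildOutlinedBody(Name); // hypothetical helper
+ ///   };
+ ///   Function *OutlinedFn = nullptr;
+ ///   Constant *OutlinedFnID = nullptr;
+ ///   OMPBuilder.emitTargetRegionFunction(InfoManager, EntryInfo, GenCB,
+ ///                                       /*NumTeams=*/-1, /*NumThreads=*/-1,
+ ///                                       /*IsOffloadEntry=*/true, OutlinedFn,
+ ///                                       OutlinedFnID);
+ /// \endcode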
+ void emitTargetRegionFunction(OffloadEntriesInfoManager &InfoManager,
+ TargetRegionEntryInfo &EntryInfo,
+ FunctionGenCallback &GenerateFunctionCallback,
+ int32_t NumTeams, int32_t NumThreads,
+ bool IsOffloadEntry, Function *&OutlinedFn,
+ Constant *&OutlinedFnID);
+
+ /// Registers the given function and sets up the attributes of the function.
+ /// Returns the FunctionID.
+ ///
+ /// \param InfoManager The info manager keeping track of the offload entries
+ /// \param EntryInfo The entry information about the function
+ /// \param OutlinedFunction Pointer to the outlined function
+ /// \param EntryFnName Name of the outlined function
+ /// \param EntryFnIDName Name of the ID to be created
+ /// \param NumTeams Default number of teams
+ /// \param NumThreads Default number of threads
+ Constant *registerTargetRegionFunction(OffloadEntriesInfoManager &InfoManager,
+ TargetRegionEntryInfo &EntryInfo,
+ Function *OutlinedFunction,
+ StringRef EntryFnName,
+ StringRef EntryFnIDName,
+ int32_t NumTeams, int32_t NumThreads);
+
+ /// Declarations for LLVM-IR types (simple, array, function and structure) are
+ /// generated below. Their names are defined and used in OpenMPKinds.def. Here
+ /// we provide the declarations, the initializeTypes function will provide the
+ /// values.
+ ///
+ ///{
+#define OMP_TYPE(VarName, InitValue) Type *VarName = nullptr;
+#define OMP_ARRAY_TYPE(VarName, ElemTy, ArraySize) \
+ ArrayType *VarName##Ty = nullptr; \
+ PointerType *VarName##PtrTy = nullptr;
+#define OMP_FUNCTION_TYPE(VarName, IsVarArg, ReturnType, ...) \
+ FunctionType *VarName = nullptr; \
+ PointerType *VarName##Ptr = nullptr;
+#define OMP_STRUCT_TYPE(VarName, StrName, ...) \
+ StructType *VarName = nullptr; \
+ PointerType *VarName##Ptr = nullptr;
+#include "llvm/Frontend/OpenMP/OMPKinds.def"
+
+ ///}
+
+private:
+ /// Create all simple and struct types exposed by the runtime and remember
+ /// the llvm::PointerTypes of them for easy access later.
+ void initializeTypes(Module &M);
+
+ /// Common interface for generating entry calls for OMP Directives.
+ /// If the directive has a region/body, it will set the insertion
+ /// point to the body.
+ ///
+ /// \param OMPD Directive to generate entry blocks for
+ /// \param EntryCall Call to the entry OMP Runtime Function
+ /// \param ExitBB block where the region ends.
+ /// \param Conditional indicates whether the entry call result will be used
+ /// to evaluate a conditional of whether a thread will execute
+ /// body code or not.
+ ///
+ /// \return The insertion position in exit block
+ InsertPointTy emitCommonDirectiveEntry(omp::Directive OMPD, Value *EntryCall,
+ BasicBlock *ExitBB,
+ bool Conditional = false);
+
+ /// Common interface to finalize the region
+ ///
+ /// \param OMPD Directive to generate exiting code for
+ /// \param FinIP Insertion point for emitting Finalization code and exit call
+ /// \param ExitCall Call to the ending OMP Runtime Function
+ /// \param HasFinalize indicates whether the directive will require
+ /// finalization and has a finalization callback in the stack
+ /// that should be called.
+ ///
+ /// \return The insertion position in exit block
+ InsertPointTy emitCommonDirectiveExit(omp::Directive OMPD,
+ InsertPointTy FinIP,
+ Instruction *ExitCall,
+ bool HasFinalize = true);
+
+ /// Common Interface to generate OMP inlined regions
+ ///
+ /// \param OMPD Directive to generate inlined region for
+ /// \param EntryCall Call to the entry OMP Runtime Function
+ /// \param ExitCall Call to the ending OMP Runtime Function
+ /// \param BodyGenCB Body code generation callback.
+ /// \param FiniCB Finalization Callback. Will be called when finalizing region
+ /// \param Conditional indicates whether the entry call result will be used
+ /// to evaluate a conditional of whether a thread will execute
+ /// body code or not.
+ /// \param HasFinalize indicates whether the directive will require
+ /// finalization and has a finalization callback in the stack
+ /// that should be called.
+ /// \param IsCancellable if HasFinalize is set to true, indicates whether the
+ /// directive should be cancellable.
+ /// \return The insertion point after the region
+ InsertPointTy
+ EmitOMPInlinedRegion(omp::Directive OMPD, Instruction *EntryCall,
+ Instruction *ExitCall, BodyGenCallbackTy BodyGenCB,
+ FinalizeCallbackTy FiniCB, bool Conditional = false,
+ bool HasFinalize = true, bool IsCancellable = false);
+
+ /// Compose a name from the given parts, joined by the platform-specific
+ /// separators.
+ /// \param Parts different parts of the final name that needs separation
+ /// \param FirstSeparator First separator used between the initial two
+ /// parts of the name.
+ /// \param Separator separator used between all of the rest consecutive
+ /// parts of the name
+ static std::string getNameWithSeparators(ArrayRef<StringRef> Parts,
+ StringRef FirstSeparator,
+ StringRef Separator);
+
+ /// Returns corresponding lock object for the specified critical region
+ /// name. If the lock object does not exist it is created, otherwise the
+ /// reference to the existing copy is returned.
+ /// \param CriticalName Name of the critical region.
+ ///
+ Value *getOMPCriticalRegionLock(StringRef CriticalName);
+
+ /// Callback type for Atomic Expression update
+ /// ex:
+ /// \code{.cpp}
+ /// unsigned x = 0;
+ /// #pragma omp atomic update
+ /// x = Expr(x_old); //Expr() is any legal operation
+ /// \endcode
+ ///
+ /// \param XOld the value of the atomic memory address to use for update
+ /// \param IRB reference to the IRBuilder to use
+ ///
+ /// \returns Value to update X to.
+ using AtomicUpdateCallbackTy =
+ const function_ref<Value *(Value *XOld, IRBuilder<> &IRB)>;
+
+private:
+ enum AtomicKind { Read, Write, Update, Capture, Compare };
+
+ /// Determine whether to emit flush or not
+ ///
+ /// \param Loc The insert and source location description.
+ /// \param AO The required atomic ordering
+ /// \param AK The OpenMP atomic operation kind used.
+ ///
+ /// \returns whether a flush was emitted or not
+ bool checkAndEmitFlushAfterAtomic(const LocationDescription &Loc,
+ AtomicOrdering AO, AtomicKind AK);
+
+ /// Emit atomic update for constructs: X = X BinOp Expr, or X = Expr BinOp X
+ /// For complex Operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X)
+ /// Only Scalar data types.
+ ///
+ /// \param AllocaIP The insertion point to be used for alloca
+ /// instructions.
+ /// \param X The target atomic pointer to be updated
+ /// \param XElemTy The element type of the atomic pointer.
+ /// \param Expr The value to update X with.
+ /// \param AO Atomic ordering of the generated atomic
+ /// instructions.
+ /// \param RMWOp The binary operation used for the update. If the
+ /// operation is not supported by atomicrmw, or belongs
+ /// to {FADD, FSUB, BAD_BINOP}, then a `cmpxchg`-based
+ /// atomic will be generated.
+ /// \param UpdateOp Code generator for complex expressions that cannot be
+ /// expressed through atomicrmw instruction.
+ /// \param VolatileX true if \a X is volatile.
+ /// \param IsXBinopExpr true if \a X is the left-hand side of the binary
+ /// operation on the right-hand side of the update
+ /// expression, false otherwise (e.g. true for X = X BinOp Expr).
+ ///
+ /// \returns A pair of the old value of X before the update, and the value
+ /// used for the update.
+ std::pair<Value *, Value *>
+ emitAtomicUpdate(InsertPointTy AllocaIP, Value *X, Type *XElemTy, Value *Expr,
+ AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
+ AtomicUpdateCallbackTy &UpdateOp, bool VolatileX,
+ bool IsXBinopExpr);
+
+ /// Emit the binary op. described by \p RMWOp, using \p Src1 and \p Src2 .
+ ///
+ /// \return The instruction.
+ Value *emitRMWOpAsInstruction(Value *Src1, Value *Src2,
+ AtomicRMWInst::BinOp RMWOp);
+
+public:
+ /// A struct to pack relevant information while generating atomic ops.
+ struct AtomicOpValue {
+ Value *Var = nullptr;
+ Type *ElemTy = nullptr;
+ bool IsSigned = false;
+ bool IsVolatile = false;
+ };
+
+ /// Emit atomic Read for : V = X --- Only Scalar data types.
+ ///
+ /// \param Loc The insert and source location description.
+ /// \param X The target pointer to be atomically read
+ /// \param V Memory address where to store atomically read
+ /// value
+ /// \param AO Atomic ordering of the generated atomic
+ /// instructions.
+ ///
+ /// \return Insertion point after generated atomic read IR.
+ InsertPointTy createAtomicRead(const LocationDescription &Loc,
+ AtomicOpValue &X, AtomicOpValue &V,
+ AtomicOrdering AO);
+
+ /// Emit atomic write for : X = Expr --- Only Scalar data types.
+ ///
+ /// \param Loc The insert and source location description.
+ /// \param X The target pointer to be atomically written to
+ /// \param Expr The value to store.
+ /// \param AO Atomic ordering of the generated atomic
+ /// instructions.
+ ///
+ /// \return Insertion point after generated atomic Write IR.
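+ ///
+ /// A read/write sketch (illustrative; `XAddr` and `VAddr` are assumed
+ /// pointers to Int32 storage):
+ /// \code{.cpp}
+ ///   OpenMPIRBuilder::AtomicOpValue X{XAddr, Builder.getInt32Ty(),
+ ///                                    /*IsSigned=*/true,
+ ///                                    /*IsVolatile=*/false};
+ ///   OpenMPIRBuilder::AtomicOpValue V{VAddr, Builder.getInt32Ty(),
+ ///                                    /*IsSigned=*/true,
+ ///                                    /*IsVolatile=*/false};
+ ///   OMPBuilder.createAtomicRead(Loc, X, V, AtomicOrdering::Monotonic);
+ ///   OMPBuilder.createAtomicWrite(Loc, X, Builder.getInt32(42),
+ ///                                AtomicOrdering::Monotonic);
+ /// \endcode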
+ InsertPointTy createAtomicWrite(const LocationDescription &Loc,
+ AtomicOpValue &X, Value *Expr,
+ AtomicOrdering AO);
+
+ /// Emit atomic update for constructs: X = X BinOp Expr, or X = Expr BinOp X
+ /// For complex Operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X)
+ /// Only Scalar data types.
+ ///
+ /// \param Loc The insert and source location description.
+ /// \param AllocaIP The insertion point to be used for alloca instructions.
+ /// \param X The target atomic pointer to be updated
+ /// \param Expr The value to update X with.
+ /// \param AO Atomic ordering of the generated atomic instructions.
+ /// \param RMWOp The binary operation used for the update. If the operation
+ /// is not supported by atomicrmw, or belongs to
+ /// {FADD, FSUB, BAD_BINOP}, then a `cmpxchg`-based
+ /// atomic will be generated.
+ /// \param UpdateOp Code generator for complex expressions that cannot be
+ /// expressed through atomicrmw instruction.
+ /// \param IsXBinopExpr true if \a X is the left-hand side of the binary
+ /// operation on the right-hand side of the update
+ /// expression, false otherwise (e.g. true for X = X BinOp Expr).
+ ///
+ /// \return Insertion point after generated atomic update IR.
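+ ///
+ /// An update sketch for `x += expr` (illustrative; `X`, `Expr`, and
+ /// `AllocaIP` are assumed to be in scope):
+ /// \code{.cpp}
+ ///   auto UpdateOp = [&](Value *XOld, IRBuilder<> &IRB) {
+ ///     // Used only on the cmpxchg fallback path.
+ ///     return IRB.CreateAdd(XOld, Expr);
+ ///   };
+ ///   OMPBuilder.createAtomicUpdate(Loc, AllocaIP, X, Expr,
+ ///                                 AtomicOrdering::Monotonic,
+ ///                                 AtomicRMWInst::Add, UpdateOp,
+ ///                                 /*IsXBinopExpr=*/true);
+ /// \endcode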
+ InsertPointTy createAtomicUpdate(const LocationDescription &Loc,
+ InsertPointTy AllocaIP, AtomicOpValue &X,
+ Value *Expr, AtomicOrdering AO,
+ AtomicRMWInst::BinOp RMWOp,
+ AtomicUpdateCallbackTy &UpdateOp,
+ bool IsXBinopExpr);
+
+ /// Emit atomic update for constructs: --- Only Scalar data types
+ /// V = X; X = X BinOp Expr ,
+ /// X = X BinOp Expr; V = X,
+ /// V = X; X = Expr BinOp X,
+ /// X = Expr BinOp X; V = X,
+ /// V = X; X = UpdateOp(X),
+ /// X = UpdateOp(X); V = X,
+ ///
+ /// \param Loc The insert and source location description.
+ /// \param AllocaIP The insertion point to be used for alloca instructions.
+ /// \param X The target atomic pointer to be updated
+ /// \param V Memory address where to store captured value
+ /// \param Expr The value to update X with.
+ /// \param AO Atomic ordering of the generated atomic instructions
+ /// \param RMWOp The binary operation used for the update. If the
+ /// operation is not supported by atomicrmw, or belongs to
+ /// {FADD, FSUB, BAD_BINOP}, then a `cmpxchg`-based
+ /// atomic will be generated.
+ /// \param UpdateOp Code generator for complex expressions that cannot be
+ /// expressed through atomicrmw instruction.
+ /// \param UpdateExpr true if X is an in-place update of the form
+ /// X = X BinOp Expr or X = Expr BinOp X
+ /// \param IsXBinopExpr true if X is the left-hand side of the binary
+ /// operation on the right-hand side of the update
+ /// expression, false otherwise (e.g. true for X = X BinOp Expr).
+ /// \param IsPostfixUpdate true if original value of 'x' must be stored in
+ /// 'v', not an updated one.
+ ///
+ /// \return Insertion point after generated atomic capture IR.
+ InsertPointTy
+ createAtomicCapture(const LocationDescription &Loc, InsertPointTy AllocaIP,
+ AtomicOpValue &X, AtomicOpValue &V, Value *Expr,
+ AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
+ AtomicUpdateCallbackTy &UpdateOp, bool UpdateExpr,
+ bool IsPostfixUpdate, bool IsXBinopExpr);
+
+ /// Emit atomic compare for constructs: --- Only scalar data types
+ /// cond-expr-stmt:
+ /// x = x ordop expr ? expr : x;
+ /// x = expr ordop x ? expr : x;
+ /// x = x == e ? d : x;
+ /// x = e == x ? d : x; (this one is not in the spec)
+ /// cond-update-stmt:
+ /// if (x ordop expr) { x = expr; }
+ /// if (expr ordop x) { x = expr; }
+ /// if (x == e) { x = d; }
+ /// if (e == x) { x = d; } (this one is not in the spec)
+ /// conditional-update-capture-atomic:
+ /// v = x; cond-update-stmt; (IsPostfixUpdate=true, IsFailOnly=false)
+ /// cond-update-stmt; v = x; (IsPostfixUpdate=false, IsFailOnly=false)
+ /// if (x == e) { x = d; } else { v = x; } (IsPostfixUpdate=false,
+ /// IsFailOnly=true)
+ /// r = x == e; if (r) { x = d; } (IsPostfixUpdate=false, IsFailOnly=false)
+ /// r = x == e; if (r) { x = d; } else { v = x; } (IsPostfixUpdate=false,
+ /// IsFailOnly=true)
+ ///
+ /// \param Loc The insert and source location description.
+ /// \param X The target atomic pointer to be updated.
+ /// \param V Memory address where to store captured value (for
+ /// compare capture only).
+ /// \param R Memory address where to store comparison result
+ /// (for compare capture with '==' only).
+ /// \param E The expected value ('e') for forms that use an
+ /// equality comparison or an expression ('expr') for
+ /// forms that use 'ordop' (logically an atomic maximum or
+ /// minimum).
+ /// \param D The desired value for forms that use an equality
+ /// comparison. For forms that use 'ordop', it should be
+ /// \p nullptr.
+ /// \param AO Atomic ordering of the generated atomic instructions.
+ /// \param Op Atomic compare operation. It can only be ==, <, or >.
+ /// \param IsXBinopExpr True if the conditional statement is in the form where
+ /// x is on LHS. It only matters for < or >.
+ /// \param IsPostfixUpdate True if original value of 'x' must be stored in
+ /// 'v', not an updated one (for compare capture
+ /// only).
+ /// \param IsFailOnly True if the original value of 'x' is stored to 'v'
+ /// only when the comparison fails. This is only valid for
+ /// the case the comparison is '=='.
+ ///
+ /// \return Insertion point after generated atomic capture IR.
+ InsertPointTy
+ createAtomicCompare(const LocationDescription &Loc, AtomicOpValue &X,
+ AtomicOpValue &V, AtomicOpValue &R, Value *E, Value *D,
+ AtomicOrdering AO, omp::OMPAtomicCompareOp Op,
+ bool IsXBinopExpr, bool IsPostfixUpdate, bool IsFailOnly);
+
+ /// Create the control flow structure of a canonical OpenMP loop.
+ ///
+ /// The emitted loop will be disconnected, i.e. no edge to the loop's
+ /// preheader and no terminator in the AfterBB. The OpenMPIRBuilder's
+ /// IRBuilder location is not preserved.
+ ///
+ /// \param DL DebugLoc used for the instructions in the skeleton.
+ /// \param TripCount Value to be used for the trip count.
+ /// \param F Function in which to insert the BasicBlocks.
+ /// \param PreInsertBefore Where to insert BBs that execute before the body,
+ /// typically the body itself.
+ /// \param PostInsertBefore Where to insert BBs that execute after the body.
+ /// \param Name Base name used to derive BB
+ /// and instruction names.
+ ///
+ /// \returns The CanonicalLoopInfo that represents the emitted loop.
+ CanonicalLoopInfo *createLoopSkeleton(DebugLoc DL, Value *TripCount,
+ Function *F,
+ BasicBlock *PreInsertBefore,
+ BasicBlock *PostInsertBefore,
+ const Twine &Name = {});
+ /// OMP Offload Info Metadata name string
+ const std::string ompOffloadInfoName = "omp_offload.info";
+
+ /// Loads all the offload entries information from the host IR
+ /// metadata. This function is only meant to be used with device code
+ /// generation.
+ ///
+ /// \param M Module to load Metadata info from. The module passed may be
+ /// loaded from a bitcode file, i.e., it may differ from the
+ /// OpenMPIRBuilder::M module.
+ /// \param OffloadEntriesInfoManager The manager in which to initialize the
+ /// offload entry information.
+ void
+ loadOffloadInfoMetadata(Module &M,
+ OffloadEntriesInfoManager &OffloadEntriesInfoManager);
+
+ /// Gets (if a variable with the given name already exists) or creates an
+ /// internal global variable with the specified Name. The created variable has
+ /// CommonLinkage by default and is initialized with a null value.
+ /// \param Ty Type of the global variable. If it already exists, the type
+ /// must be the same.
+ /// \param Name Name of the variable.
+ GlobalVariable *getOrCreateInternalVariable(Type *Ty, const StringRef &Name,
+ unsigned AddressSpace = 0);
+};
+
+/// Data structure to contain the information needed to uniquely identify
+/// a target entry.
+struct TargetRegionEntryInfo {
+ std::string ParentName;
+ unsigned DeviceID;
+ unsigned FileID;
+ unsigned Line;
+ unsigned Count;
+
+ TargetRegionEntryInfo()
+ : ParentName(""), DeviceID(0), FileID(0), Line(0), Count(0) {}
+ TargetRegionEntryInfo(StringRef ParentName, unsigned DeviceID,
+ unsigned FileID, unsigned Line, unsigned Count = 0)
+ : ParentName(ParentName), DeviceID(DeviceID), FileID(FileID), Line(Line),
+ Count(Count) {}
+
+ static void getTargetRegionEntryFnName(SmallVectorImpl<char> &Name,
+ StringRef ParentName,
+ unsigned DeviceID, unsigned FileID,
+ unsigned Line, unsigned Count);
+
+ bool operator<(const TargetRegionEntryInfo RHS) const {
+ return std::make_tuple(ParentName, DeviceID, FileID, Line, Count) <
+ std::make_tuple(RHS.ParentName, RHS.DeviceID, RHS.FileID, RHS.Line,
+ RHS.Count);
+ }
+};
+
+/// Class that manages information about offload code regions and data
+class OffloadEntriesInfoManager {
+ OpenMPIRBuilderConfig Config;
+
+ /// Number of entries registered so far.
+ unsigned OffloadingEntriesNum = 0;
+
+public:
+ void setConfig(OpenMPIRBuilderConfig C) { Config = C; }
+
+ /// Base class of the entries info.
+ class OffloadEntryInfo {
+ public:
+ /// Kind of a given entry.
+ enum OffloadingEntryInfoKinds : unsigned {
+ /// Entry is a target region.
+ OffloadingEntryInfoTargetRegion = 0,
+ /// Entry is a declare target variable.
+ OffloadingEntryInfoDeviceGlobalVar = 1,
+ /// Invalid entry info.
+ OffloadingEntryInfoInvalid = ~0u
+ };
+
+ protected:
+ OffloadEntryInfo() = delete;
+ explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind) : Kind(Kind) {}
+ explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind, unsigned Order,
+ uint32_t Flags)
+ : Flags(Flags), Order(Order), Kind(Kind) {}
+ ~OffloadEntryInfo() = default;
+
+ public:
+ bool isValid() const { return Order != ~0u; }
+ unsigned getOrder() const { return Order; }
+ OffloadingEntryInfoKinds getKind() const { return Kind; }
+ uint32_t getFlags() const { return Flags; }
+ void setFlags(uint32_t NewFlags) { Flags = NewFlags; }
+ Constant *getAddress() const { return cast_or_null<Constant>(Addr); }
+ void setAddress(Constant *V) {
+ assert(!Addr.pointsToAliveValue() && "Address has been set before!");
+ Addr = V;
+ }
+ static bool classof(const OffloadEntryInfo *Info) { return true; }
+
+ private:
+ /// Address of the entity that has to be mapped for offloading.
+ WeakTrackingVH Addr;
+
+ /// Flags associated with the device global.
+ uint32_t Flags = 0u;
+
+ /// Order this entry was emitted.
+ unsigned Order = ~0u;
+
+ OffloadingEntryInfoKinds Kind = OffloadingEntryInfoInvalid;
+ };
+
+ /// Return true if there are no entries defined.
+ bool empty() const;
+ /// Return number of entries defined so far.
+ unsigned size() const { return OffloadingEntriesNum; }
+
+ OffloadEntriesInfoManager() : Config() {}
+
+ //
+ // Target region entries related.
+ //
+
+ /// Kind of the target registry entry.
+ enum OMPTargetRegionEntryKind : uint32_t {
+ /// Mark the entry as target region.
+ OMPTargetRegionEntryTargetRegion = 0x0,
+ /// Mark the entry as a global constructor.
+ OMPTargetRegionEntryCtor = 0x02,
+ /// Mark the entry as a global destructor.
+ OMPTargetRegionEntryDtor = 0x04,
+ };
+
+ /// Target region entries info.
+ class OffloadEntryInfoTargetRegion final : public OffloadEntryInfo {
+ /// Address that can be used as the ID of the entry.
+ Constant *ID = nullptr;
+
+ public:
+ OffloadEntryInfoTargetRegion()
+ : OffloadEntryInfo(OffloadingEntryInfoTargetRegion) {}
+ explicit OffloadEntryInfoTargetRegion(unsigned Order, Constant *Addr,
+ Constant *ID,
+ OMPTargetRegionEntryKind Flags)
+ : OffloadEntryInfo(OffloadingEntryInfoTargetRegion, Order, Flags),
+ ID(ID) {
+ setAddress(Addr);
+ }
+
+ Constant *getID() const { return ID; }
+ void setID(Constant *V) {
+ assert(!ID && "ID has been set before!");
+ ID = V;
+ }
+ static bool classof(const OffloadEntryInfo *Info) {
+ return Info->getKind() == OffloadingEntryInfoTargetRegion;
+ }
+ };
+
+ /// Initialize target region entry.
+ /// This is ONLY needed for DEVICE compilation.
+ void initializeTargetRegionEntryInfo(const TargetRegionEntryInfo &EntryInfo,
+ unsigned Order);
+ /// Register target region entry.
+ void registerTargetRegionEntryInfo(TargetRegionEntryInfo EntryInfo,
+ Constant *Addr, Constant *ID,
+ OMPTargetRegionEntryKind Flags);
+ /// Return true if a target region entry with the provided information
+ /// exists.
+ bool hasTargetRegionEntryInfo(TargetRegionEntryInfo EntryInfo,
+ bool IgnoreAddressId = false) const;
+
+ /// Return the name based on \a EntryInfo using the next available Count.
+ void getTargetRegionEntryFnName(SmallVectorImpl<char> &Name,
+ const TargetRegionEntryInfo &EntryInfo);
+
+ /// Applies action \a Action on all registered entries.
+ typedef function_ref<void(const TargetRegionEntryInfo &EntryInfo,
+ const OffloadEntryInfoTargetRegion &)>
+ OffloadTargetRegionEntryInfoActTy;
+ void
+ actOnTargetRegionEntriesInfo(const OffloadTargetRegionEntryInfoActTy &Action);
+
+ //
+ // Device global variable entries related.
+ //
+
+ /// Kind of the global variable entry.
+ enum OMPTargetGlobalVarEntryKind : uint32_t {
+ /// Mark the entry as a 'declare target to'.
+ OMPTargetGlobalVarEntryTo = 0x0,
+ /// Mark the entry as a 'declare target link'.
+ OMPTargetGlobalVarEntryLink = 0x1,
+ };
+
+ /// Device global variable entries info.
+ class OffloadEntryInfoDeviceGlobalVar final : public OffloadEntryInfo {
+ /// Size of the global variable.
+ int64_t VarSize;
+ GlobalValue::LinkageTypes Linkage;
+
+ public:
+ OffloadEntryInfoDeviceGlobalVar()
+ : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar) {}
+ explicit OffloadEntryInfoDeviceGlobalVar(unsigned Order,
+ OMPTargetGlobalVarEntryKind Flags)
+ : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags) {}
+ explicit OffloadEntryInfoDeviceGlobalVar(unsigned Order, Constant *Addr,
+ int64_t VarSize,
+ OMPTargetGlobalVarEntryKind Flags,
+ GlobalValue::LinkageTypes Linkage)
+ : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags),
+ VarSize(VarSize), Linkage(Linkage) {
+ setAddress(Addr);
+ }
+
+ int64_t getVarSize() const { return VarSize; }
+ void setVarSize(int64_t Size) { VarSize = Size; }
+ GlobalValue::LinkageTypes getLinkage() const { return Linkage; }
+ void setLinkage(GlobalValue::LinkageTypes LT) { Linkage = LT; }
+ static bool classof(const OffloadEntryInfo *Info) {
+ return Info->getKind() == OffloadingEntryInfoDeviceGlobalVar;
+ }
+ };
+
+ /// Initialize device global variable entry.
+ /// This is ONLY used for DEVICE compilation.
+ void initializeDeviceGlobalVarEntryInfo(StringRef Name,
+ OMPTargetGlobalVarEntryKind Flags,
+ unsigned Order);
+
+ /// Register device global variable entry.
+ void registerDeviceGlobalVarEntryInfo(StringRef VarName, Constant *Addr,
+ int64_t VarSize,
+ OMPTargetGlobalVarEntryKind Flags,
+ GlobalValue::LinkageTypes Linkage);
+ /// Checks if the variable with the given name has been registered already.
+ bool hasDeviceGlobalVarEntryInfo(StringRef VarName) const {
+ return OffloadEntriesDeviceGlobalVar.count(VarName) > 0;
+ }
+ /// Applies action \a Action on all registered entries.
+ typedef function_ref<void(StringRef, const OffloadEntryInfoDeviceGlobalVar &)>
+ OffloadDeviceGlobalVarEntryInfoActTy;
+ void actOnDeviceGlobalVarEntriesInfo(
+ const OffloadDeviceGlobalVarEntryInfoActTy &Action);
+
+private:
+ /// Return the count of entries at a particular source location.
+ unsigned
+ getTargetRegionEntryInfoCount(const TargetRegionEntryInfo &EntryInfo) const;
+
+ /// Update the count of entries at a particular source location.
+ void
+ incrementTargetRegionEntryInfoCount(const TargetRegionEntryInfo &EntryInfo);
+
+ static TargetRegionEntryInfo
+ getTargetRegionEntryCountKey(const TargetRegionEntryInfo &EntryInfo) {
+ return TargetRegionEntryInfo(EntryInfo.ParentName, EntryInfo.DeviceID,
+ EntryInfo.FileID, EntryInfo.Line, 0);
+ }
+
+ // Count of entries at a location.
+ std::map<TargetRegionEntryInfo, unsigned> OffloadEntriesTargetRegionCount;
+
+ // Storage for target region entries kind.
+ typedef std::map<TargetRegionEntryInfo, OffloadEntryInfoTargetRegion>
+ OffloadEntriesTargetRegionTy;
+ OffloadEntriesTargetRegionTy OffloadEntriesTargetRegion;
+ /// Storage for device global variable entries kind. The storage is to be
+ /// indexed by mangled name.
+ typedef StringMap<OffloadEntryInfoDeviceGlobalVar>
+ OffloadEntriesDeviceGlobalVarTy;
+ OffloadEntriesDeviceGlobalVarTy OffloadEntriesDeviceGlobalVar;
+};
+
+/// Class to represent the control flow structure of an OpenMP canonical loop.
+///
+/// The control-flow structure is standardized for easy consumption by
+/// directives associated with loops. For instance, the worksharing-loop
+/// construct may change this control flow such that each loop iteration is
+/// executed on only one thread. The constraints of a canonical loop in brief
+/// are:
+///
+/// * The number of loop iterations must have been computed before entering the
+/// loop.
+///
+/// * Has an (unsigned) logical induction variable that starts at zero and
+/// increments by one.
+///
+/// * The loop's CFG itself has no side-effects. The OpenMP specification
+/// itself allows side-effects, but the order in which they happen, including
+/// how often or whether at all, is unspecified. We expect that the frontend
+/// will emit those side-effect instructions somewhere (e.g. before the loop)
+/// such that the CanonicalLoopInfo itself can be side-effect free.
+///
+/// Keep in mind that CanonicalLoopInfo is meant to only describe a repeated
+/// execution of a loop body that satisfies these constraints. It does NOT
+/// represent arbitrary SESE regions that happen to contain a loop. Do not use
+/// CanonicalLoopInfo for such purposes.
+///
+/// The control flow can be described as follows:
+///
+///     Preheader
+///          |
+///  /-> Header
+///  |      |
+///  |    Cond---\
+///  |      |    |
+///  |     Body  |
+///  |    | | |  |
+///  |   <...>   |
+///  |    | | |  |
+///  |     Latch |
+///  \------/    |
+///              |
+///            Exit
+///              |
+///            After
+///
+/// The loop is thought to start at PreheaderIP (at the Preheader's terminator,
+/// including) and end at AfterIP (at the After's first instruction, excluding).
+/// That is, instructions in the Preheader and After blocks (except the
+/// Preheader's terminator) are out of CanonicalLoopInfo's control and may have
+/// side-effects. Typically, the Preheader is used to compute the loop's trip
+/// count. The instructions from BodyIP (at the Body block's first instruction,
+/// excluding) until the Latch are also considered outside CanonicalLoopInfo's
+/// control and thus can have side-effects. The body block is the single entry
+/// point into the loop body, which may contain arbitrary control flow as long
+/// as all control paths eventually branch to the Latch block.
+///
+/// TODO: Consider adding another standardized BasicBlock between Body CFG and
+/// Latch to guarantee that there is only a single edge to the latch. It would
+/// make loop transformations easier by not needing to consider multiple
+/// predecessors of the latch (see redirectAllPredecessorsTo) and would give us
+/// an equivalent to PreheaderIP, AfterIP and BodyIP for inserting code that
+/// executes after each body iteration.
+///
+/// There must be no loop-carried dependencies through llvm::Values. This is
+/// equivalent to saying that the Latch has no PHINode and the Header's only
+/// PHINode is for the induction variable.
+///
+/// All code in Header, Cond, Latch and Exit (plus the terminator of the
+/// Preheader) is CanonicalLoopInfo's responsibility and its build-up is
+/// checked by assertOK(). It is expected not to be modified unless explicitly
+/// modifying the CanonicalLoopInfo through a method that applies an OpenMP
+/// loop-associated construct such as applyWorkshareLoop, tileLoops, unrollLoop,
+/// etc. These methods usually invalidate the CanonicalLoopInfo and re-use its
+/// basic blocks. After invalidation, the CanonicalLoopInfo must not be used
+/// anymore as its underlying control flow may not exist anymore.
+/// Loop-transformation methods such as tileLoops, collapseLoops and unrollLoop
+/// may also return a new CanonicalLoopInfo that can be passed to other
+/// loop-associated construct implementing methods. These loop-transforming
+/// methods may either create a new CanonicalLoopInfo usually using
+/// createLoopSkeleton and invalidate the input CanonicalLoopInfo, or reuse and
+/// modify one of the input CanonicalLoopInfo and return it as representing the
+/// modified loop. What is done is an implementation detail of the
+/// transformation-implementing method and callers should always assume that
+/// the CanonicalLoopInfo passed to it is invalidated and a new object is
+/// returned.
+/// A returned CanonicalLoopInfo has the same structure and guarantees as one
+/// created by createCanonicalLoop, such that transforming methods do not have
+/// to special-case where the CanonicalLoopInfo originated from.
+///
+/// Generally, methods consuming CanonicalLoopInfo do not need an
+/// OpenMPIRBuilder::InsertPointTy as argument, but use the locations of the
+/// CanonicalLoopInfo to insert new or modify existing instructions. Unless
+/// documented otherwise, methods consuming CanonicalLoopInfo do not invalidate
+/// any InsertPoint that is outside CanonicalLoopInfo's control. Specifically,
+/// any InsertPoint in the Preheader, After, or Body blocks can still be used
+/// after calling such a method.
+///
+/// TODO: Provide mechanisms for exception handling and cancellation points.
+///
+/// Defined outside OpenMPIRBuilder because nested classes cannot be
+/// forward-declared, e.g. to avoid having to include the entire OMPIRBuilder.h.
+class CanonicalLoopInfo {
+ friend class OpenMPIRBuilder;
+
+private:
+ BasicBlock *Header = nullptr;
+ BasicBlock *Cond = nullptr;
+ BasicBlock *Latch = nullptr;
+ BasicBlock *Exit = nullptr;
+
+ /// Add the control blocks of this loop to \p BBs.
+ ///
+ /// This does not include any block from the body, including the one returned
+ /// by getBody().
+ ///
+ /// FIXME: This currently includes the Preheader and After blocks even though
+ /// their content is (mostly) not under CanonicalLoopInfo's control.
+/// Re-evaluate whether this makes sense.
+ void collectControlBlocks(SmallVectorImpl<BasicBlock *> &BBs);
+
+ /// Sets the number of loop iterations to the given value. This value must be
+ /// valid in the condition block (i.e., defined in the preheader) and is
+ /// interpreted as an unsigned integer.
+ void setTripCount(Value *TripCount);
+
+ /// Replace all uses of the canonical induction variable in the loop body with
+ /// a new one.
+ ///
+ /// The intended use case is to update the induction variable for an updated
+ /// iteration space such that it can stay normalized in the 0...tripcount-1
+ /// range.
+ ///
+ /// The \p Updater is called with the (presumably updated) current normalized
+ /// induction variable and is expected to return the value that uses of the
+ /// pre-updated induction values should use instead, typically dependent on
+ /// the new induction variable. This is a lambda (instead of e.g. just passing
+ /// the new value) to be able to distinguish the uses of the pre-updated
+ /// induction variable and uses of the induction variable to compute the
+ /// updated induction variable value.
+ void mapIndVar(llvm::function_ref<Value *(Instruction *)> Updater);
+
+public:
+ /// Returns whether this object currently represents the IR of a loop. If
+ /// returning false, it may have been consumed by a loop transformation or not
+ /// been initialized. Do not use it in this case.
+ bool isValid() const { return Header; }
+
+ /// The preheader ensures that there is only a single edge entering the loop.
+ /// Code that must be executed before any loop iteration can be emitted here,
+ /// such as computing the loop trip count and begin lifetime markers. Code in
+ /// the preheader is not considered part of the canonical loop.
+ BasicBlock *getPreheader() const;
+
+ /// The header is the entry for each iteration. In the canonical control flow,
+ /// it only contains the PHINode for the induction variable.
+ BasicBlock *getHeader() const {
+ assert(isValid() && "Requires a valid canonical loop");
+ return Header;
+ }
+
+ /// The condition block computes whether there is another loop iteration. If
+ /// yes, branches to the body; otherwise to the exit block.
+ BasicBlock *getCond() const {
+ assert(isValid() && "Requires a valid canonical loop");
+ return Cond;
+ }
+
+ /// The body block is the single entry for a loop iteration and not controlled
+ /// by CanonicalLoopInfo. It can contain arbitrary control flow but must
+ /// eventually branch to the \p Latch block.
+ BasicBlock *getBody() const {
+ assert(isValid() && "Requires a valid canonical loop");
+ return cast<BranchInst>(Cond->getTerminator())->getSuccessor(0);
+ }
+
+ /// Reaching the latch indicates the end of the loop body code. In the
+ /// canonical control flow, it only contains the increment of the induction
+ /// variable.
+ BasicBlock *getLatch() const {
+ assert(isValid() && "Requires a valid canonical loop");
+ return Latch;
+ }
+
+ /// Reaching the exit indicates no more iterations are being executed.
+ BasicBlock *getExit() const {
+ assert(isValid() && "Requires a valid canonical loop");
+ return Exit;
+ }
+
+ /// The after block is intended for clean-up code such as lifetime end
+ /// markers. It is separate from the exit block to ensure that, analogous to
+ /// the preheader, it has just a single entry edge and is free from PHI
+ /// nodes should there be multiple loop exits (such as from break
+ /// statements/cancellations).
+ BasicBlock *getAfter() const {
+ assert(isValid() && "Requires a valid canonical loop");
+ return Exit->getSingleSuccessor();
+ }
+
+ /// Returns the llvm::Value containing the number of loop iterations. It must
+ /// be valid in the preheader and always interpreted as an unsigned integer of
+ /// any bit-width.
+ Value *getTripCount() const {
+ assert(isValid() && "Requires a valid canonical loop");
+ Instruction *CmpI = &Cond->front();
+ assert(isa<CmpInst>(CmpI) && "First inst must compare IV with TripCount");
+ return CmpI->getOperand(1);
+ }
+
+ /// Returns the instruction representing the current logical induction
+ /// variable. Always unsigned, always starting at 0 with an increment of one.
+ Instruction *getIndVar() const {
+ assert(isValid() && "Requires a valid canonical loop");
+ Instruction *IndVarPHI = &Header->front();
+ assert(isa<PHINode>(IndVarPHI) && "First inst must be the IV PHI");
+ return IndVarPHI;
+ }
+
+ /// Return the type of the induction variable (and the trip count).
+ Type *getIndVarType() const {
+ assert(isValid() && "Requires a valid canonical loop");
+ return getIndVar()->getType();
+ }
+
+ /// Return the insertion point for user code before the loop.
+ OpenMPIRBuilder::InsertPointTy getPreheaderIP() const {
+ assert(isValid() && "Requires a valid canonical loop");
+ BasicBlock *Preheader = getPreheader();
+ return {Preheader, std::prev(Preheader->end())};
+ };
+
+ /// Return the insertion point for user code in the body.
+ OpenMPIRBuilder::InsertPointTy getBodyIP() const {
+ assert(isValid() && "Requires a valid canonical loop");
+ BasicBlock *Body = getBody();
+ return {Body, Body->begin()};
+ };
+
+ /// Return the insertion point for user code after the loop.
+ OpenMPIRBuilder::InsertPointTy getAfterIP() const {
+ assert(isValid() && "Requires a valid canonical loop");
+ BasicBlock *After = getAfter();
+ return {After, After->begin()};
+ };
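+
+ // A typical emission pattern (illustrative; `CLI` is assumed to be a valid
+ // CanonicalLoopInfo and `Builder` an IRBuilder):
+ //   Builder.restoreIP(CLI->getBodyIP());
+ //   Value *IV = CLI->getIndVar();
+ //   // ... emit the loop body, indexed by IV ...
+ //   Builder.restoreIP(CLI->getAfterIP());
+ //   // ... emit code that runs after the loop ...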
+
+ Function *getFunction() const {
+ assert(isValid() && "Requires a valid canonical loop");
+ return Header->getParent();
+ }
+
+ /// Consistency self-check.
+ void assertOK() const;
+
+ /// Invalidate this loop. That is, the underlying IR does not fulfill the
+ /// requirements of an OpenMP canonical loop anymore.
+ void invalidate();
+};
+
+} // end namespace llvm
+
+#endif // LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm16/include/llvm/Frontend/OpenMP/OMPKinds.def b/contrib/libs/llvm16/include/llvm/Frontend/OpenMP/OMPKinds.def
new file mode 100644
index 0000000000..8a09fb7cb7
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/Frontend/OpenMP/OMPKinds.def
@@ -0,0 +1,1322 @@
+//===--- OMPKinds.def - OpenMP directives, clauses, rt-calls -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines the list of supported OpenMP runtime
+/// calls, and other things that need to be listed in enums.
+///
+/// This file is under transition to OMP.td with TableGen code generation.
+///
+//===----------------------------------------------------------------------===//
+
+/// OpenMP Directives, combined directives and Clauses
+/// - Moved to OMP.td
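+///
+/// Consumers define the macros they need before including this file; any
+/// macro left undefined defaults to a no-op below. An illustrative consumer
+/// sketch (the enum name is hypothetical):
+/// \code{.cpp}
+///   enum class OMPTypeId {
+///   #define OMP_TYPE(VarName, InitValue) VarName,
+///   #include "llvm/Frontend/OpenMP/OMPKinds.def"
+///   };
+/// \endcode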
+
+/// Types used in runtime structs or runtime functions
+///
+///{
+
+#ifndef OMP_TYPE
+#define OMP_TYPE(VarName, InitValue)
+#endif
+
+#define __OMP_TYPE(VarName) OMP_TYPE(VarName, Type::get##VarName##Ty(Ctx))
+
+__OMP_TYPE(Void)
+__OMP_TYPE(Int1)
+__OMP_TYPE(Int8)
+__OMP_TYPE(Int16)
+__OMP_TYPE(Int32)
+__OMP_TYPE(Int64)
+__OMP_TYPE(Int8Ptr)
+__OMP_TYPE(Int16Ptr)
+__OMP_TYPE(Int32Ptr)
+__OMP_TYPE(Int64Ptr)
+__OMP_TYPE(Double)
+
+OMP_TYPE(SizeTy, M.getDataLayout().getIntPtrType(Ctx))
+OMP_TYPE(Int63, Type::getIntNTy(Ctx, 63))
+
+#define __OMP_PTR_TYPE(NAME, BASE) OMP_TYPE(NAME, BASE->getPointerTo())
+
+__OMP_PTR_TYPE(VoidPtr, Int8)
+__OMP_PTR_TYPE(VoidPtrPtr, VoidPtr)
+__OMP_PTR_TYPE(VoidPtrPtrPtr, VoidPtrPtr)
+
+__OMP_PTR_TYPE(Int8PtrPtr, Int8Ptr)
+__OMP_PTR_TYPE(Int8PtrPtrPtr, Int8PtrPtr)
+
+#undef __OMP_PTR_TYPE
+
+#undef __OMP_TYPE
+#undef OMP_TYPE
+
+///}
+
+/// array types
+///
+///{
+
+#ifndef OMP_ARRAY_TYPE
+#define OMP_ARRAY_TYPE(VarName, ElemTy, ArraySize)
+#endif
+
+#define __OMP_ARRAY_TYPE(VarName, ElemTy, ArraySize) \
+ OMP_ARRAY_TYPE(VarName, ElemTy, ArraySize)
+
+__OMP_ARRAY_TYPE(KmpCriticalName, Int32, 8)
+__OMP_ARRAY_TYPE(Int32Arr3, Int32, 3)
+
+#undef __OMP_ARRAY_TYPE
+#undef OMP_ARRAY_TYPE
+
+///}
+
+/// Struct and function types
+///
+///{
+
+#ifndef OMP_STRUCT_TYPE
+#define OMP_STRUCT_TYPE(VarName, StructName, Packed, ...)
+#endif
+
+#define __OMP_STRUCT_TYPE(VarName, Name, Packed, ...) \
+ OMP_STRUCT_TYPE(VarName, "struct." #Name, Packed, __VA_ARGS__)
+
+__OMP_STRUCT_TYPE(Ident, ident_t, false, Int32, Int32, Int32, Int32, Int8Ptr)
+__OMP_STRUCT_TYPE(OffloadEntry, __tgt_offload_entry, false, Int8Ptr, Int8Ptr, SizeTy,
+ Int32, Int32)
+__OMP_STRUCT_TYPE(KernelArgs, __tgt_kernel_arguments, false, Int32, Int32, VoidPtrPtr,
+ VoidPtrPtr, Int64Ptr, Int64Ptr, VoidPtrPtr, VoidPtrPtr,
+ Int64, Int64, Int32Arr3Ty, Int32Arr3Ty, Int32)
+__OMP_STRUCT_TYPE(AsyncInfo, __tgt_async_info, false, Int8Ptr)
+__OMP_STRUCT_TYPE(DependInfo, kmp_dep_info, false, SizeTy, SizeTy, Int8)
+
+#undef __OMP_STRUCT_TYPE
+#undef OMP_STRUCT_TYPE
+
+#ifndef OMP_FUNCTION_TYPE
+#define OMP_FUNCTION_TYPE(VarName, IsVarArg, ReturnType, ...)
+#endif
+
+#define __OMP_FUNCTION_TYPE(VarName, IsVarArg, ReturnType, ...) \
+ OMP_FUNCTION_TYPE(VarName, IsVarArg, ReturnType, __VA_ARGS__)
+
+__OMP_FUNCTION_TYPE(ParallelTask, true, Void, Int32Ptr, Int32Ptr)
+__OMP_FUNCTION_TYPE(ReduceFunction, false, Void, VoidPtr, VoidPtr)
+__OMP_FUNCTION_TYPE(CopyFunction, false, Void, VoidPtr, VoidPtr)
+__OMP_FUNCTION_TYPE(KmpcCtor, false, VoidPtr, VoidPtr)
+__OMP_FUNCTION_TYPE(KmpcDtor, false, Void, VoidPtr)
+__OMP_FUNCTION_TYPE(KmpcCopyCtor, false, VoidPtr, VoidPtr, VoidPtr)
+__OMP_FUNCTION_TYPE(TaskRoutineEntry, false, Int32, Int32,
+ /* kmp_task_t */ VoidPtr)
+__OMP_FUNCTION_TYPE(ShuffleReduce, false, Void, VoidPtr, Int16, Int16, Int16)
+__OMP_FUNCTION_TYPE(InterWarpCopy, false, Void, VoidPtr, Int32)
+__OMP_FUNCTION_TYPE(GlobalList, false, Void, VoidPtr, Int32, VoidPtr)
+
+#undef __OMP_FUNCTION_TYPE
+#undef OMP_FUNCTION_TYPE
+
+///}
+
+/// Internal Control Variables information
+///
+///{
+
+#ifndef ICV_INIT_VALUE
+#define ICV_INIT_VALUE(Enum, Name)
+#endif
+
+#define __ICV_INIT_VALUE(Name) ICV_INIT_VALUE(ICV_##Name, #Name)
+
+__ICV_INIT_VALUE(ZERO)
+__ICV_INIT_VALUE(FALSE)
+__ICV_INIT_VALUE(IMPLEMENTATION_DEFINED)
+__ICV_INIT_VALUE(LAST)
+
+#undef __ICV_INIT_VALUE
+#undef ICV_INIT_VALUE
+
+#ifndef ICV_DATA_ENV
+#define ICV_DATA_ENV(Enum, Name, EnvVarName, Init)
+#endif
+
+#define __ICV_DATA_ENV(Name, EnvVarName, Init) \
+ ICV_DATA_ENV(ICV_##Name, #Name, #EnvVarName, Init)
+
+__ICV_DATA_ENV(nthreads, OMP_NUM_THREADS, ICV_IMPLEMENTATION_DEFINED)
+__ICV_DATA_ENV(active_levels, NONE, ICV_ZERO)
+__ICV_DATA_ENV(cancel, OMP_CANCELLATION, ICV_FALSE)
+__ICV_DATA_ENV(proc_bind, OMP_PROC_BIND, ICV_IMPLEMENTATION_DEFINED)
+__ICV_DATA_ENV(__last, last, ICV_LAST)
+
+#undef __ICV_DATA_ENV
+#undef ICV_DATA_ENV
+
+#ifndef ICV_RT_SET
+#define ICV_RT_SET(Name, RTL)
+#endif
+
+#define __ICV_RT_SET(Name, RTL) ICV_RT_SET(ICV_##Name, OMPRTL_##RTL)
+
+__ICV_RT_SET(nthreads, omp_set_num_threads)
+
+#undef __ICV_RT_SET
+#undef ICV_RT_SET
+
+#ifndef ICV_RT_GET
+#define ICV_RT_GET(Name, RTL)
+#endif
+
+#define __ICV_RT_GET(Name, RTL) ICV_RT_GET(ICV_##Name, OMPRTL_##RTL)
+
+__ICV_RT_GET(nthreads, omp_get_max_threads)
+__ICV_RT_GET(active_levels, omp_get_active_level)
+__ICV_RT_GET(cancel, omp_get_cancellation)
+__ICV_RT_GET(proc_bind, omp_get_proc_bind)
+
+#undef __ICV_RT_GET
+#undef ICV_RT_GET
+
+///}
+
+/// Runtime library function (and their attributes)
+///
+///{
+
+#ifndef OMP_RTL
+#define OMP_RTL(Enum, Str, IsVarArg, ReturnType, ...)
+#endif
+
+#define __OMP_RTL(Name, IsVarArg, ReturnType, ...) \
+ OMP_RTL(OMPRTL_##Name, #Name, IsVarArg, ReturnType, __VA_ARGS__)
+
+
+
+__OMP_RTL(__kmpc_barrier, false, Void, IdentPtr, Int32)
+__OMP_RTL(__kmpc_cancel, false, Int32, IdentPtr, Int32, Int32)
+__OMP_RTL(__kmpc_cancel_barrier, false, Int32, IdentPtr, Int32)
+__OMP_RTL(__kmpc_error, false, Void, IdentPtr, Int32, Int8Ptr)
+__OMP_RTL(__kmpc_flush, false, Void, IdentPtr)
+__OMP_RTL(__kmpc_global_thread_num, false, Int32, IdentPtr)
+__OMP_RTL(__kmpc_get_hardware_thread_id_in_block, false, Int32, )
+__OMP_RTL(__kmpc_fork_call, true, Void, IdentPtr, Int32, ParallelTaskPtr)
+__OMP_RTL(__kmpc_fork_call_if, false, Void, IdentPtr, Int32, ParallelTaskPtr,
+ Int32, VoidPtr)
+__OMP_RTL(__kmpc_omp_taskwait, false, Int32, IdentPtr, Int32)
+__OMP_RTL(__kmpc_omp_taskyield, false, Int32, IdentPtr, Int32, /* Int */ Int32)
+__OMP_RTL(__kmpc_push_num_threads, false, Void, IdentPtr, Int32,
+ /* Int */ Int32)
+__OMP_RTL(__kmpc_push_proc_bind, false, Void, IdentPtr, Int32, /* Int */ Int32)
+__OMP_RTL(__kmpc_omp_reg_task_with_affinity, false, Int32, IdentPtr, Int32,
+ /* kmp_task_t */ VoidPtr, Int32,
+ /* kmp_task_affinity_info_t */ VoidPtr)
+
+__OMP_RTL(__kmpc_get_hardware_num_blocks, false, Int32, )
+__OMP_RTL(__kmpc_get_hardware_num_threads_in_block, false, Int32, )
+__OMP_RTL(__kmpc_get_warp_size, false, Int32, )
+
+__OMP_RTL(omp_get_thread_num, false, Int32, )
+__OMP_RTL(omp_get_num_threads, false, Int32, )
+__OMP_RTL(omp_get_max_threads, false, Int32, )
+__OMP_RTL(omp_in_parallel, false, Int32, )
+__OMP_RTL(omp_get_dynamic, false, Int32, )
+__OMP_RTL(omp_get_cancellation, false, Int32, )
+__OMP_RTL(omp_get_nested, false, Int32, )
+__OMP_RTL(omp_get_schedule, false, Void, Int32Ptr, Int32Ptr)
+__OMP_RTL(omp_get_thread_limit, false, Int32, )
+__OMP_RTL(omp_get_supported_active_levels, false, Int32, )
+__OMP_RTL(omp_get_max_active_levels, false, Int32, )
+__OMP_RTL(omp_get_level, false, Int32, )
+__OMP_RTL(omp_get_ancestor_thread_num, false, Int32, Int32)
+__OMP_RTL(omp_get_team_size, false, Int32, Int32)
+__OMP_RTL(omp_get_active_level, false, Int32, )
+__OMP_RTL(omp_in_final, false, Int32, )
+__OMP_RTL(omp_get_proc_bind, false, Int32, )
+__OMP_RTL(omp_get_num_places, false, Int32, )
+__OMP_RTL(omp_get_num_procs, false, Int32, )
+__OMP_RTL(omp_get_place_proc_ids, false, Void, Int32, Int32Ptr)
+__OMP_RTL(omp_get_place_num, false, Int32, )
+__OMP_RTL(omp_get_partition_num_places, false, Int32, )
+__OMP_RTL(omp_get_partition_place_nums, false, Void, Int32Ptr)
+__OMP_RTL(omp_get_wtime, false, Double,)
+
+__OMP_RTL(omp_set_num_threads, false, Void, Int32)
+__OMP_RTL(omp_set_dynamic, false, Void, Int32)
+__OMP_RTL(omp_set_nested, false, Void, Int32)
+__OMP_RTL(omp_set_schedule, false, Void, Int32, Int32)
+__OMP_RTL(omp_set_max_active_levels, false, Void, Int32)
+
+__OMP_RTL(__kmpc_master, false, Int32, IdentPtr, Int32)
+__OMP_RTL(__kmpc_end_master, false, Void, IdentPtr, Int32)
+__OMP_RTL(__kmpc_masked, false, Int32, IdentPtr, Int32, Int32)
+__OMP_RTL(__kmpc_end_masked, false, Void, IdentPtr, Int32)
+__OMP_RTL(__kmpc_critical, false, Void, IdentPtr, Int32, KmpCriticalNamePtrTy)
+__OMP_RTL(__kmpc_critical_with_hint, false, Void, IdentPtr, Int32,
+ KmpCriticalNamePtrTy, Int32)
+__OMP_RTL(__kmpc_end_critical, false, Void, IdentPtr, Int32,
+ KmpCriticalNamePtrTy)
+
+__OMP_RTL(__kmpc_begin, false, Void, IdentPtr, Int32)
+__OMP_RTL(__kmpc_end, false, Void, IdentPtr)
+
+__OMP_RTL(__kmpc_reduce, false, Int32, IdentPtr, Int32, Int32, SizeTy, VoidPtr,
+ ReduceFunctionPtr, KmpCriticalNamePtrTy)
+__OMP_RTL(__kmpc_reduce_nowait, false, Int32, IdentPtr, Int32, Int32, SizeTy,
+ VoidPtr, ReduceFunctionPtr, KmpCriticalNamePtrTy)
+__OMP_RTL(__kmpc_end_reduce, false, Void, IdentPtr, Int32, KmpCriticalNamePtrTy)
+__OMP_RTL(__kmpc_end_reduce_nowait, false, Void, IdentPtr, Int32,
+ KmpCriticalNamePtrTy)
+
+__OMP_RTL(__kmpc_ordered, false, Void, IdentPtr, Int32)
+__OMP_RTL(__kmpc_end_ordered, false, Void, IdentPtr, Int32)
+
+__OMP_RTL(__kmpc_for_static_init_4, false, Void, IdentPtr, Int32, Int32,
+ Int32Ptr, Int32Ptr, Int32Ptr, Int32Ptr, Int32, Int32)
+__OMP_RTL(__kmpc_for_static_init_4u, false, Void, IdentPtr, Int32, Int32,
+ Int32Ptr, Int32Ptr, Int32Ptr, Int32Ptr, Int32, Int32)
+__OMP_RTL(__kmpc_for_static_init_8, false, Void, IdentPtr, Int32, Int32,
+ Int32Ptr, Int64Ptr, Int64Ptr, Int64Ptr, Int64, Int64)
+__OMP_RTL(__kmpc_for_static_init_8u, false, Void, IdentPtr, Int32, Int32,
+ Int32Ptr, Int64Ptr, Int64Ptr, Int64Ptr, Int64, Int64)
+__OMP_RTL(__kmpc_for_static_fini, false, Void, IdentPtr, Int32)
+__OMP_RTL(__kmpc_distribute_static_init_4, false, Void, IdentPtr, Int32, Int32,
+ Int32Ptr, Int32Ptr, Int32Ptr, Int32Ptr, Int32, Int32)
+__OMP_RTL(__kmpc_distribute_static_init_4u, false, Void, IdentPtr, Int32, Int32,
+ Int32Ptr, Int32Ptr, Int32Ptr, Int32Ptr, Int32, Int32)
+__OMP_RTL(__kmpc_distribute_static_init_8, false, Void, IdentPtr, Int32, Int32,
+ Int32Ptr, Int64Ptr, Int64Ptr, Int64Ptr, Int64, Int64)
+__OMP_RTL(__kmpc_distribute_static_init_8u, false, Void, IdentPtr, Int32, Int32,
+ Int32Ptr, Int64Ptr, Int64Ptr, Int64Ptr, Int64, Int64)
+__OMP_RTL(__kmpc_distribute_static_fini, false, Void, IdentPtr, Int32)
+__OMP_RTL(__kmpc_dist_dispatch_init_4, false, Void, IdentPtr, Int32, Int32,
+ Int32Ptr, Int32, Int32, Int32, Int32)
+__OMP_RTL(__kmpc_dist_dispatch_init_4u, false, Void, IdentPtr, Int32, Int32,
+ Int32Ptr, Int32, Int32, Int32, Int32)
+__OMP_RTL(__kmpc_dist_dispatch_init_8, false, Void, IdentPtr, Int32, Int32,
+ Int32Ptr, Int64, Int64, Int64, Int64)
+__OMP_RTL(__kmpc_dist_dispatch_init_8u, false, Void, IdentPtr, Int32, Int32,
+ Int32Ptr, Int64, Int64, Int64, Int64)
+__OMP_RTL(__kmpc_dispatch_init_4, false, Void, IdentPtr, Int32, Int32, Int32,
+ Int32, Int32, Int32)
+__OMP_RTL(__kmpc_dispatch_init_4u, false, Void, IdentPtr, Int32, Int32, Int32,
+ Int32, Int32, Int32)
+__OMP_RTL(__kmpc_dispatch_init_8, false, Void, IdentPtr, Int32, Int32, Int64,
+ Int64, Int64, Int64)
+__OMP_RTL(__kmpc_dispatch_init_8u, false, Void, IdentPtr, Int32, Int32, Int64,
+ Int64, Int64, Int64)
+__OMP_RTL(__kmpc_dispatch_next_4, false, Int32, IdentPtr, Int32, Int32Ptr,
+ Int32Ptr, Int32Ptr, Int32Ptr)
+__OMP_RTL(__kmpc_dispatch_next_4u, false, Int32, IdentPtr, Int32, Int32Ptr,
+ Int32Ptr, Int32Ptr, Int32Ptr)
+__OMP_RTL(__kmpc_dispatch_next_8, false, Int32, IdentPtr, Int32, Int32Ptr,
+ Int64Ptr, Int64Ptr, Int64Ptr)
+__OMP_RTL(__kmpc_dispatch_next_8u, false, Int32, IdentPtr, Int32, Int32Ptr,
+ Int64Ptr, Int64Ptr, Int64Ptr)
+__OMP_RTL(__kmpc_dispatch_fini_4, false, Void, IdentPtr, Int32)
+__OMP_RTL(__kmpc_dispatch_fini_4u, false, Void, IdentPtr, Int32)
+__OMP_RTL(__kmpc_dispatch_fini_8, false, Void, IdentPtr, Int32)
+__OMP_RTL(__kmpc_dispatch_fini_8u, false, Void, IdentPtr, Int32)
+__OMP_RTL(__kmpc_team_static_init_4, false, Void, IdentPtr, Int32, Int32Ptr,
+ Int32Ptr, Int32Ptr, Int32Ptr, Int32, Int32)
+__OMP_RTL(__kmpc_team_static_init_4u, false, Void, IdentPtr, Int32, Int32Ptr,
+ Int32Ptr, Int32Ptr, Int32Ptr, Int32, Int32)
+__OMP_RTL(__kmpc_team_static_init_8, false, Void, IdentPtr, Int32, Int32Ptr,
+ Int64Ptr, Int64Ptr, Int64Ptr, Int64, Int64)
+__OMP_RTL(__kmpc_team_static_init_8u, false, Void, IdentPtr, Int32, Int32Ptr,
+ Int64Ptr, Int64Ptr, Int64Ptr, Int64, Int64)
+__OMP_RTL(__kmpc_dist_for_static_init_4, false, Void, IdentPtr, Int32, Int32,
+ Int32Ptr, Int32Ptr, Int32Ptr, Int32Ptr, Int32Ptr, Int32, Int32)
+__OMP_RTL(__kmpc_dist_for_static_init_4u, false, Void, IdentPtr, Int32, Int32,
+ Int32Ptr, Int32Ptr, Int32Ptr, Int32Ptr, Int32Ptr, Int32, Int32)
+__OMP_RTL(__kmpc_dist_for_static_init_8, false, Void, IdentPtr, Int32, Int32,
+ Int32Ptr, Int64Ptr, Int64Ptr, Int64Ptr, Int64Ptr, Int64, Int64)
+__OMP_RTL(__kmpc_dist_for_static_init_8u, false, Void, IdentPtr, Int32, Int32,
+ Int32Ptr, Int64Ptr, Int64Ptr, Int64Ptr, Int64Ptr, Int64, Int64)
+
+__OMP_RTL(__kmpc_single, false, Int32, IdentPtr, Int32)
+__OMP_RTL(__kmpc_end_single, false, Void, IdentPtr, Int32)
+
+__OMP_RTL(__kmpc_omp_task_alloc, false, /* kmp_task_t */ VoidPtr, IdentPtr,
+ Int32, Int32, SizeTy, SizeTy, TaskRoutineEntryPtr)
+__OMP_RTL(__kmpc_omp_task, false, Int32, IdentPtr, Int32,
+ /* kmp_task_t */ VoidPtr)
+__OMP_RTL(__kmpc_end_taskgroup, false, Void, IdentPtr, Int32)
+__OMP_RTL(__kmpc_taskgroup, false, Void, IdentPtr, Int32)
+__OMP_RTL(__kmpc_omp_task_begin_if0, false, Void, IdentPtr, Int32,
+ /* kmp_task_t */ VoidPtr)
+__OMP_RTL(__kmpc_omp_task_complete_if0, false, Void, IdentPtr, Int32,
+ /* kmp_task_t */ VoidPtr)
+__OMP_RTL(__kmpc_omp_task_with_deps, false, Int32, IdentPtr, Int32,
+ /* kmp_task_t */ VoidPtr, Int32,
+ /* kmp_depend_info_t */ VoidPtr, Int32,
+ /* kmp_depend_info_t */ VoidPtr)
+__OMP_RTL(__kmpc_taskloop, false, Void, IdentPtr, /* Int */ Int32, VoidPtr,
+ /* Int */ Int32, Int64Ptr, Int64Ptr, Int64, /* Int */ Int32,
+ /* Int */ Int32, Int64, VoidPtr)
+__OMP_RTL(__kmpc_omp_target_task_alloc, false, /* kmp_task_t */ VoidPtr,
+ IdentPtr, Int32, Int32, SizeTy, SizeTy, TaskRoutineEntryPtr, Int64)
+__OMP_RTL(__kmpc_taskred_modifier_init, false, /* kmp_taskgroup */ VoidPtr,
+ IdentPtr, /* Int */ Int32, /* Int */ Int32, /* Int */ Int32, VoidPtr)
+__OMP_RTL(__kmpc_taskred_init, false, /* kmp_taskgroup */ VoidPtr,
+ /* Int */ Int32, /* Int */ Int32, VoidPtr)
+__OMP_RTL(__kmpc_task_reduction_modifier_fini, false, Void, IdentPtr,
+ /* Int */ Int32, /* Int */ Int32)
+__OMP_RTL(__kmpc_task_reduction_get_th_data, false, VoidPtr, Int32, VoidPtr,
+ VoidPtr)
+__OMP_RTL(__kmpc_task_reduction_init, false, VoidPtr, Int32, Int32, VoidPtr)
+__OMP_RTL(__kmpc_task_reduction_modifier_init, false, VoidPtr, VoidPtr, Int32,
+ Int32, Int32, VoidPtr)
+__OMP_RTL(__kmpc_proxy_task_completed_ooo, false, Void, VoidPtr)
+
+__OMP_RTL(__kmpc_omp_wait_deps, false, Void, IdentPtr, Int32, Int32,
+ /* kmp_depend_info_t */ VoidPtr, Int32, VoidPtr)
+__OMP_RTL(__kmpc_omp_taskwait_deps_51, false, Void, IdentPtr, Int32, Int32,
+ /* kmp_depend_info_t */ VoidPtr, Int32, VoidPtr, Int32)
+__OMP_RTL(__kmpc_cancellationpoint, false, Int32, IdentPtr, Int32, Int32)
+
+__OMP_RTL(__kmpc_fork_teams, true, Void, IdentPtr, Int32, ParallelTaskPtr)
+__OMP_RTL(__kmpc_push_num_teams, false, Void, IdentPtr, Int32, Int32, Int32)
+
+__OMP_RTL(__kmpc_copyprivate, false, Void, IdentPtr, Int32, SizeTy, VoidPtr,
+ CopyFunctionPtr, Int32)
+__OMP_RTL(__kmpc_threadprivate_cached, false, VoidPtr, IdentPtr, Int32, VoidPtr,
+ SizeTy, VoidPtrPtrPtr)
+__OMP_RTL(__kmpc_threadprivate_register, false, Void, IdentPtr, VoidPtr,
+ KmpcCtorPtr, KmpcCopyCtorPtr, KmpcDtorPtr)
+
+__OMP_RTL(__kmpc_doacross_init, false, Void, IdentPtr, Int32, Int32,
+ /* kmp_dim */ VoidPtr)
+__OMP_RTL(__kmpc_doacross_post, false, Void, IdentPtr, Int32, Int64Ptr)
+__OMP_RTL(__kmpc_doacross_wait, false, Void, IdentPtr, Int32, Int64Ptr)
+__OMP_RTL(__kmpc_doacross_fini, false, Void, IdentPtr, Int32)
+
+__OMP_RTL(__kmpc_alloc, false, VoidPtr, /* Int */ Int32, SizeTy, VoidPtr)
+__OMP_RTL(__kmpc_aligned_alloc, false, VoidPtr, /* Int */ Int32, SizeTy, SizeTy,
+ VoidPtr)
+__OMP_RTL(__kmpc_free, false, Void, /* Int */ Int32, VoidPtr, VoidPtr)
+
+__OMP_RTL(__tgt_interop_init, false, Void, IdentPtr, Int32, VoidPtrPtr, Int32,
+ Int32, Int64, VoidPtr, Int32)
+__OMP_RTL(__tgt_interop_destroy, false, Void, IdentPtr, Int32, VoidPtrPtr,
+ Int32, Int32, VoidPtr, Int32)
+__OMP_RTL(__tgt_interop_use, false, Void, IdentPtr, Int32, VoidPtrPtr, Int32,
+ Int32, VoidPtr, Int32)
+
+__OMP_RTL(__kmpc_init_allocator, false, /* omp_allocator_handle_t */ VoidPtr,
+ /* Int */ Int32, /* omp_memspace_handle_t */ VoidPtr,
+ /* Int */ Int32, /* omp_alloctrait_t */ VoidPtr)
+__OMP_RTL(__kmpc_destroy_allocator, false, Void, /* Int */ Int32,
+ /* omp_allocator_handle_t */ VoidPtr)
+
+__OMP_RTL(__kmpc_push_target_tripcount_mapper, false, Void, IdentPtr, Int64, Int64)
+__OMP_RTL(__tgt_target_mapper, false, Int32, IdentPtr, Int64, VoidPtr, Int32, VoidPtrPtr,
+ VoidPtrPtr, Int64Ptr, Int64Ptr, VoidPtrPtr, VoidPtrPtr)
+__OMP_RTL(__tgt_target_nowait_mapper, false, Int32, IdentPtr, Int64, VoidPtr,
+ Int32, VoidPtrPtr, VoidPtrPtr, Int64Ptr, Int64Ptr, VoidPtrPtr,
+ VoidPtrPtr, Int32, VoidPtr, Int32, VoidPtr)
+__OMP_RTL(__tgt_target_teams_mapper, false, Int32, IdentPtr, Int64, VoidPtr, Int32,
+ VoidPtrPtr, VoidPtrPtr, Int64Ptr, Int64Ptr, VoidPtrPtr, VoidPtrPtr, Int32, Int32)
+__OMP_RTL(__tgt_target_teams_nowait_mapper, false, Int32, IdentPtr, Int64,
+ VoidPtr, Int32, VoidPtrPtr, VoidPtrPtr, Int64Ptr, Int64Ptr,
+ VoidPtrPtr, VoidPtrPtr, Int32, Int32, Int32, VoidPtr, Int32, VoidPtr)
+__OMP_RTL(__tgt_target_kernel, false, Int32, IdentPtr, Int64, Int32, Int32,
+ VoidPtr, KernelArgsPtr)
+__OMP_RTL(__tgt_target_kernel_nowait, false, Int32, IdentPtr, Int64, Int32,
+ Int32, VoidPtr, KernelArgsPtr, Int32, VoidPtr, Int32, VoidPtr)
+__OMP_RTL(__tgt_register_requires, false, Void, Int64)
+__OMP_RTL(__tgt_target_data_begin_mapper, false, Void, IdentPtr, Int64, Int32, VoidPtrPtr,
+ VoidPtrPtr, Int64Ptr, Int64Ptr, VoidPtrPtr, VoidPtrPtr)
+__OMP_RTL(__tgt_target_data_begin_nowait_mapper, false, Void, IdentPtr, Int64, Int32,
+ VoidPtrPtr, VoidPtrPtr, Int64Ptr, Int64Ptr, VoidPtrPtr, VoidPtrPtr)
+__OMP_RTL(__tgt_target_data_begin_mapper_issue, false, Void, IdentPtr, Int64, Int32,
+ VoidPtrPtr, VoidPtrPtr, Int64Ptr, Int64Ptr, VoidPtrPtr, VoidPtrPtr, AsyncInfoPtr)
+__OMP_RTL(__tgt_target_data_begin_mapper_wait, false, Void, Int64, AsyncInfoPtr)
+__OMP_RTL(__tgt_target_data_end_mapper, false, Void, IdentPtr, Int64, Int32, VoidPtrPtr,
+ VoidPtrPtr, Int64Ptr, Int64Ptr, VoidPtrPtr, VoidPtrPtr)
+__OMP_RTL(__tgt_target_data_end_nowait_mapper, false, Void, IdentPtr, Int64, Int32,
+ VoidPtrPtr, VoidPtrPtr, Int64Ptr, Int64Ptr, VoidPtrPtr, VoidPtrPtr)
+__OMP_RTL(__tgt_target_data_update_mapper, false, Void, IdentPtr, Int64, Int32,
+ VoidPtrPtr, VoidPtrPtr, Int64Ptr, Int64Ptr, VoidPtrPtr, VoidPtrPtr)
+__OMP_RTL(__tgt_target_data_update_nowait_mapper, false, Void, IdentPtr, Int64, Int32,
+ VoidPtrPtr, VoidPtrPtr, Int64Ptr, Int64Ptr, VoidPtrPtr, VoidPtrPtr)
+__OMP_RTL(__tgt_mapper_num_components, false, Int64, VoidPtr)
+__OMP_RTL(__tgt_push_mapper_component, false, Void, VoidPtr, VoidPtr, VoidPtr,
+ Int64, Int64, VoidPtr)
+__OMP_RTL(__kmpc_task_allow_completion_event, false, VoidPtr, IdentPtr,
+ /* Int */ Int32, /* kmp_task_t */ VoidPtr)
+
+/// OpenMP Device runtime functions
+__OMP_RTL(__kmpc_target_init, false, Int32, IdentPtr, Int8, Int1)
+__OMP_RTL(__kmpc_target_deinit, false, Void, IdentPtr, Int8)
+__OMP_RTL(__kmpc_kernel_prepare_parallel, false, Void, VoidPtr)
+__OMP_RTL(__kmpc_parallel_51, false, Void, IdentPtr, Int32, Int32, Int32, Int32,
+ VoidPtr, VoidPtr, VoidPtrPtr, SizeTy)
+__OMP_RTL(__kmpc_kernel_parallel, false, Int1, VoidPtrPtr)
+__OMP_RTL(__kmpc_kernel_end_parallel, false, Void, )
+__OMP_RTL(__kmpc_serialized_parallel, false, Void, IdentPtr, Int32)
+__OMP_RTL(__kmpc_end_serialized_parallel, false, Void, IdentPtr, Int32)
+__OMP_RTL(__kmpc_shuffle_int32, false, Int32, Int32, Int16, Int16)
+__OMP_RTL(__kmpc_nvptx_parallel_reduce_nowait_v2, false, Int32, IdentPtr, Int32,
+ Int32, SizeTy, VoidPtr, ShuffleReducePtr, InterWarpCopyPtr)
+__OMP_RTL(__kmpc_nvptx_end_reduce_nowait, false, Void, Int32)
+__OMP_RTL(__kmpc_nvptx_teams_reduce_nowait_v2, false, Int32, IdentPtr, Int32,
+ VoidPtr, Int32, VoidPtr, ShuffleReducePtr, InterWarpCopyPtr,
+ GlobalListPtr, GlobalListPtr, GlobalListPtr, GlobalListPtr)
+
+__OMP_RTL(__kmpc_shuffle_int64, false, Int64, Int64, Int16, Int16)
+
+__OMP_RTL(__kmpc_alloc_shared, false, VoidPtr, SizeTy)
+__OMP_RTL(__kmpc_free_shared, false, Void, VoidPtr, SizeTy)
+__OMP_RTL(__kmpc_begin_sharing_variables, false, Void, VoidPtrPtrPtr, SizeTy)
+__OMP_RTL(__kmpc_end_sharing_variables, false, Void, )
+__OMP_RTL(__kmpc_get_shared_variables, false, Void, VoidPtrPtrPtr)
+__OMP_RTL(__kmpc_parallel_level, false, Int16, IdentPtr, Int32)
+__OMP_RTL(__kmpc_is_spmd_exec_mode, false, Int8, )
+__OMP_RTL(__kmpc_barrier_simple_spmd, false, Void, IdentPtr, Int32)
+__OMP_RTL(__kmpc_barrier_simple_generic, false, Void, IdentPtr, Int32)
+
+__OMP_RTL(__kmpc_warp_active_thread_mask, false, Int64,)
+__OMP_RTL(__kmpc_syncwarp, false, Void, Int64)
+
+__OMP_RTL(__last, false, Void, )
+
+#undef __OMP_RTL
+#undef OMP_RTL
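+
+// Usage sketch (mirrors OMPConstants.h): expanding OMP_RTL over this list
+// yields the RuntimeFunction enum; OMPIRBuilder expands it again to build
+// each function's FunctionType from ReturnType and the parameter type list.
+//
+//   #define OMP_RTL(Enum, Str, IsVarArg, ReturnType, ...) Enum,
+//   enum class RuntimeFunction {
+//   #include "llvm/Frontend/OpenMP/OMPKinds.def"
+//   };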
+
+#define ParamAttrs(...) ArrayRef<AttributeSet>({__VA_ARGS__})
+#define EnumAttr(Kind) Attribute::get(Ctx, Attribute::AttrKind::Kind)
+#define EnumAttrInt(Kind, N) Attribute::get(Ctx, Attribute::AttrKind::Kind, N)
+#define AllocSizeAttr(N, M) Attribute::getWithAllocSizeArgs(Ctx, N, M)
+#define MemoryAttr(ME) Attribute::getWithMemoryEffects(Ctx, ME)
+#define AttributeSet(...) \
+ AttributeSet::get(Ctx, ArrayRef<Attribute>({__VA_ARGS__}))
+
+#ifndef OMP_ATTRS_SET
+#define OMP_ATTRS_SET(VarName, AttrSet)
+#endif
+
+#define __OMP_ATTRS_SET(VarName, AttrSet) OMP_ATTRS_SET(VarName, AttrSet)
+
+__OMP_ATTRS_SET(
+ GetterAttrs,
+ OptimisticAttributes
+ ? AttributeSet(
+ EnumAttr(NoUnwind), EnumAttr(NoSync), EnumAttr(NoFree),
+ EnumAttr(WillReturn),
+ MemoryAttr(MemoryEffects::inaccessibleMemOnly(ModRefInfo::Ref)))
+ : AttributeSet(EnumAttr(NoUnwind)))
+__OMP_ATTRS_SET(
+ GetterArgWriteAttrs,
+ OptimisticAttributes
+ ? AttributeSet(EnumAttr(NoUnwind), EnumAttr(NoSync), EnumAttr(NoFree),
+ EnumAttr(WillReturn),
+ MemoryAttr(MemoryEffects::inaccessibleOrArgMemOnly()))
+ : AttributeSet(EnumAttr(NoUnwind)))
+__OMP_ATTRS_SET(
+ SetterAttrs,
+ OptimisticAttributes
+ ? AttributeSet(
+ EnumAttr(NoUnwind), EnumAttr(NoSync), EnumAttr(NoFree),
+ EnumAttr(WillReturn),
+ MemoryAttr(MemoryEffects::inaccessibleMemOnly(ModRefInfo::Mod)))
+ : AttributeSet(EnumAttr(NoUnwind)))
+
+__OMP_ATTRS_SET(DefaultAttrs,
+ OptimisticAttributes
+ ? AttributeSet(EnumAttr(NoUnwind), EnumAttr(NoSync),
+ EnumAttr(WillReturn), EnumAttr(NoFree))
+ : AttributeSet(EnumAttr(NoUnwind)))
+
+__OMP_ATTRS_SET(BarrierAttrs,
+ OptimisticAttributes
+ ? AttributeSet(EnumAttr(NoUnwind), EnumAttr(Convergent))
+ : AttributeSet(EnumAttr(NoUnwind), EnumAttr(Convergent)))
+
+__OMP_ATTRS_SET(
+ InaccessibleArgOnlyAttrs,
+ OptimisticAttributes
+ ? AttributeSet(EnumAttr(NoUnwind), EnumAttr(NoSync), EnumAttr(NoFree),
+ EnumAttr(WillReturn),
+ MemoryAttr(MemoryEffects::inaccessibleOrArgMemOnly()))
+ : AttributeSet(EnumAttr(NoUnwind)))
+
+__OMP_ATTRS_SET(AlwaysInlineAttrs,
+ OptimisticAttributes
+ ? AttributeSet(EnumAttr(AlwaysInline))
+ : AttributeSet(EnumAttr(AlwaysInline)))
+
+#if 0
+__OMP_ATTRS_SET(
+ InaccessibleOnlyAttrs,
+ OptimisticAttributes
+ ? AttributeSet(EnumAttr(NoUnwind), EnumAttr(NoSync), EnumAttr(NoFree),
+ EnumAttr(WillReturn),
+ MemoryAttr(MemoryEffects::inaccessibleMemOnly()))
+ : AttributeSet(EnumAttr(NoUnwind)))
+#endif
+
+__OMP_ATTRS_SET(AllocAttrs,
+ OptimisticAttributes
+ ? AttributeSet(EnumAttr(NoUnwind), EnumAttr(NoSync),
+ EnumAttr(WillReturn))
+ : AttributeSet(EnumAttr(NoUnwind)))
+
+__OMP_ATTRS_SET(ForkAttrs, OptimisticAttributes
+ ? AttributeSet(EnumAttr(NoUnwind))
+ : AttributeSet(EnumAttr(NoUnwind)))
+
+__OMP_ATTRS_SET(ReadOnlyPtrAttrs,
+ OptimisticAttributes
+ ? AttributeSet(EnumAttr(ReadOnly), EnumAttr(NoFree),
+ EnumAttr(NoCapture))
+ : AttributeSet())
+
+__OMP_ATTRS_SET(DeviceAllocAttrs,
+ OptimisticAttributes
+ ? AttributeSet(EnumAttr(NoUnwind), EnumAttr(NoSync))
+ : AttributeSet(EnumAttr(NoUnwind), EnumAttr(NoSync)))
+
+#if 0
+__OMP_ATTRS_SET(WriteOnlyPtrAttrs,
+ OptimisticAttributes
+ ? AttributeSet(EnumAttr(WriteOnly), EnumAttr(NoFree),
+ EnumAttr(NoCapture))
+ : AttributeSet())
+#endif
+
+__OMP_ATTRS_SET(ArgPtrAttrs,
+ OptimisticAttributes
+ ? AttributeSet(EnumAttr(NoCapture), EnumAttr(NoFree))
+ : AttributeSet())
+
+__OMP_ATTRS_SET(ReturnPtrAttrs,
+ OptimisticAttributes
+ ? AttributeSet(EnumAttr(NoAlias))
+ : AttributeSet())
+
+__OMP_ATTRS_SET(ZExt, AttributeSet(EnumAttr(ZExt)))
+__OMP_ATTRS_SET(SExt, AttributeSet(EnumAttr(SExt)))
+__OMP_ATTRS_SET(SizeTyExt,
+ M.getDataLayout().getIntPtrType(Ctx)->getBitWidth() < 64
+ ? AttributeSet(EnumAttr(ZExt))
+ : AttributeSet())
+
+#if 0
+__OMP_ATTRS_SET(ReturnAlignedPtrAttrs,
+ OptimisticAttributes
+ ? AttributeSet(EnumAttr(NoAlias), EnumAttrInt(Alignment, 8),
+ EnumAttrInt(DereferenceableOrNull, 8))
+ : AttributeSet())
+#endif
+
+#undef __OMP_ATTRS_SET
+#undef OMP_ATTRS_SET
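+
+// Usage sketch: a consumer materializes each named set as a local variable
+// (OptimisticAttributes, Ctx, and M must be in scope, as the definitions
+// above require) so the OMP_RTL_ATTRS entries below can refer to it by name:
+//
+//   #define OMP_ATTRS_SET(VarName, AttrSet) AttributeSet VarName = AttrSet;
+//   #include "llvm/Frontend/OpenMP/OMPKinds.def"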
+
+#ifndef OMP_RTL_ATTRS
+#define OMP_RTL_ATTRS(Enum, FnAttrSet, RetAttrSet, ArgAttrSets)
+#endif
+
+#define __OMP_RTL_ATTRS(Name, FnAttrSet, RetAttrSet, ArgAttrSets) \
+ OMP_RTL_ATTRS(OMPRTL_##Name, FnAttrSet, RetAttrSet, ArgAttrSets)
+
+__OMP_RTL_ATTRS(__kmpc_barrier, BarrierAttrs, AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt))
+__OMP_RTL_ATTRS(__kmpc_barrier_simple_spmd, BarrierAttrs, AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt))
+__OMP_RTL_ATTRS(__kmpc_barrier_simple_generic, BarrierAttrs, AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt))
+__OMP_RTL_ATTRS(__kmpc_warp_active_thread_mask, BarrierAttrs, AttributeSet(),
+ ParamAttrs())
+__OMP_RTL_ATTRS(__kmpc_syncwarp, BarrierAttrs, AttributeSet(), ParamAttrs())
+__OMP_RTL_ATTRS(__kmpc_cancel, InaccessibleArgOnlyAttrs, SExt,
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt))
+__OMP_RTL_ATTRS(__kmpc_cancel_barrier, BarrierAttrs, SExt,
+ ParamAttrs(ReadOnlyPtrAttrs, SExt))
+__OMP_RTL_ATTRS(__kmpc_error, AttributeSet(), AttributeSet(),
+ ParamAttrs(AttributeSet(), SExt))
+__OMP_RTL_ATTRS(__kmpc_flush, BarrierAttrs, AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs))
+__OMP_RTL_ATTRS(__kmpc_global_thread_num, GetterAttrs, SExt,
+ ParamAttrs(ReadOnlyPtrAttrs))
+__OMP_RTL_ATTRS(__kmpc_get_hardware_thread_id_in_block, GetterAttrs, ZExt,
+ ParamAttrs())
+__OMP_RTL_ATTRS(__kmpc_fork_call, ForkAttrs, AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, ReadOnlyPtrAttrs))
+__OMP_RTL_ATTRS(__kmpc_fork_call_if, AttributeSet(), AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, ReadOnlyPtrAttrs, SExt))
+__OMP_RTL_ATTRS(__kmpc_omp_taskwait, BarrierAttrs, SExt,
+ ParamAttrs(ReadOnlyPtrAttrs, SExt))
+__OMP_RTL_ATTRS(__kmpc_omp_taskyield, InaccessibleArgOnlyAttrs, SExt,
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt))
+__OMP_RTL_ATTRS(__kmpc_push_num_threads, InaccessibleArgOnlyAttrs,
+ AttributeSet(), ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt))
+__OMP_RTL_ATTRS(__kmpc_push_proc_bind, InaccessibleArgOnlyAttrs, AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt))
+__OMP_RTL_ATTRS(__kmpc_omp_reg_task_with_affinity, DefaultAttrs, SExt,
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, ReadOnlyPtrAttrs,
+ SExt, ReadOnlyPtrAttrs))
+
+__OMP_RTL_ATTRS(__kmpc_get_hardware_num_blocks, GetterAttrs, ZExt, ParamAttrs())
+__OMP_RTL_ATTRS(__kmpc_get_hardware_num_threads_in_block, GetterAttrs, ZExt, ParamAttrs())
+__OMP_RTL_ATTRS(__kmpc_get_warp_size, GetterAttrs, ZExt, ParamAttrs())
+
+__OMP_RTL_ATTRS(omp_get_thread_num, GetterAttrs, SExt, ParamAttrs())
+__OMP_RTL_ATTRS(omp_get_num_threads, GetterAttrs, SExt, ParamAttrs())
+__OMP_RTL_ATTRS(omp_get_max_threads, GetterAttrs, SExt, ParamAttrs())
+__OMP_RTL_ATTRS(omp_in_parallel, GetterAttrs, SExt, ParamAttrs())
+__OMP_RTL_ATTRS(omp_get_dynamic, GetterAttrs, SExt, ParamAttrs())
+__OMP_RTL_ATTRS(omp_get_cancellation, GetterAttrs, SExt, ParamAttrs())
+__OMP_RTL_ATTRS(omp_get_nested, GetterAttrs, SExt, ParamAttrs())
+__OMP_RTL_ATTRS(
+ omp_get_schedule, GetterArgWriteAttrs, AttributeSet(),
+ ParamAttrs(AttributeSet(EnumAttr(NoCapture), EnumAttr(WriteOnly)),
+ AttributeSet(EnumAttr(NoCapture), EnumAttr(WriteOnly))))
+__OMP_RTL_ATTRS(omp_get_thread_limit, GetterAttrs, SExt, ParamAttrs())
+__OMP_RTL_ATTRS(omp_get_supported_active_levels, GetterAttrs, SExt, ParamAttrs())
+__OMP_RTL_ATTRS(omp_get_max_active_levels, GetterAttrs, SExt, ParamAttrs())
+__OMP_RTL_ATTRS(omp_get_level, GetterAttrs, SExt, ParamAttrs())
+__OMP_RTL_ATTRS(omp_get_ancestor_thread_num, GetterAttrs, SExt, ParamAttrs(SExt))
+__OMP_RTL_ATTRS(omp_get_team_size, GetterAttrs, SExt, ParamAttrs(SExt))
+__OMP_RTL_ATTRS(omp_get_active_level, GetterAttrs, SExt, ParamAttrs())
+__OMP_RTL_ATTRS(omp_in_final, GetterAttrs, SExt, ParamAttrs())
+__OMP_RTL_ATTRS(omp_get_proc_bind, GetterAttrs, SExt, ParamAttrs())
+__OMP_RTL_ATTRS(omp_get_num_places, GetterAttrs, SExt, ParamAttrs())
+__OMP_RTL_ATTRS(omp_get_num_procs, GetterAttrs, SExt, ParamAttrs())
+__OMP_RTL_ATTRS(omp_get_place_proc_ids, GetterArgWriteAttrs, AttributeSet(),
+ ParamAttrs(SExt, AttributeSet(EnumAttr(NoCapture),
+ EnumAttr(WriteOnly))))
+__OMP_RTL_ATTRS(omp_get_place_num, GetterAttrs, SExt, ParamAttrs())
+__OMP_RTL_ATTRS(omp_get_partition_num_places, GetterAttrs, SExt, ParamAttrs())
+__OMP_RTL_ATTRS(omp_get_partition_place_nums, GetterAttrs, AttributeSet(),
+ ParamAttrs())
+__OMP_RTL_ATTRS(omp_get_wtime, GetterArgWriteAttrs, AttributeSet(), ParamAttrs())
+
+__OMP_RTL_ATTRS(omp_set_num_threads, SetterAttrs, AttributeSet(),
+ ParamAttrs(SExt))
+__OMP_RTL_ATTRS(omp_set_dynamic, SetterAttrs, AttributeSet(), ParamAttrs(SExt))
+__OMP_RTL_ATTRS(omp_set_nested, SetterAttrs, AttributeSet(), ParamAttrs(SExt))
+__OMP_RTL_ATTRS(omp_set_schedule, SetterAttrs, AttributeSet(),
+ ParamAttrs(SExt, SExt))
+__OMP_RTL_ATTRS(omp_set_max_active_levels, SetterAttrs, AttributeSet(),
+ ParamAttrs(SExt))
+
+__OMP_RTL_ATTRS(__kmpc_master, InaccessibleArgOnlyAttrs, SExt,
+ ParamAttrs(ReadOnlyPtrAttrs, SExt))
+__OMP_RTL_ATTRS(__kmpc_end_master, InaccessibleArgOnlyAttrs, AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt))
+__OMP_RTL_ATTRS(__kmpc_masked, InaccessibleArgOnlyAttrs, SExt,
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt))
+__OMP_RTL_ATTRS(__kmpc_end_masked, InaccessibleArgOnlyAttrs, AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt))
+__OMP_RTL_ATTRS(__kmpc_critical, BarrierAttrs, AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, AttributeSet()))
+__OMP_RTL_ATTRS(__kmpc_critical_with_hint, BarrierAttrs, AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, AttributeSet(), ZExt))
+__OMP_RTL_ATTRS(__kmpc_end_critical, BarrierAttrs, AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, AttributeSet()))
+
+__OMP_RTL_ATTRS(__kmpc_begin, DefaultAttrs, AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt))
+__OMP_RTL_ATTRS(__kmpc_end, DefaultAttrs, AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs))
+
+__OMP_RTL_ATTRS(__kmpc_reduce, BarrierAttrs, SExt,
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt, SizeTyExt,
+ ReadOnlyPtrAttrs, AttributeSet()))
+__OMP_RTL_ATTRS(__kmpc_reduce_nowait, BarrierAttrs, SExt,
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt, SizeTyExt,
+ ReadOnlyPtrAttrs, AttributeSet()))
+__OMP_RTL_ATTRS(__kmpc_end_reduce, BarrierAttrs, AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, AttributeSet()))
+__OMP_RTL_ATTRS(__kmpc_end_reduce_nowait, BarrierAttrs, AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, AttributeSet()))
+
+__OMP_RTL_ATTRS(__kmpc_ordered, BarrierAttrs, AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt))
+__OMP_RTL_ATTRS(__kmpc_end_ordered, BarrierAttrs, AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt))
+
+__OMP_RTL_ATTRS(__kmpc_for_static_init_4, GetterArgWriteAttrs, AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt, ArgPtrAttrs,
+ ArgPtrAttrs, ArgPtrAttrs, ArgPtrAttrs, SExt, SExt))
+__OMP_RTL_ATTRS(__kmpc_for_static_init_4u, GetterArgWriteAttrs, AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt, ArgPtrAttrs,
+ ArgPtrAttrs, ArgPtrAttrs, ArgPtrAttrs, SExt, SExt))
+__OMP_RTL_ATTRS(__kmpc_for_static_init_8, GetterArgWriteAttrs, AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt, ArgPtrAttrs,
+ ArgPtrAttrs, ArgPtrAttrs, ArgPtrAttrs,
+ AttributeSet(), AttributeSet()))
+__OMP_RTL_ATTRS(__kmpc_for_static_init_8u, GetterArgWriteAttrs, AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt, ArgPtrAttrs,
+ ArgPtrAttrs, ArgPtrAttrs, ArgPtrAttrs,
+ AttributeSet(), AttributeSet()))
+__OMP_RTL_ATTRS(__kmpc_for_static_fini, InaccessibleArgOnlyAttrs,
+ AttributeSet(), ParamAttrs(ReadOnlyPtrAttrs, SExt))
+__OMP_RTL_ATTRS(__kmpc_distribute_static_init_4, GetterArgWriteAttrs,
+ AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt, ArgPtrAttrs,
+ ArgPtrAttrs, ArgPtrAttrs, ArgPtrAttrs, SExt, SExt))
+__OMP_RTL_ATTRS(__kmpc_distribute_static_init_4u, GetterArgWriteAttrs,
+ AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt, ArgPtrAttrs,
+ ArgPtrAttrs, ArgPtrAttrs, ArgPtrAttrs, SExt, SExt))
+__OMP_RTL_ATTRS(__kmpc_distribute_static_init_8, GetterArgWriteAttrs,
+ AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt, ArgPtrAttrs,
+ ArgPtrAttrs, ArgPtrAttrs, ArgPtrAttrs,
+ AttributeSet(), AttributeSet()))
+__OMP_RTL_ATTRS(__kmpc_distribute_static_init_8u, GetterArgWriteAttrs,
+ AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt, ArgPtrAttrs,
+ ArgPtrAttrs, ArgPtrAttrs, ArgPtrAttrs,
+ AttributeSet(), AttributeSet()))
+__OMP_RTL_ATTRS(__kmpc_distribute_static_fini, InaccessibleArgOnlyAttrs,
+ AttributeSet(), ParamAttrs(ReadOnlyPtrAttrs, SExt))
+__OMP_RTL_ATTRS(__kmpc_dist_dispatch_init_4, GetterArgWriteAttrs,
+ AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt, ArgPtrAttrs, SExt,
+ SExt, SExt, SExt))
+__OMP_RTL_ATTRS(__kmpc_dist_dispatch_init_4u, GetterArgWriteAttrs,
+ AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt, ArgPtrAttrs, ZExt,
+ ZExt, SExt, SExt))
+__OMP_RTL_ATTRS(__kmpc_dist_dispatch_init_8, GetterArgWriteAttrs,
+ AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt, ArgPtrAttrs))
+__OMP_RTL_ATTRS(__kmpc_dist_dispatch_init_8u, GetterArgWriteAttrs,
+ AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt, ArgPtrAttrs))
+__OMP_RTL_ATTRS(__kmpc_dispatch_init_4, GetterArgWriteAttrs, AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt, SExt, SExt, SExt, SExt))
+__OMP_RTL_ATTRS(__kmpc_dispatch_init_4u, GetterArgWriteAttrs, AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt, ZExt, ZExt, SExt, SExt))
+__OMP_RTL_ATTRS(__kmpc_dispatch_init_8, GetterArgWriteAttrs, AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt))
+__OMP_RTL_ATTRS(__kmpc_dispatch_init_8u, GetterArgWriteAttrs, AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt))
+__OMP_RTL_ATTRS(__kmpc_dispatch_next_4, GetterArgWriteAttrs, SExt,
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, ArgPtrAttrs, ArgPtrAttrs,
+ ArgPtrAttrs, ArgPtrAttrs))
+__OMP_RTL_ATTRS(__kmpc_dispatch_next_4u, GetterArgWriteAttrs, SExt,
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, ArgPtrAttrs, ArgPtrAttrs,
+ ArgPtrAttrs, ArgPtrAttrs))
+__OMP_RTL_ATTRS(__kmpc_dispatch_next_8, GetterArgWriteAttrs, SExt,
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, ArgPtrAttrs, ArgPtrAttrs,
+ ArgPtrAttrs, ArgPtrAttrs))
+__OMP_RTL_ATTRS(__kmpc_dispatch_next_8u, GetterArgWriteAttrs, SExt,
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, ArgPtrAttrs, ArgPtrAttrs,
+ ArgPtrAttrs, ArgPtrAttrs))
+__OMP_RTL_ATTRS(__kmpc_dispatch_fini_4, InaccessibleArgOnlyAttrs,
+ AttributeSet(), ParamAttrs(ReadOnlyPtrAttrs, SExt))
+__OMP_RTL_ATTRS(__kmpc_dispatch_fini_4u, InaccessibleArgOnlyAttrs,
+ AttributeSet(), ParamAttrs(ReadOnlyPtrAttrs, SExt))
+__OMP_RTL_ATTRS(__kmpc_dispatch_fini_8, InaccessibleArgOnlyAttrs,
+ AttributeSet(), ParamAttrs(ReadOnlyPtrAttrs, SExt))
+__OMP_RTL_ATTRS(__kmpc_dispatch_fini_8u, InaccessibleArgOnlyAttrs,
+ AttributeSet(), ParamAttrs(ReadOnlyPtrAttrs, SExt))
+__OMP_RTL_ATTRS(__kmpc_team_static_init_4, GetterArgWriteAttrs, AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, ArgPtrAttrs, ArgPtrAttrs,
+ ArgPtrAttrs, ArgPtrAttrs, SExt, SExt))
+__OMP_RTL_ATTRS(__kmpc_team_static_init_4u, GetterArgWriteAttrs, AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, ArgPtrAttrs, ArgPtrAttrs,
+ ArgPtrAttrs, ArgPtrAttrs, SExt, SExt))
+__OMP_RTL_ATTRS(__kmpc_team_static_init_8, GetterArgWriteAttrs, AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, ArgPtrAttrs, ArgPtrAttrs,
+ ArgPtrAttrs, ArgPtrAttrs))
+__OMP_RTL_ATTRS(__kmpc_team_static_init_8u, GetterArgWriteAttrs, AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, ArgPtrAttrs, ArgPtrAttrs,
+ ArgPtrAttrs, ArgPtrAttrs))
+__OMP_RTL_ATTRS(__kmpc_dist_for_static_init_4, GetterArgWriteAttrs,
+ AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt, ArgPtrAttrs,
+ ArgPtrAttrs, ArgPtrAttrs, ArgPtrAttrs,
+ ArgPtrAttrs, SExt, SExt))
+__OMP_RTL_ATTRS(__kmpc_dist_for_static_init_4u, GetterArgWriteAttrs,
+ AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt, ArgPtrAttrs,
+ ArgPtrAttrs, ArgPtrAttrs, ArgPtrAttrs,
+ ArgPtrAttrs, SExt, SExt))
+__OMP_RTL_ATTRS(__kmpc_dist_for_static_init_8, GetterArgWriteAttrs,
+ AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt, ArgPtrAttrs,
+ ArgPtrAttrs, ArgPtrAttrs, ArgPtrAttrs, ArgPtrAttrs))
+__OMP_RTL_ATTRS(__kmpc_dist_for_static_init_8u, GetterArgWriteAttrs,
+ AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt, ArgPtrAttrs,
+ ArgPtrAttrs, ArgPtrAttrs, ArgPtrAttrs, ArgPtrAttrs))
+
+__OMP_RTL_ATTRS(__kmpc_single, BarrierAttrs, SExt,
+ ParamAttrs(ReadOnlyPtrAttrs, SExt))
+__OMP_RTL_ATTRS(__kmpc_end_single, BarrierAttrs, AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt))
+
+__OMP_RTL_ATTRS(__kmpc_omp_task_alloc, DefaultAttrs, ReturnPtrAttrs,
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt, SizeTyExt, SizeTyExt,
+ ReadOnlyPtrAttrs))
+__OMP_RTL_ATTRS(__kmpc_omp_task, DefaultAttrs, SExt,
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, AttributeSet()))
+__OMP_RTL_ATTRS(__kmpc_end_taskgroup, BarrierAttrs, AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt))
+__OMP_RTL_ATTRS(__kmpc_taskgroup, BarrierAttrs, AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt))
+__OMP_RTL_ATTRS(__kmpc_omp_task_begin_if0, DefaultAttrs, AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt))
+__OMP_RTL_ATTRS(__kmpc_omp_task_complete_if0, DefaultAttrs, AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt))
+__OMP_RTL_ATTRS(__kmpc_omp_task_with_deps, DefaultAttrs, SExt,
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, AttributeSet(), SExt,
+ ReadOnlyPtrAttrs, SExt, ReadOnlyPtrAttrs))
+__OMP_RTL_ATTRS(__kmpc_taskloop, DefaultAttrs, AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, AttributeSet(), SExt,
+ ArgPtrAttrs, ArgPtrAttrs, AttributeSet(), SExt, SExt))
+__OMP_RTL_ATTRS(__kmpc_omp_target_task_alloc, DefaultAttrs, ReturnPtrAttrs,
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt, SizeTyExt, SizeTyExt,
+ ReadOnlyPtrAttrs, AttributeSet()))
+__OMP_RTL_ATTRS(__kmpc_taskred_modifier_init, DefaultAttrs, ReturnPtrAttrs,
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt, SExt))
+__OMP_RTL_ATTRS(__kmpc_taskred_init, DefaultAttrs, AttributeSet(),
+ ParamAttrs(SExt, SExt))
+__OMP_RTL_ATTRS(__kmpc_task_reduction_modifier_fini, BarrierAttrs,
+ AttributeSet(), ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt))
+__OMP_RTL_ATTRS(__kmpc_task_reduction_get_th_data, DefaultAttrs, ReturnPtrAttrs,
+ ParamAttrs(SExt))
+__OMP_RTL_ATTRS(__kmpc_task_reduction_init, DefaultAttrs, ReturnPtrAttrs,
+ ParamAttrs(SExt, SExt))
+__OMP_RTL_ATTRS(__kmpc_task_reduction_modifier_init, DefaultAttrs,
+ ReturnPtrAttrs, ParamAttrs(AttributeSet(), SExt, SExt, SExt))
+__OMP_RTL_ATTRS(__kmpc_proxy_task_completed_ooo, DefaultAttrs, AttributeSet(),
+ ParamAttrs())
+
+__OMP_RTL_ATTRS(__kmpc_omp_wait_deps, BarrierAttrs, AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt, ReadOnlyPtrAttrs, SExt))
+__OMP_RTL_ATTRS(__kmpc_omp_taskwait_deps_51, BarrierAttrs, AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt, ReadOnlyPtrAttrs))
+__OMP_RTL_ATTRS(__kmpc_cancellationpoint, DefaultAttrs, SExt,
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt))
+
+__OMP_RTL_ATTRS(__kmpc_fork_teams, ForkAttrs, AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, ReadOnlyPtrAttrs))
+__OMP_RTL_ATTRS(__kmpc_push_num_teams, InaccessibleArgOnlyAttrs, AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt, SExt))
+
+__OMP_RTL_ATTRS(__kmpc_copyprivate, DefaultAttrs, AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, SizeTyExt,
+ ReadOnlyPtrAttrs, AttributeSet(), SExt))
+__OMP_RTL_ATTRS(__kmpc_threadprivate_cached, DefaultAttrs, ReturnPtrAttrs,
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, AttributeSet(), SizeTyExt))
+__OMP_RTL_ATTRS(__kmpc_threadprivate_register, DefaultAttrs, AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, AttributeSet(), ReadOnlyPtrAttrs,
+ ReadOnlyPtrAttrs, ReadOnlyPtrAttrs))
+
+__OMP_RTL_ATTRS(__kmpc_doacross_init, BarrierAttrs, AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt))
+__OMP_RTL_ATTRS(__kmpc_doacross_post, BarrierAttrs, AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, ReadOnlyPtrAttrs))
+__OMP_RTL_ATTRS(__kmpc_doacross_wait, BarrierAttrs, AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt, ReadOnlyPtrAttrs))
+__OMP_RTL_ATTRS(__kmpc_doacross_fini, BarrierAttrs, AttributeSet(),
+ ParamAttrs(ReadOnlyPtrAttrs, SExt))
+
+__OMP_RTL_ATTRS(__kmpc_alloc_shared,
+ AttributeSet(EnumAttr(NoUnwind), EnumAttr(NoSync),
+ AllocSizeAttr(0, std::nullopt)),
+ ReturnPtrAttrs, ParamAttrs(SizeTyExt))
+__OMP_RTL_ATTRS(__kmpc_free_shared, DeviceAllocAttrs, AttributeSet(),
+ ParamAttrs(AttributeSet(EnumAttr(NoCapture),
+ EnumAttr(AllocatedPointer)),
+ SizeTyExt))
+__OMP_RTL_ATTRS(__kmpc_begin_sharing_variables, AttributeSet(), AttributeSet(),
+ ParamAttrs(AttributeSet(), SizeTyExt))
+
+__OMP_RTL_ATTRS(__kmpc_alloc, DefaultAttrs, ReturnPtrAttrs,
+ ParamAttrs(SExt, SizeTyExt))
+__OMP_RTL_ATTRS(__kmpc_aligned_alloc, DefaultAttrs, ReturnPtrAttrs,
+ ParamAttrs(SExt, SizeTyExt, SizeTyExt))
+__OMP_RTL_ATTRS(__kmpc_free, AllocAttrs, AttributeSet(),
+ ParamAttrs(SExt))
+
+__OMP_RTL_ATTRS(__tgt_interop_init, AttributeSet(), AttributeSet(),
+ ParamAttrs(AttributeSet(), SExt, AttributeSet(), SExt,
+ SExt, AttributeSet(), AttributeSet(), SExt))
+__OMP_RTL_ATTRS(__tgt_interop_destroy, AttributeSet(), AttributeSet(),
+ ParamAttrs(AttributeSet(), SExt, AttributeSet(), SExt, SExt,
+ AttributeSet(), SExt))
+__OMP_RTL_ATTRS(__tgt_interop_use, AttributeSet(), AttributeSet(),
+ ParamAttrs(AttributeSet(), SExt, AttributeSet(), SExt, SExt,
+ AttributeSet(), SExt))
+
+__OMP_RTL_ATTRS(__kmpc_init_allocator, DefaultAttrs, ReturnPtrAttrs,
+ ParamAttrs(SExt, AttributeSet(), SExt))
+__OMP_RTL_ATTRS(__kmpc_destroy_allocator, AllocAttrs, AttributeSet(),
+ ParamAttrs(SExt))
+
+__OMP_RTL_ATTRS(__kmpc_push_target_tripcount_mapper, SetterAttrs,
+ AttributeSet(), ParamAttrs())
+__OMP_RTL_ATTRS(__tgt_target_mapper, ForkAttrs, SExt,
+ ParamAttrs(AttributeSet(), AttributeSet(), AttributeSet(), SExt))
+__OMP_RTL_ATTRS(__tgt_target_nowait_mapper, ForkAttrs, SExt,
+ ParamAttrs(AttributeSet(), AttributeSet(), AttributeSet(), SExt,
+ AttributeSet(), AttributeSet(), AttributeSet(),
+ AttributeSet(), AttributeSet(), AttributeSet(),
+ SExt, AttributeSet(), SExt))
+__OMP_RTL_ATTRS(__tgt_target_teams_mapper, ForkAttrs, SExt,
+ ParamAttrs(AttributeSet(), AttributeSet(), AttributeSet(), SExt,
+ AttributeSet(), AttributeSet(), AttributeSet(),
+ AttributeSet(), AttributeSet(), AttributeSet(), SExt,
+ SExt))
+__OMP_RTL_ATTRS(__tgt_target_teams_nowait_mapper, ForkAttrs, SExt,
+ ParamAttrs(AttributeSet(), AttributeSet(), AttributeSet(), SExt,
+ AttributeSet(), AttributeSet(), AttributeSet(),
+ AttributeSet(), AttributeSet(), AttributeSet(),
+ SExt, SExt, SExt, AttributeSet(), SExt))
+__OMP_RTL_ATTRS(__tgt_target_kernel, ForkAttrs, SExt,
+ ParamAttrs(AttributeSet(), AttributeSet(), SExt, SExt))
+__OMP_RTL_ATTRS(__tgt_target_kernel_nowait, ForkAttrs, SExt,
+ ParamAttrs(AttributeSet(), AttributeSet(), SExt, SExt,
+ AttributeSet(), AttributeSet(), SExt, AttributeSet(),
+ SExt))
+__OMP_RTL_ATTRS(__tgt_register_requires, ForkAttrs, AttributeSet(),
+ ParamAttrs())
+__OMP_RTL_ATTRS(__tgt_target_data_begin_mapper, ForkAttrs, AttributeSet(),
+ ParamAttrs(AttributeSet(), AttributeSet(), SExt))
+__OMP_RTL_ATTRS(__tgt_target_data_begin_nowait_mapper, ForkAttrs, AttributeSet(),
+ ParamAttrs(AttributeSet(), AttributeSet(), SExt, AttributeSet(),
+ AttributeSet(), AttributeSet(), AttributeSet(),
+ AttributeSet(), AttributeSet()))
+__OMP_RTL_ATTRS(__tgt_target_data_begin_mapper_issue, AttributeSet(),
+ AttributeSet(),
+ ParamAttrs(AttributeSet(), AttributeSet(), SExt))
+__OMP_RTL_ATTRS(__tgt_target_data_end_mapper, ForkAttrs, AttributeSet(),
+ ParamAttrs(AttributeSet(), AttributeSet(), SExt))
+__OMP_RTL_ATTRS(__tgt_target_data_end_nowait_mapper, ForkAttrs, AttributeSet(),
+ ParamAttrs(AttributeSet(), AttributeSet(), SExt, AttributeSet(),
+ AttributeSet(), AttributeSet(), AttributeSet(),
+ AttributeSet(), AttributeSet()))
+__OMP_RTL_ATTRS(__tgt_target_data_update_mapper, ForkAttrs, AttributeSet(),
+ ParamAttrs(AttributeSet(), AttributeSet(), SExt))
+__OMP_RTL_ATTRS(__tgt_target_data_update_nowait_mapper, ForkAttrs, AttributeSet(),
+ ParamAttrs(AttributeSet(), AttributeSet(), SExt, AttributeSet(),
+ AttributeSet(), AttributeSet(), AttributeSet(),
+ AttributeSet(), AttributeSet()))
+__OMP_RTL_ATTRS(__tgt_mapper_num_components, ForkAttrs, AttributeSet(),
+ ParamAttrs())
+__OMP_RTL_ATTRS(__tgt_push_mapper_component, ForkAttrs, AttributeSet(),
+ ParamAttrs())
+__OMP_RTL_ATTRS(__kmpc_task_allow_completion_event, DefaultAttrs,
+ ReturnPtrAttrs, ParamAttrs(ReadOnlyPtrAttrs, SExt))
+
+__OMP_RTL_ATTRS(__kmpc_target_init, AttributeSet(), SExt,
+ ParamAttrs(AttributeSet(), SExt, SExt))
+__OMP_RTL_ATTRS(__kmpc_target_deinit, AttributeSet(), AttributeSet(),
+ ParamAttrs(AttributeSet(), SExt))
+__OMP_RTL_ATTRS(__kmpc_parallel_51, AlwaysInlineAttrs, AttributeSet(),
+ ParamAttrs(AttributeSet(), SExt, SExt, SExt, SExt,
+ AttributeSet(), AttributeSet(), AttributeSet(),
+ SizeTyExt))
+__OMP_RTL_ATTRS(__kmpc_serialized_parallel, InaccessibleArgOnlyAttrs,
+ AttributeSet(), ParamAttrs(ReadOnlyPtrAttrs, SExt))
+__OMP_RTL_ATTRS(__kmpc_end_serialized_parallel, InaccessibleArgOnlyAttrs,
+ AttributeSet(), ParamAttrs(ReadOnlyPtrAttrs, SExt))
+__OMP_RTL_ATTRS(__kmpc_shuffle_int32, AttributeSet(), SExt,
+ ParamAttrs(SExt, SExt, SExt))
+__OMP_RTL_ATTRS(__kmpc_nvptx_parallel_reduce_nowait_v2, AttributeSet(), SExt,
+ ParamAttrs(AttributeSet(), SExt, SExt, SizeTyExt))
+__OMP_RTL_ATTRS(__kmpc_nvptx_end_reduce_nowait, AttributeSet(), AttributeSet(),
+ ParamAttrs(SExt))
+__OMP_RTL_ATTRS(__kmpc_nvptx_teams_reduce_nowait_v2, AttributeSet(), SExt,
+ ParamAttrs(AttributeSet(), SExt, AttributeSet(), ZExt))
+
+__OMP_RTL_ATTRS(__kmpc_shuffle_int64, AttributeSet(), AttributeSet(),
+ ParamAttrs(AttributeSet(), SExt, SExt))
+
+__OMP_RTL_ATTRS(__kmpc_is_spmd_exec_mode, AttributeSet(), SExt, ParamAttrs())
+
+#undef __OMP_RTL_ATTRS
+#undef OMP_RTL_ATTRS
+#undef AttributeSet
+#undef EnumAttr
+#undef EnumAttrInt
+#undef ParamAttrs
+#undef AllocSizeAttr
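+
+// Usage sketch (roughly what OMPIRBuilder does; Fn is the declared
+// llvm::Function and FnID its RuntimeFunction value): attach the three
+// attribute sets to the declaration.
+//
+//   switch (FnID) {
+//   #define OMP_RTL_ATTRS(Enum, FnAttrSet, RetAttrSet, ArgAttrSets) \
+//     case Enum: \
+//       Fn.setAttributes( \
+//           AttributeList::get(Ctx, FnAttrSet, RetAttrSet, ArgAttrSets)); \
+//       break;
+//   #include "llvm/Frontend/OpenMP/OMPKinds.def"
+//   default: break;
+//   }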
+
+///}
+
+/// KMP ident_t bit flags
+///
+/// In accordance with the values in `openmp/runtime/src/kmp.h`.
+///
+///{
+
+#ifndef OMP_IDENT_FLAG
+#define OMP_IDENT_FLAG(Enum, Str, Value)
+#endif
+
+#define __OMP_IDENT_FLAG(Name, Value) \
+ OMP_IDENT_FLAG(OMP_IDENT_FLAG_##Name, #Name, Value)
+
+__OMP_IDENT_FLAG(KMPC, 0x02)
+__OMP_IDENT_FLAG(ATOMIC_REDUCE, 0x10)
+__OMP_IDENT_FLAG(BARRIER_EXPL, 0x20)
+__OMP_IDENT_FLAG(BARRIER_IMPL, 0x0040)
+__OMP_IDENT_FLAG(BARRIER_IMPL_MASK, 0x01C0)
+__OMP_IDENT_FLAG(BARRIER_IMPL_FOR, 0x0040)
+__OMP_IDENT_FLAG(BARRIER_IMPL_SECTIONS, 0x00C0)
+__OMP_IDENT_FLAG(BARRIER_IMPL_SINGLE, 0x0140)
+__OMP_IDENT_FLAG(BARRIER_IMPL_WORKSHARE, 0x01C0)
+
+#undef __OMP_IDENT_FLAG
+#undef OMP_IDENT_FLAG
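+
+// Decoding sketch: the implicit-barrier kind occupies the 3-bit field
+// selected by BARRIER_IMPL_MASK, so a flags word can be tested as:
+//
+//   constexpr bool hasImplicitBarrier(uint64_t Flags) {
+//     return Flags & 0x0040; // BARRIER_IMPL
+//   }
+//   constexpr uint64_t implicitBarrierKind(uint64_t Flags) {
+//     return Flags & 0x01C0; // BARRIER_IMPL_MASK
+//   }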
+
+///}
+
+/// KMP cancel kind
+///
+///{
+
+#ifndef OMP_CANCEL_KIND
+#define OMP_CANCEL_KIND(Enum, Str, DirectiveEnum, Value)
+#endif
+
+#define __OMP_CANCEL_KIND(Name, Value) \
+ OMP_CANCEL_KIND(OMP_CANCEL_KIND_##Name, #Name, OMPD_##Name, Value)
+
+__OMP_CANCEL_KIND(parallel, 1)
+__OMP_CANCEL_KIND(for, 2)
+__OMP_CANCEL_KIND(sections, 3)
+__OMP_CANCEL_KIND(taskgroup, 4)
+
+#undef __OMP_CANCEL_KIND
+#undef OMP_CANCEL_KIND
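+
+// Usage sketch (cancelValueFor is a hypothetical helper): each kind carries
+// the directive it cancels and the integer value __kmpc_cancel and
+// __kmpc_cancellationpoint expect:
+//
+//   int32_t cancelValueFor(Directive D) {
+//     switch (D) {
+//   #define OMP_CANCEL_KIND(Enum, Str, DirectiveEnum, Value) \
+//     case DirectiveEnum: \
+//       return Value;
+//   #include "llvm/Frontend/OpenMP/OMPKinds.def"
+//     default:
+//       return 0;
+//     }
+//   }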
+
+///}
+
+/// Default kinds
+///
+///{
+
+#ifndef OMP_DEFAULT_KIND
+#define OMP_DEFAULT_KIND(Enum, Str)
+#endif
+
+#define __OMP_DEFAULT_KIND(Name) OMP_DEFAULT_KIND(OMP_DEFAULT_##Name, #Name)
+
+__OMP_DEFAULT_KIND(none)
+__OMP_DEFAULT_KIND(shared)
+__OMP_DEFAULT_KIND(private)
+__OMP_DEFAULT_KIND(firstprivate)
+__OMP_DEFAULT_KIND(unknown)
+
+#undef __OMP_DEFAULT_KIND
+#undef OMP_DEFAULT_KIND
+
+///}
+
+/// Proc bind kinds
+///
+///{
+
+#ifndef OMP_PROC_BIND_KIND
+#define OMP_PROC_BIND_KIND(Enum, Str, Value)
+#endif
+
+#define __OMP_PROC_BIND_KIND(Name, Value) \
+ OMP_PROC_BIND_KIND(OMP_PROC_BIND_##Name, #Name, Value)
+
+__OMP_PROC_BIND_KIND(master, 2)
+__OMP_PROC_BIND_KIND(close, 3)
+__OMP_PROC_BIND_KIND(spread, 4)
+__OMP_PROC_BIND_KIND(primary, 5)
+__OMP_PROC_BIND_KIND(default, 6)
+__OMP_PROC_BIND_KIND(unknown, 7)
+
+#undef __OMP_PROC_BIND_KIND
+#undef OMP_PROC_BIND_KIND
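+
+// Usage sketch (mirrors OMPConstants.h): the explicit values track the
+// runtime's kmp_proc_bind_t, so the generated enum can be passed directly to
+// __kmpc_push_proc_bind.
+//
+//   #define OMP_PROC_BIND_KIND(Enum, Str, Value) Enum = Value,
+//   enum class ProcBindKind {
+//   #include "llvm/Frontend/OpenMP/OMPKinds.def"
+//   };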
+
+///}
+
+/// OpenMP context related definitions:
+/// - trait set selector
+/// - trait selector
+/// - trait property
+///
+///{
+
+#ifndef OMP_TRAIT_SET
+#define OMP_TRAIT_SET(Enum, Str)
+#endif
+#ifndef OMP_TRAIT_SELECTOR
+#define OMP_TRAIT_SELECTOR(Enum, TraitSetEnum, Str, RequiresProperty)
+#endif
+#ifndef OMP_TRAIT_PROPERTY
+#define OMP_TRAIT_PROPERTY(Enum, TraitSetEnum, TraitSelectorEnum, Str)
+#endif
+#ifndef OMP_LAST_TRAIT_PROPERTY
+#define OMP_LAST_TRAIT_PROPERTY(Enum)
+#endif
+
+#define __OMP_TRAIT_SET(Name) OMP_TRAIT_SET(Name, #Name)
+#define __OMP_TRAIT_SELECTOR(TraitSet, Name, RequiresProperty) \
+ OMP_TRAIT_SELECTOR(TraitSet##_##Name, TraitSet, #Name, RequiresProperty)
+#define __OMP_TRAIT_SELECTOR_AND_PROPERTY(TraitSet, Name) \
+ OMP_TRAIT_SELECTOR(TraitSet##_##Name, TraitSet, #Name, false) \
+ OMP_TRAIT_PROPERTY(TraitSet##_##Name##_##Name, TraitSet, TraitSet##_##Name, \
+ #Name)
+#define __OMP_TRAIT_PROPERTY(TraitSet, TraitSelector, Name) \
+ OMP_TRAIT_PROPERTY(TraitSet##_##TraitSelector##_##Name, TraitSet, \
+ TraitSet##_##TraitSelector, #Name)
+
+// "invalid" must go first.
+OMP_TRAIT_SET(invalid, "invalid")
+OMP_TRAIT_SELECTOR(invalid, invalid, "invalid", false)
+OMP_TRAIT_PROPERTY(invalid, invalid, invalid, "invalid")
+
+__OMP_TRAIT_SET(construct)
+__OMP_TRAIT_SELECTOR_AND_PROPERTY(construct, target)
+__OMP_TRAIT_SELECTOR_AND_PROPERTY(construct, teams)
+__OMP_TRAIT_SELECTOR_AND_PROPERTY(construct, parallel)
+__OMP_TRAIT_SELECTOR_AND_PROPERTY(construct, for)
+__OMP_TRAIT_SELECTOR_AND_PROPERTY(construct, simd)
+
+__OMP_TRAIT_SET(device)
+
+__OMP_TRAIT_SELECTOR(device, kind, true)
+
+__OMP_TRAIT_PROPERTY(device, kind, host)
+__OMP_TRAIT_PROPERTY(device, kind, nohost)
+__OMP_TRAIT_PROPERTY(device, kind, cpu)
+__OMP_TRAIT_PROPERTY(device, kind, gpu)
+__OMP_TRAIT_PROPERTY(device, kind, fpga)
+__OMP_TRAIT_PROPERTY(device, kind, any)
+
+__OMP_TRAIT_SELECTOR(device, arch, true)
+
+__OMP_TRAIT_PROPERTY(device, arch, arm)
+__OMP_TRAIT_PROPERTY(device, arch, armeb)
+__OMP_TRAIT_PROPERTY(device, arch, aarch64)
+__OMP_TRAIT_PROPERTY(device, arch, aarch64_be)
+__OMP_TRAIT_PROPERTY(device, arch, aarch64_32)
+__OMP_TRAIT_PROPERTY(device, arch, ppc)
+__OMP_TRAIT_PROPERTY(device, arch, ppcle)
+__OMP_TRAIT_PROPERTY(device, arch, ppc64)
+__OMP_TRAIT_PROPERTY(device, arch, ppc64le)
+__OMP_TRAIT_PROPERTY(device, arch, x86)
+__OMP_TRAIT_PROPERTY(device, arch, x86_64)
+__OMP_TRAIT_PROPERTY(device, arch, amdgcn)
+__OMP_TRAIT_PROPERTY(device, arch, nvptx)
+__OMP_TRAIT_PROPERTY(device, arch, nvptx64)
+
+__OMP_TRAIT_SET(implementation)
+
+__OMP_TRAIT_SELECTOR(implementation, vendor, true)
+
+__OMP_TRAIT_PROPERTY(implementation, vendor, amd)
+__OMP_TRAIT_PROPERTY(implementation, vendor, arm)
+__OMP_TRAIT_PROPERTY(implementation, vendor, bsc)
+__OMP_TRAIT_PROPERTY(implementation, vendor, cray)
+__OMP_TRAIT_PROPERTY(implementation, vendor, fujitsu)
+__OMP_TRAIT_PROPERTY(implementation, vendor, gnu)
+__OMP_TRAIT_PROPERTY(implementation, vendor, ibm)
+__OMP_TRAIT_PROPERTY(implementation, vendor, intel)
+__OMP_TRAIT_PROPERTY(implementation, vendor, llvm)
+__OMP_TRAIT_PROPERTY(implementation, vendor, nec)
+__OMP_TRAIT_PROPERTY(implementation, vendor, nvidia)
+__OMP_TRAIT_PROPERTY(implementation, vendor, pgi)
+__OMP_TRAIT_PROPERTY(implementation, vendor, ti)
+__OMP_TRAIT_PROPERTY(implementation, vendor, unknown)
+
+__OMP_TRAIT_SELECTOR(implementation, extension, true)
+__OMP_TRAIT_PROPERTY(implementation, extension, match_all)
+__OMP_TRAIT_PROPERTY(implementation, extension, match_any)
+__OMP_TRAIT_PROPERTY(implementation, extension, match_none)
+__OMP_TRAIT_PROPERTY(implementation, extension, disable_implicit_base)
+__OMP_TRAIT_PROPERTY(implementation, extension, allow_templates)
+__OMP_TRAIT_PROPERTY(implementation, extension, bind_to_declaration)
+
+__OMP_TRAIT_SET(user)
+
+__OMP_TRAIT_SELECTOR(user, condition, true)
+
+__OMP_TRAIT_PROPERTY(user, condition, true)
+__OMP_TRAIT_PROPERTY(user, condition, false)
+__OMP_TRAIT_PROPERTY(user, condition, unknown)
+
+__OMP_TRAIT_SELECTOR_AND_PROPERTY(construct, dispatch)
+
+// Note that we put isa last so that the other conditions are checked first.
+// This allows us to issue warnings wrt. isa only if we match otherwise.
+__OMP_TRAIT_SELECTOR(device, isa, true)
+
+// We use "__ANY" as a placeholder in the isa property to denote the
+// conceptual "any", not the literal `any` used in kind. The string we
+// we use is not important except that it will show up in diagnostics.
+OMP_TRAIT_PROPERTY(device_isa___ANY, device, device_isa,
+ "<any, entirely target dependent>")
+
+#undef OMP_TRAIT_SET
+#undef __OMP_TRAIT_SET
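+
+// Usage sketch (mirrors OMPContext.h): the set/selector/property entries
+// above expand into the TraitSet, TraitSelector, and TraitProperty enums
+// that drive `declare variant` context matching, e.g.
+//
+//   #define OMP_TRAIT_SET(Enum, Str) Enum,
+//   enum class TraitSet {
+//   #include "llvm/Frontend/OpenMP/OMPKinds.def"
+//   };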
+///}
+
+/// Traits for the requires directive
+///
+/// These will (potentially) become trait selectors for the OpenMP context if
+/// the OMP_REQUIRES_TRAIT macro is not defined.
+///
+///{
+
+#ifdef OMP_REQUIRES_TRAIT
+#define __OMP_REQUIRES_TRAIT(Name) \
+ OMP_REQUIRES_TRAIT(OMP_REQUIRES_TRAIT_##Name, #Name)
+#else
+#define __OMP_REQUIRES_TRAIT(Name) \
+ __OMP_TRAIT_SELECTOR_AND_PROPERTY(implementation, Name)
+#endif
+
+__OMP_REQUIRES_TRAIT(unified_address)
+__OMP_REQUIRES_TRAIT(unified_shared_memory)
+__OMP_REQUIRES_TRAIT(reverse_offload)
+__OMP_REQUIRES_TRAIT(dynamic_allocators)
+__OMP_REQUIRES_TRAIT(atomic_default_mem_order)
+
+OMP_LAST_TRAIT_PROPERTY(
+ implementation_atomic_default_mem_order_atomic_default_mem_order)
+
+#undef __OMP_TRAIT_SELECTOR_AND_PROPERTY
+#undef OMP_TRAIT_SELECTOR
+#undef __OMP_TRAIT_SELECTOR
+#undef OMP_TRAIT_PROPERTY
+#undef OMP_LAST_TRAIT_PROPERTY
+#undef __OMP_TRAIT_PROPERTY
+#undef __OMP_REQUIRES_TRAIT
+#undef OMP_REQUIRES_TRAIT
+///}
+
+/// Assumption clauses
+///
+///{
+
+#ifdef OMP_ASSUME_CLAUSE
+#define __OMP_ASSUME_CLAUSE(Identifier, StartsWith, HasDirectiveList, HasExpression) \
+OMP_ASSUME_CLAUSE(Identifier, StartsWith, HasDirectiveList, HasExpression)
+#else
+#define __OMP_ASSUME_CLAUSE(...)
+#endif
+
+__OMP_ASSUME_CLAUSE(llvm::StringLiteral("ext_"), true, false, false)
+__OMP_ASSUME_CLAUSE(llvm::StringLiteral("absent"), false, true, false)
+__OMP_ASSUME_CLAUSE(llvm::StringLiteral("contains"), false, true, false)
+__OMP_ASSUME_CLAUSE(llvm::StringLiteral("holds"), false, false, true)
+__OMP_ASSUME_CLAUSE(llvm::StringLiteral("no_openmp"), false, false, false)
+__OMP_ASSUME_CLAUSE(llvm::StringLiteral("no_openmp_routines"), false, false, false)
+__OMP_ASSUME_CLAUSE(llvm::StringLiteral("no_parallelism"), false, false, false)
+
+#undef __OMP_ASSUME_CLAUSE
+#undef OMP_ASSUME_CLAUSE
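+
+// Matching sketch (isValidAssumeClause is a hypothetical helper): StartsWith
+// makes "ext_" a prefix match while the other spellings match exactly.
+//
+//   bool isValidAssumeClause(llvm::StringRef Str) {
+//   #define OMP_ASSUME_CLAUSE(Identifier, StartsWith, HasDirectiveList, \
+//                             HasExpression) \
+//     if (StartsWith ? Str.startswith(Identifier) : Str == Identifier) \
+//       return true;
+//   #include "llvm/Frontend/OpenMP/OMPKinds.def"
+//     return false;
+//   }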
+///}