author     danlark <danlark@yandex-team.ru>  2022-02-10 16:46:08 +0300
committer  Daniil Cherednik <dcherednik@yandex-team.ru>  2022-02-10 16:46:08 +0300
commit     3426a9bc7f169ae9da54cef557ad2a33f6e8eee0 (patch)
tree       26154e1e9990f1bb4525d3e3fb5b6dac2c2c1da2 /contrib/libs/cxxsupp/openmp
parent     cb68f224c46a8ee52ac3fdd2a32534b8bb8dc134 (diff)
download   ydb-3426a9bc7f169ae9da54cef557ad2a33f6e8eee0.tar.gz
Restoring authorship annotation for <danlark@yandex-team.ru>. Commit 1 of 2.
Diffstat (limited to 'contrib/libs/cxxsupp/openmp')
-rw-r--r--  contrib/libs/cxxsupp/openmp/kmp_alloc.c                              |   2
-rw-r--r--  contrib/libs/cxxsupp/openmp/kmp_barrier.cpp                          | 180
-rw-r--r--  contrib/libs/cxxsupp/openmp/kmp_dispatch.cpp                         |  46
-rw-r--r--  contrib/libs/cxxsupp/openmp/kmp_lock.cpp                             |   6
-rw-r--r--  contrib/libs/cxxsupp/openmp/kmp_sched.cpp                            |  52
-rw-r--r--  contrib/libs/cxxsupp/openmp/thirdparty/ittnotify/ittnotify_static.c  |   8
-rw-r--r--  contrib/libs/cxxsupp/openmp/z_Windows_NT_util.c                      |   2
7 files changed, 148 insertions(+), 148 deletions(-)
diff --git a/contrib/libs/cxxsupp/openmp/kmp_alloc.c b/contrib/libs/cxxsupp/openmp/kmp_alloc.c
index 4e4656c6e8..2dfba3122f 100644
--- a/contrib/libs/cxxsupp/openmp/kmp_alloc.c
+++ b/contrib/libs/cxxsupp/openmp/kmp_alloc.c
@@ -878,7 +878,7 @@ brel( kmp_info_t *th, void *buf )
released, since it's negative to indicate that the buffer is
allocated. */
- bufsize size = b->bh.bb.bsize;
+ bufsize size = b->bh.bb.bsize;
/* Make the previous buffer the one we're working on. */
KMP_DEBUG_ASSERT(BH((char *) b - b->bh.bb.prevfree)->bb.bsize == b->bh.bb.prevfree);
diff --git a/contrib/libs/cxxsupp/openmp/kmp_barrier.cpp b/contrib/libs/cxxsupp/openmp/kmp_barrier.cpp
index 6b66dabba2..73ec77ccd3 100644
--- a/contrib/libs/cxxsupp/openmp/kmp_barrier.cpp
+++ b/contrib/libs/cxxsupp/openmp/kmp_barrier.cpp
@@ -49,9 +49,9 @@ __kmp_linear_barrier_gather(enum barrier_type bt, kmp_info_t *this_thr, int gtid
USE_ITT_BUILD_ARG(void * itt_sync_obj) )
{
KMP_TIME_DEVELOPER_BLOCK(KMP_linear_gather);
- kmp_team_t *team = this_thr->th.th_team;
- kmp_bstate_t *thr_bar = & this_thr->th.th_bar[bt].bb;
- kmp_info_t **other_threads = team->t.t_threads;
+ kmp_team_t *team = this_thr->th.th_team;
+ kmp_bstate_t *thr_bar = & this_thr->th.th_bar[bt].bb;
+ kmp_info_t **other_threads = team->t.t_threads;
KA_TRACE(20, ("__kmp_linear_barrier_gather: T#%d(%d:%d) enter for barrier type %d\n",
gtid, team->t.t_id, tid, bt));
@@ -75,11 +75,11 @@ __kmp_linear_barrier_gather(enum barrier_type bt, kmp_info_t *this_thr, int gtid
kmp_flag_64 flag(&thr_bar->b_arrived, other_threads[0]);
flag.release();
} else {
- kmp_balign_team_t *team_bar = &team->t.t_bar[bt];
- int nproc = this_thr->th.th_team_nproc;
- int i;
+ kmp_balign_team_t *team_bar = &team->t.t_bar[bt];
+ int nproc = this_thr->th.th_team_nproc;
+ int i;
// Don't have to worry about sleep bit here or atomic since team setting
- kmp_uint64 new_state = team_bar->b_arrived + KMP_BARRIER_STATE_BUMP;
+ kmp_uint64 new_state = team_bar->b_arrived + KMP_BARRIER_STATE_BUMP;
// Collect all the worker team member threads.
for (i=1; i<nproc; ++i) {
@@ -126,13 +126,13 @@ __kmp_linear_barrier_release(enum barrier_type bt, kmp_info_t *this_thr, int gti
USE_ITT_BUILD_ARG(void *itt_sync_obj) )
{
KMP_TIME_DEVELOPER_BLOCK(KMP_linear_release);
- kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb;
- kmp_team_t *team;
+ kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb;
+ kmp_team_t *team;
if (KMP_MASTER_TID(tid)) {
- unsigned int i;
- kmp_uint32 nproc = this_thr->th.th_team_nproc;
- kmp_info_t **other_threads;
+ unsigned int i;
+ kmp_uint32 nproc = this_thr->th.th_team_nproc;
+ kmp_info_t **other_threads;
team = __kmp_threads[gtid]->th.th_team;
KMP_DEBUG_ASSERT(team != NULL);
@@ -221,15 +221,15 @@ __kmp_tree_barrier_gather(enum barrier_type bt, kmp_info_t *this_thr, int gtid,
USE_ITT_BUILD_ARG(void *itt_sync_obj) )
{
KMP_TIME_DEVELOPER_BLOCK(KMP_tree_gather);
- kmp_team_t *team = this_thr->th.th_team;
- kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb;
- kmp_info_t **other_threads = team->t.t_threads;
- kmp_uint32 nproc = this_thr->th.th_team_nproc;
- kmp_uint32 branch_bits = __kmp_barrier_gather_branch_bits[bt];
- kmp_uint32 branch_factor = 1 << branch_bits;
- kmp_uint32 child;
- kmp_uint32 child_tid;
- kmp_uint64 new_state;
+ kmp_team_t *team = this_thr->th.th_team;
+ kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb;
+ kmp_info_t **other_threads = team->t.t_threads;
+ kmp_uint32 nproc = this_thr->th.th_team_nproc;
+ kmp_uint32 branch_bits = __kmp_barrier_gather_branch_bits[bt];
+ kmp_uint32 branch_factor = 1 << branch_bits;
+ kmp_uint32 child;
+ kmp_uint32 child_tid;
+ kmp_uint64 new_state;
KA_TRACE(20, ("__kmp_tree_barrier_gather: T#%d(%d:%d) enter for barrier type %d\n",
gtid, team->t.t_id, tid, bt));
@@ -248,8 +248,8 @@ __kmp_tree_barrier_gather(enum barrier_type bt, kmp_info_t *this_thr, int gtid,
new_state = team->t.t_bar[bt].b_arrived + KMP_BARRIER_STATE_BUMP;
child = 1;
do {
- kmp_info_t *child_thr = other_threads[child_tid];
- kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
+ kmp_info_t *child_thr = other_threads[child_tid];
+ kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
#if KMP_CACHE_MANAGE
// Prefetch next thread's arrived count
if (child+1 <= branch_factor && child_tid+1 < nproc)
@@ -283,7 +283,7 @@ __kmp_tree_barrier_gather(enum barrier_type bt, kmp_info_t *this_thr, int gtid,
}
if (!KMP_MASTER_TID(tid)) { // Worker threads
- kmp_int32 parent_tid = (tid - 1) >> branch_bits;
+ kmp_int32 parent_tid = (tid - 1) >> branch_bits;
KA_TRACE(20, ("__kmp_tree_barrier_gather: T#%d(%d:%d) releasing T#%d(%d:%d) "
"arrived(%p): %llu => %llu\n", gtid, team->t.t_id, tid,
@@ -316,13 +316,13 @@ __kmp_tree_barrier_release(enum barrier_type bt, kmp_info_t *this_thr, int gtid,
USE_ITT_BUILD_ARG(void *itt_sync_obj) )
{
KMP_TIME_DEVELOPER_BLOCK(KMP_tree_release);
- kmp_team_t *team;
- kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb;
- kmp_uint32 nproc;
- kmp_uint32 branch_bits = __kmp_barrier_release_branch_bits[bt];
- kmp_uint32 branch_factor = 1 << branch_bits;
- kmp_uint32 child;
- kmp_uint32 child_tid;
+ kmp_team_t *team;
+ kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb;
+ kmp_uint32 nproc;
+ kmp_uint32 branch_bits = __kmp_barrier_release_branch_bits[bt];
+ kmp_uint32 branch_factor = 1 << branch_bits;
+ kmp_uint32 child;
+ kmp_uint32 child_tid;
// Perform a tree release for all of the threads that have been gathered
if (!KMP_MASTER_TID(tid)) { // Handle fork barrier workers who aren't part of a team yet
@@ -371,12 +371,12 @@ __kmp_tree_barrier_release(enum barrier_type bt, kmp_info_t *this_thr, int gtid,
child_tid = (tid << branch_bits) + 1;
if (child_tid < nproc) {
- kmp_info_t **other_threads = team->t.t_threads;
+ kmp_info_t **other_threads = team->t.t_threads;
child = 1;
// Parent threads release all their children
do {
- kmp_info_t *child_thr = other_threads[child_tid];
- kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
+ kmp_info_t *child_thr = other_threads[child_tid];
+ kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
#if KMP_CACHE_MANAGE
// Prefetch next thread's go count
if (child+1 <= branch_factor && child_tid+1 < nproc)
@@ -419,15 +419,15 @@ __kmp_hyper_barrier_gather(enum barrier_type bt, kmp_info_t *this_thr, int gtid,
USE_ITT_BUILD_ARG(void *itt_sync_obj) )
{
KMP_TIME_DEVELOPER_BLOCK(KMP_hyper_gather);
- kmp_team_t *team = this_thr->th.th_team;
- kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb;
- kmp_info_t **other_threads = team->t.t_threads;
- kmp_uint64 new_state = KMP_BARRIER_UNUSED_STATE;
- kmp_uint32 num_threads = this_thr->th.th_team_nproc;
- kmp_uint32 branch_bits = __kmp_barrier_gather_branch_bits[bt];
- kmp_uint32 branch_factor = 1 << branch_bits;
- kmp_uint32 offset;
- kmp_uint32 level;
+ kmp_team_t *team = this_thr->th.th_team;
+ kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb;
+ kmp_info_t **other_threads = team->t.t_threads;
+ kmp_uint64 new_state = KMP_BARRIER_UNUSED_STATE;
+ kmp_uint32 num_threads = this_thr->th.th_team_nproc;
+ kmp_uint32 branch_bits = __kmp_barrier_gather_branch_bits[bt];
+ kmp_uint32 branch_factor = 1 << branch_bits;
+ kmp_uint32 offset;
+ kmp_uint32 level;
KA_TRACE(20, ("__kmp_hyper_barrier_gather: T#%d(%d:%d) enter for barrier type %d\n",
gtid, team->t.t_id, tid, bt));
@@ -445,11 +445,11 @@ __kmp_hyper_barrier_gather(enum barrier_type bt, kmp_info_t *this_thr, int gtid,
kmp_flag_64 p_flag(&thr_bar->b_arrived);
for (level=0, offset=1; offset<num_threads; level+=branch_bits, offset<<=branch_bits)
{
- kmp_uint32 child;
- kmp_uint32 child_tid;
+ kmp_uint32 child;
+ kmp_uint32 child_tid;
if (((tid >> level) & (branch_factor - 1)) != 0) {
- kmp_int32 parent_tid = tid & ~((1 << (level + branch_bits)) -1);
+ kmp_int32 parent_tid = tid & ~((1 << (level + branch_bits)) -1);
KA_TRACE(20, ("__kmp_hyper_barrier_gather: T#%d(%d:%d) releasing T#%d(%d:%d) "
"arrived(%p): %llu => %llu\n", gtid, team->t.t_id, tid,
@@ -471,10 +471,10 @@ __kmp_hyper_barrier_gather(enum barrier_type bt, kmp_info_t *this_thr, int gtid,
for (child=1, child_tid=tid+(1 << level); child<branch_factor && child_tid<num_threads;
child++, child_tid+=(1 << level))
{
- kmp_info_t *child_thr = other_threads[child_tid];
- kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
+ kmp_info_t *child_thr = other_threads[child_tid];
+ kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
#if KMP_CACHE_MANAGE
- kmp_uint32 next_child_tid = child_tid + (1 << level);
+ kmp_uint32 next_child_tid = child_tid + (1 << level);
// Prefetch next thread's arrived count
if (child+1 < branch_factor && next_child_tid < num_threads)
KMP_CACHE_PREFETCH(&other_threads[next_child_tid]->th.th_bar[bt].bb.b_arrived);
@@ -525,16 +525,16 @@ __kmp_hyper_barrier_release(enum barrier_type bt, kmp_info_t *this_thr, int gtid
USE_ITT_BUILD_ARG(void *itt_sync_obj) )
{
KMP_TIME_DEVELOPER_BLOCK(KMP_hyper_release);
- kmp_team_t *team;
- kmp_bstate_t *thr_bar = & this_thr -> th.th_bar[ bt ].bb;
- kmp_info_t **other_threads;
- kmp_uint32 num_threads;
- kmp_uint32 branch_bits = __kmp_barrier_release_branch_bits[ bt ];
- kmp_uint32 branch_factor = 1 << branch_bits;
- kmp_uint32 child;
- kmp_uint32 child_tid;
- kmp_uint32 offset;
- kmp_uint32 level;
+ kmp_team_t *team;
+ kmp_bstate_t *thr_bar = & this_thr -> th.th_bar[ bt ].bb;
+ kmp_info_t **other_threads;
+ kmp_uint32 num_threads;
+ kmp_uint32 branch_bits = __kmp_barrier_release_branch_bits[ bt ];
+ kmp_uint32 branch_factor = 1 << branch_bits;
+ kmp_uint32 child;
+ kmp_uint32 child_tid;
+ kmp_uint32 offset;
+ kmp_uint32 level;
/* Perform a hypercube-embedded tree release for all of the threads that have been gathered.
If KMP_REVERSE_HYPER_BAR is defined (default) the threads are released in the reverse
@@ -620,10 +620,10 @@ __kmp_hyper_barrier_release(enum barrier_type bt, kmp_info_t *this_thr, int gtid
{
if (child_tid >= num_threads) continue; // Child doesn't exist so keep going
else {
- kmp_info_t *child_thr = other_threads[child_tid];
- kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
+ kmp_info_t *child_thr = other_threads[child_tid];
+ kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
#if KMP_CACHE_MANAGE
- kmp_uint32 next_child_tid = child_tid - (1 << level);
+ kmp_uint32 next_child_tid = child_tid - (1 << level);
// Prefetch next thread's go count
# ifdef KMP_REVERSE_HYPER_BAR
if (child-1 >= 1 && next_child_tid < num_threads)
@@ -732,11 +732,11 @@ __kmp_hierarchical_barrier_gather(enum barrier_type bt, kmp_info_t *this_thr,
USE_ITT_BUILD_ARG(void * itt_sync_obj) )
{
KMP_TIME_DEVELOPER_BLOCK(KMP_hier_gather);
- kmp_team_t *team = this_thr->th.th_team;
- kmp_bstate_t *thr_bar = & this_thr->th.th_bar[bt].bb;
- kmp_uint32 nproc = this_thr->th.th_team_nproc;
- kmp_info_t **other_threads = team->t.t_threads;
- kmp_uint64 new_state;
+ kmp_team_t *team = this_thr->th.th_team;
+ kmp_bstate_t *thr_bar = & this_thr->th.th_bar[bt].bb;
+ kmp_uint32 nproc = this_thr->th.th_team_nproc;
+ kmp_info_t **other_threads = team->t.t_threads;
+ kmp_uint64 new_state;
int level = team->t.t_level;
#if OMP_40_ENABLED
@@ -761,7 +761,7 @@ __kmp_hierarchical_barrier_gather(enum barrier_type bt, kmp_info_t *this_thr,
(void)__kmp_init_hierarchical_barrier_thread(bt, thr_bar, nproc, gtid, tid, team);
if (thr_bar->my_level) { // not a leaf (my_level==0 means leaf)
- kmp_int32 child_tid;
+ kmp_int32 child_tid;
new_state = (kmp_uint64)team->t.t_bar[bt].b_arrived + KMP_BARRIER_STATE_BUMP;
if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME && thr_bar->use_oncore_barrier) {
if (thr_bar->leaf_kids) { // First, wait for leaf children to check-in on my b_arrived flag
@@ -786,8 +786,8 @@ __kmp_hierarchical_barrier_gather(enum barrier_type bt, kmp_info_t *this_thr,
kmp_uint32 last = tid+thr_bar->skip_per_level[d+1], skip = thr_bar->skip_per_level[d];
if (last > nproc) last = nproc;
for (child_tid=tid+skip; child_tid<(int)last; child_tid+=skip) {
- kmp_info_t *child_thr = other_threads[child_tid];
- kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
+ kmp_info_t *child_thr = other_threads[child_tid];
+ kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
KA_TRACE(20, ("__kmp_hierarchical_barrier_gather: T#%d(%d:%d) wait T#%d(%d:%d) "
"arrived(%p) == %llu\n",
gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team),
@@ -809,8 +809,8 @@ __kmp_hierarchical_barrier_gather(enum barrier_type bt, kmp_info_t *this_thr,
kmp_uint32 last = tid+thr_bar->skip_per_level[d+1], skip = thr_bar->skip_per_level[d];
if (last > nproc) last = nproc;
for (child_tid=tid+skip; child_tid<(int)last; child_tid+=skip) {
- kmp_info_t *child_thr = other_threads[child_tid];
- kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
+ kmp_info_t *child_thr = other_threads[child_tid];
+ kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
KA_TRACE(20, ("__kmp_hierarchical_barrier_gather: T#%d(%d:%d) wait T#%d(%d:%d) "
"arrived(%p) == %llu\n",
gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team),
@@ -864,9 +864,9 @@ __kmp_hierarchical_barrier_release(enum barrier_type bt, kmp_info_t *this_thr, i
USE_ITT_BUILD_ARG(void * itt_sync_obj) )
{
KMP_TIME_DEVELOPER_BLOCK(KMP_hier_release);
- kmp_team_t *team;
- kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb;
- kmp_uint32 nproc;
+ kmp_team_t *team;
+ kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb;
+ kmp_uint32 nproc;
bool team_change = false; // indicates on-core barrier shouldn't be used
if (KMP_MASTER_TID(tid)) {
@@ -959,7 +959,7 @@ __kmp_hierarchical_barrier_release(enum barrier_type bt, kmp_info_t *this_thr, i
// Now, release my children
if (thr_bar->my_level) { // not a leaf
- kmp_int32 child_tid;
+ kmp_int32 child_tid;
kmp_uint32 last;
if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME && thr_bar->use_oncore_barrier) {
if (KMP_MASTER_TID(tid)) { // do a flat release
@@ -969,7 +969,7 @@ __kmp_hierarchical_barrier_release(enum barrier_type bt, kmp_info_t *this_thr, i
ngo_load(&thr_bar->th_fixed_icvs);
// This loops over all the threads skipping only the leaf nodes in the hierarchy
for (child_tid=thr_bar->skip_per_level[1]; child_tid<(int)nproc; child_tid+=thr_bar->skip_per_level[1]) {
- kmp_bstate_t *child_bar = &team->t.t_threads[child_tid]->th.th_bar[bt].bb;
+ kmp_bstate_t *child_bar = &team->t.t_threads[child_tid]->th.th_bar[bt].bb;
KA_TRACE(20, ("__kmp_hierarchical_barrier_release: T#%d(%d:%d) releasing T#%d(%d:%d)"
" go(%p): %u => %u\n",
gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team),
@@ -992,8 +992,8 @@ __kmp_hierarchical_barrier_release(enum barrier_type bt, kmp_info_t *this_thr, i
last = tid+thr_bar->skip_per_level[1];
if (last > nproc) last = nproc;
for (child_tid=tid+1+old_leaf_kids; child_tid<(int)last; ++child_tid) { // skip_per_level[0]=1
- kmp_info_t *child_thr = team->t.t_threads[child_tid];
- kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
+ kmp_info_t *child_thr = team->t.t_threads[child_tid];
+ kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
KA_TRACE(20, ("__kmp_hierarchical_barrier_release: T#%d(%d:%d) releasing"
" T#%d(%d:%d) go(%p): %u => %u\n",
gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team),
@@ -1015,8 +1015,8 @@ __kmp_hierarchical_barrier_release(enum barrier_type bt, kmp_info_t *this_thr, i
kmp_uint32 skip = thr_bar->skip_per_level[d];
if (last > nproc) last = nproc;
for (child_tid=tid+skip; child_tid<(int)last; child_tid+=skip) {
- kmp_info_t *child_thr = team->t.t_threads[child_tid];
- kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
+ kmp_info_t *child_thr = team->t.t_threads[child_tid];
+ kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
KA_TRACE(20, ("__kmp_hierarchical_barrier_release: T#%d(%d:%d) releasing T#%d(%d:%d)"
" go(%p): %u => %u\n",
gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team),
@@ -1048,10 +1048,10 @@ __kmp_barrier(enum barrier_type bt, int gtid, int is_split, size_t reduce_size,
void *reduce_data, void (*reduce)(void *, void *))
{
KMP_TIME_DEVELOPER_BLOCK(KMP_barrier);
- int tid = __kmp_tid_from_gtid(gtid);
- kmp_info_t *this_thr = __kmp_threads[gtid];
- kmp_team_t *team = this_thr->th.th_team;
- int status = 0;
+ int tid = __kmp_tid_from_gtid(gtid);
+ kmp_info_t *this_thr = __kmp_threads[gtid];
+ kmp_team_t *team = this_thr->th.th_team;
+ int status = 0;
ident_t *loc = __kmp_threads[gtid]->th.th_ident;
#if OMPT_SUPPORT
ompt_task_id_t my_task_id;
@@ -1346,9 +1346,9 @@ void
__kmp_join_barrier(int gtid)
{
KMP_TIME_DEVELOPER_BLOCK(KMP_join_barrier);
- kmp_info_t *this_thr = __kmp_threads[gtid];
- kmp_team_t *team;
- kmp_uint nproc;
+ kmp_info_t *this_thr = __kmp_threads[gtid];
+ kmp_team_t *team;
+ kmp_uint nproc;
kmp_info_t *master_thread;
int tid;
#ifdef KMP_DEBUG
@@ -1563,8 +1563,8 @@ __kmp_fork_barrier(int gtid, int tid)
#endif /* USE_ITT_BUILD && USE_ITT_NOTIFY */
#ifdef KMP_DEBUG
- kmp_info_t **other_threads = team->t.t_threads;
- int i;
+ kmp_info_t **other_threads = team->t.t_threads;
+ int i;
// Verify state
KMP_MB();
diff --git a/contrib/libs/cxxsupp/openmp/kmp_dispatch.cpp b/contrib/libs/cxxsupp/openmp/kmp_dispatch.cpp
index c91bb8da3c..8dccef616a 100644
--- a/contrib/libs/cxxsupp/openmp/kmp_dispatch.cpp
+++ b/contrib/libs/cxxsupp/openmp/kmp_dispatch.cpp
@@ -293,11 +293,11 @@ __kmp_wait_yield( volatile UT * spinner,
)
{
// note: we may not belong to a team at this point
- volatile UT * spin = spinner;
- UT check = checker;
- kmp_uint32 spins;
- kmp_uint32 (*f) ( UT, UT ) = pred;
- UT r;
+ volatile UT * spin = spinner;
+ UT check = checker;
+ kmp_uint32 spins;
+ kmp_uint32 (*f) ( UT, UT ) = pred;
+ UT r;
KMP_FSYNC_SPIN_INIT( obj, (void*) spin );
KMP_INIT_YIELD( spins );
@@ -2190,10 +2190,10 @@ __kmp_dist_get_bounds(
) {
typedef typename traits_t< T >::unsigned_t UT;
typedef typename traits_t< T >::signed_t ST;
- kmp_uint32 team_id;
- kmp_uint32 nteams;
- UT trip_count;
- kmp_team_t *team;
+ kmp_uint32 team_id;
+ kmp_uint32 nteams;
+ UT trip_count;
+ kmp_team_t *team;
kmp_info_t * th;
KMP_DEBUG_ASSERT( plastiter && plower && pupper );
@@ -2261,16 +2261,16 @@ __kmp_dist_get_bounds(
*plastiter = ( team_id == trip_count - 1 );
} else {
if( __kmp_static == kmp_sch_static_balanced ) {
- UT chunk = trip_count / nteams;
- UT extras = trip_count % nteams;
+ UT chunk = trip_count / nteams;
+ UT extras = trip_count % nteams;
*plower += incr * ( team_id * chunk + ( team_id < extras ? team_id : extras ) );
*pupper = *plower + chunk * incr - ( team_id < extras ? 0 : incr );
if( plastiter != NULL )
*plastiter = ( team_id == nteams - 1 );
} else {
- T chunk_inc_count =
+ T chunk_inc_count =
( trip_count / nteams + ( ( trip_count % nteams ) ? 1 : 0) ) * incr;
- T upper = *pupper;
+ T upper = *pupper;
KMP_DEBUG_ASSERT( __kmp_static == kmp_sch_static_greedy );
// Unknown static scheduling type.
*plower += team_id * chunk_inc_count;
@@ -2543,11 +2543,11 @@ __kmp_wait_yield_4(volatile kmp_uint32 * spinner,
)
{
// note: we may not belong to a team at this point
- volatile kmp_uint32 * spin = spinner;
- kmp_uint32 check = checker;
- kmp_uint32 spins;
- kmp_uint32 (*f) ( kmp_uint32, kmp_uint32 ) = pred;
- kmp_uint32 r;
+ volatile kmp_uint32 * spin = spinner;
+ kmp_uint32 check = checker;
+ kmp_uint32 spins;
+ kmp_uint32 (*f) ( kmp_uint32, kmp_uint32 ) = pred;
+ kmp_uint32 r;
KMP_FSYNC_SPIN_INIT( obj, (void*) spin );
KMP_INIT_YIELD( spins );
@@ -2576,11 +2576,11 @@ __kmp_wait_yield_8( volatile kmp_uint64 * spinner,
)
{
// note: we may not belong to a team at this point
- volatile kmp_uint64 * spin = spinner;
- kmp_uint64 check = checker;
- kmp_uint32 spins;
- kmp_uint32 (*f) ( kmp_uint64, kmp_uint64 ) = pred;
- kmp_uint64 r;
+ volatile kmp_uint64 * spin = spinner;
+ kmp_uint64 check = checker;
+ kmp_uint32 spins;
+ kmp_uint32 (*f) ( kmp_uint64, kmp_uint64 ) = pred;
+ kmp_uint64 r;
KMP_FSYNC_SPIN_INIT( obj, (void*) spin );
KMP_INIT_YIELD( spins );
diff --git a/contrib/libs/cxxsupp/openmp/kmp_lock.cpp b/contrib/libs/cxxsupp/openmp/kmp_lock.cpp
index becf7eddf6..3145d0c8d9 100644
--- a/contrib/libs/cxxsupp/openmp/kmp_lock.cpp
+++ b/contrib/libs/cxxsupp/openmp/kmp_lock.cpp
@@ -731,7 +731,7 @@ __kmp_is_ticket_lock_nestable( kmp_ticket_lock_t *lck )
static kmp_uint32
__kmp_bakery_check(kmp_uint value, kmp_uint checker)
{
- kmp_uint32 pause;
+ kmp_uint32 pause;
if (value == checker) {
return TRUE;
@@ -1210,7 +1210,7 @@ __forceinline static int
__kmp_acquire_queuing_lock_timed_template( kmp_queuing_lock_t *lck,
kmp_int32 gtid )
{
- kmp_info_t *this_thr = __kmp_thread_from_gtid( gtid );
+ kmp_info_t *this_thr = __kmp_thread_from_gtid( gtid );
volatile kmp_int32 *head_id_p = & lck->lk.head_id;
volatile kmp_int32 *tail_id_p = & lck->lk.tail_id;
volatile kmp_uint32 *spin_here_p;
@@ -1488,7 +1488,7 @@ __kmp_test_queuing_lock_with_checks( kmp_queuing_lock_t *lck, kmp_int32 gtid )
int
__kmp_release_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid )
{
- kmp_info_t *this_thr;
+ kmp_info_t *this_thr;
volatile kmp_int32 *head_id_p = & lck->lk.head_id;
volatile kmp_int32 *tail_id_p = & lck->lk.tail_id;
diff --git a/contrib/libs/cxxsupp/openmp/kmp_sched.cpp b/contrib/libs/cxxsupp/openmp/kmp_sched.cpp
index 80ad960a8d..8c005ddb0c 100644
--- a/contrib/libs/cxxsupp/openmp/kmp_sched.cpp
+++ b/contrib/libs/cxxsupp/openmp/kmp_sched.cpp
@@ -89,12 +89,12 @@ __kmp_for_static_init(
typedef typename traits_t< T >::unsigned_t UT;
typedef typename traits_t< T >::signed_t ST;
/* this all has to be changed back to TID and such.. */
- kmp_int32 gtid = global_tid;
- kmp_uint32 tid;
- kmp_uint32 nth;
- UT trip_count;
- kmp_team_t *team;
- kmp_info_t *th = __kmp_threads[ gtid ];
+ kmp_int32 gtid = global_tid;
+ kmp_uint32 tid;
+ kmp_uint32 nth;
+ UT trip_count;
+ kmp_team_t *team;
+ kmp_info_t *th = __kmp_threads[ gtid ];
#if OMPT_SUPPORT && OMPT_TRACE
ompt_team_info_t *team_info = NULL;
@@ -275,16 +275,16 @@ __kmp_for_static_init(
*plastiter = ( tid == trip_count - 1 );
} else {
if ( __kmp_static == kmp_sch_static_balanced ) {
- UT small_chunk = trip_count / nth;
- UT extras = trip_count % nth;
+ UT small_chunk = trip_count / nth;
+ UT extras = trip_count % nth;
*plower += incr * ( tid * small_chunk + ( tid < extras ? tid : extras ) );
*pupper = *plower + small_chunk * incr - ( tid < extras ? 0 : incr );
if( plastiter != NULL )
*plastiter = ( tid == nth - 1 );
} else {
- T big_chunk_inc_count = ( trip_count/nth +
+ T big_chunk_inc_count = ( trip_count/nth +
( ( trip_count % nth ) ? 1 : 0) ) * incr;
- T old_upper = *pupper;
+ T old_upper = *pupper;
KMP_DEBUG_ASSERT( __kmp_static == kmp_sch_static_greedy );
// Unknown static scheduling type.
@@ -310,7 +310,7 @@ __kmp_for_static_init(
}
case kmp_sch_static_chunked:
{
- ST span;
+ ST span;
if ( chunk < 1 ) {
chunk = 1;
}
@@ -385,12 +385,12 @@ __kmp_dist_for_static_init(
KMP_COUNT_BLOCK(OMP_DISTRIBUTE);
typedef typename traits_t< T >::unsigned_t UT;
typedef typename traits_t< T >::signed_t ST;
- kmp_uint32 tid;
- kmp_uint32 nth;
- kmp_uint32 team_id;
- kmp_uint32 nteams;
- UT trip_count;
- kmp_team_t *team;
+ kmp_uint32 tid;
+ kmp_uint32 nth;
+ kmp_uint32 team_id;
+ kmp_uint32 nteams;
+ UT trip_count;
+ kmp_team_t *team;
kmp_info_t * th;
KMP_DEBUG_ASSERT( plastiter && plower && pupper && pupperDist && pstride );
@@ -466,16 +466,16 @@ __kmp_dist_for_static_init(
} else {
// Get the team's chunk first (each team gets at most one chunk)
if( __kmp_static == kmp_sch_static_balanced ) {
- UT chunkD = trip_count / nteams;
- UT extras = trip_count % nteams;
+ UT chunkD = trip_count / nteams;
+ UT extras = trip_count % nteams;
*plower += incr * ( team_id * chunkD + ( team_id < extras ? team_id : extras ) );
*pupperDist = *plower + chunkD * incr - ( team_id < extras ? 0 : incr );
if( plastiter != NULL )
*plastiter = ( team_id == nteams - 1 );
} else {
- T chunk_inc_count =
+ T chunk_inc_count =
( trip_count / nteams + ( ( trip_count % nteams ) ? 1 : 0) ) * incr;
- T upper = *pupper;
+ T upper = *pupper;
KMP_DEBUG_ASSERT( __kmp_static == kmp_sch_static_greedy );
// Unknown static scheduling type.
*plower += team_id * chunk_inc_count;
@@ -532,17 +532,17 @@ __kmp_dist_for_static_init(
*plastiter = 0;
} else {
if( __kmp_static == kmp_sch_static_balanced ) {
- UT chunkL = trip_count / nth;
- UT extras = trip_count % nth;
+ UT chunkL = trip_count / nth;
+ UT extras = trip_count % nth;
*plower += incr * (tid * chunkL + (tid < extras ? tid : extras));
*pupper = *plower + chunkL * incr - (tid < extras ? 0 : incr);
if( plastiter != NULL )
if( *plastiter != 0 && !( tid == nth - 1 ) )
*plastiter = 0;
} else {
- T chunk_inc_count =
+ T chunk_inc_count =
( trip_count / nth + ( ( trip_count % nth ) ? 1 : 0) ) * incr;
- T upper = *pupperDist;
+ T upper = *pupperDist;
KMP_DEBUG_ASSERT( __kmp_static == kmp_sch_static_greedy );
// Unknown static scheduling type.
*plower += tid * chunk_inc_count;
@@ -570,7 +570,7 @@ __kmp_dist_for_static_init(
}
case kmp_sch_static_chunked:
{
- ST span;
+ ST span;
if( chunk < 1 )
chunk = 1;
span = chunk * incr;
diff --git a/contrib/libs/cxxsupp/openmp/thirdparty/ittnotify/ittnotify_static.c b/contrib/libs/cxxsupp/openmp/thirdparty/ittnotify/ittnotify_static.c
index a2723aa670..5a4ce445ff 100644
--- a/contrib/libs/cxxsupp/openmp/thirdparty/ittnotify/ittnotify_static.c
+++ b/contrib/libs/cxxsupp/openmp/thirdparty/ittnotify/ittnotify_static.c
@@ -804,7 +804,7 @@ static const char* __itt_get_lib_name(void)
static __itt_group_id __itt_get_groups(void)
{
- int i;
+ int i;
__itt_group_id res = __itt_group_none;
const char* var_name = "INTEL_ITTNOTIFY_GROUPS";
const char* group_str = __itt_get_env_var(var_name);
@@ -862,7 +862,7 @@ static int __itt_lib_version(lib_t lib)
/* It's not used right now! Comment it out to avoid warnings.
static void __itt_reinit_all_pointers(void)
{
- int i;
+ int i;
// Fill all pointers with initial stubs
for (i = 0; _N_(_ittapi_global).api_list_ptr[i].name != NULL; i++)
*_N_(_ittapi_global).api_list_ptr[i].func_ptr = _N_(_ittapi_global).api_list_ptr[i].init_func;
@@ -871,7 +871,7 @@ static void __itt_reinit_all_pointers(void)
static void __itt_nullify_all_pointers(void)
{
- int i;
+ int i;
/* Nulify all pointers except domain_create and string_handle_create */
for (i = 0; _N_(_ittapi_global).api_list_ptr[i].name != NULL; i++)
*_N_(_ittapi_global).api_list_ptr[i].func_ptr = _N_(_ittapi_global).api_list_ptr[i].null_func;
@@ -917,7 +917,7 @@ ITT_EXTERN_C void _N_(fini_ittlib)(void)
ITT_EXTERN_C int _N_(init_ittlib)(const char* lib_name, __itt_group_id init_groups)
{
- int i;
+ int i;
__itt_group_id groups;
#ifdef ITT_COMPLETE_GROUP
__itt_group_id zero_group = __itt_group_none;
diff --git a/contrib/libs/cxxsupp/openmp/z_Windows_NT_util.c b/contrib/libs/cxxsupp/openmp/z_Windows_NT_util.c
index 03a4afe5e1..7f5ccd4552 100644
--- a/contrib/libs/cxxsupp/openmp/z_Windows_NT_util.c
+++ b/contrib/libs/cxxsupp/openmp/z_Windows_NT_util.c
@@ -1549,7 +1549,7 @@ __kmp_reap_common( kmp_info_t * th )
// TODO: This code is very similar to KMP_WAIT_YIELD. Need to generalize KMP_WAIT_YIELD to
// cover this usage also.
void * obj = NULL;
- kmp_uint32 spins;
+ kmp_uint32 spins;
#if USE_ITT_BUILD
KMP_FSYNC_SPIN_INIT( obj, (void*) & th->th.th_info.ds.ds_alive );
#endif /* USE_ITT_BUILD */