author    Alexander Smirnov <alex@ydb.tech>  2024-07-08 15:54:05 +0000
committer Alexander Smirnov <alex@ydb.tech>  2024-07-08 15:54:05 +0000
commit    fc7be18c76af2e700641f3598c4856baeef1428e (patch)
tree      11dbca45eb321c3a4dd08b12152acc6ef5dd3fa9  /contrib/tools/bison/src/ielr.c
parent    ec0e7ed6da6fb317741fd8468602949a1362eca5 (diff)
parent    c92cb9d3a19331916f0c274d80e67f02a62caa9b (diff)
download  ydb-fc7be18c76af2e700641f3598c4856baeef1428e.tar.gz
Merge branch 'rightlib' into mergelibs-240708-1553
Diffstat (limited to 'contrib/tools/bison/src/ielr.c')
-rw-r--r--  contrib/tools/bison/src/ielr.c | 231
1 file changed, 96 insertions(+), 135 deletions(-)
diff --git a/contrib/tools/bison/src/ielr.c b/contrib/tools/bison/src/ielr.c
index 5500523170..867b8a0a2a 100644
--- a/contrib/tools/bison/src/ielr.c
+++ b/contrib/tools/bison/src/ielr.c
@@ -1,6 +1,6 @@
/* IELR main implementation.
- Copyright (C) 2009-2013 Free Software Foundation, Inc.
+ Copyright (C) 2009-2015, 2018-2019 Free Software Foundation, Inc.
This file is part of Bison, the GNU Compiler Compiler.
@@ -49,8 +49,8 @@ static bitset
ielr_compute_ritem_sees_lookahead_set (void)
{
bitset result = bitset_create (nritems, BITSET_FIXED);
- unsigned int i = nritems-1;
- while (i>0)
+ unsigned i = nritems-1;
+ while (0 < i)
{
--i;
while (!item_number_is_rule_number (ritem[i])
@@ -59,7 +59,7 @@ ielr_compute_ritem_sees_lookahead_set (void)
bitset_set (result, i--);
if (!item_number_is_rule_number (ritem[i]) && ISVAR (ritem[i]))
bitset_set (result, i--);
- while (!item_number_is_rule_number (ritem[i]) && i>0)
+ while (!item_number_is_rule_number (ritem[i]) && 0 < i)
--i;
}
if (trace_flag & trace_ielr)
@@ -101,16 +101,14 @@ ielr_compute_internal_follow_edges (bitset ritem_sees_lookahead_set,
*edge_countsp = xnmalloc (ngotos, sizeof **edge_countsp);
{
bitset sources = bitset_create (ngotos, BITSET_FIXED);
- goto_number i;
- for (i = 0; i < ngotos; ++i)
+ for (goto_number i = 0; i < ngotos; ++i)
(*edge_countsp)[i] = 0;
- for (i = 0; i < ngotos; ++i)
+ for (goto_number i = 0; i < ngotos; ++i)
{
int nsources = 0;
{
- rule **rulep;
- for (rulep = derives[states[to_state[i]]->accessing_symbol
- - ntokens];
+ for (rule **rulep = derives[states[to_state[i]]->accessing_symbol
+ - ntokens];
*rulep;
++rulep)
{
@@ -198,30 +196,25 @@ ielr_compute_follow_kernel_items (bitset ritem_sees_lookahead_set,
{
{
size_t max_nitems = 0;
- state_number i;
- for (i = 0; i < nstates; ++i)
+ for (state_number i = 0; i < nstates; ++i)
if (states[i]->nitems > max_nitems)
max_nitems = states[i]->nitems;
*follow_kernel_itemsp = bitsetv_create (ngotos, max_nitems, BITSET_FIXED);
}
- {
- goto_number i;
- for (i = 0; i < ngotos; ++i)
- {
- size_t nitems = states[from_state[i]]->nitems;
- item_number *items = states[from_state[i]]->items;
- size_t j;
- for (j = 0; j < nitems; ++j)
- /* If this item has this goto and if all subsequent symbols in this
- RHS (if any) are nullable nonterminals, then record this item as
- one whose lookahead set is included in this goto's follows. */
- if (item_number_is_symbol_number (ritem[items[j]])
- && item_number_as_symbol_number (ritem[items[j]])
- == states[to_state[i]]->accessing_symbol
- && bitset_test (ritem_sees_lookahead_set, items[j]))
- bitset_set ((*follow_kernel_itemsp)[i], j);
- }
- }
+ for (goto_number i = 0; i < ngotos; ++i)
+ {
+ size_t nitems = states[from_state[i]]->nitems;
+ item_number *items = states[from_state[i]]->items;
+ for (size_t j = 0; j < nitems; ++j)
+ /* If this item has this goto and if all subsequent symbols in this
+ RHS (if any) are nullable nonterminals, then record this item as
+ one whose lookahead set is included in this goto's follows. */
+ if (item_number_is_symbol_number (ritem[items[j]])
+ && item_number_as_symbol_number (ritem[items[j]])
+ == states[to_state[i]]->accessing_symbol
+ && bitset_test (ritem_sees_lookahead_set, items[j]))
+ bitset_set ((*follow_kernel_itemsp)[i], j);
+ }
relation_digraph (internal_follow_edges, ngotos, follow_kernel_itemsp);
if (trace_flag & trace_ielr)
@@ -252,8 +245,7 @@ ielr_compute_always_follows (goto_number ***edgesp,
*always_followsp = bitsetv_create (ngotos, ntokens, BITSET_FIXED);
{
goto_number *edge_array = xnmalloc (ngotos, sizeof *edge_array);
- goto_number i;
- for (i = 0; i < ngotos; ++i)
+ for (goto_number i = 0; i < ngotos; ++i)
{
goto_number nedges = edge_counts[i];
{
@@ -300,32 +292,25 @@ ielr_compute_always_follows (goto_number ***edgesp,
static state ***
ielr_compute_predecessors (void)
{
- state_number i;
int *predecessor_counts = xnmalloc (nstates, sizeof *predecessor_counts);
state ***result = xnmalloc (nstates, sizeof *result);
- for (i = 0; i < nstates; ++i)
+ for (state_number i = 0; i < nstates; ++i)
predecessor_counts[i] = 0;
- for (i = 0; i < nstates; ++i)
- {
- int j;
- for (j = 0; j < states[i]->transitions->num; ++j)
- ++predecessor_counts[states[i]->transitions->states[j]->number];
- }
- for (i = 0; i < nstates; ++i)
+ for (state_number i = 0; i < nstates; ++i)
+ for (int j = 0; j < states[i]->transitions->num; ++j)
+ ++predecessor_counts[states[i]->transitions->states[j]->number];
+ for (state_number i = 0; i < nstates; ++i)
{
result[i] = xnmalloc (predecessor_counts[i]+1, sizeof *result[i]);
result[i][predecessor_counts[i]] = NULL;
predecessor_counts[i] = 0;
}
- for (i = 0; i < nstates; ++i)
- {
- int j;
- for (j = 0; j < states[i]->transitions->num; ++j)
- {
- state_number k = states[i]->transitions->states[j]->number;
- result[k][predecessor_counts[k]++] = states[i];
- }
- }
+ for (state_number i = 0; i < nstates; ++i)
+ for (int j = 0; j < states[i]->transitions->num; ++j)
+ {
+ state_number k = states[i]->transitions->states[j]->number;
+ result[k][predecessor_counts[k]++] = states[i];
+ }
free (predecessor_counts);
return result;
}
@@ -354,11 +339,8 @@ ielr_compute_auxiliary_tables (bitsetv *follow_kernel_itemsp,
bitset_free (ritem_sees_lookahead_set);
}
ielr_compute_always_follows (&edges, edge_counts, always_followsp);
- {
- int i;
- for (i = 0; i < ngotos; ++i)
- free (edges[i]);
- }
+ for (int i = 0; i < ngotos; ++i)
+ free (edges[i]);
free (edges);
free (edge_counts);
if (predecessorsp)
@@ -381,10 +363,9 @@ ielr_item_has_lookahead (state *s, symbol_number lhs, size_t item,
{
if (!item_lookahead_sets[s->number])
{
- size_t i;
item_lookahead_sets[s->number] =
xnmalloc (s->nitems, sizeof item_lookahead_sets[s->number][0]);
- for (i = 0; i < s->nitems; ++i)
+ for (size_t i = 0; i < s->nitems; ++i)
item_lookahead_sets[s->number][i] = NULL;
}
if (!item_lookahead_sets[s->number][item])
@@ -412,20 +393,19 @@ ielr_item_has_lookahead (state *s, symbol_number lhs, size_t item,
top-level invocation), go get it. */
if (!lhs)
{
- unsigned int i;
+ unsigned i;
for (i = s->items[item];
!item_number_is_rule_number (ritem[i]);
++i)
- ;
+ continue;
lhs = rules[item_number_as_rule_number (ritem[i])].lhs->number;
}
/* If this kernel item is next to the beginning of the RHS, then
check all predecessors' goto follows for the LHS. */
if (item_number_is_rule_number (ritem[s->items[item] - 2]))
{
- state **predecessor;
- aver (lhs != accept->number);
- for (predecessor = predecessors[s->number];
+ aver (lhs != accept->content->number);
+ for (state **predecessor = predecessors[s->number];
*predecessor;
++predecessor)
bitset_or (item_lookahead_sets[s->number][item],
@@ -437,8 +417,7 @@ ielr_item_has_lookahead (state *s, symbol_number lhs, size_t item,
predecessor items' lookahead sets. */
else
{
- state **predecessor;
- for (predecessor = predecessors[s->number];
+ for (state **predecessor = predecessors[s->number];
*predecessor;
++predecessor)
{
@@ -492,12 +471,11 @@ ielr_compute_annotation_lists (bitsetv follow_kernel_items,
AnnotationIndex *annotation_counts =
xnmalloc (nstates, sizeof *annotation_counts);
ContributionIndex max_contributions = 0;
- unsigned int total_annotations = 0;
- state_number i;
+ unsigned total_annotations = 0;
*inadequacy_listsp = xnmalloc (nstates, sizeof **inadequacy_listsp);
*annotation_listsp = xnmalloc (nstates, sizeof **annotation_listsp);
- for (i = 0; i < nstates; ++i)
+ for (state_number i = 0; i < nstates; ++i)
{
item_lookahead_sets[i] = NULL;
(*inadequacy_listsp)[i] = NULL;
@@ -506,7 +484,7 @@ ielr_compute_annotation_lists (bitsetv follow_kernel_items,
}
{
InadequacyListNodeCount inadequacy_list_node_count = 0;
- for (i = 0; i < nstates; ++i)
+ for (state_number i = 0; i < nstates; ++i)
AnnotationList__compute_from_inadequacies (
states[i], follow_kernel_items, always_follows, predecessors,
item_lookahead_sets, *inadequacy_listsp, *annotation_listsp,
@@ -514,7 +492,7 @@ ielr_compute_annotation_lists (bitsetv follow_kernel_items,
&inadequacy_list_node_count);
}
*max_annotationsp = 0;
- for (i = 0; i < nstates; ++i)
+ for (state_number i = 0; i < nstates; ++i)
{
if (annotation_counts[i] > *max_annotationsp)
*max_annotationsp = annotation_counts[i];
@@ -522,7 +500,7 @@ ielr_compute_annotation_lists (bitsetv follow_kernel_items,
}
if (trace_flag & trace_ielr)
{
- for (i = 0; i < nstates; ++i)
+ for (state_number i = 0; i < nstates; ++i)
{
fprintf (stderr, "Inadequacy annotations for state %d:\n", i);
AnnotationList__debug ((*annotation_listsp)[i],
@@ -536,11 +514,10 @@ ielr_compute_annotation_lists (bitsetv follow_kernel_items,
fprintf (stderr, "Max number of contributions per annotation: %d\n",
max_contributions);
}
- for (i = 0; i < nstates; ++i)
+ for (state_number i = 0; i < nstates; ++i)
if (item_lookahead_sets[i])
{
- size_t j;
- for (j = 0; j < states[i]->nitems; ++j)
+ for (size_t j = 0; j < states[i]->nitems; ++j)
if (item_lookahead_sets[i][j])
bitset_free (item_lookahead_sets[i][j]);
free (item_lookahead_sets[i]);
@@ -549,7 +526,8 @@ ielr_compute_annotation_lists (bitsetv follow_kernel_items,
free (annotation_counts);
}
-typedef struct state_list {
+typedef struct state_list
+{
struct state_list *next;
state *state;
/** Has this state been recomputed as a successor of another state? */
@@ -580,7 +558,7 @@ typedef struct state_list {
static void
ielr_compute_goto_follow_set (bitsetv follow_kernel_items,
bitsetv always_follows, state_list *s,
- symbol *n, bitset follow_set)
+ sym_content *n, bitset follow_set)
{
goto_number n_goto = map_goto (s->lr0Isocore->state->number, n->number);
bitset_copy (follow_set, always_follows[n_goto]);
@@ -615,9 +593,8 @@ ielr_compute_lookaheads (bitsetv follow_kernel_items, bitsetv always_follows,
bitsetv lookaheads)
{
size_t s_item = 0;
- size_t t_item;
bitsetv_zero (lookaheads);
- for (t_item = 0; t_item < t->nitems; ++t_item)
+ for (size_t t_item = 0; t_item < t->nitems; ++t_item)
{
/* If this kernel item is the beginning of a RHS, it must be the
kernel item in the start state, but t is supposed to be a successor
@@ -631,7 +608,7 @@ ielr_compute_lookaheads (bitsetv follow_kernel_items, bitsetv always_follows,
{
if (item_number_is_rule_number (ritem[t->items[t_item] - 2]))
{
- unsigned int rule_item;
+ unsigned rule_item;
for (rule_item = t->items[t_item];
!item_number_is_rule_number (ritem[rule_item]);
++rule_item)
@@ -703,7 +680,6 @@ ielr_compute_state (bitsetv follow_kernel_items, bitsetv always_follows,
{
state_list *lr0_isocore = t->state_list->lr0Isocore;
state_list **this_isocorep;
- bool has_lookaheads;
/* Determine whether there's an isocore of t with which these lookaheads can
be merged. */
@@ -770,16 +746,13 @@ ielr_compute_state (bitsetv follow_kernel_items, bitsetv always_follows,
}
}
- has_lookaheads = false;
- {
- size_t i;
- for (i = 0; i < lr0_isocore->state->nitems; ++i)
- if (!bitset_empty_p (lookaheads[i]))
- {
- has_lookaheads = true;
- break;
- }
- }
+ bool has_lookaheads = false;
+ for (size_t i = 0; i < lr0_isocore->state->nitems; ++i)
+ if (!bitset_empty_p (lookaheads[i]))
+ {
+ has_lookaheads = true;
+ break;
+ }
/* Merge with an existing isocore. */
if (this_isocorep == &t->state_list || *this_isocorep != t->state_list)
@@ -822,11 +795,10 @@ ielr_compute_state (bitsetv follow_kernel_items, bitsetv always_follows,
(*this_isocorep)->recomputedAsSuccessor = true;
else if (new_lookaheads)
{
- int i;
/* When merging demands identical lookahead sets, it is impossible to
merge new lookaheads. */
aver (annotation_lists);
- for (i = 0; i < (*tp)->transitions->num; ++i)
+ for (int i = 0; i < (*tp)->transitions->num; ++i)
{
state *t2 = (*tp)->transitions->states[i];
/* At any time, there's at most one state for which we have so
@@ -918,10 +890,9 @@ ielr_compute_state (bitsetv follow_kernel_items, bitsetv always_follows,
(*last_statep)->lookaheads = NULL;
if (has_lookaheads)
{
- size_t i;
(*last_statep)->lookaheads =
xnmalloc (t->nitems, sizeof (*last_statep)->lookaheads);
- for (i = 0; i < t->nitems; ++i)
+ for (size_t i = 0; i < t->nitems; ++i)
{
if (bitset_empty_p (lookaheads[i]))
(*last_statep)->lookaheads[i] = NULL;
@@ -953,7 +924,7 @@ ielr_compute_state (bitsetv follow_kernel_items, bitsetv always_follows,
* compatibility or, if <tt>annotation_lists = NULL</tt>, the canonical
* LR(1) state compatibility test was used.
* - If <tt>annotation_lists = NULL</tt>, reduction lookahead sets were
- * computed in all states. TV_IELR_PHASE4 was pushed while they were
+ * computed in all states. tv_ielr_phase4 was pushed while they were
* computed from item lookahead sets.
*/
static void
@@ -969,9 +940,8 @@ ielr_split_states (bitsetv follow_kernel_items, bitsetv always_follows,
/* Set up state list and some reusable bitsets. */
{
size_t max_nitems = 0;
- state_number i;
state_list **nodep = &first_state;
- for (i = 0; i < nstates; ++i)
+ for (state_number i = 0; i < nstates; ++i)
{
*nodep = states[i]->state_list = last_state = xmalloc (sizeof **nodep);
(*nodep)->state = states[i];
@@ -993,12 +963,12 @@ ielr_split_states (bitsetv follow_kernel_items, bitsetv always_follows,
/* Recompute states. */
{
ContributionIndex *work = xnmalloc (max_annotations, sizeof *work);
- state_list *this_state;
- for (this_state = first_state; this_state; this_state = this_state->next)
+ for (state_list *this_state = first_state;
+ this_state;
+ this_state = this_state->next)
{
state *s = this_state->state;
- int i;
- for (i = 0; i < s->transitions->num; ++i)
+ for (int i = 0; i < s->transitions->num; ++i)
{
state *t = s->transitions->states[i];
if (annotation_lists)
@@ -1022,26 +992,21 @@ ielr_split_states (bitsetv follow_kernel_items, bitsetv always_follows,
/* Store states back in the states array. */
states = xnrealloc (states, nstates, sizeof *states);
- {
- state_list *node;
- for (node = first_state; node; node = node->next)
- states[node->state->number] = node->state;
- }
+ for (state_list *node = first_state; node; node = node->next)
+ states[node->state->number] = node->state;
/* In the case of canonical LR(1), copy item lookahead sets to reduction
lookahead sets. */
if (!annotation_lists)
{
- timevar_push (TV_IELR_PHASE4);
+ timevar_push (tv_ielr_phase4);
initialize_LA ();
- state_list *node;
- for (node = first_state; node; node = node->next)
+ for (state_list *node = first_state; node; node = node->next)
if (!node->state->consistent)
{
size_t i = 0;
item_number *itemset = node->state->items;
- size_t r;
- for (r = 0; r < node->state->reductions->num; ++r)
+ for (size_t r = 0; r < node->state->reductions->num; ++r)
{
rule *this_rule = node->state->reductions->rules[r];
bitset lookahead_set =
@@ -1067,7 +1032,7 @@ ielr_split_states (bitsetv follow_kernel_items, bitsetv always_follows,
}
}
}
- timevar_pop (TV_IELR_PHASE4);
+ timevar_pop (tv_ielr_phase4);
}
/* Free state list. */
@@ -1076,8 +1041,7 @@ ielr_split_states (bitsetv follow_kernel_items, bitsetv always_follows,
state_list *node = first_state;
if (node->lookaheads)
{
- size_t i;
- for (i = 0; i < node->state->nitems; ++i)
+ for (size_t i = 0; i < node->state->nitems; ++i)
if (node->lookaheads[i])
bitset_free (node->lookaheads[i]);
free (node->lookaheads);
@@ -1102,12 +1066,15 @@ ielr (void)
else if (STREQ (type, "canonical-lr"))
lr_type = LR_TYPE__CANONICAL_LR;
else
- aver (false);
+ {
+ aver (false);
+ abort ();
+ }
free (type);
}
/* Phase 0: LALR(1). */
- timevar_push (TV_LALR);
+ timevar_push (tv_lalr);
if (lr_type == LR_TYPE__CANONICAL_LR)
set_goto_map ();
else
@@ -1115,10 +1082,10 @@ ielr (void)
if (lr_type == LR_TYPE__LALR)
{
bitsetv_free (goto_follows);
- timevar_pop (TV_LALR);
+ timevar_pop (tv_lalr);
return;
}
- timevar_pop (TV_LALR);
+ timevar_pop (tv_lalr);
{
bitsetv follow_kernel_items;
@@ -1131,14 +1098,14 @@ ielr (void)
{
/* Phase 1: Compute Auxiliary Tables. */
state ***predecessors;
- timevar_push (TV_IELR_PHASE1);
+ timevar_push (tv_ielr_phase1);
ielr_compute_auxiliary_tables (
&follow_kernel_items, &always_follows,
lr_type == LR_TYPE__CANONICAL_LR ? NULL : &predecessors);
- timevar_pop (TV_IELR_PHASE1);
+ timevar_pop (tv_ielr_phase1);
/* Phase 2: Compute Annotations. */
- timevar_push (TV_IELR_PHASE2);
+ timevar_push (tv_ielr_phase2);
if (lr_type != LR_TYPE__CANONICAL_LR)
{
obstack_init (&annotations_obstack);
@@ -1146,30 +1113,24 @@ ielr (void)
predecessors, &max_annotations,
&inadequacy_lists, &annotation_lists,
&annotations_obstack);
- {
- state_number i;
- for (i = 0; i < nstates; ++i)
- free (predecessors[i]);
- }
+ for (state_number i = 0; i < nstates; ++i)
+ free (predecessors[i]);
free (predecessors);
bitsetv_free (goto_follows);
lalr_free ();
}
- timevar_pop (TV_IELR_PHASE2);
+ timevar_pop (tv_ielr_phase2);
}
/* Phase 3: Split States. */
- timevar_push (TV_IELR_PHASE3);
+ timevar_push (tv_ielr_phase3);
{
state_number nstates_lr0 = nstates;
ielr_split_states (follow_kernel_items, always_follows,
annotation_lists, max_annotations);
if (inadequacy_lists)
- {
- state_number i;
- for (i = 0; i < nstates_lr0; ++i)
- InadequacyList__delete (inadequacy_lists[i]);
- }
+ for (state_number i = 0; i < nstates_lr0; ++i)
+ InadequacyList__delete (inadequacy_lists[i]);
}
free (inadequacy_lists);
if (annotation_lists)
@@ -1177,11 +1138,11 @@ ielr (void)
free (annotation_lists);
bitsetv_free (follow_kernel_items);
bitsetv_free (always_follows);
- timevar_pop (TV_IELR_PHASE3);
+ timevar_pop (tv_ielr_phase3);
}
/* Phase 4: Compute Reduction Lookaheads. */
- timevar_push (TV_IELR_PHASE4);
+ timevar_push (tv_ielr_phase4);
free (goto_map);
free (from_state);
free (to_state);
@@ -1196,5 +1157,5 @@ ielr (void)
lalr ();
bitsetv_free (goto_follows);
}
- timevar_pop (TV_IELR_PHASE4);
+ timevar_pop (tv_ielr_phase4);
}