author     smosker <smosker@yandex-team.ru>              2022-02-10 16:48:22 +0300
committer  Daniil Cherednik <dcherednik@yandex-team.ru>  2022-02-10 16:48:22 +0300
commit     01fa2667d0e5e868b18424bc1906146e5ee340db (patch)
tree       5d5cb817648f650d76cf1076100726fd9b8448e8 /contrib/python/pycparser
parent     dd14d17a747a9c259858faf2fcc3ea6b92df4e15 (diff)
download   ydb-01fa2667d0e5e868b18424bc1906146e5ee340db.tar.gz
Restoring authorship annotation for <smosker@yandex-team.ru>. Commit 2 of 2.
Diffstat (limited to 'contrib/python/pycparser')
-rw-r--r--  contrib/python/pycparser/pycparser/__init__.py        |  10
-rw-r--r--  contrib/python/pycparser/pycparser/_ast_gen.py        | 134
-rw-r--r--  contrib/python/pycparser/pycparser/_build_tables.py   |   2
-rw-r--r--  contrib/python/pycparser/pycparser/_c_ast.cfg         |   2
-rw-r--r--  contrib/python/pycparser/pycparser/ast_transforms.py  |   2
-rw-r--r--  contrib/python/pycparser/pycparser/c_ast.py           | 550
-rw-r--r--  contrib/python/pycparser/pycparser/c_generator.py     |  94
-rw-r--r--  contrib/python/pycparser/pycparser/c_lexer.py         |   8
-rw-r--r--  contrib/python/pycparser/pycparser/c_parser.py        | 600
-rw-r--r--  contrib/python/pycparser/pycparser/lextab.py          |   6
-rw-r--r--  contrib/python/pycparser/pycparser/ply/__init__.py    |   2
-rw-r--r--  contrib/python/pycparser/pycparser/ply/cpp.py         |  74
-rw-r--r--  contrib/python/pycparser/pycparser/ply/lex.py         |  30
-rw-r--r--  contrib/python/pycparser/pycparser/ply/yacc.py        | 136
-rw-r--r--  contrib/python/pycparser/pycparser/plyparser.py       | 156
-rw-r--r--  contrib/python/pycparser/pycparser/yacctab.py         | 116
16 files changed, 961 insertions, 961 deletions
diff --git a/contrib/python/pycparser/pycparser/__init__.py b/contrib/python/pycparser/pycparser/__init__.py
index 5c179e725f..d82eb2d6fb 100644
--- a/contrib/python/pycparser/pycparser/__init__.py
+++ b/contrib/python/pycparser/pycparser/__init__.py
@@ -4,14 +4,14 @@
# This package file exports some convenience functions for
# interacting with pycparser
#
-# Eli Bendersky [https://eli.thegreenplace.net/]
+# Eli Bendersky [https://eli.thegreenplace.net/]
# License: BSD
#-----------------------------------------------------------------
__all__ = ['c_lexer', 'c_parser', 'c_ast']
__version__ = '2.21'
-import io
-from subprocess import check_output
+import io
+from subprocess import check_output
from .c_parser import CParser
@@ -39,7 +39,7 @@ def preprocess_file(filename, cpp_path='cpp', cpp_args=''):
try:
# Note the use of universal_newlines to treat all newlines
# as \n for Python's purpose
- text = check_output(path_list, universal_newlines=True)
+ text = check_output(path_list, universal_newlines=True)
except OSError as e:
raise RuntimeError("Unable to invoke 'cpp'. " +
'Make sure its path was passed correctly\n' +
@@ -82,7 +82,7 @@ def parse_file(filename, use_cpp=False, cpp_path='cpp', cpp_args='',
if use_cpp:
text = preprocess_file(filename, cpp_path, cpp_args)
else:
- with io.open(filename) as f:
+ with io.open(filename) as f:
text = f.read()
if parser is None:
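
For context, this file is pycparser's convenience front end: preprocess_file() shells out to the C preprocessor through subprocess.check_output(), and parse_file() either preprocesses or reads the file directly before handing the text to CParser. A minimal usage sketch of that API (the file name and include path below are illustrative, not part of this diff):

    import pycparser

    # Run cpp first, then parse; the result is a c_ast.FileAST node.
    ast = pycparser.parse_file('example.c', use_cpp=True,
                               cpp_path='cpp',
                               cpp_args=r'-I/path/to/fake_libc_include')
    ast.show()  # dump the parse tree to stdout
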
diff --git a/contrib/python/pycparser/pycparser/_ast_gen.py b/contrib/python/pycparser/pycparser/_ast_gen.py
index 176a0adb4b..0f7d330ba6 100644
--- a/contrib/python/pycparser/pycparser/_ast_gen.py
+++ b/contrib/python/pycparser/pycparser/_ast_gen.py
@@ -7,7 +7,7 @@
# The design of this module was inspired by astgen.py from the
# Python 2.5 code-base.
#
-# Eli Bendersky [https://eli.thegreenplace.net/]
+# Eli Bendersky [https://eli.thegreenplace.net/]
# License: BSD
#-----------------------------------------------------------------
from string import Template
@@ -62,7 +62,7 @@ class NodeCfg(object):
contents: a list of contents - attributes and child nodes
See comment at the top of the configuration file for details.
"""
-
+
def __init__(self, name, contents):
self.name = name
self.all_entries = []
@@ -84,7 +84,7 @@ class NodeCfg(object):
def generate_source(self):
src = self._gen_init()
src += '\n' + self._gen_children()
- src += '\n' + self._gen_iter()
+ src += '\n' + self._gen_iter()
src += '\n' + self._gen_attr_names()
return src
@@ -132,33 +132,33 @@ class NodeCfg(object):
return src
- def _gen_iter(self):
- src = ' def __iter__(self):\n'
-
- if self.all_entries:
- for child in self.child:
- src += (
- ' if self.%(child)s is not None:\n' +
- ' yield self.%(child)s\n') % (dict(child=child))
-
- for seq_child in self.seq_child:
- src += (
- ' for child in (self.%(child)s or []):\n'
- ' yield child\n') % (dict(child=seq_child))
-
- if not (self.child or self.seq_child):
- # Empty generator
- src += (
- ' return\n' +
- ' yield\n')
- else:
- # Empty generator
- src += (
- ' return\n' +
- ' yield\n')
-
- return src
-
+ def _gen_iter(self):
+ src = ' def __iter__(self):\n'
+
+ if self.all_entries:
+ for child in self.child:
+ src += (
+ ' if self.%(child)s is not None:\n' +
+ ' yield self.%(child)s\n') % (dict(child=child))
+
+ for seq_child in self.seq_child:
+ src += (
+ ' for child in (self.%(child)s or []):\n'
+ ' yield child\n') % (dict(child=seq_child))
+
+ if not (self.child or self.seq_child):
+ # Empty generator
+ src += (
+ ' return\n' +
+ ' yield\n')
+ else:
+ # Empty generator
+ src += (
+ ' return\n' +
+ ' yield\n')
+
+ return src
+
def _gen_attr_names(self):
src = " attr_names = (" + ''.join("%r, " % nm for nm in self.attr) + ')'
return src
@@ -178,7 +178,7 @@ r'''#-----------------------------------------------------------------
#
# AST Node classes.
#
-# Eli Bendersky [https://eli.thegreenplace.net/]
+# Eli Bendersky [https://eli.thegreenplace.net/]
# License: BSD
#-----------------------------------------------------------------
@@ -187,38 +187,38 @@ r'''#-----------------------------------------------------------------
_PROLOGUE_CODE = r'''
import sys
-def _repr(obj):
- """
- Get the representation of an object, with dedicated pprint-like format for lists.
- """
- if isinstance(obj, list):
- return '[' + (',\n '.join((_repr(e).replace('\n', '\n ') for e in obj))) + '\n]'
- else:
+def _repr(obj):
+ """
+ Get the representation of an object, with dedicated pprint-like format for lists.
+ """
+ if isinstance(obj, list):
+ return '[' + (',\n '.join((_repr(e).replace('\n', '\n ') for e in obj))) + '\n]'
+ else:
return repr(obj)
class Node(object):
__slots__ = ()
""" Abstract base class for AST nodes.
"""
- def __repr__(self):
- """ Generates a python representation of the current node
- """
- result = self.__class__.__name__ + '('
+ def __repr__(self):
+ """ Generates a python representation of the current node
+ """
+ result = self.__class__.__name__ + '('
+
+ indent = ''
+ separator = ''
+ for name in self.__slots__[:-2]:
+ result += separator
+ result += indent
+ result += name + '=' + (_repr(getattr(self, name)).replace('\n', '\n ' + (' ' * (len(name) + len(self.__class__.__name__)))))
- indent = ''
- separator = ''
- for name in self.__slots__[:-2]:
- result += separator
- result += indent
- result += name + '=' + (_repr(getattr(self, name)).replace('\n', '\n ' + (' ' * (len(name) + len(self.__class__.__name__)))))
+ separator = ','
+ indent = '\n ' + (' ' * len(self.__class__.__name__))
- separator = ','
- indent = '\n ' + (' ' * len(self.__class__.__name__))
+ result += indent + ')'
- result += indent + ')'
+ return result
- return result
-
def children(self):
""" A sequence of all children that are Nodes
"""
@@ -308,29 +308,29 @@ class NodeVisitor(object):
* Modeled after Python's own AST visiting facilities
(the ast module of Python 3.0)
"""
-
- _method_cache = None
-
+
+ _method_cache = None
+
def visit(self, node):
""" Visit a node.
"""
-
- if self._method_cache is None:
- self._method_cache = {}
-
- visitor = self._method_cache.get(node.__class__.__name__, None)
- if visitor is None:
- method = 'visit_' + node.__class__.__name__
- visitor = getattr(self, method, self.generic_visit)
- self._method_cache[node.__class__.__name__] = visitor
-
+
+ if self._method_cache is None:
+ self._method_cache = {}
+
+ visitor = self._method_cache.get(node.__class__.__name__, None)
+ if visitor is None:
+ method = 'visit_' + node.__class__.__name__
+ visitor = getattr(self, method, self.generic_visit)
+ self._method_cache[node.__class__.__name__] = visitor
+
return visitor(node)
def generic_visit(self, node):
""" Called if no explicit visitor function exists for a
node. Implements preorder visiting of the node.
"""
- for c in node:
+ for c in node:
self.visit(c)
'''
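
_ast_gen.py is the generator that produces c_ast.py from _c_ast.cfg; the restored _gen_iter() emits an __iter__ method on every node class, and the NodeVisitor prologue caches visit_* lookups per class name. A small sketch of what the generated module then supports (the C snippet is illustrative):

    from pycparser import c_parser, c_ast

    class FuncCallCounter(c_ast.NodeVisitor):
        def __init__(self):
            self.count = 0

        def visit_FuncCall(self, node):  # found via the per-class method cache
            self.count += 1
            self.generic_visit(node)     # keep walking nested calls

    ast = c_parser.CParser().parse('void f(void) { g(); h(g()); }')
    counter = FuncCallCounter()
    counter.visit(ast)
    print(counter.count)  # 3
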
diff --git a/contrib/python/pycparser/pycparser/_build_tables.py b/contrib/python/pycparser/pycparser/_build_tables.py
index 761cc4cf1a..958381ad0f 100644
--- a/contrib/python/pycparser/pycparser/_build_tables.py
+++ b/contrib/python/pycparser/pycparser/_build_tables.py
@@ -6,7 +6,7 @@
# Also generates AST code from the configuration file.
# Should be called from the pycparser directory.
#
-# Eli Bendersky [https://eli.thegreenplace.net/]
+# Eli Bendersky [https://eli.thegreenplace.net/]
# License: BSD
#-----------------------------------------------------------------
diff --git a/contrib/python/pycparser/pycparser/_c_ast.cfg b/contrib/python/pycparser/pycparser/_c_ast.cfg
index 6f254f4d32..0626533e8a 100644
--- a/contrib/python/pycparser/pycparser/_c_ast.cfg
+++ b/contrib/python/pycparser/pycparser/_c_ast.cfg
@@ -9,7 +9,7 @@
# <name>** - a sequence of child nodes
# <name> - an attribute
#
-# Eli Bendersky [https://eli.thegreenplace.net/]
+# Eli Bendersky [https://eli.thegreenplace.net/]
# License: BSD
#-----------------------------------------------------------------
diff --git a/contrib/python/pycparser/pycparser/ast_transforms.py b/contrib/python/pycparser/pycparser/ast_transforms.py
index 633ae74b3a..367dcf54c5 100644
--- a/contrib/python/pycparser/pycparser/ast_transforms.py
+++ b/contrib/python/pycparser/pycparser/ast_transforms.py
@@ -3,7 +3,7 @@
#
# Some utilities used by the parser to create a friendlier AST.
#
-# Eli Bendersky [https://eli.thegreenplace.net/]
+# Eli Bendersky [https://eli.thegreenplace.net/]
# License: BSD
#------------------------------------------------------------------------------
diff --git a/contrib/python/pycparser/pycparser/c_ast.py b/contrib/python/pycparser/pycparser/c_ast.py
index fc07648fef..6575a2ad39 100644
--- a/contrib/python/pycparser/pycparser/c_ast.py
+++ b/contrib/python/pycparser/pycparser/c_ast.py
@@ -11,45 +11,45 @@
#
# AST Node classes.
#
-# Eli Bendersky [https://eli.thegreenplace.net/]
+# Eli Bendersky [https://eli.thegreenplace.net/]
# License: BSD
#-----------------------------------------------------------------
import sys
-def _repr(obj):
- """
- Get the representation of an object, with dedicated pprint-like format for lists.
- """
- if isinstance(obj, list):
- return '[' + (',\n '.join((_repr(e).replace('\n', '\n ') for e in obj))) + '\n]'
- else:
+def _repr(obj):
+ """
+ Get the representation of an object, with dedicated pprint-like format for lists.
+ """
+ if isinstance(obj, list):
+ return '[' + (',\n '.join((_repr(e).replace('\n', '\n ') for e in obj))) + '\n]'
+ else:
return repr(obj)
class Node(object):
__slots__ = ()
""" Abstract base class for AST nodes.
"""
- def __repr__(self):
- """ Generates a python representation of the current node
- """
- result = self.__class__.__name__ + '('
+ def __repr__(self):
+ """ Generates a python representation of the current node
+ """
+ result = self.__class__.__name__ + '('
+
+ indent = ''
+ separator = ''
+ for name in self.__slots__[:-2]:
+ result += separator
+ result += indent
+ result += name + '=' + (_repr(getattr(self, name)).replace('\n', '\n ' + (' ' * (len(name) + len(self.__class__.__name__)))))
- indent = ''
- separator = ''
- for name in self.__slots__[:-2]:
- result += separator
- result += indent
- result += name + '=' + (_repr(getattr(self, name)).replace('\n', '\n ' + (' ' * (len(name) + len(self.__class__.__name__)))))
+ separator = ','
+ indent = '\n ' + (' ' * len(self.__class__.__name__))
- separator = ','
- indent = '\n ' + (' ' * len(self.__class__.__name__))
+ result += indent + ')'
- result += indent + ')'
+ return result
- return result
-
def children(self):
""" A sequence of all children that are Nodes
"""
@@ -139,29 +139,29 @@ class NodeVisitor(object):
* Modeled after Python's own AST visiting facilities
(the ast module of Python 3.0)
"""
-
- _method_cache = None
-
+
+ _method_cache = None
+
def visit(self, node):
""" Visit a node.
"""
-
- if self._method_cache is None:
- self._method_cache = {}
-
- visitor = self._method_cache.get(node.__class__.__name__, None)
- if visitor is None:
- method = 'visit_' + node.__class__.__name__
- visitor = getattr(self, method, self.generic_visit)
- self._method_cache[node.__class__.__name__] = visitor
-
+
+ if self._method_cache is None:
+ self._method_cache = {}
+
+ visitor = self._method_cache.get(node.__class__.__name__, None)
+ if visitor is None:
+ method = 'visit_' + node.__class__.__name__
+ visitor = getattr(self, method, self.generic_visit)
+ self._method_cache[node.__class__.__name__] = visitor
+
return visitor(node)
def generic_visit(self, node):
""" Called if no explicit visitor function exists for a
node. Implements preorder visiting of the node.
"""
- for c in node:
+ for c in node:
self.visit(c)
class ArrayDecl(Node):
@@ -178,12 +178,12 @@ class ArrayDecl(Node):
if self.dim is not None: nodelist.append(("dim", self.dim))
return tuple(nodelist)
- def __iter__(self):
- if self.type is not None:
- yield self.type
- if self.dim is not None:
- yield self.dim
-
+ def __iter__(self):
+ if self.type is not None:
+ yield self.type
+ if self.dim is not None:
+ yield self.dim
+
attr_names = ('dim_quals', )
class ArrayRef(Node):
@@ -199,12 +199,12 @@ class ArrayRef(Node):
if self.subscript is not None: nodelist.append(("subscript", self.subscript))
return tuple(nodelist)
- def __iter__(self):
- if self.name is not None:
- yield self.name
- if self.subscript is not None:
- yield self.subscript
-
+ def __iter__(self):
+ if self.name is not None:
+ yield self.name
+ if self.subscript is not None:
+ yield self.subscript
+
attr_names = ()
class Assignment(Node):
@@ -221,12 +221,12 @@ class Assignment(Node):
if self.rvalue is not None: nodelist.append(("rvalue", self.rvalue))
return tuple(nodelist)
- def __iter__(self):
- if self.lvalue is not None:
- yield self.lvalue
- if self.rvalue is not None:
- yield self.rvalue
-
+ def __iter__(self):
+ if self.lvalue is not None:
+ yield self.lvalue
+ if self.rvalue is not None:
+ yield self.rvalue
+
attr_names = ('op', )
class Alignas(Node):
@@ -260,12 +260,12 @@ class BinaryOp(Node):
if self.right is not None: nodelist.append(("right", self.right))
return tuple(nodelist)
- def __iter__(self):
- if self.left is not None:
- yield self.left
- if self.right is not None:
- yield self.right
-
+ def __iter__(self):
+ if self.left is not None:
+ yield self.left
+ if self.right is not None:
+ yield self.right
+
attr_names = ('op', )
class Break(Node):
@@ -276,10 +276,10 @@ class Break(Node):
def children(self):
return ()
- def __iter__(self):
- return
- yield
-
+ def __iter__(self):
+ return
+ yield
+
attr_names = ()
class Case(Node):
@@ -296,12 +296,12 @@ class Case(Node):
nodelist.append(("stmts[%d]" % i, child))
return tuple(nodelist)
- def __iter__(self):
- if self.expr is not None:
- yield self.expr
- for child in (self.stmts or []):
- yield child
-
+ def __iter__(self):
+ if self.expr is not None:
+ yield self.expr
+ for child in (self.stmts or []):
+ yield child
+
attr_names = ()
class Cast(Node):
@@ -317,12 +317,12 @@ class Cast(Node):
if self.expr is not None: nodelist.append(("expr", self.expr))
return tuple(nodelist)
- def __iter__(self):
- if self.to_type is not None:
- yield self.to_type
- if self.expr is not None:
- yield self.expr
-
+ def __iter__(self):
+ if self.to_type is not None:
+ yield self.to_type
+ if self.expr is not None:
+ yield self.expr
+
attr_names = ()
class Compound(Node):
@@ -337,10 +337,10 @@ class Compound(Node):
nodelist.append(("block_items[%d]" % i, child))
return tuple(nodelist)
- def __iter__(self):
- for child in (self.block_items or []):
- yield child
-
+ def __iter__(self):
+ for child in (self.block_items or []):
+ yield child
+
attr_names = ()
class CompoundLiteral(Node):
@@ -356,12 +356,12 @@ class CompoundLiteral(Node):
if self.init is not None: nodelist.append(("init", self.init))
return tuple(nodelist)
- def __iter__(self):
- if self.type is not None:
- yield self.type
- if self.init is not None:
- yield self.init
-
+ def __iter__(self):
+ if self.type is not None:
+ yield self.type
+ if self.init is not None:
+ yield self.init
+
attr_names = ()
class Constant(Node):
@@ -375,10 +375,10 @@ class Constant(Node):
nodelist = []
return tuple(nodelist)
- def __iter__(self):
- return
- yield
-
+ def __iter__(self):
+ return
+ yield
+
attr_names = ('type', 'value', )
class Continue(Node):
@@ -389,10 +389,10 @@ class Continue(Node):
def children(self):
return ()
- def __iter__(self):
- return
- yield
-
+ def __iter__(self):
+ return
+ yield
+
attr_names = ()
class Decl(Node):
@@ -415,14 +415,14 @@ class Decl(Node):
if self.bitsize is not None: nodelist.append(("bitsize", self.bitsize))
return tuple(nodelist)
- def __iter__(self):
- if self.type is not None:
- yield self.type
- if self.init is not None:
- yield self.init
- if self.bitsize is not None:
- yield self.bitsize
-
+ def __iter__(self):
+ if self.type is not None:
+ yield self.type
+ if self.init is not None:
+ yield self.init
+ if self.bitsize is not None:
+ yield self.bitsize
+
attr_names = ('name', 'quals', 'align', 'storage', 'funcspec', )
class DeclList(Node):
@@ -437,10 +437,10 @@ class DeclList(Node):
nodelist.append(("decls[%d]" % i, child))
return tuple(nodelist)
- def __iter__(self):
- for child in (self.decls or []):
- yield child
-
+ def __iter__(self):
+ for child in (self.decls or []):
+ yield child
+
attr_names = ()
class Default(Node):
@@ -455,10 +455,10 @@ class Default(Node):
nodelist.append(("stmts[%d]" % i, child))
return tuple(nodelist)
- def __iter__(self):
- for child in (self.stmts or []):
- yield child
-
+ def __iter__(self):
+ for child in (self.stmts or []):
+ yield child
+
attr_names = ()
class DoWhile(Node):
@@ -474,12 +474,12 @@ class DoWhile(Node):
if self.stmt is not None: nodelist.append(("stmt", self.stmt))
return tuple(nodelist)
- def __iter__(self):
- if self.cond is not None:
- yield self.cond
- if self.stmt is not None:
- yield self.stmt
-
+ def __iter__(self):
+ if self.cond is not None:
+ yield self.cond
+ if self.stmt is not None:
+ yield self.stmt
+
attr_names = ()
class EllipsisParam(Node):
@@ -490,10 +490,10 @@ class EllipsisParam(Node):
def children(self):
return ()
- def __iter__(self):
- return
- yield
-
+ def __iter__(self):
+ return
+ yield
+
attr_names = ()
class EmptyStatement(Node):
@@ -504,10 +504,10 @@ class EmptyStatement(Node):
def children(self):
return ()
- def __iter__(self):
- return
- yield
-
+ def __iter__(self):
+ return
+ yield
+
attr_names = ()
class Enum(Node):
@@ -522,10 +522,10 @@ class Enum(Node):
if self.values is not None: nodelist.append(("values", self.values))
return tuple(nodelist)
- def __iter__(self):
- if self.values is not None:
- yield self.values
-
+ def __iter__(self):
+ if self.values is not None:
+ yield self.values
+
attr_names = ('name', )
class Enumerator(Node):
@@ -540,10 +540,10 @@ class Enumerator(Node):
if self.value is not None: nodelist.append(("value", self.value))
return tuple(nodelist)
- def __iter__(self):
- if self.value is not None:
- yield self.value
-
+ def __iter__(self):
+ if self.value is not None:
+ yield self.value
+
attr_names = ('name', )
class EnumeratorList(Node):
@@ -558,10 +558,10 @@ class EnumeratorList(Node):
nodelist.append(("enumerators[%d]" % i, child))
return tuple(nodelist)
- def __iter__(self):
- for child in (self.enumerators or []):
- yield child
-
+ def __iter__(self):
+ for child in (self.enumerators or []):
+ yield child
+
attr_names = ()
class ExprList(Node):
@@ -576,10 +576,10 @@ class ExprList(Node):
nodelist.append(("exprs[%d]" % i, child))
return tuple(nodelist)
- def __iter__(self):
- for child in (self.exprs or []):
- yield child
-
+ def __iter__(self):
+ for child in (self.exprs or []):
+ yield child
+
attr_names = ()
class FileAST(Node):
@@ -594,10 +594,10 @@ class FileAST(Node):
nodelist.append(("ext[%d]" % i, child))
return tuple(nodelist)
- def __iter__(self):
- for child in (self.ext or []):
- yield child
-
+ def __iter__(self):
+ for child in (self.ext or []):
+ yield child
+
attr_names = ()
class For(Node):
@@ -617,16 +617,16 @@ class For(Node):
if self.stmt is not None: nodelist.append(("stmt", self.stmt))
return tuple(nodelist)
- def __iter__(self):
- if self.init is not None:
- yield self.init
- if self.cond is not None:
- yield self.cond
- if self.next is not None:
- yield self.next
- if self.stmt is not None:
- yield self.stmt
-
+ def __iter__(self):
+ if self.init is not None:
+ yield self.init
+ if self.cond is not None:
+ yield self.cond
+ if self.next is not None:
+ yield self.next
+ if self.stmt is not None:
+ yield self.stmt
+
attr_names = ()
class FuncCall(Node):
@@ -642,12 +642,12 @@ class FuncCall(Node):
if self.args is not None: nodelist.append(("args", self.args))
return tuple(nodelist)
- def __iter__(self):
- if self.name is not None:
- yield self.name
- if self.args is not None:
- yield self.args
-
+ def __iter__(self):
+ if self.name is not None:
+ yield self.name
+ if self.args is not None:
+ yield self.args
+
attr_names = ()
class FuncDecl(Node):
@@ -663,12 +663,12 @@ class FuncDecl(Node):
if self.type is not None: nodelist.append(("type", self.type))
return tuple(nodelist)
- def __iter__(self):
- if self.args is not None:
- yield self.args
- if self.type is not None:
- yield self.type
-
+ def __iter__(self):
+ if self.args is not None:
+ yield self.args
+ if self.type is not None:
+ yield self.type
+
attr_names = ()
class FuncDef(Node):
@@ -687,14 +687,14 @@ class FuncDef(Node):
nodelist.append(("param_decls[%d]" % i, child))
return tuple(nodelist)
- def __iter__(self):
- if self.decl is not None:
- yield self.decl
- if self.body is not None:
- yield self.body
- for child in (self.param_decls or []):
- yield child
-
+ def __iter__(self):
+ if self.decl is not None:
+ yield self.decl
+ if self.body is not None:
+ yield self.body
+ for child in (self.param_decls or []):
+ yield child
+
attr_names = ()
class Goto(Node):
@@ -707,10 +707,10 @@ class Goto(Node):
nodelist = []
return tuple(nodelist)
- def __iter__(self):
- return
- yield
-
+ def __iter__(self):
+ return
+ yield
+
attr_names = ('name', )
class ID(Node):
@@ -723,10 +723,10 @@ class ID(Node):
nodelist = []
return tuple(nodelist)
- def __iter__(self):
- return
- yield
-
+ def __iter__(self):
+ return
+ yield
+
attr_names = ('name', )
class IdentifierType(Node):
@@ -739,10 +739,10 @@ class IdentifierType(Node):
nodelist = []
return tuple(nodelist)
- def __iter__(self):
- return
- yield
-
+ def __iter__(self):
+ return
+ yield
+
attr_names = ('names', )
class If(Node):
@@ -760,14 +760,14 @@ class If(Node):
if self.iffalse is not None: nodelist.append(("iffalse", self.iffalse))
return tuple(nodelist)
- def __iter__(self):
- if self.cond is not None:
- yield self.cond
- if self.iftrue is not None:
- yield self.iftrue
- if self.iffalse is not None:
- yield self.iffalse
-
+ def __iter__(self):
+ if self.cond is not None:
+ yield self.cond
+ if self.iftrue is not None:
+ yield self.iftrue
+ if self.iffalse is not None:
+ yield self.iffalse
+
attr_names = ()
class InitList(Node):
@@ -782,10 +782,10 @@ class InitList(Node):
nodelist.append(("exprs[%d]" % i, child))
return tuple(nodelist)
- def __iter__(self):
- for child in (self.exprs or []):
- yield child
-
+ def __iter__(self):
+ for child in (self.exprs or []):
+ yield child
+
attr_names = ()
class Label(Node):
@@ -800,10 +800,10 @@ class Label(Node):
if self.stmt is not None: nodelist.append(("stmt", self.stmt))
return tuple(nodelist)
- def __iter__(self):
- if self.stmt is not None:
- yield self.stmt
-
+ def __iter__(self):
+ if self.stmt is not None:
+ yield self.stmt
+
attr_names = ('name', )
class NamedInitializer(Node):
@@ -820,12 +820,12 @@ class NamedInitializer(Node):
nodelist.append(("name[%d]" % i, child))
return tuple(nodelist)
- def __iter__(self):
- if self.expr is not None:
- yield self.expr
- for child in (self.name or []):
- yield child
-
+ def __iter__(self):
+ if self.expr is not None:
+ yield self.expr
+ for child in (self.name or []):
+ yield child
+
attr_names = ()
class ParamList(Node):
@@ -840,10 +840,10 @@ class ParamList(Node):
nodelist.append(("params[%d]" % i, child))
return tuple(nodelist)
- def __iter__(self):
- for child in (self.params or []):
- yield child
-
+ def __iter__(self):
+ for child in (self.params or []):
+ yield child
+
attr_names = ()
class PtrDecl(Node):
@@ -858,10 +858,10 @@ class PtrDecl(Node):
if self.type is not None: nodelist.append(("type", self.type))
return tuple(nodelist)
- def __iter__(self):
- if self.type is not None:
- yield self.type
-
+ def __iter__(self):
+ if self.type is not None:
+ yield self.type
+
attr_names = ('quals', )
class Return(Node):
@@ -875,10 +875,10 @@ class Return(Node):
if self.expr is not None: nodelist.append(("expr", self.expr))
return tuple(nodelist)
- def __iter__(self):
- if self.expr is not None:
- yield self.expr
-
+ def __iter__(self):
+ if self.expr is not None:
+ yield self.expr
+
attr_names = ()
class StaticAssert(Node):
@@ -915,10 +915,10 @@ class Struct(Node):
nodelist.append(("decls[%d]" % i, child))
return tuple(nodelist)
- def __iter__(self):
- for child in (self.decls or []):
- yield child
-
+ def __iter__(self):
+ for child in (self.decls or []):
+ yield child
+
attr_names = ('name', )
class StructRef(Node):
@@ -935,12 +935,12 @@ class StructRef(Node):
if self.field is not None: nodelist.append(("field", self.field))
return tuple(nodelist)
- def __iter__(self):
- if self.name is not None:
- yield self.name
- if self.field is not None:
- yield self.field
-
+ def __iter__(self):
+ if self.name is not None:
+ yield self.name
+ if self.field is not None:
+ yield self.field
+
attr_names = ('type', )
class Switch(Node):
@@ -956,12 +956,12 @@ class Switch(Node):
if self.stmt is not None: nodelist.append(("stmt", self.stmt))
return tuple(nodelist)
- def __iter__(self):
- if self.cond is not None:
- yield self.cond
- if self.stmt is not None:
- yield self.stmt
-
+ def __iter__(self):
+ if self.cond is not None:
+ yield self.cond
+ if self.stmt is not None:
+ yield self.stmt
+
attr_names = ()
class TernaryOp(Node):
@@ -979,14 +979,14 @@ class TernaryOp(Node):
if self.iffalse is not None: nodelist.append(("iffalse", self.iffalse))
return tuple(nodelist)
- def __iter__(self):
- if self.cond is not None:
- yield self.cond
- if self.iftrue is not None:
- yield self.iftrue
- if self.iffalse is not None:
- yield self.iffalse
-
+ def __iter__(self):
+ if self.cond is not None:
+ yield self.cond
+ if self.iftrue is not None:
+ yield self.iftrue
+ if self.iffalse is not None:
+ yield self.iffalse
+
attr_names = ()
class TypeDecl(Node):
@@ -1003,10 +1003,10 @@ class TypeDecl(Node):
if self.type is not None: nodelist.append(("type", self.type))
return tuple(nodelist)
- def __iter__(self):
- if self.type is not None:
- yield self.type
-
+ def __iter__(self):
+ if self.type is not None:
+ yield self.type
+
attr_names = ('declname', 'quals', 'align', )
class Typedef(Node):
@@ -1023,10 +1023,10 @@ class Typedef(Node):
if self.type is not None: nodelist.append(("type", self.type))
return tuple(nodelist)
- def __iter__(self):
- if self.type is not None:
- yield self.type
-
+ def __iter__(self):
+ if self.type is not None:
+ yield self.type
+
attr_names = ('name', 'quals', 'storage', )
class Typename(Node):
@@ -1043,10 +1043,10 @@ class Typename(Node):
if self.type is not None: nodelist.append(("type", self.type))
return tuple(nodelist)
- def __iter__(self):
- if self.type is not None:
- yield self.type
-
+ def __iter__(self):
+ if self.type is not None:
+ yield self.type
+
attr_names = ('name', 'quals', 'align', )
class UnaryOp(Node):
@@ -1061,10 +1061,10 @@ class UnaryOp(Node):
if self.expr is not None: nodelist.append(("expr", self.expr))
return tuple(nodelist)
- def __iter__(self):
- if self.expr is not None:
- yield self.expr
-
+ def __iter__(self):
+ if self.expr is not None:
+ yield self.expr
+
attr_names = ('op', )
class Union(Node):
@@ -1080,10 +1080,10 @@ class Union(Node):
nodelist.append(("decls[%d]" % i, child))
return tuple(nodelist)
- def __iter__(self):
- for child in (self.decls or []):
- yield child
-
+ def __iter__(self):
+ for child in (self.decls or []):
+ yield child
+
attr_names = ('name', )
class While(Node):
@@ -1099,12 +1099,12 @@ class While(Node):
if self.stmt is not None: nodelist.append(("stmt", self.stmt))
return tuple(nodelist)
- def __iter__(self):
- if self.cond is not None:
- yield self.cond
- if self.stmt is not None:
- yield self.stmt
-
+ def __iter__(self):
+ if self.cond is not None:
+ yield self.cond
+ if self.stmt is not None:
+ yield self.stmt
+
attr_names = ()
class Pragma(Node):
@@ -1117,9 +1117,9 @@ class Pragma(Node):
nodelist = []
return tuple(nodelist)
- def __iter__(self):
- return
- yield
-
+ def __iter__(self):
+ return
+ yield
+
attr_names = ('string', )
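
The hunks above restore __iter__ and the pprint-style __repr__ on every generated node class, so a node can be iterated directly over its non-None children. A hand-built example using the constructors defined in this file (the expression itself is illustrative):

    from pycparser import c_ast

    # The expression x + 42 as an AST fragment.
    binop = c_ast.BinaryOp(op='+',
                           left=c_ast.ID(name='x'),
                           right=c_ast.Constant(type='int', value='42'))

    for child in binop:              # __iter__ yields left, then right
        print(type(child).__name__)  # ID, Constant

    print(repr(binop))               # multi-line repr built by _repr()
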
diff --git a/contrib/python/pycparser/pycparser/c_generator.py b/contrib/python/pycparser/pycparser/c_generator.py
index 7edeca7fcd..1057b2c62e 100644
--- a/contrib/python/pycparser/pycparser/c_generator.py
+++ b/contrib/python/pycparser/pycparser/c_generator.py
@@ -3,7 +3,7 @@
#
# C code generator from pycparser AST nodes.
#
-# Eli Bendersky [https://eli.thegreenplace.net/]
+# Eli Bendersky [https://eli.thegreenplace.net/]
# License: BSD
#------------------------------------------------------------------------------
from . import c_ast
@@ -43,7 +43,7 @@ class CGenerator(object):
def visit_ID(self, n):
return n.name
-
+
def visit_Pragma(self, n):
ret = '#pragma'
if n.string:
@@ -178,24 +178,24 @@ class CGenerator(object):
return ', '.join(visited_subexprs)
def visit_Enum(self, n):
- return self._generate_struct_union_enum(n, name='enum')
+ return self._generate_struct_union_enum(n, name='enum')
def visit_Alignas(self, n):
return '_Alignas({})'.format(self.visit(n.alignment))
- def visit_Enumerator(self, n):
- if not n.value:
- return '{indent}{name},\n'.format(
- indent=self._make_indent(),
- name=n.name,
- )
- else:
- return '{indent}{name} = {value},\n'.format(
- indent=self._make_indent(),
- name=n.name,
- value=self.visit(n.value),
- )
-
+ def visit_Enumerator(self, n):
+ if not n.value:
+ return '{indent}{name},\n'.format(
+ indent=self._make_indent(),
+ name=n.name,
+ )
+ else:
+ return '{indent}{name} = {value},\n'.format(
+ indent=self._make_indent(),
+ name=n.name,
+ value=self.visit(n.value),
+ )
+
def visit_FuncDef(self, n):
decl = self.visit(n.decl)
self.indent_level = 0
@@ -226,10 +226,10 @@ class CGenerator(object):
s += self._make_indent() + '}\n'
return s
- def visit_CompoundLiteral(self, n):
- return '(' + self.visit(n.type) + '){' + self.visit(n.init) + '}'
-
-
+ def visit_CompoundLiteral(self, n):
+ return '(' + self.visit(n.type) + '){' + self.visit(n.init) + '}'
+
+
def visit_EmptyStatement(self, n):
return ';'
@@ -325,21 +325,21 @@ class CGenerator(object):
return '...'
def visit_Struct(self, n):
- return self._generate_struct_union_enum(n, 'struct')
+ return self._generate_struct_union_enum(n, 'struct')
def visit_Typename(self, n):
return self._generate_type(n.type)
def visit_Union(self, n):
- return self._generate_struct_union_enum(n, 'union')
+ return self._generate_struct_union_enum(n, 'union')
def visit_NamedInitializer(self, n):
s = ''
for name in n.name:
if isinstance(name, c_ast.ID):
s += '.' + name.name
- else:
- s += '[' + self.visit(name) + ']'
+ else:
+ s += '[' + self.visit(name) + ']'
s += ' = ' + self._visit_expr(n.expr)
return s
@@ -355,37 +355,37 @@ class CGenerator(object):
def visit_PtrDecl(self, n):
return self._generate_type(n, emit_declname=False)
- def _generate_struct_union_enum(self, n, name):
- """ Generates code for structs, unions, and enums. name should be
- 'struct', 'union', or 'enum'.
+ def _generate_struct_union_enum(self, n, name):
+ """ Generates code for structs, unions, and enums. name should be
+ 'struct', 'union', or 'enum'.
"""
- if name in ('struct', 'union'):
- members = n.decls
- body_function = self._generate_struct_union_body
- else:
- assert name == 'enum'
- members = None if n.values is None else n.values.enumerators
- body_function = self._generate_enum_body
+ if name in ('struct', 'union'):
+ members = n.decls
+ body_function = self._generate_struct_union_body
+ else:
+ assert name == 'enum'
+ members = None if n.values is None else n.values.enumerators
+ body_function = self._generate_enum_body
s = name + ' ' + (n.name or '')
- if members is not None:
- # None means no members
- # Empty sequence means an empty list of members
+ if members is not None:
+ # None means no members
+ # Empty sequence means an empty list of members
s += '\n'
s += self._make_indent()
self.indent_level += 2
s += '{\n'
- s += body_function(members)
+ s += body_function(members)
self.indent_level -= 2
s += self._make_indent() + '}'
return s
- def _generate_struct_union_body(self, members):
- return ''.join(self._generate_stmt(decl) for decl in members)
-
- def _generate_enum_body(self, members):
- # `[:-2] + '\n'` removes the final `,` from the enumerator list
- return ''.join(self.visit(value) for value in members)[:-2] + '\n'
-
+ def _generate_struct_union_body(self, members):
+ return ''.join(self._generate_stmt(decl) for decl in members)
+
+ def _generate_enum_body(self, members):
+ # `[:-2] + '\n'` removes the final `,` from the enumerator list
+ return ''.join(self.visit(value) for value in members)[:-2] + '\n'
+
def _generate_stmt(self, n, add_indent=False):
""" Generation from a statement node. This method exists as a wrapper
for individual visit_* methods to handle different treatment of
@@ -498,5 +498,5 @@ class CGenerator(object):
""" Returns True for nodes that are "simple" - i.e. nodes that always
have higher precedence than operators.
"""
- return isinstance(n, (c_ast.Constant, c_ast.ID, c_ast.ArrayRef,
- c_ast.StructRef, c_ast.FuncCall))
+ return isinstance(n, (c_ast.Constant, c_ast.ID, c_ast.ArrayRef,
+ c_ast.StructRef, c_ast.FuncCall))
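
c_generator.CGenerator turns an AST back into C source; the restored helpers above handle struct/union/enum bodies (note how _generate_enum_body trims the trailing comma from the enumerator list). A round-trip sketch under those definitions (the input string is illustrative):

    from pycparser import c_parser, c_generator

    src = 'enum color { RED, GREEN = 5 }; struct point { int x, y; };'
    ast = c_parser.CParser().parse(src)
    print(c_generator.CGenerator().visit(ast))
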
diff --git a/contrib/python/pycparser/pycparser/c_lexer.py b/contrib/python/pycparser/pycparser/c_lexer.py
index 5ac2231352..d68d8ebfa3 100644
--- a/contrib/python/pycparser/pycparser/c_lexer.py
+++ b/contrib/python/pycparser/pycparser/c_lexer.py
@@ -3,7 +3,7 @@
#
# CLexer class: lexer for the C language
#
-# Eli Bendersky [https://eli.thegreenplace.net/]
+# Eli Bendersky [https://eli.thegreenplace.net/]
# License: BSD
#------------------------------------------------------------------------------
import re
@@ -51,8 +51,8 @@ class CLexer(object):
# Allow either "# line" or "# <num>" to support GCC's
# cpp output
#
- self.line_pattern = re.compile(r'([ \t]*line\W)|([ \t]*\d+)')
- self.pragma_pattern = re.compile(r'[ \t]*pragma\W')
+ self.line_pattern = re.compile(r'([ \t]*line\W)|([ \t]*\d+)')
+ self.pragma_pattern = re.compile(r'[ \t]*pragma\W')
def build(self, **kwargs):
""" Builds the lexer from the specification. Must be
@@ -105,7 +105,7 @@ class CLexer(object):
'REGISTER', 'OFFSETOF',
'RESTRICT', 'RETURN', 'SHORT', 'SIGNED', 'SIZEOF', 'STATIC', 'STRUCT',
'SWITCH', 'TYPEDEF', 'UNION', 'UNSIGNED', 'VOID',
- 'VOLATILE', 'WHILE', '__INT128',
+ 'VOLATILE', 'WHILE', '__INT128',
)
keywords_new = (
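
The restored line_pattern and pragma_pattern classify what follows the '#' of a preprocessor line as either a cpp line marker or a pragma. A standalone check of the same regexes (the sample inputs are illustrative):

    import re

    line_pattern = re.compile(r'([ \t]*line\W)|([ \t]*\d+)')
    pragma_pattern = re.compile(r'[ \t]*pragma\W')

    assert line_pattern.match(' 42 "file.c"')      # GCC-style "# 42" marker
    assert line_pattern.match('line 7 "file.c"')   # "#line 7" form
    assert pragma_pattern.match(' pragma once')    # "# pragma once"
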
diff --git a/contrib/python/pycparser/pycparser/c_parser.py b/contrib/python/pycparser/pycparser/c_parser.py
index 87d10e8486..640a759406 100644
--- a/contrib/python/pycparser/pycparser/c_parser.py
+++ b/contrib/python/pycparser/pycparser/c_parser.py
@@ -3,7 +3,7 @@
#
# CParser class: Parser and AST builder for the C language
#
-# Eli Bendersky [https://eli.thegreenplace.net/]
+# Eli Bendersky [https://eli.thegreenplace.net/]
# License: BSD
#------------------------------------------------------------------------------
from .ply import yacc
@@ -14,7 +14,7 @@ from .plyparser import PLYParser, ParseError, parameterized, template
from .ast_transforms import fix_switch_cases, fix_atomic_specifiers
-@template
+@template
class CParser(PLYParser):
def __init__(
self,
@@ -41,11 +41,11 @@ class CParser(PLYParser):
When releasing with a stable lexer, set to True
to save the re-generation of the lexer table on
each run.
-
+
lexer:
Set this parameter to define the lexer to use if
you're not using the default CLexer.
-
+
lextab:
Points to the lex table that's used for optimized
mode. Only if you're modifying the lexer and want
@@ -90,12 +90,12 @@ class CParser(PLYParser):
'abstract_declarator',
'assignment_expression',
'declaration_list',
- 'declaration_specifiers_no_type',
+ 'declaration_specifiers_no_type',
'designation',
'expression',
'identifier_list',
'init_declarator_list',
- 'id_init_declarator_list',
+ 'id_init_declarator_list',
'initializer_list',
'parameter_type_list',
'block_item_list',
@@ -340,7 +340,7 @@ class CParser(PLYParser):
coord=typename[0].coord)
return decl
- def _add_declaration_specifier(self, declspec, newspec, kind, append=False):
+ def _add_declaration_specifier(self, declspec, newspec, kind, append=False):
""" Declaration specifiers are represented by a dictionary
with the entries:
* qual: a list of type qualifiers
@@ -351,18 +351,18 @@ class CParser(PLYParser):
This method is given a declaration specifier, and a
new specifier of a given kind.
- If `append` is True, the new specifier is added to the end of
- the specifiers list, otherwise it's added at the beginning.
+ If `append` is True, the new specifier is added to the end of
+ the specifiers list, otherwise it's added at the beginning.
Returns the declaration specifier, with the new
specifier incorporated.
"""
spec = declspec or dict(qual=[], storage=[], type=[], function=[], alignment=[])
-
- if append:
- spec[kind].append(newspec)
- else:
- spec[kind].insert(0, newspec)
-
+
+ if append:
+ spec[kind].append(newspec)
+ else:
+ spec[kind].insert(0, newspec)
+
return spec
def _build_declarations(self, spec, decls, typedef_namespace=False):
@@ -569,21 +569,21 @@ class CParser(PLYParser):
""" pp_directive : PPHASH
"""
self._parse_error('Directives not supported yet',
- self._token_coord(p, 1))
+ self._token_coord(p, 1))
def p_pppragma_directive(self, p):
""" pppragma_directive : PPPRAGMA
| PPPRAGMA PPPRAGMASTR
"""
if len(p) == 3:
- p[0] = c_ast.Pragma(p[2], self._token_coord(p, 2))
+ p[0] = c_ast.Pragma(p[2], self._token_coord(p, 2))
else:
- p[0] = c_ast.Pragma("", self._token_coord(p, 1))
+ p[0] = c_ast.Pragma("", self._token_coord(p, 1))
# In function definitions, the declarator can be followed by
# a declaration list, for old "K&R style" function definitions.
def p_function_definition_1(self, p):
- """ function_definition : id_declarator declaration_list_opt compound_statement
+ """ function_definition : id_declarator declaration_list_opt compound_statement
"""
# no declaration specifiers - 'int' becomes the default type
spec = dict(
@@ -591,7 +591,7 @@ class CParser(PLYParser):
alignment=[],
storage=[],
type=[c_ast.IdentifierType(['int'],
- coord=self._token_coord(p, 1))],
+ coord=self._token_coord(p, 1))],
function=[])
p[0] = self._build_function_definition(
@@ -601,7 +601,7 @@ class CParser(PLYParser):
body=p[3])
def p_function_definition_2(self, p):
- """ function_definition : declaration_specifiers id_declarator declaration_list_opt compound_statement
+ """ function_definition : declaration_specifiers id_declarator declaration_list_opt compound_statement
"""
spec = p[1]
@@ -634,53 +634,53 @@ class CParser(PLYParser):
# such as labeled_statements, selection_statements, and
# iteration_statements, causing a misleading structure in the AST. For
# example, consider the following C code.
- #
- # for (int i = 0; i < 3; i++)
- # #pragma omp critical
- # sum += 1;
- #
+ #
+ # for (int i = 0; i < 3; i++)
+ # #pragma omp critical
+ # sum += 1;
+ #
# This code will compile and execute "sum += 1;" as the body of the for
# loop. Previous implementations of PyCParser would render the AST for this
- # block of code as follows:
- #
- # For:
- # DeclList:
- # Decl: i, [], [], []
- # TypeDecl: i, []
- # IdentifierType: ['int']
- # Constant: int, 0
- # BinaryOp: <
- # ID: i
- # Constant: int, 3
- # UnaryOp: p++
- # ID: i
- # Pragma: omp critical
- # Assignment: +=
- # ID: sum
- # Constant: int, 1
- #
- # This AST misleadingly takes the Pragma as the body of the loop and the
- # assignment then becomes a sibling of the loop.
- #
- # To solve edge cases like these, the pragmacomp_or_statement rule groups
- # a pragma and its following statement (which would otherwise be orphaned)
- # using a compound block, effectively turning the above code into:
- #
- # for (int i = 0; i < 3; i++) {
- # #pragma omp critical
- # sum += 1;
- # }
- def p_pragmacomp_or_statement(self, p):
- """ pragmacomp_or_statement : pppragma_directive statement
- | statement
- """
- if isinstance(p[1], c_ast.Pragma) and len(p) == 3:
- p[0] = c_ast.Compound(
- block_items=[p[1], p[2]],
- coord=self._token_coord(p, 1))
- else:
- p[0] = p[1]
-
+ # block of code as follows:
+ #
+ # For:
+ # DeclList:
+ # Decl: i, [], [], []
+ # TypeDecl: i, []
+ # IdentifierType: ['int']
+ # Constant: int, 0
+ # BinaryOp: <
+ # ID: i
+ # Constant: int, 3
+ # UnaryOp: p++
+ # ID: i
+ # Pragma: omp critical
+ # Assignment: +=
+ # ID: sum
+ # Constant: int, 1
+ #
+ # This AST misleadingly takes the Pragma as the body of the loop and the
+ # assignment then becomes a sibling of the loop.
+ #
+ # To solve edge cases like these, the pragmacomp_or_statement rule groups
+ # a pragma and its following statement (which would otherwise be orphaned)
+ # using a compound block, effectively turning the above code into:
+ #
+ # for (int i = 0; i < 3; i++) {
+ # #pragma omp critical
+ # sum += 1;
+ # }
+ def p_pragmacomp_or_statement(self, p):
+ """ pragmacomp_or_statement : pppragma_directive statement
+ | statement
+ """
+ if isinstance(p[1], c_ast.Pragma) and len(p) == 3:
+ p[0] = c_ast.Compound(
+ block_items=[p[1], p[2]],
+ coord=self._token_coord(p, 1))
+ else:
+ p[0] = p[1]
+
# In C, declarations can come several in a line:
# int x, *px, romulo = 5;
#
@@ -692,7 +692,7 @@ class CParser(PLYParser):
#
def p_decl_body(self, p):
""" decl_body : declaration_specifiers init_declarator_list_opt
- | declaration_specifiers_no_type id_init_declarator_list_opt
+ | declaration_specifiers_no_type id_init_declarator_list_opt
"""
spec = p[1]
@@ -766,73 +766,73 @@ class CParser(PLYParser):
"""
p[0] = p[1] if len(p) == 2 else p[1] + p[2]
- # To know when declaration-specifiers end and declarators begin,
- # we require declaration-specifiers to have at least one
- # type-specifier, and disallow typedef-names after we've seen any
- # type-specifier. These are both required by the spec.
- #
- def p_declaration_specifiers_no_type_1(self, p):
- """ declaration_specifiers_no_type : type_qualifier declaration_specifiers_no_type_opt
+ # To know when declaration-specifiers end and declarators begin,
+ # we require declaration-specifiers to have at least one
+ # type-specifier, and disallow typedef-names after we've seen any
+ # type-specifier. These are both required by the spec.
+ #
+ def p_declaration_specifiers_no_type_1(self, p):
+ """ declaration_specifiers_no_type : type_qualifier declaration_specifiers_no_type_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'qual')
- def p_declaration_specifiers_no_type_2(self, p):
- """ declaration_specifiers_no_type : storage_class_specifier declaration_specifiers_no_type_opt
- """
- p[0] = self._add_declaration_specifier(p[2], p[1], 'storage')
-
- def p_declaration_specifiers_no_type_3(self, p):
- """ declaration_specifiers_no_type : function_specifier declaration_specifiers_no_type_opt
- """
- p[0] = self._add_declaration_specifier(p[2], p[1], 'function')
-
+ def p_declaration_specifiers_no_type_2(self, p):
+ """ declaration_specifiers_no_type : storage_class_specifier declaration_specifiers_no_type_opt
+ """
+ p[0] = self._add_declaration_specifier(p[2], p[1], 'storage')
+
+ def p_declaration_specifiers_no_type_3(self, p):
+ """ declaration_specifiers_no_type : function_specifier declaration_specifiers_no_type_opt
+ """
+ p[0] = self._add_declaration_specifier(p[2], p[1], 'function')
+
# Without this, `typedef _Atomic(T) U` will parse incorrectly because the
# _Atomic qualifier will match, instead of the specifier.
def p_declaration_specifiers_no_type_4(self, p):
""" declaration_specifiers_no_type : atomic_specifier declaration_specifiers_no_type_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'type')
-
+
def p_declaration_specifiers_no_type_5(self, p):
""" declaration_specifiers_no_type : alignment_specifier declaration_specifiers_no_type_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'alignment')
- def p_declaration_specifiers_1(self, p):
- """ declaration_specifiers : declaration_specifiers type_qualifier
- """
- p[0] = self._add_declaration_specifier(p[1], p[2], 'qual', append=True)
-
+ def p_declaration_specifiers_1(self, p):
+ """ declaration_specifiers : declaration_specifiers type_qualifier
+ """
+ p[0] = self._add_declaration_specifier(p[1], p[2], 'qual', append=True)
+
def p_declaration_specifiers_2(self, p):
- """ declaration_specifiers : declaration_specifiers storage_class_specifier
+ """ declaration_specifiers : declaration_specifiers storage_class_specifier
"""
- p[0] = self._add_declaration_specifier(p[1], p[2], 'storage', append=True)
+ p[0] = self._add_declaration_specifier(p[1], p[2], 'storage', append=True)
def p_declaration_specifiers_3(self, p):
- """ declaration_specifiers : declaration_specifiers function_specifier
+ """ declaration_specifiers : declaration_specifiers function_specifier
"""
- p[0] = self._add_declaration_specifier(p[1], p[2], 'function', append=True)
+ p[0] = self._add_declaration_specifier(p[1], p[2], 'function', append=True)
def p_declaration_specifiers_4(self, p):
- """ declaration_specifiers : declaration_specifiers type_specifier_no_typeid
- """
- p[0] = self._add_declaration_specifier(p[1], p[2], 'type', append=True)
-
- def p_declaration_specifiers_5(self, p):
- """ declaration_specifiers : type_specifier
- """
- p[0] = self._add_declaration_specifier(None, p[1], 'type')
-
- def p_declaration_specifiers_6(self, p):
- """ declaration_specifiers : declaration_specifiers_no_type type_specifier
- """
- p[0] = self._add_declaration_specifier(p[1], p[2], 'type', append=True)
-
+ """ declaration_specifiers : declaration_specifiers type_specifier_no_typeid
+ """
+ p[0] = self._add_declaration_specifier(p[1], p[2], 'type', append=True)
+
+ def p_declaration_specifiers_5(self, p):
+ """ declaration_specifiers : type_specifier
+ """
+ p[0] = self._add_declaration_specifier(None, p[1], 'type')
+
+ def p_declaration_specifiers_6(self, p):
+ """ declaration_specifiers : declaration_specifiers_no_type type_specifier
+ """
+ p[0] = self._add_declaration_specifier(p[1], p[2], 'type', append=True)
+
def p_declaration_specifiers_7(self, p):
""" declaration_specifiers : declaration_specifiers alignment_specifier
"""
p[0] = self._add_declaration_specifier(p[1], p[2], 'alignment', append=True)
-
+
def p_storage_class_specifier(self, p):
""" storage_class_specifier : AUTO
| REGISTER
@@ -849,27 +849,27 @@ class CParser(PLYParser):
"""
p[0] = p[1]
- def p_type_specifier_no_typeid(self, p):
- """ type_specifier_no_typeid : VOID
- | _BOOL
- | CHAR
- | SHORT
- | INT
- | LONG
- | FLOAT
- | DOUBLE
- | _COMPLEX
- | SIGNED
- | UNSIGNED
- | __INT128
- """
- p[0] = c_ast.IdentifierType([p[1]], coord=self._token_coord(p, 1))
-
- def p_type_specifier(self, p):
+ def p_type_specifier_no_typeid(self, p):
+ """ type_specifier_no_typeid : VOID
+ | _BOOL
+ | CHAR
+ | SHORT
+ | INT
+ | LONG
+ | FLOAT
+ | DOUBLE
+ | _COMPLEX
+ | SIGNED
+ | UNSIGNED
+ | __INT128
+ """
+ p[0] = c_ast.IdentifierType([p[1]], coord=self._token_coord(p, 1))
+
+ def p_type_specifier(self, p):
""" type_specifier : typedef_name
| enum_specifier
| struct_or_union_specifier
- | type_specifier_no_typeid
+ | type_specifier_no_typeid
| atomic_specifier
"""
p[0] = p[1]
@@ -890,7 +890,7 @@ class CParser(PLYParser):
"""
p[0] = p[1]
- def p_init_declarator_list(self, p):
+ def p_init_declarator_list(self, p):
""" init_declarator_list : init_declarator
| init_declarator_list COMMA init_declarator
"""
@@ -905,40 +905,40 @@ class CParser(PLYParser):
"""
p[0] = dict(decl=p[1], init=(p[3] if len(p) > 2 else None))
- def p_id_init_declarator_list(self, p):
- """ id_init_declarator_list : id_init_declarator
- | id_init_declarator_list COMMA init_declarator
- """
- p[0] = p[1] + [p[3]] if len(p) == 4 else [p[1]]
-
- def p_id_init_declarator(self, p):
- """ id_init_declarator : id_declarator
- | id_declarator EQUALS initializer
- """
- p[0] = dict(decl=p[1], init=(p[3] if len(p) > 2 else None))
-
- # Require at least one type specifier in a specifier-qualifier-list
- #
+ def p_id_init_declarator_list(self, p):
+ """ id_init_declarator_list : id_init_declarator
+ | id_init_declarator_list COMMA init_declarator
+ """
+ p[0] = p[1] + [p[3]] if len(p) == 4 else [p[1]]
+
+ def p_id_init_declarator(self, p):
+ """ id_init_declarator : id_declarator
+ | id_declarator EQUALS initializer
+ """
+ p[0] = dict(decl=p[1], init=(p[3] if len(p) > 2 else None))
+
+ # Require at least one type specifier in a specifier-qualifier-list
+ #
def p_specifier_qualifier_list_1(self, p):
- """ specifier_qualifier_list : specifier_qualifier_list type_specifier_no_typeid
+ """ specifier_qualifier_list : specifier_qualifier_list type_specifier_no_typeid
"""
- p[0] = self._add_declaration_specifier(p[1], p[2], 'type', append=True)
+ p[0] = self._add_declaration_specifier(p[1], p[2], 'type', append=True)
def p_specifier_qualifier_list_2(self, p):
- """ specifier_qualifier_list : specifier_qualifier_list type_qualifier
- """
- p[0] = self._add_declaration_specifier(p[1], p[2], 'qual', append=True)
-
- def p_specifier_qualifier_list_3(self, p):
- """ specifier_qualifier_list : type_specifier
- """
- p[0] = self._add_declaration_specifier(None, p[1], 'type')
-
- def p_specifier_qualifier_list_4(self, p):
- """ specifier_qualifier_list : type_qualifier_list type_specifier
- """
+ """ specifier_qualifier_list : specifier_qualifier_list type_qualifier
+ """
+ p[0] = self._add_declaration_specifier(p[1], p[2], 'qual', append=True)
+
+ def p_specifier_qualifier_list_3(self, p):
+ """ specifier_qualifier_list : type_specifier
+ """
+ p[0] = self._add_declaration_specifier(None, p[1], 'type')
+
+ def p_specifier_qualifier_list_4(self, p):
+ """ specifier_qualifier_list : type_qualifier_list type_specifier
+ """
p[0] = dict(qual=p[1], alignment=[], storage=[], type=[p[2]], function=[])
-
+
def p_specifier_qualifier_list_5(self, p):
""" specifier_qualifier_list : alignment_specifier
"""
@@ -957,48 +957,48 @@ class CParser(PLYParser):
| struct_or_union TYPEID
"""
klass = self._select_struct_union_class(p[1])
- # None means no list of members
+ # None means no list of members
p[0] = klass(
name=p[2],
decls=None,
- coord=self._token_coord(p, 2))
+ coord=self._token_coord(p, 2))
def p_struct_or_union_specifier_2(self, p):
""" struct_or_union_specifier : struct_or_union brace_open struct_declaration_list brace_close
- | struct_or_union brace_open brace_close
+ | struct_or_union brace_open brace_close
"""
klass = self._select_struct_union_class(p[1])
- if len(p) == 4:
- # Empty sequence means an empty list of members
- p[0] = klass(
- name=None,
- decls=[],
- coord=self._token_coord(p, 2))
- else:
- p[0] = klass(
- name=None,
- decls=p[3],
- coord=self._token_coord(p, 2))
-
-
+ if len(p) == 4:
+ # Empty sequence means an empty list of members
+ p[0] = klass(
+ name=None,
+ decls=[],
+ coord=self._token_coord(p, 2))
+ else:
+ p[0] = klass(
+ name=None,
+ decls=p[3],
+ coord=self._token_coord(p, 2))
+
+
def p_struct_or_union_specifier_3(self, p):
""" struct_or_union_specifier : struct_or_union ID brace_open struct_declaration_list brace_close
- | struct_or_union ID brace_open brace_close
+ | struct_or_union ID brace_open brace_close
| struct_or_union TYPEID brace_open struct_declaration_list brace_close
- | struct_or_union TYPEID brace_open brace_close
+ | struct_or_union TYPEID brace_open brace_close
"""
klass = self._select_struct_union_class(p[1])
- if len(p) == 5:
- # Empty sequence means an empty list of members
- p[0] = klass(
- name=p[2],
- decls=[],
- coord=self._token_coord(p, 2))
- else:
- p[0] = klass(
- name=p[2],
- decls=p[4],
- coord=self._token_coord(p, 2))
+ if len(p) == 5:
+ # Empty sequence means an empty list of members
+ p[0] = klass(
+ name=p[2],
+ decls=[],
+ coord=self._token_coord(p, 2))
+ else:
+ p[0] = klass(
+ name=p[2],
+ decls=p[4],
+ coord=self._token_coord(p, 2))
def p_struct_or_union(self, p):
""" struct_or_union : STRUCT
@@ -1056,14 +1056,14 @@ class CParser(PLYParser):
p[0] = decls
def p_struct_declaration_2(self, p):
- """ struct_declaration : SEMI
+ """ struct_declaration : SEMI
"""
- p[0] = None
+ p[0] = None
def p_struct_declaration_3(self, p):
- """ struct_declaration : pppragma_directive
+ """ struct_declaration : pppragma_directive
"""
- p[0] = [p[1]]
+ p[0] = [p[1]]
def p_struct_declarator_list(self, p):
""" struct_declarator_list : struct_declarator
@@ -1092,18 +1092,18 @@ class CParser(PLYParser):
""" enum_specifier : ENUM ID
| ENUM TYPEID
"""
- p[0] = c_ast.Enum(p[2], None, self._token_coord(p, 1))
+ p[0] = c_ast.Enum(p[2], None, self._token_coord(p, 1))
def p_enum_specifier_2(self, p):
""" enum_specifier : ENUM brace_open enumerator_list brace_close
"""
- p[0] = c_ast.Enum(None, p[3], self._token_coord(p, 1))
+ p[0] = c_ast.Enum(None, p[3], self._token_coord(p, 1))
def p_enum_specifier_3(self, p):
""" enum_specifier : ENUM ID brace_open enumerator_list brace_close
| ENUM TYPEID brace_open enumerator_list brace_close
"""
- p[0] = c_ast.Enum(p[2], p[4], self._token_coord(p, 1))
+ p[0] = c_ast.Enum(p[2], p[4], self._token_coord(p, 1))
def p_enumerator_list(self, p):
""" enumerator_list : enumerator
@@ -1131,53 +1131,53 @@ class CParser(PLYParser):
if len(p) == 2:
enumerator = c_ast.Enumerator(
p[1], None,
- self._token_coord(p, 1))
+ self._token_coord(p, 1))
else:
enumerator = c_ast.Enumerator(
p[1], p[3],
- self._token_coord(p, 1))
+ self._token_coord(p, 1))
self._add_identifier(enumerator.name, enumerator.coord)
p[0] = enumerator
- def p_declarator(self, p):
- """ declarator : id_declarator
- | typeid_declarator
+ def p_declarator(self, p):
+ """ declarator : id_declarator
+ | typeid_declarator
"""
p[0] = p[1]
- @parameterized(('id', 'ID'), ('typeid', 'TYPEID'), ('typeid_noparen', 'TYPEID'))
- def p_xxx_declarator_1(self, p):
- """ xxx_declarator : direct_xxx_declarator
+ @parameterized(('id', 'ID'), ('typeid', 'TYPEID'), ('typeid_noparen', 'TYPEID'))
+ def p_xxx_declarator_1(self, p):
+ """ xxx_declarator : direct_xxx_declarator
"""
- p[0] = p[1]
+ p[0] = p[1]
- @parameterized(('id', 'ID'), ('typeid', 'TYPEID'), ('typeid_noparen', 'TYPEID'))
- def p_xxx_declarator_2(self, p):
- """ xxx_declarator : pointer direct_xxx_declarator
+ @parameterized(('id', 'ID'), ('typeid', 'TYPEID'), ('typeid_noparen', 'TYPEID'))
+ def p_xxx_declarator_2(self, p):
+ """ xxx_declarator : pointer direct_xxx_declarator
"""
- p[0] = self._type_modify_decl(p[2], p[1])
+ p[0] = self._type_modify_decl(p[2], p[1])
- @parameterized(('id', 'ID'), ('typeid', 'TYPEID'), ('typeid_noparen', 'TYPEID'))
- def p_direct_xxx_declarator_1(self, p):
- """ direct_xxx_declarator : yyy
+ @parameterized(('id', 'ID'), ('typeid', 'TYPEID'), ('typeid_noparen', 'TYPEID'))
+ def p_direct_xxx_declarator_1(self, p):
+ """ direct_xxx_declarator : yyy
"""
p[0] = c_ast.TypeDecl(
declname=p[1],
type=None,
quals=None,
align=None,
- coord=self._token_coord(p, 1))
+ coord=self._token_coord(p, 1))
- @parameterized(('id', 'ID'), ('typeid', 'TYPEID'))
- def p_direct_xxx_declarator_2(self, p):
- """ direct_xxx_declarator : LPAREN xxx_declarator RPAREN
+ @parameterized(('id', 'ID'), ('typeid', 'TYPEID'))
+ def p_direct_xxx_declarator_2(self, p):
+ """ direct_xxx_declarator : LPAREN xxx_declarator RPAREN
"""
p[0] = p[2]
- @parameterized(('id', 'ID'), ('typeid', 'TYPEID'), ('typeid_noparen', 'TYPEID'))
- def p_direct_xxx_declarator_3(self, p):
- """ direct_xxx_declarator : direct_xxx_declarator LBRACKET type_qualifier_list_opt assignment_expression_opt RBRACKET
+ @parameterized(('id', 'ID'), ('typeid', 'TYPEID'), ('typeid_noparen', 'TYPEID'))
+ def p_direct_xxx_declarator_3(self, p):
+ """ direct_xxx_declarator : direct_xxx_declarator LBRACKET type_qualifier_list_opt assignment_expression_opt RBRACKET
"""
quals = (p[3] if len(p) > 5 else []) or []
# Accept dimension qualifiers
@@ -1190,10 +1190,10 @@ class CParser(PLYParser):
p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
- @parameterized(('id', 'ID'), ('typeid', 'TYPEID'), ('typeid_noparen', 'TYPEID'))
- def p_direct_xxx_declarator_4(self, p):
- """ direct_xxx_declarator : direct_xxx_declarator LBRACKET STATIC type_qualifier_list_opt assignment_expression RBRACKET
- | direct_xxx_declarator LBRACKET type_qualifier_list STATIC assignment_expression RBRACKET
+ @parameterized(('id', 'ID'), ('typeid', 'TYPEID'), ('typeid_noparen', 'TYPEID'))
+ def p_direct_xxx_declarator_4(self, p):
+ """ direct_xxx_declarator : direct_xxx_declarator LBRACKET STATIC type_qualifier_list_opt assignment_expression RBRACKET
+ | direct_xxx_declarator LBRACKET type_qualifier_list STATIC assignment_expression RBRACKET
"""
# Using slice notation for PLY objects doesn't work in Python 3 for the
# version of PLY embedded with pycparser; see PLY Google Code issue 30.
@@ -1212,22 +1212,22 @@ class CParser(PLYParser):
# Special for VLAs
#
- @parameterized(('id', 'ID'), ('typeid', 'TYPEID'), ('typeid_noparen', 'TYPEID'))
- def p_direct_xxx_declarator_5(self, p):
- """ direct_xxx_declarator : direct_xxx_declarator LBRACKET type_qualifier_list_opt TIMES RBRACKET
+ @parameterized(('id', 'ID'), ('typeid', 'TYPEID'), ('typeid_noparen', 'TYPEID'))
+ def p_direct_xxx_declarator_5(self, p):
+ """ direct_xxx_declarator : direct_xxx_declarator LBRACKET type_qualifier_list_opt TIMES RBRACKET
"""
arr = c_ast.ArrayDecl(
type=None,
- dim=c_ast.ID(p[4], self._token_coord(p, 4)),
+ dim=c_ast.ID(p[4], self._token_coord(p, 4)),
dim_quals=p[3] if p[3] is not None else [],
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
- @parameterized(('id', 'ID'), ('typeid', 'TYPEID'), ('typeid_noparen', 'TYPEID'))
- def p_direct_xxx_declarator_6(self, p):
- """ direct_xxx_declarator : direct_xxx_declarator LPAREN parameter_type_list RPAREN
- | direct_xxx_declarator LPAREN identifier_list_opt RPAREN
+ @parameterized(('id', 'ID'), ('typeid', 'TYPEID'), ('typeid_noparen', 'TYPEID'))
+ def p_direct_xxx_declarator_6(self, p):
+ """ direct_xxx_declarator : direct_xxx_declarator LPAREN parameter_type_list RPAREN
+ | direct_xxx_declarator LPAREN identifier_list_opt RPAREN
"""
func = c_ast.FuncDecl(
args=p[3],
@@ -1257,7 +1257,7 @@ class CParser(PLYParser):
""" pointer : TIMES type_qualifier_list_opt
| TIMES type_qualifier_list_opt pointer
"""
- coord = self._token_coord(p, 1)
+ coord = self._token_coord(p, 1)
# Pointer decls nest from inside out. This is important when different
# levels have different qualifiers. For example:
#
@@ -1265,7 +1265,7 @@ class CParser(PLYParser):
#
# Means "pointer to const pointer to char"
#
- # While:
+ # While:
#
# char ** const p;
#
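
A sketch of how that difference surfaces in the AST, assuming the usual pycparser API; per the nesting comment above, the 'const' should land on the inner PtrDecl in the first form and on the outer one in the second:

    from pycparser import c_parser

    parser = c_parser.CParser()
    p1 = parser.parse('char * const * p;').ext[0].type   # pointer to const pointer
    p2 = parser.parse('char ** const p;').ext[0].type    # const pointer to pointer
    print(p1.quals, p1.type.quals)   # expected: [] ['const']
    print(p2.quals, p2.type.quals)   # expected: ['const'] []
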
@@ -1294,7 +1294,7 @@ class CParser(PLYParser):
| parameter_list COMMA ELLIPSIS
"""
if len(p) > 2:
- p[1].params.append(c_ast.EllipsisParam(self._token_coord(p, 3)))
+ p[1].params.append(c_ast.EllipsisParam(self._token_coord(p, 3)))
p[0] = p[1]
@@ -1308,24 +1308,24 @@ class CParser(PLYParser):
p[1].params.append(p[3])
p[0] = p[1]
- # From ISO/IEC 9899:TC2, 6.7.5.3.11:
- # "If, in a parameter declaration, an identifier can be treated either
- # as a typedef name or as a parameter name, it shall be taken as a
- # typedef name."
- #
- # Inside a parameter declaration, once we've reduced declaration specifiers,
- # if we shift in an LPAREN and see a TYPEID, it could be either an abstract
- # declarator or a declarator nested inside parens. This rule tells us to
- # always treat it as an abstract declarator. Therefore, we only accept
- # `id_declarator`s and `typeid_noparen_declarator`s.
+ # From ISO/IEC 9899:TC2, 6.7.5.3.11:
+ # "If, in a parameter declaration, an identifier can be treated either
+ # as a typedef name or as a parameter name, it shall be taken as a
+ # typedef name."
+ #
+ # Inside a parameter declaration, once we've reduced declaration specifiers,
+ # if we shift in an LPAREN and see a TYPEID, it could be either an abstract
+ # declarator or a declarator nested inside parens. This rule tells us to
+ # always treat it as an abstract declarator. Therefore, we only accept
+ # `id_declarator`s and `typeid_noparen_declarator`s.
def p_parameter_declaration_1(self, p):
- """ parameter_declaration : declaration_specifiers id_declarator
- | declaration_specifiers typeid_noparen_declarator
+ """ parameter_declaration : declaration_specifiers id_declarator
+ | declaration_specifiers typeid_noparen_declarator
"""
spec = p[1]
if not spec['type']:
spec['type'] = [c_ast.IdentifierType(['int'],
- coord=self._token_coord(p, 1))]
+ coord=self._token_coord(p, 1))]
p[0] = self._build_declarations(
spec=spec,
decls=[dict(decl=p[2])])[0]
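
A hedged sketch of the typedef-name rule in action: with 'T' typedef'd, the parenthesized 'T' below should parse as an abstract declarator (an unnamed function-type parameter), not as a parameter named 'T':

    from pycparser import c_parser

    parser = c_parser.CParser()
    ast = parser.parse('typedef int T; void f(int (T));')
    param = ast.ext[1].type.args.params[0]
    param.show()   # expected: a Typename wrapping a FuncDecl, not a Decl named 'T'
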
@@ -1336,7 +1336,7 @@ class CParser(PLYParser):
spec = p[1]
if not spec['type']:
spec['type'] = [c_ast.IdentifierType(['int'],
- coord=self._token_coord(p, 1))]
+ coord=self._token_coord(p, 1))]
# Parameters can have the same names as typedefs. The trouble is that
# the parameter's name gets grouped into declaration_specifiers, making
@@ -1356,7 +1356,7 @@ class CParser(PLYParser):
quals=spec['qual'],
align=None,
type=p[2] or c_ast.TypeDecl(None, None, None, None),
- coord=self._token_coord(p, 2))
+ coord=self._token_coord(p, 2))
typename = spec['type']
decl = self._fix_decl_name_type(decl, typename)
@@ -1382,7 +1382,7 @@ class CParser(PLYParser):
| brace_open initializer_list COMMA brace_close
"""
if p[2] is None:
- p[0] = c_ast.InitList([], self._token_coord(p, 1))
+ p[0] = c_ast.InitList([], self._token_coord(p, 1))
else:
p[0] = p[2]
@@ -1426,7 +1426,7 @@ class CParser(PLYParser):
quals=p[1]['qual'][:],
align=None,
type=p[2] or c_ast.TypeDecl(None, None, None, None),
- coord=self._token_coord(p, 2))
+ coord=self._token_coord(p, 2))
p[0] = self._fix_decl_name_type(typename, p[1]['type'])
@@ -1476,14 +1476,14 @@ class CParser(PLYParser):
type=c_ast.TypeDecl(None, None, None, None),
dim=p[3] if len(p) > 4 else p[2],
dim_quals=quals,
- coord=self._token_coord(p, 1))
+ coord=self._token_coord(p, 1))
def p_direct_abstract_declarator_4(self, p):
""" direct_abstract_declarator : direct_abstract_declarator LBRACKET TIMES RBRACKET
"""
arr = c_ast.ArrayDecl(
type=None,
- dim=c_ast.ID(p[3], self._token_coord(p, 3)),
+ dim=c_ast.ID(p[3], self._token_coord(p, 3)),
dim_quals=[],
coord=p[1].coord)
@@ -1494,9 +1494,9 @@ class CParser(PLYParser):
"""
p[0] = c_ast.ArrayDecl(
type=c_ast.TypeDecl(None, None, None, None),
- dim=c_ast.ID(p[3], self._token_coord(p, 3)),
+ dim=c_ast.ID(p[3], self._token_coord(p, 3)),
dim_quals=[],
- coord=self._token_coord(p, 1))
+ coord=self._token_coord(p, 1))
def p_direct_abstract_declarator_6(self, p):
""" direct_abstract_declarator : direct_abstract_declarator LPAREN parameter_type_list_opt RPAREN
@@ -1514,7 +1514,7 @@ class CParser(PLYParser):
p[0] = c_ast.FuncDecl(
args=p[2],
type=c_ast.TypeDecl(None, None, None, None),
- coord=self._token_coord(p, 1))
+ coord=self._token_coord(p, 1))
# declaration is a list, statement isn't. To make it consistent, block_item
# will always be a list
@@ -1538,72 +1538,72 @@ class CParser(PLYParser):
""" compound_statement : brace_open block_item_list_opt brace_close """
p[0] = c_ast.Compound(
block_items=p[2],
- coord=self._token_coord(p, 1))
+ coord=self._token_coord(p, 1))
def p_labeled_statement_1(self, p):
- """ labeled_statement : ID COLON pragmacomp_or_statement """
- p[0] = c_ast.Label(p[1], p[3], self._token_coord(p, 1))
+ """ labeled_statement : ID COLON pragmacomp_or_statement """
+ p[0] = c_ast.Label(p[1], p[3], self._token_coord(p, 1))
def p_labeled_statement_2(self, p):
- """ labeled_statement : CASE constant_expression COLON pragmacomp_or_statement """
- p[0] = c_ast.Case(p[2], [p[4]], self._token_coord(p, 1))
+ """ labeled_statement : CASE constant_expression COLON pragmacomp_or_statement """
+ p[0] = c_ast.Case(p[2], [p[4]], self._token_coord(p, 1))
def p_labeled_statement_3(self, p):
- """ labeled_statement : DEFAULT COLON pragmacomp_or_statement """
- p[0] = c_ast.Default([p[3]], self._token_coord(p, 1))
+ """ labeled_statement : DEFAULT COLON pragmacomp_or_statement """
+ p[0] = c_ast.Default([p[3]], self._token_coord(p, 1))
def p_selection_statement_1(self, p):
- """ selection_statement : IF LPAREN expression RPAREN pragmacomp_or_statement """
- p[0] = c_ast.If(p[3], p[5], None, self._token_coord(p, 1))
+ """ selection_statement : IF LPAREN expression RPAREN pragmacomp_or_statement """
+ p[0] = c_ast.If(p[3], p[5], None, self._token_coord(p, 1))
def p_selection_statement_2(self, p):
- """ selection_statement : IF LPAREN expression RPAREN statement ELSE pragmacomp_or_statement """
- p[0] = c_ast.If(p[3], p[5], p[7], self._token_coord(p, 1))
+ """ selection_statement : IF LPAREN expression RPAREN statement ELSE pragmacomp_or_statement """
+ p[0] = c_ast.If(p[3], p[5], p[7], self._token_coord(p, 1))
def p_selection_statement_3(self, p):
- """ selection_statement : SWITCH LPAREN expression RPAREN pragmacomp_or_statement """
+ """ selection_statement : SWITCH LPAREN expression RPAREN pragmacomp_or_statement """
p[0] = fix_switch_cases(
- c_ast.Switch(p[3], p[5], self._token_coord(p, 1)))
+ c_ast.Switch(p[3], p[5], self._token_coord(p, 1)))
def p_iteration_statement_1(self, p):
- """ iteration_statement : WHILE LPAREN expression RPAREN pragmacomp_or_statement """
- p[0] = c_ast.While(p[3], p[5], self._token_coord(p, 1))
+ """ iteration_statement : WHILE LPAREN expression RPAREN pragmacomp_or_statement """
+ p[0] = c_ast.While(p[3], p[5], self._token_coord(p, 1))
def p_iteration_statement_2(self, p):
- """ iteration_statement : DO pragmacomp_or_statement WHILE LPAREN expression RPAREN SEMI """
- p[0] = c_ast.DoWhile(p[5], p[2], self._token_coord(p, 1))
+ """ iteration_statement : DO pragmacomp_or_statement WHILE LPAREN expression RPAREN SEMI """
+ p[0] = c_ast.DoWhile(p[5], p[2], self._token_coord(p, 1))
def p_iteration_statement_3(self, p):
- """ iteration_statement : FOR LPAREN expression_opt SEMI expression_opt SEMI expression_opt RPAREN pragmacomp_or_statement """
- p[0] = c_ast.For(p[3], p[5], p[7], p[9], self._token_coord(p, 1))
+ """ iteration_statement : FOR LPAREN expression_opt SEMI expression_opt SEMI expression_opt RPAREN pragmacomp_or_statement """
+ p[0] = c_ast.For(p[3], p[5], p[7], p[9], self._token_coord(p, 1))
def p_iteration_statement_4(self, p):
- """ iteration_statement : FOR LPAREN declaration expression_opt SEMI expression_opt RPAREN pragmacomp_or_statement """
- p[0] = c_ast.For(c_ast.DeclList(p[3], self._token_coord(p, 1)),
- p[4], p[6], p[8], self._token_coord(p, 1))
+ """ iteration_statement : FOR LPAREN declaration expression_opt SEMI expression_opt RPAREN pragmacomp_or_statement """
+ p[0] = c_ast.For(c_ast.DeclList(p[3], self._token_coord(p, 1)),
+ p[4], p[6], p[8], self._token_coord(p, 1))
def p_jump_statement_1(self, p):
""" jump_statement : GOTO ID SEMI """
- p[0] = c_ast.Goto(p[2], self._token_coord(p, 1))
+ p[0] = c_ast.Goto(p[2], self._token_coord(p, 1))
def p_jump_statement_2(self, p):
""" jump_statement : BREAK SEMI """
- p[0] = c_ast.Break(self._token_coord(p, 1))
+ p[0] = c_ast.Break(self._token_coord(p, 1))
def p_jump_statement_3(self, p):
""" jump_statement : CONTINUE SEMI """
- p[0] = c_ast.Continue(self._token_coord(p, 1))
+ p[0] = c_ast.Continue(self._token_coord(p, 1))
def p_jump_statement_4(self, p):
""" jump_statement : RETURN expression SEMI
| RETURN SEMI
"""
- p[0] = c_ast.Return(p[2] if len(p) == 4 else None, self._token_coord(p, 1))
+ p[0] = c_ast.Return(p[2] if len(p) == 4 else None, self._token_coord(p, 1))
def p_expression_statement(self, p):
""" expression_statement : expression_opt SEMI """
if p[1] is None:
- p[0] = c_ast.EmptyStatement(self._token_coord(p, 2))
+ p[0] = c_ast.EmptyStatement(self._token_coord(p, 2))
else:
p[0] = p[1]
@@ -1626,7 +1626,7 @@ class CParser(PLYParser):
def p_typedef_name(self, p):
""" typedef_name : TYPEID """
- p[0] = c_ast.IdentifierType([p[1]], coord=self._token_coord(p, 1))
+ p[0] = c_ast.IdentifierType([p[1]], coord=self._token_coord(p, 1))
def p_assignment_expression(self, p):
""" assignment_expression : conditional_expression
@@ -1702,7 +1702,7 @@ class CParser(PLYParser):
def p_cast_expression_2(self, p):
""" cast_expression : LPAREN type_name RPAREN cast_expression """
- p[0] = c_ast.Cast(p[2], p[4], self._token_coord(p, 1))
+ p[0] = c_ast.Cast(p[2], p[4], self._token_coord(p, 1))
def p_unary_expression_1(self, p):
""" unary_expression : postfix_expression """
@@ -1723,7 +1723,7 @@ class CParser(PLYParser):
p[0] = c_ast.UnaryOp(
p[1],
p[2] if len(p) == 3 else p[3],
- self._token_coord(p, 1))
+ self._token_coord(p, 1))
def p_unary_operator(self, p):
""" unary_operator : AND
@@ -1755,7 +1755,7 @@ class CParser(PLYParser):
| postfix_expression ARROW ID
| postfix_expression ARROW TYPEID
"""
- field = c_ast.ID(p[3], self._token_coord(p, 3))
+ field = c_ast.ID(p[3], self._token_coord(p, 3))
p[0] = c_ast.StructRef(p[1], p[2], field, p[1].coord)
def p_postfix_expression_5(self, p):
@@ -1791,7 +1791,7 @@ class CParser(PLYParser):
def p_primary_expression_5(self, p):
""" primary_expression : OFFSETOF LPAREN type_name COMMA offsetof_member_designator RPAREN
"""
- coord = self._token_coord(p, 1)
+ coord = self._token_coord(p, 1)
p[0] = c_ast.FuncCall(c_ast.ID(p[1], coord),
c_ast.ExprList([p[3], p[5]], coord),
coord)
@@ -1822,7 +1822,7 @@ class CParser(PLYParser):
def p_identifier(self, p):
""" identifier : ID """
- p[0] = c_ast.ID(p[1], self._token_coord(p, 1))
+ p[0] = c_ast.ID(p[1], self._token_coord(p, 1))
def p_constant_1(self, p):
""" constant : INT_CONST_DEC
@@ -1851,18 +1851,18 @@ class CParser(PLYParser):
""" constant : FLOAT_CONST
| HEX_FLOAT_CONST
"""
- if 'x' in p[1].lower():
- t = 'float'
- else:
- if p[1][-1] in ('f', 'F'):
- t = 'float'
- elif p[1][-1] in ('l', 'L'):
- t = 'long double'
- else:
- t = 'double'
-
+ if 'x' in p[1].lower():
+ t = 'float'
+ else:
+ if p[1][-1] in ('f', 'F'):
+ t = 'float'
+ elif p[1][-1] in ('l', 'L'):
+ t = 'long double'
+ else:
+ t = 'double'
+
p[0] = c_ast.Constant(
- t, p[1], self._token_coord(p, 1))
+ t, p[1], self._token_coord(p, 1))
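
The suffix classification above, restated as a standalone sketch; note that, as written, any constant containing 'x' (i.e. a hex float) is typed 'float' regardless of suffix:

    def float_const_type(tok):
        if 'x' in tok.lower():         # hex float constant
            return 'float'
        if tok[-1] in ('f', 'F'):
            return 'float'
        if tok[-1] in ('l', 'L'):
            return 'long double'
        return 'double'

    assert float_const_type('1.5f') == 'float'
    assert float_const_type('1.5L') == 'long double'
    assert float_const_type('1.5') == 'double'
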
def p_constant_3(self, p):
""" constant : CHAR_CONST
@@ -1872,7 +1872,7 @@ class CParser(PLYParser):
| U32CHAR_CONST
"""
p[0] = c_ast.Constant(
- 'char', p[1], self._token_coord(p, 1))
+ 'char', p[1], self._token_coord(p, 1))
# The "unified" string and wstring literal rules are for supporting
# concatenation of adjacent string literals.
@@ -1885,7 +1885,7 @@ class CParser(PLYParser):
"""
if len(p) == 2: # single literal
p[0] = c_ast.Constant(
- 'string', p[1], self._token_coord(p, 1))
+ 'string', p[1], self._token_coord(p, 1))
else:
p[1].value = p[1].value[:-1] + p[2][1:]
p[0] = p[1]
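
The splice in the else-branch, in isolation: drop the first literal's closing quote and the second literal's opening quote, leaving one quoted value:

    first, second = '"hello, "', '"world"'
    print(first[:-1] + second[1:])   # "hello, world"
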
@@ -1902,7 +1902,7 @@ class CParser(PLYParser):
"""
if len(p) == 2: # single literal
p[0] = c_ast.Constant(
- 'string', p[1], self._token_coord(p, 1))
+ 'string', p[1], self._token_coord(p, 1))
else:
p[1].value = p[1].value.rstrip()[:-1] + p[2][2:]
p[0] = p[1]
diff --git a/contrib/python/pycparser/pycparser/lextab.py b/contrib/python/pycparser/pycparser/lextab.py
index 18f819fb13..444b4656d5 100644
--- a/contrib/python/pycparser/pycparser/lextab.py
+++ b/contrib/python/pycparser/pycparser/lextab.py
@@ -1,7 +1,7 @@
-# lextab.py. This file automatically created by PLY (version 3.10). Don't edit!
-_tabversion = '3.10'
+# lextab.py. This file automatically created by PLY (version 3.10). Don't edit!
+_tabversion = '3.10'
_lextokens = set(('INT_CONST_CHAR', 'VOID', 'LBRACKET', 'WCHAR_CONST', 'FLOAT_CONST', 'MINUS', 'RPAREN', 'STRUCT', 'LONG', 'PLUS', 'ELLIPSIS', 'U32STRING_LITERAL', 'GT', 'GOTO', 'ENUM', 'PERIOD', 'GE', 'INT_CONST_DEC', 'ARROW', '_STATIC_ASSERT', '__INT128', 'HEX_FLOAT_CONST', 'DOUBLE', 'MINUSEQUAL', 'INT_CONST_OCT', 'TIMESEQUAL', 'OR', 'SHORT', 'RETURN', 'RSHIFTEQUAL', '_ALIGNAS', 'RESTRICT', 'STATIC', 'SIZEOF', 'UNSIGNED', 'PLUSPLUS', 'COLON', 'WSTRING_LITERAL', 'DIVIDE', 'FOR', 'UNION', 'EQUALS', 'ELSE', 'ANDEQUAL', 'EQ', 'AND', 'TYPEID', 'LBRACE', 'PPHASH', 'INT', 'SIGNED', 'CONTINUE', 'NOT', 'OREQUAL', 'MOD', 'RSHIFT', 'DEFAULT', '_NORETURN', 'CHAR', 'WHILE', 'DIVEQUAL', '_ALIGNOF', 'EXTERN', 'LNOT', 'CASE', 'LAND', 'REGISTER', 'MODEQUAL', 'NE', 'SWITCH', 'INT_CONST_HEX', '_COMPLEX', 'PPPRAGMASTR', 'PLUSEQUAL', 'U32CHAR_CONST', 'CONDOP', 'U8STRING_LITERAL', 'BREAK', 'VOLATILE', 'PPPRAGMA', 'INLINE', 'INT_CONST_BIN', 'DO', 'U8CHAR_CONST', 'CONST', 'U16STRING_LITERAL', 'LOR', 'CHAR_CONST', 'LSHIFT', 'RBRACE', '_BOOL', 'LE', 'SEMI', '_THREAD_LOCAL', 'LT', 'COMMA', 'U16CHAR_CONST', 'OFFSETOF', '_ATOMIC', 'TYPEDEF', 'XOR', 'AUTO', 'TIMES', 'LPAREN', 'MINUSMINUS', 'ID', 'IF', 'STRING_LITERAL', 'FLOAT', 'XOREQUAL', 'LSHIFTEQUAL', 'RBRACKET'))
-_lexreflags = 64
+_lexreflags = 64
_lexliterals = ''
_lexstateinfo = {'ppline': 'exclusive', 'pppragma': 'exclusive', 'INITIAL': 'inclusive'}
_lexstatere = {'ppline': [('(?P<t_ppline_FILENAME>"([^"\\\\\\n]|(\\\\[0-9a-zA-Z._~!=&\\^\\-\\\\?\'"]))*")|(?P<t_ppline_LINE_NUMBER>(0(([uU]ll)|([uU]LL)|(ll[uU]?)|(LL[uU]?)|([uU][lL])|([lL][uU]?)|[uU])?)|([1-9][0-9]*(([uU]ll)|([uU]LL)|(ll[uU]?)|(LL[uU]?)|([uU][lL])|([lL][uU]?)|[uU])?))|(?P<t_ppline_NEWLINE>\\n)|(?P<t_ppline_PPLINE>line)', [None, ('t_ppline_FILENAME', 'FILENAME'), None, None, ('t_ppline_LINE_NUMBER', 'LINE_NUMBER'), None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, ('t_ppline_NEWLINE', 'NEWLINE'), ('t_ppline_PPLINE', 'PPLINE')])], 'pppragma': [('(?P<t_pppragma_NEWLINE>\\n)|(?P<t_pppragma_PPPRAGMA>pragma)|(?P<t_pppragma_STR>.+)', [None, ('t_pppragma_NEWLINE', 'NEWLINE'), ('t_pppragma_PPPRAGMA', 'PPPRAGMA'), ('t_pppragma_STR', 'STR')])], 'INITIAL': [('(?P<t_PPHASH>[ \\t]*\\#)|(?P<t_NEWLINE>\\n+)|(?P<t_LBRACE>\\{)|(?P<t_RBRACE>\\})|(?P<t_FLOAT_CONST>((((([0-9]*\\.[0-9]+)|([0-9]+\\.))([eE][-+]?[0-9]+)?)|([0-9]+([eE][-+]?[0-9]+)))[FfLl]?))|(?P<t_HEX_FLOAT_CONST>(0[xX]([0-9a-fA-F]+|((([0-9a-fA-F]+)?\\.[0-9a-fA-F]+)|([0-9a-fA-F]+\\.)))([pP][+-]?[0-9]+)[FfLl]?))|(?P<t_INT_CONST_HEX>0[xX][0-9a-fA-F]+(([uU]ll)|([uU]LL)|(ll[uU]?)|(LL[uU]?)|([uU][lL])|([lL][uU]?)|[uU])?)|(?P<t_INT_CONST_BIN>0[bB][01]+(([uU]ll)|([uU]LL)|(ll[uU]?)|(LL[uU]?)|([uU][lL])|([lL][uU]?)|[uU])?)', [None, ('t_PPHASH', 'PPHASH'), ('t_NEWLINE', 'NEWLINE'), ('t_LBRACE', 'LBRACE'), ('t_RBRACE', 'RBRACE'), ('t_FLOAT_CONST', 'FLOAT_CONST'), None, None, None, None, None, None, None, None, None, ('t_HEX_FLOAT_CONST', 'HEX_FLOAT_CONST'), None, None, None, None, None, None, None, ('t_INT_CONST_HEX', 'INT_CONST_HEX'), None, None, None, None, None, None, None, ('t_INT_CONST_BIN', 'INT_CONST_BIN')]), ('(?P<t_BAD_CONST_OCT>0[0-7]*[89])|(?P<t_INT_CONST_OCT>0[0-7]*(([uU]ll)|([uU]LL)|(ll[uU]?)|(LL[uU]?)|([uU][lL])|([lL][uU]?)|[uU])?)|(?P<t_INT_CONST_DEC>(0(([uU]ll)|([uU]LL)|(ll[uU]?)|(LL[uU]?)|([uU][lL])|([lL][uU]?)|[uU])?)|([1-9][0-9]*(([uU]ll)|([uU]LL)|(ll[uU]?)|(LL[uU]?)|([uU][lL])|([lL][uU]?)|[uU])?))|(?P<t_INT_CONST_CHAR>\'([^\'\\\\\\n]|(\\\\(([a-wyzA-Z._~!=&\\^\\-\\\\?\'"]|x(?![0-9a-fA-F]))|(\\d+)(?!\\d)|(x[0-9a-fA-F]+)(?![0-9a-fA-F])))){2,4}\')|(?P<t_CHAR_CONST>\'([^\'\\\\\\n]|(\\\\(([a-wyzA-Z._~!=&\\^\\-\\\\?\'"]|x(?![0-9a-fA-F]))|(\\d+)(?!\\d)|(x[0-9a-fA-F]+)(?![0-9a-fA-F]))))\')|(?P<t_WCHAR_CONST>L\'([^\'\\\\\\n]|(\\\\(([a-wyzA-Z._~!=&\\^\\-\\\\?\'"]|x(?![0-9a-fA-F]))|(\\d+)(?!\\d)|(x[0-9a-fA-F]+)(?![0-9a-fA-F]))))\')|(?P<t_U8CHAR_CONST>u8\'([^\'\\\\\\n]|(\\\\(([a-wyzA-Z._~!=&\\^\\-\\\\?\'"]|x(?![0-9a-fA-F]))|(\\d+)(?!\\d)|(x[0-9a-fA-F]+)(?![0-9a-fA-F]))))\')|(?P<t_U16CHAR_CONST>u\'([^\'\\\\\\n]|(\\\\(([a-wyzA-Z._~!=&\\^\\-\\\\?\'"]|x(?![0-9a-fA-F]))|(\\d+)(?!\\d)|(x[0-9a-fA-F]+)(?![0-9a-fA-F]))))\')|(?P<t_U32CHAR_CONST>U\'([^\'\\\\\\n]|(\\\\(([a-wyzA-Z._~!=&\\^\\-\\\\?\'"]|x(?![0-9a-fA-F]))|(\\d+)(?!\\d)|(x[0-9a-fA-F]+)(?![0-9a-fA-F]))))\')', [None, ('t_BAD_CONST_OCT', 'BAD_CONST_OCT'), ('t_INT_CONST_OCT', 'INT_CONST_OCT'), None, None, None, None, None, None, None, ('t_INT_CONST_DEC', 'INT_CONST_DEC'), None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, ('t_INT_CONST_CHAR', 'INT_CONST_CHAR'), None, None, None, None, None, None, ('t_CHAR_CONST', 'CHAR_CONST'), None, None, None, None, None, None, ('t_WCHAR_CONST', 'WCHAR_CONST'), None, None, None, None, None, None, ('t_U8CHAR_CONST', 'U8CHAR_CONST'), None, None, None, None, None, None, ('t_U16CHAR_CONST', 'U16CHAR_CONST'), None, None, None, None, None, None, ('t_U32CHAR_CONST', 'U32CHAR_CONST')]), ('(?P<t_UNMATCHED_QUOTE>(\'([^\'\\\\\\n]|(\\\\(([a-wyzA-Z._~!=&\\^\\-\\\\?\'"]|x(?![0-9a-fA-F]))|(\\d+)(?!\\d)|(x[0-9a-fA-F]+)(?![0-9a-fA-F]))))*\\n)|(\'([^\'\\\\\\n]|(\\\\(([a-wyzA-Z._~!=&\\^\\-\\\\?\'"]|x(?![0-9a-fA-F]))|(\\d+)(?!\\d)|(x[0-9a-fA-F]+)(?![0-9a-fA-F]))))*$))|(?P<t_BAD_CHAR_CONST>(\'([^\'\\\\\\n]|(\\\\(([a-wyzA-Z._~!=&\\^\\-\\\\?\'"]|x(?![0-9a-fA-F]))|(\\d+)(?!\\d)|(x[0-9a-fA-F]+)(?![0-9a-fA-F]))))[^\'\n]+\')|(\'\')|(\'([\\\\][^a-zA-Z._~^!=&\\^\\-\\\\?\'"x0-9])[^\'\\n]*\'))|(?P<t_WSTRING_LITERAL>L"([^"\\\\\\n]|(\\\\[0-9a-zA-Z._~!=&\\^\\-\\\\?\'"]))*")|(?P<t_U8STRING_LITERAL>u8"([^"\\\\\\n]|(\\\\[0-9a-zA-Z._~!=&\\^\\-\\\\?\'"]))*")|(?P<t_U16STRING_LITERAL>u"([^"\\\\\\n]|(\\\\[0-9a-zA-Z._~!=&\\^\\-\\\\?\'"]))*")|(?P<t_U32STRING_LITERAL>U"([^"\\\\\\n]|(\\\\[0-9a-zA-Z._~!=&\\^\\-\\\\?\'"]))*")|(?P<t_BAD_STRING_LITERAL>"([^"\\\\\\n]|(\\\\[0-9a-zA-Z._~!=&\\^\\-\\\\?\'"]))*([\\\\][^a-zA-Z._~^!=&\\^\\-\\\\?\'"x0-9])([^"\\\\\\n]|(\\\\[0-9a-zA-Z._~!=&\\^\\-\\\\?\'"]))*")|(?P<t_ID>[a-zA-Z_$][0-9a-zA-Z_$]*)|(?P<t_STRING_LITERAL>"([^"\\\\\\n]|(\\\\[0-9a-zA-Z._~!=&\\^\\-\\\\?\'"]))*")|(?P<t_ELLIPSIS>\\.\\.\\.)|(?P<t_PLUSPLUS>\\+\\+)|(?P<t_LOR>\\|\\|)|(?P<t_XOREQUAL>\\^=)|(?P<t_OREQUAL>\\|=)|(?P<t_LSHIFTEQUAL><<=)|(?P<t_RSHIFTEQUAL>>>=)|(?P<t_PLUSEQUAL>\\+=)|(?P<t_TIMESEQUAL>\\*=)', [None, ('t_UNMATCHED_QUOTE', 'UNMATCHED_QUOTE'), None, None, None, None, None, None, None, None, None, None, None, None, None, None, ('t_BAD_CHAR_CONST', 'BAD_CHAR_CONST'), None, None, None, None, None, None, None, None, None, None, ('t_WSTRING_LITERAL', 'WSTRING_LITERAL'), None, None, ('t_U8STRING_LITERAL', 'U8STRING_LITERAL'), None, None, ('t_U16STRING_LITERAL', 'U16STRING_LITERAL'), None, None, ('t_U32STRING_LITERAL', 'U32STRING_LITERAL'), None, None, ('t_BAD_STRING_LITERAL', 'BAD_STRING_LITERAL'), None, None, None, None, None, ('t_ID', 'ID'), (None, 'STRING_LITERAL'), None, None, (None, 'ELLIPSIS'), (None, 'PLUSPLUS'), (None, 'LOR'), (None, 'XOREQUAL'), (None, 'OREQUAL'), (None, 'LSHIFTEQUAL'), (None, 'RSHIFTEQUAL'), (None, 'PLUSEQUAL'), (None, 'TIMESEQUAL')]), ('(?P<t_PLUS>\\+)|(?P<t_MODEQUAL>%=)|(?P<t_DIVEQUAL>/=)|(?P<t_RBRACKET>\\])|(?P<t_CONDOP>\\?)|(?P<t_XOR>\\^)|(?P<t_LSHIFT><<)|(?P<t_LE><=)|(?P<t_LPAREN>\\()|(?P<t_ARROW>->)|(?P<t_EQ>==)|(?P<t_NE>!=)|(?P<t_MINUSMINUS>--)|(?P<t_OR>\\|)|(?P<t_TIMES>\\*)|(?P<t_LBRACKET>\\[)|(?P<t_GE>>=)|(?P<t_RPAREN>\\))|(?P<t_LAND>&&)|(?P<t_RSHIFT>>>)|(?P<t_MINUSEQUAL>-=)|(?P<t_PERIOD>\\.)|(?P<t_ANDEQUAL>&=)|(?P<t_EQUALS>=)|(?P<t_LT><)|(?P<t_COMMA>,)|(?P<t_DIVIDE>/)|(?P<t_AND>&)|(?P<t_MOD>%)|(?P<t_SEMI>;)|(?P<t_MINUS>-)|(?P<t_GT>>)|(?P<t_COLON>:)|(?P<t_NOT>~)|(?P<t_LNOT>!)', [None, (None, 'PLUS'), (None, 'MODEQUAL'), (None, 'DIVEQUAL'), (None, 'RBRACKET'), (None, 'CONDOP'), (None, 'XOR'), (None, 'LSHIFT'), (None, 'LE'), (None, 'LPAREN'), (None, 'ARROW'), (None, 'EQ'), (None, 'NE'), (None, 'MINUSMINUS'), (None, 'OR'), (None, 'TIMES'), (None, 'LBRACKET'), (None, 'GE'), (None, 'RPAREN'), (None, 'LAND'), (None, 'RSHIFT'), (None, 'MINUSEQUAL'), (None, 'PERIOD'), (None, 'ANDEQUAL'), (None, 'EQUALS'), (None, 'LT'), (None, 'COMMA'), (None, 'DIVIDE'), (None, 'AND'), (None, 'MOD'), (None, 'SEMI'), (None, 'MINUS'), (None, 'GT'), (None, 'COLON'), (None, 'NOT'), (None, 'LNOT')])]}
diff --git a/contrib/python/pycparser/pycparser/ply/__init__.py b/contrib/python/pycparser/pycparser/ply/__init__.py
index ed040086b5..6e53cddcf6 100644
--- a/contrib/python/pycparser/pycparser/ply/__init__.py
+++ b/contrib/python/pycparser/pycparser/ply/__init__.py
@@ -1,5 +1,5 @@
# PLY package
# Author: David Beazley (dave@dabeaz.com)
-__version__ = '3.9'
+__version__ = '3.9'
__all__ = ['lex','yacc']
diff --git a/contrib/python/pycparser/pycparser/ply/cpp.py b/contrib/python/pycparser/pycparser/ply/cpp.py
index 2f31763ef7..86273eac77 100644
--- a/contrib/python/pycparser/pycparser/ply/cpp.py
+++ b/contrib/python/pycparser/pycparser/ply/cpp.py
@@ -2,20 +2,20 @@
# cpp.py
#
# Author: David Beazley (http://www.dabeaz.com)
-# Copyright (C) 2017
+# Copyright (C) 2017
# All rights reserved
#
-# This module implements an ANSI-C style lexical preprocessor for PLY.
+# This module implements an ANSI-C style lexical preprocessor for PLY.
# -----------------------------------------------------------------------------
-import sys
-
-# Some Python 3 compatibility shims
-if sys.version_info.major < 3:
- STRING_TYPES = (str, unicode)
-else:
- STRING_TYPES = str
- xrange = range
-
+import sys
+
+# Some Python 3 compatibility shims
+if sys.version_info.major < 3:
+ STRING_TYPES = (str, unicode)
+else:
+ STRING_TYPES = str
+ xrange = range
+
# -----------------------------------------------------------------------------
# Default preprocessor lexer definitions. These tokens are enough to get
# a basic preprocessor working. Other modules may import these if they want
@@ -75,8 +75,8 @@ def t_CPP_COMMENT2(t):
r'(//.*?(\n|$))'
    # replace with '\n'
t.type = 'CPP_WS'; t.value = '\n'
- return t
-
+ return t
+
def t_error(t):
t.type = t.value[0]
t.value = t.value[0]
@@ -90,8 +90,8 @@ import os.path
# -----------------------------------------------------------------------------
# trigraph()
-#
-# Given an input string, this function replaces all trigraph sequences.
+#
+# Given an input string, this function replaces all trigraph sequences.
# The following mapping is used:
#
# ??= #
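
A minimal usage sketch (assuming the module is importable as pycparser.ply.cpp, as vendored here):

    from pycparser.ply.cpp import trigraph
    print(trigraph('??=define SIZE 10'))   # -> #define SIZE 10
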
@@ -261,7 +261,7 @@ class Preprocessor(object):
# ----------------------------------------------------------------------
# add_path()
#
- # Adds a search path to the preprocessor.
+ # Adds a search path to the preprocessor.
# ----------------------------------------------------------------------
def add_path(self,path):
@@ -305,7 +305,7 @@ class Preprocessor(object):
# ----------------------------------------------------------------------
# tokenstrip()
- #
+ #
# Remove leading/trailing whitespace tokens from a token list
# ----------------------------------------------------------------------
@@ -331,7 +331,7 @@ class Preprocessor(object):
# argument. Each argument is represented by a list of tokens.
#
# When collecting arguments, leading and trailing whitespace is removed
- # from each argument.
+ # from each argument.
#
    # This function properly handles nested parentheses and commas---these do not
# define new arguments.
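
An illustrative stand-in for that behavior (a hypothetical helper, not the method above): split on top-level commas only, so commas inside nested parentheses never start a new argument:

    def split_args(s):
        args, depth, cur = [], 0, ''
        for ch in s:
            if ch == '(':
                depth += 1
            elif ch == ')':
                depth -= 1
            if ch == ',' and depth == 0:
                args.append(cur.strip())
                cur = ''
            else:
                cur += ch
        args.append(cur.strip())
        return args

    print(split_args('a, f(b, c), d'))   # ['a', 'f(b, c)', 'd']
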
@@ -343,7 +343,7 @@ class Preprocessor(object):
current_arg = []
nesting = 1
tokenlen = len(tokenlist)
-
+
# Search for the opening '('.
i = 0
while (i < tokenlen) and (tokenlist[i].type in self.t_WS):
@@ -377,7 +377,7 @@ class Preprocessor(object):
else:
current_arg.append(t)
i += 1
-
+
# Missing end argument
self.error(self.source,tokenlist[-1].lineno,"Missing ')' in macro arguments")
return 0, [],[]
@@ -389,9 +389,9 @@ class Preprocessor(object):
# This is used to speed up macro expansion later on---we'll know
# right away where to apply patches to the value to form the expansion
# ----------------------------------------------------------------------
-
+
def macro_prescan(self,macro):
- macro.patch = [] # Standard macro arguments
+ macro.patch = [] # Standard macro arguments
macro.str_patch = [] # String conversion expansion
macro.var_comma_patch = [] # Variadic macro comma patch
i = 0
@@ -438,7 +438,7 @@ class Preprocessor(object):
rep = [copy.copy(_x) for _x in macro.value]
# Make string expansion patches. These do not alter the length of the replacement sequence
-
+
str_expansion = {}
for argnum, i in macro.str_patch:
if argnum not in str_expansion:
@@ -456,7 +456,7 @@ class Preprocessor(object):
# Make all other patches. The order of these matters. It is assumed that the patch list
# has been sorted in reverse order of patch location since replacements will cause the
# size of the replacement sequence to expand from the patch point.
-
+
expanded = { }
for ptype, argnum, i in macro.patch:
# Concatenation. Argument is left unexpanded
@@ -493,7 +493,7 @@ class Preprocessor(object):
if t.value in self.macros and t.value not in expanded:
# Yes, we found a macro match
expanded[t.value] = True
-
+
m = self.macros[t.value]
if not m.arglist:
# A simple macro
@@ -525,7 +525,7 @@ class Preprocessor(object):
else:
args[len(m.arglist)-1] = tokens[j+positions[len(m.arglist)-1]:j+tokcount-1]
del args[len(m.arglist):]
-
+
# Get macro replacement text
rep = self.macro_expand_args(m,args)
rep = self.expand_macros(rep,expanded)
@@ -538,13 +538,13 @@ class Preprocessor(object):
elif t.value == '__LINE__':
t.type = self.t_INTEGER
t.value = self.t_INTEGER_TYPE(t.lineno)
-
+
i += 1
return tokens
- # ----------------------------------------------------------------------
+ # ----------------------------------------------------------------------
# evalexpr()
- #
+ #
    # Evaluate an expression token sequence to an integral result
    # (used when processing #if/#elif directives).
# ----------------------------------------------------------------------
@@ -591,14 +591,14 @@ class Preprocessor(object):
tokens[i].value = str(tokens[i].value)
while tokens[i].value[-1] not in "0123456789abcdefABCDEF":
tokens[i].value = tokens[i].value[:-1]
-
+
expr = "".join([str(x.value) for x in tokens])
expr = expr.replace("&&"," and ")
expr = expr.replace("||"," or ")
expr = expr.replace("!"," not ")
try:
result = eval(expr)
- except Exception:
+ except Exception:
self.error(self.source,tokens[0].lineno,"Couldn't evaluate expression")
result = 0
return result
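
The operator rewriting above, in isolation (a sketch; the method itself works on a token list):

    expr = '1 && (2 || 0)'
    expr = expr.replace('&&', ' and ').replace('||', ' or ').replace('!', ' not ')
    print(eval(expr))   # 2, i.e. truthy
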
@@ -616,7 +616,7 @@ class Preprocessor(object):
if not source:
source = ""
-
+
self.define("__FILE__ \"%s\"" % source)
self.source = source
@@ -635,7 +635,7 @@ class Preprocessor(object):
for tok in x:
if tok.type in self.t_WS and '\n' in tok.value:
chunk.append(tok)
-
+
dirtokens = self.tokenstrip(x[i+1:])
if dirtokens:
name = dirtokens[0].value
@@ -643,7 +643,7 @@ class Preprocessor(object):
else:
name = ""
args = []
-
+
if name == 'define':
if enable:
for tok in self.expand_macros(chunk):
@@ -703,7 +703,7 @@ class Preprocessor(object):
iftrigger = True
else:
self.error(self.source,dirtokens[0].lineno,"Misplaced #elif")
-
+
elif name == 'else':
if ifstack:
if ifstack[-1][0]:
@@ -789,7 +789,7 @@ class Preprocessor(object):
# ----------------------------------------------------------------------
def define(self,tokens):
- if isinstance(tokens,STRING_TYPES):
+ if isinstance(tokens,STRING_TYPES):
tokens = self.tokenize(tokens)
linetok = tokens
@@ -873,7 +873,7 @@ class Preprocessor(object):
def parse(self,input,source=None,ignore={}):
self.ignore = ignore
self.parser = self.parsegen(input,source)
-
+
# ----------------------------------------------------------------------
# token()
#
diff --git a/contrib/python/pycparser/pycparser/ply/lex.py b/contrib/python/pycparser/pycparser/ply/lex.py
index 2b3cadb875..4bdd76ca06 100644
--- a/contrib/python/pycparser/pycparser/ply/lex.py
+++ b/contrib/python/pycparser/pycparser/ply/lex.py
@@ -1,7 +1,7 @@
# -----------------------------------------------------------------------------
# ply: lex.py
#
-# Copyright (C) 2001-2017
+# Copyright (C) 2001-2017
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
@@ -31,8 +31,8 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
-__version__ = '3.10'
-__tabversion__ = '3.10'
+__version__ = '3.10'
+__tabversion__ = '3.10'
import re
import sys
@@ -179,12 +179,12 @@ class Lexer:
with open(filename, 'w') as tf:
tf.write('# %s.py. This file automatically created by PLY (version %s). Don\'t edit!\n' % (basetabmodule, __version__))
tf.write('_tabversion = %s\n' % repr(__tabversion__))
- tf.write('_lextokens = set(%s)\n' % repr(tuple(self.lextokens)))
+ tf.write('_lextokens = set(%s)\n' % repr(tuple(self.lextokens)))
tf.write('_lexreflags = %s\n' % repr(self.lexreflags))
tf.write('_lexliterals = %s\n' % repr(self.lexliterals))
tf.write('_lexstateinfo = %s\n' % repr(self.lexstateinfo))
- # Rewrite the lexstatere table, replacing function objects with function names
+ # Rewrite the lexstatere table, replacing function objects with function names
tabre = {}
for statename, lre in self.lexstatere.items():
titem = []
@@ -230,7 +230,7 @@ class Lexer:
titem = []
txtitem = []
for pat, func_name in lre:
- titem.append((re.compile(pat, lextab._lexreflags), _names_to_funcs(func_name, fdict)))
+ titem.append((re.compile(pat, lextab._lexreflags), _names_to_funcs(func_name, fdict)))
self.lexstatere[statename] = titem
self.lexstateretext[statename] = txtitem
@@ -495,7 +495,7 @@ def _form_master_re(relist, reflags, ldict, toknames):
return []
regex = '|'.join(relist)
try:
- lexre = re.compile(regex, reflags)
+ lexre = re.compile(regex, reflags)
# Build the index to function map for the matching engine
lexindexfunc = [None] * (max(lexre.groupindex.values()) + 1)
@@ -536,7 +536,7 @@ def _statetoken(s, names):
for i, part in enumerate(parts[1:], 1):
if part not in names and part != 'ANY':
break
-
+
if i > 1:
states = tuple(parts[1:i])
else:
@@ -758,7 +758,7 @@ class LexerReflect(object):
continue
try:
- c = re.compile('(?P<%s>%s)' % (fname, _get_regex(f)), self.reflags)
+ c = re.compile('(?P<%s>%s)' % (fname, _get_regex(f)), self.reflags)
if c.match(''):
self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file, line, f.__name__)
self.error = True
@@ -782,7 +782,7 @@ class LexerReflect(object):
continue
try:
- c = re.compile('(?P<%s>%s)' % (name, r), self.reflags)
+ c = re.compile('(?P<%s>%s)' % (name, r), self.reflags)
if (c.match('')):
self.log.error("Regular expression for rule '%s' matches empty string", name)
self.error = True
@@ -830,10 +830,10 @@ class LexerReflect(object):
# -----------------------------------------------------------------------------
def validate_module(self, module):
- try:
- lines, linen = inspect.getsourcelines(module)
- except IOError:
- return
+ try:
+ lines, linen = inspect.getsourcelines(module)
+ except IOError:
+ return
fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')
@@ -861,7 +861,7 @@ class LexerReflect(object):
# Build all of the regular expression rules from definitions in the supplied module
# -----------------------------------------------------------------------------
def lex(module=None, object=None, debug=False, optimize=False, lextab='lextab',
- reflags=int(re.VERBOSE), nowarn=False, outputdir=None, debuglog=None, errorlog=None):
+ reflags=int(re.VERBOSE), nowarn=False, outputdir=None, debuglog=None, errorlog=None):
if lextab is None:
lextab = 'lextab'
diff --git a/contrib/python/pycparser/pycparser/ply/yacc.py b/contrib/python/pycparser/pycparser/ply/yacc.py
index 7ca728ce46..20b4f2863c 100644
--- a/contrib/python/pycparser/pycparser/ply/yacc.py
+++ b/contrib/python/pycparser/pycparser/ply/yacc.py
@@ -1,7 +1,7 @@
# -----------------------------------------------------------------------------
# ply: yacc.py
#
-# Copyright (C) 2001-2017
+# Copyright (C) 2001-2017
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
@@ -67,8 +67,8 @@ import inspect
import base64
import warnings
-__version__ = '3.10'
-__tabversion__ = '3.10'
+__version__ = '3.10'
+__tabversion__ = '3.10'
#-----------------------------------------------------------------------------
# === User configurable parameters ===
@@ -309,7 +309,7 @@ class LRParser:
# certain kinds of advanced parsing situations where the lexer and parser interact with
# each other or change states (i.e., manipulation of scope, lexer states, etc.).
#
- # See: https://www.gnu.org/software/bison/manual/html_node/Default-Reductions.html#Default-Reductions
+ # See: https://www.gnu.org/software/bison/manual/html_node/Default-Reductions.html#Default-Reductions
def set_defaulted_states(self):
self.defaulted_states = {}
for state, actions in self.action.items():
@@ -497,8 +497,8 @@ class LRParser:
try:
# Call the grammar rule with our special slice object
del symstack[-plen:]
- self.state = state
- p.callable(pslice)
+ self.state = state
+ p.callable(pslice)
del statestack[-plen:]
#--! DEBUG
debug.info('Result : %s', format_result(pslice[0]))
@@ -508,16 +508,16 @@ class LRParser:
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
- lookaheadstack.append(lookahead) # Save the current lookahead token
- symstack.extend(targ[1:-1]) # Put the production slice back on the stack
- statestack.pop() # Pop back one state (before the reduce)
+ lookaheadstack.append(lookahead) # Save the current lookahead token
+ symstack.extend(targ[1:-1]) # Put the production slice back on the stack
+ statestack.pop() # Pop back one state (before the reduce)
state = statestack[-1]
sym.type = 'error'
- sym.value = 'error'
+ sym.value = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
-
+
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
@@ -540,7 +540,7 @@ class LRParser:
try:
# Call the grammar rule with our special slice object
- self.state = state
+ self.state = state
p.callable(pslice)
#--! DEBUG
debug.info('Result : %s', format_result(pslice[0]))
@@ -550,15 +550,15 @@ class LRParser:
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
- lookaheadstack.append(lookahead) # Save the current lookahead token
- statestack.pop() # Pop back one state (before the reduce)
+ lookaheadstack.append(lookahead) # Save the current lookahead token
+ statestack.pop() # Pop back one state (before the reduce)
state = statestack[-1]
sym.type = 'error'
- sym.value = 'error'
+ sym.value = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
-
+
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
@@ -597,7 +597,7 @@ class LRParser:
if self.errorfunc:
if errtoken and not hasattr(errtoken, 'lexer'):
errtoken.lexer = lexer
- self.state = state
+ self.state = state
tok = call_errorfunc(self.errorfunc, errtoken, self)
if self.errorok:
# User must have done some kind of panic
@@ -817,24 +817,24 @@ class LRParser:
try:
# Call the grammar rule with our special slice object
del symstack[-plen:]
- self.state = state
- p.callable(pslice)
+ self.state = state
+ p.callable(pslice)
del statestack[-plen:]
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
- lookaheadstack.append(lookahead) # Save the current lookahead token
- symstack.extend(targ[1:-1]) # Put the production slice back on the stack
- statestack.pop() # Pop back one state (before the reduce)
+ lookaheadstack.append(lookahead) # Save the current lookahead token
+ symstack.extend(targ[1:-1]) # Put the production slice back on the stack
+ statestack.pop() # Pop back one state (before the reduce)
state = statestack[-1]
sym.type = 'error'
- sym.value = 'error'
+ sym.value = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
-
+
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
@@ -857,22 +857,22 @@ class LRParser:
try:
# Call the grammar rule with our special slice object
- self.state = state
+ self.state = state
p.callable(pslice)
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
- lookaheadstack.append(lookahead) # Save the current lookahead token
- statestack.pop() # Pop back one state (before the reduce)
+ lookaheadstack.append(lookahead) # Save the current lookahead token
+ statestack.pop() # Pop back one state (before the reduce)
state = statestack[-1]
sym.type = 'error'
- sym.value = 'error'
+ sym.value = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
-
+
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
@@ -903,7 +903,7 @@ class LRParser:
if self.errorfunc:
if errtoken and not hasattr(errtoken, 'lexer'):
errtoken.lexer = lexer
- self.state = state
+ self.state = state
tok = call_errorfunc(self.errorfunc, errtoken, self)
if self.errorok:
# User must have done some kind of panic
@@ -1114,24 +1114,24 @@ class LRParser:
try:
# Call the grammar rule with our special slice object
del symstack[-plen:]
- self.state = state
- p.callable(pslice)
+ self.state = state
+ p.callable(pslice)
del statestack[-plen:]
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
- lookaheadstack.append(lookahead) # Save the current lookahead token
- symstack.extend(targ[1:-1]) # Put the production slice back on the stack
- statestack.pop() # Pop back one state (before the reduce)
+ lookaheadstack.append(lookahead) # Save the current lookahead token
+ symstack.extend(targ[1:-1]) # Put the production slice back on the stack
+ statestack.pop() # Pop back one state (before the reduce)
state = statestack[-1]
sym.type = 'error'
- sym.value = 'error'
+ sym.value = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
-
+
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
@@ -1149,22 +1149,22 @@ class LRParser:
try:
# Call the grammar rule with our special slice object
- self.state = state
+ self.state = state
p.callable(pslice)
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
- lookaheadstack.append(lookahead) # Save the current lookahead token
- statestack.pop() # Pop back one state (before the reduce)
+ lookaheadstack.append(lookahead) # Save the current lookahead token
+ statestack.pop() # Pop back one state (before the reduce)
state = statestack[-1]
sym.type = 'error'
- sym.value = 'error'
+ sym.value = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
-
+
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
@@ -1195,7 +1195,7 @@ class LRParser:
if self.errorfunc:
if errtoken and not hasattr(errtoken, 'lexer'):
errtoken.lexer = lexer
- self.state = state
+ self.state = state
tok = call_errorfunc(self.errorfunc, errtoken, self)
if self.errorok:
# User must have done some kind of panic
@@ -2000,7 +2000,7 @@ class LRTable(object):
import cPickle as pickle
except ImportError:
import pickle
-
+
if not os.path.exists(filename):
raise ImportError
@@ -2585,13 +2585,13 @@ class LRGeneratedTable(LRTable):
# Need to decide on shift or reduce here
# By default we favor shifting. Need to add
# some precedence rules here.
-
- # Shift precedence comes from the token
- sprec, slevel = Precedence.get(a, ('right', 0))
-
- # Reduce precedence comes from rule being reduced (p)
- rprec, rlevel = Productions[p.number].prec
-
+
+ # Shift precedence comes from the token
+ sprec, slevel = Precedence.get(a, ('right', 0))
+
+ # Reduce precedence comes from rule being reduced (p)
+ rprec, rlevel = Productions[p.number].prec
+
if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')):
# We really need to reduce here.
st_action[a] = -p.number
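
The tie-break restated as a standalone sketch (nonassoc and error bookkeeping omitted): equal precedence plus left associativity reduces, which is what makes 'a - b - c' group as '(a - b) - c':

    def resolve(slevel, rlevel, rprec):
        if (slevel < rlevel) or (slevel == rlevel and rprec == 'left'):
            return 'reduce'
        return 'shift'

    print(resolve(1, 1, 'left'))   # reduce
    print(resolve(2, 1, 'left'))   # shift: the token binds tighter
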
@@ -2649,13 +2649,13 @@ class LRGeneratedTable(LRTable):
# - if precedence of reduce rule is higher, we reduce.
# - if precedence of reduce is same and left assoc, we reduce.
# - otherwise we shift
-
- # Shift precedence comes from the token
- sprec, slevel = Precedence.get(a, ('right', 0))
-
- # Reduce precedence comes from the rule that could have been reduced
+
+ # Shift precedence comes from the token
+ sprec, slevel = Precedence.get(a, ('right', 0))
+
+ # Reduce precedence comes from the rule that could have been reduced
rprec, rlevel = Productions[st_actionp[a].number].prec
-
+
if (slevel > rlevel) or ((slevel == rlevel) and (rprec == 'right')):
# We decide to shift here... highest precedence to shift
Productions[st_actionp[a].number].reduced -= 1
@@ -2968,20 +2968,20 @@ class ParserReflect(object):
# Compute a signature over the grammar
def signature(self):
- parts = []
+ parts = []
try:
if self.start:
- parts.append(self.start)
+ parts.append(self.start)
if self.prec:
- parts.append(''.join([''.join(p) for p in self.prec]))
+ parts.append(''.join([''.join(p) for p in self.prec]))
if self.tokens:
- parts.append(' '.join(self.tokens))
+ parts.append(' '.join(self.tokens))
for f in self.pfuncs:
if f[3]:
- parts.append(f[3])
+ parts.append(f[3])
except (TypeError, ValueError):
pass
- return ''.join(parts)
+ return ''.join(parts)
# -----------------------------------------------------------------------------
# validate_modules()
@@ -2999,10 +2999,10 @@ class ParserReflect(object):
fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(')
for module in self.modules:
- try:
- lines, linen = inspect.getsourcelines(module)
- except IOError:
- continue
+ try:
+ lines, linen = inspect.getsourcelines(module)
+ except IOError:
+ continue
counthash = {}
for linen, line in enumerate(lines):
@@ -3130,7 +3130,7 @@ class ParserReflect(object):
if not name.startswith('p_') or name == 'p_error':
continue
if isinstance(item, (types.FunctionType, types.MethodType)):
- line = getattr(item, 'co_firstlineno', item.__code__.co_firstlineno)
+ line = getattr(item, 'co_firstlineno', item.__code__.co_firstlineno)
module = inspect.getmodule(item)
p_functions.append((line, module, name, item.__doc__))
diff --git a/contrib/python/pycparser/pycparser/plyparser.py b/contrib/python/pycparser/pycparser/plyparser.py
index 43189489e5..b8f4c4395e 100644
--- a/contrib/python/pycparser/pycparser/plyparser.py
+++ b/contrib/python/pycparser/pycparser/plyparser.py
@@ -4,11 +4,11 @@
# PLYParser class and other utilities for simplifying programming
# parsers with PLY
#
-# Eli Bendersky [https://eli.thegreenplace.net/]
+# Eli Bendersky [https://eli.thegreenplace.net/]
# License: BSD
#-----------------------------------------------------------------
-import warnings
+import warnings
class Coord(object):
""" Coordinates of a syntactic element. Consists of:
@@ -52,82 +52,82 @@ class PLYParser(object):
line=lineno,
column=column)
- def _token_coord(self, p, token_idx):
+ def _token_coord(self, p, token_idx):
""" Returns the coordinates for the YaccProduction object 'p' indexed
- with 'token_idx'. The coordinate includes the 'lineno' and
- 'column'. Both follow the lex semantics, starting from 1.
- """
- last_cr = p.lexer.lexer.lexdata.rfind('\n', 0, p.lexpos(token_idx))
- if last_cr < 0:
- last_cr = -1
- column = (p.lexpos(token_idx) - (last_cr))
- return self._coord(p.lineno(token_idx), column)
-
+ with 'token_idx'. The coordinate includes the 'lineno' and
+ 'column'. Both follow the lex semantics, starting from 1.
+ """
+ last_cr = p.lexer.lexer.lexdata.rfind('\n', 0, p.lexpos(token_idx))
+ if last_cr < 0:
+ last_cr = -1
+ column = (p.lexpos(token_idx) - (last_cr))
+ return self._coord(p.lineno(token_idx), column)
+
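
A worked instance of the column computation above:

    lexdata = 'int x;\nint y;'
    lexpos = lexdata.index('y')               # 11
    last_cr = lexdata.rfind('\n', 0, lexpos)  # 6 (stays -1 on the first line)
    print(lexpos - last_cr)                   # 5: 'y' is column 5 on line 2
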
def _parse_error(self, msg, coord):
raise ParseError("%s: %s" % (coord, msg))
-
-
-def parameterized(*params):
- """ Decorator to create parameterized rules.
-
- Parameterized rule methods must be named starting with 'p_' and contain
- 'xxx', and their docstrings may contain 'xxx' and 'yyy'. These will be
- replaced by the given parameter tuples. For example, ``p_xxx_rule()`` with
- docstring 'xxx_rule : yyy' when decorated with
- ``@parameterized(('id', 'ID'))`` produces ``p_id_rule()`` with the docstring
- 'id_rule : ID'. Using multiple tuples produces multiple rules.
- """
- def decorate(rule_func):
- rule_func._params = params
- return rule_func
- return decorate
-
-
-def template(cls):
- """ Class decorator to generate rules from parameterized rule templates.
-
- See `parameterized` for more information on parameterized rules.
- """
- issued_nodoc_warning = False
- for attr_name in dir(cls):
- if attr_name.startswith('p_'):
- method = getattr(cls, attr_name)
- if hasattr(method, '_params'):
- # Remove the template method
- delattr(cls, attr_name)
- # Create parameterized rules from this method; only run this if
- # the method has a docstring. This is to address an issue when
- # pycparser is run in -OO mode, which strips
- # docstrings away.
- # See: https://github.com/eliben/pycparser/pull/198/ and
- # https://github.com/eliben/pycparser/issues/197
- # for discussion.
- if method.__doc__ is not None:
- _create_param_rules(cls, method)
- elif not issued_nodoc_warning:
- warnings.warn(
- 'parsing methods must have __doc__ for pycparser to work properly',
- RuntimeWarning,
- stacklevel=2)
- issued_nodoc_warning = True
- return cls
-
-
-def _create_param_rules(cls, func):
- """ Create ply.yacc rules based on a parameterized rule function
-
- Generates new methods (one for each pair of parameters) based on the
- template rule function `func`, and attaches them to `cls`. The rule
- function's parameters must be accessible via its `_params` attribute.
- """
- for xxx, yyy in func._params:
- # Use the template method's body for each new method
- def param_rule(self, p):
- func(self, p)
-
- # Substitute in the params for the grammar rule and function name
- param_rule.__doc__ = func.__doc__.replace('xxx', xxx).replace('yyy', yyy)
- param_rule.__name__ = func.__name__.replace('xxx', xxx)
-
- # Attach the new method to the class
- setattr(cls, param_rule.__name__, param_rule)
+
+
+def parameterized(*params):
+ """ Decorator to create parameterized rules.
+
+ Parameterized rule methods must be named starting with 'p_' and contain
+ 'xxx', and their docstrings may contain 'xxx' and 'yyy'. These will be
+ replaced by the given parameter tuples. For example, ``p_xxx_rule()`` with
+ docstring 'xxx_rule : yyy' when decorated with
+ ``@parameterized(('id', 'ID'))`` produces ``p_id_rule()`` with the docstring
+ 'id_rule : ID'. Using multiple tuples produces multiple rules.
+ """
+ def decorate(rule_func):
+ rule_func._params = params
+ return rule_func
+ return decorate
+
+
+def template(cls):
+ """ Class decorator to generate rules from parameterized rule templates.
+
+ See `parameterized` for more information on parameterized rules.
+ """
+ issued_nodoc_warning = False
+ for attr_name in dir(cls):
+ if attr_name.startswith('p_'):
+ method = getattr(cls, attr_name)
+ if hasattr(method, '_params'):
+ # Remove the template method
+ delattr(cls, attr_name)
+ # Create parameterized rules from this method; only run this if
+ # the method has a docstring. This is to address an issue when
+ # pycparser is run in -OO mode, which strips
+ # docstrings away.
+ # See: https://github.com/eliben/pycparser/pull/198/ and
+ # https://github.com/eliben/pycparser/issues/197
+ # for discussion.
+ if method.__doc__ is not None:
+ _create_param_rules(cls, method)
+ elif not issued_nodoc_warning:
+ warnings.warn(
+ 'parsing methods must have __doc__ for pycparser to work properly',
+ RuntimeWarning,
+ stacklevel=2)
+ issued_nodoc_warning = True
+ return cls
+
+
+def _create_param_rules(cls, func):
+ """ Create ply.yacc rules based on a parameterized rule function
+
+ Generates new methods (one for each pair of parameters) based on the
+ template rule function `func`, and attaches them to `cls`. The rule
+ function's parameters must be accessible via its `_params` attribute.
+ """
+ for xxx, yyy in func._params:
+ # Use the template method's body for each new method
+ def param_rule(self, p):
+ func(self, p)
+
+ # Substitute in the params for the grammar rule and function name
+ param_rule.__doc__ = func.__doc__.replace('xxx', xxx).replace('yyy', yyy)
+ param_rule.__name__ = func.__name__.replace('xxx', xxx)
+
+ # Attach the new method to the class
+ setattr(cls, param_rule.__name__, param_rule)
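
A usage sketch for the two decorators above, importable from pycparser.plyparser:

    from pycparser.plyparser import parameterized, template

    @template
    class MiniParser(object):
        @parameterized(('id', 'ID'), ('typeid', 'TYPEID'))
        def p_xxx_decl(self, p):
            """ xxx_decl : yyy """
            p[0] = p[1]

    print(MiniParser.p_id_decl.__doc__)       # ' id_decl : ID '
    print(MiniParser.p_typeid_decl.__doc__)   # ' typeid_decl : TYPEID '
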
diff --git a/contrib/python/pycparser/pycparser/yacctab.py b/contrib/python/pycparser/pycparser/yacctab.py
index c877170b69..0622c36602 100644
--- a/contrib/python/pycparser/pycparser/yacctab.py
+++ b/contrib/python/pycparser/pycparser/yacctab.py
@@ -1,7 +1,7 @@
# yacctab.py
# This file is automatically generated. Do not edit.
-_tabversion = '3.10'
+_tabversion = '3.10'
_lr_method = 'LALR'
@@ -26,63 +26,63 @@ for _k, _v in _lr_goto_items.items():
del _lr_goto_items
_lr_productions = [
("S' -> translation_unit_or_empty","S'",1,None,None,None),
- ('abstract_declarator_opt -> empty','abstract_declarator_opt',1,'p_abstract_declarator_opt','plyparser.py',43),
- ('abstract_declarator_opt -> abstract_declarator','abstract_declarator_opt',1,'p_abstract_declarator_opt','plyparser.py',44),
- ('assignment_expression_opt -> empty','assignment_expression_opt',1,'p_assignment_expression_opt','plyparser.py',43),
- ('assignment_expression_opt -> assignment_expression','assignment_expression_opt',1,'p_assignment_expression_opt','plyparser.py',44),
- ('block_item_list_opt -> empty','block_item_list_opt',1,'p_block_item_list_opt','plyparser.py',43),
- ('block_item_list_opt -> block_item_list','block_item_list_opt',1,'p_block_item_list_opt','plyparser.py',44),
- ('declaration_list_opt -> empty','declaration_list_opt',1,'p_declaration_list_opt','plyparser.py',43),
- ('declaration_list_opt -> declaration_list','declaration_list_opt',1,'p_declaration_list_opt','plyparser.py',44),
- ('declaration_specifiers_no_type_opt -> empty','declaration_specifiers_no_type_opt',1,'p_declaration_specifiers_no_type_opt','plyparser.py',43),
- ('declaration_specifiers_no_type_opt -> declaration_specifiers_no_type','declaration_specifiers_no_type_opt',1,'p_declaration_specifiers_no_type_opt','plyparser.py',44),
- ('designation_opt -> empty','designation_opt',1,'p_designation_opt','plyparser.py',43),
- ('designation_opt -> designation','designation_opt',1,'p_designation_opt','plyparser.py',44),
- ('expression_opt -> empty','expression_opt',1,'p_expression_opt','plyparser.py',43),
- ('expression_opt -> expression','expression_opt',1,'p_expression_opt','plyparser.py',44),
- ('id_init_declarator_list_opt -> empty','id_init_declarator_list_opt',1,'p_id_init_declarator_list_opt','plyparser.py',43),
- ('id_init_declarator_list_opt -> id_init_declarator_list','id_init_declarator_list_opt',1,'p_id_init_declarator_list_opt','plyparser.py',44),
- ('identifier_list_opt -> empty','identifier_list_opt',1,'p_identifier_list_opt','plyparser.py',43),
- ('identifier_list_opt -> identifier_list','identifier_list_opt',1,'p_identifier_list_opt','plyparser.py',44),
- ('init_declarator_list_opt -> empty','init_declarator_list_opt',1,'p_init_declarator_list_opt','plyparser.py',43),
- ('init_declarator_list_opt -> init_declarator_list','init_declarator_list_opt',1,'p_init_declarator_list_opt','plyparser.py',44),
- ('initializer_list_opt -> empty','initializer_list_opt',1,'p_initializer_list_opt','plyparser.py',43),
- ('initializer_list_opt -> initializer_list','initializer_list_opt',1,'p_initializer_list_opt','plyparser.py',44),
- ('parameter_type_list_opt -> empty','parameter_type_list_opt',1,'p_parameter_type_list_opt','plyparser.py',43),
- ('parameter_type_list_opt -> parameter_type_list','parameter_type_list_opt',1,'p_parameter_type_list_opt','plyparser.py',44),
- ('struct_declarator_list_opt -> empty','struct_declarator_list_opt',1,'p_struct_declarator_list_opt','plyparser.py',43),
- ('struct_declarator_list_opt -> struct_declarator_list','struct_declarator_list_opt',1,'p_struct_declarator_list_opt','plyparser.py',44),
- ('type_qualifier_list_opt -> empty','type_qualifier_list_opt',1,'p_type_qualifier_list_opt','plyparser.py',43),
- ('type_qualifier_list_opt -> type_qualifier_list','type_qualifier_list_opt',1,'p_type_qualifier_list_opt','plyparser.py',44),
- ('direct_id_declarator -> ID','direct_id_declarator',1,'p_direct_id_declarator_1','plyparser.py',126),
- ('direct_id_declarator -> LPAREN id_declarator RPAREN','direct_id_declarator',3,'p_direct_id_declarator_2','plyparser.py',126),
- ('direct_id_declarator -> direct_id_declarator LBRACKET type_qualifier_list_opt assignment_expression_opt RBRACKET','direct_id_declarator',5,'p_direct_id_declarator_3','plyparser.py',126),
- ('direct_id_declarator -> direct_id_declarator LBRACKET STATIC type_qualifier_list_opt assignment_expression RBRACKET','direct_id_declarator',6,'p_direct_id_declarator_4','plyparser.py',126),
- ('direct_id_declarator -> direct_id_declarator LBRACKET type_qualifier_list STATIC assignment_expression RBRACKET','direct_id_declarator',6,'p_direct_id_declarator_4','plyparser.py',127),
- ('direct_id_declarator -> direct_id_declarator LBRACKET type_qualifier_list_opt TIMES RBRACKET','direct_id_declarator',5,'p_direct_id_declarator_5','plyparser.py',126),
- ('direct_id_declarator -> direct_id_declarator LPAREN parameter_type_list RPAREN','direct_id_declarator',4,'p_direct_id_declarator_6','plyparser.py',126),
- ('direct_id_declarator -> direct_id_declarator LPAREN identifier_list_opt RPAREN','direct_id_declarator',4,'p_direct_id_declarator_6','plyparser.py',127),
- ('direct_typeid_declarator -> TYPEID','direct_typeid_declarator',1,'p_direct_typeid_declarator_1','plyparser.py',126),
- ('direct_typeid_declarator -> LPAREN typeid_declarator RPAREN','direct_typeid_declarator',3,'p_direct_typeid_declarator_2','plyparser.py',126),
- ('direct_typeid_declarator -> direct_typeid_declarator LBRACKET type_qualifier_list_opt assignment_expression_opt RBRACKET','direct_typeid_declarator',5,'p_direct_typeid_declarator_3','plyparser.py',126),
- ('direct_typeid_declarator -> direct_typeid_declarator LBRACKET STATIC type_qualifier_list_opt assignment_expression RBRACKET','direct_typeid_declarator',6,'p_direct_typeid_declarator_4','plyparser.py',126),
- ('direct_typeid_declarator -> direct_typeid_declarator LBRACKET type_qualifier_list STATIC assignment_expression RBRACKET','direct_typeid_declarator',6,'p_direct_typeid_declarator_4','plyparser.py',127),
- ('direct_typeid_declarator -> direct_typeid_declarator LBRACKET type_qualifier_list_opt TIMES RBRACKET','direct_typeid_declarator',5,'p_direct_typeid_declarator_5','plyparser.py',126),
- ('direct_typeid_declarator -> direct_typeid_declarator LPAREN parameter_type_list RPAREN','direct_typeid_declarator',4,'p_direct_typeid_declarator_6','plyparser.py',126),
- ('direct_typeid_declarator -> direct_typeid_declarator LPAREN identifier_list_opt RPAREN','direct_typeid_declarator',4,'p_direct_typeid_declarator_6','plyparser.py',127),
- ('direct_typeid_noparen_declarator -> TYPEID','direct_typeid_noparen_declarator',1,'p_direct_typeid_noparen_declarator_1','plyparser.py',126),
- ('direct_typeid_noparen_declarator -> direct_typeid_noparen_declarator LBRACKET type_qualifier_list_opt assignment_expression_opt RBRACKET','direct_typeid_noparen_declarator',5,'p_direct_typeid_noparen_declarator_3','plyparser.py',126),
- ('direct_typeid_noparen_declarator -> direct_typeid_noparen_declarator LBRACKET STATIC type_qualifier_list_opt assignment_expression RBRACKET','direct_typeid_noparen_declarator',6,'p_direct_typeid_noparen_declarator_4','plyparser.py',126),
- ('direct_typeid_noparen_declarator -> direct_typeid_noparen_declarator LBRACKET type_qualifier_list STATIC assignment_expression RBRACKET','direct_typeid_noparen_declarator',6,'p_direct_typeid_noparen_declarator_4','plyparser.py',127),
- ('direct_typeid_noparen_declarator -> direct_typeid_noparen_declarator LBRACKET type_qualifier_list_opt TIMES RBRACKET','direct_typeid_noparen_declarator',5,'p_direct_typeid_noparen_declarator_5','plyparser.py',126),
- ('direct_typeid_noparen_declarator -> direct_typeid_noparen_declarator LPAREN parameter_type_list RPAREN','direct_typeid_noparen_declarator',4,'p_direct_typeid_noparen_declarator_6','plyparser.py',126),
- ('direct_typeid_noparen_declarator -> direct_typeid_noparen_declarator LPAREN identifier_list_opt RPAREN','direct_typeid_noparen_declarator',4,'p_direct_typeid_noparen_declarator_6','plyparser.py',127),
- ('id_declarator -> direct_id_declarator','id_declarator',1,'p_id_declarator_1','plyparser.py',126),
- ('id_declarator -> pointer direct_id_declarator','id_declarator',2,'p_id_declarator_2','plyparser.py',126),
- ('typeid_declarator -> direct_typeid_declarator','typeid_declarator',1,'p_typeid_declarator_1','plyparser.py',126),
- ('typeid_declarator -> pointer direct_typeid_declarator','typeid_declarator',2,'p_typeid_declarator_2','plyparser.py',126),
- ('typeid_noparen_declarator -> direct_typeid_noparen_declarator','typeid_noparen_declarator',1,'p_typeid_noparen_declarator_1','plyparser.py',126),
- ('typeid_noparen_declarator -> pointer direct_typeid_noparen_declarator','typeid_noparen_declarator',2,'p_typeid_noparen_declarator_2','plyparser.py',126),
+ ('abstract_declarator_opt -> empty','abstract_declarator_opt',1,'p_abstract_declarator_opt','plyparser.py',43),
+ ('abstract_declarator_opt -> abstract_declarator','abstract_declarator_opt',1,'p_abstract_declarator_opt','plyparser.py',44),
+ ('assignment_expression_opt -> empty','assignment_expression_opt',1,'p_assignment_expression_opt','plyparser.py',43),
+ ('assignment_expression_opt -> assignment_expression','assignment_expression_opt',1,'p_assignment_expression_opt','plyparser.py',44),
+ ('block_item_list_opt -> empty','block_item_list_opt',1,'p_block_item_list_opt','plyparser.py',43),
+ ('block_item_list_opt -> block_item_list','block_item_list_opt',1,'p_block_item_list_opt','plyparser.py',44),
+ ('declaration_list_opt -> empty','declaration_list_opt',1,'p_declaration_list_opt','plyparser.py',43),
+ ('declaration_list_opt -> declaration_list','declaration_list_opt',1,'p_declaration_list_opt','plyparser.py',44),
+ ('declaration_specifiers_no_type_opt -> empty','declaration_specifiers_no_type_opt',1,'p_declaration_specifiers_no_type_opt','plyparser.py',43),
+ ('declaration_specifiers_no_type_opt -> declaration_specifiers_no_type','declaration_specifiers_no_type_opt',1,'p_declaration_specifiers_no_type_opt','plyparser.py',44),
+ ('designation_opt -> empty','designation_opt',1,'p_designation_opt','plyparser.py',43),
+ ('designation_opt -> designation','designation_opt',1,'p_designation_opt','plyparser.py',44),
+ ('expression_opt -> empty','expression_opt',1,'p_expression_opt','plyparser.py',43),
+ ('expression_opt -> expression','expression_opt',1,'p_expression_opt','plyparser.py',44),
+ ('id_init_declarator_list_opt -> empty','id_init_declarator_list_opt',1,'p_id_init_declarator_list_opt','plyparser.py',43),
+ ('id_init_declarator_list_opt -> id_init_declarator_list','id_init_declarator_list_opt',1,'p_id_init_declarator_list_opt','plyparser.py',44),
+ ('identifier_list_opt -> empty','identifier_list_opt',1,'p_identifier_list_opt','plyparser.py',43),
+ ('identifier_list_opt -> identifier_list','identifier_list_opt',1,'p_identifier_list_opt','plyparser.py',44),
+ ('init_declarator_list_opt -> empty','init_declarator_list_opt',1,'p_init_declarator_list_opt','plyparser.py',43),
+ ('init_declarator_list_opt -> init_declarator_list','init_declarator_list_opt',1,'p_init_declarator_list_opt','plyparser.py',44),
+ ('initializer_list_opt -> empty','initializer_list_opt',1,'p_initializer_list_opt','plyparser.py',43),
+ ('initializer_list_opt -> initializer_list','initializer_list_opt',1,'p_initializer_list_opt','plyparser.py',44),
+ ('parameter_type_list_opt -> empty','parameter_type_list_opt',1,'p_parameter_type_list_opt','plyparser.py',43),
+ ('parameter_type_list_opt -> parameter_type_list','parameter_type_list_opt',1,'p_parameter_type_list_opt','plyparser.py',44),
+ ('struct_declarator_list_opt -> empty','struct_declarator_list_opt',1,'p_struct_declarator_list_opt','plyparser.py',43),
+ ('struct_declarator_list_opt -> struct_declarator_list','struct_declarator_list_opt',1,'p_struct_declarator_list_opt','plyparser.py',44),
+ ('type_qualifier_list_opt -> empty','type_qualifier_list_opt',1,'p_type_qualifier_list_opt','plyparser.py',43),
+ ('type_qualifier_list_opt -> type_qualifier_list','type_qualifier_list_opt',1,'p_type_qualifier_list_opt','plyparser.py',44),
+ ('direct_id_declarator -> ID','direct_id_declarator',1,'p_direct_id_declarator_1','plyparser.py',126),
+ ('direct_id_declarator -> LPAREN id_declarator RPAREN','direct_id_declarator',3,'p_direct_id_declarator_2','plyparser.py',126),
+ ('direct_id_declarator -> direct_id_declarator LBRACKET type_qualifier_list_opt assignment_expression_opt RBRACKET','direct_id_declarator',5,'p_direct_id_declarator_3','plyparser.py',126),
+ ('direct_id_declarator -> direct_id_declarator LBRACKET STATIC type_qualifier_list_opt assignment_expression RBRACKET','direct_id_declarator',6,'p_direct_id_declarator_4','plyparser.py',126),
+ ('direct_id_declarator -> direct_id_declarator LBRACKET type_qualifier_list STATIC assignment_expression RBRACKET','direct_id_declarator',6,'p_direct_id_declarator_4','plyparser.py',127),
+ ('direct_id_declarator -> direct_id_declarator LBRACKET type_qualifier_list_opt TIMES RBRACKET','direct_id_declarator',5,'p_direct_id_declarator_5','plyparser.py',126),
+ ('direct_id_declarator -> direct_id_declarator LPAREN parameter_type_list RPAREN','direct_id_declarator',4,'p_direct_id_declarator_6','plyparser.py',126),
+ ('direct_id_declarator -> direct_id_declarator LPAREN identifier_list_opt RPAREN','direct_id_declarator',4,'p_direct_id_declarator_6','plyparser.py',127),
+ ('direct_typeid_declarator -> TYPEID','direct_typeid_declarator',1,'p_direct_typeid_declarator_1','plyparser.py',126),
+ ('direct_typeid_declarator -> LPAREN typeid_declarator RPAREN','direct_typeid_declarator',3,'p_direct_typeid_declarator_2','plyparser.py',126),
+ ('direct_typeid_declarator -> direct_typeid_declarator LBRACKET type_qualifier_list_opt assignment_expression_opt RBRACKET','direct_typeid_declarator',5,'p_direct_typeid_declarator_3','plyparser.py',126),
+ ('direct_typeid_declarator -> direct_typeid_declarator LBRACKET STATIC type_qualifier_list_opt assignment_expression RBRACKET','direct_typeid_declarator',6,'p_direct_typeid_declarator_4','plyparser.py',126),
+ ('direct_typeid_declarator -> direct_typeid_declarator LBRACKET type_qualifier_list STATIC assignment_expression RBRACKET','direct_typeid_declarator',6,'p_direct_typeid_declarator_4','plyparser.py',127),
+ ('direct_typeid_declarator -> direct_typeid_declarator LBRACKET type_qualifier_list_opt TIMES RBRACKET','direct_typeid_declarator',5,'p_direct_typeid_declarator_5','plyparser.py',126),
+ ('direct_typeid_declarator -> direct_typeid_declarator LPAREN parameter_type_list RPAREN','direct_typeid_declarator',4,'p_direct_typeid_declarator_6','plyparser.py',126),
+ ('direct_typeid_declarator -> direct_typeid_declarator LPAREN identifier_list_opt RPAREN','direct_typeid_declarator',4,'p_direct_typeid_declarator_6','plyparser.py',127),
+ ('direct_typeid_noparen_declarator -> TYPEID','direct_typeid_noparen_declarator',1,'p_direct_typeid_noparen_declarator_1','plyparser.py',126),
+ ('direct_typeid_noparen_declarator -> direct_typeid_noparen_declarator LBRACKET type_qualifier_list_opt assignment_expression_opt RBRACKET','direct_typeid_noparen_declarator',5,'p_direct_typeid_noparen_declarator_3','plyparser.py',126),
+ ('direct_typeid_noparen_declarator -> direct_typeid_noparen_declarator LBRACKET STATIC type_qualifier_list_opt assignment_expression RBRACKET','direct_typeid_noparen_declarator',6,'p_direct_typeid_noparen_declarator_4','plyparser.py',126),
+ ('direct_typeid_noparen_declarator -> direct_typeid_noparen_declarator LBRACKET type_qualifier_list STATIC assignment_expression RBRACKET','direct_typeid_noparen_declarator',6,'p_direct_typeid_noparen_declarator_4','plyparser.py',127),
+ ('direct_typeid_noparen_declarator -> direct_typeid_noparen_declarator LBRACKET type_qualifier_list_opt TIMES RBRACKET','direct_typeid_noparen_declarator',5,'p_direct_typeid_noparen_declarator_5','plyparser.py',126),
+ ('direct_typeid_noparen_declarator -> direct_typeid_noparen_declarator LPAREN parameter_type_list RPAREN','direct_typeid_noparen_declarator',4,'p_direct_typeid_noparen_declarator_6','plyparser.py',126),
+ ('direct_typeid_noparen_declarator -> direct_typeid_noparen_declarator LPAREN identifier_list_opt RPAREN','direct_typeid_noparen_declarator',4,'p_direct_typeid_noparen_declarator_6','plyparser.py',127),
+ ('id_declarator -> direct_id_declarator','id_declarator',1,'p_id_declarator_1','plyparser.py',126),
+ ('id_declarator -> pointer direct_id_declarator','id_declarator',2,'p_id_declarator_2','plyparser.py',126),
+ ('typeid_declarator -> direct_typeid_declarator','typeid_declarator',1,'p_typeid_declarator_1','plyparser.py',126),
+ ('typeid_declarator -> pointer direct_typeid_declarator','typeid_declarator',2,'p_typeid_declarator_2','plyparser.py',126),
+ ('typeid_noparen_declarator -> direct_typeid_noparen_declarator','typeid_noparen_declarator',1,'p_typeid_noparen_declarator_1','plyparser.py',126),
+ ('typeid_noparen_declarator -> pointer direct_typeid_noparen_declarator','typeid_noparen_declarator',2,'p_typeid_noparen_declarator_2','plyparser.py',126),
('translation_unit_or_empty -> translation_unit','translation_unit_or_empty',1,'p_translation_unit_or_empty','c_parser.py',509),
('translation_unit_or_empty -> empty','translation_unit_or_empty',1,'p_translation_unit_or_empty','c_parser.py',510),
('translation_unit -> external_declaration','translation_unit',1,'p_translation_unit_1','c_parser.py',518),