author     shadchin <shadchin@yandex-team.ru>            2022-02-10 16:44:39 +0300
committer  Daniil Cherednik <dcherednik@yandex-team.ru>  2022-02-10 16:44:39 +0300
commit     e9656aae26e0358d5378e5b63dcac5c8dbe0e4d0 (patch)
tree       64175d5cadab313b3e7039ebaa06c5bc3295e274 /contrib/tools/python3/src/Parser/parsetok.c
parent     2598ef1d0aee359b4b6d5fdd1758916d5907d04f (diff)
download   ydb-e9656aae26e0358d5378e5b63dcac5c8dbe0e4d0.tar.gz
Restoring authorship annotation for <shadchin@yandex-team.ru>. Commit 2 of 2.
Diffstat (limited to 'contrib/tools/python3/src/Parser/parsetok.c')
-rw-r--r--  contrib/tools/python3/src/Parser/parsetok.c | 330
1 file changed, 165 insertions(+), 165 deletions(-)
diff --git a/contrib/tools/python3/src/Parser/parsetok.c b/contrib/tools/python3/src/Parser/parsetok.c
index 1000887dbf..1ecb2c4a16 100644
--- a/contrib/tools/python3/src/Parser/parsetok.c
+++ b/contrib/tools/python3/src/Parser/parsetok.c
@@ -1,7 +1,7 @@
/* Parser-tokenizer link implementation */
-#include "Python.h"
+#include "Python.h"
#include "tokenizer.h"
#include "node.h"
#include "grammar.h"
@@ -15,51 +15,51 @@
static node *parsetok(struct tok_state *, grammar *, int, perrdetail *, int *);
static int initerr(perrdetail *err_ret, PyObject * filename);
-typedef struct {
- struct {
- int lineno;
- char *comment;
- } *items;
- size_t size;
- size_t num_items;
-} growable_comment_array;
-
-static int
-growable_comment_array_init(growable_comment_array *arr, size_t initial_size) {
- assert(initial_size > 0);
- arr->items = malloc(initial_size * sizeof(*arr->items));
- arr->size = initial_size;
- arr->num_items = 0;
-
- return arr->items != NULL;
-}
-
-static int
-growable_comment_array_add(growable_comment_array *arr, int lineno, char *comment) {
- if (arr->num_items >= arr->size) {
- size_t new_size = arr->size * 2;
- void *new_items_array = realloc(arr->items, new_size * sizeof(*arr->items));
- if (!new_items_array) {
- return 0;
- }
- arr->items = new_items_array;
- arr->size = new_size;
- }
-
- arr->items[arr->num_items].lineno = lineno;
- arr->items[arr->num_items].comment = comment;
- arr->num_items++;
- return 1;
-}
-
-static void
-growable_comment_array_deallocate(growable_comment_array *arr) {
- for (unsigned i = 0; i < arr->num_items; i++) {
- PyObject_FREE(arr->items[i].comment);
- }
- free(arr->items);
-}
-
+typedef struct {
+ struct {
+ int lineno;
+ char *comment;
+ } *items;
+ size_t size;
+ size_t num_items;
+} growable_comment_array;
+
+static int
+growable_comment_array_init(growable_comment_array *arr, size_t initial_size) {
+ assert(initial_size > 0);
+ arr->items = malloc(initial_size * sizeof(*arr->items));
+ arr->size = initial_size;
+ arr->num_items = 0;
+
+ return arr->items != NULL;
+}
+
+static int
+growable_comment_array_add(growable_comment_array *arr, int lineno, char *comment) {
+ if (arr->num_items >= arr->size) {
+ size_t new_size = arr->size * 2;
+ void *new_items_array = realloc(arr->items, new_size * sizeof(*arr->items));
+ if (!new_items_array) {
+ return 0;
+ }
+ arr->items = new_items_array;
+ arr->size = new_size;
+ }
+
+ arr->items[arr->num_items].lineno = lineno;
+ arr->items[arr->num_items].comment = comment;
+ arr->num_items++;
+ return 1;
+}
+
+static void
+growable_comment_array_deallocate(growable_comment_array *arr) {
+ for (unsigned i = 0; i < arr->num_items; i++) {
+ PyObject_FREE(arr->items[i].comment);
+ }
+ free(arr->items);
+}
+
/* Parse input coming from a string. Return error code, print some errors. */
node *
PyParser_ParseString(const char *s, grammar *g, int start, perrdetail *err_ret)
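
Note (not part of the commit): the block re-added above is a textbook doubling
dynamic array. A minimal standalone sketch of the same lifecycle, with the
hypothetical names arr_init/arr_add, and plain malloc/strdup/free standing in
for the PyObject_* allocators the real code uses:

    /* Hypothetical driver: exercises the same init -> add (with doubling)
     * -> free lifecycle that parsetok() relies on. */
    #include <assert.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    typedef struct {
        struct { int lineno; char *comment; } *items;
        size_t size;       /* capacity */
        size_t num_items;  /* elements in use */
    } growable_comment_array;

    static int arr_init(growable_comment_array *arr, size_t initial_size) {
        assert(initial_size > 0);
        arr->items = malloc(initial_size * sizeof(*arr->items));
        arr->size = initial_size;
        arr->num_items = 0;
        return arr->items != NULL;
    }

    static int arr_add(growable_comment_array *arr, int lineno, char *comment) {
        if (arr->num_items >= arr->size) {
            size_t new_size = arr->size * 2;  /* doubling => amortized O(1) appends */
            void *p = realloc(arr->items, new_size * sizeof(*arr->items));
            if (!p) return 0;                 /* old buffer is still valid on failure */
            arr->items = p;
            arr->size = new_size;
        }
        arr->items[arr->num_items].lineno = lineno;
        arr->items[arr->num_items].comment = comment;  /* array takes ownership */
        arr->num_items++;
        return 1;
    }

    int main(void) {
        growable_comment_array arr;
        if (!arr_init(&arr, 2)) return 1;
        for (int i = 1; i <= 5; i++)                   /* forces 2 -> 4 -> 8 growth */
            if (!arr_add(&arr, i, strdup("# type: ignore"))) return 1;
        printf("%zu items, capacity %zu\n", arr.num_items, arr.size);  /* 5, 8 */
        for (size_t i = 0; i < arr.num_items; i++) free(arr.items[i].comment);
        free(arr.items);
        return 0;
    }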
@@ -96,11 +96,11 @@ PyParser_ParseStringObject(const char *s, PyObject *filename,
if (initerr(err_ret, filename) < 0)
return NULL;
- if (PySys_Audit("compile", "yO", s, err_ret->filename) < 0) {
- err_ret->error = E_ERROR;
- return NULL;
- }
-
+ if (PySys_Audit("compile", "yO", s, err_ret->filename) < 0) {
+ err_ret->error = E_ERROR;
+ return NULL;
+ }
+
if (*flags & PyPARSE_IGNORE_COOKIE)
tok = PyTokenizer_FromUTF8(s, exec_input);
else
@@ -109,14 +109,14 @@ PyParser_ParseStringObject(const char *s, PyObject *filename,
err_ret->error = PyErr_Occurred() ? E_DECODE : E_NOMEM;
return NULL;
}
- if (*flags & PyPARSE_TYPE_COMMENTS) {
- tok->type_comments = 1;
- }
+ if (*flags & PyPARSE_TYPE_COMMENTS) {
+ tok->type_comments = 1;
+ }
Py_INCREF(err_ret->filename);
tok->filename = err_ret->filename;
- if (*flags & PyPARSE_ASYNC_HACKS)
- tok->async_hacks = 1;
+ if (*flags & PyPARSE_ASYNC_HACKS)
+ tok->async_hacks = 1;
return parsetok(tok, g, start, err_ret, flags);
}
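
Side note: the PySys_Audit("compile", ...) call restored above raises the
"compile" auditing event before tokenization begins. A hedged sketch of
observing that event from an embedding application (assumes CPython >= 3.8;
the hook must be registered before Py_Initialize()):

    /* Sketch only: prints every "compile" audit event the interpreter fires. */
    #include <Python.h>
    #include <stdio.h>
    #include <string.h>

    static int
    audit_hook(const char *event, PyObject *args, void *userData)
    {
        (void)userData;
        if (strcmp(event, "compile") == 0) {
            PyObject *repr = PyObject_Repr(args);   /* args is (source, filename) */
            if (repr != NULL) {
                const char *s = PyUnicode_AsUTF8(repr);
                fprintf(stderr, "compile audited: %s\n", s ? s : "?");
                Py_DECREF(repr);
            }
            else {
                PyErr_Clear();
            }
        }
        return 0;   /* 0 lets the operation proceed; non-zero (with an
                       exception set) rejects it */
    }

    int main(void)
    {
        PySys_AddAuditHook(audit_hook, NULL);
        Py_Initialize();
        PyRun_SimpleString("x = 1 + 1");            /* triggers a compile event */
        return Py_FinalizeEx() < 0 ? 1 : 0;
    }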
@@ -172,17 +172,17 @@ PyParser_ParseFileObject(FILE *fp, PyObject *filename,
if (initerr(err_ret, filename) < 0)
return NULL;
- if (PySys_Audit("compile", "OO", Py_None, err_ret->filename) < 0) {
- return NULL;
- }
-
+ if (PySys_Audit("compile", "OO", Py_None, err_ret->filename) < 0) {
+ return NULL;
+ }
+
if ((tok = PyTokenizer_FromFile(fp, enc, ps1, ps2)) == NULL) {
err_ret->error = E_NOMEM;
return NULL;
}
- if (*flags & PyPARSE_TYPE_COMMENTS) {
- tok->type_comments = 1;
- }
+ if (*flags & PyPARSE_TYPE_COMMENTS) {
+ tok->type_comments = 1;
+ }
Py_INCREF(err_ret->filename);
tok->filename = err_ret->filename;
return parsetok(tok, g, start, err_ret, flags);
@@ -219,39 +219,39 @@ parsetok(struct tok_state *tok, grammar *g, int start, perrdetail *err_ret,
parser_state *ps;
node *n;
int started = 0;
- int col_offset, end_col_offset;
- growable_comment_array type_ignores;
-
- if (!growable_comment_array_init(&type_ignores, 10)) {
- err_ret->error = E_NOMEM;
- PyTokenizer_Free(tok);
- return NULL;
- }
-
+ int col_offset, end_col_offset;
+ growable_comment_array type_ignores;
+
+ if (!growable_comment_array_init(&type_ignores, 10)) {
+ err_ret->error = E_NOMEM;
+ PyTokenizer_Free(tok);
+ return NULL;
+ }
+
if ((ps = PyParser_New(g, start)) == NULL) {
err_ret->error = E_NOMEM;
- growable_comment_array_deallocate(&type_ignores);
+ growable_comment_array_deallocate(&type_ignores);
PyTokenizer_Free(tok);
return NULL;
}
#ifdef PY_PARSER_REQUIRES_FUTURE_KEYWORD
if (*flags & PyPARSE_BARRY_AS_BDFL)
ps->p_flags |= CO_FUTURE_BARRY_AS_BDFL;
- if (*flags & PyPARSE_TYPE_COMMENTS)
- ps->p_flags |= PyCF_TYPE_COMMENTS;
+ if (*flags & PyPARSE_TYPE_COMMENTS)
+ ps->p_flags |= PyCF_TYPE_COMMENTS;
#endif
for (;;) {
- const char *a, *b;
+ const char *a, *b;
int type;
size_t len;
char *str;
- col_offset = -1;
- int lineno;
- const char *line_start;
+ col_offset = -1;
+ int lineno;
+ const char *line_start;
type = PyTokenizer_Get(tok, &a, &b);
-
+
len = (a != NULL && b != NULL) ? b - a : 0;
str = (char *) PyObject_MALLOC(len + 1);
if (str == NULL) {
@@ -279,65 +279,65 @@ parsetok(struct tok_state *tok, grammar *g, int start, perrdetail *err_ret,
}
}
#endif
-
- /* Nodes of type STRING, especially multi line strings
- must be handled differently in order to get both
- the starting line number and the column offset right.
- (cf. issue 16806) */
- lineno = type == STRING ? tok->first_lineno : tok->lineno;
- line_start = type == STRING ? tok->multi_line_start : tok->line_start;
- if (a != NULL && a >= line_start) {
- col_offset = Py_SAFE_DOWNCAST(a - line_start,
+
+ /* Nodes of type STRING, especially multi line strings
+ must be handled differently in order to get both
+ the starting line number and the column offset right.
+ (cf. issue 16806) */
+ lineno = type == STRING ? tok->first_lineno : tok->lineno;
+ line_start = type == STRING ? tok->multi_line_start : tok->line_start;
+ if (a != NULL && a >= line_start) {
+ col_offset = Py_SAFE_DOWNCAST(a - line_start,
intptr_t, int);
}
else {
col_offset = -1;
}
- if (b != NULL && b >= tok->line_start) {
- end_col_offset = Py_SAFE_DOWNCAST(b - tok->line_start,
- intptr_t, int);
- }
- else {
- end_col_offset = -1;
- }
-
- if (type == TYPE_IGNORE) {
- if (!growable_comment_array_add(&type_ignores, tok->lineno, str)) {
- err_ret->error = E_NOMEM;
- break;
- }
- continue;
- }
-
- if (type == ERRORTOKEN) {
- err_ret->error = tok->done;
- break;
- }
- if (type == ENDMARKER && started) {
- type = NEWLINE; /* Add an extra newline */
- started = 0;
- /* Add the right number of dedent tokens,
- except if a certain flag is given --
- codeop.py uses this. */
- if (tok->indent &&
- !(*flags & PyPARSE_DONT_IMPLY_DEDENT))
- {
- tok->pendin = -tok->indent;
- tok->indent = 0;
- }
- }
- else {
- started = 1;
- }
-
+ if (b != NULL && b >= tok->line_start) {
+ end_col_offset = Py_SAFE_DOWNCAST(b - tok->line_start,
+ intptr_t, int);
+ }
+ else {
+ end_col_offset = -1;
+ }
+
+ if (type == TYPE_IGNORE) {
+ if (!growable_comment_array_add(&type_ignores, tok->lineno, str)) {
+ err_ret->error = E_NOMEM;
+ break;
+ }
+ continue;
+ }
+
+ if (type == ERRORTOKEN) {
+ err_ret->error = tok->done;
+ break;
+ }
+ if (type == ENDMARKER && started) {
+ type = NEWLINE; /* Add an extra newline */
+ started = 0;
+ /* Add the right number of dedent tokens,
+ except if a certain flag is given --
+ codeop.py uses this. */
+ if (tok->indent &&
+ !(*flags & PyPARSE_DONT_IMPLY_DEDENT))
+ {
+ tok->pendin = -tok->indent;
+ tok->indent = 0;
+ }
+ }
+ else {
+ started = 1;
+ }
+
if ((err_ret->error =
PyParser_AddToken(ps, (int)type, str,
- lineno, col_offset, tok->lineno, end_col_offset,
+ lineno, col_offset, tok->lineno, end_col_offset,
&(err_ret->expected))) != E_OK) {
- if (tok->done == E_EOF && !ISWHITESPACE(type)) {
- tok->done = E_SYNTAX;
- }
+ if (tok->done == E_EOF && !ISWHITESPACE(type)) {
+ tok->done = E_SYNTAX;
+ }
if (err_ret->error != E_DONE) {
PyObject_FREE(str);
err_ret->token = type;
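
Note: the column-offset code re-added in this hunk is plain pointer
subtraction: the token's start pointer minus the start-of-line pointer, or -1
when the token does not begin on the current line. A standalone illustration,
with Py_SAFE_DOWNCAST (which asserts the difference fits in an int) replaced
by a plain cast:

    /* Sketch: compute a 0-based column offset the way parsetok() does. */
    #include <stdio.h>

    int main(void)
    {
        const char *line_start = "x = 'hello'\n";
        const char *a = line_start + 4;        /* token starts at the quote */
        int col_offset = (a != NULL && a >= line_start)
                             ? (int)(a - line_start)   /* Py_SAFE_DOWNCAST in CPython */
                             : -1;
        printf("col_offset = %d\n", col_offset);       /* prints: col_offset = 4 */
        return 0;
    }

The err_ret->offset = col_offset + 1 assignment in the error path further down
is why reported syntax-error columns end up 1-based.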
@@ -350,36 +350,36 @@ parsetok(struct tok_state *tok, grammar *g, int start, perrdetail *err_ret,
n = ps->p_tree;
ps->p_tree = NULL;
- if (n->n_type == file_input) {
- /* Put type_ignore nodes in the ENDMARKER of file_input. */
- int num;
- node *ch;
- size_t i;
-
- num = NCH(n);
- ch = CHILD(n, num - 1);
- REQ(ch, ENDMARKER);
-
- for (i = 0; i < type_ignores.num_items; i++) {
- int res = PyNode_AddChild(ch, TYPE_IGNORE, type_ignores.items[i].comment,
- type_ignores.items[i].lineno, 0,
- type_ignores.items[i].lineno, 0);
- if (res != 0) {
- err_ret->error = res;
- PyNode_Free(n);
- n = NULL;
- break;
- }
- type_ignores.items[i].comment = NULL;
- }
- }
-
+ if (n->n_type == file_input) {
+ /* Put type_ignore nodes in the ENDMARKER of file_input. */
+ int num;
+ node *ch;
+ size_t i;
+
+ num = NCH(n);
+ ch = CHILD(n, num - 1);
+ REQ(ch, ENDMARKER);
+
+ for (i = 0; i < type_ignores.num_items; i++) {
+ int res = PyNode_AddChild(ch, TYPE_IGNORE, type_ignores.items[i].comment,
+ type_ignores.items[i].lineno, 0,
+ type_ignores.items[i].lineno, 0);
+ if (res != 0) {
+ err_ret->error = res;
+ PyNode_Free(n);
+ n = NULL;
+ break;
+ }
+ type_ignores.items[i].comment = NULL;
+ }
+ }
+
/* Check that the source for a single input statement really
is a single statement by looking at what is left in the
buffer after parsing. Trailing whitespace and comments
are OK. */
- if (err_ret->error == E_DONE && start == single_input) {
- const char *cur = tok->cur;
+ if (err_ret->error == E_DONE && start == single_input) {
+ const char *cur = tok->cur;
char c = *tok->cur;
for (;;) {
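
Aside: the loop beginning here (the rest is cut off by the diff context) scans
whatever the tokenizer left in the buffer and accepts only whitespace and
comments; anything else means the "single statement" actually contained more
than one. A loose standalone reconstruction of that scan; the function name
and the NUL-terminated-buffer assumption are mine, and the real code walks
tok->cur up to tok->inp and reports E_BADSINGLE instead of returning a flag:

    /* Sketch: is the rest of the buffer only whitespace and comments? */
    #include <stdio.h>

    static int only_trailing_trivia(const char *cur)
    {
        for (;;) {
            while (*cur == ' ' || *cur == '\t' || *cur == '\n' || *cur == '\f')
                cur++;                        /* skip whitespace, incl. form feed */
            if (*cur == '\0')
                return 1;                     /* nothing substantive left */
            if (*cur != '#')
                return 0;                     /* a second statement starts here */
            while (*cur != '\n' && *cur != '\0')
                cur++;                        /* comments run to end of line */
        }
    }

    int main(void)
    {
        printf("%d\n", only_trailing_trivia("   # just a comment\n"));  /* 1 */
        printf("%d\n", only_trailing_trivia("  y = 2\n"));              /* 0 */
        return 0;
    }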
@@ -405,8 +405,8 @@ parsetok(struct tok_state *tok, grammar *g, int start, perrdetail *err_ret,
else
n = NULL;
- growable_comment_array_deallocate(&type_ignores);
-
+ growable_comment_array_deallocate(&type_ignores);
+
#ifdef PY_PARSER_REQUIRES_FUTURE_KEYWORD
*flags = ps->p_flags;
#endif
@@ -419,10 +419,10 @@ parsetok(struct tok_state *tok, grammar *g, int start, perrdetail *err_ret,
if (tok->buf != NULL) {
size_t len;
assert(tok->cur - tok->buf < INT_MAX);
- /* if we've managed to parse a token, point the offset to its start,
- * else use the current reading position of the tokenizer
- */
- err_ret->offset = col_offset != -1 ? col_offset + 1 : ((int)(tok->cur - tok->buf));
+ /* if we've managed to parse a token, point the offset to its start,
+ * else use the current reading position of the tokenizer
+ */
+ err_ret->offset = col_offset != -1 ? col_offset + 1 : ((int)(tok->cur - tok->buf));
len = tok->inp - tok->buf;
err_ret->text = (char *) PyObject_MALLOC(len + 1);
if (err_ret->text != NULL) {
@@ -456,9 +456,9 @@ parsetok(struct tok_state *tok, grammar *g, int start, perrdetail *err_ret,
done:
PyTokenizer_Free(tok);
- if (n != NULL) {
- _PyNode_FinalizeEndPos(n);
- }
+ if (n != NULL) {
+ _PyNode_FinalizeEndPos(n);
+ }
return n;
}