+++ /dev/null
-\r
-/* Parser-tokenizer link implementation */\r
-\r
-#include "pgenheaders.h"\r
-#include "tokenizer.h"\r
-#include "node.h"\r
-#include "grammar.h"\r
-#include "parser.h"\r
-#include "parsetok.h"\r
-#include "errcode.h"\r
-#include "graminit.h"\r
-\r
-int Py_TabcheckFlag;\r
-\r
-\r
-/* Forward */\r
-static node *parsetok(struct tok_state *, grammar *, int, perrdetail *, int *);\r
-static void initerr(perrdetail *err_ret, const char* filename);\r
-\r
-/* Parse input coming from a string. Return error code, print some errors. */\r
-node *\r
-PyParser_ParseString(const char *s, grammar *g, int start, perrdetail *err_ret)\r
-{\r
- return PyParser_ParseStringFlagsFilename(s, NULL, g, start, err_ret, 0);\r
-}\r
-\r
-node *\r
-PyParser_ParseStringFlags(const char *s, grammar *g, int start,\r
- perrdetail *err_ret, int flags)\r
-{\r
- return PyParser_ParseStringFlagsFilename(s, NULL,\r
- g, start, err_ret, flags);\r
-}\r
-\r
-node *\r
-PyParser_ParseStringFlagsFilename(const char *s, const char *filename,\r
- grammar *g, int start,\r
- perrdetail *err_ret, int flags)\r
-{\r
- int iflags = flags;\r
- return PyParser_ParseStringFlagsFilenameEx(s, filename, g, start,\r
- err_ret, &iflags);\r
-}\r
-\r
-node *\r
-PyParser_ParseStringFlagsFilenameEx(const char *s, const char *filename,\r
- grammar *g, int start,\r
- perrdetail *err_ret, int *flags)\r
-{\r
- struct tok_state *tok;\r
-\r
- initerr(err_ret, filename);\r
-\r
- if ((tok = PyTokenizer_FromString(s, start == file_input)) == NULL) {\r
- err_ret->error = PyErr_Occurred() ? E_DECODE : E_NOMEM;\r
- return NULL;\r
- }\r
-\r
- tok->filename = filename ? filename : "<string>";\r
- if (Py_TabcheckFlag || Py_VerboseFlag) {\r
- tok->altwarning = (tok->filename != NULL);\r
- if (Py_TabcheckFlag >= 2)\r
- tok->alterror++;\r
- }\r
-\r
- return parsetok(tok, g, start, err_ret, flags);\r
-}\r
-\r
-/* Parse input coming from a file. Return error code, print some errors. */\r
-\r
-node *\r
-PyParser_ParseFile(FILE *fp, const char *filename, grammar *g, int start,\r
- char *ps1, char *ps2, perrdetail *err_ret)\r
-{\r
- return PyParser_ParseFileFlags(fp, filename, g, start, ps1, ps2,\r
- err_ret, 0);\r
-}\r
-\r
-node *\r
-PyParser_ParseFileFlags(FILE *fp, const char *filename, grammar *g, int start,\r
- char *ps1, char *ps2, perrdetail *err_ret, int flags)\r
-{\r
- int iflags = flags;\r
- return PyParser_ParseFileFlagsEx(fp, filename, g, start, ps1, ps2, err_ret, &iflags);\r
-}\r
-\r
-node *\r
-PyParser_ParseFileFlagsEx(FILE *fp, const char *filename, grammar *g, int start,\r
- char *ps1, char *ps2, perrdetail *err_ret, int *flags)\r
-{\r
- struct tok_state *tok;\r
-\r
- initerr(err_ret, filename);\r
-\r
- if ((tok = PyTokenizer_FromFile(fp, ps1, ps2)) == NULL) {\r
- err_ret->error = E_NOMEM;\r
- return NULL;\r
- }\r
- tok->filename = filename;\r
- if (Py_TabcheckFlag || Py_VerboseFlag) {\r
- tok->altwarning = (filename != NULL);\r
- if (Py_TabcheckFlag >= 2)\r
- tok->alterror++;\r
- }\r
-\r
- return parsetok(tok, g, start, err_ret, flags);\r
-}\r
-\r
/* NOTE(review): dead code, compiled out with #if 0.  These helpers once
   emitted warnings that 'with'/'as' would become reserved keywords in
   Python 2.6.  Retained for reference only; candidate for deletion. */
#if 0
static char with_msg[] =
"%s:%d: Warning: 'with' will become a reserved keyword in Python 2.6\n";

static char as_msg[] =
"%s:%d: Warning: 'as' will become a reserved keyword in Python 2.6\n";

static void
warn(const char *msg, const char *filename, int lineno)
{
    if (filename == NULL)
        filename = "<string>";
    PySys_WriteStderr(msg, filename, lineno);
}
#endif
-\r
/* Parse input coming from the given tokenizer structure.
   Return error code.

   Consumes (frees) `tok` on every path.  On success returns the parse
   tree, possibly wrapped in an encoding_decl node; on failure returns
   NULL with the details recorded in `err_ret`.  When the parser tracks
   future statements, `*flags` is read for PyPARSE_* options on entry and
   overwritten with the parser's resulting flags on exit. */
static node *
parsetok(struct tok_state *tok, grammar *g, int start, perrdetail *err_ret,
         int *flags)
{
    parser_state *ps;
    node *n;
    int started = 0;   /* set once a real (non-ENDMARKER) token is seen */

    if ((ps = PyParser_New(g, start)) == NULL) {
        fprintf(stderr, "no mem for new parser\n");
        err_ret->error = E_NOMEM;
        PyTokenizer_Free(tok);
        return NULL;
    }
#ifdef PY_PARSER_REQUIRES_FUTURE_KEYWORD
    /* Seed the parser with the caller's future-statement flags. */
    if (*flags & PyPARSE_PRINT_IS_FUNCTION) {
        ps->p_flags |= CO_FUTURE_PRINT_FUNCTION;
    }
    if (*flags & PyPARSE_UNICODE_LITERALS) {
        ps->p_flags |= CO_FUTURE_UNICODE_LITERALS;
    }

#endif

    /* Pull tokens one at a time and feed them to the parser until it
       reports completion (E_DONE) or an error occurs. */
    for (;;) {
        char *a, *b;      /* token start/end inside the tokenizer's buffer */
        int type;
        size_t len;
        char *str;        /* heap copy of the token text */
        int col_offset;

        type = PyTokenizer_Get(tok, &a, &b);
        if (type == ERRORTOKEN) {
            err_ret->error = tok->done;
            break;
        }
        if (type == ENDMARKER && started) {
            type = NEWLINE; /* Add an extra newline */
            started = 0;
            /* Add the right number of dedent tokens,
               except if a certain flag is given --
               codeop.py uses this. */
            if (tok->indent &&
                !(*flags & PyPARSE_DONT_IMPLY_DEDENT))
            {
                tok->pendin = -tok->indent;
                tok->indent = 0;
            }
        }
        else
            started = 1;
        len = b - a; /* XXX this may compute NULL - NULL */
        str = (char *) PyObject_MALLOC(len + 1);
        if (str == NULL) {
            fprintf(stderr, "no mem for next token\n");
            err_ret->error = E_NOMEM;
            break;
        }
        if (len > 0)
            strncpy(str, a, len);
        str[len] = '\0';

#ifdef PY_PARSER_REQUIRES_FUTURE_KEYWORD
#endif
        if (a >= tok->line_start)
            col_offset = a - tok->line_start;
        else
            col_offset = -1;

        /* On success PyParser_AddToken takes ownership of `str`; on any
           failure other than E_DONE we must free it ourselves. */
        if ((err_ret->error =
             PyParser_AddToken(ps, (int)type, str, tok->lineno, col_offset,
                               &(err_ret->expected))) != E_OK) {
            if (err_ret->error != E_DONE) {
                PyObject_FREE(str);
                err_ret->token = type;
            }
            break;
        }
    }

    if (err_ret->error == E_DONE) {
        /* Steal the finished tree from the parser state before deleting it. */
        n = ps->p_tree;
        ps->p_tree = NULL;
    }
    else
        n = NULL;

#ifdef PY_PARSER_REQUIRES_FUTURE_KEYWORD
    /* Report the parser's resulting future-statement flags back. */
    *flags = ps->p_flags;
#endif
    PyParser_Delete(ps);

    if (n == NULL) {
        /* Failure path: record the location and offending source text. */
        if (tok->lineno <= 1 && tok->done == E_EOF)
            err_ret->error = E_EOF;
        err_ret->lineno = tok->lineno;
        if (tok->buf != NULL) {
            char *text = NULL;
            size_t len;
            assert(tok->cur - tok->buf < INT_MAX);
            err_ret->offset = (int)(tok->cur - tok->buf);
            len = tok->inp - tok->buf;
#ifdef Py_USING_UNICODE
            text = PyTokenizer_RestoreEncoding(tok, len, &err_ret->offset);

#endif
            if (text == NULL) {
                /* Fall back to a raw copy of the buffer contents. */
                text = (char *) PyObject_MALLOC(len + 1);
                if (text != NULL) {
                    if (len > 0)
                        strncpy(text, tok->buf, len);
                    text[len] = '\0';
                }
            }
            err_ret->text = text;  /* owned by err_ret; may be NULL on OOM */
        }
    } else if (tok->encoding != NULL) {
        /* 'nodes->n_str' uses PyObject_*, while 'tok->encoding' was
         * allocated using PyMem_
         */
        node* r = PyNode_New(encoding_decl);
        if (r)
            r->n_str = PyObject_MALLOC(strlen(tok->encoding)+1);
        if (!r || !r->n_str) {
            err_ret->error = E_NOMEM;
            if (r)
                PyObject_FREE(r);
            n = NULL;
            goto done;
        }
        strcpy(r->n_str, tok->encoding);
        PyMem_FREE(tok->encoding);
        tok->encoding = NULL;
        /* Wrap the tree in an encoding_decl node carrying the source
           encoding name; the original tree becomes its only child. */
        r->n_nchildren = 1;
        r->n_child = n;
        n = r;
    }

done:
    PyTokenizer_Free(tok);

    return n;
}
-\r
-static void\r
-initerr(perrdetail *err_ret, const char *filename)\r
-{\r
- err_ret->error = E_OK;\r
- err_ret->filename = filename;\r
- err_ret->lineno = 0;\r
- err_ret->offset = 0;\r
- err_ret->text = NULL;\r
- err_ret->token = -1;\r
- err_ret->expected = -1;\r
-}\r