AppPkg/Applications/Python/Python-2.7.10/Parser/parsetok.c

/* Parser-tokenizer link implementation */

#include "pgenheaders.h"
#include "tokenizer.h"
#include "node.h"
#include "grammar.h"
#include "parser.h"
#include "parsetok.h"
#include "errcode.h"
#include "graminit.h"

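/* Non-zero enables the tokenizer's tab/space consistency check (set via the
   interpreter's -t option); a value of 2 or more turns the warning into an
   error, as handled below. */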
int Py_TabcheckFlag;


/* Forward */
static node *parsetok(struct tok_state *, grammar *, int, perrdetail *, int *);
static void initerr(perrdetail *err_ret, const char* filename);

/* Parse input coming from a string. Return error code, print some errors. */
node *
PyParser_ParseString(const char *s, grammar *g, int start, perrdetail *err_ret)
{
    return PyParser_ParseStringFlagsFilename(s, NULL, g, start, err_ret, 0);
}

node *
PyParser_ParseStringFlags(const char *s, grammar *g, int start,
                          perrdetail *err_ret, int flags)
{
    return PyParser_ParseStringFlagsFilename(s, NULL,
                                             g, start, err_ret, flags);
}

node *
PyParser_ParseStringFlagsFilename(const char *s, const char *filename,
                                  grammar *g, int start,
                                  perrdetail *err_ret, int flags)
{
    int iflags = flags;
    return PyParser_ParseStringFlagsFilenameEx(s, filename, g, start,
                                               err_ret, &iflags);
}

node *
PyParser_ParseStringFlagsFilenameEx(const char *s, const char *filename,
                                    grammar *g, int start,
                                    perrdetail *err_ret, int *flags)
{
    struct tok_state *tok;

    initerr(err_ret, filename);

    if ((tok = PyTokenizer_FromString(s, start == file_input)) == NULL) {
        err_ret->error = PyErr_Occurred() ? E_DECODE : E_NOMEM;
        return NULL;
    }

    tok->filename = filename ? filename : "<string>";
    if (Py_TabcheckFlag || Py_VerboseFlag) {
        tok->altwarning = (tok->filename != NULL);
        if (Py_TabcheckFlag >= 2)
            tok->alterror++;
    }

    return parsetok(tok, g, start, err_ret, flags);
}
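
/* A minimal, disabled usage sketch for the string entry points above
   (hypothetical helper, not part of this module's API).  It assumes the
   generated grammar table _PyParser_Grammar from graminit.c and the
   file_input start symbol from graminit.h; the returned tree is released
   with PyNode_Free(). */
#if 0
static node *
example_parse_string(const char *source)
{
    perrdetail err;
    extern grammar _PyParser_Grammar;   /* defined in graminit.c */
    node *tree = PyParser_ParseString(source, &_PyParser_Grammar,
                                      file_input, &err);
    if (tree == NULL) {
        /* err.error holds an E_* code from errcode.h; err.lineno and
           err.offset locate the problem, and err.text (if non-NULL) is a
           copy of the offending input. */
        return NULL;
    }
    return tree;    /* caller frees with PyNode_Free(tree) */
}
#endif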

/* Parse input coming from a file. Return error code, print some errors. */

node *
PyParser_ParseFile(FILE *fp, const char *filename, grammar *g, int start,
                   char *ps1, char *ps2, perrdetail *err_ret)
{
    return PyParser_ParseFileFlags(fp, filename, g, start, ps1, ps2,
                                   err_ret, 0);
}

node *
PyParser_ParseFileFlags(FILE *fp, const char *filename, grammar *g, int start,
                        char *ps1, char *ps2, perrdetail *err_ret, int flags)
{
    int iflags = flags;
    return PyParser_ParseFileFlagsEx(fp, filename, g, start, ps1, ps2, err_ret, &iflags);
}

node *
PyParser_ParseFileFlagsEx(FILE *fp, const char *filename, grammar *g, int start,
                          char *ps1, char *ps2, perrdetail *err_ret, int *flags)
{
    struct tok_state *tok;

    initerr(err_ret, filename);

    if ((tok = PyTokenizer_FromFile(fp, ps1, ps2)) == NULL) {
        err_ret->error = E_NOMEM;
        return NULL;
    }
    tok->filename = filename;
    if (Py_TabcheckFlag || Py_VerboseFlag) {
        tok->altwarning = (filename != NULL);
        if (Py_TabcheckFlag >= 2)
            tok->alterror++;
    }

    return parsetok(tok, g, start, err_ret, flags);
}
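
/* A matching, disabled sketch for the file entry points (hypothetical
   helper).  ps1/ps2 are the interactive prompts the tokenizer prints for a
   tty stream; pass NULL for both when reading an ordinary file. */
#if 0
static node *
example_parse_file(FILE *fp, const char *name)
{
    perrdetail err;
    extern grammar _PyParser_Grammar;   /* defined in graminit.c */
    return PyParser_ParseFile(fp, name, &_PyParser_Grammar, file_input,
                              NULL, NULL, &err);
}
#endif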

#if 0
static char with_msg[] =
"%s:%d: Warning: 'with' will become a reserved keyword in Python 2.6\n";

static char as_msg[] =
"%s:%d: Warning: 'as' will become a reserved keyword in Python 2.6\n";

static void
warn(const char *msg, const char *filename, int lineno)
{
    if (filename == NULL)
        filename = "<string>";
    PySys_WriteStderr(msg, filename, lineno);
}
#endif

/* Parse input coming from the given tokenizer structure.
   Return error code. */

static node *
parsetok(struct tok_state *tok, grammar *g, int start, perrdetail *err_ret,
         int *flags)
{
    parser_state *ps;
    node *n;
    int started = 0;

    if ((ps = PyParser_New(g, start)) == NULL) {
        fprintf(stderr, "no mem for new parser\n");
        err_ret->error = E_NOMEM;
        PyTokenizer_Free(tok);
        return NULL;
    }
#ifdef PY_PARSER_REQUIRES_FUTURE_KEYWORD
    if (*flags & PyPARSE_PRINT_IS_FUNCTION) {
        ps->p_flags |= CO_FUTURE_PRINT_FUNCTION;
    }
    if (*flags & PyPARSE_UNICODE_LITERALS) {
        ps->p_flags |= CO_FUTURE_UNICODE_LITERALS;
    }

#endif

    for (;;) {
        char *a, *b;
        int type;
        size_t len;
        char *str;
        int col_offset;

        type = PyTokenizer_Get(tok, &a, &b);
        if (type == ERRORTOKEN) {
            err_ret->error = tok->done;
            break;
        }
        if (type == ENDMARKER && started) {
            type = NEWLINE; /* Add an extra newline */
            started = 0;
            /* Add the right number of dedent tokens,
               except if a certain flag is given --
               codeop.py uses this. */
            if (tok->indent &&
                !(*flags & PyPARSE_DONT_IMPLY_DEDENT))
            {
                tok->pendin = -tok->indent;
                tok->indent = 0;
            }
        }
        else
            started = 1;
        len = b - a; /* XXX this may compute NULL - NULL */
        str = (char *) PyObject_MALLOC(len + 1);
        if (str == NULL) {
            fprintf(stderr, "no mem for next token\n");
            err_ret->error = E_NOMEM;
            break;
        }
        if (len > 0)
            strncpy(str, a, len);
        str[len] = '\0';

#ifdef PY_PARSER_REQUIRES_FUTURE_KEYWORD
#endif
        if (a >= tok->line_start)
            col_offset = a - tok->line_start;
        else
            col_offset = -1;

        if ((err_ret->error =
             PyParser_AddToken(ps, (int)type, str, tok->lineno, col_offset,
                               &(err_ret->expected))) != E_OK) {
            if (err_ret->error != E_DONE) {
                PyObject_FREE(str);
                err_ret->token = type;
            }
            break;
        }
    }

    if (err_ret->error == E_DONE) {
        n = ps->p_tree;
        ps->p_tree = NULL;
    }
    else
        n = NULL;

#ifdef PY_PARSER_REQUIRES_FUTURE_KEYWORD
    *flags = ps->p_flags;
#endif
    PyParser_Delete(ps);

    if (n == NULL) {
        if (tok->lineno <= 1 && tok->done == E_EOF)
            err_ret->error = E_EOF;
        err_ret->lineno = tok->lineno;
        if (tok->buf != NULL) {
            char *text = NULL;
            size_t len;
            assert(tok->cur - tok->buf < INT_MAX);
            err_ret->offset = (int)(tok->cur - tok->buf);
            len = tok->inp - tok->buf;
#ifdef Py_USING_UNICODE
            text = PyTokenizer_RestoreEncoding(tok, len, &err_ret->offset);

#endif
            if (text == NULL) {
                text = (char *) PyObject_MALLOC(len + 1);
                if (text != NULL) {
                    if (len > 0)
                        strncpy(text, tok->buf, len);
                    text[len] = '\0';
                }
            }
            err_ret->text = text;
        }
    } else if (tok->encoding != NULL) {
        /* 'nodes->n_str' uses PyObject_*, while 'tok->encoding' was
         * allocated using PyMem_
         */
        node* r = PyNode_New(encoding_decl);
        if (r)
            r->n_str = PyObject_MALLOC(strlen(tok->encoding)+1);
        if (!r || !r->n_str) {
            err_ret->error = E_NOMEM;
            if (r)
                PyObject_FREE(r);
            n = NULL;
            goto done;
        }
        strcpy(r->n_str, tok->encoding);
        PyMem_FREE(tok->encoding);
        tok->encoding = NULL;
        r->n_nchildren = 1;
        r->n_child = n;
        n = r;
    }

done:
    PyTokenizer_Free(tok);

    return n;
}
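
/* Note on ownership in parsetok() above: the tokenizer is always freed
   before returning, the parser state is deleted as soon as the token loop
   finishes, and on success the returned root may be an encoding_decl node
   whose single child is the actual parse tree (when the source declared an
   encoding). */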

static void
initerr(perrdetail *err_ret, const char *filename)
{
    err_ret->error = E_OK;
    err_ret->filename = filename;
    err_ret->lineno = 0;
    err_ret->offset = 0;
    err_ret->text = NULL;
    err_ret->token = -1;
    err_ret->expected = -1;
}