]> git.proxmox.com Git - mirror_edk2.git/blame - AppPkg/Applications/Python/Python-2.7.2/Lib/lib2to3/refactor.py
EmbeddedPkg: Extend NvVarStoreFormattedLib LIBRARY_CLASS
[mirror_edk2.git] / AppPkg / Applications / Python / Python-2.7.2 / Lib / lib2to3 / refactor.py
CommitLineData
4710c53d 1# Copyright 2006 Google, Inc. All Rights Reserved.\r
2# Licensed to PSF under a Contributor Agreement.\r
3\r
4"""Refactoring framework.\r
5\r
6Used as a main program, this can refactor any number of files and/or\r
7recursively descend down directories. Imported as a module, this\r
8provides infrastructure to write your own refactoring tool.\r
9"""\r
10\r
11from __future__ import with_statement\r
12\r
13__author__ = "Guido van Rossum <guido@python.org>"\r
14\r
15\r
16# Python imports\r
17import os\r
18import sys\r
19import logging\r
20import operator\r
21import collections\r
22import StringIO\r
23from itertools import chain\r
24\r
25# Local imports\r
26from .pgen2 import driver, tokenize, token\r
27from .fixer_util import find_root\r
28from . import pytree, pygram\r
29from . import btm_utils as bu\r
30from . import btm_matcher as bm\r
31\r
32\r
def get_all_fix_names(fixer_pkg, remove_prefix=True):
    """Return a sorted list of all available fix names in the given package."""
    pkg = __import__(fixer_pkg, [], [], ["*"])
    fixer_dir = os.path.dirname(pkg.__file__)
    names = []
    for filename in sorted(os.listdir(fixer_dir)):
        # Fixer modules are named fix_<name>.py; skip everything else.
        if not (filename.startswith("fix_") and filename.endswith(".py")):
            continue
        stem = filename[:-3]          # drop the ".py" extension
        if remove_prefix:
            stem = stem[4:]           # drop the "fix_" prefix
        names.append(stem)
    return names
44\r
45\r
class _EveryNode(Exception):
    # Internal signal raised by _get_head_types() when a pattern has no
    # specific head node type, i.e. it could match any node.
    pass
48\r
49\r
def _get_head_types(pat):
    """ Accepts a pytree Pattern Node and returns a set
    of the pattern types which will match first. """

    if isinstance(pat, (pytree.NodePattern, pytree.LeafPattern)):
        # Node/leaf patterns either carry an explicit type, or (when the
        # type is None) can match anything -- signalled via _EveryNode.
        if pat.type is None:
            raise _EveryNode
        return set([pat.type])

    if isinstance(pat, pytree.NegatedPattern):
        # A negated pattern carries no type of its own.
        if not pat.content:
            raise _EveryNode
        return _get_head_types(pat.content)

    if isinstance(pat, pytree.WildcardPattern):
        # Union of the head types of every subpattern of every alternative.
        types = set()
        for alternative in pat.content:
            for subpattern in alternative:
                types |= _get_head_types(subpattern)
        return types

    raise Exception("Oh no! I don't understand pattern %s" % (pat))
76\r
77\r
def _get_headnode_dict(fixer_list):
    """ Accepts a list of fixers and returns a dictionary
    of head node type --> fixer list. """
    head_nodes = collections.defaultdict(list)
    universal = []  # fixers that must be tried against every node type
    for fixer in fixer_list:
        if not fixer.pattern:
            # No pattern: use the fixer's declared accept type if any,
            # otherwise it applies everywhere.
            if fixer._accept_type is not None:
                head_nodes[fixer._accept_type].append(fixer)
            else:
                universal.append(fixer)
            continue
        try:
            for node_type in _get_head_types(fixer.pattern):
                head_nodes[node_type].append(fixer)
        except _EveryNode:
            universal.append(fixer)
    # Universal fixers are appended under every symbol and token type.
    for node_type in chain(pygram.python_grammar.symbol2number.itervalues(),
                           pygram.python_grammar.tokens):
        head_nodes[node_type].extend(universal)
    return dict(head_nodes)
101\r
102\r
def get_fixers_from_package(pkg_name):
    """
    Return the fully qualified names for fixers in the package pkg_name.
    """
    # Keep the "fix_" prefix (remove_prefix=False) so the result is a
    # list of importable module paths.
    module_names = get_all_fix_names(pkg_name, False)
    return [pkg_name + "." + name for name in module_names]
109\r
def _identity(obj):
    # No-op transform; stands in for the newline-translation helpers on
    # Python 3 (see the version check below), where open() handles this.
    return obj
112\r
if sys.version_info < (3, 0):
    # Python 2: the built-in open() takes no encoding argument, so use
    # codecs.open() and do newline translation by hand.
    import codecs
    _open_with_encoding = codecs.open
    # codecs.open doesn't translate newlines sadly.
    def _from_system_newlines(input):
        # Normalize CRLF to LF after reading.
        return input.replace(u"\r\n", u"\n")
    def _to_system_newlines(input):
        # Convert LF back to the platform's separator before writing.
        if os.linesep != "\n":
            return input.replace(u"\n", os.linesep)
        else:
            return input
else:
    # Python 3: open() already handles encodings and universal newlines.
    _open_with_encoding = open
    _from_system_newlines = _identity
    _to_system_newlines = _identity
128\r
129\r
def _detect_future_features(source):
    """Return a frozenset of feature names imported from __future__.

    Tokenizes `source` and scans only its prefix: an optional docstring
    followed by `from __future__ import ...` statements.  Scanning stops
    at the first token that cannot belong to that prefix.
    """
    have_docstring = False
    gen = tokenize.generate_tokens(StringIO.StringIO(source).readline)
    def advance():
        # Yield (token type, token string) pairs from the stream.
        tok = gen.next()
        return tok[0], tok[1]
    ignore = frozenset((token.NEWLINE, tokenize.NL, token.COMMENT))
    features = set()
    try:
        while True:
            tp, value = advance()
            if tp in ignore:
                continue
            elif tp == token.STRING:
                # At most one (doc)string may precede the future imports.
                if have_docstring:
                    break
                have_docstring = True
            elif tp == token.NAME and value == u"from":
                tp, value = advance()
                if tp != token.NAME or value != u"__future__":
                    break
                tp, value = advance()
                if tp != token.NAME or value != u"import":
                    break
                tp, value = advance()
                # Skip an opening parenthesis of a parenthesized list.
                if tp == token.OP and value == u"(":
                    tp, value = advance()
                # Collect the comma-separated feature names.
                while tp == token.NAME:
                    features.add(value)
                    tp, value = advance()
                    if tp != token.OP or value != u",":
                        break
                    tp, value = advance()
            else:
                break
    except StopIteration:
        # End of token stream; whatever was collected so far stands.
        pass
    return frozenset(features)
168\r
169\r
class FixerError(Exception):
    """A fixer could not be loaded."""
    # Raised by RefactoringTool.get_fixers() when a fixer class is
    # missing or declares an unknown `order`.
172\r
173\r
class RefactoringTool(object):
    """Refactors Python source: files, directories, strings and doctests.

    Parses input with the pgen2 driver, applies the loaded fixers --
    via the bottom-up matcher where the fixer supports it, otherwise by
    plain tree traversal -- and optionally writes changed files back.
    """

    # Baseline configuration; a copy is updated with the `options`
    # argument passed to __init__().
    _default_options = {"print_function" : False}

    CLASS_PREFIX = "Fix" # The prefix for fixer classes
    FILE_PREFIX = "fix_" # The prefix for modules with a fixer within

    def __init__(self, fixer_names, options=None, explicit=None):
        """Initializer.

        Args:
            fixer_names: a list of fixers to import
            options: a dict with configuration.
            explicit: a list of fixers to run even if they are explicit.
        """
        self.fixers = fixer_names
        self.explicit = explicit or []
        self.options = self._default_options.copy()
        if options is not None:
            self.options.update(options)
        # "print_function" selects the grammar in which print is not
        # a statement.
        if self.options["print_function"]:
            self.grammar = pygram.python_grammar_no_print_statement
        else:
            self.grammar = pygram.python_grammar
        self.errors = []
        self.logger = logging.getLogger("RefactoringTool")
        self.fixer_log = []
        self.wrote = False
        self.driver = driver.Driver(self.grammar,
                                    convert=pytree.convert,
                                    logger=self.logger)
        self.pre_order, self.post_order = self.get_fixers()


        self.files = []  # List of files that were or should be modified

        self.BM = bm.BottomMatcher()
        self.bmi_pre_order = []  # Bottom Matcher incompatible fixers
        self.bmi_post_order = []

        # Partition the fixers: BM-compatible ones are registered with
        # the bottom matcher, the rest stay on the traversal lists.
        for fixer in chain(self.post_order, self.pre_order):
            if fixer.BM_compatible:
                self.BM.add_fixer(fixer)
            # remove fixers that will be handled by the bottom-up
            # matcher
            elif fixer in self.pre_order:
                self.bmi_pre_order.append(fixer)
            elif fixer in self.post_order:
                self.bmi_post_order.append(fixer)

        self.bmi_pre_order_heads = _get_headnode_dict(self.bmi_pre_order)
        self.bmi_post_order_heads = _get_headnode_dict(self.bmi_post_order)



    def get_fixers(self):
        """Inspects the options to load the requested patterns and handlers.

        Returns:
          (pre_order, post_order), where pre_order is the list of fixers that
          want a pre-order AST traversal, and post_order is the list that want
          post-order traversal.
        """
        pre_order_fixers = []
        post_order_fixers = []
        for fix_mod_path in self.fixers:
            mod = __import__(fix_mod_path, {}, {}, ["*"])
            fix_name = fix_mod_path.rsplit(".", 1)[-1]
            if fix_name.startswith(self.FILE_PREFIX):
                fix_name = fix_name[len(self.FILE_PREFIX):]
            # Derive the class name from the module: fix_foo_bar -> FixFooBar.
            parts = fix_name.split("_")
            class_name = self.CLASS_PREFIX + "".join([p.title() for p in parts])
            try:
                fix_class = getattr(mod, class_name)
            except AttributeError:
                raise FixerError("Can't find %s.%s" % (fix_name, class_name))
            fixer = fix_class(self.options, self.fixer_log)
            # Explicit fixers run only when named in self.explicit (or
            # when self.explicit is True, meaning "run everything").
            if fixer.explicit and self.explicit is not True and \
                    fix_mod_path not in self.explicit:
                self.log_message("Skipping implicit fixer: %s", fix_name)
                continue

            self.log_debug("Adding transformation: %s", fix_name)
            if fixer.order == "pre":
                pre_order_fixers.append(fixer)
            elif fixer.order == "post":
                post_order_fixers.append(fixer)
            else:
                raise FixerError("Illegal fixer order: %r" % fixer.order)

        # Stable sort: fixers with equal run_order keep their list order.
        key_func = operator.attrgetter("run_order")
        pre_order_fixers.sort(key=key_func)
        post_order_fixers.sort(key=key_func)
        return (pre_order_fixers, post_order_fixers)

    def log_error(self, msg, *args, **kwds):
        """Called when an error occurs."""
        # Bare raise: re-raises the exception currently being handled.
        # Subclasses may override this to record errors instead.
        raise

    def log_message(self, msg, *args):
        """Hook to log a message."""
        if args:
            msg = msg % args
        self.logger.info(msg)

    def log_debug(self, msg, *args):
        """Hook to log a debug-level message."""
        if args:
            msg = msg % args
        self.logger.debug(msg)

    def print_output(self, old_text, new_text, filename, equal):
        """Called with the old version, new version, and filename of a
        refactored file."""
        # Default implementation does nothing; override e.g. to show diffs.
        pass

    def refactor(self, items, write=False, doctests_only=False):
        """Refactor a list of files and directories."""

        for dir_or_file in items:
            if os.path.isdir(dir_or_file):
                self.refactor_dir(dir_or_file, write, doctests_only)
            else:
                self.refactor_file(dir_or_file, write, doctests_only)

    def refactor_dir(self, dir_name, write=False, doctests_only=False):
        """Descends down a directory and refactor every Python file found.

        Python files are assumed to have a .py extension.

        Files and subdirectories starting with '.' are skipped.
        """
        py_ext = os.extsep + "py"
        for dirpath, dirnames, filenames in os.walk(dir_name):
            self.log_debug("Descending into %s", dirpath)
            # Sort for a deterministic traversal order.
            dirnames.sort()
            filenames.sort()
            for name in filenames:
                if (not name.startswith(".") and
                    os.path.splitext(name)[1] == py_ext):
                    fullname = os.path.join(dirpath, name)
                    self.refactor_file(fullname, write, doctests_only)
            # Modify dirnames in-place to remove subdirs with leading dots
            dirnames[:] = [dn for dn in dirnames if not dn.startswith(".")]

    def _read_python_source(self, filename):
        """
        Do our best to decode a Python source file correctly.

        Returns (source_text, encoding), or (None, None) on open failure.
        """
        try:
            f = open(filename, "rb")
        except IOError as err:
            self.log_error("Can't open %s: %s", filename, err)
            return None, None
        try:
            # Sniff the PEP 263 coding cookie / BOM from the raw bytes.
            encoding = tokenize.detect_encoding(f.readline)[0]
        finally:
            f.close()
        with _open_with_encoding(filename, "r", encoding=encoding) as f:
            return _from_system_newlines(f.read()), encoding

    def refactor_file(self, filename, write=False, doctests_only=False):
        """Refactors a file."""
        input, encoding = self._read_python_source(filename)
        if input is None:
            # Reading the file failed.
            return
        input += u"\n" # Silence certain parse errors
        if doctests_only:
            self.log_debug("Refactoring doctests in %s", filename)
            output = self.refactor_docstring(input, filename)
            if output != input:
                self.processed_file(output, filename, input, write, encoding)
            else:
                self.log_debug("No doctest changes in %s", filename)
        else:
            tree = self.refactor_string(input, filename)
            if tree and tree.was_changed:
                # The [:-1] is to take off the \n we added earlier
                self.processed_file(unicode(tree)[:-1], filename,
                                    write=write, encoding=encoding)
            else:
                self.log_debug("No changes in %s", filename)

    def refactor_string(self, data, name):
        """Refactor a given input string.

        Args:
            data: a string holding the code to be refactored.
            name: a human-readable name for use in error/log messages.

        Returns:
            An AST corresponding to the refactored input stream; None if
            there were errors during the parse.
        """
        features = _detect_future_features(data)
        if "print_function" in features:
            # Switch grammars for this parse only; restored in `finally`.
            self.driver.grammar = pygram.python_grammar_no_print_statement
        try:
            tree = self.driver.parse_string(data)
        except Exception as err:
            self.log_error("Can't parse %s: %s: %s",
                           name, err.__class__.__name__, err)
            return
        finally:
            self.driver.grammar = self.grammar
        tree.future_features = features
        self.log_debug("Refactoring %s", name)
        self.refactor_tree(tree, name)
        return tree

    def refactor_stdin(self, doctests_only=False):
        """Refactor source read from standard input (never writes back)."""
        input = sys.stdin.read()
        if doctests_only:
            self.log_debug("Refactoring doctests in stdin")
            output = self.refactor_docstring(input, "<stdin>")
            if output != input:
                self.processed_file(output, "<stdin>", input)
            else:
                self.log_debug("No doctest changes in stdin")
        else:
            tree = self.refactor_string(input, "<stdin>")
            if tree and tree.was_changed:
                self.processed_file(unicode(tree), "<stdin>", input)
            else:
                self.log_debug("No changes in stdin")

    def refactor_tree(self, tree, name):
        """Refactors a parse tree (modifying the tree in place).

        For compatible patterns the bottom matcher module is
        used. Otherwise the tree is traversed node-to-node for
        matches.

        Args:
            tree: a pytree.Node instance representing the root of the tree
                to be refactored.
            name: a human-readable name for this tree.

        Returns:
            True if the tree was modified, False otherwise.
        """

        for fixer in chain(self.pre_order, self.post_order):
            fixer.start_tree(tree, name)

        #use traditional matching for the incompatible fixers
        self.traverse_by(self.bmi_pre_order_heads, tree.pre_order())
        self.traverse_by(self.bmi_post_order_heads, tree.post_order())

        # obtain a set of candidate nodes
        match_set = self.BM.run(tree.leaves())

        # Keep applying fixers until no candidate nodes remain; transforms
        # may add new code, which is fed back into the match set below.
        while any(match_set.values()):
            for fixer in self.BM.fixers:
                if fixer in match_set and match_set[fixer]:
                    #sort by depth; apply fixers from bottom(of the AST) to top
                    match_set[fixer].sort(key=pytree.Base.depth, reverse=True)

                    if fixer.keep_line_order:
                        #some fixers(eg fix_imports) must be applied
                        #with the original file's line order
                        match_set[fixer].sort(key=pytree.Base.get_lineno)

                    for node in list(match_set[fixer]):
                        if node in match_set[fixer]:
                            match_set[fixer].remove(node)

                        try:
                            find_root(node)
                        except AssertionError:
                            # this node has been cut off from a
                            # previous transformation ; skip
                            continue

                        if node.fixers_applied and fixer in node.fixers_applied:
                            # do not apply the same fixer again
                            continue

                        results = fixer.match(node)

                        if results:
                            new = fixer.transform(node, results)
                            if new is not None:
                                node.replace(new)
                                #new.fixers_applied.append(fixer)
                                for node in new.post_order():
                                    # do not apply the fixer again to
                                    # this or any subnode
                                    if not node.fixers_applied:
                                        node.fixers_applied = []
                                    node.fixers_applied.append(fixer)

                                # update the original match set for
                                # the added code
                                new_matches = self.BM.run(new.leaves())
                                for fxr in new_matches:
                                    if not fxr in match_set:
                                        match_set[fxr]=[]

                                    match_set[fxr].extend(new_matches[fxr])

        for fixer in chain(self.pre_order, self.post_order):
            fixer.finish_tree(tree, name)
        return tree.was_changed

    def traverse_by(self, fixers, traversal):
        """Traverse an AST, applying a set of fixers to each node.

        This is a helper method for refactor_tree().

        Args:
            fixers: a list of fixer instances.
            traversal: a generator that yields AST nodes.

        Returns:
            None
        """
        if not fixers:
            return
        for node in traversal:
            for fixer in fixers[node.type]:
                results = fixer.match(node)
                if results:
                    new = fixer.transform(node, results)
                    if new is not None:
                        node.replace(new)
                        # Continue matching on the replacement node.
                        node = new

    def processed_file(self, new_text, filename, old_text=None, write=False,
                       encoding=None):
        """
        Called when a file has been refactored, and there are changes.
        """
        self.files.append(filename)
        if old_text is None:
            # Re-read the original so it can be compared / diffed.
            old_text = self._read_python_source(filename)[0]
            if old_text is None:
                return
        equal = old_text == new_text
        self.print_output(old_text, new_text, filename, equal)
        if equal:
            self.log_debug("No changes to %s", filename)
            return
        if write:
            self.write_file(new_text, filename, old_text, encoding)
        else:
            self.log_debug("Not writing changes to %s", filename)

    def write_file(self, new_text, filename, old_text, encoding=None):
        """Writes a string to a file.

        It first shows a unified diff between the old text and the new text, and
        then rewrites the file; the latter is only done if the write option is
        set.
        """
        try:
            f = _open_with_encoding(filename, "w", encoding=encoding)
        except os.error as err:
            self.log_error("Can't create %s: %s", filename, err)
            return
        try:
            f.write(_to_system_newlines(new_text))
        except os.error as err:
            self.log_error("Can't write %s: %s", filename, err)
        finally:
            f.close()
        self.log_debug("Wrote changes to %s", filename)
        self.wrote = True

    # Doctest prompt prefixes, as used by refactor_docstring() below.
    PS1 = ">>> "
    PS2 = "... "

    def refactor_docstring(self, input, filename):
        """Refactors a docstring, looking for doctests.

        This returns a modified version of the input string.  It looks
        for doctests, which start with a ">>>" prompt, and may be
        continued with "..." prompts, as long as the "..." is indented
        the same as the ">>>".

        (Unfortunately we can't use the doctest module's parser,
        since, like most parsers, it is not geared towards preserving
        the original source.)
        """
        result = []
        block = None          # lines of the doctest being collected
        block_lineno = None   # 1-based line number where that doctest starts
        indent = None         # indentation prefix of the current doctest
        lineno = 0
        for line in input.splitlines(True):
            lineno += 1
            if line.lstrip().startswith(self.PS1):
                # A new doctest starts; flush any block in progress.
                if block is not None:
                    result.extend(self.refactor_doctest(block, block_lineno,
                                                        indent, filename))
                block_lineno = lineno
                block = [line]
                i = line.find(self.PS1)
                indent = line[:i]
            elif (indent is not None and
                  (line.startswith(indent + self.PS2) or
                   line == indent + self.PS2.rstrip() + u"\n")):
                # Continuation line of the current doctest.
                block.append(line)
            else:
                # Ordinary line; flush any block in progress.
                if block is not None:
                    result.extend(self.refactor_doctest(block, block_lineno,
                                                        indent, filename))
                block = None
                indent = None
                result.append(line)
        if block is not None:
            result.extend(self.refactor_doctest(block, block_lineno,
                                                indent, filename))
        return u"".join(result)

    def refactor_doctest(self, block, lineno, indent, filename):
        """Refactors one doctest.

        A doctest is given as a block of lines, the first of which starts
        with ">>>" (possibly indented), while the remaining lines start
        with "..." (identically indented).

        """
        try:
            tree = self.parse_block(block, lineno, indent)
        except Exception as err:
            if self.logger.isEnabledFor(logging.DEBUG):
                for line in block:
                    self.log_debug("Source: %s", line.rstrip(u"\n"))
            self.log_error("Can't parse docstring in %s line %s: %s: %s",
                           filename, lineno, err.__class__.__name__, err)
            return block
        if self.refactor_tree(tree, filename):
            new = unicode(tree).splitlines(True)
            # Undo the adjustment of the line numbers in wrap_toks() below.
            clipped, new = new[:lineno-1], new[lineno-1:]
            assert clipped == [u"\n"] * (lineno-1), clipped
            if not new[-1].endswith(u"\n"):
                new[-1] += u"\n"
            # Re-attach the prompt prefixes stripped by gen_lines().
            block = [indent + self.PS1 + new.pop(0)]
            if new:
                block += [indent + self.PS2 + line for line in new]
        return block

    def summarize(self):
        """Log a summary of modified files, fixer warnings and errors."""
        if self.wrote:
            were = "were"
        else:
            were = "need to be"
        if not self.files:
            self.log_message("No files %s modified.", were)
        else:
            self.log_message("Files that %s modified:", were)
            for file in self.files:
                self.log_message(file)
        if self.fixer_log:
            self.log_message("Warnings/messages while refactoring:")
            for message in self.fixer_log:
                self.log_message(message)
        if self.errors:
            if len(self.errors) == 1:
                self.log_message("There was 1 error:")
            else:
                self.log_message("There were %d errors:", len(self.errors))
            for msg, args, kwds in self.errors:
                self.log_message(msg, *args, **kwds)

    def parse_block(self, block, lineno, indent):
        """Parses a block into a tree.

        This is necessary to get correct line number / offset information
        in the parser diagnostics and embedded into the parse tree.
        """
        tree = self.driver.parse_tokens(self.wrap_toks(block, lineno, indent))
        tree.future_features = frozenset()
        return tree

    def wrap_toks(self, block, lineno, indent):
        """Wraps a tokenize stream to systematically modify start/end."""
        tokens = tokenize.generate_tokens(self.gen_lines(block, indent).next)
        for type, value, (line0, col0), (line1, col1), line_text in tokens:
            # Shift line numbers so they match the doctest's position in
            # the original file.
            line0 += lineno - 1
            line1 += lineno - 1
            # Don't bother updating the columns; this is too complicated
            # since line_text would also have to be updated and it would
            # still break for tokens spanning lines.  Let the user guess
            # that the column numbers for doctests are relative to the
            # end of the prompt string (PS1 or PS2).
            yield type, value, (line0, col0), (line1, col1), line_text


    def gen_lines(self, block, indent):
        """Generates lines as expected by tokenize from a list of lines.

        This strips the first len(indent + self.PS1) characters off each line.
        """
        prefix1 = indent + self.PS1
        prefix2 = indent + self.PS2
        prefix = prefix1
        for line in block:
            if line.startswith(prefix):
                yield line[len(prefix):]
            elif line == prefix.rstrip() + u"\n":
                yield u"\n"
            else:
                raise AssertionError("line=%r, prefix=%r" % (line, prefix))
            # Only the first line uses the PS1 prefix.
            prefix = prefix2
        # Pad with empty lines so tokenize sees a clean EOF.
        while True:
            yield ""
684\r
class MultiprocessingUnsupported(Exception):
    # Raised by MultiprocessRefactoringTool.refactor() when more than one
    # process is requested but the multiprocessing module can't be imported.
    pass
687\r
688\r
class MultiprocessRefactoringTool(RefactoringTool):
    """RefactoringTool that can distribute refactor_file() calls over
    worker processes via a multiprocessing.JoinableQueue."""

    def __init__(self, *args, **kwargs):
        super(MultiprocessRefactoringTool, self).__init__(*args, **kwargs)
        # Both remain None unless refactor() is running with
        # num_processes > 1; refactor_file() keys off self.queue.
        self.queue = None
        self.output_lock = None

    def refactor(self, items, write=False, doctests_only=False,
                 num_processes=1):
        """Refactor `items`, optionally fanning files out to workers."""
        if num_processes == 1:
            # Single-process: fall back to the sequential implementation.
            return super(MultiprocessRefactoringTool, self).refactor(
                items, write, doctests_only)
        try:
            import multiprocessing
        except ImportError:
            raise MultiprocessingUnsupported
        if self.queue is not None:
            raise RuntimeError("already doing multiple processes")
        self.queue = multiprocessing.JoinableQueue()
        self.output_lock = multiprocessing.Lock()
        processes = [multiprocessing.Process(target=self._child)
                     for i in xrange(num_processes)]
        try:
            for p in processes:
                p.start()
            # The parent walks the items; with self.queue set,
            # refactor_file() below enqueues work instead of doing it.
            super(MultiprocessRefactoringTool, self).refactor(items, write,
                                                              doctests_only)
        finally:
            self.queue.join()
            # One None sentinel per worker tells it to exit.
            for i in xrange(num_processes):
                self.queue.put(None)
            for p in processes:
                if p.is_alive():
                    p.join()
            self.queue = None

    def _child(self):
        # Worker loop: process queued (args, kwargs) tasks until the
        # None sentinel arrives.
        task = self.queue.get()
        while task is not None:
            args, kwargs = task
            try:
                super(MultiprocessRefactoringTool, self).refactor_file(
                    *args, **kwargs)
            finally:
                # Always acknowledge the task so queue.join() can return.
                self.queue.task_done()
            task = self.queue.get()

    def refactor_file(self, *args, **kwargs):
        # In multiprocess mode (queue set) enqueue the file for a worker;
        # otherwise refactor it synchronously.
        if self.queue is not None:
            self.queue.put((args, kwargs))
        else:
            return super(MultiprocessRefactoringTool, self).refactor_file(
                *args, **kwargs)