edk2: Remove AppPkg, StdLib, StdLibPrivateInternalFiles
diff --git a/AppPkg/Applications/Python/Python-2.7.2/Lib/lib2to3/refactor.py b/AppPkg/Applications/Python/Python-2.7.2/Lib/lib2to3/refactor.py
deleted file mode 100644
index 97c4f5e..0000000
--- a/AppPkg/Applications/Python/Python-2.7.2/Lib/lib2to3/refactor.py
+++ /dev/null
@@ -1,741 +0,0 @@
-# Copyright 2006 Google, Inc. All Rights Reserved.\r
-# Licensed to PSF under a Contributor Agreement.\r
-\r
-"""Refactoring framework.\r
-\r
-Used as a main program, this can refactor any number of files and/or\r
-recursively descend down directories.  Imported as a module, this\r
-provides infrastructure to write your own refactoring tool.\r
-"""\r
-\r
-from __future__ import with_statement\r
-\r
-__author__ = "Guido van Rossum <guido@python.org>"\r
-\r
-\r
-# Python imports\r
-import os\r
-import sys\r
-import logging\r
-import operator\r
-import collections\r
-import StringIO\r
-from itertools import chain\r
-\r
-# Local imports\r
-from .pgen2 import driver, tokenize, token\r
-from .fixer_util import find_root\r
-from . import pytree, pygram\r
-from . import btm_utils as bu\r
-from . import btm_matcher as bm\r
-\r
-\r
-def get_all_fix_names(fixer_pkg, remove_prefix=True):\r
-    """Return a sorted list of all available fix names in the given package."""\r
-    pkg = __import__(fixer_pkg, [], [], ["*"])\r
-    fixer_dir = os.path.dirname(pkg.__file__)\r
-    fix_names = []\r
-    for name in sorted(os.listdir(fixer_dir)):\r
-        if name.startswith("fix_") and name.endswith(".py"):\r
-            if remove_prefix:\r
-                name = name[4:]\r
-            fix_names.append(name[:-3])\r
-    return fix_names\r
-\r
-\r
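For orientation, a quick sketch of this helper against the stock lib2to3.fixes package that ships with Python 2.7 (output abridged and indicative, not exhaustive):

    from lib2to3.refactor import get_all_fix_names

    print(get_all_fix_names("lib2to3.fixes")[:3])
    # e.g. ['apply', 'basestring', 'buffer']
    print(get_all_fix_names("lib2to3.fixes", remove_prefix=False)[:3])
    # e.g. ['fix_apply', 'fix_basestring', 'fix_buffer']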
-class _EveryNode(Exception):\r
-    pass\r
-\r
-\r
-def _get_head_types(pat):\r
-    """ Accepts a pytree Pattern Node and returns a set\r
-        of the pattern types which will match first. """\r
-\r
-    if isinstance(pat, (pytree.NodePattern, pytree.LeafPattern)):\r
-        # NodePatterns must either have no type and no content\r
-        #   or a type and content -- so they don't get any further\r
-        # Always return leaves\r
-        if pat.type is None:\r
-            raise _EveryNode\r
-        return set([pat.type])\r
-\r
-    if isinstance(pat, pytree.NegatedPattern):\r
-        if pat.content:\r
-            return _get_head_types(pat.content)\r
-        raise _EveryNode # Negated Patterns don't have a type\r
-\r
-    if isinstance(pat, pytree.WildcardPattern):\r
-        # Recurse on each node in content\r
-        r = set()\r
-        for p in pat.content:\r
-            for x in p:\r
-                r.update(_get_head_types(x))\r
-        return r\r
-\r
-    raise Exception("Oh no! I don't understand pattern %s" %(pat))\r
-\r
-\r
-def _get_headnode_dict(fixer_list):\r
-    """ Accepts a list of fixers and returns a dictionary\r
-        of head node type --> fixer list.  """\r
-    head_nodes = collections.defaultdict(list)\r
-    every = []\r
-    for fixer in fixer_list:\r
-        if fixer.pattern:\r
-            try:\r
-                heads = _get_head_types(fixer.pattern)\r
-            except _EveryNode:\r
-                every.append(fixer)\r
-            else:\r
-                for node_type in heads:\r
-                    head_nodes[node_type].append(fixer)\r
-        else:\r
-            if fixer._accept_type is not None:\r
-                head_nodes[fixer._accept_type].append(fixer)\r
-            else:\r
-                every.append(fixer)\r
-    for node_type in chain(pygram.python_grammar.symbol2number.itervalues(),\r
-                           pygram.python_grammar.tokens):\r
-        head_nodes[node_type].extend(every)\r
-    return dict(head_nodes)\r
-\r
-\r
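To make the dispatch table concrete, a minimal sketch; FakeFixer is a hypothetical stand-in for a fixer_base.BaseFix instance:

    from lib2to3.refactor import _get_headnode_dict

    class FakeFixer(object):
        pattern = None          # no compiled pattern
        _accept_type = None     # no declared head node type either

    table = _get_headnode_dict([FakeFixer()])
    # With neither a pattern nor an _accept_type, the fixer lands in the
    # "every" bucket and is registered under every symbol and token type,
    # so it is consulted for every node during traversal.
    assert all(len(fixers) == 1 for fixers in table.values())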
-def get_fixers_from_package(pkg_name):\r
-    """\r
-    Return the fully qualified names for fixers in the package pkg_name.\r
-    """\r
-    return [pkg_name + "." + fix_name\r
-            for fix_name in get_all_fix_names(pkg_name, False)]\r
-\r
-def _identity(obj):\r
-    return obj\r
-\r
-if sys.version_info < (3, 0):\r
-    import codecs\r
-    _open_with_encoding = codecs.open\r
-    # Unfortunately, codecs.open doesn't translate newlines.\r
-    def _from_system_newlines(input):\r
-        return input.replace(u"\r\n", u"\n")\r
-    def _to_system_newlines(input):\r
-        if os.linesep != "\n":\r
-            return input.replace(u"\n", os.linesep)\r
-        else:\r
-            return input\r
-else:\r
-    _open_with_encoding = open\r
-    _from_system_newlines = _identity\r
-    _to_system_newlines = _identity\r
-\r
-\r
-def _detect_future_features(source):\r
-    have_docstring = False\r
-    gen = tokenize.generate_tokens(StringIO.StringIO(source).readline)\r
-    def advance():\r
-        tok = gen.next()\r
-        return tok[0], tok[1]\r
-    ignore = frozenset((token.NEWLINE, tokenize.NL, token.COMMENT))\r
-    features = set()\r
-    try:\r
-        while True:\r
-            tp, value = advance()\r
-            if tp in ignore:\r
-                continue\r
-            elif tp == token.STRING:\r
-                if have_docstring:\r
-                    break\r
-                have_docstring = True\r
-            elif tp == token.NAME and value == u"from":\r
-                tp, value = advance()\r
-                if tp != token.NAME or value != u"__future__":\r
-                    break\r
-                tp, value = advance()\r
-                if tp != token.NAME or value != u"import":\r
-                    break\r
-                tp, value = advance()\r
-                if tp == token.OP and value == u"(":\r
-                    tp, value = advance()\r
-                while tp == token.NAME:\r
-                    features.add(value)\r
-                    tp, value = advance()\r
-                    if tp != token.OP or value != u",":\r
-                        break\r
-                    tp, value = advance()\r
-            else:\r
-                break\r
-    except StopIteration:\r
-        pass\r
-    return frozenset(features)\r
-\r
-\r
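A quick sketch of what the scanner reports for a source with a docstring followed by a __future__ import (set ordering may vary):

    from lib2to3.refactor import _detect_future_features

    src = u"'''module docstring'''\nfrom __future__ import print_function, division\n"
    print(_detect_future_features(src))
    # -> frozenset(['print_function', 'division'])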
-class FixerError(Exception):\r
-    """A fixer could not be loaded."""\r
-\r
-\r
-class RefactoringTool(object):\r
-\r
-    _default_options = {"print_function" : False}\r
-\r
-    CLASS_PREFIX = "Fix" # The prefix for fixer classes\r
-    FILE_PREFIX = "fix_" # The prefix for modules with a fixer within\r
-\r
-    def __init__(self, fixer_names, options=None, explicit=None):\r
-        """Initializer.\r
-\r
-        Args:\r
-            fixer_names: a list of fixers to import\r
-            options: a dict with configuration.\r
-            explicit: a list of fixers to run even if they are marked as explicit.\r
-        """\r
-        self.fixers = fixer_names\r
-        self.explicit = explicit or []\r
-        self.options = self._default_options.copy()\r
-        if options is not None:\r
-            self.options.update(options)\r
-        if self.options["print_function"]:\r
-            self.grammar = pygram.python_grammar_no_print_statement\r
-        else:\r
-            self.grammar = pygram.python_grammar\r
-        self.errors = []\r
-        self.logger = logging.getLogger("RefactoringTool")\r
-        self.fixer_log = []\r
-        self.wrote = False\r
-        self.driver = driver.Driver(self.grammar,\r
-                                    convert=pytree.convert,\r
-                                    logger=self.logger)\r
-        self.pre_order, self.post_order = self.get_fixers()\r
-\r
-\r
-        self.files = []  # List of files that were or should be modified\r
-\r
-        self.BM = bm.BottomMatcher()\r
-        self.bmi_pre_order = [] # Bottom Matcher incompatible fixers\r
-        self.bmi_post_order = []\r
-\r
-        for fixer in chain(self.post_order, self.pre_order):\r
-            if fixer.BM_compatible:\r
-                self.BM.add_fixer(fixer)\r
-                # remove fixers that will be handled by the bottom-up\r
-                # matcher\r
-            elif fixer in self.pre_order:\r
-                self.bmi_pre_order.append(fixer)\r
-            elif fixer in self.post_order:\r
-                self.bmi_post_order.append(fixer)\r
-\r
-        self.bmi_pre_order_heads = _get_headnode_dict(self.bmi_pre_order)\r
-        self.bmi_post_order_heads = _get_headnode_dict(self.bmi_post_order)\r
-\r
-\r
-\r
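Putting the constructor together with the entry points defined below, a minimal driver sketch; sample.py is a hypothetical target file:

    from lib2to3.refactor import RefactoringTool, get_fixers_from_package

    fixers = get_fixers_from_package("lib2to3.fixes")
    rt = RefactoringTool(fixers, options={"print_function": False})
    rt.refactor(["sample.py"], write=False)   # dry run: nothing is written back
    rt.summarize()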
-    def get_fixers(self):\r
-        """Inspects the options to load the requested patterns and handlers.\r
-\r
-        Returns:\r
-          (pre_order, post_order), where pre_order is the list of fixers that\r
-          want a pre-order AST traversal, and post_order is the list that want\r
-          post-order traversal.\r
-        """\r
-        pre_order_fixers = []\r
-        post_order_fixers = []\r
-        for fix_mod_path in self.fixers:\r
-            mod = __import__(fix_mod_path, {}, {}, ["*"])\r
-            fix_name = fix_mod_path.rsplit(".", 1)[-1]\r
-            if fix_name.startswith(self.FILE_PREFIX):\r
-                fix_name = fix_name[len(self.FILE_PREFIX):]\r
-            parts = fix_name.split("_")\r
-            class_name = self.CLASS_PREFIX + "".join([p.title() for p in parts])\r
-            try:\r
-                fix_class = getattr(mod, class_name)\r
-            except AttributeError:\r
-                raise FixerError("Can't find %s.%s" % (fix_name, class_name))\r
-            fixer = fix_class(self.options, self.fixer_log)\r
-            if fixer.explicit and self.explicit is not True and \\r
-                    fix_mod_path not in self.explicit:\r
-                self.log_message("Skipping implicit fixer: %s", fix_name)\r
-                continue\r
-\r
-            self.log_debug("Adding transformation: %s", fix_name)\r
-            if fixer.order == "pre":\r
-                pre_order_fixers.append(fixer)\r
-            elif fixer.order == "post":\r
-                post_order_fixers.append(fixer)\r
-            else:\r
-                raise FixerError("Illegal fixer order: %r" % fixer.order)\r
-\r
-        key_func = operator.attrgetter("run_order")\r
-        pre_order_fixers.sort(key=key_func)\r
-        post_order_fixers.sort(key=key_func)\r
-        return (pre_order_fixers, post_order_fixers)\r
-\r
-    def log_error(self, msg, *args, **kwds):\r
-        """Called when an error occurs."""\r
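-        # The default re-raises the exception currently being handled;\r
-        # subclasses override this hook to record errors instead.\r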
-        raise\r
-\r
-    def log_message(self, msg, *args):\r
-        """Hook to log a message."""\r
-        if args:\r
-            msg = msg % args\r
-        self.logger.info(msg)\r
-\r
-    def log_debug(self, msg, *args):\r
-        if args:\r
-            msg = msg % args\r
-        self.logger.debug(msg)\r
-\r
-    def print_output(self, old_text, new_text, filename, equal):\r
-        """Called with the old version, new version, and filename of a\r
-        refactored file."""\r
-        pass\r
-\r
-    def refactor(self, items, write=False, doctests_only=False):\r
-        """Refactor a list of files and directories."""\r
-\r
-        for dir_or_file in items:\r
-            if os.path.isdir(dir_or_file):\r
-                self.refactor_dir(dir_or_file, write, doctests_only)\r
-            else:\r
-                self.refactor_file(dir_or_file, write, doctests_only)\r
-\r
-    def refactor_dir(self, dir_name, write=False, doctests_only=False):\r
-        """Descends down a directory and refactor every Python file found.\r
-\r
-        Python files are assumed to have a .py extension.\r
-\r
-        Files and subdirectories starting with '.' are skipped.\r
-        """\r
-        py_ext = os.extsep + "py"\r
-        for dirpath, dirnames, filenames in os.walk(dir_name):\r
-            self.log_debug("Descending into %s", dirpath)\r
-            dirnames.sort()\r
-            filenames.sort()\r
-            for name in filenames:\r
-                if (not name.startswith(".") and\r
-                    os.path.splitext(name)[1] == py_ext):\r
-                    fullname = os.path.join(dirpath, name)\r
-                    self.refactor_file(fullname, write, doctests_only)\r
-            # Modify dirnames in-place to remove subdirs with leading dots\r
-            dirnames[:] = [dn for dn in dirnames if not dn.startswith(".")]\r
-\r
-    def _read_python_source(self, filename):\r
-        """\r
-        Do our best to decode a Python source file correctly.\r
-        """\r
-        try:\r
-            f = open(filename, "rb")\r
-        except IOError as err:\r
-            self.log_error("Can't open %s: %s", filename, err)\r
-            return None, None\r
-        try:\r
-            encoding = tokenize.detect_encoding(f.readline)[0]\r
-        finally:\r
-            f.close()\r
-        with _open_with_encoding(filename, "r", encoding=encoding) as f:\r
-            return _from_system_newlines(f.read()), encoding\r
-\r
-    def refactor_file(self, filename, write=False, doctests_only=False):\r
-        """Refactors a file."""\r
-        input, encoding = self._read_python_source(filename)\r
-        if input is None:\r
-            # Reading the file failed.\r
-            return\r
-        input += u"\n" # Silence certain parse errors\r
-        if doctests_only:\r
-            self.log_debug("Refactoring doctests in %s", filename)\r
-            output = self.refactor_docstring(input, filename)\r
-            if output != input:\r
-                self.processed_file(output, filename, input, write, encoding)\r
-            else:\r
-                self.log_debug("No doctest changes in %s", filename)\r
-        else:\r
-            tree = self.refactor_string(input, filename)\r
-            if tree and tree.was_changed:\r
-                # The [:-1] is to take off the \n we added earlier\r
-                self.processed_file(unicode(tree)[:-1], filename,\r
-                                    write=write, encoding=encoding)\r
-            else:\r
-                self.log_debug("No changes in %s", filename)\r
-\r
-    def refactor_string(self, data, name):\r
-        """Refactor a given input string.\r
-\r
-        Args:\r
-            data: a string holding the code to be refactored.\r
-            name: a human-readable name for use in error/log messages.\r
-\r
-        Returns:\r
-            An AST corresponding to the refactored input stream; None if\r
-            there were errors during the parse.\r
-        """\r
-        features = _detect_future_features(data)\r
-        if "print_function" in features:\r
-            self.driver.grammar = pygram.python_grammar_no_print_statement\r
-        try:\r
-            tree = self.driver.parse_string(data)\r
-        except Exception as err:\r
-            self.log_error("Can't parse %s: %s: %s",\r
-                           name, err.__class__.__name__, err)\r
-            return\r
-        finally:\r
-            self.driver.grammar = self.grammar\r
-        tree.future_features = features\r
-        self.log_debug("Refactoring %s", name)\r
-        self.refactor_tree(tree, name)\r
-        return tree\r
-\r
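In-memory code goes through the same machinery; a minimal sketch with a single fixer:

    from lib2to3.refactor import RefactoringTool

    rt = RefactoringTool(["lib2to3.fixes.fix_print"])
    tree = rt.refactor_string(u"print 'hello'\n", "<example>")
    print(unicode(tree))   # -> print('hello')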
-    def refactor_stdin(self, doctests_only=False):\r
-        input = sys.stdin.read()\r
-        if doctests_only:\r
-            self.log_debug("Refactoring doctests in stdin")\r
-            output = self.refactor_docstring(input, "<stdin>")\r
-            if output != input:\r
-                self.processed_file(output, "<stdin>", input)\r
-            else:\r
-                self.log_debug("No doctest changes in stdin")\r
-        else:\r
-            tree = self.refactor_string(input, "<stdin>")\r
-            if tree and tree.was_changed:\r
-                self.processed_file(unicode(tree), "<stdin>", input)\r
-            else:\r
-                self.log_debug("No changes in stdin")\r
-\r
-    def refactor_tree(self, tree, name):\r
-        """Refactors a parse tree (modifying the tree in place).\r
-\r
-        For compatible patterns the bottom matcher module is\r
-        used. Otherwise the tree is traversed node-to-node for\r
-        matches.\r
-\r
-        Args:\r
-            tree: a pytree.Node instance representing the root of the tree\r
-                  to be refactored.\r
-            name: a human-readable name for this tree.\r
-\r
-        Returns:\r
-            True if the tree was modified, False otherwise.\r
-        """\r
-\r
-        for fixer in chain(self.pre_order, self.post_order):\r
-            fixer.start_tree(tree, name)\r
-\r
-        # use traditional matching for the BM-incompatible fixers\r
-        self.traverse_by(self.bmi_pre_order_heads, tree.pre_order())\r
-        self.traverse_by(self.bmi_post_order_heads, tree.post_order())\r
-\r
-        # obtain a set of candidate nodes\r
-        match_set = self.BM.run(tree.leaves())\r
-\r
-        while any(match_set.values()):\r
-            for fixer in self.BM.fixers:\r
-                if fixer in match_set and match_set[fixer]:\r
-                    # sort by depth; apply fixers from the bottom of the AST to the top\r
-                    match_set[fixer].sort(key=pytree.Base.depth, reverse=True)\r
-\r
-                    if fixer.keep_line_order:\r
-                        # some fixers (e.g. fix_imports) must be applied\r
-                        # in the original file's line order\r
-                        match_set[fixer].sort(key=pytree.Base.get_lineno)\r
-\r
-                    for node in list(match_set[fixer]):\r
-                        if node in match_set[fixer]:\r
-                            match_set[fixer].remove(node)\r
-\r
-                        try:\r
-                            find_root(node)\r
-                        except AssertionError:\r
-                            # this node has been cut off from the tree\r
-                            # by a previous transformation; skip it\r
-                            continue\r
-\r
-                        if node.fixers_applied and fixer in node.fixers_applied:\r
-                            # do not apply the same fixer again\r
-                            continue\r
-\r
-                        results = fixer.match(node)\r
-\r
-                        if results:\r
-                            new = fixer.transform(node, results)\r
-                            if new is not None:\r
-                                node.replace(new)\r
-                                #new.fixers_applied.append(fixer)\r
-                                for node in new.post_order():\r
-                                    # do not apply the fixer again to\r
-                                    # this or any subnode\r
-                                    if not node.fixers_applied:\r
-                                        node.fixers_applied = []\r
-                                    node.fixers_applied.append(fixer)\r
-\r
-                                # update the original match set for\r
-                                # the added code\r
-                                new_matches = self.BM.run(new.leaves())\r
-                                for fxr in new_matches:\r
-                                    if fxr not in match_set:\r
-                                        match_set[fxr] = []\r
-\r
-                                    match_set[fxr].extend(new_matches[fxr])\r
-\r
-        for fixer in chain(self.pre_order, self.post_order):\r
-            fixer.finish_tree(tree, name)\r
-        return tree.was_changed\r
-\r
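The bottom-up ordering matters for nested matches: inner nodes are rewritten before their parents, and the rescan of new.leaves() catches code a transformation just introduced. A sketch with nested apply() calls, which should come out fully converted:

    from lib2to3.refactor import RefactoringTool

    rt = RefactoringTool(["lib2to3.fixes.fix_apply"])
    tree = rt.refactor_string(u"apply(f, apply(g, args))\n", "<example>")
    print(unicode(tree))   # expected: f(*g(*args))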
-    def traverse_by(self, fixers, traversal):\r
-        """Traverse an AST, applying a set of fixers to each node.\r
-\r
-        This is a helper method for refactor_tree().\r
-\r
-        Args:\r
-            fixers: a list of fixer instances.\r
-            traversal: a generator that yields AST nodes.\r
-\r
-        Returns:\r
-            None\r
-        """\r
-        if not fixers:\r
-            return\r
-        for node in traversal:\r
-            for fixer in fixers[node.type]:\r
-                results = fixer.match(node)\r
-                if results:\r
-                    new = fixer.transform(node, results)\r
-                    if new is not None:\r
-                        node.replace(new)\r
-                        node = new\r
-\r
-    def processed_file(self, new_text, filename, old_text=None, write=False,\r
-                       encoding=None):\r
-        """\r
-        Called when a file has been refactored, and there are changes.\r
-        """\r
-        self.files.append(filename)\r
-        if old_text is None:\r
-            old_text = self._read_python_source(filename)[0]\r
-            if old_text is None:\r
-                return\r
-        equal = old_text == new_text\r
-        self.print_output(old_text, new_text, filename, equal)\r
-        if equal:\r
-            self.log_debug("No changes to %s", filename)\r
-            return\r
-        if write:\r
-            self.write_file(new_text, filename, old_text, encoding)\r
-        else:\r
-            self.log_debug("Not writing changes to %s", filename)\r
-\r
-    def write_file(self, new_text, filename, old_text, encoding=None):\r
-        """Writes a string to a file.\r
-\r
-        The caller (processed_file) decides whether to write at all and can\r
-        show a diff first via the print_output() hook; this method simply\r
-        replaces the file's contents.\r
-        """\r
-        try:\r
-            f = _open_with_encoding(filename, "w", encoding=encoding)\r
-        except os.error as err:\r
-            self.log_error("Can't create %s: %s", filename, err)\r
-            return\r
-        try:\r
-            f.write(_to_system_newlines(new_text))\r
-        except os.error as err:\r
-            self.log_error("Can't write %s: %s", filename, err)\r
-        finally:\r
-            f.close()\r
-        self.log_debug("Wrote changes to %s", filename)\r
-        self.wrote = True\r
-\r
-    PS1 = ">>> "\r
-    PS2 = "... "\r
-\r
-    def refactor_docstring(self, input, filename):\r
-        """Refactors a docstring, looking for doctests.\r
-\r
-        This returns a modified version of the input string.  It looks\r
-        for doctests, which start with a ">>>" prompt, and may be\r
-        continued with "..." prompts, as long as the "..." is indented\r
-        the same as the ">>>".\r
-\r
-        (Unfortunately we can't use the doctest module's parser,\r
-        since, like most parsers, it is not geared towards preserving\r
-        the original source.)\r
-        """\r
-        result = []\r
-        block = None\r
-        block_lineno = None\r
-        indent = None\r
-        lineno = 0\r
-        for line in input.splitlines(True):\r
-            lineno += 1\r
-            if line.lstrip().startswith(self.PS1):\r
-                if block is not None:\r
-                    result.extend(self.refactor_doctest(block, block_lineno,\r
-                                                        indent, filename))\r
-                block_lineno = lineno\r
-                block = [line]\r
-                i = line.find(self.PS1)\r
-                indent = line[:i]\r
-            elif (indent is not None and\r
-                  (line.startswith(indent + self.PS2) or\r
-                   line == indent + self.PS2.rstrip() + u"\n")):\r
-                block.append(line)\r
-            else:\r
-                if block is not None:\r
-                    result.extend(self.refactor_doctest(block, block_lineno,\r
-                                                        indent, filename))\r
-                block = None\r
-                indent = None\r
-                result.append(line)\r
-        if block is not None:\r
-            result.extend(self.refactor_doctest(block, block_lineno,\r
-                                                indent, filename))\r
-        return u"".join(result)\r
-\r
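A short sketch of the doctest path: the prompt line is refactored while the expected output is left alone:

    from lib2to3.refactor import RefactoringTool

    rt = RefactoringTool(["lib2to3.fixes.fix_print"])
    doc = u"Example.\n\n    >>> print 'x'\n    x\n"
    print(rt.refactor_docstring(doc, "<docstring>"))
    # the prompt line comes back as:  >>> print('x')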
-    def refactor_doctest(self, block, lineno, indent, filename):\r
-        """Refactors one doctest.\r
-\r
-        A doctest is given as a block of lines, the first of which starts\r
-        with ">>>" (possibly indented), while the remaining lines start\r
-        with "..." (identically indented).\r
-\r
-        """\r
-        try:\r
-            tree = self.parse_block(block, lineno, indent)\r
-        except Exception as err:\r
-            if self.logger.isEnabledFor(logging.DEBUG):\r
-                for line in block:\r
-                    self.log_debug("Source: %s", line.rstrip(u"\n"))\r
-            self.log_error("Can't parse docstring in %s line %s: %s: %s",\r
-                           filename, lineno, err.__class__.__name__, err)\r
-            return block\r
-        if self.refactor_tree(tree, filename):\r
-            new = unicode(tree).splitlines(True)\r
-            # Undo the adjustment of the line numbers in wrap_toks() below.\r
-            clipped, new = new[:lineno-1], new[lineno-1:]\r
-            assert clipped == [u"\n"] * (lineno-1), clipped\r
-            if not new[-1].endswith(u"\n"):\r
-                new[-1] += u"\n"\r
-            block = [indent + self.PS1 + new.pop(0)]\r
-            if new:\r
-                block += [indent + self.PS2 + line for line in new]\r
-        return block\r
-\r
-    def summarize(self):\r
-        if self.wrote:\r
-            were = "were"\r
-        else:\r
-            were = "need to be"\r
-        if not self.files:\r
-            self.log_message("No files %s modified.", were)\r
-        else:\r
-            self.log_message("Files that %s modified:", were)\r
-            for file in self.files:\r
-                self.log_message(file)\r
-        if self.fixer_log:\r
-            self.log_message("Warnings/messages while refactoring:")\r
-            for message in self.fixer_log:\r
-                self.log_message(message)\r
-        if self.errors:\r
-            if len(self.errors) == 1:\r
-                self.log_message("There was 1 error:")\r
-            else:\r
-                self.log_message("There were %d errors:", len(self.errors))\r
-            for msg, args, kwds in self.errors:\r
-                self.log_message(msg, *args, **kwds)\r
-\r
-    def parse_block(self, block, lineno, indent):\r
-        """Parses a block into a tree.\r
-\r
-        This is necessary to get correct line number / offset information\r
-        in the parser diagnostics and embedded into the parse tree.\r
-        """\r
-        tree = self.driver.parse_tokens(self.wrap_toks(block, lineno, indent))\r
-        tree.future_features = frozenset()\r
-        return tree\r
-\r
-    def wrap_toks(self, block, lineno, indent):\r
-        """Wraps a tokenize stream to systematically modify start/end."""\r
-        tokens = tokenize.generate_tokens(self.gen_lines(block, indent).next)\r
-        for type, value, (line0, col0), (line1, col1), line_text in tokens:\r
-            line0 += lineno - 1\r
-            line1 += lineno - 1\r
-            # Don't bother updating the columns; this is too complicated\r
-            # since line_text would also have to be updated and it would\r
-            # still break for tokens spanning lines.  Let the user guess\r
-            # that the column numbers for doctests are relative to the\r
-            # end of the prompt string (PS1 or PS2).\r
-            yield type, value, (line0, col0), (line1, col1), line_text\r
-\r
-\r
-    def gen_lines(self, block, indent):\r
-        """Generates lines as expected by tokenize from a list of lines.\r
-\r
-        This strips the first len(indent + self.PS1) characters off each line.\r
-        """\r
-        prefix1 = indent + self.PS1\r
-        prefix2 = indent + self.PS2\r
-        prefix = prefix1\r
-        for line in block:\r
-            if line.startswith(prefix):\r
-                yield line[len(prefix):]\r
-            elif line == prefix.rstrip() + u"\n":\r
-                yield u"\n"\r
-            else:\r
-                raise AssertionError("line=%r, prefix=%r" % (line, prefix))\r
-            prefix = prefix2\r
-        while True:\r
-            yield ""\r
-\r
-\r
-class MultiprocessingUnsupported(Exception):\r
-    pass\r
-\r
-\r
-class MultiprocessRefactoringTool(RefactoringTool):\r
-\r
-    def __init__(self, *args, **kwargs):\r
-        super(MultiprocessRefactoringTool, self).__init__(*args, **kwargs)\r
-        self.queue = None\r
-        self.output_lock = None\r
-\r
-    def refactor(self, items, write=False, doctests_only=False,\r
-                 num_processes=1):\r
-        if num_processes == 1:\r
-            return super(MultiprocessRefactoringTool, self).refactor(\r
-                items, write, doctests_only)\r
-        try:\r
-            import multiprocessing\r
-        except ImportError:\r
-            raise MultiprocessingUnsupported\r
-        if self.queue is not None:\r
-            raise RuntimeError("already doing multiple processes")\r
-        self.queue = multiprocessing.JoinableQueue()\r
-        self.output_lock = multiprocessing.Lock()\r
-        processes = [multiprocessing.Process(target=self._child)\r
-                     for i in xrange(num_processes)]\r
-        try:\r
-            for p in processes:\r
-                p.start()\r
-            super(MultiprocessRefactoringTool, self).refactor(items, write,\r
-                                                              doctests_only)\r
-        finally:\r
-            self.queue.join()\r
-            for i in xrange(num_processes):\r
-                self.queue.put(None)\r
-            for p in processes:\r
-                if p.is_alive():\r
-                    p.join()\r
-            self.queue = None\r
-\r
-    def _child(self):\r
-        task = self.queue.get()\r
-        while task is not None:\r
-            args, kwargs = task\r
-            try:\r
-                super(MultiprocessRefactoringTool, self).refactor_file(\r
-                    *args, **kwargs)\r
-            finally:\r
-                self.queue.task_done()\r
-            task = self.queue.get()\r
-\r
-    def refactor_file(self, *args, **kwargs):\r
-        if self.queue is not None:\r
-            self.queue.put((args, kwargs))\r
-        else:\r
-            return super(MultiprocessRefactoringTool, self).refactor_file(\r
-                *args, **kwargs)\r
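The multiprocess variant degrades cleanly when the multiprocessing module is unavailable; a usage sketch, with pkg/ as a hypothetical target directory:

    from lib2to3.refactor import (MultiprocessRefactoringTool,
                                  MultiprocessingUnsupported,
                                  get_fixers_from_package)

    rt = MultiprocessRefactoringTool(get_fixers_from_package("lib2to3.fixes"))
    try:
        rt.refactor(["pkg/"], write=True, num_processes=4)
    except MultiprocessingUnsupported:
        rt.refactor(["pkg/"], write=True)   # single-process fallback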