]>
Commit | Line | Data |
---|---|---|
4710c53d | 1 | # Module doctest.\r |
2 | # Released to the public domain 16-Jan-2001, by Tim Peters (tim@python.org).\r | |
3 | # Major enhancements and refactoring by:\r | |
4 | # Jim Fulton\r | |
5 | # Edward Loper\r | |
6 | \r | |
7 | # Provided as-is; use at your own risk; no warranty; no promises; enjoy!\r | |
8 | \r | |
9 | r"""Module doctest -- a framework for running examples in docstrings.\r | |
10 | \r | |
11 | In simplest use, end each module M to be tested with:\r | |
12 | \r | |
13 | def _test():\r | |
14 | import doctest\r | |
15 | doctest.testmod()\r | |
16 | \r | |
17 | if __name__ == "__main__":\r | |
18 | _test()\r | |
19 | \r | |
20 | Then running the module as a script will cause the examples in the\r | |
21 | docstrings to get executed and verified:\r | |
22 | \r | |
23 | python M.py\r | |
24 | \r | |
25 | This won't display anything unless an example fails, in which case the\r | |
26 | failing example(s) and the cause(s) of the failure(s) are printed to stdout\r | |
27 | (why not stderr? because stderr is a lame hack <0.2 wink>), and the final\r | |
28 | line of output is "Test failed.".\r | |
29 | \r | |
30 | Run it with the -v switch instead:\r | |
31 | \r | |
32 | python M.py -v\r | |
33 | \r | |
34 | and a detailed report of all examples tried is printed to stdout, along\r | |
35 | with assorted summaries at the end.\r | |
36 | \r | |
37 | You can force verbose mode by passing "verbose=True" to testmod, or prohibit\r | |
38 | it by passing "verbose=False". In either of those cases, sys.argv is not\r | |
39 | examined by testmod.\r | |
40 | \r | |
41 | There are a variety of other ways to run doctests, including integration\r | |
42 | with the unittest framework, and support for running non-Python text\r | |
43 | files containing doctests. There are also many ways to override parts\r | |
44 | of doctest's default behaviors. See the Library Reference Manual for\r | |
45 | details.\r | |
46 | """\r | |
47 | \r | |
48 | __docformat__ = 'reStructuredText en'\r | |
49 | \r | |
50 | __all__ = [\r | |
51 | # 0, Option Flags\r | |
52 | 'register_optionflag',\r | |
53 | 'DONT_ACCEPT_TRUE_FOR_1',\r | |
54 | 'DONT_ACCEPT_BLANKLINE',\r | |
55 | 'NORMALIZE_WHITESPACE',\r | |
56 | 'ELLIPSIS',\r | |
57 | 'SKIP',\r | |
58 | 'IGNORE_EXCEPTION_DETAIL',\r | |
59 | 'COMPARISON_FLAGS',\r | |
60 | 'REPORT_UDIFF',\r | |
61 | 'REPORT_CDIFF',\r | |
62 | 'REPORT_NDIFF',\r | |
63 | 'REPORT_ONLY_FIRST_FAILURE',\r | |
64 | 'REPORTING_FLAGS',\r | |
65 | # 1. Utility Functions\r | |
66 | # 2. Example & DocTest\r | |
67 | 'Example',\r | |
68 | 'DocTest',\r | |
69 | # 3. Doctest Parser\r | |
70 | 'DocTestParser',\r | |
71 | # 4. Doctest Finder\r | |
72 | 'DocTestFinder',\r | |
73 | # 5. Doctest Runner\r | |
74 | 'DocTestRunner',\r | |
75 | 'OutputChecker',\r | |
76 | 'DocTestFailure',\r | |
77 | 'UnexpectedException',\r | |
78 | 'DebugRunner',\r | |
79 | # 6. Test Functions\r | |
80 | 'testmod',\r | |
81 | 'testfile',\r | |
82 | 'run_docstring_examples',\r | |
83 | # 7. Tester\r | |
84 | 'Tester',\r | |
85 | # 8. Unittest Support\r | |
86 | 'DocTestSuite',\r | |
87 | 'DocFileSuite',\r | |
88 | 'set_unittest_reportflags',\r | |
89 | # 9. Debugging Support\r | |
90 | 'script_from_examples',\r | |
91 | 'testsource',\r | |
92 | 'debug_src',\r | |
93 | 'debug',\r | |
94 | ]\r | |
95 | \r | |
96 | import __future__\r | |
97 | \r | |
98 | import sys, traceback, inspect, linecache, os, re\r | |
99 | import unittest, difflib, pdb, tempfile\r | |
100 | import warnings\r | |
101 | from StringIO import StringIO\r | |
102 | from collections import namedtuple\r | |
103 | \r | |
# (failed, attempted) pair returned by the doctest runner entry points
# (e.g. testmod/testfile); being a namedtuple it also unpacks as a tuple.
TestResults = namedtuple('TestResults', 'failed attempted')
105 | \r | |
106 | # There are 4 basic classes:\r | |
107 | # - Example: a <source, want> pair, plus an intra-docstring line number.\r | |
108 | # - DocTest: a collection of examples, parsed from a docstring, plus\r | |
109 | # info about where the docstring came from (name, filename, lineno).\r | |
110 | # - DocTestFinder: extracts DocTests from a given object's docstring and\r | |
111 | # its contained objects' docstrings.\r | |
112 | # - DocTestRunner: runs DocTest cases, and accumulates statistics.\r | |
113 | #\r | |
114 | # So the basic picture is:\r | |
115 | #\r | |
116 | # list of:\r | |
117 | # +------+ +---------+ +-------+\r | |
118 | # |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results|\r | |
119 | # +------+ +---------+ +-------+\r | |
120 | # | Example |\r | |
121 | # | ... |\r | |
122 | # | Example |\r | |
123 | # +---------+\r | |
124 | \r | |
125 | # Option constants.\r | |
126 | \r | |
# Registry mapping option-flag names to their integer bit values.
OPTIONFLAGS_BY_NAME = {}

def register_optionflag(name):
    """Register `name` as a doctest option flag and return its value.

    Each previously unseen name is assigned the next unused power of
    two; registering an already-known name simply returns its existing
    value, so flags are stable across repeated registration.
    """
    try:
        return OPTIONFLAGS_BY_NAME[name]
    except KeyError:
        flag = 1 << len(OPTIONFLAGS_BY_NAME)
        OPTIONFLAGS_BY_NAME[name] = flag
        return flag
131 | \r | |
# Individual option flags (each a distinct bit; see register_optionflag).
# The first group controls how expected and actual output are compared.
DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1')
DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE')
NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE')
ELLIPSIS = register_optionflag('ELLIPSIS')
SKIP = register_optionflag('SKIP')
IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL')

# Mask of all flags that affect output comparison.
COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 |
                    DONT_ACCEPT_BLANKLINE |
                    NORMALIZE_WHITESPACE |
                    ELLIPSIS |
                    SKIP |
                    IGNORE_EXCEPTION_DETAIL)

# This group controls how failures are reported.
REPORT_UDIFF = register_optionflag('REPORT_UDIFF')
REPORT_CDIFF = register_optionflag('REPORT_CDIFF')
REPORT_NDIFF = register_optionflag('REPORT_NDIFF')
REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE')

# Mask of all flags that affect failure reporting.
REPORTING_FLAGS = (REPORT_UDIFF |
                   REPORT_CDIFF |
                   REPORT_NDIFF |
                   REPORT_ONLY_FIRST_FAILURE)

# Special string markers for use in `want` strings:
BLANKLINE_MARKER = '<BLANKLINE>'
ELLIPSIS_MARKER = '...'
159 | \r | |
160 | ######################################################################\r | |
161 | ## Table of Contents\r | |
162 | ######################################################################\r | |
163 | # 1. Utility Functions\r | |
164 | # 2. Example & DocTest -- store test cases\r | |
165 | # 3. DocTest Parser -- extracts examples from strings\r | |
166 | # 4. DocTest Finder -- extracts test cases from objects\r | |
167 | # 5. DocTest Runner -- runs test cases\r | |
168 | # 6. Test Functions -- convenient wrappers for testing\r | |
169 | # 7. Tester Class -- for backwards compatibility\r | |
170 | # 8. Unittest Support\r | |
171 | # 9. Debugging Support\r | |
172 | # 10. Example Usage\r | |
173 | \r | |
174 | ######################################################################\r | |
175 | ## 1. Utility Functions\r | |
176 | ######################################################################\r | |
177 | \r | |
178 | def _extract_future_flags(globs):\r | |
179 | """\r | |
180 | Return the compiler-flags associated with the future features that\r | |
181 | have been imported into the given namespace (globs).\r | |
182 | """\r | |
183 | flags = 0\r | |
184 | for fname in __future__.all_feature_names:\r | |
185 | feature = globs.get(fname, None)\r | |
186 | if feature is getattr(__future__, fname):\r | |
187 | flags |= feature.compiler_flag\r | |
188 | return flags\r | |
189 | \r | |
190 | def _normalize_module(module, depth=2):\r | |
191 | """\r | |
192 | Return the module specified by `module`. In particular:\r | |
193 | - If `module` is a module, then return module.\r | |
194 | - If `module` is a string, then import and return the\r | |
195 | module with that name.\r | |
196 | - If `module` is None, then return the calling module.\r | |
197 | The calling module is assumed to be the module of\r | |
198 | the stack frame at the given depth in the call stack.\r | |
199 | """\r | |
200 | if inspect.ismodule(module):\r | |
201 | return module\r | |
202 | elif isinstance(module, (str, unicode)):\r | |
203 | return __import__(module, globals(), locals(), ["*"])\r | |
204 | elif module is None:\r | |
205 | return sys.modules[sys._getframe(depth).f_globals['__name__']]\r | |
206 | else:\r | |
207 | raise TypeError("Expected a module, string, or None")\r | |
208 | \r | |
209 | def _load_testfile(filename, package, module_relative):\r | |
210 | if module_relative:\r | |
211 | package = _normalize_module(package, 3)\r | |
212 | filename = _module_relative_path(package, filename)\r | |
213 | if hasattr(package, '__loader__'):\r | |
214 | if hasattr(package.__loader__, 'get_data'):\r | |
215 | file_contents = package.__loader__.get_data(filename)\r | |
216 | # get_data() opens files as 'rb', so one must do the equivalent\r | |
217 | # conversion as universal newlines would do.\r | |
218 | return file_contents.replace(os.linesep, '\n'), filename\r | |
219 | with open(filename) as f:\r | |
220 | return f.read(), filename\r | |
221 | \r | |
# Use sys.stdout's encoding for output; fall back to UTF-8 when the
# original stdout has no usable `encoding` attribute.
_encoding = getattr(sys.__stdout__, 'encoding', None) or 'utf-8'
224 | \r | |
def _indent(s, indent=4):
    """
    Prepend `indent` space characters to every non-blank line of `s`
    and return the result.  A unicode string is first encoded with the
    stdout encoding, using the `backslashreplace` error handler.
    """
    if isinstance(s, unicode):
        s = s.encode(_encoding, 'backslashreplace')
    prefix = ' ' * indent
    # (?m)^(?!$) matches the start of every non-empty line.
    return re.sub('(?m)^(?!$)', prefix, s)
236 | \r | |
237 | def _exception_traceback(exc_info):\r | |
238 | """\r | |
239 | Return a string containing a traceback message for the given\r | |
240 | exc_info tuple (as returned by sys.exc_info()).\r | |
241 | """\r | |
242 | # Get a traceback message.\r | |
243 | excout = StringIO()\r | |
244 | exc_type, exc_val, exc_tb = exc_info\r | |
245 | traceback.print_exception(exc_type, exc_val, exc_tb, file=excout)\r | |
246 | return excout.getvalue()\r | |
247 | \r | |
# Override some StringIO methods.
class _SpoofOut(StringIO):
    """A StringIO stand-in installed as sys.stdout while examples run.

    It guarantees captured output ends with a newline and clears the
    `softspace` attribute (set by the Python 2 `print` statement when
    a trailing comma is used) so one example cannot affect the next.
    """
    def getvalue(self):
        result = StringIO.getvalue(self)
        # If anything at all was written, make sure there's a trailing
        # newline.  There's no way for the expected output to indicate
        # that a trailing newline is missing.
        if result and not result.endswith("\n"):
            result += "\n"
        # Prevent softspace from screwing up the next test case, in
        # case they used print with a trailing comma in an example.
        if hasattr(self, "softspace"):
            del self.softspace
        return result

    def truncate(self, size=None):
        # Delegate, then drop the print-statement bookkeeping as above.
        StringIO.truncate(self, size)
        if hasattr(self, "softspace"):
            del self.softspace
        # NOTE(review): `buf` is an internal attribute of the Python 2
        # StringIO.StringIO implementation — this relies on it directly.
        if not self.buf:
            # Reset it to an empty string, to make sure it's not unicode.
            self.buf = ''
270 | \r | |
# Worst-case linear-time ellipsis matching.
def _ellipsis_match(want, got):
    """
    Return True iff `got` matches `want`, where each occurrence of
    ELLIPSIS_MARKER ('...') in `want` may stand for any substring of
    `got` (including the empty string).

    Essentially the only subtle case:
    >>> _ellipsis_match('aa...aa', 'aaa')
    False
    """
    if ELLIPSIS_MARKER not in want:
        return want == got

    # The literal fragments between ellipses.
    pieces = want.split(ELLIPSIS_MARKER)
    assert len(pieces) >= 2

    # Text before the first ellipsis must match exactly at the front,
    # and text after the last ellipsis exactly at the back.
    lo, hi = 0, len(got)
    head = pieces[0]
    if head:
        if not got.startswith(head):
            return False
        lo = len(head)
        del pieces[0]
    tail = pieces[-1]
    if tail:
        if not got.endswith(tail):
            return False
        hi -= len(tail)
        del pieces[-1]

    if lo > hi:
        # The required head and tail matches overlap, as in
        # _ellipsis_match('aa...aa', 'aaa').
        return False

    # Greedily place each remaining piece at its leftmost possible
    # position; if that fails, no assignment of the ellipses succeeds.
    # An empty piece (from consecutive or leading/trailing ellipses)
    # matches trivially without advancing `lo`.
    for piece in pieces:
        lo = got.find(piece, lo, hi)
        if lo < 0:
            return False
        lo += len(piece)

    return True
320 | \r | |
321 | def _comment_line(line):\r | |
322 | "Return a commented form of the given line"\r | |
323 | line = line.rstrip()\r | |
324 | if line:\r | |
325 | return '# '+line\r | |
326 | else:\r | |
327 | return '#'\r | |
328 | \r | |
329 | class _OutputRedirectingPdb(pdb.Pdb):\r | |
330 | """\r | |
331 | A specialized version of the python debugger that redirects stdout\r | |
332 | to a given stream when interacting with the user. Stdout is *not*\r | |
333 | redirected when traced code is executed.\r | |
334 | """\r | |
335 | def __init__(self, out):\r | |
336 | self.__out = out\r | |
337 | self.__debugger_used = False\r | |
338 | pdb.Pdb.__init__(self, stdout=out)\r | |
339 | # still use input() to get user input\r | |
340 | self.use_rawinput = 1\r | |
341 | \r | |
342 | def set_trace(self, frame=None):\r | |
343 | self.__debugger_used = True\r | |
344 | if frame is None:\r | |
345 | frame = sys._getframe().f_back\r | |
346 | pdb.Pdb.set_trace(self, frame)\r | |
347 | \r | |
348 | def set_continue(self):\r | |
349 | # Calling set_continue unconditionally would break unit test\r | |
350 | # coverage reporting, as Bdb.set_continue calls sys.settrace(None).\r | |
351 | if self.__debugger_used:\r | |
352 | pdb.Pdb.set_continue(self)\r | |
353 | \r | |
354 | def trace_dispatch(self, *args):\r | |
355 | # Redirect stdout to the given stream.\r | |
356 | save_stdout = sys.stdout\r | |
357 | sys.stdout = self.__out\r | |
358 | # Call Pdb's trace dispatch method.\r | |
359 | try:\r | |
360 | return pdb.Pdb.trace_dispatch(self, *args)\r | |
361 | finally:\r | |
362 | sys.stdout = save_stdout\r | |
363 | \r | |
364 | # [XX] Normalize with respect to os.path.pardir?\r | |
365 | def _module_relative_path(module, path):\r | |
366 | if not inspect.ismodule(module):\r | |
367 | raise TypeError, 'Expected a module: %r' % module\r | |
368 | if path.startswith('/'):\r | |
369 | raise ValueError, 'Module-relative files may not have absolute paths'\r | |
370 | \r | |
371 | # Find the base directory for the path.\r | |
372 | if hasattr(module, '__file__'):\r | |
373 | # A normal module/package\r | |
374 | basedir = os.path.split(module.__file__)[0]\r | |
375 | elif module.__name__ == '__main__':\r | |
376 | # An interactive session.\r | |
377 | if len(sys.argv)>0 and sys.argv[0] != '':\r | |
378 | basedir = os.path.split(sys.argv[0])[0]\r | |
379 | else:\r | |
380 | basedir = os.curdir\r | |
381 | else:\r | |
382 | # A module w/o __file__ (this includes builtins)\r | |
383 | raise ValueError("Can't resolve paths relative to the module " +\r | |
384 | module + " (it has no __file__)")\r | |
385 | \r | |
386 | # Combine the base directory and the path.\r | |
387 | return os.path.join(basedir, *(path.split('/')))\r | |
388 | \r | |
389 | ######################################################################\r | |
390 | ## 2. Example & DocTest\r | |
391 | ######################################################################\r | |
392 | ## - An "example" is a <source, want> pair, where "source" is a\r | |
393 | ## fragment of source code, and "want" is the expected output for\r | |
394 | ## "source." The Example class also includes information about\r | |
395 | ## where the example was extracted from.\r | |
396 | ##\r | |
397 | ## - A "doctest" is a collection of examples, typically extracted from\r | |
398 | ## a string (such as an object's docstring). The DocTest class also\r | |
399 | ## includes information about where the string was extracted from.\r | |
400 | \r | |
class Example:
    """
    A single doctest example: a piece of source code together with its
    expected output.  `Example` defines the following attributes:

      - source: A single Python statement, always ending with a newline.
        The constructor adds a newline when needed.

      - want: The expected output from running the source code (either
        from stdout, or a traceback in case of exception).  `want` ends
        with a newline unless it's empty, in which case it's an empty
        string.  The constructor adds a newline when needed.

      - exc_msg: The exception message generated by the example, if the
        example is expected to generate an exception; or `None` if it
        is not expected to generate an exception.  This message is
        compared against the return value of
        `traceback.format_exception_only()`.  `exc_msg` ends with a
        newline unless it's `None`; the constructor adds one if needed.

      - lineno: The line number within the DocTest string containing
        this Example where the Example begins.  Zero-based with respect
        to the beginning of the DocTest.

      - indent: The example's indentation in the DocTest string, i.e.
        the number of space characters preceding the example's first
        prompt.

      - options: A dictionary mapping option flags to True or False,
        used to override default options for this example.  Flags not
        in the dictionary keep their default value (as given by the
        DocTestRunner's optionflags).  By default, no options are set.
    """
    def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
                 options=None):
        # Normalize: `source` must end in a newline, as must a
        # non-empty `want` and a non-None `exc_msg`.
        if not source.endswith('\n'):
            source = source + '\n'
        if want and not want.endswith('\n'):
            want = want + '\n'
        if exc_msg is not None and not exc_msg.endswith('\n'):
            exc_msg = exc_msg + '\n'
        self.source = source
        self.want = want
        self.exc_msg = exc_msg
        self.lineno = lineno
        self.indent = indent
        self.options = {} if options is None else options
453 | \r | |
class DocTest:
    """
    A collection of doctest examples that should be run in a single
    namespace.  Each `DocTest` defines the following attributes:

      - examples: the list of Example objects.

      - globs: the namespace (aka globals) the examples run in; the
        constructor stores a shallow copy of the mapping it is given.

      - name: a name identifying the DocTest (typically the name of
        the object whose docstring it was extracted from).

      - filename: the file this DocTest was extracted from, or `None`
        if unknown.

      - lineno: the zero-based line number within `filename` where the
        DocTest begins, or `None` if unavailable.

      - docstring: the string the examples were extracted from, or
        `None` if unavailable.
    """
    def __init__(self, examples, globs, name, filename, lineno, docstring):
        """
        Create a new DocTest containing the given examples.  The
        DocTest's globals are initialized with a copy of `globs`.
        """
        assert not isinstance(examples, basestring), \
               "DocTest no longer accepts str; use DocTestParser instead"
        self.examples = examples
        self.docstring = docstring
        self.globs = globs.copy()
        self.name = name
        self.filename = filename
        self.lineno = lineno

    def __repr__(self):
        count = len(self.examples)
        if count == 0:
            examples = 'no examples'
        elif count == 1:
            examples = '1 example'
        else:
            examples = '%d examples' % count
        return ('<DocTest %s from %s:%s (%s)>' %
                (self.name, self.filename, self.lineno, examples))

    # This lets us sort tests by name (Python 2 rich-comparison
    # fallback; non-DocTest operands always sort before DocTests):
    def __cmp__(self, other):
        if not isinstance(other, DocTest):
            return -1
        return cmp((self.name, self.filename, self.lineno, id(self)),
                   (other.name, other.filename, other.lineno, id(other)))
509 | \r | |
510 | ######################################################################\r | |
511 | ## 3. DocTestParser\r | |
512 | ######################################################################\r | |
513 | \r | |
class DocTestParser:
    """
    A class used to parse strings containing doctest examples.

    The main entry points are parse() (interleaved Examples and text),
    get_examples() (Examples only), and get_doctest() (a DocTest).
    """
    # This regular expression is used to find doctest examples in a
    # string.  It defines three groups: `source` is the source code
    # (including leading indentation and prompts); `indent` is the
    # indentation of the first (PS1) line of the source code; and
    # `want` is the expected output (including leading indentation).
    _EXAMPLE_RE = re.compile(r'''
        # Source consists of a PS1 line followed by zero or more PS2 lines.
        (?P<source>
            (?:^(?P<indent> [ ]*) >>>    .*)    # PS1 line
            (?:\n           [ ]*  \.\.\. .*)*)  # PS2 lines
        \n?
        # Want consists of any non-blank lines that do not start with PS1.
        (?P<want> (?:(?![ ]*$)    # Not a blank line
                     (?![ ]*>>>)  # Not a line starting with PS1
                     .*$\n?       # But any other line
                  )*)
        ''', re.MULTILINE | re.VERBOSE)

    # A regular expression for handling `want` strings that contain
    # expected exceptions.  It divides `want` into three pieces:
    #    - the traceback header line (`hdr`)
    #    - the traceback stack (`stack`)
    #    - the exception message (`msg`), as generated by
    #      traceback.format_exception_only()
    # `msg` may have multiple lines.  We assume/require that the
    # exception message is the first non-indented line starting with a word
    # character following the traceback header line.
    _EXCEPTION_RE = re.compile(r"""
        # Grab the traceback header.  Different versions of Python have
        # said different things on the first traceback line.
        ^(?P<hdr> Traceback\ \(
            (?: most\ recent\ call\ last
            |   innermost\ last
            ) \) :
        )
        \s* $                # toss trailing whitespace on the header.
        (?P<stack> .*?)      # don't blink: absorb stuff until...
        ^ (?P<msg> \w+ .*)   #     a line *starts* with alphanum.
        """, re.VERBOSE | re.MULTILINE | re.DOTALL)

    # A callable returning a true value iff its argument is a blank line
    # or contains a single comment.
    _IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match

    def parse(self, string, name='<string>'):
        """
        Divide the given string into examples and intervening text,
        and return them as a list of alternating Examples and strings.
        Line numbers for the Examples are 0-based.  The optional
        argument `name` is a name identifying this string, and is only
        used for error messages.
        """
        string = string.expandtabs()
        # If all lines begin with the same indentation, then strip it.
        min_indent = self._min_indent(string)
        if min_indent > 0:
            string = '\n'.join([l[min_indent:] for l in string.split('\n')])

        output = []
        # `charno` tracks position in `string`; `lineno` the 0-based line.
        charno, lineno = 0, 0
        # Find all doctest examples in the string:
        for m in self._EXAMPLE_RE.finditer(string):
            # Add the pre-example text to `output`.
            output.append(string[charno:m.start()])
            # Update lineno (lines before this example)
            lineno += string.count('\n', charno, m.start())
            # Extract info from the regexp match.
            (source, options, want, exc_msg) = \
                     self._parse_example(m, name, lineno)
            # Create an Example, and add it to the list.
            # (Blank/comment-only "sources" are skipped entirely.)
            if not self._IS_BLANK_OR_COMMENT(source):
                output.append( Example(source, want, exc_msg,
                                    lineno=lineno,
                                    indent=min_indent+len(m.group('indent')),
                                    options=options) )
            # Update lineno (lines inside this example)
            lineno += string.count('\n', m.start(), m.end())
            # Update charno.
            charno = m.end()
        # Add any remaining post-example text to `output`.
        output.append(string[charno:])
        return output

    def get_doctest(self, string, globs, name, filename, lineno):
        """
        Extract all doctest examples from the given string, and
        collect them into a `DocTest` object.

        `globs`, `name`, `filename`, and `lineno` are attributes for
        the new `DocTest` object.  See the documentation for `DocTest`
        for more information.
        """
        return DocTest(self.get_examples(string, name), globs,
                       name, filename, lineno, string)

    def get_examples(self, string, name='<string>'):
        """
        Extract all doctest examples from the given string, and return
        them as a list of `Example` objects.  Line numbers are
        0-based, because it's most common in doctests that nothing
        interesting appears on the same line as opening triple-quote,
        and so the first interesting line is called \"line 1\" then.

        The optional argument `name` is a name identifying this
        string, and is only used for error messages.
        """
        return [x for x in self.parse(string, name)
                if isinstance(x, Example)]

    def _parse_example(self, m, name, lineno):
        """
        Given a regular expression match from `_EXAMPLE_RE` (`m`),
        return a pair `(source, want)`, where `source` is the matched
        example's source code (with prompts and indentation stripped);
        and `want` is the example's expected output (with indentation
        stripped).

        `name` is the string's name, and `lineno` is the line number
        where the example starts; both are used for error messages.
        """
        # Get the example's indentation level.
        indent = len(m.group('indent'))

        # Divide source into lines; check that they're properly
        # indented; and then strip their indentation & prompts.
        source_lines = m.group('source').split('\n')
        self._check_prompt_blank(source_lines, indent, name, lineno)
        self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno)
        # indent+4 skips the indentation plus the 4-char '>>> '/'... ' prompt.
        source = '\n'.join([sl[indent+4:] for sl in source_lines])

        # Divide want into lines; check that it's properly indented; and
        # then strip the indentation.  Spaces before the last newline should
        # be preserved, so plain rstrip() isn't good enough.
        want = m.group('want')
        want_lines = want.split('\n')
        if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
            del want_lines[-1]  # forget final newline & spaces after it
        self._check_prefix(want_lines, ' '*indent, name,
                           lineno + len(source_lines))
        want = '\n'.join([wl[indent:] for wl in want_lines])

        # If `want` contains a traceback message, then extract it.
        m = self._EXCEPTION_RE.match(want)
        if m:
            exc_msg = m.group('msg')
        else:
            exc_msg = None

        # Extract options from the source.
        options = self._find_options(source, name, lineno)

        return source, options, want, exc_msg

    # This regular expression looks for option directives in the
    # source code of an example.  Option directives are comments
    # starting with "doctest:".  Warning: this may give false
    # positives for string-literals that contain the string
    # "#doctest:".  Eliminating these false positives would require
    # actually parsing the string; but we limit them by ignoring any
    # line containing "#doctest:" that is *followed* by a quote mark.
    _OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$',
                                      re.MULTILINE)

    def _find_options(self, source, name, lineno):
        """
        Return a dictionary containing option overrides extracted from
        option directives in the given source string.

        `name` is the string's name, and `lineno` is the line number
        where the example starts; both are used for error messages.

        Raises ValueError for an unknown or malformed option, or for a
        directive on a line with no example code.
        """
        options = {}
        # (note: with the current regexp, this will match at most once:)
        for m in self._OPTION_DIRECTIVE_RE.finditer(source):
            option_strings = m.group(1).replace(',', ' ').split()
            for option in option_strings:
                # Each option must be '+NAME' or '-NAME' with NAME registered.
                if (option[0] not in '+-' or
                    option[1:] not in OPTIONFLAGS_BY_NAME):
                    raise ValueError('line %r of the doctest for %s '
                                     'has an invalid option: %r' %
                                     (lineno+1, name, option))
                flag = OPTIONFLAGS_BY_NAME[option[1:]]
                options[flag] = (option[0] == '+')
        if options and self._IS_BLANK_OR_COMMENT(source):
            raise ValueError('line %r of the doctest for %s has an option '
                             'directive on a line with no example: %r' %
                             (lineno, name, source))
        return options

    # This regular expression finds the indentation of every non-blank
    # line in a string.
    _INDENT_RE = re.compile('^([ ]*)(?=\S)', re.MULTILINE)

    def _min_indent(self, s):
        "Return the minimum indentation of any non-blank line in `s`"
        indents = [len(indent) for indent in self._INDENT_RE.findall(s)]
        if len(indents) > 0:
            return min(indents)
        else:
            # No non-blank lines at all.
            return 0

    def _check_prompt_blank(self, lines, indent, name, lineno):
        """
        Given the lines of a source string (including prompts and
        leading indentation), check to make sure that every prompt is
        followed by a space character.  If any line is not followed by
        a space character, then raise ValueError.
        """
        for i, line in enumerate(lines):
            # line[indent:indent+3] is the 3-char prompt ('>>>' or '...');
            # the character after it must be a blank.
            if len(line) >= indent+4 and line[indent+3] != ' ':
                raise ValueError('line %r of the docstring for %s '
                                 'lacks blank after %s: %r' %
                                 (lineno+i+1, name,
                                  line[indent:indent+3], line))

    def _check_prefix(self, lines, prefix, name, lineno):
        """
        Check that every line in the given list starts with the given
        prefix; if any line does not, then raise a ValueError.
        """
        for i, line in enumerate(lines):
            if line and not line.startswith(prefix):
                raise ValueError('line %r of the docstring for %s has '
                                 'inconsistent leading whitespace: %r' %
                                 (lineno+i+1, name, line))
743 | \r | |
744 | \r | |
745 | ######################################################################\r | |
746 | ## 4. DocTest Finder\r | |
747 | ######################################################################\r | |
748 | \r | |
class DocTestFinder:
    """
    A class used to extract the DocTests that are relevant to a given
    object, from its docstring and the docstrings of its contained
    objects.  Doctests can currently be extracted from the following
    object types: modules, functions, classes, methods, staticmethods,
    classmethods, and properties.
    """

    def __init__(self, verbose=False, parser=DocTestParser(),
                 recurse=True, exclude_empty=True):
        """
        Create a new doctest finder.

        The optional argument `parser` specifies a class or
        function that should be used to create new DocTest objects (or
        objects that implement the same interface as DocTest).  The
        signature for this factory function should match the signature
        of the DocTest constructor.

        If the optional argument `recurse` is false, then `find` will
        only examine the given object, and not any contained objects.

        If the optional argument `exclude_empty` is false, then `find`
        will include tests for objects with empty docstrings.
        """
        # NOTE(review): the default `parser` is one instance created at
        # class-definition time and shared by every finder that doesn't
        # pass its own; safe as long as DocTestParser holds no per-call
        # state -- confirm against the DocTestParser definition.
        self._parser = parser
        self._verbose = verbose
        self._recurse = recurse
        self._exclude_empty = exclude_empty

    def find(self, obj, name=None, module=None, globs=None, extraglobs=None):
        """
        Return a list of the DocTests that are defined by the given
        object's docstring, or by any of its contained objects'
        docstrings.

        The optional parameter `module` is the module that contains
        the given object.  If the module is not specified or is None, then
        the test finder will attempt to automatically determine the
        correct module.  The object's module is used:

            - As a default namespace, if `globs` is not specified.
            - To prevent the DocTestFinder from extracting DocTests
              from objects that are imported from other modules.
            - To find the name of the file containing the object.
            - To help find the line number of the object within its
              file.

        Contained objects whose module does not match `module` are ignored.

        If `module` is False, no attempt to find the module will be made.
        This is obscure, of use mostly in tests:  if `module` is False, or
        is None but cannot be found automatically, then all objects are
        considered to belong to the (non-existent) module, so all contained
        objects will (recursively) be searched for doctests.

        The globals for each DocTest is formed by combining `globs`
        and `extraglobs` (bindings in `extraglobs` override bindings
        in `globs`).  A new copy of the globals dictionary is created
        for each DocTest.  If `globs` is not specified, then it
        defaults to the module's `__dict__`, if specified, or {}
        otherwise.  If `extraglobs` is not specified, then it defaults
        to {}.

        """
        # If name was not specified, then extract it from the object.
        if name is None:
            name = getattr(obj, '__name__', None)
            if name is None:
                raise ValueError("DocTestFinder.find: name must be given "
                                 "when obj.__name__ doesn't exist: %r" %
                                 (type(obj),))

        # Find the module that contains the given object (if obj is
        # a module, then module=obj.).  Note: this may fail, in which
        # case module will be None.
        if module is False:
            # `False` is a sentinel meaning "don't even try"; it is
            # distinct from None, which means "determine automatically".
            module = None
        elif module is None:
            module = inspect.getmodule(obj)

        # Read the module's source code.  This is used by
        # DocTestFinder._find_lineno to find the line number for a
        # given object's docstring.
        try:
            file = inspect.getsourcefile(obj) or inspect.getfile(obj)
            if module is not None:
                # Supply the module globals in case the module was
                # originally loaded via a PEP 302 loader and
                # file is not a valid filesystem path
                source_lines = linecache.getlines(file, module.__dict__)
            else:
                # No access to a loader, so assume it's a normal
                # filesystem path
                source_lines = linecache.getlines(file)
            if not source_lines:
                source_lines = None
        except TypeError:
            # inspect.getfile raises TypeError for built-in
            # modules/classes/functions; treat them as source-less.
            source_lines = None

        # Initialize globals, and merge in extraglobs.
        if globs is None:
            if module is None:
                globs = {}
            else:
                globs = module.__dict__.copy()
        else:
            globs = globs.copy()
        if extraglobs is not None:
            globs.update(extraglobs)
        if '__name__' not in globs:
            globs['__name__'] = '__main__' # provide a default module name

        # Recursively explore `obj`, extracting DocTests.
        tests = []
        self._find(tests, obj, name, module, source_lines, globs, {})
        # Sort the tests by alpha order of names, for consistency in
        # verbose-mode output.  This was a feature of doctest in Pythons
        # <= 2.3 that got lost by accident in 2.4.  It was repaired in
        # 2.4.4 and 2.5.
        tests.sort()
        return tests

    def _from_module(self, module, object):
        """
        Return true if the given object is defined in the given
        module.
        """
        if module is None:
            # No module filter was established; accept everything.
            return True
        elif inspect.getmodule(object) is not None:
            return module is inspect.getmodule(object)
        elif inspect.isfunction(object):
            # func_globals is the Python 2 spelling of __globals__.
            return module.__dict__ is object.func_globals
        elif inspect.isclass(object):
            return module.__name__ == object.__module__
        elif hasattr(object, '__module__'):
            return module.__name__ == object.__module__
        elif isinstance(object, property):
            return True # [XX] no way to be sure.
        else:
            raise ValueError("object must be a class or function")

    def _find(self, tests, obj, name, module, source_lines, globs, seen):
        """
        Find tests for the given object and any contained objects, and
        add them to `tests`.
        """
        if self._verbose:
            print 'Finding tests in %s' % name

        # If we've already processed this object, then ignore it.
        # `seen` is keyed by id(), which guards against cycles and
        # against an object reachable under two names.
        if id(obj) in seen:
            return
        seen[id(obj)] = 1

        # Find a test for this object, and add it to the list of tests.
        test = self._get_test(obj, name, module, globs, source_lines)
        if test is not None:
            tests.append(test)

        # Look for tests in a module's contained objects.
        if inspect.ismodule(obj) and self._recurse:
            for valname, val in obj.__dict__.items():
                valname = '%s.%s' % (name, valname)
                # Recurse to functions & classes.
                if ((inspect.isfunction(val) or inspect.isclass(val)) and
                    self._from_module(module, val)):
                    self._find(tests, val, valname, module, source_lines,
                               globs, seen)

        # Look for tests in a module's __test__ dictionary.
        if inspect.ismodule(obj) and self._recurse:
            for valname, val in getattr(obj, '__test__', {}).items():
                if not isinstance(valname, basestring):
                    raise ValueError("DocTestFinder.find: __test__ keys "
                                     "must be strings: %r" %
                                     (type(valname),))
                if not (inspect.isfunction(val) or inspect.isclass(val) or
                        inspect.ismethod(val) or inspect.ismodule(val) or
                        isinstance(val, basestring)):
                    raise ValueError("DocTestFinder.find: __test__ values "
                                     "must be strings, functions, methods, "
                                     "classes, or modules: %r" %
                                     (type(val),))
                valname = '%s.__test__.%s' % (name, valname)
                self._find(tests, val, valname, module, source_lines,
                           globs, seen)

        # Look for tests in a class's contained objects.
        if inspect.isclass(obj) and self._recurse:
            for valname, val in obj.__dict__.items():
                # Special handling for staticmethod/classmethod:
                # unwrap them to reach the underlying function.
                if isinstance(val, staticmethod):
                    val = getattr(obj, valname)
                if isinstance(val, classmethod):
                    val = getattr(obj, valname).im_func

                # Recurse to methods, properties, and nested classes.
                if ((inspect.isfunction(val) or inspect.isclass(val) or
                     isinstance(val, property)) and
                    self._from_module(module, val)):
                    valname = '%s.%s' % (name, valname)
                    self._find(tests, val, valname, module, source_lines,
                               globs, seen)

    def _get_test(self, obj, name, module, globs, source_lines):
        """
        Return a DocTest for the given object, if it defines a docstring;
        otherwise, return None.
        """
        # Extract the object's docstring.  If it doesn't have one,
        # then return None (no test for this object).
        if isinstance(obj, basestring):
            # Bare strings (e.g. __test__ values) are themselves the
            # "docstring" to parse.
            docstring = obj
        else:
            try:
                if obj.__doc__ is None:
                    docstring = ''
                else:
                    docstring = obj.__doc__
                    if not isinstance(docstring, basestring):
                        docstring = str(docstring)
            except (TypeError, AttributeError):
                docstring = ''

        # Find the docstring's location in the file.
        lineno = self._find_lineno(obj, source_lines)

        # Don't bother if the docstring is empty.
        if self._exclude_empty and not docstring:
            return None

        # Return a DocTest for this object.
        if module is None:
            filename = None
        else:
            filename = getattr(module, '__file__', module.__name__)
            if filename[-4:] in (".pyc", ".pyo"):
                # Report the .py source path, not the compiled file.
                filename = filename[:-1]
        return self._parser.get_doctest(docstring, globs, name,
                                        filename, lineno)

    def _find_lineno(self, obj, source_lines):
        """
        Return a line number of the given object's docstring.  Note:
        this method assumes that the object has a docstring.
        """
        lineno = None

        # Find the line number for modules.
        if inspect.ismodule(obj):
            lineno = 0

        # Find the line number for classes.
        # Note: this could be fooled if a class is defined multiple
        # times in a single file.
        if inspect.isclass(obj):
            if source_lines is None:
                return None
            pat = re.compile(r'^\s*class\s*%s\b' %
                             getattr(obj, '__name__', '-'))
            for i, line in enumerate(source_lines):
                if pat.match(line):
                    lineno = i
                    break

        # Find the line number for functions & methods.  Unwrap each
        # object kind down to a code object, which records its own
        # starting line.
        if inspect.ismethod(obj): obj = obj.im_func
        if inspect.isfunction(obj): obj = obj.func_code
        if inspect.istraceback(obj): obj = obj.tb_frame
        if inspect.isframe(obj): obj = obj.f_code
        if inspect.iscode(obj):
            # co_firstlineno is 1-based; convert to a 0-based index.
            lineno = getattr(obj, 'co_firstlineno', None)-1

        # Find the line number where the docstring starts.  Assume
        # that it's the first line that begins with a quote mark.
        # Note: this could be fooled by a multiline function
        # signature, where a continuation line begins with a quote
        # mark.
        if lineno is not None:
            if source_lines is None:
                return lineno+1
            pat = re.compile('(^|.*:)\s*\w*("|\')')
            for lineno in range(lineno, len(source_lines)):
                if pat.match(source_lines[lineno]):
                    return lineno

        # We couldn't find the line number.
        return None
1040 | \r | |
1041 | ######################################################################\r | |
1042 | ## 5. DocTest Runner\r | |
1043 | ######################################################################\r | |
1044 | \r | |
class DocTestRunner:
    """
    A class used to run DocTest test cases, and accumulate statistics.
    The `run` method is used to process a single DocTest case.  It
    returns a tuple `(f, t)`, where `t` is the number of test cases
    tried, and `f` is the number of test cases that failed.

        >>> tests = DocTestFinder().find(_TestClass)
        >>> runner = DocTestRunner(verbose=False)
        >>> tests.sort(key = lambda test: test.name)
        >>> for test in tests:
        ...     print test.name, '->', runner.run(test)
        _TestClass -> TestResults(failed=0, attempted=2)
        _TestClass.__init__ -> TestResults(failed=0, attempted=2)
        _TestClass.get -> TestResults(failed=0, attempted=2)
        _TestClass.square -> TestResults(failed=0, attempted=1)

    The `summarize` method prints a summary of all the test cases that
    have been run by the runner, and returns an aggregated `(f, t)`
    tuple:

        >>> runner.summarize(verbose=1)
        4 items passed all tests:
           2 tests in _TestClass
           2 tests in _TestClass.__init__
           2 tests in _TestClass.get
           1 tests in _TestClass.square
        7 tests in 4 items.
        7 passed and 0 failed.
        Test passed.
        TestResults(failed=0, attempted=7)

    The aggregated number of tried examples and failed examples is
    also available via the `tries` and `failures` attributes:

        >>> runner.tries
        7
        >>> runner.failures
        0

    The comparison between expected outputs and actual outputs is done
    by an `OutputChecker`.  This comparison may be customized with a
    number of option flags; see the documentation for `testmod` for
    more information.  If the option flags are insufficient, then the
    comparison may also be customized by passing a subclass of
    `OutputChecker` to the constructor.

    The test runner's display output can be controlled in two ways.
    First, an output function (`out`) can be passed to
    `TestRunner.run`; this function will be called with strings that
    should be displayed.  It defaults to `sys.stdout.write`.  If
    capturing the output is not sufficient, then the display output
    can be also customized by subclassing DocTestRunner, and
    overriding the methods `report_start`, `report_success`,
    `report_unexpected_exception`, and `report_failure`.
    """
    # This divider string is used to separate failure messages, and to
    # separate sections of the summary.
    DIVIDER = "*" * 70

    def __init__(self, checker=None, verbose=None, optionflags=0):
        """
        Create a new test runner.

        Optional keyword arg `checker` is the `OutputChecker` that
        should be used to compare the expected outputs and actual
        outputs of doctest examples.

        Optional keyword arg 'verbose' prints lots of stuff if true,
        only failures if false; by default, it's true iff '-v' is in
        sys.argv.

        Optional argument `optionflags` can be used to control how the
        test runner compares expected output to actual output, and how
        it displays failures.  See the documentation for `testmod` for
        more information.
        """
        self._checker = checker or OutputChecker()
        if verbose is None:
            verbose = '-v' in sys.argv
        self._verbose = verbose
        # `optionflags` may be temporarily modified by per-example
        # option directives; `original_optionflags` preserves the
        # constructor-supplied value.
        self.optionflags = optionflags
        self.original_optionflags = optionflags

        # Keep track of the examples we've run.
        self.tries = 0
        self.failures = 0
        # Maps test name -> (failures, tries) accumulated so far.
        self._name2ft = {}

        # Create a fake output target for capturing doctest output.
        self._fakeout = _SpoofOut()

    #/////////////////////////////////////////////////////////////////
    # Reporting methods
    #/////////////////////////////////////////////////////////////////

    def report_start(self, out, test, example):
        """
        Report that the test runner is about to process the given
        example.  (Only displays a message if verbose=True)
        """
        if self._verbose:
            if example.want:
                out('Trying:\n' + _indent(example.source) +
                    'Expecting:\n' + _indent(example.want))
            else:
                out('Trying:\n' + _indent(example.source) +
                    'Expecting nothing\n')

    def report_success(self, out, test, example, got):
        """
        Report that the given example ran successfully.  (Only
        displays a message if verbose=True)
        """
        if self._verbose:
            out("ok\n")

    def report_failure(self, out, test, example, got):
        """
        Report that the given example failed.
        """
        out(self._failure_header(test, example) +
            self._checker.output_difference(example, got, self.optionflags))

    def report_unexpected_exception(self, out, test, example, exc_info):
        """
        Report that the given example raised an unexpected exception.
        """
        out(self._failure_header(test, example) +
            'Exception raised:\n' + _indent(_exception_traceback(exc_info)))

    def _failure_header(self, test, example):
        """
        Return the common header for a failure report: a divider line,
        the example's file/line location, and its source.
        """
        out = [self.DIVIDER]
        if test.filename:
            if test.lineno is not None and example.lineno is not None:
                # example.lineno is relative to the start of the test.
                lineno = test.lineno + example.lineno + 1
            else:
                lineno = '?'
            out.append('File "%s", line %s, in %s' %
                       (test.filename, lineno, test.name))
        else:
            out.append('Line %s, in %s' % (example.lineno+1, test.name))
        out.append('Failed example:')
        source = example.source
        out.append(_indent(source))
        return '\n'.join(out)

    #/////////////////////////////////////////////////////////////////
    # DocTest Running
    #/////////////////////////////////////////////////////////////////

    def __run(self, test, compileflags, out):
        """
        Run the examples in `test`.  Write the outcome of each example
        with one of the `DocTestRunner.report_*` methods, using the
        writer function `out`.  `compileflags` is the set of compiler
        flags that should be used to execute examples.  Return a tuple
        `(f, t)`, where `t` is the number of examples tried, and `f`
        is the number of examples that failed.  The examples are run
        in the namespace `test.globs`.

        Note: `run` (the public wrapper) installs `self.debugger` and
        redirects sys.stdout to `self._fakeout` before invoking this
        method; both are relied upon below.
        """
        # Keep track of the number of failures and tries.
        failures = tries = 0

        # Save the option flags (since option directives can be used
        # to modify them).
        original_optionflags = self.optionflags

        SUCCESS, FAILURE, BOOM = range(3) # `outcome` state

        check = self._checker.check_output

        # Process each example.
        for examplenum, example in enumerate(test.examples):

            # If REPORT_ONLY_FIRST_FAILURE is set, then suppress
            # reporting after the first failure.
            quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
                     failures > 0)

            # Merge in the example's options.
            self.optionflags = original_optionflags
            if example.options:
                for (optionflag, val) in example.options.items():
                    if val:
                        self.optionflags |= optionflag
                    else:
                        self.optionflags &= ~optionflag

            # If 'SKIP' is set, then skip this example.
            if self.optionflags & SKIP:
                continue

            # Record that we started this example.
            tries += 1
            if not quiet:
                self.report_start(out, test, example)

            # Use a special filename for compile(), so we can retrieve
            # the source code during interactive debugging (see
            # __patched_linecache_getlines).
            filename = '<doctest %s[%d]>' % (test.name, examplenum)

            # Run the example in the given context (globs), and record
            # any exception that gets raised.  (But don't intercept
            # keyboard interrupts.)
            try:
                # Don't blink!  This is where the user's code gets run.
                exec compile(example.source, filename, "single",
                             compileflags, 1) in test.globs
                self.debugger.set_continue() # ==== Example Finished ====
                exception = None
            except KeyboardInterrupt:
                raise
            except:
                exception = sys.exc_info()
                self.debugger.set_continue() # ==== Example Finished ====

            got = self._fakeout.getvalue()  # the actual output
            self._fakeout.truncate(0)
            outcome = FAILURE   # guilty until proved innocent or insane

            # If the example executed without raising any exceptions,
            # verify its output.
            if exception is None:
                if check(example.want, got, self.optionflags):
                    outcome = SUCCESS

            # The example raised an exception:  check if it was expected.
            else:
                exc_info = sys.exc_info()
                exc_msg = traceback.format_exception_only(*exc_info[:2])[-1]
                if not quiet:
                    got += _exception_traceback(exc_info)

                # If `example.exc_msg` is None, then we weren't expecting
                # an exception.
                if example.exc_msg is None:
                    outcome = BOOM

                # We expected an exception:  see whether it matches.
                elif check(example.exc_msg, exc_msg, self.optionflags):
                    outcome = SUCCESS

                # Another chance if they didn't care about the detail.
                elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
                    # Compare only the exception-class prefix (up to the
                    # first colon), with any dotted module path stripped.
                    m1 = re.match(r'(?:[^:]*\.)?([^:]*:)', example.exc_msg)
                    m2 = re.match(r'(?:[^:]*\.)?([^:]*:)', exc_msg)
                    if m1 and m2 and check(m1.group(1), m2.group(1),
                                           self.optionflags):
                        outcome = SUCCESS

            # Report the outcome.
            if outcome is SUCCESS:
                if not quiet:
                    self.report_success(out, test, example, got)
            elif outcome is FAILURE:
                if not quiet:
                    self.report_failure(out, test, example, got)
                failures += 1
            elif outcome is BOOM:
                if not quiet:
                    self.report_unexpected_exception(out, test, example,
                                                     exc_info)
                failures += 1
            else:
                assert False, ("unknown outcome", outcome)

        # Restore the option flags (in case they were modified)
        self.optionflags = original_optionflags

        # Record and return the number of failures and tries.
        self.__record_outcome(test, failures, tries)
        return TestResults(failures, tries)

    def __record_outcome(self, test, f, t):
        """
        Record the fact that the given DocTest (`test`) generated `f`
        failures out of `t` tried examples.
        """
        f2, t2 = self._name2ft.get(test.name, (0,0))
        self._name2ft[test.name] = (f+f2, t+t2)
        self.failures += f
        self.tries += t

    # Matches the synthetic filenames produced in __run, e.g.
    # "<doctest some.test.name[3]>".
    __LINECACHE_FILENAME_RE = re.compile(r'<doctest '
                                         r'(?P<name>.+)'
                                         r'\[(?P<examplenum>\d+)\]>$')
    def __patched_linecache_getlines(self, filename, module_globals=None):
        """
        Replacement for linecache.getlines (installed by `run`) that
        serves an example's source for the synthetic <doctest ...>
        filenames, so the debugger can list it; everything else is
        delegated to the saved original.
        """
        m = self.__LINECACHE_FILENAME_RE.match(filename)
        if m and m.group('name') == self.test.name:
            example = self.test.examples[int(m.group('examplenum'))]
            source = example.source
            if isinstance(source, unicode):
                source = source.encode('ascii', 'backslashreplace')
            return source.splitlines(True)
        else:
            return self.save_linecache_getlines(filename, module_globals)

    def run(self, test, compileflags=None, out=None, clear_globs=True):
        """
        Run the examples in `test`, and display the results using the
        writer function `out`.

        The examples are run in the namespace `test.globs`.  If
        `clear_globs` is true (the default), then this namespace will
        be cleared after the test runs, to help with garbage
        collection.  If you would like to examine the namespace after
        the test completes, then use `clear_globs=False`.

        `compileflags` gives the set of flags that should be used by
        the Python compiler when running the examples.  If not
        specified, then it will default to the set of future-import
        flags that apply to `globs`.

        The output of each example is checked using
        `DocTestRunner.check_output`, and the results are formatted by
        the `DocTestRunner.report_*` methods.
        """
        self.test = test

        if compileflags is None:
            compileflags = _extract_future_flags(test.globs)

        save_stdout = sys.stdout
        if out is None:
            out = save_stdout.write
        sys.stdout = self._fakeout

        # Patch pdb.set_trace to restore sys.stdout during interactive
        # debugging (so it's not still redirected to self._fakeout).
        # Note that the interactive output will go to *our*
        # save_stdout, even if that's not the real sys.stdout; this
        # allows us to write test cases for the set_trace behavior.
        save_set_trace = pdb.set_trace
        self.debugger = _OutputRedirectingPdb(save_stdout)
        self.debugger.reset()
        pdb.set_trace = self.debugger.set_trace

        # Patch linecache.getlines, so we can see the example's source
        # when we're inside the debugger.
        self.save_linecache_getlines = linecache.getlines
        linecache.getlines = self.__patched_linecache_getlines

        # Make sure sys.displayhook just prints the value to stdout
        save_displayhook = sys.displayhook
        sys.displayhook = sys.__displayhook__

        try:
            return self.__run(test, compileflags, out)
        finally:
            # Undo every global patch, even if __run raised.
            sys.stdout = save_stdout
            pdb.set_trace = save_set_trace
            linecache.getlines = self.save_linecache_getlines
            sys.displayhook = save_displayhook
            if clear_globs:
                test.globs.clear()

    #/////////////////////////////////////////////////////////////////
    # Summarization
    #/////////////////////////////////////////////////////////////////
    def summarize(self, verbose=None):
        """
        Print a summary of all the test cases that have been run by
        this DocTestRunner, and return a tuple `(f, t)`, where `f` is
        the total number of failed examples, and `t` is the total
        number of tried examples.

        The optional `verbose` argument controls how detailed the
        summary is.  If the verbosity is not specified, then the
        DocTestRunner's verbosity is used.
        """
        if verbose is None:
            verbose = self._verbose
        notests = []
        passed = []
        failed = []
        totalt = totalf = 0
        for x in self._name2ft.items():
            name, (f, t) = x
            assert f <= t
            totalt += t
            totalf += f
            if t == 0:
                notests.append(name)
            elif f == 0:
                passed.append( (name, t) )
            else:
                failed.append(x)
        if verbose:
            if notests:
                print len(notests), "items had no tests:"
                notests.sort()
                for thing in notests:
                    print "   ", thing
            if passed:
                print len(passed), "items passed all tests:"
                passed.sort()
                for thing, count in passed:
                    print " %3d tests in %s" % (count, thing)
        # Failures are always reported, regardless of verbosity.
        if failed:
            print self.DIVIDER
            print len(failed), "items had failures:"
            failed.sort()
            for thing, (f, t) in failed:
                print " %3d of %3d in %s" % (f, t, thing)
        if verbose:
            print totalt, "tests in", len(self._name2ft), "items."
            print totalt - totalf, "passed and", totalf, "failed."
        if totalf:
            print "***Test Failed***", totalf, "failures."
        elif verbose:
            print "Test passed."
        return TestResults(totalf, totalt)

    #/////////////////////////////////////////////////////////////////
    # Backward compatibility cruft to maintain doctest.master.
    #/////////////////////////////////////////////////////////////////
    def merge(self, other):
        """
        Fold `other`'s per-test (failures, tries) counts into this
        runner's `_name2ft` map, summing counts for names present in
        both.
        """
        d = self._name2ft
        for name, (f, t) in other._name2ft.items():
            if name in d:
                # Don't print here by default, since doing
                # so breaks some of the buildbots
                #print "*** DocTestRunner.merge: '" + name + "' in both" \
                #      " testers; summing outcomes."
                f2, t2 = d[name]
                f = f + f2
                t = t + t2
            d[name] = f, t
1475 | \r | |
class OutputChecker:
    """
    A class used to check whether the actual output from a doctest
    example matches the expected output.  `OutputChecker` defines two
    methods: `check_output`, which compares a given pair of outputs,
    and returns true if they match; and `output_difference`, which
    returns a string describing the differences between two outputs.
    """
    def check_output(self, want, got, optionflags):
        """
        Return True iff the actual output from an example (`got`)
        matches the expected output (`want`).  These strings are
        always considered to match if they are identical; but
        depending on what option flags the test runner is using,
        several non-exact match types are also possible.  See the
        documentation for `TestRunner` for more information about
        option flags.
        """
        # Handle the common case first, for efficiency:
        # if they're string-identical, always return true.
        if got == want:
            return True

        # The values True and False replaced 1 and 0 as the return
        # value for boolean comparisons in Python 2.3.
        if not (optionflags & DONT_ACCEPT_TRUE_FOR_1):
            if (got,want) == ("True\n", "1\n"):
                return True
            if (got,want) == ("False\n", "0\n"):
                return True

        # <BLANKLINE> can be used as a special sequence to signify a
        # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
        if not (optionflags & DONT_ACCEPT_BLANKLINE):
            # Replace <BLANKLINE> in want with a blank line.
            # NOTE: raw strings so regex escapes like \s reach the re
            # module verbatim instead of relying on "unknown string
            # escape passes through" (a warning, later an error, in
            # newer Pythons).  The pattern values are unchanged.
            want = re.sub(r'(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER),
                          '', want)
            # If a line in got contains only spaces, then remove the
            # spaces.
            got = re.sub(r'(?m)^\s*?$', '', got)
            if got == want:
                return True

        # This flag causes doctest to ignore any differences in the
        # contents of whitespace strings.  Note that this can be used
        # in conjunction with the ELLIPSIS flag.
        if optionflags & NORMALIZE_WHITESPACE:
            got = ' '.join(got.split())
            want = ' '.join(want.split())
            if got == want:
                return True

        # The ELLIPSIS flag says to let the sequence "..." in `want`
        # match any substring in `got`.
        if optionflags & ELLIPSIS:
            if _ellipsis_match(want, got):
                return True

        # We didn't find any match; return false.
        return False

    # Should we do a fancy diff?
    def _do_a_fancy_diff(self, want, got, optionflags):
        """
        Return True if a unified/context/ndiff-style report is both
        requested (via optionflags) and likely to be helpful for this
        want/got pair.
        """
        # Not unless they asked for a fancy diff.
        if not optionflags & (REPORT_UDIFF |
                              REPORT_CDIFF |
                              REPORT_NDIFF):
            return False

        # If expected output uses ellipsis, a meaningful fancy diff is
        # too hard ... or maybe not.  In two real-life failures Tim saw,
        # a diff was a major help anyway, so this is commented out.
        # [todo] _ellipsis_match() knows which pieces do and don't match,
        # and could be the basis for a kick-ass diff in this case.
        ##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want:
        ##    return False

        # ndiff does intraline difference marking, so can be useful even
        # for 1-line differences.
        if optionflags & REPORT_NDIFF:
            return True

        # The other diff types need at least a few lines to be helpful.
        return want.count('\n') > 2 and got.count('\n') > 2

    def output_difference(self, example, got, optionflags):
        """
        Return a string describing the differences between the
        expected output for a given example (`example`) and the actual
        output (`got`).  `optionflags` is the set of option flags used
        to compare `want` and `got`.
        """
        want = example.want
        # If <BLANKLINE>s are being used, then replace blank lines
        # with <BLANKLINE> in the actual output string.
        if not (optionflags & DONT_ACCEPT_BLANKLINE):
            got = re.sub(r'(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)

        # Check if we should use diff.
        if self._do_a_fancy_diff(want, got, optionflags):
            # Split want & got into lines.
            want_lines = want.splitlines(True)  # True == keep line ends
            got_lines = got.splitlines(True)
            # Use difflib to find their differences.
            if optionflags & REPORT_UDIFF:
                diff = difflib.unified_diff(want_lines, got_lines, n=2)
                diff = list(diff)[2:] # strip the diff header
                kind = 'unified diff with -expected +actual'
            elif optionflags & REPORT_CDIFF:
                diff = difflib.context_diff(want_lines, got_lines, n=2)
                diff = list(diff)[2:] # strip the diff header
                kind = 'context diff with expected followed by actual'
            elif optionflags & REPORT_NDIFF:
                engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
                diff = list(engine.compare(want_lines, got_lines))
                kind = 'ndiff with -expected +actual'
            else:
                assert 0, 'Bad diff option'
            # Remove trailing whitespace on diff output.
            diff = [line.rstrip() + '\n' for line in diff]
            return 'Differences (%s):\n' % kind + _indent(''.join(diff))

        # If we're not using diff, then simply list the expected
        # output followed by the actual output.
        if want and got:
            return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got))
        elif want:
            return 'Expected:\n%sGot nothing\n' % _indent(want)
        elif got:
            return 'Expected nothing\nGot:\n%s' % _indent(got)
        else:
            return 'Expected nothing\nGot nothing\n'
1608 | \r | |
class DocTestFailure(Exception):
    """Raised (in debugging mode) when a doctest example's actual
    output does not match its expected output.

    The exception instance has variables:

    - test: the DocTest object being run

    - example: the Example object that failed

    - got: the actual output
    """
    def __init__(self, test, example, got):
        self.test, self.example, self.got = test, example, got

    def __str__(self):
        return str(self.test)
1627 | \r | |
class UnexpectedException(Exception):
    """Raised (in debugging mode) when a doctest example raises an
    exception the test did not expect.

    The exception instance has variables:

    - test: the DocTest object being run

    - example: the Example object that failed

    - exc_info: the exception info
    """
    def __init__(self, test, example, exc_info):
        self.test, self.example, self.exc_info = test, example, exc_info

    def __str__(self):
        return str(self.test)
1646 | \r | |
class DebugRunner(DocTestRunner):
    r"""Run doc tests but raise an exception as soon as there is a failure.

       If an unexpected exception occurs, an UnexpectedException is raised.
       It contains the test, the example, and the original exception:

         >>> runner = DebugRunner(verbose=False)
         >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
         ...                                    {}, 'foo', 'foo.py', 0)
         >>> try:
         ...     runner.run(test)
         ... except UnexpectedException, failure:
         ...     pass

         >>> failure.test is test
         True

         >>> failure.example.want
         '42\n'

         >>> exc_info = failure.exc_info
         >>> raise exc_info[0], exc_info[1], exc_info[2]
         Traceback (most recent call last):
         ...
         KeyError

       We wrap the original exception to give the calling application
       access to the test and example information.

       If the output doesn't match, then a DocTestFailure is raised:

         >>> test = DocTestParser().get_doctest('''
         ...      >>> x = 1
         ...      >>> x
         ...      2
         ...      ''', {}, 'foo', 'foo.py', 0)

         >>> try:
         ...    runner.run(test)
         ... except DocTestFailure, failure:
         ...    pass

       DocTestFailure objects provide access to the test:

         >>> failure.test is test
         True

       As well as to the example:

         >>> failure.example.want
         '2\n'

       and the actual output:

         >>> failure.got
         '1\n'

       If a failure or error occurs, the globals are left intact:

         >>> del test.globs['__builtins__']
         >>> test.globs
         {'x': 1}

         >>> test = DocTestParser().get_doctest('''
         ...      >>> x = 2
         ...      >>> raise KeyError
         ...      ''', {}, 'foo', 'foo.py', 0)

         >>> runner.run(test)
         Traceback (most recent call last):
         ...
         UnexpectedException: <DocTest foo from foo.py:0 (2 examples)>

         >>> del test.globs['__builtins__']
         >>> test.globs
         {'x': 2}

       But the globals are cleared if there is no error:

         >>> test = DocTestParser().get_doctest('''
         ...      >>> x = 2
         ...      ''', {}, 'foo', 'foo.py', 0)

         >>> runner.run(test)
         TestResults(failed=0, attempted=1)

         >>> test.globs
         {}

       """

    def run(self, test, compileflags=None, out=None, clear_globs=True):
        # Always run the base class with clear_globs=False: a failure or
        # unexpected exception propagates out of __run before this point,
        # leaving test.globs intact for post-mortem inspection.  Only a
        # fully successful run reaches the clear below.
        r = DocTestRunner.run(self, test, compileflags, out, False)
        if clear_globs:
            test.globs.clear()
        return r

    def report_unexpected_exception(self, out, test, example, exc_info):
        # Abort the run immediately, surfacing the example's exception
        # (wrapped with test/example context) to the caller.
        raise UnexpectedException(test, example, exc_info)

    def report_failure(self, out, test, example, got):
        # Abort the run immediately on the first output mismatch.
        raise DocTestFailure(test, example, got)
1749 | \r | |
1750 | ######################################################################\r | |
1751 | ## 6. Test Functions\r | |
1752 | ######################################################################\r | |
1753 | # These should be backwards compatible.\r | |
1754 | \r | |
1755 | # For backward compatibility, a global instance of a DocTestRunner\r | |
1756 | # class, updated by testmod.\r | |
master = None  # most recent runner, merged across testmod()/testfile() calls (legacy doctest.master)
1758 | \r | |
def testmod(m=None, name=None, globs=None, verbose=None,
            report=True, optionflags=0, extraglobs=None,
            raise_on_error=False, exclude_empty=False):
    """m=None, name=None, globs=None, verbose=None, report=True,
       optionflags=0, extraglobs=None, raise_on_error=False,
       exclude_empty=False

    Test examples in docstrings in functions and classes reachable
    from module m (or the current module if m is not supplied), starting
    with m.__doc__.

    Also test examples reachable from dict m.__test__ if it exists and is
    not None.  m.__test__ maps names to functions, classes and strings;
    function and class docstrings are tested even if the name is private;
    strings are tested directly, as if they were docstrings.

    Return (#failures, #tests).

    See help(doctest) for an overview.

    Optional keyword arg "name" gives the name of the module; by default
    use m.__name__.

    Optional keyword arg "globs" gives a dict to be used as the globals
    when executing examples; by default, use m.__dict__.  A copy of this
    dict is actually used for each docstring, so that each docstring's
    examples start with a clean slate.

    Optional keyword arg "extraglobs" gives a dictionary that should be
    merged into the globals that are used to execute examples.  By
    default, no extra globals are used.  This is new in 2.4.

    Optional keyword arg "verbose" prints lots of stuff if true, prints
    only failures if false; by default, it's true iff "-v" is in sys.argv.

    Optional keyword arg "report" prints a summary at the end when true,
    else prints nothing at the end.  In verbose mode, the summary is
    detailed, else very brief (in fact, empty if all tests passed).

    Optional keyword arg "optionflags" or's together module constants,
    and defaults to 0.  This is new in 2.3.  Possible values (see the
    docs for details):

        DONT_ACCEPT_TRUE_FOR_1
        DONT_ACCEPT_BLANKLINE
        NORMALIZE_WHITESPACE
        ELLIPSIS
        SKIP
        IGNORE_EXCEPTION_DETAIL
        REPORT_UDIFF
        REPORT_CDIFF
        REPORT_NDIFF
        REPORT_ONLY_FIRST_FAILURE

    Optional keyword arg "raise_on_error" raises an exception on the
    first unexpected exception or failure.  This allows failures to be
    post-mortem debugged.

    Advanced tomfoolery:  testmod runs methods of a local instance of
    class doctest.Tester, then merges the results into (or creates)
    global Tester instance doctest.master.  Methods of doctest.master
    can be called directly too, if you want to do something unusual.
    Passing report=0 to testmod is especially useful then, to delay
    displaying a summary.  Invoke doctest.master.summarize(verbose)
    when you're done fiddling.
    """
    global master

    # Default to the __main__ module when none was supplied.  (If this
    # wasn't invoked from the command line, m may still be None, and the
    # ismodule check below then raises a reasonable TypeError.)
    if m is None:
        m = sys.modules.get('__main__')

    # Refuse anything that isn't an actual module object.
    if not inspect.ismodule(m):
        raise TypeError("testmod: module required; %r" % (m,))

    # Default the test name to the module's own name.
    if name is None:
        name = m.__name__

    # Find, parse, and run all tests in the given module.
    finder = DocTestFinder(exclude_empty=exclude_empty)
    runner_class = DebugRunner if raise_on_error else DocTestRunner
    runner = runner_class(verbose=verbose, optionflags=optionflags)

    for test in finder.find(m, name, globs=globs, extraglobs=extraglobs):
        runner.run(test)

    if report:
        runner.summarize()

    # Maintain the legacy module-global `master` runner.
    if master is None:
        master = runner
    else:
        master.merge(runner)

    return TestResults(runner.failures, runner.tries)
1862 | \r | |
def testfile(filename, module_relative=True, name=None, package=None,
             globs=None, verbose=None, report=True, optionflags=0,
             extraglobs=None, raise_on_error=False, parser=DocTestParser(),
             encoding=None):
    """
    Test examples in the given file.  Return (#failures, #tests).

    Optional keyword arg "module_relative" specifies how filenames
    should be interpreted:

      - If "module_relative" is True (the default), then "filename"
        specifies a module-relative path.  By default, this path is
        relative to the calling module's directory; but if the
        "package" argument is specified, then it is relative to that
        package.  To ensure os-independence, "filename" should use
        "/" characters to separate path segments, and should not
        be an absolute path (i.e., it may not begin with "/").

      - If "module_relative" is False, then "filename" specifies an
        os-specific path.  The path may be absolute or relative (to
        the current working directory).

    Optional keyword arg "name" gives the name of the test; by default
    use the file's basename.

    Optional keyword argument "package" is a Python package or the
    name of a Python package whose directory should be used as the
    base directory for a module relative filename.  If no package is
    specified, then the calling module's directory is used as the base
    directory for module relative filenames.  It is an error to
    specify "package" if "module_relative" is False.

    Optional keyword arg "globs" gives a dict to be used as the globals
    when executing examples; by default, use {}.  A copy of this dict
    is actually used for each docstring, so that each docstring's
    examples start with a clean slate.

    Optional keyword arg "extraglobs" gives a dictionary that should be
    merged into the globals that are used to execute examples.  By
    default, no extra globals are used.

    Optional keyword arg "verbose" prints lots of stuff if true, prints
    only failures if false; by default, it's true iff "-v" is in sys.argv.

    Optional keyword arg "report" prints a summary at the end when true,
    else prints nothing at the end.  In verbose mode, the summary is
    detailed, else very brief (in fact, empty if all tests passed).

    Optional keyword arg "optionflags" or's together module constants,
    and defaults to 0.  Possible values (see the docs for details):

        DONT_ACCEPT_TRUE_FOR_1
        DONT_ACCEPT_BLANKLINE
        NORMALIZE_WHITESPACE
        ELLIPSIS
        SKIP
        IGNORE_EXCEPTION_DETAIL
        REPORT_UDIFF
        REPORT_CDIFF
        REPORT_NDIFF
        REPORT_ONLY_FIRST_FAILURE

    Optional keyword arg "raise_on_error" raises an exception on the
    first unexpected exception or failure.  This allows failures to be
    post-mortem debugged.

    Optional keyword arg "parser" specifies a DocTestParser (or
    subclass) that should be used to extract tests from the files.

    Optional keyword arg "encoding" specifies an encoding that should
    be used to convert the file to unicode.

    Advanced tomfoolery:  testmod runs methods of a local instance of
    class doctest.Tester, then merges the results into (or creates)
    global Tester instance doctest.master.  Methods of doctest.master
    can be called directly too, if you want to do something unusual.
    Passing report=0 to testmod is especially useful then, to delay
    displaying a summary.  Invoke doctest.master.summarize(verbose)
    when you're done fiddling.
    """
    global master

    if package and not module_relative:
        raise ValueError("Package may only be specified for module-"
                         "relative paths.")

    # Locate and read the file (relativizing the path if requested).
    text, filename = _load_testfile(filename, package, module_relative)

    # Default the test name to the file's basename.
    if name is None:
        name = os.path.basename(filename)

    # Assemble the globals dict the examples will execute in; never
    # mutate the caller's dict.
    globs = {} if globs is None else globs.copy()
    if extraglobs is not None:
        globs.update(extraglobs)
    if '__name__' not in globs:
        globs['__name__'] = '__main__'

    runner_class = DebugRunner if raise_on_error else DocTestRunner
    runner = runner_class(verbose=verbose, optionflags=optionflags)

    if encoding is not None:
        text = text.decode(encoding)

    # Convert the file's contents to a single doctest, and run it.
    test = parser.get_doctest(text, globs, name, filename, 0)
    runner.run(test)

    if report:
        runner.summarize()

    # Maintain the legacy module-global `master` runner.
    if master is None:
        master = runner
    else:
        master.merge(runner)

    return TestResults(runner.failures, runner.tries)
1987 | \r | |
def run_docstring_examples(f, globs, verbose=False, name="NoName",
                           compileflags=None, optionflags=0):
    """
    Test examples in the given object's docstring (`f`), using `globs`
    as globals.  Optional argument `name` is used in failure messages.
    If the optional argument `verbose` is true, then generate output
    even if there are no failures.

    `compileflags` gives the set of flags that should be used by the
    Python compiler when running the examples.  If not specified, then
    it will default to the set of future-import flags that apply to
    `globs`.

    Optional keyword arg `optionflags` specifies options for the
    testing and output.  See the documentation for `testmod` for more
    information.
    """
    # Collect only the doctests attached to `f` itself (no recursion
    # into contained objects) and run each of them.
    finder = DocTestFinder(verbose=verbose, recurse=False)
    runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
    for a_test in finder.find(f, name, globs=globs):
        runner.run(a_test, compileflags=compileflags)
2010 | \r | |
2011 | ######################################################################\r | |
2012 | ## 7. Tester\r | |
2013 | ######################################################################\r | |
2014 | # This is provided only for backwards compatibility. It's not\r | |
2015 | # actually used in any way.\r | |
2016 | \r | |
class Tester:
    # Deprecated pre-2.4 testing interface, kept only for backward
    # compatibility; it delegates to DocTestFinder/DocTestRunner and
    # warns on construction.
    def __init__(self, mod=None, globs=None, verbose=None, optionflags=0):

        warnings.warn("class Tester is deprecated; "
                      "use class doctest.DocTestRunner instead",
                      DeprecationWarning, stacklevel=2)
        if mod is None and globs is None:
            raise TypeError("Tester.__init__: must specify mod or globs")
        if mod is not None and not inspect.ismodule(mod):
            raise TypeError("Tester.__init__: mod must be a module; %r" %
                            (mod,))
        if globs is None:
            # Default to the module's namespace when no explicit globals.
            globs = mod.__dict__
        self.globs = globs

        self.verbose = verbose
        self.optionflags = optionflags
        self.testfinder = DocTestFinder()
        self.testrunner = DocTestRunner(verbose=verbose,
                                        optionflags=optionflags)

    def runstring(self, s, name):
        # Parse string `s` as a doctest (labelled `name`) and run it.
        test = DocTestParser().get_doctest(s, self.globs, name, None, None)
        if self.verbose:
            print "Running string", name
        (f,t) = self.testrunner.run(test)
        if self.verbose:
            print f, "of", t, "examples failed in string", name
        return TestResults(f,t)

    def rundoc(self, object, name=None, module=None):
        # Find and run every doctest reachable from `object`; return
        # the summed (failures, tries) across all of them.
        f = t = 0
        tests = self.testfinder.find(object, name, module=module,
                                     globs=self.globs)
        for test in tests:
            (f2, t2) = self.testrunner.run(test)
            (f,t) = (f+f2, t+t2)
        return TestResults(f,t)

    def rundict(self, d, name, module=None):
        import types
        # Wrap the dict in a synthetic module so rundoc can walk it.
        m = types.ModuleType(name)
        m.__dict__.update(d)
        if module is None:
            module = False
        return self.rundoc(m, name, module)

    def run__test__(self, d, name):
        import types
        # Run the tests in a __test__-style name -> test mapping.
        m = types.ModuleType(name)
        m.__test__ = d
        return self.rundoc(m, name)

    def summarize(self, verbose=None):
        return self.testrunner.summarize(verbose)

    def merge(self, other):
        self.testrunner.merge(other.testrunner)
2075 | \r | |
2076 | ######################################################################\r | |
2077 | ## 8. Unittest Support\r | |
2078 | ######################################################################\r | |
2079 | \r | |
_unittest_reportflags = 0  # default reporting flags applied by DocTestCase when a test sets none
2081 | \r | |
def set_unittest_reportflags(flags):
    """Sets the unittest option flags.

    The old flag is returned so that a runner could restore the old
    value if it wished to:

      >>> import doctest
      >>> old = doctest._unittest_reportflags
      >>> doctest.set_unittest_reportflags(REPORT_NDIFF |
      ...                          REPORT_ONLY_FIRST_FAILURE) == old
      True

      >>> doctest._unittest_reportflags == (REPORT_NDIFF |
      ...                                   REPORT_ONLY_FIRST_FAILURE)
      True

    Only reporting flags can be set:

      >>> doctest.set_unittest_reportflags(ELLIPSIS)
      Traceback (most recent call last):
      ...
      ValueError: ('Only reporting flags allowed', 8)

      >>> doctest.set_unittest_reportflags(old) == (REPORT_NDIFF |
      ...                                   REPORT_ONLY_FIRST_FAILURE)
      True
    """
    global _unittest_reportflags

    # Reject any bit outside the reporting-flag mask.
    if flags & ~REPORTING_FLAGS:
        raise ValueError("Only reporting flags allowed", flags)
    previous, _unittest_reportflags = _unittest_reportflags, flags
    return previous
2116 | \r | |
2117 | \r | |
2118 | class DocTestCase(unittest.TestCase):\r | |
2119 | \r | |
2120 | def __init__(self, test, optionflags=0, setUp=None, tearDown=None,\r | |
2121 | checker=None):\r | |
2122 | \r | |
2123 | unittest.TestCase.__init__(self)\r | |
2124 | self._dt_optionflags = optionflags\r | |
2125 | self._dt_checker = checker\r | |
2126 | self._dt_test = test\r | |
2127 | self._dt_setUp = setUp\r | |
2128 | self._dt_tearDown = tearDown\r | |
2129 | \r | |
2130 | def setUp(self):\r | |
2131 | test = self._dt_test\r | |
2132 | \r | |
2133 | if self._dt_setUp is not None:\r | |
2134 | self._dt_setUp(test)\r | |
2135 | \r | |
2136 | def tearDown(self):\r | |
2137 | test = self._dt_test\r | |
2138 | \r | |
2139 | if self._dt_tearDown is not None:\r | |
2140 | self._dt_tearDown(test)\r | |
2141 | \r | |
2142 | test.globs.clear()\r | |
2143 | \r | |
    def runTest(self):
        """Run the wrapped doctest; raise failureException on any failure."""
        test = self._dt_test
        old = sys.stdout
        new = StringIO()  # collects the runner's failure report text
        optionflags = self._dt_optionflags

        if not (optionflags & REPORTING_FLAGS):
            # The option flags don't include any reporting flags,
            # so add the default reporting flags
            optionflags |= _unittest_reportflags

        runner = DocTestRunner(optionflags=optionflags,
                               checker=self._dt_checker, verbose=False)

        try:
            # Narrower divider so the report fits unittest's output style.
            runner.DIVIDER = "-"*70
            failures, tries = runner.run(
                test, out=new.write, clear_globs=False)
        finally:
            # DocTestRunner.run redirects sys.stdout itself and restores
            # it; this restore is defensive in case run() raised early.
            sys.stdout = old

        if failures:
            raise self.failureException(self.format_failure(new.getvalue()))
2167 | \r | |
2168 | def format_failure(self, err):\r | |
2169 | test = self._dt_test\r | |
2170 | if test.lineno is None:\r | |
2171 | lineno = 'unknown line number'\r | |
2172 | else:\r | |
2173 | lineno = '%s' % test.lineno\r | |
2174 | lname = '.'.join(test.name.split('.')[-1:])\r | |
2175 | return ('Failed doctest test for %s\n'\r | |
2176 | ' File "%s", line %s, in %s\n\n%s'\r | |
2177 | % (test.name, test.filename, lineno, lname, err)\r | |
2178 | )\r | |
2179 | \r | |
    def debug(self):
        r"""Run the test case without results and without catching exceptions

           The unit test framework includes a debug method on test cases
           and test suites to support post-mortem debugging.  The test code
           is run in such a way that errors are not caught.  This way a
           caller can catch the errors and initiate post-mortem debugging.

           The DocTestCase provides a debug method that raises
           UnexpectedException errors if there is an unexpected
           exception:

             >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
             ...                {}, 'foo', 'foo.py', 0)
             >>> case = DocTestCase(test)
             >>> try:
             ...     case.debug()
             ... except UnexpectedException, failure:
             ...     pass

           The UnexpectedException contains the test, the example, and
           the original exception:

             >>> failure.test is test
             True

             >>> failure.example.want
             '42\n'

             >>> exc_info = failure.exc_info
             >>> raise exc_info[0], exc_info[1], exc_info[2]
             Traceback (most recent call last):
             ...
             KeyError

           If the output doesn't match, then a DocTestFailure is raised:

             >>> test = DocTestParser().get_doctest('''
             ...      >>> x = 1
             ...      >>> x
             ...      2
             ...      ''', {}, 'foo', 'foo.py', 0)
             >>> case = DocTestCase(test)

             >>> try:
             ...    case.debug()
             ... except DocTestFailure, failure:
             ...    pass

           DocTestFailure objects provide access to the test:

             >>> failure.test is test
             True

           As well as to the example:

             >>> failure.example.want
             '2\n'

           and the actual output:

             >>> failure.got
             '1\n'

           """

        self.setUp()
        # DebugRunner raises DocTestFailure/UnexpectedException instead of
        # reporting, so errors propagate to the caller; keep the globs
        # (clear_globs=False) for post-mortem inspection.
        runner = DebugRunner(optionflags=self._dt_optionflags,
                             checker=self._dt_checker, verbose=False)
        runner.run(self._dt_test, clear_globs=False)
        self.tearDown()
2251 | \r | |
2252 | def id(self):\r | |
2253 | return self._dt_test.name\r | |
2254 | \r | |
2255 | def __repr__(self):\r | |
2256 | name = self._dt_test.name.split('.')\r | |
2257 | return "%s (%s)" % (name[-1], '.'.join(name[:-1]))\r | |
2258 | \r | |
2259 | __str__ = __repr__\r | |
2260 | \r | |
2261 | def shortDescription(self):\r | |
2262 | return "Doctest: " + self._dt_test.name\r | |
2263 | \r | |
class SkipDocTestCase(DocTestCase):
    """Dummy test case added when doctests cannot be collected.

    DocTestSuite adds one of these (which skips itself) instead of
    failing outright when docstrings have been stripped, e.g. when
    running under ``python -O2``.
    """

    def __init__(self, module=None):
        # Remember which module we stand in for. The original version
        # referenced an undefined global `module` in shortDescription(),
        # raising NameError. Defaulting to None keeps existing
        # SkipDocTestCase() callers working.
        self.module = module
        DocTestCase.__init__(self, None)

    def setUp(self):
        # Always skip: there is nothing to run.
        self.skipTest("DocTestSuite will not work with -O2 and above")

    def test_skip(self):
        pass

    def shortDescription(self):
        if self.module is not None:
            return "Skipping tests from %s" % self.module.__name__
        return "Skipping tests from an unknown module"
2276 | \r | |
def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None,
                 **options):
    """
    Convert doctest tests for a module to a unittest test suite.

    This converts each documentation string in a module that
    contains doctest tests to a unittest test case. If any of the
    tests in a doc string fail, then the test case fails. An exception
    is raised showing the name of the file containing the test and a
    (sometimes approximate) line number.

    The `module` argument provides the module to be tested. The argument
    can be either a module or a module name.

    If no argument is given, the calling module is used.

    A number of options may be provided as keyword arguments:

    setUp
      A set-up function. This is called before running the
      tests in each file. The setUp function will be passed a DocTest
      object. The setUp function can access the test globals as the
      globs attribute of the test passed.

    tearDown
      A tear-down function. This is called after running the
      tests in each file. The tearDown function will be passed a DocTest
      object. The tearDown function can access the test globals as the
      globs attribute of the test passed.

    globs
      A dictionary containing initial global variables for the tests.

    optionflags
      A set of doctest option flags expressed as an integer.
    """

    if test_finder is None:
        test_finder = DocTestFinder()

    # Resolve the module here so the caller's frame is the reference point.
    module = _normalize_module(module)
    tests = test_finder.find(module, globs=globs, extraglobs=extraglobs)

    if not tests:
        if sys.flags.optimize >= 2:
            # Docstrings are stripped under -O2, so nothing could be
            # collected; emit a single skipping placeholder case.
            skip_suite = unittest.TestSuite()
            skip_suite.addTest(SkipDocTestCase())
            return skip_suite
        # Surface the absence of tests rather than hiding a likely bug.
        raise ValueError(module, "has no tests")

    tests.sort()
    suite = unittest.TestSuite()

    for test in tests:
        if not test.examples:
            continue
        if not test.filename:
            # Fill in the filename from the module, mapping compiled
            # files (.pyc/.pyo) back to their .py source.
            filename = module.__file__
            if filename.endswith((".pyc", ".pyo")):
                filename = filename[:-1]
            test.filename = filename
        suite.addTest(DocTestCase(test, **options))

    return suite
2344 | \r | |
class DocFileCase(DocTestCase):
    """DocTestCase variant for doctests loaded from a text file."""

    def id(self):
        # File-based tests use underscore-joined ids instead of dots.
        return self._dt_test.name.replace('.', '_')

    def __repr__(self):
        return self._dt_test.filename
    __str__ = __repr__

    def format_failure(self, err):
        test = self._dt_test
        template = 'Failed doctest test for %s\n File "%s", line 0\n\n%s'
        return template % (test.name, test.filename, err)
2358 | \r | |
def DocFileTest(path, module_relative=True, package=None,
                globs=None, parser=DocTestParser(),
                encoding=None, **options):
    """Create a DocFileCase for the doctest file at `path`.

    See DocFileSuite for the meaning of the keyword arguments.
    """
    # Work on a copy so the caller's globals dict is never mutated.
    globs = {} if globs is None else globs.copy()

    if package and not module_relative:
        raise ValueError("Package may only be specified for module-"
                         "relative paths.")

    # Locate the file (relativizing module-relative paths) and read it.
    doc, path = _load_testfile(path, package, module_relative)

    # Make the test file's own path visible to its examples.
    if "__file__" not in globs:
        globs["__file__"] = path

    name = os.path.basename(path)

    # If an encoding is specified, use it to convert the file to unicode.
    if encoding is not None:
        doc = doc.decode(encoding)

    # Parse the file into a DocTest and wrap it in a unittest case.
    test = parser.get_doctest(doc, globs, name, path, 0)
    return DocFileCase(test, **options)
2387 | \r | |
def DocFileSuite(*paths, **kw):
    """A unittest suite for one or more doctest files.

    The path to each doctest file is given as a string; the
    interpretation of that string depends on the keyword argument
    "module_relative".

    A number of options may be provided as keyword arguments:

    module_relative
      If "module_relative" is True, then the given file paths are
      interpreted as os-independent module-relative paths. By
      default, these paths are relative to the calling module's
      directory; but if the "package" argument is specified, then
      they are relative to that package. To ensure os-independence,
      "filename" should use "/" characters to separate path
      segments, and may not be an absolute path (i.e., it may not
      begin with "/").

      If "module_relative" is False, then the given file paths are
      interpreted as os-specific paths. These paths may be absolute
      or relative (to the current working directory).

    package
      A Python package or the name of a Python package whose directory
      should be used as the base directory for module relative paths.
      If "package" is not specified, then the calling module's
      directory is used as the base directory for module relative
      filenames. It is an error to specify "package" if
      "module_relative" is False.

    setUp
      A set-up function. This is called before running the
      tests in each file. The setUp function will be passed a DocTest
      object. The setUp function can access the test globals as the
      globs attribute of the test passed.

    tearDown
      A tear-down function. This is called after running the
      tests in each file. The tearDown function will be passed a DocTest
      object. The tearDown function can access the test globals as the
      globs attribute of the test passed.

    globs
      A dictionary containing initial global variables for the tests.

    optionflags
      A set of doctest option flags expressed as an integer.

    parser
      A DocTestParser (or subclass) that should be used to extract
      tests from the files.

    encoding
      An encoding that will be used to convert the files to unicode.
    """
    # Resolve the package here (not in DocFileTest) so that the module
    # guess is based on *our* caller, not on this helper.
    if kw.get('module_relative', True):
        kw['package'] = _normalize_module(kw.get('package'))

    suite = unittest.TestSuite()
    for path in paths:
        suite.addTest(DocFileTest(path, **kw))
    return suite
2456 | \r | |
2457 | ######################################################################\r | |
2458 | ## 9. Debugging Support\r | |
2459 | ######################################################################\r | |
2460 | \r | |
def script_from_examples(s):
    r"""Extract script from text with examples.

    Converts text with examples to a Python script. Example input is
    converted to regular code. Example output and all other words
    are converted to comments:

    >>> text = '''
    ...        Here are examples of simple math.
    ...
    ...            Python has super accurate integer addition
    ...
    ...            >>> 2 + 2
    ...            5
    ...
    ...            And very friendly error messages:
    ...
    ...            >>> 1/0
    ...            To Infinity
    ...            And
    ...            Beyond
    ...
    ...            You can use logic if you want:
    ...
    ...            >>> if 0:
    ...            ...    blah
    ...            ...    blah
    ...            ...
    ...
    ...            Ho hum
    ...            '''

    >>> print script_from_examples(text)
    # Here are examples of simple math.
    #
    #     Python has super accurate integer addition
    #
    2 + 2
    # Expected:
    ## 5
    #
    #     And very friendly error messages:
    #
    1/0
    # Expected:
    ## To Infinity
    ## And
    ## Beyond
    #
    #     You can use logic if you want:
    #
    if 0:
       blah
       blah
    #
    # Ho hum
    <BLANKLINE>
    """
    lines = []
    for piece in DocTestParser().parse(s):
        if isinstance(piece, Example):
            # Example source becomes real code (minus its trailing newline).
            lines.append(piece.source[:-1])
            if piece.want:
                # Expected output is preserved as '##' comments.
                lines.append('# Expected:')
                lines.extend('## ' + w for w in piece.want.split('\n')[:-1])
        else:
            # Narrative text between examples becomes '#' comments.
            lines.extend(_comment_line(t) for t in piece.split('\n')[:-1])

    # Trim empty-comment junk from both ends.
    while lines and lines[-1] == '#':
        lines.pop()
    while lines and lines[0] == '#':
        lines.pop(0)
    # Add a courtesy newline to prevent exec from choking (see bug #1172785)
    return '\n'.join(lines) + '\n'
2542 | \r | |
def testsource(module, name):
    """Extract the test sources from a doctest docstring as a script.

    Provide the module (or dotted name of the module) containing the
    test to be debugged and the name (within the module) of the object
    with the doc string with tests to be debugged.

    Raises ValueError if no test with that name is found.
    """
    module = _normalize_module(module)
    matches = [t for t in DocTestFinder().find(module) if t.name == name]
    if not matches:
        raise ValueError(name, "not found in tests")
    # Convert the (first) matching DocTest into an executable script.
    return script_from_examples(matches[0].docstring)
2558 | \r | |
def debug_src(src, pm=False, globs=None):
    """Debug a single doctest docstring, given as the string `src`."""
    # Convert the examples to a script and hand off to debug_script.
    script = script_from_examples(src)
    debug_script(script, pm, globs)
2563 | \r | |
def debug_script(src, pm=False, globs=None):
    """Debug a test script. `src` is the script, as a string.

    If `pm` is true, the script runs to the point of error and pdb is
    entered post-mortem there; otherwise the whole script runs under
    pdb from the start. `globs`, if given, supplies the globals (a
    copy is used, so the caller's dict is not mutated).
    """
    import pdb

    # Note that tempfile.NamedTemporaryFile() cannot be used. As the
    # docs say, a file so created cannot be opened by name a second time
    # on modern Windows boxes, and execfile() needs to open it.
    srcfilename = tempfile.mktemp(".py", "doctestdebug")
    f = open(srcfilename, 'w')
    f.write(src)
    f.close()

    try:
        # Run against a copy of the caller's globals (or a fresh dict).
        if globs:
            globs = globs.copy()
        else:
            globs = {}

        if pm:
            # Post-mortem: let the script raise, then drop into pdb at
            # the point of failure. The bare except is deliberate -- any
            # exception should trigger the debugger.
            try:
                execfile(srcfilename, globs, globs)
            except:
                print sys.exc_info()[1]
                pdb.post_mortem(sys.exc_info()[2])
        else:
            # Note that %r is vital here. '%s' instead can, e.g., cause
            # backslashes to get treated as metacharacters on Windows.
            pdb.run("execfile(%r)" % srcfilename, globs, globs)

    finally:
        # Always remove the temporary script file.
        os.remove(srcfilename)
2595 | \r | |
def debug(module, name, pm=False):
    """Debug a single doctest docstring.

    Provide the module (or dotted name of the module) containing the
    test to be debugged and the name (within the module) of the object
    with the docstring with tests to be debugged.
    """
    module = _normalize_module(module)
    # Extract the examples as a script and run them under pdb, using
    # the module's own namespace as globals.
    debug_script(testsource(module, name), pm, module.__dict__)
2606 | \r | |
2607 | ######################################################################\r | |
2608 | ## 10. Example Usage\r | |
2609 | ######################################################################\r | |
# NOTE: every docstring below is a live doctest, exercised when this
# module tests itself via testmod(); do not edit the examples casually.
class _TestClass:
    """
    A pointless class, for sanity-checking of docstring testing.

    Methods:
        square()
        get()

    >>> _TestClass(13).get() + _TestClass(-12).get()
    1
    >>> hex(_TestClass(13).square().get())
    '0xa9'
    """

    def __init__(self, val):
        """val -> _TestClass object with associated value val.

        >>> t = _TestClass(123)
        >>> print t.get()
        123
        """

        self.val = val

    def square(self):
        """square() -> square TestClass's associated value

        >>> _TestClass(13).square().get()
        169
        """

        # Squares in place and returns self so calls can be chained.
        self.val = self.val ** 2
        return self

    def get(self):
        """get() -> return TestClass's associated value.

        >>> x = _TestClass(-42)
        >>> print x.get()
        -42
        """

        return self.val
2653 | \r | |
# Extra doctests picked up by testmod() when this module tests itself.
# Keys name the tests; values are either objects whose docstrings hold
# examples (_TestClass) or strings searched for examples as-is.
__test__ = {"_TestClass": _TestClass,
            "string": r"""
                      Example of a string object, searched as-is.
                      >>> x = 1; y = 2
                      >>> x + y, x * y
                      (3, 2)
                      """,

            "bool-int equivalence": r"""
                                    In 2.2, boolean expressions displayed
                                    0 or 1. By default, we still accept
                                    them. This can be disabled by passing
                                    DONT_ACCEPT_TRUE_FOR_1 to the new
                                    optionflags argument.
                                    >>> 4 == 4
                                    1
                                    >>> 4 == 4
                                    True
                                    >>> 4 > 4
                                    0
                                    >>> 4 > 4
                                    False
                                    """,

            "blank lines": r"""
                Blank lines can be marked with <BLANKLINE>:
                    >>> print 'foo\n\nbar\n'
                    foo
                    <BLANKLINE>
                    bar
                    <BLANKLINE>
            """,

            "ellipsis": r"""
                If the ellipsis flag is used, then '...' can be used to
                elide substrings in the desired output:
                    >>> print range(1000) #doctest: +ELLIPSIS
                    [0, 1, 2, ..., 999]
            """,

            "whitespace normalization": r"""
                If the whitespace normalization flag is used, then
                differences in whitespace are ignored.
                    >>> print range(30) #doctest: +NORMALIZE_WHITESPACE
                    [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
                    15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
                    27, 28, 29]
            """,
           }
2703 | \r | |
2704 | \r | |
def _test():
    """Command-line driver: run doctests from the files named in sys.argv.

    Returns an exit status: 0 on success, 1 if any file had failures,
    2 on usage error (no files given).
    """
    testfiles = [arg for arg in sys.argv[1:] if arg and arg[0] != '-']
    if not testfiles:
        name = os.path.basename(sys.argv[0])
        if '__loader__' in globals():  # invoked as "python -m"
            name, _ = os.path.splitext(name)
        print("usage: {0} [-v] file ...".format(name))
        return 2

    for filename in testfiles:
        if filename.endswith(".py"):
            # It is a module -- insert its dir into sys.path and try to
            # import it. If it is part of a package, that possibly
            # won't work because of package imports.
            dirname, modfile = os.path.split(filename)
            sys.path.insert(0, dirname)
            mod = __import__(modfile[:-3])
            del sys.path[0]
            failures, _ = testmod(mod)
        else:
            failures, _ = testfile(filename, module_relative=False)
        if failures:
            return 1
    return 0
2728 | \r | |
2729 | \r | |
# Script entry point: run doctests from the files listed on the command
# line and exit with _test()'s status code.
if __name__ == "__main__":
    sys.exit(_test())