]> git.proxmox.com Git - ceph.git/blob - ceph/src/jaegertracing/opentelemetry-cpp/third_party/nlohmann-json/third_party/cpplint/cpplint.py
update ceph source to reef 18.1.2
[ceph.git] / ceph / src / jaegertracing / opentelemetry-cpp / third_party / nlohmann-json / third_party / cpplint / cpplint.py
1 #!/usr/bin/env python
2 #
3 # Copyright (c) 2009 Google Inc. All rights reserved.
4 #
5 # Redistribution and use in source and binary forms, with or without
6 # modification, are permitted provided that the following conditions are
7 # met:
8 #
9 # * Redistributions of source code must retain the above copyright
10 # notice, this list of conditions and the following disclaimer.
11 # * Redistributions in binary form must reproduce the above
12 # copyright notice, this list of conditions and the following disclaimer
13 # in the documentation and/or other materials provided with the
14 # distribution.
15 # * Neither the name of Google Inc. nor the names of its
16 # contributors may be used to endorse or promote products derived from
17 # this software without specific prior written permission.
18 #
19 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22 # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23 # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25 # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31 """Does google-lint on c++ files.
32
33 The goal of this script is to identify places in the code that *may*
34 be in non-compliance with google style. It does not attempt to fix
35 up these problems -- the point is to educate. It does also not
36 attempt to find all problems, or to ensure that everything it does
37 find is legitimately a problem.
38
39 In particular, we can get very confused by /* and // inside strings!
40 We do a small hack, which is to ignore //'s with "'s after them on the
41 same line, but it is far from perfect (in either direction).
42 """
43
44 import codecs
45 import copy
46 import getopt
47 import glob
48 import itertools
49 import math # for log
50 import os
51 import re
52 import sre_compile
53 import string
54 import sys
55 import sysconfig
56 import unicodedata
57 import xml.etree.ElementTree
58
59 # if empty, use defaults
60 _valid_extensions = set([])
61
62 __VERSION__ = '1.5.5'
63
64 try:
65 xrange # Python 2
66 except NameError:
67 # -- pylint: disable=redefined-builtin
68 xrange = range # Python 3
69
70
71 _USAGE = """
72 Syntax: cpplint.py [--verbose=#] [--output=emacs|eclipse|vs7|junit|sed|gsed]
73 [--filter=-x,+y,...]
74 [--counting=total|toplevel|detailed] [--root=subdir]
75 [--repository=path]
76 [--linelength=digits] [--headers=x,y,...]
77 [--recursive]
78 [--exclude=path]
79 [--extensions=hpp,cpp,...]
80 [--includeorder=default|standardcfirst]
81 [--quiet]
82 [--version]
83 <file> [file] ...
84
85 Style checker for C/C++ source files.
86 This is a fork of the Google style checker with minor extensions.
87
88 The style guidelines this tries to follow are those in
89 https://google.github.io/styleguide/cppguide.html
90
91 Every problem is given a confidence score from 1-5, with 5 meaning we are
92 certain of the problem, and 1 meaning it could be a legitimate construct.
93 This will miss some errors, and is not a substitute for a code review.
94
95 To suppress false-positive errors of a certain category, add a
96 'NOLINT(category)' comment to the line. NOLINT or NOLINT(*)
97 suppresses errors of all categories on that line.
98
99 The files passed in will be linted; at least one file must be provided.
100 Default linted extensions are %s.
101 Other file types will be ignored.
102 Change the extensions with the --extensions flag.
103
104 Flags:
105
106 output=emacs|eclipse|vs7|junit|sed|gsed
107 By default, the output is formatted to ease emacs parsing. Visual Studio
108 compatible output (vs7) may also be used. Further support exists for
109 eclipse (eclipse), and JUnit (junit). XML parsers such as those used
110 in Jenkins and Bamboo may also be used.
111 The sed format outputs sed commands that should fix some of the errors.
112 Note that this requires gnu sed. If that is installed as gsed on your
113 system (common e.g. on macOS with homebrew) you can use the gsed output
114 format. Sed commands are written to stdout, not stderr, so you should be
115 able to pipe output straight to a shell to run the fixes.
116
117 verbose=#
118 Specify a number 0-5 to restrict errors to certain verbosity levels.
119 Errors with lower verbosity levels have lower confidence and are more
120 likely to be false positives.
121
122 quiet
123 Don't print anything if no errors are found.
124
125 filter=-x,+y,...
126 Specify a comma-separated list of category-filters to apply: only
127 error messages whose category names pass the filters will be printed.
128 (Category names are printed with the message and look like
129 "[whitespace/indent]".) Filters are evaluated left to right.
130 "-FOO" means "do not print categories that start with FOO".
131 "+FOO" means "do print categories that start with FOO".
132
133 Examples: --filter=-whitespace,+whitespace/braces
134 --filter=-whitespace,-runtime/printf,+runtime/printf_format
135 --filter=-,+build/include_what_you_use
136
137 To see a list of all the categories used in cpplint, pass no arg:
138 --filter=
139
140 counting=total|toplevel|detailed
141 The total number of errors found is always printed. If
142 'toplevel' is provided, then the count of errors in each of
143 the top-level categories like 'build' and 'whitespace' will
144 also be printed. If 'detailed' is provided, then a count
145 is provided for each category like 'build/class'.
146
147 repository=path
148 The top level directory of the repository, used to derive the header
149 guard CPP variable. By default, this is determined by searching for a
150 path that contains .git, .hg, or .svn. When this flag is specified, the
151 given path is used instead. This option allows the header guard CPP
152 variable to remain consistent even if members of a team have different
153 repository root directories (such as when checking out a subdirectory
154 with SVN). In addition, users of non-mainstream version control systems
155 can use this flag to ensure readable header guard CPP variables.
156
157 Examples:
158 Assuming that Alice checks out ProjectName and Bob checks out
159 ProjectName/trunk and trunk contains src/chrome/ui/browser.h, then
160 with no --repository flag, the header guard CPP variable will be:
161
162 Alice => TRUNK_SRC_CHROME_BROWSER_UI_BROWSER_H_
163 Bob => SRC_CHROME_BROWSER_UI_BROWSER_H_
164
165 If Alice uses the --repository=trunk flag and Bob omits the flag or
166 uses --repository=. then the header guard CPP variable will be:
167
168 Alice => SRC_CHROME_BROWSER_UI_BROWSER_H_
169 Bob => SRC_CHROME_BROWSER_UI_BROWSER_H_
170
171 root=subdir
172 The root directory used for deriving header guard CPP variable.
173 This directory is relative to the top level directory of the repository
174 which by default is determined by searching for a directory that contains
175 .git, .hg, or .svn but can also be controlled with the --repository flag.
176 If the specified directory does not exist, this flag is ignored.
177
178 Examples:
179 Assuming that src is the top level directory of the repository (and
180 cwd=top/src), the header guard CPP variables for
181 src/chrome/browser/ui/browser.h are:
182
183 No flag => CHROME_BROWSER_UI_BROWSER_H_
184 --root=chrome => BROWSER_UI_BROWSER_H_
185 --root=chrome/browser => UI_BROWSER_H_
186 --root=.. => SRC_CHROME_BROWSER_UI_BROWSER_H_
187
188 linelength=digits
189 This is the allowed line length for the project. The default value is
190 80 characters.
191
192 Examples:
193 --linelength=120
194
195 recursive
196 Search for files to lint recursively. Each directory given in the list
197 of files to be linted is replaced by all files that descend from that
198 directory. Files with extensions not in the valid extensions list are
199 excluded.
200
201 exclude=path
202 Exclude the given path from the list of files to be linted. Relative
203 paths are evaluated relative to the current directory and shell globbing
204 is performed. This flag can be provided multiple times to exclude
205 multiple files.
206
207 Examples:
208 --exclude=one.cc
209 --exclude=src/*.cc
210 --exclude=src/*.cc --exclude=test/*.cc
211
212 extensions=extension,extension,...
213 The allowed file extensions that cpplint will check
214
215 Examples:
216 --extensions=%s
217
218 includeorder=default|standardcfirst
219 For the build/include_order rule, the default is to blindly assume angle
220 bracket includes with file extension are c-system-headers (default),
221 even knowing this will have false classifications.
222 The default is established at google.
223 standardcfirst means to instead use an allow-list of known c headers and
224 treat all others as separate group of "other system headers". The C headers
225 included are those of the C-standard lib and closely related ones.
226
227 headers=x,y,...
228 The header extensions that cpplint will treat as .h in checks. Values are
229 automatically added to --extensions list.
230 (by default, only files with extensions %s will be assumed to be headers)
231
232 Examples:
233 --headers=%s
234 --headers=hpp,hxx
235 --headers=hpp
236
237 cpplint.py supports per-directory configurations specified in CPPLINT.cfg
238 files. CPPLINT.cfg file can contain a number of key=value pairs.
239 Currently the following options are supported:
240
241 set noparent
242 filter=+filter1,-filter2,...
243 exclude_files=regex
244 linelength=80
245 root=subdir
246 headers=x,y,...
247
248 "set noparent" option prevents cpplint from traversing directory tree
249 upwards looking for more .cfg files in parent directories. This option
250 is usually placed in the top-level project directory.
251
252 The "filter" option is similar in function to --filter flag. It specifies
253 message filters in addition to the |_DEFAULT_FILTERS| and those specified
254 through --filter command-line flag.
255
256 "exclude_files" allows to specify a regular expression to be matched against
257 a file name. If the expression matches, the file is skipped and not run
258 through the linter.
259
260 "linelength" allows to specify the allowed line length for the project.
261
262 The "root" option is similar in function to the --root flag (see example
263 above). Paths are relative to the directory of the CPPLINT.cfg.
264
265 The "headers" option is similar in function to the --headers flag
266 (see example above).
267
268 CPPLINT.cfg has an effect on files in the same directory and all
269 sub-directories, unless overridden by a nested configuration file.
270
271 Example file:
272 filter=-build/include_order,+build/include_alpha
273 exclude_files=.*\\.cc
274
275 The above example disables build/include_order warning and enables
276 build/include_alpha as well as excludes all .cc from being
277 processed by linter, in the current directory (where the .cfg
278 file is located) and all sub-directories.
279 """
280
281 # We categorize each error message we print. Here are the categories.
282 # We want an explicit list so we can list them all in cpplint --filter=.
283 # If you add a new error message with a new category, add it to the list
284 # here! cpplint_unittest.py should tell you if you forget to do this.
285 _ERROR_CATEGORIES = [
286 'build/class',
287 'build/c++11',
288 'build/c++14',
289 'build/c++tr1',
290 'build/deprecated',
291 'build/endif_comment',
292 'build/explicit_make_pair',
293 'build/forward_decl',
294 'build/header_guard',
295 'build/include',
296 'build/include_subdir',
297 'build/include_alpha',
298 'build/include_order',
299 'build/include_what_you_use',
300 'build/namespaces_headers',
301 'build/namespaces_literals',
302 'build/namespaces',
303 'build/printf_format',
304 'build/storage_class',
305 'legal/copyright',
306 'readability/alt_tokens',
307 'readability/braces',
308 'readability/casting',
309 'readability/check',
310 'readability/constructors',
311 'readability/fn_size',
312 'readability/inheritance',
313 'readability/multiline_comment',
314 'readability/multiline_string',
315 'readability/namespace',
316 'readability/nolint',
317 'readability/nul',
318 'readability/strings',
319 'readability/todo',
320 'readability/utf8',
321 'runtime/arrays',
322 'runtime/casting',
323 'runtime/explicit',
324 'runtime/int',
325 'runtime/init',
326 'runtime/invalid_increment',
327 'runtime/member_string_references',
328 'runtime/memset',
329 'runtime/indentation_namespace',
330 'runtime/operator',
331 'runtime/printf',
332 'runtime/printf_format',
333 'runtime/references',
334 'runtime/string',
335 'runtime/threadsafe_fn',
336 'runtime/vlog',
337 'whitespace/blank_line',
338 'whitespace/braces',
339 'whitespace/comma',
340 'whitespace/comments',
341 'whitespace/empty_conditional_body',
342 'whitespace/empty_if_body',
343 'whitespace/empty_loop_body',
344 'whitespace/end_of_line',
345 'whitespace/ending_newline',
346 'whitespace/forcolon',
347 'whitespace/indent',
348 'whitespace/line_length',
349 'whitespace/newline',
350 'whitespace/operators',
351 'whitespace/parens',
352 'whitespace/semicolon',
353 'whitespace/tab',
354 'whitespace/todo',
355 ]
356
357 # keywords to use with --outputs which generate stdout for machine processing
358 _MACHINE_OUTPUTS = [
359 'junit',
360 'sed',
361 'gsed'
362 ]
363
364 # These error categories are no longer enforced by cpplint, but for backwards-
365 # compatibility they may still appear in NOLINT comments.
366 _LEGACY_ERROR_CATEGORIES = [
367 'readability/streams',
368 'readability/function',
369 ]
370
371 # The default state of the category filter. This is overridden by the --filter=
372 # flag. By default all errors are on, so only add here categories that should be
373 # off by default (i.e., categories that must be enabled by the --filter= flags).
374 # All entries here should start with a '-' or '+', as in the --filter= flag.
375 _DEFAULT_FILTERS = ['-build/include_alpha']
376
377 # The default list of categories suppressed for C (not C++) files.
378 _DEFAULT_C_SUPPRESSED_CATEGORIES = [
379 'readability/casting',
380 ]
381
382 # The default list of categories suppressed for Linux Kernel files.
383 _DEFAULT_KERNEL_SUPPRESSED_CATEGORIES = [
384 'whitespace/tab',
385 ]
386
387 # We used to check for high-bit characters, but after much discussion we
388 # decided those were OK, as long as they were in UTF-8 and didn't represent
389 # hard-coded international strings, which belong in a separate i18n file.
390
391 # C++ headers
392 _CPP_HEADERS = frozenset([
393 # Legacy
394 'algobase.h',
395 'algo.h',
396 'alloc.h',
397 'builtinbuf.h',
398 'bvector.h',
399 'complex.h',
400 'defalloc.h',
401 'deque.h',
402 'editbuf.h',
403 'fstream.h',
404 'function.h',
405 'hash_map',
406 'hash_map.h',
407 'hash_set',
408 'hash_set.h',
409 'hashtable.h',
410 'heap.h',
411 'indstream.h',
412 'iomanip.h',
413 'iostream.h',
414 'istream.h',
415 'iterator.h',
416 'list.h',
417 'map.h',
418 'multimap.h',
419 'multiset.h',
420 'ostream.h',
421 'pair.h',
422 'parsestream.h',
423 'pfstream.h',
424 'procbuf.h',
425 'pthread_alloc',
426 'pthread_alloc.h',
427 'rope',
428 'rope.h',
429 'ropeimpl.h',
430 'set.h',
431 'slist',
432 'slist.h',
433 'stack.h',
434 'stdiostream.h',
435 'stl_alloc.h',
436 'stl_relops.h',
437 'streambuf.h',
438 'stream.h',
439 'strfile.h',
440 'strstream.h',
441 'tempbuf.h',
442 'tree.h',
443 'type_traits.h',
444 'vector.h',
445 # 17.6.1.2 C++ library headers
446 'algorithm',
447 'array',
448 'atomic',
449 'bitset',
450 'chrono',
451 'codecvt',
452 'complex',
453 'condition_variable',
454 'deque',
455 'exception',
456 'forward_list',
457 'fstream',
458 'functional',
459 'future',
460 'initializer_list',
461 'iomanip',
462 'ios',
463 'iosfwd',
464 'iostream',
465 'istream',
466 'iterator',
467 'limits',
468 'list',
469 'locale',
470 'map',
471 'memory',
472 'mutex',
473 'new',
474 'numeric',
475 'ostream',
476 'queue',
477 'random',
478 'ratio',
479 'regex',
480 'scoped_allocator',
481 'set',
482 'sstream',
483 'stack',
484 'stdexcept',
485 'streambuf',
486 'string',
487 'strstream',
488 'system_error',
489 'thread',
490 'tuple',
491 'typeindex',
492 'typeinfo',
493 'type_traits',
494 'unordered_map',
495 'unordered_set',
496 'utility',
497 'valarray',
498 'vector',
499 # 17.6.1.2 C++14 headers
500 'shared_mutex',
501 # 17.6.1.2 C++17 headers
502 'any',
503 'charconv',
504 'codecvt',
505 'execution',
506 'filesystem',
507 'memory_resource',
508 'optional',
509 'string_view',
510 'variant',
511 # 17.6.1.2 C++ headers for C library facilities
512 'cassert',
513 'ccomplex',
514 'cctype',
515 'cerrno',
516 'cfenv',
517 'cfloat',
518 'cinttypes',
519 'ciso646',
520 'climits',
521 'clocale',
522 'cmath',
523 'csetjmp',
524 'csignal',
525 'cstdalign',
526 'cstdarg',
527 'cstdbool',
528 'cstddef',
529 'cstdint',
530 'cstdio',
531 'cstdlib',
532 'cstring',
533 'ctgmath',
534 'ctime',
535 'cuchar',
536 'cwchar',
537 'cwctype',
538 ])
539
540 # C headers
541 _C_HEADERS = frozenset([
542 # System C headers
543 'assert.h',
544 'complex.h',
545 'ctype.h',
546 'errno.h',
547 'fenv.h',
548 'float.h',
549 'inttypes.h',
550 'iso646.h',
551 'limits.h',
552 'locale.h',
553 'math.h',
554 'setjmp.h',
555 'signal.h',
556 'stdalign.h',
557 'stdarg.h',
558 'stdatomic.h',
559 'stdbool.h',
560 'stddef.h',
561 'stdint.h',
562 'stdio.h',
563 'stdlib.h',
564 'stdnoreturn.h',
565 'string.h',
566 'tgmath.h',
567 'threads.h',
568 'time.h',
569 'uchar.h',
570 'wchar.h',
571 'wctype.h',
572 # additional POSIX C headers
573 'aio.h',
574 'arpa/inet.h',
575 'cpio.h',
576 'dirent.h',
577 'dlfcn.h',
578 'fcntl.h',
579 'fmtmsg.h',
580 'fnmatch.h',
581 'ftw.h',
582 'glob.h',
583 'grp.h',
584 'iconv.h',
585 'langinfo.h',
586 'libgen.h',
587 'monetary.h',
588 'mqueue.h',
589 'ndbm.h',
590 'net/if.h',
591 'netdb.h',
592 'netinet/in.h',
593 'netinet/tcp.h',
594 'nl_types.h',
595 'poll.h',
596 'pthread.h',
597 'pwd.h',
598 'regex.h',
599 'sched.h',
600 'search.h',
601 'semaphore.h',
602 'setjmp.h',
603 'signal.h',
604 'spawn.h',
605 'strings.h',
606 'stropts.h',
607 'syslog.h',
608 'tar.h',
609 'termios.h',
610 'trace.h',
611 'ulimit.h',
612 'unistd.h',
613 'utime.h',
614 'utmpx.h',
615 'wordexp.h',
616 # additional GNUlib headers
617 'a.out.h',
618 'aliases.h',
619 'alloca.h',
620 'ar.h',
621 'argp.h',
622 'argz.h',
623 'byteswap.h',
624 'crypt.h',
625 'endian.h',
626 'envz.h',
627 'err.h',
628 'error.h',
629 'execinfo.h',
630 'fpu_control.h',
631 'fstab.h',
632 'fts.h',
633 'getopt.h',
634 'gshadow.h',
635 'ieee754.h',
636 'ifaddrs.h',
637 'libintl.h',
638 'mcheck.h',
639 'mntent.h',
640 'obstack.h',
641 'paths.h',
642 'printf.h',
643 'pty.h',
644 'resolv.h',
645 'shadow.h',
646 'sysexits.h',
647 'ttyent.h',
648 # Additional linux glibc headers
649 'dlfcn.h',
650 'elf.h',
651 'features.h',
652 'gconv.h',
653 'gnu-versions.h',
654 'lastlog.h',
655 'libio.h',
656 'link.h',
657 'malloc.h',
658 'memory.h',
659 'netash/ash.h',
660 'netatalk/at.h',
661 'netax25/ax25.h',
662 'neteconet/ec.h',
663 'netipx/ipx.h',
664 'netiucv/iucv.h',
665 'netpacket/packet.h',
666 'netrom/netrom.h',
667 'netrose/rose.h',
668 'nfs/nfs.h',
669 'nl_types.h',
670 'nss.h',
671 're_comp.h',
672 'regexp.h',
673 'sched.h',
674 'sgtty.h',
675 'stab.h',
676 'stdc-predef.h',
677 'stdio_ext.h',
678 'syscall.h',
679 'termio.h',
680 'thread_db.h',
681 'ucontext.h',
682 'ustat.h',
683 'utmp.h',
684 'values.h',
685 'wait.h',
686 'xlocale.h',
687 # Hardware specific headers
688 'arm_neon.h',
689 'emmintrin.h',
690 'xmmintin.h',
691 ])
692
693 # Folders of C libraries so commonly used in C++,
694 # that they have parity with standard C libraries.
695 C_STANDARD_HEADER_FOLDERS = frozenset([
696 # standard C library
697 "sys",
698 # glibc for linux
699 "arpa",
700 "asm-generic",
701 "bits",
702 "gnu",
703 "net",
704 "netinet",
705 "protocols",
706 "rpc",
707 "rpcsvc",
708 "scsi",
709 # linux kernel header
710 "drm",
711 "linux",
712 "misc",
713 "mtd",
714 "rdma",
715 "sound",
716 "video",
717 "xen",
718 ])
719
720 # Type names
721 _TYPES = re.compile(
722 r'^(?:'
723 # [dcl.type.simple]
724 r'(char(16_t|32_t)?)|wchar_t|'
725 r'bool|short|int|long|signed|unsigned|float|double|'
726 # [support.types]
727 r'(ptrdiff_t|size_t|max_align_t|nullptr_t)|'
728 # [cstdint.syn]
729 r'(u?int(_fast|_least)?(8|16|32|64)_t)|'
730 r'(u?int(max|ptr)_t)|'
731 r')$')
732
733
734 # These headers are excluded from [build/include] and [build/include_order]
735 # checks:
736 # - Anything not following google file name conventions (containing an
737 # uppercase character, such as Python.h or nsStringAPI.h, for example).
738 # - Lua headers.
739 _THIRD_PARTY_HEADERS_PATTERN = re.compile(
740 r'^(?:[^/]*[A-Z][^/]*\.h|lua\.h|lauxlib\.h|lualib\.h)$')
741
742 # Pattern for matching FileInfo.BaseName() against test file name
743 _test_suffixes = ['_test', '_regtest', '_unittest']
744 _TEST_FILE_SUFFIX = '(' + '|'.join(_test_suffixes) + r')$'
745
746 # Pattern that matches only complete whitespace, possibly across multiple lines.
747 _EMPTY_CONDITIONAL_BODY_PATTERN = re.compile(r'^\s*$', re.DOTALL)
748
749 # Assertion macros. These are defined in base/logging.h and
750 # testing/base/public/gunit.h.
751 _CHECK_MACROS = [
752 'DCHECK', 'CHECK',
753 'EXPECT_TRUE', 'ASSERT_TRUE',
754 'EXPECT_FALSE', 'ASSERT_FALSE',
755 ]
756
757 # Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE
758 _CHECK_REPLACEMENT = dict([(macro_var, {}) for macro_var in _CHECK_MACROS])
759
760 for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
761 ('>=', 'GE'), ('>', 'GT'),
762 ('<=', 'LE'), ('<', 'LT')]:
763 _CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement
764 _CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
765 _CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
766 _CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
767
768 for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
769 ('>=', 'LT'), ('>', 'LE'),
770 ('<=', 'GT'), ('<', 'GE')]:
771 _CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
772 _CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
773
774 # Alternative tokens and their replacements. For full list, see section 2.5
775 # Alternative tokens [lex.digraph] in the C++ standard.
776 #
777 # Digraphs (such as '%:') are not included here since it's a mess to
778 # match those on a word boundary.
779 _ALT_TOKEN_REPLACEMENT = {
780 'and': '&&',
781 'bitor': '|',
782 'or': '||',
783 'xor': '^',
784 'compl': '~',
785 'bitand': '&',
786 'and_eq': '&=',
787 'or_eq': '|=',
788 'xor_eq': '^=',
789 'not': '!',
790 'not_eq': '!='
791 }
792
793 # Compile regular expression that matches all the above keywords. The "[ =()]"
794 # bit is meant to avoid matching these keywords outside of boolean expressions.
795 #
796 # False positives include C-style multi-line comments and multi-line strings
797 # but those have always been troublesome for cpplint.
798 _ALT_TOKEN_REPLACEMENT_PATTERN = re.compile(
799 r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)')
800
801
802 # These constants define types of headers for use with
803 # _IncludeState.CheckNextIncludeOrder().
804 _C_SYS_HEADER = 1
805 _CPP_SYS_HEADER = 2
806 _OTHER_SYS_HEADER = 3
807 _LIKELY_MY_HEADER = 4
808 _POSSIBLE_MY_HEADER = 5
809 _OTHER_HEADER = 6
810
811 # These constants define the current inline assembly state
812 _NO_ASM = 0 # Outside of inline assembly block
813 _INSIDE_ASM = 1 # Inside inline assembly block
814 _END_ASM = 2 # Last line of inline assembly block
815 _BLOCK_ASM = 3 # The whole block is an inline assembly block
816
817 # Match start of assembly blocks
818 _MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)'
819 r'(?:\s+(volatile|__volatile__))?'
820 r'\s*[{(]')
821
822 # Match strings that indicate we're working on a C (not C++) file.
823 _SEARCH_C_FILE = re.compile(r'\b(?:LINT_C_FILE|'
824 r'vim?:\s*.*(\s*|:)filetype=c(\s*|:|$))')
825
826 # Match string that indicates we're working on a Linux Kernel file.
827 _SEARCH_KERNEL_FILE = re.compile(r'\b(?:LINT_KERNEL_FILE)')
828
829 # Commands for sed to fix the problem
830 _SED_FIXUPS = {
831 'Remove spaces around =': r's/ = /=/',
832 'Remove spaces around !=': r's/ != /!=/',
833 'Remove space before ( in if (': r's/if (/if(/',
834 'Remove space before ( in for (': r's/for (/for(/',
835 'Remove space before ( in while (': r's/while (/while(/',
836 'Remove space before ( in switch (': r's/switch (/switch(/',
837 'Should have a space between // and comment': r's/\/\//\/\/ /',
838 'Missing space before {': r's/\([^ ]\){/\1 {/',
839 'Tab found, replace by spaces': r's/\t/ /g',
840 'Line ends in whitespace. Consider deleting these extra spaces.': r's/\s*$//',
841 'You don\'t need a ; after a }': r's/};/}/',
842 'Missing space after ,': r's/,\([^ ]\)/, \1/g',
843 }
844
845 _regexp_compile_cache = {}
846
847 # {str, set(int)}: a map from error categories to sets of linenumbers
848 # on which those errors are expected and should be suppressed.
849 _error_suppressions = {}
850
851 # The root directory used for deriving header guard CPP variable.
852 # This is set by --root flag.
853 _root = None
854 _root_debug = False
855
856 # The top level repository directory. If set, _root is calculated relative to
857 # this directory instead of the directory containing version control artifacts.
858 # This is set by the --repository flag.
859 _repository = None
860
861 # Files to exclude from linting. This is set by the --exclude flag.
862 _excludes = None
863
864 # Whether to suppress all PrintInfo messages, UNRELATED to --quiet flag
865 _quiet = False
866
867 # The allowed line length of files.
868 # This is set by --linelength flag.
869 _line_length = 80
870
871 # This allows to use different include order rule than default
872 _include_order = "default"
873
874 try:
875 unicode
876 except NameError:
877 # -- pylint: disable=redefined-builtin
878 basestring = unicode = str
879
880 try:
881 long
882 except NameError:
883 # -- pylint: disable=redefined-builtin
884 long = int
885
886 if sys.version_info < (3,):
887 # -- pylint: disable=no-member
888 # BINARY_TYPE = str
889 itervalues = dict.itervalues
890 iteritems = dict.iteritems
891 else:
892 # BINARY_TYPE = bytes
893 itervalues = dict.values
894 iteritems = dict.items
895
def unicode_escape_decode(x):
  """Decode unicode escapes under Python 2; return the string as-is on Python 3."""
  if sys.version_info[0] >= 3:
    return x
  return codecs.unicode_escape_decode(x)[0]
901
902 # Treat all headers starting with 'h' equally: .h, .hpp, .hxx etc.
903 # This is set by --headers flag.
904 _hpp_headers = set([])
905
906 # {str, bool}: a map from error categories to booleans which indicate if the
907 # category should be suppressed for every line.
908 _global_error_suppressions = {}
909
def ProcessHppHeadersOption(val):
  """Parse the --headers=x,y,... flag value into the global _hpp_headers set."""
  global _hpp_headers
  try:
    parts = val.split(',')
    _hpp_headers = set(ext.strip() for ext in parts)
  except ValueError:
    PrintUsage('Header extensions must be comma separated list.')
916
def ProcessIncludeOrderOption(val):
  """Parse the --includeorder flag value.

  Args:
    val: str or None; 'default' leaves the rule unchanged, 'standardcfirst'
         switches to the allow-list-of-C-headers ordering. Anything else is
         reported via PrintUsage.
  """
  if val is None or val == "default":
    pass
  elif val == "standardcfirst":
    global _include_order
    _include_order = val
  else:
    # Bug fix: the original message had a '%s' placeholder but never
    # interpolated val, so the literal '%s' was printed to the user.
    PrintUsage('Invalid includeorder value %s. Expected default|standardcfirst'
               % val)
925
def IsHeaderExtension(file_extension):
  """Return True if the given extension (without the dot) is a header extension."""
  header_extensions = GetHeaderExtensions()
  return file_extension in header_extensions
928
def GetHeaderExtensions():
  """Return the set of file extensions treated as headers.

  Precedence: explicit --headers values first, then header-like entries
  (those containing an 'h') from --extensions, then built-in defaults.
  """
  if _hpp_headers:
    return _hpp_headers
  if _valid_extensions:
    return set(ext for ext in _valid_extensions if 'h' in ext)
  return {'h', 'hh', 'hpp', 'hxx', 'h++', 'cuh'}
935
936 # The allowed extensions for file names
937 # This is set by --extensions flag
def GetAllExtensions():
  """Return every file extension cpplint will lint (headers plus sources)."""
  source_extensions = _valid_extensions or {'c', 'cc', 'cpp', 'cxx', 'c++', 'cu'}
  return GetHeaderExtensions() | set(source_extensions)
941
def ProcessExtensionsOption(val):
  """Parse the --extensions=a,b,... flag value into the global _valid_extensions set."""
  global _valid_extensions
  try:
    _valid_extensions = {ext.strip() for ext in val.split(',')}
  except ValueError:
    PrintUsage('Extensions should be a comma-separated list of values;'
               'for example: extensions=hpp,cpp\n'
               'This could not be parsed: "%s"' % (val,))
951
def GetNonHeaderExtensions():
  """Return the extensions linted as source (non-header) files."""
  return GetAllExtensions() - GetHeaderExtensions()
954
def ParseNolintSuppressions(filename, raw_line, linenum, error):
  """Updates the global list of line error-suppressions.

  Parses any NOLINT comments on the current line, updating the global
  error_suppressions store.  Reports an error if the NOLINT comment
  was malformed.

  Args:
    filename: str, the name of the input file.
    raw_line: str, the line of input text, with comments.
    linenum: int, the number of the current line.
    error: function, an error handler.
  """
  matched = Search(r'\bNOLINT(NEXTLINE)?\b(\([^)]+\))?', raw_line)
  if not matched:
    return
  # NOLINTNEXTLINE suppresses the line after the comment, not the comment's
  # own line.
  suppressed_line = linenum + 1 if matched.group(1) else linenum
  category = matched.group(2)
  if category in (None, '(*)'):  # => "suppress all"
    _error_suppressions.setdefault(None, set()).add(suppressed_line)
    return
  if category.startswith('(') and category.endswith(')'):
    category = category[1:-1]
    if category in _ERROR_CATEGORIES:
      _error_suppressions.setdefault(category, set()).add(suppressed_line)
    elif category not in _LEGACY_ERROR_CATEGORIES:
      error(filename, linenum, 'readability/nolint', 5,
            'Unknown NOLINT error category: %s' % category)
985
986
def ProcessGlobalSuppresions(lines):
  """Updates the list of global error suppressions.

  Parses any lint directives in the file that have global effect.

  Args:
    lines: An array of strings, each representing a line of the file, with the
           last element being empty if the file is terminated with a newline.
  """
  for line in lines:
    if _SEARCH_C_FILE.search(line):
      # Marker indicates a C (not C++) file: suppress the C++-only categories.
      _global_error_suppressions.update(
          dict.fromkeys(_DEFAULT_C_SUPPRESSED_CATEGORIES, True))
    if _SEARCH_KERNEL_FILE.search(line):
      # Marker indicates a Linux Kernel file: suppress kernel-style categories.
      _global_error_suppressions.update(
          dict.fromkeys(_DEFAULT_KERNEL_SUPPRESSED_CATEGORIES, True))
1003
1004
def ResetNolintSuppressions():
  """Resets the set of NOLINT suppressions to empty."""
  for suppression_map in (_error_suppressions, _global_error_suppressions):
    suppression_map.clear()
1009
1010
def IsErrorSuppressedByNolint(category, linenum):
  """Returns true if the specified error category is suppressed on this line.

  Consults the global error_suppressions map populated by
  ParseNolintSuppressions/ProcessGlobalSuppresions/ResetNolintSuppressions.

  Args:
    category: str, the category of the error.
    linenum: int, the current line number.
  Returns:
    bool, True iff the error should be suppressed due to a NOLINT comment or
    global suppression.
  """
  category_lines = _error_suppressions.get(category, set())
  all_category_lines = _error_suppressions.get(None, set())
  return (_global_error_suppressions.get(category, False) or
          linenum in category_lines or
          linenum in all_category_lines)
1027
1028
def Match(pattern, s):
  """Matches the string with the pattern, caching the compiled regexp."""
  # The regexp compilation caching is inlined in both Match and Search for
  # performance reasons; factoring it out into a separate function turns out
  # to be noticeably expensive.
  if pattern not in _regexp_compile_cache:
    # Use the public re.compile instead of the private sre_compile module,
    # which emits a DeprecationWarning since Python 3.11.
    _regexp_compile_cache[pattern] = re.compile(pattern)
  return _regexp_compile_cache[pattern].match(s)
1037
1038
def ReplaceAll(pattern, rep, s):
  """Replaces instances of pattern in a string with a replacement.

  The compiled regex is kept in a cache shared by Match and Search.

  Args:
    pattern: regex pattern
    rep: replacement text
    s: search string

  Returns:
    string with replacements made (or original string if no replacements)
  """
  if pattern not in _regexp_compile_cache:
    # Use the public re.compile API rather than the internal sre_compile
    # module, which is deprecated since Python 3.11.
    _regexp_compile_cache[pattern] = re.compile(pattern)
  return _regexp_compile_cache[pattern].sub(rep, s)
1055
1056
def Search(pattern, s):
  """Searches the string for the pattern, caching the compiled regexp.

  Args:
    pattern: regex pattern string.
    s: string to search.

  Returns:
    A match object for the first occurrence, or None if not found.
  """
  if pattern not in _regexp_compile_cache:
    # Use the public re.compile API rather than the internal sre_compile
    # module, which is deprecated since Python 3.11.
    _regexp_compile_cache[pattern] = re.compile(pattern)
  return _regexp_compile_cache[pattern].search(s)
1062
1063
def _IsSourceExtension(s):
  """File extension (excluding dot) matches a source file extension.

  Args:
    s: file extension without the leading dot (e.g. 'cc').

  Returns:
    True if s is one of the configured non-header (source) extensions.
  """
  return s in GetNonHeaderExtensions()
1067
1068
class _IncludeState(object):
  """Tracks line numbers for includes, and the order in which includes appear.

  include_list contains list of lists of (header, line number) pairs.
  It's a lists of lists rather than just one flat list to make it
  easier to update across preprocessor boundaries.

  Call CheckNextIncludeOrder() once for each header in the file, passing
  in the type constants defined above. Calls in an illegal order will
  raise an _IncludeError with an appropriate error message.

  """
  # self._section advances monotonically through these values. Any attempt
  # to move backwards makes CheckNextIncludeOrder report an error.
  _INITIAL_SECTION = 0
  _MY_H_SECTION = 1
  _C_SECTION = 2
  _CPP_SECTION = 3
  _OTHER_SYS_SECTION = 4
  _OTHER_H_SECTION = 5

  # Human-readable names for the module-level header-type constants and for
  # the section constants above; used to build error messages.
  _TYPE_NAMES = {
      _C_SYS_HEADER: 'C system header',
      _CPP_SYS_HEADER: 'C++ system header',
      _OTHER_SYS_HEADER: 'other system header',
      _LIKELY_MY_HEADER: 'header this file implements',
      _POSSIBLE_MY_HEADER: 'header this file may implement',
      _OTHER_HEADER: 'other header',
      }
  _SECTION_NAMES = {
      _INITIAL_SECTION: "... nothing. (This can't be an error.)",
      _MY_H_SECTION: 'a header this file implements',
      _C_SECTION: 'C system header',
      _CPP_SECTION: 'C++ system header',
      _OTHER_SYS_SECTION: 'other system header',
      _OTHER_H_SECTION: 'other header',
      }

  def __init__(self):
    self.include_list = [[]]
    self._section = None
    self._last_header = None
    self.ResetSection('')

  def FindHeader(self, header):
    """Check if a header has already been included.

    Args:
      header: header to check.
    Returns:
      Line number of previous occurrence, or -1 if the header has not
      been seen before.
    """
    for section in self.include_list:
      for included_header, line_number in section:
        if included_header == header:
          return line_number
    return -1

  def ResetSection(self, directive):
    """Reset section checking for preprocessor directive.

    Args:
      directive: preprocessor directive (e.g. "if", "else").
    """
    # Back to the start state; any header name compares after ''.
    self._section = self._INITIAL_SECTION
    self._last_header = ''

    # Includes are tracked per preprocessor branch: a new conditional opens
    # a fresh sub-list, else/elif restarts the current one. Note that we
    # never pop from the include list.
    if directive in ('if', 'ifdef', 'ifndef'):
      self.include_list.append([])
    elif directive in ('else', 'elif'):
      self.include_list[-1] = []

  def SetLastHeader(self, header_path):
    self._last_header = header_path

  def CanonicalizeAlphabeticalOrder(self, header_path):
    """Returns a path canonicalized for alphabetical comparison.

    - replaces "-" with "_" so they both cmp the same.
    - removes '-inl' since we don't require them to be after the main header.
    - lowercase everything, just in case.

    Args:
      header_path: Path to be canonicalized.

    Returns:
      Canonicalized path.
    """
    canonical = header_path.replace('-inl.h', '.h')
    return canonical.replace('-', '_').lower()

  def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path):
    """Check if a header is in alphabetical order with the previous header.

    Args:
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      header_path: Canonicalized header to be checked.

    Returns:
      Returns true if the header is in alphabetical order.
    """
    # If previous section is different from current section, _last_header
    # was reset to empty string, so it always compares before the current
    # header and the ordering check trivially passes.
    if self._last_header <= header_path:
      return True
    # Out of order -- but if the previous line was not an #include (e.g. a
    # blank line), assume the grouping is intentional and accept it.
    return not Match(r'^\s*#\s*include\b', clean_lines.elided[linenum - 1])

  def CheckNextIncludeOrder(self, header_type):
    """Returns a non-empty error message if the next header is out of order.

    This function also updates the internal state to be ready to check
    the next include.

    Args:
      header_type: One of the _XXX_HEADER constants defined above.

    Returns:
      The empty string if the header is in the right order, or an
      error message describing what's wrong.

    """
    error_message = ('Found %s after %s' %
                     (self._TYPE_NAMES[header_type],
                      self._SECTION_NAMES[self._section]))

    last_section = self._section

    # System headers are "strict": once the section pointer has moved past
    # the section a header belongs to, seeing it again is an ordering error.
    strict_sections = {
        _C_SYS_HEADER: self._C_SECTION,
        _CPP_SYS_HEADER: self._CPP_SECTION,
        _OTHER_SYS_HEADER: self._OTHER_SYS_SECTION,
    }
    if header_type in strict_sections:
      target_section = strict_sections[header_type]
      if self._section > target_section:
        self._last_header = ''
        return error_message
      self._section = target_section
    elif header_type in (_LIKELY_MY_HEADER, _POSSIBLE_MY_HEADER):
      if self._section <= self._MY_H_SECTION:
        self._section = self._MY_H_SECTION
      else:
        # Fall back to treating it as an ordinary header; we are not sure
        # enough that the header is associated with this file.
        self._section = self._OTHER_H_SECTION
    else:
      assert header_type == _OTHER_HEADER
      self._section = self._OTHER_H_SECTION

    # Entering a new section invalidates the alphabetical-order baseline.
    if last_section != self._section:
      self._last_header = ''

    return ''
1243
1244
class _CppLintState(object):
  """Maintains module-wide state: verbosity, filters, error counts, output."""

  def __init__(self):
    self.verbose_level = 1  # global setting.
    self.error_count = 0    # global count of reported errors
    # filters to apply when emitting error messages
    self.filters = _DEFAULT_FILTERS[:]
    # backup of filter list. Used to restore the state after each file.
    self._filters_backup = self.filters[:]
    self.counting = 'total'  # In what way are we counting errors?
    self.errors_by_category = {}  # string to int dict storing error counts
    self.quiet = False  # Suppress non-error messages?

    # output format:
    # "emacs" - format that emacs can parse (default)
    # "eclipse" - format that eclipse can parse
    # "vs7" - format that Microsoft Visual Studio 7 can parse
    # "junit" - format that Jenkins, Bamboo, etc can parse
    # "sed" - returns a gnu sed command to fix the problem
    # "gsed" - like sed, but names the command gsed, e.g. for macOS homebrew users
    self.output_format = 'emacs'

    # For JUnit output, save errors and failures until the end so that they
    # can be written into the XML
    self._junit_errors = []
    self._junit_failures = []

  def SetOutputFormat(self, output_format):
    """Sets the output format for errors."""
    self.output_format = output_format

  def SetQuiet(self, quiet):
    """Sets the module's quiet settings, and returns the previous setting."""
    last_quiet = self.quiet
    self.quiet = quiet
    return last_quiet

  def SetVerboseLevel(self, level):
    """Sets the module's verbosity, and returns the previous setting."""
    last_verbose_level = self.verbose_level
    self.verbose_level = level
    return last_verbose_level

  def SetCountingStyle(self, counting_style):
    """Sets the module's counting options."""
    self.counting = counting_style

  def SetFilters(self, filters):
    """Sets the error-message filters.

    These filters are applied when deciding whether to emit a given
    error message.

    Args:
      filters: A string of comma-separated filters (eg "+whitespace/indent").
               Each filter should start with + or -; else we die.

    Raises:
      ValueError: The comma-separated filters did not all start with '+' or '-'.
                  E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter"
    """
    # Default filters always have less priority than the flag ones.
    self.filters = _DEFAULT_FILTERS[:]
    self.AddFilters(filters)

  def AddFilters(self, filters):
    """ Adds more filters to the existing list of error-message filters. """
    for filt in filters.split(','):
      clean_filt = filt.strip()
      if clean_filt:
        self.filters.append(clean_filt)
    # Validate the whole list (defaults included), not just the new entries.
    for filt in self.filters:
      if not (filt.startswith('+') or filt.startswith('-')):
        raise ValueError('Every filter in --filters must start with + or -'
                         ' (%s does not)' % filt)

  def BackupFilters(self):
    """ Saves the current filter list to backup storage."""
    self._filters_backup = self.filters[:]

  def RestoreFilters(self):
    """ Restores filters previously backed up."""
    self.filters = self._filters_backup[:]

  def ResetErrorCounts(self):
    """Sets the module's error statistic back to zero."""
    self.error_count = 0
    self.errors_by_category = {}

  def IncrementErrorCount(self, category):
    """Bumps the module's error statistic."""
    self.error_count += 1
    if self.counting in ('toplevel', 'detailed'):
      # 'toplevel' counting collapses e.g. "whitespace/indent" to "whitespace".
      if self.counting != 'detailed':
        category = category.split('/')[0]
      if category not in self.errors_by_category:
        self.errors_by_category[category] = 0
      self.errors_by_category[category] += 1

  def PrintErrorCounts(self):
    """Print a summary of errors by category, and the total."""
    for category, count in sorted(iteritems(self.errors_by_category)):
      self.PrintInfo('Category \'%s\' errors found: %d\n' %
                     (category, count))
    if self.error_count > 0:
      self.PrintInfo('Total errors found: %d\n' % self.error_count)

  def PrintInfo(self, message):
    """Writes an informational message to stdout, unless suppressed."""
    # _quiet does not represent --quiet flag.
    # Hide infos from stdout to keep stdout pure for machine consumption
    if not _quiet and self.output_format not in _MACHINE_OUTPUTS:
      sys.stdout.write(message)

  def PrintError(self, message):
    """Writes an error message: buffered for JUnit output, else to stderr."""
    if self.output_format == 'junit':
      self._junit_errors.append(message)
    else:
      sys.stderr.write(message)

  def AddJUnitFailure(self, filename, linenum, message, category, confidence):
    """Records one lint failure for later inclusion in the JUnit XML."""
    self._junit_failures.append((filename, linenum, message, category,
                                 confidence))

  def FormatJUnitXML(self):
    """Renders all recorded errors and failures as a JUnit XML document."""
    num_errors = len(self._junit_errors)
    num_failures = len(self._junit_failures)

    testsuite = xml.etree.ElementTree.Element('testsuite')
    testsuite.attrib['errors'] = str(num_errors)
    testsuite.attrib['failures'] = str(num_failures)
    testsuite.attrib['name'] = 'cpplint'

    if num_errors == 0 and num_failures == 0:
      # JUnit consumers expect at least one testcase; emit a synthetic pass.
      testsuite.attrib['tests'] = str(1)
      xml.etree.ElementTree.SubElement(testsuite, 'testcase', name='passed')

    else:
      testsuite.attrib['tests'] = str(num_errors + num_failures)
      if num_errors > 0:
        testcase = xml.etree.ElementTree.SubElement(testsuite, 'testcase')
        testcase.attrib['name'] = 'errors'
        error = xml.etree.ElementTree.SubElement(testcase, 'error')
        error.text = '\n'.join(self._junit_errors)
      if num_failures > 0:
        # Group failures by file
        failed_file_order = []
        failures_by_file = {}
        for failure in self._junit_failures:
          failed_file = failure[0]
          if failed_file not in failed_file_order:
            failed_file_order.append(failed_file)
            failures_by_file[failed_file] = []
          failures_by_file[failed_file].append(failure)
        # Create a testcase for each file
        for failed_file in failed_file_order:
          failures = failures_by_file[failed_file]
          testcase = xml.etree.ElementTree.SubElement(testsuite, 'testcase')
          testcase.attrib['name'] = failed_file
          failure = xml.etree.ElementTree.SubElement(testcase, 'failure')
          template = '{0}: {1} [{2}] [{3}]'
          texts = [template.format(f[1], f[2], f[3], f[4]) for f in failures]
          failure.text = '\n'.join(texts)

    xml_decl = '<?xml version="1.0" encoding="UTF-8" ?>\n'
    return xml_decl + xml.etree.ElementTree.tostring(testsuite, 'utf-8').decode('utf-8')
1411
1412
# The single, shared lint-state instance; manipulated through the
# module-level helper functions below.
_cpplint_state = _CppLintState()
1414
1415
def _OutputFormat():
  """Gets the module's output format."""
  return _cpplint_state.output_format


def _SetOutputFormat(output_format):
  """Sets the module's output format."""
  _cpplint_state.SetOutputFormat(output_format)

def _Quiet():
  """Returns the module's quiet setting."""
  return _cpplint_state.quiet

def _SetQuiet(quiet):
  """Sets the module's quiet status, and returns the previous setting."""
  return _cpplint_state.SetQuiet(quiet)


def _VerboseLevel():
  """Returns the module's verbosity setting."""
  return _cpplint_state.verbose_level


def _SetVerboseLevel(level):
  """Sets the module's verbosity, and returns the previous setting."""
  return _cpplint_state.SetVerboseLevel(level)


def _SetCountingStyle(level):
  """Sets the module's counting options."""
  _cpplint_state.SetCountingStyle(level)


def _Filters():
  """Returns the module's list of output filters, as a list."""
  return _cpplint_state.filters


def _SetFilters(filters):
  """Sets the module's error-message filters.

  These filters are applied when deciding whether to emit a given
  error message.

  Args:
    filters: A string of comma-separated filters (eg "whitespace/indent").
             Each filter should start with + or -; else we die.
  """
  _cpplint_state.SetFilters(filters)

def _AddFilters(filters):
  """Adds more filter overrides.

  Unlike _SetFilters, this function does not reset the current list of filters
  available.

  Args:
    filters: A string of comma-separated filters (eg "whitespace/indent").
             Each filter should start with + or -; else we die.
  """
  _cpplint_state.AddFilters(filters)

def _BackupFilters():
  """ Saves the current filter list to backup storage."""
  _cpplint_state.BackupFilters()

def _RestoreFilters():
  """ Restores filters previously backed up."""
  _cpplint_state.RestoreFilters()
1485
1486 class _FunctionState(object):
1487 """Tracks current function name and the number of lines in its body."""
1488
1489 _NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc.
1490 _TEST_TRIGGER = 400 # about 50% more than _NORMAL_TRIGGER.
1491
1492 def __init__(self):
1493 self.in_a_function = False
1494 self.lines_in_function = 0
1495 self.current_function = ''
1496
1497 def Begin(self, function_name):
1498 """Start analyzing function body.
1499
1500 Args:
1501 function_name: The name of the function being tracked.
1502 """
1503 self.in_a_function = True
1504 self.lines_in_function = 0
1505 self.current_function = function_name
1506
1507 def Count(self):
1508 """Count line in current function body."""
1509 if self.in_a_function:
1510 self.lines_in_function += 1
1511
1512 def Check(self, error, filename, linenum):
1513 """Report if too many lines in function body.
1514
1515 Args:
1516 error: The function to call with any errors found.
1517 filename: The name of the current file.
1518 linenum: The number of the line to check.
1519 """
1520 if not self.in_a_function:
1521 return
1522
1523 if Match(r'T(EST|est)', self.current_function):
1524 base_trigger = self._TEST_TRIGGER
1525 else:
1526 base_trigger = self._NORMAL_TRIGGER
1527 trigger = base_trigger * 2**_VerboseLevel()
1528
1529 if self.lines_in_function > trigger:
1530 error_level = int(math.log(self.lines_in_function / base_trigger, 2))
1531 # 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ...
1532 if error_level > 5:
1533 error_level = 5
1534 error(filename, linenum, 'readability/fn_size', error_level,
1535 'Small and focused functions are preferred:'
1536 ' %s has %d non-comment lines'
1537 ' (error triggered by exceeding %d lines).' % (
1538 self.current_function, self.lines_in_function, trigger))
1539
1540 def End(self):
1541 """Stop analyzing function body."""
1542 self.in_a_function = False
1543
1544
1545 class _IncludeError(Exception):
1546 """Indicates a problem with the include order in a file."""
1547 pass
1548
1549
class FileInfo(object):
  """Provides utility functions for filenames.

  FileInfo provides easy access to the components of a file's path
  relative to the project root.
  """

  def __init__(self, filename):
    self._filename = filename

  def FullName(self):
    """Make Windows paths like Unix."""
    return os.path.abspath(self._filename).replace('\\', '/')

  def RepositoryName(self):
    r"""FullName after removing the local path to the repository.

    If we have a real absolute path name here we can try to do something smart:
    detecting the root of the checkout and truncating /path/to/checkout from
    the name so that we get header guards that don't include things like
    "C:\\Documents and Settings\\..." or "/home/username/..." in them and thus
    people on different computers who have checked the source out to different
    locations won't see bogus errors.
    """
    fullname = self.FullName()

    if os.path.exists(fullname):
      project_dir = os.path.dirname(fullname)

      # If the user specified a repository path, it exists, and the file is
      # contained in it, use the specified repository path
      if _repository:
        repo = FileInfo(_repository).FullName()
        root_dir = project_dir
        while os.path.exists(root_dir):
          # allow case insensitive compare on Windows
          if os.path.normcase(root_dir) == os.path.normcase(repo):
            return os.path.relpath(fullname, root_dir).replace('\\', '/')
          one_up_dir = os.path.dirname(root_dir)
          if one_up_dir == root_dir:
            # Reached the filesystem root without finding the repository.
            break
          root_dir = one_up_dir

      if os.path.exists(os.path.join(project_dir, ".svn")):
        # If there's a .svn file in the current directory, we recursively look
        # up the directory tree for the top of the SVN checkout
        root_dir = project_dir
        one_up_dir = os.path.dirname(root_dir)
        while os.path.exists(os.path.join(one_up_dir, ".svn")):
          root_dir = os.path.dirname(root_dir)
          one_up_dir = os.path.dirname(one_up_dir)

        prefix = os.path.commonprefix([root_dir, project_dir])
        return fullname[len(prefix) + 1:]

      # Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by
      # searching up from the current path.
      root_dir = current_dir = os.path.dirname(fullname)
      while current_dir != os.path.dirname(current_dir):
        if (os.path.exists(os.path.join(current_dir, ".git")) or
            os.path.exists(os.path.join(current_dir, ".hg")) or
            os.path.exists(os.path.join(current_dir, ".svn"))):
          root_dir = current_dir
        current_dir = os.path.dirname(current_dir)

      if (os.path.exists(os.path.join(root_dir, ".git")) or
          os.path.exists(os.path.join(root_dir, ".hg")) or
          os.path.exists(os.path.join(root_dir, ".svn"))):
        prefix = os.path.commonprefix([root_dir, project_dir])
        return fullname[len(prefix) + 1:]

    # Don't know what to do; header guard warnings may be wrong...
    return fullname

  def Split(self):
    """Splits the file into the directory, basename, and extension.

    For 'chrome/browser/browser.cc', Split() would
    return ('chrome/browser', 'browser', '.cc')

    Returns:
      A tuple of (directory, basename, extension).
    """

    googlename = self.RepositoryName()
    project, rest = os.path.split(googlename)
    return (project,) + os.path.splitext(rest)

  def BaseName(self):
    """File base name - text after the final slash, before the final period."""
    return self.Split()[1]

  def Extension(self):
    """File extension - text following the final period, includes that period."""
    return self.Split()[2]

  def NoExtension(self):
    """File has no source file extension."""
    return '/'.join(self.Split()[0:2])

  def IsSource(self):
    """File has a source file extension."""
    return _IsSourceExtension(self.Extension()[1:])
1653
1654
def _ShouldPrintError(category, confidence, linenum):
  """If confidence >= verbose, category passes filter and is not suppressed."""

  # Three ways we might decide not to print an error message: a
  # "NOLINT(category)" comment appears in the source, the verbosity level
  # isn't high enough, or the filters filter it out.
  if IsErrorSuppressedByNolint(category, linenum):
    return False

  if confidence < _cpplint_state.verbose_level:
    return False

  # Apply the +/- filters in order; the last matching prefix wins.
  is_filtered = False
  for one_filter in _Filters():
    filter_prefix = one_filter[1:]
    if one_filter.startswith('-'):
      if category.startswith(filter_prefix):
        is_filtered = True
    elif one_filter.startswith('+'):
      if category.startswith(filter_prefix):
        is_filtered = False
    else:
      assert False  # should have been checked for in SetFilter.
  return not is_filtered
1681
1682
def Error(filename, linenum, category, confidence, message):
  """Logs the fact we've found a lint error.

  We log where the error was found, and also our confidence in the error,
  that is, how certain we are this is a legitimate style regression, and
  not a misidentification or a use that's sometimes justified.

  False positives can be suppressed by the use of
  "cpplint(category)" comments on the offending line. These are
  parsed into _error_suppressions.

  Args:
    filename: The name of the file containing the error.
    linenum: The number of the line containing the error.
    category: A string used to describe the "category" this bug
      falls under: "whitespace", say, or "runtime". Categories
      may have a hierarchy separated by slashes: "whitespace/indent".
    confidence: A number from 1-5 representing a confidence score for
      the error, with 5 meaning that we are certain of the problem,
      and 1 meaning that it could be a legitimate construct.
    message: The error message.
  """
  if _ShouldPrintError(category, confidence, linenum):
    _cpplint_state.IncrementErrorCount(category)
    if _cpplint_state.output_format == 'vs7':
      _cpplint_state.PrintError('%s(%s): error cpplint: [%s] %s [%d]\n' % (
          filename, linenum, category, message, confidence))
    elif _cpplint_state.output_format == 'eclipse':
      sys.stderr.write('%s:%s: warning: %s [%s] [%d]\n' % (
          filename, linenum, message, category, confidence))
    elif _cpplint_state.output_format == 'junit':
      _cpplint_state.AddJUnitFailure(filename, linenum, message, category,
                                     confidence)
    elif _cpplint_state.output_format in ['sed', 'gsed']:
      # For messages with a known sed fixup, emit a runnable sed command on
      # stdout; otherwise emit the diagnostic as a shell comment on stderr.
      if message in _SED_FIXUPS:
        sys.stdout.write(_cpplint_state.output_format + " -i '%s%s' %s # %s [%s] [%d]\n" % (
            linenum, _SED_FIXUPS[message], filename, message, category, confidence))
      else:
        sys.stderr.write('# %s:%s: "%s" [%s] [%d]\n' % (
            filename, linenum, message, category, confidence))
    else:
      # Default 'emacs' format: file:line: message [category] [confidence]
      final_message = '%s:%s: %s [%s] [%d]\n' % (
          filename, linenum, message, category, confidence)
      sys.stderr.write(final_message)
1727
# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
    r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
# Match a single C style comment on the same line.
_RE_PATTERN_C_COMMENTS = r'/\*(?:[^*]|\*(?!/))*\*/'
# Matches multi-line C style comments.
# This RE is a little bit more complicated than one might expect, because we
# have to take care of space removals tools so we can handle comments inside
# statements better.
# The current rule is: We only clear spaces from both sides when we're at the
# end of the line. Otherwise, we try to remove spaces from the right side,
# if this doesn't work we try on left side but only if there's a non-character
# on the right.
_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
    r'(\s*' + _RE_PATTERN_C_COMMENTS + r'\s*$|' +
    _RE_PATTERN_C_COMMENTS + r'\s+|' +
    r'\s+' + _RE_PATTERN_C_COMMENTS + r'(?=\W)|' +
    _RE_PATTERN_C_COMMENTS + r')')
1746
1747
def IsCppString(line):
  """Does line terminate so, that the next symbol is in string constant.

  This function does not consider single-line nor multi-line comments.

  Args:
    line: is a partial line of code starting from the 0..n.

  Returns:
    True, if next character appended to 'line' is inside a
    string constant.
  """

  # Collapse escaped backslashes first, so that \\" is not mistaken for \".
  collapsed = line.replace(r'\\', 'XX')
  # Count quotes that actually open/close a string: all double quotes, minus
  # escaped ones, minus the character literal '"'. An odd count means the
  # line ends inside an unterminated string.
  real_quotes = (collapsed.count('"') - collapsed.count(r'\"') -
                 collapsed.count("'\"'"))
  return real_quotes % 2 == 1
1763
1764
def CleanseRawStrings(raw_lines):
  """Removes C++11 raw strings from lines.

  Before:
    static const char kData[] = R"(
        multi-line string
        )";

  After:
    static const char kData[] = ""
        (replaced by blank line)
        "";

  Args:
    raw_lines: list of raw lines.

  Returns:
    list of lines with C++11 raw strings replaced by empty strings.
  """

  # delimiter is None when outside any raw string; otherwise it holds the
  # closing sequence (e.g. ')FOO"') we are scanning for.
  delimiter = None
  lines_without_raw_strings = []
  for line in raw_lines:
    if delimiter:
      # Inside a raw string, look for the end
      end = line.find(delimiter)
      if end >= 0:
        # Found the end of the string, match leading space for this
        # line and resume copying the original lines, and also insert
        # a "" on the last line.
        leading_space = Match(r'^(\s*)\S', line)
        line = leading_space.group(1) + '""' + line[end + len(delimiter):]
        delimiter = None
      else:
        # Haven't found the end yet, append a blank line.
        line = '""'

    # Look for beginning of a raw string, and replace them with
    # empty strings. This is done in a loop to handle multiple raw
    # strings on the same line.
    while delimiter is None:
      # Look for beginning of a raw string.
      # See 2.14.15 [lex.string] for syntax.
      #
      # Once we have matched a raw string, we check the prefix of the
      # line to make sure that the line is not part of a single line
      # comment.  It's done this way because we remove raw strings
      # before removing comments as opposed to removing comments
      # before removing raw strings.  This is because there are some
      # cpplint checks that requires the comments to be preserved, but
      # we don't want to check comments that are inside raw strings.
      matched = Match(r'^(.*?)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line)
      if (matched and
          not Match(r'^([^\'"]|\'(\\.|[^\'])*\'|"(\\.|[^"])*")*//',
                    matched.group(1))):
        delimiter = ')' + matched.group(2) + '"'

        end = matched.group(3).find(delimiter)
        if end >= 0:
          # Raw string ended on same line
          line = (matched.group(1) + '""' +
                  matched.group(3)[end + len(delimiter):])
          delimiter = None
        else:
          # Start of a multi-line raw string
          line = matched.group(1) + '""'
      else:
        break

    lines_without_raw_strings.append(line)

  # TODO(unknown): if delimiter is not None here, we might want to
  # emit a warning for unterminated string.
  return lines_without_raw_strings
1839
1840
def FindNextMultiLineCommentStart(lines, lineix):
  """Find the beginning marker for a multiline comment."""
  total = len(lines)
  for index in range(lineix, total):
    stripped = lines[index].strip()
    # A '/*' opens a multi-line comment only when no '*/' closes it on the
    # same line (search starts past the opening marker itself).
    if stripped.startswith('/*') and stripped.find('*/', 2) < 0:
      return index
  return total
1850
1851
def FindNextMultiLineCommentEnd(lines, lineix):
  """We are inside a comment, find the end marker."""
  total = len(lines)
  for index in range(lineix, total):
    if lines[index].strip().endswith('*/'):
      return index
  return total
1859
1860
def RemoveMultiLineCommentsFromRange(lines, begin, end):
  """Clears a range of lines for multi-line comments."""
  # Replace each line with a dummy '/**/' so the lines stay non-empty and
  # don't trigger spurious blank-line warnings later in the checks.
  index = begin
  while index < end:
    lines[index] = '/**/'
    index += 1
1867
1868
def RemoveMultiLineComments(filename, lines, error):
  """Removes multiline (c-style) comments from lines."""
  scan_from = 0
  while scan_from < len(lines):
    comment_start = FindNextMultiLineCommentStart(lines, scan_from)
    if comment_start >= len(lines):
      # No further multi-line comments in the file.
      return
    comment_end = FindNextMultiLineCommentEnd(lines, comment_start)
    if comment_end >= len(lines):
      # Comment opened but never closed; report and give up.
      error(filename, comment_start + 1, 'readability/multiline_comment', 5,
            'Could not find end of multi-line comment')
      return
    RemoveMultiLineCommentsFromRange(lines, comment_start, comment_end + 1)
    scan_from = comment_end + 1
1883
1884
def CleanseComments(line):
  """Removes //-comments and single-line C-style /* */ comments.

  Args:
    line: A line of C++ source.

  Returns:
    The line with single-line comments removed.
  """
  # Strip a // comment, but only when the marker is not inside a string.
  marker = line.find('//')
  if marker != -1 and not IsCppString(line[:marker]):
    line = line[:marker].rstrip()
  # Drop /* ... */ comments that are fully contained within the line.
  return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
1899
1900
class CleansedLines(object):
  """Holds 4 copies of all lines with different preprocessing applied to them.

  1) elided member contains lines without strings and comments.
  2) lines member contains lines without comments.
  3) raw_lines member contains all the lines without processing.
  4) lines_without_raw_strings member is same as raw_lines, but with C++11 raw
     strings removed.
  All these members are of <type 'list'>, and of the same length.
  """

  def __init__(self, lines):
    # All four views are built line-by-line from the same input so they
    # stay index-aligned with each other.
    self.elided = []
    self.lines = []
    self.raw_lines = lines
    self.num_lines = len(lines)
    self.lines_without_raw_strings = CleanseRawStrings(lines)
    for linenum in range(len(self.lines_without_raw_strings)):
      self.lines.append(CleanseComments(
          self.lines_without_raw_strings[linenum]))
      # elided: strings collapsed first, then comments removed.
      elided = self._CollapseStrings(self.lines_without_raw_strings[linenum])
      self.elided.append(CleanseComments(elided))

  def NumLines(self):
    """Returns the number of lines represented."""
    return self.num_lines

  @staticmethod
  def _CollapseStrings(elided):
    """Collapses strings and chars on a line to simple "" or '' blocks.

    We nix strings first so we're not fooled by text like '"http://"'

    Args:
      elided: The line being processed.

    Returns:
      The line with collapsed strings.
    """
    # #include lines legitimately contain quote/angle-bracket characters
    # that are not string literals; leave them alone.
    if _RE_PATTERN_INCLUDE.match(elided):
      return elided

    # Remove escaped characters first to make quote/single quote collapsing
    # basic.  Things that look like escaped characters shouldn't occur
    # outside of strings and chars.
    elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)

    # Replace quoted strings and digit separators.  Both single quotes
    # and double quotes are processed in the same loop, otherwise
    # nested quotes wouldn't work.
    collapsed = ''
    while True:
      # Find the first quote character
      match = Match(r'^([^\'"]*)([\'"])(.*)$', elided)
      if not match:
        collapsed += elided
        break
      head, quote, tail = match.groups()

      if quote == '"':
        # Collapse double quoted strings
        second_quote = tail.find('"')
        if second_quote >= 0:
          collapsed += head + '""'
          elided = tail[second_quote + 1:]
        else:
          # Unmatched double quote, don't bother processing the rest
          # of the line since this is probably a multiline string.
          collapsed += elided
          break
      else:
        # Found single quote, check nearby text to eliminate digit separators.
        #
        # There is no special handling for floating point here, because
        # the integer/fractional/exponent parts would all be parsed
        # correctly as long as there are digits on both sides of the
        # separator.  So we are fine as long as we don't see something
        # like "0.'3" (gcc 4.9.0 will not allow this literal).
        if Search(r'\b(?:0[bBxX]?|[1-9])[0-9a-fA-F]*$', head):
          # C++14 digit separator (e.g. 1'000'000): delete the quotes so
          # the number reads as one token.
          match_literal = Match(r'^((?:\'?[0-9a-zA-Z_])*)(.*)$', "'" + tail)
          collapsed += head + match_literal.group(1).replace("'", '')
          elided = match_literal.group(2)
        else:
          second_quote = tail.find('\'')
          if second_quote >= 0:
            collapsed += head + "''"
            elided = tail[second_quote + 1:]
          else:
            # Unmatched single quote
            collapsed += elided
            break

    return collapsed
1994
1995
def FindEndOfExpressionInLine(line, startpos, stack):
  """Find the position just after the end of current parenthesized expression.

  Note: `stack` is mutated in place as characters are scanned; callers
  thread the returned stack into the next line's call.

  Args:
    line: a CleansedLines line.
    startpos: start searching at this position.
    stack: nesting stack at startpos.

  Returns:
    On finding matching end: (index just after matching end, None)
    On finding an unclosed expression: (-1, None)
    Otherwise: (-1, new stack at end of this line)
  """
  for i in xrange(startpos, len(line)):
    char = line[i]
    if char in '([{':
      # Found start of parenthesized expression, push to expression stack
      stack.append(char)
    elif char == '<':
      # Found potential start of template argument list
      if i > 0 and line[i - 1] == '<':
        # Left shift operator
        if stack and stack[-1] == '<':
          stack.pop()
          if not stack:
            return (-1, None)
      elif i > 0 and Search(r'\boperator\s*$', line[0:i]):
        # operator<, don't add to stack
        continue
      else:
        # Tentative start of template argument list
        stack.append('<')
    elif char in ')]}':
      # Found end of parenthesized expression.
      #
      # If we are currently expecting a matching '>', the pending '<'
      # must have been an operator.  Remove them from expression stack.
      while stack and stack[-1] == '<':
        stack.pop()
      if not stack:
        return (-1, None)
      if ((stack[-1] == '(' and char == ')') or
          (stack[-1] == '[' and char == ']') or
          (stack[-1] == '{' and char == '}')):
        stack.pop()
        if not stack:
          return (i + 1, None)
      else:
        # Mismatched parentheses
        return (-1, None)
    elif char == '>':
      # Found potential end of template argument list.

      # Ignore "->" and operator functions
      if (i > 0 and
          (line[i - 1] == '-' or Search(r'\boperator\s*$', line[0:i - 1]))):
        continue

      # Pop the stack if there is a matching '<'.  Otherwise, ignore
      # this '>' since it must be an operator.
      if stack:
        if stack[-1] == '<':
          stack.pop()
          if not stack:
            return (i + 1, None)
    elif char == ';':
      # Found something that look like end of statements.  If we are currently
      # expecting a '>', the matching '<' must have been an operator, since
      # template argument list should not contain statements.
      while stack and stack[-1] == '<':
        stack.pop()
      if not stack:
        return (-1, None)

  # Did not find end of expression or unbalanced parentheses on this line
  return (-1, stack)
2072
2073
def CloseExpression(clean_lines, linenum, pos):
  """If input points to ( or { or [ or <, finds the position that closes it.

  If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the
  linenum/pos that correspond to the closing of the expression.

  TODO(unknown): cpplint spends a fair bit of time matching parentheses.
  Ideally we would want to index all opening and closing parentheses once
  and have CloseExpression be just a simple lookup, but due to preprocessor
  tricks, this is not so easy.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    pos: A position on the line.

  Returns:
    A tuple (line, linenum, pos) pointer *past* the closing brace, or
    (line, len(lines), -1) if we never find a close.  Note we ignore
    strings and comments when matching; and the line we return is the
    'cleansed' line at linenum.
  """
  line = clean_lines.elided[linenum]
  # '<<' and '<=' are operators, not an opening bracket.
  if line[pos] not in '({[<' or Match(r'<[<=]', line[pos:]):
    return (line, clean_lines.NumLines(), -1)

  # Scan forward from the opening character, carrying the nesting stack
  # across line boundaries until it matches or we run out of lines.
  nesting = []
  scan_pos = pos
  while True:
    (end_pos, nesting) = FindEndOfExpressionInLine(line, scan_pos, nesting)
    if end_pos > -1:
      return (line, linenum, end_pos)
    # A None stack means the expression can never close; otherwise keep
    # going while more lines remain.
    if not nesting or linenum >= clean_lines.NumLines() - 1:
      break
    linenum += 1
    line = clean_lines.elided[linenum]
    scan_pos = 0

  # Did not find end of expression before end of file, give up
  return (line, clean_lines.NumLines(), -1)
2116
2117
def FindStartOfExpressionInLine(line, endpos, stack):
  """Find position at the matching start of current expression.

  This is almost the reverse of FindEndOfExpressionInLine, but note
  that the input position and returned position differs by 1.

  Note: `stack` is mutated in place; callers thread the returned stack
  into the previous line's call when scanning backwards.

  Args:
    line: a CleansedLines line.
    endpos: start searching at this position.
    stack: nesting stack at endpos.

  Returns:
    On finding matching start: (index at matching start, None)
    On finding an unclosed expression: (-1, None)
    Otherwise: (-1, new stack at beginning of this line)
  """
  i = endpos
  while i >= 0:
    char = line[i]
    if char in ')]}':
      # Found end of expression, push to expression stack
      stack.append(char)
    elif char == '>':
      # Found potential end of template argument list.
      #
      # Ignore it if it's a "->" or ">=" or "operator>"
      if (i > 0 and
          (line[i - 1] == '-' or
           Match(r'\s>=\s', line[i - 1:]) or
           Search(r'\boperator\s*$', line[0:i]))):
        i -= 1
      else:
        stack.append('>')
    elif char == '<':
      # Found potential start of template argument list
      if i > 0 and line[i - 1] == '<':
        # Left shift operator
        i -= 1
      else:
        # If there is a matching '>', we can pop the expression stack.
        # Otherwise, ignore this '<' since it must be an operator.
        if stack and stack[-1] == '>':
          stack.pop()
          if not stack:
            return (i, None)
    elif char in '([{':
      # Found start of expression.
      #
      # If there are any unmatched '>' on the stack, they must be
      # operators.  Remove those.
      while stack and stack[-1] == '>':
        stack.pop()
      if not stack:
        return (-1, None)
      if ((char == '(' and stack[-1] == ')') or
          (char == '[' and stack[-1] == ']') or
          (char == '{' and stack[-1] == '}')):
        stack.pop()
        if not stack:
          return (i, None)
      else:
        # Mismatched parentheses
        return (-1, None)
    elif char == ';':
      # Found something that look like end of statements.  If we are currently
      # expecting a '<', the matching '>' must have been an operator, since
      # template argument list should not contain statements.
      while stack and stack[-1] == '>':
        stack.pop()
      if not stack:
        return (-1, None)

    i -= 1

  return (-1, stack)
2193
2194
def ReverseCloseExpression(clean_lines, linenum, pos):
  """If input points to ) or } or ] or >, finds the position that opens it.

  If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the
  linenum/pos that correspond to the opening of the expression.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    pos: A position on the line.

  Returns:
    A tuple (line, linenum, pos) pointer *at* the opening brace, or
    (line, 0, -1) if we never find the matching opening brace.  Note
    we ignore strings and comments when matching; and the line we
    return is the 'cleansed' line at linenum.
  """
  line = clean_lines.elided[linenum]
  if line[pos] not in ')}]>':
    return (line, 0, -1)

  # Scan backwards from the closing character, carrying the nesting stack
  # across line boundaries until it matches or we reach the first line.
  nesting = []
  scan_pos = pos
  while True:
    (start_pos, nesting) = FindStartOfExpressionInLine(line, scan_pos, nesting)
    if start_pos > -1:
      return (line, linenum, start_pos)
    # A None stack means there is no matching opener; otherwise keep
    # going while earlier lines remain.
    if not nesting or linenum <= 0:
      break
    linenum -= 1
    line = clean_lines.elided[linenum]
    scan_pos = len(line) - 1

  # Did not find start of expression before beginning of file, give up
  return (line, 0, -1)
2231
2232
def CheckForCopyright(filename, lines, error):
  """Logs an error if no Copyright message appears at the top of the file."""

  # A copyright notice should appear within the first 10 real lines.
  # Index 0 is a placeholder line added by the file reader, so the
  # candidate region is lines[1:11].
  header_lines = lines[1:11]
  if not any(re.search(r'Copyright', text, re.I) for text in header_lines):
    error(filename, 0, 'legal/copyright', 5,
          'No copyright message found.  '
          'You should have a line: "Copyright [year] <Copyright Owner>"')
2244
2245
def GetIndentLevel(line):
  """Return the number of leading spaces in line.

  Args:
    line: A string to check.

  Returns:
    An integer count of leading spaces, possibly zero.
  """
  # Only count spaces that precede a non-whitespace character; a line of
  # pure whitespace has indent level 0.
  leading = Match(r'^( *)\S', line)
  return len(leading.group(1)) if leading else 0
2260
def PathSplitToList(path):
  """Returns the path split into a list by the separator.

  Args:
    path: An absolute or relative path (e.g. '/a/b/c/' or '../a')

  Returns:
    A list of path components (e.g. ['a', 'b', 'c']).
  """
  components = []
  while True:
    (head, tail) = os.path.split(path)
    if head == path:
      # os.path.split makes no more progress on the head side: this is
      # the root of an absolute path.
      components.append(head)
      break
    if tail == path:
      # No progress on the tail side: final component of a relative path.
      components.append(tail)
      break
    components.append(tail)
    path = head

  components.reverse()
  return components
2285
def GetHeaderGuardCPPVariable(filename):
  """Returns the CPP variable that should be used as a header guard.

  Args:
    filename: The name of a C++ header file.

  Returns:
    The CPP variable that should be used as a header guard in the
    named file.

  """

  # Restores original filename in case that cpplint is invoked from Emacs's
  # flymake.
  filename = re.sub(r'_flymake\.h$', '.h', filename)
  filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename)
  # Replace 'c++' with 'cpp'.
  filename = filename.replace('C++', 'cpp').replace('c++', 'cpp')

  fileinfo = FileInfo(filename)
  file_path_from_root = fileinfo.RepositoryName()

  def FixupPathFromRoot():
    # NOTE(review): _root and _root_debug are module-level values; they
    # appear to be set from command-line flags elsewhere in this file.
    if _root_debug:
      sys.stderr.write("\n_root fixup, _root = '%s', repository name = '%s'\n"
                       % (_root, fileinfo.RepositoryName()))

    # Process the file path with the --root flag if it was set.
    if not _root:
      if _root_debug:
        sys.stderr.write("_root unspecified\n")
      return file_path_from_root

    def StripListPrefix(lst, prefix):
      # f(['x', 'y'], ['w', 'z']) -> None  (not a valid prefix)
      if lst[:len(prefix)] != prefix:
        return None
      # f(['a', 'b', 'c', 'd'], ['a', 'b']) -> ['c', 'd']
      return lst[(len(prefix)):]

    # root behavior:
    #   --root=subdir , lstrips subdir from the header guard
    maybe_path = StripListPrefix(PathSplitToList(file_path_from_root),
                                 PathSplitToList(_root))

    if _root_debug:
      sys.stderr.write(("_root lstrip (maybe_path=%s, file_path_from_root=%s," +
                        " _root=%s)\n") % (maybe_path, file_path_from_root, _root))

    if maybe_path:
      return os.path.join(*maybe_path)

    # --root=.. , will prepend the outer directory to the header guard
    full_path = fileinfo.FullName()
    # adapt slashes for windows
    root_abspath = os.path.abspath(_root).replace('\\', '/')

    maybe_path = StripListPrefix(PathSplitToList(full_path),
                                 PathSplitToList(root_abspath))

    if _root_debug:
      sys.stderr.write(("_root prepend (maybe_path=%s, full_path=%s, " +
                        "root_abspath=%s)\n") % (maybe_path, full_path, root_abspath))

    if maybe_path:
      return os.path.join(*maybe_path)

    if _root_debug:
      sys.stderr.write("_root ignore, returning %s\n" % (file_path_from_root))

    # --root=FAKE_DIR is ignored
    return file_path_from_root

  file_path_from_root = FixupPathFromRoot()
  # Map every non-alphanumeric character to '_' so the guard is a valid
  # preprocessor identifier; the style guide mandates a trailing '_'.
  return re.sub(r'[^a-zA-Z0-9]', '_', file_path_from_root).upper() + '_'
2361
2362
def CheckForHeaderGuard(filename, clean_lines, error):
  """Checks that the file contains a header guard.

  Logs an error if no #ifndef header guard is present.  For other
  headers, checks that the full pathname is used.

  Args:
    filename: The name of the C++ header file.
    clean_lines: A CleansedLines instance containing the file.
    error: The function to call with any errors found.
  """

  # Don't check for header guards if there are error suppression
  # comments somewhere in this file.
  #
  # Because this is silencing a warning for a nonexistent line, we
  # only support the very specific NOLINT(build/header_guard) syntax,
  # and not the general NOLINT or NOLINT(*) syntax.
  raw_lines = clean_lines.lines_without_raw_strings
  for i in raw_lines:
    if Search(r'//\s*NOLINT\(build/header_guard\)', i):
      return

  # Allow pragma once instead of header guards
  for i in raw_lines:
    if Search(r'^\s*#pragma\s+once', i):
      return

  cppvar = GetHeaderGuardCPPVariable(filename)

  ifndef = ''
  ifndef_linenum = 0
  define = ''
  endif = ''
  endif_linenum = 0
  for linenum, line in enumerate(raw_lines):
    linesplit = line.split()
    if len(linesplit) >= 2:
      # find the first occurrence of #ifndef and #define, save arg
      if not ifndef and linesplit[0] == '#ifndef':
        # set ifndef to the header guard presented on the #ifndef line.
        ifndef = linesplit[1]
        ifndef_linenum = linenum
      if not define and linesplit[0] == '#define':
        define = linesplit[1]
    # find the last occurrence of #endif, save entire line
    if line.startswith('#endif'):
      endif = line
      endif_linenum = linenum

  # The guard is missing entirely if either directive is absent or their
  # arguments disagree.
  if not ifndef or not define or ifndef != define:
    error(filename, 0, 'build/header_guard', 5,
          'No #ifndef header guard found, suggested CPP variable is: %s' %
          cppvar)
    return

  # The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
  # for backward compatibility.
  if ifndef != cppvar:
    error_level = 0
    if ifndef != cppvar + '_':
      error_level = 5

    ParseNolintSuppressions(filename, raw_lines[ifndef_linenum], ifndef_linenum,
                            error)
    error(filename, ifndef_linenum, 'build/header_guard', error_level,
          '#ifndef header guard has wrong style, please use: %s' % cppvar)

  # Check for "//" comments on endif line.
  ParseNolintSuppressions(filename, raw_lines[endif_linenum], endif_linenum,
                          error)
  match = Match(r'#endif\s*//\s*' + cppvar + r'(_)?\b', endif)
  if match:
    if match.group(1) == '_':
      # Issue low severity warning for deprecated double trailing underscore
      error(filename, endif_linenum, 'build/header_guard', 0,
            '#endif line should be "#endif  // %s"' % cppvar)
    return

  # Didn't find the corresponding "//" comment.  If this file does not
  # contain any "//" comments at all, it could be that the compiler
  # only wants "/**/" comments, look for those instead.
  no_single_line_comments = True
  for i in xrange(1, len(raw_lines) - 1):
    line = raw_lines[i]
    if Match(r'^(?:(?:\'(?:\\.|[^\'])*\')|(?:"(?:\\.|[^"])*")|[^\'"])*//', line):
      no_single_line_comments = False
      break

  if no_single_line_comments:
    match = Match(r'#endif\s*/\*\s*' + cppvar + r'(_)?\s*\*/', endif)
    if match:
      if match.group(1) == '_':
        # Low severity warning for double trailing underscore
        error(filename, endif_linenum, 'build/header_guard', 0,
              '#endif line should be "#endif  /* %s */"' % cppvar)
      return

  # Didn't find anything
  error(filename, endif_linenum, 'build/header_guard', 5,
        '#endif line should be "#endif  // %s"' % cppvar)
2464
2465
def CheckHeaderFileIncluded(filename, include_state, error):
  """Logs an error if a source file does not include its header.

  For each header extension, if a header file with the same base name as
  `filename` exists on disk, the include list must mention it (matched by
  substring in either direction).

  Args:
    filename: The name of the current source file.
    include_state: An _IncludeState instance (provides include_list).
    error: The function to call with any errors found.
  """

  # Do not check test files
  fileinfo = FileInfo(filename)
  if Search(_TEST_FILE_SUFFIX, fileinfo.BaseName()):
    return

  for ext in GetHeaderExtensions():
    basefilename = filename[0:len(filename) - len(fileinfo.Extension())]
    headerfile = basefilename + '.' + ext
    if not os.path.exists(headerfile):
      continue
    headername = FileInfo(headerfile).RepositoryName()
    first_include = None
    include_uses_unix_dir_aliases = False
    for section_list in include_state.include_list:
      for f in section_list:
        include_text = f[0]
        if "./" in include_text:
          include_uses_unix_dir_aliases = True
        # Substring match in either direction counts as "includes its header".
        if headername in include_text or include_text in headername:
          return
        if not first_include:
          first_include = f[1]

    # NOTE(review): if no includes were seen at all, first_include stays
    # None here and is passed as the line number — confirm error() tolerates
    # that.
    message = '%s should include its header file %s' % (fileinfo.RepositoryName(), headername)
    if include_uses_unix_dir_aliases:
      message += ". Relative paths like . and .. are not allowed."

    error(filename, first_include, 'build/include', 5, message)
2497
2498
def CheckForBadCharacters(filename, lines, error):
  """Logs an error for each line containing bad characters.

  Two kinds of bad characters:

  1. Unicode replacement characters: These indicate that either the file
  contained invalid UTF-8 (likely) or Unicode replacement characters (which
  it shouldn't).  Note that it's possible for this to throw off line
  numbering if the invalid UTF-8 occurred adjacent to a newline.

  2. NUL bytes.  These are problematic for some tools.

  Args:
    filename: The name of the current file.
    lines: An array of strings, each representing a line of the file.
    error: The function to call with any errors found.
  """
  # Compute the replacement character once rather than per line.
  replacement_char = unicode_escape_decode('\ufffd')
  for linenum, line in enumerate(lines):
    if replacement_char in line:
      error(filename, linenum, 'readability/utf8', 5,
            'Line contains invalid UTF-8 (or Unicode replacement character).')
    if '\0' in line:
      error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.')
2522
2523
def CheckForNewlineAtEOF(filename, lines, error):
  """Logs an error if there is no newline char at the end of the file.

  Args:
    filename: The name of the current file.
    lines: An array of strings, each representing a line of the file.
    error: The function to call with any errors found.
  """

  # The lines array was produced by appending two newlines to the file
  # text and splitting on '\n', so a file that ends with a newline yields
  # an empty last-but-two element.
  ends_with_newline = len(lines) >= 3 and not lines[-2]
  if not ends_with_newline:
    error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
          'Could not find a newline character at the end of the file.')
2540
2541
def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
  """Logs an error if we see /* ... */ or "..." that extend past one line.

  /* ... */ comments are legit inside macros, for one line.
  Otherwise, we prefer // comments, so it's ok to warn about the
  other.  Likewise, it's ok for strings to extend across multiple
  lines, as long as a line continuation character (backslash)
  terminates each line.  Although not currently prohibited by the C++
  style guide, it's ugly and unnecessary.  We don't do well with either
  in this lint program, so we warn about both.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Escaped backslashes are harmless; dropping them keeps a trailing
  # '\\"' from being mistaken for an escaped quote below.
  line = line.replace('\\\\', '')

  has_unterminated_comment = line.count('/*') > line.count('*/')
  if has_unterminated_comment:
    error(filename, linenum, 'readability/multiline_comment', 5,
          'Complex multi-line /*...*/-style comment found. '
          'Lint may give bogus warnings.  '
          'Consider replacing these with //-style comments, '
          'with #if 0...#endif, '
          'or with more clearly structured multi-line comments.')

  has_odd_quote_count = (line.count('"') - line.count('\\"')) % 2
  if has_odd_quote_count:
    error(filename, linenum, 'readability/multiline_string', 5,
          'Multi-line string ("...") found.  This lint script doesn\'t '
          'do well with such strings, and may give bogus warnings.  '
          'Use C++11 raw strings or concatenation instead.')
2578
2579
# (non-threadsafe name, thread-safe alternative, validation pattern)
#
# The validation pattern is used to eliminate false positives such as:
#  _rand();  // false positive due to substring match.
#  ->rand();  // some member function rand().
#  ACMRandom rand(seed);  // some variable named rand.
#  ISAACRandom rand();  // another variable named rand.
#
# Basically we require the return value of these functions to be used
# in some expression context on the same line by matching on some
# operator before the function name.  This eliminates constructors and
# member function calls.

# Matches an operator or '(' immediately before the call, i.e. an
# expression context rather than a declaration or member access.
_UNSAFE_FUNC_PREFIX = r'(?:[-+*/=%^&|(<]\s*|>\s+)'
_THREADING_LIST = (
    ('asctime(', 'asctime_r(', _UNSAFE_FUNC_PREFIX + r'asctime\([^)]+\)'),
    ('ctime(', 'ctime_r(', _UNSAFE_FUNC_PREFIX + r'ctime\([^)]+\)'),
    ('getgrgid(', 'getgrgid_r(', _UNSAFE_FUNC_PREFIX + r'getgrgid\([^)]+\)'),
    ('getgrnam(', 'getgrnam_r(', _UNSAFE_FUNC_PREFIX + r'getgrnam\([^)]+\)'),
    ('getlogin(', 'getlogin_r(', _UNSAFE_FUNC_PREFIX + r'getlogin\(\)'),
    ('getpwnam(', 'getpwnam_r(', _UNSAFE_FUNC_PREFIX + r'getpwnam\([^)]+\)'),
    ('getpwuid(', 'getpwuid_r(', _UNSAFE_FUNC_PREFIX + r'getpwuid\([^)]+\)'),
    ('gmtime(', 'gmtime_r(', _UNSAFE_FUNC_PREFIX + r'gmtime\([^)]+\)'),
    ('localtime(', 'localtime_r(', _UNSAFE_FUNC_PREFIX + r'localtime\([^)]+\)'),
    ('rand(', 'rand_r(', _UNSAFE_FUNC_PREFIX + r'rand\(\)'),
    ('strtok(', 'strtok_r(',
     _UNSAFE_FUNC_PREFIX + r'strtok\([^)]+\)'),
    ('ttyname(', 'ttyname_r(', _UNSAFE_FUNC_PREFIX + r'ttyname\([^)]+\)'),
    )
2608
2609
def CheckPosixThreading(filename, clean_lines, linenum, error):
  """Checks for calls to thread-unsafe functions.

  Much code has been originally written without consideration of
  multi-threading.  Also, engineers are relying on their old experience;
  they have learned posix before threading extensions were added.  These
  tests guide the engineers to use thread-safe functions (when using
  posix directly).

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  for unsafe_func, safe_func, pattern in _THREADING_LIST:
    # The validation pattern requires the call to appear in an expression
    # context, which filters out constructors and member functions that
    # merely share the name.
    if not Search(pattern, line):
      continue
    error(filename, linenum, 'runtime/threadsafe_fn', 2,
          'Consider using ' + safe_func +
          '...) instead of ' + unsafe_func +
          '...) for improved thread safety.')
2634
2635
def CheckVlogArguments(filename, clean_lines, linenum, error):
  """Checks that VLOG() is only used for defining a logging level.

  For example, VLOG(2) is correct.  VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and
  VLOG(FATAL) are not.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  elided_line = clean_lines.elided[linenum]
  # A symbolic severity inside VLOG() almost certainly meant LOG().
  if not Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', elided_line):
    return
  error(filename, linenum, 'runtime/vlog', 5,
        'VLOG() should be used with numeric verbosity level.  '
        'Use LOG() if you want symbolic severity levels.')
2653
# Matches invalid increment: *count++, which moves pointer instead of
# incrementing a value.
_RE_PATTERN_INVALID_INCREMENT = re.compile(
    r'^\s*\*\w+(\+\+|--);')


def CheckInvalidIncrement(filename, clean_lines, linenum, error):
  """Checks for invalid increment *count++.

  For example following function:
  void increment_counter(int* count) {
    *count++;
  }
  is invalid, because it effectively does count++, moving pointer, and should
  be replaced with ++*count, (*count)++ or *count += 1.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # '*' binds tighter here than the programmer probably intended.
  if not _RE_PATTERN_INVALID_INCREMENT.match(clean_lines.elided[linenum]):
    return
  error(filename, linenum, 'runtime/invalid_increment', 5,
        'Changing pointer instead of value (or unused value of operator*).')
2680
2681
def IsMacroDefinition(clean_lines, linenum):
  """Returns True if the line starts or continues a #define.

  Args:
    clean_lines: A sequence of cleansed lines.
    linenum: Index of the line to check.
  """
  if Search(r'^#define', clean_lines[linenum]):
    return True
  # A trailing backslash on the previous line means this line continues
  # a macro definition.
  return linenum > 0 and bool(Search(r'\\$', clean_lines[linenum - 1]))
2690
2691
def IsForwardClassDeclaration(clean_lines, linenum):
  """Returns a match if the line is a (possibly templated) forward class
  declaration such as 'class Foo;'."""
  forward_decl_pattern = r'^\s*(\btemplate\b)*.*class\s+\w+;\s*$'
  return Match(forward_decl_pattern, clean_lines[linenum])
2694
2695
class _BlockInfo(object):
  """Stores information about a generic block of code."""

  def __init__(self, linenum, seen_open_brace):
    # Line on which this block begins.
    self.starting_linenum = linenum
    # True once the block's opening '{' has been consumed.
    self.seen_open_brace = seen_open_brace
    # Count of currently-unbalanced open parentheses.
    self.open_parentheses = 0
    # Inline assembly state; starts as _NO_ASM.
    self.inline_asm = _NO_ASM
    # Whether namespace-indentation checks apply to this block.
    self.check_namespace_indentation = False

  def CheckBegin(self, filename, clean_lines, linenum, error):
    """Run checks that apply to text up to the opening brace.

    This is mostly for checking the text after the class identifier
    and the "{", usually where the base class is specified.  For other
    blocks, there isn't much to check, so we always pass.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      error: The function to call with any errors found.
    """
    pass

  def CheckEnd(self, filename, clean_lines, linenum, error):
    """Run checks that apply to text after the closing brace.

    This is mostly used for checking end of namespace comments.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      error: The function to call with any errors found.
    """
    pass

  def IsBlockInfo(self):
    """Returns true if this block is a _BlockInfo.

    This is convenient for verifying that an object is an instance of
    a _BlockInfo, but not an instance of any of the derived classes.

    Returns:
      True for this class, False for derived classes.
    """
    return self.__class__ is _BlockInfo
2744
2745
class _ExternCInfo(_BlockInfo):
  """Stores information about an 'extern "C"' block."""

  def __init__(self, linenum):
    # An extern "C" block is created once its opening brace has been seen,
    # hence seen_open_brace=True.
    _BlockInfo.__init__(self, linenum, True)
2751
2752
class _ClassInfo(_BlockInfo):
  """Stores information about a class."""

  def __init__(self, name, class_or_struct, clean_lines, linenum):
    _BlockInfo.__init__(self, linenum, False)
    # Name of the class or struct.
    self.name = name
    # Set by CheckBegin when a base-class ':' is seen.
    self.is_derived = False
    self.check_namespace_indentation = True
    # Default member access differs between struct and class.
    if class_or_struct == 'struct':
      self.access = 'public'
      self.is_struct = True
    else:
      self.access = 'private'
      self.is_struct = False

    # Remember initial indentation level for this class.  Using raw_lines here
    # instead of elided to account for leading comments.
    self.class_indent = GetIndentLevel(clean_lines.raw_lines[linenum])

    # Try to find the end of the class.  This will be confused by things like:
    #   class A {
    #   } *x = { ...
    #
    # But it's still good enough for CheckSectionSpacing.
    self.last_line = 0
    depth = 0
    for i in range(linenum, clean_lines.NumLines()):
      line = clean_lines.elided[i]
      depth += line.count('{') - line.count('}')
      if not depth:
        self.last_line = i
        break

  def CheckBegin(self, filename, clean_lines, linenum, error):
    """Marks the class as derived if the declaration has a bare ':'."""
    # Look for a bare ':'  (':: ' would be a scope operator, not inheritance)
    if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]):
      self.is_derived = True

  def CheckEnd(self, filename, clean_lines, linenum, error):
    """Checks DISALLOW macro placement and closing-brace alignment."""
    # If there is a DISALLOW macro, it should appear near the end of
    # the class.
    seen_last_thing_in_class = False
    for i in xrange(linenum - 1, self.starting_linenum, -1):
      match = Search(
          r'\b(DISALLOW_COPY_AND_ASSIGN|DISALLOW_IMPLICIT_CONSTRUCTORS)\(' +
          self.name + r'\)',
          clean_lines.elided[i])
      if match:
        if seen_last_thing_in_class:
          error(filename, i, 'readability/constructors', 3,
                match.group(1) + ' should be the last thing in the class')
        break

      # Any non-blank line after the macro means it wasn't last.
      if not Match(r'^\s*$', clean_lines.elided[i]):
        seen_last_thing_in_class = True

    # Check that closing brace is aligned with beginning of the class.
    # Only do this if the closing brace is indented by only whitespaces.
    # This means we will not check single-line class definitions.
    indent = Match(r'^( *)\}', clean_lines.elided[linenum])
    if indent and len(indent.group(1)) != self.class_indent:
      if self.is_struct:
        parent = 'struct ' + self.name
      else:
        parent = 'class ' + self.name
      error(filename, linenum, 'whitespace/indent', 3,
            'Closing brace should be aligned with beginning of %s' % parent)
2820
2821
class _NamespaceInfo(_BlockInfo):
  """Stores information about a namespace."""

  def __init__(self, name, linenum):
    _BlockInfo.__init__(self, linenum, False)
    self.check_namespace_indentation = True
    self.name = name or ''  # anonymous namespaces get the empty string

  def CheckEnd(self, filename, clean_lines, linenum, error):
    """Check end of namespace comments."""
    line = clean_lines.raw_lines[linenum]

    # Short namespaces (fewer than 10 lines) are exempt from the
    # end-of-namespace comment requirement -- unless they already carry
    # one, in which case we still verify it is correct.
    #
    # TODO(unknown): We always want to check end of namespace comments
    # if a namespace is large, but sometimes we also want to apply the
    # check if a short namespace contained nontrivial things (something
    # other than forward declarations).  There is currently no logic on
    # deciding what these nontrivial things are, so this check is
    # triggered by namespace size only, which works most of the time.
    has_end_comment = Match(r'^\s*};*\s*(//|/\*).*\bnamespace\b', line)
    if linenum - self.starting_linenum < 10 and not has_end_comment:
      return

    # We accept C style "/* */" comments for terminating namespaces, so
    # that code that terminates namespaces inside preprocessor macros can
    # be cpplint clean.  We also accept stuff like "// end of namespace
    # <name>." with the period at the end.  Anything else is rejected,
    # otherwise we might get false negatives when an existing comment is
    # a substring of the expected namespace.
    if self.name:
      # Named namespace: the comment must repeat the namespace name.
      expected = (r'^\s*};*\s*(//|/\*).*\bnamespace\s+' +
                  re.escape(self.name) + r'[\*/\.\\\s]*$')
      if not Match(expected, line):
        error(filename, linenum, 'readability/namespace', 5,
              'Namespace should be terminated with "// namespace %s"' %
              self.name)
      return

    # Anonymous namespace: a bare "// namespace" comment is acceptable.
    if Match(r'^\s*};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line):
      return
    # If "// namespace anonymous" or "// anonymous namespace (more text)",
    # mention "// anonymous namespace" as an acceptable form.
    if Match(r'^\s*}.*\b(namespace anonymous|anonymous namespace)\b', line):
      error(filename, linenum, 'readability/namespace', 5,
            'Anonymous namespace should be terminated with "// namespace"'
            ' or "// anonymous namespace"')
    else:
      error(filename, linenum, 'readability/namespace', 5,
            'Anonymous namespace should be terminated with "// namespace"')
2881
2882
2883 class _PreprocessorInfo(object):
2884 """Stores checkpoints of nesting stacks when #if/#else is seen."""
2885
2886 def __init__(self, stack_before_if):
2887 # The entire nesting stack before #if
2888 self.stack_before_if = stack_before_if
2889
2890 # The entire nesting stack up to #else
2891 self.stack_before_else = []
2892
2893 # Whether we have already seen #else or #elif
2894 self.seen_else = False
2895
2896
class NestingState(object):
  """Holds states related to parsing braces."""

  def __init__(self):
    """Starts with an empty brace stack and an empty preprocessor stack."""
    # Stack for tracking all braces.  An object is pushed whenever we
    # see a "{", and popped when we see a "}".  Only 3 types of
    # objects are possible:
    # - _ClassInfo: a class or struct.
    # - _NamespaceInfo: a namespace.
    # - _BlockInfo: some other type of block.
    self.stack = []

    # Top of the previous stack before each Update().
    #
    # Because the nesting_stack is updated at the end of each line, we
    # had to do some convoluted checks to find out what is the current
    # scope at the beginning of the line.  This check is simplified by
    # saving the previous top of nesting stack.
    #
    # We could save the full stack, but we only need the top.  Copying
    # the full nesting stack would slow down cpplint by ~10%.
    self.previous_stack_top = []

    # Stack of _PreprocessorInfo objects.
    self.pp_stack = []

  def SeenOpenBrace(self):
    """Check if we have seen the opening brace for the innermost block.

    Returns:
      True if we have seen the opening brace, False if the innermost
      block is still expecting an opening brace.
    """
    return (not self.stack) or self.stack[-1].seen_open_brace

  def InNamespaceBody(self):
    """Check if we are currently one level inside a namespace body.

    Returns:
      True if top of the stack is a namespace block, False otherwise.
    """
    return self.stack and isinstance(self.stack[-1], _NamespaceInfo)

  def InExternC(self):
    """Check if we are currently one level inside an 'extern "C"' block.

    Returns:
      True if top of the stack is an extern block, False otherwise.
    """
    return self.stack and isinstance(self.stack[-1], _ExternCInfo)

  def InClassDeclaration(self):
    """Check if we are currently one level inside a class or struct declaration.

    Returns:
      True if top of the stack is a class/struct, False otherwise.
    """
    return self.stack and isinstance(self.stack[-1], _ClassInfo)

  def InAsmBlock(self):
    """Check if we are currently one level inside an inline ASM block.

    Returns:
      True if the top of the stack is a block containing inline ASM.
    """
    return self.stack and self.stack[-1].inline_asm != _NO_ASM

  def InTemplateArgumentList(self, clean_lines, linenum, pos):
    """Check if current position is inside template argument list.

    Args:
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      pos: position just after the suspected template argument.
    Returns:
      True if (linenum, pos) is inside template arguments.
    """
    while linenum < clean_lines.NumLines():
      # Find the earliest character that might indicate a template argument
      line = clean_lines.elided[linenum]
      match = Match(r'^[^{};=\[\]\.<>]*(.)', line[pos:])
      if not match:
        linenum += 1
        pos = 0
        continue
      token = match.group(1)
      pos += len(match.group(0))

      # These things do not look like template argument list:
      #   class Suspect {
      #   class Suspect x; }
      if token in ('{', '}', ';'): return False

      # These things look like template argument list:
      #   template <class Suspect>
      #   template <class Suspect = default_value>
      #   template <class Suspect[]>
      #   template <class Suspect...>
      if token in ('>', '=', '[', ']', '.'): return True

      # Check if token is an unmatched '<'.
      # If not, move on to the next character.
      if token != '<':
        pos += 1
        if pos >= len(line):
          linenum += 1
          pos = 0
        continue

      # We can't be sure if we just find a single '<', and need to
      # find the matching '>'.
      (_, end_line, end_pos) = CloseExpression(clean_lines, linenum, pos - 1)
      if end_pos < 0:
        # Not sure if template argument list or syntax error in file
        return False
      linenum = end_line
      pos = end_pos
    # Ran off the end of the file without deciding: not a template list.
    return False

  def UpdatePreprocessor(self, line):
    """Update preprocessor stack.

    We need to handle preprocessors due to classes like this:
      #ifdef SWIG
      struct ResultDetailsPageElementExtensionPoint {
      #else
      struct ResultDetailsPageElementExtensionPoint : public Extension {
      #endif

    We make the following assumptions (good enough for most files):
    - Preprocessor condition evaluates to true from #if up to first
      #else/#elif/#endif.

    - Preprocessor condition evaluates to false from #else/#elif up
      to #endif.  We still perform lint checks on these lines, but
      these do not affect nesting stack.

    Args:
      line: current line to check.
    """
    if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line):
      # Beginning of #if block, save the nesting stack here.  The saved
      # stack will allow us to restore the parsing state in the #else case.
      self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack)))
    elif Match(r'^\s*#\s*(else|elif)\b', line):
      # Beginning of #else block
      if self.pp_stack:
        if not self.pp_stack[-1].seen_else:
          # This is the first #else or #elif block.  Remember the
          # whole nesting stack up to this point.  This is what we
          # keep after the #endif.
          self.pp_stack[-1].seen_else = True
          self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack)

        # Restore the stack to how it was before the #if
        self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if)
      else:
        # TODO(unknown): unexpected #else, issue warning?
        pass
    elif Match(r'^\s*#\s*endif\b', line):
      # End of #if or #else blocks.
      if self.pp_stack:
        # If we saw an #else, we will need to restore the nesting
        # stack to its former state before the #else, otherwise we
        # will just continue from where we left off.
        if self.pp_stack[-1].seen_else:
          # Here we can just use a shallow copy since we are the last
          # reference to it.
          self.stack = self.pp_stack[-1].stack_before_else
        # Drop the corresponding #if
        self.pp_stack.pop()
      else:
        # TODO(unknown): unexpected #endif, issue warning?
        pass

  # TODO(unknown): Update() is too long, but we will refactor later.
  def Update(self, filename, clean_lines, linenum, error):
    """Update nesting state with current line.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      error: The function to call with any errors found.
    """
    line = clean_lines.elided[linenum]

    # Remember top of the previous nesting stack.
    #
    # The stack is always pushed/popped and not modified in place, so
    # we can just do a shallow copy instead of copy.deepcopy.  Using
    # deepcopy would slow down cpplint by ~28%.
    if self.stack:
      self.previous_stack_top = self.stack[-1]
    else:
      self.previous_stack_top = None

    # Update pp_stack
    self.UpdatePreprocessor(line)

    # Count parentheses.  This is to avoid adding struct arguments to
    # the nesting stack.
    if self.stack:
      inner_block = self.stack[-1]
      depth_change = line.count('(') - line.count(')')
      inner_block.open_parentheses += depth_change

      # Also check if we are starting or ending an inline assembly block.
      if inner_block.inline_asm in (_NO_ASM, _END_ASM):
        if (depth_change != 0 and
            inner_block.open_parentheses == 1 and
            _MATCH_ASM.match(line)):
          # Enter assembly block
          inner_block.inline_asm = _INSIDE_ASM
        else:
          # Not entering assembly block.  If previous line was _END_ASM,
          # we will now shift to _NO_ASM state.
          inner_block.inline_asm = _NO_ASM
      elif (inner_block.inline_asm == _INSIDE_ASM and
            inner_block.open_parentheses == 0):
        # Exit assembly block
        inner_block.inline_asm = _END_ASM

    # Consume namespace declaration at the beginning of the line.  Do
    # this in a loop so that we catch same line declarations like this:
    #   namespace proto2 { namespace bridge { class MessageSet; } }
    while True:
      # Match start of namespace.  The "\b\s*" below catches namespace
      # declarations even if it weren't followed by a whitespace, this
      # is so that we don't confuse our namespace checker.  The
      # missing spaces will be flagged by CheckSpacing.
      namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line)
      if not namespace_decl_match:
        break

      new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum)
      self.stack.append(new_namespace)

      line = namespace_decl_match.group(2)
      if line.find('{') != -1:
        new_namespace.seen_open_brace = True
        line = line[line.find('{') + 1:]

    # Look for a class declaration in whatever is left of the line
    # after parsing namespaces.  The regexp accounts for decorated classes
    # such as in:
    #   class LOCKABLE API Object {
    #   };
    class_decl_match = Match(
        r'^(\s*(?:template\s*<[\w\s<>,:=]*>\s*)?'
        r'(class|struct)\s+(?:[a-zA-Z0-9_]+\s+)*(\w+(?:::\w+)*))'
        r'(.*)$', line)
    if (class_decl_match and
        (not self.stack or self.stack[-1].open_parentheses == 0)):
      # We do not want to accept classes that are actually template arguments:
      #   template <class Ignore1,
      #             class Ignore2 = Default<Args>,
      #             template <Args> class Ignore3>
      #   void Function() {};
      #
      # To avoid template argument cases, we scan forward and look for
      # an unmatched '>'.  If we see one, assume we are inside a
      # template argument list.
      end_declaration = len(class_decl_match.group(1))
      if not self.InTemplateArgumentList(clean_lines, linenum, end_declaration):
        self.stack.append(_ClassInfo(
            class_decl_match.group(3), class_decl_match.group(2),
            clean_lines, linenum))
      line = class_decl_match.group(4)

    # If we have not yet seen the opening brace for the innermost block,
    # run checks here.
    if not self.SeenOpenBrace():
      self.stack[-1].CheckBegin(filename, clean_lines, linenum, error)

    # Update access control if we are inside a class/struct
    if self.stack and isinstance(self.stack[-1], _ClassInfo):
      classinfo = self.stack[-1]
      access_match = Match(
          r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?'
          r':(?:[^:]|$)',
          line)
      if access_match:
        classinfo.access = access_match.group(2)

        # Check that access keywords are indented +1 space.  Skip this
        # check if the keywords are not preceded by whitespaces.
        indent = access_match.group(1)
        if (len(indent) != classinfo.class_indent + 1 and
            Match(r'^\s*$', indent)):
          if classinfo.is_struct:
            parent = 'struct ' + classinfo.name
          else:
            parent = 'class ' + classinfo.name
          slots = ''
          if access_match.group(3):
            slots = access_match.group(3)
          error(filename, linenum, 'whitespace/indent', 3,
                '%s%s: should be indented +1 space inside %s' % (
                    access_match.group(2), slots, parent))

    # Consume braces or semicolons from what's left of the line
    while True:
      # Match first brace, semicolon, or closed parenthesis.
      matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line)
      if not matched:
        break

      token = matched.group(1)
      if token == '{':
        # If namespace or class hasn't seen a opening brace yet, mark
        # namespace/class head as complete.  Push a new block onto the
        # stack otherwise.
        if not self.SeenOpenBrace():
          self.stack[-1].seen_open_brace = True
        elif Match(r'^extern\s*"[^"]*"\s*\{', line):
          self.stack.append(_ExternCInfo(linenum))
        else:
          self.stack.append(_BlockInfo(linenum, True))
          if _MATCH_ASM.match(line):
            self.stack[-1].inline_asm = _BLOCK_ASM

      elif token == ';' or token == ')':
        # If we haven't seen an opening brace yet, but we already saw
        # a semicolon, this is probably a forward declaration.  Pop
        # the stack for these.
        #
        # Similarly, if we haven't seen an opening brace yet, but we
        # already saw a closing parenthesis, then these are probably
        # function arguments with extra "class" or "struct" keywords.
        # Also pop these stack for these.
        if not self.SeenOpenBrace():
          self.stack.pop()
      else:  # token == '}'
        # Perform end of block checks and pop the stack.
        if self.stack:
          self.stack[-1].CheckEnd(filename, clean_lines, linenum, error)
          self.stack.pop()
      line = matched.group(2)

  def InnermostClass(self):
    """Get class info on the top of the stack.

    Returns:
      A _ClassInfo object if we are inside a class, or None otherwise.
    """
    for i in range(len(self.stack), 0, -1):
      classinfo = self.stack[i - 1]
      if isinstance(classinfo, _ClassInfo):
        return classinfo
    return None

  def CheckCompletedBlocks(self, filename, error):
    """Checks that all classes and namespaces have been completely parsed.

    Call this when all lines in a file have been processed.
    Args:
      filename: The name of the current file.
      error: The function to call with any errors found.
    """
    # Note: This test can result in false positives if #ifdef constructs
    # get in the way of brace matching.  See the testBuildClass test in
    # cpplint_unittest.py for an example of this.
    for obj in self.stack:
      if isinstance(obj, _ClassInfo):
        error(filename, obj.starting_linenum, 'build/class', 5,
              'Failed to find complete declaration of class %s' %
              obj.name)
      elif isinstance(obj, _NamespaceInfo):
        error(filename, obj.starting_linenum, 'build/namespaces', 5,
              'Failed to find complete declaration of namespace %s' %
              obj.name)
3269
3270
def CheckForNonStandardConstructs(filename, clean_lines, linenum,
                                  nesting_state, error):
  r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.

  Complain about several constructs which gcc-2 accepts, but which are
  not standard C++.  Warning about these in lint is one way to ease the
  transition to new compilers.
  - put storage class first (e.g. "static const" instead of "const static").
  - "%lld" instead of %qd" in printf-type functions.
  - "%1$d" is non-standard in printf-type functions.
  - "\%" is an undefined character escape sequence.
  - text after #endif is not allowed.
  - invalid inner-style forward declaration.
  - >? and <? operators, and their >?= and <?= cousins.

  Additionally, check for constructor/destructor style violations and reference
  members, as it is very convenient to do so while checking for
  gcc-2 compliance.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: A callable to which errors are reported, which takes 4 arguments:
           filename, line number, error level, and message
  """

  # Remove comments from the line, but leave in strings for now.
  line = clean_lines.lines[linenum]

  if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
    error(filename, linenum, 'runtime/printf_format', 3,
          '%q in format strings is deprecated.  Use %ll instead.')

  if Search(r'printf\s*\(.*".*%\d+\$', line):
    error(filename, linenum, 'runtime/printf_format', 2,
          '%N$ formats are unconventional.  Try rewriting to avoid them.')

  # Remove escaped backslashes before looking for undefined escapes.
  line = line.replace('\\\\', '')

  if Search(r'("|\').*\\(%|\[|\(|{)', line):
    error(filename, linenum, 'build/printf_format', 3,
          '%, [, (, and { are undefined character escapes.  Unescape them.')

  # For the rest, work with both comments and strings removed.
  line = clean_lines.elided[linenum]

  if Search(r'\b(const|volatile|void|char|short|int|long'
            r'|float|double|signed|unsigned'
            r'|schar|u?int8|u?int16|u?int32|u?int64)'
            r'\s+(register|static|extern|typedef)\b',
            line):
    error(filename, linenum, 'build/storage_class', 5,
          'Storage-class specifier (static, extern, typedef, etc) should be '
          'at the beginning of the declaration.')

  if Match(r'\s*#\s*endif\s*[^/\s]+', line):
    error(filename, linenum, 'build/endif_comment', 5,
          'Uncommented text after #endif is non-standard.  Use a comment.')

  if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
    error(filename, linenum, 'build/forward_decl', 5,
          'Inner-style forward declarations are invalid.  Remove this line.')

  if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
            line):
    error(filename, linenum, 'build/deprecated', 3,
          '>? and <? (max and min) operators are non-standard and deprecated.')

  if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
    # TODO(unknown): Could it be expanded safely to arbitrary references,
    # without triggering too many false positives? The first
    # attempt triggered 5 warnings for mostly benign code in the regtest, hence
    # the restriction.
    # Here's the original regexp, for the reference:
    # type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
    # r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
    error(filename, linenum, 'runtime/member_string_references', 2,
          'const string& members are dangerous. It is much better to use '
          'alternatives, such as pointers or simple constants.')

  # Everything else in this function operates on class declarations.
  # Return early if the top of the nesting stack is not a class, or if
  # the class head is not completed yet.
  classinfo = nesting_state.InnermostClass()
  if not classinfo or not classinfo.seen_open_brace:
    return

  # The class may have been declared with namespace or classname qualifiers.
  # The constructor and destructor will not have those qualifiers.
  base_classname = classinfo.name.split('::')[-1]

  # Look for single-argument constructors that aren't marked explicit.
  # Technically a valid construct, but against style.
  explicit_constructor_match = Match(
      r'\s+(?:(?:inline|constexpr)\s+)*(explicit\s+)?'
      r'(?:(?:inline|constexpr)\s+)*%s\s*'
      r'\(((?:[^()]|\([^()]*\))*)\)'
      % re.escape(base_classname),
      line)

  if explicit_constructor_match:
    # Group 1 is the "explicit " keyword if present; group 2 is the raw
    # argument list text between the constructor's parentheses.
    is_marked_explicit = explicit_constructor_match.group(1)

    if not explicit_constructor_match.group(2):
      constructor_args = []
    else:
      constructor_args = explicit_constructor_match.group(2).split(',')

    # collapse arguments so that commas in template parameter lists and function
    # argument parameter lists don't split arguments in two
    i = 0
    while i < len(constructor_args):
      constructor_arg = constructor_args[i]
      # Keep merging following fragments while '<'/'(' are unbalanced.
      while (constructor_arg.count('<') > constructor_arg.count('>') or
             constructor_arg.count('(') > constructor_arg.count(')')):
        constructor_arg += ',' + constructor_args[i + 1]
        del constructor_args[i + 1]
      constructor_args[i] = constructor_arg
      i += 1

    variadic_args = [arg for arg in constructor_args if '&&...' in arg]
    defaulted_args = [arg for arg in constructor_args if '=' in arg]
    noarg_constructor = (not constructor_args or  # empty arg list
                         # 'void' arg specifier
                         (len(constructor_args) == 1 and
                          constructor_args[0].strip() == 'void'))
    onearg_constructor = ((len(constructor_args) == 1 and  # exactly one arg
                           not noarg_constructor) or
                          # all but at most one arg defaulted
                          (len(constructor_args) >= 1 and
                           not noarg_constructor and
                           len(defaulted_args) >= len(constructor_args) - 1) or
                          # variadic arguments with zero or one argument
                          (len(constructor_args) <= 2 and
                           len(variadic_args) >= 1))
    initializer_list_constructor = bool(
        onearg_constructor and
        Search(r'\bstd\s*::\s*initializer_list\b', constructor_args[0]))
    copy_constructor = bool(
        onearg_constructor and
        Match(r'((const\s+(volatile\s+)?)?|(volatile\s+(const\s+)?))?'
              r'%s(\s*<[^>]*>)?(\s+const)?\s*(?:<\w+>\s*)?&'
              % re.escape(base_classname), constructor_args[0].strip()))

    # Copy and initializer_list constructors are legitimately callable
    # with one argument and are exempt from the "explicit" requirement.
    if (not is_marked_explicit and
        onearg_constructor and
        not initializer_list_constructor and
        not copy_constructor):
      if defaulted_args or variadic_args:
        error(filename, linenum, 'runtime/explicit', 5,
              'Constructors callable with one argument '
              'should be marked explicit.')
      else:
        error(filename, linenum, 'runtime/explicit', 5,
              'Single-parameter constructors should be marked explicit.')
    elif is_marked_explicit and not onearg_constructor:
      if noarg_constructor:
        error(filename, linenum, 'runtime/explicit', 5,
              'Zero-parameter constructors should not be marked explicit.')
3434
3435
def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error):
  """Checks for the correctness of various spacing around function calls.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Since function calls often occur inside if/for/while/switch
  # expressions - which have their own, more liberal conventions - we
  # first see if we should be looking inside such an expression for a
  # function call, to which we can apply more strict standards.
  fncall = line    # if there's no control flow construct, look at whole line
  for pattern in (r'\bif\s*\((.*)\)\s*{',
                  r'\bfor\s*\((.*)\)\s*{',
                  r'\bwhile\s*\((.*)\)\s*[{;]',
                  r'\bswitch\s*\((.*)\)\s*{'):
    match = Search(pattern, line)
    if match:
      fncall = match.group(1)    # look inside the parens for function calls
      break

  # Except in if/for/while/switch, there should never be space
  # immediately inside parens (eg "f( 3, 4 )").  We make an exception
  # for nested parens ( (a+b) + c ).  Likewise, there should never be
  # a space before a ( when it's a function argument.  I assume it's a
  # function argument when the char before the whitespace is legal in
  # a function name (alnum + _) and we're not starting a macro. Also ignore
  # pointers and references to arrays and functions coz they're too tricky:
  # we use a very simple way to recognize these:
  # " (something)(maybe-something)" or
  # " (something)(maybe-something," or
  # " (something)[something]"
  # Note that we assume the contents of [] to be short enough that
  # they'll never need to wrap.
  if (  # Ignore control structures.
      not Search(r'\b(if|elif|for|while|switch|return|new|delete|catch|sizeof)\b',
                 fncall) and
      # Ignore pointers/references to functions.
      not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
      # Ignore pointers/references to arrays.
      not Search(r' \([^)]+\)\[[^\]]+\]', fncall)):
    if Search(r'\w\s*\(\s(?!\s*\\$)', fncall):      # a ( used for a fn call
      error(filename, linenum, 'whitespace/parens', 4,
            'Extra space after ( in function call')
    elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
      error(filename, linenum, 'whitespace/parens', 2,
            'Extra space after (')
    if (Search(r'\w\s+\(', fncall) and
        not Search(r'_{0,2}asm_{0,2}\s+_{0,2}volatile_{0,2}\s+\(', fncall) and
        not Search(r'#\s*define|typedef|using\s+\w+\s*=', fncall) and
        not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall) and
        not Search(r'\bcase\s+\(', fncall)):
      # TODO(unknown): Space after an operator function seem to be a common
      # error, silence those for now by restricting them to highest verbosity.
      if Search(r'\boperator_*\b', line):
        error(filename, linenum, 'whitespace/parens', 0,
              'Extra space before ( in function call')
      else:
        error(filename, linenum, 'whitespace/parens', 4,
              'Extra space before ( in function call')
    # If the ) is followed only by a newline or a { + newline, assume it's
    # part of a control statement (if/while/etc), and don't complain
    if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
      # If the closing parenthesis is preceded by only whitespaces,
      # try to give a more descriptive error message.
      if Search(r'^\s+\)', fncall):
        error(filename, linenum, 'whitespace/parens', 2,
              'Closing ) should be moved to the previous line')
      else:
        error(filename, linenum, 'whitespace/parens', 2,
              'Extra space before )')
3511
3512
def IsBlankLine(line):
  """Returns true if the given line is blank.

  We consider a line to be blank if the line is empty or consists of
  only white spaces.

  Args:
    line: A line of a string.

  Returns:
    True, if the given line is blank.
  """
  if not line:
    return True
  return line.isspace()
3526
3527
def CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
                                 error):
  """Flags items that are unexpectedly indented inside a namespace body.

  Args:
    filename: The name of the current file.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    clean_lines: A CleansedLines instance containing the file.
    line: The number of the line to check.
    error: The function to call with any errors found.
  """
  stack = nesting_state.stack
  prev_top = nesting_state.previous_stack_top
  # The current line sits directly inside a namespace body when the scope
  # that was innermost before this line is a namespace and is the one
  # immediately enclosing the current top of the stack.
  is_namespace_indent_item = (
      len(stack) > 1 and
      stack[-1].check_namespace_indentation and
      isinstance(prev_top, _NamespaceInfo) and
      prev_top == stack[-2])

  if ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
                                     clean_lines.elided, line):
    CheckItemIndentationInNamespace(filename, clean_lines.elided,
                                    line, error)
3540
3541
def CheckForFunctionLengths(filename, clean_lines, linenum,
                            function_state, error):
  """Reports for long function bodies.

  For an overview why this is done, see:
  https://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions

  Uses a simplistic algorithm assuming other style guidelines
  (especially spacing) are followed.
  Only checks unindented functions, so class members are unchecked.
  Trivial bodies are unchecked, so constructors with huge initializer lists
  may be missed.
  Blank/comment lines are not counted so as to avoid encouraging the removal
  of vertical space and comments just to get through a lint check.
  NOLINT *on the last line of a function* disables this check.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    function_state: Current function name and lines in body so far.
    error: The function to call with any errors found.
  """
  lines = clean_lines.lines
  line = lines[linenum]
  joined_line = ''

  starting_func = False
  regexp = r'(\w(\w|::|\*|\&|\s)*)\('  # decls * & space::name( ...
  match_result = Match(regexp, line)
  if match_result:
    # If the name is all caps and underscores, figure it's a macro and
    # ignore it, unless it's TEST or TEST_F.
    function_name = match_result.group(1).split()[-1]
    if function_name == 'TEST' or function_name == 'TEST_F' or (
        not Match(r'[A-Z_]+$', function_name)):
      starting_func = True

  if starting_func:
    # Scan forward for the opening brace (function body) or a terminator
    # that proves this is just a declaration.
    body_found = False
    for start_linenum in xrange(linenum, clean_lines.NumLines()):
      start_line = lines[start_linenum]
      joined_line += ' ' + start_line.lstrip()
      if Search(r'(;|})', start_line):  # Declarations and trivial functions
        body_found = True
        break                           # ... ignore
      if Search(r'{', start_line):
        body_found = True
        function = Search(r'((\w|:)*)\(', line).group(1)
        if Match(r'TEST', function):    # Handle TEST... macros
          parameter_regexp = Search(r'(\(.*\))', joined_line)
          if parameter_regexp:          # Ignore bad syntax
            function += parameter_regexp.group(1)
        else:
          function += '()'
        function_state.Begin(function)
        break
    if not body_found:
      # No body for the function (or evidence of a non-function) was found.
      error(filename, linenum, 'readability/fn_size', 5,
            'Lint failed to find start of function body.')
  elif Match(r'^\}\s*$', line):  # function end
    function_state.Check(error, filename, linenum)
    function_state.End()
  elif not Match(r'^\s*$', line):
    function_state.Count()  # Count non-blank/non-comment lines.
3608
3609
# Matches a "// TODO" comment.  Group 1 is the whitespace between "//" and
# "TODO" (one space is correct), group 2 is the optional "(username)" part,
# and group 3 is the single whitespace character (or end of string) that may
# follow the optional colon.
_RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?')
3611
3612
def CheckComment(line, filename, linenum, next_line_start, error):
  """Checks for common mistakes in comments.

  Args:
    line: The line in question.
    filename: The name of the current file.
    linenum: The number of the line to check.
    next_line_start: The first non-whitespace column of the next line.
    error: The function to call with any errors found.
  """
  comment_start = line.find('//')
  if comment_start == -1:
    return

  # If an odd number of unescaped double quotes precede the "//", the slashes
  # are probably inside a string literal; leave them alone.
  code_before = re.sub(r'\\.', '', line[:comment_start])
  if code_before.count('"') % 2 != 0:
    return

  # One space before the comment is tolerated right after a brace that opens
  # a new scope (detected by the next line's indent lining up with the
  # comment); everywhere else, insist on at least two spaces.
  new_scope = (Match(r'^.*{ *//', line) and
               next_line_start == comment_start)
  crowded = ((comment_start >= 1 and
              line[comment_start-1] not in string.whitespace) or
             (comment_start >= 2 and
              line[comment_start-2] not in string.whitespace))
  if not new_scope and crowded:
    error(filename, linenum, 'whitespace/comments', 2,
          'At least two spaces is best between code and comments')

  # Look for malformed TODO comments.
  comment = line[comment_start:]
  todo = _RE_PATTERN_TODO.match(comment)
  if todo:
    # One whitespace is correct; zero whitespace is handled elsewhere.
    if len(todo.group(1)) > 1:
      error(filename, linenum, 'whitespace/todo', 2,
            'Too many spaces before TODO')

    if not todo.group(2):
      error(filename, linenum, 'readability/todo', 2,
            'Missing username in TODO; it should look like '
            '"// TODO(my_username): Stuff."')

    # Comparisons made explicit for correctness -- pylint: disable=g-explicit-bool-comparison
    if todo.group(3) != ' ' and todo.group(3) != '':
      error(filename, linenum, 'whitespace/todo', 2,
            'TODO(my_username) should be followed by a space')

  # Comment text should be separated from "//" by a space, except for
  # Doxygen-style comments that start with "///" or "//!".
  if (Match(r'//[^ ]*\w', comment) and
      not Match(r'(///|//\!)(\s+|$)', comment)):
    error(filename, linenum, 'whitespace/comments', 4,
          'Should have a space between // and comment')
3665
3666
def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
  """Checks for the correctness of various spacing issues in the code.

  Things we check for: spaces around operators, spaces after
  if/for/while/switch, no spaces around parens in function calls, two
  spaces between code and comment, don't start a block with a blank
  line, don't end a function with a blank line, don't add a blank line
  after public/protected/private, don't have too many blank lines in a row.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """

  # Don't use "elided" lines here, otherwise we can't check commented lines.
  # Don't want to use "raw" either, because we don't want to check inside C++11
  # raw strings,
  raw = clean_lines.lines_without_raw_strings
  line = raw[linenum]

  # Before nixing comments, check if the line is blank for no good
  # reason.  This includes the first line after a block is opened, and
  # blank lines at the end of a function (ie, right before a line like '}'
  #
  # Skip all the blank line checks if we are immediately inside a
  # namespace body.  In other words, don't issue blank line warnings
  # for this block:
  #   namespace {
  #
  #   }
  #
  # A warning about missing end of namespace comments will be issued instead.
  #
  # Also skip blank line checks for 'extern "C"' blocks, which are formatted
  # like namespaces.
  if (IsBlankLine(line) and
      not nesting_state.InNamespaceBody() and
      not nesting_state.InExternC()):
    elided = clean_lines.elided
    prev_line = elided[linenum - 1]
    prevbrace = prev_line.rfind('{')
    # TODO(unknown): Don't complain if line before blank line, and line after,
    #                both start with alnums and are indented the same amount.
    #                This ignores whitespace at the start of a namespace block
    #                because those are not usually indented.
    if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1:
      # OK, we have a blank line at the start of a code block.  Before we
      # complain, we check if it is an exception to the rule: The previous
      # non-empty line has the parameters of a function header that are indented
      # 4 spaces (because they did not fit in a 80 column line when placed on
      # the same line as the function name).  We also check for the case where
      # the previous line is indented 6 spaces, which may happen when the
      # initializers of a constructor do not fit into a 80 column line.
      exception = False
      if Match(r' {6}\w', prev_line):  # Initializer list?
        # We are looking for the opening column of initializer list, which
        # should be indented 4 spaces to cause 6 space indentation afterwards.
        search_position = linenum-2
        while (search_position >= 0
               and Match(r' {6}\w', elided[search_position])):
          search_position -= 1
        exception = (search_position >= 0
                     and elided[search_position][:5] == '    :')
      else:
        # Search for the function arguments or an initializer list.  We use a
        # simple heuristic here: If the line is indented 4 spaces; and we have a
        # closing paren, without the opening paren, followed by an opening brace
        # or colon (for initializer lists) we assume that it is the last line of
        # a function header.  If we have a colon indented 4 spaces, it is an
        # initializer list.
        exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
                           prev_line)
                     or Match(r' {4}:', prev_line))

      if not exception:
        error(filename, linenum, 'whitespace/blank_line', 2,
              'Redundant blank line at the start of a code block '
              'should be deleted.')
    # Ignore blank lines at the end of a block in a long if-else
    # chain, like this:
    #   if (condition1) {
    #     // Something followed by a blank line
    #
    #   } else if (condition2) {
    #     // Something else
    #   }
    if linenum + 1 < clean_lines.NumLines():
      next_line = raw[linenum + 1]
      if (next_line
          and Match(r'\s*}', next_line)
          and next_line.find('} else ') == -1):
        error(filename, linenum, 'whitespace/blank_line', 3,
              'Redundant blank line at the end of a code block '
              'should be deleted.')

    matched = Match(r'\s*(public|protected|private):', prev_line)
    if matched:
      error(filename, linenum, 'whitespace/blank_line', 3,
            'Do not leave a blank line after "%s:"' % matched.group(1))

  # Next, check comments.  The comment check needs to know where the next
  # line's text begins to recognize comments aligned with a new scope.
  next_line_start = 0
  if linenum + 1 < clean_lines.NumLines():
    next_line = raw[linenum + 1]
    next_line_start = len(next_line) - len(next_line.lstrip())
  CheckComment(line, filename, linenum, next_line_start, error)

  # get rid of comments and strings for the remaining checks
  line = clean_lines.elided[linenum]

  # You shouldn't have spaces before your brackets, except for C++11 attributes
  # or maybe after 'delete []', 'return []() {};', or 'auto [abc, ...] = ...;'.
  if (Search(r'\w\s+\[(?!\[)', line) and
      not Search(r'(?:auto&?|delete|return)\s+\[', line)):
    error(filename, linenum, 'whitespace/braces', 5,
          'Extra space before [')

  # In range-based for, we wanted spaces before and after the colon, but
  # not around "::" tokens that might appear.
  if (Search(r'for *\(.*[^:]:[^: ]', line) or
      Search(r'for *\(.*[^: ]:[^:]', line)):
    error(filename, linenum, 'whitespace/forcolon', 2,
          'Missing space around colon in range-based for loop')
3794
3795
def CheckOperatorSpacing(filename, clean_lines, linenum, error):
  """Checks for horizontal spacing around operators.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Don't try to do spacing checks for operator methods.  Do this by
  # replacing the troublesome characters with something else,
  # preserving column position for all other characters.
  #
  # The replacement is done repeatedly to avoid false positives from
  # operators that call operators.
  while True:
    match = Match(r'^(.*\boperator\b)(\S+)(\s*\(.*)$', line)
    if match:
      line = match.group(1) + ('_' * len(match.group(2))) + match.group(3)
    else:
      break

  # We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
  # Otherwise not.  Note we only check for non-spaces on *both* sides;
  # sometimes people put non-spaces on one side when aligning ='s among
  # many lines (not that this is behavior that I approve of...)
  if ((Search(r'[\w.]=', line) or
       Search(r'=[\w.]', line))
      and not Search(r'\b(if|while|for) ', line)
      # Operators taken from [lex.operators] in C++11 standard.
      and not Search(r'(>=|<=|==|!=|&=|\^=|\|=|\+=|\*=|\/=|\%=)', line)
      and not Search(r'operator=', line)):
    error(filename, linenum, 'whitespace/operators', 4,
          'Missing spaces around =')

  # It's ok not to have spaces around binary operators like + - * /, but if
  # there's too little whitespace, we get concerned.  It's hard to tell,
  # though, so we punt on this one for now.  TODO.

  # You should always have whitespace around binary operators.
  #
  # Check <= and >= first to avoid false positives with < and >, then
  # check non-include lines for spacing around < and >.
  #
  # If the operator is followed by a comma, assume it's being used in a
  # macro context and don't do any checks.  This avoids false
  # positives.
  #
  # Note that && is not included here.  This is because there are too
  # many false positives due to RValue references.
  match = Search(r'[^<>=!\s](==|!=|<=|>=|\|\|)[^<>=!\s,;\)]', line)
  if match:
    error(filename, linenum, 'whitespace/operators', 3,
          'Missing spaces around %s' % match.group(1))
  elif not Match(r'#.*include', line):
    # Look for < that is not surrounded by spaces.  This is only
    # triggered if both sides are missing spaces, even though
    # technically it should flag if at least one side is missing a
    # space.  This is done to avoid some false positives with shifts.
    match = Match(r'^(.*[^\s<])<[^\s=<,]', line)
    if match:
      # If the "<" closes as a template bracket it is fine; only complain
      # when CloseExpression fails to find a matching ">".
      (_, _, end_pos) = CloseExpression(
          clean_lines, linenum, len(match.group(1)))
      if end_pos <= -1:
        error(filename, linenum, 'whitespace/operators', 3,
              'Missing spaces around <')

    # Look for > that is not surrounded by spaces.  Similar to the
    # above, we only trigger if both sides are missing spaces to avoid
    # false positives with shifts.
    match = Match(r'^(.*[^-\s>])>[^\s=>,]', line)
    if match:
      (_, _, start_pos) = ReverseCloseExpression(
          clean_lines, linenum, len(match.group(1)))
      if start_pos <= -1:
        error(filename, linenum, 'whitespace/operators', 3,
              'Missing spaces around >')

  # We allow no-spaces around << when used like this: 10<<20, but
  # not otherwise (particularly, not when used as streams)
  #
  # We also allow operators following an opening parenthesis, since
  # those tend to be macros that deal with operators.
  match = Search(r'(operator|[^\s(<])(?:L|UL|LL|ULL|l|ul|ll|ull)?<<([^\s,=<])', line)
  if (match and not (match.group(1).isdigit() and match.group(2).isdigit()) and
      not (match.group(1) == 'operator' and match.group(2) == ';')):
    error(filename, linenum, 'whitespace/operators', 3,
          'Missing spaces around <<')

  # We allow no-spaces around >> for almost anything.  This is because
  # C++11 allows ">>" to close nested templates, which accounts for
  # most cases when ">>" is not followed by a space.
  #
  # We still warn on ">>" followed by alpha character, because that is
  # likely due to ">>" being used for right shifts, e.g.:
  #   value >> alpha
  #
  # When ">>" is used to close templates, the alphanumeric letter that
  # follows would be part of an identifier, and there should still be
  # a space separating the template type and the identifier.
  #   type<type<type>> alpha
  match = Search(r'>>[a-zA-Z_]', line)
  if match:
    error(filename, linenum, 'whitespace/operators', 3,
          'Missing spaces around >>')

  # There shouldn't be space around unary operators
  match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
  if match:
    error(filename, linenum, 'whitespace/operators', 4,
          'Extra space for operator %s' % match.group(1))
3909
3910
def CheckParenthesisSpacing(filename, clean_lines, linenum, error):
  """Checks for horizontal spacing around parentheses.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # A control-flow keyword must be separated from its opening paren.
  keyword_paren = Search(r' (if\(|for\(|while\(|switch\()', line)
  if keyword_paren:
    error(filename, linenum, 'whitespace/parens', 5,
          'Missing space before ( in %s' % keyword_paren.group(1))

  # For if/for/while/switch, the left and right parens should be
  # consistent about how many spaces are inside the parens, and
  # there should either be zero or one spaces inside the parens.
  # We don't want: "if ( foo)" or "if ( foo )".
  # Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
  control = Search(r'\b(if|for|while|switch)\s*'
                   r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
                   line)
  if control:
    left_spaces = control.group(2)
    right_spaces = control.group(4)
    if len(left_spaces) != len(right_spaces):
      # Allowed asymmetries: a leading lone ";" (empty first for-clause),
      # or an empty trailing for-clause ending in "; )".
      leading_semicolon = (control.group(3) == ';' and
                           len(left_spaces) == 1 + len(right_spaces))
      empty_for_tail = (not left_spaces and
                        Search(r'\bfor\s*\(.*; \)', line))
      if not (leading_semicolon or empty_for_tail):
        error(filename, linenum, 'whitespace/parens', 5,
              'Mismatching spaces inside () in %s' % control.group(1))
    # len() is never negative, so "> 1" is the same as "not in [0, 1]".
    if len(left_spaces) > 1:
      error(filename, linenum, 'whitespace/parens', 5,
            'Should have zero or one spaces inside ( and ) in %s' %
            control.group(1))
3947
3948
def CheckCommaSpacing(filename, clean_lines, linenum, error):
  """Checks for horizontal spacing near commas and semicolons.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  raw = clean_lines.lines_without_raw_strings
  line = clean_lines.elided[linenum]

  # A comma should be followed by a space, unless the next character is
  # another comma (that only happens for empty macro arguments).
  #
  # The check runs in two passes: first on the elided line to find a
  # missing space, then on the raw line to confirm that the gap was not
  # merely created by eliding a comment.  "operator,(" is masked out so
  # comma-operator overloads are not flagged.
  masked = ReplaceAll(r'\boperator\s*,\s*\(', 'F(', line)
  if Search(r',[^,\s]', masked) and Search(r',[^,\s]', raw[linenum]):
    error(filename, linenum, 'whitespace/comma', 3,
          'Missing space after ,')

  # A semicolon should also be followed by a space, modulo a few corner
  # cases such as "};", "for (;;)", line continuations and ");".
  # TODO(unknown): clarify if 'if (1) { return 1;}' is requires one more
  # space after ;
  if Search(r';[^\s};\\)/]', line):
    error(filename, linenum, 'whitespace/semicolon', 3,
          'Missing space after ;')
3983
3984
def _IsType(clean_lines, nesting_state, expr):
  """Check if expression looks like a type name, returns true if so.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    expr: The expression to check.
  Returns:
    True, if token looks like a type.
  """
  # Keep only the last token in the expression
  last_word = Match(r'^.*(\b\S+)$', expr)
  if last_word:
    token = last_word.group(1)
  else:
    token = expr

  # Match native types and stdint types
  if _TYPES.match(token):
    return True

  # Try a bit harder to match templated types.  Walk up the nesting
  # stack until we find something that resembles a typename
  # declaration for what we are looking for.
  typename_pattern = (r'\b(?:typename|class|struct)\s+' + re.escape(token) +
                      r'\b')
  block_index = len(nesting_state.stack) - 1
  while block_index >= 0:
    # Template parameters cannot cross a namespace boundary, so stop there.
    if isinstance(nesting_state.stack[block_index], _NamespaceInfo):
      return False

    # Found where the opening brace is.  We want to scan from this
    # line up to the beginning of the function, minus a few lines.
    #   template <typename Type1,  // stop scanning here
    #             ...>
    #   class C
    #     : public ... {  // start scanning here
    last_line = nesting_state.stack[block_index].starting_linenum

    next_block_start = 0
    if block_index > 0:
      next_block_start = nesting_state.stack[block_index - 1].starting_linenum
    # Scan backwards from the block's opening line for the nearest
    # "template" keyword, but never past the enclosing block's start.
    first_line = last_line
    while first_line >= next_block_start:
      if clean_lines.elided[first_line].find('template') >= 0:
        break
      first_line -= 1
    if first_line < next_block_start:
      # Didn't find any "template" keyword before reaching the next block,
      # there are probably no template things to check for this block
      block_index -= 1
      continue

    # Look for typename in the specified range
    for i in xrange(first_line, last_line + 1, 1):
      if Search(typename_pattern, clean_lines.elided[i]):
        return True
    block_index -= 1

  return False
4046
4047
def CheckBracesSpacing(filename, clean_lines, linenum, nesting_state, error):
  """Checks for horizontal spacing near braces and semicolons.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Except after an opening paren, or after another opening brace (in case of
  # an initializer list, for instance), you should have spaces before your
  # braces when they are delimiting blocks, classes, namespaces etc.
  # And since you should never have braces at the beginning of a line,
  # this is an easy test.  Except that braces used for initialization don't
  # follow the same rule; we often don't want spaces before those.
  match = Match(r'^(.*[^ ({>]){', line)

  if match:
    # Try a bit harder to check for brace initialization.  This
    # happens in one of the following forms:
    #   Constructor() : initializer_list_{} { ... }
    #   Constructor{}.MemberFunction()
    #   Type variable{};
    #   FunctionCall(type{}, ...);
    #   LastArgument(..., type{});
    #   LOG(INFO) << type{} << " ...";
    #   map_of_type[{...}] = ...;
    #   ternary = expr ? new type{} : nullptr;
    #   OuterTemplate<InnerTemplateConstructor<Type>{}>
    #
    # We check for the character following the closing brace, and
    # silence the warning if it's one of those listed above, i.e.
    # "{.;,)<>]:".
    #
    # To account for nested initializer list, we allow any number of
    # closing braces up to "{;,)<".  We can't simply silence the
    # warning on first sight of closing brace, because that would
    # cause false negatives for things that are not initializer lists.
    #   Silence this:         But not this:
    #     Outer{                if (...) {
    #       Inner{...}            if (...){  // Missing space before {
    #     };                    }
    #
    # There is a false negative with this approach if people inserted
    # spurious semicolons, e.g. "if (cond){};", but we will catch the
    # spurious semicolon with a separate check.
    leading_text = match.group(1)
    (endline, endlinenum, endpos) = CloseExpression(
        clean_lines, linenum, len(match.group(1)))
    trailing_text = ''
    if endpos > -1:
      trailing_text = endline[endpos:]
    # Pull in up to two following lines so multi-line initializers are seen.
    for offset in xrange(endlinenum + 1,
                         min(endlinenum + 3, clean_lines.NumLines() - 1)):
      trailing_text += clean_lines.elided[offset]
    # We also suppress warnings for `uint64_t{expression}` etc., as the style
    # guide recommends brace initialization for integral types to avoid
    # overflow/truncation.
    if (not Match(r'^[\s}]*[{.;,)<>\]:]', trailing_text)
        and not _IsType(clean_lines, nesting_state, leading_text)):
      error(filename, linenum, 'whitespace/braces', 5,
            'Missing space before {')

  # Make sure '} else {' has spaces.
  if Search(r'}else', line):
    error(filename, linenum, 'whitespace/braces', 5,
          'Missing space before else')

  # You shouldn't have a space before a semicolon at the end of the line.
  # There's a special case for "for" since the style guide allows space before
  # the semicolon there.
  if Search(r':\s*;\s*$', line):
    error(filename, linenum, 'whitespace/semicolon', 5,
          'Semicolon defining empty statement. Use {} instead.')
  elif Search(r'^\s*;\s*$', line):
    error(filename, linenum, 'whitespace/semicolon', 5,
          'Line contains only semicolon. If this should be an empty statement, '
          'use {} instead.')
  elif (Search(r'\s+;\s*$', line) and
        not Search(r'\bfor\b', line)):
    error(filename, linenum, 'whitespace/semicolon', 5,
          'Extra space before last semicolon. If this should be an empty '
          'statement, use {} instead.')
4135
4136
def IsDecltype(clean_lines, linenum, column):
  """Check if the token ending on (linenum, column) is decltype().

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: the number of the line to check.
    column: end column of the token to check.
  Returns:
    True if this token is decltype() expression, False otherwise.
  """
  # Walk back over the parenthesized expression that ends at this column.
  # If no matching opening paren is found, there is nothing to inspect.
  (text, _, open_col) = ReverseCloseExpression(clean_lines, linenum, column)
  if open_col < 0:
    return False
  # The word immediately preceding the opening paren must be "decltype".
  return bool(Search(r'\bdecltype\s*$', text[0:open_col]))
4153
def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
  """Checks for additional blank line issues related to sections.

  Currently the only thing checked here is blank line before protected/private.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    class_info: A _ClassInfo objects.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Small classes are exempt, where small means 25 lines or less -- roughly
  # the usual height of a terminal, so anything that fits on one screen.
  # Note that if the end of the class was never found, last_line is zero and
  # the same condition skips the check.  We also skip the class's first line,
  # which covers one-liners like "class Foo { public: ... };".
  if (class_info.last_line - class_info.starting_linenum <= 24 or
      linenum <= class_info.starting_linenum):
    return

  matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum])
  if not matched:
    return

  # Don't warn when the preceding line:
  #   - is blank (that is the desired style),
  #   - contains "class"/"struct" (we are at the top of the class, or an
  #     inner class was forward-declared public for implementation reasons),
  #   - ends with a backslash (class defined inside a C macro).
  preceding = clean_lines.lines[linenum - 1]
  if (IsBlankLine(preceding) or
      Search(r'\b(class|struct)\b', preceding) or
      Search(r'\\$', preceding)):
    return

  # Try a bit harder to find the beginning of the class.  This is to
  # account for multi-line base-specifier lists, e.g.:
  #   class Derived
  #       : public Base {
  end_class_head = class_info.starting_linenum
  for i in range(class_info.starting_linenum, linenum):
    if Search(r'\{\s*$', clean_lines.lines[i]):
      end_class_head = i
      break
  if end_class_head < linenum - 1:
    error(filename, linenum, 'whitespace/blank_line', 3,
          '"%s:" should be preceded by a blank line' % matched.group(1))
4207
4208
def GetPreviousNonBlankLine(clean_lines, linenum):
  """Return the most recent non-blank line and its line number.

  Args:
    clean_lines: A CleansedLines instance containing the file contents.
    linenum: The number of the line to check.

  Returns:
    A tuple with two elements.  The first element is the contents of the last
    non-blank line before the current line, or the empty string if this is the
    first non-blank line.  The second is the line number of that line, or -1
    if this is the first non-blank line.
  """
  # Scan upwards from the line just above linenum until a non-blank line
  # is found; fall through to the sentinel if every earlier line is blank.
  for candidate in range(linenum - 1, -1, -1):
    contents = clean_lines.elided[candidate]
    if not IsBlankLine(contents):
      return (contents, candidate)
  return ('', -1)
4230
4231
def CheckBraces(filename, clean_lines, linenum, error):
  """Looks for misplaced braces (e.g. at the end of line).

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """

  line = clean_lines.elided[linenum]        # get rid of comments and strings

  if Match(r'\s*{\s*$', line):
    # We allow an open brace to start a line in the case where someone is using
    # braces in a block to explicitly create a new scope, which is commonly used
    # to control the lifetime of stack-allocated variables.  Braces are also
    # used for brace initializers inside function calls.  We don't detect this
    # perfectly: we just don't complain if the last non-whitespace character on
    # the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the
    # previous line starts a preprocessor block.  We also allow a brace on the
    # following line if it is part of an array initialization and would not fit
    # within the 80 character limit of the preceding line.
    prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
    if (not Search(r'[,;:}{(]\s*$', prevline) and
        not Match(r'\s*#', prevline) and
        not (GetLineWidth(prevline) > _line_length - 2 and '[]' in prevline)):
      error(filename, linenum, 'whitespace/braces', 4,
            '{ should almost always be at the end of the previous line')

  # An else clause should be on the same line as the preceding closing brace.
  if Match(r'\s*else\b\s*(?:if\b|\{|$)', line):
    prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
    if Match(r'\s*}\s*$', prevline):
      error(filename, linenum, 'whitespace/newline', 4,
            'An else should appear on the same line as the preceding }')

  # If braces come on one side of an else, they should be on both.
  # However, we have to worry about "else if" that spans multiple lines!
  if Search(r'else if\s*\(', line):       # could be multi-line if
    brace_on_left = bool(Search(r'}\s*else if\s*\(', line))
    # find the ( after the if
    pos = line.find('else if')
    pos = line.find('(', pos)
    if pos > 0:
      (endline, _, endpos) = CloseExpression(clean_lines, linenum, pos)
      brace_on_right = endline[endpos:].find('{') != -1
      if brace_on_left != brace_on_right:    # must be brace after if
        error(filename, linenum, 'readability/braces', 5,
              'If an else has a brace on one side, it should have it on both')
  elif Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line):
    error(filename, linenum, 'readability/braces', 5,
          'If an else has a brace on one side, it should have it on both')

  # Likewise, an else should never have the else clause on the same line
  if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line):
    error(filename, linenum, 'whitespace/newline', 4,
          'Else clause should never be on same line as else (use 2 lines)')

  # In the same way, a do/while should never be on one line
  if Match(r'\s*do [^\s{]', line):
    error(filename, linenum, 'whitespace/newline', 4,
          'do/while clauses should not be on a single line')

  # Check single-line if/else bodies.  The style guide says 'curly braces are
  # not required for single-line statements'.  We additionally allow multi-line,
  # single statements, but we reject anything with more than one semicolon in
  # it.  This means that the first semicolon after the if should be at the end
  # of its line, and the line after that should have an indent level equal to
  # or lower than the if.  We also check for ambiguous if/else nesting without
  # braces.
  if_else_match = Search(r'\b(if\s*(|constexpr)\s*\(|else\b)', line)
  if if_else_match and not Match(r'\s*#', line):
    if_indent = GetIndentLevel(line)
    endline, endlinenum, endpos = line, linenum, if_else_match.end()
    if_match = Search(r'\bif\s*(|constexpr)\s*\(', line)
    if if_match:
      # This could be a multiline if condition, so find the end first.
      pos = if_match.end() - 1
      (endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum, pos)
    # Check for an opening brace, either directly after the if or on the next
    # line.  If found, this isn't a single-statement conditional.
    if (not Match(r'\s*{', endline[endpos:])
        and not (Match(r'\s*$', endline[endpos:])
                 and endlinenum < (len(clean_lines.elided) - 1)
                 and Match(r'\s*{', clean_lines.elided[endlinenum + 1]))):
      # Advance to the line containing the first semicolon of the body.
      while (endlinenum < len(clean_lines.elided)
             and ';' not in clean_lines.elided[endlinenum][endpos:]):
        endlinenum += 1
        endpos = 0
      if endlinenum < len(clean_lines.elided):
        endline = clean_lines.elided[endlinenum]
        # We allow a mix of whitespace and closing braces (e.g. for one-liner
        # methods) and a single \ after the semicolon (for macros)
        endpos = endline.find(';')
        if not Match(r';[\s}]*(\\?)$', endline[endpos:]):
          # Semicolon isn't the last character, there's something trailing.
          # Output a warning if the semicolon is not contained inside
          # a lambda expression.
          if not Match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}]*\}\s*\)*[;,]\s*$',
                       endline):
            error(filename, linenum, 'readability/braces', 4,
                  'If/else bodies with multiple statements require braces')
        elif endlinenum < len(clean_lines.elided) - 1:
          # Make sure the next line is dedented
          next_line = clean_lines.elided[endlinenum + 1]
          next_indent = GetIndentLevel(next_line)
          # With ambiguous nested if statements, this will error out on the
          # if that *doesn't* match the else, regardless of whether it's the
          # inner one or outer one.
          if (if_match and Match(r'\s*else\b', next_line)
              and next_indent != if_indent):
            error(filename, linenum, 'readability/braces', 4,
                  'Else clause should be indented at the same level as if. '
                  'Ambiguous nested if/else chains require braces.')
          elif next_indent > if_indent:
            error(filename, linenum, 'readability/braces', 4,
                  'If/else bodies with multiple statements require braces')
4349
4350
def CheckTrailingSemicolon(filename, clean_lines, linenum, error):
  """Looks for redundant trailing semicolon.

  Emits a 'readability/braces' error when a closing brace of a block
  (loop body, function body, conditional, etc.) is followed by a
  semicolon that serves no purpose.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """

  line = clean_lines.elided[linenum]

  # Block bodies should not be followed by a semicolon. Due to C++11
  # brace initialization, there are more places where semicolons are
  # required than not, so we explicitly list the allowed rules rather
  # than listing the disallowed ones. These are the places where "};"
  # should be replaced by just "}":
  # 1. Some flavor of block following closing parenthesis:
  #    for (;;) {};
  #    while (...) {};
  #    switch (...) {};
  #    Function(...) {};
  #    if (...) {};
  #    if (...) else if (...) {};
  #
  # 2. else block:
  #    if (...) else {};
  #
  # 3. const member function:
  #    Function(...) const {};
  #
  # 4. Block following some statement:
  #    x = 42;
  #    {};
  #
  # 5. Block at the beginning of a function:
  #    Function(...) {
  #      {};
  #    }
  #
  #    Note that naively checking for the preceding "{" will also match
  #    braces inside multi-dimensional arrays, but this is fine since
  #    that expression will not contain semicolons.
  #
  # 6. Block following another block:
  #    while (true) {}
  #    {};
  #
  # 7. End of namespaces:
  #    namespace {};
  #
  #    These semicolons seems far more common than other kinds of
  #    redundant semicolons, possibly due to people converting classes
  #    to namespaces. For now we do not warn for this case.
  #
  # Try matching case 1 first.
  match = Match(r'^(.*\)\s*)\{', line)
  if match:
    # Matched closing parenthesis (case 1). Check the token before the
    # matching opening parenthesis, and don't warn if it looks like a
    # macro. This avoids these false positives:
    #  - macro that defines a base class
    #  - multi-line macro that defines a base class
    #  - macro that defines the whole class-head
    #
    # But we still issue warnings for macros that we know are safe to
    # warn, specifically:
    #  - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P
    #  - TYPED_TEST
    #  - INTERFACE_DEF
    #  - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED:
    #
    # We implement a list of safe macros instead of a list of
    # unsafe macros, even though the latter appears less frequently in
    # google code and would have been easier to implement. This is because
    # the downside for getting the allowed checks wrong means some extra
    # semicolons, while the downside for getting disallowed checks wrong
    # would result in compile errors.
    #
    # In addition to macros, we also don't want to warn on
    #  - Compound literals
    #  - Lambdas
    #  - alignas specifier with anonymous structs
    #  - decltype
    closing_brace_pos = match.group(1).rfind(')')
    # opening_parenthesis is a (line_text, line_number, position) triple
    # for the '(' that matches the ')' before the brace.
    opening_parenthesis = ReverseCloseExpression(
        clean_lines, linenum, closing_brace_pos)
    if opening_parenthesis[2] > -1:
      line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]]
      # Trailing ALL_CAPS identifier before the '(': likely a macro call.
      macro = Search(r'\b([A-Z_][A-Z0-9_]*)\s*$', line_prefix)
      # Prefix ending in ']': likely a lambda introducer (unless it is
      # an operator[] definition, excluded below).
      func = Match(r'^(.*\])\s*$', line_prefix)
      if ((macro and
           macro.group(1) not in (
               'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST',
               'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED',
               'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or
          (func and not Search(r'\boperator\s*\[\s*\]', func.group(1))) or
          Search(r'\b(?:struct|union)\s+alignas\s*$', line_prefix) or
          Search(r'\bdecltype$', line_prefix) or
          Search(r'\s+=\s*$', line_prefix)):
        match = None
    # Lambda whose capture list ended on the previous line: don't warn.
    if (match and
        opening_parenthesis[1] > 1 and
        Search(r'\]\s*$', clean_lines.elided[opening_parenthesis[1] - 1])):
      # Multi-line lambda-expression
      match = None

  else:
    # Try matching cases 2-3.
    match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line)
    if not match:
      # Try matching cases 4-6. These are always matched on separate lines.
      #
      # Note that we can't simply concatenate the previous line to the
      # current line and do a single match, otherwise we may output
      # duplicate warnings for the blank line case:
      #   if (cond) {
      #     // blank line
      #   }
      prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
      if prevline and Search(r'[;{}]\s*$', prevline):
        match = Match(r'^(\s*)\{', line)

  # Check matching closing brace
  if match:
    (endline, endlinenum, endpos) = CloseExpression(
        clean_lines, linenum, len(match.group(1)))
    if endpos > -1 and Match(r'^\s*;', endline[endpos:]):
      # Current {} pair is eligible for semicolon check, and we have found
      # the redundant semicolon, output warning here.
      #
      # Note: because we are scanning forward for opening braces, and
      # outputting warnings for the matching closing brace, if there are
      # nested blocks with trailing semicolons, we will get the error
      # messages in reversed order.

      # We need to check the line forward for NOLINT, since the warning
      # is reported on the closing-brace line, not the current line.
      raw_lines = clean_lines.raw_lines
      ParseNolintSuppressions(filename, raw_lines[endlinenum-1], endlinenum-1,
                              error)
      ParseNolintSuppressions(filename, raw_lines[endlinenum], endlinenum,
                              error)

      error(filename, endlinenum, 'readability/braces', 4,
            "You don't need a ; after a }")
4496
4497
def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
  """Look for empty loop/conditional body with only a single semicolon.

  Also flags 'if' statements whose braced body is completely empty (no
  statements, no comments) and that have no else clause.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """

  # Search for loop keywords at the beginning of the line. Because only
  # whitespaces are allowed before the keywords, this will also ignore most
  # do-while-loops, since those lines should start with closing brace.
  #
  # We also check "if" blocks here, since an empty conditional block
  # is likely an error.
  line = clean_lines.elided[linenum]
  matched = Match(r'\s*(for|while|if)\s*\(', line)
  if matched:
    # Find the end of the conditional expression.
    (end_line, end_linenum, end_pos) = CloseExpression(
        clean_lines, linenum, line.find('('))

    # Output warning if what follows the condition expression is a semicolon.
    # No warning for all other cases, including whitespace or newline, since we
    # have a separate check for semicolons preceded by whitespace.
    if end_pos >= 0 and Match(r';', end_line[end_pos:]):
      if matched.group(1) == 'if':
        error(filename, end_linenum, 'whitespace/empty_conditional_body', 5,
              'Empty conditional bodies should use {}')
      else:
        error(filename, end_linenum, 'whitespace/empty_loop_body', 5,
              'Empty loop bodies should use {} or continue')

    # Check for if statements that have completely empty bodies (no comments)
    # and no else clauses.
    if end_pos >= 0 and matched.group(1) == 'if':
      # Find the position of the opening { for the if statement.
      # Return without logging an error if it has no brackets.
      opening_linenum = end_linenum
      opening_line_fragment = end_line[end_pos:]
      # Loop until EOF or find anything that's not whitespace or opening {.
      while not Search(r'^\s*\{', opening_line_fragment):
        if Search(r'^(?!\s*$)', opening_line_fragment):
          # Conditional has no brackets.
          return
        opening_linenum += 1
        if opening_linenum == len(clean_lines.elided):
          # Couldn't find conditional's opening { or any code before EOF.
          return
        opening_line_fragment = clean_lines.elided[opening_linenum]
      # Set opening_line (opening_line_fragment may not be entire opening line).
      opening_line = clean_lines.elided[opening_linenum]

      # Find the position of the closing }.
      opening_pos = opening_line_fragment.find('{')
      if opening_linenum == end_linenum:
        # We need to make opening_pos relative to the start of the entire line.
        opening_pos += end_pos
      (closing_line, closing_linenum, closing_pos) = CloseExpression(
          clean_lines, opening_linenum, opening_pos)
      if closing_pos < 0:
        return

      # Now construct the body of the conditional. This consists of the portion
      # of the opening line after the {, all lines until the closing line,
      # and the portion of the closing line before the }.
      if (clean_lines.raw_lines[opening_linenum] !=
          CleanseComments(clean_lines.raw_lines[opening_linenum])):
        # Opening line ends with a comment, so conditional isn't empty.
        return
      if closing_linenum > opening_linenum:
        # Opening line after the {. Ignore comments here since we checked above.
        bodylist = list(opening_line[opening_pos+1:])
        # All lines until closing line, excluding closing line, with comments.
        bodylist.extend(clean_lines.raw_lines[opening_linenum+1:closing_linenum])
        # Closing line before the }. Won't (and can't) have comments.
        bodylist.append(clean_lines.elided[closing_linenum][:closing_pos-1])
        body = '\n'.join(bodylist)
      else:
        # If statement has brackets and fits on a single line.
        body = opening_line[opening_pos+1:closing_pos-1]

      # Check if the body is empty
      if not _EMPTY_CONDITIONAL_BODY_PATTERN.search(body):
        return
      # The body is empty. Now make sure there's not an else clause.
      current_linenum = closing_linenum
      current_line_fragment = closing_line[closing_pos:]
      # Loop until EOF or find anything that's not whitespace or else clause.
      while Search(r'^\s*$|^(?=\s*else)', current_line_fragment):
        if Search(r'^(?=\s*else)', current_line_fragment):
          # Found an else clause, so don't log an error.
          return
        current_linenum += 1
        if current_linenum == len(clean_lines.elided):
          break
        current_line_fragment = clean_lines.elided[current_linenum]

      # The body is empty and there's no else clause until EOF or other code.
      error(filename, end_linenum, 'whitespace/empty_if_body', 4,
            ('If statement had no body and no else clause'))
4600
4601
def FindCheckMacro(line):
  """Find a replaceable CHECK-like macro.

  Args:
    line: line to search on.
  Returns:
    (macro name, start position), or (None, -1) if no replaceable
    macro is found.
  """
  for candidate in _CHECK_MACROS:
    # Cheap substring test first; skip candidates that cannot match.
    if line.find(candidate) < 0:
      continue
    # Confirm with a regular expression that this really is the CHECK
    # macro being invoked, rather than some other identifier that merely
    # contains the CHECK substring, and locate its opening parenthesis.
    matched = Match(r'^(.*\b' + candidate + r'\s*)\(', line)
    if matched:
      return (candidate, len(matched.group(1)))
  return (None, -1)
4623
4624
def CheckCheck(filename, clean_lines, linenum, error):
  """Checks the use of CHECK and EXPECT macros.

  Suggests the two-argument form (e.g. CHECK_EQ(a, b)) when a CHECK-like
  macro wraps a single relational comparison against a constant literal.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """

  # Decide the set of replacement macros that should be suggested
  lines = clean_lines.elided
  (check_macro, start_pos) = FindCheckMacro(lines[linenum])
  if not check_macro:
    return

  # Find end of the boolean expression by matching parentheses.
  # NOTE: despite the name, end_line here is the line *number* of the
  # closing parenthesis; last_line is its text (see usage below).
  (last_line, end_line, end_pos) = CloseExpression(
      clean_lines, linenum, start_pos)
  if end_pos < 0:
    return

  # If the check macro is followed by something other than a
  # semicolon, assume users will log their own custom error messages
  # and don't suggest any replacements.
  if not Match(r'\s*;', last_line[end_pos:]):
    return

  # Collect the text between the macro's parentheses, possibly
  # spanning multiple lines.
  if linenum == end_line:
    expression = lines[linenum][start_pos + 1:end_pos - 1]
  else:
    expression = lines[linenum][start_pos + 1:]
    for i in xrange(linenum + 1, end_line):
      expression += lines[i]
    expression += last_line[0:end_pos - 1]

  # Parse expression so that we can take parentheses into account.
  # This avoids false positives for inputs like "CHECK((a < 4) == b)",
  # which is not replaceable by CHECK_LE.
  lhs = ''
  rhs = ''
  operator = None
  while expression:
    matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||'
                    r'==|!=|>=|>|<=|<|\()(.*)$', expression)
    if matched:
      token = matched.group(1)
      if token == '(':
        # Parenthesized operand
        expression = matched.group(2)
        (end, _) = FindEndOfExpressionInLine(expression, 0, ['('])
        if end < 0:
          return  # Unmatched parenthesis
        lhs += '(' + expression[0:end]
        expression = expression[end:]
      elif token in ('&&', '||'):
        # Logical and/or operators. This means the expression
        # contains more than one term, for example:
        #   CHECK(42 < a && a < b);
        #
        # These are not replaceable with CHECK_LE, so bail out early.
        return
      elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'):
        # Non-relational operator
        lhs += token
        expression = matched.group(2)
      else:
        # Relational operator
        operator = token
        rhs = matched.group(2)
        break
    else:
      # Unparenthesized operand. Instead of appending to lhs one character
      # at a time, we do another regular expression match to consume several
      # characters at once if possible. Trivial benchmark shows that this
      # is more efficient when the operands are longer than a single
      # character, which is generally the case.
      matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression)
      if not matched:
        matched = Match(r'^(\s*\S)(.*)$', expression)
        if not matched:
          break
      lhs += matched.group(1)
      expression = matched.group(2)

  # Only apply checks if we got all parts of the boolean expression
  if not (lhs and operator and rhs):
    return

  # Check that rhs do not contain logical operators. We already know
  # that lhs is fine since the loop above parses out && and ||.
  if rhs.find('&&') > -1 or rhs.find('||') > -1:
    return

  # At least one of the operands must be a constant literal. This is
  # to avoid suggesting replacements for unprintable things like
  # CHECK(variable != iterator)
  #
  # The following pattern matches decimal, hex integers, strings, and
  # characters (in that order).
  lhs = lhs.strip()
  rhs = rhs.strip()
  match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$'
  if Match(match_constant, lhs) or Match(match_constant, rhs):
    # Note: since we know both lhs and rhs, we can provide a more
    # descriptive error message like:
    #   Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42)
    # Instead of:
    #   Consider using CHECK_EQ instead of CHECK(a == b)
    #
    # We are still keeping the less descriptive message because if lhs
    # or rhs gets long, the error message might become unreadable.
    error(filename, linenum, 'readability/check', 2,
          'Consider using %s instead of %s(a %s b)' % (
              _CHECK_REPLACEMENT[check_macro][operator],
              check_macro, operator))
4741
4742
def CheckAltTokens(filename, clean_lines, linenum, error):
  """Check alternative keywords being used in boolean expressions.

  Reports every token matched by _ALT_TOKEN_REPLACEMENT_PATTERN on this
  line, suggesting the operator given by _ALT_TOKEN_REPLACEMENT.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Preprocessor directives are exempt from this check.
  if Match(r'^\s*#', line):
    return

  # Last ditch effort to avoid multi-line comments. This will not help
  # if the comment started before the current line or ended after the
  # current line, but it catches most of the false positives. At least,
  # it provides a way to workaround this warning for people who use
  # multi-line comments in preprocessor macros.
  #
  # TODO(unknown): remove this once cpplint has better support for
  # multi-line comments.
  if '/*' in line or '*/' in line:
    return

  for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):
    token = match.group(1)
    error(filename, linenum, 'readability/alt_tokens', 2,
          'Use operator %s instead of %s' % (
              _ALT_TOKEN_REPLACEMENT[token], token))
4773
4774
def GetLineWidth(line):
  """Determines the width of the line in column positions.

  Args:
    line: A string, which may be a Unicode string.

  Returns:
    The width of the line in column positions, accounting for Unicode
    combining characters and wide characters.
  """
  # Non-unicode strings need no special handling: one byte, one column.
  if not isinstance(line, unicode):
    return len(line)

  total = 0
  for ch in unicodedata.normalize('NFC', line):
    if unicodedata.east_asian_width(ch) in ('W', 'F'):
      # East-Asian Wide / Fullwidth characters occupy two columns.
      total += 2
    elif not unicodedata.combining(ch):
      # Issue 337
      # https://mail.python.org/pipermail/python-list/2012-August/628809.html
      if (sys.version_info.major, sys.version_info.minor) <= (3, 2):
        # https://github.com/python/cpython/blob/2.7/Include/unicodeobject.h#L81
        is_wide_build = sysconfig.get_config_var("Py_UNICODE_SIZE") >= 4
        # https://github.com/python/cpython/blob/2.7/Objects/unicodeobject.c#L564
        is_low_surrogate = 0xDC00 <= ord(ch) <= 0xDFFF
        if not is_wide_build and is_low_surrogate:
          # On narrow builds a non-BMP character shows up as a surrogate
          # pair; cancel out the extra count so the pair is one column.
          total -= 1

      total += 1
  return total
4805
4806
def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,
               error):
  """Checks rules from the 'C++ style rules' section of cppguide.html.

  Most of these rules are hard to test (naming, comment style), but we
  do what we can. In particular we check for 2-space indents, line lengths,
  tab usage, spaces inside code, etc. Also dispatches to the more specific
  per-line style checkers (braces, spacing, CHECK macros, alt tokens, ...).

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    file_extension: The extension (without the dot) of the filename.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """

  # Don't use "elided" lines here, otherwise we can't check commented lines.
  # Don't want to use "raw" either, because we don't want to check inside C++11
  # raw strings,
  raw_lines = clean_lines.lines_without_raw_strings
  line = raw_lines[linenum]
  prev = raw_lines[linenum - 1] if linenum > 0 else ''

  if line.find('\t') != -1:
    error(filename, linenum, 'whitespace/tab', 1,
          'Tab found; better to use spaces')

  # One or three blank spaces at the beginning of the line is weird; it's
  # hard to reconcile that with 2-space indents.
  # NOTE: here are the conditions rob pike used for his tests. Mine aren't
  # as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces
  # if(RLENGTH > 20) complain = 0;
  # if(match($0, " +(error|private|public|protected):")) complain = 0;
  # if(match(prev, "&& *$")) complain = 0;
  # if(match(prev, "\\|\\| *$")) complain = 0;
  # if(match(prev, "[\",=><] *$")) complain = 0;
  # if(match($0, " <<")) complain = 0;
  # if(match(prev, " +for \\(")) complain = 0;
  # if(prevodd && match(prevprev, " +for \\(")) complain = 0;
  scope_or_label_pattern = r'\s*(?:public|private|protected|signals)(?:\s+(?:slots\s*)?)?:\s*\\?$'
  # NOTE(review): this first InnermostClass() result is never read; classinfo
  # is reassigned before use at the bottom of this function.
  classinfo = nesting_state.InnermostClass()
  initial_spaces = 0
  cleansed_line = clean_lines.elided[linenum]
  while initial_spaces < len(line) and line[initial_spaces] == ' ':
    initial_spaces += 1
  # There are certain situations we allow one space, notably for
  # section labels, and also lines containing multi-line raw strings.
  # We also don't check for lines that look like continuation lines
  # (of lines ending in double quotes, commas, equals, or angle brackets)
  # because the rules for how to indent those are non-trivial.
  if (not Search(r'[",=><] *$', prev) and
      (initial_spaces == 1 or initial_spaces == 3) and
      not Match(scope_or_label_pattern, cleansed_line) and
      not (clean_lines.raw_lines[linenum] != line and
           Match(r'^\s*""', line))):
    error(filename, linenum, 'whitespace/indent', 3,
          'Weird number of spaces at line-start. '
          'Are you using a 2-space indent?')

  if line and line[-1].isspace():
    error(filename, linenum, 'whitespace/end_of_line', 4,
          'Line ends in whitespace. Consider deleting these extra spaces.')

  # Check if the line is a header guard.
  is_header_guard = False
  if IsHeaderExtension(file_extension):
    cppvar = GetHeaderGuardCPPVariable(filename)
    if (line.startswith('#ifndef %s' % cppvar) or
        line.startswith('#define %s' % cppvar) or
        line.startswith('#endif // %s' % cppvar)):
      is_header_guard = True
  # #include lines and header guards can be long, since there's no clean way to
  # split them.
  #
  # URLs can be long too. It's possible to split these, but it makes them
  # harder to cut&paste.
  #
  # The "$Id:...$" comment may also get very long without it being the
  # developers fault.
  #
  # Doxygen documentation copying can get pretty long when using an overloaded
  # function declaration
  if (not line.startswith('#include') and not is_header_guard and
      not Match(r'^\s*//.*http(s?)://\S*$', line) and
      not Match(r'^\s*//\s*[^\s]*$', line) and
      not Match(r'^// \$Id:.*#[0-9]+ \$$', line) and
      not Match(r'^\s*/// [@\\](copydoc|copydetails|copybrief) .*$', line)):
    line_width = GetLineWidth(line)
    if line_width > _line_length:
      error(filename, linenum, 'whitespace/line_length', 2,
            'Lines should be <= %i characters long' % _line_length)

  if (cleansed_line.count(';') > 1 and
      # allow simple single line lambdas
      not Match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}\n\r]*\}',
                line) and
      # for loops are allowed two ;'s (and may run over two lines).
      cleansed_line.find('for') == -1 and
      (GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
       GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and
      # It's ok to have many commands in a switch case that fits in 1 line
      not ((cleansed_line.find('case ') != -1 or
            cleansed_line.find('default:') != -1) and
           cleansed_line.find('break;') != -1)):
    error(filename, linenum, 'whitespace/newline', 0,
          'More than one command on the same line')

  # Some more style checks
  CheckBraces(filename, clean_lines, linenum, error)
  CheckTrailingSemicolon(filename, clean_lines, linenum, error)
  CheckEmptyBlockBody(filename, clean_lines, linenum, error)
  CheckSpacing(filename, clean_lines, linenum, nesting_state, error)
  CheckOperatorSpacing(filename, clean_lines, linenum, error)
  CheckParenthesisSpacing(filename, clean_lines, linenum, error)
  CheckCommaSpacing(filename, clean_lines, linenum, error)
  CheckBracesSpacing(filename, clean_lines, linenum, nesting_state, error)
  CheckSpacingForFunctionCall(filename, clean_lines, linenum, error)
  CheckCheck(filename, clean_lines, linenum, error)
  CheckAltTokens(filename, clean_lines, linenum, error)
  classinfo = nesting_state.InnermostClass()
  if classinfo:
    CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error)
4931
4932
# Matches an #include directive, capturing the opening delimiter
# (group 1: '<' or '"') and the included path (group 2).
_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$')
# Matches the first component of a filename delimited by -s and _s. That is:
#  _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo'
#  _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo'
#  _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo'
#  _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo'
# ('.' also ends the component, so extensions are never included.)
_RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')
4940
4941
def _DropCommonSuffixes(filename):
  """Drops common suffixes like _test.cc or -inl.h from filename.

  For example:
    >>> _DropCommonSuffixes('foo/foo-inl.h')
    'foo/foo'
    >>> _DropCommonSuffixes('foo/bar/foo.cc')
    'foo/bar/foo'
    >>> _DropCommonSuffixes('foo/foo_internal.h')
    'foo/foo'
    >>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
    'foo/foo_unusualinternal'

  Args:
    filename: The input filename.

  Returns:
    The filename with the common suffix removed.
  """
  # Candidate suffixes: '<test-tag>.<src-ext>' (e.g. 'test.cc'), then
  # '<internal-tag>.<hdr-ext>' (e.g. 'inl.h', 'internal.h').
  test_like = ['%s.%s' % (tag.lstrip('_'), ext)
               for tag, ext in itertools.product(
                   _test_suffixes, GetNonHeaderExtensions())]
  internal_like = ['%s.%s' % (tag, ext)
                   for tag, ext in itertools.product(
                       ('inl', 'imp', 'internal'), GetHeaderExtensions())]
  for suffix in test_like + internal_like:
    # The suffix only counts when preceded by '-' or '_', leaving a
    # non-empty stem to return.
    if (len(filename) > len(suffix) and filename.endswith(suffix) and
        filename[-len(suffix) - 1] in ('-', '_')):
      return filename[:-len(suffix) - 1]
  # No recognized suffix: just strip the extension.
  return os.path.splitext(filename)[0]
4970
4971
def _ClassifyInclude(fileinfo, include, used_angle_brackets, include_order="default"):
  """Figures out what kind of header 'include' is.

  Args:
    fileinfo: The current file cpplint is running over. A FileInfo instance.
    include: The path to a #included file.
    used_angle_brackets: True if the #include used <> rather than "".
    include_order: "default" or other value allowed in program arguments

  Returns:
    One of the _XXX_HEADER constants.

  For example:
    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True)
    _C_SYS_HEADER
    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True)
    _CPP_SYS_HEADER
    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', True, "standardcfirst")
    _OTHER_SYS_HEADER
    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False)
    _LIKELY_MY_HEADER
    >>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'),
    ...                  'bar/foo_other_ext.h', False)
    _POSSIBLE_MY_HEADER
    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False)
    _OTHER_HEADER
  """
  # This is a list of all standard c++ header files, except
  # those already checked for above.
  is_cpp_header = include in _CPP_HEADERS

  # Mark include as C header if in list or in a known folder for standard-ish
  # C headers. With include_order == "default" every system header that is
  # not a C++ header is treated as a C header (the tests below only run for
  # other include_order values).
  is_std_c_header = (include_order == "default") or (include in _C_HEADERS
            # additional linux glibc header folders
            or Search(r'(?:%s)\/.*\.h' % "|".join(C_STANDARD_HEADER_FOLDERS), include))

  # Headers with C++ extensions shouldn't be considered C system headers
  is_system = used_angle_brackets and not os.path.splitext(include)[1] in ['.hpp', '.hxx', '.h++']

  if is_system:
    if is_cpp_header:
      return _CPP_SYS_HEADER
    if is_std_c_header:
      return _C_SYS_HEADER
    else:
      return _OTHER_SYS_HEADER

  # If the target file and the include we're checking share a
  # basename when we drop common extensions, and the include
  # lives in . , then it's likely to be owned by the target file.
  target_dir, target_base = (
      os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName())))
  include_dir, include_base = os.path.split(_DropCommonSuffixes(include))
  target_dir_pub = os.path.normpath(target_dir + '/../public')
  target_dir_pub = target_dir_pub.replace('\\', '/')
  if target_base == include_base and (
      include_dir == target_dir or
      include_dir == target_dir_pub):
    return _LIKELY_MY_HEADER

  # If the target and include share some initial basename
  # component, it's possible the target is implementing the
  # include, so it's allowed to be first, but we'll never
  # complain if it's not there.
  target_first_component = _RE_FIRST_COMPONENT.match(target_base)
  include_first_component = _RE_FIRST_COMPONENT.match(include_base)
  if (target_first_component and include_first_component and
      target_first_component.group(0) ==
      include_first_component.group(0)):
    return _POSSIBLE_MY_HEADER

  return _OTHER_HEADER
5044
5045
5046
def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
  """Check rules that are applicable to #include lines.

  Strings on #include lines are NOT removed from elided line, to make
  certain tasks easier. However, to prevent false positives, checks
  applicable to #include lines in CheckLanguage must be put here.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    include_state: An _IncludeState instance in which the headers are inserted.
    error: The function to call with any errors found.
  """
  fileinfo = FileInfo(filename)
  # Use the raw (not elided) line: the include path lives inside a string
  # literal, which elision would have stripped.
  line = clean_lines.lines[linenum]

  # "include" should use the new style "foo/bar.h" instead of just "bar.h"
  # Only do this check if the included header follows google naming
  # conventions. If not, assume that it's a 3rd party API that
  # requires special include conventions.
  #
  # We also make an exception for Lua headers, which follow google
  # naming convention but not the include convention.
  match = Match(r'#include\s*"([^/]+\.h)"', line)
  if match and not _THIRD_PARTY_HEADERS_PATTERN.match(match.group(1)):
    error(filename, linenum, 'build/include_subdir', 4,
          'Include the directory when naming .h files')

  # we shouldn't include a file more than once. actually, there are a
  # handful of instances where doing so is okay, but in general it's
  # not.
  match = _RE_PATTERN_INCLUDE.search(line)
  if match:
    include = match.group(2)
    # Group 1 is the opening delimiter: '<' means a system-style include.
    used_angle_brackets = (match.group(1) == '<')
    duplicate_line = include_state.FindHeader(include)
    if duplicate_line >= 0:
      error(filename, linenum, 'build/include', 4,
            '"%s" already included at %s:%s' %
            (include, filename, duplicate_line))
      return

    # Including non-header files (.cc, .cpp, ...) from another directory
    # is almost always a mistake.
    for extension in GetNonHeaderExtensions():
      if (include.endswith('.' + extension) and
          os.path.dirname(fileinfo.RepositoryName()) != os.path.dirname(include)):
        error(filename, linenum, 'build/include', 4,
              'Do not include .' + extension + ' files from other packages')
        return

    # We DO want to include a 3rd party looking header if it matches the
    # filename. Otherwise we get an erroneous error "...should include its
    # header" error later.
    third_src_header = False
    for ext in GetHeaderExtensions():
      basefilename = filename[0:len(filename) - len(fileinfo.Extension())]
      headerfile = basefilename + '.' + ext
      headername = FileInfo(headerfile).RepositoryName()
      if headername in include or include in headername:
        third_src_header = True
        break

    if third_src_header or not _THIRD_PARTY_HEADERS_PATTERN.match(include):
      include_state.include_list[-1].append((include, linenum))

      # We want to ensure that headers appear in the right order:
      # 1) for foo.cc, foo.h (preferred location)
      # 2) c system files
      # 3) cpp system files
      # 4) for foo.cc, foo.h (deprecated location)
      # 5) other google headers
      #
      # We classify each include statement as one of those 5 types
      # using a number of techniques. The include_state object keeps
      # track of the highest type seen, and complains if we see a
      # lower type after that.
      error_message = include_state.CheckNextIncludeOrder(
          _ClassifyInclude(fileinfo, include, used_angle_brackets, _include_order))
      if error_message:
        error(filename, linenum, 'build/include_order', 4,
              '%s. Should be: %s.h, c system, c++ system, other.' %
              (error_message, fileinfo.BaseName()))
      # Alphabetical-order tracking uses a canonical form so that, e.g.,
      # inline comments after the include do not break the comparison.
      canonical_include = include_state.CanonicalizeAlphabeticalOrder(include)
      if not include_state.IsInAlphabeticalOrder(
          clean_lines, linenum, canonical_include):
        error(filename, linenum, 'build/include_alpha', 4,
              'Include "%s" not in alphabetical order' % include)
      include_state.SetLastHeader(canonical_include)
5135
5136
5137
def _GetTextInside(text, start_pattern):
  r"""Retrieves all the text between matching open and close parentheses.

  Given a string of lines and a regular expression string, retrieve all the
  text following the expression and between opening punctuation symbols like
  (, [, or {, and the matching close-punctuation symbol. Nesting is handled
  correctly, so for text like
    printf(a(), b(c()));
  a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'.
  start_pattern must match a string ending with an open punctuation symbol.

  Args:
    text: The lines to extract text. Its comments and strings must be elided.
          It can be single line and can span multiple lines.
    start_pattern: The regexp string indicating where to start extracting
                   the text.
  Returns:
    The extracted text.
    None if either the opening string or ending punctuation could not be found.
  """
  # TODO(unknown): Audit cpplint.py to see what places could be profitably
  # rewritten to use _GetTextInside (and use inferior regexp matching today).

  # For each opening punctuation, the closer we expect to balance it.
  open_to_close = {'(': ')', '{': '}', '[': ']'}
  closers = set(itervalues(open_to_close))

  # Locate where extraction should begin.
  start_match = re.search(start_pattern, text, re.M)
  if not start_match:
    # start_pattern not found anywhere in text.
    return None
  begin = start_match.end(0)

  assert begin > 0, (
      'start_pattern must ends with an opening punctuation.')
  assert text[begin - 1] in open_to_close, (
      'start_pattern must ends with an opening punctuation.')
  # Closing punctuations still owed, innermost last.
  pending = [open_to_close[text[begin - 1]]]
  pos = begin
  while pending and pos < len(text):
    char = text[pos]
    if char == pending[-1]:
      pending.pop()
    elif char in closers:
      # A closer that doesn't balance any opener: malformed input.
      return None
    elif char in open_to_close:
      pending.append(open_to_close[char])
    pos += 1
  if pending:
    # Ran out of text with openers still unbalanced.
    return None
  # Everything balanced; drop the final closer from the result.
  return text[begin:pos - 1]
5192
5193
# Patterns for matching call-by-reference parameters.
#
# Supports nested templates up to 2 levels deep using this messy pattern:
#   < (?: < (?: < [^<>]*
#               >
#           |   [^<>] )*
#         >
#     |   [^<>] )*
#   >
# A C-style identifier: letter or underscore, then word characters.
_RE_PATTERN_IDENT = r'[_a-zA-Z]\w*'  # =~ [[:alpha:]][[:alnum:]]*
# A type name: optional const / elaborated-type keyword, then one or more
# identifier, template-argument, or '::' pieces.
_RE_PATTERN_TYPE = (
    r'(?:const\s+)?(?:typename\s+|class\s+|struct\s+|union\s+|enum\s+)?'
    r'(?:\w|'
    r'\s*<(?:<(?:<[^<>]*>|[^<>])*>|[^<>])*>|'
    r'::)+')
# A call-by-reference parameter ends with '& identifier'.
_RE_PATTERN_REF_PARAM = re.compile(
    r'(' + _RE_PATTERN_TYPE + r'(?:\s*(?:\bconst\b|[*]))*\s*'
    r'&\s*' + _RE_PATTERN_IDENT + r')\s*(?:=[^,()]+)?[,)]')
# A call-by-const-reference parameter either ends with 'const& identifier'
# or looks like 'const type& identifier' when 'type' is atomic.
_RE_PATTERN_CONST_REF_PARAM = (
    r'(?:.*\s*\bconst\s*&\s*' + _RE_PATTERN_IDENT +
    r'|const\s+' + _RE_PATTERN_TYPE + r'\s*&\s*' + _RE_PATTERN_IDENT + r')')
# Stream types.  Reference parameters to anything ending in "stream" are
# allowed (e.g. iostream operators legitimately take non-const refs).
_RE_PATTERN_REF_STREAM_PARAM = (
    r'(?:.*stream\s*&\s*' + _RE_PATTERN_IDENT + r')')
5221
5222
def CheckLanguage(filename, clean_lines, linenum, file_extension,
                  include_state, nesting_state, error):
  """Checks rules from the 'C++ language rules' section of cppguide.html.

  Some of these rules are hard to test (function overloading, using
  uint32 inappropriately), but we do the best we can.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    file_extension: The extension (without the dot) of the filename.
    include_state: An _IncludeState instance in which the headers are inserted.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """
  # If the line is empty or consists of entirely a comment, no need to
  # check it.
  line = clean_lines.elided[linenum]
  if not line:
    return

  match = _RE_PATTERN_INCLUDE.search(line)
  if match:
    CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
    return

  # Reset include state across preprocessor directives. This is meant
  # to silence warnings for conditional includes.
  match = Match(r'^\s*#\s*(if|ifdef|ifndef|elif|else|endif)\b', line)
  if match:
    include_state.ResetSection(match.group(1))

  # Perform other checks now that we are sure that this is not an include line
  CheckCasts(filename, clean_lines, linenum, error)
  CheckGlobalStatic(filename, clean_lines, linenum, error)
  CheckPrintf(filename, clean_lines, linenum, error)

  if IsHeaderExtension(file_extension):
    # TODO(unknown): check that 1-arg constructors are explicit.
    #                How to tell it's a constructor?
    #                (handled in CheckForNonStandardConstructs for now)
    # TODO(unknown): check that classes declare or disable copy/assign
    #                (level 1 error)
    pass

  # Check if people are using the verboten C basic types. The only exception
  # we regularly allow is "unsigned short port" for port.
  if Search(r'\bshort port\b', line):
    if not Search(r'\bunsigned short port\b', line):
      error(filename, linenum, 'runtime/int', 4,
            'Use "unsigned short" for ports, not "short"')
  else:
    match = Search(r'\b(short|long(?! +double)|long long)\b', line)
    if match:
      error(filename, linenum, 'runtime/int', 4,
            'Use int16/int64/etc, rather than the C type %s' % match.group(1))

  # Check if some verboten operator overloading is going on
  # TODO(unknown): catch out-of-line unary operator&:
  #   class X {};
  #   int operator&(const X& x) { return 42; }  // unary operator&
  # The trick is it's hard to tell apart from binary operator&:
  #   class Y { int operator&(const Y& x) { return 23; } }; // binary operator&
  if Search(r'\boperator\s*&\s*\(\s*\)', line):
    error(filename, linenum, 'runtime/operator', 4,
          'Unary operator& is dangerous. Do not use it.')

  # Check for suspicious usage of "if" like
  # } if (a == b) {
  if Search(r'\}\s*if\s*\(', line):
    error(filename, linenum, 'readability/braces', 4,
          'Did you mean "else if"? If not, start a new line for "if".')

  # Check for potential format string bugs like printf(foo).
  # We constrain the pattern not to pick things like DocidForPrintf(foo).
  # Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
  # TODO(unknown): Catch the following case. Need to change the calling
  # convention of the whole function to process multiple line to handle it.
  #   printf(
  #       boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);
  printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
  if printf_args:
    match = Match(r'([\w.\->()]+)$', printf_args)
    if match and match.group(1) != '__VA_ARGS__':
      function_name = re.search(r'\b((?:string)?printf)\s*\(',
                                line, re.I).group(1)
      error(filename, linenum, 'runtime/printf', 4,
            'Potential format string bug. Do %s("%%s", %s) instead.'
            % (function_name, match.group(1)))

  # Check for potential memset bugs like memset(buf, sizeof(buf), 0).
  match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
  if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
    error(filename, linenum, 'runtime/memset', 4,
          'Did you mean "memset(%s, 0, %s)"?'
          % (match.group(1), match.group(2)))

  if Search(r'\busing namespace\b', line):
    if Search(r'\bliterals\b', line):
      error(filename, linenum, 'build/namespaces_literals', 5,
            'Do not use namespace using-directives. '
            'Use using-declarations instead.')
    else:
      error(filename, linenum, 'build/namespaces', 5,
            'Do not use namespace using-directives. '
            'Use using-declarations instead.')

  # Detect variable-length arrays.
  match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
  if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
      match.group(3).find(']') == -1):
    # Split the size using space and arithmetic operators as delimiters.
    # If any of the resulting tokens are not compile time constants then
    # report the error.
    # BUGFIX: the last alternative used to be the literal '>>]' (stray ']'),
    # which could never match because group(3) is guaranteed above to contain
    # no ']'.  As a result '>>' was never treated as a delimiter (while '<<'
    # was), causing false VLA warnings for sizes like 'x>>2'.
    tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>', match.group(3))
    is_const = True
    skip_next = False
    for tok in tokens:
      if skip_next:
        skip_next = False
        continue

      if Search(r'sizeof\(.+\)', tok): continue
      if Search(r'arraysize\(\w+\)', tok): continue

      tok = tok.lstrip('(')
      tok = tok.rstrip(')')
      if not tok: continue
      if Match(r'\d+', tok): continue
      if Match(r'0[xX][0-9a-fA-F]+', tok): continue
      if Match(r'k[A-Z0-9]\w*', tok): continue
      if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
      if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
      # A catch all for tricky sizeof cases, including 'sizeof expression',
      # 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
      # requires skipping the next token because we split on ' ' and '*'.
      if tok.startswith('sizeof'):
        skip_next = True
        continue
      is_const = False
      break
    if not is_const:
      error(filename, linenum, 'runtime/arrays', 1,
            'Do not use variable-length arrays. Use an appropriately named '
            "('k' followed by CamelCase) compile-time constant for the size.")

  # Check for use of unnamed namespaces in header files. Registration
  # macros are typically OK, so we allow use of "namespace {" on lines
  # that end with backslashes.
  if (IsHeaderExtension(file_extension)
      and Search(r'\bnamespace\s*{', line)
      and line[-1] != '\\'):
    error(filename, linenum, 'build/namespaces_headers', 4,
          'Do not use unnamed namespaces in header files. See '
          'https://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
          ' for more information.')
5382
5383
def CheckGlobalStatic(filename, clean_lines, linenum, error):
  """Check for unsafe global or static objects.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Match two lines at a time to support multiline declarations
  if linenum + 1 < clean_lines.NumLines() and not Search(r'[;({]', line):
    line += clean_lines.elided[linenum + 1].strip()

  # Check for people declaring static/global STL strings at the top level.
  # This is dangerous because the C++ language does not guarantee that
  # globals with constructors are initialized before the first access, and
  # also because globals can be destroyed when some threads are still running.
  # TODO(unknown): Generalize this to also find static unique_ptr instances.
  # TODO(unknown): File bugs for clang-tidy to find these.
  # Groups: 1 = 'static'/'const' prefix, 2 = trailing ' const',
  # 3 = variable name, 4 = rest of the declaration.
  match = Match(
      r'((?:|static +)(?:|const +))(?::*std::)?string( +const)? +'
      r'([a-zA-Z0-9_:]+)\b(.*)',
      line)

  # Remove false positives:
  # - String pointers (as opposed to values).
  #    string *pointer
  #    const string *pointer
  #    string const *pointer
  #    string *const pointer
  #
  # - Functions and template specializations.
  #    string Function<Type>(...
  #    string Class<Type>::Method(...
  #
  # - Operators. These are matched separately because operator names
  #   cross non-word boundaries, and trying to match both operators
  #   and functions at the same time would decrease accuracy of
  #   matching identifiers.
  #    string Class::operator*()
  if (match and
      not Search(r'\bstring\b(\s+const)?\s*[\*\&]\s*(const\s+)?\w', line) and
      not Search(r'\boperator\W', line) and
      not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)*\s*\(([^"]|$)', match.group(4))):
    if Search(r'\bconst\b', line):
      error(filename, linenum, 'runtime/string', 4,
            'For a static/global string constant, use a C style string '
            'instead: "%schar%s %s[]".' %
            (match.group(1), match.group(2) or '', match.group(3)))
    else:
      error(filename, linenum, 'runtime/string', 4,
            'Static/global string variables are not permitted.')

  # Self-initialization: 'member_(member_)' or the CHECK_NOTNULL variant.
  # The backreference \1 forces the two names to be identical.
  if (Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line) or
      Search(r'\b([A-Za-z0-9_]*_)\(CHECK_NOTNULL\(\1\)\)', line)):
    error(filename, linenum, 'runtime/init', 4,
          'You seem to be initializing a member variable with itself.')
5443
5444
def CheckPrintf(filename, clean_lines, linenum, error):
  """Check for printf related issues.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # A literal size for snprintf is suspicious; a literal 0 is the standard
  # size-calculation idiom and therefore fine.
  snprintf_call = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
  if snprintf_call:
    size_arg = snprintf_call.group(2)
    if size_arg != '0':
      error(filename, linenum, 'runtime/printf', 3,
            'If you can, use sizeof(%s) instead of %s as the 2nd arg '
            'to snprintf.' % (snprintf_call.group(1), size_arg))

  # sprintf has no bounds checking at all; ban it outright.
  if Search(r'\bsprintf\s*\(', line):
    error(filename, linenum, 'runtime/printf', 5,
          'Never use sprintf. Use snprintf instead.')

  # strcpy/strcat are likewise unbounded.
  unbounded_copy = Search(r'\b(strcpy|strcat)\s*\(', line)
  if unbounded_copy:
    error(filename, linenum, 'runtime/printf', 4,
          'Almost always, snprintf is better than %s' %
          unbounded_copy.group(1))
5472
5473
def IsDerivedFunction(clean_lines, linenum):
  """Check if current line contains an inherited function.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
  Returns:
    True if current line contains a function with "override"
    virt-specifier.
  """
  # Walk backwards up to 10 lines to find the start of the enclosing
  # function declaration (the line that opens the parameter list).
  scan_limit = max(-1, linenum - 10)
  for candidate in xrange(linenum, scan_limit, -1):
    decl = Match(r'^([^()]*\w+)\(', clean_lines.elided[candidate])
    if not decl:
      continue
    # Found the opening parenthesis; look for "override" after the
    # matching closing parenthesis.
    joined_line, _, close_pos = CloseExpression(
        clean_lines, candidate, len(decl.group(1)))
    return (close_pos >= 0 and
            Search(r'\boverride\b', joined_line[close_pos:]))
  return False
5494
5495
def IsOutOfLineMethodDefinition(clean_lines, linenum):
  """Check if current line contains an out-of-line method definition.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
  Returns:
    True if current line contains an out-of-line method definition.
  """
  # Walk backwards up to 10 lines to find the start of the enclosing
  # function declaration.
  scan_limit = max(-1, linenum - 10)
  for candidate in xrange(linenum, scan_limit, -1):
    decl_line = clean_lines.elided[candidate]
    if not Match(r'^([^()]*\w+)\(', decl_line):
      continue
    # The declaration opens here; an out-of-line definition names the
    # function as Class::Method.
    return Match(r'^[^()]*\w+::\w+\(', decl_line) is not None
  return False
5510
5511
def IsInitializerList(clean_lines, linenum):
  """Check if current line is inside constructor initializer list.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
  Returns:
    True if current line appears to be inside constructor initializer
    list, False otherwise.
  """
  # Scan backwards from the current line looking for the colon that
  # starts an initializer list, stopping at anything that ends a
  # previous statement or scope.
  for lookback in xrange(linenum, 1, -1):
    candidate = clean_lines.elided[lookback]
    if lookback == linenum:
      # Strip a trailing "{" (start of the constructor body) so it is
      # not mistaken below for the end of a previous scope.
      body_open = Match(r'^(.*)\{\s*$', candidate)
      if body_open:
        candidate = body_open.group(1)

    if Search(r'\s:\s*\w+[({]', candidate):
      # A lone colon followed by an identifier and "(" or "{" tends to
      # start a constructor initializer list.  It could also be a
      # ternary operator, but those also tend to appear in initializer
      # lists rather than parameter lists.
      return True
    if Search(r'\}\s*,\s*$', candidate):
      # "}," is probably the end of a brace-initialized member in a
      # constructor initializer list.
      return True
    if Search(r'[{};]\s*$', candidate):
      # Hit one of the following before seeing the starting colon:
      #  - a closing brace or semicolon ending the previous function, or
      #  - an opening brace starting the current class or namespace.
      # Either way, we are not inside an initializer list.
      return False

  # Reached the beginning of the file without seeing the start of a
  # constructor initializer list.
  return False
5552
5553
def CheckForNonConstReference(filename, clean_lines, linenum,
                              nesting_state, error):
  """Check for non-const references.

  Separate from CheckLanguage since it scans backwards from current
  line, instead of scanning forward.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """
  # Do nothing if there is no '&' on current line.
  line = clean_lines.elided[linenum]
  if '&' not in line:
    return

  # If a function is inherited, current function doesn't have much of
  # a choice, so any non-const references should not be blamed on
  # derived function.
  if IsDerivedFunction(clean_lines, linenum):
    return

  # Don't warn on out-of-line method definitions, as we would warn on the
  # in-line declaration, if it isn't marked with 'override'.
  if IsOutOfLineMethodDefinition(clean_lines, linenum):
    return

  # Long type names may be broken across multiple lines, usually in one
  # of these forms:
  #   LongType
  #       ::LongTypeContinued &identifier
  #   LongType::
  #       LongTypeContinued &identifier
  #   LongType<
  #       ...>::LongTypeContinued &identifier
  #
  # If we detected a type split across two lines, join the previous
  # line to current line so that we can match const references
  # accordingly.
  #
  # Note that this only scans back one line, since scanning back
  # arbitrary number of lines would be expensive. If you have a type
  # that spans more than 2 lines, please use a typedef.
  if linenum > 1:
    previous = None
    if Match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line):
      # previous_line\n + ::current_line
      previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$',
                        clean_lines.elided[linenum - 1])
    elif Match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line):
      # previous_line::\n + current_line
      previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$',
                        clean_lines.elided[linenum - 1])
    if previous:
      line = previous.group(1) + line.lstrip()
    else:
      # Check for templated parameter that is split across multiple lines
      endpos = line.rfind('>')
      if endpos > -1:
        (_, startline, startpos) = ReverseCloseExpression(
            clean_lines, linenum, endpos)
        if startpos > -1 and startline < linenum:
          # Found the matching < on an earlier line, collect all
          # pieces up to current line.
          line = ''
          for i in xrange(startline, linenum + 1):
            line += clean_lines.elided[i].strip()

  # Check for non-const references in function parameters. A single '&' may
  # found in the following places:
  #   inside expression: binary & for bitwise AND
  #   inside expression: unary & for taking the address of something
  #   inside declarators: reference parameter
  # We will exclude the first two cases by checking that we are not inside a
  # function body, including one that was just introduced by a trailing '{'.
  # TODO(unknown): Doesn't account for 'catch(Exception& e)' [rare].
  if (nesting_state.previous_stack_top and
      not (isinstance(nesting_state.previous_stack_top, _ClassInfo) or
           isinstance(nesting_state.previous_stack_top, _NamespaceInfo))):
    # Not at toplevel, not within a class, and not within a namespace
    return

  # Avoid initializer lists. We only need to scan back from the
  # current line for something that starts with ':'.
  #
  # We don't need to check the current line, since the '&' would
  # appear inside the second set of parentheses on the current line as
  # opposed to the first set.
  if linenum > 0:
    for i in xrange(linenum - 1, max(0, linenum - 10), -1):
      previous_line = clean_lines.elided[i]
      if not Search(r'[),]\s*$', previous_line):
        break
      if Match(r'^\s*:\s+\S', previous_line):
        return

  # Avoid preprocessors
  if Search(r'\\\s*$', line):
    return

  # Avoid constructor initializer lists
  if IsInitializerList(clean_lines, linenum):
    return

  # We allow non-const references in a few standard places, like functions
  # called "swap()" or iostream operators like "<<" or ">>". Do not check
  # those function parameters.
  #
  # We also accept & in static_assert, which looks like a function but
  # it's actually a declaration expression.
  allowed_functions = (r'(?:[sS]wap(?:<\w:+>)?|'
                       r'operator\s*[<>][<>]|'
                       r'static_assert|COMPILE_ASSERT'
                       r')\s*\(')
  if Search(allowed_functions, line):
    return
  elif not Search(r'\S+\([^)]*$', line):
    # Don't see an allowed function on this line. Actually we
    # didn't see any function name on this line, so this is likely a
    # multi-line parameter list. Try a bit harder to catch this case.
    for i in xrange(2):
      if (linenum > i and
          Search(allowed_functions, clean_lines.elided[linenum - i - 1])):
        return

  decls = ReplaceAll(r'{[^}]*}', ' ', line)  # exclude function body
  for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls):
    # Warn only for reference parameters that are neither const
    # references nor stream references.
    if (not Match(_RE_PATTERN_CONST_REF_PARAM, parameter) and
        not Match(_RE_PATTERN_REF_STREAM_PARAM, parameter)):
      error(filename, linenum, 'runtime/references', 2,
            'Is this a non-const reference? '
            'If so, make const or use a pointer: ' +
            ReplaceAll(' *<', '<', parameter))
5691
5692
def CheckCasts(filename, clean_lines, linenum, error):
  """Various cast related checks.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Check to see if they're using an conversion function cast.
  # I just try to capture the most common basic types, though there are more.
  # Parameterless conversion functions, such as bool(), are allowed as they are
  # probably a member operator declaration or default constructor.
  # Groups: 1 = preceding 'new'/template-bracket context (or None),
  # 2 = the basic type name, 3 = everything from '(' onwards.
  match = Search(
      r'(\bnew\s+(?:const\s+)?|\S<\s*(?:const\s+)?)?\b'
      r'(int|float|double|bool|char|int32|uint32|int64|uint64)'
      r'(\([^)].*)', line)
  expecting_function = ExpectingFunctionArgs(clean_lines, linenum)
  if match and not expecting_function:
    matched_type = match.group(2)

    # matched_new_or_template is used to silence two false positives:
    # - New operators
    # - Template arguments with function types
    #
    # For template arguments, we match on types immediately following
    # an opening bracket without any spaces. This is a fast way to
    # silence the common case where the function type is the first
    # template argument. False negative with less-than comparison is
    # avoided because those operators are usually followed by a space.
    #
    #   function<double(double)> // bracket + no space = false positive
    #   value < double(42) // bracket + space = true positive
    matched_new_or_template = match.group(1)

    # Avoid arrays by looking for brackets that come after the closing
    # parenthesis.
    if Match(r'\([^()]+\)\s*\[', match.group(3)):
      return

    # Other things to ignore:
    # - Function pointers
    # - Casts to pointer types
    # - Placement new
    # - Alias declarations
    matched_funcptr = match.group(3)
    if (matched_new_or_template is None and
        not (matched_funcptr and
             (Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(',
                    matched_funcptr) or
              matched_funcptr.startswith('(*)'))) and
        not Match(r'\s*using\s+\S+\s*=\s*' + matched_type, line) and
        not Search(r'new\(\S+\)\s*' + matched_type, line)):
      error(filename, linenum, 'readability/casting', 4,
            'Using deprecated casting style. '
            'Use static_cast<%s>(...) instead' %
            matched_type)

  if not expecting_function:
    CheckCStyleCast(filename, clean_lines, linenum, 'static_cast',
                    r'\((int|float|double|bool|char|u?int(16|32|64)|size_t)\)', error)

  # This doesn't catch all cases. Consider (const char * const)"hello".
  #
  # (char *) "foo" should always be a const_cast (reinterpret_cast won't
  # compile).
  if CheckCStyleCast(filename, clean_lines, linenum, 'const_cast',
                     r'\((char\s?\*+\s?)\)\s*"', error):
    pass
  else:
    # Check pointer casts for other than string constants
    CheckCStyleCast(filename, clean_lines, linenum, 'reinterpret_cast',
                    r'\((\w+\s?\*+\s?)\)', error)

  # In addition, we look for people taking the address of a cast. This
  # is dangerous -- casts can assign to temporaries, so the pointer doesn't
  # point where you think.
  #
  # Some non-identifier character is required before the '&' for the
  # expression to be recognized as a cast. These are casts:
  # expression = &static_cast<int*>(temporary());
  # function(&(int*)(temporary()));
  #
  # This is not a cast:
  # reference_type&(int* function_param);
  match = Search(
      r'(?:[^\w]&\(([^)*][^)]*)\)[\w(])|'
      r'(?:[^\w]&(static|dynamic|down|reinterpret)_cast\b)', line)
  if match:
    # Try a better error message when the & is bound to something
    # dereferenced by the casted pointer, as opposed to the casted
    # pointer itself.
    parenthesis_error = False
    match = Match(r'^(.*&(?:static|dynamic|down|reinterpret)_cast\b)<', line)
    if match:
      # Walk past the template arguments and (if present) the cast's
      # parenthesized argument, then check what follows.
      _, y1, x1 = CloseExpression(clean_lines, linenum, len(match.group(1)))
      if x1 >= 0 and clean_lines.elided[y1][x1] == '(':
        _, y2, x2 = CloseExpression(clean_lines, y1, x1)
        if x2 >= 0:
          extended_line = clean_lines.elided[y2][x2:]
          if y2 < clean_lines.NumLines() - 1:
            extended_line += clean_lines.elided[y2 + 1]
          # '->' or '[' right after the cast means the & binds to a
          # dereferenced member/element, not the cast result itself.
          if Match(r'\s*(?:->|\[)', extended_line):
            parenthesis_error = True

    if parenthesis_error:
      error(filename, linenum, 'readability/casting', 4,
            ('Are you taking an address of something dereferenced '
             'from a cast? Wrapping the dereferenced expression in '
             'parentheses will make the binding more obvious'))
    else:
      error(filename, linenum, 'runtime/casting', 4,
            ('Are you taking an address of a cast? '
             'This is dangerous: could be a temp var. '
             'Take the address before doing the cast, rather than after'))
5810
5811
def CheckCStyleCast(filename, clean_lines, linenum, cast_type, pattern, error):
  """Checks for a C-style cast by looking for the pattern.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    cast_type: The string for the C++ cast to recommend. This is either
      reinterpret_cast, static_cast, or const_cast, depending.
    pattern: The regular expression used to find C-style casts.
    error: The function to call with any errors found.

  Returns:
    True if an error was emitted.
    False otherwise.
  """
  line = clean_lines.elided[linenum]
  match = Search(pattern, line)
  if not match:
    return False

  # Exclude lines with keywords that tend to look like casts
  context = line[0:match.start(1) - 1]
  if Match(r'.*\b(?:sizeof|alignof|alignas|[_A-Z][_A-Z0-9]*)\s*$', context):
    return False

  # Try expanding current context to see if we one level of
  # parentheses inside a macro.
  if linenum > 0:
    # Prepend up to 5 previous lines so an ALL_CAPS macro invocation that
    # opened on an earlier line is visible in the context.
    for i in xrange(linenum - 1, max(0, linenum - 5), -1):
      context = clean_lines.elided[i] + context
  if Match(r'.*\b[_A-Z][_A-Z0-9]*\s*\((?:\([^()]*\)|[^()])*$', context):
    return False

  # operator++(int) and operator--(int)
  if context.endswith(' operator++') or context.endswith(' operator--'):
    return False

  # A single unnamed argument for a function tends to look like old style cast.
  # If we see those, don't issue warnings for deprecated casts.
  remainder = line[match.end(0):]
  if Match(r'^\s*(?:;|const\b|throw\b|final\b|override\b|[=>{),]|->)',
           remainder):
    return False

  # At this point, all that should be left is actual casts.
  error(filename, linenum, 'readability/casting', 4,
        'Using C-style cast. Use %s<%s>(...) instead' %
        (cast_type, match.group(1)))

  return True
5863
5864
def ExpectingFunctionArgs(clean_lines, linenum):
  """Checks whether where function type arguments are expected.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.

  Returns:
    True if the line at 'linenum' is inside something that expects arguments
    of function types.
  """
  current = clean_lines.elided[linenum]
  # MOCK_METHOD* starting on this very line.
  if Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', current):
    return True
  if linenum < 2:
    return False
  prev_line = clean_lines.elided[linenum - 1]
  two_back = clean_lines.elided[linenum - 2]
  # MOCK_METHOD* opened one or two lines earlier and still unterminated.
  if Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$', prev_line):
    return True
  if Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$', two_back):
    return True
  # std::function< opened on the previous line.
  return bool(Search(r'\bstd::m?function\s*\<\s*$', prev_line))
5885
5886
# ---- include-what-you-use lookup tables ----
# Each entry maps a standard header to the template names whose presence in a
# source line implies that header must be included.
_HEADERS_CONTAINING_TEMPLATES = (
    ('<deque>', ('deque',)),
    ('<functional>', ('unary_function', 'binary_function',
                      'plus', 'minus', 'multiplies', 'divides', 'modulus',
                      'negate',
                      'equal_to', 'not_equal_to', 'greater', 'less',
                      'greater_equal', 'less_equal',
                      'logical_and', 'logical_or', 'logical_not',
                      'unary_negate', 'not1', 'binary_negate', 'not2',
                      'bind1st', 'bind2nd',
                      'pointer_to_unary_function',
                      'pointer_to_binary_function',
                      'ptr_fun',
                      'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
                      'mem_fun_ref_t',
                      'const_mem_fun_t', 'const_mem_fun1_t',
                      'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
                      'mem_fun_ref',
                     )),
    ('<limits>', ('numeric_limits',)),
    ('<list>', ('list',)),
    ('<map>', ('multimap',)),
    ('<memory>', ('allocator', 'make_shared', 'make_unique', 'shared_ptr',
                  'unique_ptr', 'weak_ptr')),
    ('<queue>', ('queue', 'priority_queue',)),
    ('<set>', ('multiset',)),
    ('<stack>', ('stack',)),
    ('<string>', ('char_traits', 'basic_string',)),
    ('<tuple>', ('tuple',)),
    ('<unordered_map>', ('unordered_map', 'unordered_multimap')),
    ('<unordered_set>', ('unordered_set', 'unordered_multiset')),
    ('<utility>', ('pair',)),
    ('<vector>', ('vector',)),

    # gcc extensions.
    # Note: std::hash is their hash, ::hash is our hash
    ('<hash_map>', ('hash_map', 'hash_multimap',)),
    ('<hash_set>', ('hash_set', 'hash_multiset',)),
    ('<slist>', ('slist',)),
    )

# Names that usually come from the listed header but may be declared
# elsewhere too, so a match here is only a best-effort suggestion.
_HEADERS_MAYBE_TEMPLATES = (
    ('<algorithm>', ('copy', 'max', 'min', 'min_element', 'sort',
                     'transform',
                    )),
    ('<utility>', ('forward', 'make_pair', 'move', 'swap')),
    )

# Matches uses of the type name 'string' (a non-templatized STL type).
_RE_PATTERN_STRING = re.compile(r'\bstring\b')

# Precompiled (pattern, template-name, header) triples built from the
# tables above; consumed by CheckForIncludeWhatYouUse.
_re_pattern_headers_maybe_templates = []
for _header, _templates in _HEADERS_MAYBE_TEMPLATES:
  for _template in _templates:
    # Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
    # 'type::max()'.
    _re_pattern_headers_maybe_templates.append(
        (re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
         _template,
         _header))
# Match set<type>, but not foo->set<type>, foo.set<type>
_re_pattern_headers_maybe_templates.append(
    (re.compile(r'[^>.]\bset\s*\<'),
     'set<>',
     '<set>'))
# Match 'map<type> var' and 'std::map<type>(...)', but not 'map<type>(...)''
_re_pattern_headers_maybe_templates.append(
    (re.compile(r'(std\b::\bmap\s*\<)|(^(std\b::\b)map\b\(\s*\<)'),
     'map<>',
     '<map>'))

# Other scripts may reach in and modify this pattern.
_re_pattern_templates = []
for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
  for _template in _templates:
    _re_pattern_templates.append(
        (re.compile(r'(\<|\b)' + _template + r'\s*\<'),
         _template + '<>',
         _header))
5965
5966
def FilesBelongToSameModule(filename_cc, filename_h):
  """Check if these two filenames belong to the same module.

  The concept of a 'module' here is a as follows:
  foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the
  same 'module' if they are in the same directory.
  some/path/public/xyzzy and some/path/internal/xyzzy are also considered
  to belong to the same module here.

  If the filename_cc contains a longer path than the filename_h, for example,
  '/absolute/path/to/base/sysinfo.cc', and this file would include
  'base/sysinfo.h', this function also produces the prefix needed to open the
  header. This is used by the caller of this function to more robustly open the
  header file. We don't have access to the real include paths in this context,
  so we need this guesswork here.

  Known bugs: tools/base/bar.cc and base/bar.h belong to the same module
  according to this implementation. Because of this, this function gives
  some false positives. This should be sufficiently rare in practice.

  Args:
    filename_cc: is the path for the source (e.g. .cc) file
    filename_h: is the path for the header path

  Returns:
    Tuple with a bool and a string:
    bool: True if filename_cc and filename_h belong to the same module.
    string: the additional prefix needed to open the header file.
  """
  fileinfo_cc = FileInfo(filename_cc)
  if fileinfo_cc.Extension().lstrip('.') not in GetNonHeaderExtensions():
    return (False, '')

  fileinfo_h = FileInfo(filename_h)
  if not IsHeaderExtension(fileinfo_h.Extension().lstrip('.')):
    return (False, '')

  # Reduce the source path to a module stem: strip extension, any test
  # suffix (foo_test -> foo), and public/internal path components.
  stem_cc = filename_cc[:-(len(fileinfo_cc.Extension()))]
  matched_test_suffix = Search(_TEST_FILE_SUFFIX, fileinfo_cc.BaseName())
  if matched_test_suffix:
    stem_cc = stem_cc[:-len(matched_test_suffix.group(1))]
  stem_cc = stem_cc.replace('/public/', '/').replace('/internal/', '/')

  # Same reduction for the header path, plus the -inl suffix.
  stem_h = filename_h[:-(len(fileinfo_h.Extension()))]
  if stem_h.endswith('-inl'):
    stem_h = stem_h[:-len('-inl')]
  stem_h = stem_h.replace('/public/', '/').replace('/internal/', '/')

  # Matching stems mean same module; the leftover source prefix is the
  # path fragment the caller needs to open the header.
  if stem_cc.endswith(stem_h):
    return (True, stem_cc[:-len(stem_h)])
  return (False, '')
6023
6024
def UpdateIncludeState(filename, include_dict, io=codecs):
  """Fill up the include_dict with new includes found from the file.

  Args:
    filename: the name of the header to read.
    include_dict: a dictionary in which the headers are inserted.
    io: The io factory to use to read the file. Provided for testability.

  Returns:
    True if a header was successfully added. False otherwise.
  """
  try:
    with io.open(filename, 'r', 'utf8', 'replace') as header_file:
      # Record the first line on which each include appears; setdefault
      # keeps the earliest occurrence.
      for line_number, raw_line in enumerate(header_file, 1):
        include_match = _RE_PATTERN_INCLUDE.search(CleanseComments(raw_line))
        if include_match:
          include_dict.setdefault(include_match.group(2), line_number)
    return True
  except IOError:
    return False
6050
6051
6052
def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
                              io=codecs):
  """Reports for missing stl includes.

  This function will output warnings to make sure you are including the headers
  necessary for the stl containers and functions that you use. We only give one
  reason to include a header. For example, if you use both equal_to<> and
  less<> in a .h file, only one (the latter in the file) of these will be
  reported as a reason to include the <functional>.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    include_state: An _IncludeState instance.
    error: The function to call with any errors found.
    io: The IO factory to use to read the header file. Provided for unittest
        injection.
  """
  required = {}  # A map of header name to linenumber and the template entity.
                 # Example of required: { '<functional>': (1219, 'less<>') }

  # Pass 1: scan every elided line for template usage and record which
  # headers it implies.
  for linenum in xrange(clean_lines.NumLines()):
    line = clean_lines.elided[linenum]
    # Skip blank lines and preprocessor directives.
    if not line or line[0] == '#':
      continue

    # String is special -- it is a non-templatized type in STL.
    matched = _RE_PATTERN_STRING.search(line)
    if matched:
      # Don't warn about strings in non-STL namespaces:
      # (We check only the first match per line; good enough.)
      prefix = line[:matched.start()]
      if prefix.endswith('std::') or not prefix.endswith('::'):
        required['<string>'] = (linenum, 'string')

    for pattern, template, header in _re_pattern_headers_maybe_templates:
      if pattern.search(line):
        required[header] = (linenum, template)

    # The following function is just a speed up, no semantics are changed.
    if not '<' in line:  # Reduces the cpu time usage by skipping lines.
      continue

    for pattern, template, header in _re_pattern_templates:
      matched = pattern.search(line)
      if matched:
        # Don't warn about IWYU in non-STL namespaces:
        # (We check only the first match per line; good enough.)
        prefix = line[:matched.start()]
        if prefix.endswith('std::') or not prefix.endswith('::'):
          required[header] = (linenum, template)

  # The policy is that if you #include something in foo.h you don't need to
  # include it again in foo.cc. Here, we will look at possible includes.
  # Let's flatten the include_state include_list and copy it into a dictionary.
  include_dict = dict([item for sublist in include_state.include_list
                       for item in sublist])

  # Did we find the header for this file (if any) and successfully load it?
  header_found = False

  # Use the absolute path so that matching works properly.
  abs_filename = FileInfo(filename).FullName()

  # For Emacs's flymake.
  # If cpplint is invoked from Emacs's flymake, a temporary file is generated
  # by flymake and that file name might end with '_flymake.cc'. In that case,
  # restore original file name here so that the corresponding header file can be
  # found.
  # e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h'
  # instead of 'foo_flymake.h'
  abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename)

  # include_dict is modified during iteration, so we iterate over a copy of
  # the keys.
  header_keys = list(include_dict.keys())
  for header in header_keys:
    (same_module, common_path) = FilesBelongToSameModule(abs_filename, header)
    fullpath = common_path + header
    if same_module and UpdateIncludeState(fullpath, include_dict, io):
      header_found = True

  # If we can't find the header file for a .cc, assume it's because we don't
  # know where to look. In that case we'll give up as we're not sure they
  # didn't include it in the .h file.
  # TODO(unknown): Do a better job of finding .h files so we are confident that
  # not having the .h file means there isn't one.
  if not header_found:
    for extension in GetNonHeaderExtensions():
      if filename.endswith('.' + extension):
        return

  # All the lines have been processed, report the errors found.
  # Sorting by (linenum, template) keeps the output deterministic.
  for required_header_unstripped in sorted(required, key=required.__getitem__):
    template = required[required_header_unstripped][1]
    if required_header_unstripped.strip('<>"') not in include_dict:
      error(filename, required[required_header_unstripped][0],
            'build/include_what_you_use', 4,
            'Add #include ' + required_header_unstripped + ' for ' + template)
6152
6153
# Matches make_pair calls that spell out template arguments explicitly,
# e.g. 'make_pair<int, int>(...)'.
_RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r'\bmake_pair\s*<')
6155
6156
def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):
  """Check that make_pair's template arguments are deduced.

  G++ 4.6 in C++11 mode fails badly if make_pair's template arguments are
  specified explicitly, and such use isn't intended in any case.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  if _RE_PATTERN_EXPLICIT_MAKEPAIR.search(clean_lines.elided[linenum]):
    error(filename, linenum, 'build/explicit_make_pair',
          4,  # 4 = high confidence
          'For C++11-compatibility, omit template arguments from make_pair'
          ' OR use pair directly OR if appropriate, construct a pair directly')
6176
6177
def CheckRedundantVirtual(filename, clean_lines, linenum, error):
  """Check if line contains a redundant "virtual" function-specifier.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Look for "virtual" on current line.
  line = clean_lines.elided[linenum]
  virtual = Match(r'^(.*)(\bvirtual\b)(.*)$', line)
  if not virtual: return

  # Ignore "virtual" keywords that are near access-specifiers. These
  # are only used in class base-specifier and do not apply to member
  # functions.
  if (Search(r'\b(public|protected|private)\s+$', virtual.group(1)) or
      Match(r'^\s+(public|protected|private)\b', virtual.group(3))):
    return

  # Ignore the "virtual" keyword from virtual base classes. Usually
  # there is a colon on the same line in these cases (virtual base
  # classes are rare in google3 because multiple inheritance is rare).
  if Match(r'^.*[^:]:[^:].*$', line): return

  # Look for the next opening parenthesis. This is the start of the
  # parameter list (possibly on the next line shortly after virtual).
  # TODO(unknown): doesn't work if there are virtual functions with
  # decltype() or other things that use parentheses, but csearch suggests
  # that this is rare.
  end_col = -1
  end_line = -1
  start_col = len(virtual.group(2))
  # Scan at most 3 lines starting at "virtual" for the '(' that opens the
  # parameter list; start_col is reset to 0 after the first line.
  for start_line in xrange(linenum, min(linenum + 3, clean_lines.NumLines())):
    line = clean_lines.elided[start_line][start_col:]
    parameter_list = Match(r'^([^(]*)\(', line)
    if parameter_list:
      # Match parentheses to find the end of the parameter list
      (_, end_line, end_col) = CloseExpression(
          clean_lines, start_line, start_col + len(parameter_list.group(1)))
      break
    start_col = 0

  if end_col < 0:
    return  # Couldn't find end of parameter list, give up

  # Look for "override" or "final" after the parameter list
  # (possibly on the next few lines).
  for i in xrange(end_line, min(end_line + 3, clean_lines.NumLines())):
    line = clean_lines.elided[i][end_col:]
    match = Search(r'\b(override|final)\b', line)
    if match:
      error(filename, linenum, 'readability/inheritance', 4,
            ('"virtual" is redundant since function is '
             'already declared as "%s"' % match.group(1)))

    # Set end_col to check whole lines after we are done with the
    # first line.
    end_col = 0
    # Stop at the first line that ends the declaration (non-word char at
    # end of line).
    if Search(r'[^\w]\s*$', line):
      break
6240
6241
def CheckRedundantOverrideOrFinal(filename, clean_lines, linenum, error):
  """Check if line contains a redundant "override" or "final" virt-specifier.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Locate the closing parenthesis that ends the declarator; the
  # virt-specifiers can only appear after it. Without one nearby we would
  # produce false positives, so bail out.
  line = clean_lines.elided[linenum]
  declarator_end = line.rfind(')')
  if declarator_end >= 0:
    fragment = line[declarator_end:]
  elif linenum > 1 and clean_lines.elided[linenum - 1].rfind(')') >= 0:
    # Declarator closed on the previous line; the whole current line is
    # eligible to carry specifiers.
    fragment = line
  else:
    return

  # "final" already implies "override"; flag the combination.
  if Search(r'\boverride\b', fragment) and Search(r'\bfinal\b', fragment):
    error(filename, linenum, 'readability/inheritance', 4,
          ('"override" is redundant since function is '
           'already declared as "final"'))
6269
6270
6271
6272
# Returns true if we are at a new block, and it is directly
# inside of a namespace.
def IsBlockInNameSpace(nesting_state, is_forward_declaration):
  """Checks that the new block is directly in a namespace.

  Args:
    nesting_state: The _NestingState object that contains info about our state.
    is_forward_declaration: If the class is a forward declared class.
  Returns:
    Whether or not the new block is directly in a namespace.
  """
  stack = nesting_state.stack
  if is_forward_declaration:
    # A forward declaration has no block of its own; it only needs the
    # innermost scope to be a namespace.
    return len(stack) >= 1 and isinstance(stack[-1], _NamespaceInfo)

  return (len(stack) > 1 and
          stack[-1].check_namespace_indentation and
          isinstance(stack[-2], _NamespaceInfo))
6292
6293
def ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
                                    raw_lines_no_comments, linenum):
  """This method determines if we should apply our namespace indentation check.

  Args:
    nesting_state: The current nesting state.
    is_namespace_indent_item: If we just put a new class on the stack, True.
      If the top of the stack is not a class, or we did not recently
      add the class, False.
    raw_lines_no_comments: The lines without the comments.
    linenum: The current line number we are processing.

  Returns:
    True if we should apply our namespace indentation check. Currently, it
    only works for classes and namespaces inside of a namespace.
  """
  is_forward_declaration = IsForwardClassDeclaration(raw_lines_no_comments,
                                                     linenum)

  # Only newly-pushed items and forward declarations are candidates.
  if not is_namespace_indent_item and not is_forward_declaration:
    return False

  # If we are in a macro, we do not want to check the namespace indentation.
  if IsMacroDefinition(raw_lines_no_comments, linenum):
    return False

  return IsBlockInNameSpace(nesting_state, is_forward_declaration)
6322
6323
# Call this method if the line is directly inside of a namespace.
# If the line above is blank (excluding comments) or the start of
# an inner namespace, it cannot be indented.
def CheckItemIndentationInNamespace(filename, raw_lines_no_comments, linenum,
                                    error):
  """Reports an error if the line at 'linenum' is indented."""
  indented = Match(r'^\s+', raw_lines_no_comments[linenum])
  if indented:
    error(filename, linenum, 'runtime/indentation_namespace', 4,
          'Do not indent within a namespace')
6333
6334
def ProcessLine(filename, file_extension, clean_lines, line,
                include_state, function_state, nesting_state, error,
                extra_check_functions=None):
  """Processes a single line in the file.

  Args:
    filename: Filename of the file that is being processed.
    file_extension: The extension (dot not included) of the file.
    clean_lines: An array of strings, each representing a line of the file,
                 with comments stripped.
    line: Number of line being processed.
    include_state: An _IncludeState instance in which the headers are inserted.
    function_state: A _FunctionState instance which counts function lines, etc.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: A callable to which errors are reported, which takes 4 arguments:
           filename, line number, error level, and message
    extra_check_functions: An array of additional check functions that will be
                           run on each source line. Each function takes 4
                           arguments: filename, clean_lines, line, error
  """
  raw_lines = clean_lines.raw_lines
  # NOLINT suppressions are parsed first so that they apply to every check
  # below, and the nesting state is updated before any check reads it.
  ParseNolintSuppressions(filename, raw_lines[line], line, error)
  nesting_state.Update(filename, clean_lines, line, error)
  CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
                               error)
  # No style checks apply inside inline assembly blocks.
  if nesting_state.InAsmBlock(): return
  CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
  CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
  CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error)
  CheckLanguage(filename, clean_lines, line, file_extension, include_state,
                nesting_state, error)
  CheckForNonConstReference(filename, clean_lines, line, nesting_state, error)
  CheckForNonStandardConstructs(filename, clean_lines, line,
                                nesting_state, error)
  CheckVlogArguments(filename, clean_lines, line, error)
  CheckPosixThreading(filename, clean_lines, line, error)
  CheckInvalidIncrement(filename, clean_lines, line, error)
  CheckMakePairUsesDeduction(filename, clean_lines, line, error)
  CheckRedundantVirtual(filename, clean_lines, line, error)
  CheckRedundantOverrideOrFinal(filename, clean_lines, line, error)
  # User-supplied checks run last, after all built-in checks.
  if extra_check_functions:
    for check_fn in extra_check_functions:
      check_fn(filename, clean_lines, line, error)
6379
def FlagCxx11Features(filename, clean_lines, linenum, error):
  """Flag those c++11 features that we only allow in certain places.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line)
  if include:
    header = include.group(1)
    # Flag unapproved C++ TR1 headers.
    if header.startswith('tr1/'):
      error(filename, linenum, 'build/c++tr1', 5,
            ('C++ TR1 headers such as <%s> are unapproved.') % header)
    # Flag unapproved C++11 headers.
    if header in ('cfenv',
                  'condition_variable',
                  'fenv.h',
                  'future',
                  'mutex',
                  'thread',
                  'chrono',
                  'ratio',
                  'regex',
                  'system_error',
                 ):
      error(filename, linenum, 'build/c++11', 5,
            ('<%s> is an unapproved C++11 header.') % header)

  # The only place where we need to worry about C++11 keywords and library
  # features in preprocessor directives is in macro definitions.
  if Match(r'\s*#', line) and not Match(r'\s*#\s*define\b', line): return

  # These are classes and free functions. The classes are always
  # mentioned as std::*, but we only catch the free functions if
  # they're not found by ADL. They're alphabetical by header.
  for top_name in (
      # type_traits
      'alignment_of',
      'aligned_union',
      ):
    if Search(r'\bstd::%s\b' % top_name, line):
      error(filename, linenum, 'build/c++11', 5,
            ('std::%s is an unapproved C++11 class or function. Send c-style '
             'an example of where it would make your code more readable, and '
             'they may let you use it.') % top_name)
6430
6431
def FlagCxx14Features(filename, clean_lines, linenum, error):
  """Flag those C++14 features that we restrict.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]',
                  clean_lines.elided[linenum])
  if not include:
    return

  # Flag unapproved C++14 headers.
  if include.group(1) in ('scoped_allocator', 'shared_mutex'):
    error(filename, linenum, 'build/c++14', 5,
          ('<%s> is an unapproved C++14 header.') % include.group(1))
6449
6450
def ProcessFileData(filename, file_extension, lines, error,
                    extra_check_functions=None):
  """Performs lint checks and reports any errors to the given error function.

  Args:
    filename: Filename of the file that is being processed.
    file_extension: The extension (dot not included) of the file.
    lines: An array of strings, each representing a line of the file, with the
           last element being empty if the file is terminated with a newline.
    error: A callable to which errors are reported, which takes 4 arguments:
           filename, line number, error level, and message
    extra_check_functions: An array of additional check functions that will be
                           run on each source line. Each function takes 4
                           arguments: filename, clean_lines, line, error
  """
  # Pad with marker lines so line numbers and list indices both start at 1.
  lines = (['// marker so line numbers and indices both start at 1'] + lines +
           ['// marker so line numbers end in a known way'])

  include_state = _IncludeState()
  function_state = _FunctionState()
  nesting_state = NestingState()

  ResetNolintSuppressions()

  # File-level preprocessing and checks, in order: copyright, global
  # suppressions, comment removal, then line cleansing.
  CheckForCopyright(filename, lines, error)
  ProcessGlobalSuppresions(lines)
  RemoveMultiLineComments(filename, lines, error)
  clean_lines = CleansedLines(lines)

  if IsHeaderExtension(file_extension):
    CheckForHeaderGuard(filename, clean_lines, error)

  # Per-line checks over the cleansed lines.
  for line in xrange(clean_lines.NumLines()):
    ProcessLine(filename, file_extension, clean_lines, line,
                include_state, function_state, nesting_state, error,
                extra_check_functions)
    FlagCxx11Features(filename, clean_lines, line, error)
  nesting_state.CheckCompletedBlocks(filename, error)

  CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)

  # Check that the .cc file has included its header if it exists.
  if _IsSourceExtension(file_extension):
    CheckHeaderFileIncluded(filename, include_state, error)

  # We check here rather than inside ProcessLine so that we see raw
  # lines rather than "cleaned" lines.
  CheckForBadCharacters(filename, lines, error)

  CheckForNewlineAtEOF(filename, lines, error)
6501
def ProcessConfigOverrides(filename):
  """ Loads the configuration files and processes the config overrides.

  Args:
    filename: The name of the file being processed by the linter.

  Returns:
    False if the current |filename| should not be processed further.
  """

  abs_filename = os.path.abspath(filename)
  cfg_filters = []
  keep_looking = True
  # Walk from the file's directory up toward the filesystem root, reading a
  # CPPLINT.cfg at each level until one says 'set noparent' (or we hit root).
  while keep_looking:
    abs_path, base_name = os.path.split(abs_filename)
    if not base_name:
      break  # Reached the root directory.

    cfg_file = os.path.join(abs_path, "CPPLINT.cfg")
    abs_filename = abs_path
    if not os.path.isfile(cfg_file):
      continue

    try:
      with open(cfg_file) as file_handle:
        for line in file_handle:
          line, _, _ = line.partition('#')  # Remove comments.
          if not line.strip():
            continue

          # Config lines are 'name=value' pairs ('set noparent' has no value).
          name, _, val = line.partition('=')
          name = name.strip()
          val = val.strip()
          if name == 'set noparent':
            keep_looking = False
          elif name == 'filter':
            cfg_filters.append(val)
          elif name == 'exclude_files':
            # When matching exclude_files pattern, use the base_name of
            # the current file name or the directory name we are processing.
            # For example, if we are checking for lint errors in /foo/bar/baz.cc
            # and we found the .cfg file at /foo/CPPLINT.cfg, then the config
            # file's "exclude_files" filter is meant to be checked against "bar"
            # and not "baz" nor "bar/baz.cc".
            if base_name:
              pattern = re.compile(val)
              if pattern.match(base_name):
                if _cpplint_state.quiet:
                  # Suppress "Ignoring file" warning when using --quiet.
                  return False
                _cpplint_state.PrintInfo('Ignoring "%s": file excluded by "%s". '
                                 'File path component "%s" matches '
                                 'pattern "%s"\n' %
                                 (filename, cfg_file, base_name, val))
                return False
          elif name == 'linelength':
            global _line_length
            try:
              _line_length = int(val)
            except ValueError:
              _cpplint_state.PrintError('Line length must be numeric.')
          elif name == 'extensions':
            ProcessExtensionsOption(val)
          elif name == 'root':
            global _root
            # root directories are specified relative to CPPLINT.cfg dir.
            _root = os.path.join(os.path.dirname(cfg_file), val)
          elif name == 'headers':
            ProcessHppHeadersOption(val)
          elif name == 'includeorder':
            ProcessIncludeOrderOption(val)
          else:
            _cpplint_state.PrintError(
                'Invalid configuration option (%s) in file %s\n' %
                (name, cfg_file))

    except IOError:
      # An unreadable config file ends the upward search.
      _cpplint_state.PrintError(
          "Skipping config file '%s': Can't open for reading\n" % cfg_file)
      keep_looking = False

  # Apply all the accumulated filters in reverse order (top-level directory
  # config options having the least priority).
  for cfg_filter in reversed(cfg_filters):
    _AddFilters(cfg_filter)

  return True
6589
6590
def ProcessFile(filename, vlevel, extra_check_functions=None):
  """Does google-lint on a single file.

  Args:
    filename: The name of the file to parse.

    vlevel: The level of errors to report.  Every error of confidence
    >= verbose_level will be reported.  0 is a good default.

    extra_check_functions: An array of additional check functions that will be
                           run on each source line. Each function takes 4
                           arguments: filename, clean_lines, line, error
  """

  _SetVerboseLevel(vlevel)
  # Filters are backed up so per-file CPPLINT.cfg overrides don't leak into
  # the next file; restored on every exit path below.
  _BackupFilters()
  old_errors = _cpplint_state.error_count

  if not ProcessConfigOverrides(filename):
    _RestoreFilters()
    return

  lf_lines = []
  crlf_lines = []
  try:
    # Support the UNIX convention of using "-" for stdin.  Note that
    # we are not opening the file with universal newline support
    # (which codecs doesn't support anyway), so the resulting lines do
    # contain trailing '\r' characters if we are reading a file that
    # has CRLF endings.
    # If after the split a trailing '\r' is present, it is removed
    # below.
    if filename == '-':
      lines = codecs.StreamReaderWriter(sys.stdin,
                                        codecs.getreader('utf8'),
                                        codecs.getwriter('utf8'),
                                        'replace').read().split('\n')
    else:
      with codecs.open(filename, 'r', 'utf8', 'replace') as target_file:
        lines = target_file.read().split('\n')

    # Remove trailing '\r'.
    # The -1 accounts for the extra trailing blank line we get from split()
    for linenum in range(len(lines) - 1):
      if lines[linenum].endswith('\r'):
        lines[linenum] = lines[linenum].rstrip('\r')
        crlf_lines.append(linenum + 1)
      else:
        lf_lines.append(linenum + 1)

  except IOError:
    _cpplint_state.PrintError(
        "Skipping input '%s': Can't open for reading\n" % filename)
    _RestoreFilters()
    return

  # Note, if no dot is found, this will give the entire filename as the ext.
  file_extension = filename[filename.rfind('.') + 1:]

  # When reading from stdin, the extension is unknown, so no cpplint tests
  # should rely on the extension.
  if filename != '-' and file_extension not in GetAllExtensions():
    _cpplint_state.PrintError('Ignoring %s; not a valid file name '
                              '(%s)\n' % (filename, ', '.join(GetAllExtensions())))
  else:
    ProcessFileData(filename, file_extension, lines, Error,
                    extra_check_functions)

    # If end-of-line sequences are a mix of LF and CR-LF, issue
    # warnings on the lines with CR.
    #
    # Don't issue any warnings if all lines are uniformly LF or CR-LF,
    # since critique can handle these just fine, and the style guide
    # doesn't dictate a particular end of line sequence.
    #
    # We can't depend on os.linesep to determine what the desired
    # end-of-line sequence should be, since that will return the
    # server-side end-of-line sequence.
    if lf_lines and crlf_lines:
      # Warn on every line with CR.  An alternative approach might be to
      # check whether the file is mostly CRLF or just LF, and warn on the
      # minority, we bias toward LF here since most tools prefer LF.
      for linenum in crlf_lines:
        Error(filename, linenum, 'whitespace/newline', 1,
              'Unexpected \\r (^M) found; better to use only \\n')

  # Suppress printing anything if --quiet was passed unless the error
  # count has increased after processing this file.
  if not _cpplint_state.quiet or old_errors != _cpplint_state.error_count:
    _cpplint_state.PrintInfo('Done processing %s\n' % filename)
  _RestoreFilters()
6682
6683
def PrintUsage(message):
  """Prints a brief usage string and exits, optionally with an error message.

  Exits with a non-zero status (and the message) when a message is given,
  otherwise exits successfully after printing usage.

  Args:
    message: The optional error message.
  """
  # Hoist the extension lists: the original called GetAllExtensions() twice
  # and wrapped the results in a redundant list() before sorting.
  all_exts = sorted(GetAllExtensions())
  header_exts = sorted(GetHeaderExtensions())
  sys.stderr.write(_USAGE % (all_exts, ','.join(all_exts),
                             header_exts, ','.join(header_exts)))

  if message:
    sys.exit('\nFATAL ERROR: ' + message)
  else:
    sys.exit(0)
6699
def PrintVersion():
  """Prints cpplint fork, cpplint, and Python version info, then exits."""
  sys.stdout.writelines([
      'Cpplint fork (https://github.com/cpplint/cpplint)\n',
      'cpplint ' + __VERSION__ + '\n',
      'Python ' + sys.version + '\n',
  ])
  sys.exit(0)
6705
def PrintCategories():
  """Prints a list of all the error-categories used by error messages.

  These are the categories used to filter messages via --filter.
  """
  # Emit one line per category; the byte stream matches the joined form.
  for cat in _ERROR_CATEGORIES:
    sys.stderr.write(' %s\n' % cat)
  sys.exit(0)
6713
6714
def ParseArguments(args):
  """Parses the command line arguments.

  This may set the output format and verbosity level as side-effects, and
  several flags are recorded in module-level globals (_root, _repository,
  _line_length, _excludes).  --help, --version, an empty --filter, and any
  invalid option value all exit the process via PrintUsage/PrintVersion/
  PrintCategories.

  Args:
    args: The command line arguments.

  Returns:
    The sorted list of filenames to lint.
  """
  try:
    (opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
                                                 'v=',
                                                 'version',
                                                 'counting=',
                                                 'filter=',
                                                 'root=',
                                                 'repository=',
                                                 'linelength=',
                                                 'extensions=',
                                                 'exclude=',
                                                 'recursive',
                                                 'headers=',
                                                 'includeorder=',
                                                 'quiet'])
  except getopt.GetoptError:
    PrintUsage('Invalid arguments.')

  # Start from the current state so options only override what they mention.
  verbosity = _VerboseLevel()
  output_format = _OutputFormat()
  filters = ''
  quiet = _Quiet()
  counting_style = ''
  recursive = False

  for (opt, val) in opts:
    if opt == '--help':
      PrintUsage(None)
    # Consistent elif chain (the original restarted with a bare `if` here).
    elif opt == '--version':
      PrintVersion()
    elif opt == '--output':
      if val not in ('emacs', 'vs7', 'eclipse', 'junit', 'sed', 'gsed'):
        # Message fix: previously read "eclipse sed" (missing comma).
        PrintUsage('The only allowed output formats are emacs, vs7, eclipse, '
                   'sed, gsed and junit.')
      output_format = val
    elif opt == '--quiet':
      quiet = True
    elif opt in ('--verbose', '--v'):
      verbosity = int(val)
    elif opt == '--filter':
      filters = val
      if not filters:
        # A bare "--filter=" asks for the category list and exits.
        PrintCategories()
    elif opt == '--counting':
      if val not in ('total', 'toplevel', 'detailed'):
        PrintUsage('Valid counting options are total, toplevel, and detailed')
      counting_style = val
    elif opt == '--root':
      global _root
      _root = val
    elif opt == '--repository':
      global _repository
      _repository = val
    elif opt == '--linelength':
      global _line_length
      try:
        _line_length = int(val)
      except ValueError:
        PrintUsage('Line length must be digits.')
    elif opt == '--exclude':
      global _excludes
      if not _excludes:
        _excludes = set()
      _excludes.update(glob.glob(val))
    elif opt == '--extensions':
      ProcessExtensionsOption(val)
    elif opt == '--headers':
      ProcessHppHeadersOption(val)
    elif opt == '--recursive':
      recursive = True
    elif opt == '--includeorder':
      ProcessIncludeOrderOption(val)

  if not filenames:
    PrintUsage('No files were specified.')

  if recursive:
    filenames = _ExpandDirectories(filenames)

  if _excludes:
    filenames = _FilterExcludedFiles(filenames)

  # Commit the collected settings to the global lint state.
  _SetOutputFormat(output_format)
  _SetQuiet(quiet)
  _SetVerboseLevel(verbosity)
  _SetFilters(filters)
  _SetCountingStyle(counting_style)

  filenames.sort()
  return filenames
6816
def _ExpandDirectories(filenames):
  """Searches a list of filenames and replaces directories in the list with
  all files descending from those directories. Files with extensions not in
  the valid extensions list are excluded.

  Args:
    filenames: A list of files or directories

  Returns:
    A list of all files that are members of filenames or descended from a
    directory in filenames
  """
  expanded = set()
  dot_prefix = '.' + os.path.sep
  for name in filenames:
    # Plain files pass through untouched; only directories are walked.
    if not os.path.isdir(name):
      expanded.add(name)
      continue

    for root, _, walked_files in os.walk(name):
      for walked in walked_files:
        fullname = os.path.join(root, walked)
        # Drop a leading "./" so reported paths stay tidy.
        if fullname.startswith(dot_prefix):
          fullname = fullname[len(dot_prefix):]
        expanded.add(fullname)

  # Keep only files whose extension is recognized by the linter.
  return [name for name in expanded
          if os.path.splitext(name)[1][1:] in GetAllExtensions()]
6847
def _FilterExcludedFiles(fnames):
  """Filters out files listed in the --exclude command line switch. File paths
  in the switch are evaluated relative to the current working directory
  """
  exclude_paths = [os.path.abspath(p) for p in _excludes]

  def _is_excluded(fname):
    # Globbing does not work recursively, so treat every excluded entry as
    # covering its entire subtree.
    abs_fname = os.path.abspath(fname)
    return any(_IsParentOrSame(excluded, abs_fname)
               for excluded in exclude_paths)

  return [fname for fname in fnames if not _is_excluded(fname)]
6857
6858 def _IsParentOrSame(parent, child):
6859 """Return true if child is subdirectory of parent.
6860 Assumes both paths are absolute and don't contain symlinks.
6861 """
6862 parent = os.path.normpath(parent)
6863 child = os.path.normpath(child)
6864 if parent == child:
6865 return True
6866
6867 prefix = os.path.commonprefix([parent, child])
6868 if prefix != parent:
6869 return False
6870 # Note: os.path.commonprefix operates on character basis, so
6871 # take extra care of situations like '/foo/ba' and '/foo/bar/baz'
6872 child_suffix = child[len(prefix):]
6873 child_suffix = child_suffix.lstrip(os.sep)
6874 return child == os.path.join(prefix, child_suffix)
6875
def main():
  """Lint the files named on the command line and exit with the result."""
  filenames = ParseArguments(sys.argv[1:])
  saved_stderr = sys.stderr
  try:
    # Swap in a stderr wrapper that substitutes replacement characters so
    # printing non-ASCII text cannot kill the process.
    sys.stderr = codecs.StreamReader(saved_stderr, 'replace')

    _cpplint_state.ResetErrorCounts()
    for filename in filenames:
      ProcessFile(filename, _cpplint_state.verbose_level)
    # Under --quiet, only report totals when something was actually flagged.
    if not _cpplint_state.quiet or _cpplint_state.error_count > 0:
      _cpplint_state.PrintErrorCounts()

    if _cpplint_state.output_format == 'junit':
      sys.stderr.write(_cpplint_state.FormatJUnitXML())
  finally:
    # Always restore the real stderr, even if linting raised.
    sys.stderr = saved_stderr

  sys.exit(_cpplint_state.error_count > 0)
6898
6899
# Script entry point: run the linter when invoked directly.
if __name__ == '__main__':
  main()