1#!/usr/bin/env python3
2#
3# Copyright (c) 2009 Google Inc. All rights reserved.
4#
5# Redistribution and use in source and binary forms, with or without
6# modification, are permitted provided that the following conditions are
7# met:
8#
9# * Redistributions of source code must retain the above copyright
10# notice, this list of conditions and the following disclaimer.
11# * Redistributions in binary form must reproduce the above
12# copyright notice, this list of conditions and the following disclaimer
13# in the documentation and/or other materials provided with the
14# distribution.
15# * Neither the name of Google Inc. nor the names of its
16# contributors may be used to endorse or promote products derived from
17# this software without specific prior written permission.
18#
19# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31"""Does google-lint on c++ files.
32
33The goal of this script is to identify places in the code that *may*
34be in non-compliance with google style. It does not attempt to fix
35up these problems -- the point is to educate. It also does not
36attempt to find all problems, or to ensure that everything it does
37find is legitimately a problem.
38
39In particular, we can get very confused by /* and // inside strings!
40We do a small hack, which is to ignore //'s with "'s after them on the
41same line, but it is far from perfect (in either direction).
42"""
43
44import codecs
45import copy
46import getopt
47import glob
48import itertools
49import math # for log
50import os
51import re
52import sre_compile
53import string
54import sys
55import unicodedata
56import xml.etree.ElementTree
57
58# if empty, use defaults
59_header_extensions = set([])
60
61# if empty, use defaults
62_valid_extensions = set([])
63
64
65# Files with any of these extensions are considered to be
66# header files (and will undergo different style checks).
67# This set can be extended by using the --headers
68# option (also supported in CPPLINT.cfg)
69def GetHeaderExtensions():
70 if not _header_extensions:
71 return set(['h', 'hpp', 'hxx', 'h++', 'cuh'])
72 return _header_extensions
73
74# The allowed extensions for file names
75# This is set by --extensions flag
76def GetAllExtensions():
77 if not _valid_extensions:
78 return GetHeaderExtensions().union(set(['c', 'cc', 'cpp', 'cxx', 'c++', 'cu']))
79 return _valid_extensions
80
81def GetNonHeaderExtensions():
82 return GetAllExtensions().difference(GetHeaderExtensions())
83
84
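# Illustrative sketch: with no --extensions or --headers overrides, the
# defaults above combine as asserted below. This helper is hypothetical and
# nothing in cpplint calls it; it only documents the default behaviour.
def _example_default_extensions():
  assert 'h' in GetHeaderExtensions()       # headers default to h/hpp/hxx/h++/cuh
  assert 'cc' in GetNonHeaderExtensions()   # sources are the remaining extensions
  assert GetAllExtensions() == GetHeaderExtensions() | GetNonHeaderExtensions()
  return sorted(GetAllExtensions())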
85_USAGE = """
86Syntax: cpplint.py [--verbose=#] [--output=emacs|eclipse|vs7|junit]
87 [--filter=-x,+y,...]
88 [--counting=total|toplevel|detailed] [--repository=path]
89 [--root=subdir] [--linelength=digits] [--recursive]
90 [--exclude=path]
91 [--headers=ext1,ext2]
92 [--extensions=hpp,cpp,...]
93 <file> [file] ...
94
95 The style guidelines this tries to follow are those in
96 https://google.github.io/styleguide/cppguide.html
97
98 Every problem is given a confidence score from 1-5, with 5 meaning we are
99 certain of the problem, and 1 meaning it could be a legitimate construct.
100 This will miss some errors, and is not a substitute for a code review.
101
102 To suppress false-positive errors of a certain category, add a
103 'NOLINT(category)' comment to the line. NOLINT or NOLINT(*)
104 suppresses errors of all categories on that line.
105
106 The files passed in will be linted; at least one file must be provided.
107 Default linted extensions are %s.
108 Other file types will be ignored.
109 Change the extensions with the --extensions flag.
110
111 Flags:
112
113 output=emacs|eclipse|vs7|junit
114 By default, the output is formatted to ease emacs parsing. Output
115 compatible with eclipse (eclipse), Visual Studio (vs7), and JUnit
116 XML parsers such as those used in Jenkins and Bamboo may also be
117 used. Other formats are unsupported.
118
119 verbose=#
120 Specify a number 0-5 to restrict errors to certain verbosity levels.
121 Errors with lower verbosity levels have lower confidence and are more
122 likely to be false positives.
123
124 quiet
125 Suppress output other than linting errors, such as information about
126 which files have been processed and excluded.
127
128 filter=-x,+y,...
129 Specify a comma-separated list of category-filters to apply: only
130 error messages whose category names pass the filters will be printed.
131 (Category names are printed with the message and look like
132 "[whitespace/indent]".) Filters are evaluated left to right.
133 "-FOO" and "FOO" means "do not print categories that start with FOO".
134 "+FOO" means "do print categories that start with FOO".
135
136 Examples: --filter=-whitespace,+whitespace/braces
137 --filter=whitespace,runtime/printf,+runtime/printf_format
138 --filter=-,+build/include_what_you_use
139
140 To see a list of all the categories used in cpplint, pass no arg:
141 --filter=
142
143 counting=total|toplevel|detailed
144 The total number of errors found is always printed. If
145 'toplevel' is provided, then the count of errors in each of
146 the top-level categories like 'build' and 'whitespace' will
147 also be printed. If 'detailed' is provided, then a count
148 is provided for each category like 'build/class'.
149
150 repository=path
151 The top level directory of the repository, used to derive the header
152 guard CPP variable. By default, this is determined by searching for a
153 path that contains .git, .hg, or .svn. When this flag is specified, the
154 given path is used instead. This option allows the header guard CPP
155 variable to remain consistent even if members of a team have different
156 repository root directories (such as when checking out a subdirectory
157 with SVN). In addition, users of non-mainstream version control systems
158 can use this flag to ensure readable header guard CPP variables.
159
160 Examples:
161 Assuming that Alice checks out ProjectName and Bob checks out
162 ProjectName/trunk and trunk contains src/chrome/ui/browser.h, then
163 with no --repository flag, the header guard CPP variable will be:
164
165 Alice => TRUNK_SRC_CHROME_BROWSER_UI_BROWSER_H_
166 Bob => SRC_CHROME_BROWSER_UI_BROWSER_H_
167
168 If Alice uses the --repository=trunk flag and Bob omits the flag or
169 uses --repository=. then the header guard CPP variable will be:
170
171 Alice => SRC_CHROME_BROWSER_UI_BROWSER_H_
172 Bob => SRC_CHROME_BROWSER_UI_BROWSER_H_
173
174 root=subdir
175 The root directory used for deriving header guard CPP variables. This
176 directory is relative to the top level directory of the repository which
177 by default is determined by searching for a directory that contains .git,
178 .hg, or .svn but can also be controlled with the --repository flag. If
179 the specified directory does not exist, this flag is ignored.
180
181 Examples:
182 Assuming that src is the top level directory of the repository, the
183 header guard CPP variables for src/chrome/browser/ui/browser.h are:
184
185 No flag => CHROME_BROWSER_UI_BROWSER_H_
186 --root=chrome => BROWSER_UI_BROWSER_H_
187 --root=chrome/browser => UI_BROWSER_H_
188
189 linelength=digits
190 This is the allowed line length for the project. The default value is
191 80 characters.
192
193 Examples:
194 --linelength=120
195
196 recursive
197 Search for files to lint recursively. Each directory given in the list
198 of files to be linted is replaced by all files that descend from that
199 directory. Files with extensions not in the valid extensions list are
200 excluded.
201
202 exclude=path
203 Exclude the given path from the list of files to be linted. Relative
204 paths are evaluated relative to the current directory and shell globbing
205 is performed. This flag can be provided multiple times to exclude
206 multiple files.
207
208 Examples:
209 --exclude=one.cc
210 --exclude=src/*.cc
211 --exclude=src/*.cc --exclude=test/*.cc
212
213 extensions=extension,extension,...
214 The allowed file extensions that cpplint will check
215
216 Examples:
217 --extensions=%s
218
219 headers=extension,extension,...
220 The allowed header extensions that cpplint will consider to be header files
221 (by default, only files with extensions %s
222 will be assumed to be headers)
223
224 Examples:
225 --headers=%s
226
227 cpplint.py supports per-directory configurations specified in CPPLINT.cfg
228 files. A CPPLINT.cfg file can contain a number of key=value pairs.
229 Currently the following options are supported:
230
231 set noparent
232 filter=+filter1,-filter2,...
233 exclude_files=regex
234 linelength=80
235 root=subdir
236
237 "set noparent" option prevents cpplint from traversing directory tree
238 upwards looking for more .cfg files in parent directories. This option
239 is usually placed in the top-level project directory.
240
241 The "filter" option is similar in function to --filter flag. It specifies
242 message filters in addition to the |_DEFAULT_FILTERS| and those specified
243 through --filter command-line flag.
244
245 "exclude_files" allows to specify a regular expression to be matched against
246 a file name. If the expression matches, the file is skipped and not run
247 through the linter.
248
249 "linelength" specifies the allowed line length for the project.
250
251 The "root" option is similar in function to the --root flag (see example
252 above).
253
254 CPPLINT.cfg has an effect on files in the same directory and all
255 subdirectories, unless overridden by a nested configuration file.
256
257 Example file:
258 filter=-build/include_order,+build/include_alpha
259 exclude_files=.*\\.cc
260
261 The above example disables the build/include_order warning, enables
262 build/include_alpha, and excludes all .cc files from being
263 processed by the linter, in the current directory (where the .cfg
264 file is located) and all subdirectories.
265""" % (list(GetAllExtensions()),
266 ','.join(list(GetAllExtensions())),
267 GetHeaderExtensions(),
268 ','.join(GetHeaderExtensions()))
269
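# Illustrative invocation (assumed, not prescribed by this file): lint a tree
# recursively, widen the line limit, and re-enable one whitespace subcategory.
# Filters are applied left to right, so +whitespace/braces takes effect after
# -whitespace has turned the rest of that category off.
#
#   cpplint.py --recursive --linelength=120 \
#       --filter=-whitespace,+whitespace/braces src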
270# We categorize each error message we print. Here are the categories.
271# We want an explicit list so we can list them all in cpplint --filter=.
272# If you add a new error message with a new category, add it to the list
273# here! cpplint_unittest.py should tell you if you forget to do this.
274_ERROR_CATEGORIES = [
275 'build/class',
276 'build/c++11',
277 'build/c++14',
278 'build/c++tr1',
279 'build/deprecated',
280 'build/endif_comment',
281 'build/explicit_make_pair',
282 'build/forward_decl',
283 'build/header_guard',
284 'build/include',
285 'build/include_subdir',
286 'build/include_alpha',
287 'build/include_order',
288 'build/include_what_you_use',
289 'build/namespaces_literals',
290 'build/namespaces',
291 'build/printf_format',
292 'build/storage_class',
293 'legal/copyright',
294 'readability/alt_tokens',
295 'readability/braces',
296 'readability/casting',
297 'readability/check',
298 'readability/constructors',
299 'readability/fn_size',
300 'readability/inheritance',
301 'readability/multiline_comment',
302 'readability/multiline_string',
303 'readability/namespace',
304 'readability/nolint',
305 'readability/nul',
306 'readability/strings',
307 'readability/todo',
308 'readability/utf8',
309 'runtime/arrays',
310 'runtime/casting',
311 'runtime/explicit',
312 'runtime/int',
313 'runtime/init',
314 'runtime/invalid_increment',
315 'runtime/member_string_references',
316 'runtime/memset',
317 'runtime/indentation_namespace',
318 'runtime/operator',
319 'runtime/printf',
320 'runtime/printf_format',
321 'runtime/references',
322 'runtime/string',
323 'runtime/threadsafe_fn',
324 'runtime/vlog',
325 'whitespace/blank_line',
326 'whitespace/braces',
327 'whitespace/comma',
328 'whitespace/comments',
329 'whitespace/empty_conditional_body',
330 'whitespace/empty_if_body',
331 'whitespace/empty_loop_body',
332 'whitespace/end_of_line',
333 'whitespace/ending_newline',
334 'whitespace/forcolon',
335 'whitespace/indent',
336 'whitespace/line_length',
337 'whitespace/newline',
338 'whitespace/operators',
339 'whitespace/parens',
340 'whitespace/semicolon',
341 'whitespace/tab',
342 'whitespace/todo',
343 ]
344
345# These error categories are no longer enforced by cpplint, but for backwards-
346# compatibility they may still appear in NOLINT comments.
347_LEGACY_ERROR_CATEGORIES = [
348 'readability/streams',
349 'readability/function',
350 ]
351
352# The default state of the category filter. This is overridden by the --filter=
353# flag. By default all errors are on, so only add here categories that should be
354# off by default (i.e., categories that must be enabled by the --filter= flags).
355# All entries here should start with a '-' or '+', as in the --filter= flag.
356_DEFAULT_FILTERS = ['-build/include_alpha']
357
358# The default list of categories suppressed for C (not C++) files.
359_DEFAULT_C_SUPPRESSED_CATEGORIES = [
360 'readability/casting',
361 ]
362
363# The default list of categories suppressed for Linux Kernel files.
364_DEFAULT_KERNEL_SUPPRESSED_CATEGORIES = [
365 'whitespace/tab',
366 ]
367
368# We used to check for high-bit characters, but after much discussion we
369# decided those were OK, as long as they were in UTF-8 and didn't represent
370# hard-coded international strings, which belong in a separate i18n file.
371
372# C++ headers
373_CPP_HEADERS = frozenset([
374 # Legacy
375 'algobase.h',
376 'algo.h',
377 'alloc.h',
378 'builtinbuf.h',
379 'bvector.h',
380 'complex.h',
381 'defalloc.h',
382 'deque.h',
383 'editbuf.h',
384 'fstream.h',
385 'function.h',
386 'hash_map',
387 'hash_map.h',
388 'hash_set',
389 'hash_set.h',
390 'hashtable.h',
391 'heap.h',
392 'indstream.h',
393 'iomanip.h',
394 'iostream.h',
395 'istream.h',
396 'iterator.h',
397 'list.h',
398 'map.h',
399 'multimap.h',
400 'multiset.h',
401 'ostream.h',
402 'pair.h',
403 'parsestream.h',
404 'pfstream.h',
405 'procbuf.h',
406 'pthread_alloc',
407 'pthread_alloc.h',
408 'rope',
409 'rope.h',
410 'ropeimpl.h',
411 'set.h',
412 'slist',
413 'slist.h',
414 'stack.h',
415 'stdiostream.h',
416 'stl_alloc.h',
417 'stl_relops.h',
418 'streambuf.h',
419 'stream.h',
420 'strfile.h',
421 'strstream.h',
422 'tempbuf.h',
423 'tree.h',
424 'type_traits.h',
425 'vector.h',
426 # 17.6.1.2 C++ library headers
427 'algorithm',
428 'array',
429 'atomic',
430 'bitset',
431 'chrono',
432 'codecvt',
433 'complex',
434 'condition_variable',
435 'deque',
436 'exception',
437 'forward_list',
438 'fstream',
439 'functional',
440 'future',
441 'initializer_list',
442 'iomanip',
443 'ios',
444 'iosfwd',
445 'iostream',
446 'istream',
447 'iterator',
448 'limits',
449 'list',
450 'locale',
451 'map',
452 'memory',
453 'mutex',
454 'new',
455 'numeric',
456 'ostream',
457 'queue',
458 'random',
459 'ratio',
460 'regex',
461 'scoped_allocator',
462 'set',
463 'sstream',
464 'stack',
465 'stdexcept',
466 'streambuf',
467 'string',
468 'strstream',
469 'system_error',
470 'thread',
471 'tuple',
472 'typeindex',
473 'typeinfo',
474 'type_traits',
475 'unordered_map',
476 'unordered_set',
477 'utility',
478 'valarray',
479 'vector',
480 # 17.6.1.2 C++ headers for C library facilities
481 'cassert',
482 'ccomplex',
483 'cctype',
484 'cerrno',
485 'cfenv',
486 'cfloat',
487 'cinttypes',
488 'ciso646',
489 'climits',
490 'clocale',
491 'cmath',
492 'csetjmp',
493 'csignal',
494 'cstdalign',
495 'cstdarg',
496 'cstdbool',
497 'cstddef',
498 'cstdint',
499 'cstdio',
500 'cstdlib',
501 'cstring',
502 'ctgmath',
503 'ctime',
504 'cuchar',
505 'cwchar',
506 'cwctype',
507 ])
508
509# Type names
510_TYPES = re.compile(
511 r'^(?:'
512 # [dcl.type.simple]
513 r'(char(16_t|32_t)?)|wchar_t|'
514 r'bool|short|int|long|signed|unsigned|float|double|'
515 # [support.types]
516 r'(ptrdiff_t|size_t|max_align_t|nullptr_t)|'
517 # [cstdint.syn]
518 r'(u?int(_fast|_least)?(8|16|32|64)_t)|'
519 r'(u?int(max|ptr)_t)|'
520 r')$')
521
522
523# These headers are excluded from [build/include] and [build/include_order]
524# checks:
525# - Anything not following google file name conventions (containing an
526# uppercase character, such as Python.h or nsStringAPI.h, for example).
527# - Lua headers.
528_THIRD_PARTY_HEADERS_PATTERN = re.compile(
529 r'^(?:[^/]*[A-Z][^/]*\.h|lua\.h|lauxlib\.h|lualib\.h)$')
530
531# Pattern for matching FileInfo.BaseName() against test file name
532_test_suffixes = ['_test', '_regtest', '_unittest']
533_TEST_FILE_SUFFIX = '(' + '|'.join(_test_suffixes) + r')$'
534
535# Pattern that matches only complete whitespace, possibly across multiple lines.
536_EMPTY_CONDITIONAL_BODY_PATTERN = re.compile(r'^\s*$', re.DOTALL)
537
538# Assertion macros. These are defined in base/logging.h and
539# testing/base/public/gunit.h.
540_CHECK_MACROS = [
541 'DCHECK', 'CHECK',
542 'EXPECT_TRUE', 'ASSERT_TRUE',
543 'EXPECT_FALSE', 'ASSERT_FALSE',
544 ]
545
546# Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE
547_CHECK_REPLACEMENT = dict([(macro_var, {}) for macro_var in _CHECK_MACROS])
548
549for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
550 ('>=', 'GE'), ('>', 'GT'),
551 ('<=', 'LE'), ('<', 'LT')]:
552 _CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement
553 _CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
554 _CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
555 _CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
556
557for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
558 ('>=', 'LT'), ('>', 'LE'),
559 ('<=', 'GT'), ('<', 'GE')]:
560 _CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
561 _CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
562
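# Illustrative sketch: the two loops above fill in suggestion tables like the
# ones asserted below. Hypothetical helper; nothing in cpplint calls it.
def _example_check_replacements():
  assert _CHECK_REPLACEMENT['CHECK']['=='] == 'CHECK_EQ'
  assert _CHECK_REPLACEMENT['EXPECT_TRUE']['<'] == 'EXPECT_LT'
  # The *_FALSE macros suggest the inverted comparison:
  assert _CHECK_REPLACEMENT['EXPECT_FALSE']['=='] == 'EXPECT_NE'
  assert _CHECK_REPLACEMENT['ASSERT_FALSE']['>='] == 'ASSERT_LT'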
563# Alternative tokens and their replacements. For full list, see section 2.5
564# Alternative tokens [lex.digraph] in the C++ standard.
565#
566# Digraphs (such as '%:') are not included here since it's a mess to
567# match those on a word boundary.
568_ALT_TOKEN_REPLACEMENT = {
569 'and': '&&',
570 'bitor': '|',
571 'or': '||',
572 'xor': '^',
573 'compl': '~',
574 'bitand': '&',
575 'and_eq': '&=',
576 'or_eq': '|=',
577 'xor_eq': '^=',
578 'not': '!',
579 'not_eq': '!='
580 }
581
582# Compile regular expression that matches all the above keywords. The "[ =()]"
583# bit is meant to avoid matching these keywords outside of boolean expressions.
584#
585# False positives include C-style multi-line comments and multi-line strings
586# but those have always been troublesome for cpplint.
587_ALT_TOKEN_REPLACEMENT_PATTERN = re.compile(
588 r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)')
589
590
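# Illustrative sketch: the pattern above only fires when an alternative token
# is preceded by ' ', '=', '(' or ')' and followed by ' ', '(' or end of line,
# which keeps identifiers such as "android" from matching. Hypothetical
# helper; nothing in cpplint calls it.
def _example_alt_token_pattern():
  assert _ALT_TOKEN_REPLACEMENT_PATTERN.search('if (a and b) {').group(1) == 'and'
  assert _ALT_TOKEN_REPLACEMENT_PATTERN.search('int android = 0;') is None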
591# These constants define types of headers for use with
592# _IncludeState.CheckNextIncludeOrder().
593_C_SYS_HEADER = 1
594_CPP_SYS_HEADER = 2
595_LIKELY_MY_HEADER = 3
596_POSSIBLE_MY_HEADER = 4
597_OTHER_HEADER = 5
598
599# These constants define the current inline assembly state
600_NO_ASM = 0 # Outside of inline assembly block
601_INSIDE_ASM = 1 # Inside inline assembly block
602_END_ASM = 2 # Last line of inline assembly block
603_BLOCK_ASM = 3 # The whole block is an inline assembly block
604
605# Match start of assembly blocks
606_MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)'
607 r'(?:\s+(volatile|__volatile__))?'
608 r'\s*[{(]')
609
610# Match strings that indicate we're working on a C (not C++) file.
611_SEARCH_C_FILE = re.compile(r'\b(?:LINT_C_FILE|'
612 r'vim?:\s*.*(\s*|:)filetype=c(\s*|:|$))')
613
614# Match string that indicates we're working on a Linux Kernel file.
615_SEARCH_KERNEL_FILE = re.compile(r'\b(?:LINT_KERNEL_FILE)')
616
617_regexp_compile_cache = {}
618
619# {str, set(int)}: a map from error categories to sets of linenumbers
620# on which those errors are expected and should be suppressed.
621_error_suppressions = {}
622
623# The root directory used for deriving header guard CPP variable.
624# This is set by --root flag.
625_root = None
626
627# The top level repository directory. If set, _root is calculated relative to
628# this directory instead of the directory containing version control artifacts.
629# This is set by the --repository flag.
630_repository = None
631
632# Files to exclude from linting. This is set by the --exclude flag.
633_excludes = None
634
635# Whether to suppress PrintInfo messages
636_quiet = False
637
638# The allowed line length of files.
639# This is set by --linelength flag.
640_line_length = 80
641
642try:
643 xrange(1, 0)
644except NameError:
645 # -- pylint: disable=redefined-builtin
646 xrange = range
647
648try:
649 unicode
650except NameError:
651 # -- pylint: disable=redefined-builtin
652 basestring = unicode = str
653
654try:
655 long(2)
656except NameError:
657 # -- pylint: disable=redefined-builtin
658 long = int
659
660if sys.version_info < (3,):
661 # -- pylint: disable=no-member
662 # BINARY_TYPE = str
663 itervalues = dict.itervalues
664 iteritems = dict.iteritems
665else:
666 # BINARY_TYPE = bytes
667 itervalues = dict.values
668 iteritems = dict.items
669
670def unicode_escape_decode(x):
671 if sys.version_info < (3,):
672 return codecs.unicode_escape_decode(x)[0]
673 else:
674 return x
675
676# {str, bool}: a map from error categories to booleans which indicate if the
677# category should be suppressed for every line.
678_global_error_suppressions = {}
679
680
681
682
683def ParseNolintSuppressions(filename, raw_line, linenum, error):
684 """Updates the global list of line error-suppressions.
685
686 Parses any NOLINT comments on the current line, updating the global
687 error_suppressions store. Reports an error if the NOLINT comment
688 was malformed.
689
690 Args:
691 filename: str, the name of the input file.
692 raw_line: str, the line of input text, with comments.
693 linenum: int, the number of the current line.
694 error: function, an error handler.
695 """
696 matched = Search(r'\bNOLINT(NEXTLINE)?\b(\([^)]+\))?', raw_line)
697 if matched:
698 if matched.group(1):
699 suppressed_line = linenum + 1
700 else:
701 suppressed_line = linenum
702 category = matched.group(2)
703 if category in (None, '(*)'): # => "suppress all"
704 _error_suppressions.setdefault(None, set()).add(suppressed_line)
705 else:
706 if category.startswith('(') and category.endswith(')'):
707 category = category[1:-1]
708 if category in _ERROR_CATEGORIES:
709 _error_suppressions.setdefault(category, set()).add(suppressed_line)
710 elif category not in _LEGACY_ERROR_CATEGORIES:
711 error(filename, linenum, 'readability/nolint', 5,
712 'Unknown NOLINT error category: %s' % category)
713
714
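# Illustrative sketch: how a NOLINT comment is recorded and later consulted.
# Hypothetical helper; nothing in cpplint calls it (the functions it uses are
# only looked up if it were ever run).
def _example_nolint_suppression():
  ResetNolintSuppressions()
  ParseNolintSuppressions('demo.cc', 'int x = 0;  // NOLINT(runtime/int)', 12,
                          lambda *args: None)  # error handler unused here
  assert IsErrorSuppressedByNolint('runtime/int', 12)
  assert not IsErrorSuppressedByNolint('runtime/int', 13)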
715def ProcessGlobalSuppresions(lines):
716 """Updates the list of global error suppressions.
717
718 Parses any lint directives in the file that have global effect.
719
720 Args:
721 lines: An array of strings, each representing a line of the file, with the
722 last element being empty if the file is terminated with a newline.
723 """
724 for line in lines:
725 if _SEARCH_C_FILE.search(line):
726 for category in _DEFAULT_C_SUPPRESSED_CATEGORIES:
727 _global_error_suppressions[category] = True
728 if _SEARCH_KERNEL_FILE.search(line):
729 for category in _DEFAULT_KERNEL_SUPPRESSED_CATEGORIES:
730 _global_error_suppressions[category] = True
731
732
733def ResetNolintSuppressions():
734 """Resets the set of NOLINT suppressions to empty."""
735 _error_suppressions.clear()
736 _global_error_suppressions.clear()
737
738
739def IsErrorSuppressedByNolint(category, linenum):
740 """Returns true if the specified error category is suppressed on this line.
741
742 Consults the global error_suppressions map populated by
743 ParseNolintSuppressions/ProcessGlobalSuppresions/ResetNolintSuppressions.
744
745 Args:
746 category: str, the category of the error.
747 linenum: int, the current line number.
748 Returns:
749 bool, True iff the error should be suppressed due to a NOLINT comment or
750 global suppression.
751 """
752 return (_global_error_suppressions.get(category, False) or
753 linenum in _error_suppressions.get(category, set()) or
754 linenum in _error_suppressions.get(None, set()))
755
756
757def Match(pattern, s):
758 """Matches the string with the pattern, caching the compiled regexp."""
759 # The regexp compilation caching is inlined in both Match and Search for
760 # performance reasons; factoring it out into a separate function turns out
761 # to be noticeably expensive.
762 if pattern not in _regexp_compile_cache:
763 _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
764 return _regexp_compile_cache[pattern].match(s)
765
766
767def ReplaceAll(pattern, rep, s):
768 """Replaces instances of pattern in a string with a replacement.
769
770 The compiled regex is kept in a cache shared by Match and Search.
771
772 Args:
773 pattern: regex pattern
774 rep: replacement text
775 s: search string
776
777 Returns:
778 string with replacements made (or original string if no replacements)
779 """
780 if pattern not in _regexp_compile_cache:
781 _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
782 return _regexp_compile_cache[pattern].sub(rep, s)
783
784
785def Search(pattern, s):
786 """Searches the string for the pattern, caching the compiled regexp."""
787 if pattern not in _regexp_compile_cache:
788 _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
789 return _regexp_compile_cache[pattern].search(s)
790
791
792def _IsSourceExtension(s):
793 """File extension (excluding dot) matches a source file extension."""
794 return s in GetNonHeaderExtensions()
795
796
797class _IncludeState(object):
798 """Tracks line numbers for includes, and the order in which includes appear.
799
800 include_list contains a list of lists of (header, line number) pairs.
801 It's a list of lists rather than just one flat list to make it
802 easier to update across preprocessor boundaries.
803
804 Call CheckNextIncludeOrder() once for each header in the file, passing
805 in the type constants defined above. Calls in an illegal order will
806 raise an _IncludeError with an appropriate error message.
807
808 """
809 # self._section will move monotonically through this set. If it ever
810 # needs to move backwards, CheckNextIncludeOrder will raise an error.
811 _INITIAL_SECTION = 0
812 _MY_H_SECTION = 1
813 _C_SECTION = 2
814 _CPP_SECTION = 3
815 _OTHER_H_SECTION = 4
816
817 _TYPE_NAMES = {
818 _C_SYS_HEADER: 'C system header',
819 _CPP_SYS_HEADER: 'C++ system header',
820 _LIKELY_MY_HEADER: 'header this file implements',
821 _POSSIBLE_MY_HEADER: 'header this file may implement',
822 _OTHER_HEADER: 'other header',
823 }
824 _SECTION_NAMES = {
825 _INITIAL_SECTION: "... nothing. (This can't be an error.)",
826 _MY_H_SECTION: 'a header this file implements',
827 _C_SECTION: 'C system header',
828 _CPP_SECTION: 'C++ system header',
829 _OTHER_H_SECTION: 'other header',
830 }
831
832 def __init__(self):
833 self.include_list = [[]]
834 self._section = None
835 self._last_header = None
836 self.ResetSection('')
837
838 def FindHeader(self, header):
839 """Check if a header has already been included.
840
841 Args:
842 header: header to check.
843 Returns:
844 Line number of previous occurrence, or -1 if the header has not
845 been seen before.
846 """
847 for section_list in self.include_list:
848 for f in section_list:
849 if f[0] == header:
850 return f[1]
851 return -1
852
853 def ResetSection(self, directive):
854 """Reset section checking for preprocessor directive.
855
856 Args:
857 directive: preprocessor directive (e.g. "if", "else").
858 """
859 # The name of the current section.
860 self._section = self._INITIAL_SECTION
861 # The path of last found header.
862 self._last_header = ''
863
864 # Update list of includes. Note that we never pop from the
865 # include list.
866 if directive in ('if', 'ifdef', 'ifndef'):
867 self.include_list.append([])
868 elif directive in ('else', 'elif'):
869 self.include_list[-1] = []
870
871 def SetLastHeader(self, header_path):
872 self._last_header = header_path
873
874 def CanonicalizeAlphabeticalOrder(self, header_path):
875 """Returns a path canonicalized for alphabetical comparison.
876
877 - replaces "-" with "_" so they both compare the same.
878 - removes '-inl' since we don't require them to be after the main header.
879 - lowercase everything, just in case.
880
881 Args:
882 header_path: Path to be canonicalized.
883
884 Returns:
885 Canonicalized path.
886 """
887 return header_path.replace('-inl.h', '.h').replace('-', '_').lower()
888
889 def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path):
890 """Check if a header is in alphabetical order with the previous header.
891
892 Args:
893 clean_lines: A CleansedLines instance containing the file.
894 linenum: The number of the line to check.
895 header_path: Canonicalized header to be checked.
896
897 Returns:
898 Returns true if the header is in alphabetical order.
899 """
900 # If previous section is different from current section, _last_header will
901 # be reset to empty string, so it's always less than current header.
902 #
903 # If previous line was a blank line, assume that the headers are
904 # intentionally sorted the way they are.
905 if (self._last_header > header_path and
906 Match(r'^\s*#\s*include\b', clean_lines.elided[linenum - 1])):
907 return False
908 return True
909
910 def CheckNextIncludeOrder(self, header_type):
911 """Returns a non-empty error message if the next header is out of order.
912
913 This function also updates the internal state to be ready to check
914 the next include.
915
916 Args:
917 header_type: One of the _XXX_HEADER constants defined above.
918
919 Returns:
920 The empty string if the header is in the right order, or an
921 error message describing what's wrong.
922
923 """
924 error_message = ('Found %s after %s' %
925 (self._TYPE_NAMES[header_type],
926 self._SECTION_NAMES[self._section]))
927
928 last_section = self._section
929
930 if header_type == _C_SYS_HEADER:
931 if self._section <= self._C_SECTION:
932 self._section = self._C_SECTION
933 else:
934 self._last_header = ''
935 return error_message
936 elif header_type == _CPP_SYS_HEADER:
937 if self._section <= self._CPP_SECTION:
938 self._section = self._CPP_SECTION
939 else:
940 self._last_header = ''
941 return error_message
942 elif header_type == _LIKELY_MY_HEADER:
943 if self._section <= self._MY_H_SECTION:
944 self._section = self._MY_H_SECTION
945 else:
946 self._section = self._OTHER_H_SECTION
947 elif header_type == _POSSIBLE_MY_HEADER:
948 if self._section <= self._MY_H_SECTION:
949 self._section = self._MY_H_SECTION
950 else:
951 # This will always be the fallback because we're not sure
952 # enough that the header is associated with this file.
953 self._section = self._OTHER_H_SECTION
954 else:
955 assert header_type == _OTHER_HEADER
956 self._section = self._OTHER_H_SECTION
957
958 if last_section != self._section:
959 self._last_header = ''
960
961 return ''
962
963
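# Illustrative sketch: _IncludeState accepts headers whose sections arrive in
# non-decreasing order and returns a message otherwise. Hypothetical helper;
# nothing in cpplint calls it.
def _example_include_order():
  state = _IncludeState()
  assert state.CheckNextIncludeOrder(_C_SYS_HEADER) == ''
  assert state.CheckNextIncludeOrder(_CPP_SYS_HEADER) == ''
  # Dropping back to a C system header after a C++ one is reported:
  assert state.CheckNextIncludeOrder(_C_SYS_HEADER).startswith('Found')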
964class _CppLintState(object):
965 """Maintains module-wide state.."""
966
967 def __init__(self):
968 self.verbose_level = 1 # global setting.
969 self.error_count = 0 # global count of reported errors
970 # filters to apply when emitting error messages
971 self.filters = _DEFAULT_FILTERS[:]
972 # backup of filter list. Used to restore the state after each file.
973 self._filters_backup = self.filters[:]
974 self.counting = 'total' # In what way are we counting errors?
975 self.errors_by_category = {} # string to int dict storing error counts
976
977 # output format:
978 # "emacs" - format that emacs can parse (default)
979 # "eclipse" - format that eclipse can parse
980 # "vs7" - format that Microsoft Visual Studio 7 can parse
981 # "junit" - format that Jenkins, Bamboo, etc can parse
982 self.output_format = 'emacs'
983
984 # For JUnit output, save errors and failures until the end so that they
985 # can be written into the XML
986 self._junit_errors = []
987 self._junit_failures = []
988
989 def SetOutputFormat(self, output_format):
990 """Sets the output format for errors."""
991 self.output_format = output_format
992
993 def SetVerboseLevel(self, level):
994 """Sets the module's verbosity, and returns the previous setting."""
995 last_verbose_level = self.verbose_level
996 self.verbose_level = level
997 return last_verbose_level
998
999 def SetCountingStyle(self, counting_style):
1000 """Sets the module's counting options."""
1001 self.counting = counting_style
1002
1003 def SetFilters(self, filters):
1004 """Sets the error-message filters.
1005
1006 These filters are applied when deciding whether to emit a given
1007 error message.
1008
1009 Args:
1010 filters: A string of comma-separated filters (eg "+whitespace/indent").
1011 Each filter should start with + or -; else we die.
1012
1013 Raises:
1014 ValueError: The comma-separated filters did not all start with '+' or '-'.
1015 E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter"
1016 """
1017 # Default filters always have less priority than the flag ones.
1018 self.filters = _DEFAULT_FILTERS[:]
1019 self.AddFilters(filters)
1020
1021 def AddFilters(self, filters):
1022 """ Adds more filters to the existing list of error-message filters. """
1023 for filt in filters.split(','):
1024 clean_filt = filt.strip()
1025 if clean_filt:
1026 self.filters.append(clean_filt)
1027 for filt in self.filters:
1028 if not (filt.startswith('+') or filt.startswith('-')):
1029 raise ValueError('Every filter in --filters must start with + or -'
1030 ' (%s does not)' % filt)
1031
1032 def BackupFilters(self):
1033 """ Saves the current filter list to backup storage."""
1034 self._filters_backup = self.filters[:]
1035
1036 def RestoreFilters(self):
1037 """ Restores filters previously backed up."""
1038 self.filters = self._filters_backup[:]
1039
1040 def ResetErrorCounts(self):
1041 """Sets the module's error statistic back to zero."""
1042 self.error_count = 0
1043 self.errors_by_category = {}
1044
1045 def IncrementErrorCount(self, category):
1046 """Bumps the module's error statistic."""
1047 self.error_count += 1
1048 if self.counting in ('toplevel', 'detailed'):
1049 if self.counting != 'detailed':
1050 category = category.split('/')[0]
1051 if category not in self.errors_by_category:
1052 self.errors_by_category[category] = 0
1053 self.errors_by_category[category] += 1
1054
1055 def PrintErrorCounts(self):
1056 """Print a summary of errors by category, and the total."""
1057 for category, count in sorted(iteritems(self.errors_by_category)):
1058 self.PrintInfo('Category \'%s\' errors found: %d\n' %
1059 (category, count))
1060 if self.error_count > 0:
1061 self.PrintInfo('Total errors found: %d\n' % self.error_count)
1062
1063 def PrintInfo(self, message):
1064 if not _quiet and self.output_format != 'junit':
1065 sys.stderr.write(message)
1066
1067 def PrintError(self, message):
1068 if self.output_format == 'junit':
1069 self._junit_errors.append(message)
1070 else:
1071 sys.stderr.write(message)
1072
1073 def AddJUnitFailure(self, filename, linenum, message, category, confidence):
1074 self._junit_failures.append((filename, linenum, message, category,
1075 confidence))
1076
1077 def FormatJUnitXML(self):
1078 num_errors = len(self._junit_errors)
1079 num_failures = len(self._junit_failures)
1080
1081 testsuite = xml.etree.ElementTree.Element('testsuite')
1082 testsuite.attrib['name'] = 'cpplint'
1083 testsuite.attrib['errors'] = str(num_errors)
1084 testsuite.attrib['failures'] = str(num_failures)
1085
1086 if num_errors == 0 and num_failures == 0:
1087 testsuite.attrib['tests'] = str(1)
1088 xml.etree.ElementTree.SubElement(testsuite, 'testcase', name='passed')
1089
1090 else:
1091 testsuite.attrib['tests'] = str(num_errors + num_failures)
1092 if num_errors > 0:
1093 testcase = xml.etree.ElementTree.SubElement(testsuite, 'testcase')
1094 testcase.attrib['name'] = 'errors'
1095 error = xml.etree.ElementTree.SubElement(testcase, 'error')
1096 error.text = '\n'.join(self._junit_errors)
1097 if num_failures > 0:
1098 # Group failures by file
1099 failed_file_order = []
1100 failures_by_file = {}
1101 for failure in self._junit_failures:
1102 failed_file = failure[0]
1103 if failed_file not in failed_file_order:
1104 failed_file_order.append(failed_file)
1105 failures_by_file[failed_file] = []
1106 failures_by_file[failed_file].append(failure)
1107 # Create a testcase for each file
1108 for failed_file in failed_file_order:
1109 failures = failures_by_file[failed_file]
1110 testcase = xml.etree.ElementTree.SubElement(testsuite, 'testcase')
1111 testcase.attrib['name'] = failed_file
1112 failure = xml.etree.ElementTree.SubElement(testcase, 'failure')
1113 template = '{0}: {1} [{2}] [{3}]'
1114 texts = [template.format(f[1], f[2], f[3], f[4]) for f in failures]
1115 failure.text = '\n'.join(texts)
1116
1117 xml_decl = '<?xml version="1.0" encoding="UTF-8" ?>\n'
1118 return xml_decl + xml.etree.ElementTree.tostring(testsuite, 'utf-8').decode('utf-8')
1119
1120
1121_cpplint_state = _CppLintState()
1122
1123
1124def _OutputFormat():
1125 """Gets the module's output format."""
1126 return _cpplint_state.output_format
1127
1128
1129def _SetOutputFormat(output_format):
1130 """Sets the module's output format."""
1131 _cpplint_state.SetOutputFormat(output_format)
1132
1133
1134def _VerboseLevel():
1135 """Returns the module's verbosity setting."""
1136 return _cpplint_state.verbose_level
1137
1138
1139def _SetVerboseLevel(level):
1140 """Sets the module's verbosity, and returns the previous setting."""
1141 return _cpplint_state.SetVerboseLevel(level)
1142
1143
1144def _SetCountingStyle(level):
1145 """Sets the module's counting options."""
1146 _cpplint_state.SetCountingStyle(level)
1147
1148
1149def _Filters():
1150 """Returns the module's list of output filters, as a list."""
1151 return _cpplint_state.filters
1152
1153
1154def _SetFilters(filters):
1155 """Sets the module's error-message filters.
1156
1157 These filters are applied when deciding whether to emit a given
1158 error message.
1159
1160 Args:
1161 filters: A string of comma-separated filters (eg "whitespace/indent").
1162 Each filter should start with + or -; else we die.
1163 """
1164 _cpplint_state.SetFilters(filters)
1165
1166def _AddFilters(filters):
1167 """Adds more filter overrides.
1168
1169 Unlike _SetFilters, this function does not reset the current list of filters
1170 available.
1171
1172 Args:
1173 filters: A string of comma-separated filters (eg "whitespace/indent").
1174 Each filter should start with + or -; else we die.
1175 """
1176 _cpplint_state.AddFilters(filters)
1177
1178def _BackupFilters():
1179 """ Saves the current filter list to backup storage."""
1180 _cpplint_state.BackupFilters()
1181
1182def _RestoreFilters():
1183 """ Restores filters previously backed up."""
1184 _cpplint_state.RestoreFilters()
1185
1186class _FunctionState(object):
1187 """Tracks current function name and the number of lines in its body."""
1188
1189 _NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc.
1190 _TEST_TRIGGER = 400 # about 60% more than _NORMAL_TRIGGER.
1191
1192 def __init__(self):
1193 self.in_a_function = False
1194 self.lines_in_function = 0
1195 self.current_function = ''
1196
1197 def Begin(self, function_name):
1198 """Start analyzing function body.
1199
1200 Args:
1201 function_name: The name of the function being tracked.
1202 """
1203 self.in_a_function = True
1204 self.lines_in_function = 0
1205 self.current_function = function_name
1206
1207 def Count(self):
1208 """Count line in current function body."""
1209 if self.in_a_function:
1210 self.lines_in_function += 1
1211
1212 def Check(self, error, filename, linenum):
1213 """Report if too many lines in function body.
1214
1215 Args:
1216 error: The function to call with any errors found.
1217 filename: The name of the current file.
1218 linenum: The number of the line to check.
1219 """
1220 if not self.in_a_function:
1221 return
1222
1223 if Match(r'T(EST|est)', self.current_function):
1224 base_trigger = self._TEST_TRIGGER
1225 else:
1226 base_trigger = self._NORMAL_TRIGGER
1227 trigger = base_trigger * 2**_VerboseLevel()
1228
1229 if self.lines_in_function > trigger:
1230 error_level = int(math.log(self.lines_in_function / base_trigger, 2))
1231 # 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ...
1232 if error_level > 5:
1233 error_level = 5
1234 error(filename, linenum, 'readability/fn_size', error_level,
1235 'Small and focused functions are preferred:'
1236 ' %s has %d non-comment lines'
1237 ' (error triggered by exceeding %d lines).' % (
1238 self.current_function, self.lines_in_function, trigger))
1239
1240 def End(self):
1241 """Stop analyzing function body."""
1242 self.in_a_function = False
1243
1244
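# Illustrative sketch: the fn_size trigger doubles with each verbosity level,
# so a normal function warns past 250 non-comment lines at --v=0 and past 500
# at the default --v=1. Hypothetical helper; nothing in cpplint calls it.
def _example_fn_size_trigger(verbose_level):
  return _FunctionState._NORMAL_TRIGGER * 2**verbose_level  # 0 -> 250, 1 -> 500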
1245class _IncludeError(Exception):
1246 """Indicates a problem with the include order in a file."""
1247 pass
1248
1249
1250class FileInfo(object):
1251 """Provides utility functions for filenames.
1252
1253 FileInfo provides easy access to the components of a file's path
1254 relative to the project root.
1255 """
1256
1257 def __init__(self, filename):
1258 self._filename = filename
1259
1260 def FullName(self):
1261 """Make Windows paths like Unix."""
1262 return os.path.abspath(self._filename).replace('\\', '/')
1263
1264 def RepositoryName(self):
1265 r"""FullName after removing the local path to the repository.
1266
1267 If we have a real absolute path name here we can try to do something smart:
1268 detecting the root of the checkout and truncating /path/to/checkout from
1269 the name so that we get header guards that don't include things like
1270 "C:\Documents and Settings\..." or "/home/username/..." in them and thus
1271 people on different computers who have checked the source out to different
1272 locations won't see bogus errors.
1273 """
1274 fullname = self.FullName()
1275
1276 if os.path.exists(fullname):
1277 project_dir = os.path.dirname(fullname)
1278
1279 # If the user specified a repository path, it exists, and the file is
1280 # contained in it, use the specified repository path
1281 if _repository:
1282 repo = FileInfo(_repository).FullName()
1283 root_dir = project_dir
1284 while os.path.exists(root_dir):
1285 # allow case insensitive compare on Windows
1286 if os.path.normcase(root_dir) == os.path.normcase(repo):
1287 return os.path.relpath(fullname, root_dir).replace('\\', '/')
1288 one_up_dir = os.path.dirname(root_dir)
1289 if one_up_dir == root_dir:
1290 break
1291 root_dir = one_up_dir
1292
1293 if os.path.exists(os.path.join(project_dir, ".svn")):
1294 # If there's a .svn file in the current directory, we recursively look
1295 # up the directory tree for the top of the SVN checkout
1296 root_dir = project_dir
1297 one_up_dir = os.path.dirname(root_dir)
1298 while os.path.exists(os.path.join(one_up_dir, ".svn")):
1299 root_dir = os.path.dirname(root_dir)
1300 one_up_dir = os.path.dirname(one_up_dir)
1301
1302 prefix = os.path.commonprefix([root_dir, project_dir])
1303 return fullname[len(prefix) + 1:]
1304
1305 # Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by
1306 # searching up from the current path.
1307 root_dir = current_dir = os.path.dirname(fullname)
1308 while current_dir != os.path.dirname(current_dir):
1309 if (os.path.exists(os.path.join(current_dir, ".git")) or
1310 os.path.exists(os.path.join(current_dir, ".hg")) or
1311 os.path.exists(os.path.join(current_dir, ".svn"))):
1312 root_dir = current_dir
1313 current_dir = os.path.dirname(current_dir)
1314
1315 if (os.path.exists(os.path.join(root_dir, ".git")) or
1316 os.path.exists(os.path.join(root_dir, ".hg")) or
1317 os.path.exists(os.path.join(root_dir, ".svn"))):
1318 prefix = os.path.commonprefix([root_dir, project_dir])
1319 return fullname[len(prefix) + 1:]
1320
1321 # Don't know what to do; header guard warnings may be wrong...
1322 return fullname
1323
1324 def Split(self):
1325 """Splits the file into the directory, basename, and extension.
1326
1327 For 'chrome/browser/browser.cc', Split() would
1328 return ('chrome/browser', 'browser', '.cc')
1329
1330 Returns:
1331 A tuple of (directory, basename, extension).
1332 """
1333
1334 googlename = self.RepositoryName()
1335 project, rest = os.path.split(googlename)
1336 return (project,) + os.path.splitext(rest)
1337
1338 def BaseName(self):
1339 """File base name - text after the final slash, before the final period."""
1340 return self.Split()[1]
1341
1342 def Extension(self):
1343 """File extension - text following the final period, includes that period."""
1344 return self.Split()[2]
1345
1346 def NoExtension(self):
1347 """File has no source file extension."""
1348 return '/'.join(self.Split()[0:2])
1349
1350 def IsSource(self):
1351 """File has a source file extension."""
1352 return _IsSourceExtension(self.Extension()[1:])
1353
1354
1355def _ShouldPrintError(category, confidence, linenum):
1356 """If confidence >= verbose, category passes filter and is not suppressed."""
1357
1358 # There are three ways we might decide not to print an error message:
1359 # a "NOLINT(category)" comment appears in the source,
1360 # the verbosity level isn't high enough, or the filters filter it out.
1361 if IsErrorSuppressedByNolint(category, linenum):
1362 return False
1363
1364 if confidence < _cpplint_state.verbose_level:
1365 return False
1366
1367 is_filtered = False
1368 for one_filter in _Filters():
1369 if one_filter.startswith('-'):
1370 if category.startswith(one_filter[1:]):
1371 is_filtered = True
1372 elif one_filter.startswith('+'):
1373 if category.startswith(one_filter[1:]):
1374 is_filtered = False
1375 else:
1376 assert False # should have been checked for in SetFilter.
1377 if is_filtered:
1378 return False
1379
1380 return True
1381
1382
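# Illustrative sketch: filters are evaluated left to right, matching the
# --filter examples in the usage text. Hypothetical helper; nothing in cpplint
# calls it.
def _example_filter_evaluation():
  _SetFilters('-whitespace,+whitespace/braces')
  try:
    assert not _ShouldPrintError('whitespace/tab', 5, 1)
    assert _ShouldPrintError('whitespace/braces', 5, 1)
  finally:
    _SetFilters('')  # restore the default filter list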
1383def Error(filename, linenum, category, confidence, message):
1384 """Logs the fact we've found a lint error.
1385
1386 We log where the error was found, and also our confidence in the error,
1387 that is, how certain we are this is a legitimate style regression, and
1388 not a misidentification or a use that's sometimes justified.
1389
1390 False positives can be suppressed by the use of
1391 "cpplint(category)" comments on the offending line. These are
1392 parsed into _error_suppressions.
1393
1394 Args:
1395 filename: The name of the file containing the error.
1396 linenum: The number of the line containing the error.
1397 category: A string used to describe the "category" this bug
1398 falls under: "whitespace", say, or "runtime". Categories
1399 may have a hierarchy separated by slashes: "whitespace/indent".
1400 confidence: A number from 1-5 representing a confidence score for
1401 the error, with 5 meaning that we are certain of the problem,
1402 and 1 meaning that it could be a legitimate construct.
1403 message: The error message.
1404 """
1405 if _ShouldPrintError(category, confidence, linenum):
1406 _cpplint_state.IncrementErrorCount(category)
1407 if _cpplint_state.output_format == 'vs7':
1408 _cpplint_state.PrintError('%s(%s): warning: %s [%s] [%d]\n' % (
1409 filename, linenum, message, category, confidence))
1410 elif _cpplint_state.output_format == 'eclipse':
1411 sys.stderr.write('%s:%s: warning: %s [%s] [%d]\n' % (
1412 filename, linenum, message, category, confidence))
1413 elif _cpplint_state.output_format == 'junit':
1414 _cpplint_state.AddJUnitFailure(filename, linenum, message, category,
1415 confidence)
1416 else:
1417 final_message = '%s:%s: %s [%s] [%d]\n' % (
1418 filename, linenum, message, category, confidence)
1419 sys.stderr.write(final_message)
1420
1421# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
1422_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
1423 r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
1424# Match a single C style comment on the same line.
1425_RE_PATTERN_C_COMMENTS = r'/\*(?:[^*]|\*(?!/))*\*/'
1426# Matches multi-line C style comments.
1427# This RE is a little bit more complicated than one might expect, because we
1428 # have to take care when removing surrounding spaces so we can handle comments inside
1429# statements better.
1430# The current rule is: We only clear spaces from both sides when we're at the
1431# end of the line. Otherwise, we try to remove spaces from the right side,
1432# if this doesn't work we try on left side but only if there's a non-character
1433# on the right.
1434_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
1435 r'(\s*' + _RE_PATTERN_C_COMMENTS + r'\s*$|' +
1436 _RE_PATTERN_C_COMMENTS + r'\s+|' +
1437 r'\s+' + _RE_PATTERN_C_COMMENTS + r'(?=\W)|' +
1438 _RE_PATTERN_C_COMMENTS + r')')
1439
1440
1441def IsCppString(line):
1442 """Does line terminate so, that the next symbol is in string constant.
1443
1444 This function does not consider single-line nor multi-line comments.
1445
1446 Args:
1447 line: a partial line of code, from character 0 to n.
1448
1449 Returns:
1450 True, if next character appended to 'line' is inside a
1451 string constant.
1452 """
1453
1454 line = line.replace(r'\\', 'XX') # after this, \\" does not match to \"
1455 return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1
1456
1457
1458def CleanseRawStrings(raw_lines):
1459 """Removes C++11 raw strings from lines.
1460
1461 Before:
1462 static const char kData[] = R"(
1463 multi-line string
1464 )";
1465
1466 After:
1467 static const char kData[] = ""
1468 (replaced by blank line)
1469 "";
1470
1471 Args:
1472 raw_lines: list of raw lines.
1473
1474 Returns:
1475 list of lines with C++11 raw strings replaced by empty strings.
1476 """
1477
1478 delimiter = None
1479 lines_without_raw_strings = []
1480 for line in raw_lines:
1481 if delimiter:
1482 # Inside a raw string, look for the end
1483 end = line.find(delimiter)
1484 if end >= 0:
1485 # Found the end of the string, match leading space for this
1486 # line and resume copying the original lines, and also insert
1487 # a "" on the last line.
1488 leading_space = Match(r'^(\s*)\S', line)
1489 line = leading_space.group(1) + '""' + line[end + len(delimiter):]
1490 delimiter = None
1491 else:
1492 # Haven't found the end yet, append a blank line.
1493 line = '""'
1494
1495 # Look for beginning of a raw string, and replace them with
1496 # empty strings. This is done in a loop to handle multiple raw
1497 # strings on the same line.
1498 while delimiter is None:
1499 # Look for beginning of a raw string.
1500 # See 2.14.15 [lex.string] for syntax.
1501 #
1502 # Once we have matched a raw string, we check the prefix of the
1503 # line to make sure that the line is not part of a single line
1504 # comment. It's done this way because we remove raw strings
1505 # before removing comments as opposed to removing comments
1506 # before removing raw strings. This is because there are some
1507 # cpplint checks that requires the comments to be preserved, but
1508 # we don't want to check comments that are inside raw strings.
1509 matched = Match(r'^(.*?)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line)
1510 if (matched and
1511 not Match(r'^([^\'"]|\'(\\.|[^\'])*\'|"(\\.|[^"])*")*//',
1512 matched.group(1))):
1513 delimiter = ')' + matched.group(2) + '"'
1514
1515 end = matched.group(3).find(delimiter)
1516 if end >= 0:
1517 # Raw string ended on same line
1518 line = (matched.group(1) + '""' +
1519 matched.group(3)[end + len(delimiter):])
1520 delimiter = None
1521 else:
1522 # Start of a multi-line raw string
1523 line = matched.group(1) + '""'
1524 else:
1525 break
1526
1527 lines_without_raw_strings.append(line)
1528
1529 # TODO(unknown): if delimiter is not None here, we might want to
1530 # emit a warning for unterminated string.
1531 return lines_without_raw_strings
1532
1533
1534def FindNextMultiLineCommentStart(lines, lineix):
1535 """Find the beginning marker for a multiline comment."""
1536 while lineix < len(lines):
1537 if lines[lineix].strip().startswith('/*'):
1538 # Only return this marker if the comment goes beyond this line
1539 if lines[lineix].strip().find('*/', 2) < 0:
1540 return lineix
1541 lineix += 1
1542 return len(lines)
1543
1544
1545def FindNextMultiLineCommentEnd(lines, lineix):
1546 """We are inside a comment, find the end marker."""
1547 while lineix < len(lines):
1548 if lines[lineix].strip().endswith('*/'):
1549 return lineix
1550 lineix += 1
1551 return len(lines)
1552
1553
1554def RemoveMultiLineCommentsFromRange(lines, begin, end):
1555 """Clears a range of lines for multi-line comments."""
1556 # Having '/**/' filler comments makes the lines non-empty, so we will not get
1557 # unnecessary blank line warnings later in the code.
1558 for i in range(begin, end):
1559 lines[i] = '/**/'
1560
1561
1562def RemoveMultiLineComments(filename, lines, error):
1563 """Removes multiline (c-style) comments from lines."""
1564 lineix = 0
1565 while lineix < len(lines):
1566 lineix_begin = FindNextMultiLineCommentStart(lines, lineix)
1567 if lineix_begin >= len(lines):
1568 return
1569 lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin)
1570 if lineix_end >= len(lines):
1571 error(filename, lineix_begin + 1, 'readability/multiline_comment', 5,
1572 'Could not find end of multi-line comment')
1573 return
1574 RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1)
1575 lineix = lineix_end + 1
1576
1577
1578def CleanseComments(line):
1579 """Removes //-comments and single-line C-style /* */ comments.
1580
1581 Args:
1582 line: A line of C++ source.
1583
1584 Returns:
1585 The line with single-line comments removed.
1586 """
1587 commentpos = line.find('//')
1588 if commentpos != -1 and not IsCppString(line[:commentpos]):
1589 line = line[:commentpos].rstrip()
1590 # get rid of /* ... */
1591 return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
1592
1593
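# Illustrative sketch: CleanseComments strips //-comments and same-line /* */
# comments but leaves the rest of the line intact. Hypothetical helper;
# nothing in cpplint calls it.
def _example_cleanse_comments():
  assert CleanseComments('int x = 0;  // count') == 'int x = 0;'
  assert CleanseComments('f(/* arg */ 1);') == 'f(1);'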
1594class CleansedLines(object):
1595 """Holds 4 copies of all lines with different preprocessing applied to them.
1596
1597 1) elided member contains lines without strings and comments.
1598 2) lines member contains lines without comments.
1599 3) raw_lines member contains all the lines without processing.
1600 4) lines_without_raw_strings member is same as raw_lines, but with C++11 raw
1601 strings removed.
1602 All these members are of <type 'list'>, and of the same length.
1603 """
1604
1605 def __init__(self, lines):
1606 self.elided = []
1607 self.lines = []
1608 self.raw_lines = lines
1609 self.num_lines = len(lines)
1610 self.lines_without_raw_strings = CleanseRawStrings(lines)
1611 for linenum in range(len(self.lines_without_raw_strings)):
1612 self.lines.append(CleanseComments(
1613 self.lines_without_raw_strings[linenum]))
1614 elided = self._CollapseStrings(self.lines_without_raw_strings[linenum])
1615 self.elided.append(CleanseComments(elided))
1616
1617 def NumLines(self):
1618 """Returns the number of lines represented."""
1619 return self.num_lines
1620
1621 @staticmethod
1622 def _CollapseStrings(elided):
1623 """Collapses strings and chars on a line to simple "" or '' blocks.
1624
1625 We nix strings first so we're not fooled by text like '"http://"'
1626
1627 Args:
1628 elided: The line being processed.
1629
1630 Returns:
1631 The line with collapsed strings.
1632 """
1633 if _RE_PATTERN_INCLUDE.match(elided):
1634 return elided
1635
1636 # Remove escaped characters first to make quote/single quote collapsing
1637 # basic. Things that look like escaped characters shouldn't occur
1638 # outside of strings and chars.
1639 elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
1640
1641 # Replace quoted strings and digit separators. Both single quotes
1642 # and double quotes are processed in the same loop, otherwise
1643 # nested quotes wouldn't work.
1644 collapsed = ''
1645 while True:
1646 # Find the first quote character
1647 match = Match(r'^([^\'"]*)([\'"])(.*)$', elided)
1648 if not match:
1649 collapsed += elided
1650 break
1651 head, quote, tail = match.groups()
1652
1653 if quote == '"':
1654 # Collapse double quoted strings
1655 second_quote = tail.find('"')
1656 if second_quote >= 0:
1657 collapsed += head + '""'
1658 elided = tail[second_quote + 1:]
1659 else:
1660 # Unmatched double quote, don't bother processing the rest
1661 # of the line since this is probably a multiline string.
1662 collapsed += elided
1663 break
1664 else:
1665 # Found single quote, check nearby text to eliminate digit separators.
1666 #
1667 # There is no special handling for floating point here, because
1668 # the integer/fractional/exponent parts would all be parsed
1669 # correctly as long as there are digits on both sides of the
1670 # separator. So we are fine as long as we don't see something
1671 # like "0.'3" (gcc 4.9.0 will not allow this literal).
1672 if Search(r'\b(?:0[bBxX]?|[1-9])[0-9a-fA-F]*$', head):
1673 match_literal = Match(r'^((?:\'?[0-9a-zA-Z_])*)(.*)$', "'" + tail)
1674 collapsed += head + match_literal.group(1).replace("'", '')
1675 elided = match_literal.group(2)
1676 else:
1677 second_quote = tail.find('\'')
1678 if second_quote >= 0:
1679 collapsed += head + "''"
1680 elided = tail[second_quote + 1:]
1681 else:
1682 # Unmatched single quote
1683 collapsed += elided
1684 break
1685
1686 return collapsed
1687
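# Illustrative sketch (not part of the original cpplint source): for the raw
# input line
#     printf("hello");  // greet
# a CleansedLines instance would hold, per the constructor above,
#     raw_lines[0] : the line exactly as read
#     lines[0]     : 'printf("hello");'   (comment stripped, string kept)
#     elided[0]    : 'printf("");'        (comment stripped, string collapsed)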
1688
1689def FindEndOfExpressionInLine(line, startpos, stack):
1690 """Find the position just after the end of current parenthesized expression.
1691
1692 Args:
1693 line: a CleansedLines line.
1694 startpos: start searching at this position.
1695 stack: nesting stack at startpos.
1696
1697 Returns:
1698 On finding matching end: (index just after matching end, None)
1699 On finding an unclosed expression: (-1, None)
1700 Otherwise: (-1, new stack at end of this line)
1701 """
1702 for i in xrange(startpos, len(line)):
1703 char = line[i]
1704 if char in '([{':
1705 # Found start of parenthesized expression, push to expression stack
1706 stack.append(char)
1707 elif char == '<':
1708 # Found potential start of template argument list
1709 if i > 0 and line[i - 1] == '<':
1710 # Left shift operator
1711 if stack and stack[-1] == '<':
1712 stack.pop()
1713 if not stack:
1714 return (-1, None)
1715 elif i > 0 and Search(r'\boperator\s*$', line[0:i]):
1716 # operator<, don't add to stack
1717 continue
1718 else:
1719 # Tentative start of template argument list
1720 stack.append('<')
1721 elif char in ')]}':
1722 # Found end of parenthesized expression.
1723 #
1724 # If we are currently expecting a matching '>', the pending '<'
1725 # must have been an operator. Remove them from expression stack.
1726 while stack and stack[-1] == '<':
1727 stack.pop()
1728 if not stack:
1729 return (-1, None)
1730 if ((stack[-1] == '(' and char == ')') or
1731 (stack[-1] == '[' and char == ']') or
1732 (stack[-1] == '{' and char == '}')):
1733 stack.pop()
1734 if not stack:
1735 return (i + 1, None)
1736 else:
1737 # Mismatched parentheses
1738 return (-1, None)
1739 elif char == '>':
1740 # Found potential end of template argument list.
1741
1742 # Ignore "->" and operator functions
1743 if (i > 0 and
1744 (line[i - 1] == '-' or Search(r'\boperator\s*$', line[0:i - 1]))):
1745 continue
1746
1747 # Pop the stack if there is a matching '<'. Otherwise, ignore
1748 # this '>' since it must be an operator.
1749 if stack:
1750 if stack[-1] == '<':
1751 stack.pop()
1752 if not stack:
1753 return (i + 1, None)
1754 elif char == ';':
1755 # Found something that looks like the end of a statement. If we are
1756 # currently expecting a '>', the matching '<' must have been an operator,
1757 # since a template argument list should not contain statements.
1758 while stack and stack[-1] == '<':
1759 stack.pop()
1760 if not stack:
1761 return (-1, None)
1762
1763 # Did not find end of expression or unbalanced parentheses on this line
1764 return (-1, stack)
1765
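# Illustrative sketch (not part of the original cpplint source):
#     FindEndOfExpressionInLine('foo(bar, baz) + 1;', 3, [])
# returns (13, None): position 3 is the '(' and 13 is the index just past the
# matching ')'.  If the expression is not closed on this line, e.g.
#     FindEndOfExpressionInLine('foo(bar,', 3, [])
# the result is (-1, ['(']), letting the caller resume on the next line.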
1766
1767def CloseExpression(clean_lines, linenum, pos):
1768 """If input points to ( or { or [ or <, finds the position that closes it.
1769
1770 If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the
1771 linenum/pos that correspond to the closing of the expression.
1772
1773 TODO(unknown): cpplint spends a fair bit of time matching parentheses.
1774 Ideally we would want to index all opening and closing parentheses once
1775 and have CloseExpression be just a simple lookup, but due to preprocessor
1776 tricks, this is not so easy.
1777
1778 Args:
1779 clean_lines: A CleansedLines instance containing the file.
1780 linenum: The number of the line to check.
1781 pos: A position on the line.
1782
1783 Returns:
1784 A tuple (line, linenum, pos) pointer *past* the closing brace, or
1785 (line, len(lines), -1) if we never find a close. Note we ignore
1786 strings and comments when matching; and the line we return is the
1787 'cleansed' line at linenum.
1788 """
1789
1790 line = clean_lines.elided[linenum]
1791 if (line[pos] not in '({[<') or Match(r'<[<=]', line[pos:]):
1792 return (line, clean_lines.NumLines(), -1)
1793
1794 # Check first line
1795 (end_pos, stack) = FindEndOfExpressionInLine(line, pos, [])
1796 if end_pos > -1:
1797 return (line, linenum, end_pos)
1798
1799 # Continue scanning forward
1800 while stack and linenum < clean_lines.NumLines() - 1:
1801 linenum += 1
1802 line = clean_lines.elided[linenum]
1803 (end_pos, stack) = FindEndOfExpressionInLine(line, 0, stack)
1804 if end_pos > -1:
1805 return (line, linenum, end_pos)
1806
1807 # Did not find end of expression before end of file, give up
1808 return (line, clean_lines.NumLines(), -1)
1809
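# Illustrative usage sketch (not part of the original cpplint source): given a
# CleansedLines instance cl whose elided line ln has an opening '(' at column
# col (all hypothetical names),
#     (end_text, end_ln, end_col) = CloseExpression(cl, ln, col)
# yields the cleansed line, line number and column just past the matching ')';
# if no match is found, end_ln == cl.NumLines() and end_col == -1.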
1810
1811def FindStartOfExpressionInLine(line, endpos, stack):
1812 """Find position at the matching start of current expression.
1813
1814 This is almost the reverse of FindEndOfExpressionInLine, but note
1815 that the input position and returned position differ by 1.
1816
1817 Args:
1818 line: a CleansedLines line.
1819 endpos: start searching at this position.
1820 stack: nesting stack at endpos.
1821
1822 Returns:
1823 On finding matching start: (index at matching start, None)
1824 On finding an unclosed expression: (-1, None)
1825 Otherwise: (-1, new stack at beginning of this line)
1826 """
1827 i = endpos
1828 while i >= 0:
1829 char = line[i]
1830 if char in ')]}':
1831 # Found end of expression, push to expression stack
1832 stack.append(char)
1833 elif char == '>':
1834 # Found potential end of template argument list.
1835 #
1836 # Ignore it if it's a "->" or ">=" or "operator>"
1837 if (i > 0 and
1838 (line[i - 1] == '-' or
1839 Match(r'\s>=\s', line[i - 1:]) or
1840 Search(r'\boperator\s*$', line[0:i]))):
1841 i -= 1
1842 else:
1843 stack.append('>')
1844 elif char == '<':
1845 # Found potential start of template argument list
1846 if i > 0 and line[i - 1] == '<':
1847 # Left shift operator
1848 i -= 1
1849 else:
1850 # If there is a matching '>', we can pop the expression stack.
1851 # Otherwise, ignore this '<' since it must be an operator.
1852 if stack and stack[-1] == '>':
1853 stack.pop()
1854 if not stack:
1855 return (i, None)
1856 elif char in '([{':
1857 # Found start of expression.
1858 #
1859 # If there are any unmatched '>' on the stack, they must be
1860 # operators. Remove those.
1861 while stack and stack[-1] == '>':
1862 stack.pop()
1863 if not stack:
1864 return (-1, None)
1865 if ((char == '(' and stack[-1] == ')') or
1866 (char == '[' and stack[-1] == ']') or
1867 (char == '{' and stack[-1] == '}')):
1868 stack.pop()
1869 if not stack:
1870 return (i, None)
1871 else:
1872 # Mismatched parentheses
1873 return (-1, None)
1874 elif char == ';':
2875 # Found something that looks like the end of a statement. If we are
2876 # currently expecting a '<', the matching '>' must have been an operator,
2877 # since a template argument list should not contain statements.
1878 while stack and stack[-1] == '>':
1879 stack.pop()
1880 if not stack:
1881 return (-1, None)
1882
1883 i -= 1
1884
1885 return (-1, stack)
1886
1887
1888def ReverseCloseExpression(clean_lines, linenum, pos):
1889 """If input points to ) or } or ] or >, finds the position that opens it.
1890
1891 If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the
1892 linenum/pos that correspond to the opening of the expression.
1893
1894 Args:
1895 clean_lines: A CleansedLines instance containing the file.
1896 linenum: The number of the line to check.
1897 pos: A position on the line.
1898
1899 Returns:
1900 A tuple (line, linenum, pos) pointer *at* the opening brace, or
1901 (line, 0, -1) if we never find the matching opening brace. Note
1902 we ignore strings and comments when matching; and the line we
1903 return is the 'cleansed' line at linenum.
1904 """
1905 line = clean_lines.elided[linenum]
1906 if line[pos] not in ')}]>':
1907 return (line, 0, -1)
1908
1909 # Check last line
1910 (start_pos, stack) = FindStartOfExpressionInLine(line, pos, [])
1911 if start_pos > -1:
1912 return (line, linenum, start_pos)
1913
1914 # Continue scanning backward
1915 while stack and linenum > 0:
1916 linenum -= 1
1917 line = clean_lines.elided[linenum]
1918 (start_pos, stack) = FindStartOfExpressionInLine(line, len(line) - 1, stack)
1919 if start_pos > -1:
1920 return (line, linenum, start_pos)
1921
1922 # Did not find start of expression before beginning of file, give up
1923 return (line, 0, -1)
1924
1925
1926def CheckForCopyright(filename, lines, error):
1927 """Logs an error if no Copyright message appears at the top of the file."""
1928
1929 # We'll say it should occur by line 10. Don't forget there's a
1930 # dummy line at the front.
1931 for line in range(1, min(len(lines), 11)):
1932 if re.search(r'Copyright', lines[line], re.I): break
1933 else: # means no copyright line was found
1934 error(filename, 0, 'legal/copyright', 5,
1935 'No copyright message found. '
1936 'You should have a line: "Copyright [year] <Copyright Owner>"')
1937
1938
1939def GetIndentLevel(line):
1940 """Return the number of leading spaces in line.
1941
1942 Args:
1943 line: A string to check.
1944
1945 Returns:
1946 An integer count of leading spaces, possibly zero.
1947 """
1948 indent = Match(r'^( *)\S', line)
1949 if indent:
1950 return len(indent.group(1))
1951 else:
1952 return 0
1953
1954
1955def GetHeaderGuardCPPVariable(filename):
1956 """Returns the CPP variable that should be used as a header guard.
1957
1958 Args:
1959 filename: The name of a C++ header file.
1960
1961 Returns:
1962 The CPP variable that should be used as a header guard in the
1963 named file.
1964
1965 """
1966
1967 # Restores original filename in case that cpplint is invoked from Emacs's
1968 # flymake.
1969 filename = re.sub(r'_flymake\.h$', '.h', filename)
1970 filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename)
1971 # Replace 'c++' with 'cpp'.
1972 filename = filename.replace('C++', 'cpp').replace('c++', 'cpp')
1973
1974 fileinfo = FileInfo(filename)
1975 file_path_from_root = fileinfo.RepositoryName()
1976 if _root:
1977 suffix = os.sep
1978 # On Windows using directory separator will leave us with
1979 # "bogus escape error" unless we properly escape regex.
1980 if suffix == '\\':
1981 suffix += '\\'
1982 file_path_from_root = re.sub('^' + _root + suffix, '', file_path_from_root)
1983 return re.sub(r'[^a-zA-Z0-9]', '_', file_path_from_root).upper() + '_'
1984
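# Illustrative sketch (not part of the original cpplint source): for a header
# whose repository-relative path resolves to 'foo/bar/baz.h' (after any
# --root stripping), the function above yields the guard name
#     FOO_BAR_BAZ_H_
# since every non-alphanumeric character becomes '_', the result is
# upper-cased, and a trailing underscore is appended.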
1985
1986def CheckForHeaderGuard(filename, clean_lines, error):
1987 """Checks that the file contains a header guard.
1988
1989 Logs an error if no #ifndef header guard is present. For other
1990 headers, checks that the full pathname is used.
1991
1992 Args:
1993 filename: The name of the C++ header file.
1994 clean_lines: A CleansedLines instance containing the file.
1995 error: The function to call with any errors found.
1996 """
1997
1998 # Don't check for header guards if there are error suppression
1999 # comments somewhere in this file.
2000 #
2001 # Because this is silencing a warning for a nonexistent line, we
2002 # only support the very specific NOLINT(build/header_guard) syntax,
2003 # and not the general NOLINT or NOLINT(*) syntax.
2004 raw_lines = clean_lines.lines_without_raw_strings
2005 for i in raw_lines:
2006 if Search(r'//\s*NOLINT\(build/header_guard\)', i):
2007 return
2008
2009 # Allow pragma once instead of header guards
2010 for i in raw_lines:
2011 if Search(r'^\s*#pragma\s+once', i):
2012 return
2013
2014 cppvar = GetHeaderGuardCPPVariable(filename)
2015
2016 ifndef = ''
2017 ifndef_linenum = 0
2018 define = ''
2019 endif = ''
2020 endif_linenum = 0
2021 for linenum, line in enumerate(raw_lines):
2022 linesplit = line.split()
2023 if len(linesplit) >= 2:
2024 # find the first occurrence of #ifndef and #define, save arg
2025 if not ifndef and linesplit[0] == '#ifndef':
2026 # set ifndef to the header guard presented on the #ifndef line.
2027 ifndef = linesplit[1]
2028 ifndef_linenum = linenum
2029 if not define and linesplit[0] == '#define':
2030 define = linesplit[1]
2031 # find the last occurrence of #endif, save entire line
2032 if line.startswith('#endif'):
2033 endif = line
2034 endif_linenum = linenum
2035
2036 if not ifndef or not define or ifndef != define:
2037 error(filename, 0, 'build/header_guard', 5,
2038 'No #ifndef header guard found, suggested CPP variable is: %s' %
2039 cppvar)
2040 return
2041
2042 # The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
2043 # for backward compatibility.
2044 if ifndef != cppvar:
2045 error_level = 0
2046 if ifndef != cppvar + '_':
2047 error_level = 5
2048
2049 ParseNolintSuppressions(filename, raw_lines[ifndef_linenum], ifndef_linenum,
2050 error)
2051 error(filename, ifndef_linenum, 'build/header_guard', error_level,
2052 '#ifndef header guard has wrong style, please use: %s' % cppvar)
2053
2054 # Check for "//" comments on endif line.
2055 ParseNolintSuppressions(filename, raw_lines[endif_linenum], endif_linenum,
2056 error)
2057 match = Match(r'#endif\s*//\s*' + cppvar + r'(_)?\b', endif)
2058 if match:
2059 if match.group(1) == '_':
2060 # Issue low severity warning for deprecated double trailing underscore
2061 error(filename, endif_linenum, 'build/header_guard', 0,
2062 '#endif line should be "#endif // %s"' % cppvar)
2063 return
2064
2065 # Didn't find the corresponding "//" comment. If this file does not
2066 # contain any "//" comments at all, it could be that the compiler
2067 # only wants "/**/" comments, look for those instead.
2068 no_single_line_comments = True
2069 for i in xrange(1, len(raw_lines) - 1):
2070 line = raw_lines[i]
2071 if Match(r'^(?:(?:\'(?:\.|[^\'])*\')|(?:"(?:\.|[^"])*")|[^\'"])*//', line):
2072 no_single_line_comments = False
2073 break
2074
2075 if no_single_line_comments:
2076 match = Match(r'#endif\s*/\*\s*' + cppvar + r'(_)?\s*\*/', endif)
2077 if match:
2078 if match.group(1) == '_':
2079 # Low severity warning for double trailing underscore
2080 error(filename, endif_linenum, 'build/header_guard', 0,
2081 '#endif line should be "#endif /* %s */"' % cppvar)
2082 return
2083
2084 # Didn't find anything
2085 error(filename, endif_linenum, 'build/header_guard', 5,
2086 '#endif line should be "#endif // %s"' % cppvar)
2087
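# Illustrative sketch (not part of the original cpplint source): a header laid
# out the way the check above expects, for guard variable FOO_BAR_BAZ_H_:
#     #ifndef FOO_BAR_BAZ_H_
#     #define FOO_BAR_BAZ_H_
#     ...
#     #endif  // FOO_BAR_BAZ_H_
# A '#pragma once' line is accepted instead, as is a
# '// NOLINT(build/header_guard)' comment anywhere in the file.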
2088
2089def CheckHeaderFileIncluded(filename, include_state, error):
2090 """Logs an error if a source file does not include its header."""
2091
2092 # Do not check test files
2093 fileinfo = FileInfo(filename)
2094 if Search(_TEST_FILE_SUFFIX, fileinfo.BaseName()):
2095 return
2096
2097 for ext in GetHeaderExtensions():
2098 basefilename = filename[0:len(filename) - len(fileinfo.Extension())]
2099 headerfile = basefilename + '.' + ext
2100 if not os.path.exists(headerfile):
2101 continue
2102 headername = FileInfo(headerfile).RepositoryName()
2103 first_include = None
2104 for section_list in include_state.include_list:
2105 for f in section_list:
2106 if headername in f[0] or f[0] in headername:
2107 return
2108 if not first_include:
2109 first_include = f[1]
2110
2111 error(filename, first_include, 'build/include', 5,
2112 '%s should include its header file %s' % (fileinfo.RepositoryName(),
2113 headername))
2114
2115
2116def CheckForBadCharacters(filename, lines, error):
2117 """Logs an error for each line containing bad characters.
2118
2119 Two kinds of bad characters:
2120
2121 1. Unicode replacement characters: These indicate that either the file
2122 contained invalid UTF-8 (likely) or Unicode replacement characters (which
2123 it shouldn't). Note that it's possible for this to throw off line
2124 numbering if the invalid UTF-8 occurred adjacent to a newline.
2125
2126 2. NUL bytes. These are problematic for some tools.
2127
2128 Args:
2129 filename: The name of the current file.
2130 lines: An array of strings, each representing a line of the file.
2131 error: The function to call with any errors found.
2132 """
2133 for linenum, line in enumerate(lines):
2134 if unicode_escape_decode('\ufffd') in line:
2135 error(filename, linenum, 'readability/utf8', 5,
2136 'Line contains invalid UTF-8 (or Unicode replacement character).')
2137 if '\0' in line:
2138 error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.')
2139
2140
2141def CheckForNewlineAtEOF(filename, lines, error):
2142 """Logs an error if there is no newline char at the end of the file.
2143
2144 Args:
2145 filename: The name of the current file.
2146 lines: An array of strings, each representing a line of the file.
2147 error: The function to call with any errors found.
2148 """
2149
2150 # The array lines() was created by adding two newlines to the
2151 # original file (go figure), then splitting on \n.
2152 # To verify that the file ends in \n, we just have to make sure the
2153 # last-but-two element of lines() exists and is empty.
2154 if len(lines) < 3 or lines[-2]:
2155 error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
2156 'Could not find a newline character at the end of the file.')
2157
2158
2159def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
2160 """Logs an error if we see /* ... */ or "..." that extend past one line.
2161
2162 /* ... */ comments are legit inside macros, for one line.
2163 Otherwise, we prefer // comments, so it's ok to warn about the
2164 other. Likewise, it's ok for strings to extend across multiple
2165 lines, as long as a line continuation character (backslash)
2166 terminates each line. Although not currently prohibited by the C++
2167 style guide, it's ugly and unnecessary. We don't do well with either
2168 in this lint program, so we warn about both.
2169
2170 Args:
2171 filename: The name of the current file.
2172 clean_lines: A CleansedLines instance containing the file.
2173 linenum: The number of the line to check.
2174 error: The function to call with any errors found.
2175 """
2176 line = clean_lines.elided[linenum]
2177
2178 # Remove all \\ (escaped backslashes) from the line. They are OK, and the
2179 # second (escaped) slash may trigger later \" detection erroneously.
2180 line = line.replace('\\\\', '')
2181
2182 if line.count('/*') > line.count('*/'):
2183 error(filename, linenum, 'readability/multiline_comment', 5,
2184 'Complex multi-line /*...*/-style comment found. '
2185 'Lint may give bogus warnings. '
2186 'Consider replacing these with //-style comments, '
2187 'with #if 0...#endif, '
2188 'or with more clearly structured multi-line comments.')
2189
2190 if (line.count('"') - line.count('\\"')) % 2:
2191 error(filename, linenum, 'readability/multiline_string', 5,
2192 'Multi-line string ("...") found. This lint script doesn\'t '
2193 'do well with such strings, and may give bogus warnings. '
2194 'Use C++11 raw strings or concatenation instead.')
2195
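# Illustrative sketch (not part of the original cpplint source): elided lines
# that CheckForMultilineCommentsAndStrings above would flag:
#     int x = 0;  /* explanation continued on the next line
# (unbalanced '/*'), and
#     const char* s = "text split across lines
# (odd number of unescaped double quotes).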
2196
2197# (non-threadsafe name, thread-safe alternative, validation pattern)
2198#
2199# The validation pattern is used to eliminate false positives such as:
2200# _rand(); // false positive due to substring match.
2201# ->rand(); // some member function rand().
2202# ACMRandom rand(seed); // some variable named rand.
2203# ISAACRandom rand(); // another variable named rand.
2204#
2205# Basically we require the return value of these functions to be used
2206# in some expression context on the same line by matching on some
2207# operator before the function name. This eliminates constructors and
2208# member function calls.
2209_UNSAFE_FUNC_PREFIX = r'(?:[-+*/=%^&|(<]\s*|>\s+)'
2210_THREADING_LIST = (
2211 ('asctime(', 'asctime_r(', _UNSAFE_FUNC_PREFIX + r'asctime\([^)]+\)'),
2212 ('ctime(', 'ctime_r(', _UNSAFE_FUNC_PREFIX + r'ctime\([^)]+\)'),
2213 ('getgrgid(', 'getgrgid_r(', _UNSAFE_FUNC_PREFIX + r'getgrgid\([^)]+\)'),
2214 ('getgrnam(', 'getgrnam_r(', _UNSAFE_FUNC_PREFIX + r'getgrnam\([^)]+\)'),
2215 ('getlogin(', 'getlogin_r(', _UNSAFE_FUNC_PREFIX + r'getlogin\(\)'),
2216 ('getpwnam(', 'getpwnam_r(', _UNSAFE_FUNC_PREFIX + r'getpwnam\([^)]+\)'),
2217 ('getpwuid(', 'getpwuid_r(', _UNSAFE_FUNC_PREFIX + r'getpwuid\([^)]+\)'),
2218 ('gmtime(', 'gmtime_r(', _UNSAFE_FUNC_PREFIX + r'gmtime\([^)]+\)'),
2219 ('localtime(', 'localtime_r(', _UNSAFE_FUNC_PREFIX + r'localtime\([^)]+\)'),
2220 ('rand(', 'rand_r(', _UNSAFE_FUNC_PREFIX + r'rand\(\)'),
2221 ('strtok(', 'strtok_r(',
2222 _UNSAFE_FUNC_PREFIX + r'strtok\([^)]+\)'),
2223 ('ttyname(', 'ttyname_r(', _UNSAFE_FUNC_PREFIX + r'ttyname\([^)]+\)'),
2224 )
2225
2226
2227def CheckPosixThreading(filename, clean_lines, linenum, error):
2228 """Checks for calls to thread-unsafe functions.
2229
2230 Much code was originally written without consideration for
2231 multi-threading. Engineers also rely on their old experience;
2232 they learned POSIX before the threading extensions were added. These
2233 tests guide engineers toward the thread-safe functions (when using
2234 POSIX directly).
2235
2236 Args:
2237 filename: The name of the current file.
2238 clean_lines: A CleansedLines instance containing the file.
2239 linenum: The number of the line to check.
2240 error: The function to call with any errors found.
2241 """
2242 line = clean_lines.elided[linenum]
2243 for single_thread_func, multithread_safe_func, pattern in _THREADING_LIST:
2244 # Additional pattern matching check to confirm that this is the
2245 # function we are looking for
2246 if Search(pattern, line):
2247 error(filename, linenum, 'runtime/threadsafe_fn', 2,
2248 'Consider using ' + multithread_safe_func +
2249 '...) instead of ' + single_thread_func +
2250 '...) for improved thread safety.')
2251
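# Illustrative sketch (not part of the original cpplint source): lines that
# the patterns in _THREADING_LIST catch or deliberately skip:
#     seed = rand();           // flagged: suggests rand_r(...) instead
#     ACMRandom rand(seed);    // not flagged: a variable named rand
#     t = localtime(&now);     // flagged: suggests localtime_r(...)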
2252
2253def CheckVlogArguments(filename, clean_lines, linenum, error):
2254 """Checks that VLOG() is only used for defining a logging level.
2255
2256 For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and
2257 VLOG(FATAL) are not.
2258
2259 Args:
2260 filename: The name of the current file.
2261 clean_lines: A CleansedLines instance containing the file.
2262 linenum: The number of the line to check.
2263 error: The function to call with any errors found.
2264 """
2265 line = clean_lines.elided[linenum]
2266 if Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', line):
2267 error(filename, linenum, 'runtime/vlog', 5,
2268 'VLOG() should be used with numeric verbosity level. '
2269 'Use LOG() if you want symbolic severity levels.')
2270
2271# Matches invalid increment: *count++, which moves pointer instead of
2272# incrementing a value.
2273_RE_PATTERN_INVALID_INCREMENT = re.compile(
2274 r'^\s*\*\w+(\+\+|--);')
2275
2276
2277def CheckInvalidIncrement(filename, clean_lines, linenum, error):
2278 """Checks for invalid increment *count++.
2279
2280 For example following function:
2281 void increment_counter(int* count) {
2282 *count++;
2283 }
2284 is invalid, because it effectively does count++, moving the pointer, and should
2285 be replaced with ++*count, (*count)++ or *count += 1.
2286
2287 Args:
2288 filename: The name of the current file.
2289 clean_lines: A CleansedLines instance containing the file.
2290 linenum: The number of the line to check.
2291 error: The function to call with any errors found.
2292 """
2293 line = clean_lines.elided[linenum]
2294 if _RE_PATTERN_INVALID_INCREMENT.match(line):
2295 error(filename, linenum, 'runtime/invalid_increment', 5,
2296 'Changing pointer instead of value (or unused value of operator*).')
2297
2298
2299def IsMacroDefinition(clean_lines, linenum):
2300 if Search(r'^#define', clean_lines[linenum]):
2301 return True
2302
2303 if linenum > 0 and Search(r'\\$', clean_lines[linenum - 1]):
2304 return True
2305
2306 return False
2307
2308
2309def IsForwardClassDeclaration(clean_lines, linenum):
2310 return Match(r'^\s*(\btemplate\b)*.*class\s+\w+;\s*$', clean_lines[linenum])
2311
2312
2313class _BlockInfo(object):
2314 """Stores information about a generic block of code."""
2315
2316 def __init__(self, linenum, seen_open_brace):
2317 self.starting_linenum = linenum
2318 self.seen_open_brace = seen_open_brace
2319 self.open_parentheses = 0
2320 self.inline_asm = _NO_ASM
2321 self.check_namespace_indentation = False
2322
2323 def CheckBegin(self, filename, clean_lines, linenum, error):
2324 """Run checks that apply to text up to the opening brace.
2325
2326 This is mostly for checking the text after the class identifier
2327 and the "{", usually where the base class is specified. For other
2328 blocks, there isn't much to check, so we always pass.
2329
2330 Args:
2331 filename: The name of the current file.
2332 clean_lines: A CleansedLines instance containing the file.
2333 linenum: The number of the line to check.
2334 error: The function to call with any errors found.
2335 """
2336 pass
2337
2338 def CheckEnd(self, filename, clean_lines, linenum, error):
2339 """Run checks that apply to text after the closing brace.
2340
2341 This is mostly used for checking end of namespace comments.
2342
2343 Args:
2344 filename: The name of the current file.
2345 clean_lines: A CleansedLines instance containing the file.
2346 linenum: The number of the line to check.
2347 error: The function to call with any errors found.
2348 """
2349 pass
2350
2351 def IsBlockInfo(self):
2352 """Returns true if this block is a _BlockInfo.
2353
2354 This is convenient for verifying that an object is an instance of
2355 a _BlockInfo, but not an instance of any of the derived classes.
2356
2357 Returns:
2358 True for this class, False for derived classes.
2359 """
2360 return self.__class__ == _BlockInfo
2361
2362
2363class _ExternCInfo(_BlockInfo):
2364 """Stores information about an 'extern "C"' block."""
2365
2366 def __init__(self, linenum):
2367 _BlockInfo.__init__(self, linenum, True)
2368
2369
2370class _ClassInfo(_BlockInfo):
2371 """Stores information about a class."""
2372
2373 def __init__(self, name, class_or_struct, clean_lines, linenum):
2374 _BlockInfo.__init__(self, linenum, False)
2375 self.name = name
2376 self.is_derived = False
2377 self.check_namespace_indentation = True
2378 if class_or_struct == 'struct':
2379 self.access = 'public'
2380 self.is_struct = True
2381 else:
2382 self.access = 'private'
2383 self.is_struct = False
2384
2385 # Remember initial indentation level for this class. Using raw_lines here
2386 # instead of elided to account for leading comments.
2387 self.class_indent = GetIndentLevel(clean_lines.raw_lines[linenum])
2388
2389 # Try to find the end of the class. This will be confused by things like:
2390 # class A {
2391 # } *x = { ...
2392 #
2393 # But it's still good enough for CheckSectionSpacing.
2394 self.last_line = 0
2395 depth = 0
2396 for i in range(linenum, clean_lines.NumLines()):
2397 line = clean_lines.elided[i]
2398 depth += line.count('{') - line.count('}')
2399 if not depth:
2400 self.last_line = i
2401 break
2402
2403 def CheckBegin(self, filename, clean_lines, linenum, error):
2404 # Look for a bare ':'
2405 if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]):
2406 self.is_derived = True
2407
2408 def CheckEnd(self, filename, clean_lines, linenum, error):
2409 # If there is a DISALLOW macro, it should appear near the end of
2410 # the class.
2411 seen_last_thing_in_class = False
2412 for i in xrange(linenum - 1, self.starting_linenum, -1):
2413 match = Search(
2414 r'\b(DISALLOW_COPY_AND_ASSIGN|DISALLOW_IMPLICIT_CONSTRUCTORS)\(' +
2415 self.name + r'\)',
2416 clean_lines.elided[i])
2417 if match:
2418 if seen_last_thing_in_class:
2419 error(filename, i, 'readability/constructors', 3,
2420 match.group(1) + ' should be the last thing in the class')
2421 break
2422
2423 if not Match(r'^\s*$', clean_lines.elided[i]):
2424 seen_last_thing_in_class = True
2425
2426 # Check that closing brace is aligned with beginning of the class.
2427 # Only do this if the closing brace is indented by only whitespaces.
2428 # This means we will not check single-line class definitions.
2429 indent = Match(r'^( *)\}', clean_lines.elided[linenum])
2430 if indent and len(indent.group(1)) != self.class_indent:
2431 if self.is_struct:
2432 parent = 'struct ' + self.name
2433 else:
2434 parent = 'class ' + self.name
2435 error(filename, linenum, 'whitespace/indent', 3,
2436 'Closing brace should be aligned with beginning of %s' % parent)
2437
2438
2439class _NamespaceInfo(_BlockInfo):
2440 """Stores information about a namespace."""
2441
2442 def __init__(self, name, linenum):
2443 _BlockInfo.__init__(self, linenum, False)
2444 self.name = name or ''
2445 self.check_namespace_indentation = True
2446
2447 def CheckEnd(self, filename, clean_lines, linenum, error):
2448 """Check end of namespace comments."""
2449 line = clean_lines.raw_lines[linenum]
2450
2451 # Check how many lines are enclosed in this namespace. Don't issue
2452 # a warning for missing namespace comments if there aren't enough
2453 # lines. However, do apply checks if there is already an end-of-
2454 # namespace comment and it's incorrect.
2455 #
2456 # TODO(unknown): We always want to check end of namespace comments
2457 # if a namespace is large, but sometimes we also want to apply the
2458 # check if a short namespace contained nontrivial things (something
2459 # other than forward declarations). There is currently no logic on
2460 # deciding what these nontrivial things are, so this check is
2461 # triggered by namespace size only, which works most of the time.
2462 if (linenum - self.starting_linenum < 10
2463 and not Match(r'^\s*};*\s*(//|/\*).*\bnamespace\b', line)):
2464 return
2465
2466 # Look for matching comment at end of namespace.
2467 #
2468 # Note that we accept C-style "/* */" comments for terminating
2469 # namespaces, so that code that terminates namespaces inside
2470 # preprocessor macros can be cpplint clean.
2471 #
2472 # We also accept stuff like "// end of namespace <name>." with the
2473 # period at the end.
2474 #
2475 # Besides these, we don't accept anything else; otherwise we might
2476 # get false negatives when the existing comment is a substring of the
2477 # expected namespace.
2478 if self.name:
2479 # Named namespace
2480 if not Match((r'^\s*};*\s*(//|/\*).*\bnamespace\s+' +
2481 re.escape(self.name) + r'[\*/\.\\\s]*$'),
2482 line):
2483 error(filename, linenum, 'readability/namespace', 5,
2484 'Namespace should be terminated with "// namespace %s"' %
2485 self.name)
2486 else:
2487 # Anonymous namespace
2488 if not Match(r'^\s*};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line):
2489 # If "// namespace anonymous" or "// anonymous namespace (more text)",
2490 # mention "// anonymous namespace" as an acceptable form
2491 if Match(r'^\s*}.*\b(namespace anonymous|anonymous namespace)\b', line):
2492 error(filename, linenum, 'readability/namespace', 5,
2493 'Anonymous namespace should be terminated with "// namespace"'
2494 ' or "// anonymous namespace"')
2495 else:
2496 error(filename, linenum, 'readability/namespace', 5,
2497 'Anonymous namespace should be terminated with "// namespace"')
2498
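# Illustrative sketch (not part of the original cpplint source): endings that
# _NamespaceInfo.CheckEnd accepts for a block opened with 'namespace foo {':
#     }  // namespace foo
#     }  /* namespace foo */
#     }  // end of namespace foo.
# whereas '}  // foo' (no 'namespace' keyword) draws a readability/namespace
# error once the namespace spans roughly 10 lines or more.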
2499
2500class _PreprocessorInfo(object):
2501 """Stores checkpoints of nesting stacks when #if/#else is seen."""
2502
2503 def __init__(self, stack_before_if):
2504 # The entire nesting stack before #if
2505 self.stack_before_if = stack_before_if
2506
2507 # The entire nesting stack up to #else
2508 self.stack_before_else = []
2509
2510 # Whether we have already seen #else or #elif
2511 self.seen_else = False
2512
2513
2514class NestingState(object):
2515 """Holds states related to parsing braces."""
2516
2517 def __init__(self):
2518 # Stack for tracking all braces. An object is pushed whenever we
2519 # see a "{", and popped when we see a "}". Only 3 types of
2520 # objects are possible:
2521 # - _ClassInfo: a class or struct.
2522 # - _NamespaceInfo: a namespace.
2523 # - _BlockInfo: some other type of block.
2524 self.stack = []
2525
2526 # Top of the previous stack before each Update().
2527 #
2528 # Because the nesting_stack is updated at the end of each line, we
2529 # had to do some convoluted checks to find out what the current
2530 # scope is at the beginning of the line. This check is simplified by
2531 # saving the previous top of the nesting stack.
2532 #
2533 # We could save the full stack, but we only need the top. Copying
2534 # the full nesting stack would slow down cpplint by ~10%.
2535 self.previous_stack_top = []
2536
2537 # Stack of _PreprocessorInfo objects.
2538 self.pp_stack = []
2539
2540 def SeenOpenBrace(self):
2541 """Check if we have seen the opening brace for the innermost block.
2542
2543 Returns:
2544 True if we have seen the opening brace, False if the innermost
2545 block is still expecting an opening brace.
2546 """
2547 return (not self.stack) or self.stack[-1].seen_open_brace
2548
2549 def InNamespaceBody(self):
2550 """Check if we are currently one level inside a namespace body.
2551
2552 Returns:
2553 True if top of the stack is a namespace block, False otherwise.
2554 """
2555 return self.stack and isinstance(self.stack[-1], _NamespaceInfo)
2556
2557 def InExternC(self):
2558 """Check if we are currently one level inside an 'extern "C"' block.
2559
2560 Returns:
2561 True if top of the stack is an extern block, False otherwise.
2562 """
2563 return self.stack and isinstance(self.stack[-1], _ExternCInfo)
2564
2565 def InClassDeclaration(self):
2566 """Check if we are currently one level inside a class or struct declaration.
2567
2568 Returns:
2569 True if top of the stack is a class/struct, False otherwise.
2570 """
2571 return self.stack and isinstance(self.stack[-1], _ClassInfo)
2572
2573 def InAsmBlock(self):
2574 """Check if we are currently one level inside an inline ASM block.
2575
2576 Returns:
2577 True if the top of the stack is a block containing inline ASM.
2578 """
2579 return self.stack and self.stack[-1].inline_asm != _NO_ASM
2580
2581 def InTemplateArgumentList(self, clean_lines, linenum, pos):
2582 """Check if current position is inside template argument list.
2583
2584 Args:
2585 clean_lines: A CleansedLines instance containing the file.
2586 linenum: The number of the line to check.
2587 pos: position just after the suspected template argument.
2588 Returns:
2589 True if (linenum, pos) is inside template arguments.
2590 """
2591 while linenum < clean_lines.NumLines():
2592 # Find the earliest character that might indicate a template argument
2593 line = clean_lines.elided[linenum]
2594 match = Match(r'^[^{};=\[\]\.<>]*(.)', line[pos:])
2595 if not match:
2596 linenum += 1
2597 pos = 0
2598 continue
2599 token = match.group(1)
2600 pos += len(match.group(0))
2601
2602 # These things do not look like template argument list:
2603 # class Suspect {
2604 # class Suspect x; }
2605 if token in ('{', '}', ';'): return False
2606
2607 # These things look like template argument list:
2608 # template <class Suspect>
2609 # template <class Suspect = default_value>
2610 # template <class Suspect[]>
2611 # template <class Suspect...>
2612 if token in ('>', '=', '[', ']', '.'): return True
2613
2614 # Check if token is an unmatched '<'.
2615 # If not, move on to the next character.
2616 if token != '<':
2617 pos += 1
2618 if pos >= len(line):
2619 linenum += 1
2620 pos = 0
2621 continue
2622
2623 # We can't be sure if we just find a single '<', and need to
2624 # find the matching '>'.
2625 (_, end_line, end_pos) = CloseExpression(clean_lines, linenum, pos - 1)
2626 if end_pos < 0:
2627 # Not sure if template argument list or syntax error in file
2628 return False
2629 linenum = end_line
2630 pos = end_pos
2631 return False
2632
2633 def UpdatePreprocessor(self, line):
2634 """Update preprocessor stack.
2635
2636 We need to handle preprocessors due to classes like this:
2637 #ifdef SWIG
2638 struct ResultDetailsPageElementExtensionPoint {
2639 #else
2640 struct ResultDetailsPageElementExtensionPoint : public Extension {
2641 #endif
2642
2643 We make the following assumptions (good enough for most files):
2644 - Preprocessor condition evaluates to true from #if up to first
2645 #else/#elif/#endif.
2646
2647 - Preprocessor condition evaluates to false from #else/#elif up
2648 to #endif. We still perform lint checks on these lines, but
2649 these do not affect nesting stack.
2650
2651 Args:
2652 line: current line to check.
2653 """
2654 if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line):
2655 # Beginning of #if block, save the nesting stack here. The saved
2656 # stack will allow us to restore the parsing state in the #else case.
2657 self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack)))
2658 elif Match(r'^\s*#\s*(else|elif)\b', line):
2659 # Beginning of #else block
2660 if self.pp_stack:
2661 if not self.pp_stack[-1].seen_else:
2662 # This is the first #else or #elif block. Remember the
2663 # whole nesting stack up to this point. This is what we
2664 # keep after the #endif.
2665 self.pp_stack[-1].seen_else = True
2666 self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack)
2667
2668 # Restore the stack to how it was before the #if
2669 self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if)
2670 else:
2671 # TODO(unknown): unexpected #else, issue warning?
2672 pass
2673 elif Match(r'^\s*#\s*endif\b', line):
2674 # End of #if or #else blocks.
2675 if self.pp_stack:
2676 # If we saw an #else, we will need to restore the nesting
2677 # stack to its former state before the #else, otherwise we
2678 # will just continue from where we left off.
2679 if self.pp_stack[-1].seen_else:
2680 # Here we can just use a shallow copy since we are the last
2681 # reference to it.
2682 self.stack = self.pp_stack[-1].stack_before_else
2683 # Drop the corresponding #if
2684 self.pp_stack.pop()
2685 else:
2686 # TODO(unknown): unexpected #endif, issue warning?
2687 pass
2688
2689 # TODO(unknown): Update() is too long, but we will refactor later.
2690 def Update(self, filename, clean_lines, linenum, error):
2691 """Update nesting state with current line.
2692
2693 Args:
2694 filename: The name of the current file.
2695 clean_lines: A CleansedLines instance containing the file.
2696 linenum: The number of the line to check.
2697 error: The function to call with any errors found.
2698 """
2699 line = clean_lines.elided[linenum]
2700
2701 # Remember top of the previous nesting stack.
2702 #
2703 # The stack is always pushed/popped and not modified in place, so
2704 # we can just do a shallow copy instead of copy.deepcopy. Using
2705 # deepcopy would slow down cpplint by ~28%.
2706 if self.stack:
2707 self.previous_stack_top = self.stack[-1]
2708 else:
2709 self.previous_stack_top = None
2710
2711 # Update pp_stack
2712 self.UpdatePreprocessor(line)
2713
2714 # Count parentheses. This is to avoid adding struct arguments to
2715 # the nesting stack.
2716 if self.stack:
2717 inner_block = self.stack[-1]
2718 depth_change = line.count('(') - line.count(')')
2719 inner_block.open_parentheses += depth_change
2720
2721 # Also check if we are starting or ending an inline assembly block.
2722 if inner_block.inline_asm in (_NO_ASM, _END_ASM):
2723 if (depth_change != 0 and
2724 inner_block.open_parentheses == 1 and
2725 _MATCH_ASM.match(line)):
2726 # Enter assembly block
2727 inner_block.inline_asm = _INSIDE_ASM
2728 else:
2729 # Not entering assembly block. If previous line was _END_ASM,
2730 # we will now shift to _NO_ASM state.
2731 inner_block.inline_asm = _NO_ASM
2732 elif (inner_block.inline_asm == _INSIDE_ASM and
2733 inner_block.open_parentheses == 0):
2734 # Exit assembly block
2735 inner_block.inline_asm = _END_ASM
2736
2737 # Consume namespace declaration at the beginning of the line. Do
2738 # this in a loop so that we catch same-line declarations like this:
2739 # namespace proto2 { namespace bridge { class MessageSet; } }
2740 while True:
2741 # Match start of namespace. The "\b\s*" below catches namespace
2742 # declarations even if they aren't followed by whitespace; this
2743 # is so that we don't confuse our namespace checker. The
2744 # missing spaces will be flagged by CheckSpacing.
2745 namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line)
2746 if not namespace_decl_match:
2747 break
2748
2749 new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum)
2750 self.stack.append(new_namespace)
2751
2752 line = namespace_decl_match.group(2)
2753 if line.find('{') != -1:
2754 new_namespace.seen_open_brace = True
2755 line = line[line.find('{') + 1:]
2756
2757 # Look for a class declaration in whatever is left of the line
2758 # after parsing namespaces. The regexp accounts for decorated classes
2759 # such as in:
2760 # class LOCKABLE API Object {
2761 # };
2762 class_decl_match = Match(
2763 r'^(\s*(?:template\s*<[\w\s<>,:=]*>\s*)?'
2764 r'(class|struct)\s+(?:[A-Z_]+\s+)*(\w+(?:::\w+)*))'
2765 r'(.*)$', line)
2766 if (class_decl_match and
2767 (not self.stack or self.stack[-1].open_parentheses == 0)):
2768 # We do not want to accept classes that are actually template arguments:
2769 # template <class Ignore1,
2770 # class Ignore2 = Default<Args>,
2771 # template <Args> class Ignore3>
2772 # void Function() {};
2773 #
2774 # To avoid template argument cases, we scan forward and look for
2775 # an unmatched '>'. If we see one, assume we are inside a
2776 # template argument list.
2777 end_declaration = len(class_decl_match.group(1))
2778 if not self.InTemplateArgumentList(clean_lines, linenum, end_declaration):
2779 self.stack.append(_ClassInfo(
2780 class_decl_match.group(3), class_decl_match.group(2),
2781 clean_lines, linenum))
2782 line = class_decl_match.group(4)
2783
2784 # If we have not yet seen the opening brace for the innermost block,
2785 # run checks here.
2786 if not self.SeenOpenBrace():
2787 self.stack[-1].CheckBegin(filename, clean_lines, linenum, error)
2788
2789 # Update access control if we are inside a class/struct
2790 if self.stack and isinstance(self.stack[-1], _ClassInfo):
2791 classinfo = self.stack[-1]
2792 access_match = Match(
2793 r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?'
2794 r':(?:[^:]|$)',
2795 line)
2796 if access_match:
2797 classinfo.access = access_match.group(2)
2798
2799 # Check that access keywords are indented +1 space. Skip this
2800 # check if the keywords are not preceded by whitespaces.
2801 indent = access_match.group(1)
2802 if (len(indent) != classinfo.class_indent + 1 and
2803 Match(r'^\s*$', indent)):
2804 if classinfo.is_struct:
2805 parent = 'struct ' + classinfo.name
2806 else:
2807 parent = 'class ' + classinfo.name
2808 slots = ''
2809 if access_match.group(3):
2810 slots = access_match.group(3)
2811 error(filename, linenum, 'whitespace/indent', 3,
2812 '%s%s: should be indented +1 space inside %s' % (
2813 access_match.group(2), slots, parent))
2814
2815 # Consume braces or semicolons from what's left of the line
2816 while True:
2817 # Match first brace, semicolon, or closed parenthesis.
2818 matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line)
2819 if not matched:
2820 break
2821
2822 token = matched.group(1)
2823 if token == '{':
2824 # If the namespace or class hasn't seen an opening brace yet, mark
2825 # the namespace/class head as complete. Otherwise push a new block
2826 # onto the stack.
2827 if not self.SeenOpenBrace():
2828 self.stack[-1].seen_open_brace = True
2829 elif Match(r'^extern\s*"[^"]*"\s*\{', line):
2830 self.stack.append(_ExternCInfo(linenum))
2831 else:
2832 self.stack.append(_BlockInfo(linenum, True))
2833 if _MATCH_ASM.match(line):
2834 self.stack[-1].inline_asm = _BLOCK_ASM
2835
2836 elif token == ';' or token == ')':
2837 # If we haven't seen an opening brace yet, but we already saw
2838 # a semicolon, this is probably a forward declaration. Pop
2839 # the stack for these.
2840 #
2841 # Similarly, if we haven't seen an opening brace yet, but we
2842 # already saw a closing parenthesis, then these are probably
2843 # function arguments with extra "class" or "struct" keywords.
2844 # Also pop the stack for these.
2845 if not self.SeenOpenBrace():
2846 self.stack.pop()
2847 else: # token == '}'
2848 # Perform end of block checks and pop the stack.
2849 if self.stack:
2850 self.stack[-1].CheckEnd(filename, clean_lines, linenum, error)
2851 self.stack.pop()
2852 line = matched.group(2)
2853
2854 def InnermostClass(self):
2855 """Get class info on the top of the stack.
2856
2857 Returns:
2858 A _ClassInfo object if we are inside a class, or None otherwise.
2859 """
2860 for i in range(len(self.stack), 0, -1):
2861 classinfo = self.stack[i - 1]
2862 if isinstance(classinfo, _ClassInfo):
2863 return classinfo
2864 return None
2865
2866 def CheckCompletedBlocks(self, filename, error):
2867 """Checks that all classes and namespaces have been completely parsed.
2868
2869 Call this when all lines in a file have been processed.
2870 Args:
2871 filename: The name of the current file.
2872 error: The function to call with any errors found.
2873 """
2874 # Note: This test can result in false positives if #ifdef constructs
2875 # get in the way of brace matching. See the testBuildClass test in
2876 # cpplint_unittest.py for an example of this.
2877 for obj in self.stack:
2878 if isinstance(obj, _ClassInfo):
2879 error(filename, obj.starting_linenum, 'build/class', 5,
2880 'Failed to find complete declaration of class %s' %
2881 obj.name)
2882 elif isinstance(obj, _NamespaceInfo):
2883 error(filename, obj.starting_linenum, 'build/namespaces', 5,
2884 'Failed to find complete declaration of namespace %s' %
2885 obj.name)
2886
2887
2888def CheckForNonStandardConstructs(filename, clean_lines, linenum,
2889 nesting_state, error):
2890 r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
2891
2892 Complain about several constructs which gcc-2 accepts, but which are
2893 not standard C++. Warning about these in lint is one way to ease the
2894 transition to new compilers.
2895 - put storage class first (e.g. "static const" instead of "const static").
2896 - "%lld" instead of "%qd" in printf-type functions.
2897 - "%1$d" is non-standard in printf-type functions.
2898 - "\%" is an undefined character escape sequence.
2899 - text after #endif is not allowed.
2900 - invalid inner-style forward declaration.
2901 - >? and <? operators, and their >?= and <?= cousins.
2902
2903 Additionally, check for constructor/destructor style violations and reference
2904 members, as it is very convenient to do so while checking for
2905 gcc-2 compliance.
2906
2907 Args:
2908 filename: The name of the current file.
2909 clean_lines: A CleansedLines instance containing the file.
2910 linenum: The number of the line to check.
2911 nesting_state: A NestingState instance which maintains information about
2912 the current stack of nested blocks being parsed.
2913 error: A callable to which errors are reported, which takes 4 arguments:
2914 filename, line number, error level, and message
2915 """
2916
2917 # Remove comments from the line, but leave in strings for now.
2918 line = clean_lines.lines[linenum]
2919
2920 if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
2921 error(filename, linenum, 'runtime/printf_format', 3,
2922 '%q in format strings is deprecated. Use %ll instead.')
2923
2924 if Search(r'printf\s*\(.*".*%\d+\$', line):
2925 error(filename, linenum, 'runtime/printf_format', 2,
2926 '%N$ formats are unconventional. Try rewriting to avoid them.')
2927
2928 # Remove escaped backslashes before looking for undefined escapes.
2929 line = line.replace('\\\\', '')
2930
2931 if Search(r'("|\').*\\(%|\[|\(|{)', line):
2932 error(filename, linenum, 'build/printf_format', 3,
2933 '%, [, (, and { are undefined character escapes. Unescape them.')
2934
2935 # For the rest, work with both comments and strings removed.
2936 line = clean_lines.elided[linenum]
2937
2938 if Search(r'\b(const|volatile|void|char|short|int|long'
2939 r'|float|double|signed|unsigned'
2940 r'|schar|u?int8|u?int16|u?int32|u?int64)'
2941 r'\s+(register|static|extern|typedef)\b',
2942 line):
2943 error(filename, linenum, 'build/storage_class', 5,
2944 'Storage-class specifier (static, extern, typedef, etc) should be '
2945 'at the beginning of the declaration.')
2946
2947 if Match(r'\s*#\s*endif\s*[^/\s]+', line):
2948 error(filename, linenum, 'build/endif_comment', 5,
2949 'Uncommented text after #endif is non-standard. Use a comment.')
2950
2951 if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
2952 error(filename, linenum, 'build/forward_decl', 5,
2953 'Inner-style forward declarations are invalid. Remove this line.')
2954
2955 if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
2956 line):
2957 error(filename, linenum, 'build/deprecated', 3,
2958 '>? and <? (max and min) operators are non-standard and deprecated.')
2959
2960 if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
2961 # TODO(unknown): Could it be expanded safely to arbitrary references,
2962 # without triggering too many false positives? The first
2963 # attempt triggered 5 warnings for mostly benign code in the regtest, hence
2964 # the restriction.
2965 # Here's the original regexp, for the reference:
2966 # type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
2967 # r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
2968 error(filename, linenum, 'runtime/member_string_references', 2,
2969 'const string& members are dangerous. It is much better to use '
2970 'alternatives, such as pointers or simple constants.')
2971
2972 # Everything else in this function operates on class declarations.
2973 # Return early if the top of the nesting stack is not a class, or if
2974 # the class head is not completed yet.
2975 classinfo = nesting_state.InnermostClass()
2976 if not classinfo or not classinfo.seen_open_brace:
2977 return
2978
2979 # The class may have been declared with namespace or classname qualifiers.
2980 # The constructor and destructor will not have those qualifiers.
2981 base_classname = classinfo.name.split('::')[-1]
2982
2983 # Look for single-argument constructors that aren't marked explicit.
2984 # Technically a valid construct, but against style.
2985 explicit_constructor_match = Match(
2986 r'\s+(?:inline\s+)?(explicit\s+)?(?:inline\s+)?%s\s*'
2987 r'\(((?:[^()]|\([^()]*\))*)\)'
2988 % re.escape(base_classname),
2989 line)
2990
2991 if explicit_constructor_match:
2992 is_marked_explicit = explicit_constructor_match.group(1)
2993
2994 if not explicit_constructor_match.group(2):
2995 constructor_args = []
2996 else:
2997 constructor_args = explicit_constructor_match.group(2).split(',')
2998
2999 # collapse arguments so that commas in template parameter lists and function
3000 # argument parameter lists don't split arguments in two
3001 i = 0
3002 while i < len(constructor_args):
3003 constructor_arg = constructor_args[i]
3004 while (constructor_arg.count('<') > constructor_arg.count('>') or
3005 constructor_arg.count('(') > constructor_arg.count(')')):
3006 constructor_arg += ',' + constructor_args[i + 1]
3007 del constructor_args[i + 1]
3008 constructor_args[i] = constructor_arg
3009 i += 1
3010
3011 variadic_args = [arg for arg in constructor_args if '&&...' in arg]
3012 defaulted_args = [arg for arg in constructor_args if '=' in arg]
3013 noarg_constructor = (not constructor_args or # empty arg list
3014 # 'void' arg specifier
3015 (len(constructor_args) == 1 and
3016 constructor_args[0].strip() == 'void'))
3017 onearg_constructor = ((len(constructor_args) == 1 and # exactly one arg
3018 not noarg_constructor) or
3019 # all but at most one arg defaulted
3020 (len(constructor_args) >= 1 and
3021 not noarg_constructor and
3022 len(defaulted_args) >= len(constructor_args) - 1) or
3023 # variadic arguments with zero or one argument
3024 (len(constructor_args) <= 2 and
3025 len(variadic_args) >= 1))
3026 initializer_list_constructor = bool(
3027 onearg_constructor and
3028 Search(r'\bstd\s*::\s*initializer_list\b', constructor_args[0]))
3029 copy_constructor = bool(
3030 onearg_constructor and
3031 Match(r'(const\s+)?%s(\s*<[^>]*>)?(\s+const)?\s*(?:<\w+>\s*)?&'
3032 % re.escape(base_classname), constructor_args[0].strip()))
3033
3034 if (not is_marked_explicit and
3035 onearg_constructor and
3036 not initializer_list_constructor and
3037 not copy_constructor):
3038 if defaulted_args or variadic_args:
3039 error(filename, linenum, 'runtime/explicit', 5,
3040 'Constructors callable with one argument '
3041 'should be marked explicit.')
3042 else:
3043 error(filename, linenum, 'runtime/explicit', 5,
3044 'Single-parameter constructors should be marked explicit.')
3045 elif is_marked_explicit and not onearg_constructor:
3046 if noarg_constructor:
3047 error(filename, linenum, 'runtime/explicit', 5,
3048 'Zero-parameter constructors should not be marked explicit.')
3049
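# Illustrative sketch (not part of the original cpplint source): declarations
# inside the body of 'class Foo' and how the explicit-constructor logic above
# treats them:
#     Foo(int x);             // flagged: callable with one argument, not explicit
#     explicit Foo(int x);    // OK
#     Foo(const Foo& other);  // OK: copy constructor
#     explicit Foo();         // flagged: zero-parameter constructor marked explicit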
3050
3051def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error):
3052 """Checks for the correctness of various spacing around function calls.
3053
3054 Args:
3055 filename: The name of the current file.
3056 clean_lines: A CleansedLines instance containing the file.
3057 linenum: The number of the line to check.
3058 error: The function to call with any errors found.
3059 """
3060 line = clean_lines.elided[linenum]
3061
3062 # Since function calls often occur inside if/for/while/switch
3063 # expressions - which have their own, more liberal conventions - we
3064 # first see if we should be looking inside such an expression for a
3065 # function call, to which we can apply more strict standards.
3066 fncall = line # if there's no control flow construct, look at whole line
3067 for pattern in (r'\bif\s*\((.*)\)\s*{',
3068 r'\bfor\s*\((.*)\)\s*{',
3069 r'\bwhile\s*\((.*)\)\s*[{;]',
3070 r'\bswitch\s*\((.*)\)\s*{'):
3071 match = Search(pattern, line)
3072 if match:
3073 fncall = match.group(1) # look inside the parens for function calls
3074 break
3075
3076 # Except in if/for/while/switch, there should never be space
3077 # immediately inside parens (eg "f( 3, 4 )"). We make an exception
3078 # for nested parens ( (a+b) + c ). Likewise, there should never be
3079 # a space before a ( when it's a function argument. I assume it's a
3080 # function argument when the char before the whitespace is legal in
3081 # a function name (alnum + _) and we're not starting a macro. Also ignore
3082 # pointers and references to arrays and functions because they're too tricky:
3083 # we use a very simple way to recognize these:
3084 # " (something)(maybe-something)" or
3085 # " (something)(maybe-something," or
3086 # " (something)[something]"
3087 # Note that we assume the contents of [] to be short enough that
3088 # they'll never need to wrap.
3089 if ( # Ignore control structures.
3090 not Search(r'\b(if|for|while|switch|return|new|delete|catch|sizeof)\b',
3091 fncall) and
3092 # Ignore pointers/references to functions.
3093 not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
3094 # Ignore pointers/references to arrays.
3095 not Search(r' \([^)]+\)\[[^\]]+\]', fncall)):
3096 if Search(r'\w\s*\(\s(?!\s*\\$)', fncall): # a ( used for a fn call
3097 error(filename, linenum, 'whitespace/parens', 4,
3098 'Extra space after ( in function call')
3099 elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
3100 error(filename, linenum, 'whitespace/parens', 2,
3101 'Extra space after (')
3102 if (Search(r'\w\s+\(', fncall) and
3103 not Search(r'_{0,2}asm_{0,2}\s+_{0,2}volatile_{0,2}\s+\(', fncall) and
3104 not Search(r'#\s*define|typedef|using\s+\w+\s*=', fncall) and
3105 not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall) and
3106 not Search(r'\b(' + '|'.join(_ALT_TOKEN_REPLACEMENT.keys()) + r')\b\s+\(',
3107 fncall) and
3108 not Search(r'\bcase\s+\(', fncall)):
3109 # TODO(unknown): Space after an operator function seems to be a common
3110 # error; silence those for now by restricting them to the highest verbosity.
3111 if Search(r'\boperator_*\b', line):
3112 error(filename, linenum, 'whitespace/parens', 0,
3113 'Extra space before ( in function call')
3114 else:
3115 error(filename, linenum, 'whitespace/parens', 4,
3116 'Extra space before ( in function call')
3117 # If the ) is followed only by a newline or a { + newline, assume it's
3118 # part of a control statement (if/while/etc), and don't complain
3119 if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
3120 # If the closing parenthesis is preceded by only whitespaces,
3121 # try to give a more descriptive error message.
3122 if Search(r'^\s+\)', fncall):
3123 error(filename, linenum, 'whitespace/parens', 2,
3124 'Closing ) should be moved to the previous line')
3125 else:
3126 error(filename, linenum, 'whitespace/parens', 2,
3127 'Extra space before )')
3128
3129
3130def IsBlankLine(line):
3131 """Returns true if the given line is blank.
3132
3133 We consider a line to be blank if the line is empty or consists of
3134 only white spaces.
3135
3136 Args:
3137 line: A single line of text.
3138
3139 Returns:
3140 True, if the given line is blank.
3141 """
3142 return not line or line.isspace()
3143
3144
3145def CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
3146 error):
3147 is_namespace_indent_item = (
3148 len(nesting_state.stack) > 1 and
3149 nesting_state.stack[-1].check_namespace_indentation and
3150 isinstance(nesting_state.previous_stack_top, _NamespaceInfo) and
3151 nesting_state.previous_stack_top == nesting_state.stack[-2])
3152
3153 if ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
3154 clean_lines.elided, line):
3155 CheckItemIndentationInNamespace(filename, clean_lines.elided,
3156 line, error)
3157
3158
3159def CheckForFunctionLengths(filename, clean_lines, linenum,
3160 function_state, error):
3161 """Reports for long function bodies.
3162
3163 For an overview of why this is done, see:
3164 https://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions
3165
3166 Uses a simplistic algorithm assuming other style guidelines
3167 (especially spacing) are followed.
3168 Only checks unindented functions, so class members are unchecked.
3169 Trivial bodies are unchecked, so constructors with huge initializer lists
3170 may be missed.
3171 Blank/comment lines are not counted so as to avoid encouraging the removal
3172 of vertical space and comments just to get through a lint check.
3173 NOLINT *on the last line of a function* disables this check.
3174
3175 Args:
3176 filename: The name of the current file.
3177 clean_lines: A CleansedLines instance containing the file.
3178 linenum: The number of the line to check.
3179 function_state: Current function name and lines in body so far.
3180 error: The function to call with any errors found.
3181 """
3182 lines = clean_lines.lines
3183 line = lines[linenum]
3184 joined_line = ''
3185
3186 starting_func = False
3187 regexp = r'(\w(\w|::|\*|\&|\s)*)\(' # decls * & space::name( ...
3188 match_result = Match(regexp, line)
3189 if match_result:
3190 # If the name is all caps and underscores, figure it's a macro and
3191 # ignore it, unless it's TEST or TEST_F.
3192 function_name = match_result.group(1).split()[-1]
3193 if function_name == 'TEST' or function_name == 'TEST_F' or (
3194 not Match(r'[A-Z_]+$', function_name)):
3195 starting_func = True
3196
3197 if starting_func:
3198 body_found = False
3199 for start_linenum in range(linenum, clean_lines.NumLines()):
3200 start_line = lines[start_linenum]
3201 joined_line += ' ' + start_line.lstrip()
3202 if Search(r'(;|})', start_line): # Declarations and trivial functions
3203 body_found = True
3204 break # ... ignore
3205 elif Search(r'{', start_line):
3206 body_found = True
3207 function = Search(r'((\w|:)*)\(', line).group(1)
3208 if Match(r'TEST', function): # Handle TEST... macros
3209 parameter_regexp = Search(r'(\(.*\))', joined_line)
3210 if parameter_regexp: # Ignore bad syntax
3211 function += parameter_regexp.group(1)
3212 else:
3213 function += '()'
3214 function_state.Begin(function)
3215 break
3216 if not body_found:
3217 # No body for the function (or evidence of a non-function) was found.
3218 error(filename, linenum, 'readability/fn_size', 5,
3219 'Lint failed to find start of function body.')
3220 elif Match(r'^\}\s*$', line): # function end
3221 function_state.Check(error, filename, linenum)
3222 function_state.End()
3223 elif not Match(r'^\s*$', line):
3224 function_state.Count() # Count non-blank/non-comment lines.
3225
3226
3227_RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?')
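# For example, given '// TODO(alice): refactor', group(1) captures the single
# space before TODO, group(2) captures '(alice)', and group(3) captures the
# space following the colon.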
3228
3229
3230def CheckComment(line, filename, linenum, next_line_start, error):
3231 """Checks for common mistakes in comments.
3232
3233 Args:
3234 line: The line in question.
3235 filename: The name of the current file.
3236 linenum: The number of the line to check.
3237 next_line_start: The first non-whitespace column of the next line.
3238 error: The function to call with any errors found.
3239 """
3240 commentpos = line.find('//')
3241 if commentpos != -1:
3242 # Check if the // may be in quotes. If so, ignore it
3243 if re.sub(r'\\.', '', line[0:commentpos]).count('"') % 2 == 0:
3244 # Allow one space for new scopes, two spaces otherwise:
3245 if (not (Match(r'^.*{ *//', line) and next_line_start == commentpos) and
3246 ((commentpos >= 1 and
3247 line[commentpos-1] not in string.whitespace) or
3248 (commentpos >= 2 and
3249 line[commentpos-2] not in string.whitespace))):
3250 error(filename, linenum, 'whitespace/comments', 2,
3251 'At least two spaces is best between code and comments')
3252
3253 # Checks for common mistakes in TODO comments.
3254 comment = line[commentpos:]
3255 match = _RE_PATTERN_TODO.match(comment)
3256 if match:
3257 # One whitespace is correct; zero whitespace is handled elsewhere.
3258 leading_whitespace = match.group(1)
3259 if len(leading_whitespace) > 1:
3260 error(filename, linenum, 'whitespace/todo', 2,
3261 'Too many spaces before TODO')
3262
3263 username = match.group(2)
3264 if not username:
3265 error(filename, linenum, 'readability/todo', 2,
3266 'Missing username in TODO; it should look like '
3267 '"// TODO(my_username): Stuff."')
3268
3269 middle_whitespace = match.group(3)
3270 # Comparisons made explicit for correctness -- pylint: disable=g-explicit-bool-comparison
3271 if middle_whitespace != ' ' and middle_whitespace != '':
3272 error(filename, linenum, 'whitespace/todo', 2,
3273 'TODO(my_username) should be followed by a space')
3274
3275 # If the comment contains an alphanumeric character, there
3276 # should be a space somewhere between it and the // unless
3277 # it's a /// or //! Doxygen comment.
3278 if (Match(r'//[^ ]*\w', comment) and
3279 not Match(r'(///|//\!)(\s+|$)', comment)):
3280 error(filename, linenum, 'whitespace/comments', 4,
3281 'Should have a space between // and comment')
3282
3283
3284def CheckAccess(filename, clean_lines, linenum, nesting_state, error):
3285 """Checks for improper use of DISALLOW* macros.
3286
3287 Args:
3288 filename: The name of the current file.
3289 clean_lines: A CleansedLines instance containing the file.
3290 linenum: The number of the line to check.
3291 nesting_state: A NestingState instance which maintains information about
3292 the current stack of nested blocks being parsed.
3293 error: The function to call with any errors found.
3294 """
3295 line = clean_lines.elided[linenum] # get rid of comments and strings
3296
3297 matched = Match((r'\s*(DISALLOW_COPY_AND_ASSIGN|'
3298 r'DISALLOW_IMPLICIT_CONSTRUCTORS)'), line)
3299 if not matched:
3300 return
3301 if nesting_state.stack and isinstance(nesting_state.stack[-1], _ClassInfo):
3302 if nesting_state.stack[-1].access != 'private':
3303 error(filename, linenum, 'readability/constructors', 3,
3304 '%s must be in the private: section' % matched.group(1))
3305
3306 else:
3307 # Found DISALLOW* macro outside a class declaration, or perhaps it
3308 # was used inside a function when it should have been part of the
3309 # class declaration. We could issue a warning here, but it
3310 # probably resulted in a compiler error already.
3311 pass
3312
3313
3314def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
3315 """Checks for the correctness of various spacing issues in the code.
3316
3317 Things we check for: spaces around operators, spaces after
3318 if/for/while/switch, no spaces around parens in function calls, two
3319 spaces between code and comment, don't start a block with a blank
3320 line, don't end a function with a blank line, don't add a blank line
3321 after public/protected/private, don't have too many blank lines in a row.
3322
3323 Args:
3324 filename: The name of the current file.
3325 clean_lines: A CleansedLines instance containing the file.
3326 linenum: The number of the line to check.
3327 nesting_state: A NestingState instance which maintains information about
3328 the current stack of nested blocks being parsed.
3329 error: The function to call with any errors found.
3330 """
3331
3332 # Don't use "elided" lines here, otherwise we can't check commented lines.
3333 # Don't want to use "raw" either, because we don't want to check inside C++11
3334 # raw strings.
3335 raw = clean_lines.lines_without_raw_strings
3336 line = raw[linenum]
3337
3338 # Before nixing comments, check if the line is blank for no good
3339 # reason. This includes the first line after a block is opened, and
3340 # blank lines at the end of a function (ie, right before a line like '}'
3341 #
3342 # Skip all the blank line checks if we are immediately inside a
3343 # namespace body. In other words, don't issue blank line warnings
3344 # for this block:
3345 # namespace {
3346 #
3347 # }
3348 #
3349 # A warning about missing end of namespace comments will be issued instead.
3350 #
3351 # Also skip blank line checks for 'extern "C"' blocks, which are formatted
3352 # like namespaces.
3353 if (IsBlankLine(line) and
3354 not nesting_state.InNamespaceBody() and
3355 not nesting_state.InExternC()):
3356 elided = clean_lines.elided
3357 prev_line = elided[linenum - 1]
3358 prevbrace = prev_line.rfind('{')
3359 # TODO(unknown): Don't complain if line before blank line, and line after,
3360 # both start with alnums and are indented the same amount.
3361 # This ignores whitespace at the start of a namespace block
3362 # because those are not usually indented.
3363 if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1:
3364 # OK, we have a blank line at the start of a code block. Before we
3365 # complain, we check if it is an exception to the rule: The previous
3366 # non-empty line has the parameters of a function header that are indented
3367 # 4 spaces (because they did not fit in an 80 column line when placed on
3368 # the same line as the function name). We also check for the case where
3369 # the previous line is indented 6 spaces, which may happen when the
3370 # initializers of a constructor do not fit into an 80 column line.
3371 exception = False
3372 if Match(r' {6}\w', prev_line): # Initializer list?
3373 # We are looking for the opening column of initializer list, which
3374 # should be indented 4 spaces to cause 6 space indentation afterwards.
3375 search_position = linenum-2
3376 while (search_position >= 0
3377 and Match(r' {6}\w', elided[search_position])):
3378 search_position -= 1
3379 exception = (search_position >= 0
3380 and elided[search_position][:5] == ' :')
3381 else:
3382 # Search for the function arguments or an initializer list. We use a
3383 # simple heuristic here: if the line is indented 4 spaces and we have a
3384 # closing paren, without the opening paren, followed by an opening brace
3385 # or colon (for initializer lists) we assume that it is the last line of
3386 # a function header. If we have a colon indented 4 spaces, it is an
3387 # initializer list.
3388 exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
3389 prev_line)
3390 or Match(r' {4}:', prev_line))
3391
3392 if not exception:
3393 error(filename, linenum, 'whitespace/blank_line', 2,
3394 'Redundant blank line at the start of a code block '
3395 'should be deleted.')
3396 # Ignore blank lines at the end of a block in a long if-else
3397 # chain, like this:
3398 # if (condition1) {
3399 # // Something followed by a blank line
3400 #
3401 # } else if (condition2) {
3402 # // Something else
3403 # }
3404 if linenum + 1 < clean_lines.NumLines():
3405 next_line = raw[linenum + 1]
3406 if (next_line
3407 and Match(r'\s*}', next_line)
3408 and next_line.find('} else ') == -1):
3409 error(filename, linenum, 'whitespace/blank_line', 3,
3410 'Redundant blank line at the end of a code block '
3411 'should be deleted.')
3412
3413 matched = Match(r'\s*(public|protected|private):', prev_line)
3414 if matched:
3415 error(filename, linenum, 'whitespace/blank_line', 3,
3416 'Do not leave a blank line after "%s:"' % matched.group(1))
3417
3418 # Next, check comments
3419 next_line_start = 0
3420 if linenum + 1 < clean_lines.NumLines():
3421 next_line = raw[linenum + 1]
3422 next_line_start = len(next_line) - len(next_line.lstrip())
3423 CheckComment(line, filename, linenum, next_line_start, error)
3424
3425 # get rid of comments and strings
3426 line = clean_lines.elided[linenum]
3427
3428 # You shouldn't have spaces before your brackets, except maybe after
3429 # 'delete []' or 'return []() {};'
3430 if Search(r'\w\s+\[', line) and not Search(r'(?:delete|return)\s+\[', line):
3431 error(filename, linenum, 'whitespace/braces', 5,
3432 'Extra space before [')
3433
3434 # In range-based for, we want spaces before and after the colon, but
3435 # not around "::" tokens that might appear.
3436 if (Search(r'for *\(.*[^:]:[^: ]', line) or
3437 Search(r'for *\(.*[^: ]:[^:]', line)):
3438 error(filename, linenum, 'whitespace/forcolon', 2,
3439 'Missing space around colon in range-based for loop')
3440
3441
3442def CheckOperatorSpacing(filename, clean_lines, linenum, error):
3443 """Checks for horizontal spacing around operators.
3444
3445 Args:
3446 filename: The name of the current file.
3447 clean_lines: A CleansedLines instance containing the file.
3448 linenum: The number of the line to check.
3449 error: The function to call with any errors found.
3450 """
3451 line = clean_lines.elided[linenum]
3452
3453 # Don't try to do spacing checks for operator methods. Do this by
3454 # replacing the troublesome characters with something else,
3455 # preserving column position for all other characters.
3456 #
3457 # The replacement is done repeatedly to avoid false positives from
3458 # operators that call operators.
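# For example, 'bool operator==(const Foo& a, const Foo& b) {' becomes
# 'bool operator__(const Foo& a, const Foo& b) {' for the rest of this
# function, so the '==' itself is not treated as a binary operator.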
3459 while True:
3460 match = Match(r'^(.*\boperator\b)(\S+)(\s*\(.*)$', line)
3461 if match:
3462 line = match.group(1) + ('_' * len(match.group(2))) + match.group(3)
3463 else:
3464 break
3465
3466 # We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
3467 # Otherwise not. Note we only check for non-spaces on *both* sides;
3468 # sometimes people put non-spaces on one side when aligning ='s among
3469 # many lines (not that this is behavior that I approve of...)
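# For example, 'int i=0;' and 'x =5;' are flagged, while 'a += b;',
# 'if (a=Foo())' and 'operator=(...)' are not.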
3470 if ((Search(r'[\w.]=', line) or
3471 Search(r'=[\w.]', line))
3472 and not Search(r'\b(if|while|for) ', line)
3473 # Operators taken from [lex.operators] in C++11 standard.
3474 and not Search(r'(>=|<=|==|!=|&=|\^=|\|=|\+=|\*=|\/=|\%=)', line)
3475 and not Search(r'operator=', line)):
3476 error(filename, linenum, 'whitespace/operators', 4,
3477 'Missing spaces around =')
3478
3479 # It's ok not to have spaces around binary operators like + - * /, but if
3480 # there's too little whitespace, we get concerned. It's hard to tell,
3481 # though, so we punt on this one for now. TODO.
3482
3483 # You should always have whitespace around binary operators.
3484 #
3485 # Check <= and >= first to avoid false positives with < and >, then
3486 # check non-include lines for spacing around < and >.
3487 #
3488 # If the operator is followed by a comma, assume it's being used in a
3489 # macro context and don't do any checks. This avoids false
3490 # positives.
3491 #
3492 # Note that && is not included here. This is because there are too
3493 # many false positives due to RValue references.
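# For example, 'if (a==b)' is flagged with 'Missing spaces around ==', while
# 'a == b' and 'a ==b' are not (only both-sides-missing is reported here).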
3494 match = Search(r'[^<>=!\s](==|!=|<=|>=|\|\|)[^<>=!\s,;\)]', line)
3495 if match:
3496 error(filename, linenum, 'whitespace/operators', 3,
3497 'Missing spaces around %s' % match.group(1))
3498 elif not Match(r'#.*include', line):
3499 # Look for < that is not surrounded by spaces. This is only
3500 # triggered if both sides are missing spaces, even though
3501 # technically we should flag if at least one side is missing a
3502 # space. This is done to avoid some false positives with shifts.
3503 match = Match(r'^(.*[^\s<])<[^\s=<,]', line)
3504 if match:
3505 (_, _, end_pos) = CloseExpression(
3506 clean_lines, linenum, len(match.group(1)))
3507 if end_pos <= -1:
3508 error(filename, linenum, 'whitespace/operators', 3,
3509 'Missing spaces around <')
3510
3511 # Look for > that is not surrounded by spaces. Similar to the
3512 # above, we only trigger if both sides are missing spaces to avoid
3513 # false positives with shifts.
3514 match = Match(r'^(.*[^-\s>])>[^\s=>,]', line)
3515 if match:
3516 (_, _, start_pos) = ReverseCloseExpression(
3517 clean_lines, linenum, len(match.group(1)))
3518 if start_pos <= -1:
3519 error(filename, linenum, 'whitespace/operators', 3,
3520 'Missing spaces around >')
3521
3522 # We allow no-spaces around << when used like this: 10<<20, but
3523 # not otherwise (particularly, not when used as streams)
3524 #
3525 # We also allow operators following an opening parenthesis, since
3526 # those tend to be macros that deal with operators.
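# For example, 'stream<<foo' is flagged, while '1<<20' and '1ULL<<20' are not.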
3527 match = Search(r'(operator|[^\s(<])(?:L|UL|LL|ULL|l|ul|ll|ull)?<<([^\s,=<])', line)
3528 if (match and not (match.group(1).isdigit() and match.group(2).isdigit()) and
3529 not (match.group(1) == 'operator' and match.group(2) == ';')):
3530 error(filename, linenum, 'whitespace/operators', 3,
3531 'Missing spaces around <<')
3532
3533 # We allow no-spaces around >> for almost anything. This is because
3534 # C++11 allows ">>" to close nested templates, which accounts for
3535 # most cases when ">>" is not followed by a space.
3536 #
3537 # We still warn on ">>" followed by alpha character, because that is
3538 # likely due to ">>" being used for right shifts, e.g.:
3539 # value >> alpha
3540 #
3541 # When ">>" is used to close templates, the alphanumeric letter that
3542 # follows would be part of an identifier, and there should still be
3543 # a space separating the template type and the identifier.
3544 # type<type<type>> alpha
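# For example, 'value >>alpha' and 'set<set<int>>s;' are flagged, while
# 'set<set<int>> s;' is not.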
3545 match = Search(r'>>[a-zA-Z_]', line)
3546 if match:
3547 error(filename, linenum, 'whitespace/operators', 3,
3548 'Missing spaces around >>')
3549
3550 # There shouldn't be space around unary operators
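# For example, '! flag', '~ mask', and 'counter ++ ;' are flagged, while
# '!flag' and '++i' are not.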
3551 match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
3552 if match:
3553 error(filename, linenum, 'whitespace/operators', 4,
3554 'Extra space for operator %s' % match.group(1))
3555
3556
3557def CheckParenthesisSpacing(filename, clean_lines, linenum, error):
3558 """Checks for horizontal spacing around parentheses.
3559
3560 Args:
3561 filename: The name of the current file.
3562 clean_lines: A CleansedLines instance containing the file.
3563 linenum: The number of the line to check.
3564 error: The function to call with any errors found.
3565 """
3566 line = clean_lines.elided[linenum]
3567
3568 # No spaces after an if, while, switch, or for
3569 match = Search(r' (if\(|for\(|while\(|switch\()', line)
3570 if match:
3571 error(filename, linenum, 'whitespace/parens', 5,
3572 'Missing space before ( in %s' % match.group(1))
3573
3574 # For if/for/while/switch, the left and right parens should be
3575 # consistent about how many spaces are inside the parens, and
3576 # there should either be zero or one spaces inside the parens.
3577 # We don't want: "if ( foo)" or "if ( foo )".
3578 # Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
3579 match = Search(r'\b(if|for|while|switch)\s*'
3580 r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
3581 line)
3582 if match:
3583 if len(match.group(2)) != len(match.group(4)):
3584 if not (match.group(3) == ';' and
3585 len(match.group(2)) == 1 + len(match.group(4)) or
3586 not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)):
3587 error(filename, linenum, 'whitespace/parens', 5,
3588 'Mismatching spaces inside () in %s' % match.group(1))
3589 if len(match.group(2)) not in [0, 1]:
3590 error(filename, linenum, 'whitespace/parens', 5,
3591 'Should have zero or one spaces inside ( and ) in %s' %
3592 match.group(1))
3593
3594
3595def CheckCommaSpacing(filename, clean_lines, linenum, error):
3596 """Checks for horizontal spacing near commas and semicolons.
3597
3598 Args:
3599 filename: The name of the current file.
3600 clean_lines: A CleansedLines instance containing the file.
3601 linenum: The number of the line to check.
3602 error: The function to call with any errors found.
3603 """
3604 raw = clean_lines.lines_without_raw_strings
3605 line = clean_lines.elided[linenum]
3606
3607 # You should always have a space after a comma (either as fn arg or operator)
3608 #
3609 # This does not apply when the non-space character following the
3610 # comma is another comma, since the only time when that happens is
3611 # for empty macro arguments.
3612 #
3613 # We run this check in two passes: a first pass on elided lines to
3614 # find commas that are missing a following space, and a second pass on
3615 # raw lines to confirm that the missing space is not simply due to an
3616 # elided comment.
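# For example, 'f(a,b)' is flagged with 'Missing space after ,', while
# 'f(a, b)' is not; a comma directly followed by another comma (an empty
# macro argument) is also left alone.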
3617 if (Search(r',[^,\s]', ReplaceAll(r'\boperator\s*,\s*\(', 'F(', line)) and
3618 Search(r',[^,\s]', raw[linenum])):
3619 error(filename, linenum, 'whitespace/comma', 3,
3620 'Missing space after ,')
3621
3622 # You should always have a space after a semicolon
3623 # except for few corner cases
3624 # TODO(unknown): clarify whether 'if (1) { return 1;}' requires one more
3625 # space after the ;
3626 if Search(r';[^\s};\\)/]', line):
3627 error(filename, linenum, 'whitespace/semicolon', 3,
3628 'Missing space after ;')
3629
3630
3631def _IsType(clean_lines, nesting_state, expr):
3632 """Check if expression looks like a type name, returns true if so.
3633
3634 Args:
3635 clean_lines: A CleansedLines instance containing the file.
3636 nesting_state: A NestingState instance which maintains information about
3637 the current stack of nested blocks being parsed.
3638 expr: The expression to check.
3639 Returns:
3640 True, if token looks like a type.
3641 """
3642 # Keep only the last token in the expression
3643 last_word = Match(r'^.*(\b\S+)$', expr)
3644 if last_word:
3645 token = last_word.group(1)
3646 else:
3647 token = expr
3648
3649 # Match native types and stdint types
3650 if _TYPES.match(token):
3651 return True
3652
3653 # Try a bit harder to match templated types. Walk up the nesting
3654 # stack until we find something that resembles a typename
3655 # declaration for what we are looking for.
3656 typename_pattern = (r'\b(?:typename|class|struct)\s+' + re.escape(token) +
3657 r'\b')
3658 block_index = len(nesting_state.stack) - 1
3659 while block_index >= 0:
3660 if isinstance(nesting_state.stack[block_index], _NamespaceInfo):
3661 return False
3662
3663 # Found where the opening brace is. We want to scan from this
3664 # line up to the beginning of the function, minus a few lines.
3665 # template <typename Type1, // stop scanning here
3666 # ...>
3667 # class C
3668 # : public ... { // start scanning here
3669 last_line = nesting_state.stack[block_index].starting_linenum
3670
3671 next_block_start = 0
3672 if block_index > 0:
3673 next_block_start = nesting_state.stack[block_index - 1].starting_linenum
3674 first_line = last_line
3675 while first_line >= next_block_start:
3676 if clean_lines.elided[first_line].find('template') >= 0:
3677 break
3678 first_line -= 1
3679 if first_line < next_block_start:
3680 # Didn't find any "template" keyword before reaching the next block,
3681 # there are probably no template things to check for this block
3682 block_index -= 1
3683 continue
3684
3685 # Look for typename in the specified range
3686 for i in xrange(first_line, last_line + 1, 1):
3687 if Search(typename_pattern, clean_lines.elided[i]):
3688 return True
3689 block_index -= 1
3690
3691 return False
3692
3693
3694def CheckBracesSpacing(filename, clean_lines, linenum, nesting_state, error):
3695 """Checks for horizontal spacing near commas.
3696
3697 Args:
3698 filename: The name of the current file.
3699 clean_lines: A CleansedLines instance containing the file.
3700 linenum: The number of the line to check.
3701 nesting_state: A NestingState instance which maintains information about
3702 the current stack of nested blocks being parsed.
3703 error: The function to call with any errors found.
3704 """
3705 line = clean_lines.elided[linenum]
3706
3707 # Except after an opening paren, or after another opening brace (in case of
3708 # an initializer list, for instance), you should have spaces before your
3709 # braces when they are delimiting blocks, classes, namespaces etc.
3710 # And since you should never have braces at the beginning of a line,
3711 # this is an easy test. Except that braces used for initialization don't
3712 # follow the same rule; we often don't want spaces before those.
3713 match = Match(r'^(.*[^ ({>]){', line)
3714
3715 if match:
3716 # Try a bit harder to check for brace initialization. This
3717 # happens in one of the following forms:
3718 # Constructor() : initializer_list_{} { ... }
3719 # Constructor{}.MemberFunction()
3720 # Type variable{};
3721 # FunctionCall(type{}, ...);
3722 # LastArgument(..., type{});
3723 # LOG(INFO) << type{} << " ...";
3724 # map_of_type[{...}] = ...;
3725 # ternary = expr ? new type{} : nullptr;
3726 # OuterTemplate<InnerTemplateConstructor<Type>{}>
3727 #
3728 # We check for the character following the closing brace, and
3729 # silence the warning if it's one of those listed above, i.e.
3730 # "{.;,)<>]:".
3731 #
3732 # To account for nested initializer list, we allow any number of
3733 # closing braces up to "{;,)<". We can't simply silence the
3734 # warning on first sight of closing brace, because that would
3735 # cause false negatives for things that are not initializer lists.
3736 # Silence this: But not this:
3737 # Outer{ if (...) {
3738 # Inner{...} if (...){ // Missing space before {
3739 # }; }
3740 #
3741 # There is a false negative with this approach if people inserted
3742 # spurious semicolons, e.g. "if (cond){};", but we will catch the
3743 # spurious semicolon with a separate check.
3744 leading_text = match.group(1)
3745 (endline, endlinenum, endpos) = CloseExpression(
3746 clean_lines, linenum, len(match.group(1)))
3747 trailing_text = ''
3748 if endpos > -1:
3749 trailing_text = endline[endpos:]
3750 for offset in xrange(endlinenum + 1,
3751 min(endlinenum + 3, clean_lines.NumLines() - 1)):
3752 trailing_text += clean_lines.elided[offset]
3753 # We also suppress warnings for `uint64_t{expression}` etc., as the style
3754 # guide recommends brace initialization for integral types to avoid
3755 # overflow/truncation.
3756 if (not Match(r'^[\s}]*[{.;,)<>\]:]', trailing_text)
3757 and not _IsType(clean_lines, nesting_state, leading_text)):
3758 error(filename, linenum, 'whitespace/braces', 5,
3759 'Missing space before {')
3760
3761 # Make sure '} else {' has spaces.
3762 if Search(r'}else', line):
3763 error(filename, linenum, 'whitespace/braces', 5,
3764 'Missing space before else')
3765
3766 # You shouldn't have a space before a semicolon at the end of the line.
3767 # There's a special case for "for" since the style guide allows space before
3768 # the semicolon there.
3769 if Search(r':\s*;\s*$', line):
3770 error(filename, linenum, 'whitespace/semicolon', 5,
3771 'Semicolon defining empty statement. Use {} instead.')
3772 elif Search(r'^\s*;\s*$', line):
3773 error(filename, linenum, 'whitespace/semicolon', 5,
3774 'Line contains only semicolon. If this should be an empty statement, '
3775 'use {} instead.')
3776 elif (Search(r'\s+;\s*$', line) and
3777 not Search(r'\bfor\b', line)):
3778 error(filename, linenum, 'whitespace/semicolon', 5,
3779 'Extra space before last semicolon. If this should be an empty '
3780 'statement, use {} instead.')
3781
3782
3783def IsDecltype(clean_lines, linenum, column):
3784 """Check if the token ending on (linenum, column) is decltype().
3785
3786 Args:
3787 clean_lines: A CleansedLines instance containing the file.
3788 linenum: the number of the line to check.
3789 column: end column of the token to check.
3790 Returns:
3791 True if this token is decltype() expression, False otherwise.
3792 """
3793 (text, _, start_col) = ReverseCloseExpression(clean_lines, linenum, column)
3794 if start_col < 0:
3795 return False
3796 if Search(r'\bdecltype\s*$', text[0:start_col]):
3797 return True
3798 return False
3799
3800def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
3801 """Checks for additional blank line issues related to sections.
3802
3803 Currently the only thing checked here is blank line before protected/private.
3804
3805 Args:
3806 filename: The name of the current file.
3807 clean_lines: A CleansedLines instance containing the file.
3808 class_info: A _ClassInfo object.
3809 linenum: The number of the line to check.
3810 error: The function to call with any errors found.
3811 """
3812 # Skip checks if the class is small, where small means 25 lines or less.
3813 # 25 lines seems like a good cutoff since that's the usual height of
3814 # terminals, and any class that can't fit in one screen can't really
3815 # be considered "small".
3816 #
3817 # Also skip checks if we are on the first line. This accounts for
3818 # classes that look like
3819 # class Foo { public: ... };
3820 #
3821 # If we didn't find the end of the class, last_line would be zero,
3822 # and the check will be skipped by the first condition.
3823 if (class_info.last_line - class_info.starting_linenum <= 24 or
3824 linenum <= class_info.starting_linenum):
3825 return
3826
3827 matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum])
3828 if matched:
3829 # Issue warning if the line before public/protected/private was
3830 # not a blank line, but don't do this if the previous line contains
3831 # "class" or "struct". This can happen two ways:
3832 # - We are at the beginning of the class.
3833 # - We are forward-declaring an inner class that is semantically
3834 # private, but needed to be public for implementation reasons.
3835 # Also ignores cases where the previous line ends with a backslash as can be
3836 # common when defining classes in C macros.
3837 prev_line = clean_lines.lines[linenum - 1]
3838 if (not IsBlankLine(prev_line) and
3839 not Search(r'\b(class|struct)\b', prev_line) and
3840 not Search(r'\\$', prev_line)):
3841 # Try a bit harder to find the beginning of the class. This is to
3842 # account for multi-line base-specifier lists, e.g.:
3843 # class Derived
3844 # : public Base {
3845 end_class_head = class_info.starting_linenum
3846 for i in range(class_info.starting_linenum, linenum):
3847 if Search(r'\{\s*$', clean_lines.lines[i]):
3848 end_class_head = i
3849 break
3850 if end_class_head < linenum - 1:
3851 error(filename, linenum, 'whitespace/blank_line', 3,
3852 '"%s:" should be preceded by a blank line' % matched.group(1))
3853
3854
3855def GetPreviousNonBlankLine(clean_lines, linenum):
3856 """Return the most recent non-blank line and its line number.
3857
3858 Args:
3859 clean_lines: A CleansedLines instance containing the file contents.
3860 linenum: The number of the line to check.
3861
3862 Returns:
3863 A tuple with two elements. The first element is the contents of the last
3864 non-blank line before the current line, or the empty string if this is the
3865 first non-blank line. The second is the line number of that line, or -1
3866 if this is the first non-blank line.
3867 """
3868
3869 prevlinenum = linenum - 1
3870 while prevlinenum >= 0:
3871 prevline = clean_lines.elided[prevlinenum]
3872 if not IsBlankLine(prevline): # if not a blank line...
3873 return (prevline, prevlinenum)
3874 prevlinenum -= 1
3875 return ('', -1)
3876
3877
3878def CheckBraces(filename, clean_lines, linenum, error):
3879 """Looks for misplaced braces (e.g. at the end of line).
3880
3881 Args:
3882 filename: The name of the current file.
3883 clean_lines: A CleansedLines instance containing the file.
3884 linenum: The number of the line to check.
3885 error: The function to call with any errors found.
3886 """
3887
3888 line = clean_lines.elided[linenum] # get rid of comments and strings
3889
3890 if Match(r'\s*{\s*$', line):
3891 # We allow an open brace to start a line in the case where someone is using
3892 # braces in a block to explicitly create a new scope, which is commonly used
3893 # to control the lifetime of stack-allocated variables. Braces are also
3894 # used for brace initializers inside function calls. We don't detect this
3895 # perfectly: we just don't complain if the last non-whitespace character on
3896 # the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the
3897 # previous line starts a preprocessor block. We also allow a brace on the
3898 # following line if it is part of an array initialization and would not fit
3899 # within the 80 character limit of the preceding line.
3900 prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
3901 if (not Search(r'[,;:}{(]\s*$', prevline) and
3902 not Match(r'\s*#', prevline) and
3903 not (GetLineWidth(prevline) > _line_length - 2 and '[]' in prevline)):
3904 error(filename, linenum, 'whitespace/braces', 4,
3905 '{ should almost always be at the end of the previous line')
3906
3907 # An else clause should be on the same line as the preceding closing brace.
3908 if Match(r'\s*else\b\s*(?:if\b|\{|$)', line):
3909 prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
3910 if Match(r'\s*}\s*$', prevline):
3911 error(filename, linenum, 'whitespace/newline', 4,
3912 'An else should appear on the same line as the preceding }')
3913
3914 # If braces come on one side of an else, they should be on both.
3915 # However, we have to worry about "else if" that spans multiple lines!
3916 if Search(r'else if\s*\(', line): # could be multi-line if
3917 brace_on_left = bool(Search(r'}\s*else if\s*\(', line))
3918 # find the ( after the if
3919 pos = line.find('else if')
3920 pos = line.find('(', pos)
3921 if pos > 0:
3922 (endline, _, endpos) = CloseExpression(clean_lines, linenum, pos)
3923 brace_on_right = endline[endpos:].find('{') != -1
3924 if brace_on_left != brace_on_right: # must be brace after if
3925 error(filename, linenum, 'readability/braces', 5,
3926 'If an else has a brace on one side, it should have it on both')
3927 elif Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line):
3928 error(filename, linenum, 'readability/braces', 5,
3929 'If an else has a brace on one side, it should have it on both')
3930
3931 # Likewise, an else should never have the else clause on the same line
3932 if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line):
3933 error(filename, linenum, 'whitespace/newline', 4,
3934 'Else clause should never be on same line as else (use 2 lines)')
3935
3936 # In the same way, a do/while should never be on one line
3937 if Match(r'\s*do [^\s{]', line):
3938 error(filename, linenum, 'whitespace/newline', 4,
3939 'do/while clauses should not be on a single line')
3940
3941 # Check single-line if/else bodies. The style guide says 'curly braces are not
3942 # required for single-line statements'. We additionally allow multi-line
3943 # single statements, but we reject anything with more than one semicolon in
3944 # it. This means that the first semicolon after the if should be at the end of
3945 # its line, and the line after that should have an indent level equal to or
3946 # lower than the if. We also check for ambiguous if/else nesting without
3947 # braces.
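# For example, with no braces:
#   if (cond)
#     DoOne();
#     DoTwo();
# the second statement causes 'If/else bodies with multiple statements
# require braces' to be reported on the line of the if.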
3948 if_else_match = Search(r'\b(if\s*\(|else\b)', line)
3949 if if_else_match and not Match(r'\s*#', line):
3950 if_indent = GetIndentLevel(line)
3951 endline, endlinenum, endpos = line, linenum, if_else_match.end()
3952 if_match = Search(r'\bif\s*\(', line)
3953 if if_match:
3954 # This could be a multiline if condition, so find the end first.
3955 pos = if_match.end() - 1
3956 (endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum, pos)
3957 # Check for an opening brace, either directly after the if or on the next
3958 # line. If found, this isn't a single-statement conditional.
3959 if (not Match(r'\s*{', endline[endpos:])
3960 and not (Match(r'\s*$', endline[endpos:])
3961 and endlinenum < (len(clean_lines.elided) - 1)
3962 and Match(r'\s*{', clean_lines.elided[endlinenum + 1]))):
3963 while (endlinenum < len(clean_lines.elided)
3964 and ';' not in clean_lines.elided[endlinenum][endpos:]):
3965 endlinenum += 1
3966 endpos = 0
3967 if endlinenum < len(clean_lines.elided):
3968 endline = clean_lines.elided[endlinenum]
3969 # We allow a mix of whitespace and closing braces (e.g. for one-liner
3970 # methods) and a single \ after the semicolon (for macros)
3971 endpos = endline.find(';')
3972 if not Match(r';[\s}]*(\\?)$', endline[endpos:]):
3973 # Semicolon isn't the last character, there's something trailing.
3974 # Output a warning if the semicolon is not contained inside
3975 # a lambda expression.
3976 if not Match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}]*\}\s*\)*[;,]\s*$',
3977 endline):
3978 error(filename, linenum, 'readability/braces', 4,
3979 'If/else bodies with multiple statements require braces')
3980 elif endlinenum < len(clean_lines.elided) - 1:
3981 # Make sure the next line is dedented
3982 next_line = clean_lines.elided[endlinenum + 1]
3983 next_indent = GetIndentLevel(next_line)
3984 # With ambiguous nested if statements, this will error out on the
3985 # if that *doesn't* match the else, regardless of whether it's the
3986 # inner one or outer one.
3987 if (if_match and Match(r'\s*else\b', next_line)
3988 and next_indent != if_indent):
3989 error(filename, linenum, 'readability/braces', 4,
3990 'Else clause should be indented at the same level as if. '
3991 'Ambiguous nested if/else chains require braces.')
3992 elif next_indent > if_indent:
3993 error(filename, linenum, 'readability/braces', 4,
3994 'If/else bodies with multiple statements require braces')
3995
3996
3997def CheckTrailingSemicolon(filename, clean_lines, linenum, error):
3998 """Looks for redundant trailing semicolon.
3999
4000 Args:
4001 filename: The name of the current file.
4002 clean_lines: A CleansedLines instance containing the file.
4003 linenum: The number of the line to check.
4004 error: The function to call with any errors found.
4005 """
4006
4007 line = clean_lines.elided[linenum]
4008
4009 # Block bodies should not be followed by a semicolon. Due to C++11
4010 # brace initialization, there are more places where semicolons are
4011 # required than not, so we explicitly list the allowed rules
4012 # rather than listing the disallowed ones. These are the places
4013 # where "};" should be replaced by just "}":
4014 # 1. Some flavor of block following closing parenthesis:
4015 # for (;;) {};
4016 # while (...) {};
4017 # switch (...) {};
4018 # Function(...) {};
4019 # if (...) {};
4020 # if (...) else if (...) {};
4021 #
4022 # 2. else block:
4023 # if (...) else {};
4024 #
4025 # 3. const member function:
4026 # Function(...) const {};
4027 #
4028 # 4. Block following some statement:
4029 # x = 42;
4030 # {};
4031 #
4032 # 5. Block at the beginning of a function:
4033 # Function(...) {
4034 # {};
4035 # }
4036 #
4037 # Note that naively checking for the preceding "{" will also match
4038 # braces inside multi-dimensional arrays, but this is fine since
4039 # that expression will not contain semicolons.
4040 #
4041 # 6. Block following another block:
4042 # while (true) {}
4043 # {};
4044 #
4045 # 7. End of namespaces:
4046 # namespace {};
4047 #
4048 # These semicolons seem far more common than other kinds of
4049 # redundant semicolons, possibly due to people converting classes
4050 # to namespaces. For now we do not warn for this case.
4051 #
4052 # Try matching case 1 first.
4053 match = Match(r'^(.*\)\s*)\{', line)
4054 if match:
4055 # Matched closing parenthesis (case 1). Check the token before the
4056 # matching opening parenthesis, and don't warn if it looks like a
4057 # macro. This avoids these false positives:
4058 # - macro that defines a base class
4059 # - multi-line macro that defines a base class
4060 # - macro that defines the whole class-head
4061 #
4062 # But we still issue warnings for macros that we know are safe to
4063 # warn, specifically:
4064 # - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P
4065 # - TYPED_TEST
4066 # - INTERFACE_DEF
4067 # - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED:
4068 #
4069 # We implement a list of safe macros instead of a list of
4070 # unsafe macros, even though the latter appears less frequently in
4071 # google code and would have been easier to implement. This is because
4072 # getting the allowed list wrong merely leaves a few redundant semicolons
4073 # unflagged, whereas getting a disallowed list wrong would produce warnings
4074 # whose "fix" leads to compile errors.
4075 #
4076 # In addition to macros, we also don't want to warn on
4077 # - Compound literals
4078 # - Lambdas
4079 # - alignas specifier with anonymous structs
4080 # - decltype
4081 closing_brace_pos = match.group(1).rfind(')')
4082 opening_parenthesis = ReverseCloseExpression(
4083 clean_lines, linenum, closing_brace_pos)
4084 if opening_parenthesis[2] > -1:
4085 line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]]
4086 macro = Search(r'\b([A-Z_][A-Z0-9_]*)\s*$', line_prefix)
4087 func = Match(r'^(.*\])\s*$', line_prefix)
4088 if ((macro and
4089 macro.group(1) not in (
4090 'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST',
4091 'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED',
4092 'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or
4093 (func and not Search(r'\boperator\s*\[\s*\]', func.group(1))) or
4094 Search(r'\b(?:struct|union)\s+alignas\s*$', line_prefix) or
4095 Search(r'\bdecltype$', line_prefix) or
4096 Search(r'\s+=\s*$', line_prefix)):
4097 match = None
4098 if (match and
4099 opening_parenthesis[1] > 1 and
4100 Search(r'\]\s*$', clean_lines.elided[opening_parenthesis[1] - 1])):
4101 # Multi-line lambda-expression
4102 match = None
4103
4104 else:
4105 # Try matching cases 2-3.
4106 match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line)
4107 if not match:
4108 # Try matching cases 4-6. These are always matched on separate lines.
4109 #
4110 # Note that we can't simply concatenate the previous line to the
4111 # current line and do a single match, otherwise we may output
4112 # duplicate warnings for the blank line case:
4113 # if (cond) {
4114 # // blank line
4115 # }
4116 prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
4117 if prevline and Search(r'[;{}]\s*$', prevline):
4118 match = Match(r'^(\s*)\{', line)
4119
4120 # Check matching closing brace
4121 if match:
4122 (endline, endlinenum, endpos) = CloseExpression(
4123 clean_lines, linenum, len(match.group(1)))
4124 if endpos > -1 and Match(r'^\s*;', endline[endpos:]):
4125 # Current {} pair is eligible for semicolon check, and we have found
4126 # the redundant semicolon, output warning here.
4127 #
4128 # Note: because we are scanning forward for opening braces, and
4129 # outputting warnings for the matching closing brace, if there are
4130 # nested blocks with trailing semicolons, we will get the error
4131 # messages in reversed order.
4132
4133 # We need to check the line forward for NOLINT
4134 raw_lines = clean_lines.raw_lines
4135 ParseNolintSuppressions(filename, raw_lines[endlinenum-1], endlinenum-1,
4136 error)
4137 ParseNolintSuppressions(filename, raw_lines[endlinenum], endlinenum,
4138 error)
4139
4140 error(filename, endlinenum, 'readability/braces', 4,
4141 "You don't need a ; after a }")
4142
4143
4144def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
4145 """Look for empty loop/conditional body with only a single semicolon.
4146
4147 Args:
4148 filename: The name of the current file.
4149 clean_lines: A CleansedLines instance containing the file.
4150 linenum: The number of the line to check.
4151 error: The function to call with any errors found.
4152 """
4153
4154 # Search for loop keywords at the beginning of the line. Because only
4155 # whitespaces are allowed before the keywords, this will also ignore most
4156 # do-while-loops, since those lines should start with closing brace.
4157 #
4158 # We also check "if" blocks here, since an empty conditional block
4159 # is likely an error.
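# For example, 'while (ShouldRetry());' and 'if (x == NULL);' are flagged,
# whereas 'for (;;) {}' is not.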
4160 line = clean_lines.elided[linenum]
4161 matched = Match(r'\s*(for|while|if)\s*\(', line)
4162 if matched:
4163 # Find the end of the conditional expression.
4164 (end_line, end_linenum, end_pos) = CloseExpression(
4165 clean_lines, linenum, line.find('('))
4166
4167 # Output warning if what follows the condition expression is a semicolon.
4168 # No warning for all other cases, including whitespace or newline, since we
4169 # have a separate check for semicolons preceded by whitespace.
4170 if end_pos >= 0 and Match(r';', end_line[end_pos:]):
4171 if matched.group(1) == 'if':
4172 error(filename, end_linenum, 'whitespace/empty_conditional_body', 5,
4173 'Empty conditional bodies should use {}')
4174 else:
4175 error(filename, end_linenum, 'whitespace/empty_loop_body', 5,
4176 'Empty loop bodies should use {} or continue')
4177
4178 # Check for if statements that have completely empty bodies (no comments)
4179 # and no else clauses.
4180 if end_pos >= 0 and matched.group(1) == 'if':
4181 # Find the position of the opening { for the if statement.
4182 # Return without logging an error if it has no brackets.
4183 opening_linenum = end_linenum
4184 opening_line_fragment = end_line[end_pos:]
4185 # Loop until EOF or find anything that's not whitespace or opening {.
4186 while not Search(r'^\s*\{', opening_line_fragment):
4187 if Search(r'^(?!\s*$)', opening_line_fragment):
4188 # Conditional has no brackets.
4189 return
4190 opening_linenum += 1
4191 if opening_linenum == len(clean_lines.elided):
4192 # Couldn't find conditional's opening { or any code before EOF.
4193 return
4194 opening_line_fragment = clean_lines.elided[opening_linenum]
4195 # Set opening_line (opening_line_fragment may not be entire opening line).
4196 opening_line = clean_lines.elided[opening_linenum]
4197
4198 # Find the position of the closing }.
4199 opening_pos = opening_line_fragment.find('{')
4200 if opening_linenum == end_linenum:
4201 # We need to make opening_pos relative to the start of the entire line.
4202 opening_pos += end_pos
4203 (closing_line, closing_linenum, closing_pos) = CloseExpression(
4204 clean_lines, opening_linenum, opening_pos)
4205 if closing_pos < 0:
4206 return
4207
4208 # Now construct the body of the conditional. This consists of the portion
4209 # of the opening line after the {, all lines until the closing line,
4210 # and the portion of the closing line before the }.
4211 if (clean_lines.raw_lines[opening_linenum] !=
4212 CleanseComments(clean_lines.raw_lines[opening_linenum])):
4213 # Opening line ends with a comment, so conditional isn't empty.
4214 return
4215 if closing_linenum > opening_linenum:
4216 # Opening line after the {. Ignore comments here since we checked above.
4217 bodylist = list(opening_line[opening_pos+1:])
4218 # All lines until closing line, excluding closing line, with comments.
4219 bodylist.extend(clean_lines.raw_lines[opening_linenum+1:closing_linenum])
4220 # Closing line before the }. Won't (and can't) have comments.
4221 bodylist.append(clean_lines.elided[closing_linenum][:closing_pos-1])
4222 body = '\n'.join(bodylist)
4223 else:
4224 # If statement has brackets and fits on a single line.
4225 body = opening_line[opening_pos+1:closing_pos-1]
4226
4227 # Check if the body is empty
4228 if not _EMPTY_CONDITIONAL_BODY_PATTERN.search(body):
4229 return
4230 # The body is empty. Now make sure there's not an else clause.
4231 current_linenum = closing_linenum
4232 current_line_fragment = closing_line[closing_pos:]
4233 # Loop until EOF or find anything that's not whitespace or else clause.
4234 while Search(r'^\s*$|^(?=\s*else)', current_line_fragment):
4235 if Search(r'^(?=\s*else)', current_line_fragment):
4236 # Found an else clause, so don't log an error.
4237 return
4238 current_linenum += 1
4239 if current_linenum == len(clean_lines.elided):
4240 break
4241 current_line_fragment = clean_lines.elided[current_linenum]
4242
4243 # The body is empty and there's no else clause until EOF or other code.
4244 error(filename, end_linenum, 'whitespace/empty_if_body', 4,
4245 ('If statement had no body and no else clause'))
4246
4247
4248def FindCheckMacro(line):
4249 """Find a replaceable CHECK-like macro.
4250
4251 Args:
4252 line: line to search on.
4253 Returns:
4254 (macro name, start position), or (None, -1) if no replaceable
4255 macro is found.
4256 """
4257 for macro in _CHECK_MACROS:
4258 i = line.find(macro)
4259 if i >= 0:
4260 # Find opening parenthesis. Do a regular expression match here
4261 # to make sure that we are matching the expected CHECK macro, as
4262 # opposed to some other macro that happens to contain the CHECK
4263 # substring.
4264 matched = Match(r'^(.*\b' + macro + r'\s*)\(', line)
4265 if not matched:
4266 continue
4267 return (macro, len(matched.group(1)))
4268 return (None, -1)
4269
4270
4271def CheckCheck(filename, clean_lines, linenum, error):
4272 """Checks the use of CHECK and EXPECT macros.
4273
4274 Args:
4275 filename: The name of the current file.
4276 clean_lines: A CleansedLines instance containing the file.
4277 linenum: The number of the line to check.
4278 error: The function to call with any errors found.
4279 """
4280
4281 # Decide the set of replacement macros that should be suggested
4282 lines = clean_lines.elided
4283 (check_macro, start_pos) = FindCheckMacro(lines[linenum])
4284 if not check_macro:
4285 return
4286
4287 # Find end of the boolean expression by matching parentheses
4288 (last_line, end_line, end_pos) = CloseExpression(
4289 clean_lines, linenum, start_pos)
4290 if end_pos < 0:
4291 return
4292
4293 # If the check macro is followed by something other than a
4294 # semicolon, assume users will log their own custom error messages
4295 # and don't suggest any replacements.
4296 if not Match(r'\s*;', last_line[end_pos:]):
4297 return
4298
4299 if linenum == end_line:
4300 expression = lines[linenum][start_pos + 1:end_pos - 1]
4301 else:
4302 expression = lines[linenum][start_pos + 1:]
4303 for i in xrange(linenum + 1, end_line):
4304 expression += lines[i]
4305 expression += last_line[0:end_pos - 1]
4306
4307 # Parse expression so that we can take parentheses into account.
4308 # This avoids false positives for inputs like "CHECK((a < 4) == b)",
4309 # which is not replaceable by CHECK_LE.
4310 lhs = ''
4311 rhs = ''
4312 operator = None
4313 while expression:
4314 matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||'
4315 r'==|!=|>=|>|<=|<|\()(.*)$', expression)
4316 if matched:
4317 token = matched.group(1)
4318 if token == '(':
4319 # Parenthesized operand
4320 expression = matched.group(2)
4321 (end, _) = FindEndOfExpressionInLine(expression, 0, ['('])
4322 if end < 0:
4323 return # Unmatched parenthesis
4324 lhs += '(' + expression[0:end]
4325 expression = expression[end:]
4326 elif token in ('&&', '||'):
4327 # Logical and/or operators. This means the expression
4328 # contains more than one term, for example:
4329 # CHECK(42 < a && a < b);
4330 #
4331 # These are not replaceable with CHECK_LE, so bail out early.
4332 return
4333 elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'):
4334 # Non-relational operator
4335 lhs += token
4336 expression = matched.group(2)
4337 else:
4338 # Relational operator
4339 operator = token
4340 rhs = matched.group(2)
4341 break
4342 else:
4343 # Unparenthesized operand. Instead of appending to lhs one character
4344 # at a time, we do another regular expression match to consume several
4345 # characters at once if possible. A trivial benchmark shows that this
4346 # is more efficient when the operands are longer than a single
4347 # character, which is generally the case.
4348 matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression)
4349 if not matched:
4350 matched = Match(r'^(\s*\S)(.*)$', expression)
4351 if not matched:
4352 break
4353 lhs += matched.group(1)
4354 expression = matched.group(2)
4355
4356 # Only apply checks if we got all parts of the boolean expression
4357 if not (lhs and operator and rhs):
4358 return
4359
4360 # Check that rhs does not contain logical operators. We already know
4361 # that lhs is fine since the loop above parses out && and ||.
4362 if rhs.find('&&') > -1 or rhs.find('||') > -1:
4363 return
4364
4365 # At least one of the operands must be a constant literal. This is
4366 # to avoid suggesting replacements for unprintable things like
4367 # CHECK(variable != iterator)
4368 #
4369 # The following pattern matches decimal, hex integers, strings, and
4370 # characters (in that order).
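# For example, 'CHECK(x == 42)' triggers a suggestion to use CHECK_EQ below,
# while 'CHECK(a == b)' is skipped because neither operand is a literal.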
4371 lhs = lhs.strip()
4372 rhs = rhs.strip()
4373 match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$'
4374 if Match(match_constant, lhs) or Match(match_constant, rhs):
4375 # Note: since we know both lhs and rhs, we can provide a more
4376 # descriptive error message like:
4377 # Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42)
4378 # Instead of:
4379 # Consider using CHECK_EQ instead of CHECK(a == b)
4380 #
4381 # We are still keeping the less descriptive message because if lhs
4382 # or rhs gets long, the error message might become unreadable.
4383 error(filename, linenum, 'readability/check', 2,
4384 'Consider using %s instead of %s(a %s b)' % (
4385 _CHECK_REPLACEMENT[check_macro][operator],
4386 check_macro, operator))
4387
4388
4389def CheckAltTokens(filename, clean_lines, linenum, error):
4390 """Check alternative keywords being used in boolean expressions.
4391
4392 Args:
4393 filename: The name of the current file.
4394 clean_lines: A CleansedLines instance containing the file.
4395 linenum: The number of the line to check.
4396 error: The function to call with any errors found.
4397 """
4398 line = clean_lines.elided[linenum]
4399
4400 # Avoid preprocessor lines
4401 if Match(r'^\s*#', line):
4402 return
4403
4404 # Last ditch effort to avoid multi-line comments. This will not help
4405 # if the comment started before the current line or ended after the
4406 # current line, but it catches most of the false positives. At least,
4407 # it provides a way to work around this warning for people who use
4408 # multi-line comments in preprocessor macros.
4409 #
4410 # TODO(unknown): remove this once cpplint has better support for
4411 # multi-line comments.
4412 if line.find('/*') >= 0 or line.find('*/') >= 0:
4413 return
4414
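# For example, 'if (a and not b)' would be reported twice, suggesting
# 'operator &&' instead of 'and' and 'operator !' instead of 'not'.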
4415 for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):
4416 error(filename, linenum, 'readability/alt_tokens', 2,
4417 'Use operator %s instead of %s' % (
4418 _ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1)))
4419
4420
4421def GetLineWidth(line):
4422 """Determines the width of the line in column positions.
4423
4424 Args:
4425 line: A string, which may be a Unicode string.
4426
4427 Returns:
4428 The width of the line in column positions, accounting for Unicode
4429 combining characters and wide characters.
4430 """
4431 if isinstance(line, unicode):
4432 width = 0
4433 for uc in unicodedata.normalize('NFC', line):
4434 if unicodedata.east_asian_width(uc) in ('W', 'F'):
4435 width += 2
4436 elif not unicodedata.combining(uc):
4437 width += 1
4438 return width
4439 else:
4440 return len(line)
4441
4442
4443def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,
4444 error):
4445 """Checks rules from the 'C++ style rules' section of cppguide.html.
4446
4447 Most of these rules are hard to test (naming, comment style), but we
4448 do what we can. In particular we check for 2-space indents, line lengths,
4449 tab usage, spaces inside code, etc.
4450
4451 Args:
4452 filename: The name of the current file.
4453 clean_lines: A CleansedLines instance containing the file.
4454 linenum: The number of the line to check.
4455 file_extension: The extension (without the dot) of the filename.
4456 nesting_state: A NestingState instance which maintains information about
4457 the current stack of nested blocks being parsed.
4458 error: The function to call with any errors found.
4459 """
4460
4461 # Don't use "elided" lines here, otherwise we can't check commented lines.
4462 # Don't want to use "raw" either, because we don't want to check inside C++11
4463 # raw strings.
4464 raw_lines = clean_lines.lines_without_raw_strings
4465 line = raw_lines[linenum]
4466 prev = raw_lines[linenum - 1] if linenum > 0 else ''
4467
4468 if line.find('\t') != -1:
4469 error(filename, linenum, 'whitespace/tab', 1,
4470 'Tab found; better to use spaces')
4471
4472 # One or three blank spaces at the beginning of the line is weird; it's
4473 # hard to reconcile that with 2-space indents.
4474 # NOTE: here are the conditions rob pike used for his tests. Mine aren't
4475 # as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces
4476 # if(RLENGTH > 20) complain = 0;
4477 # if(match($0, " +(error|private|public|protected):")) complain = 0;
4478 # if(match(prev, "&& *$")) complain = 0;
4479 # if(match(prev, "\\|\\| *$")) complain = 0;
4480 # if(match(prev, "[\",=><] *$")) complain = 0;
4481 # if(match($0, " <<")) complain = 0;
4482 # if(match(prev, " +for \\(")) complain = 0;
4483 # if(prevodd && match(prevprev, " +for \\(")) complain = 0;
4484 scope_or_label_pattern = r'\s*\w+\s*:\s*\\?$'
4485 classinfo = nesting_state.InnermostClass()
4486 initial_spaces = 0
4487 cleansed_line = clean_lines.elided[linenum]
4488 while initial_spaces < len(line) and line[initial_spaces] == ' ':
4489 initial_spaces += 1
4490 # There are certain situations in which we allow one space, notably for
4491 # section labels, and also lines containing multi-line raw strings.
4492 # We also don't check for lines that look like continuation lines
4493 # (of lines ending in double quotes, commas, equals, or angle brackets)
4494 # because the rules for how to indent those are non-trivial.
4495 if (not Search(r'[",=><] *$', prev) and
4496 (initial_spaces == 1 or initial_spaces == 3) and
4497 not Match(scope_or_label_pattern, cleansed_line) and
4498 not (clean_lines.raw_lines[linenum] != line and
4499 Match(r'^\s*""', line))):
4500 error(filename, linenum, 'whitespace/indent', 3,
4501 'Weird number of spaces at line-start. '
4502 'Are you using a 2-space indent?')
4503
4504 if line and line[-1].isspace():
4505 error(filename, linenum, 'whitespace/end_of_line', 4,
4506 'Line ends in whitespace. Consider deleting these extra spaces.')
4507
4508 # Check if the line is a header guard.
4509 is_header_guard = False
4510 if file_extension in GetHeaderExtensions():
4511 cppvar = GetHeaderGuardCPPVariable(filename)
4512 if (line.startswith('#ifndef %s' % cppvar) or
4513 line.startswith('#define %s' % cppvar) or
4514 line.startswith('#endif // %s' % cppvar)):
4515 is_header_guard = True
4516 # #include lines and header guards can be long, since there's no clean way to
4517 # split them.
4518 #
4519 # URLs can be long too. It's possible to split these, but it makes them
4520 # harder to cut&paste.
4521 #
4522 # The "$Id:...$" comment may also get very long without it being the
4523 # developer's fault.
4524 #
4525 # Doxygen documentation copying can get pretty long when using an overloaded
4526 # function declaration
4527 if (not line.startswith('#include') and not is_header_guard and
4528 not Match(r'^\s*//.*http(s?)://\S*$', line) and
4529 not Match(r'^\s*//\s*[^\s]*$', line) and
4530 not Match(r'^// \$Id:.*#[0-9]+ \$$', line) and
4531 not Match(r'^\s*/// [@\\](copydoc|copydetails|copybrief) .*$', line)):
4532 line_width = GetLineWidth(line)
4533 if line_width > _line_length:
4534 error(filename, linenum, 'whitespace/line_length', 2,
4535 'Lines should be <= %i characters long' % _line_length)
4536
4537 if (cleansed_line.count(';') > 1 and
4538 # allow simple single line lambdas
4539 not Match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}\n\r]*\}',
4540 line) and
4541 # for loops are allowed two ;'s (and may run over two lines).
4542 cleansed_line.find('for') == -1 and
4543 (GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
4544 GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and
4545 # It's ok to have many commands in a switch case that fits in 1 line
4546 not ((cleansed_line.find('case ') != -1 or
4547 cleansed_line.find('default:') != -1) and
4548 cleansed_line.find('break;') != -1)):
4549 error(filename, linenum, 'whitespace/newline', 0,
4550 'More than one command on the same line')
4551
4552 # Some more style checks
4553 CheckBraces(filename, clean_lines, linenum, error)
4554 CheckTrailingSemicolon(filename, clean_lines, linenum, error)
4555 CheckEmptyBlockBody(filename, clean_lines, linenum, error)
4556 CheckAccess(filename, clean_lines, linenum, nesting_state, error)
4557 CheckSpacing(filename, clean_lines, linenum, nesting_state, error)
4558 CheckOperatorSpacing(filename, clean_lines, linenum, error)
4559 CheckParenthesisSpacing(filename, clean_lines, linenum, error)
4560 CheckCommaSpacing(filename, clean_lines, linenum, error)
4561 CheckBracesSpacing(filename, clean_lines, linenum, nesting_state, error)
4562 CheckSpacingForFunctionCall(filename, clean_lines, linenum, error)
4563 CheckCheck(filename, clean_lines, linenum, error)
4564 CheckAltTokens(filename, clean_lines, linenum, error)
4565 classinfo = nesting_state.InnermostClass()
4566 if classinfo:
4567 CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error)
4568
4569
4570_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$')
4571# Matches the first component of a filename delimited by -s and _s. That is:
4572# _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo'
4573# _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo'
4574# _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo'
4575# _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo'
4576_RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')
4577
4578
4579def _DropCommonSuffixes(filename):
4580 """Drops common suffixes like _test.cc or -inl.h from filename.
4581
4582 For example:
4583 >>> _DropCommonSuffixes('foo/foo-inl.h')
4584 'foo/foo'
4585 >>> _DropCommonSuffixes('foo/bar/foo.cc')
4586 'foo/bar/foo'
4587 >>> _DropCommonSuffixes('foo/foo_internal.h')
4588 'foo/foo'
4589 >>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
4590 'foo/foo_unusualinternal'
4591
4592 Args:
4593 filename: The input filename.
4594
4595 Returns:
4596 The filename with the common suffix removed.
4597 """
4598 for suffix in itertools.chain(
4599 ('%s.%s' % (test_suffix.lstrip('_'), ext)
4600 for test_suffix, ext in itertools.product(_test_suffixes, GetNonHeaderExtensions())),
4601 ('%s.%s' % (suffix, ext)
4602 for suffix, ext in itertools.product(['inl', 'imp', 'internal'], GetHeaderExtensions()))):
4603 if (filename.endswith(suffix) and len(filename) > len(suffix) and
4604 filename[-len(suffix) - 1] in ('-', '_')):
4605 return filename[:-len(suffix) - 1]
4606 return os.path.splitext(filename)[0]
4607
4608
4609def _ClassifyInclude(fileinfo, include, is_system):
4610 """Figures out what kind of header 'include' is.
4611
4612 Args:
4613 fileinfo: The current file cpplint is running over. A FileInfo instance.
4614 include: The path to a #included file.
4615 is_system: True if the #include used <> rather than "".
4616
4617 Returns:
4618 One of the _XXX_HEADER constants.
4619
4620 For example:
4621 >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True)
4622 _C_SYS_HEADER
4623 >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True)
4624 _CPP_SYS_HEADER
4625 >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False)
4626 _LIKELY_MY_HEADER
4627 >>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'),
4628 ... 'bar/foo_other_ext.h', False)
4629 _POSSIBLE_MY_HEADER
4630 >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False)
4631 _OTHER_HEADER
4632 """
4633 # This is a list of all standard c++ header files, except
4634 # those already checked for above.
4635 is_cpp_h = include in _CPP_HEADERS
4636
4637 # Headers with C++ extensions shouldn't be considered C system headers
4638 if is_system and os.path.splitext(include)[1] in ['.hpp', '.hxx', '.h++']:
4639 is_system = False
4640
4641 if is_system:
4642 if is_cpp_h:
4643 return _CPP_SYS_HEADER
4644 else:
4645 return _C_SYS_HEADER
4646
4647 # If the target file and the include we're checking share a
4648 # basename when we drop common extensions, and the include
4649 # lives in the same directory, then it's likely to be owned by the target file.
4650 target_dir, target_base = (
4651 os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName())))
4652 include_dir, include_base = os.path.split(_DropCommonSuffixes(include))
4653 target_dir_pub = os.path.normpath(target_dir + '/../public')
4654 target_dir_pub = target_dir_pub.replace('\\', '/')
4655 if target_base == include_base and (
4656 include_dir == target_dir or
4657 include_dir == target_dir_pub):
4658 return _LIKELY_MY_HEADER
4659
4660 # If the target and include share some initial basename
4661 # component, it's possible the target is implementing the
4662 # include, so it's allowed to be first, but we'll never
4663 # complain if it's not there.
4664 target_first_component = _RE_FIRST_COMPONENT.match(target_base)
4665 include_first_component = _RE_FIRST_COMPONENT.match(include_base)
4666 if (target_first_component and include_first_component and
4667 target_first_component.group(0) ==
4668 include_first_component.group(0)):
4669 return _POSSIBLE_MY_HEADER
4670
4671 return _OTHER_HEADER
4672
4673
4674
4675def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
4676 """Check rules that are applicable to #include lines.
4677
4678 Strings on #include lines are NOT removed from elided line, to make
4679 certain tasks easier. However, to prevent false positives, checks
4680 applicable to #include lines in CheckLanguage must be put here.
4681
4682 Args:
4683 filename: The name of the current file.
4684 clean_lines: A CleansedLines instance containing the file.
4685 linenum: The number of the line to check.
4686 include_state: An _IncludeState instance in which the headers are inserted.
4687 error: The function to call with any errors found.
4688 """
4689 fileinfo = FileInfo(filename)
4690 line = clean_lines.lines[linenum]
4691
4692 # "include" should use the new style "foo/bar.h" instead of just "bar.h"
4693 # Only do this check if the included header follows google naming
4694 # conventions. If not, assume that it's a 3rd party API that
4695 # requires special include conventions.
4696 #
4697 # We also make an exception for Lua headers, which follow google
4698 # naming convention but not the include convention.
4699 match = Match(r'#include\s*"([^/]+\.h)"', line)
4700 if match and not _THIRD_PARTY_HEADERS_PATTERN.match(match.group(1)):
4701 error(filename, linenum, 'build/include_subdir', 4,
4702 'Include the directory when naming .h files')
4703
4704 # we shouldn't include a file more than once. actually, there are a
4705 # handful of instances where doing so is okay, but in general it's
4706 # not.
4707 match = _RE_PATTERN_INCLUDE.search(line)
4708 if match:
4709 include = match.group(2)
4710 is_system = (match.group(1) == '<')
4711 duplicate_line = include_state.FindHeader(include)
4712 if duplicate_line >= 0:
4713 error(filename, linenum, 'build/include', 4,
4714 '"%s" already included at %s:%s' %
4715 (include, filename, duplicate_line))
4716 return
4717
4718 for extension in GetNonHeaderExtensions():
4719 if (include.endswith('.' + extension) and
4720 os.path.dirname(fileinfo.RepositoryName()) != os.path.dirname(include)):
4721 error(filename, linenum, 'build/include', 4,
4722 'Do not include .' + extension + ' files from other packages')
4723 return
4724
4725 if not _THIRD_PARTY_HEADERS_PATTERN.match(include):
4726 include_state.include_list[-1].append((include, linenum))
4727
4728 # We want to ensure that headers appear in the right order:
4729 # 1) for foo.cc, foo.h (preferred location)
4730 # 2) c system files
4731 # 3) cpp system files
4732 # 4) for foo.cc, foo.h (deprecated location)
4733 # 5) other google headers
4734 #
4735 # We classify each include statement as one of those 5 types
4736 # using a number of techniques. The include_state object keeps
4737 # track of the highest type seen, and complains if we see a
4738 # lower type after that.
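# Illustrative ordering for a hypothetical foo/foo.cc:
#   #include "foo/foo.h"     // 1) preferred location of own header
#   #include <sys/types.h>   // 2) C system header
#   #include <vector>        // 3) C++ system header
#   #include "bar/other.h"   // 5) other header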
4739 error_message = include_state.CheckNextIncludeOrder(
4740 _ClassifyInclude(fileinfo, include, is_system))
4741 if error_message:
4742 error(filename, linenum, 'build/include_order', 4,
4743 '%s. Should be: %s.h, c system, c++ system, other.' %
4744 (error_message, fileinfo.BaseName()))
4745 canonical_include = include_state.CanonicalizeAlphabeticalOrder(include)
4746 if not include_state.IsInAlphabeticalOrder(
4747 clean_lines, linenum, canonical_include):
4748 error(filename, linenum, 'build/include_alpha', 4,
4749 'Include "%s" not in alphabetical order' % include)
4750 include_state.SetLastHeader(canonical_include)
4751
4752
4753
4754def _GetTextInside(text, start_pattern):
4755 r"""Retrieves all the text between matching open and close parentheses.
4756
4757 Given a string of lines and a regular expression string, retrieve all the text
4758 following the expression and between opening punctuation symbols like
4759 (, [, or {, and the matching close-punctuation symbol. This properly handles
4760 nested occurrences of the punctuation, so for text like
4761 printf(a(), b(c()));
4762 a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'.
4763 start_pattern must match a string that has an opening punctuation symbol at the end.
4764
4765 Args:
4766 text: The text to extract from. Its comments and strings must be elided.
4767 It can be a single line or span multiple lines.
4768 start_pattern: The regexp string indicating where to start extracting
4769 the text.
4770 Returns:
4771 The extracted text.
4772 None if either the opening string or ending punctuation could not be found.
4773 """
4774 # TODO(unknown): Audit cpplint.py to see what places could be profitably
4775 # rewritten to use _GetTextInside (and currently use inferior regexp matching).
4776
4777 # Map opening punctuation to the matching close-punctuation.
4778 matching_punctuation = {'(': ')', '{': '}', '[': ']'}
4779 closing_punctuation = set(itervalues(matching_punctuation))
4780
4781 # Find the position to start extracting text.
4782 match = re.search(start_pattern, text, re.M)
4783 if not match: # start_pattern not found in text.
4784 return None
4785 start_position = match.end(0)
4786
4787 assert start_position > 0, (
4788 'start_pattern must end with an opening punctuation.')
4789 assert text[start_position - 1] in matching_punctuation, (
4790 'start_pattern must end with an opening punctuation.')
4791 # Stack of closing punctuations we expect to have in text after position.
4792 punctuation_stack = [matching_punctuation[text[start_position - 1]]]
4793 position = start_position
4794 while punctuation_stack and position < len(text):
4795 if text[position] == punctuation_stack[-1]:
4796 punctuation_stack.pop()
4797 elif text[position] in closing_punctuation:
4798 # A closing punctuation without matching opening punctuations.
4799 return None
4800 elif text[position] in matching_punctuation:
4801 punctuation_stack.append(matching_punctuation[text[position]])
4802 position += 1
4803 if punctuation_stack:
4804 # Opening punctuations left without matching close-punctuations.
4805 return None
4806 # punctuations match.
4807 return text[start_position:position - 1]
4808
4809
4810# Patterns for matching call-by-reference parameters.
4811#
4812# Supports nested templates up to 2 levels deep using this messy pattern:
4813# < (?: < (?: < [^<>]*
4814# >
4815# | [^<>] )*
4816# >
4817# | [^<>] )*
4818# >
4819_RE_PATTERN_IDENT = r'[_a-zA-Z]\w*' # =~ [[:alpha:]][[:alnum:]]*
4820_RE_PATTERN_TYPE = (
4821 r'(?:const\s+)?(?:typename\s+|class\s+|struct\s+|union\s+|enum\s+)?'
4822 r'(?:\w|'
4823 r'\s*<(?:<(?:<[^<>]*>|[^<>])*>|[^<>])*>|'
4824 r'::)+')
4825# A call-by-reference parameter ends with '& identifier'.
4826_RE_PATTERN_REF_PARAM = re.compile(
4827 r'(' + _RE_PATTERN_TYPE + r'(?:\s*(?:\bconst\b|[*]))*\s*'
4828 r'&\s*' + _RE_PATTERN_IDENT + r')\s*(?:=[^,()]+)?[,)]')
4829# A call-by-const-reference parameter either ends with 'const& identifier'
4830# or looks like 'const type& identifier' when 'type' is atomic.
4831_RE_PATTERN_CONST_REF_PARAM = (
4832 r'(?:.*\s*\bconst\s*&\s*' + _RE_PATTERN_IDENT +
4833 r'|const\s+' + _RE_PATTERN_TYPE + r'\s*&\s*' + _RE_PATTERN_IDENT + r')')
4834# Stream types.
4835_RE_PATTERN_REF_STREAM_PARAM = (
4836 r'(?:.*stream\s*&\s*' + _RE_PATTERN_IDENT + r')')
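# Illustrative behaviour of the patterns above, for a hypothetical declaration
# "void Update(const string& name, Widget& out)": _RE_PATTERN_REF_PARAM matches
# both "const string& name" and "Widget& out"; CheckForNonConstReference later
# skips the first because it also matches _RE_PATTERN_CONST_REF_PARAM, so only
# "Widget& out" is reported as a non-const reference.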
4837
4838
4839def CheckLanguage(filename, clean_lines, linenum, file_extension,
4840 include_state, nesting_state, error):
4841 """Checks rules from the 'C++ language rules' section of cppguide.html.
4842
4843 Some of these rules are hard to test (function overloading, using
4844 uint32 inappropriately), but we do the best we can.
4845
4846 Args:
4847 filename: The name of the current file.
4848 clean_lines: A CleansedLines instance containing the file.
4849 linenum: The number of the line to check.
4850 file_extension: The extension (without the dot) of the filename.
4851 include_state: An _IncludeState instance in which the headers are inserted.
4852 nesting_state: A NestingState instance which maintains information about
4853 the current stack of nested blocks being parsed.
4854 error: The function to call with any errors found.
4855 """
4856 # If the line is empty or consists of entirely a comment, no need to
4857 # check it.
4858 line = clean_lines.elided[linenum]
4859 if not line:
4860 return
4861
4862 match = _RE_PATTERN_INCLUDE.search(line)
4863 if match:
4864 CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
4865 return
4866
4867 # Reset include state across preprocessor directives. This is meant
4868 # to silence warnings for conditional includes.
4869 match = Match(r'^\s*#\s*(if|ifdef|ifndef|elif|else|endif)\b', line)
4870 if match:
4871 include_state.ResetSection(match.group(1))
4872
4873
4874 # Perform other checks now that we are sure that this is not an include line
4875 CheckCasts(filename, clean_lines, linenum, error)
4876 CheckGlobalStatic(filename, clean_lines, linenum, error)
4877 CheckPrintf(filename, clean_lines, linenum, error)
4878
4879 if file_extension in GetHeaderExtensions():
4880 # TODO(unknown): check that 1-arg constructors are explicit.
4881 # How to tell it's a constructor?
4882 # (handled in CheckForNonStandardConstructs for now)
4883 # TODO(unknown): check that classes declare or disable copy/assign
4884 # (level 1 error)
4885 pass
4886
4887 # Check if people are using the verboten C basic types. The only exception
4888 # we regularly allow is "unsigned short port" for port.
4889 if Search(r'\bshort port\b', line):
4890 if not Search(r'\bunsigned short port\b', line):
4891 error(filename, linenum, 'runtime/int', 4,
4892 'Use "unsigned short" for ports, not "short"')
4893 else:
4894 match = Search(r'\b(short|long(?! +double)|long long)\b', line)
4895 if match:
4896 error(filename, linenum, 'runtime/int', 4,
4897 'Use int16/int64/etc, rather than the C type %s' % match.group(1))
4898
4899 # Check if some verboten operator overloading is going on
4900 # TODO(unknown): catch out-of-line unary operator&:
4901 # class X {};
4902 # int operator&(const X& x) { return 42; } // unary operator&
4903 # The trick is it's hard to tell apart from binary operator&:
4904 # class Y { int operator&(const Y& x) { return 23; } }; // binary operator&
4905 if Search(r'\boperator\s*&\s*\(\s*\)', line):
4906 error(filename, linenum, 'runtime/operator', 4,
4907 'Unary operator& is dangerous. Do not use it.')
4908
4909 # Check for suspicious usage of "if" like
4910 # } if (a == b) {
4911 if Search(r'\}\s*if\s*\(', line):
4912 error(filename, linenum, 'readability/braces', 4,
4913 'Did you mean "else if"? If not, start a new line for "if".')
4914
4915 # Check for potential format string bugs like printf(foo).
4916 # We constrain the pattern not to pick things like DocidForPrintf(foo).
4917 # Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
4918 # TODO(unknown): Catch the following case. Need to change the calling
4919 # convention of the whole function to process multiple line to handle it.
4920 # printf(
4921 # boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);
4922 printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
4923 if printf_args:
4924 match = Match(r'([\w.\->()]+)$', printf_args)
4925 if match and match.group(1) != '__VA_ARGS__':
4926 function_name = re.search(r'\b((?:string)?printf)\s*\(',
4927 line, re.I).group(1)
4928 error(filename, linenum, 'runtime/printf', 4,
4929 'Potential format string bug. Do %s("%%s", %s) instead.'
4930 % (function_name, match.group(1)))
4931
4932 # Check for potential memset bugs like memset(buf, sizeof(buf), 0).
4933 match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
4934 if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
4935 error(filename, linenum, 'runtime/memset', 4,
4936 'Did you mean "memset(%s, 0, %s)"?'
4937 % (match.group(1), match.group(2)))
4938
4939 if Search(r'\busing namespace\b', line):
4940 if Search(r'\bliterals\b', line):
4941 error(filename, linenum, 'build/namespaces_literals', 5,
4942 'Do not use namespace using-directives. '
4943 'Use using-declarations instead.')
4944 else:
4945 error(filename, linenum, 'build/namespaces', 5,
4946 'Do not use namespace using-directives. '
4947 'Use using-declarations instead.')
4948
4949 # Detect variable-length arrays.
4950 match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
4951 if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
4952 match.group(3).find(']') == -1):
4953 # Split the size using space and arithmetic operators as delimiters.
4954 # If any of the resulting tokens are not compile time constants then
4955 # report the error.
4956 tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>', match.group(3))
4957 is_const = True
4958 skip_next = False
4959 for tok in tokens:
4960 if skip_next:
4961 skip_next = False
4962 continue
4963
4964 if Search(r'sizeof\(.+\)', tok): continue
4965 if Search(r'arraysize\(\w+\)', tok): continue
4966
4967 tok = tok.lstrip('(')
4968 tok = tok.rstrip(')')
4969 if not tok: continue
4970 if Match(r'\d+', tok): continue
4971 if Match(r'0[xX][0-9a-fA-F]+', tok): continue
4972 if Match(r'k[A-Z0-9]\w*', tok): continue
4973 if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
4974 if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
4975 # A catch all for tricky sizeof cases, including 'sizeof expression',
4976 # 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
4977 # requires skipping the next token because we split on ' ' and '*'.
4978 if tok.startswith('sizeof'):
4979 skip_next = True
4980 continue
4981 is_const = False
4982 break
4983 if not is_const:
4984 error(filename, linenum, 'runtime/arrays', 1,
4985 'Do not use variable-length arrays. Use an appropriately named '
4986 "('k' followed by CamelCase) compile-time constant for the size.")
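# Illustrative examples (hypothetical inputs) for the check above:
#   int buf[kMaxSize];  // accepted: 'k' + CamelCase compile-time constant
#   int buf[n];         // reported as a variable-length array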
4987
4988 # Check for use of unnamed namespaces in header files. Registration
4989 # macros are typically OK, so we allow use of "namespace {" on lines
4990 # that end with backslashes.
4991 if (file_extension in GetHeaderExtensions()
4992 and Search(r'\bnamespace\s*{', line)
4993 and line[-1] != '\\'):
4994 error(filename, linenum, 'build/namespaces', 4,
4995 'Do not use unnamed namespaces in header files. See '
4996 'https://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
4997 ' for more information.')
4998
4999
5000def CheckGlobalStatic(filename, clean_lines, linenum, error):
5001 """Check for unsafe global or static objects.
5002
5003 Args:
5004 filename: The name of the current file.
5005 clean_lines: A CleansedLines instance containing the file.
5006 linenum: The number of the line to check.
5007 error: The function to call with any errors found.
5008 """
5009 line = clean_lines.elided[linenum]
5010
5011 # Match two lines at a time to support multiline declarations
5012 if linenum + 1 < clean_lines.NumLines() and not Search(r'[;({]', line):
5013 line += clean_lines.elided[linenum + 1].strip()
5014
5015 # Check for people declaring static/global STL strings at the top level.
5016 # This is dangerous because the C++ language does not guarantee that
5017 # globals with constructors are initialized before the first access, and
5018 # also because globals can be destroyed when some threads are still running.
5019 # TODO(unknown): Generalize this to also find static unique_ptr instances.
5020 # TODO(unknown): File bugs for clang-tidy to find these.
5021 match = Match(
5022 r'((?:|static +)(?:|const +))(?::*std::)?string( +const)? +'
5023 r'([a-zA-Z0-9_:]+)\b(.*)',
5024 line)
5025
5026 # Remove false positives:
5027 # - String pointers (as opposed to values).
5028 # string *pointer
5029 # const string *pointer
5030 # string const *pointer
5031 # string *const pointer
5032 #
5033 # - Functions and template specializations.
5034 # string Function<Type>(...
5035 # string Class<Type>::Method(...
5036 #
5037 # - Operators. These are matched separately because operator names
5038 # cross non-word boundaries, and trying to match both operators
5039 # and functions at the same time would decrease accuracy of
5040 # matching identifiers.
5041 # string Class::operator*()
5042 if (match and
5043 not Search(r'\bstring\b(\s+const)?\s*[\*\&]\s*(const\s+)?\w', line) and
5044 not Search(r'\boperator\W', line) and
5045 not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)*\s*\(([^"]|$)', match.group(4))):
5046 if Search(r'\bconst\b', line):
5047 error(filename, linenum, 'runtime/string', 4,
5048 'For a static/global string constant, use a C style string '
5049 'instead: "%schar%s %s[]".' %
5050 (match.group(1), match.group(2) or '', match.group(3)))
5051 else:
5052 error(filename, linenum, 'runtime/string', 4,
5053 'Static/global string variables are not permitted.')
5054
5055 if (Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line) or
5056 Search(r'\b([A-Za-z0-9_]*_)\(CHECK_NOTNULL\(\1\)\)', line)):
5057 error(filename, linenum, 'runtime/init', 4,
5058 'You seem to be initializing a member variable with itself.')
5059
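# Illustrative example (hypothetical input): a line such as
#   static const string kGreeting = "hello";
# is reported by CheckGlobalStatic, with the suggestion to use a C-style string:
#   static const char kGreeting[] = "hello";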
5060
5061def CheckPrintf(filename, clean_lines, linenum, error):
5062 """Check for printf related issues.
5063
5064 Args:
5065 filename: The name of the current file.
5066 clean_lines: A CleansedLines instance containing the file.
5067 linenum: The number of the line to check.
5068 error: The function to call with any errors found.
5069 """
5070 line = clean_lines.elided[linenum]
5071
5072 # When snprintf is used, the second argument shouldn't be a literal.
5073 match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
5074 if match and match.group(2) != '0':
5075 # If 2nd arg is zero, snprintf is used to calculate size.
5076 error(filename, linenum, 'runtime/printf', 3,
5077 'If you can, use sizeof(%s) instead of %s as the 2nd arg '
5078 'to snprintf.' % (match.group(1), match.group(2)))
5079
5080 # Check if some verboten C functions are being used.
5081 if Search(r'\bsprintf\s*\(', line):
5082 error(filename, linenum, 'runtime/printf', 5,
5083 'Never use sprintf. Use snprintf instead.')
5084 match = Search(r'\b(strcpy|strcat)\s*\(', line)
5085 if match:
5086 error(filename, linenum, 'runtime/printf', 4,
5087 'Almost always, snprintf is better than %s' % match.group(1))
5088
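# Illustrative example (hypothetical input): for
#   snprintf(buf, 10, "%s", name);
# CheckPrintf suggests passing sizeof(buf) rather than the literal 10 as the
# second argument.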
5089
5090def IsDerivedFunction(clean_lines, linenum):
5091 """Check if current line contains an inherited function.
5092
5093 Args:
5094 clean_lines: A CleansedLines instance containing the file.
5095 linenum: The number of the line to check.
5096 Returns:
5097 True if current line contains a function with "override"
5098 virt-specifier.
5099 """
5100 # Scan back a few lines for start of current function
5101 for i in xrange(linenum, max(-1, linenum - 10), -1):
5102 match = Match(r'^([^()]*\w+)\(', clean_lines.elided[i])
5103 if match:
5104 # Look for "override" after the matching closing parenthesis
5105 line, _, closing_paren = CloseExpression(
5106 clean_lines, i, len(match.group(1)))
5107 return (closing_paren >= 0 and
5108 Search(r'\boverride\b', line[closing_paren:]))
5109 return False
5110
5111
5112def IsOutOfLineMethodDefinition(clean_lines, linenum):
5113 """Check if current line contains an out-of-line method definition.
5114
5115 Args:
5116 clean_lines: A CleansedLines instance containing the file.
5117 linenum: The number of the line to check.
5118 Returns:
5119 True if current line contains an out-of-line method definition.
5120 """
5121 # Scan back a few lines for start of current function
5122 for i in xrange(linenum, max(-1, linenum - 10), -1):
5123 if Match(r'^([^()]*\w+)\(', clean_lines.elided[i]):
5124 return Match(r'^[^()]*\w+::\w+\(', clean_lines.elided[i]) is not None
5125 return False
5126
5127
5128def IsInitializerList(clean_lines, linenum):
5129 """Check if current line is inside constructor initializer list.
5130
5131 Args:
5132 clean_lines: A CleansedLines instance containing the file.
5133 linenum: The number of the line to check.
5134 Returns:
5135 True if current line appears to be inside constructor initializer
5136 list, False otherwise.
5137 """
5138 for i in xrange(linenum, 1, -1):
5139 line = clean_lines.elided[i]
5140 if i == linenum:
5141 remove_function_body = Match(r'^(.*)\{\s*$', line)
5142 if remove_function_body:
5143 line = remove_function_body.group(1)
5144
5145 if Search(r'\s:\s*\w+[({]', line):
5146 # A lone colon tends to indicate the start of a constructor
5147 # initializer list. It could also be a ternary operator, which
5148 # also tends to appear in constructor initializer lists as
5149 # opposed to parameter lists.
5150 return True
5151 if Search(r'\}\s*,\s*$', line):
5152 # A closing brace followed by a comma is probably the end of a
5153 # brace-initialized member in constructor initializer list.
5154 return True
5155 if Search(r'[{};]\s*$', line):
5156 # Found one of the following:
5157 # - A closing brace or semicolon, probably the end of the previous
5158 # function.
5159 # - An opening brace, probably the start of current class or namespace.
5160 #
5161 # Current line is probably not inside an initializer list since
5162 # we saw one of those things without seeing the starting colon.
5163 return False
5164
5165 # Got to the beginning of the file without seeing the start of
5166 # constructor initializer list.
5167 return False
5168
5169
5170def CheckForNonConstReference(filename, clean_lines, linenum,
5171 nesting_state, error):
5172 """Check for non-const references.
5173
5174 Separate from CheckLanguage since it scans backwards from current
5175 line, instead of scanning forward.
5176
5177 Args:
5178 filename: The name of the current file.
5179 clean_lines: A CleansedLines instance containing the file.
5180 linenum: The number of the line to check.
5181 nesting_state: A NestingState instance which maintains information about
5182 the current stack of nested blocks being parsed.
5183 error: The function to call with any errors found.
5184 """
5185 # Do nothing if there is no '&' on current line.
5186 line = clean_lines.elided[linenum]
5187 if '&' not in line:
5188 return
5189
5190 # If a function is inherited, current function doesn't have much of
5191 # a choice, so any non-const references should not be blamed on
5192 # derived function.
5193 if IsDerivedFunction(clean_lines, linenum):
5194 return
5195
5196 # Don't warn on out-of-line method definitions, as we would warn on the
5197 # in-line declaration, if it isn't marked with 'override'.
5198 if IsOutOfLineMethodDefinition(clean_lines, linenum):
5199 return
5200
5201 # Long type names may be broken across multiple lines, usually in one
5202 # of these forms:
5203 # LongType
5204 # ::LongTypeContinued &identifier
5205 # LongType::
5206 # LongTypeContinued &identifier
5207 # LongType<
5208 # ...>::LongTypeContinued &identifier
5209 #
5210 # If we detected a type split across two lines, join the previous
5211 # line to current line so that we can match const references
5212 # accordingly.
5213 #
5214 # Note that this only scans back one line, since scanning back
5215 # arbitrary number of lines would be expensive. If you have a type
5216 # that spans more than 2 lines, please use a typedef.
5217 if linenum > 1:
5218 previous = None
5219 if Match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line):
5220 # previous_line\n + ::current_line
5221 previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$',
5222 clean_lines.elided[linenum - 1])
5223 elif Match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line):
5224 # previous_line::\n + current_line
5225 previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$',
5226 clean_lines.elided[linenum - 1])
5227 if previous:
5228 line = previous.group(1) + line.lstrip()
5229 else:
5230 # Check for templated parameter that is split across multiple lines
5231 endpos = line.rfind('>')
5232 if endpos > -1:
5233 (_, startline, startpos) = ReverseCloseExpression(
5234 clean_lines, linenum, endpos)
5235 if startpos > -1 and startline < linenum:
5236 # Found the matching < on an earlier line, collect all
5237 # pieces up to current line.
5238 line = ''
5239 for i in xrange(startline, linenum + 1):
5240 line += clean_lines.elided[i].strip()
5241
5242 # Check for non-const references in function parameters. A single '&' may
5243 # be found in the following places:
5244 # inside expression: binary & for bitwise AND
5245 # inside expression: unary & for taking the address of something
5246 # inside declarators: reference parameter
5247 # We will exclude the first two cases by checking that we are not inside a
5248 # function body, including one that was just introduced by a trailing '{'.
5249 # TODO(unknown): Doesn't account for 'catch(Exception& e)' [rare].
5250 if (nesting_state.previous_stack_top and
5251 not (isinstance(nesting_state.previous_stack_top, _ClassInfo) or
5252 isinstance(nesting_state.previous_stack_top, _NamespaceInfo))):
5253 # Not at toplevel, not within a class, and not within a namespace
5254 return
5255
5256 # Avoid initializer lists. We only need to scan back from the
5257 # current line for something that starts with ':'.
5258 #
5259 # We don't need to check the current line, since the '&' would
5260 # appear inside the second set of parentheses on the current line as
5261 # opposed to the first set.
5262 if linenum > 0:
5263 for i in xrange(linenum - 1, max(0, linenum - 10), -1):
5264 previous_line = clean_lines.elided[i]
5265 if not Search(r'[),]\s*$', previous_line):
5266 break
5267 if Match(r'^\s*:\s+\S', previous_line):
5268 return
5269
5270 # Avoid preprocessors
5271 if Search(r'\\\s*$', line):
5272 return
5273
5274 # Avoid constructor initializer lists
5275 if IsInitializerList(clean_lines, linenum):
5276 return
5277
5278 # We allow non-const references in a few standard places, like functions
5279 # called "swap()" or iostream operators like "<<" or ">>". Do not check
5280 # those function parameters.
5281 #
5282 # We also accept & in static_assert, which looks like a function but
5283 # it's actually a declaration expression.
5284 allowed_functions = (r'(?:[sS]wap(?:<\w:+>)?|'
5285 r'operator\s*[<>][<>]|'
5286 r'static_assert|COMPILE_ASSERT'
5287 r')\s*\(')
5288 if Search(allowed_functions, line):
5289 return
5290 elif not Search(r'\S+\([^)]*$', line):
5291 # Don't see an allowed function on this line. Actually we
5292 # didn't see any function name on this line, so this is likely a
5293 # multi-line parameter list. Try a bit harder to catch this case.
5294 for i in xrange(2):
5295 if (linenum > i and
5296 Search(allowed_functions, clean_lines.elided[linenum - i - 1])):
5297 return
5298
5299 decls = ReplaceAll(r'{[^}]*}', ' ', line) # exclude function body
5300 for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls):
5301 if (not Match(_RE_PATTERN_CONST_REF_PARAM, parameter) and
5302 not Match(_RE_PATTERN_REF_STREAM_PARAM, parameter)):
5303 error(filename, linenum, 'runtime/references', 2,
5304 'Is this a non-const reference? '
5305 'If so, make const or use a pointer: ' +
5306 ReplaceAll(' *<', '<', parameter))
5307
5308
5309def CheckCasts(filename, clean_lines, linenum, error):
5310 """Various cast related checks.
5311
5312 Args:
5313 filename: The name of the current file.
5314 clean_lines: A CleansedLines instance containing the file.
5315 linenum: The number of the line to check.
5316 error: The function to call with any errors found.
5317 """
5318 line = clean_lines.elided[linenum]
5319
5320 # Check to see if they're using a conversion function cast.
5321 # I just try to capture the most common basic types, though there are more.
5322 # Parameterless conversion functions, such as bool(), are allowed as they are
5323 # probably a member operator declaration or default constructor.
5324 match = Search(
5325 r'(\bnew\s+(?:const\s+)?|\S<\s*(?:const\s+)?)?\b'
5326 r'(int|float|double|bool|char|int32|uint32|int64|uint64)'
5327 r'(\([^)].*)', line)
5328 expecting_function = ExpectingFunctionArgs(clean_lines, linenum)
5329 if match and not expecting_function:
5330 matched_type = match.group(2)
5331
5332 # matched_new_or_template is used to silence two false positives:
5333 # - New operators
5334 # - Template arguments with function types
5335 #
5336 # For template arguments, we match on types immediately following
5337 # an opening bracket without any spaces. This is a fast way to
5338 # silence the common case where the function type is the first
5339 # template argument. False negative with less-than comparison is
5340 # avoided because those operators are usually followed by a space.
5341 #
5342 # function<double(double)> // bracket + no space = false positive
5343 # value < double(42) // bracket + space = true positive
5344 matched_new_or_template = match.group(1)
5345
5346 # Avoid arrays by looking for brackets that come after the closing
5347 # parenthesis.
5348 if Match(r'\([^()]+\)\s*\[', match.group(3)):
5349 return
5350
5351 # Other things to ignore:
5352 # - Function pointers
5353 # - Casts to pointer types
5354 # - Placement new
5355 # - Alias declarations
5356 matched_funcptr = match.group(3)
5357 if (matched_new_or_template is None and
5358 not (matched_funcptr and
5359 (Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(',
5360 matched_funcptr) or
5361 matched_funcptr.startswith('(*)'))) and
5362 not Match(r'\s*using\s+\S+\s*=\s*' + matched_type, line) and
5363 not Search(r'new\(\S+\)\s*' + matched_type, line)):
5364 error(filename, linenum, 'readability/casting', 4,
5365 'Using deprecated casting style. '
5366 'Use static_cast<%s>(...) instead' %
5367 matched_type)
5368
5369 if not expecting_function:
5370 CheckCStyleCast(filename, clean_lines, linenum, 'static_cast',
5371 r'\((int|float|double|bool|char|u?int(16|32|64))\)', error)
5372
5373 # This doesn't catch all cases. Consider (const char * const)"hello".
5374 #
5375 # (char *) "foo" should always be a const_cast (reinterpret_cast won't
5376 # compile).
5377 if CheckCStyleCast(filename, clean_lines, linenum, 'const_cast',
5378 r'\((char\s?\*+\s?)\)\s*"', error):
5379 pass
5380 else:
5381 # Check pointer casts for other than string constants
5382 CheckCStyleCast(filename, clean_lines, linenum, 'reinterpret_cast',
5383 r'\((\w+\s?\*+\s?)\)', error)
5384
5385 # In addition, we look for people taking the address of a cast. This
5386 # is dangerous -- casts can assign to temporaries, so the pointer doesn't
5387 # point where you think.
5388 #
5389 # Some non-identifier character is required before the '&' for the
5390 # expression to be recognized as a cast. These are casts:
5391 # expression = &static_cast<int*>(temporary());
5392 # function(&(int*)(temporary()));
5393 #
5394 # This is not a cast:
5395 # reference_type&(int* function_param);
5396 match = Search(
5397 r'(?:[^\w]&\(([^)*][^)]*)\)[\w(])|'
5398 r'(?:[^\w]&(static|dynamic|down|reinterpret)_cast\b)', line)
5399 if match:
5400 # Try a better error message when the & is bound to something
5401 # dereferenced by the casted pointer, as opposed to the casted
5402 # pointer itself.
5403 parenthesis_error = False
5404 match = Match(r'^(.*&(?:static|dynamic|down|reinterpret)_cast\b)<', line)
5405 if match:
5406 _, y1, x1 = CloseExpression(clean_lines, linenum, len(match.group(1)))
5407 if x1 >= 0 and clean_lines.elided[y1][x1] == '(':
5408 _, y2, x2 = CloseExpression(clean_lines, y1, x1)
5409 if x2 >= 0:
5410 extended_line = clean_lines.elided[y2][x2:]
5411 if y2 < clean_lines.NumLines() - 1:
5412 extended_line += clean_lines.elided[y2 + 1]
5413 if Match(r'\s*(?:->|\[)', extended_line):
5414 parenthesis_error = True
5415
5416 if parenthesis_error:
5417 error(filename, linenum, 'readability/casting', 4,
5418 ('Are you taking an address of something dereferenced '
5419 'from a cast? Wrapping the dereferenced expression in '
5420 'parentheses will make the binding more obvious'))
5421 else:
5422 error(filename, linenum, 'runtime/casting', 4,
5423 ('Are you taking an address of a cast? '
5424 'This is dangerous: could be a temp var. '
5425 'Take the address before doing the cast, rather than after'))
5426
5427
5428def CheckCStyleCast(filename, clean_lines, linenum, cast_type, pattern, error):
5429 """Checks for a C-style cast by looking for the pattern.
5430
5431 Args:
5432 filename: The name of the current file.
5433 clean_lines: A CleansedLines instance containing the file.
5434 linenum: The number of the line to check.
5435 cast_type: The string for the C++ cast to recommend. This is either
5436 reinterpret_cast, static_cast, or const_cast, depending.
5437 pattern: The regular expression used to find C-style casts.
5438 error: The function to call with any errors found.
5439
5440 Returns:
5441 True if an error was emitted.
5442 False otherwise.
5443 """
5444 line = clean_lines.elided[linenum]
5445 match = Search(pattern, line)
5446 if not match:
5447 return False
5448
5449 # Exclude lines with keywords that tend to look like casts
5450 context = line[0:match.start(1) - 1]
5451 if Match(r'.*\b(?:sizeof|alignof|alignas|[_A-Z][_A-Z0-9]*)\s*$', context):
5452 return False
5453
5454 # Try expanding the current context to see if we are one level of
5455 # parentheses inside a macro.
5456 if linenum > 0:
5457 for i in xrange(linenum - 1, max(0, linenum - 5), -1):
5458 context = clean_lines.elided[i] + context
5459 if Match(r'.*\b[_A-Z][_A-Z0-9]*\s*\((?:\([^()]*\)|[^()])*$', context):
5460 return False
5461
5462 # operator++(int) and operator--(int)
5463 if context.endswith(' operator++') or context.endswith(' operator--'):
5464 return False
5465
5466 # A single unnamed argument for a function tends to look like an old-style cast.
5467 # If we see those, don't issue warnings for deprecated casts.
5468 remainder = line[match.end(0):]
5469 if Match(r'^\s*(?:;|const\b|throw\b|final\b|override\b|[=>{),]|->)',
5470 remainder):
5471 return False
5472
5473 # At this point, all that should be left is actual casts.
5474 error(filename, linenum, 'readability/casting', 4,
5475 'Using C-style cast. Use %s<%s>(...) instead' %
5476 (cast_type, match.group(1)))
5477
5478 return True
5479
5480
5481def ExpectingFunctionArgs(clean_lines, linenum):
5482 """Checks whether function type arguments are expected.
5483
5484 Args:
5485 clean_lines: A CleansedLines instance containing the file.
5486 linenum: The number of the line to check.
5487
5488 Returns:
5489 True if the line at 'linenum' is inside something that expects arguments
5490 of function types.
5491 """
5492 line = clean_lines.elided[linenum]
5493 return (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or
5494 (linenum >= 2 and
5495 (Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$',
5496 clean_lines.elided[linenum - 1]) or
5497 Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$',
5498 clean_lines.elided[linenum - 2]) or
5499 Search(r'\bstd::m?function\s*\<\s*$',
5500 clean_lines.elided[linenum - 1]))))
5501
5502
5503_HEADERS_CONTAINING_TEMPLATES = (
5504 ('<deque>', ('deque',)),
5505 ('<functional>', ('unary_function', 'binary_function',
5506 'plus', 'minus', 'multiplies', 'divides', 'modulus',
5507 'negate',
5508 'equal_to', 'not_equal_to', 'greater', 'less',
5509 'greater_equal', 'less_equal',
5510 'logical_and', 'logical_or', 'logical_not',
5511 'unary_negate', 'not1', 'binary_negate', 'not2',
5512 'bind1st', 'bind2nd',
5513 'pointer_to_unary_function',
5514 'pointer_to_binary_function',
5515 'ptr_fun',
5516 'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
5517 'mem_fun_ref_t',
5518 'const_mem_fun_t', 'const_mem_fun1_t',
5519 'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
5520 'mem_fun_ref',
5521 )),
5522 ('<limits>', ('numeric_limits',)),
5523 ('<list>', ('list',)),
5524 ('<map>', ('map', 'multimap',)),
5525 ('<memory>', ('allocator', 'make_shared', 'make_unique', 'shared_ptr',
5526 'unique_ptr', 'weak_ptr')),
5527 ('<queue>', ('queue', 'priority_queue',)),
5528 ('<set>', ('set', 'multiset',)),
5529 ('<stack>', ('stack',)),
5530 ('<string>', ('char_traits', 'basic_string',)),
5531 ('<tuple>', ('tuple',)),
5532 ('<unordered_map>', ('unordered_map', 'unordered_multimap')),
5533 ('<unordered_set>', ('unordered_set', 'unordered_multiset')),
5534 ('<utility>', ('pair',)),
5535 ('<vector>', ('vector',)),
5536
5537 # gcc extensions.
5538 # Note: std::hash is their hash, ::hash is our hash
5539 ('<hash_map>', ('hash_map', 'hash_multimap',)),
5540 ('<hash_set>', ('hash_set', 'hash_multiset',)),
5541 ('<slist>', ('slist',)),
5542 )
5543
5544_HEADERS_MAYBE_TEMPLATES = (
5545 ('<algorithm>', ('copy', 'max', 'min', 'min_element', 'sort',
5546 'transform',
5547 )),
5548 ('<utility>', ('forward', 'make_pair', 'move', 'swap')),
5549 )
5550
5551_RE_PATTERN_STRING = re.compile(r'\bstring\b')
5552
5553_re_pattern_headers_maybe_templates = []
5554for _header, _templates in _HEADERS_MAYBE_TEMPLATES:
5555 for _template in _templates:
5556 # Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
5557 # type::max().
5558 _re_pattern_headers_maybe_templates.append(
5559 (re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
5560 _template,
5561 _header))
5562
5563# Other scripts may reach in and modify this pattern.
5564_re_pattern_templates = []
5565for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
5566 for _template in _templates:
5567 _re_pattern_templates.append(
5568 (re.compile(r'(\<|\b)' + _template + r'\s*\<'),
5569 _template + '<>',
5570 _header))
5571
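# Illustrative example (hypothetical input): a line containing
# "std::vector<int> values;" matches the "vector<>" pattern built above, so
# CheckForIncludeWhatYouUse will require <vector> unless it is already included
# by this file or its associated header.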
5572
5573def FilesBelongToSameModule(filename_cc, filename_h):
5574 """Check if these two filenames belong to the same module.
5575
5576 The concept of a 'module' here is as follows:
5577 foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the
5578 same 'module' if they are in the same directory.
5579 some/path/public/xyzzy and some/path/internal/xyzzy are also considered
5580 to belong to the same module here.
5581
5582 If the filename_cc contains a longer path than the filename_h, for example,
5583 '/absolute/path/to/base/sysinfo.cc', and this file would include
5584 'base/sysinfo.h', this function also produces the prefix needed to open the
5585 header. This is used by the caller of this function to more robustly open the
5586 header file. We don't have access to the real include paths in this context,
5587 so we need this guesswork here.
5588
5589 Known bugs: tools/base/bar.cc and base/bar.h belong to the same module
5590 according to this implementation. Because of this, this function gives
5591 some false positives. This should be sufficiently rare in practice.
5592
5593 Args:
5594 filename_cc: is the path for the source (e.g. .cc) file
5595 filename_h: is the path for the header (e.g. .h) file
5596
5597 Returns:
5598 Tuple with a bool and a string:
5599 bool: True if filename_cc and filename_h belong to the same module.
5600 string: the additional prefix needed to open the header file.
5601 """
5602 fileinfo_cc = FileInfo(filename_cc)
5603 if not fileinfo_cc.Extension().lstrip('.') in GetNonHeaderExtensions():
5604 return (False, '')
5605
5606 fileinfo_h = FileInfo(filename_h)
5607 if not fileinfo_h.Extension().lstrip('.') in GetHeaderExtensions():
5608 return (False, '')
5609
5610 filename_cc = filename_cc[:-(len(fileinfo_cc.Extension()))]
5611 matched_test_suffix = Search(_TEST_FILE_SUFFIX, fileinfo_cc.BaseName())
5612 if matched_test_suffix:
5613 filename_cc = filename_cc[:-len(matched_test_suffix.group(1))]
5614
5615 filename_cc = filename_cc.replace('/public/', '/')
5616 filename_cc = filename_cc.replace('/internal/', '/')
5617
5618 filename_h = filename_h[:-(len(fileinfo_h.Extension()))]
5619 if filename_h.endswith('-inl'):
5620 filename_h = filename_h[:-len('-inl')]
5621 filename_h = filename_h.replace('/public/', '/')
5622 filename_h = filename_h.replace('/internal/', '/')
5623
5624 files_belong_to_same_module = filename_cc.endswith(filename_h)
5625 common_path = ''
5626 if files_belong_to_same_module:
5627 common_path = filename_cc[:-len(filename_h)]
5628 return files_belong_to_same_module, common_path
5629
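# Illustrative example (hypothetical paths), assuming the default test suffixes
# defined earlier in this file:
#   FilesBelongToSameModule('/src/base/foo_test.cc', 'base/foo.h')
#     == (True, '/src/')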
5630
5631def UpdateIncludeState(filename, include_dict, io=codecs):
5632 """Fill up the include_dict with new includes found from the file.
5633
5634 Args:
5635 filename: the name of the header to read.
5636 include_dict: a dictionary in which the headers are inserted.
5637 io: The io factory to use to read the file. Provided for testability.
5638
5639 Returns:
5640 True if a header was successfully added. False otherwise.
5641 """
5642 headerfile = None
5643 try:
5644 headerfile = io.open(filename, 'r', 'utf8', 'replace')
5645 except IOError:
5646 return False
5647 linenum = 0
5648 for line in headerfile:
5649 linenum += 1
5650 clean_line = CleanseComments(line)
5651 match = _RE_PATTERN_INCLUDE.search(clean_line)
5652 if match:
5653 include = match.group(2)
5654 include_dict.setdefault(include, linenum)
5655 return True
5656
5657
5658def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
5659 io=codecs):
5660 """Reports missing STL includes.
5661
5662 This function will output warnings to make sure you are including the headers
5663 necessary for the stl containers and functions that you use. We only give one
5664 reason to include a header. For example, if you use both equal_to<> and
5665 less<> in a .h file, only one (the latter in the file) of these will be
5666 reported as a reason to include the <functional> header.
5667
5668 Args:
5669 filename: The name of the current file.
5670 clean_lines: A CleansedLines instance containing the file.
5671 include_state: An _IncludeState instance.
5672 error: The function to call with any errors found.
5673 io: The IO factory to use to read the header file. Provided for unittest
5674 injection.
5675 """
5676 required = {} # A map of header name to linenumber and the template entity.
5677 # Example of required: { '<functional>': (1219, 'less<>') }
5678
5679 for linenum in range(clean_lines.NumLines()):
5680 line = clean_lines.elided[linenum]
5681 if not line or line[0] == '#':
5682 continue
5683
5684 # String is special -- it is a non-templatized type in STL.
5685 matched = _RE_PATTERN_STRING.search(line)
5686 if matched:
5687 # Don't warn about strings in non-STL namespaces:
5688 # (We check only the first match per line; good enough.)
5689 prefix = line[:matched.start()]
5690 if prefix.endswith('std::') or not prefix.endswith('::'):
5691 required['<string>'] = (linenum, 'string')
5692
5693 for pattern, template, header in _re_pattern_headers_maybe_templates:
5694 if pattern.search(line):
5695 required[header] = (linenum, template)
5696
5697 # The following check is just a speed-up; no semantics are changed.
5698 if '<' not in line: # Reduces the CPU time usage by skipping lines.
5699 continue
5700
5701 for pattern, template, header in _re_pattern_templates:
5702 matched = pattern.search(line)
5703 if matched:
5704 # Don't warn about IWYU in non-STL namespaces:
5705 # (We check only the first match per line; good enough.)
5706 prefix = line[:matched.start()]
5707 if prefix.endswith('std::') or not prefix.endswith('::'):
5708 required[header] = (linenum, template)
5709
5710 # The policy is that if you #include something in foo.h you don't need to
5711 # include it again in foo.cc. Here, we will look at possible includes.
5712 # Let's flatten the include_state include_list and copy it into a dictionary.
5713 include_dict = dict([item for sublist in include_state.include_list
5714 for item in sublist])
5715
5716 # Did we find the header for this file (if any) and successfully load it?
5717 header_found = False
5718
5719 # Use the absolute path so that matching works properly.
5720 abs_filename = FileInfo(filename).FullName()
5721
5722 # For Emacs's flymake.
5723 # If cpplint is invoked from Emacs's flymake, a temporary file is generated
5724 # by flymake and that file name might end with '_flymake.cc'. In that case,
5725 # restore original file name here so that the corresponding header file can be
5726 # found.
5727 # e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h'
5728 # instead of 'foo_flymake.h'
5729 abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename)
5730
5731 # include_dict is modified during iteration, so we iterate over a copy of
5732 # the keys.
5733 header_keys = list(include_dict.keys())
5734 for header in header_keys:
5735 (same_module, common_path) = FilesBelongToSameModule(abs_filename, header)
5736 fullpath = common_path + header
5737 if same_module and UpdateIncludeState(fullpath, include_dict, io):
5738 header_found = True
5739
5740 # If we can't find the header file for a .cc, assume it's because we don't
5741 # know where to look. In that case we'll give up as we're not sure they
5742 # didn't include it in the .h file.
5743 # TODO(unknown): Do a better job of finding .h files so we are confident that
5744 # not having the .h file means there isn't one.
5745 if not header_found:
5746 for extension in GetNonHeaderExtensions():
5747 if filename.endswith('.' + extension):
5748 return
5749
5750 # All the lines have been processed, report the errors found.
5751 for required_header_unstripped in sorted(required, key=required.__getitem__):
5752 template = required[required_header_unstripped][1]
5753 if required_header_unstripped.strip('<>"') not in include_dict:
5754 error(filename, required[required_header_unstripped][0],
5755 'build/include_what_you_use', 4,
5756 'Add #include ' + required_header_unstripped + ' for ' + template)
5757
5758
5759_RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r'\bmake_pair\s*<')
5760
5761
5762def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):
5763 """Check that make_pair's template arguments are deduced.
5764
5765 G++ 4.6 in C++11 mode fails badly if make_pair's template arguments are
5766 specified explicitly, and such use isn't intended in any case.
5767
5768 Args:
5769 filename: The name of the current file.
5770 clean_lines: A CleansedLines instance containing the file.
5771 linenum: The number of the line to check.
5772 error: The function to call with any errors found.
5773 """
5774 line = clean_lines.elided[linenum]
5775 match = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(line)
5776 if match:
5777 error(filename, linenum, 'build/explicit_make_pair',
5778 4, # 4 = high confidence
5779 'For C++11-compatibility, omit template arguments from make_pair'
5780 ' OR use pair directly OR if appropriate, construct a pair directly')
5781
5782
5783def CheckRedundantVirtual(filename, clean_lines, linenum, error):
5784 """Check if line contains a redundant "virtual" function-specifier.
5785
5786 Args:
5787 filename: The name of the current file.
5788 clean_lines: A CleansedLines instance containing the file.
5789 linenum: The number of the line to check.
5790 error: The function to call with any errors found.
5791 """
5792 # Look for "virtual" on current line.
5793 line = clean_lines.elided[linenum]
5794 virtual = Match(r'^(.*)(\bvirtual\b)(.*)$', line)
5795 if not virtual: return
5796
5797 # Ignore "virtual" keywords that are near access-specifiers. These
5798 # are only used in class base-specifier and do not apply to member
5799 # functions.
5800 if (Search(r'\b(public|protected|private)\s+$', virtual.group(1)) or
5801 Match(r'^\s+(public|protected|private)\b', virtual.group(3))):
5802 return
5803
5804 # Ignore the "virtual" keyword from virtual base classes. Usually
5805 # there is a column on the same line in these cases (virtual base
5806 # classes are rare in google3 because multiple inheritance is rare).
5807 if Match(r'^.*[^:]:[^:].*$', line): return
5808
5809 # Look for the next opening parenthesis. This is the start of the
5810 # parameter list (possibly on the next line shortly after virtual).
5811 # TODO(unknown): doesn't work if there are virtual functions with
5812 # decltype() or other things that use parentheses, but csearch suggests
5813 # that this is rare.
5814 end_col = -1
5815 end_line = -1
5816 start_col = len(virtual.group(2))
5817 for start_line in xrange(linenum, min(linenum + 3, clean_lines.NumLines())):
5818 line = clean_lines.elided[start_line][start_col:]
5819 parameter_list = Match(r'^([^(]*)\(', line)
5820 if parameter_list:
5821 # Match parentheses to find the end of the parameter list
5822 (_, end_line, end_col) = CloseExpression(
5823 clean_lines, start_line, start_col + len(parameter_list.group(1)))
5824 break
5825 start_col = 0
5826
5827 if end_col < 0:
5828 return # Couldn't find end of parameter list, give up
5829
5830 # Look for "override" or "final" after the parameter list
5831 # (possibly on the next few lines).
5832 for i in xrange(end_line, min(end_line + 3, clean_lines.NumLines())):
5833 line = clean_lines.elided[i][end_col:]
5834 match = Search(r'\b(override|final)\b', line)
5835 if match:
5836 error(filename, linenum, 'readability/inheritance', 4,
5837 ('"virtual" is redundant since function is '
5838 'already declared as "%s"' % match.group(1)))
5839
5840 # Set end_col to check whole lines after we are done with the
5841 # first line.
5842 end_col = 0
5843 if Search(r'[^\w]\s*$', line):
5844 break
5845
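# Illustrative note, using a hypothetical C++ declaration: because "override"
# (or "final") already implies a virtual function, the check above reports
#   virtual void Draw() const override;   // readability/inheritance
# while the same declaration without "virtual" passes:
#   void Draw() const override;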
5846
5847def CheckRedundantOverrideOrFinal(filename, clean_lines, linenum, error):
5848 """Check if line contains a redundant "override" or "final" virt-specifier.
5849
5850 Args:
5851 filename: The name of the current file.
5852 clean_lines: A CleansedLines instance containing the file.
5853 linenum: The number of the line to check.
5854 error: The function to call with any errors found.
5855 """
5856 # Look for closing parenthesis nearby. We need one to confirm where
5857 # the declarator ends and where the virt-specifier starts to avoid
5858 # false positives.
5859 line = clean_lines.elided[linenum]
5860 declarator_end = line.rfind(')')
5861 if declarator_end >= 0:
5862 fragment = line[declarator_end:]
5863 else:
5864 if linenum > 1 and clean_lines.elided[linenum - 1].rfind(')') >= 0:
5865 fragment = line
5866 else:
5867 return
5868
5869 # Check that at most one of "override" or "final" is present, not both
5870 if Search(r'\boverride\b', fragment) and Search(r'\bfinal\b', fragment):
5871 error(filename, linenum, 'readability/inheritance', 4,
5872 ('"override" is redundant since function is '
5873 'already declared as "final"'))
5874
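# Illustrative note, again with a hypothetical declaration: "final" on a member
# function already implies it is virtual and overriding, so combining it with
# "override" is redundant and the check above reports
#   void Draw() const override final;   // readability/inheritance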
5875
5876
5877
5878# Returns true if we are at a new block, and it is directly
5879# inside of a namespace.
5880def IsBlockInNameSpace(nesting_state, is_forward_declaration):
5881 """Checks that the new block is directly in a namespace.
5882
5883 Args:
5884 nesting_state: The NestingState object that contains info about our state.
5885 is_forward_declaration: Whether the class is a forward-declared class.
5886 Returns:
5887 Whether or not the new block is directly in a namespace.
5888 """
5889 if is_forward_declaration:
5890 return len(nesting_state.stack) >= 1 and (
5891 isinstance(nesting_state.stack[-1], _NamespaceInfo))
5892
5893
5894 return (len(nesting_state.stack) > 1 and
5895 nesting_state.stack[-1].check_namespace_indentation and
5896 isinstance(nesting_state.stack[-2], _NamespaceInfo))
5897
5898
5899def ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
5900 raw_lines_no_comments, linenum):
5901 """This method determines if we should apply our namespace indentation check.
5902
5903 Args:
5904 nesting_state: The current nesting state.
5905 is_namespace_indent_item: If we just put a new class on the stack, True.
5906 If the top of the stack is not a class, or we did not recently
5907 add the class, False.
5908 raw_lines_no_comments: The lines without the comments.
5909 linenum: The current line number we are processing.
5910
5911 Returns:
5912 True if we should apply our namespace indentation check. Currently, it
5913 only works for classes and namespaces inside of a namespace.
5914 """
5915
5916 is_forward_declaration = IsForwardClassDeclaration(raw_lines_no_comments,
5917 linenum)
5918
5919 if not (is_namespace_indent_item or is_forward_declaration):
5920 return False
5921
5922 # If we are in a macro, we do not want to check the namespace indentation.
5923 if IsMacroDefinition(raw_lines_no_comments, linenum):
5924 return False
5925
5926 return IsBlockInNameSpace(nesting_state, is_forward_declaration)
5927
5928
5929# Call this method if the line is directly inside of a namespace.
5930# If the line above is blank (excluding comments) or the start of
5931# an inner namespace, it cannot be indented.
5932def CheckItemIndentationInNamespace(filename, raw_lines_no_comments, linenum,
5933 error):
5934 line = raw_lines_no_comments[linenum]
5935 if Match(r'^\s+', line):
5936 error(filename, linenum, 'runtime/indentation_namespace', 4,
5937 'Do not indent within a namespace')
5938
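# Illustrative note on the indentation rule, using a hypothetical C++ snippet:
#   namespace arrow {
#     class Buffer;    // indented item -> runtime/indentation_namespace
#   class ArrayData;   // not indented -> accepted
#   }  // namespace arrow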
5939
5940def ProcessLine(filename, file_extension, clean_lines, line,
5941 include_state, function_state, nesting_state, error,
5942 extra_check_functions=None):
5943 """Processes a single line in the file.
5944
5945 Args:
5946 filename: Filename of the file that is being processed.
5947 file_extension: The extension (dot not included) of the file.
5948 clean_lines: A CleansedLines instance containing the file, with
5949 comments stripped.
5950 line: Number of line being processed.
5951 include_state: An _IncludeState instance in which the headers are inserted.
5952 function_state: A _FunctionState instance which counts function lines, etc.
5953 nesting_state: A NestingState instance which maintains information about
5954 the current stack of nested blocks being parsed.
5955 error: A callable to which errors are reported, which takes 4 arguments:
5956 filename, line number, error level, and message
5957 extra_check_functions: An array of additional check functions that will be
5958 run on each source line. Each function takes 4
5959 arguments: filename, clean_lines, line, error
5960 """
5961 raw_lines = clean_lines.raw_lines
5962 ParseNolintSuppressions(filename, raw_lines[line], line, error)
5963 nesting_state.Update(filename, clean_lines, line, error)
5964 CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
5965 error)
5966 if nesting_state.InAsmBlock(): return
5967 CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
5968 CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
5969 CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error)
5970 CheckLanguage(filename, clean_lines, line, file_extension, include_state,
5971 nesting_state, error)
5972 CheckForNonConstReference(filename, clean_lines, line, nesting_state, error)
5973 CheckForNonStandardConstructs(filename, clean_lines, line,
5974 nesting_state, error)
5975 CheckVlogArguments(filename, clean_lines, line, error)
5976 CheckPosixThreading(filename, clean_lines, line, error)
5977 CheckInvalidIncrement(filename, clean_lines, line, error)
5978 CheckMakePairUsesDeduction(filename, clean_lines, line, error)
5979 CheckRedundantVirtual(filename, clean_lines, line, error)
5980 CheckRedundantOverrideOrFinal(filename, clean_lines, line, error)
5981 if extra_check_functions:
5982 for check_fn in extra_check_functions:
5983 check_fn(filename, clean_lines, line, error)
5984
5985def FlagCxx11Features(filename, clean_lines, linenum, error):
5986 """Flag those c++11 features that we only allow in certain places.
5987
5988 Args:
5989 filename: The name of the current file.
5990 clean_lines: A CleansedLines instance containing the file.
5991 linenum: The number of the line to check.
5992 error: The function to call with any errors found.
5993 """
5994 line = clean_lines.elided[linenum]
5995
5996 include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line)
5997
5998 # Flag unapproved C++ TR1 headers.
5999 if include and include.group(1).startswith('tr1/'):
6000 error(filename, linenum, 'build/c++tr1', 5,
6001 ('C++ TR1 headers such as <%s> are unapproved.') % include.group(1))
6002
6003 # Flag unapproved C++11 headers.
6004 if include and include.group(1) in ('cfenv',
6005 'condition_variable',
6006 'fenv.h',
6007 'future',
6008 'mutex',
6009 'thread',
6010 'chrono',
6011 'ratio',
6012 'regex',
6013 'system_error',
6014 ):
6015 error(filename, linenum, 'build/c++11', 5,
6016 ('<%s> is an unapproved C++11 header.') % include.group(1))
6017
6018 # The only place where we need to worry about C++11 keywords and library
6019 # features in preprocessor directives is in macro definitions.
6020 if Match(r'\s*#', line) and not Match(r'\s*#\s*define\b', line): return
6021
6022 # These are classes and free functions. The classes are always
6023 # mentioned as std::*, but we only catch the free functions if
6024 # they're not found by ADL. They're alphabetical by header.
6025 for top_name in (
6026 # type_traits
6027 'alignment_of',
6028 'aligned_union',
6029 ):
6030 if Search(r'\bstd::%s\b' % top_name, line):
6031 error(filename, linenum, 'build/c++11', 5,
6032 ('std::%s is an unapproved C++11 class or function. Send c-style '
6033 'an example of where it would make your code more readable, and '
6034 'they may let you use it.') % top_name)
6035
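# Illustrative note: given the header lists above, lines such as
#   #include <tr1/memory>   // build/c++tr1, unapproved TR1 header
#   #include <mutex>        // build/c++11, unapproved C++11 header
# would be reported, while an include such as <vector> is left alone by this
# check.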
6036
6037def FlagCxx14Features(filename, clean_lines, linenum, error):
6038 """Flag those C++14 features that we restrict.
6039
6040 Args:
6041 filename: The name of the current file.
6042 clean_lines: A CleansedLines instance containing the file.
6043 linenum: The number of the line to check.
6044 error: The function to call with any errors found.
6045 """
6046 line = clean_lines.elided[linenum]
6047
6048 include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line)
6049
6050 # Flag unapproved C++14 headers.
6051 if include and include.group(1) in ('scoped_allocator', 'shared_mutex'):
6052 error(filename, linenum, 'build/c++14', 5,
6053 ('<%s> is an unapproved C++14 header.') % include.group(1))
6054
6055
6056def ProcessFileData(filename, file_extension, lines, error,
6057 extra_check_functions=None):
6058 """Performs lint checks and reports any errors to the given error function.
6059
6060 Args:
6061 filename: Filename of the file that is being processed.
6062 file_extension: The extension (dot not included) of the file.
6063 lines: An array of strings, each representing a line of the file, with the
6064 last element being empty if the file is terminated with a newline.
6065 error: A callable to which errors are reported, which takes 4 arguments:
6066 filename, line number, error level, and message
6067 extra_check_functions: An array of additional check functions that will be
6068 run on each source line. Each function takes 4
6069 arguments: filename, clean_lines, line, error
6070 """
6071 lines = (['// marker so line numbers and indices both start at 1'] + lines +
6072 ['// marker so line numbers end in a known way'])
6073
6074 include_state = _IncludeState()
6075 function_state = _FunctionState()
6076 nesting_state = NestingState()
6077
6078 ResetNolintSuppressions()
6079
6080 CheckForCopyright(filename, lines, error)
6081 ProcessGlobalSuppresions(lines)
6082 RemoveMultiLineComments(filename, lines, error)
6083 clean_lines = CleansedLines(lines)
6084
6085 if file_extension in GetHeaderExtensions():
6086 CheckForHeaderGuard(filename, clean_lines, error)
6087
6088 for line in range(clean_lines.NumLines()):
6089 ProcessLine(filename, file_extension, clean_lines, line,
6090 include_state, function_state, nesting_state, error,
6091 extra_check_functions)
6092 FlagCxx11Features(filename, clean_lines, line, error)
6093 nesting_state.CheckCompletedBlocks(filename, error)
6094
6095 CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)
6096
6097 # Check that the .cc file has included its header if it exists.
6098 if _IsSourceExtension(file_extension):
6099 CheckHeaderFileIncluded(filename, include_state, error)
6100
6101 # We check here rather than inside ProcessLine so that we see raw
6102 # lines rather than "cleaned" lines.
6103 CheckForBadCharacters(filename, lines, error)
6104
6105 CheckForNewlineAtEOF(filename, lines, error)
6106
6107def ProcessConfigOverrides(filename):
6108 """ Loads the configuration files and processes the config overrides.
6109
6110 Args:
6111 filename: The name of the file being processed by the linter.
6112
6113 Returns:
6114 False if the current |filename| should not be processed further.
6115 """
6116
6117 abs_filename = os.path.abspath(filename)
6118 cfg_filters = []
6119 keep_looking = True
6120 while keep_looking:
6121 abs_path, base_name = os.path.split(abs_filename)
6122 if not base_name:
6123 break # Reached the root directory.
6124
6125 cfg_file = os.path.join(abs_path, "CPPLINT.cfg")
6126 abs_filename = abs_path
6127 if not os.path.isfile(cfg_file):
6128 continue
6129
6130 try:
6131 with open(cfg_file) as file_handle:
6132 for line in file_handle:
6133 line, _, _ = line.partition('#') # Remove comments.
6134 if not line.strip():
6135 continue
6136
6137 name, _, val = line.partition('=')
6138 name = name.strip()
6139 val = val.strip()
6140 if name == 'set noparent':
6141 keep_looking = False
6142 elif name == 'filter':
6143 cfg_filters.append(val)
6144 elif name == 'exclude_files':
6145 # When matching exclude_files pattern, use the base_name of
6146 # the current file name or the directory name we are processing.
6147 # For example, if we are checking for lint errors in /foo/bar/baz.cc
6148 # and we found the .cfg file at /foo/CPPLINT.cfg, then the config
6149 # file's "exclude_files" filter is meant to be checked against "bar"
6150 # and not "baz" nor "bar/baz.cc".
6151 if base_name:
6152 pattern = re.compile(val)
6153 if pattern.match(base_name):
6154 _cpplint_state.PrintInfo('Ignoring "%s": file excluded by '
6155 '"%s". File path component "%s" matches pattern "%s"\n' %
6156 (filename, cfg_file, base_name, val))
6157 return False
6158 elif name == 'linelength':
6159 global _line_length
6160 try:
6161 _line_length = int(val)
6162 except ValueError:
6163 _cpplint_state.PrintError('Line length must be numeric.')
6164 elif name == 'extensions':
6165 global _valid_extensions
6166 try:
6167 extensions = [ext.strip() for ext in val.split(',')]
6168 _valid_extensions = set(extensions)
6169 except ValueError:
6170 sys.stderr.write('Extensions should be a comma-separated list of values;'
6171 ' for example: extensions=hpp,cpp\n'
6172 'This could not be parsed: "%s"' % (val,))
6173 elif name == 'headers':
6174 global _header_extensions
6175 try:
6176 extensions = [ext.strip() for ext in val.split(',')]
6177 _header_extensions = set(extensions)
6178 except ValueError:
6179 sys.stderr.write('Header extensions should be a comma-separated list of values;'
6180 ' for example: headers=h,hpp\n'
6181 'This could not be parsed: "%s"' % (val,))
6182 elif name == 'root':
6183 global _root
6184 _root = val
6185 else:
6186 _cpplint_state.PrintError(
6187 'Invalid configuration option (%s) in file %s\n' %
6188 (name, cfg_file))
6189
6190 except IOError:
6191 _cpplint_state.PrintError(
6192 "Skipping config file '%s': Can't open for reading\n" % cfg_file)
6193 keep_looking = False
6194
6195 # Apply all the accumulated filters in reverse order (top-level directory
6196 # config options having the least priority).
6197 for cfg_filter in reversed(cfg_filters):
6198 _AddFilters(cfg_filter)
6199
6200 return True
6201
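# A minimal CPPLINT.cfg sketch using only the option names parsed above (the
# values themselves are hypothetical):
#   set noparent
#   filter=-build/include_what_you_use,+build/c++11
#   exclude_files=.*\.pb\.(h|cc)$
#   linelength=100
#   root=cpp/src
# The upward search from a file's directory stops once a config contains
# "set noparent"; accumulated filters are applied with the top-most directory's
# options taking the lowest priority.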
6202
6203def ProcessFile(filename, vlevel, extra_check_functions=None):
6204 """Does google-lint on a single file.
6205
6206 Args:
6207 filename: The name of the file to parse.
6208
6209 vlevel: The level of errors to report. Every error of confidence
6210 >= verbose_level will be reported. 0 is a good default.
6211
6212 extra_check_functions: An array of additional check functions that will be
6213 run on each source line. Each function takes 4
6214 arguments: filename, clean_lines, line, error
6215 """
6216
6217 _SetVerboseLevel(vlevel)
6218 _BackupFilters()
6219
6220 if not ProcessConfigOverrides(filename):
6221 _RestoreFilters()
6222 return
6223
6224 lf_lines = []
6225 crlf_lines = []
6226 try:
6227 # Support the UNIX convention of using "-" for stdin. Note that
6228 # we are not opening the file with universal newline support
6229 # (which codecs doesn't support anyway), so the resulting lines do
6230 # contain trailing '\r' characters if we are reading a file that
6231 # has CRLF endings.
6232 # If after the split a trailing '\r' is present, it is removed
6233 # below.
6234 if filename == '-':
6235 lines = codecs.StreamReaderWriter(sys.stdin,
6236 codecs.getreader('utf8'),
6237 codecs.getwriter('utf8'),
6238 'replace').read().split('\n')
6239 else:
6240 lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n')
6241
6242 # Remove trailing '\r'.
6243 # The -1 accounts for the extra trailing blank line we get from split()
6244 for linenum in range(len(lines) - 1):
6245 if lines[linenum].endswith('\r'):
6246 lines[linenum] = lines[linenum].rstrip('\r')
6247 crlf_lines.append(linenum + 1)
6248 else:
6249 lf_lines.append(linenum + 1)
6250
6251 except IOError:
6252 _cpplint_state.PrintError(
6253 "Skipping input '%s': Can't open for reading\n" % filename)
6254 _RestoreFilters()
6255 return
6256
6257 # Note, if no dot is found, this will give the entire filename as the ext.
6258 file_extension = filename[filename.rfind('.') + 1:]
6259
6260 # When reading from stdin, the extension is unknown, so no cpplint tests
6261 # should rely on the extension.
6262 if filename != '-' and file_extension not in GetAllExtensions():
6263 _cpplint_state.PrintError('Ignoring %s; not a valid file name '
6264 '(%s)\n' % (filename, ', '.join(GetAllExtensions())))
6265 else:
6266 ProcessFileData(filename, file_extension, lines, Error,
6267 extra_check_functions)
6268
6269 # If end-of-line sequences are a mix of LF and CR-LF, issue
6270 # warnings on the lines with CR.
6271 #
6272 # Don't issue any warnings if all lines are uniformly LF or CR-LF,
6273 # since critique can handle these just fine, and the style guide
6274 # doesn't dictate a particular end of line sequence.
6275 #
6276 # We can't depend on os.linesep to determine what the desired
6277 # end-of-line sequence should be, since that will return the
6278 # server-side end-of-line sequence.
6279 if lf_lines and crlf_lines:
6280 # Warn on every line with CR. An alternative approach might be to
6281 # check whether the file is mostly CRLF or just LF, and warn on the
6282 # minority; we bias toward LF here since most tools prefer LF.
6283 for linenum in crlf_lines:
6284 Error(filename, linenum, 'whitespace/newline', 1,
6285 'Unexpected \\r (^M) found; better to use only \\n')
6286
6287 _cpplint_state.PrintInfo('Done processing %s\n' % filename)
6288 _RestoreFilters()
6289
6290
6291def PrintUsage(message):
6292 """Prints a brief usage string and exits, optionally with an error message.
6293
6294 Args:
6295 message: The optional error message.
6296 """
6297 sys.stderr.write(_USAGE)
6298
6299 if message:
6300 sys.exit('\nFATAL ERROR: ' + message)
6301 else:
6302 sys.exit(0)
6303
6304
6305def PrintCategories():
6306 """Prints a list of all the error-categories used by error messages.
6307
6308 These are the categories used to filter messages via --filter.
6309 """
6310 sys.stderr.write(''.join(' %s\n' % cat for cat in _ERROR_CATEGORIES))
6311 sys.exit(0)
6312
6313
6314def ParseArguments(args):
6315 """Parses the command line arguments.
6316
6317 This may set the output format and verbosity level as side-effects.
6318
6319 Args:
6320 args: The command line arguments.
6321
6322 Returns:
6323 The list of filenames to lint.
6324 """
6325 try:
6326 (opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
6327 'counting=',
6328 'filter=',
6329 'root=',
6330 'repository=',
6331 'linelength=',
6332 'extensions=',
6333 'exclude=',
6334 'headers=',
6335 'quiet',
6336 'recursive'])
6337 except getopt.GetoptError:
6338 PrintUsage('Invalid arguments.')
6339
6340 verbosity = _VerboseLevel()
6341 output_format = _OutputFormat()
6342 filters = ''
6343 counting_style = ''
6344 recursive = False
6345
6346 for (opt, val) in opts:
6347 if opt == '--help':
6348 PrintUsage(None)
6349 elif opt == '--output':
6350 if val not in ('emacs', 'vs7', 'eclipse', 'junit'):
6351 PrintUsage('The only allowed output formats are emacs, vs7, eclipse '
6352 'and junit.')
6353 output_format = val
6354 elif opt == '--verbose':
6355 verbosity = int(val)
6356 elif opt == '--filter':
6357 filters = val
6358 if not filters:
6359 PrintCategories()
6360 elif opt == '--counting':
6361 if val not in ('total', 'toplevel', 'detailed'):
6362 PrintUsage('Valid counting options are total, toplevel, and detailed')
6363 counting_style = val
6364 elif opt == '--root':
6365 global _root
6366 _root = val
6367 elif opt == '--repository':
6368 global _repository
6369 _repository = val
6370 elif opt == '--linelength':
6371 global _line_length
6372 try:
6373 _line_length = int(val)
6374 except ValueError:
6375 PrintUsage('Line length must be numeric.')
6376 elif opt == '--exclude':
6377 global _excludes
6378 if not _excludes:
6379 _excludes = set()
6380 _excludes.update(glob.glob(val))
6381 elif opt == '--extensions':
6382 global _valid_extensions
6383 try:
6384 _valid_extensions = set(val.split(','))
6385 except ValueError:
6386 PrintUsage('Extensions must be a comma-separated list.')
6387 elif opt == '--headers':
6388 global _header_extensions
6389 try:
6390 _header_extensions = set(val.split(','))
6391 except ValueError:
6392 PrintUsage('Header extensions must be a comma-separated list.')
6393 elif opt == '--recursive':
6394 recursive = True
6395 elif opt == '--quiet':
6396 global _quiet
6397 _quiet = True
6398
6399 if not filenames:
6400 PrintUsage('No files were specified.')
6401
6402 if recursive:
6403 filenames = _ExpandDirectories(filenames)
6404
6405 if _excludes:
6406 filenames = _FilterExcludedFiles(filenames)
6407
6408 _SetOutputFormat(output_format)
6409 _SetVerboseLevel(verbosity)
6410 _SetFilters(filters)
6411 _SetCountingStyle(counting_style)
6412
6413 return filenames
6414
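# Example invocation exercising the flags handled above (the path is
# hypothetical):
#   cpplint.py --output=junit --counting=detailed --linelength=100 \
#       --filter=-whitespace/newline --recursive cpp/src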
6415def _ExpandDirectories(filenames):
6416 """Searches a list of filenames and replaces directories in the list with
6417 all files descending from those directories. Files with extensions not in
6418 the valid extensions list are excluded.
6419
6420 Args:
6421 filenames: A list of files or directories
6422
6423 Returns:
6424 A list of all files that are members of filenames or descended from a
6425 directory in filenames
6426 """
6427 expanded = set()
6428 for filename in filenames:
6429 if not os.path.isdir(filename):
6430 expanded.add(filename)
6431 continue
6432
6433 for root, _, files in os.walk(filename):
6434 for loopfile in files:
6435 fullname = os.path.join(root, loopfile)
6436 if fullname.startswith('.' + os.path.sep):
6437 fullname = fullname[len('.' + os.path.sep):]
6438 expanded.add(fullname)
6439
6440 filtered = []
6441 for filename in expanded:
6442 if os.path.splitext(filename)[1][1:] in GetAllExtensions():
6443 filtered.append(filename)
6444
6445 return filtered
6446
6447def _FilterExcludedFiles(filenames):
6448 """Filters out files listed in the --exclude command line switch. File paths
6449 in the switch are evaluated relative to the current working directory.
6450 """
6451 exclude_paths = [os.path.abspath(f) for f in _excludes]
6452 return [f for f in filenames if os.path.abspath(f) not in exclude_paths]
6453
6454def main():
6455 filenames = ParseArguments(sys.argv[1:])
6456 backup_err = sys.stderr
6457 try:
6458 # Change stderr to write with replacement characters so we don't die
6459 # if we try to print something containing non-ASCII characters.
6460 sys.stderr = codecs.StreamReader(sys.stderr, 'replace')
6461
6462 _cpplint_state.ResetErrorCounts()
6463 for filename in filenames:
6464 ProcessFile(filename, _cpplint_state.verbose_level)
6465 _cpplint_state.PrintErrorCounts()
6466
6467 if _cpplint_state.output_format == 'junit':
6468 sys.stderr.write(_cpplint_state.FormatJUnitXML())
6469
6470 finally:
6471 sys.stderr = backup_err
6472
6473 sys.exit(_cpplint_state.error_count > 0)
6474
6475
6476if __name__ == '__main__':
6477 main()