# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from __future__ import absolute_import, division, print_function, unicode_literals

try:
    from builtins import str
except ImportError:
    # Python 2 fallback for the `builtins` backport.
    from __builtin__ import str

import fnmatch
import json
import os
import sys

from targets_builder import TARGETSBuilder
from util import ColorString
18 # This script generates TARGETS file for Buck.
19 # Buck is a build tool specifying dependencies among different build targets.
20 # User can pass extra dependencies as a JSON object via command line, and this
# script can include these dependencies in the generated TARGETS file.
23 # $python3 buckifier/buckify_rocksdb.py
# (This generates a TARGETS file without user-specified dependency for unit tests)
26 # $python3 buckifier/buckify_rocksdb.py \
28 # "extra_deps": [":test_dep", "//fakes/module:mock1"],
29 # "extra_compiler_flags": ["-DROCKSDB_LITE", "-Os"]
32 # (Generated TARGETS file has test_dep and mock1 as dependencies for RocksDB
33 # unit tests, and will use the extra_compiler_flags to compile the unit test
36 # tests to export as libraries for inclusion in other projects
37 _EXPORTED_TEST_LIBS
= ["env_basic_test"]
39 # Parse src.mk files as a Dictionary of
40 # VAR_NAME => list of files
def parse_src_mk(repo_path):
    """Parse {repo_path}/src.mk into a dict of VAR_NAME -> list of file paths.

    A line containing `=` starts a new variable; subsequent lines that
    mention a `.c`/`.cc` path (possibly ending with a `\\` continuation)
    are appended to the current variable's list.
    """
    src_mk = repo_path + "/src.mk"
    src_files = {}
    for line in open(src_mk):
        line = line.strip()
        # Skip blank lines and comments.
        if len(line) == 0 or line[0] == "#":
            continue
        if "=" in line:
            # New variable assignment, e.g. "LIB_SOURCES = \".
            current_src = line.split("=")[0].strip()
            src_files[current_src] = []
        elif ".c" in line:
            # Source-file continuation line; drop the trailing "\".
            src_path = line.split("\\")[0].strip()
            src_files[current_src].append(src_path)
    return src_files
57 # get all .cc / .c files
def get_cc_files(repo_path):
    """Return all .cc / .c files under *repo_path*, as paths relative to it.

    Directories whose relative path contains "java" are skipped.
    """
    cc_files = []
    for root, _dirnames, filenames in os.walk(
        repo_path
    ):  # noqa: B007 T25377293 Grandfathered in
        # Make `root` relative to the repository root.
        root = root[(len(repo_path) + 1) :]
        if "java" in root:
            # Skip java sources.
            continue
        for filename in fnmatch.filter(filenames, "*.cc"):
            cc_files.append(os.path.join(root, filename))
        for filename in fnmatch.filter(filenames, "*.c"):
            cc_files.append(os.path.join(root, filename))
    return cc_files
74 # Get non_parallel tests from Makefile
def get_non_parallel_tests(repo_path):
    """Return the set of non-parallel test names listed in {repo_path}/Makefile.

    Reads the `NON_PARALLEL_TEST =` block: every continuation line ending
    with a backslash contributes one test name; the first line without a
    trailing backslash ends the block.
    """
    Makefile = repo_path + "/Makefile"

    s = set({})

    found_non_parallel_tests = False
    for line in open(Makefile):
        line = line.strip()
        if line.startswith("NON_PARALLEL_TEST ="):
            found_non_parallel_tests = True
        elif found_non_parallel_tests:
            if line.endswith("\\"):
                # remove the trailing \
                line = line[:-1]
                line = line.strip()
                s.add(line)
            else:
                # we consumed all the non_parallel tests
                break

    return s
98 # Parse extra dependencies passed by user from command line
def get_dependencies():
    """Parse extra dependencies passed by the user on the command line.

    sys.argv[1], when present, is a JSON object mapping target alias ->
    {"extra_deps": [...], "extra_compiler_flags": [...]}. The default ""
    alias (no extra deps / flags) is always present in the returned map.
    """
    deps_map = {"": {"extra_deps": [], "extra_compiler_flags": []}}
    if len(sys.argv) < 2:
        return deps_map

    def encode_dict(data):
        # Recursively copy the decoded JSON dict (historically used to
        # coerce unicode keys/values to str on Python 2).
        rv = {}
        for k, v in data.items():
            if isinstance(v, dict):
                v = encode_dict(v)
            rv[k] = v
        return rv

    extra_deps = json.loads(sys.argv[1], object_hook=encode_dict)
    for target_alias, deps in extra_deps.items():
        deps_map[target_alias] = deps
    return deps_map
118 # Prepare TARGETS file for buck
def generate_targets(repo_path, deps_map):
    """Prepare the TARGETS file for buck.

    Builds library / binary / test rules from the parsed src.mk variables
    and registers them through TARGETSBuilder. Returns True on success,
    False when inputs could not be parsed or an unknown C test is found.

    NOTE(review): reconstructed from a garbled paste — structure follows the
    visible fragments; confirm against upstream buckifier/buckify_rocksdb.py.
    """
    print(ColorString.info("Generating TARGETS"))
    # parsed src.mk file
    src_mk = parse_src_mk(repo_path)
    # get all .cc files
    cc_files = get_cc_files(repo_path)
    # get non_parallel tests from Makefile
    non_parallel_tests = get_non_parallel_tests(repo_path)

    if src_mk is None or cc_files is None or non_parallel_tests is None:
        return False

    extra_argv = ""
    if len(sys.argv) >= 2:
        # Heuristically quote and canonicalize whitespace for inclusion
        # in how the file was generated.
        extra_argv = " '{0}'".format(" ".join(sys.argv[1].split()))

    TARGETS = TARGETSBuilder("%s/TARGETS" % repo_path, extra_argv)

    # rocksdb_lib
    TARGETS.add_library(
        "rocksdb_lib",
        src_mk["LIB_SOURCES"] +
        # always add range_tree, it's only excluded on ppc64, which we don't use internally
        src_mk["RANGE_TREE_SOURCES"] + src_mk["TOOL_LIB_SOURCES"],
        deps=[
            "//folly/container:f14_hash",
            "//folly/experimental/coro:blocking_wait",
            "//folly/experimental/coro:collect",
            "//folly/experimental/coro:coroutine",
            "//folly/experimental/coro:task",
            "//folly/synchronization:distributed_mutex",
        ],
    )
    # rocksdb_whole_archive_lib
    TARGETS.add_library(
        "rocksdb_whole_archive_lib",
        src_mk["LIB_SOURCES"] +
        # always add range_tree, it's only excluded on ppc64, which we don't use internally
        src_mk["RANGE_TREE_SOURCES"] + src_mk["TOOL_LIB_SOURCES"],
        deps=[
            "//folly/container:f14_hash",
            "//folly/experimental/coro:blocking_wait",
            "//folly/experimental/coro:collect",
            "//folly/experimental/coro:coroutine",
            "//folly/experimental/coro:task",
            "//folly/synchronization:distributed_mutex",
        ],
        headers=None,
        extra_external_deps="",
        link_whole=True,
    )
    # rocksdb_test_lib
    TARGETS.add_library(
        "rocksdb_test_lib",
        src_mk.get("MOCK_LIB_SOURCES", [])
        + src_mk.get("TEST_LIB_SOURCES", [])
        + src_mk.get("EXP_LIB_SOURCES", [])
        + src_mk.get("ANALYZER_LIB_SOURCES", []),
        [":rocksdb_lib"],
        extra_test_libs=True,
    )
    # rocksdb_tools_lib
    TARGETS.add_library(
        "rocksdb_tools_lib",
        src_mk.get("BENCH_LIB_SOURCES", [])
        + src_mk.get("ANALYZER_LIB_SOURCES", [])
        + ["test_util/testutil.cc"],
        [":rocksdb_lib"],
    )
    # rocksdb_cache_bench_tools_lib
    TARGETS.add_library(
        "rocksdb_cache_bench_tools_lib",
        src_mk.get("CACHE_BENCH_LIB_SOURCES", []),
        [":rocksdb_lib"],
    )
    # rocksdb_stress_lib
    TARGETS.add_rocksdb_library(
        "rocksdb_stress_lib",
        src_mk.get("ANALYZER_LIB_SOURCES", [])
        + src_mk.get("STRESS_LIB_SOURCES", [])
        + ["test_util/testutil.cc"],
    )
    # db_stress binary
    TARGETS.add_binary(
        "db_stress", ["db_stress_tool/db_stress.cc"], [":rocksdb_stress_lib"]
    )
    # microbench binaries, one per MICROBENCH_SOURCES entry
    for src in src_mk.get("MICROBENCH_SOURCES", []):
        name = src.rsplit("/", 1)[1].split(".")[0] if "/" in src else src.split(".")[0]
        TARGETS.add_binary(name, [src], [], extra_bench_libs=True)
    print("Extra dependencies:\n{0}".format(json.dumps(deps_map)))

    # Dictionary test executable name -> relative source file path
    test_source_map = {}

    # c_test.c is added through TARGETS.add_c_test(). If there
    # are more than one .c test file, we need to extend
    # TARGETS.add_c_test() to include other C tests too.
    for test_src in src_mk.get("TEST_MAIN_SOURCES_C", []):
        if test_src != "db/c_test.c":
            print("Don't know how to deal with " + test_src)
            return False
    TARGETS.add_c_test()

    try:
        with open(f"{repo_path}/buckifier/bench.json") as json_file:
            fast_fancy_bench_config_list = json.load(json_file)
            for config_dict in fast_fancy_bench_config_list:
                # Strip non-metric entries (dicts) out of every benchmark's
                # overloaded metric list before registering the config.
                clean_benchmarks = {}
                benchmarks = config_dict["benchmarks"]
                for binary, benchmark_dict in benchmarks.items():
                    clean_benchmarks[binary] = {}
                    for benchmark, overloaded_metric_list in benchmark_dict.items():
                        clean_benchmarks[binary][benchmark] = []
                        for metric in overloaded_metric_list:
                            if not isinstance(metric, dict):
                                clean_benchmarks[binary][benchmark].append(metric)
                TARGETS.add_fancy_bench_config(
                    config_dict["name"],
                    clean_benchmarks,
                    False,
                    config_dict["expected_runtime_one_iter"],
                    config_dict["sl_iterations"],
                    config_dict["regression_threshold"],
                )

        with open(f"{repo_path}/buckifier/bench-slow.json") as json_file:
            slow_fancy_bench_config_list = json.load(json_file)
            for config_dict in slow_fancy_bench_config_list:
                clean_benchmarks = {}
                benchmarks = config_dict["benchmarks"]
                for binary, benchmark_dict in benchmarks.items():
                    clean_benchmarks[binary] = {}
                    for benchmark, overloaded_metric_list in benchmark_dict.items():
                        clean_benchmarks[binary][benchmark] = []
                        for metric in overloaded_metric_list:
                            if not isinstance(metric, dict):
                                clean_benchmarks[binary][benchmark].append(metric)
            for config_dict in slow_fancy_bench_config_list:
                TARGETS.add_fancy_bench_config(
                    config_dict["name"] + "_slow",
                    clean_benchmarks,
                    True,
                    config_dict["expected_runtime_one_iter"],
                    config_dict["sl_iterations"],
                    config_dict["regression_threshold"],
                )
    except Exception:
        # Swallow bench-config errors deliberately:
        # it is better servicelab experiments break
        # than rocksdb github ci
        pass

    TARGETS.add_test_header()

    for test_src in src_mk.get("TEST_MAIN_SOURCES", []):
        test = test_src.split(".c")[0].strip().split("/")[-1].strip()
        test_source_map[test] = test_src
        print("" + test + " " + test_src)

    for target_alias, deps in deps_map.items():
        for test, test_src in sorted(test_source_map.items()):
            if len(test) == 0:
                print(ColorString.warning("Failed to get test name for %s" % test_src))
                continue

            test_target_name = test if not target_alias else test + "_" + target_alias

            if test in _EXPORTED_TEST_LIBS:
                if not target_alias:
                    # Export this test as a library usable by other projects.
                    test_library = "%s_lib" % test_target_name
                    TARGETS.add_library(
                        test_library,
                        [test_src],
                        deps=[":rocksdb_test_lib"],
                        extra_test_libs=True,
                    )
                    TARGETS.register_test(
                        test_target_name,
                        test_src,
                        deps=json.dumps(deps["extra_deps"] + [":" + test_library]),
                        extra_compiler_flags=json.dumps(deps["extra_compiler_flags"]),
                    )
            else:
                TARGETS.register_test(
                    test_target_name,
                    test_src,
                    deps=json.dumps(deps["extra_deps"] + [":rocksdb_test_lib"]),
                    extra_compiler_flags=json.dumps(deps["extra_compiler_flags"]),
                )

    print(ColorString.info("Generated TARGETS Summary:"))
    print(ColorString.info("- %d libs" % TARGETS.total_lib))
    print(ColorString.info("- %d binarys" % TARGETS.total_bin))
    print(ColorString.info("- %d tests" % TARGETS.total_test))
    return True
def get_rocksdb_path():
    """Return the absolute path of the rocksdb repo root.

    The script lives in {rocksdb}/buckifier, so the repo root is the
    parent of the directory containing sys.argv[0].
    """
    # rocksdb = {script_dir}/..
    script_dir = os.path.dirname(sys.argv[0])
    script_dir = os.path.abspath(script_dir)
    rocksdb_path = os.path.abspath(os.path.join(script_dir, "../"))
    return rocksdb_path
def exit_with_error(msg):
    """Print *msg* as an error and terminate with a non-zero exit status."""
    print(ColorString.error(msg))
    sys.exit(1)
def main():
    """Entry point: collect user deps and regenerate the TARGETS file."""
    deps_map = get_dependencies()
    # Generate TARGETS file for buck
    ok = generate_targets(get_rocksdb_path(), deps_map)
    if not ok:
        exit_with_error("Failed to generate TARGETS files")


if __name__ == "__main__":
    main()