1 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
2 // This source code is licensed under both the GPLv2 (found in the
3 // COPYING file in the root directory) and Apache 2.0 License
4 // (found in the LICENSE.Apache file in the root directory).
6 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
7 // Use of this source code is governed by a BSD-style license that can be
8 // found in the LICENSE file. See the AUTHORS file for names of contributors.
10 #include "rocksdb/cache.h"
12 #include <forward_list>
17 #include "cache/clock_cache.h"
18 #include "cache/lru_cache.h"
19 #include "util/coding.h"
20 #include "util/string_util.h"
21 #include "util/testharness.h"
25 // Conversions between numeric keys/values and the types expected by Cache.
26 static std::string
EncodeKey(int k
) {
28 PutFixed32(&result
, k
);
31 static int DecodeKey(const Slice
& k
) {
32 assert(k
.size() == 4);
33 return DecodeFixed32(k
.data());
// Packs an integral test value directly into the void* slot stored in Cache.
static void* EncodeValue(uintptr_t v) { return reinterpret_cast<void*>(v); }
// Recovers the int test value packed into a void* by EncodeValue.
static int DecodeValue(void* v) {
  return static_cast<int>(reinterpret_cast<uintptr_t>(v));
}
// Names of the cache implementations exercised by the parameterized tests.
const std::string kLRU = "lru";
const std::string kClock = "clock";
43 void dumbDeleter(const Slice
& /*key*/, void* /*value*/) {}
45 void eraseDeleter(const Slice
& /*key*/, void* value
) {
46 Cache
* cache
= reinterpret_cast<Cache
*>(value
);
50 class CacheTest
: public testing::TestWithParam
<std::string
> {
52 static CacheTest
* current_
;
54 static void Deleter(const Slice
& key
, void* v
) {
55 current_
->deleted_keys_
.push_back(DecodeKey(key
));
56 current_
->deleted_values_
.push_back(DecodeValue(v
));
59 static const int kCacheSize
= 1000;
60 static const int kNumShardBits
= 4;
62 static const int kCacheSize2
= 100;
63 static const int kNumShardBits2
= 2;
65 std::vector
<int> deleted_keys_
;
66 std::vector
<int> deleted_values_
;
67 std::shared_ptr
<Cache
> cache_
;
68 std::shared_ptr
<Cache
> cache2_
;
71 : cache_(NewCache(kCacheSize
, kNumShardBits
, false)),
72 cache2_(NewCache(kCacheSize2
, kNumShardBits2
, false)) {
76 ~CacheTest() override
{}
78 std::shared_ptr
<Cache
> NewCache(size_t capacity
) {
79 auto type
= GetParam();
81 return NewLRUCache(capacity
);
84 return NewClockCache(capacity
);
89 std::shared_ptr
<Cache
> NewCache(size_t capacity
, int num_shard_bits
,
90 bool strict_capacity_limit
) {
91 auto type
= GetParam();
93 return NewLRUCache(capacity
, num_shard_bits
, strict_capacity_limit
);
96 return NewClockCache(capacity
, num_shard_bits
, strict_capacity_limit
);
101 int Lookup(std::shared_ptr
<Cache
> cache
, int key
) {
102 Cache::Handle
* handle
= cache
->Lookup(EncodeKey(key
));
103 const int r
= (handle
== nullptr) ? -1 : DecodeValue(cache
->Value(handle
));
104 if (handle
!= nullptr) {
105 cache
->Release(handle
);
110 void Insert(std::shared_ptr
<Cache
> cache
, int key
, int value
,
112 cache
->Insert(EncodeKey(key
), EncodeValue(value
), charge
,
113 &CacheTest::Deleter
);
116 void Erase(std::shared_ptr
<Cache
> cache
, int key
) {
117 cache
->Erase(EncodeKey(key
));
120 int Lookup(int key
) {
121 return Lookup(cache_
, key
);
124 void Insert(int key
, int value
, int charge
= 1) {
125 Insert(cache_
, key
, value
, charge
);
128 void Erase(int key
) {
132 int Lookup2(int key
) {
133 return Lookup(cache2_
, key
);
136 void Insert2(int key
, int value
, int charge
= 1) {
137 Insert(cache2_
, key
, value
, charge
);
140 void Erase2(int key
) {
144 CacheTest
* CacheTest::current_
;
146 TEST_P(CacheTest
, UsageTest
) {
147 // cache is std::shared_ptr and will be automatically cleaned up.
148 const uint64_t kCapacity
= 100000;
149 auto cache
= NewCache(kCapacity
, 8, false);
152 char value
[10] = "abcdef";
153 // make sure everything will be cached
154 for (int i
= 1; i
< 100; ++i
) {
155 std::string
key(i
, 'a');
156 auto kv_size
= key
.size() + 5;
157 cache
->Insert(key
, reinterpret_cast<void*>(value
), kv_size
, dumbDeleter
);
159 ASSERT_EQ(usage
, cache
->GetUsage());
162 // make sure the cache will be overloaded
163 for (uint64_t i
= 1; i
< kCapacity
; ++i
) {
164 auto key
= ToString(i
);
165 cache
->Insert(key
, reinterpret_cast<void*>(value
), key
.size() + 5,
169 // the usage should be close to the capacity
170 ASSERT_GT(kCapacity
, cache
->GetUsage());
171 ASSERT_LT(kCapacity
* 0.95, cache
->GetUsage());
174 TEST_P(CacheTest
, PinnedUsageTest
) {
175 // cache is std::shared_ptr and will be automatically cleaned up.
176 const uint64_t kCapacity
= 100000;
177 auto cache
= NewCache(kCapacity
, 8, false);
179 size_t pinned_usage
= 0;
180 char value
[10] = "abcdef";
182 std::forward_list
<Cache::Handle
*> unreleased_handles
;
184 // Add entries. Unpin some of them after insertion. Then, pin some of them
185 // again. Check GetPinnedUsage().
186 for (int i
= 1; i
< 100; ++i
) {
187 std::string
key(i
, 'a');
188 auto kv_size
= key
.size() + 5;
189 Cache::Handle
* handle
;
190 cache
->Insert(key
, reinterpret_cast<void*>(value
), kv_size
, dumbDeleter
,
192 pinned_usage
+= kv_size
;
193 ASSERT_EQ(pinned_usage
, cache
->GetPinnedUsage());
195 cache
->Release(handle
);
196 pinned_usage
-= kv_size
;
197 ASSERT_EQ(pinned_usage
, cache
->GetPinnedUsage());
199 unreleased_handles
.push_front(handle
);
202 unreleased_handles
.push_front(cache
->Lookup(key
));
203 // If i % 2 == 0, then the entry was unpinned before Lookup, so pinned
206 pinned_usage
+= kv_size
;
208 ASSERT_EQ(pinned_usage
, cache
->GetPinnedUsage());
212 // check that overloading the cache does not change the pinned usage
213 for (uint64_t i
= 1; i
< 2 * kCapacity
; ++i
) {
214 auto key
= ToString(i
);
215 cache
->Insert(key
, reinterpret_cast<void*>(value
), key
.size() + 5,
218 ASSERT_EQ(pinned_usage
, cache
->GetPinnedUsage());
220 // release handles for pinned entries to prevent memory leaks
221 for (auto handle
: unreleased_handles
) {
222 cache
->Release(handle
);
226 TEST_P(CacheTest
, HitAndMiss
) {
227 ASSERT_EQ(-1, Lookup(100));
230 ASSERT_EQ(101, Lookup(100));
231 ASSERT_EQ(-1, Lookup(200));
232 ASSERT_EQ(-1, Lookup(300));
235 ASSERT_EQ(101, Lookup(100));
236 ASSERT_EQ(201, Lookup(200));
237 ASSERT_EQ(-1, Lookup(300));
240 ASSERT_EQ(102, Lookup(100));
241 ASSERT_EQ(201, Lookup(200));
242 ASSERT_EQ(-1, Lookup(300));
244 ASSERT_EQ(1U, deleted_keys_
.size());
245 ASSERT_EQ(100, deleted_keys_
[0]);
246 ASSERT_EQ(101, deleted_values_
[0]);
249 TEST_P(CacheTest
, InsertSameKey
) {
252 ASSERT_EQ(2, Lookup(1));
255 TEST_P(CacheTest
, Erase
) {
257 ASSERT_EQ(0U, deleted_keys_
.size());
262 ASSERT_EQ(-1, Lookup(100));
263 ASSERT_EQ(201, Lookup(200));
264 ASSERT_EQ(1U, deleted_keys_
.size());
265 ASSERT_EQ(100, deleted_keys_
[0]);
266 ASSERT_EQ(101, deleted_values_
[0]);
269 ASSERT_EQ(-1, Lookup(100));
270 ASSERT_EQ(201, Lookup(200));
271 ASSERT_EQ(1U, deleted_keys_
.size());
274 TEST_P(CacheTest
, EntriesArePinned
) {
276 Cache::Handle
* h1
= cache_
->Lookup(EncodeKey(100));
277 ASSERT_EQ(101, DecodeValue(cache_
->Value(h1
)));
278 ASSERT_EQ(1U, cache_
->GetUsage());
281 Cache::Handle
* h2
= cache_
->Lookup(EncodeKey(100));
282 ASSERT_EQ(102, DecodeValue(cache_
->Value(h2
)));
283 ASSERT_EQ(0U, deleted_keys_
.size());
284 ASSERT_EQ(2U, cache_
->GetUsage());
287 ASSERT_EQ(1U, deleted_keys_
.size());
288 ASSERT_EQ(100, deleted_keys_
[0]);
289 ASSERT_EQ(101, deleted_values_
[0]);
290 ASSERT_EQ(1U, cache_
->GetUsage());
293 ASSERT_EQ(-1, Lookup(100));
294 ASSERT_EQ(1U, deleted_keys_
.size());
295 ASSERT_EQ(1U, cache_
->GetUsage());
298 ASSERT_EQ(2U, deleted_keys_
.size());
299 ASSERT_EQ(100, deleted_keys_
[1]);
300 ASSERT_EQ(102, deleted_values_
[1]);
301 ASSERT_EQ(0U, cache_
->GetUsage());
304 TEST_P(CacheTest
, EvictionPolicy
) {
308 // Frequently used entry must be kept around
309 for (int i
= 0; i
< kCacheSize
+ 200; i
++) {
310 Insert(1000+i
, 2000+i
);
311 ASSERT_EQ(101, Lookup(100));
313 ASSERT_EQ(101, Lookup(100));
314 ASSERT_EQ(-1, Lookup(200));
317 TEST_P(CacheTest
, ExternalRefPinsEntries
) {
319 Cache::Handle
* h
= cache_
->Lookup(EncodeKey(100));
320 ASSERT_TRUE(cache_
->Ref(h
));
321 ASSERT_EQ(101, DecodeValue(cache_
->Value(h
)));
322 ASSERT_EQ(1U, cache_
->GetUsage());
324 for (int i
= 0; i
< 3; ++i
) {
326 // First release (i == 1) corresponds to Ref(), second release (i == 2)
327 // corresponds to Lookup(). Then, since all external refs are released,
328 // the below insertions should push out the cache entry.
331 // double cache size because the usage bit in block cache prevents 100 from
332 // being evicted in the first kCacheSize iterations
333 for (int j
= 0; j
< 2 * kCacheSize
+ 100; j
++) {
334 Insert(1000 + j
, 2000 + j
);
337 ASSERT_EQ(101, Lookup(100));
340 ASSERT_EQ(-1, Lookup(100));
343 TEST_P(CacheTest
, EvictionPolicyRef
) {
352 Cache::Handle
* h201
= cache_
->Lookup(EncodeKey(200));
353 Cache::Handle
* h202
= cache_
->Lookup(EncodeKey(201));
354 Cache::Handle
* h203
= cache_
->Lookup(EncodeKey(202));
355 Cache::Handle
* h204
= cache_
->Lookup(EncodeKey(203));
361 // Insert entries much more than Cache capacity
362 for (int i
= 0; i
< kCacheSize
+ 200; i
++) {
363 Insert(1000 + i
, 2000 + i
);
366 // Check whether the entries inserted in the beginning
367 // are evicted. Ones without extra ref are evicted and
368 // those with are not.
369 ASSERT_EQ(-1, Lookup(100));
370 ASSERT_EQ(-1, Lookup(101));
371 ASSERT_EQ(-1, Lookup(102));
372 ASSERT_EQ(-1, Lookup(103));
374 ASSERT_EQ(-1, Lookup(300));
375 ASSERT_EQ(-1, Lookup(301));
376 ASSERT_EQ(-1, Lookup(302));
377 ASSERT_EQ(-1, Lookup(303));
379 ASSERT_EQ(101, Lookup(200));
380 ASSERT_EQ(102, Lookup(201));
381 ASSERT_EQ(103, Lookup(202));
382 ASSERT_EQ(104, Lookup(203));
384 // Cleaning up all the handles
385 cache_
->Release(h201
);
386 cache_
->Release(h202
);
387 cache_
->Release(h203
);
388 cache_
->Release(h204
);
391 TEST_P(CacheTest
, EvictEmptyCache
) {
392 // Insert item large than capacity to trigger eviction on empty cache.
393 auto cache
= NewCache(1, 0, false);
394 ASSERT_OK(cache
->Insert("foo", nullptr, 10, dumbDeleter
));
397 TEST_P(CacheTest
, EraseFromDeleter
) {
398 // Have deleter which will erase item from cache, which will re-enter
399 // the cache at that point.
400 std::shared_ptr
<Cache
> cache
= NewCache(10, 0, false);
401 ASSERT_OK(cache
->Insert("foo", nullptr, 1, dumbDeleter
));
402 ASSERT_OK(cache
->Insert("bar", cache
.get(), 1, eraseDeleter
));
404 ASSERT_EQ(nullptr, cache
->Lookup("foo"));
405 ASSERT_EQ(nullptr, cache
->Lookup("bar"));
408 TEST_P(CacheTest
, ErasedHandleState
) {
409 // insert a key and get two handles
411 Cache::Handle
* h1
= cache_
->Lookup(EncodeKey(100));
412 Cache::Handle
* h2
= cache_
->Lookup(EncodeKey(100));
414 ASSERT_EQ(DecodeValue(cache_
->Value(h1
)), 1000);
415 ASSERT_EQ(DecodeValue(cache_
->Value(h2
)), 1000);
417 // delete the key from the cache
419 // can no longer find in the cache
420 ASSERT_EQ(-1, Lookup(100));
422 // release one handle
424 // still can't find in cache
425 ASSERT_EQ(-1, Lookup(100));
430 TEST_P(CacheTest
, HeavyEntries
) {
431 // Add a bunch of light and heavy entries and then count the combined
432 // size of items still in the cache, which must be approximately the
433 // same as the total capacity.
434 const int kLight
= 1;
435 const int kHeavy
= 10;
438 while (added
< 2*kCacheSize
) {
439 const int weight
= (index
& 1) ? kLight
: kHeavy
;
440 Insert(index
, 1000+index
, weight
);
445 int cached_weight
= 0;
446 for (int i
= 0; i
< index
; i
++) {
447 const int weight
= (i
& 1 ? kLight
: kHeavy
);
450 cached_weight
+= weight
;
451 ASSERT_EQ(1000+i
, r
);
454 ASSERT_LE(cached_weight
, kCacheSize
+ kCacheSize
/10);
457 TEST_P(CacheTest
, NewId
) {
458 uint64_t a
= cache_
->NewId();
459 uint64_t b
= cache_
->NewId();
// Simple heap-allocated value type used together with `deleter` below.
class Value {
 public:
  explicit Value(size_t v) : v_(v) { }

  size_t v_;
};
472 void deleter(const Slice
& /*key*/, void* value
) {
473 delete static_cast<Value
*>(value
);
477 TEST_P(CacheTest
, ReleaseAndErase
) {
478 std::shared_ptr
<Cache
> cache
= NewCache(5, 0, false);
479 Cache::Handle
* handle
;
480 Status s
= cache
->Insert(EncodeKey(100), EncodeValue(100), 1,
481 &CacheTest::Deleter
, &handle
);
483 ASSERT_EQ(5U, cache
->GetCapacity());
484 ASSERT_EQ(1U, cache
->GetUsage());
485 ASSERT_EQ(0U, deleted_keys_
.size());
486 auto erased
= cache
->Release(handle
, true);
488 // This tests that deleter has been called
489 ASSERT_EQ(1U, deleted_keys_
.size());
492 TEST_P(CacheTest
, ReleaseWithoutErase
) {
493 std::shared_ptr
<Cache
> cache
= NewCache(5, 0, false);
494 Cache::Handle
* handle
;
495 Status s
= cache
->Insert(EncodeKey(100), EncodeValue(100), 1,
496 &CacheTest::Deleter
, &handle
);
498 ASSERT_EQ(5U, cache
->GetCapacity());
499 ASSERT_EQ(1U, cache
->GetUsage());
500 ASSERT_EQ(0U, deleted_keys_
.size());
501 auto erased
= cache
->Release(handle
);
502 ASSERT_FALSE(erased
);
503 // This tests that deleter is not called. When cache has free capacity it is
504 // not expected to immediately erase the released items.
505 ASSERT_EQ(0U, deleted_keys_
.size());
508 TEST_P(CacheTest
, SetCapacity
) {
509 // test1: increase capacity
510 // lets create a cache with capacity 5,
511 // then, insert 5 elements, then increase capacity
512 // to 10, returned capacity should be 10, usage=5
513 std::shared_ptr
<Cache
> cache
= NewCache(5, 0, false);
514 std::vector
<Cache::Handle
*> handles(10);
515 // Insert 5 entries, but not releasing.
516 for (size_t i
= 0; i
< 5; i
++) {
517 std::string key
= ToString(i
+1);
518 Status s
= cache
->Insert(key
, new Value(i
+ 1), 1, &deleter
, &handles
[i
]);
521 ASSERT_EQ(5U, cache
->GetCapacity());
522 ASSERT_EQ(5U, cache
->GetUsage());
523 cache
->SetCapacity(10);
524 ASSERT_EQ(10U, cache
->GetCapacity());
525 ASSERT_EQ(5U, cache
->GetUsage());
527 // test2: decrease capacity
528 // insert 5 more elements to cache, then release 5,
529 // then decrease capacity to 7, final capacity should be 7
530 // and usage should be 7
531 for (size_t i
= 5; i
< 10; i
++) {
532 std::string key
= ToString(i
+1);
533 Status s
= cache
->Insert(key
, new Value(i
+ 1), 1, &deleter
, &handles
[i
]);
536 ASSERT_EQ(10U, cache
->GetCapacity());
537 ASSERT_EQ(10U, cache
->GetUsage());
538 for (size_t i
= 0; i
< 5; i
++) {
539 cache
->Release(handles
[i
]);
541 ASSERT_EQ(10U, cache
->GetCapacity());
542 ASSERT_EQ(10U, cache
->GetUsage());
543 cache
->SetCapacity(7);
544 ASSERT_EQ(7, cache
->GetCapacity());
545 ASSERT_EQ(7, cache
->GetUsage());
547 // release remaining 5 to keep valgrind happy
548 for (size_t i
= 5; i
< 10; i
++) {
549 cache
->Release(handles
[i
]);
553 TEST_P(CacheTest
, SetStrictCapacityLimit
) {
554 // test1: set the flag to false. Insert more keys than capacity. See if they
556 std::shared_ptr
<Cache
> cache
= NewLRUCache(5, 0, false);
557 std::vector
<Cache::Handle
*> handles(10);
559 for (size_t i
= 0; i
< 10; i
++) {
560 std::string key
= ToString(i
+ 1);
561 s
= cache
->Insert(key
, new Value(i
+ 1), 1, &deleter
, &handles
[i
]);
563 ASSERT_NE(nullptr, handles
[i
]);
566 // test2: set the flag to true. Insert and check if it fails.
567 std::string extra_key
= "extra";
568 Value
* extra_value
= new Value(0);
569 cache
->SetStrictCapacityLimit(true);
570 Cache::Handle
* handle
;
571 s
= cache
->Insert(extra_key
, extra_value
, 1, &deleter
, &handle
);
572 ASSERT_TRUE(s
.IsIncomplete());
573 ASSERT_EQ(nullptr, handle
);
575 for (size_t i
= 0; i
< 10; i
++) {
576 cache
->Release(handles
[i
]);
579 // test3: init with flag being true.
580 std::shared_ptr
<Cache
> cache2
= NewLRUCache(5, 0, true);
581 for (size_t i
= 0; i
< 5; i
++) {
582 std::string key
= ToString(i
+ 1);
583 s
= cache2
->Insert(key
, new Value(i
+ 1), 1, &deleter
, &handles
[i
]);
585 ASSERT_NE(nullptr, handles
[i
]);
587 s
= cache2
->Insert(extra_key
, extra_value
, 1, &deleter
, &handle
);
588 ASSERT_TRUE(s
.IsIncomplete());
589 ASSERT_EQ(nullptr, handle
);
590 // test insert without handle
591 s
= cache2
->Insert(extra_key
, extra_value
, 1, &deleter
);
592 // AS if the key have been inserted into cache but get evicted immediately.
594 ASSERT_EQ(5, cache
->GetUsage());
595 ASSERT_EQ(nullptr, cache2
->Lookup(extra_key
));
597 for (size_t i
= 0; i
< 5; i
++) {
598 cache2
->Release(handles
[i
]);
602 TEST_P(CacheTest
, OverCapacity
) {
605 // a LRUCache with n entries and one shard only
606 std::shared_ptr
<Cache
> cache
= NewCache(n
, 0, false);
608 std::vector
<Cache::Handle
*> handles(n
+1);
610 // Insert n+1 entries, but not releasing.
611 for (size_t i
= 0; i
< n
+ 1; i
++) {
612 std::string key
= ToString(i
+1);
613 Status s
= cache
->Insert(key
, new Value(i
+ 1), 1, &deleter
, &handles
[i
]);
617 // Guess what's in the cache now?
618 for (size_t i
= 0; i
< n
+ 1; i
++) {
619 std::string key
= ToString(i
+1);
620 auto h
= cache
->Lookup(key
);
621 ASSERT_TRUE(h
!= nullptr);
622 if (h
) cache
->Release(h
);
625 // the cache is over capacity since nothing could be evicted
626 ASSERT_EQ(n
+ 1U, cache
->GetUsage());
627 for (size_t i
= 0; i
< n
+ 1; i
++) {
628 cache
->Release(handles
[i
]);
630 // Make sure eviction is triggered.
631 cache
->SetCapacity(n
);
633 // cache is under capacity now since elements were released
634 ASSERT_EQ(n
, cache
->GetUsage());
636 // element 0 is evicted and the rest is there
637 // This is consistent with the LRU policy since the element 0
638 // was released first
639 for (size_t i
= 0; i
< n
+ 1; i
++) {
640 std::string key
= ToString(i
+1);
641 auto h
= cache
->Lookup(key
);
652 std::vector
<std::pair
<int, int>> callback_state
;
653 void callback(void* entry
, size_t charge
) {
654 callback_state
.push_back({DecodeValue(entry
), static_cast<int>(charge
)});
658 TEST_P(CacheTest
, ApplyToAllCacheEntiresTest
) {
659 std::vector
<std::pair
<int, int>> inserted
;
660 callback_state
.clear();
662 for (int i
= 0; i
< 10; ++i
) {
663 Insert(i
, i
* 2, i
+ 1);
664 inserted
.push_back({i
* 2, i
+ 1});
666 cache_
->ApplyToAllCacheEntries(callback
, true);
668 std::sort(inserted
.begin(), inserted
.end());
669 std::sort(callback_state
.begin(), callback_state
.end());
670 ASSERT_TRUE(inserted
== callback_state
);
673 TEST_P(CacheTest
, DefaultShardBits
) {
674 // test1: set the flag to false. Insert more keys than capacity. See if they
676 std::shared_ptr
<Cache
> cache
= NewCache(16 * 1024L * 1024L);
677 ShardedCache
* sc
= dynamic_cast<ShardedCache
*>(cache
.get());
678 ASSERT_EQ(5, sc
->GetNumShardBits());
680 cache
= NewLRUCache(511 * 1024L, -1, true);
681 sc
= dynamic_cast<ShardedCache
*>(cache
.get());
682 ASSERT_EQ(0, sc
->GetNumShardBits());
684 cache
= NewLRUCache(1024L * 1024L * 1024L, -1, true);
685 sc
= dynamic_cast<ShardedCache
*>(cache
.get());
686 ASSERT_EQ(6, sc
->GetNumShardBits());
689 #ifdef SUPPORT_CLOCK_CACHE
690 std::shared_ptr
<Cache
> (*new_clock_cache_func
)(size_t, int,
691 bool) = NewClockCache
;
692 INSTANTIATE_TEST_CASE_P(CacheTestInstance
, CacheTest
,
693 testing::Values(kLRU
, kClock
));
695 INSTANTIATE_TEST_CASE_P(CacheTestInstance
, CacheTest
, testing::Values(kLRU
));
696 #endif // SUPPORT_CLOCK_CACHE
698 } // namespace rocksdb
700 int main(int argc
, char** argv
) {
701 ::testing::InitGoogleTest(&argc
, argv
);
702 return RUN_ALL_TESTS();