1 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
2 // This source code is licensed under both the GPLv2 (found in the
3 // COPYING file in the root directory) and Apache 2.0 License
4 // (found in the LICENSE.Apache file in the root directory).
6 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
7 // Use of this source code is governed by a BSD-style license that can be
8 // found in the LICENSE file. See the AUTHORS file for names of contributors.
10 #include "rocksdb/cache.h"
12 #include <forward_list>
17 #include "cache/clock_cache.h"
18 #include "cache/lru_cache.h"
19 #include "util/coding.h"
20 #include "util/string_util.h"
21 #include "util/testharness.h"
25 // Conversions between numeric keys/values and the types expected by Cache.
26 static std::string
EncodeKey(int k
) {
28 PutFixed32(&result
, k
);
31 static int DecodeKey(const Slice
& k
) {
32 assert(k
.size() == 4);
33 return DecodeFixed32(k
.data());
// Stores an integral test value directly in the cache's void* slot,
// avoiding a heap allocation for throwaway values.
static void* EncodeValue(uintptr_t v) { return reinterpret_cast<void*>(v); }
// Inverse of EncodeValue: recovers the int test value from the void* slot.
static int DecodeValue(void* v) {
  return static_cast<int>(reinterpret_cast<uintptr_t>(v));
}
// Parameter values for the TEST_P instantiation below: which cache
// implementation a given test run exercises.
const std::string kLRU = "lru";
const std::string kClock = "clock";
43 void dumbDeleter(const Slice
& /*key*/, void* /*value*/) {}
45 void eraseDeleter(const Slice
& /*key*/, void* value
) {
46 Cache
* cache
= reinterpret_cast<Cache
*>(value
);
50 class CacheTest
: public testing::TestWithParam
<std::string
> {
52 static CacheTest
* current_
;
54 static void Deleter(const Slice
& key
, void* v
) {
55 current_
->deleted_keys_
.push_back(DecodeKey(key
));
56 current_
->deleted_values_
.push_back(DecodeValue(v
));
59 static const int kCacheSize
= 1000;
60 static const int kNumShardBits
= 4;
62 static const int kCacheSize2
= 100;
63 static const int kNumShardBits2
= 2;
65 std::vector
<int> deleted_keys_
;
66 std::vector
<int> deleted_values_
;
67 shared_ptr
<Cache
> cache_
;
68 shared_ptr
<Cache
> cache2_
;
71 : cache_(NewCache(kCacheSize
, kNumShardBits
, false)),
72 cache2_(NewCache(kCacheSize2
, kNumShardBits2
, false)) {
79 std::shared_ptr
<Cache
> NewCache(size_t capacity
) {
80 auto type
= GetParam();
82 return NewLRUCache(capacity
);
85 return NewClockCache(capacity
);
90 std::shared_ptr
<Cache
> NewCache(size_t capacity
, int num_shard_bits
,
91 bool strict_capacity_limit
) {
92 auto type
= GetParam();
94 return NewLRUCache(capacity
, num_shard_bits
, strict_capacity_limit
);
97 return NewClockCache(capacity
, num_shard_bits
, strict_capacity_limit
);
102 int Lookup(shared_ptr
<Cache
> cache
, int key
) {
103 Cache::Handle
* handle
= cache
->Lookup(EncodeKey(key
));
104 const int r
= (handle
== nullptr) ? -1 : DecodeValue(cache
->Value(handle
));
105 if (handle
!= nullptr) {
106 cache
->Release(handle
);
111 void Insert(shared_ptr
<Cache
> cache
, int key
, int value
, int charge
= 1) {
112 cache
->Insert(EncodeKey(key
), EncodeValue(value
), charge
,
113 &CacheTest::Deleter
);
116 void Erase(shared_ptr
<Cache
> cache
, int key
) {
117 cache
->Erase(EncodeKey(key
));
121 int Lookup(int key
) {
122 return Lookup(cache_
, key
);
125 void Insert(int key
, int value
, int charge
= 1) {
126 Insert(cache_
, key
, value
, charge
);
129 void Erase(int key
) {
133 int Lookup2(int key
) {
134 return Lookup(cache2_
, key
);
137 void Insert2(int key
, int value
, int charge
= 1) {
138 Insert(cache2_
, key
, value
, charge
);
141 void Erase2(int key
) {
145 CacheTest
* CacheTest::current_
;
147 TEST_P(CacheTest
, UsageTest
) {
148 // cache is shared_ptr and will be automatically cleaned up.
149 const uint64_t kCapacity
= 100000;
150 auto cache
= NewCache(kCapacity
, 8, false);
153 char value
[10] = "abcdef";
154 // make sure everything will be cached
155 for (int i
= 1; i
< 100; ++i
) {
156 std::string
key(i
, 'a');
157 auto kv_size
= key
.size() + 5;
158 cache
->Insert(key
, reinterpret_cast<void*>(value
), kv_size
, dumbDeleter
);
160 ASSERT_EQ(usage
, cache
->GetUsage());
163 // make sure the cache will be overloaded
164 for (uint64_t i
= 1; i
< kCapacity
; ++i
) {
165 auto key
= ToString(i
);
166 cache
->Insert(key
, reinterpret_cast<void*>(value
), key
.size() + 5,
170 // the usage should be close to the capacity
171 ASSERT_GT(kCapacity
, cache
->GetUsage());
172 ASSERT_LT(kCapacity
* 0.95, cache
->GetUsage());
175 TEST_P(CacheTest
, PinnedUsageTest
) {
176 // cache is shared_ptr and will be automatically cleaned up.
177 const uint64_t kCapacity
= 100000;
178 auto cache
= NewCache(kCapacity
, 8, false);
180 size_t pinned_usage
= 0;
181 char value
[10] = "abcdef";
183 std::forward_list
<Cache::Handle
*> unreleased_handles
;
185 // Add entries. Unpin some of them after insertion. Then, pin some of them
186 // again. Check GetPinnedUsage().
187 for (int i
= 1; i
< 100; ++i
) {
188 std::string
key(i
, 'a');
189 auto kv_size
= key
.size() + 5;
190 Cache::Handle
* handle
;
191 cache
->Insert(key
, reinterpret_cast<void*>(value
), kv_size
, dumbDeleter
,
193 pinned_usage
+= kv_size
;
194 ASSERT_EQ(pinned_usage
, cache
->GetPinnedUsage());
196 cache
->Release(handle
);
197 pinned_usage
-= kv_size
;
198 ASSERT_EQ(pinned_usage
, cache
->GetPinnedUsage());
200 unreleased_handles
.push_front(handle
);
203 unreleased_handles
.push_front(cache
->Lookup(key
));
204 // If i % 2 == 0, then the entry was unpinned before Lookup, so pinned
207 pinned_usage
+= kv_size
;
209 ASSERT_EQ(pinned_usage
, cache
->GetPinnedUsage());
213 // check that overloading the cache does not change the pinned usage
214 for (uint64_t i
= 1; i
< 2 * kCapacity
; ++i
) {
215 auto key
= ToString(i
);
216 cache
->Insert(key
, reinterpret_cast<void*>(value
), key
.size() + 5,
219 ASSERT_EQ(pinned_usage
, cache
->GetPinnedUsage());
221 // release handles for pinned entries to prevent memory leaks
222 for (auto handle
: unreleased_handles
) {
223 cache
->Release(handle
);
227 TEST_P(CacheTest
, HitAndMiss
) {
228 ASSERT_EQ(-1, Lookup(100));
231 ASSERT_EQ(101, Lookup(100));
232 ASSERT_EQ(-1, Lookup(200));
233 ASSERT_EQ(-1, Lookup(300));
236 ASSERT_EQ(101, Lookup(100));
237 ASSERT_EQ(201, Lookup(200));
238 ASSERT_EQ(-1, Lookup(300));
241 ASSERT_EQ(102, Lookup(100));
242 ASSERT_EQ(201, Lookup(200));
243 ASSERT_EQ(-1, Lookup(300));
245 ASSERT_EQ(1U, deleted_keys_
.size());
246 ASSERT_EQ(100, deleted_keys_
[0]);
247 ASSERT_EQ(101, deleted_values_
[0]);
250 TEST_P(CacheTest
, InsertSameKey
) {
253 ASSERT_EQ(2, Lookup(1));
256 TEST_P(CacheTest
, Erase
) {
258 ASSERT_EQ(0U, deleted_keys_
.size());
263 ASSERT_EQ(-1, Lookup(100));
264 ASSERT_EQ(201, Lookup(200));
265 ASSERT_EQ(1U, deleted_keys_
.size());
266 ASSERT_EQ(100, deleted_keys_
[0]);
267 ASSERT_EQ(101, deleted_values_
[0]);
270 ASSERT_EQ(-1, Lookup(100));
271 ASSERT_EQ(201, Lookup(200));
272 ASSERT_EQ(1U, deleted_keys_
.size());
275 TEST_P(CacheTest
, EntriesArePinned
) {
277 Cache::Handle
* h1
= cache_
->Lookup(EncodeKey(100));
278 ASSERT_EQ(101, DecodeValue(cache_
->Value(h1
)));
279 ASSERT_EQ(1U, cache_
->GetUsage());
282 Cache::Handle
* h2
= cache_
->Lookup(EncodeKey(100));
283 ASSERT_EQ(102, DecodeValue(cache_
->Value(h2
)));
284 ASSERT_EQ(0U, deleted_keys_
.size());
285 ASSERT_EQ(2U, cache_
->GetUsage());
288 ASSERT_EQ(1U, deleted_keys_
.size());
289 ASSERT_EQ(100, deleted_keys_
[0]);
290 ASSERT_EQ(101, deleted_values_
[0]);
291 ASSERT_EQ(1U, cache_
->GetUsage());
294 ASSERT_EQ(-1, Lookup(100));
295 ASSERT_EQ(1U, deleted_keys_
.size());
296 ASSERT_EQ(1U, cache_
->GetUsage());
299 ASSERT_EQ(2U, deleted_keys_
.size());
300 ASSERT_EQ(100, deleted_keys_
[1]);
301 ASSERT_EQ(102, deleted_values_
[1]);
302 ASSERT_EQ(0U, cache_
->GetUsage());
305 TEST_P(CacheTest
, EvictionPolicy
) {
309 // Frequently used entry must be kept around
310 for (int i
= 0; i
< kCacheSize
+ 100; i
++) {
311 Insert(1000+i
, 2000+i
);
312 ASSERT_EQ(101, Lookup(100));
314 ASSERT_EQ(101, Lookup(100));
315 ASSERT_EQ(-1, Lookup(200));
318 TEST_P(CacheTest
, ExternalRefPinsEntries
) {
320 Cache::Handle
* h
= cache_
->Lookup(EncodeKey(100));
321 ASSERT_TRUE(cache_
->Ref(h
));
322 ASSERT_EQ(101, DecodeValue(cache_
->Value(h
)));
323 ASSERT_EQ(1U, cache_
->GetUsage());
325 for (int i
= 0; i
< 3; ++i
) {
327 // First release (i == 1) corresponds to Ref(), second release (i == 2)
328 // corresponds to Lookup(). Then, since all external refs are released,
329 // the below insertions should push out the cache entry.
332 // double cache size because the usage bit in block cache prevents 100 from
333 // being evicted in the first kCacheSize iterations
334 for (int j
= 0; j
< 2 * kCacheSize
+ 100; j
++) {
335 Insert(1000 + j
, 2000 + j
);
338 ASSERT_EQ(101, Lookup(100));
341 ASSERT_EQ(-1, Lookup(100));
344 TEST_P(CacheTest
, EvictionPolicyRef
) {
353 Cache::Handle
* h201
= cache_
->Lookup(EncodeKey(200));
354 Cache::Handle
* h202
= cache_
->Lookup(EncodeKey(201));
355 Cache::Handle
* h203
= cache_
->Lookup(EncodeKey(202));
356 Cache::Handle
* h204
= cache_
->Lookup(EncodeKey(203));
362 // Insert entries much more than Cache capacity
363 for (int i
= 0; i
< kCacheSize
+ 100; i
++) {
364 Insert(1000 + i
, 2000 + i
);
367 // Check whether the entries inserted in the beginning
368 // are evicted. Ones without extra ref are evicted and
369 // those with are not.
370 ASSERT_EQ(-1, Lookup(100));
371 ASSERT_EQ(-1, Lookup(101));
372 ASSERT_EQ(-1, Lookup(102));
373 ASSERT_EQ(-1, Lookup(103));
375 ASSERT_EQ(-1, Lookup(300));
376 ASSERT_EQ(-1, Lookup(301));
377 ASSERT_EQ(-1, Lookup(302));
378 ASSERT_EQ(-1, Lookup(303));
380 ASSERT_EQ(101, Lookup(200));
381 ASSERT_EQ(102, Lookup(201));
382 ASSERT_EQ(103, Lookup(202));
383 ASSERT_EQ(104, Lookup(203));
385 // Cleaning up all the handles
386 cache_
->Release(h201
);
387 cache_
->Release(h202
);
388 cache_
->Release(h203
);
389 cache_
->Release(h204
);
392 TEST_P(CacheTest
, EvictEmptyCache
) {
393 // Insert item large than capacity to trigger eviction on empty cache.
394 auto cache
= NewCache(1, 0, false);
395 ASSERT_OK(cache
->Insert("foo", nullptr, 10, dumbDeleter
));
398 TEST_P(CacheTest
, EraseFromDeleter
) {
399 // Have deleter which will erase item from cache, which will re-enter
400 // the cache at that point.
401 std::shared_ptr
<Cache
> cache
= NewCache(10, 0, false);
402 ASSERT_OK(cache
->Insert("foo", nullptr, 1, dumbDeleter
));
403 ASSERT_OK(cache
->Insert("bar", cache
.get(), 1, eraseDeleter
));
405 ASSERT_EQ(nullptr, cache
->Lookup("foo"));
406 ASSERT_EQ(nullptr, cache
->Lookup("bar"));
409 TEST_P(CacheTest
, ErasedHandleState
) {
410 // insert a key and get two handles
412 Cache::Handle
* h1
= cache_
->Lookup(EncodeKey(100));
413 Cache::Handle
* h2
= cache_
->Lookup(EncodeKey(100));
415 ASSERT_EQ(DecodeValue(cache_
->Value(h1
)), 1000);
416 ASSERT_EQ(DecodeValue(cache_
->Value(h2
)), 1000);
418 // delete the key from the cache
420 // can no longer find in the cache
421 ASSERT_EQ(-1, Lookup(100));
423 // release one handle
425 // still can't find in cache
426 ASSERT_EQ(-1, Lookup(100));
431 TEST_P(CacheTest
, HeavyEntries
) {
432 // Add a bunch of light and heavy entries and then count the combined
433 // size of items still in the cache, which must be approximately the
434 // same as the total capacity.
435 const int kLight
= 1;
436 const int kHeavy
= 10;
439 while (added
< 2*kCacheSize
) {
440 const int weight
= (index
& 1) ? kLight
: kHeavy
;
441 Insert(index
, 1000+index
, weight
);
446 int cached_weight
= 0;
447 for (int i
= 0; i
< index
; i
++) {
448 const int weight
= (i
& 1 ? kLight
: kHeavy
);
451 cached_weight
+= weight
;
452 ASSERT_EQ(1000+i
, r
);
455 ASSERT_LE(cached_weight
, kCacheSize
+ kCacheSize
/10);
458 TEST_P(CacheTest
, NewId
) {
459 uint64_t a
= cache_
->NewId();
460 uint64_t b
= cache_
->NewId();
// Heap-allocated test value; paired with deleter() below, which frees it
// when the cache drops the entry.
class Value {
 public:
  explicit Value(size_t v) : v_(v) { }

  size_t v_;
};
473 void deleter(const Slice
& /*key*/, void* value
) {
474 delete static_cast<Value
*>(value
);
478 TEST_P(CacheTest
, ReleaseAndErase
) {
479 std::shared_ptr
<Cache
> cache
= NewCache(5, 0, false);
480 Cache::Handle
* handle
;
481 Status s
= cache
->Insert(EncodeKey(100), EncodeValue(100), 1,
482 &CacheTest::Deleter
, &handle
);
484 ASSERT_EQ(5U, cache
->GetCapacity());
485 ASSERT_EQ(1U, cache
->GetUsage());
486 ASSERT_EQ(0U, deleted_keys_
.size());
487 auto erased
= cache
->Release(handle
, true);
489 // This tests that deleter has been called
490 ASSERT_EQ(1U, deleted_keys_
.size());
493 TEST_P(CacheTest
, ReleaseWithoutErase
) {
494 std::shared_ptr
<Cache
> cache
= NewCache(5, 0, false);
495 Cache::Handle
* handle
;
496 Status s
= cache
->Insert(EncodeKey(100), EncodeValue(100), 1,
497 &CacheTest::Deleter
, &handle
);
499 ASSERT_EQ(5U, cache
->GetCapacity());
500 ASSERT_EQ(1U, cache
->GetUsage());
501 ASSERT_EQ(0U, deleted_keys_
.size());
502 auto erased
= cache
->Release(handle
);
503 ASSERT_FALSE(erased
);
504 // This tests that deleter is not called. When cache has free capacity it is
505 // not expected to immediately erase the released items.
506 ASSERT_EQ(0U, deleted_keys_
.size());
509 TEST_P(CacheTest
, SetCapacity
) {
510 // test1: increase capacity
511 // lets create a cache with capacity 5,
512 // then, insert 5 elements, then increase capacity
513 // to 10, returned capacity should be 10, usage=5
514 std::shared_ptr
<Cache
> cache
= NewCache(5, 0, false);
515 std::vector
<Cache::Handle
*> handles(10);
516 // Insert 5 entries, but not releasing.
517 for (size_t i
= 0; i
< 5; i
++) {
518 std::string key
= ToString(i
+1);
519 Status s
= cache
->Insert(key
, new Value(i
+ 1), 1, &deleter
, &handles
[i
]);
522 ASSERT_EQ(5U, cache
->GetCapacity());
523 ASSERT_EQ(5U, cache
->GetUsage());
524 cache
->SetCapacity(10);
525 ASSERT_EQ(10U, cache
->GetCapacity());
526 ASSERT_EQ(5U, cache
->GetUsage());
528 // test2: decrease capacity
529 // insert 5 more elements to cache, then release 5,
530 // then decrease capacity to 7, final capacity should be 7
531 // and usage should be 7
532 for (size_t i
= 5; i
< 10; i
++) {
533 std::string key
= ToString(i
+1);
534 Status s
= cache
->Insert(key
, new Value(i
+ 1), 1, &deleter
, &handles
[i
]);
537 ASSERT_EQ(10U, cache
->GetCapacity());
538 ASSERT_EQ(10U, cache
->GetUsage());
539 for (size_t i
= 0; i
< 5; i
++) {
540 cache
->Release(handles
[i
]);
542 ASSERT_EQ(10U, cache
->GetCapacity());
543 ASSERT_EQ(10U, cache
->GetUsage());
544 cache
->SetCapacity(7);
545 ASSERT_EQ(7, cache
->GetCapacity());
546 ASSERT_EQ(7, cache
->GetUsage());
548 // release remaining 5 to keep valgrind happy
549 for (size_t i
= 5; i
< 10; i
++) {
550 cache
->Release(handles
[i
]);
554 TEST_P(CacheTest
, SetStrictCapacityLimit
) {
555 // test1: set the flag to false. Insert more keys than capacity. See if they
557 std::shared_ptr
<Cache
> cache
= NewLRUCache(5, 0, false);
558 std::vector
<Cache::Handle
*> handles(10);
560 for (size_t i
= 0; i
< 10; i
++) {
561 std::string key
= ToString(i
+ 1);
562 s
= cache
->Insert(key
, new Value(i
+ 1), 1, &deleter
, &handles
[i
]);
564 ASSERT_NE(nullptr, handles
[i
]);
567 // test2: set the flag to true. Insert and check if it fails.
568 std::string extra_key
= "extra";
569 Value
* extra_value
= new Value(0);
570 cache
->SetStrictCapacityLimit(true);
571 Cache::Handle
* handle
;
572 s
= cache
->Insert(extra_key
, extra_value
, 1, &deleter
, &handle
);
573 ASSERT_TRUE(s
.IsIncomplete());
574 ASSERT_EQ(nullptr, handle
);
576 for (size_t i
= 0; i
< 10; i
++) {
577 cache
->Release(handles
[i
]);
580 // test3: init with flag being true.
581 std::shared_ptr
<Cache
> cache2
= NewLRUCache(5, 0, true);
582 for (size_t i
= 0; i
< 5; i
++) {
583 std::string key
= ToString(i
+ 1);
584 s
= cache2
->Insert(key
, new Value(i
+ 1), 1, &deleter
, &handles
[i
]);
586 ASSERT_NE(nullptr, handles
[i
]);
588 s
= cache2
->Insert(extra_key
, extra_value
, 1, &deleter
, &handle
);
589 ASSERT_TRUE(s
.IsIncomplete());
590 ASSERT_EQ(nullptr, handle
);
591 // test insert without handle
592 s
= cache2
->Insert(extra_key
, extra_value
, 1, &deleter
);
593 // AS if the key have been inserted into cache but get evicted immediately.
595 ASSERT_EQ(5, cache
->GetUsage());
596 ASSERT_EQ(nullptr, cache2
->Lookup(extra_key
));
598 for (size_t i
= 0; i
< 5; i
++) {
599 cache2
->Release(handles
[i
]);
603 TEST_P(CacheTest
, OverCapacity
) {
606 // a LRUCache with n entries and one shard only
607 std::shared_ptr
<Cache
> cache
= NewCache(n
, 0, false);
609 std::vector
<Cache::Handle
*> handles(n
+1);
611 // Insert n+1 entries, but not releasing.
612 for (size_t i
= 0; i
< n
+ 1; i
++) {
613 std::string key
= ToString(i
+1);
614 Status s
= cache
->Insert(key
, new Value(i
+ 1), 1, &deleter
, &handles
[i
]);
618 // Guess what's in the cache now?
619 for (size_t i
= 0; i
< n
+ 1; i
++) {
620 std::string key
= ToString(i
+1);
621 auto h
= cache
->Lookup(key
);
622 ASSERT_TRUE(h
!= nullptr);
623 if (h
) cache
->Release(h
);
626 // the cache is over capacity since nothing could be evicted
627 ASSERT_EQ(n
+ 1U, cache
->GetUsage());
628 for (size_t i
= 0; i
< n
+ 1; i
++) {
629 cache
->Release(handles
[i
]);
631 // Make sure eviction is triggered.
632 cache
->SetCapacity(n
);
634 // cache is under capacity now since elements were released
635 ASSERT_EQ(n
, cache
->GetUsage());
637 // element 0 is evicted and the rest is there
638 // This is consistent with the LRU policy since the element 0
639 // was released first
640 for (size_t i
= 0; i
< n
+ 1; i
++) {
641 std::string key
= ToString(i
+1);
642 auto h
= cache
->Lookup(key
);
653 std::vector
<std::pair
<int, int>> callback_state
;
654 void callback(void* entry
, size_t charge
) {
655 callback_state
.push_back({DecodeValue(entry
), static_cast<int>(charge
)});
659 TEST_P(CacheTest
, ApplyToAllCacheEntiresTest
) {
660 std::vector
<std::pair
<int, int>> inserted
;
661 callback_state
.clear();
663 for (int i
= 0; i
< 10; ++i
) {
664 Insert(i
, i
* 2, i
+ 1);
665 inserted
.push_back({i
* 2, i
+ 1});
667 cache_
->ApplyToAllCacheEntries(callback
, true);
669 std::sort(inserted
.begin(), inserted
.end());
670 std::sort(callback_state
.begin(), callback_state
.end());
671 ASSERT_TRUE(inserted
== callback_state
);
674 TEST_P(CacheTest
, DefaultShardBits
) {
675 // test1: set the flag to false. Insert more keys than capacity. See if they
677 std::shared_ptr
<Cache
> cache
= NewCache(16 * 1024L * 1024L);
678 ShardedCache
* sc
= dynamic_cast<ShardedCache
*>(cache
.get());
679 ASSERT_EQ(5, sc
->GetNumShardBits());
681 cache
= NewLRUCache(511 * 1024L, -1, true);
682 sc
= dynamic_cast<ShardedCache
*>(cache
.get());
683 ASSERT_EQ(0, sc
->GetNumShardBits());
685 cache
= NewLRUCache(1024L * 1024L * 1024L, -1, true);
686 sc
= dynamic_cast<ShardedCache
*>(cache
.get());
687 ASSERT_EQ(6, sc
->GetNumShardBits());
690 #ifdef SUPPORT_CLOCK_CACHE
691 shared_ptr
<Cache
> (*new_clock_cache_func
)(size_t, int, bool) = NewClockCache
;
692 INSTANTIATE_TEST_CASE_P(CacheTestInstance
, CacheTest
,
693 testing::Values(kLRU
, kClock
));
695 INSTANTIATE_TEST_CASE_P(CacheTestInstance
, CacheTest
, testing::Values(kLRU
));
696 #endif // SUPPORT_CLOCK_CACHE
698 } // namespace rocksdb
700 int main(int argc
, char** argv
) {
701 ::testing::InitGoogleTest(&argc
, argv
);
702 return RUN_ALL_TESTS();