// SPDX-License-Identifier: GPL-2.0
/*
 * Test cases for the KFENCE memory safety error detector. Since the interface
 * through which KFENCE's reports are obtained is the console, that output is
 * what we must verify. Each test case checks for the presence (or absence) of
 * generated reports. Relies on the 'console' tracepoint to capture reports as
 * they appear in the kernel log.
 *
 * Copyright (C) 2020, Google LLC.
 * Author: Alexander Potapenko <glider@google.com>
 *         Marco Elver <elver@google.com>
 */
#include <kunit/test.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kfence.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/tracepoint.h>
#include <trace/events/printk.h>

#include "kfence.h"
/* Report as observed from console. */
static struct {
	spinlock_t lock;
	int nlines;
	char lines[2][256];
} observed = {
	.lock = __SPIN_LOCK_UNLOCKED(observed.lock),
};
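/*
 * observed.lines[0] holds the report title ("BUG: KFENCE: ..."), and
 * observed.lines[1] holds the line with the access information; see
 * probe_console() below for how the two lines are captured.
 */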
/* Probe for console output: obtains observed lines of interest. */
static void probe_console(void *ignore, const char *buf, size_t len)
{
	unsigned long flags;
	int nlines;

	spin_lock_irqsave(&observed.lock, flags);
	nlines = observed.nlines;

	if (strnstr(buf, "BUG: KFENCE: ", len) && strnstr(buf, "test_", len)) {
		/*
		 * This is the start of a KFENCE report, and it is related to
		 * the test.
		 *
		 * The provided @buf is not NUL-terminated; copy no more than
		 * @len bytes and let strscpy() add the missing NUL-terminator.
		 */
		strscpy(observed.lines[0], buf, min(len + 1, sizeof(observed.lines[0])));
		nlines = 1;
	} else if (nlines == 1 && (strnstr(buf, "at 0x", len) || strnstr(buf, "of 0x", len))) {
		strscpy(observed.lines[nlines++], buf, min(len + 1, sizeof(observed.lines[0])));
	}

	WRITE_ONCE(observed.nlines, nlines); /* Publish new nlines. */
	spin_unlock_irqrestore(&observed.lock, flags);
}
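/*
 * Illustrative example (not verbatim console output; exact offsets and
 * addresses vary) of the two lines the probe captures for an OOB read:
 *
 *	BUG: KFENCE: out-of-bounds read in test_out_of_bounds_read+0x.../0x...
 *	Out-of-bounds read at 0x... (...)
 */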
/* Check if a report related to the test exists. */
static bool report_available(void)
{
	return READ_ONCE(observed.nlines) == ARRAY_SIZE(observed.lines);
}
/* Information we expect in a report. */
struct expect_report {
	enum kfence_error_type type; /* The type of error. */
	void *fn; /* Function pointer to expected function where access occurred. */
	char *addr; /* Address at which the bad access occurred. */
	bool is_write; /* Whether the access is a write. */
};
static const char *get_access_type(const struct expect_report *r)
{
	return r->is_write ? "write" : "read";
}
/* Check observed report matches information in @r. */
static bool report_matches(const struct expect_report *r)
{
	bool ret = false;
	unsigned long flags;
	typeof(observed.lines) expect;
	const char *end;
	char *cur;

	/* Double-checked locking. */
	if (!report_available())
		return false;

	/* Generate expected report contents. */

	/* Title */
	cur = expect[0];
	end = &expect[0][sizeof(expect[0]) - 1];
	switch (r->type) {
	case KFENCE_ERROR_OOB:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: out-of-bounds %s",
				 get_access_type(r));
		break;
	case KFENCE_ERROR_UAF:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: use-after-free %s",
				 get_access_type(r));
		break;
	case KFENCE_ERROR_CORRUPTION:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: memory corruption");
		break;
	case KFENCE_ERROR_INVALID:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: invalid %s",
				 get_access_type(r));
		break;
	case KFENCE_ERROR_INVALID_FREE:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: invalid free");
		break;
	}

	scnprintf(cur, end - cur, " in %pS", r->fn);
	/* The exact offset won't match, remove it; also strip module name. */
	cur = strchr(expect[0], '+');
	if (cur)
		*cur = '\0';

	/* Access information */
	cur = expect[1];
	end = &expect[1][sizeof(expect[1]) - 1];

	switch (r->type) {
	case KFENCE_ERROR_OOB:
		cur += scnprintf(cur, end - cur, "Out-of-bounds %s at", get_access_type(r));
		break;
	case KFENCE_ERROR_UAF:
		cur += scnprintf(cur, end - cur, "Use-after-free %s at", get_access_type(r));
		break;
	case KFENCE_ERROR_CORRUPTION:
		cur += scnprintf(cur, end - cur, "Corrupted memory at");
		break;
	case KFENCE_ERROR_INVALID:
		cur += scnprintf(cur, end - cur, "Invalid %s at", get_access_type(r));
		break;
	case KFENCE_ERROR_INVALID_FREE:
		cur += scnprintf(cur, end - cur, "Invalid free of");
		break;
	}

	cur += scnprintf(cur, end - cur, " 0x%p", (void *)r->addr);

	spin_lock_irqsave(&observed.lock, flags);
	if (!report_available())
		goto out; /* A new report is being captured. */

	/* Finally match expected output to what we actually observed. */
	ret = strstr(observed.lines[0], expect[0]) && strstr(observed.lines[1], expect[1]);
out:
	spin_unlock_irqrestore(&observed.lock, flags);

	return ret;
}
/* ===== Test cases ===== */

#define TEST_PRIV_WANT_MEMCACHE ((void *)1)

/* Cache used by tests; if NULL, allocate from kmalloc instead. */
static struct kmem_cache *test_cache;
static size_t setup_test_cache(struct kunit *test, size_t size, slab_flags_t flags,
			       void (*ctor)(void *))
{
	if (test->priv != TEST_PRIV_WANT_MEMCACHE)
		return size;

	kunit_info(test, "%s: size=%zu, ctor=%ps\n", __func__, size, ctor);

	/*
	 * Use SLAB_NOLEAKTRACE to prevent merging with existing caches. Any
	 * other flag in SLAB_NEVER_MERGE also works. Use SLAB_ACCOUNT to
	 * allocate via memcg, if enabled.
	 */
	flags |= SLAB_NOLEAKTRACE | SLAB_ACCOUNT;
	test_cache = kmem_cache_create("test", size, 1, flags, ctor);
	KUNIT_ASSERT_TRUE_MSG(test, test_cache, "could not create cache");

	return size;
}
static void test_cache_destroy(void)
{
	if (!test_cache)
		return;

	kmem_cache_destroy(test_cache);
	test_cache = NULL;
}
static inline size_t kmalloc_cache_alignment(size_t size)
{
	return kmalloc_caches[kmalloc_type(GFP_KERNEL)][__kmalloc_index(size, false)]->align;
}
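/*
 * For example, kmalloc_cache_alignment(73) returns the alignment of the
 * kmalloc-96 cache, since 73 bytes round up to the 96-byte size class.
 */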
/* Must always inline to match stack trace against caller. */
static __always_inline void test_free(void *ptr)
{
	if (test_cache)
		kmem_cache_free(test_cache, ptr);
	else
		kfree(ptr);
}
/*
 * Whether this should be a KFENCE allocation, and if so, on which side of the
 * object page the allocation and its closest guard page should be.
 */
enum allocation_policy {
	ALLOCATE_ANY,   /* KFENCE, any side. */
	ALLOCATE_LEFT,  /* KFENCE, left side of page. */
	ALLOCATE_RIGHT, /* KFENCE, right side of page. */
	ALLOCATE_NONE,  /* No KFENCE allocation. */
};
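/*
 * Layout sketch of the two guarded placements (assuming the usual KFENCE pool
 * layout, where each object page is surrounded by guard pages):
 *
 *	ALLOCATE_LEFT:	| guard page | object ....... |	(object page-aligned)
 *	ALLOCATE_RIGHT:	| ....... object | guard page |	(object ends at page end)
 *
 * test_alloc() below distinguishes the two cases via
 * IS_ALIGNED((unsigned long)alloc, PAGE_SIZE).
 */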
/*
 * Try to get a guarded allocation from KFENCE. Uses either kmalloc() or the
 * current test_cache if set up.
 */
static void *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocation_policy policy)
{
	void *alloc;
	unsigned long timeout, resched_after;
	const char *policy_name;

	switch (policy) {
	case ALLOCATE_ANY:
		policy_name = "any";
		break;
	case ALLOCATE_LEFT:
		policy_name = "left";
		break;
	case ALLOCATE_RIGHT:
		policy_name = "right";
		break;
	case ALLOCATE_NONE:
		policy_name = "none";
		break;
	}

	kunit_info(test, "%s: size=%zu, gfp=%x, policy=%s, cache=%i\n", __func__, size, gfp,
		   policy_name, !!test_cache);

	/*
	 * 100x the sample interval should be more than enough to ensure we get
	 * a KFENCE allocation eventually.
	 */
	timeout = jiffies + msecs_to_jiffies(100 * CONFIG_KFENCE_SAMPLE_INTERVAL);
	/*
	 * Especially for non-preemption kernels, ensure the allocation-gate
	 * timer can catch up: after @resched_after, every failed allocation
	 * attempt yields, to ensure the allocation-gate timer is scheduled.
	 */
	resched_after = jiffies + msecs_to_jiffies(CONFIG_KFENCE_SAMPLE_INTERVAL);
	do {
		if (test_cache)
			alloc = kmem_cache_alloc(test_cache, gfp);
		else
			alloc = kmalloc(size, gfp);

		if (is_kfence_address(alloc)) {
			struct page *page = virt_to_head_page(alloc);
			struct kmem_cache *s = test_cache ?:
					kmalloc_caches[kmalloc_type(GFP_KERNEL)][__kmalloc_index(size, false)];

			/*
			 * Verify that various helpers return the right values
			 * even for KFENCE objects; these are required so that
			 * memcg accounting works correctly.
			 */
			KUNIT_EXPECT_EQ(test, obj_to_index(s, page, alloc), 0U);
			KUNIT_EXPECT_EQ(test, objs_per_slab_page(s, page), 1);

			if (policy == ALLOCATE_ANY)
				return alloc;
			if (policy == ALLOCATE_LEFT && IS_ALIGNED((unsigned long)alloc, PAGE_SIZE))
				return alloc;
			if (policy == ALLOCATE_RIGHT &&
			    !IS_ALIGNED((unsigned long)alloc, PAGE_SIZE))
				return alloc;
		} else if (policy == ALLOCATE_NONE)
			return alloc;

		test_free(alloc);

		if (time_after(jiffies, resched_after))
			cond_resched();
	} while (time_before(jiffies, timeout));

	KUNIT_ASSERT_TRUE_MSG(test, false, "failed to allocate from KFENCE");
	return NULL; /* Unreachable. */
}
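/*
 * Minimal usage sketch (mirrors test_out_of_bounds_read() below): obtain a
 * left-placed object, then touch the byte just before it to fault on the
 * adjacent guard page:
 *
 *	char *buf = test_alloc(test, 32, GFP_KERNEL, ALLOCATE_LEFT);
 *
 *	READ_ONCE(*(buf - 1));	// expect an out-of-bounds report
 */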
static void test_out_of_bounds_read(struct kunit *test)
{
	size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_OOB,
		.fn = test_out_of_bounds_read,
		.is_write = false,
	};
	char *buf;

	setup_test_cache(test, size, 0, NULL);

	/*
	 * If we don't have our own cache, adjust based on alignment, so that we
	 * actually access guard pages on either side.
	 */
	if (!test_cache)
		size = kmalloc_cache_alignment(size);

	/* Test both sides. */

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT);
	expect.addr = buf - 1;
	READ_ONCE(*expect.addr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
	test_free(buf);

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);
	expect.addr = buf + size;
	READ_ONCE(*expect.addr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
	test_free(buf);
}
static void test_out_of_bounds_write(struct kunit *test)
{
	size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_OOB,
		.fn = test_out_of_bounds_write,
		.is_write = true,
	};
	char *buf;

	setup_test_cache(test, size, 0, NULL);
	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT);
	expect.addr = buf - 1;
	WRITE_ONCE(*expect.addr, 42);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
	test_free(buf);
}
static void test_use_after_free_read(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_UAF,
		.fn = test_use_after_free_read,
		.is_write = false,
	};

	setup_test_cache(test, size, 0, NULL);
	expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	test_free(expect.addr);
	READ_ONCE(*expect.addr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}
static void test_double_free(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_INVALID_FREE,
		.fn = test_double_free,
	};

	setup_test_cache(test, size, 0, NULL);
	expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	test_free(expect.addr);
	test_free(expect.addr); /* Double-free. */
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}
static void test_invalid_addr_free(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_INVALID_FREE,
		.fn = test_invalid_addr_free,
	};
	char *buf;

	setup_test_cache(test, size, 0, NULL);
	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	expect.addr = buf + 1; /* Free on invalid address. */
	test_free(expect.addr); /* Invalid address free. */
	test_free(buf); /* No error. */
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}
static void test_corruption(struct kunit *test)
{
	size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_CORRUPTION,
		.fn = test_corruption,
	};
	char *buf;

	setup_test_cache(test, size, 0, NULL);

	/* Test both sides. */

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT);
	expect.addr = buf + size;
	WRITE_ONCE(*expect.addr, 42);
	test_free(buf); /* Corruption is detected on free. */
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);
	expect.addr = buf - 1;
	WRITE_ONCE(*expect.addr, 42);
	test_free(buf);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}
/*
 * KFENCE is unable to detect an OOB if the allocation's alignment requirements
 * leave a gap between the object and the guard page. Specifically, an
 * allocation of e.g. 73 bytes is aligned on 8 and 128 bytes for SLUB or SLAB
 * respectively. Therefore it is impossible for the allocated object to
 * contiguously line up with the right guard page.
 *
 * However, we test that an access to memory beyond the gap results in KFENCE
 * detecting an OOB access.
 */
static void test_kmalloc_aligned_oob_read(struct kunit *test)
{
	const size_t size = 73;
	const size_t align = kmalloc_cache_alignment(size);
	struct expect_report expect = {
		.type = KFENCE_ERROR_OOB,
		.fn = test_kmalloc_aligned_oob_read,
		.is_write = false,
	};
	char *buf;

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);

	/*
	 * The object is offset to the right, so there won't be an OOB to the
	 * left of it.
	 */
	READ_ONCE(*(buf - 1));
	KUNIT_EXPECT_FALSE(test, report_available());

	/*
	 * @buf must be aligned on @align, therefore buf + size belongs to the
	 * same page -> no OOB.
	 */
	READ_ONCE(*(buf + size));
	KUNIT_EXPECT_FALSE(test, report_available());

	/* Overflowing by @align bytes will result in an OOB. */
	expect.addr = buf + size + align;
	READ_ONCE(*expect.addr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));

	test_free(buf);
}
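/*
 * Worked example for the gap arithmetic above, assuming align == 128 (e.g.
 * SLAB): since @buf is 128-byte aligned and the object must fit in its page,
 * the right-placed object starts at page_end - 128, so buf + size ==
 * page_end - 55 still lies within the object's page (no report), whereas
 * buf + size + align == page_end + 73 lies in the right guard page and is
 * reported as an OOB.
 */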
static void test_kmalloc_aligned_oob_write(struct kunit *test)
{
	const size_t size = 73;
	struct expect_report expect = {
		.type = KFENCE_ERROR_CORRUPTION,
		.fn = test_kmalloc_aligned_oob_write,
	};
	char *buf;

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);
	/*
	 * The object is offset to the right, so we won't get a page
	 * fault immediately after it.
	 */
	expect.addr = buf + size;
	WRITE_ONCE(*expect.addr, READ_ONCE(*expect.addr) + 1);
	KUNIT_EXPECT_FALSE(test, report_available());
	test_free(buf); /* Corruption is detected on free. */
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}
/* Test cache shrinking and destroying with KFENCE. */
static void test_shrink_memcache(struct kunit *test)
{
	const size_t size = 32;
	void *buf;

	setup_test_cache(test, size, 0, NULL);
	KUNIT_EXPECT_TRUE(test, test_cache);
	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	kmem_cache_shrink(test_cache);
	test_free(buf);

	KUNIT_EXPECT_FALSE(test, report_available());
}
static void ctor_set_x(void *obj)
{
	/* Every object has at least 8 bytes. */
	memset(obj, 'x', 8);
}
/* Ensure that SL*B does not modify KFENCE objects on bulk free. */
static void test_free_bulk(struct kunit *test)
{
	int iter;

	for (iter = 0; iter < 5; iter++) {
		const size_t size = setup_test_cache(test, 8 + prandom_u32_max(300), 0,
						     (iter & 1) ? ctor_set_x : NULL);
		void *objects[] = {
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT),
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_NONE),
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT),
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_NONE),
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_NONE),
		};

		kmem_cache_free_bulk(test_cache, ARRAY_SIZE(objects), objects);
		KUNIT_ASSERT_FALSE(test, report_available());
		test_cache_destroy();
	}
}
/* Test init-on-free works. */
static void test_init_on_free(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_UAF,
		.fn = test_init_on_free,
		.is_write = false,
	};
	int i;

	if (!IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON))
		return;
	/* Assume it hasn't been disabled on the command line. */

	setup_test_cache(test, size, 0, NULL);
	expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	for (i = 0; i < size; i++)
		expect.addr[i] = i + 1;
	test_free(expect.addr);

	for (i = 0; i < size; i++) {
		/*
		 * This may fail if the page was recycled by KFENCE and then
		 * written to again -- this, however, is near impossible with a
		 * default config.
		 */
		KUNIT_EXPECT_EQ(test, expect.addr[i], (char)0);

		if (!i) /* Only check the first access, so the test does not fail if the page is ever re-protected. */
			KUNIT_EXPECT_TRUE(test, report_matches(&expect));
	}
}
/* Ensure that constructors work properly. */
static void test_memcache_ctor(struct kunit *test)
{
	const size_t size = 32;
	char *buf;
	int i;

	setup_test_cache(test, size, 0, ctor_set_x);
	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);

	for (i = 0; i < 8; i++)
		KUNIT_EXPECT_EQ(test, buf[i], (char)'x');

	test_free(buf);

	KUNIT_EXPECT_FALSE(test, report_available());
}
/* Test that memory is zeroed if requested. */
static void test_gfpzero(struct kunit *test)
{
	const size_t size = PAGE_SIZE; /* PAGE_SIZE so we can use ALLOCATE_ANY. */
	char *buf1, *buf2;
	int i;

	if (CONFIG_KFENCE_SAMPLE_INTERVAL > 100) {
		kunit_warn(test, "skipping ... would take too long\n");
		return;
	}

	setup_test_cache(test, size, 0, NULL);
	buf1 = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	for (i = 0; i < size; i++)
		buf1[i] = i + 1;
	test_free(buf1);

	/* Try to get same address again -- this can take a while. */
	for (i = 0;; i++) {
		buf2 = test_alloc(test, size, GFP_KERNEL | __GFP_ZERO, ALLOCATE_ANY);
		if (buf1 == buf2)
			break;
		test_free(buf2);

		if (i == CONFIG_KFENCE_NUM_OBJECTS) {
			kunit_warn(test, "giving up ... cannot get same object back\n");
			return;
		}
	}

	for (i = 0; i < size; i++)
		KUNIT_EXPECT_EQ(test, buf2[i], (char)0);

	test_free(buf2);

	KUNIT_EXPECT_FALSE(test, report_available());
}
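/*
 * Note on ALLOCATE_ANY above (assuming KFENCE's usual object placement): with
 * size == PAGE_SIZE, the left- and right-placed objects coincide at the start
 * of the object page, so a repeated allocation that lands in the same slot
 * returns the same address regardless of side, which makes the buf1 == buf2
 * retry loop feasible.
 */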
static void test_invalid_access(struct kunit *test)
{
	const struct expect_report expect = {
		.type = KFENCE_ERROR_INVALID,
		.fn = test_invalid_access,
		.addr = &__kfence_pool[10],
		.is_write = false,
	};

	READ_ONCE(__kfence_pool[10]);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}
/* Test SLAB_TYPESAFE_BY_RCU works. */
static void test_memcache_typesafe_by_rcu(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_UAF,
		.fn = test_memcache_typesafe_by_rcu,
		.is_write = false,
	};

	setup_test_cache(test, size, SLAB_TYPESAFE_BY_RCU, NULL);
	KUNIT_EXPECT_TRUE(test, test_cache); /* Want memcache. */

	expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	*expect.addr = 42;

	rcu_read_lock();
	test_free(expect.addr);
	KUNIT_EXPECT_EQ(test, *expect.addr, (char)42);
	/*
	 * Up to this point, memory should not have been freed yet, and
	 * therefore there should be no KFENCE report from the above access.
	 */
	rcu_read_unlock();

	/* Above access to @expect.addr should not have generated a report! */
	KUNIT_EXPECT_FALSE(test, report_available());

	/* Only after rcu_barrier() is the memory guaranteed to be freed. */
	rcu_barrier();

	/* Expect use-after-free. */
	KUNIT_EXPECT_EQ(test, *expect.addr, (char)42);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}
/* Test krealloc(). */
static void test_krealloc(struct kunit *test)
{
	const size_t size = 32;
	const struct expect_report expect = {
		.type = KFENCE_ERROR_UAF,
		.fn = test_krealloc,
		.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY),
		.is_write = false,
	};
	char *buf = expect.addr;
	int i;

	KUNIT_EXPECT_FALSE(test, test_cache);
	KUNIT_EXPECT_EQ(test, ksize(buf), size); /* Precise size match after KFENCE alloc. */
	for (i = 0; i < size; i++)
		buf[i] = i + 1;

	/* Check that we successfully change the size. */
	buf = krealloc(buf, size * 3, GFP_KERNEL); /* Grow. */
	/* Note: Might no longer be a KFENCE alloc. */
	KUNIT_EXPECT_GE(test, ksize(buf), size * 3);
	for (i = 0; i < size; i++)
		KUNIT_EXPECT_EQ(test, buf[i], (char)(i + 1));
	for (; i < size * 3; i++) /* Fill the extra bytes. */
		buf[i] = i + 1;

	buf = krealloc(buf, size * 2, GFP_KERNEL); /* Shrink. */
	KUNIT_EXPECT_GE(test, ksize(buf), size * 2);
	for (i = 0; i < size * 2; i++)
		KUNIT_EXPECT_EQ(test, buf[i], (char)(i + 1));

	buf = krealloc(buf, 0, GFP_KERNEL); /* Free. */
	KUNIT_EXPECT_EQ(test, (unsigned long)buf, (unsigned long)ZERO_SIZE_PTR);
	KUNIT_ASSERT_FALSE(test, report_available()); /* No reports yet! */

	READ_ONCE(*expect.addr); /* Ensure krealloc() actually freed earlier KFENCE object. */
	KUNIT_ASSERT_TRUE(test, report_matches(&expect));
}
/* Test that some objects from a bulk allocation belong to KFENCE pool. */
static void test_memcache_alloc_bulk(struct kunit *test)
{
	const size_t size = 32;
	bool pass = false;
	unsigned long timeout;

	setup_test_cache(test, size, 0, NULL);
	KUNIT_EXPECT_TRUE(test, test_cache); /* Want memcache. */
	/*
	 * 100x the sample interval should be more than enough to ensure we get
	 * a KFENCE allocation eventually.
	 */
	timeout = jiffies + msecs_to_jiffies(100 * CONFIG_KFENCE_SAMPLE_INTERVAL);
	do {
		void *objects[100];
		int i, num = kmem_cache_alloc_bulk(test_cache, GFP_ATOMIC, ARRAY_SIZE(objects),
						   objects);
		if (!num)
			continue;
		for (i = 0; i < ARRAY_SIZE(objects); i++) {
			if (is_kfence_address(objects[i])) {
				pass = true;
				break;
			}
		}
		kmem_cache_free_bulk(test_cache, num, objects);
		/*
		 * kmem_cache_alloc_bulk() disables interrupts, and calling it
		 * in a tight loop may not give KFENCE a chance to switch the
		 * static branch. Call cond_resched() to let KFENCE chime in.
		 */
		cond_resched();
	} while (!pass && time_before(jiffies, timeout));

	KUNIT_EXPECT_TRUE(test, pass);
	KUNIT_EXPECT_FALSE(test, report_available());
}
/*
 * KUnit does not provide a way to pass arguments to tests, so we encode
 * additional info in the test name. Set up 2 tests per test case: one using
 * the default allocator, and another using a custom memcache (suffix
 * '-memcache').
 */
#define KFENCE_KUNIT_CASE(test_name)						\
	{ .run_case = test_name, .name = #test_name },				\
	{ .run_case = test_name, .name = #test_name "-memcache" }
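/*
 * For example, KFENCE_KUNIT_CASE(test_double_free) expands to the two cases
 * { .run_case = test_double_free, .name = "test_double_free" } and
 * { .run_case = test_double_free, .name = "test_double_free-memcache" };
 * test_init() below then matches the "memcache" substring to set
 * TEST_PRIV_WANT_MEMCACHE.
 */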
static struct kunit_case kfence_test_cases[] = {
	KFENCE_KUNIT_CASE(test_out_of_bounds_read),
	KFENCE_KUNIT_CASE(test_out_of_bounds_write),
	KFENCE_KUNIT_CASE(test_use_after_free_read),
	KFENCE_KUNIT_CASE(test_double_free),
	KFENCE_KUNIT_CASE(test_invalid_addr_free),
	KFENCE_KUNIT_CASE(test_corruption),
	KFENCE_KUNIT_CASE(test_free_bulk),
	KFENCE_KUNIT_CASE(test_init_on_free),
	KUNIT_CASE(test_kmalloc_aligned_oob_read),
	KUNIT_CASE(test_kmalloc_aligned_oob_write),
	KUNIT_CASE(test_shrink_memcache),
	KUNIT_CASE(test_memcache_ctor),
	KUNIT_CASE(test_invalid_access),
	KUNIT_CASE(test_gfpzero),
	KUNIT_CASE(test_memcache_typesafe_by_rcu),
	KUNIT_CASE(test_krealloc),
	KUNIT_CASE(test_memcache_alloc_bulk),
	{},
};

/* ===== End test cases ===== */
static int test_init(struct kunit *test)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&observed.lock, flags);
	for (i = 0; i < ARRAY_SIZE(observed.lines); i++)
		observed.lines[i][0] = '\0';
	observed.nlines = 0;
	spin_unlock_irqrestore(&observed.lock, flags);

	/* Any test with 'memcache' in its name will want a memcache. */
	if (strstr(test->name, "memcache"))
		test->priv = TEST_PRIV_WANT_MEMCACHE;

	return 0;
}

static void test_exit(struct kunit *test)
{
	test_cache_destroy();
}
static struct kunit_suite kfence_test_suite = {
	.name = "kfence",
	.test_cases = kfence_test_cases,
	.init = test_init,
	.exit = test_exit,
};

static struct kunit_suite *kfence_test_suites[] = { &kfence_test_suite, NULL };
static void register_tracepoints(struct tracepoint *tp, void *ignore)
{
	check_trace_callback_type_console(probe_console);
	if (!strcmp(tp->name, "console"))
		WARN_ON(tracepoint_probe_register(tp, probe_console, NULL));
}

static void unregister_tracepoints(struct tracepoint *tp, void *ignore)
{
	if (!strcmp(tp->name, "console"))
		tracepoint_probe_unregister(tp, probe_console, NULL);
}
/*
 * We only want to do tracepoints setup and teardown once, therefore we have to
 * customize the init and exit functions and cannot rely on kunit_test_suite().
 */
static int __init kfence_test_init(void)
{
	/*
	 * Because we want to be able to build the test as a module, we need to
	 * iterate through all known tracepoints, since the static registration
	 * does not work here.
	 */
	for_each_kernel_tracepoint(register_tracepoints, NULL);
	return __kunit_test_suites_init(kfence_test_suites);
}

static void kfence_test_exit(void)
{
	__kunit_test_suites_exit(kfence_test_suites);
	for_each_kernel_tracepoint(unregister_tracepoints, NULL);
	tracepoint_synchronize_unregister();
}

late_initcall(kfence_test_init);
module_exit(kfence_test_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Alexander Potapenko <glider@google.com>, Marco Elver <elver@google.com>");