// SPDX-License-Identifier: GPL-2.0
/*
 * KASAN quarantine.
 *
 * Author: Alexander Potapenko <glider@google.com>
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on code by Dmitry Chernenkov.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 */
#include <linux/gfp.h>
#include <linux/hash.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/shrinker.h>
#include <linux/slab.h>
#include <linux/srcu.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/cpuhotplug.h>

#include "../slab.h"
#include "kasan.h"
/* Data structure and operations for quarantine queues. */

/*
 * Each queue is a singly linked list, which also stores the total size of
 * objects inside of it.
 */
struct qlist_head {
	struct qlist_node *head;
	struct qlist_node *tail;
	size_t bytes;
	bool offline;
};

#define QLIST_INIT { NULL, NULL, 0 }
static bool qlist_empty(struct qlist_head *q)
{
	return !q->head;
}

static void qlist_init(struct qlist_head *q)
{
	q->head = q->tail = NULL;
	q->bytes = 0;
}
static void qlist_put(struct qlist_head *q, struct qlist_node *qlink,
		size_t size)
{
	if (unlikely(qlist_empty(q)))
		q->head = qlink;
	else
		q->tail->next = qlink;
	q->tail = qlink;
	qlink->next = NULL;
	q->bytes += size;
}
static void qlist_move_all(struct qlist_head *from, struct qlist_head *to)
{
	if (unlikely(qlist_empty(from)))
		return;

	if (qlist_empty(to)) {
		*to = *from;
		qlist_init(from);
		return;
	}

	to->tail->next = from->head;
	to->tail = from->tail;
	to->bytes += from->bytes;

	qlist_init(from);
}
#define QUARANTINE_PERCPU_SIZE (1 << 20)
#define QUARANTINE_BATCHES \
	(1024 > 4 * CONFIG_NR_CPUS ? 1024 : 4 * CONFIG_NR_CPUS)
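/*
 * Illustrative numbers (editorial, not from the original source): each
 * per-cpu queue holds up to 1 MB of objects, and with, say,
 * CONFIG_NR_CPUS=64 the global quarantine is split into
 * max(1024, 4 * 64) = 1024 batches.
 */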
/*
 * The object quarantine consists of per-cpu queues and a global queue,
 * guarded by quarantine_lock.
 */
static DEFINE_PER_CPU(struct qlist_head, cpu_quarantine);

/* Round-robin FIFO array of batches. */
static struct qlist_head global_quarantine[QUARANTINE_BATCHES];
static int quarantine_head;
static int quarantine_tail;
/* Total size of all objects in global_quarantine across all batches. */
static unsigned long quarantine_size;
static DEFINE_RAW_SPINLOCK(quarantine_lock);
DEFINE_STATIC_SRCU(remove_cache_srcu);

/* Maximum size of the global queue. */
static unsigned long quarantine_max_size;

/*
 * Target size of a batch in global_quarantine.
 * Usually equal to QUARANTINE_PERCPU_SIZE unless we have too much RAM.
 */
static unsigned long quarantine_batch_size;

/*
 * The fraction of physical memory the quarantine is allowed to occupy.
 * Quarantine doesn't support memory shrinker with SLAB allocator, so we keep
 * the ratio low to avoid OOM.
 */
#define QUARANTINE_FRACTION 32
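/*
 * Worked example (illustrative numbers only): on a machine with 8 GB of
 * RAM and 4 online CPUs, quarantine_reduce() below sets
 * quarantine_max_size to 8 GB / 32 - 4 * 1 MB ~= 252 MB, i.e. the
 * quarantine's share of RAM minus the per-cpu queue limits.
 */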
static struct kmem_cache *qlink_to_cache(struct qlist_node *qlink)
{
	return virt_to_head_page(qlink)->slab_cache;
}
static void *qlink_to_object(struct qlist_node *qlink, struct kmem_cache *cache)
{
	struct kasan_free_meta *free_info =
		container_of(qlink, struct kasan_free_meta,
			     quarantine_link);

	return ((void *)free_info) - cache->kasan_info.free_meta_offset;
}
static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
{
	void *object = qlink_to_object(qlink, cache);
	unsigned long flags;

	if (IS_ENABLED(CONFIG_SLAB))
		local_irq_save(flags);

	*(u8 *)kasan_mem_to_shadow(object) = KASAN_KMALLOC_FREE;
	___cache_free(cache, object, _THIS_IP_);

	if (IS_ENABLED(CONFIG_SLAB))
		local_irq_restore(flags);
}
static void qlist_free_all(struct qlist_head *q, struct kmem_cache *cache)
{
	struct qlist_node *qlink;

	if (unlikely(qlist_empty(q)))
		return;

	qlink = q->head;
	while (qlink) {
		struct kmem_cache *obj_cache =
			cache ? cache : qlink_to_cache(qlink);
		struct qlist_node *next = qlink->next;

		qlink_free(qlink, obj_cache);
		qlink = next;
	}
	qlist_init(q);
}
void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache)
{
	unsigned long flags;
	struct qlist_head *q;
	struct qlist_head temp = QLIST_INIT;

	/*
	 * Note: irq must be disabled until after we move the batch to the
	 * global quarantine. Otherwise quarantine_remove_cache() can miss
	 * some objects belonging to the cache if they are in our local temp
	 * list. quarantine_remove_cache() executes on_each_cpu() at the
	 * beginning which ensures that it either sees the objects in per-cpu
	 * lists or in the global quarantine.
	 */
	local_irq_save(flags);

	q = this_cpu_ptr(&cpu_quarantine);
	if (q->offline) {
		local_irq_restore(flags);
		return;
	}
	qlist_put(q, &info->quarantine_link, cache->size);
	if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE)) {
		qlist_move_all(q, &temp);

		raw_spin_lock(&quarantine_lock);
		WRITE_ONCE(quarantine_size, quarantine_size + temp.bytes);
		qlist_move_all(&temp, &global_quarantine[quarantine_tail]);
		if (global_quarantine[quarantine_tail].bytes >=
				READ_ONCE(quarantine_batch_size)) {
			int new_tail;

			new_tail = quarantine_tail + 1;
			if (new_tail == QUARANTINE_BATCHES)
				new_tail = 0;
			if (new_tail != quarantine_head)
				quarantine_tail = new_tail;
		}
		raw_spin_unlock(&quarantine_lock);
	}

	local_irq_restore(flags);
}
void quarantine_reduce(void)
{
	size_t total_size, new_quarantine_size, percpu_quarantines;
	unsigned long flags;
	int srcu_idx;
	struct qlist_head to_free = QLIST_INIT;

	if (likely(READ_ONCE(quarantine_size) <=
		   READ_ONCE(quarantine_max_size)))
		return;

	/*
	 * srcu critical section ensures that quarantine_remove_cache()
	 * will not miss objects belonging to the cache while they are in our
	 * local to_free list. srcu is chosen because (1) it gives us a private
	 * grace period domain that does not interfere with anything else,
	 * and (2) it allows synchronize_srcu() to return without waiting
	 * if there are no pending read critical sections (which is the
	 * expected case).
	 */
	srcu_idx = srcu_read_lock(&remove_cache_srcu);
	raw_spin_lock_irqsave(&quarantine_lock, flags);

	/*
	 * Update quarantine size in case of hotplug. Allocate a fraction of
	 * the installed memory to quarantine minus per-cpu queue limits.
	 */
	total_size = (totalram_pages() << PAGE_SHIFT) /
		QUARANTINE_FRACTION;
	percpu_quarantines = QUARANTINE_PERCPU_SIZE * num_online_cpus();
	new_quarantine_size = (total_size < percpu_quarantines) ?
		0 : total_size - percpu_quarantines;
	WRITE_ONCE(quarantine_max_size, new_quarantine_size);
	/* Aim at consuming at most 1/2 of slots in quarantine. */
	WRITE_ONCE(quarantine_batch_size, max((size_t)QUARANTINE_PERCPU_SIZE,
					      2 * total_size / QUARANTINE_BATCHES));

	if (likely(quarantine_size > quarantine_max_size)) {
		qlist_move_all(&global_quarantine[quarantine_head], &to_free);
		WRITE_ONCE(quarantine_size, quarantine_size - to_free.bytes);
		quarantine_head++;
		if (quarantine_head == QUARANTINE_BATCHES)
			quarantine_head = 0;
	}

	raw_spin_unlock_irqrestore(&quarantine_lock, flags);

	qlist_free_all(&to_free, NULL);
	srcu_read_unlock(&remove_cache_srcu, srcu_idx);
}
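/*
 * Design note (editorial inference, not from the original source): only
 * the head batch is drained per call, which keeps the time spent freeing
 * from an allocation path bounded rather than flushing the whole
 * quarantine at once.
 */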
static void qlist_move_cache(struct qlist_head *from,
				   struct qlist_head *to,
				   struct kmem_cache *cache)
{
	struct qlist_node *curr;

	if (unlikely(qlist_empty(from)))
		return;

	curr = from->head;
	qlist_init(from);
	while (curr) {
		struct qlist_node *next = curr->next;
		struct kmem_cache *obj_cache = qlink_to_cache(curr);

		if (obj_cache == cache)
			qlist_put(to, curr, obj_cache->size);
		else
			qlist_put(from, curr, obj_cache->size);

		curr = next;
	}
}
static void per_cpu_remove_cache(void *arg)
{
	struct kmem_cache *cache = arg;
	struct qlist_head to_free = QLIST_INIT;
	struct qlist_head *q;

	q = this_cpu_ptr(&cpu_quarantine);
	qlist_move_cache(q, &to_free, cache);
	qlist_free_all(&to_free, cache);
}
/* Free all quarantined objects belonging to cache. */
void quarantine_remove_cache(struct kmem_cache *cache)
{
	unsigned long flags, i;
	struct qlist_head to_free = QLIST_INIT;

	/*
	 * Must be careful to not miss any objects that are being moved from
	 * per-cpu list to the global quarantine in quarantine_put(),
	 * nor objects being freed in quarantine_reduce(). on_each_cpu()
	 * achieves the first goal, while synchronize_srcu() achieves the
	 * second.
	 */
	on_each_cpu(per_cpu_remove_cache, cache, 1);

	raw_spin_lock_irqsave(&quarantine_lock, flags);
	for (i = 0; i < QUARANTINE_BATCHES; i++) {
		if (qlist_empty(&global_quarantine[i]))
			continue;
		qlist_move_cache(&global_quarantine[i], &to_free, cache);
		/* Scanning whole quarantine can take a while. */
		raw_spin_unlock_irqrestore(&quarantine_lock, flags);
		cond_resched();
		raw_spin_lock_irqsave(&quarantine_lock, flags);
	}
	raw_spin_unlock_irqrestore(&quarantine_lock, flags);

	qlist_free_all(&to_free, cache);

	synchronize_srcu(&remove_cache_srcu);
}
static int kasan_cpu_online(unsigned int cpu)
{
	this_cpu_ptr(&cpu_quarantine)->offline = false;
	return 0;
}

static int kasan_cpu_offline(unsigned int cpu)
{
	struct qlist_head *q;

	q = this_cpu_ptr(&cpu_quarantine);
	/* Ensure the ordering between the write to q->offline and
	 * qlist_free_all. Otherwise, cpu_quarantine may be corrupted
	 * by interrupt.
	 */
	WRITE_ONCE(q->offline, true);
	barrier();
	qlist_free_all(q, NULL);
	return 0;
}
static int __init kasan_cpu_quarantine_init(void)
{
	int ret = 0;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mm/kasan:online",
				kasan_cpu_online, kasan_cpu_offline);
	if (ret < 0)
		pr_err("kasan cpu quarantine register failed [%d]\n", ret);
	return ret;
}
late_initcall(kasan_cpu_quarantine_init);