mm/workingset.c
/*
 * Workingset detection
 *
 * Copyright (C) 2013 Red Hat, Inc., Johannes Weiner
 */

#include <linux/memcontrol.h>
#include <linux/writeback.h>
#include <linux/shmem_fs.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>

/*
 *                Double CLOCK lists
 *
 * Per node, two clock lists are maintained for file pages: the
 * inactive and the active list. Freshly faulted pages start out at
 * the head of the inactive list and page reclaim scans pages from the
 * tail. Pages that are accessed multiple times on the inactive list
 * are promoted to the active list, to protect them from reclaim,
 * whereas active pages are demoted to the inactive list when the
 * active list grows too big.
 *
 *   fault ------------------------+
 *                                 |
 *              +--------------+   |            +-------------+
 *   reclaim <- |   inactive   | <-+-- demotion |    active   | <--+
 *              +--------------+                +-------------+    |
 *                     |                                           |
 *                     +-------------- promotion ------------------+
 *
 *
 *                Access frequency and refault distance
 *
 * A workload is thrashing when its pages are frequently used but they
 * are evicted from the inactive list every time before another access
 * would have promoted them to the active list.
 *
 * In cases where the average access distance between thrashing pages
 * is bigger than the size of memory there is nothing that can be
 * done - the thrashing set could never fit into memory under any
 * circumstance.
 *
 * However, the average access distance could be bigger than the
 * inactive list, yet smaller than the size of memory. In this case,
 * the set could fit into memory if it weren't for the currently
 * active pages - which may be used more, but hopefully less, frequently:
 *
 *      +-memory available to cache-+
 *      |                           |
 *      +-inactive------+-active----+
 *  a b | c d e f g h i | J K L M N |
 *      +---------------+-----------+
 *
 * It is prohibitively expensive to accurately track access frequency
 * of pages. But a reasonable approximation can be made to measure
 * thrashing on the inactive list, after which refaulting pages can be
 * activated optimistically to compete with the existing active pages.
 *
 * Approximating inactive page access frequency - Observations:
 *
 * 1. When a page is accessed for the first time, it is added to the
 *    head of the inactive list, slides every existing inactive page
 *    towards the tail by one slot, and pushes the current tail page
 *    out of memory.
 *
 * 2. When a page is accessed for the second time, it is promoted to
 *    the active list, shrinking the inactive list by one slot. This
 *    also slides all inactive pages that were faulted into the cache
 *    more recently than the activated page towards the tail of the
 *    inactive list.
 *
 * Thus:
 *
 * 1. The sum of evictions and activations between any two points in
 *    time indicates the minimum number of inactive pages accessed in
 *    between.
 *
 * 2. Moving one inactive page N page slots towards the tail of the
 *    list requires at least N inactive page accesses.
 *
 * Combining these:
 *
 * 1. When a page is finally evicted from memory, the number of
 *    inactive pages accessed while the page was in cache is at least
 *    the number of page slots on the inactive list.
 *
 * 2. In addition, measuring the sum of evictions and activations (E)
 *    at the time of a page's eviction, and comparing it to another
 *    reading (R) at the time the page faults back into memory tells
 *    the minimum number of accesses while the page was not cached.
 *    This is called the refault distance.
 *
 * Because the first access of the page was the fault and the second
 * access the refault, we combine the in-cache distance with the
 * out-of-cache distance to get the complete minimum access distance
 * of this page:
 *
 *      NR_inactive + (R - E)
 *
 * And knowing the minimum access distance of a page, we can easily
 * tell if the page would be able to stay in cache assuming all page
 * slots in the cache were available:
 *
 *      NR_inactive + (R - E) <= NR_inactive + NR_active
 *
 * which can be further simplified to
 *
 *      (R - E) <= NR_active
 *
 * Put into words, the refault distance (out-of-cache) can be seen as
 * a deficit in inactive list space (in-cache). If the inactive list
 * had (R - E) more page slots, the page would not have been evicted
 * in between accesses, but activated instead. And on a full system,
 * the only thing eating into inactive list space is active pages.
 *
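 * As an illustration with made-up numbers: suppose the inactive list
 * holds 1000 pages, a page is evicted when the eviction/activation
 * counter reads E = 5000, and it refaults when the counter reads
 * R = 5400. Its refault distance is R - E = 400: at least 400 other
 * inactive pages were accessed while it was out of cache. If the
 * active list holds 400 or more pages, the page's complete access
 * distance of 1000 + 400 fits within the cache, so the refaulting
 * page is activated; with a smaller active list it stays inactive.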
 *
 *                Activating refaulting pages
 *
 * All that is known about the active list is that the pages have been
 * accessed more than once in the past. This means that at any given
 * time there is actually a good chance that pages on the active list
 * are no longer in active use.
 *
 * So when a refault distance of (R - E) is observed and there are at
 * least (R - E) active pages, the refaulting page is activated
 * optimistically in the hope that (R - E) active pages are actually
 * used less frequently than the refaulting page - or even not used at
 * all anymore.
 *
 * If this is wrong and demotion kicks in, the pages which are truly
 * used more frequently will be reactivated while the less frequently
 * used ones will be evicted from memory.
 *
 * But if this is right, the stale pages will be pushed out of memory
 * and the used pages get to stay in cache.
 *
 *
 *                Implementation
 *
 * For each node's file LRU lists, a counter for inactive evictions
 * and activations is maintained (node->inactive_age).
 *
 * On eviction, a snapshot of this counter (along with some bits to
 * identify the node) is stored in the now empty page cache radix tree
 * slot of the evicted page. This is called a shadow entry.
 *
 * On cache misses for which there are shadow entries, an eligible
 * refault distance will immediately activate the refaulting page.
 */

#define EVICTION_SHIFT  (RADIX_TREE_EXCEPTIONAL_ENTRY + \
                         NODES_SHIFT + \
                         MEM_CGROUP_ID_SHIFT)
#define EVICTION_MASK   (~0UL >> EVICTION_SHIFT)

/*
 * Eviction timestamps need to be able to cover the full range of
 * actionable refaults. However, bits are tight in the radix tree
 * entry, and after storing the identifier for the lruvec there might
 * not be enough left to represent every single actionable refault. In
 * that case, we have to sacrifice granularity for distance, and group
 * evictions into coarser buckets by shaving off lower timestamp bits.
 */
static unsigned int bucket_order __read_mostly;

static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction)
{
        eviction >>= bucket_order;
        eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
        eviction = (eviction << NODES_SHIFT) | pgdat->node_id;
        eviction = (eviction << RADIX_TREE_EXCEPTIONAL_SHIFT);

        return (void *)(eviction | RADIX_TREE_EXCEPTIONAL_ENTRY);
}

static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
                          unsigned long *evictionp)
{
        unsigned long entry = (unsigned long)shadow;
        int memcgid, nid;

        entry >>= RADIX_TREE_EXCEPTIONAL_SHIFT;
        nid = entry & ((1UL << NODES_SHIFT) - 1);
        entry >>= NODES_SHIFT;
        memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);
        entry >>= MEM_CGROUP_ID_SHIFT;

        *memcgidp = memcgid;
        *pgdat = NODE_DATA(nid);
        *evictionp = entry << bucket_order;
}
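
/*
 * For a concrete (illustrative, config-dependent) picture of what
 * pack_shadow() produces on a 64-bit kernel with NODES_SHIFT == 10
 * and MEM_CGROUP_ID_SHIFT == 16, i.e. EVICTION_SHIFT == 28:
 *
 *   bits 63..28   eviction counter >> bucket_order (36 bits)
 *   bits 27..12   memcg ID
 *   bits 11..2    node ID
 *   bits  1..0    radix tree exceptional-entry tag
 *
 * unpack_shadow() reverses this, except that the bucket_order low
 * bits of the eviction timestamp are lost to the bucketing.
 */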

/**
 * workingset_eviction - note the eviction of a page from memory
 * @mapping: address space the page was backing
 * @page: the page being evicted
 *
 * Returns a shadow entry to be stored in @mapping->page_tree in place
 * of the evicted @page so that a later refault can be detected.
 */
void *workingset_eviction(struct address_space *mapping, struct page *page)
{
        struct mem_cgroup *memcg = page_memcg(page);
        struct pglist_data *pgdat = page_pgdat(page);
        int memcgid = mem_cgroup_id(memcg);
        unsigned long eviction;
        struct lruvec *lruvec;

        /* Page is fully exclusive and pins page->mem_cgroup */
        VM_BUG_ON_PAGE(PageLRU(page), page);
        VM_BUG_ON_PAGE(page_count(page), page);
        VM_BUG_ON_PAGE(!PageLocked(page), page);

        lruvec = mem_cgroup_lruvec(pgdat, memcg);
        eviction = atomic_long_inc_return(&lruvec->inactive_age);
        return pack_shadow(memcgid, pgdat, eviction);
}

/**
 * workingset_refault - evaluate the refault of a previously evicted page
 * @shadow: shadow entry of the evicted page
 *
 * Calculates and evaluates the refault distance of the previously
 * evicted page in the context of the node it was allocated in.
 *
 * Returns %true if the page should be activated, %false otherwise.
 */
bool workingset_refault(void *shadow)
{
        unsigned long refault_distance;
        unsigned long active_file;
        struct mem_cgroup *memcg;
        unsigned long eviction;
        struct lruvec *lruvec;
        unsigned long refault;
        struct pglist_data *pgdat;
        int memcgid;

        unpack_shadow(shadow, &memcgid, &pgdat, &eviction);

        rcu_read_lock();
        /*
         * Look up the memcg associated with the stored ID. It might
         * have been deleted since the page's eviction.
         *
         * Note that in rare events the ID could have been recycled
         * for a new cgroup that refaults a shared page. This is
         * impossible to tell from the available data. However, this
         * should be a rare and limited disturbance, and activations
         * are always speculative anyway. Ultimately, it's the aging
         * algorithm's job to shake out the minimum access frequency
         * for the active cache.
         *
         * XXX: On !CONFIG_MEMCG, this will always return NULL; it
         * would be better if the root_mem_cgroup existed in all
         * configurations instead.
         */
        memcg = mem_cgroup_from_id(memcgid);
        if (!mem_cgroup_disabled() && !memcg) {
                rcu_read_unlock();
                return false;
        }
        lruvec = mem_cgroup_lruvec(pgdat, memcg);
        refault = atomic_long_read(&lruvec->inactive_age);
        active_file = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE, MAX_NR_ZONES);

        /*
         * The unsigned subtraction here gives an accurate distance
         * across inactive_age overflows in most cases.
         *
         * There is a special case: usually, shadow entries have a
         * short lifetime and are either refaulted or reclaimed along
         * with the inode before they get too old. But it is not
         * impossible for the inactive_age to lap a shadow entry in
         * the field, which can then result in a false small
         * refault distance, leading to a false activation should this
         * old entry actually refault again. However, earlier kernels
         * used to deactivate unconditionally with *every* reclaim
         * invocation for the longest time, so the occasional
         * inappropriate activation leading to pressure on the active
         * list is not a problem.
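         *
         * As a made-up illustration (assuming bucket_order == 0 and
         * 36 timestamp bits, so EVICTION_MASK == 0xfffffffff): a page
         * evicted at a truncated timestamp of 0xffffffff0 that
         * refaults when the counter reads 0x10 gives
         * (0x10 - 0xffffffff0) & EVICTION_MASK == 0x20, i.e. the
         * correct distance of 32 events even though the timestamp
         * bits wrapped in between.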
         */
        refault_distance = (refault - eviction) & EVICTION_MASK;

        inc_node_state(pgdat, WORKINGSET_REFAULT);
        inc_memcg_state(memcg, WORKINGSET_REFAULT);

        if (refault_distance <= active_file) {
                inc_node_state(pgdat, WORKINGSET_ACTIVATE);
                inc_memcg_state(memcg, WORKINGSET_ACTIVATE);
                rcu_read_unlock();
                return true;
        }
        rcu_read_unlock();
        return false;
}

/**
 * workingset_activation - note a page activation
 * @page: page that is being activated
 */
void workingset_activation(struct page *page)
{
        struct mem_cgroup *memcg;
        struct lruvec *lruvec;

        rcu_read_lock();
        /*
         * Filter non-memcg pages here, e.g. unmap can call
         * mark_page_accessed() on VDSO pages.
         *
         * XXX: See workingset_refault() - this should return
         * root_mem_cgroup even for !CONFIG_MEMCG.
         */
        memcg = page_memcg_rcu(page);
        if (!mem_cgroup_disabled() && !memcg)
                goto out;
        lruvec = mem_cgroup_lruvec(page_pgdat(page), memcg);
        atomic_long_inc(&lruvec->inactive_age);
out:
        rcu_read_unlock();
}

/*
 * Shadow entries reflect the share of the working set that does not
 * fit into memory, so their number depends on the access pattern of
 * the workload. In most cases, they will refault or get reclaimed
 * along with the inode, but a (malicious) workload that streams
 * through files with a total size several times that of available
 * memory, while preventing the inodes from being reclaimed, can
 * create excessive amounts of shadow nodes. To keep a lid on this,
 * track shadow nodes and reclaim them when they grow way past the
 * point where they would still be useful.
 */

static struct list_lru shadow_nodes;

void workingset_update_node(struct radix_tree_node *node, void *private)
{
        struct address_space *mapping = private;

        /* Only regular page cache has shadow entries */
        if (dax_mapping(mapping) || shmem_mapping(mapping))
                return;

        /*
         * Track non-empty nodes that contain only shadow entries;
         * unlink those that contain pages or are being freed.
         *
         * Avoid acquiring the list_lru lock when the nodes are
         * already where they should be. The list_empty() test is safe
         * as node->private_list is protected by &mapping->tree_lock.
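         *
         * For illustration: a node whose count and exceptional are
         * both, say, 3 holds nothing but shadow entries and is put on
         * the LRU so the shrinker can find it; as soon as a real page
         * is inserted (count > exceptional) or the node empties
         * completely, it is taken back off the list.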
         */
        if (node->count && node->count == node->exceptional) {
                if (list_empty(&node->private_list))
                        list_lru_add(&shadow_nodes, &node->private_list);
        } else {
                if (!list_empty(&node->private_list))
                        list_lru_del(&shadow_nodes, &node->private_list);
        }
}

static unsigned long count_shadow_nodes(struct shrinker *shrinker,
                                        struct shrink_control *sc)
{
        unsigned long max_nodes;
        unsigned long nodes;
        unsigned long cache;

        /* list_lru lock nests inside IRQ-safe mapping->tree_lock */
        local_irq_disable();
        nodes = list_lru_shrink_count(&shadow_nodes, sc);
        local_irq_enable();

        /*
         * Approximate a reasonable limit for the radix tree nodes
         * containing shadow entries. We don't need to keep more
         * shadow entries than possible pages on the active list,
         * since refault distances bigger than that are dismissed.
         *
         * The size of the active list converges toward 100% of
         * overall page cache as memory grows, with only a tiny
         * inactive list. Assume the total cache size for that.
         *
         * Nodes might be sparsely populated, with only one shadow
         * entry in the extreme case. Obviously, we cannot keep one
         * node for every eligible shadow entry, so compromise on a
         * worst-case density of 1/8th. Below that, not all eligible
         * refaults can be detected anymore.
         *
         * On 64-bit with 7 radix_tree_nodes per page and 64 slots
         * each, this will reclaim shadow entries when they consume
         * ~1.8% of available memory:
         *
         *      PAGE_SIZE / radix_tree_nodes / node_entries * 8 / PAGE_SIZE
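         *
         * Worked through with illustrative numbers (4096-byte pages,
         * 64-slot nodes): 4096 / 7 / 64 * 8 / 4096 = 8 / 448 ~= 1.8%.
         * Equivalently, a NUMA node with 1M file pages allows up to
         * max_nodes = 1M >> 3 = 128k radix tree nodes, or roughly 18k
         * pages worth of nodes for 1M pages of cache.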
         */
        if (sc->memcg) {
                cache = mem_cgroup_node_nr_lru_pages(sc->memcg, sc->nid,
                                                     LRU_ALL_FILE);
        } else {
                cache = node_page_state(NODE_DATA(sc->nid), NR_ACTIVE_FILE) +
                        node_page_state(NODE_DATA(sc->nid), NR_INACTIVE_FILE);
        }
        max_nodes = cache >> (RADIX_TREE_MAP_SHIFT - 3);

        if (nodes <= max_nodes)
                return 0;
        return nodes - max_nodes;
}

static enum lru_status shadow_lru_isolate(struct list_head *item,
                                          struct list_lru_one *lru,
                                          spinlock_t *lru_lock,
                                          void *arg)
{
        struct address_space *mapping;
        struct radix_tree_node *node;
        unsigned int i;
        int ret;

        /*
         * Page cache insertions and deletions synchronously maintain
         * the shadow node LRU under the mapping->tree_lock and the
         * lru_lock. Because the page cache tree is emptied before
         * the inode can be destroyed, holding the lru_lock pins any
         * address_space that has radix tree nodes on the LRU.
         *
         * We can then safely transition to the mapping->tree_lock to
         * pin only the address_space of the particular node we want
         * to reclaim, take the node off-LRU, and drop the lru_lock.
         */

        node = container_of(item, struct radix_tree_node, private_list);
        mapping = container_of(node->root, struct address_space, page_tree);

        /* Coming from the list, invert the lock order */
        if (!spin_trylock(&mapping->tree_lock)) {
                spin_unlock(lru_lock);
                ret = LRU_RETRY;
                goto out;
        }

        list_lru_isolate(lru, item);
        spin_unlock(lru_lock);

        /*
         * The nodes should only contain one or more shadow entries,
         * no pages, so we expect to be able to remove them all and
         * delete and free the empty node afterwards.
         */
        if (WARN_ON_ONCE(!node->exceptional))
                goto out_invalid;
        if (WARN_ON_ONCE(node->count != node->exceptional))
                goto out_invalid;
        for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
                if (node->slots[i]) {
                        if (WARN_ON_ONCE(!radix_tree_exceptional_entry(node->slots[i])))
                                goto out_invalid;
                        if (WARN_ON_ONCE(!node->exceptional))
                                goto out_invalid;
                        if (WARN_ON_ONCE(!mapping->nrexceptional))
                                goto out_invalid;
                        node->slots[i] = NULL;
                        node->exceptional--;
                        node->count--;
                        mapping->nrexceptional--;
                }
        }
        if (WARN_ON_ONCE(node->exceptional))
                goto out_invalid;
        inc_node_state(page_pgdat(virt_to_page(node)), WORKINGSET_NODERECLAIM);
        inc_memcg_page_state(virt_to_page(node), WORKINGSET_NODERECLAIM);
        __radix_tree_delete_node(&mapping->page_tree, node,
                                 workingset_update_node, mapping);

out_invalid:
        spin_unlock(&mapping->tree_lock);
        ret = LRU_REMOVED_RETRY;
out:
        local_irq_enable();
        cond_resched();
        local_irq_disable();
        spin_lock(lru_lock);
        return ret;
}

static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
                                       struct shrink_control *sc)
{
        unsigned long ret;

        /* list_lru lock nests inside IRQ-safe mapping->tree_lock */
        local_irq_disable();
        ret = list_lru_shrink_walk(&shadow_nodes, sc, shadow_lru_isolate, NULL);
        local_irq_enable();
        return ret;
}

static struct shrinker workingset_shadow_shrinker = {
        .count_objects = count_shadow_nodes,
        .scan_objects = scan_shadow_nodes,
        .seeks = DEFAULT_SEEKS,
        .flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
};

/*
 * Our list_lru->lock is IRQ-safe as it nests inside the IRQ-safe
 * mapping->tree_lock.
 */
static struct lock_class_key shadow_nodes_key;

static int __init workingset_init(void)
{
        unsigned int timestamp_bits;
        unsigned int max_order;
        int ret;

        BUILD_BUG_ON(BITS_PER_LONG < EVICTION_SHIFT);
        /*
         * Calculate the eviction bucket size to cover the longest
         * actionable refault distance, which is currently half of
         * memory (totalram_pages/2). However, memory hotplug may add
         * some more pages at runtime, so keep working with up to
         * double the initial memory by using totalram_pages as-is.
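         *
         * As a made-up example: with an EVICTION_SHIFT of 28 on a
         * 64-bit kernel, timestamp_bits is 36. A machine with 64G of
         * 4k pages has totalram_pages = 16M, so max_order = 24 and
         * bucket_order stays 0 - every eviction keeps its exact
         * timestamp. Only when max_order exceeds timestamp_bits are
         * evictions grouped into buckets of 2^bucket_order events.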
         */
        timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT;
        max_order = fls_long(totalram_pages - 1);
        if (max_order > timestamp_bits)
                bucket_order = max_order - timestamp_bits;
        pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
                timestamp_bits, max_order, bucket_order);

        ret = __list_lru_init(&shadow_nodes, true, &shadow_nodes_key);
        if (ret)
                goto err;
        ret = register_shrinker(&workingset_shadow_shrinker);
        if (ret)
                goto err_list_lru;
        return 0;
err_list_lru:
        list_lru_destroy(&shadow_nodes);
err:
        return ret;
}
module_init(workingset_init);