/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2018, Joyent, Inc.
 * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
 * Copyright (c) 2014 by Saso Kiselkov. All rights reserved.
 * Copyright 2017 Nexenta Systems, Inc. All rights reserved.
 */

#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/spa_impl.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/refcount.h>
#include <sys/vdev.h>
#include <sys/vdev_trim.h>
#include <sys/vdev_impl.h>
#include <sys/dsl_pool.h>
#include <sys/multilist.h>
#include <sys/abd.h>
#include <sys/zil.h>
#include <sys/fm/fs/zfs.h>
#ifdef _KERNEL
#include <sys/shrinker.h>
#include <sys/vmsystm.h>
#include <sys/zpl.h>
#include <linux/page_compat.h>
#endif
#include <sys/callb.h>
#include <sys/kstat.h>
#include <sys/zthr.h>
#include <zfs_fletcher.h>
#include <sys/arc_impl.h>
#include <sys/trace_zfs.h>
#include <sys/aggsum.h>

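/*
 * Result of the most recent arc_available_memory() call: the lowest amount
 * of free memory found across all checks, and the reason it was the
 * limiting factor.
 */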
int64_t last_free_memory;
free_memory_reason_t last_free_reason;

/*
 * Return a default max arc size based on the amount of physical memory.
 */
uint64_t
arc_default_max(uint64_t min, uint64_t allmem)
{
	/* Default to 1/2 of all memory. */
	return (MAX(allmem / 2, min));
}

#ifdef _KERNEL
/*
 * Return the maximum amount of memory that we could possibly use.  Reduced
 * to half of all memory in user space, which is primarily used for testing.
 */
uint64_t
arc_all_memory(void)
{
#ifdef CONFIG_HIGHMEM
	return (ptob(zfs_totalram_pages - zfs_totalhigh_pages));
#else
	return (ptob(zfs_totalram_pages));
#endif /* CONFIG_HIGHMEM */
}

/*
 * Return the amount of memory that is considered free.  In user space,
 * which is primarily used for testing, we pretend that free memory
 * ranges from 0-20% of all memory.
 */
uint64_t
arc_free_memory(void)
{
#ifdef CONFIG_HIGHMEM
	struct sysinfo si;
	si_meminfo(&si);
	return (ptob(si.freeram - si.freehigh));
#else
	return (ptob(nr_free_pages() +
	    nr_inactive_file_pages() +
	    nr_inactive_anon_pages() +
	    nr_slab_reclaimable_pages()));
#endif /* CONFIG_HIGHMEM */
}

/*
 * Additional reserve of pages for pp_reserve.
 */
int64_t arc_pages_pp_reserve = 64;

/*
 * Additional reserve of pages for swapfs.
 */
int64_t arc_swapfs_reserve = 64;

/*
 * Return the amount of memory that can be consumed before reclaim will be
 * needed.  A positive value means there is sufficient free memory; a
 * negative value indicates the amount of memory that needs to be freed up.
 */
int64_t
arc_available_memory(void)
{
	int64_t lowest = INT64_MAX;
	free_memory_reason_t r = FMR_UNKNOWN;
	int64_t n;
#ifdef freemem
#undef freemem
#endif
	pgcnt_t needfree = btop(arc_need_free);
	pgcnt_t lotsfree = btop(arc_sys_free);
	pgcnt_t desfree = 0;
	pgcnt_t freemem = btop(arc_free_memory());

	if (needfree > 0) {
		n = PAGESIZE * (-needfree);
		if (n < lowest) {
			lowest = n;
			r = FMR_NEEDFREE;
		}
	}

	/*
	 * Check that we're out of range of the pageout scanner.  It starts
	 * to schedule paging if freemem is less than lotsfree and needfree.
	 * lotsfree is the high-water mark for pageout, and needfree is the
	 * number of needed free pages.  We add extra pages here to make sure
	 * the scanner doesn't start up while we're freeing memory.
	 */
	n = PAGESIZE * (freemem - lotsfree - needfree - desfree);
	if (n < lowest) {
		lowest = n;
		r = FMR_LOTSFREE;
	}

#if defined(_ILP32)
	/*
	 * If we're on a 32-bit platform, it's possible that we'll exhaust the
	 * kernel heap space before we ever run out of available physical
	 * memory.  Most checks of the size of the heap_area compare against
	 * tune.t_minarmem, which is the minimum available real memory that we
	 * can have in the system.  However, this is generally fixed at 25
	 * pages, which is so low that it's useless.  In this comparison, we
	 * seek to calculate the total heap size, and reclaim if more than
	 * 3/4ths of the heap is allocated.  (Or, in the calculation, if less
	 * than 1/4th is free.)
	 */
	n = vmem_size(heap_arena, VMEM_FREE) -
	    (vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC) >> 2);
	if (n < lowest) {
		lowest = n;
		r = FMR_HEAP_ARENA;
	}
#endif

	/*
	 * If zio data pages are being allocated out of a separate heap
	 * segment, then enforce that the size of available vmem for this
	 * arena remains above about 1/4th (1/(2^arc_zio_arena_free_shift))
	 * free.
	 *
	 * Note that reducing the arc_zio_arena_free_shift keeps more virtual
	 * memory (in the zio_arena) free, which can avoid memory
	 * fragmentation issues.
	 */
	if (zio_arena != NULL) {
		n = (int64_t)vmem_size(zio_arena, VMEM_FREE) -
		    (vmem_size(zio_arena, VMEM_ALLOC) >>
		    arc_zio_arena_free_shift);
		if (n < lowest) {
			lowest = n;
			r = FMR_ZIO_ARENA;
		}
	}

	last_free_memory = lowest;
	last_free_reason = r;

	return (lowest);
}

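/*
 * Return an estimate of the number of bytes the ARC could evict on demand,
 * based on the clean (evictable) portion of the MRU and MFU lists.
 */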
static uint64_t
arc_evictable_memory(void)
{
	int64_t asize = aggsum_value(&arc_size);
	uint64_t arc_clean =
	    zfs_refcount_count(&arc_mru->arcs_esize[ARC_BUFC_DATA]) +
	    zfs_refcount_count(&arc_mru->arcs_esize[ARC_BUFC_METADATA]) +
	    zfs_refcount_count(&arc_mfu->arcs_esize[ARC_BUFC_DATA]) +
	    zfs_refcount_count(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
	uint64_t arc_dirty = MAX((int64_t)asize - (int64_t)arc_clean, 0);

	/*
	 * Scale reported evictable memory in proportion to page cache, cap
	 * at specified min/max.
	 */
	uint64_t min = (ptob(nr_file_pages()) / 100) * zfs_arc_pc_percent;
	min = MAX(arc_c_min, MIN(arc_c_max, min));

	if (arc_dirty >= min)
		return (arc_clean);

	return (MAX((int64_t)asize - (int64_t)min, 0));
}

/*
 * If sc->nr_to_scan is zero, the caller is requesting a query of the
 * number of objects which can potentially be freed.  If it is nonzero,
 * the request is to free that many objects.
 *
 * Linux kernels >= 3.12 have the count_objects and scan_objects callbacks
 * in struct shrinker and also require the shrinker to return the number
 * of objects freed.
 *
 * Older kernels require the shrinker to return the number of freeable
 * objects following the freeing of nr_to_free.
 */
static spl_shrinker_t
__arc_shrinker_func(struct shrinker *shrink, struct shrink_control *sc)
{
	int64_t pages;

	/* The arc is considered warm once reclaim has occurred */
	if (unlikely(arc_warm == B_FALSE))
		arc_warm = B_TRUE;

	/* Return the potential number of reclaimable pages */
	pages = btop((int64_t)arc_evictable_memory());
	if (sc->nr_to_scan == 0)
		return (pages);

	/* Not allowed to perform filesystem reclaim */
	if (!(sc->gfp_mask & __GFP_FS))
		return (SHRINK_STOP);

	/* Reclaim in progress */
	if (mutex_tryenter(&arc_adjust_lock) == 0) {
		ARCSTAT_INCR(arcstat_need_free, ptob(sc->nr_to_scan));
		return (0);
	}

	mutex_exit(&arc_adjust_lock);

	/*
	 * Evict the requested number of pages by shrinking arc_c the
	 * requested amount.
	 */
	if (pages > 0) {
		arc_reduce_target_size(ptob(sc->nr_to_scan));
		if (current_is_kswapd())
			arc_kmem_reap_soon();
#ifdef HAVE_SPLIT_SHRINKER_CALLBACK
		pages = MAX((int64_t)pages -
		    (int64_t)btop(arc_evictable_memory()), 0);
#else
		pages = btop(arc_evictable_memory());
#endif
		/*
		 * We've shrunk what we can, wake up threads.
		 */
		cv_broadcast(&arc_adjust_waiters_cv);
	} else
		pages = SHRINK_STOP;

	/*
	 * When direct reclaim is observed it usually indicates a rapid
	 * increase in memory pressure.  This occurs because the kswapd
	 * threads were unable to asynchronously keep enough free memory
	 * available.  In this case set arc_no_grow to briefly pause arc
	 * growth to avoid compounding the memory pressure.
	 */
	if (current_is_kswapd()) {
		ARCSTAT_BUMP(arcstat_memory_indirect_count);
	} else {
		arc_no_grow = B_TRUE;
		arc_kmem_reap_soon();
		ARCSTAT_BUMP(arcstat_memory_direct_count);
	}

	return (pages);
}
SPL_SHRINKER_CALLBACK_WRAPPER(arc_shrinker_func);

SPL_SHRINKER_DECLARE(arc_shrinker, arc_shrinker_func, DEFAULT_SEEKS);

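/*
 * Throttle memory reservations when the system is low on memory.  Returns
 * 0 if the reservation may proceed; ERESTART or EAGAIN directs the caller
 * to back off and retry later.
 */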
int
arc_memory_throttle(spa_t *spa, uint64_t reserve, uint64_t txg)
{
	uint64_t available_memory = arc_free_memory();

#if defined(_ILP32)
	available_memory =
	    MIN(available_memory, vmem_size(heap_arena, VMEM_FREE));
#endif

	if (available_memory > arc_all_memory() * arc_lotsfree_percent / 100)
		return (0);

	if (txg > spa->spa_lowmem_last_txg) {
		spa->spa_lowmem_last_txg = txg;
		spa->spa_lowmem_page_load = 0;
	}
	/*
	 * If we are in pageout, we know that memory is already tight,
	 * the arc is already going to be evicting, so we just want to
	 * continue to let page writes occur as quickly as possible.
	 */
	if (current_is_kswapd()) {
		if (spa->spa_lowmem_page_load >
		    MAX(arc_sys_free / 4, available_memory) / 4) {
			DMU_TX_STAT_BUMP(dmu_tx_memory_reclaim);
			return (SET_ERROR(ERESTART));
		}
		/* Note: reserve is inflated, so we deflate */
		atomic_add_64(&spa->spa_lowmem_page_load, reserve / 8);
		return (0);
	} else if (spa->spa_lowmem_page_load > 0 && arc_reclaim_needed()) {
		/* memory is low, delay before restarting */
		ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
		DMU_TX_STAT_BUMP(dmu_tx_memory_reclaim);
		return (SET_ERROR(EAGAIN));
	}
	spa->spa_lowmem_page_load = 0;
	return (0);
}

void
arc_lowmem_init(void)
{
	uint64_t allmem = arc_all_memory();

	/*
	 * Register a shrinker to support synchronous (direct) memory
	 * reclaim from the arc.  This is done to prevent kswapd from
	 * swapping out pages when it is preferable to shrink the arc.
	 */
	spl_register_shrinker(&arc_shrinker);

	/* Set to 1/64 of all memory or a minimum of 512K */
	arc_sys_free = MAX(allmem / 64, (512 * 1024));
	arc_need_free = 0;
}

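/*
 * Unregister the ARC shrinker on module unload.
 */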
void
arc_lowmem_fini(void)
{
	spl_unregister_shrinker(&arc_shrinker);
}

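/*
 * Set handler for long-valued ARC module parameters: store the new value,
 * then re-apply the ARC tunables via arc_tuning_update().
 */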
int
param_set_arc_long(const char *buf, zfs_kernel_param_t *kp)
{
	int error;

	error = param_set_long(buf, kp);
	if (error < 0)
		return (SET_ERROR(error));

	arc_tuning_update(B_TRUE);

	return (0);
}

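/*
 * As above, but for int-valued ARC module parameters.
 */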
int
param_set_arc_int(const char *buf, zfs_kernel_param_t *kp)
{
	int error;

	error = param_set_int(buf, kp);
	if (error < 0)
		return (SET_ERROR(error));

	arc_tuning_update(B_TRUE);

	return (0);
}
#else /* _KERNEL */
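/*
 * In user space (primarily used for testing), memory pressure is simulated
 * rather than measured.
 */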
int64_t
arc_available_memory(void)
{
	int64_t lowest = INT64_MAX;
	free_memory_reason_t r = FMR_UNKNOWN;

	/* Every 100 calls, free a small amount */
	if (spa_get_random(100) == 0)
		lowest = -1024;

	last_free_memory = lowest;
	last_free_reason = r;

	return (lowest);
}

int
arc_memory_throttle(spa_t *spa, uint64_t reserve, uint64_t txg)
{
	return (0);
}

uint64_t
arc_all_memory(void)
{
	return (ptob(physmem) / 2);
}

uint64_t
arc_free_memory(void)
{
	return (spa_get_random(arc_all_memory() * 20 / 100));
}
#endif /* _KERNEL */

/*
 * Helper function for arc_prune_async(); it is responsible for safely
 * handling the execution of a registered arc_prune_func_t.
 */
static void
arc_prune_task(void *ptr)
{
	arc_prune_t *ap = (arc_prune_t *)ptr;
	arc_prune_func_t *func = ap->p_pfunc;

	if (func != NULL)
		func(ap->p_adjust, ap->p_private);

	zfs_refcount_remove(&ap->p_refcnt, func);
}

/*
 * Notify registered consumers that they must drop holds on a portion of the
 * ARC buffers they reference.  This provides a mechanism to ensure the ARC
 * can honor the arc_meta_limit and reclaim otherwise pinned ARC buffers.
 * This is analogous to dnlc_reduce_cache() but more generic.
 *
 * This operation is performed asynchronously so it may be safely called
 * in the context of the arc_reclaim_thread().  A reference is taken here
 * for each registered arc_prune_t and the arc_prune_task() is responsible
 * for releasing it once the registered arc_prune_func_t has completed.
 */
void
arc_prune_async(int64_t adjust)
{
	arc_prune_t *ap;

	mutex_enter(&arc_prune_mtx);
	for (ap = list_head(&arc_prune_list); ap != NULL;
	    ap = list_next(&arc_prune_list, ap)) {

		if (zfs_refcount_count(&ap->p_refcnt) >= 2)
			continue;

		zfs_refcount_add(&ap->p_refcnt, ap->p_pfunc);
		ap->p_adjust = adjust;
		if (taskq_dispatch(arc_prune_taskq, arc_prune_task,
		    ap, TQ_SLEEP) == TASKQID_INVALID) {
			zfs_refcount_remove(&ap->p_refcnt, ap->p_pfunc);
			continue;
		}
		ARCSTAT_BUMP(arcstat_prune);
	}
	mutex_exit(&arc_prune_mtx);
}