/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2018, Joyent, Inc.
 * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
 * Copyright (c) 2014 by Saso Kiselkov. All rights reserved.
 * Copyright 2017 Nexenta Systems, Inc. All rights reserved.
 */
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/refcount.h>
#include <sys/vdev.h>
#include <sys/vdev_trim.h>
#include <sys/vdev_impl.h>
#include <sys/dsl_pool.h>
#include <sys/multilist.h>
#include <sys/abd.h>
#include <sys/zil.h>
#include <sys/fm/fs/zfs.h>
#ifdef _KERNEL
#include <sys/shrinker.h>
#include <sys/vmsystm.h>
#include <sys/zpl.h>
#include <linux/page_compat.h>
#endif
#include <sys/callb.h>
#include <sys/kstat.h>
#include <sys/zthr.h>
#include <zfs_fletcher.h>
#include <sys/arc_impl.h>
#include <sys/trace_zfs.h>
#include <sys/aggsum.h>
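
/*
 * The most recent result of arc_available_memory(): the lowest amount of
 * headroom observed and the reason it was the limiting factor.  Kept for
 * debugging and observability; they are not read again within this file.
 */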
int64_t last_free_memory;
free_memory_reason_t last_free_reason;

/*
 * Return a default max arc size based on the amount of physical memory.
 */
uint64_t
arc_default_max(uint64_t min, uint64_t allmem)
{
	/* Default to 1/2 of all memory. */
	return (MAX(allmem / 2, min));
}

#ifdef _KERNEL
/*
 * Return maximum amount of memory that we could possibly use.  Reduced
 * to half of all memory in user space which is primarily used for testing.
 */
uint64_t
arc_all_memory(void)
{
#ifdef CONFIG_HIGHMEM
	return (ptob(zfs_totalram_pages - zfs_totalhigh_pages));
#else
	return (ptob(zfs_totalram_pages));
#endif /* CONFIG_HIGHMEM */
}

/*
 * Return the amount of memory that is considered free.  In user space
 * which is primarily used for testing we pretend that free memory ranges
 * from 0-20% of all memory.
 */
uint64_t
arc_free_memory(void)
{
#ifdef CONFIG_HIGHMEM
	struct sysinfo si;
	si_meminfo(&si);
	return (ptob(si.freeram - si.freehigh));
#else
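	/*
	 * Beyond pages on the free list, inactive file and anon pages and
	 * reclaimable slab are counted as free: the kernel can reclaim
	 * them relatively cheaply under pressure, so treating them as
	 * available keeps the ARC from shrinking prematurely.
	 */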
	return (ptob(nr_free_pages() +
	    nr_inactive_file_pages() +
	    nr_inactive_anon_pages() +
	    nr_slab_reclaimable_pages()));
#endif /* CONFIG_HIGHMEM */
}

/*
 * Additional reserve of pages for pp_reserve.
 */
int64_t arc_pages_pp_reserve = 64;

/*
 * Additional reserve of pages for swapfs.
 */
int64_t arc_swapfs_reserve = 64;

/*
 * Return the amount of memory that can be consumed before reclaim will be
 * needed.  Positive if there is sufficient free memory, negative indicates
 * the amount of memory that needs to be freed up.
 */
int64_t
arc_available_memory(void)
{
	int64_t lowest = INT64_MAX;
	free_memory_reason_t r = FMR_UNKNOWN;
	int64_t n;
#ifdef freemem
#undef freemem
#endif
	pgcnt_t needfree = btop(arc_need_free);
	pgcnt_t lotsfree = btop(arc_sys_free);
	pgcnt_t desfree = 0;
	pgcnt_t freemem = btop(arc_free_memory());

	if (needfree > 0) {
		n = PAGESIZE * (-needfree);
		if (n < lowest) {
			lowest = n;
			r = FMR_NEEDFREE;
		}
	}
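	/*
	 * Each check below computes a signed amount of headroom in bytes;
	 * the smallest value seen, together with its reason code, becomes
	 * the verdict recorded in last_free_memory/last_free_reason.
	 */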
	/*
	 * Check that we're out of range of the pageout scanner.  It starts to
	 * schedule paging if freemem is less than lotsfree and needfree.
	 * lotsfree is the high-water mark for pageout, and needfree is the
	 * number of needed free pages.  We add extra pages here to make sure
	 * the scanner doesn't start up while we're freeing memory.
	 */
	n = PAGESIZE * (freemem - lotsfree - needfree - desfree);
	if (n < lowest) {
		lowest = n;
		r = FMR_LOTSFREE;
	}

#if defined(_ILP32)
	/*
	 * If we're on a 32-bit platform, it's possible that we'll exhaust the
	 * kernel heap space before we ever run out of available physical
	 * memory.  Most checks of the size of the heap_area compare against
	 * tune.t_minarmem, which is the minimum available real memory that we
	 * can have in the system.  However, this is generally fixed at 25
	 * pages which is so low that it's useless.  In this comparison, we
	 * seek to calculate the total heap-size, and reclaim if more than
	 * 3/4ths of the heap is allocated.  (Or, in the calculation, if less
	 * than 1/4th is free.)
	 */
	n = vmem_size(heap_arena, VMEM_FREE) -
	    (vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC) >> 2);
	if (n < lowest) {
		lowest = n;
		r = FMR_HEAP_ARENA;
	}
#endif /* _ILP32 */

	/*
	 * If zio data pages are being allocated out of a separate heap
	 * segment, then enforce that the size of available vmem for this
	 * arena remains above about 1/4th (1/(2^arc_zio_arena_free_shift))
	 * free.
	 *
	 * Note that reducing the arc_zio_arena_free_shift keeps more virtual
	 * memory (in the zio_arena) free, which can avoid memory
	 * fragmentation issues.
	 */
	if (zio_arena != NULL) {
		n = (int64_t)vmem_size(zio_arena, VMEM_FREE) -
		    (vmem_size(zio_arena, VMEM_ALLOC) >>
		    arc_zio_arena_free_shift);
		if (n < lowest) {
			lowest = n;
			r = FMR_ZIO_ARENA;
		}
	}

	last_free_memory = lowest;
	last_free_reason = r;

	return (lowest);
}
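
/*
 * Return an estimate of the number of bytes the ARC could give back
 * without touching in-use buffers: the unreferenced ("clean") portion of
 * the MRU and MFU lists, scaled against the page cache as computed below.
 */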
static uint64_t
arc_evictable_memory(void)
{
	int64_t asize = aggsum_value(&arc_size);
	uint64_t arc_clean =
	    zfs_refcount_count(&arc_mru->arcs_esize[ARC_BUFC_DATA]) +
	    zfs_refcount_count(&arc_mru->arcs_esize[ARC_BUFC_METADATA]) +
	    zfs_refcount_count(&arc_mfu->arcs_esize[ARC_BUFC_DATA]) +
	    zfs_refcount_count(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
	uint64_t arc_dirty = MAX((int64_t)asize - (int64_t)arc_clean, 0);

	/*
	 * Scale reported evictable memory in proportion to page cache, cap
	 * at specified min/max.
	 */
	uint64_t min = (ptob(nr_file_pages()) / 100) * zfs_arc_pc_percent;
	min = MAX(arc_c_min, MIN(arc_c_max, min));

	if (arc_dirty >= min)
		return (arc_clean);

	return (MAX((int64_t)asize - (int64_t)min, 0));
}

/*
 * If sc->nr_to_scan is zero, the caller is requesting a query of the
 * number of objects which can potentially be freed.  If it is nonzero,
 * the request is to free that many objects.
 *
 * Linux kernels >= 3.12 have the count_objects and scan_objects callbacks
 * in struct shrinker and also require the shrinker to return the number
 * of objects freed.
 *
 * Older kernels require the shrinker to return the number of freeable
 * objects following the freeing of nr_to_free.
 */
static spl_shrinker_t
__arc_shrinker_func(struct shrinker *shrink, struct shrink_control *sc)
{
	int64_t pages;

	/* The arc is considered warm once reclaim has occurred */
	if (unlikely(arc_warm == B_FALSE))
		arc_warm = B_TRUE;

	/* Return the potential number of reclaimable pages */
	pages = btop((int64_t)arc_evictable_memory());
	if (sc->nr_to_scan == 0)
		return (pages);
	/* Not allowed to perform filesystem reclaim */
	if (!(sc->gfp_mask & __GFP_FS))
		return (SHRINK_STOP);
	/* Reclaim in progress */
	if (mutex_tryenter(&arc_adjust_lock) == 0) {
		ARCSTAT_INCR(arcstat_need_free, ptob(sc->nr_to_scan));
		return (0);
	}
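	/*
	 * The adjust lock was taken only to confirm that no reclaim is
	 * already in progress, so it is dropped again immediately below.
	 * In the contended case above, the deficit recorded through
	 * arcstat_need_free (aliased to arc_need_free) is picked up
	 * asynchronously by the reclaim thread via arc_available_memory().
	 */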
	mutex_exit(&arc_adjust_lock);

	/*
	 * Evict the requested number of pages by shrinking arc_c the
	 * requested amount.
	 */
	if (pages > 0) {
		arc_reduce_target_size(ptob(sc->nr_to_scan));
		if (current_is_kswapd())
			arc_kmem_reap_soon();
#ifdef HAVE_SPLIT_SHRINKER_CALLBACK
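		/*
		 * Kernels with the split count/scan shrinker API expect the
		 * number of objects actually freed, hence the delta below;
		 * older kernels expect the number of objects still freeable.
		 */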
		pages = MAX((int64_t)pages -
		    (int64_t)btop(arc_evictable_memory()), 0);
#else
		pages = btop(arc_evictable_memory());
#endif
		/*
		 * We've shrunk what we can, wake up threads.
		 */
		cv_broadcast(&arc_adjust_waiters_cv);
	} else
		pages = SHRINK_STOP;

	/*
	 * When direct reclaim is observed it usually indicates a rapid
	 * increase in memory pressure.  This occurs because the kswapd
	 * threads were unable to asynchronously keep enough free memory
	 * available.  In this case set arc_no_grow to briefly pause arc
	 * growth to avoid compounding the memory pressure.
	 */
	if (current_is_kswapd()) {
		ARCSTAT_BUMP(arcstat_memory_indirect_count);
	} else {
		arc_no_grow = B_TRUE;
		arc_kmem_reap_soon();
		ARCSTAT_BUMP(arcstat_memory_direct_count);
	}

	return (pages);
}
SPL_SHRINKER_CALLBACK_WRAPPER(arc_shrinker_func);

SPL_SHRINKER_DECLARE(arc_shrinker, arc_shrinker_func, DEFAULT_SEEKS);
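
/*
 * Throttle new DMU transaction holds when available memory is low.
 * Returning a nonzero error (ERESTART or EAGAIN) rather than 0 tells the
 * DMU to back off so reclaim can make progress before more dirty data
 * accumulates.
 */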
int
arc_memory_throttle(spa_t *spa, uint64_t reserve, uint64_t txg)
{
	uint64_t available_memory = arc_free_memory();

#if defined(_ILP32)
	available_memory =
	    MIN(available_memory, vmem_size(heap_arena, VMEM_FREE));
#endif

	if (available_memory > arc_all_memory() * arc_lotsfree_percent / 100)
		return (0);
	if (txg > spa->spa_lowmem_last_txg) {
		spa->spa_lowmem_last_txg = txg;
		spa->spa_lowmem_page_load = 0;
	}
	/*
	 * If we are in pageout, we know that memory is already tight,
	 * the arc is already going to be evicting, so we just want to
	 * continue to let page writes occur as quickly as possible.
	 */
	if (current_is_kswapd()) {
		if (spa->spa_lowmem_page_load >
		    MAX(arc_sys_free / 4, available_memory) / 4) {
			DMU_TX_STAT_BUMP(dmu_tx_memory_reclaim);
			return (SET_ERROR(ERESTART));
		}
		/* Note: reserve is inflated, so we deflate */
		atomic_add_64(&spa->spa_lowmem_page_load, reserve / 8);
		return (0);
	} else if (spa->spa_lowmem_page_load > 0 && arc_reclaim_needed()) {
		/* memory is low, delay before restarting */
		ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
		DMU_TX_STAT_BUMP(dmu_tx_memory_reclaim);
		return (SET_ERROR(EAGAIN));
	}
	spa->spa_lowmem_page_load = 0;
	return (0);
}
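
/*
 * Called once at module load (and undone in arc_lowmem_fini()) to hook
 * ARC reclaim into the kernel's low-memory handling and to size
 * arc_sys_free, the free-memory floor the ARC tries to preserve.
 */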
void
arc_lowmem_init(void)
{
	uint64_t allmem = arc_all_memory();

	/*
	 * Register a shrinker to support synchronous (direct) memory
	 * reclaim from the arc.  This is done to prevent kswapd from
	 * swapping out pages when it is preferable to shrink the arc.
	 */
	spl_register_shrinker(&arc_shrinker);

	/* Set to 1/64 of all memory or a minimum of 512K */
	arc_sys_free = MAX(allmem / 64, (512 * 1024));
	arc_need_free = 0;
}
void
arc_lowmem_fini(void)
{
	spl_unregister_shrinker(&arc_shrinker);
}

int
param_set_arc_long(const char *buf, zfs_kernel_param_t *kp)
{
	int error;

	error = param_set_long(buf, kp);
	if (error < 0)
		return (SET_ERROR(error));

	arc_tuning_update(B_TRUE);

	return (0);
}

int
param_set_arc_int(const char *buf, zfs_kernel_param_t *kp)
{
	int error;

	error = param_set_int(buf, kp);
	if (error < 0)
		return (SET_ERROR(error));

	arc_tuning_update(B_TRUE);

	return (0);
}
#else /* _KERNEL */
int64_t
arc_available_memory(void)
{
	int64_t lowest = INT64_MAX;
	free_memory_reason_t r = FMR_UNKNOWN;

	/* Every 100 calls, free a small amount */
	if (spa_get_random(100) == 0)
		lowest = -1024;

	last_free_memory = lowest;
	last_free_reason = r;

	return (lowest);
}

int
arc_memory_throttle(spa_t *spa, uint64_t reserve, uint64_t txg)
{
	return (0);
}

uint64_t
arc_all_memory(void)
{
	return (ptob(physmem) / 2);
}
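
/*
 * The user-space arc_free_memory() below simulates memory pressure for
 * testing by returning a random value in the 0-20% range of all memory,
 * matching the behavior described in the comment on the kernel
 * implementation above (spa_get_random(n) yields a value in [0, n)).
 */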
uint64_t
arc_free_memory(void)
{
	return (spa_get_random(arc_all_memory() * 20 / 100));
}
#endif /* _KERNEL */

/*
 * Helper function for arc_prune_async(); it is responsible for safely
 * handling the execution of a registered arc_prune_func_t.
 */
static void
arc_prune_task(void *ptr)
{
	arc_prune_t *ap = (arc_prune_t *)ptr;
	arc_prune_func_t *func = ap->p_pfunc;

	if (func != NULL)
		func(ap->p_adjust, ap->p_private);

	zfs_refcount_remove(&ap->p_refcnt, func);
}

/*
 * Notify registered consumers they must drop holds on a portion of the ARC
 * buffers they reference.  This provides a mechanism to ensure the ARC can
 * honor the arc_meta_limit and reclaim otherwise pinned ARC buffers.  This
 * is analogous to dnlc_reduce_cache() but more generic.
 *
 * This operation is performed asynchronously so it may be safely called
 * in the context of the arc_reclaim_thread().  A reference is taken here
 * for each registered arc_prune_t and the arc_prune_task() is responsible
 * for releasing it once the registered arc_prune_func_t has completed.
 */
void
arc_prune_async(int64_t adjust)
{
	arc_prune_t *ap;

	mutex_enter(&arc_prune_mtx);
	for (ap = list_head(&arc_prune_list); ap != NULL;
	    ap = list_next(&arc_prune_list, ap)) {
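		/*
		 * A reference count of two or more means a prune task is
		 * already outstanding for this consumer (registration on
		 * arc_prune_list holds one reference, a dispatched task
		 * the other), so skip it rather than queue duplicate work.
		 */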
		if (zfs_refcount_count(&ap->p_refcnt) >= 2)
			continue;
		zfs_refcount_add(&ap->p_refcnt, ap->p_pfunc);
		ap->p_adjust = adjust;
		if (taskq_dispatch(arc_prune_taskq, arc_prune_task,
		    ap, TQ_SLEEP) == TASKQID_INVALID) {
			zfs_refcount_remove(&ap->p_refcnt, ap->p_pfunc);
			continue;
		}
		ARCSTAT_BUMP(arcstat_prune);
	}
	mutex_exit(&arc_prune_mtx);
}