#define kmem_cache_reap_now kmem_cache_reap_soon
#define freemem vm_free_count()
#define minfree vm_cnt.v_free_min
-#define heap_arena kernel_arena
-#define zio_arena NULL
#define kmem_alloc(size, kmflags) zfs_kmem_alloc((size), (kmflags))
#define kmem_zalloc(size, kmflags) \
zfs_kmem_alloc((size), (kmflags) | M_ZERO)
#define kmem_free(buf, size) zfs_kmem_free((buf), (size))
-#define vmem_qcache_reap(ptr) ((void)0)
#endif /* _OPENSOLARIS_SYS_KMEM_H_ */
#define SKC_MAGIC 0x2c2c2c2c
#define SPL_KMEM_CACHE_DELAY 15 /* Minimum slab release age */
-#define SPL_KMEM_CACHE_REAP 0 /* Default reap everything */
#define SPL_KMEM_CACHE_OBJ_PER_SLAB 8 /* Target objects per slab */
#define SPL_KMEM_CACHE_OBJ_PER_SLAB_MIN 1 /* Minimum objects per slab */
#define SPL_KMEM_CACHE_ALIGN 8 /* Default object alignment */
uint32_t skc_slab_objs; /* Objects per slab */
uint32_t skc_slab_size; /* Slab size */
uint32_t skc_delay; /* Slab reclaim interval */
- uint32_t skc_reap; /* Slab reclaim count */
atomic_t skc_ref; /* Ref count callers */
taskqid_t skc_taskqid; /* Slab reclaim task */
struct list_head skc_list; /* List of caches linkage */
extern void *spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags);
extern void spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj);
extern void spl_kmem_cache_set_allocflags(spl_kmem_cache_t *skc, gfp_t flags);
-extern void spl_kmem_cache_reap_now(spl_kmem_cache_t *skc, int count);
+extern void spl_kmem_cache_reap_now(spl_kmem_cache_t *skc);
extern void spl_kmem_reap(void);
extern uint64_t spl_kmem_cache_inuse(kmem_cache_t *cache);
extern uint64_t spl_kmem_cache_entry_size(kmem_cache_t *cache);
#define kmem_cache_destroy(skc) spl_kmem_cache_destroy(skc)
#define kmem_cache_alloc(skc, flags) spl_kmem_cache_alloc(skc, flags)
#define kmem_cache_free(skc, obj) spl_kmem_cache_free(skc, obj)
-#define kmem_cache_reap_now(skc) \
- spl_kmem_cache_reap_now(skc, skc->skc_reap)
+#define kmem_cache_reap_now(skc) spl_kmem_cache_reap_now(skc)
#define kmem_reap() spl_kmem_reap()
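/*
 * Illustrative sketch, not part of this change: with the per-call reap
 * count removed, a caller simply asks a cache to release whatever idle
 * slabs it holds.  "example_cache" and "example_reap" are hypothetical.
 */
static kmem_cache_t *example_cache;

static void
example_reap(void)
{
	kmem_cache_reap_now(example_cache);	/* reap one cache */
	kmem_reap();				/* reap all registered caches */
}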
typedef struct vmem { } vmem_t;
-extern vmem_t *heap_arena;
-extern vmem_t *zio_alloc_arena;
-extern vmem_t *zio_arena;
-
-extern size_t vmem_size(vmem_t *vmp, int typemask);
-
/*
* Memory allocation interfaces
*/
#define vmem_alloc(sz, fl) spl_vmem_alloc((sz), (fl), __func__, __LINE__)
#define vmem_zalloc(sz, fl) spl_vmem_zalloc((sz), (fl), __func__, __LINE__)
#define vmem_free(ptr, sz) spl_vmem_free((ptr), (sz))
-#define vmem_qcache_reap(ptr) ((void)0)
extern void *spl_vmem_alloc(size_t sz, int fl, const char *func, int line);
extern void *spl_vmem_zalloc(size_t sz, int fl, const char *func, int line);
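/*
 * Illustrative sketch, not part of this change: the vmem_alloc()/vmem_zalloc()
 * wrappers keep the Solaris convention of passing the size to the free as
 * well.  The 128K size, "buf", and "example_vmem_usage" are hypothetical.
 */
static void
example_vmem_usage(void)
{
	size_t size = 128 * 1024;
	void *buf = vmem_zalloc(size, KM_SLEEP);	/* zeroed, may sleep */

	if (buf != NULL)
		vmem_free(buf, size);	/* size must match the allocation */
}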
#define arc_sys_free ARCSTAT(arcstat_sys_free) /* target system free bytes */
#define arc_need_free ARCSTAT(arcstat_need_free) /* bytes to be freed */
-extern int arc_zio_arena_free_shift;
extern taskq_t *arc_prune_taskq;
extern arc_stats_t arc_stats;
extern hrtime_t arc_growtime;
#define kmem_debugging() 0
#define kmem_cache_reap_now(_c) umem_cache_reap_now(_c);
#define kmem_cache_set_move(_c, _cb) /* nothing */
-#define vmem_qcache_reap(_v) /* nothing */
#define POINTER_INVALIDATE(_pp) /* nothing */
#define POINTER_IS_VALID(_p) 0
-extern vmem_t *zio_arena;
-
typedef umem_cache_t kmem_cache_t;
typedef enum kmem_cbrc {
uint64_t physmem;
char hw_serial[HW_HOSTID_LEN];
struct utsname hw_utsname;
-vmem_t *zio_arena = NULL;
/* If set, all blocks read will be copied to the specified directory. */
char *vn_dumpdir = NULL;
extern struct vfsops zfs_vfsops;
-/* vmem_size typemask */
-#define VMEM_ALLOC 0x01
-#define VMEM_FREE 0x02
-#define VMEM_MAXFREE 0x10
-typedef size_t vmem_size_t;
-extern vmem_size_t vmem_size(vmem_t *vm, int typemask);
-
uint_t zfs_arc_free_target = 0;
int64_t last_free_memory;
}
#endif
- /*
- * If zio data pages are being allocated out of a separate heap segment,
- * then enforce that the size of available vmem for this arena remains
- * above about 1/4th (1/(2^arc_zio_arena_free_shift)) free.
- *
- * Note that reducing the arc_zio_arena_free_shift keeps more virtual
- * memory (in the zio_arena) free, which can avoid memory
- * fragmentation issues.
- */
- if (zio_arena != NULL) {
- n = (int64_t)vmem_size(zio_arena, VMEM_FREE) -
- (vmem_size(zio_arena, VMEM_ALLOC) >>
- arc_zio_arena_free_shift);
- if (n < lowest) {
- lowest = n;
- r = FMR_ZIO_ARENA;
- }
- }
-
last_free_memory = lowest;
last_free_reason = r;
DTRACE_PROBE2(arc__available_memory, int64_t, lowest, int, r);
skc->skc_obj_size = size;
skc->skc_obj_align = SPL_KMEM_CACHE_ALIGN;
skc->skc_delay = SPL_KMEM_CACHE_DELAY;
- skc->skc_reap = SPL_KMEM_CACHE_REAP;
atomic_set(&skc->skc_ref, 0);
INIT_LIST_HEAD(&skc->skc_list);
down_read(&spl_kmem_cache_sem);
list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {
uint64_t oldalloc = skc->skc_obj_alloc;
- spl_kmem_cache_reap_now(skc,
- MAX(sc->nr_to_scan>>fls64(skc->skc_slab_objs), 1));
+ spl_kmem_cache_reap_now(skc);
if (oldalloc > skc->skc_obj_alloc)
alloc += oldalloc - skc->skc_obj_alloc;
}
* effort and we do not want to thrash creating and destroying slabs.
*/
void
-spl_kmem_cache_reap_now(spl_kmem_cache_t *skc, int count)
+spl_kmem_cache_reap_now(spl_kmem_cache_t *skc)
{
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
#include <sys/shrinker.h>
#include <linux/module.h>
-vmem_t *heap_arena = NULL;
-EXPORT_SYMBOL(heap_arena);
-
-vmem_t *zio_alloc_arena = NULL;
-EXPORT_SYMBOL(zio_alloc_arena);
-
-vmem_t *zio_arena = NULL;
-EXPORT_SYMBOL(zio_arena);
-
-#define VMEM_FLOOR_SIZE (4 * 1024 * 1024) /* 4MB floor */
-
-/*
- * Return approximate virtual memory usage based on these assumptions:
- *
- * 1) The major SPL consumer of virtual memory is the kmem cache.
- * 2) Memory allocated with vmem_alloc() is short lived and can be ignored.
- * 3) Allow a 4MB floor as a generous pad given normal consumption.
- * 4) The spl_kmem_cache_sem only contends with cache create/destroy.
- */
-size_t
-vmem_size(vmem_t *vmp, int typemask)
-{
- spl_kmem_cache_t *skc = NULL;
- size_t alloc = VMEM_FLOOR_SIZE;
-
- if ((typemask & VMEM_ALLOC) && (typemask & VMEM_FREE))
- return (VMALLOC_TOTAL);
-
-
- down_read(&spl_kmem_cache_sem);
- list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {
- if (skc->skc_flags & KMC_VMEM)
- alloc += skc->skc_slab_size * skc->skc_slab_total;
- }
- up_read(&spl_kmem_cache_sem);
-
- if (typemask & VMEM_ALLOC)
- return (MIN(alloc, VMALLOC_TOTAL));
- else if (typemask & VMEM_FREE)
- return (MAX(VMALLOC_TOTAL - alloc, 0));
- else
- return (0);
-}
-EXPORT_SYMBOL(vmem_size);
-
/*
* Public vmem_alloc(), vmem_zalloc() and vmem_free() interfaces.
*/
int64_t lowest = INT64_MAX;
free_memory_reason_t r = FMR_UNKNOWN;
int64_t n;
-#ifdef freemem
-#undef freemem
-#endif
- pgcnt_t needfree = btop(arc_need_free);
- pgcnt_t lotsfree = btop(arc_sys_free);
- pgcnt_t desfree = 0;
- pgcnt_t freemem = btop(arc_free_memory());
-
- if (needfree > 0) {
- n = PAGESIZE * (-needfree);
- if (n < lowest) {
- lowest = n;
- r = FMR_NEEDFREE;
- }
- }
- /*
- * check that we're out of range of the pageout scanner. It starts to
- * schedule paging if freemem is less than lotsfree and needfree.
- * lotsfree is the high-water mark for pageout, and needfree is the
- * number of needed free pages. We add extra pages here to make sure
- * the scanner doesn't start up while we're freeing memory.
- */
- n = PAGESIZE * (freemem - lotsfree - needfree - desfree);
- if (n < lowest) {
- lowest = n;
- r = FMR_LOTSFREE;
+ if (arc_need_free > 0) {
+ lowest = -arc_need_free;
+ r = FMR_NEEDFREE;
}
-#if defined(_ILP32)
- /*
- * If we're on a 32-bit platform, it's possible that we'll exhaust the
- * kernel heap space before we ever run out of available physical
- * memory. Most checks of the size of the heap_area compare against
- * tune.t_minarmem, which is the minimum available real memory that we
- * can have in the system. However, this is generally fixed at 25 pages
- * which is so low that it's useless. In this comparison, we seek to
- * calculate the total heap-size, and reclaim if more than 3/4ths of the
- * heap is allocated. (Or, in the calculation, if less than 1/4th is
- * free)
- */
- n = vmem_size(heap_arena, VMEM_FREE) -
- (vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC) >> 2);
+ n = arc_free_memory() - arc_sys_free - arc_need_free;
if (n < lowest) {
lowest = n;
- r = FMR_HEAP_ARENA;
- }
-#endif
-
- /*
- * If zio data pages are being allocated out of a separate heap segment,
- * then enforce that the size of available vmem for this arena remains
- * above about 1/4th (1/(2^arc_zio_arena_free_shift)) free.
- *
- * Note that reducing the arc_zio_arena_free_shift keeps more virtual
- * memory (in the zio_arena) free, which can avoid memory
- * fragmentation issues.
- */
- if (zio_arena != NULL) {
- n = (int64_t)vmem_size(zio_arena, VMEM_FREE) -
- (vmem_size(zio_arena, VMEM_ALLOC) >>
- arc_zio_arena_free_shift);
- if (n < lowest) {
- lowest = n;
- r = FMR_ZIO_ARENA;
- }
+ r = FMR_LOTSFREE;
}
last_free_memory = lowest;
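/*
 * Illustrative sketch, not part of this change: the simplified checks above
 * work directly in bytes rather than pages.  The helper name and figures
 * below are hypothetical.
 */
static int64_t
example_available_memory(int64_t free_bytes, int64_t sys_free, int64_t need_free)
{
	int64_t lowest = INT64_MAX;

	if (need_free > 0)
		lowest = -need_free;	/* reclaim already requested */

	/* e.g. 512M free - 64M reserve - 0 needed = 448M of headroom */
	if (free_bytes - sys_free - need_free < lowest)
		lowest = free_bytes - sys_free - need_free;

	return (lowest);
}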
int
arc_memory_throttle(spa_t *spa, uint64_t reserve, uint64_t txg)
{
- uint64_t available_memory = arc_free_memory();
-
-#if defined(_ILP32)
- available_memory =
- MIN(available_memory, vmem_size(heap_arena, VMEM_FREE));
-#endif
+ uint64_t free_memory = arc_free_memory();
- if (available_memory > arc_all_memory() * arc_lotsfree_percent / 100)
+ if (free_memory > arc_all_memory() * arc_lotsfree_percent / 100)
return (0);
if (txg > spa->spa_lowmem_last_txg) {
*/
if (current_is_kswapd()) {
if (spa->spa_lowmem_page_load >
- MAX(arc_sys_free / 4, available_memory) / 4) {
+ MAX(arc_sys_free / 4, free_memory) / 4) {
DMU_TX_STAT_BUMP(dmu_tx_memory_reclaim);
return (SET_ERROR(ERESTART));
}
*/
boolean_t arc_warm;
-/*
- * log2 fraction of the zio arena to keep free.
- */
-int arc_zio_arena_free_shift = 2;
-
/*
* These tunables are for performance analysis.
*/
kmem_cache_reap_now(hdr_l2only_cache);
kmem_cache_reap_now(zfs_btree_leaf_cache);
abd_cache_reap_now();
-
- if (zio_arena != NULL) {
- /*
- * Ask the vmem arena to reclaim unused memory from its
- * quantum caches.
- */
- vmem_qcache_reap(zio_arena);
- }
}
/* ARGSUSED */