} else {
/* Marked unlikely because we should never be doing this,
 * we tolerate up to 2 pages but a single page is best. */
- if (unlikely((size > PAGE_SIZE*2) && !(flags & __GFP_NOWARN))) {
+ if (unlikely((size > PAGE_SIZE*2) && !(flags & KM_NODEBUG))) {
CWARN("Large kmem_alloc(%llu, 0x%x) (%lld/%llu)\n",
(unsigned long long) size, flags,
kmem_alloc_used_read(), kmem_alloc_max);
/* Marked unlikely because we should never be doing this,
 * we tolerate up to 2 pages but a single page is best. */
- if (unlikely((size > PAGE_SIZE * 2) && !(flags & __GFP_NOWARN))) {
+ if (unlikely((size > PAGE_SIZE * 2) && !(flags & KM_NODEBUG))) {
CWARN("Large kmem_alloc(%llu, 0x%x) (%lld/%llu)\n",
(unsigned long long) size, flags,
kmem_alloc_used_read(), kmem_alloc_max);
* this usually ends up being a large allocation of ~32k because
* we need to allocate enough memory for the worst case number of
* cpus in the magazine, skc_mag[NR_CPUS]. Because of this we
- * explicitly pass __GFP_NOWARN to suppress the kmem warning */
+ * explicitly pass KM_NODEBUG to suppress the kmem warning */
skc = (spl_kmem_cache_t *)kmem_zalloc(sizeof(*skc),
- kmem_flags | __GFP_NOWARN);
+ kmem_flags | KM_NODEBUG);
if (skc == NULL)
RETURN(NULL);
}
/* Allocate a new slab for the cache */
- sks = spl_slab_alloc(skc, flags | __GFP_NORETRY | __GFP_NOWARN);
+ sks = spl_slab_alloc(skc, flags | __GFP_NORETRY | KM_NODEBUG);
if (sks == NULL)
GOTO(out, sks = NULL);
count = 0;
for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
- ptr[i] = kmem_alloc(size, KM_SLEEP | __GFP_NOWARN);
+ ptr[i] = kmem_alloc(size, KM_SLEEP | KM_NODEBUG);
if (ptr[i])
count++;
}
count = 0;
for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
- ptr[i] = kmem_zalloc(size, KM_SLEEP | __GFP_NOWARN);
+ ptr[i] = kmem_zalloc(size, KM_SLEEP | KM_NODEBUG);
if (ptr[i])
count++;
}