The __get_free_pages() function must be used in place of kmalloc()
to ensure that the __GFP_COMP flag is strictly honored. This is due to
kmalloc() being layered on the generic Linux slab caches. It
wasn't until recently that all caches were created using __GFP_COMP.
This means a kmalloc() call which passed the __GFP_COMP flag could
nevertheless be returned a non-compound allocation.
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
/*
 * Descriptor for an emergency object, i.e. an object allocated directly
 * from the page allocator when the slab cache could not satisfy the
 * request.  Emergency objects are tracked in a per-cache red-black tree
 * (keyed by buffer address) so they can be located again on free.
 *
 * ske_obj is an unsigned long rather than a pointer because the buffer
 * comes from __get_free_pages(), which returns the allocation as a
 * page-aligned unsigned long; storing it in its native type avoids
 * repeated casts in the tree search/insert comparisons.
 */
typedef struct spl_kmem_emergency {
	struct rb_node ske_node;	/* Emergency tree linkage */
	unsigned long ske_obj;		/* Buffer address */
} spl_kmem_emergency_t;
typedef struct spl_kmem_cache {
} spl_kmem_emergency_t;
typedef struct spl_kmem_cache {
while (node) {
ske = container_of(node, spl_kmem_emergency_t, ske_node);
while (node) {
ske = container_of(node, spl_kmem_emergency_t, ske_node);
- if (address < (unsigned long)ske->ske_obj)
+ if (address < ske->ske_obj)
- else if (address > (unsigned long)ske->ske_obj)
+ else if (address > ske->ske_obj)
node = node->rb_right;
else
return (ske);
node = node->rb_right;
else
return (ske);
{
struct rb_node **new = &(root->rb_node), *parent = NULL;
spl_kmem_emergency_t *ske_tmp;
{
struct rb_node **new = &(root->rb_node), *parent = NULL;
spl_kmem_emergency_t *ske_tmp;
- unsigned long address = (unsigned long)ske->ske_obj;
+ unsigned long address = ske->ske_obj;
while (*new) {
ske_tmp = container_of(*new, spl_kmem_emergency_t, ske_node);
parent = *new;
while (*new) {
ske_tmp = container_of(*new, spl_kmem_emergency_t, ske_node);
parent = *new;
- if (address < (unsigned long)ske_tmp->ske_obj)
+ if (address < ske_tmp->ske_obj)
new = &((*new)->rb_left);
new = &((*new)->rb_left);
- else if (address > (unsigned long)ske_tmp->ske_obj)
+ else if (address > ske_tmp->ske_obj)
new = &((*new)->rb_right);
else
return (0);
new = &((*new)->rb_right);
else
return (0);
{
gfp_t lflags = kmem_flags_convert(flags);
spl_kmem_emergency_t *ske;
{
gfp_t lflags = kmem_flags_convert(flags);
spl_kmem_emergency_t *ske;
+ int order = get_order(skc->skc_obj_size);
int empty;
/* Last chance use a partial slab if one now exists */
int empty;
/* Last chance use a partial slab if one now exists */
if (ske == NULL)
return (-ENOMEM);
if (ske == NULL)
return (-ENOMEM);
- ske->ske_obj = kmalloc(skc->skc_obj_size, lflags);
- if (ske->ske_obj == NULL) {
+ ske->ske_obj = __get_free_pages(lflags, order);
+ if (ske->ske_obj == 0) {
kfree(ske);
return (-ENOMEM);
}
kfree(ske);
return (-ENOMEM);
}
spin_unlock(&skc->skc_lock);
if (unlikely(!empty)) {
spin_unlock(&skc->skc_lock);
if (unlikely(!empty)) {
+ free_pages(ske->ske_obj, order);
kfree(ske);
return (-EINVAL);
}
kfree(ske);
return (-EINVAL);
}
+ *obj = (void *)ske->ske_obj;
spl_emergency_free(spl_kmem_cache_t *skc, void *obj)
{
spl_kmem_emergency_t *ske;
spl_emergency_free(spl_kmem_cache_t *skc, void *obj)
{
spl_kmem_emergency_t *ske;
+ int order = get_order(skc->skc_obj_size);
spin_lock(&skc->skc_lock);
ske = spl_emergency_search(&skc->skc_emergency_tree, obj);
spin_lock(&skc->skc_lock);
ske = spl_emergency_search(&skc->skc_emergency_tree, obj);
if (ske == NULL)
return (-ENOENT);
if (ske == NULL)
return (-ENOENT);
+ free_pages(ske->ske_obj, order);