mm, page_alloc: rename __GFP_WAIT to __GFP_RECLAIM
author		Mel Gorman <mgorman@techsingularity.net>
		Sat, 7 Nov 2015 00:28:28 +0000 (16:28 -0800)
committer	Linus Torvalds <torvalds@linux-foundation.org>
		Sat, 7 Nov 2015 01:50:42 +0000 (17:50 -0800)
__GFP_WAIT was used to signal that the caller was in atomic context and
could not sleep.  Now it is possible to distinguish between true atomic
context and callers that are not willing to sleep.  The latter should
clear only __GFP_DIRECT_RECLAIM so kswapd will still wake.  As clearing
__GFP_WAIT behaves differently (it also clears the kswapd-wakeup bit), there
is a risk that people will clear the wrong flags.  This patch renames
__GFP_WAIT to __GFP_RECLAIM to clearly indicate what it does -- setting it
allows all reclaim activity; clearing it prevents it.
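
For illustration only (an editor's sketch, not part of the patch): a minimal
userspace model of the renamed flag semantics.  The bit values are
placeholders rather than the kernel's real ones, GFP_KERNEL is reduced to its
reclaim component, and gfpflags_allow_blocking() mirrors the kernel helper
that the converted call sites below switch to.

#include <stdbool.h>
#include <stdio.h>

typedef unsigned int gfp_t;

/* Placeholder bit values -- illustrative, not the kernel's. */
#define __GFP_DIRECT_RECLAIM	((gfp_t)0x400000u)	/* caller may enter direct reclaim */
#define __GFP_KSWAPD_RECLAIM	((gfp_t)0x2000000u)	/* allocation may wake kswapd */
#define __GFP_RECLAIM		(__GFP_DIRECT_RECLAIM | __GFP_KSWAPD_RECLAIM)

#define GFP_NOWAIT	(__GFP_KSWAPD_RECLAIM)	/* no sleeping, kswapd still wakes */
#define GFP_KERNEL	(__GFP_RECLAIM)		/* __GFP_IO and __GFP_FS omitted here */

/* Mirrors the kernel helper used by the converted call sites. */
static bool gfpflags_allow_blocking(gfp_t gfp_flags)
{
	return gfp_flags & __GFP_DIRECT_RECLAIM;
}

int main(void)
{
	printf("GFP_KERNEL may block: %d\n", gfpflags_allow_blocking(GFP_KERNEL)); /* 1 */
	printf("GFP_NOWAIT may block: %d\n", gfpflags_allow_blocking(GFP_NOWAIT)); /* 0 */
	/* Clearing only __GFP_DIRECT_RECLAIM forbids sleeping yet keeps the kswapd wakeup. */
	printf("GFP_KERNEL & ~__GFP_DIRECT_RECLAIM may block: %d\n",
	       gfpflags_allow_blocking(GFP_KERNEL & ~__GFP_DIRECT_RECLAIM)); /* 0 */
	return 0;
}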

[akpm@linux-foundation.org: fix build]
[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Christoph Lameter <cl@linux.com>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Vitaly Wool <vitalywool@gmail.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
38 files changed:
block/blk-core.c
block/blk-mq.c
block/scsi_ioctl.c
drivers/block/drbd/drbd_bitmap.c
drivers/block/mtip32xx/mtip32xx.c
drivers/block/paride/pd.c
drivers/block/pktcdvd.c
drivers/gpu/drm/i915/i915_gem.c
drivers/ide/ide-atapi.c
drivers/ide/ide-cd.c
drivers/ide/ide-cd_ioctl.c
drivers/ide/ide-devsets.c
drivers/ide/ide-disk.c
drivers/ide/ide-ioctls.c
drivers/ide/ide-park.c
drivers/ide/ide-pm.c
drivers/ide/ide-tape.c
drivers/ide/ide-taskfile.c
drivers/infiniband/hw/qib/qib_init.c
drivers/misc/vmw_balloon.c
drivers/nvme/host/pci.c
drivers/scsi/scsi_error.c
drivers/scsi/scsi_lib.c
drivers/staging/rdma/hfi1/init.c
drivers/staging/rdma/ipath/ipath_file_ops.c
fs/cachefiles/internal.h
fs/direct-io.c
fs/nilfs2/mdt.h
include/linux/gfp.h
kernel/power/swap.c
lib/percpu_ida.c
mm/failslab.c
mm/filemap.c
mm/huge_memory.c
mm/memcontrol.c
mm/migrate.c
mm/page_alloc.c
security/integrity/ima/ima_crypto.c

diff --git a/block/blk-core.c b/block/blk-core.c
index 9e32f0868e365b753ee721ab75fca46b13d8a414..590cca21c24a771fe04339046ef172a23b278f35 100644
@@ -638,7 +638,7 @@ int blk_queue_enter(struct request_queue *q, gfp_t gfp)
                if (percpu_ref_tryget_live(&q->q_usage_counter))
                        return 0;
 
-               if (!(gfp & __GFP_WAIT))
+               if (!gfpflags_allow_blocking(gfp))
                        return -EBUSY;
 
                ret = wait_event_interruptible(q->mq_freeze_wq,
@@ -2038,7 +2038,7 @@ void generic_make_request(struct bio *bio)
        do {
                struct request_queue *q = bdev_get_queue(bio->bi_bdev);
 
-               if (likely(blk_queue_enter(q, __GFP_WAIT) == 0)) {
+               if (likely(blk_queue_enter(q, __GFP_DIRECT_RECLAIM) == 0)) {
 
                        q->make_request_fn(q, bio);
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 68c0a3416b341cc33327315b9abd88965f10cafa..694f8703f83cf4b5db887399bbde818bde492eaf 100644
@@ -1186,7 +1186,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
                ctx = blk_mq_get_ctx(q);
                hctx = q->mq_ops->map_queue(q, ctx->cpu);
                blk_mq_set_alloc_data(&alloc_data, q,
-                               __GFP_WAIT|__GFP_HIGH, false, ctx, hctx);
+                               __GFP_RECLAIM|__GFP_HIGH, false, ctx, hctx);
                rq = __blk_mq_alloc_request(&alloc_data, rw);
                ctx = alloc_data.ctx;
                hctx = alloc_data.hctx;
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index dda653ce7b24cfb959f668bdb4a676900ed7637d..0774799942e06a8d890a5c88e40990cd53a15037 100644
@@ -444,7 +444,7 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
 
        }
 
-       rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT);
+       rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_RECLAIM);
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
                goto error_free_buffer;
@@ -495,7 +495,7 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
                break;
        }
 
-       if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, __GFP_WAIT)) {
+       if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, __GFP_RECLAIM)) {
                err = DRIVER_ERROR << 24;
                goto error;
        }
@@ -536,7 +536,7 @@ static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
        struct request *rq;
        int err;
 
-       rq = blk_get_request(q, WRITE, __GFP_WAIT);
+       rq = blk_get_request(q, WRITE, __GFP_RECLAIM);
        if (IS_ERR(rq))
                return PTR_ERR(rq);
        blk_rq_set_block_pc(rq);
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index e5e0f19ceda09eb8dfc2e129b504b7c0899eede8..3dc53a16ed3aaf14dfd30311c16e0dc14f6623a0 100644
@@ -1007,7 +1007,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
        bm_set_page_unchanged(b->bm_pages[page_nr]);
 
        if (ctx->flags & BM_AIO_COPY_PAGES) {
-               page = mempool_alloc(drbd_md_io_page_pool, __GFP_HIGHMEM|__GFP_WAIT);
+               page = mempool_alloc(drbd_md_io_page_pool, __GFP_HIGHMEM|__GFP_RECLAIM);
                copy_highpage(page, b->bm_pages[page_nr]);
                bm_store_page_idx(page, page_nr);
        } else
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index f504232c1ee779079353e7ffd4b2d6831f87e61b..a28a562f7b7f245355db7d536e0558c78cc7e374 100644
@@ -173,7 +173,7 @@ static struct mtip_cmd *mtip_get_int_command(struct driver_data *dd)
 {
        struct request *rq;
 
-       rq = blk_mq_alloc_request(dd->queue, 0, __GFP_WAIT, true);
+       rq = blk_mq_alloc_request(dd->queue, 0, __GFP_RECLAIM, true);
        return blk_mq_rq_to_pdu(rq);
 }
 
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index b9242d78283db3a2bb03524a9bbbfd6923b7dccf..562b5a4ca7b712f6b2b4440375904d8be1ac5200 100644
@@ -723,7 +723,7 @@ static int pd_special_command(struct pd_unit *disk,
        struct request *rq;
        int err = 0;
 
-       rq = blk_get_request(disk->gd->queue, READ, __GFP_WAIT);
+       rq = blk_get_request(disk->gd->queue, READ, __GFP_RECLAIM);
        if (IS_ERR(rq))
                return PTR_ERR(rq);
 
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 7be2375db7f2630e8412bd2c21819ae35a6c8da0..5959c2981cc7d18efbbec56615db1d29b2698064 100644
@@ -704,14 +704,14 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
        int ret = 0;
 
        rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
-                            WRITE : READ, __GFP_WAIT);
+                            WRITE : READ, __GFP_RECLAIM);
        if (IS_ERR(rq))
                return PTR_ERR(rq);
        blk_rq_set_block_pc(rq);
 
        if (cgc->buflen) {
                ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,
-                                     __GFP_WAIT);
+                                     __GFP_RECLAIM);
                if (ret)
                        goto out;
        }
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d58cb9e034fe896acf71d6a399cf00e599ff357b..7e505d4be7c0718ab475f62741b5d0d6d3cc9692 100644
@@ -2216,7 +2216,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
        mapping = file_inode(obj->base.filp)->i_mapping;
        gfp = mapping_gfp_mask(mapping);
        gfp |= __GFP_NORETRY | __GFP_NOWARN;
-       gfp &= ~(__GFP_IO | __GFP_WAIT);
+       gfp &= ~(__GFP_IO | __GFP_RECLAIM);
        sg = st->sgl;
        st->nents = 0;
        for (i = 0; i < page_count; i++) {
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index 1362ad80a76c071e9e2abb2b36472e433bc5dae0..05352f490d6088d736016e588c8a79313df30e21 100644
@@ -92,7 +92,7 @@ int ide_queue_pc_tail(ide_drive_t *drive, struct gendisk *disk,
        struct request *rq;
        int error;
 
-       rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
+       rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
        rq->cmd_type = REQ_TYPE_DRV_PRIV;
        rq->special = (char *)pc;
 
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 64a6b827b3dd12210e23008a81af06e9d9d7cb64..ef907fd5ba98a036d98d24478af49ec0a4c5831b 100644
@@ -441,7 +441,7 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
                struct request *rq;
                int error;
 
-               rq = blk_get_request(drive->queue, write, __GFP_WAIT);
+               rq = blk_get_request(drive->queue, write, __GFP_RECLAIM);
 
                memcpy(rq->cmd, cmd, BLK_MAX_CDB);
                rq->cmd_type = REQ_TYPE_ATA_PC;
diff --git a/drivers/ide/ide-cd_ioctl.c b/drivers/ide/ide-cd_ioctl.c
index 066e3903651842fa31c82bc278c8f7434bf6c950..474173eb31bb345c86bf31fcb195a44432ee14ad 100644
@@ -303,7 +303,7 @@ int ide_cdrom_reset(struct cdrom_device_info *cdi)
        struct request *rq;
        int ret;
 
-       rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
+       rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
        rq->cmd_type = REQ_TYPE_DRV_PRIV;
        rq->cmd_flags = REQ_QUIET;
        ret = blk_execute_rq(drive->queue, cd->disk, rq, 0);
diff --git a/drivers/ide/ide-devsets.c b/drivers/ide/ide-devsets.c
index b05a74d78ef560deefd8f022d27f145766e094b4..0dd43b4fcec6353633d12338be557b8e1e13ef14 100644
@@ -165,7 +165,7 @@ int ide_devset_execute(ide_drive_t *drive, const struct ide_devset *setting,
        if (!(setting->flags & DS_SYNC))
                return setting->set(drive, arg);
 
-       rq = blk_get_request(q, READ, __GFP_WAIT);
+       rq = blk_get_request(q, READ, __GFP_RECLAIM);
        rq->cmd_type = REQ_TYPE_DRV_PRIV;
        rq->cmd_len = 5;
        rq->cmd[0] = REQ_DEVSET_EXEC;
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 56b9708894a5e294302e42066711274b6941616c..37a8a907febeb2556277fa78d03f474a5d865d84 100644
@@ -477,7 +477,7 @@ static int set_multcount(ide_drive_t *drive, int arg)
        if (drive->special_flags & IDE_SFLAG_SET_MULTMODE)
                return -EBUSY;
 
-       rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
+       rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
        rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
 
        drive->mult_req = arg;
diff --git a/drivers/ide/ide-ioctls.c b/drivers/ide/ide-ioctls.c
index aa2e9b77b20d39a67d3da80fd23338fcb631faf6..d05db2469209bb1dfe1f9757d078b0298e55d180 100644
@@ -125,7 +125,7 @@ static int ide_cmd_ioctl(ide_drive_t *drive, unsigned long arg)
        if (NULL == (void *) arg) {
                struct request *rq;
 
-               rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
+               rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
                rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
                err = blk_execute_rq(drive->queue, NULL, rq, 0);
                blk_put_request(rq);
@@ -221,7 +221,7 @@ static int generic_drive_reset(ide_drive_t *drive)
        struct request *rq;
        int ret = 0;
 
-       rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
+       rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
        rq->cmd_type = REQ_TYPE_DRV_PRIV;
        rq->cmd_len = 1;
        rq->cmd[0] = REQ_DRIVE_RESET;
diff --git a/drivers/ide/ide-park.c b/drivers/ide/ide-park.c
index c808685204883db93213e80ee9516a298e1f58b1..2d7dca56dd244387a633e2eec30b177dfbd25a70 100644
@@ -31,7 +31,7 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
        }
        spin_unlock_irq(&hwif->lock);
 
-       rq = blk_get_request(q, READ, __GFP_WAIT);
+       rq = blk_get_request(q, READ, __GFP_RECLAIM);
        rq->cmd[0] = REQ_PARK_HEADS;
        rq->cmd_len = 1;
        rq->cmd_type = REQ_TYPE_DRV_PRIV;
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c
index 081e43458d50f745671f9c5c4476d760e33b7fc0..e34af488693a62d26fbd7d282232d393a4807f55 100644
@@ -18,7 +18,7 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)
        }
 
        memset(&rqpm, 0, sizeof(rqpm));
-       rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
+       rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
        rq->cmd_type = REQ_TYPE_ATA_PM_SUSPEND;
        rq->special = &rqpm;
        rqpm.pm_step = IDE_PM_START_SUSPEND;
@@ -88,7 +88,7 @@ int generic_ide_resume(struct device *dev)
        }
 
        memset(&rqpm, 0, sizeof(rqpm));
-       rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
+       rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
        rq->cmd_type = REQ_TYPE_ATA_PM_RESUME;
        rq->cmd_flags |= REQ_PREEMPT;
        rq->special = &rqpm;
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index f5d51d1d09ee480becca86a1cda687ab3919cdb9..12fa04997dcc33a09129389435fb96f00bfe8e87 100644
@@ -852,7 +852,7 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int size)
        BUG_ON(cmd != REQ_IDETAPE_READ && cmd != REQ_IDETAPE_WRITE);
        BUG_ON(size < 0 || size % tape->blk_size);
 
-       rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
+       rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
        rq->cmd_type = REQ_TYPE_DRV_PRIV;
        rq->cmd[13] = cmd;
        rq->rq_disk = tape->disk;
@@ -860,7 +860,7 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int size)
 
        if (size) {
                ret = blk_rq_map_kern(drive->queue, rq, tape->buf, size,
-                                     __GFP_WAIT);
+                                     __GFP_RECLAIM);
                if (ret)
                        goto out_put;
        }
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index 0979e126fff1e69ee3b3f8df19e5a443cbae5194..a716693417a308c49186bcfb4e692aa86ef1d99d 100644
@@ -430,7 +430,7 @@ int ide_raw_taskfile(ide_drive_t *drive, struct ide_cmd *cmd, u8 *buf,
        int error;
        int rw = !(cmd->tf_flags & IDE_TFLAG_WRITE) ? READ : WRITE;
 
-       rq = blk_get_request(drive->queue, rw, __GFP_WAIT);
+       rq = blk_get_request(drive->queue, rw, __GFP_RECLAIM);
        rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
 
        /*
@@ -441,7 +441,7 @@ int ide_raw_taskfile(ide_drive_t *drive, struct ide_cmd *cmd, u8 *buf,
         */
        if (nsect) {
                error = blk_rq_map_kern(drive->queue, rq, buf,
-                                       nsect * SECTOR_SIZE, __GFP_WAIT);
+                                       nsect * SECTOR_SIZE, __GFP_RECLAIM);
                if (error)
                        goto put_req;
        }
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 7e00470adc30223c183f0e287f7a7d0d9beff944..4ff340fe904f5c3bc9484b1f0cf2c53abd2bd4dd 100644
@@ -1680,7 +1680,7 @@ int qib_setup_eagerbufs(struct qib_ctxtdata *rcd)
         * heavy filesystem activity makes these fail, and we can
         * use compound pages.
         */
-       gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP;
+       gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;
 
        egrcnt = rcd->rcvegrcnt;
        egroff = rcd->rcvegr_tid_base;
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 89300870fefb97a66291b96d52fca3aeb3714259..1e688bfec56728c3d00ebc353031c26fde29f187 100644
@@ -75,7 +75,7 @@ MODULE_LICENSE("GPL");
 
 /*
  * Use __GFP_HIGHMEM to allow pages from HIGHMEM zone. We don't
- * allow wait (__GFP_WAIT) for NOSLEEP page allocations. Use
+ * allow wait (__GFP_RECLAIM) for NOSLEEP page allocations. Use
  * __GFP_NOWARN, to suppress page allocation failure warnings.
  */
 #define VMW_PAGE_ALLOC_NOSLEEP         (__GFP_HIGHMEM|__GFP_NOWARN)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index e878590e71b68ad3863ddc83006e76420996a4f4..6c195554d94ac9d690c5ffadb8fa47badc123816 100644
@@ -1025,11 +1025,13 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
        req->special = (void *)0;
 
        if (buffer && bufflen) {
-               ret = blk_rq_map_kern(q, req, buffer, bufflen, __GFP_WAIT);
+               ret = blk_rq_map_kern(q, req, buffer, bufflen,
+                                     __GFP_DIRECT_RECLAIM);
                if (ret)
                        goto out;
        } else if (ubuffer && bufflen) {
-               ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen, __GFP_WAIT);
+               ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
+                                     __GFP_DIRECT_RECLAIM);
                if (ret)
                        goto out;
                bio = req->bio;
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 66a96cd98b975dcdbd5429cf02f069b52771fa84..984ddcb4786d6074c94a2b0387c25362a4f4c2c4 100644
@@ -1970,7 +1970,7 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
        struct request *req;
 
        /*
-        * blk_get_request with GFP_KERNEL (__GFP_WAIT) sleeps until a
+        * blk_get_request with GFP_KERNEL (__GFP_RECLAIM) sleeps until a
         * request becomes available
         */
        req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 126a48c6431e5a5d9798aed3472916b06ef476c8..dd8ad2a44510cae1b97260b7d64fc65e7770db53 100644
@@ -222,13 +222,13 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
        int write = (data_direction == DMA_TO_DEVICE);
        int ret = DRIVER_ERROR << 24;
 
-       req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
+       req = blk_get_request(sdev->request_queue, write, __GFP_RECLAIM);
        if (IS_ERR(req))
                return ret;
        blk_rq_set_block_pc(req);
 
        if (bufflen &&  blk_rq_map_kern(sdev->request_queue, req,
-                                       buffer, bufflen, __GFP_WAIT))
+                                       buffer, bufflen, __GFP_RECLAIM))
                goto out;
 
        req->cmd_len = COMMAND_SIZE(cmd[0]);
diff --git a/drivers/staging/rdma/hfi1/init.c b/drivers/staging/rdma/hfi1/init.c
index 47a1202fcbdf5117e66a8c78da95c80bbffdf459..8666f3ad24e9960bfb85b6814de6c277ae37b522 100644
@@ -1560,7 +1560,7 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
         * heavy filesystem activity makes these fail, and we can
         * use compound pages.
         */
-       gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP;
+       gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;
 
        /*
         * The minimum size of the eager buffers is a groups of MTU-sized
diff --git a/drivers/staging/rdma/ipath/ipath_file_ops.c b/drivers/staging/rdma/ipath/ipath_file_ops.c
index 5d9b9dbd8fc44804fbf7a4256782ecc7d4f7630f..13c3cd11ab92a5c610ef7a5e32795a0b0a0931c9 100644
@@ -905,7 +905,7 @@ static int ipath_create_user_egr(struct ipath_portdata *pd)
         * heavy filesystem activity makes these fail, and we can
         * use compound pages.
         */
-       gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP;
+       gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;
 
        egrcnt = dd->ipath_rcvegrcnt;
        /* TID number offset for this port */
diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
index aecd0859eacbf28d9b3782527c178f8e05000e7c..9c4b737a54df64bb15df446c5c359e594ce5b3a3 100644
@@ -30,7 +30,7 @@ extern unsigned cachefiles_debug;
 #define CACHEFILES_DEBUG_KLEAVE        2
 #define CACHEFILES_DEBUG_KDEBUG        4
 
-#define cachefiles_gfp (__GFP_WAIT | __GFP_NORETRY | __GFP_NOMEMALLOC)
+#define cachefiles_gfp (__GFP_RECLAIM | __GFP_NORETRY | __GFP_NOMEMALLOC)
 
 /*
  * node records
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 3ae0e0427191c7849fc70301e58792515f5f181c..18e7554cf94cac57d1eb3dc17ba4001e75cef5f6 100644
@@ -361,7 +361,7 @@ dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
 
        /*
         * bio_alloc() is guaranteed to return a bio when called with
-        * __GFP_WAIT and we request a valid number of vectors.
+        * __GFP_RECLAIM and we request a valid number of vectors.
         */
        bio = bio_alloc(GFP_KERNEL, nr_vecs);
 
diff --git a/fs/nilfs2/mdt.h b/fs/nilfs2/mdt.h
index fe529a87a208d4db026efbe98de71608a1622a6c..03246cac33384e2cc9a0d49bfe8beaba74118873 100644
@@ -72,7 +72,7 @@ static inline struct nilfs_mdt_info *NILFS_MDT(const struct inode *inode)
 }
 
 /* Default GFP flags using highmem */
-#define NILFS_MDT_GFP      (__GFP_WAIT | __GFP_IO | __GFP_HIGHMEM)
+#define NILFS_MDT_GFP      (__GFP_RECLAIM | __GFP_IO | __GFP_HIGHMEM)
 
 int nilfs_mdt_get_block(struct inode *, unsigned long, int,
                        void (*init_block)(struct inode *,
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 86f9f7da86ea8fedc7ebfe80b668843eb5d5cb6c..369227202ac20824a1df4b86dbb1ea7469e5ed50 100644
@@ -107,7 +107,7 @@ struct vm_area_struct;
  * can be cleared when the reclaiming of pages would cause unnecessary
  * disruption.
  */
-#define __GFP_WAIT ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM))
+#define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM))
 #define __GFP_DIRECT_RECLAIM   ((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */
 #define __GFP_KSWAPD_RECLAIM   ((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */
 
@@ -126,12 +126,12 @@ struct vm_area_struct;
  */
 #define GFP_ATOMIC     (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM)
 #define GFP_NOWAIT     (__GFP_KSWAPD_RECLAIM)
-#define GFP_NOIO       (__GFP_WAIT)
-#define GFP_NOFS       (__GFP_WAIT | __GFP_IO)
-#define GFP_KERNEL     (__GFP_WAIT | __GFP_IO | __GFP_FS)
-#define GFP_TEMPORARY  (__GFP_WAIT | __GFP_IO | __GFP_FS | \
+#define GFP_NOIO       (__GFP_RECLAIM)
+#define GFP_NOFS       (__GFP_RECLAIM | __GFP_IO)
+#define GFP_KERNEL     (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
+#define GFP_TEMPORARY  (__GFP_RECLAIM | __GFP_IO | __GFP_FS | \
                         __GFP_RECLAIMABLE)
-#define GFP_USER       (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
+#define GFP_USER       (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
 #define GFP_HIGHUSER   (GFP_USER | __GFP_HIGHMEM)
 #define GFP_HIGHUSER_MOVABLE   (GFP_HIGHUSER | __GFP_MOVABLE)
 #define GFP_TRANSHUGE  ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
@@ -143,12 +143,12 @@ struct vm_area_struct;
 #define GFP_MOVABLE_SHIFT 3
 
 /* Control page allocator reclaim behavior */
-#define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\
+#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
                        __GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
                        __GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC)
 
 /* Control slab gfp mask during early boot */
-#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS))
+#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))
 
 /* Control allocation constraints */
 #define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index b2066fb5b10f7639af4a391fe9786429cff90ac4..12cd989dadf639c3276ca228fef1431284c862ec 100644
@@ -257,7 +257,7 @@ static int hib_submit_io(int rw, pgoff_t page_off, void *addr,
        struct bio *bio;
        int error = 0;
 
-       bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
+       bio = bio_alloc(__GFP_RECLAIM | __GFP_HIGH, 1);
        bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);
        bio->bi_bdev = hib_resume_bdev;
 
@@ -356,7 +356,7 @@ static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
                return -ENOSPC;
 
        if (hb) {
-               src = (void *)__get_free_page(__GFP_WAIT | __GFP_NOWARN |
+               src = (void *)__get_free_page(__GFP_RECLAIM | __GFP_NOWARN |
                                              __GFP_NORETRY);
                if (src) {
                        copy_page(src, buf);
@@ -364,7 +364,7 @@ static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
                        ret = hib_wait_io(hb); /* Free pages */
                        if (ret)
                                return ret;
-                       src = (void *)__get_free_page(__GFP_WAIT |
+                       src = (void *)__get_free_page(__GFP_RECLAIM |
                                                      __GFP_NOWARN |
                                                      __GFP_NORETRY);
                        if (src) {
@@ -672,7 +672,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
        nr_threads = num_online_cpus() - 1;
        nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
 
-       page = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
+       page = (void *)__get_free_page(__GFP_RECLAIM | __GFP_HIGH);
        if (!page) {
                printk(KERN_ERR "PM: Failed to allocate LZO page\n");
                ret = -ENOMEM;
@@ -975,7 +975,7 @@ static int get_swap_reader(struct swap_map_handle *handle,
                last = tmp;
 
                tmp->map = (struct swap_map_page *)
-                          __get_free_page(__GFP_WAIT | __GFP_HIGH);
+                          __get_free_page(__GFP_RECLAIM | __GFP_HIGH);
                if (!tmp->map) {
                        release_swap_reader(handle);
                        return -ENOMEM;
@@ -1242,9 +1242,9 @@ static int load_image_lzo(struct swap_map_handle *handle,
 
        for (i = 0; i < read_pages; i++) {
                page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
-                                                 __GFP_WAIT | __GFP_HIGH :
-                                                 __GFP_WAIT | __GFP_NOWARN |
-                                                 __GFP_NORETRY);
+                                                 __GFP_RECLAIM | __GFP_HIGH :
+                                                 __GFP_RECLAIM | __GFP_NOWARN |
+                                                 __GFP_NORETRY);
 
                if (!page[i]) {
                        if (i < LZO_CMP_PAGES) {
diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c
index f75715131f2094cc3b76d2d1f14ba31e1ccf3744..6d40944960de77bff090f41a0f94f0f85e8522a4 100644
@@ -135,7 +135,7 @@ static inline unsigned alloc_local_tag(struct percpu_ida_cpu *tags)
  * TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, of course).
  *
  * @gfp indicates whether or not to wait until a free id is available (it's not
- * used for internal memory allocations); thus if passed __GFP_WAIT we may sleep
+ * used for internal memory allocations); thus if passed __GFP_RECLAIM we may sleep
  * however long it takes until another thread frees an id (same semantics as a
  * mempool).
  *
diff --git a/mm/failslab.c b/mm/failslab.c
index 98fb490311eb94386aebd2f4ceb77c729f4fa01e..79171b4a58269986491198403a322d6c2a7dc814 100644
@@ -3,11 +3,11 @@
 
 static struct {
        struct fault_attr attr;
-       bool ignore_gfp_wait;
+       bool ignore_gfp_reclaim;
        bool cache_filter;
 } failslab = {
        .attr = FAULT_ATTR_INITIALIZER,
-       .ignore_gfp_wait = true,
+       .ignore_gfp_reclaim = true,
        .cache_filter = false,
 };
 
@@ -16,7 +16,7 @@ bool should_failslab(size_t size, gfp_t gfpflags, unsigned long cache_flags)
        if (gfpflags & __GFP_NOFAIL)
                return false;
 
-        if (failslab.ignore_gfp_wait && (gfpflags & __GFP_WAIT))
+       if (failslab.ignore_gfp_reclaim && (gfpflags & __GFP_RECLAIM))
                return false;
 
        if (failslab.cache_filter && !(cache_flags & SLAB_FAILSLAB))
@@ -42,7 +42,7 @@ static int __init failslab_debugfs_init(void)
                return PTR_ERR(dir);
 
        if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
-                               &failslab.ignore_gfp_wait))
+                               &failslab.ignore_gfp_reclaim))
                goto fail;
        if (!debugfs_create_bool("cache-filter", mode, dir,
                                &failslab.cache_filter))
diff --git a/mm/filemap.c b/mm/filemap.c
index 58e04e26f9965d5ddb8c35fc3770349493b62eab..6ef3674c0763977aaf8cc112d67a0d68b2ef1581 100644
@@ -2713,7 +2713,7 @@ EXPORT_SYMBOL(generic_file_write_iter);
  * page is known to the local caching routines.
  *
  * The @gfp_mask argument specifies whether I/O may be performed to release
- * this page (__GFP_IO), and whether the call may block (__GFP_WAIT & __GFP_FS).
+ * this page (__GFP_IO), and whether the call may block (__GFP_RECLAIM & __GFP_FS).
  *
  */
 int try_to_release_page(struct page *page, gfp_t gfp_mask)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index f5c08b46fef85dd5c6d4247299660a0e1acb0838..9812d4618651621e1d7ebc343e8aeff3434a828c 100644
@@ -786,7 +786,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 
 static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
 {
-       return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp;
+       return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_RECLAIM)) | extra_gfp;
 }
 
 /* Caller must hold page table lock. */
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 05374f09339ccf57678433ab3db6d83587c7077c..a5470674a4772af204083071023ec58c9a404855 100644
@@ -2120,7 +2120,7 @@ done_restock:
        /*
         * If the hierarchy is above the normal consumption range, schedule
         * reclaim on returning to userland.  We can perform reclaim here
-        * if __GFP_WAIT but let's always punt for simplicity and so that
+        * if __GFP_RECLAIM but let's always punt for simplicity and so that
         * GFP_KERNEL can consistently be used during reclaim.  @memcg is
         * not recorded as it most likely matches current's and won't
         * change in the meantime.  As high limit is checked again before
diff --git a/mm/migrate.c b/mm/migrate.c
index e60379eb23f8402bf65cb18e8a6e3fb3e3c98411..7890d0bb5e23c3db75cc682878a2c5ec89b0e513 100644
@@ -1752,7 +1752,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
                goto out_dropref;
 
        new_page = alloc_pages_node(node,
-               (GFP_TRANSHUGE | __GFP_THISNODE) & ~__GFP_WAIT,
+               (GFP_TRANSHUGE | __GFP_THISNODE) & ~__GFP_RECLAIM,
                HPAGE_PMD_ORDER);
        if (!new_page)
                goto out_fail;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 70461f3e3378691499d63005bd74059ba54c4566..1b373096b990d08edf6153cdcb542975f2cb4c2b 100644
@@ -2160,11 +2160,11 @@ static struct {
        struct fault_attr attr;
 
        bool ignore_gfp_highmem;
-       bool ignore_gfp_wait;
+       bool ignore_gfp_reclaim;
        u32 min_order;
 } fail_page_alloc = {
        .attr = FAULT_ATTR_INITIALIZER,
-       .ignore_gfp_wait = true,
+       .ignore_gfp_reclaim = true,
        .ignore_gfp_highmem = true,
        .min_order = 1,
 };
@@ -2183,7 +2183,8 @@ static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
                return false;
        if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
                return false;
-       if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_DIRECT_RECLAIM))
+       if (fail_page_alloc.ignore_gfp_reclaim &&
+                       (gfp_mask & __GFP_DIRECT_RECLAIM))
                return false;
 
        return should_fail(&fail_page_alloc.attr, 1 << order);
@@ -2202,7 +2203,7 @@ static int __init fail_page_alloc_debugfs(void)
                return PTR_ERR(dir);
 
        if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
-                               &fail_page_alloc.ignore_gfp_wait))
+                               &fail_page_alloc.ignore_gfp_reclaim))
                goto fail;
        if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
                                &fail_page_alloc.ignore_gfp_highmem))
diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
index e24121afb2f2773eacced7410dbdd5953a49c206..6eb62936c6723c02ca0b565ebe955aab2eb74c17 100644
@@ -126,7 +126,7 @@ static void *ima_alloc_pages(loff_t max_size, size_t *allocated_size,
 {
        void *ptr;
        int order = ima_maxorder;
-       gfp_t gfp_mask = __GFP_WAIT | __GFP_NOWARN | __GFP_NORETRY;
+       gfp_t gfp_mask = __GFP_RECLAIM | __GFP_NOWARN | __GFP_NORETRY;
 
        if (order)
                order = min(get_order(max_size), order);