md/async: don't pass a memory pointer as a page pointer.
author    NeilBrown <neilb@suse.de>
          Fri, 16 Oct 2009 05:40:25 +0000 (16:40 +1100)
committer NeilBrown <neilb@suse.de>
          Fri, 16 Oct 2009 05:40:25 +0000 (16:40 +1100)
md/raid6 passes a list of 'struct page *' to the async_tx routines,
which then either DMA-map them for offload, or take the page_address
for CPU-based calculations.
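
Schematically, the two consumers look like this (a minimal sketch; the
names and calls follow the hunks below):

	/* offload path: hand the page to the DMA engine */
	dma_src[idx] = dma_map_page(dma->dev, blocks[i], offset, len,
				    DMA_TO_DEVICE);

	/* CPU path: compute on the kernel-virtual address of the page */
	srcs[i] = page_address(blocks[i]) + offset;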

For RAID6 we sometimes leave 'blanks' in the list of pages.
For CPU-based calculations, we want to treat these as a page of zeros.
For offloaded calculations, we simply don't pass a page to the
hardware.
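
Roughly, the offload path just skips a blank source when building the
DMA descriptor list, as in this sketch of the do_async_gen_syndrome loop
changed below (idx advances only for real pages):

	for (i = 0, idx = 0; i < src_cnt; i++) {
		if (blocks[i] == NULL)
			continue;	/* blank: nothing handed to the hardware */
		dma_src[idx++] = dma_map_page(dma->dev, blocks[i], offset,
					      len, DMA_TO_DEVICE);
	}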

Currently the 'blanks' are encoded as a pointer to
raid6_empty_zero_page.  This is a 4096-byte memory region, not a
'struct page'.  This is mostly handled correctly but is rather ugly.

So change the code to pass and expect a NULL pointer for the blanks.
When taking the page_address of a page, we need to check for NULL and
in that case use raid6_empty_zero_page instead.
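
Concretely, the CPU-side lookup becomes (this is essentially the loop
added by the hunks below):

	for (i = 0; i < disks; i++)
		if (blocks[i] == NULL)
			ptrs[i] = (void *)raid6_empty_zero_page;
		else
			ptrs[i] = page_address(blocks[i]);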

Signed-off-by: NeilBrown <neilb@suse.de>
crypto/async_tx/async_pq.c
crypto/async_tx/async_raid6_recov.c
drivers/md/raid5.c

index b88db6d1dc65f70626d15b141497dd9bcff40b07..9ab1ce4af3cc12c9b7edf43b051bf7518b25907c 100644 (file)
  */
 static struct page *scribble;
 
-static bool is_raid6_zero_block(struct page *p)
-{
-       return p == (void *) raid6_empty_zero_page;
-}
-
 /* the struct page *blocks[] parameter passed to async_gen_syndrome()
  * and async_syndrome_val() contains the 'P' destination address at
  * blocks[disks-2] and the 'Q' destination address at blocks[disks-1]
@@ -83,7 +78,7 @@ do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks,
         * sources and update the coefficients accordingly
         */
        for (i = 0, idx = 0; i < src_cnt; i++) {
-               if (is_raid6_zero_block(blocks[i]))
+               if (blocks[i] == NULL)
                        continue;
                dma_src[idx] = dma_map_page(dma->dev, blocks[i], offset, len,
                                            DMA_TO_DEVICE);
@@ -160,9 +155,9 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
                srcs = (void **) blocks;
 
        for (i = 0; i < disks; i++) {
-               if (is_raid6_zero_block(blocks[i])) {
+               if (blocks[i] == NULL) {
                        BUG_ON(i > disks - 3); /* P or Q can't be zero */
-                       srcs[i] = blocks[i];
+                       srcs[i] = (void*)raid6_empty_zero_page;
                } else
                        srcs[i] = page_address(blocks[i]) + offset;
        }
@@ -290,12 +285,10 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
                if (submit->flags & ASYNC_TX_FENCE)
                        dma_flags |= DMA_PREP_FENCE;
                for (i = 0; i < disks; i++)
-                       if (likely(blocks[i])) {
-                               BUG_ON(is_raid6_zero_block(blocks[i]));
+                       if (likely(blocks[i]))
                                dma_src[i] = dma_map_page(dev, blocks[i],
                                                          offset, len,
                                                          DMA_TO_DEVICE);
-                       }
 
                for (;;) {
                        tx = device->device_prep_dma_pq_val(chan, pq, dma_src,
index 6d73dde4786d3ca05aff5c431de9e060f4ba9131..8e30b6ed078936d6e16e7a951882362115ee6bbd 100644 (file)
@@ -263,10 +263,10 @@ __2data_recov_n(int disks, size_t bytes, int faila, int failb,
         * delta p and delta q
         */
        dp = blocks[faila];
-       blocks[faila] = (void *)raid6_empty_zero_page;
+       blocks[faila] = NULL;
        blocks[disks-2] = dp;
        dq = blocks[failb];
-       blocks[failb] = (void *)raid6_empty_zero_page;
+       blocks[failb] = NULL;
        blocks[disks-1] = dq;
 
        init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
@@ -338,7 +338,10 @@ async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
 
                async_tx_quiesce(&submit->depend_tx);
                for (i = 0; i < disks; i++)
-                       ptrs[i] = page_address(blocks[i]);
+                       if (blocks[i] == NULL)
+                               ptrs[i] = (void*)raid6_empty_zero_page;
+                       else
+                               ptrs[i] = page_address(blocks[i]);
 
                raid6_2data_recov(disks, bytes, faila, failb, ptrs);
 
@@ -398,7 +401,10 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
 
                async_tx_quiesce(&submit->depend_tx);
                for (i = 0; i < disks; i++)
-                       ptrs[i] = page_address(blocks[i]);
+                       if (blocks[i] == NULL)
+                               ptrs[i] = (void*)raid6_empty_zero_page;
+                       else
+                               ptrs[i] = page_address(blocks[i]);
 
                raid6_datap_recov(disks, bytes, faila, ptrs);
 
@@ -414,7 +420,7 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
         * Use the dead data page as temporary storage for delta q
         */
        dq = blocks[faila];
-       blocks[faila] = (void *)raid6_empty_zero_page;
+       blocks[faila] = NULL;
        blocks[disks-1] = dq;
 
        /* in the 4 disk case we only need to perform a single source
index c4366c9373c5ec050e8cffed052587d4e1977740..dcd9e659ed9d20607fbaa0efdbb84a4af2e96e06 100644 (file)
@@ -720,7 +720,7 @@ static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh)
        int i;
 
        for (i = 0; i < disks; i++)
-               srcs[i] = (void *)raid6_empty_zero_page;
+               srcs[i] = NULL;
 
        count = 0;
        i = d0_idx;
@@ -816,7 +816,7 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
         * slot number conversion for 'faila' and 'failb'
         */
        for (i = 0; i < disks ; i++)
-               blocks[i] = (void *)raid6_empty_zero_page;
+               blocks[i] = NULL;
        count = 0;
        i = d0_idx;
        do {