git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/commitdiff
x86, mm: use pfn instead of pos in split_mem_range
author: Yinghai Lu <yinghai@kernel.org>
Sat, 17 Nov 2012 03:39:14 +0000 (19:39 -0800)
committer: H. Peter Anvin <hpa@linux.intel.com>
Sat, 17 Nov 2012 19:59:41 +0000 (11:59 -0800)
Tracking the position as a pfn instead of a physical address could save some bit shifting operations.

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Link: http://lkml.kernel.org/r/1353123563-3103-38-git-send-email-yinghai@kernel.org
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
arch/x86/mm/init.c

index 1cca052b2cbda33ac97103554e1c271f1ad9e82a..4bf1c5374928b678e9c158748533b679bdb40da9 100644 (file)
@@ -204,12 +204,11 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range,
                                     unsigned long end)
 {
        unsigned long start_pfn, end_pfn;
-       unsigned long pos;
+       unsigned long pfn;
        int i;
 
        /* head if not big page alignment ? */
-       start_pfn = PFN_DOWN(start);
-       pos = PFN_PHYS(start_pfn);
+       pfn = start_pfn = PFN_DOWN(start);
 #ifdef CONFIG_X86_32
        /*
         * Don't use a large page for the first 2/4MB of memory
@@ -217,26 +216,26 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range,
         * and overlapping MTRRs into large pages can cause
         * slowdowns.
         */
-       if (pos == 0)
+       if (pfn == 0)
                end_pfn = PFN_DOWN(PMD_SIZE);
        else
-               end_pfn = PFN_DOWN(round_up(pos, PMD_SIZE));
+               end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
 #else /* CONFIG_X86_64 */
-       end_pfn = PFN_DOWN(round_up(pos, PMD_SIZE));
+       end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
 #endif
        if (end_pfn > PFN_DOWN(end))
                end_pfn = PFN_DOWN(end);
        if (start_pfn < end_pfn) {
                nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
-               pos = PFN_PHYS(end_pfn);
+               pfn = end_pfn;
        }
 
        /* big page (2M) range */
-       start_pfn = PFN_DOWN(round_up(pos, PMD_SIZE));
+       start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
 #ifdef CONFIG_X86_32
        end_pfn = PFN_DOWN(round_down(end, PMD_SIZE));
 #else /* CONFIG_X86_64 */
-       end_pfn = PFN_DOWN(round_up(pos, PUD_SIZE));
+       end_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
        if (end_pfn > PFN_DOWN(round_down(end, PMD_SIZE)))
                end_pfn = PFN_DOWN(round_down(end, PMD_SIZE));
 #endif
@@ -244,32 +243,32 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range,
        if (start_pfn < end_pfn) {
                nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
                                page_size_mask & (1<<PG_LEVEL_2M));
-               pos = PFN_PHYS(end_pfn);
+               pfn = end_pfn;
        }
 
 #ifdef CONFIG_X86_64
        /* big page (1G) range */
-       start_pfn = PFN_DOWN(round_up(pos, PUD_SIZE));
+       start_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
        end_pfn = PFN_DOWN(round_down(end, PUD_SIZE));
        if (start_pfn < end_pfn) {
                nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
                                page_size_mask &
                                 ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
-               pos = PFN_PHYS(end_pfn);
+               pfn = end_pfn;
        }
 
        /* tail is not big page (1G) alignment */
-       start_pfn = PFN_DOWN(round_up(pos, PMD_SIZE));
+       start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
        end_pfn = PFN_DOWN(round_down(end, PMD_SIZE));
        if (start_pfn < end_pfn) {
                nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
                                page_size_mask & (1<<PG_LEVEL_2M));
-               pos = PFN_PHYS(end_pfn);
+               pfn = end_pfn;
        }
 #endif
 
        /* tail is not big page (2M) alignment */
-       start_pfn = PFN_DOWN(pos);
+       start_pfn = pfn;
        end_pfn = PFN_DOWN(end);
        nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);