/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>

#include "cma.h"

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

/*
 * Compute the bitmap-granularity alignment mask implied by @align_order;
 * it is later passed to bitmap_find_next_zero_area_off() so that search
 * results honour the requested page alignment.
 */
static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

/*
 * Find the offset, in order_per_bit units, between the area's base PFN
 * and the first PFN aligned to @align_order.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;

	return (ALIGN(cma->base_pfn, (1UL << align_order))
		- cma->base_pfn) >> cma->order_per_bit;
}

/* Convert a page count to the number of bitmap bits it occupies. */
static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned int count)
{
	unsigned long bitmap_no, bitmap_count;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}

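/*
 * Worked example of the granularity math above (illustrative, not part of
 * the original file): with order_per_bit = 2 each bitmap bit covers 4
 * pages. For an area based at pfn 0x1002 and a request with align = 4:
 *
 *	cma_bitmap_aligned_mask():   (1 << (4 - 2)) - 1           = 3
 *	cma_bitmap_aligned_offset(): (ALIGN(0x1002, 16) - 0x1002) >> 2
 *	                           = (0x1010 - 0x1002) >> 2       = 3
 *	cma_bitmap_pages_to_bits(7): ALIGN(7, 4) >> 2             = 2
 *
 * so a 7-page request consumes 2 bits, i.e. it is rounded up to 8 pages
 * of bitmap coverage.
 */
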
static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

	if (!cma->bitmap)
		return -ENOMEM;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range() requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA resv range
			 * to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto err;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return 0;

err:
	kfree(cma->bitmap);
	cma->count = 0;
	return -EINVAL;
}

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);

		if (ret)
			return ret;
	}

	return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area.
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 struct cma **res_cma)
{
	struct cma *cma;
	phys_addr_t alignment;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}

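/*
 * Illustrative sketch (not part of the original file): early boot code
 * handing an already memblock-reserved block to CMA. The base address,
 * size and the my_cma pointer are hypothetical; base and size must pass
 * the alignment checks above.
 *
 *	static struct cma *my_cma;
 *
 *	static int __init my_cma_setup(void)
 *	{
 *		phys_addr_t base = 0x20000000;	(reserved earlier via memblock_reserve())
 *		phys_addr_t size = 16 * SZ_1M;
 *
 *		return cma_init_reserved_mem(base, size, 0, &my_cma);
 *	}
 */
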
/**
 * cma_declare_contiguous() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be power of 2 or zero.
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows the creation of custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base. If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, struct cma **res_cma)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

#ifdef CONFIG_X86
	/*
	 * high_memory isn't direct mapped memory so retrieving its physical
	 * address isn't appropriate. But it would be useful to check the
	 * physical address of the highmem boundary so it's justifiable to get
	 * the physical address from it. On x86 there is a validation check for
	 * this case, so the following workaround is needed to avoid it.
	 */
	highmem_start = __pa_nodebug(high_memory);
#else
	highmem_start = __pa(high_memory);
#endif
	pr_debug("%s(size %pa, base %pa, limit %pa, alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/*
	 * Sanitise input arguments.
	 * Pages at both ends of the CMA area could be merged into adjacent
	 * unmovable migratetype pages by the page allocator's buddy
	 * algorithm. In that case a contiguous allocation could fail, which
	 * is not what we want.
	 */
	alignment = max(alignment,
		(phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order));
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base the requested region must not cross
	 * the low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range(size, alignment,
						    highmem_start, limit);
			limit = highmem_start;
		}

		if (!addr) {
			addr = memblock_alloc_range(size, alignment, base,
						    limit);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects but this address isn't mapped and accessible
		 */
		kmemleak_ignore(phys_to_virt(addr));
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, res_cma);
	if (ret)
		goto err;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}

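/*
 * Illustrative sketch (not part of the original file): a typical early,
 * arch-level caller letting CMA pick the placement. The 64 MiB size is
 * hypothetical; dma_contiguous_reserve_area() is the canonical in-tree
 * user of this interface.
 *
 *	static struct cma *my_cma;
 *
 *	void __init my_arch_reserve_cma(void)
 *	{
 *		int ret = cma_declare_contiguous(0, 64 * SZ_1M, 0, 0, 0,
 *						 false, &my_cma);
 *		if (ret)
 *			pr_warn("CMA: reservation failed: %d\n", ret);
 *	}
 */
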
/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma: Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates part of the contiguous memory from the specified
 * contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align)
{
	unsigned long mask, offset, pfn, start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	struct page *page = NULL;
	int ret;

	if (!cma || !cma->count)
		return NULL;

	pr_debug("%s(cma %p, count %u, align %u)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	for (;;) {
		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}

/**
 * cma_release() - release allocated pages
 * @cma: Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the contiguous
 * area and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);

	return true;
}
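
/*
 * Illustrative sketch (not part of the original file): allocating and then
 * freeing a 1 MiB buffer from a region set up earlier with
 * cma_declare_contiguous() or cma_init_reserved_mem(). my_cma is the
 * hypothetical region pointer from the sketches above.
 *
 *	unsigned int count = SZ_1M >> PAGE_SHIFT;
 *	struct page *page;
 *
 *	page = cma_alloc(my_cma, count, 0);	(returns NULL on failure)
 *	if (!page)
 *		return -ENOMEM;
 *
 *	(use the pages, e.g. via page_address())
 *
 *	cma_release(my_cma, page, count);
 */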