/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>

struct cma {
	unsigned long	base_pfn;
	unsigned long	count;
	unsigned long	*bitmap;
	unsigned int order_per_bit; /* Order of pages represented by one bit */
	struct mutex	lock;
};
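
/*
 * Illustration (not in the original source): with order_per_bit = 0 each
 * bitmap bit tracks a single page, so a 16 MiB area with 4 KiB pages needs
 * 4096 bits; with order_per_bit = 2 each bit covers a four-page group and
 * the same area needs only 1024 bits, at the cost of four-page allocation
 * granularity.
 */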
					
						
static struct cma cma_areas[MAX_CMA_AREAS];
static unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

static unsigned long cma_bitmap_aligned_mask(struct cma *cma, int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

static unsigned long cma_bitmap_aligned_offset(struct cma *cma, int align_order)
{
	unsigned int alignment;

	if (align_order <= cma->order_per_bit)
		return 0;
	alignment = 1UL << (align_order - cma->order_per_bit);
	return ALIGN(cma->base_pfn, alignment) -
		(cma->base_pfn >> cma->order_per_bit);
}

static unsigned long cma_bitmap_maxno(struct cma *cma)
{
	return cma->count >> cma->order_per_bit;
}

static unsigned long cma_bitmap_pages_to_bits(struct cma *cma,
						unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}
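
/*
 * Worked example (not in the original source): with order_per_bit = 1,
 * a request for 5 pages yields ALIGN(5, 2) >> 1 = 3 bits, i.e. the request
 * is rounded up to 6 pages' worth of bitmap coverage.
 */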
					
						
static void cma_clear_bitmap(struct cma *cma, unsigned long pfn, int count)
{
	unsigned long bitmap_no, bitmap_count;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}

static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

	if (!cma->bitmap)
		return -ENOMEM;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA resv range
			 * to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto err;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);
	return 0;

err:
	kfree(cma->bitmap);
	cma->count = 0;
	return -EINVAL;
}

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);

		if (ret)
			return ret;
	}

	return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 int order_per_bit, struct cma **res_cma)
{
	struct cma *cma;
	phys_addr_t alignment;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}
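
/*
 * Usage sketch (illustrative only, not part of the original file): an
 * early-boot caller handing an already-reserved, suitably aligned memblock
 * range to CMA.  The names "my_cma" and "my_reserved_setup" are made up
 * for this example.
 *
 *	static struct cma *my_cma;
 *
 *	static int __init my_reserved_setup(phys_addr_t base, phys_addr_t size)
 *	{
 *		// base/size must already pass memblock_is_region_reserved()
 *		return cma_init_reserved_mem(base, size, 0, &my_cma);
 *	}
 */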
					
						
/**
 * cma_declare_contiguous() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be power of 2 or zero.
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area
 * @res_cma: Pointer to store the created cma region.
 *
 * This function reserves memory from early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows the creation of custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base.  If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, struct cma **res_cma)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

#ifdef CONFIG_X86
	/*
	 * high_memory isn't direct mapped memory so retrieving its physical
	 * address isn't appropriate.  But it would be useful to check the
	 * physical address of the highmem boundary so it's justifiable to get
	 * the physical address from it.  On x86 there is a validation check for
	 * this case, so the following workaround is needed to avoid it.
	 */
	highmem_start = __pa_nodebug(high_memory);
#else
	highmem_start = __pa(high_memory);
#endif
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/*
	 * Sanitise input arguments.
	 * Pages at both ends of the CMA area could be merged into adjacent
	 * unmovable-migratetype pages by the page allocator's buddy
	 * algorithm. In that case you could not get contiguous memory,
	 * which is not what we want.
	 */
	alignment = max(alignment,
		(phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order));
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base the requested region must not cross
	 * the low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range(size, alignment,
						    highmem_start, limit);
			limit = highmem_start;
		}

		if (!addr) {
			addr = memblock_alloc_range(size, alignment, base,
						    limit);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects but this address isn't mapped and accessible
		 */
		kmemleak_ignore(phys_to_virt(addr));
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, res_cma);
	if (ret)
		goto err;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}
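
/*
 * Usage sketch (illustrative only, not part of the original file): arch
 * setup code reserving a 16 MiB area anywhere in memory.  The names
 * "dma_cma" and "my_arch_reserve_cma" are made up for this example.
 *
 *	static struct cma *dma_cma;
 *
 *	void __init my_arch_reserve_cma(void)
 *	{
 *		// base = 0 (anywhere), limit = 0 (any), alignment = 0
 *		// (default), order_per_bit = 0, fixed = false
 *		if (cma_declare_contiguous(0, SZ_16M, 0, 0, 0, false,
 *					   &dma_cma))
 *			pr_warn("CMA reservation failed\n");
 *	}
 */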
					
						
/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates part of the contiguous memory from the specified
 * contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, int count, unsigned int align)
{
	unsigned long mask, offset, pfn, start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	struct page *page = NULL;
	int ret;

	if (!cma || !cma->count)
		return NULL;

	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	for (;;) {
		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}
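
/*
 * Usage sketch (illustrative only, not part of the original file):
 * allocating and later freeing a 1 MiB buffer (256 pages of 4 KiB) from a
 * made-up region pointer "my_cma", paired with cma_release() below.
 *
 *	struct page *page = cma_alloc(my_cma, 256, 0);
 *
 *	if (page) {
 *		// ... use the 256 contiguous pages ...
 *		cma_release(my_cma, page, 256);	// count must match the alloc
 *	}
 */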
					
						
/**
 * cma_release() - release allocated pages
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the contiguous
 * area and true otherwise.
 */
bool cma_release(struct cma *cma, struct page *pages, int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);

	return true;
}