mm, CMA: change cma_declare_contiguous() to obey coding convention

Conventionally, we put the output parameter at the end of the parameter
list and put 'base' ahead of 'size', but cma_declare_contiguous()
follows neither convention, so change it.
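
For reference, the prototype changes as follows (reconstructed from the
include/linux/cma.h hunk below):

    /* before: 'size' ahead of 'base', output parameter in the middle */
    int __init cma_declare_contiguous(phys_addr_t size,
                phys_addr_t base, phys_addr_t limit,
                phys_addr_t alignment, unsigned int order_per_bit,
                struct cma **res_cma, bool fixed);

    /* after: 'base' ahead of 'size', output parameter last */
    int __init cma_declare_contiguous(phys_addr_t base,
                phys_addr_t size, phys_addr_t limit,
                phys_addr_t alignment, unsigned int order_per_bit,
                bool fixed, struct cma **res_cma);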

Additionally, move the cma_areas reference down to the point where it
is actually needed.
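
A minimal, self-contained sketch of both conventions (reserve_region()
is a hypothetical stand-in for cma_declare_contiguous(), not a kernel
API):

    #include <stdio.h>

    struct region {
            unsigned long base;
            unsigned long size;
    };

    /* inputs first ('base' ahead of 'size'), output parameter last */
    static int reserve_region(unsigned long base, unsigned long size,
                              int fixed, struct region **res)
    {
            static struct region reserved;

            (void)fixed;    /* placement hint, ignored in this sketch */

            /* validate before touching the result slot, mirroring how
             * the patch defers the cma_areas[] reference until it is
             * actually needed */
            if (size == 0)
                    return -1;

            reserved.base = base;
            reserved.size = size;
            *res = &reserved;
            return 0;
    }

    int main(void)
    {
            struct region *res;

            if (reserve_region(0x1000, 0x4000, 0, &res) == 0)
                    printf("reserved base=%#lx size=%#lx\n",
                           res->base, res->size);
            return 0;
    }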

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Michal Nazarewicz <mina86@mina86.com>
Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Alexander Graf <agraf@suse.de>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Gleb Natapov <gleb@kernel.org>
Acked-by: Marek Szyprowski <m.szyprowski@samsung.com>
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit c1f733aaaf (parent b7155e76a7)
Author: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Date:   2014-08-06 16:05:32 -07:00
Committed by: Linus Torvalds

4 changed files with 11 additions and 10 deletions

diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c

@@ -185,8 +185,8 @@ void __init kvm_cma_reserve(void)
 		align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
 
 		align_size = max(kvm_rma_pages << PAGE_SHIFT, align_size);
-		cma_declare_contiguous(selected_size, 0, 0, align_size,
-			KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, &kvm_cma, false);
+		cma_declare_contiguous(0, selected_size, 0, align_size,
+			KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, &kvm_cma);
 	}
 }

diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c

@@ -165,7 +165,7 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
 {
 	int ret;
 
-	ret = cma_declare_contiguous(size, base, limit, 0, 0, res_cma, fixed);
+	ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed, res_cma);
 	if (ret)
 		return ret;

diff --git a/include/linux/cma.h b/include/linux/cma.h

@@ -21,7 +21,7 @@ extern unsigned long cma_get_size(struct cma *cma);
-extern int __init cma_declare_contiguous(phys_addr_t size,
-			phys_addr_t base, phys_addr_t limit,
+extern int __init cma_declare_contiguous(phys_addr_t base,
+			phys_addr_t size, phys_addr_t limit,
 			phys_addr_t alignment, unsigned int order_per_bit,
-			struct cma **res_cma, bool fixed);
+			bool fixed, struct cma **res_cma);
 extern struct page *cma_alloc(struct cma *cma, int count, unsigned int align);
 extern bool cma_release(struct cma *cma, struct page *pages, int count);
 #endif

diff --git a/mm/cma.c b/mm/cma.c

@@ -141,13 +141,13 @@ core_initcall(cma_init_reserved_areas);
 /**
  * cma_declare_contiguous() - reserve custom contiguous area
- * @size: Size of the reserved area (in bytes),
  * @base: Base address of the reserved area optional, use 0 for any
+ * @size: Size of the reserved area (in bytes),
  * @limit: End address of the reserved memory (optional, 0 for any).
  * @alignment: Alignment for the CMA area, should be power of 2 or zero
  * @order_per_bit: Order of pages represented by one bit on bitmap.
- * @res_cma: Pointer to store the created cma region.
  * @fixed: hint about where to place the reserved area
+ * @res_cma: Pointer to store the created cma region.
  *
  * This function reserves memory from early allocator. It should be
  * called by arch specific code once the early allocator (memblock or bootmem)
@@ -157,12 +157,12 @@ core_initcall(cma_init_reserved_areas);
  * If @fixed is true, reserve contiguous area at exactly @base. If false,
  * reserve in range from @base to @limit.
  */
-int __init cma_declare_contiguous(phys_addr_t size,
-			phys_addr_t base, phys_addr_t limit,
+int __init cma_declare_contiguous(phys_addr_t base,
+			phys_addr_t size, phys_addr_t limit,
 			phys_addr_t alignment, unsigned int order_per_bit,
-			struct cma **res_cma, bool fixed)
+			bool fixed, struct cma **res_cma)
 {
-	struct cma *cma = &cma_areas[cma_area_count];
+	struct cma *cma;
 	int ret = 0;
 
 	pr_debug("%s(size %lx, base %08lx, limit %08lx alignment %08lx)\n",
@@ -218,6 +218,7 @@ int __init cma_declare_contiguous(phys_addr_t size,
 	 * Each reserved area must be initialised later, when more kernel
 	 * subsystems (like slab allocator) are available.
 	 */
+	cma = &cma_areas[cma_area_count];
 	cma->base_pfn = PFN_DOWN(base);
 	cma->count = size >> PAGE_SHIFT;
 	cma->order_per_bit = order_per_bit;