From c2273a185354fe9420fb342b1ca09a6fed857fb3 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Fri, 16 Jan 2015 18:01:43 +0100
Subject: [PATCH 1/5] ARM: 8288/1: dma-mapping: don't detach devices without an IOMMU during teardown

When tearing down the DMA ops for a device via of_dma_deconfigure, we
unconditionally detach the device from its IOMMU domain. For devices
that aren't actually behind an IOMMU, this produces a "Not attached"
warning message on the console.

This patch changes the teardown code so that we don't detach from the
IOMMU domain when there isn't an IOMMU dma mapping to start with.

Reported-by: Laurent Pinchart
Signed-off-by: Will Deacon
Signed-off-by: Russell King
---
 arch/arm/mm/dma-mapping.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 7864797609b3..f142ddd6c40a 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -2025,6 +2025,9 @@ static void arm_teardown_iommu_dma_ops(struct device *dev)
 {
 	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
 
+	if (!mapping)
+		return;
+
 	arm_iommu_detach_device(dev);
 	arm_iommu_release_mapping(mapping);
 }

From c2607f74aad96d18316a6e709b40e0ffe9def148 Mon Sep 17 00:00:00 2001
From: Nicolas Pitre
Date: Tue, 27 Jan 2015 16:10:42 +0100
Subject: [PATCH 2/5] ARM: 8294/1: ATAG_DTB_COMPAT: remove the DT workspace's hardcoded 64KB size
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

There is currently a hardcoded limit of 64KB for the DTB to live in and
be extended with ATAG info. Some DTBs have outgrown that limit:

$ du -b arch/arm/boot/dts/omap3-n900.dtb
70212	arch/arm/boot/dts/omap3-n900.dtb

Furthermore, the actual size passed to atags_to_fdt() included the
stack size, which is obviously wrong.

The initial DTB size is known, so use it to size the allocated
workspace with a 50% growth assumption and relocate the temporary
stack above that. This is also clamped to 32KB min / 1MB max for
robustness against bad DTB data.

Reported-by: Pali Rohár
Tested-by: Pavel Machek
Signed-off-by: Nicolas Pitre
Signed-off-by: Russell King
---
 arch/arm/boot/compressed/head.S | 39 +++++++++++++++++++++++++--------
 1 file changed, 30 insertions(+), 9 deletions(-)

diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 68be9017593d..132c70e2d2f1 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -263,16 +263,37 @@ restart:	adr	r0, LC0
 		 * OK... Let's do some funky business here.
 		 * If we do have a DTB appended to zImage, and we do have
 		 * an ATAG list around, we want the latter to be translated
-		 * and folded into the former here. To be on the safe side,
-		 * let's temporarily move the stack away into the malloc
-		 * area. No GOT fixup has occurred yet, but none of the
-		 * code we're about to call uses any global variable.
+		 * and folded into the former here. No GOT fixup has occurred
+		 * yet, but none of the code we're about to call uses any
+		 * global variable.
 		 */
-		add	sp, sp, #0x10000
+
+		/* Get the initial DTB size */
+		ldr	r5, [r6, #4]
+#ifndef __ARMEB__
+		/* convert to little endian */
+		eor	r1, r5, r5, ror #16
+		bic	r1, r1, #0x00ff0000
+		mov	r5, r5, ror #8
+		eor	r5, r5, r1, lsr #8
+#endif
+		/* 50% DTB growth should be good enough */
+		add	r5, r5, r5, lsr #1
+		/* preserve 64-bit alignment */
+		add	r5, r5, #7
+		bic	r5, r5, #7
+		/* clamp to 32KB min and 1MB max */
+		cmp	r5, #(1 << 15)
+		movlo	r5, #(1 << 15)
+		cmp	r5, #(1 << 20)
+		movhi	r5, #(1 << 20)
+		/* temporarily relocate the stack past the DTB work space */
+		add	sp, sp, r5
+
 		stmfd	sp!, {r0-r3, ip, lr}
 		mov	r0, r8
 		mov	r1, r6
-		sub	r2, sp, r6
+		mov	r2, r5
 		bl	atags_to_fdt
 
 		/*
@@ -285,11 +306,11 @@ restart:	adr	r0, LC0
 		bic	r0, r0, #1
 		add	r0, r0, #0x100
 		mov	r1, r6
-		sub	r2, sp, r6
+		mov	r2, r5
 		bleq	atags_to_fdt
 
 		ldmfd	sp!, {r0-r3, ip, lr}
-		sub	sp, sp, #0x10000
+		sub	sp, sp, r5
 #endif
 
 		mov	r8, r6			@ use the appended device tree
@@ -306,7 +327,7 @@ restart:	adr	r0, LC0
 		subs	r1, r5, r1
 		addhi	r9, r9, r1
 
-		/* Get the dtb's size */
+		/* Get the current DTB size */
 		ldr	r5, [r6, #4]
#ifndef __ARMEB__
 		/* convert r5 (dtb size) to little endian */
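
[Editor's note] The size computation this patch introduces is compact
enough to restate in C for readers who don't speak ARM assembly. The
following is a minimal standalone sketch, not kernel code:
dtb_workspace_size() is an illustrative name, and the only assumptions
are that the appended DTB starts with a standard FDT header (big-endian
totalsize field at byte offset 4, the field the ldr/eor sequence above
decodes) and the patch's own 50% growth, 64-bit alignment, and
32KB/1MB clamp rules.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define DTB_WS_MIN (1u << 15)	/* 32KB floor, as in the patch */
#define DTB_WS_MAX (1u << 20)	/* 1MB ceiling */

/* hdr points at the appended DTB; the FDT header keeps its
 * big-endian totalsize field at byte offset 4 */
static uint32_t dtb_workspace_size(const uint8_t *hdr)
{
	uint32_t size = ((uint32_t)hdr[4] << 24) | ((uint32_t)hdr[5] << 16) |
			((uint32_t)hdr[6] << 8)  |  (uint32_t)hdr[7];

	size += size >> 1;		/* allow 50% DTB growth */
	size  = (size + 7) & ~7u;	/* preserve 64-bit alignment */

	if (size < DTB_WS_MIN)		/* clamp against bad DTB data */
		size = DTB_WS_MIN;
	if (size > DTB_WS_MAX)
		size = DTB_WS_MAX;
	return size;
}

int main(void)
{
	/* fake header: FDT magic, then totalsize 70212 (0x00011244),
	 * the omap3-n900.dtb figure from the commit message */
	const uint8_t hdr[8] = { 0xd0, 0x0d, 0xfe, 0xed,
				 0x00, 0x01, 0x12, 0x44 };
	printf("workspace: %" PRIu32 " bytes\n", dtb_workspace_size(hdr));
	return 0;
}

Run as-is, this prints 105320: 70212 grown by half is 105318, which
rounds up to 105320 for 64-bit alignment and falls between the clamps,
so that is the workspace the temporary stack is relocated past.
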
From ed46092518aaed9e5266f8dd87ac12bf18cfc8e8 Mon Sep 17 00:00:00 2001
From: Rob Herring
Date: Wed, 28 Jan 2015 16:05:04 +0100
Subject: [PATCH 3/5] ARM: 8295/1: fix v7M build for !CONFIG_PRINTK

Minimal builds for v7M are broken when printk is disabled. The caller
is assembly, so add the necessary ifdef around the call.

Signed-off-by: Rob Herring
Signed-off-by: Russell King
---
 arch/arm/kernel/entry-v7m.S | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/arch/arm/kernel/entry-v7m.S b/arch/arm/kernel/entry-v7m.S
index 2260f1855820..8944f4991c3c 100644
--- a/arch/arm/kernel/entry-v7m.S
+++ b/arch/arm/kernel/entry-v7m.S
@@ -22,10 +22,12 @@
 
 __invalid_entry:
 	v7m_exception_entry
+#ifdef CONFIG_PRINTK
 	adr	r0, strerr
 	mrs	r1, ipsr
 	mov	r2, lr
 	bl	printk
+#endif
 	mov	r0, sp
 	bl	show_regs
 1:	b	1b

From fba289054f24d2550f47a1413e1ccc24f4165560 Mon Sep 17 00:00:00 2001
From: Arnd Bergmann
Date: Wed, 28 Jan 2015 17:58:33 +0100
Subject: [PATCH 4/5] ARM: 8298/1: ARM_KERNMEM_PERMS only works with MMU enabled

The recently added ARM_KERNMEM_PERMS feature works by manipulating the
kernel page tables, which obviously requires an MMU. Trying to enable
this feature when the MMU is disabled results in a lot of compile
errors in mm/init.c, so let's add a Kconfig dependency to avoid that
case.

Signed-off-by: Arnd Bergmann
Signed-off-by: Russell King
---
 arch/arm/mm/Kconfig | 1 +
 1 file changed, 1 insertion(+)

diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 03823e784f63..c43c71455566 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -1012,6 +1012,7 @@ config ARCH_SUPPORTS_BIG_ENDIAN
 
 config ARM_KERNMEM_PERMS
 	bool "Restrict kernel memory permissions"
+	depends on MMU
 	help
 	  If this is set, kernel memory other than kernel text (and rodata)
 	  will be made non-executable. The tradeoff is that each region is

From 8e64806672466392acf19e14427d1c29df3e58b9 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Thu, 29 Jan 2015 16:41:46 +0100
Subject: [PATCH 5/5] ARM: 8299/1: mm: ensure local active ASID is marked as allocated on rollover

Commit e1a5848e3398 ("ARM: 7924/1: mm: don't bother with reserved ttbr0
when running with LPAE") removed the use of the reserved TTBR0 value
for LPAE systems, since the ASID is held in the TTBR and can be updated
atomically with the pgd of the next mm.

Unfortunately, this patch forgot to update flush_context, which
deliberately avoids marking the local active ASID as allocated, since
we used to switch via ASID zero and didn't need to allocate the ASID
of the previous mm.

The side-effect of this is that we can allocate the same ASID to the
next mm and, between flushing the local TLB and updating TTBR0, we can
perform speculative TLB fills for userspace nG mappings using the page
table of the previous mm.

The consequence of this is that the next mm can erroneously hit some
mappings of the previous mm. Note that this was made significantly
harder to hit by a391263cd84e ("ARM: 8203/1: mm: try to re-use old
ASID assignments following a rollover") but is still theoretically
possible.

This patch fixes the problem by removing the code from flush_context
that forces the allocated ASID to zero for the local CPU. Many thanks
to the Broadcom guys for tracking this one down.

Fixes: e1a5848e3398 ("ARM: 7924/1: mm: don't bother with reserved ttbr0 when running with LPAE")
Cc: # v3.14+
Reported-by: Raymond Ngun
Tested-by: Raymond Ngun
Reviewed-by: Gregory Fong
Signed-off-by: Will Deacon
Signed-off-by: Russell King
---
 arch/arm/mm/context.c | 26 +++++++++++---------------
 1 file changed, 11 insertions(+), 15 deletions(-)

diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 91892569710f..845769e41332 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -144,21 +144,17 @@ static void flush_context(unsigned int cpu)
 	/* Update the list of reserved ASIDs and the ASID bitmap. */
 	bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
 	for_each_possible_cpu(i) {
-		if (i == cpu) {
-			asid = 0;
-		} else {
-			asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
-			/*
-			 * If this CPU has already been through a
-			 * rollover, but hasn't run another task in
-			 * the meantime, we must preserve its reserved
-			 * ASID, as this is the only trace we have of
-			 * the process it is still running.
-			 */
-			if (asid == 0)
-				asid = per_cpu(reserved_asids, i);
-			__set_bit(asid & ~ASID_MASK, asid_map);
-		}
+		asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
+		/*
+		 * If this CPU has already been through a
+		 * rollover, but hasn't run another task in
+		 * the meantime, we must preserve its reserved
+		 * ASID, as this is the only trace we have of
+		 * the process it is still running.
+		 */
+		if (asid == 0)
+			asid = per_cpu(reserved_asids, i);
+		__set_bit(asid & ~ASID_MASK, asid_map);
 		per_cpu(reserved_asids, i) = asid;
 	}
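
[Editor's note] To see why the local CPU must take part in the loop
above, the rollover path can be modelled in a few lines of standalone
C. This is only a toy, single-threaded sketch under simplifying
assumptions: plain arrays stand in for the per-CPU atomic64_t
variables, the generation bits normally masked off with ASID_MASK are
ignored, and the names merely mirror the kernel's; none of this is
kernel code.

#include <stdbool.h>
#include <stdio.h>

#define NUM_CPUS  4
#define NUM_ASIDS 256

/* Toy stand-ins for the kernel's per-CPU variables */
static unsigned long active_asids[NUM_CPUS];
static unsigned long reserved_asids[NUM_CPUS];
static bool asid_map[NUM_ASIDS];

/* Mirrors the fixed flush_context(): every CPU, including the
 * local one, transfers its active ASID into the bitmap on rollover */
static void flush_context(void)
{
	for (int i = 0; i < NUM_ASIDS; i++)
		asid_map[i] = false;

	for (int cpu = 0; cpu < NUM_CPUS; cpu++) {
		/* models atomic64_xchg(&per_cpu(active_asids, cpu), 0) */
		unsigned long asid = active_asids[cpu];
		active_asids[cpu] = 0;

		/* CPU already rolled over but ran no new task since:
		 * keep its old reservation, the only trace left of the
		 * process it is still running */
		if (asid == 0)
			asid = reserved_asids[cpu];

		asid_map[asid] = true;	/* now done for every CPU */
		reserved_asids[cpu] = asid;
	}
}

int main(void)
{
	active_asids[0] = 42;	/* the local CPU's mm is using ASID 42 */
	flush_context();
	printf("ASID 42 still reserved after rollover: %s\n",
	       asid_map[42] ? "yes" : "no");
	return 0;
}

Before the fix, the i == cpu branch forced asid to zero and skipped
the bitmap update, so this model would report ASID 42 as free; the
allocator could then hand 42 to the next mm while the local TLB might
still hold nG entries tagged with it, which is exactly the
speculative-fill window the commit message describes.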