Merge branch 'for-linus' of git://git.linaro.org/people/rmk/linux-arm
Pull ARM updates (part one) from Russell King:

 - MMC patches from Ulf Hansson and Pawel Moll.  These add support for
   DDR mode and the latest variant found on ARM Versatile Express, as
   well as a number of cleanups.

 - A fix to improve the behaviour of ARM's sched_clock()

 - Changes to the ARM ioremap() code.  I'm not convinced with the
   primary arguments for this, but it's been around for a while, and
   people seem happy with it - and the "other" justification for this
   is at http://lkml.org/lkml/2012/12/6/184

 - Add SCHED_HRTICK to ARM's Kconfig

 - Making the ARM SHA/AES code Thumb-2 compatible

 - A collection of other small updates.

* 'for-linus' of git://git.linaro.org/people/rmk/linux-arm: (26 commits)
  ARM: add SCHED_HRTICK config option
  ARM: 7650/1: mm: replace direct access to mm->context.id with new macro
  ARM: 7649/1: mm: mm->context.id fix for big-endian
  ARM: 7648/1: pci: Allow passing per-controller private data
  ARM: 7647/1: pci: Keep pci_common_init() around after init
  ARM: fix warnings introduced by previous patch
  ARM: 7646/1: mm: use static_vm for managing static mapped areas
  ARM: 7645/1: ioremap: introduce an infrastructure for static mapped area
  ARM: 7644/1: vmregion: remove vmregion code entirely
  MAINTAINERS: Re-assert MMCI driver maintainer status
  MAINTAINERS: add additional file for MMCI driver
  MAINTAINERS: add maintainer entry for AMBA serial drivers
  ARM: 7637/1: memory: use SZ_ constants for defining the virtual memory layout
  ARM: 7643/1: sched: correct update_sched_clock()
  ARM: 7635/1: versatile: fix the PCI IRQ regression
  ARM: 7639/1: cache-l2x0: add missed dummy outer_resume entry
  ARM: 7630/1: mmc: mmci: Fixup and cleanup code for DMA handling
  ARM: 7632/1: spinlock: avoid exclusive accesses on unlock() path
  ARM: 7631/1: mmc: mmci: Add new VE MMCI variant
  ARM: 7623/1: mmc: mmci: Fixup clock gating when freq is 0 for ST-variants
  ...
commit 32f9aab8eb

26 changed files with 403 additions and 542 deletions

 MAINTAINERS | 10
@@ -670,8 +670,16 @@ F:	drivers/input/serio/ambakmi.*
 F:	include/linux/amba/kmi.h
 
 ARM PRIMECELL MMCI PL180/1 DRIVER
-S:	Orphan
+M:	Russell King <linux@arm.linux.org.uk>
+S:	Maintained
 F:	drivers/mmc/host/mmci.*
+F:	include/linux/amba/mmci.h
+
+ARM PRIMECELL UART PL010 AND PL011 DRIVERS
+M:	Russell King <linux@arm.linux.org.uk>
+S:	Maintained
+F:	drivers/tty/serial/amba-pl01*.c
+F:	include/linux/amba/serial.h
 
 ARM PRIMECELL BUS SUPPORT
 M:	Russell King <linux@arm.linux.org.uk>

@@ -1654,6 +1654,9 @@ config HZ
 	default SHMOBILE_TIMER_HZ if ARCH_SHMOBILE
 	default 100
 
+config SCHED_HRTICK
+	def_bool HIGH_RES_TIMERS
+
 config THUMB2_KERNEL
 	bool "Compile the kernel in Thumb-2 mode"
 	depends on CPU_V7 && !CPU_V6 && !CPU_V6K

@@ -34,8 +34,9 @@
 @ A little glue here to select the correct code below for the ARM CPU
 @ that is being targetted.
 
+#include <linux/linkage.h>
+
 .text
-.code	32
 
 .type	AES_Te,%object
 .align	5

@@ -145,10 +146,8 @@ AES_Te:
 
 @ void AES_encrypt(const unsigned char *in, unsigned char *out,
 @ 		 const AES_KEY *key) {
-.global AES_encrypt
-.type   AES_encrypt,%function
 .align	5
-AES_encrypt:
+ENTRY(AES_encrypt)
 	sub	r3,pc,#8		@ AES_encrypt
 	stmdb   sp!,{r1,r4-r12,lr}
 	mov	r12,r0		@ inp

@@ -239,15 +238,8 @@ AES_encrypt:
 	strb	r6,[r12,#14]
 	strb	r3,[r12,#15]
 #endif
-#if __ARM_ARCH__>=5
 	ldmia	sp!,{r4-r12,pc}
-#else
-	ldmia   sp!,{r4-r12,lr}
-	tst	lr,#1
-	moveq	pc,lr			@ be binary compatible with V4, yet
-	.word	0xe12fff1e			@ interoperable with Thumb ISA:-)
-#endif
-.size	AES_encrypt,.-AES_encrypt
+ENDPROC(AES_encrypt)
 
 .type   _armv4_AES_encrypt,%function
 .align	2

@@ -386,10 +378,8 @@ _armv4_AES_encrypt:
 	ldr	pc,[sp],#4		@ pop and return
 .size	_armv4_AES_encrypt,.-_armv4_AES_encrypt
 
-.global private_AES_set_encrypt_key
-.type   private_AES_set_encrypt_key,%function
 .align	5
-private_AES_set_encrypt_key:
+ENTRY(private_AES_set_encrypt_key)
 _armv4_AES_set_encrypt_key:
 	sub	r3,pc,#8		@ AES_set_encrypt_key
 	teq	r0,#0

@@ -658,15 +648,11 @@ _armv4_AES_set_encrypt_key:
 
 .Ldone:	mov	r0,#0
 	ldmia   sp!,{r4-r12,lr}
-.Labrt:	tst	lr,#1
-	moveq	pc,lr			@ be binary compatible with V4, yet
-	.word	0xe12fff1e			@ interoperable with Thumb ISA:-)
-.size	private_AES_set_encrypt_key,.-private_AES_set_encrypt_key
+.Labrt:	mov	pc,lr
+ENDPROC(private_AES_set_encrypt_key)
 
-.global private_AES_set_decrypt_key
-.type   private_AES_set_decrypt_key,%function
 .align	5
-private_AES_set_decrypt_key:
+ENTRY(private_AES_set_decrypt_key)
 	str	lr,[sp,#-4]!		@ push lr
 #if 0
 	@ kernel does both of these in setkey so optimise this bit out by

@@ -748,15 +734,8 @@ private_AES_set_decrypt_key:
 	bne	.Lmix
 
 	mov	r0,#0
-#if __ARM_ARCH__>=5
 	ldmia	sp!,{r4-r12,pc}
-#else
-	ldmia	sp!,{r4-r12,lr}
-	tst	lr,#1
-	moveq	pc,lr			@ be binary compatible with V4, yet
-	.word	0xe12fff1e			@ interoperable with Thumb ISA:-)
-#endif
-.size	private_AES_set_decrypt_key,.-private_AES_set_decrypt_key
+ENDPROC(private_AES_set_decrypt_key)
 
 .type	AES_Td,%object
 .align	5

@@ -862,10 +841,8 @@ AES_Td:
 
 @ void AES_decrypt(const unsigned char *in, unsigned char *out,
 @ 		 const AES_KEY *key) {
-.global AES_decrypt
-.type   AES_decrypt,%function
 .align	5
-AES_decrypt:
+ENTRY(AES_decrypt)
 	sub	r3,pc,#8		@ AES_decrypt
 	stmdb   sp!,{r1,r4-r12,lr}
 	mov	r12,r0		@ inp

@@ -956,15 +933,8 @@ AES_decrypt:
 	strb	r6,[r12,#14]
 	strb	r3,[r12,#15]
 #endif
-#if __ARM_ARCH__>=5
 	ldmia	sp!,{r4-r12,pc}
-#else
-	ldmia	sp!,{r4-r12,lr}
-	tst	lr,#1
-	moveq	pc,lr			@ be binary compatible with V4, yet
-	.word	0xe12fff1e			@ interoperable with Thumb ISA:-)
-#endif
-.size	AES_decrypt,.-AES_decrypt
+ENDPROC(AES_decrypt)
 
 .type   _armv4_AES_decrypt,%function
 .align	2

@@ -1064,7 +1034,9 @@ _armv4_AES_decrypt:
 	and	r9,lr,r1,lsr#8
 
 	ldrb	r7,[r10,r7]		@ Td4[s1>>0]
-	ldrb	r1,[r10,r1,lsr#24]	@ Td4[s1>>24]
+ ARM(	ldrb	r1,[r10,r1,lsr#24]  )	@ Td4[s1>>24]
+ THUMB(	add	r1,r10,r1,lsr#24    )	@ Td4[s1>>24]
+ THUMB(	ldrb	r1,[r1]		    )
 	ldrb	r8,[r10,r8]		@ Td4[s1>>16]
 	eor	r0,r7,r0,lsl#24
 	ldrb	r9,[r10,r9]		@ Td4[s1>>8]

@@ -1077,7 +1049,9 @@ _armv4_AES_decrypt:
 	ldrb	r8,[r10,r8]		@ Td4[s2>>0]
 	and	r9,lr,r2,lsr#16
 
-	ldrb	r2,[r10,r2,lsr#24]	@ Td4[s2>>24]
+ ARM(	ldrb	r2,[r10,r2,lsr#24]  )	@ Td4[s2>>24]
+ THUMB(	add	r2,r10,r2,lsr#24    )	@ Td4[s2>>24]
+ THUMB(	ldrb	r2,[r2]		    )
 	eor	r0,r0,r7,lsl#8
 	ldrb	r9,[r10,r9]		@ Td4[s2>>16]
 	eor	r1,r8,r1,lsl#16

@@ -1090,7 +1064,9 @@ _armv4_AES_decrypt:
 	and	r9,lr,r3		@ i2
 
 	ldrb	r9,[r10,r9]		@ Td4[s3>>0]
-	ldrb	r3,[r10,r3,lsr#24]	@ Td4[s3>>24]
+ ARM(	ldrb	r3,[r10,r3,lsr#24]  )	@ Td4[s3>>24]
+ THUMB(	add	r3,r10,r3,lsr#24    )	@ Td4[s3>>24]
+ THUMB(	ldrb	r3,[r3]		    )
 	eor	r0,r0,r7,lsl#16
 	ldr	r7,[r11,#0]
 	eor	r1,r1,r8,lsl#8

@@ -51,13 +51,12 @@
 @ Profiler-assisted and platform-specific optimization resulted in 10%
 @ improvement on Cortex A8 core and 12.2 cycles per byte.
 
+#include <linux/linkage.h>
+
 .text
 
-.global	sha1_block_data_order
-.type	sha1_block_data_order,%function
-
 .align	2
-sha1_block_data_order:
+ENTRY(sha1_block_data_order)
 	stmdb	sp!,{r4-r12,lr}
 	add	r2,r1,r2,lsl#6	@ r2 to point at the end of r1
 	ldmia	r0,{r3,r4,r5,r6,r7}

@@ -194,7 +193,7 @@ sha1_block_data_order:
 	eor	r10,r10,r7,ror#2		@ F_00_19(B,C,D)
 	str	r9,[r14,#-4]!
 	add	r3,r3,r10			@ E+=F_00_19(B,C,D)
-	teq	r14,sp
+	cmp	r14,sp
 	bne	.L_00_15		@ [((11+4)*5+2)*3]
 #if __ARM_ARCH__<7
 	ldrb	r10,[r1,#2]

@@ -374,7 +373,9 @@ sha1_block_data_order:
 	@ F_xx_xx
 	add	r3,r3,r9			@ E+=X[i]
 	add	r3,r3,r10			@ E+=F_20_39(B,C,D)
-	teq	r14,sp			@ preserve carry
+ ARM(	teq	r14,sp		)	@ preserve carry
+ THUMB(	mov	r11,sp		)
+ THUMB(	teq	r14,r11		)	@ preserve carry
 	bne	.L_20_39_or_60_79	@ [+((12+3)*5+2)*4]
 	bcs	.L_done			@ [+((12+3)*5+2)*4], spare 300 bytes
 

@@ -466,7 +467,7 @@ sha1_block_data_order:
 	add	r3,r3,r9			@ E+=X[i]
 	add	r3,r3,r10			@ E+=F_40_59(B,C,D)
 	add	r3,r3,r11,ror#2
-	teq	r14,sp
+	cmp	r14,sp
 	bne	.L_40_59		@ [+((12+5)*5+2)*4]
 
 	ldr	r8,.LK_60_79

@@ -485,19 +486,12 @@ sha1_block_data_order:
 	teq	r1,r2
 	bne	.Lloop			@ [+18], total 1307
 
-#if __ARM_ARCH__>=5
 	ldmia	sp!,{r4-r12,pc}
-#else
-	ldmia	sp!,{r4-r12,lr}
-	tst	lr,#1
-	moveq	pc,lr			@ be binary compatible with V4, yet
-	.word	0xe12fff1e			@ interoperable with Thumb ISA:-)
-#endif
 .align	2
 .LK_00_19:	.word	0x5a827999
 .LK_20_39:	.word	0x6ed9eba1
 .LK_40_59:	.word	0x8f1bbcdc
 .LK_60_79:	.word	0xca62c1d6
-.size	sha1_block_data_order,.-sha1_block_data_order
+ENDPROC(sha1_block_data_order)
 .asciz	"SHA1 block transform for ARMv4, CRYPTOGAMS by <appro@openssl.org>"
 .align	2

@@ -23,6 +23,7 @@ struct hw_pci {
 #endif
 	struct pci_ops	*ops;
 	int		nr_controllers;
+	void		**private_data;
 	int		(*setup)(int nr, struct pci_sys_data *);
 	struct pci_bus *(*scan)(int nr, struct pci_sys_data *);
 	void		(*preinit)(void);

@@ -36,23 +36,23 @@
  * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
  */
 #define PAGE_OFFSET		UL(CONFIG_PAGE_OFFSET)
-#define TASK_SIZE		(UL(CONFIG_PAGE_OFFSET) - UL(0x01000000))
+#define TASK_SIZE		(UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M))
 #define TASK_UNMAPPED_BASE	ALIGN(TASK_SIZE / 3, SZ_16M)
 
 /*
  * The maximum size of a 26-bit user space task.
  */
-#define TASK_SIZE_26		UL(0x04000000)
+#define TASK_SIZE_26		(UL(1) << 26)
 
 /*
  * The module space lives between the addresses given by TASK_SIZE
  * and PAGE_OFFSET - it must be within 32MB of the kernel text.
  */
 #ifndef CONFIG_THUMB2_KERNEL
-#define MODULES_VADDR		(PAGE_OFFSET - 16*1024*1024)
+#define MODULES_VADDR		(PAGE_OFFSET - SZ_16M)
 #else
 /* smaller range for Thumb-2 symbols relocation (2^24)*/
-#define MODULES_VADDR		(PAGE_OFFSET - 8*1024*1024)
+#define MODULES_VADDR		(PAGE_OFFSET - SZ_8M)
 #endif
 
 #if TASK_SIZE > MODULES_VADDR

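The hunk above only respells existing constants with SZ_ macros; no value changes. A compile-time check of those equivalences (user-space sketch; the SZ_ definitions below are illustrative stand-ins, since linux/sizes.h is kernel-only):

```c
/* gcc -std=c11 -c sz_check.c */
#define SZ_8M	0x00800000UL	/* illustrative stand-ins for the */
#define SZ_16M	0x01000000UL	/* kernel's <linux/sizes.h> values */

_Static_assert(SZ_16M == 0x01000000UL,
	       "SZ_16M spells the same value as the old literal");
_Static_assert(SZ_8M == 0x00800000UL,
	       "SZ_8M spells the same value as 8*1024*1024");
_Static_assert((1UL << 26) == 0x04000000UL,
	       "a 26-bit task size is unchanged by the rewrite");
```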
@@ -92,6 +92,7 @@ static inline void outer_flush_range(phys_addr_t start, phys_addr_t end)
 static inline void outer_flush_all(void) { }
 static inline void outer_inv_all(void) { }
 static inline void outer_disable(void) { }
+static inline void outer_resume(void) { }
 
 #endif
 

@@ -119,22 +119,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
-	unsigned long tmp;
-	u32 slock;
-
 	smp_mb();
 
-	__asm__ __volatile__(
-"	mov	%1, #1\n"
-"1:	ldrex	%0, [%2]\n"
-"	uadd16	%0, %0, %1\n"
-"	strex	%1, %0, [%2]\n"
-"	teq	%1, #0\n"
-"	bne	1b"
-	: "=&r" (slock), "=&r" (tmp)
-	: "r" (&lock->slock)
-	: "cc");
-
+	lock->tickets.owner++;
 	dsb_sev();
 }
 

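The unlock hunk above relies on the ticket discipline: only the lock holder ever advances tickets.owner, so a plain increment-and-store suffices and the ldrex/strex read-modify-write loop can go. A minimal user-space model of that reasoning, with C11 atomics standing in for the kernel's exclusive accesses and barriers (all names illustrative):

```c
#include <stdatomic.h>
#include <stdint.h>

typedef struct {
	atomic_ushort next;   /* ticket dispenser */
	atomic_ushort owner;  /* ticket currently served */
} ticket_lock;

static void lock(ticket_lock *l)
{
	/* take a ticket, then spin until it is being served */
	uint16_t me = atomic_fetch_add_explicit(&l->next, 1,
						memory_order_relaxed);
	while (atomic_load_explicit(&l->owner, memory_order_acquire) != me)
		;  /* the kernel waits with wfe here instead of spinning */
}

static void unlock(ticket_lock *l)
{
	/* The point of the hunk: no other CPU writes .owner, so an
	 * ordinary load/increment/store is race-free on the unlock
	 * path - no exclusive (ldrex/strex) cycle is required. */
	uint16_t cur = atomic_load_explicit(&l->owner, memory_order_relaxed);
	atomic_store_explicit(&l->owner, (uint16_t)(cur + 1),
			      memory_order_release);
}

int main(void)
{
	ticket_lock l = { 0, 0 };
	lock(&l);
	unlock(&l);
	return 0;
}
```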
@@ -413,7 +413,7 @@ static int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 	return irq;
 }
 
-static int __init pcibios_init_resources(int busnr, struct pci_sys_data *sys)
+static int pcibios_init_resources(int busnr, struct pci_sys_data *sys)
 {
 	int ret;
 	struct pci_host_bridge_window *window;

@@ -445,7 +445,7 @@ static int __init pcibios_init_resources(int busnr, struct pci_sys_data *sys)
 	return 0;
 }
 
-static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
+static void pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
 {
 	struct pci_sys_data *sys = NULL;
 	int ret;

@@ -464,6 +464,9 @@ static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
 		sys->map_irq = hw->map_irq;
 		INIT_LIST_HEAD(&sys->resources);
 
+		if (hw->private_data)
+			sys->private_data = hw->private_data[nr];
+
 		ret = hw->setup(nr, sys);
 
 		if (ret > 0) {

@@ -493,7 +496,7 @@ static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
 	}
 }
 
-void __init pci_common_init(struct hw_pci *hw)
+void pci_common_init(struct hw_pci *hw)
 {
 	struct pci_sys_data *sys;
 	LIST_HEAD(head);

@@ -93,11 +93,11 @@ static void notrace update_sched_clock(void)
 	 * detectable in cyc_to_fixed_sched_clock().
 	 */
 	raw_local_irq_save(flags);
-	cd.epoch_cyc = cyc;
+	cd.epoch_cyc_copy = cyc;
 	smp_wmb();
 	cd.epoch_ns = ns;
 	smp_wmb();
-	cd.epoch_cyc_copy = cyc;
+	cd.epoch_cyc = cyc;
 	raw_local_irq_restore(flags);
 }
 

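The reordering above matters because readers pair epoch_cyc with epoch_cyc_copy to detect an in-progress update: the copy must be dirtied first and epoch_cyc published last, or a reader can see a consistent-looking but torn (cyc, ns) pair. A user-space sketch of the protocol, with C11 fences standing in for smp_wmb()/smp_rmb(); the reader loop is a plausible reconstruction of the kernel's consumer, not its exact code:

```c
#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint32_t epoch_cyc, epoch_cyc_copy;
static _Atomic uint64_t epoch_ns;

static void update_epoch(uint32_t cyc, uint64_t ns)
{
	/* corrected order: copy first, epoch_cyc last */
	atomic_store_explicit(&epoch_cyc_copy, cyc, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);	/* smp_wmb() */
	atomic_store_explicit(&epoch_ns, ns, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);	/* smp_wmb() */
	atomic_store_explicit(&epoch_cyc, cyc, memory_order_relaxed);
}

static uint64_t read_epoch(uint32_t *cyc)
{
	uint32_t c, c2;
	uint64_t ns;

	do {
		c = atomic_load_explicit(&epoch_cyc, memory_order_relaxed);
		atomic_thread_fence(memory_order_acquire);	/* smp_rmb() */
		ns = atomic_load_explicit(&epoch_ns, memory_order_relaxed);
		atomic_thread_fence(memory_order_acquire);	/* smp_rmb() */
		c2 = atomic_load_explicit(&epoch_cyc_copy,
					  memory_order_relaxed);
	} while (c != c2);	/* mismatch means a torn update: retry */

	*cyc = c;
	return ns;
}

int main(void)
{
	uint32_t c;
	update_epoch(42, 1000);
	return (read_epoch(&c) == 1000 && c == 42) ? 0 : 1;
}
```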
@@ -125,18 +125,6 @@ void __init smp_init_cpus(void)
 		smp_ops.smp_init_cpus();
 }
 
-static void __init platform_smp_prepare_cpus(unsigned int max_cpus)
-{
-	if (smp_ops.smp_prepare_cpus)
-		smp_ops.smp_prepare_cpus(max_cpus);
-}
-
-static void __cpuinit platform_secondary_init(unsigned int cpu)
-{
-	if (smp_ops.smp_secondary_init)
-		smp_ops.smp_secondary_init(cpu);
-}
-
 int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
 	if (smp_ops.smp_boot_secondary)

@@ -154,12 +142,6 @@ static int platform_cpu_kill(unsigned int cpu)
 	return 1;
 }
 
-static void platform_cpu_die(unsigned int cpu)
-{
-	if (smp_ops.cpu_die)
-		smp_ops.cpu_die(cpu);
-}
-
 static int platform_cpu_disable(unsigned int cpu)
 {
 	if (smp_ops.cpu_disable)

@@ -257,7 +239,8 @@ void __ref cpu_die(void)
 	 * actual CPU shutdown procedure is at least platform (if not
 	 * CPU) specific.
 	 */
-	platform_cpu_die(cpu);
+	if (smp_ops.cpu_die)
+		smp_ops.cpu_die(cpu);
 
 	/*
 	 * Do not return to the idle loop - jump back to the secondary

@@ -324,7 +307,8 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	/*
 	 * Give the platform a chance to do its own initialisation.
 	 */
-	platform_secondary_init(cpu);
+	if (smp_ops.smp_secondary_init)
+		smp_ops.smp_secondary_init(cpu);
 
 	notify_cpu_starting(cpu);
 

@@ -399,8 +383,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 		/*
 		 * Initialise the present map, which describes the set of CPUs
 		 * actually populated at the present time. A platform should
-		 * re-initialize the map in platform_smp_prepare_cpus() if
-		 * present != possible (e.g. physical hotplug).
+		 * re-initialize the map in the platforms smp_prepare_cpus()
+		 * if present != possible (e.g. physical hotplug).
 		 */
 		init_cpu_present(cpu_possible_mask);
 

@@ -408,7 +392,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 		 * Initialise the SCU if there are more than one CPU
 		 * and let them know where to start.
 		 */
-		platform_smp_prepare_cpus(max_cpus);
+		if (smp_ops.smp_prepare_cpus)
+			smp_ops.smp_prepare_cpus(max_cpus);
 	}
 }
 

@@ -36,6 +36,7 @@
 #include <linux/gfp.h>
 #include <linux/clkdev.h>
 #include <linux/mtd/physmap.h>
+#include <linux/bitops.h>
 
 #include <asm/irq.h>
 #include <asm/hardware/arm_timer.h>

@@ -65,16 +66,28 @@
 #define VA_VIC_BASE		__io_address(VERSATILE_VIC_BASE)
 #define VA_SIC_BASE		__io_address(VERSATILE_SIC_BASE)
 
+/* These PIC IRQs are valid in each configuration */
+#define PIC_VALID_ALL	BIT(SIC_INT_KMI0) | BIT(SIC_INT_KMI1) | \
+			BIT(SIC_INT_SCI3) | BIT(SIC_INT_UART3) | \
+			BIT(SIC_INT_CLCD) | BIT(SIC_INT_TOUCH) | \
+			BIT(SIC_INT_KEYPAD) | BIT(SIC_INT_DoC) | \
+			BIT(SIC_INT_USB) | BIT(SIC_INT_PCI0) | \
+			BIT(SIC_INT_PCI1) | BIT(SIC_INT_PCI2) | \
+			BIT(SIC_INT_PCI3)
 #if 1
 #define IRQ_MMCI0A	IRQ_VICSOURCE22
 #define IRQ_AACI	IRQ_VICSOURCE24
 #define IRQ_ETH		IRQ_VICSOURCE25
-#define PIC_MASK	0xFFD00000
+#define PIC_VALID	PIC_VALID_ALL
 #else
 #define IRQ_MMCI0A	IRQ_SIC_MMCI0A
 #define IRQ_AACI	IRQ_SIC_AACI
 #define IRQ_ETH		IRQ_SIC_ETH
-#define PIC_MASK	0
+#define PIC_VALID	PIC_VALID_ALL | BIT(SIC_INT_MMCI0A) | \
+			BIT(SIC_INT_MMCI1A) | BIT(SIC_INT_AACI) | \
+			BIT(SIC_INT_ETH)
 #endif
 
 /* Lookup table for finding a DT node that represents the vic instance */

@@ -102,7 +115,7 @@ void __init versatile_init_irq(void)
 					      VERSATILE_SIC_BASE);
 
 	fpga_irq_init(VA_SIC_BASE, "SIC", IRQ_SIC_START,
-		IRQ_VICSOURCE31, ~PIC_MASK, np);
+		IRQ_VICSOURCE31, PIC_VALID, np);
 
 	/*
 	 * Interrupts on secondary controller from 0 to 8 are routed to

@@ -23,6 +23,7 @@
 #include <linux/io.h>
 
 #include <mach/hardware.h>
+#include <mach/irqs.h>
 #include <asm/irq.h>
 #include <asm/mach/pci.h>
 

@@ -327,12 +328,12 @@ static int __init versatile_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 	int irq;
 
 	/* slot,  pin,	irq
-	 *  24     1     27
-	 *  25     1     28
-	 *  26     1     29
-	 *  27     1     30
+	 *  24     1     IRQ_SIC_PCI0
+	 *  25     1     IRQ_SIC_PCI1
+	 *  26     1     IRQ_SIC_PCI2
+	 *  27     1     IRQ_SIC_PCI3
 	 */
-	irq = 27 + ((slot - 24 + pin - 1) & 3);
+	irq = IRQ_SIC_PCI0 + ((slot - 24 + pin - 1) & 3);
 
 	return irq;
 }

|
|||
iomap.o
|
||||
|
||||
obj-$(CONFIG_MMU) += fault-armv.o flush.o idmap.o ioremap.o \
|
||||
mmap.o pgd.o mmu.o vmregion.o
|
||||
mmap.o pgd.o mmu.o
|
||||
|
||||
ifneq ($(CONFIG_MMU),y)
|
||||
obj-y += nommu.o
|
||||
|
|
|
@@ -34,6 +34,9 @@
  * The ASID is used to tag entries in the CPU caches and TLBs.
  * The context ID is used by debuggers and trace logic, and
  * should be unique within all running processes.
+ *
+ * In big endian operation, the two 32 bit words are swapped if accesed by
+ * non 64-bit operations.
  */
 #define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
 #define NUM_USER_ASIDS		(ASID_FIRST_VERSION - 1)

@@ -39,6 +39,70 @@
 #include <asm/mach/pci.h>
 #include "mm.h"
 
+
+LIST_HEAD(static_vmlist);
+
+static struct static_vm *find_static_vm_paddr(phys_addr_t paddr,
+			size_t size, unsigned int mtype)
+{
+	struct static_vm *svm;
+	struct vm_struct *vm;
+
+	list_for_each_entry(svm, &static_vmlist, list) {
+		vm = &svm->vm;
+		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+			continue;
+		if ((vm->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
+			continue;
+
+		if (vm->phys_addr > paddr ||
+			paddr + size - 1 > vm->phys_addr + vm->size - 1)
+			continue;
+
+		return svm;
+	}
+
+	return NULL;
+}
+
+struct static_vm *find_static_vm_vaddr(void *vaddr)
+{
+	struct static_vm *svm;
+	struct vm_struct *vm;
+
+	list_for_each_entry(svm, &static_vmlist, list) {
+		vm = &svm->vm;
+
+		/* static_vmlist is ascending order */
+		if (vm->addr > vaddr)
+			break;
+
+		if (vm->addr <= vaddr && vm->addr + vm->size > vaddr)
+			return svm;
+	}
+
+	return NULL;
+}
+
+void __init add_static_vm_early(struct static_vm *svm)
+{
+	struct static_vm *curr_svm;
+	struct vm_struct *vm;
+	void *vaddr;
+
+	vm = &svm->vm;
+	vm_area_add_early(vm);
+	vaddr = vm->addr;
+
+	list_for_each_entry(curr_svm, &static_vmlist, list) {
+		vm = &curr_svm->vm;
+
+		if (vm->addr > vaddr)
+			break;
+	}
+	list_add_tail(&svm->list, &curr_svm->list);
+}
+
 int ioremap_page(unsigned long virt, unsigned long phys,
 		 const struct mem_type *mtype)
 {

@@ -197,13 +261,14 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 	const struct mem_type *type;
 	int err;
 	unsigned long addr;
-	struct vm_struct * area;
+	struct vm_struct *area;
+	phys_addr_t paddr = __pfn_to_phys(pfn);
 
 #ifndef CONFIG_ARM_LPAE
 	/*
 	 * High mappings must be supersection aligned
 	 */
-	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
+	if (pfn >= 0x100000 && (paddr & ~SUPERSECTION_MASK))
 		return NULL;
 #endif
 

@@ -219,24 +284,16 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 	/*
 	 * Try to reuse one of the static mapping whenever possible.
 	 */
-	read_lock(&vmlist_lock);
-	for (area = vmlist; area; area = area->next) {
-		if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000))
-			break;
-		if (!(area->flags & VM_ARM_STATIC_MAPPING))
-			continue;
-		if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
-			continue;
-		if (__phys_to_pfn(area->phys_addr) > pfn ||
-		    __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1)
-			continue;
-		/* we can drop the lock here as we know *area is static */
-		read_unlock(&vmlist_lock);
-		addr = (unsigned long)area->addr;
-		addr += __pfn_to_phys(pfn) - area->phys_addr;
-		return (void __iomem *) (offset + addr);
+	if (size && !(sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) {
+		struct static_vm *svm;
+
+		svm = find_static_vm_paddr(paddr, size, mtype);
+		if (svm) {
+			addr = (unsigned long)svm->vm.addr;
+			addr += paddr - svm->vm.phys_addr;
+			return (void __iomem *) (offset + addr);
+		}
 	}
-	read_unlock(&vmlist_lock);
 
 	/*
 	 * Don't allow RAM to be mapped - this causes problems with ARMv6+

@@ -248,21 +305,21 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 	if (!area)
 		return NULL;
 	addr = (unsigned long)area->addr;
-	area->phys_addr = __pfn_to_phys(pfn);
+	area->phys_addr = paddr;
 
 #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
 	if (DOMAIN_IO == 0 &&
 	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
 	       cpu_is_xsc3()) && pfn >= 0x100000 &&
-	       !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
+	       !((paddr | size | addr) & ~SUPERSECTION_MASK)) {
 		area->flags |= VM_ARM_SECTION_MAPPING;
 		err = remap_area_supersections(addr, pfn, size, type);
-	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
+	} else if (!((paddr | size | addr) & ~PMD_MASK)) {
 		area->flags |= VM_ARM_SECTION_MAPPING;
 		err = remap_area_sections(addr, pfn, size, type);
 	} else
 #endif
-	err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
+	err = ioremap_page_range(addr, addr + size, paddr,
 				 __pgprot(type->prot_pte));
 
 	if (err) {

@@ -346,34 +403,28 @@ __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
 void __iounmap(volatile void __iomem *io_addr)
 {
 	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
-	struct vm_struct *vm;
+	struct static_vm *svm;
+
+	/* If this is a static mapping, we must leave it alone */
+	svm = find_static_vm_vaddr(addr);
+	if (svm)
+		return;
 
-	read_lock(&vmlist_lock);
-	for (vm = vmlist; vm; vm = vm->next) {
-		if (vm->addr > addr)
-			break;
-		if (!(vm->flags & VM_IOREMAP))
-			continue;
-		/* If this is a static mapping we must leave it alone */
-		if ((vm->flags & VM_ARM_STATIC_MAPPING) &&
-		    (vm->addr <= addr) && (vm->addr + vm->size > addr)) {
-			read_unlock(&vmlist_lock);
-			return;
-		}
 #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
+	{
+		struct vm_struct *vm;
+
+		vm = find_vm_area(addr);
+
 		/*
 		 * If this is a section based mapping we need to handle it
 		 * specially as the VM subsystem does not know how to handle
 		 * such a beast.
 		 */
-		if ((vm->addr == addr) &&
-		    (vm->flags & VM_ARM_SECTION_MAPPING)) {
+		if (vm && (vm->flags & VM_ARM_SECTION_MAPPING))
 			unmap_area_sections((unsigned long)vm->addr, vm->size);
-			break;
-		}
-#endif
 	}
-	read_unlock(&vmlist_lock);
+#endif
 
 	vunmap(addr);
 }

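The static_vm infrastructure above keeps a private, address-ordered list of the boot-time mappings instead of scanning the global vmlist, so ioremap() can reuse a static mapping and __iounmap() can refuse to tear one down. A self-contained user-space sketch of the two core operations, sorted insert and physical-address lookup; the singly-linked list stands in for the kernel's list_head, and all values are made up:

```c
#include <stddef.h>
#include <stdio.h>

struct static_vm {
	void *addr;			/* virtual start */
	unsigned long phys_addr;	/* physical start */
	size_t size;
	struct static_vm *next;		/* stand-in for struct list_head */
};

static struct static_vm *static_vmlist;

static void add_static_vm(struct static_vm *svm)
{
	struct static_vm **p = &static_vmlist;

	/* keep the list in ascending order of ->addr, as the kernel does,
	 * so vaddr lookups can stop at the first entry past the target */
	while (*p && (*p)->addr < svm->addr)
		p = &(*p)->next;
	svm->next = *p;
	*p = svm;
}

static struct static_vm *find_static_vm_paddr(unsigned long paddr, size_t size)
{
	struct static_vm *svm;

	for (svm = static_vmlist; svm; svm = svm->next) {
		/* the requested range must lie wholly inside the mapping */
		if (svm->phys_addr <= paddr &&
		    paddr + size - 1 <= svm->phys_addr + svm->size - 1)
			return svm;
	}
	return NULL;
}

int main(void)
{
	static struct static_vm uart = {
		.addr = (void *)0xf8000000, .phys_addr = 0x10009000,
		.size = 0x1000,
	};

	add_static_vm(&uart);
	printf("hit: %p\n",
	       find_static_vm_paddr(0x10009000, 0x100) ? uart.addr : NULL);
	return 0;
}
```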
@@ -1,4 +1,6 @@
 #ifdef CONFIG_MMU
+#include <linux/list.h>
+#include <linux/vmalloc.h>
 
 /* the upper-most page table pointer */
 extern pmd_t *top_pmd;

@@ -65,6 +67,16 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
 /* consistent regions used by dma_alloc_attrs() */
 #define VM_ARM_DMA_CONSISTENT	0x20000000
 
+
+struct static_vm {
+	struct vm_struct vm;
+	struct list_head list;
+};
+
+extern struct list_head static_vmlist;
+extern struct static_vm *find_static_vm_vaddr(void *vaddr);
+extern __init void add_static_vm_early(struct static_vm *svm);
+
 #endif
 
 #ifdef CONFIG_ZONE_DMA

@@ -757,21 +757,24 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 {
 	struct map_desc *md;
 	struct vm_struct *vm;
+	struct static_vm *svm;
 
 	if (!nr)
 		return;
 
-	vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm));
+	svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm));
 
 	for (md = io_desc; nr; md++, nr--) {
 		create_mapping(md);
+
+		vm = &svm->vm;
 		vm->addr = (void *)(md->virtual & PAGE_MASK);
 		vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
 		vm->phys_addr = __pfn_to_phys(md->pfn);
 		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
 		vm->flags |= VM_ARM_MTYPE(md->type);
 		vm->caller = iotable_init;
-		vm_area_add_early(vm++);
+		add_static_vm_early(svm++);
 	}
 }
 

@@ -779,13 +782,16 @@ void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
 				  void *caller)
 {
 	struct vm_struct *vm;
+	struct static_vm *svm;
 
-	vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
+	svm = early_alloc_aligned(sizeof(*svm), __alignof__(*svm));
 
+	vm = &svm->vm;
 	vm->addr = (void *)addr;
 	vm->size = size;
 	vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
 	vm->caller = caller;
-	vm_area_add_early(vm);
+	add_static_vm_early(svm);
 }
 
 #ifndef CONFIG_ARM_LPAE

@@ -810,14 +816,13 @@ static void __init pmd_empty_section_gap(unsigned long addr)
 
 static void __init fill_pmd_gaps(void)
 {
+	struct static_vm *svm;
 	struct vm_struct *vm;
 	unsigned long addr, next = 0;
 	pmd_t *pmd;
 
-	/* we're still single threaded hence no lock needed here */
-	for (vm = vmlist; vm; vm = vm->next) {
-		if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING)))
-			continue;
+	list_for_each_entry(svm, &static_vmlist, list) {
+		vm = &svm->vm;
 		addr = (unsigned long)vm->addr;
 		if (addr < next)
 			continue;

@@ -857,19 +862,12 @@ static void __init fill_pmd_gaps(void)
 #if defined(CONFIG_PCI) && !defined(CONFIG_NEED_MACH_IO_H)
 static void __init pci_reserve_io(void)
 {
-	struct vm_struct *vm;
-	unsigned long addr;
+	struct static_vm *svm;
 
-	/* we're still single threaded hence no lock needed here */
-	for (vm = vmlist; vm; vm = vm->next) {
-		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
-			continue;
-		addr = (unsigned long)vm->addr;
-		addr &= ~(SZ_2M - 1);
-		if (addr == PCI_IO_VIRT_BASE)
-			return;
+	svm = find_static_vm_vaddr((void *)PCI_IO_VIRT_BASE);
+	if (svm)
+		return;
 
-	}
 	vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io);
 }
 #else

@@ -38,9 +38,14 @@
 
 /*
  * mmid - get context id from mm pointer (mm->context.id)
+ * note, this field is 64bit, so in big-endian the two words are swapped too.
  */
 	.macro	mmid, rd, rn
+#ifdef __ARMEB__
+	ldr	\rd, [\rn, #MM_CONTEXT_ID + 4 ]
+#else
 	ldr	\rd, [\rn, #MM_CONTEXT_ID]
+#endif
 	.endm
 
 /*

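The mmid macro above exists because mm->context.id became a 64-bit field: a 32-bit load of its low word needs an offset of +4 on a big-endian kernel and 0 on a little-endian one. The same rule, demonstrated in portable user-space C (a sketch, not kernel code):

```c
#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Return the low 32 bits of a 64-bit field using only 32-bit accesses. */
static uint32_t low32(const uint64_t *id)
{
	uint32_t words[2];

	memcpy(words, id, sizeof(words));
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return words[1];	/* the "+ 4" word in the #ifdef __ARMEB__ arm */
#else
	return words[0];	/* offset 0 suffices on little-endian */
#endif
}

int main(void)
{
	uint64_t context_id = 0x0000000100000042ULL;

	/* prints 0x42 on either endianness */
	printf("ASID word: %#x\n", low32(&context_id));
	return 0;
}
```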
@@ -101,7 +101,7 @@ ENTRY(cpu_v6_dcache_clean_area)
 ENTRY(cpu_v6_switch_mm)
 #ifdef CONFIG_MMU
 	mov	r2, #0
-	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
+	mmid	r1, r1				@ get mm->context.id
 	ALT_SMP(orr	r0, r0, #TTB_FLAGS_SMP)
 	ALT_UP(orr	r0, r0, #TTB_FLAGS_UP)
 	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB

@@ -40,7 +40,7 @@
 ENTRY(cpu_v7_switch_mm)
 #ifdef CONFIG_MMU
 	mov	r2, #0
-	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
+	mmid	r1, r1				@ get mm->context.id
 	ALT_SMP(orr	r0, r0, #TTB_FLAGS_SMP)
 	ALT_UP(orr	r0, r0, #TTB_FLAGS_UP)
 #ifdef CONFIG_ARM_ERRATA_430973

@@ -47,7 +47,7 @@
  */
 ENTRY(cpu_v7_switch_mm)
 #ifdef CONFIG_MMU
-	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
+	mmid	r1, r1				@ get mm->context.id
 	and	r3, r1, #0xff
 	mov	r3, r3, lsl #(48 - 32)		@ ASID
 	mcrr	p15, 0, r0, r3, c2		@ set TTB 0

@@ -1,205 +0,0 @@
-#include <linux/fs.h>
-#include <linux/spinlock.h>
-#include <linux/list.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-
-#include "vmregion.h"
-
-/*
- * VM region handling support.
- *
- * This should become something generic, handling VM region allocations for
- * vmalloc and similar (ioremap, module space, etc).
- *
- * I envisage vmalloc()'s supporting vm_struct becoming:
- *
- *  struct vm_struct {
- *    struct vmregion	region;
- *    unsigned long	flags;
- *    struct page	**pages;
- *    unsigned int	nr_pages;
- *    unsigned long	phys_addr;
- *  };
- *
- * get_vm_area() would then call vmregion_alloc with an appropriate
- * struct vmregion head (eg):
- *
- *  struct vmregion vmalloc_head = {
- *	.vm_list	= LIST_HEAD_INIT(vmalloc_head.vm_list),
- *	.vm_start	= VMALLOC_START,
- *	.vm_end		= VMALLOC_END,
- *  };
- *
- * However, vmalloc_head.vm_start is variable (typically, it is dependent on
- * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
- * would have to initialise this each time prior to calling vmregion_alloc().
- */
-
-struct arm_vmregion *
-arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align,
-		   size_t size, gfp_t gfp, const void *caller)
-{
-	unsigned long start = head->vm_start, addr = head->vm_end;
-	unsigned long flags;
-	struct arm_vmregion *c, *new;
-
-	if (head->vm_end - head->vm_start < size) {
-		printk(KERN_WARNING "%s: allocation too big (requested %#x)\n",
-			__func__, size);
-		goto out;
-	}
-
-	new = kmalloc(sizeof(struct arm_vmregion), gfp);
-	if (!new)
-		goto out;
-
-	new->caller = caller;
-
-	spin_lock_irqsave(&head->vm_lock, flags);
-
-	addr = rounddown(addr - size, align);
-	list_for_each_entry_reverse(c, &head->vm_list, vm_list) {
-		if (addr >= c->vm_end)
-			goto found;
-		addr = rounddown(c->vm_start - size, align);
-		if (addr < start)
-			goto nospc;
-	}
-
- found:
-	/*
-	 * Insert this entry after the one we found.
-	 */
-	list_add(&new->vm_list, &c->vm_list);
-	new->vm_start = addr;
-	new->vm_end = addr + size;
-	new->vm_active = 1;
-
-	spin_unlock_irqrestore(&head->vm_lock, flags);
-	return new;
-
- nospc:
-	spin_unlock_irqrestore(&head->vm_lock, flags);
-	kfree(new);
- out:
-	return NULL;
-}
-
-static struct arm_vmregion *__arm_vmregion_find(struct arm_vmregion_head *head, unsigned long addr)
-{
-	struct arm_vmregion *c;
-
-	list_for_each_entry(c, &head->vm_list, vm_list) {
-		if (c->vm_active && c->vm_start == addr)
-			goto out;
-	}
-	c = NULL;
- out:
-	return c;
-}
-
-struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *head, unsigned long addr)
-{
-	struct arm_vmregion *c;
-	unsigned long flags;
-
-	spin_lock_irqsave(&head->vm_lock, flags);
-	c = __arm_vmregion_find(head, addr);
-	spin_unlock_irqrestore(&head->vm_lock, flags);
-	return c;
-}
-
-struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *head, unsigned long addr)
-{
-	struct arm_vmregion *c;
-	unsigned long flags;
-
-	spin_lock_irqsave(&head->vm_lock, flags);
-	c = __arm_vmregion_find(head, addr);
-	if (c)
-		c->vm_active = 0;
-	spin_unlock_irqrestore(&head->vm_lock, flags);
-	return c;
-}
-
-void arm_vmregion_free(struct arm_vmregion_head *head, struct arm_vmregion *c)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&head->vm_lock, flags);
-	list_del(&c->vm_list);
-	spin_unlock_irqrestore(&head->vm_lock, flags);
-
-	kfree(c);
-}
-
-#ifdef CONFIG_PROC_FS
-static int arm_vmregion_show(struct seq_file *m, void *p)
-{
-	struct arm_vmregion *c = list_entry(p, struct arm_vmregion, vm_list);
-
-	seq_printf(m, "0x%08lx-0x%08lx %7lu", c->vm_start, c->vm_end,
-		c->vm_end - c->vm_start);
-	if (c->caller)
-		seq_printf(m, " %pS", (void *)c->caller);
-	seq_putc(m, '\n');
-	return 0;
-}
-
-static void *arm_vmregion_start(struct seq_file *m, loff_t *pos)
-{
-	struct arm_vmregion_head *h = m->private;
-	spin_lock_irq(&h->vm_lock);
-	return seq_list_start(&h->vm_list, *pos);
-}
-
-static void *arm_vmregion_next(struct seq_file *m, void *p, loff_t *pos)
-{
-	struct arm_vmregion_head *h = m->private;
-	return seq_list_next(p, &h->vm_list, pos);
-}
-
-static void arm_vmregion_stop(struct seq_file *m, void *p)
-{
-	struct arm_vmregion_head *h = m->private;
-	spin_unlock_irq(&h->vm_lock);
-}
-
-static const struct seq_operations arm_vmregion_ops = {
-	.start	= arm_vmregion_start,
-	.stop	= arm_vmregion_stop,
-	.next	= arm_vmregion_next,
-	.show	= arm_vmregion_show,
-};
-
-static int arm_vmregion_open(struct inode *inode, struct file *file)
-{
-	struct arm_vmregion_head *h = PDE(inode)->data;
-	int ret = seq_open(file, &arm_vmregion_ops);
-	if (!ret) {
-		struct seq_file *m = file->private_data;
-		m->private = h;
-	}
-	return ret;
-}
-
-static const struct file_operations arm_vmregion_fops = {
-	.open	= arm_vmregion_open,
-	.read	= seq_read,
-	.llseek	= seq_lseek,
-	.release = seq_release,
-};
-
-int arm_vmregion_create_proc(const char *path, struct arm_vmregion_head *h)
-{
-	proc_create_data(path, S_IRUSR, NULL, &arm_vmregion_fops, h);
-	return 0;
-}
-#else
-int arm_vmregion_create_proc(const char *path, struct arm_vmregion_head *h)
-{
-	return 0;
-}
-#endif

@@ -1,31 +0,0 @@
-#ifndef VMREGION_H
-#define VMREGION_H
-
-#include <linux/spinlock.h>
-#include <linux/list.h>
-
-struct page;
-
-struct arm_vmregion_head {
-	spinlock_t		vm_lock;
-	struct list_head	vm_list;
-	unsigned long		vm_start;
-	unsigned long		vm_end;
-};
-
-struct arm_vmregion {
-	struct list_head	vm_list;
-	unsigned long		vm_start;
-	unsigned long		vm_end;
-	int			vm_active;
-	const void		*caller;
-};
-
-struct arm_vmregion *arm_vmregion_alloc(struct arm_vmregion_head *, size_t, size_t, gfp_t, const void *);
-struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *, unsigned long);
-struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *, unsigned long);
-void arm_vmregion_free(struct arm_vmregion_head *, struct arm_vmregion *);
-
-int arm_vmregion_create_proc(const char *, struct arm_vmregion_head *);
-
-#endif

@@ -20,6 +20,7 @@
 #include <linux/err.h>
 #include <linux/highmem.h>
 #include <linux/log2.h>
+#include <linux/mmc/pm.h>
 #include <linux/mmc/host.h>
 #include <linux/mmc/card.h>
 #include <linux/amba/bus.h>

@@ -59,6 +60,7 @@ static unsigned int fmax = 515633;
  * @blksz_datactrl16: true if Block size is at b16..b30 position in datactrl register
  * @pwrreg_powerup: power up value for MMCIPOWER register
  * @signal_direction: input/out direction of bus signals can be indicated
+ * @pwrreg_clkgate: MMCIPOWER register must be used to gate the clock
  */
 struct variant_data {
 	unsigned int		clkreg;

@@ -71,6 +73,7 @@ struct variant_data {
 	bool			blksz_datactrl16;
 	u32			pwrreg_powerup;
 	bool			signal_direction;
+	bool			pwrreg_clkgate;
 };
 
 static struct variant_data variant_arm = {

@@ -87,6 +90,14 @@ static struct variant_data variant_arm_extended_fifo = {
 	.pwrreg_powerup		= MCI_PWR_UP,
 };
 
+static struct variant_data variant_arm_extended_fifo_hwfc = {
+	.fifosize		= 128 * 4,
+	.fifohalfsize		= 64 * 4,
+	.clkreg_enable		= MCI_ARM_HWFCEN,
+	.datalength_bits	= 16,
+	.pwrreg_powerup		= MCI_PWR_UP,
+};
+
 static struct variant_data variant_u300 = {
 	.fifosize		= 16 * 4,
 	.fifohalfsize		= 8 * 4,

@@ -95,6 +106,7 @@ static struct variant_data variant_u300 = {
 	.sdio			= true,
 	.pwrreg_powerup		= MCI_PWR_ON,
 	.signal_direction	= true,
+	.pwrreg_clkgate		= true,
 };
 
 static struct variant_data variant_nomadik = {

@@ -106,6 +118,7 @@ static struct variant_data variant_nomadik = {
 	.st_clkdiv		= true,
 	.pwrreg_powerup		= MCI_PWR_ON,
 	.signal_direction	= true,
+	.pwrreg_clkgate		= true,
 };
 
 static struct variant_data variant_ux500 = {

@@ -118,6 +131,7 @@ static struct variant_data variant_ux500 = {
 	.st_clkdiv		= true,
 	.pwrreg_powerup		= MCI_PWR_ON,
 	.signal_direction	= true,
+	.pwrreg_clkgate		= true,
 };
 
 static struct variant_data variant_ux500v2 = {

@@ -131,8 +145,27 @@ static struct variant_data variant_ux500v2 = {
 	.blksz_datactrl16	= true,
 	.pwrreg_powerup		= MCI_PWR_ON,
 	.signal_direction	= true,
+	.pwrreg_clkgate		= true,
 };
 
+/*
+ * Validate mmc prerequisites
+ */
+static int mmci_validate_data(struct mmci_host *host,
+			      struct mmc_data *data)
+{
+	if (!data)
+		return 0;
+
+	if (!is_power_of_2(data->blksz)) {
+		dev_err(mmc_dev(host->mmc),
+			"unsupported block size (%d bytes)\n", data->blksz);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 /*
  * This must be called with host->lock held
  */

@@ -202,6 +235,9 @@ static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
 	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
 		clk |= MCI_ST_8BIT_BUS;
 
+	if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50)
+		clk |= MCI_ST_UX500_NEG_EDGE;
+
 	mmci_write_clkreg(host, clk);
 }
 

@@ -352,10 +388,33 @@ static inline void mmci_dma_release(struct mmci_host *host)
 	host->dma_rx_channel = host->dma_tx_channel = NULL;
 }
 
+static void mmci_dma_data_error(struct mmci_host *host)
+{
+	dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
+	dmaengine_terminate_all(host->dma_current);
+	host->dma_current = NULL;
+	host->dma_desc_current = NULL;
+	host->data->host_cookie = 0;
+}
+
 static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
 {
-	struct dma_chan *chan = host->dma_current;
+	struct dma_chan *chan;
 	enum dma_data_direction dir;
+
+	if (data->flags & MMC_DATA_READ) {
+		dir = DMA_FROM_DEVICE;
+		chan = host->dma_rx_channel;
+	} else {
+		dir = DMA_TO_DEVICE;
+		chan = host->dma_tx_channel;
+	}
+
+	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
+}
+
+static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
+{
 	u32 status;
 	int i;
 

@@ -374,19 +433,13 @@ static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
 	 * contiguous buffers.  On TX, we'll get a FIFO underrun error.
 	 */
 	if (status & MCI_RXDATAAVLBLMASK) {
-		dmaengine_terminate_all(chan);
+		mmci_dma_data_error(host);
 		if (!data->error)
 			data->error = -EIO;
 	}
 
-	if (data->flags & MMC_DATA_WRITE) {
-		dir = DMA_TO_DEVICE;
-	} else {
-		dir = DMA_FROM_DEVICE;
-	}
-
-	if (!data->host_cookie)
-		dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
+	mmci_dma_unmap(host, data);
 
 	/*
 	 * Use of DMA with scatter-gather is impossible.

@@ -396,16 +449,15 @@ static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
 		dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
 		mmci_dma_release(host);
 	}
 
+	host->dma_current = NULL;
+	host->dma_desc_current = NULL;
 }
 
-static void mmci_dma_data_error(struct mmci_host *host)
-{
-	dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
-	dmaengine_terminate_all(host->dma_current);
-}
-
-static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
-			      struct mmci_host_next *next)
+/* prepares DMA channel and DMA descriptor, returns non-zero on failure */
+static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
+				struct dma_chan **dma_chan,
+				struct dma_async_tx_descriptor **dma_desc)
 {
 	struct variant_data *variant = host->variant;
 	struct dma_slave_config conf = {

@@ -423,16 +475,6 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
 	enum dma_data_direction buffer_dirn;
 	int nr_sg;
 
-	/* Check if next job is already prepared */
-	if (data->host_cookie && !next &&
-	    host->dma_current && host->dma_desc_current)
-		return 0;
-
-	if (!next) {
-		host->dma_current = NULL;
-		host->dma_desc_current = NULL;
-	}
-
 	if (data->flags & MMC_DATA_READ) {
 		conf.direction = DMA_DEV_TO_MEM;
 		buffer_dirn = DMA_FROM_DEVICE;

@@ -462,29 +504,41 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
 	if (!desc)
 		goto unmap_exit;
 
-	if (next) {
-		next->dma_chan = chan;
-		next->dma_desc = desc;
-	} else {
-		host->dma_current = chan;
-		host->dma_desc_current = desc;
-	}
+	*dma_chan = chan;
+	*dma_desc = desc;
 
 	return 0;
 
 unmap_exit:
-	if (!next)
-		dmaengine_terminate_all(chan);
 	dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
 	return -ENOMEM;
 }
 
+static inline int mmci_dma_prep_data(struct mmci_host *host,
+				     struct mmc_data *data)
+{
+	/* Check if next job is already prepared. */
+	if (host->dma_current && host->dma_desc_current)
+		return 0;
+
+	/* No job were prepared thus do it now. */
+	return __mmci_dma_prep_data(host, data, &host->dma_current,
+				    &host->dma_desc_current);
+}
+
+static inline int mmci_dma_prep_next(struct mmci_host *host,
+				     struct mmc_data *data)
+{
+	struct mmci_host_next *nd = &host->next_data;
+	return __mmci_dma_prep_data(host, data, &nd->dma_chan, &nd->dma_desc);
+}
+
 static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
 {
 	int ret;
 	struct mmc_data *data = host->data;
 
-	ret = mmci_dma_prep_data(host, host->data, NULL);
+	ret = mmci_dma_prep_data(host, host->data);
 	if (ret)
 		return ret;
 

@@ -514,19 +568,11 @@ static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
 {
 	struct mmci_host_next *next = &host->next_data;
 
-	if (data->host_cookie && data->host_cookie != next->cookie) {
-		pr_warning("[%s] invalid cookie: data->host_cookie %d"
-		       " host->next_data.cookie %d\n",
-		       __func__, data->host_cookie, host->next_data.cookie);
-		data->host_cookie = 0;
-	}
-
-	if (!data->host_cookie)
-		return;
+	WARN_ON(data->host_cookie && data->host_cookie != next->cookie);
+	WARN_ON(!data->host_cookie && (next->dma_desc || next->dma_chan));
 
 	host->dma_desc_current = next->dma_desc;
 	host->dma_current = next->dma_chan;
 
 	next->dma_desc = NULL;
 	next->dma_chan = NULL;
 }

@@ -541,19 +587,13 @@ static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
 	if (!data)
 		return;
 
-	if (data->host_cookie) {
-		data->host_cookie = 0;
-		return;
-	}
+	BUG_ON(data->host_cookie);
 
-	/* if config for dma */
-	if (((data->flags & MMC_DATA_WRITE) && host->dma_tx_channel) ||
-	    ((data->flags & MMC_DATA_READ) && host->dma_rx_channel)) {
-		if (mmci_dma_prep_data(host, data, nd))
-			data->host_cookie = 0;
-		else
-			data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
-	}
+	if (mmci_validate_data(host, data))
+		return;
+
+	if (!mmci_dma_prep_next(host, data))
+		data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
 }
 
 static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,

@@ -561,29 +601,23 @@ static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
 {
 	struct mmci_host *host = mmc_priv(mmc);
 	struct mmc_data *data = mrq->data;
-	struct dma_chan *chan;
-	enum dma_data_direction dir;
 
-	if (!data)
+	if (!data || !data->host_cookie)
 		return;
 
-	if (data->flags & MMC_DATA_READ) {
-		dir = DMA_FROM_DEVICE;
-		chan = host->dma_rx_channel;
-	} else {
-		dir = DMA_TO_DEVICE;
-		chan = host->dma_tx_channel;
-	}
+	mmci_dma_unmap(host, data);
 
-	/* if config for dma */
-	if (chan) {
-		if (err)
-			dmaengine_terminate_all(chan);
-		if (data->host_cookie)
-			dma_unmap_sg(mmc_dev(host->mmc), data->sg,
-				     data->sg_len, dir);
-		mrq->data->host_cookie = 0;
-	}
+	if (err) {
+		struct mmci_host_next *next = &host->next_data;
+		struct dma_chan *chan;
+		if (data->flags & MMC_DATA_READ)
+			chan = host->dma_rx_channel;
+		else
+			chan = host->dma_tx_channel;
+		dmaengine_terminate_all(chan);
+
+		next->dma_desc = NULL;
+		next->dma_chan = NULL;
+	}
 }
 

@@ -604,6 +638,11 @@ static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
 {
 }
 
+static inline void mmci_dma_finalize(struct mmci_host *host,
+				     struct mmc_data *data)
+{
+}
+
 static inline void mmci_dma_data_error(struct mmci_host *host)
 {
 }

@@ -680,6 +719,9 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
 		mmci_write_clkreg(host, clk);
 	}
 
+	if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50)
+		datactrl |= MCI_ST_DPSM_DDRMODE;
+
 	/*
 	 * Attempt to use DMA operation mode, if this
 	 * should fail, fall back to PIO mode

@@ -751,8 +793,10 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
 		u32 remain, success;
 
 		/* Terminate the DMA transfer */
-		if (dma_inprogress(host))
+		if (dma_inprogress(host)) {
 			mmci_dma_data_error(host);
+			mmci_dma_unmap(host, data);
+		}
 
 		/*
 		 * Calculate how far we are into the transfer.  Note that

@@ -791,7 +835,7 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
 
 	if (status & MCI_DATAEND || data->error) {
 		if (dma_inprogress(host))
-			mmci_dma_unmap(host, data);
+			mmci_dma_finalize(host, data);
 		mmci_stop_data(host);
 
 		if (!data->error)

@@ -828,8 +872,10 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
 	if (!cmd->data || cmd->error) {
 		if (host->data) {
 			/* Terminate the DMA transfer */
-			if (dma_inprogress(host))
+			if (dma_inprogress(host)) {
 				mmci_dma_data_error(host);
+				mmci_dma_unmap(host, host->data);
+			}
 			mmci_stop_data(host);
 		}
 		mmci_request_end(host, cmd->mrq);

@@ -1055,10 +1101,8 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
 
 	WARN_ON(host->mrq != NULL);
 
-	if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
-		dev_err(mmc_dev(mmc), "unsupported block size (%d bytes)\n",
-			mrq->data->blksz);
-		mrq->cmd->error = -EINVAL;
+	mrq->cmd->error = mmci_validate_data(host, mrq->data);
+	if (mrq->cmd->error) {
 		mmc_request_done(mmc, mrq);
 		return;
 	}

@@ -1086,7 +1130,6 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 	struct variant_data *variant = host->variant;
 	u32 pwr = 0;
 	unsigned long flags;
-	int ret;
 
 	pm_runtime_get_sync(mmc_dev(mmc));
 

@@ -1096,23 +1139,13 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 
 	switch (ios->power_mode) {
 	case MMC_POWER_OFF:
-		if (host->vcc)
-			ret = mmc_regulator_set_ocr(mmc, host->vcc, 0);
+		if (!IS_ERR(mmc->supply.vmmc))
+			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
 		break;
 	case MMC_POWER_UP:
-		if (host->vcc) {
-			ret = mmc_regulator_set_ocr(mmc, host->vcc, ios->vdd);
-			if (ret) {
-				dev_err(mmc_dev(mmc), "unable to set OCR\n");
-				/*
-				 * The .set_ios() function in the mmc_host_ops
-				 * struct return void, and failing to set the
-				 * power should be rare so we print an error
-				 * and return here.
-				 */
-				goto out;
-			}
-		}
+		if (!IS_ERR(mmc->supply.vmmc))
+			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
+
 		/*
 		 * The ST Micro variant doesn't have the PL180s MCI_PWR_UP
 		 * and instead uses MCI_PWR_ON so apply whatever value is

@@ -1154,6 +1187,13 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 		}
 	}
 
+	/*
+	 * If clock = 0 and the variant requires the MMCIPOWER to be used for
+	 * gating the clock, the MCI_PWR_ON bit is cleared.
+	 */
+	if (!ios->clock && variant->pwrreg_clkgate)
+		pwr &= ~MCI_PWR_ON;
+
 	spin_lock_irqsave(&host->lock, flags);
 
 	mmci_set_clkreg(host, ios->clock);

@@ -1161,7 +1201,6 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 
 	spin_unlock_irqrestore(&host->lock, flags);
 
- out:
 	pm_runtime_mark_last_busy(mmc_dev(mmc));
 	pm_runtime_put_autosuspend(mmc_dev(mmc));
 }

@@ -1384,32 +1423,19 @@ static int mmci_probe(struct amba_device *dev,
 	} else
 		dev_warn(&dev->dev, "could not get default pinstate\n");
 
-#ifdef CONFIG_REGULATOR
-	/* If we're using the regulator framework, try to fetch a regulator */
-	host->vcc = regulator_get(&dev->dev, "vmmc");
-	if (IS_ERR(host->vcc))
-		host->vcc = NULL;
-	else {
-		int mask = mmc_regulator_get_ocrmask(host->vcc);
-
-		if (mask < 0)
-			dev_err(&dev->dev, "error getting OCR mask (%d)\n",
-				mask);
-		else {
-			host->mmc->ocr_avail = (u32) mask;
-			if (plat->ocr_mask)
-				dev_warn(&dev->dev,
-				 "Provided ocr_mask/setpower will not be used "
-				 "(using regulator instead)\n");
-		}
-	}
-#endif
-	/* Fall back to platform data if no regulator is found */
-	if (host->vcc == NULL)
+	/* Get regulators and the supported OCR mask */
+	mmc_regulator_get_supply(mmc);
+	if (!mmc->ocr_avail)
 		mmc->ocr_avail = plat->ocr_mask;
+	else if (plat->ocr_mask)
+		dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
 
 	mmc->caps = plat->capabilities;
 	mmc->caps2 = plat->capabilities2;
 
+	/* We support these PM capabilities. */
+	mmc->pm_caps = MMC_PM_KEEP_POWER;
+
 	/*
 	 * We can do SGIO
 	 */

@@ -1585,10 +1611,6 @@ static int mmci_remove(struct amba_device *dev)
 		clk_disable_unprepare(host->clk);
 		clk_put(host->clk);
 
-		if (host->vcc)
-			mmc_regulator_set_ocr(mmc, host->vcc, 0);
-		regulator_put(host->vcc);
-
 		mmc_free_host(mmc);
 
 		amba_release_regions(dev);

@@ -1636,8 +1658,37 @@ static int mmci_resume(struct device *dev)
 }
 #endif
 
+#ifdef CONFIG_PM_RUNTIME
+static int mmci_runtime_suspend(struct device *dev)
+{
+	struct amba_device *adev = to_amba_device(dev);
+	struct mmc_host *mmc = amba_get_drvdata(adev);
+
+	if (mmc) {
+		struct mmci_host *host = mmc_priv(mmc);
+		clk_disable_unprepare(host->clk);
+	}
+
+	return 0;
+}
+
+static int mmci_runtime_resume(struct device *dev)
+{
+	struct amba_device *adev = to_amba_device(dev);
+	struct mmc_host *mmc = amba_get_drvdata(adev);
+
+	if (mmc) {
+		struct mmci_host *host = mmc_priv(mmc);
+		clk_prepare_enable(host->clk);
+	}
+
+	return 0;
+}
+#endif
+
 static const struct dev_pm_ops mmci_dev_pm_ops = {
 	SET_SYSTEM_SLEEP_PM_OPS(mmci_suspend, mmci_resume)
+	SET_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL)
 };
 
 static struct amba_id mmci_ids[] = {

@@ -1651,6 +1702,11 @@ static struct amba_id mmci_ids[] = {
 		.mask	= 0xff0fffff,
 		.data	= &variant_arm_extended_fifo,
 	},
+	{
+		.id	= 0x02041180,
+		.mask	= 0xff0fffff,
+		.data	= &variant_arm_extended_fifo_hwfc,
+	},
 	{
 		.id	= 0x00041181,
 		.mask	= 0x000fffff,

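Much of the mmci.c churn above separates "prepare the next request's DMA descriptor" (mmci_dma_prep_next, called from pre_request) from "adopt it when the request is issued" (mmci_get_next_data), so descriptor setup can overlap the transfer in flight. A toy user-space model of that hand-off; all names and types are illustrative, not the driver's:

```c
#include <stdio.h>

struct desc { int id; };		/* stand-in for a DMA descriptor */

struct host {
	struct desc *current;		/* descriptor being executed */
	struct desc *next;		/* descriptor prepared in advance */
};

static void prep_next(struct host *h, struct desc *d)
{
	h->next = d;			/* like mmci_dma_prep_next() */
}

static void get_next(struct host *h)
{
	h->current = h->next;		/* like mmci_get_next_data() */
	h->next = NULL;			/* hand-off: the slot is consumed */
}

int main(void)
{
	struct host h = { NULL, NULL };
	struct desc d1 = { 1 }, d2 = { 2 };

	prep_next(&h, &d1);
	get_next(&h);			/* request 1 starts */
	prep_next(&h, &d2);		/* request 2 prepared while 1 runs */
	printf("running %d, prepared %d\n", h.current->id, h.next->id);
	return 0;
}
```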
@@ -28,6 +28,8 @@
 #define MCI_ST_UX500_NEG_EDGE	(1 << 13)
 #define MCI_ST_UX500_HWFCEN	(1 << 14)
 #define MCI_ST_UX500_CLK_INV	(1 << 15)
+/* Modified PL180 on Versatile Express platform */
+#define MCI_ARM_HWFCEN		(1 << 12)
 
 #define MMCIARGUMENT		0x008
 #define MMCICOMMAND		0x00c

@@ -193,7 +195,6 @@ struct mmci_host {
 	/* pio stuff */
 	struct sg_mapping_iter	sg_miter;
 	unsigned int		size;
-	struct regulator	*vcc;
 
 	/* pinctrl handles */
 	struct pinctrl		*pinctrl;