eb50439b92
It turns out that the logical CPU mapping is useful even when !CONFIG_SMP for manipulation of devices like interrupt and power controllers when running a UP kernel on a CPU other than 0. This can happen when kexecing a UP image from an SMP kernel. In the future, multi-cluster systems running AMP configurations will require something similar for mapping cluster IDs, so it makes sense to decouple this logic in preparation for this support.

Acked-by: Yang Bai <hamo.by@gmail.com>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Reported-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
154 lines
3.6 KiB
C
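For context, cpu_logical_map() (used throughout the file below) is just a table lookup translating a logical CPU number into the physical/hardware CPU ID. The sketch below illustrates the idea the commit message describes; it is not the exact mainline code, and the NR_CPUS value and example_cpu_kill() helper are purely illustrative:

/*
 * Sketch only: a per-arch table mapping logical CPU numbers to physical
 * CPU IDs.  Keeping it outside the CONFIG_SMP-only code lets a UP kernel
 * that was kexec'd onto, say, physical CPU1 still program interrupt and
 * power controllers with the correct hardware ID.
 */
#define NR_CPUS 4				/* illustrative value */

int __cpu_logical_map[NR_CPUS];			/* filled in from hardware at boot */
#define cpu_logical_map(cpu)	__cpu_logical_map[cpu]

/* Hypothetical caller, mirroring r8a7779_platform_cpu_kill() below */
static int example_cpu_kill(unsigned int cpu)
{
	unsigned int hwcpu = cpu_logical_map(cpu);	/* logical -> physical */

	/* ... poke the power controller channel for 'hwcpu' ... */
	return hwcpu;
}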
/*
 * SMP support for R-Mobile / SH-Mobile - r8a7779 portion
 *
 * Copyright (C) 2011 Renesas Solutions Corp.
 * Copyright (C) 2011 Magnus Damm
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <mach/common.h>
#include <mach/r8a7779.h>
#include <asm/smp_plat.h>
#include <asm/smp_scu.h>
#include <asm/smp_twd.h>
#include <asm/hardware/gic.h>

#define AVECR 0xfe700040

static struct r8a7779_pm_ch r8a7779_ch_cpu1 = {
	.chan_offs = 0x40, /* PWRSR0 .. PWRER0 */
	.chan_bit = 1, /* ARM1 */
	.isr_bit = 1, /* ARM1 */
};

static struct r8a7779_pm_ch r8a7779_ch_cpu2 = {
	.chan_offs = 0x40, /* PWRSR0 .. PWRER0 */
	.chan_bit = 2, /* ARM2 */
	.isr_bit = 2, /* ARM2 */
};

static struct r8a7779_pm_ch r8a7779_ch_cpu3 = {
	.chan_offs = 0x40, /* PWRSR0 .. PWRER0 */
	.chan_bit = 3, /* ARM3 */
	.isr_bit = 3, /* ARM3 */
};

static struct r8a7779_pm_ch *r8a7779_ch_cpu[4] = {
	[1] = &r8a7779_ch_cpu1,
	[2] = &r8a7779_ch_cpu2,
	[3] = &r8a7779_ch_cpu3,
};

static void __iomem *scu_base_addr(void)
{
	return (void __iomem *)0xf0000000;
}

static DEFINE_SPINLOCK(scu_lock);
static unsigned long tmp;

static void modify_scu_cpu_psr(unsigned long set, unsigned long clr)
{
	void __iomem *scu_base = scu_base_addr();

	spin_lock(&scu_lock);
	tmp = __raw_readl(scu_base + 8);
	tmp &= ~clr;
	tmp |= set;
	spin_unlock(&scu_lock);

	/* disable cache coherency after releasing the lock */
	__raw_writel(tmp, scu_base + 8);
}

unsigned int __init r8a7779_get_core_count(void)
{
	void __iomem *scu_base = scu_base_addr();

#ifdef CONFIG_HAVE_ARM_TWD
	/* twd_base needs to be initialized before percpu_timer_setup() */
	twd_base = (void __iomem *)0xf0000600;
#endif

	return scu_get_core_count(scu_base);
}

int r8a7779_platform_cpu_kill(unsigned int cpu)
{
	struct r8a7779_pm_ch *ch = NULL;
	int ret = -EIO;

	cpu = cpu_logical_map(cpu);

	/* disable cache coherency */
	modify_scu_cpu_psr(3 << (cpu * 8), 0);

	if (cpu < ARRAY_SIZE(r8a7779_ch_cpu))
		ch = r8a7779_ch_cpu[cpu];

	if (ch)
		ret = r8a7779_sysc_power_down(ch);

	return ret ? ret : 1;
}

void __cpuinit r8a7779_secondary_init(unsigned int cpu)
{
	gic_secondary_init(0);
}

int __cpuinit r8a7779_boot_secondary(unsigned int cpu)
{
	struct r8a7779_pm_ch *ch = NULL;
	int ret = -EIO;

	cpu = cpu_logical_map(cpu);

	/* enable cache coherency */
	modify_scu_cpu_psr(0, 3 << (cpu * 8));

	if (cpu < ARRAY_SIZE(r8a7779_ch_cpu))
		ch = r8a7779_ch_cpu[cpu];

	if (ch)
		ret = r8a7779_sysc_power_up(ch);

	return ret;
}

void __init r8a7779_smp_prepare_cpus(void)
{
	int cpu = cpu_logical_map(0);

	scu_enable(scu_base_addr());

	/* Map the reset vector (in headsmp.S) */
	__raw_writel(__pa(shmobile_secondary_vector), __io(AVECR));

	/* enable cache coherency on CPU0 */
	modify_scu_cpu_psr(0, 3 << (cpu * 8));

	r8a7779_pm_init();

	/* power off secondary CPUs */
	r8a7779_platform_cpu_kill(1);
	r8a7779_platform_cpu_kill(2);
	r8a7779_platform_cpu_kill(3);
}