/*
 * ppc64 MMU hashtable management routines
 *
 * (c) Copyright IBM Corp. 2003, 2005
 *
 * Maintained by: Benjamin Herrenschmidt
 *                <benh@kernel.crashing.org>
 *
 * This file is covered by the GNU Public Licence v2 as
 * described in the kernel's COPYING file.
 */

#include <asm/reg.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>

	.text

/*
 * Stackframe:
 *
 *         +-> Back chain			(SP + 256)
 *         |   General register save area	(SP + 112)
 *         |   Parameter save area		(SP + 48)
 *         |   TOC save area			(SP + 40)
 *         |   link editor doubleword		(SP + 32)
 *         |   compiler doubleword		(SP + 24)
 *         |   LR save area			(SP + 16)
 *         |   CR save area			(SP + 8)
 * SP ---> +-- Back chain			(SP + 0)
 */
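
/*
 * Note: STK_PARAM(Rn) and STK_REG(Rn) used throughout are offsets into the
 * parameter save area and the general register save area of the frame
 * pictured above (presumably provided via asm/ppc_asm.h), and STACKFRAMESIZE
 * is the full 256-byte frame. This is only a reading of the layout above,
 * not a new definition.
 */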

#ifndef CONFIG_PPC_64K_PAGES

/*****************************************************************************
 *                                                                           *
 *                  4K SW & 4K HW pages implementation                       *
 *                                                                           *
 *****************************************************************************/

/*
 * _hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
 *		 pte_t *ptep, unsigned long trap, int local, int ssize)
 *
 * Adds a 4K page to the hash table in a segment of 4K pages only
 */

_GLOBAL(__hash_page_4K)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-STACKFRAMESIZE(r1)
	/* Save all params that we need after a function call */
	std	r6,STK_PARAM(R6)(r1)
	std	r8,STK_PARAM(R8)(r1)
	std	r9,STK_PARAM(R9)(r1)

	/* Save non-volatile registers.
	 * r31 will hold "old PTE"
	 * r30 is "new PTE"
	 * r29 is vpn
	 * r28 is a hash value
	 * r27 is hashtab mask (maybe dynamic patched instead ?)
	 */
	std	r27,STK_REG(R27)(r1)
	std	r28,STK_REG(R28)(r1)
	std	r29,STK_REG(R29)(r1)
	std	r30,STK_REG(R30)(r1)
	std	r31,STK_REG(R31)(r1)

	/* Step 1:
	 *
	 * Check permissions, atomically mark the linux PTE busy
	 * and hashed.
	 */
1:
	ldarx	r31,0,r6
	/* Check access rights (access & ~(pte_val(*ptep))) */
	andc.	r0,r4,r31
	bne-	htab_wrong_access
	/* Check if PTE is busy */
	andi.	r0,r31,_PAGE_BUSY
	/* If so, just bail out and refault if needed. Someone else
	 * is changing this PTE anyway and might hash it.
	 */
	bne-	htab_bail_ok

	/* Prepare new PTE value (turn access RW into DIRTY, then
	 * add BUSY, HASHPTE and ACCESSED)
	 */
	rlwinm	r30,r4,32-9+7,31-7,31-7	/* _PAGE_RW -> _PAGE_DIRTY */
	or	r30,r30,r31
	ori	r30,r30,_PAGE_BUSY | _PAGE_ACCESSED | _PAGE_HASHPTE
	/* Write the linux PTE atomically (setting busy) */
	stdcx.	r30,0,r6
	bne-	1b
	isync
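
	/*
	 * Roughly, the ldarx/stdcx. sequence above implements the following
	 * C sketch (illustrative only; cmpxchg stands in for the reservation
	 * pair, and the return values follow htab_wrong_access and
	 * htab_bail_ok below):
	 *
	 *	do {
	 *		old = *ptep;
	 *		if (access & ~old)
	 *			return 1;	// htab_wrong_access
	 *		if (old & _PAGE_BUSY)
	 *			return 0;	// htab_bail_ok, just refault
	 *		new = old | _PAGE_BUSY | _PAGE_ACCESSED | _PAGE_HASHPTE;
	 *		if (access & _PAGE_RW)
	 *			new |= _PAGE_DIRTY;
	 *	} while (cmpxchg(ptep, old, new) != old);
	 */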

	/* Step 2:
	 *
	 * Insert/Update the HPTE in the hash table. At this point,
	 * r4 (access) is re-usable, we use it for the new HPTE flags
	 */

BEGIN_FTR_SECTION
	cmpdi	r9,0			/* check segment size */
	bne	3f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
	/* Calc vpn and put it in r29 */
	sldi	r29,r5,SID_SHIFT - VPN_SHIFT
	rldicl	r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
	or	r29,r28,r29

	/* Calculate hash value for primary slot and store it in r28 */
	rldicl	r5,r5,0,25		/* vsid & 0x0000007fffffffff */
	rldicl	r0,r3,64-12,48		/* (ea >> 12) & 0xffff */
	xor	r28,r5,r0
	b	4f

3:	/* Calc vpn and put it in r29 */
	sldi	r29,r5,SID_SHIFT_1T - VPN_SHIFT
	rldicl	r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT)
	or	r29,r28,r29

	/*
	 * calculate hash value for primary slot and
	 * store it in r28 for 1T segment
	 */
	rldic	r28,r5,25,25		/* (vsid << 25) & 0x7fffffffff */
	clrldi	r5,r5,40		/* vsid & 0xffffff */
	rldicl	r0,r3,64-12,36		/* (ea >> 12) & 0xfffffff */
	xor	r28,r28,r5
	xor	r28,r28,r0		/* hash */
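
	/*
	 * For reference, a C-style reading of the values computed above
	 * (masks taken from the inline comments; "segment_size" is just an
	 * illustrative name for 1 << SID_SHIFT or 1 << SID_SHIFT_1T):
	 *
	 *	vpn  = (vsid << (SID_SHIFT - VPN_SHIFT)) |
	 *	       ((ea & (segment_size - 1)) >> VPN_SHIFT);
	 *	// 256M segments:
	 *	hash = (vsid & 0x7fffffffff) ^ ((ea >> 12) & 0xffff);
	 *	// 1T segments:
	 *	hash = ((vsid << 25) & 0x7fffffffff) ^ (vsid & 0xffffff)
	 *	       ^ ((ea >> 12) & 0xfffffff);
	 */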

	/* Convert linux PTE bits into HW equivalents */
4:	andi.	r3,r30,0x1fe		/* Get basic set of flags */
	xori	r3,r3,HPTE_R_N		/* _PAGE_EXEC -> NOEXEC */
	rlwinm	r0,r30,32-9+1,30,30	/* _PAGE_RW -> _PAGE_USER (r0) */
	rlwinm	r4,r30,32-7+1,30,30	/* _PAGE_DIRTY -> _PAGE_USER (r4) */
	and	r0,r0,r4		/* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/
	andc	r0,r30,r0		/* r0 = pte & ~r0 */
	rlwimi	r3,r0,32-1,31,31	/* Insert result into PP lsb */
	ori	r3,r3,HPTE_R_C		/* Always add "C" bit for perf. */
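
	/*
	 * A rough C sketch of the conversion above (a reading of the
	 * rlwinm/rlwimi bit games, not a separate definition):
	 *
	 *	rflags  = (new_pte & 0x1fe) ^ HPTE_R_N;	// N set unless _PAGE_EXEC
	 *	if ((new_pte & _PAGE_USER) &&
	 *	    !((new_pte & _PAGE_RW) && (new_pte & _PAGE_DIRTY)))
	 *		rflags |= 1;			// PP lsb -> read-only
	 *	rflags |= HPTE_R_C;			// always set Changed bit
	 *
	 * A user page that is RW but not yet DIRTY is therefore mapped
	 * read-only, so the first store refaults and dirtiness is tracked
	 * through the linux PTE rather than through the HPTE C bit.
	 */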

	/* We eventually do the icache sync here (maybe inline that
	 * code rather than call a C function...)
	 */
BEGIN_FTR_SECTION
	mr	r4,r30
	mr	r5,r7
	bl	.hash_page_do_lazy_icache
END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)

	/* At this point, r3 contains new PP bits, save them in
	 * place of "access" in the param area (sic)
	 */
	std	r3,STK_PARAM(R4)(r1)

	/* Get htab_hash_mask */
	ld	r4,htab_hash_mask@got(2)
	ld	r27,0(r4)	/* htab_hash_mask -> r27 */

	/* Check if we may already be in the hashtable, in this case, we
	 * go to out-of-line code to try to modify the HPTE
	 */
	andi.	r0,r31,_PAGE_HASHPTE
	bne	htab_modify_pte

htab_insert_pte:
	/* Clear hpte bits in new pte (we also clear BUSY btw) and
	 * add _PAGE_HASHPTE
	 */
	lis	r0,_PAGE_HPTEFLAGS@h
	ori	r0,r0,_PAGE_HPTEFLAGS@l
	andc	r30,r30,r0
	ori	r30,r30,_PAGE_HASHPTE

	/* physical address r5 */
	rldicl	r5,r31,64-PTE_RPN_SHIFT,PTE_RPN_SHIFT
	sldi	r5,r5,PAGE_SHIFT

	/* Calculate primary group hash */
	and	r0,r28,r27
	rldicr	r3,r0,3,63-3		/* r3 = (hash & mask) << 3 */

	/* Call ppc_md.hpte_insert */
	ld	r6,STK_PARAM(R4)(r1)	/* Retrieve new pp bits */
	mr	r4,r29			/* Retrieve vpn */
	li	r7,0			/* !bolted, !secondary */
	li	r8,MMU_PAGE_4K		/* page size */
	ld	r9,STK_PARAM(R9)(r1)	/* segment size */
_GLOBAL(htab_call_hpte_insert1)
	bl	.			/* Patched by htab_finish_init() */
	cmpdi	0,r3,0
	bge	htab_pte_insert_ok	/* Insertion successful */
	cmpdi	0,r3,-2			/* Critical failure */
	beq-	htab_pte_insert_failure

	/* Now try secondary slot */

	/* physical address r5 */
	rldicl	r5,r31,64-PTE_RPN_SHIFT,PTE_RPN_SHIFT
	sldi	r5,r5,PAGE_SHIFT

	/* Calculate secondary group hash */
	andc	r0,r27,r28
	rldicr	r3,r0,3,63-3		/* r0 = (~hash & mask) << 3 */

	/* Call ppc_md.hpte_insert */
	ld	r6,STK_PARAM(R4)(r1)	/* Retrieve new pp bits */
	mr	r4,r29			/* Retrieve vpn */
	li	r7,HPTE_V_SECONDARY	/* !bolted, secondary */
	li	r8,MMU_PAGE_4K		/* page size */
	ld	r9,STK_PARAM(R9)(r1)	/* segment size */
_GLOBAL(htab_call_hpte_insert2)
	bl	.			/* Patched by htab_finish_init() */
	cmpdi	0,r3,0
	bge+	htab_pte_insert_ok	/* Insertion successful */
	cmpdi	0,r3,-2			/* Critical failure */
	beq-	htab_pte_insert_failure

	/* Both are full, we need to evict something */
	mftb	r0
	/* Pick a random group based on TB */
	andi.	r0,r0,1
	mr	r5,r28
	bne	2f
	not	r5,r5
2:	and	r0,r5,r27
	rldicr	r3,r0,3,63-3		/* r0 = (hash & mask) << 3 */
	/* Call ppc_md.hpte_remove */
_GLOBAL(htab_call_hpte_remove)
	bl	.			/* Patched by htab_finish_init() */

	/* Try all again */
	b	htab_insert_pte
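
	/*
	 * The overall insertion strategy above, in rough C pseudocode (the
	 * hpte_* calls stand for the ppc_md methods patched in by
	 * htab_finish_init(); the error values follow the cmpdi tests
	 * above):
	 *
	 *	for (;;) {
	 *		slot = hpte_insert((hash & mask) << 3, ...);	// primary
	 *		if (slot >= 0) break;
	 *		if (slot == -2) goto failure;
	 *		slot = hpte_insert((~hash & mask) << 3, ...);	// secondary
	 *		if (slot >= 0) break;
	 *		if (slot == -2) goto failure;
	 *		// both groups full: evict from a pseudo-random group
	 *		group = (mftb() & 1) ? hash : ~hash;
	 *		hpte_remove((group & mask) << 3);
	 *	}
	 */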

htab_bail_ok:
	li	r3,0
	b	htab_bail

htab_pte_insert_ok:
	/* Insert slot number & secondary bit in PTE */
	rldimi	r30,r3,12,63-15

	/* Write out the PTE with a normal write
	 * (maybe adding an eieio would still be good?)
	 */
htab_write_out_pte:
	ld	r6,STK_PARAM(R6)(r1)
	std	r30,0(r6)
	li	r3, 0
htab_bail:
	ld	r27,STK_REG(R27)(r1)
	ld	r28,STK_REG(R28)(r1)
	ld	r29,STK_REG(R29)(r1)
	ld	r30,STK_REG(R30)(r1)
	ld	r31,STK_REG(R31)(r1)
	addi	r1,r1,STACKFRAMESIZE
	ld	r0,16(r1)
	mtlr	r0
	blr

htab_modify_pte:
	/* Keep PP bits in r4 and slot idx from the PTE around in r3 */
	mr	r4,r3
	rlwinm	r3,r31,32-12,29,31

	/* Secondary group ? if yes, get an inverted hash value */
	mr	r5,r28
	andi.	r0,r31,_PAGE_SECONDARY
	beq	1f
	not	r5,r5
1:
	/* Calculate proper slot value for ppc_md.hpte_updatepp */
	and	r0,r5,r27
	rldicr	r0,r0,3,63-3	/* r0 = (hash & mask) << 3 */
	add	r3,r0,r3	/* add slot idx */
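
	/*
	 * Illustratively: the PTE remembers which of the 8 slots in the
	 * group the HPTE went into, and whether the secondary group was
	 * used, so the value handed to ppc_md.hpte_updatepp is roughly:
	 *
	 *	group = (pte & _PAGE_SECONDARY) ? ~hash : hash;
	 *	slot  = ((group & htab_hash_mask) << 3) + ((pte >> 12) & 0x7);
	 */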

	/* Call ppc_md.hpte_updatepp */
	mr	r5,r29			/* vpn */
	li	r6,MMU_PAGE_4K		/* page size */
	ld	r7,STK_PARAM(R9)(r1)	/* segment size */
	ld	r8,STK_PARAM(R8)(r1)	/* get "local" param */
_GLOBAL(htab_call_hpte_updatepp)
	bl	.			/* Patched by htab_finish_init() */

	/* If we failed (typically because the HPTE wasn't really there),
	 * we try an insertion.
	 */
	cmpdi	0,r3,-1
	beq-	htab_insert_pte

	/* Clear the BUSY bit and Write out the PTE */
	li	r0,_PAGE_BUSY
	andc	r30,r30,r0
	b	htab_write_out_pte

htab_wrong_access:
	/* Bail out clearing reservation */
	stdcx.	r31,0,r6
	li	r3,1
	b	htab_bail

htab_pte_insert_failure:
	/* Bail out restoring old PTE */
	ld	r6,STK_PARAM(R6)(r1)
	std	r31,0(r6)
	li	r3,-1
	b	htab_bail


#else /* CONFIG_PPC_64K_PAGES */

/*****************************************************************************
 *                                                                           *
 *           64K SW & 4K or 64K HW in a 4K segment pages implementation      *
 *                                                                           *
 *****************************************************************************/

/* _hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
 *		 pte_t *ptep, unsigned long trap, int local, int ssize,
 *		 int subpg_prot)
 */

/*
 * For now, we do NOT implement Admixed pages
 */
_GLOBAL(__hash_page_4K)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-STACKFRAMESIZE(r1)
	/* Save all params that we need after a function call */
	std	r6,STK_PARAM(R6)(r1)
	std	r8,STK_PARAM(R8)(r1)
	std	r9,STK_PARAM(R9)(r1)

	/* Save non-volatile registers.
	 * r31 will hold "old PTE"
	 * r30 is "new PTE"
	 * r29 is vpn
	 * r28 is a hash value
	 * r27 is hashtab mask (maybe dynamic patched instead ?)
	 * r26 is the hidx mask
	 * r25 is the index in combo page
	 */
	std	r25,STK_REG(R25)(r1)
	std	r26,STK_REG(R26)(r1)
	std	r27,STK_REG(R27)(r1)
	std	r28,STK_REG(R28)(r1)
	std	r29,STK_REG(R29)(r1)
	std	r30,STK_REG(R30)(r1)
	std	r31,STK_REG(R31)(r1)

	/* Step 1:
	 *
	 * Check permissions, atomically mark the linux PTE busy
	 * and hashed.
	 */
1:
	ldarx	r31,0,r6
	/* Check access rights (access & ~(pte_val(*ptep))) */
	andc.	r0,r4,r31
	bne-	htab_wrong_access
	/* Check if PTE is busy */
	andi.	r0,r31,_PAGE_BUSY
	/* If so, just bail out and refault if needed. Someone else
	 * is changing this PTE anyway and might hash it.
	 */
	bne-	htab_bail_ok
	/* Prepare new PTE value (turn access RW into DIRTY, then
	 * add BUSY and ACCESSED)
	 */
	rlwinm	r30,r4,32-9+7,31-7,31-7	/* _PAGE_RW -> _PAGE_DIRTY */
	or	r30,r30,r31
	ori	r30,r30,_PAGE_BUSY | _PAGE_ACCESSED
	oris	r30,r30,_PAGE_COMBO@h
	/* Write the linux PTE atomically (setting busy) */
	stdcx.	r30,0,r6
	bne-	1b
	isync

	/* Step 2:
	 *
	 * Insert/Update the HPTE in the hash table. At this point,
	 * r4 (access) is re-usable, we use it for the new HPTE flags
	 */

	/* Load the hidx index */
	rldicl	r25,r3,64-12,60
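
	/*
	 * r25 now holds (ea >> 12) & 0xf, i.e. which of the sixteen 4K
	 * sub-pages of the 64K linux page we are hashing (a reading of the
	 * rldicl above, not a new definition).
	 */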

BEGIN_FTR_SECTION
	cmpdi	r9,0			/* check segment size */
	bne	3f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
	/* Calc vpn and put it in r29 */
	sldi	r29,r5,SID_SHIFT - VPN_SHIFT
	/*
	 * clrldi	r3,r3,64 - SID_SHIFT --> ea & 0xfffffff
	 * srdi		r28,r3,VPN_SHIFT
	 */
	rldicl	r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
	or	r29,r28,r29

	/* Calculate hash value for primary slot and store it in r28 */
	rldicl	r5,r5,0,25		/* vsid & 0x0000007fffffffff */
	rldicl	r0,r3,64-12,48		/* (ea >> 12) & 0xffff */
	xor	r28,r5,r0
	b	4f

3:	/* Calc vpn and put it in r29 */
	sldi	r29,r5,SID_SHIFT_1T - VPN_SHIFT
	/*
	 * clrldi	r3,r3,64 - SID_SHIFT_1T --> ea & 0xffffffffff
	 * srdi		r28,r3,VPN_SHIFT
	 */
	rldicl	r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT)
	or	r29,r28,r29

	/*
	 * Calculate hash value for primary slot and
	 * store it in r28 for 1T segment
	 */
	rldic	r28,r5,25,25		/* (vsid << 25) & 0x7fffffffff */
	clrldi	r5,r5,40		/* vsid & 0xffffff */
	rldicl	r0,r3,64-12,36		/* (ea >> 12) & 0xfffffff */
	xor	r28,r28,r5
	xor	r28,r28,r0		/* hash */

	/* Convert linux PTE bits into HW equivalents */
4:
#ifdef CONFIG_PPC_SUBPAGE_PROT
	andc	r10,r30,r10
	andi.	r3,r10,0x1fe		/* Get basic set of flags */
	rlwinm	r0,r10,32-9+1,30,30	/* _PAGE_RW -> _PAGE_USER (r0) */
#else
	andi.	r3,r30,0x1fe		/* Get basic set of flags */
	rlwinm	r0,r30,32-9+1,30,30	/* _PAGE_RW -> _PAGE_USER (r0) */
#endif
	xori	r3,r3,HPTE_R_N		/* _PAGE_EXEC -> NOEXEC */
	rlwinm	r4,r30,32-7+1,30,30	/* _PAGE_DIRTY -> _PAGE_USER (r4) */
	and	r0,r0,r4		/* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/
	andc	r0,r3,r0		/* r0 = pte & ~r0 */
	rlwimi	r3,r0,32-1,31,31	/* Insert result into PP lsb */
	ori	r3,r3,HPTE_R_C		/* Always add "C" bit for perf. */

	/* We eventually do the icache sync here (maybe inline that
	 * code rather than call a C function...)
	 */
BEGIN_FTR_SECTION
	mr	r4,r30
	mr	r5,r7
	bl	.hash_page_do_lazy_icache
END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)

	/* At this point, r3 contains new PP bits, save them in
	 * place of "access" in the param area (sic)
	 */
	std	r3,STK_PARAM(R4)(r1)

	/* Get htab_hash_mask */
	ld	r4,htab_hash_mask@got(2)
	ld	r27,0(r4)	/* htab_hash_mask -> r27 */

	/* Check if we may already be in the hashtable, in this case, we
	 * go to out-of-line code to try to modify the HPTE. We look for
	 * the bit at (1 >> (index + 32))
	 */
	rldicl.	r0,r31,64-12,48
	li	r26,0			/* Default hidx */
	beq	htab_insert_pte

	/*
	 * Check if the pte was already inserted into the hash table
	 * as a 64k HW page, and invalidate the 64k HPTE if so.
	 */
	andis.	r0,r31,_PAGE_COMBO@h
	beq	htab_inval_old_hpte

	ld	r6,STK_PARAM(R6)(r1)
	ori	r26,r6,0x8000		/* Load the hidx mask */
	ld	r26,0(r26)
	addi	r5,r25,36		/* Check actual HPTE_SUB bit, this */
	rldcr.	r0,r31,r5,0		/* must match pgtable.h definition */
	bne	htab_modify_pte

htab_insert_pte:
	/* real page number in r5, PTE RPN value + index */
	andis.	r0,r31,_PAGE_4K_PFN@h
	srdi	r5,r31,PTE_RPN_SHIFT
	bne-	htab_special_pfn
	sldi	r5,r5,PAGE_SHIFT-HW_PAGE_SHIFT
	add	r5,r5,r25
htab_special_pfn:
	sldi	r5,r5,HW_PAGE_SHIFT

	/* Calculate primary group hash */
	and	r0,r28,r27
	rldicr	r3,r0,3,63-3		/* r0 = (hash & mask) << 3 */

	/* Call ppc_md.hpte_insert */
	ld	r6,STK_PARAM(R4)(r1)	/* Retrieve new pp bits */
	mr	r4,r29			/* Retrieve vpn */
	li	r7,0			/* !bolted, !secondary */
	li	r8,MMU_PAGE_4K		/* page size */
	ld	r9,STK_PARAM(R9)(r1)	/* segment size */
_GLOBAL(htab_call_hpte_insert1)
	bl	.			/* patched by htab_finish_init() */
	cmpdi	0,r3,0
	bge	htab_pte_insert_ok	/* Insertion successful */
	cmpdi	0,r3,-2			/* Critical failure */
	beq-	htab_pte_insert_failure

	/* Now try secondary slot */

	/* real page number in r5, PTE RPN value + index */
	andis.	r0,r31,_PAGE_4K_PFN@h
	srdi	r5,r31,PTE_RPN_SHIFT
	bne-	3f
	sldi	r5,r5,PAGE_SHIFT-HW_PAGE_SHIFT
	add	r5,r5,r25
3:	sldi	r5,r5,HW_PAGE_SHIFT

	/* Calculate secondary group hash */
	andc	r0,r27,r28
	rldicr	r3,r0,3,63-3		/* r0 = (~hash & mask) << 3 */

	/* Call ppc_md.hpte_insert */
	ld	r6,STK_PARAM(R4)(r1)	/* Retrieve new pp bits */
	mr	r4,r29			/* Retrieve vpn */
	li	r7,HPTE_V_SECONDARY	/* !bolted, secondary */
	li	r8,MMU_PAGE_4K		/* page size */
	ld	r9,STK_PARAM(R9)(r1)	/* segment size */
_GLOBAL(htab_call_hpte_insert2)
	bl	.			/* patched by htab_finish_init() */
	cmpdi	0,r3,0
	bge+	htab_pte_insert_ok	/* Insertion successful */
	cmpdi	0,r3,-2			/* Critical failure */
	beq-	htab_pte_insert_failure

	/* Both are full, we need to evict something */
	mftb	r0
	/* Pick a random group based on TB */
	andi.	r0,r0,1
	mr	r5,r28
	bne	2f
	not	r5,r5
2:	and	r0,r5,r27
	rldicr	r3,r0,3,63-3		/* r0 = (hash & mask) << 3 */
	/* Call ppc_md.hpte_remove */
_GLOBAL(htab_call_hpte_remove)
	bl	.			/* patched by htab_finish_init() */

	/* Try all again */
	b	htab_insert_pte

	/*
	 * Call out to C code to invalidate a 64k HW HPTE that is
	 * useless now that the segment has been switched to 4k pages.
	 */
htab_inval_old_hpte:
	mr	r3,r29			/* vpn */
	mr	r4,r31			/* PTE.pte */
	li	r5,0			/* PTE.hidx */
	li	r6,MMU_PAGE_64K		/* psize */
	ld	r7,STK_PARAM(R9)(r1)	/* ssize */
	ld	r8,STK_PARAM(R8)(r1)	/* local */
	bl	.flush_hash_page
	/* Clear out _PAGE_HPTE_SUB bits in the new linux PTE */
	lis	r0,_PAGE_HPTE_SUB@h
	ori	r0,r0,_PAGE_HPTE_SUB@l
	andc	r30,r30,r0
	b	htab_insert_pte

htab_bail_ok:
	li	r3,0
	b	htab_bail

htab_pte_insert_ok:
	/* Insert slot number & secondary bit in PTE second half,
	 * clear _PAGE_BUSY and set appropriate HPTE slot bit
	 */
	ld	r6,STK_PARAM(R6)(r1)
	li	r0,_PAGE_BUSY
	andc	r30,r30,r0
	/* HPTE SUB bit */
	li	r0,1
	subfic	r5,r25,27		/* Must match bit position in */
	sld	r0,r0,r5		/* pgtable.h */
	or	r30,r30,r0
	/* hindx */
	sldi	r5,r25,2
	sld	r3,r3,r5
	li	r4,0xf
	sld	r4,r4,r5
	andc	r26,r26,r4
	or	r26,r26,r3
	ori	r5,r6,0x8000
	std	r26,0(r5)
	lwsync
	std	r30,0(r6)
	li	r3, 0
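
	/*
	 * Rough C view of the bookkeeping above (names are illustrative):
	 * the 4-bit slot index of every 4K sub-page lives in a second
	 * doubleword found by setting bit 0x8000 in the PTE address, and it
	 * is published before the PTE itself:
	 *
	 *	hidxp  = (unsigned long *)((unsigned long)ptep | 0x8000);
	 *	hidx   = *hidxp;
	 *	hidx  &= ~(0xfUL << (subpg * 4));
	 *	hidx  |= (unsigned long)slot << (subpg * 4);
	 *	*hidxp = hidx;
	 *	lwsync();		// order the hidx update before the PTE
	 *	*ptep  = new_pte;	// BUSY cleared, HPTE_SUB bit set
	 */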
htab_bail:
	ld	r25,STK_REG(R25)(r1)
	ld	r26,STK_REG(R26)(r1)
	ld	r27,STK_REG(R27)(r1)
	ld	r28,STK_REG(R28)(r1)
	ld	r29,STK_REG(R29)(r1)
	ld	r30,STK_REG(R30)(r1)
	ld	r31,STK_REG(R31)(r1)
	addi	r1,r1,STACKFRAMESIZE
	ld	r0,16(r1)
	mtlr	r0
	blr

htab_modify_pte:
	/* Keep PP bits in r4 and slot idx from the PTE around in r3 */
	mr	r4,r3
	sldi	r5,r25,2
	srd	r3,r26,r5

	/* Secondary group ? if yes, get an inverted hash value */
	mr	r5,r28
	andi.	r0,r3,0x8	/* page secondary ? */
	beq	1f
	not	r5,r5
1:	andi.	r3,r3,0x7	/* extract idx alone */

	/* Calculate proper slot value for ppc_md.hpte_updatepp */
	and	r0,r5,r27
	rldicr	r0,r0,3,63-3	/* r0 = (hash & mask) << 3 */
	add	r3,r0,r3	/* add slot idx */

	/* Call ppc_md.hpte_updatepp */
	mr	r5,r29			/* vpn */
	li	r6,MMU_PAGE_4K		/* page size */
	ld	r7,STK_PARAM(R9)(r1)	/* segment size */
	ld	r8,STK_PARAM(R8)(r1)	/* get "local" param */
_GLOBAL(htab_call_hpte_updatepp)
	bl	.			/* patched by htab_finish_init() */

	/* If we failed (typically because the HPTE wasn't really there),
	 * we try an insertion.
	 */
	cmpdi	0,r3,-1
	beq-	htab_insert_pte

	/* Clear the BUSY bit and Write out the PTE */
	li	r0,_PAGE_BUSY
	andc	r30,r30,r0
	ld	r6,STK_PARAM(R6)(r1)
	std	r30,0(r6)
	li	r3,0
	b	htab_bail

htab_wrong_access:
	/* Bail out clearing reservation */
	stdcx.	r31,0,r6
	li	r3,1
	b	htab_bail

htab_pte_insert_failure:
	/* Bail out restoring old PTE */
	ld	r6,STK_PARAM(R6)(r1)
	std	r31,0(r6)
	li	r3,-1
	b	htab_bail


#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_PPC_HAS_HASH_64K

/*****************************************************************************
 *                                                                           *
 *            64K SW & 64K HW in a 64K segment pages implementation          *
 *                                                                           *
 *****************************************************************************/

_GLOBAL(__hash_page_64K)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-STACKFRAMESIZE(r1)
	/* Save all params that we need after a function call */
	std	r6,STK_PARAM(R6)(r1)
	std	r8,STK_PARAM(R8)(r1)
	std	r9,STK_PARAM(R9)(r1)

	/* Save non-volatile registers.
	 * r31 will hold "old PTE"
	 * r30 is "new PTE"
	 * r29 is vpn
	 * r28 is a hash value
	 * r27 is hashtab mask (maybe dynamic patched instead ?)
	 */
	std	r27,STK_REG(R27)(r1)
	std	r28,STK_REG(R28)(r1)
	std	r29,STK_REG(R29)(r1)
	std	r30,STK_REG(R30)(r1)
	std	r31,STK_REG(R31)(r1)

	/* Step 1:
	 *
	 * Check permissions, atomically mark the linux PTE busy
	 * and hashed.
	 */
1:
	ldarx	r31,0,r6
	/* Check access rights (access & ~(pte_val(*ptep))) */
	andc.	r0,r4,r31
	bne-	ht64_wrong_access
	/* Check if PTE is busy */
	andi.	r0,r31,_PAGE_BUSY
	/* If so, just bail out and refault if needed. Someone else
	 * is changing this PTE anyway and might hash it.
	 */
	bne-	ht64_bail_ok
BEGIN_FTR_SECTION
	/* Check if PTE has the cache-inhibit bit set */
	andi.	r0,r31,_PAGE_NO_CACHE
	/* If so, bail out and refault as a 4k page */
	bne-	ht64_bail_ok
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_CI_LARGE_PAGE)
	/* Prepare new PTE value (turn access RW into DIRTY, then
	 * add BUSY and ACCESSED)
	 */
	rlwinm	r30,r4,32-9+7,31-7,31-7	/* _PAGE_RW -> _PAGE_DIRTY */
	or	r30,r30,r31
	ori	r30,r30,_PAGE_BUSY | _PAGE_ACCESSED
	/* Write the linux PTE atomically (setting busy) */
	stdcx.	r30,0,r6
	bne-	1b
	isync

	/* Step 2:
	 *
	 * Insert/Update the HPTE in the hash table. At this point,
	 * r4 (access) is re-usable, we use it for the new HPTE flags
	 */

BEGIN_FTR_SECTION
	cmpdi	r9,0			/* check segment size */
	bne	3f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
	/* Calc vpn and put it in r29 */
	sldi	r29,r5,SID_SHIFT - VPN_SHIFT
	rldicl	r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
	or	r29,r28,r29

	/* Calculate hash value for primary slot and store it in r28 */
	rldicl	r5,r5,0,25		/* vsid & 0x0000007fffffffff */
	rldicl	r0,r3,64-16,52		/* (ea >> 16) & 0xfff */
	xor	r28,r5,r0
	b	4f

3:	/* Calc vpn and put it in r29 */
	sldi	r29,r5,SID_SHIFT_1T - VPN_SHIFT
	rldicl	r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT)
	or	r29,r28,r29

	/*
	 * calculate hash value for primary slot and
	 * store it in r28 for 1T segment
	 */
	rldic	r28,r5,25,25		/* (vsid << 25) & 0x7fffffffff */
	clrldi	r5,r5,40		/* vsid & 0xffffff */
	rldicl	r0,r3,64-16,40		/* (ea >> 16) & 0xffffff */
	xor	r28,r28,r5
	xor	r28,r28,r0		/* hash */

	/* Convert linux PTE bits into HW equivalents */
4:	andi.	r3,r30,0x1fe		/* Get basic set of flags */
	xori	r3,r3,HPTE_R_N		/* _PAGE_EXEC -> NOEXEC */
	rlwinm	r0,r30,32-9+1,30,30	/* _PAGE_RW -> _PAGE_USER (r0) */
	rlwinm	r4,r30,32-7+1,30,30	/* _PAGE_DIRTY -> _PAGE_USER (r4) */
	and	r0,r0,r4		/* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/
	andc	r0,r30,r0		/* r0 = pte & ~r0 */
	rlwimi	r3,r0,32-1,31,31	/* Insert result into PP lsb */
	ori	r3,r3,HPTE_R_C		/* Always add "C" bit for perf. */

	/* We eventually do the icache sync here (maybe inline that
	 * code rather than call a C function...)
	 */
BEGIN_FTR_SECTION
	mr	r4,r30
	mr	r5,r7
	bl	.hash_page_do_lazy_icache
END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)

	/* At this point, r3 contains new PP bits, save them in
	 * place of "access" in the param area (sic)
	 */
	std	r3,STK_PARAM(R4)(r1)

	/* Get htab_hash_mask */
	ld	r4,htab_hash_mask@got(2)
	ld	r27,0(r4)	/* htab_hash_mask -> r27 */

	/* Check if we may already be in the hashtable, in this case, we
	 * go to out-of-line code to try to modify the HPTE
	 */
	rldicl.	r0,r31,64-12,48
	bne	ht64_modify_pte

ht64_insert_pte:
	/* Clear hpte bits in new pte (we also clear BUSY btw) and
	 * add _PAGE_HPTE_SUB0
	 */
	lis	r0,_PAGE_HPTEFLAGS@h
	ori	r0,r0,_PAGE_HPTEFLAGS@l
	andc	r30,r30,r0
#ifdef CONFIG_PPC_64K_PAGES
	oris	r30,r30,_PAGE_HPTE_SUB0@h
#else
	ori	r30,r30,_PAGE_HASHPTE
#endif
	/* Physical address in r5 */
	rldicl	r5,r31,64-PTE_RPN_SHIFT,PTE_RPN_SHIFT
	sldi	r5,r5,PAGE_SHIFT

	/* Calculate primary group hash */
	and	r0,r28,r27
	rldicr	r3,r0,3,63-3		/* r0 = (hash & mask) << 3 */

	/* Call ppc_md.hpte_insert */
	ld	r6,STK_PARAM(R4)(r1)	/* Retrieve new pp bits */
	mr	r4,r29			/* Retrieve vpn */
	li	r7,0			/* !bolted, !secondary */
	li	r8,MMU_PAGE_64K
	ld	r9,STK_PARAM(R9)(r1)	/* segment size */
_GLOBAL(ht64_call_hpte_insert1)
	bl	.			/* patched by htab_finish_init() */
	cmpdi	0,r3,0
	bge	ht64_pte_insert_ok	/* Insertion successful */
	cmpdi	0,r3,-2			/* Critical failure */
	beq-	ht64_pte_insert_failure

	/* Now try secondary slot */

	/* Physical address in r5 */
	rldicl	r5,r31,64-PTE_RPN_SHIFT,PTE_RPN_SHIFT
	sldi	r5,r5,PAGE_SHIFT

	/* Calculate secondary group hash */
	andc	r0,r27,r28
	rldicr	r3,r0,3,63-3		/* r0 = (~hash & mask) << 3 */

	/* Call ppc_md.hpte_insert */
	ld	r6,STK_PARAM(R4)(r1)	/* Retrieve new pp bits */
	mr	r4,r29			/* Retrieve vpn */
	li	r7,HPTE_V_SECONDARY	/* !bolted, secondary */
	li	r8,MMU_PAGE_64K
	ld	r9,STK_PARAM(R9)(r1)	/* segment size */
_GLOBAL(ht64_call_hpte_insert2)
	bl	.			/* patched by htab_finish_init() */
	cmpdi	0,r3,0
	bge+	ht64_pte_insert_ok	/* Insertion successful */
	cmpdi	0,r3,-2			/* Critical failure */
	beq-	ht64_pte_insert_failure

	/* Both are full, we need to evict something */
	mftb	r0
	/* Pick a random group based on TB */
	andi.	r0,r0,1
	mr	r5,r28
	bne	2f
	not	r5,r5
2:	and	r0,r5,r27
	rldicr	r3,r0,3,63-3		/* r0 = (hash & mask) << 3 */
	/* Call ppc_md.hpte_remove */
_GLOBAL(ht64_call_hpte_remove)
	bl	.			/* patched by htab_finish_init() */

	/* Try all again */
	b	ht64_insert_pte

ht64_bail_ok:
	li	r3,0
	b	ht64_bail

ht64_pte_insert_ok:
	/* Insert slot number & secondary bit in PTE */
	rldimi	r30,r3,12,63-15

	/* Write out the PTE with a normal write
	 * (maybe adding an eieio would still be good?)
	 */
ht64_write_out_pte:
	ld	r6,STK_PARAM(R6)(r1)
	std	r30,0(r6)
	li	r3, 0
ht64_bail:
	ld	r27,STK_REG(R27)(r1)
	ld	r28,STK_REG(R28)(r1)
	ld	r29,STK_REG(R29)(r1)
	ld	r30,STK_REG(R30)(r1)
	ld	r31,STK_REG(R31)(r1)
	addi	r1,r1,STACKFRAMESIZE
	ld	r0,16(r1)
	mtlr	r0
	blr

ht64_modify_pte:
	/* Keep PP bits in r4 and slot idx from the PTE around in r3 */
	mr	r4,r3
	rlwinm	r3,r31,32-12,29,31

	/* Secondary group ? if yes, get an inverted hash value */
	mr	r5,r28
	andi.	r0,r31,_PAGE_F_SECOND
	beq	1f
	not	r5,r5
1:
	/* Calculate proper slot value for ppc_md.hpte_updatepp */
	and	r0,r5,r27
	rldicr	r0,r0,3,63-3	/* r0 = (hash & mask) << 3 */
	add	r3,r0,r3	/* add slot idx */

	/* Call ppc_md.hpte_updatepp */
	mr	r5,r29			/* vpn */
	li	r6,MMU_PAGE_64K
	ld	r7,STK_PARAM(R9)(r1)	/* segment size */
	ld	r8,STK_PARAM(R8)(r1)	/* get "local" param */
_GLOBAL(ht64_call_hpte_updatepp)
	bl	.			/* patched by htab_finish_init() */

	/* If we failed (typically because the HPTE wasn't really there),
	 * we try an insertion.
	 */
	cmpdi	0,r3,-1
	beq-	ht64_insert_pte

	/* Clear the BUSY bit and Write out the PTE */
	li	r0,_PAGE_BUSY
	andc	r30,r30,r0
	b	ht64_write_out_pte

ht64_wrong_access:
	/* Bail out clearing reservation */
	stdcx.	r31,0,r6
	li	r3,1
	b	ht64_bail

ht64_pte_insert_failure:
	/* Bail out restoring old PTE */
	ld	r6,STK_PARAM(R6)(r1)
	std	r31,0(r6)
	li	r3,-1
	b	ht64_bail


#endif /* CONFIG_PPC_HAS_HASH_64K */


/*****************************************************************************
 *                                                                           *
 *                 Huge pages implementation is in hugetlbpage.c             *
 *                                                                           *
 *****************************************************************************/