/* arch/sparc64/mm/tlb.c
 *
 * Copyright (C) 2004 David S. Miller <davem@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/preempt.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

/* Heavily inspired by the ppc64 code. */

static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);

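/* Run the pending TLB flush batch for this CPU.  TSB entries are torn
 * down first, then the TLB entries themselves.  A single-entry batch is
 * handed to global_flush_tlb_page(), which is fully asynchronous and
 * therefore much cheaper than the synchronizing cross call used for
 * larger batches.
 */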
void flush_tlb_pending(void)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	struct mm_struct *mm = tb->mm;

	if (!tb->tlb_nr)
		goto out;

	flush_tsb_user(tb);

	if (CTX_VALID(mm->context)) {
		if (tb->tlb_nr == 1) {
			global_flush_tlb_page(mm, tb->vaddrs[0]);
		} else {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
					      &tb->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(tb->mm->context),
					    tb->tlb_nr, &tb->vaddrs[0]);
#endif
		}
	}

	tb->tlb_nr = 0;

out:
	put_cpu_var(tlb_batch);
}

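/* TLB flushes are only batched between arch_enter_lazy_mmu_mode() and
 * arch_leave_lazy_mmu_mode().  Outside of a lazy MMU section the batch
 * is marked inactive and tlb_batch_add_one() flushes each page
 * synchronously instead.
 */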
void arch_enter_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = &__get_cpu_var(tlb_batch);

	tb->active = 1;
}

void arch_leave_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = &__get_cpu_var(tlb_batch);

	if (tb->tlb_nr)
		flush_tlb_pending();
	tb->active = 0;
}

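/* Queue one (vaddr, exec) pair for a deferred flush.  If the mm has
 * changed, the pending batch is flushed first; if no batch is active
 * (we are not inside a lazy MMU section), the page is flushed
 * immediately; and once TLB_BATCH_NR entries accumulate, the whole
 * batch is flushed.
 */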
static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
			      bool exec)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	unsigned long nr;

	vaddr &= PAGE_MASK;
	if (exec)
		vaddr |= 0x1UL;

	nr = tb->tlb_nr;

	if (unlikely(nr != 0 && mm != tb->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (!tb->active) {
		flush_tsb_user_page(mm, vaddr);
		global_flush_tlb_page(mm, vaddr);
		goto out;
	}

	if (nr == 0)
		tb->mm = mm;

	tb->vaddrs[nr] = vaddr;
	tb->tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();

out:
	put_cpu_var(tlb_batch);
}

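/* Flush bookkeeping for a user PTE that is being modified or torn down.
 * If the old PTE was dirty and maps a real file page whose kernel and
 * user addresses fall into different D-cache colors (bit 13 differs),
 * the D-cache is flushed first so that no illegal alias is left behind.
 * The TLB entry itself is then queued via tlb_batch_add_one() unless
 * this is a full-mm teardown.
 */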
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
		   pte_t *ptep, pte_t orig, int fullmm)
{
	if (tlb_type != hypervisor &&
	    pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;

		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? */
		mapping = page_mapping(page);
		if (!mapping)
			goto no_cache_flush;

		paddr = (unsigned long) page_address(page);
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_page_all(mm, page);
	}

no_cache_flush:
	if (!fullmm)
		tlb_batch_add_one(mm, vaddr, pte_exec(orig));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
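/* The old PMD pointed at a regular PTE table: walk that table and queue
 * a flush for every valid page in the HPAGE_SIZE region it maps.
 */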
static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
			       pmd_t pmd, bool exec)
{
	unsigned long end;
	pte_t *pte;

	pte = pte_offset_map(&pmd, vaddr);
	end = vaddr + HPAGE_SIZE;
	while (vaddr < end) {
		if (pte_val(*pte) & _PAGE_VALID)
			tlb_batch_add_one(mm, vaddr, exec);
		pte++;
		vaddr += PAGE_SIZE;
	}
	pte_unmap(pte);
}

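/* Install a new PMD value.  When the huge bit changes, the per-mm
 * huge_pte_count is adjusted accordingly; when a previously valid PMD
 * is replaced, the old translations are flushed.  An 8MB huge page is
 * fabricated from two hardware 4MB TLB entries, so both halves
 * (addr and addr + REAL_HPAGE_SIZE) are queued.
 */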
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
	pmd_t orig = *pmdp;

	*pmdp = pmd;

	if (mm == &init_mm)
		return;

	if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
		if (pmd_val(pmd) & _PAGE_PMD_HUGE)
			mm->context.huge_pte_count++;
		else
			mm->context.huge_pte_count--;

		/* Do not try to allocate the TSB hash table if we
		 * don't have one already.  We have various locks held
		 * and thus we'll end up doing a GFP_KERNEL allocation
		 * in an atomic context.
		 *
		 * Instead, we let the first TLB miss on a hugepage
		 * take care of this.
		 */
	}

	if (!pmd_none(orig)) {
		pte_t orig_pte = __pte(pmd_val(orig));
		bool exec = pte_exec(orig_pte);

		addr &= HPAGE_MASK;
		if (pmd_trans_huge(orig)) {
			tlb_batch_add_one(mm, addr, exec);
			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec);
		} else {
			tlb_batch_pmd_scan(mm, addr, orig, exec);
		}
	}
}

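/* THP deposit/withdraw: spare PTE tables for a huge PMD are kept on a
 * list threaded through the pgtable pages themselves.  Both operations
 * must run under the mm's page_table_lock, which is asserted below.
 */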
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	pte_val(pgtable[0]) = 0;
	pte_val(pgtable[1]) = 0;

	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */