/*
 * This file contains the routines for flushing entries from the
 * TLB and MMU hash table.
 *
 * Derived from arch/ppc64/mm/init.c:
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *   Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 *   Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/bug.h>

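/* Per-CPU batch of hash page table entries awaiting invalidation,
 * filled by hpte_need_flush() and drained by __flush_tlb_pending(). */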
DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

/* This is declared as we are using the more or less generic
 * include/asm-powerpc/tlb.h file -- tgall
 */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
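/* A page worth of page table pages queued for freeing; the whole batch
 * is released from an RCU callback once a grace period has elapsed. */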
struct pte_freelist_batch
{
	struct rcu_head	rcu;
	unsigned int	index;
	pgtable_free_t	tables[0];
};

DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
unsigned long pte_freelist_forced_free;

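/* Number of pgtable_free_t entries that fit in the remainder of the
 * page holding a struct pte_freelist_batch. */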
#define PTE_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
	  / sizeof(pgtable_free_t))

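/* Empty IPI handler: waiting for it to complete on all CPUs guarantees
 * that every CPU has passed through an interrupt since the call was made. */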
static void pte_free_smp_sync(void *arg)
{
	/* Do nothing, just ensure we sync with all CPUs */
}

/* This is only called when we are critically out of memory
 * (and fail to get a page in pte_free_tlb).
 */
static void pgtable_free_now(pgtable_free_t pgf)
{
	pte_freelist_forced_free++;

	smp_call_function(pte_free_smp_sync, NULL, 0, 1);

	pgtable_free(pgf);
}

static void pte_free_rcu_callback(struct rcu_head *head)
{
	struct pte_freelist_batch *batch =
		container_of(head, struct pte_freelist_batch, rcu);
	unsigned int i;

	for (i = 0; i < batch->index; i++)
		pgtable_free(batch->tables[i]);

	free_page((unsigned long)batch);
}

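/* Hand a batch to RCU; pte_free_rcu_callback() will free the tables
 * and the batch page itself after a grace period. */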
static void pte_free_submit(struct pte_freelist_batch *batch)
{
	INIT_RCU_HEAD(&batch->rcu);
	call_rcu(&batch->rcu, pte_free_rcu_callback);
}

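/* Defer freeing of a page table page until it is safe: free it right
 * away when the mm is only in use on this CPU, otherwise queue it in
 * the per-CPU RCU batch (allocating one on demand and submitting it
 * when full). */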
void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
{
	/* This is safe since tlb_gather_mmu has disabled preemption */
	cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
		pgtable_free(pgf);
		return;
	}

	if (*batchp == NULL) {
		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
		if (*batchp == NULL) {
			pgtable_free_now(pgf);
			return;
		}
		(*batchp)->index = 0;
	}
	(*batchp)->tables[(*batchp)->index++] = pgf;
	if ((*batchp)->index == PTE_FREELIST_SIZE) {
		pte_free_submit(*batchp);
		*batchp = NULL;
	}
}

/*
 * A linux PTE was changed and the corresponding hash table entry
 * needs to be flushed. This function will either perform the flush
 * immediately or will batch it up if the current CPU has an active
 * batch on it.
 *
 * Must be called from within some kind of spinlock/non-preempt region...
 */
void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, unsigned long pte, int huge)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
	unsigned long vsid, vaddr;
	unsigned int psize;
	int ssize;
	real_pte_t rpte;
	int i;

	i = batch->index;

	/* We mask the address for the base page size. Huge pages will
	 * have applied their own masking already
	 */
	addr &= PAGE_MASK;

	/* Get page size (maybe move back to caller).
	 *
	 * NOTE: when using special 64K mappings in 4K environment like
	 * for SPEs, we obtain the page size from the slice, which thus
	 * must still exist (and thus the VMA not reused) at the time
	 * of this call
	 */
	if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
		psize = mmu_huge_psize;
#else
		BUG();
		psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
#endif
	} else
		psize = pte_pagesize_index(mm, addr, pte);

	/* Build full vaddr */
	if (!is_kernel_addr(addr)) {
		ssize = user_segment_size(addr);
		vsid = get_vsid(mm->context.id, addr, ssize);
		WARN_ON(vsid == 0);
	} else {
		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
		ssize = mmu_kernel_ssize;
	}
	vaddr = hpt_va(addr, vsid, ssize);
	rpte = __real_pte(__pte(pte), ptep);

	/*
	 * Check if we have an active batch on this CPU. If not, just
	 * flush now and return. For now, we do global invalidates
	 * in that case, might be worth testing the mm cpu mask though
	 * and decide to use local invalidates instead...
	 */
	if (!batch->active) {
		flush_hash_page(vaddr, rpte, psize, ssize, 0);
		return;
	}

	/*
	 * This can happen when we are in the middle of a TLB batch and
	 * we encounter memory pressure (eg copy_page_range when it tries
	 * to allocate a new pte). If we have to reclaim memory and end
	 * up scanning and resetting referenced bits then our batch context
	 * will change mid stream.
	 *
	 * We also need to ensure only one page size is present in a given
	 * batch
	 */
	if (i != 0 && (mm != batch->mm || batch->psize != psize ||
		       batch->ssize != ssize)) {
		__flush_tlb_pending(batch);
		i = 0;
	}
	if (i == 0) {
		batch->mm = mm;
		batch->psize = psize;
		batch->ssize = ssize;
	}
	batch->pte[i] = rpte;
	batch->vaddr[i] = vaddr;
	batch->index = ++i;
	if (i >= PPC64_TLB_BATCH_NR)
		__flush_tlb_pending(batch);
}

/*
 * This function is called when terminating an mmu batch or when a batch
 * is full. It will perform the flush of all the entries currently stored
 * in a batch.
 *
 * Must be called from within some kind of spinlock/non-preempt region...
 */
void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
{
	cpumask_t tmp;
	int i, local = 0;

	i = batch->index;
	tmp = cpumask_of_cpu(smp_processor_id());
	if (cpus_equal(batch->mm->cpu_vm_mask, tmp))
		local = 1;
	if (i == 1)
		flush_hash_page(batch->vaddr[0], batch->pte[0],
				batch->psize, batch->ssize, local);
	else
		flush_hash_range(i, local);
	batch->index = 0;
}

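/* Submit any partially filled pte_freelist batch on this CPU to RCU. */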
void pte_free_finish(void)
{
	/* This is safe since tlb_gather_mmu has disabled preemption */
	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

	if (*batchp == NULL)
		return;
	pte_free_submit(*batchp);
	*batchp = NULL;
}

/**
 * __flush_hash_table_range - Flush all HPTEs for a given address range
 *                            from the hash table (and the TLB). But keeps
 *                            the linux PTEs intact.
 *
 * @mm    : mm_struct of the target address space (generally init_mm)
 * @start : starting address
 * @end   : ending address (not included in the flush)
 *
 * This function is mostly to be used by some IO hotplug code in order
 * to remove all hash entries from a given address range used to map IO
 * space on a removed PCI-PCI bridge without tearing down the full mapping
 * since 64K pages may overlap with other bridges when using 64K pages
 * with 4K HW pages on IO space.
 *
 * Because of that usage pattern, it's only available with CONFIG_HOTPLUG
 * and is implemented for small size rather than speed.
 */

#ifdef CONFIG_HOTPLUG
void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
			      unsigned long end)
{
	unsigned long flags;

	start = _ALIGN_DOWN(start, PAGE_SIZE);
	end = _ALIGN_UP(end, PAGE_SIZE);

	BUG_ON(!mm->pgd);

	/* Note: Normally, we should only ever use a batch within a
	 * PTE locked section. This violates the rule, but will work
	 * since we don't actually modify the PTEs, we just flush the
	 * hash while leaving the PTEs intact (including their reference
	 * to being hashed). This is not the most performance oriented
	 * way to do things but is fine for our needs here.
	 */
	local_irq_save(flags);
	arch_enter_lazy_mmu_mode();
	for (; start < end; start += PAGE_SIZE) {
		pte_t *ptep = find_linux_pte(mm->pgd, start);
		unsigned long pte;

		if (ptep == NULL)
			continue;
		pte = pte_val(*ptep);
		if (!(pte & _PAGE_HASHPTE))
			continue;
		hpte_need_flush(mm, start, ptep, pte, 0);
	}
	arch_leave_lazy_mmu_mode();
	local_irq_restore(flags);
}

#endif /* CONFIG_HOTPLUG */