x86: Rename e820_table_* to pgt_buf_*
e820_table_{start|end|top}, which are used to buffer page table allocation during early boot, are now derived from memblock and don't have much to do with e820. Change the names so that they reflect what they're used for. This patch doesn't introduce any behavior change.

-v2: Ingo found that earlier patch "x86: Use early pre-allocated page table buffer top-down" caused crash on 32bit and needed to be dropped. This patch was updated to reflect the change.

-tj: Updated commit description.

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Tejun Heo <tj@kernel.org>
This commit is contained in:
parent
8bc1f91e1f
commit
d1b19426b0
5 changed files with 20 additions and 20 deletions
|
@@ -11,8 +11,8 @@ kernel_physical_mapping_init(unsigned long start,
|
|||
unsigned long page_size_mask);
|
||||
|
||||
|
||||
extern unsigned long __initdata e820_table_start;
|
||||
extern unsigned long __meminitdata e820_table_end;
|
||||
extern unsigned long __meminitdata e820_table_top;
|
||||
extern unsigned long __initdata pgt_buf_start;
|
||||
extern unsigned long __meminitdata pgt_buf_end;
|
||||
extern unsigned long __meminitdata pgt_buf_top;
|
||||
|
||||
#endif /* _ASM_X86_INIT_32_H */
|
||||
|
|
|
@@ -18,9 +18,9 @@
|
|||
|
||||
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
|
||||
|
||||
unsigned long __initdata e820_table_start;
|
||||
unsigned long __meminitdata e820_table_end;
|
||||
unsigned long __meminitdata e820_table_top;
|
||||
unsigned long __initdata pgt_buf_start;
|
||||
unsigned long __meminitdata pgt_buf_end;
|
||||
unsigned long __meminitdata pgt_buf_top;
|
||||
|
||||
int after_bootmem;
|
||||
|
||||
|
@@ -73,12 +73,12 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
|
|||
if (base == MEMBLOCK_ERROR)
|
||||
panic("Cannot find space for the kernel page tables");
|
||||
|
||||
e820_table_start = base >> PAGE_SHIFT;
|
||||
e820_table_end = e820_table_start;
|
||||
e820_table_top = e820_table_start + (tables >> PAGE_SHIFT);
|
||||
pgt_buf_start = base >> PAGE_SHIFT;
|
||||
pgt_buf_end = pgt_buf_start;
|
||||
pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);
|
||||
|
||||
printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
|
||||
end, e820_table_start << PAGE_SHIFT, e820_table_top << PAGE_SHIFT);
|
||||
end, pgt_buf_start << PAGE_SHIFT, pgt_buf_top << PAGE_SHIFT);
|
||||
}
|
||||
|
||||
struct map_range {
|
||||
|
@@ -272,9 +272,9 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
|
|||
|
||||
__flush_tlb_all();
|
||||
|
||||
if (!after_bootmem && e820_table_end > e820_table_start)
|
||||
memblock_x86_reserve_range(e820_table_start << PAGE_SHIFT,
|
||||
e820_table_end << PAGE_SHIFT, "PGTABLE");
|
||||
if (!after_bootmem && pgt_buf_end > pgt_buf_start)
|
||||
memblock_x86_reserve_range(pgt_buf_start << PAGE_SHIFT,
|
||||
pgt_buf_end << PAGE_SHIFT, "PGTABLE");
|
||||
|
||||
if (!after_bootmem)
|
||||
early_memtest(start, end);
|
||||
|
|
|
@@ -62,10 +62,10 @@ bool __read_mostly __vmalloc_start_set = false;
|
|||
|
||||
static __init void *alloc_low_page(void)
|
||||
{
|
||||
unsigned long pfn = e820_table_end++;
|
||||
unsigned long pfn = pgt_buf_end++;
|
||||
void *adr;
|
||||
|
||||
if (pfn >= e820_table_top)
|
||||
if (pfn >= pgt_buf_top)
|
||||
panic("alloc_low_page: ran out of memory");
|
||||
|
||||
adr = __va(pfn * PAGE_SIZE);
|
||||
|
@@ -163,8 +163,8 @@ static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
|
|||
if (pmd_idx_kmap_begin != pmd_idx_kmap_end
|
||||
&& (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
|
||||
&& (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end
|
||||
&& ((__pa(pte) >> PAGE_SHIFT) < e820_table_start
|
||||
|| (__pa(pte) >> PAGE_SHIFT) >= e820_table_end)) {
|
||||
&& ((__pa(pte) >> PAGE_SHIFT) < pgt_buf_start
|
||||
|| (__pa(pte) >> PAGE_SHIFT) >= pgt_buf_end)) {
|
||||
pte_t *newpte;
|
||||
int i;
|
||||
|
||||
|
|
|
@@ -314,7 +314,7 @@ void __init cleanup_highmap(void)
|
|||
|
||||
static __ref void *alloc_low_page(unsigned long *phys)
|
||||
{
|
||||
unsigned long pfn = e820_table_end++;
|
||||
unsigned long pfn = pgt_buf_end++;
|
||||
void *adr;
|
||||
|
||||
if (after_bootmem) {
|
||||
|
@@ -324,7 +324,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
|
|||
return adr;
|
||||
}
|
||||
|
||||
if (pfn >= e820_table_top)
|
||||
if (pfn >= pgt_buf_top)
|
||||
panic("alloc_low_page: ran out of memory");
|
||||
|
||||
adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
|
||||
|
|
|
@@ -1443,7 +1443,7 @@ static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
|
|||
* early_ioremap fixmap slot, make sure it is RO.
|
||||
*/
|
||||
if (!is_early_ioremap_ptep(ptep) &&
|
||||
pfn >= e820_table_start && pfn < e820_table_end)
|
||||
pfn >= pgt_buf_start && pfn < pgt_buf_end)
|
||||
pte = pte_wrprotect(pte);
|
||||
|
||||
return pte;
|
||||
|
|
Loading…
Reference in a new issue