8feae13110
Make VMAs per mm_struct as for MMU-mode linux. This solves two problems: (1) In SYSV SHM where nattch for a segment does not reflect the number of shmat's (and forks) done. (2) In mmap() where the VMA's vm_mm is set to point to the parent mm by an exec'ing process when VM_EXECUTABLE is specified, regardless of the fact that a VMA might be shared and already have its vm_mm assigned to another process or a dead process. A new struct (vm_region) is introduced to track a mapped region and to remember the circumstances under which it may be shared and the vm_list_struct structure is discarded as it's no longer required. This patch makes the following additional changes: (1) Regions are now allocated with alloc_pages() rather than kmalloc() and with no recourse to __GFP_COMP, so the pages are not composite. Instead, each page has a reference on it held by the region. Anything else that is interested in such a page will have to get a reference on it to retain it. When the pages are released due to unmapping, each page is passed to put_page() and will be freed when the page usage count reaches zero. (2) Excess pages are trimmed after an allocation as the allocation must be made as a power-of-2 quantity of pages. (3) VMAs are added to the parent MM's R/B tree and mmap lists. As an MM may end up with overlapping VMAs within the tree, the VMA struct address is appended to the sort key. (4) Non-anonymous VMAs are now added to the backing inode's prio list. (5) Holes may be punched in anonymous VMAs with munmap(), releasing parts of the backing region. The VMA and region structs will be split if necessary. (6) sys_shmdt() only releases one attachment to a SYSV IPC shared memory segment instead of all the attachments at that address. Multiple shmat()'s return the same address under NOMMU-mode instead of different virtual addresses as under MMU-mode. (7) Core dumping for ELF-FDPIC requires fewer exceptions for NOMMU-mode.
(8) /proc/maps is now the global list of mapped regions, and may list bits that aren't actually mapped anywhere. (9) /proc/meminfo gains a line (tagged "MmapCopy") that indicates the amount of RAM currently allocated by mmap to hold mappable regions that can't be mapped directly. These are copies of the backing device or file if not anonymous. These changes make NOMMU mode more similar to MMU mode. The downside is that NOMMU mode requires some extra memory to track things over NOMMU without this patch (VMAs are no longer shared, and there are now region structs). Signed-off-by: David Howells <dhowells@redhat.com> Tested-by: Mike Frysinger <vapier.adi@gmail.com> Acked-by: Paul Mundt <lethal@linux-sh.org>
75 lines
1.7 KiB
C
75 lines
1.7 KiB
C
#ifndef __MMU_H
|
|
#define __MMU_H
|
|
|
|
/*
 * Default "unsigned long" context: one context-ID slot per possible CPU.
 * NR_CPUS comes from the kernel configuration, so the array size is fixed
 * at build time.
 */
typedef unsigned long mm_context_id_t[NR_CPUS];
|
/*
 * Architecture-specific per-mm context.  Layout depends on whether the
 * kernel was built for MMU or NOMMU operation, plus optional ELF-FDPIC
 * bookkeeping.
 */
typedef struct {
#ifdef CONFIG_MMU
	/* Per-CPU MMU context IDs for this address space. */
	mm_context_id_t id;
	/* vDSO mapping for this mm — presumably its userspace base address;
	 * confirm against the arch vsyscall/vDSO setup code. */
	void *vdso;
#else
	/* NOMMU: end of this mm's brk area (no 'id'/'vdso' needed). */
	unsigned long end_brk;
#endif
#ifdef CONFIG_BINFMT_ELF_FDPIC
	/* FDPIC load-map locations for the executable and its ELF
	 * interpreter, set up by the ELF-FDPIC binfmt loader. */
	unsigned long exec_fdpic_loadmap;
	unsigned long interp_fdpic_loadmap;
#endif
} mm_context_t;
|
|
|
|
/*
 * Privileged Space Mapping Buffer (PMB) definitions
 *
 * Register addresses and bit encodings below follow the SH-4A hardware
 * manual's PMB description (see gr_refs); bit-meaning comments are from
 * that naming and should be checked against the manual for the exact CPU.
 */
#define PMB_PASCR	0xff000070	/* PASCR register address */
#define PMB_IRMCR	0xff000078	/* IRMCR register address */

#define PMB_ADDR	0xf6100000	/* PMB address-array base */
#define PMB_DATA	0xf7100000	/* PMB data-array base */
#define PMB_ENTRY_MAX	16		/* number of PMB entries */
#define PMB_E_MASK	0x0000000f	/* entry-number field mask */
#define PMB_E_SHIFT	8		/* entry-number field shift */

/* Page-size (SZ field) encodings for the PMB data array. */
#define PMB_SZ_16M	0x00000000
#define PMB_SZ_64M	0x00000010
#define PMB_SZ_128M	0x00000080
#define PMB_SZ_512M	0x00000090
#define PMB_SZ_MASK	PMB_SZ_512M	/* 512M sets all SZ bits, so it doubles as the mask */

/* Data-array attribute/flag bits. */
#define PMB_C		0x00000008	/* C: cacheable */
#define PMB_WT		0x00000001	/* WT: write-through (else copy-back) */
#define PMB_UB		0x00000200	/* UB: unbuffered write access */
#define PMB_V		0x00000100	/* V: entry valid */

/* Sentinel entry number: "search for any free entry" (see struct pmb_entry). */
#define PMB_NO_ENTRY	(-1)
|
|
|
|
/* Forward declaration (redundant with the definition just below, but harmless). */
struct pmb_entry;

/*
 * Software descriptor for one PMB mapping; allocated/freed via
 * pmb_alloc()/pmb_free() and programmed into hardware with
 * set_pmb_entry() (see prototypes below).
 */
struct pmb_entry {
	unsigned long vpn;	/* virtual page number of the mapping */
	unsigned long ppn;	/* physical page number it maps to */
	unsigned long flags;	/* PMB_* data-array flag/size bits */

	/*
	 * 0 .. NR_PMB_ENTRIES for specific entry selection, or
	 * PMB_NO_ENTRY to search for a free one
	 */
	int entry;

	/* Next descriptor in the global/allocation list — NOTE(review):
	 * exact list semantics live in arch/sh/mm/pmb.c; confirm there. */
	struct pmb_entry *next;
	/* Adjacent entry link for contiguous multi-entry mappings */
	struct pmb_entry *link;
};
|
|
|
|
/* arch/sh/mm/pmb.c */

/* Program a mapping into a hardware PMB slot; *entry selects the slot or
 * is PMB_NO_ENTRY to pick a free one (updated with the slot used). */
int __set_pmb_entry(unsigned long vpn, unsigned long ppn,
		    unsigned long flags, int *entry);
/* Program / invalidate the hardware slot described by pmbe. */
int set_pmb_entry(struct pmb_entry *pmbe);
void clear_pmb_entry(struct pmb_entry *pmbe);
/* Allocate / release a pmb_entry software descriptor. */
struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
			    unsigned long flags);
void pmb_free(struct pmb_entry *pmbe);
/* Map / unmap a contiguous physical range through (possibly several)
 * PMB entries; pmb_remap returns the size mapped or a negative errno —
 * NOTE(review): confirm return convention in pmb.c. */
long pmb_remap(unsigned long virt, unsigned long phys,
	       unsigned long size, unsigned long flags);
void pmb_unmap(unsigned long addr);
|
|
|
#endif /* __MMU_H */
|
|
|