[IA64] rename partial_page
Jens has added a partial_page thing in splice which conflicts with the ia64 one. Rename ia64 out of the way. (ia64 chose poorly).

Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Tony Luck <tony.luck@intel.com>
This commit is contained in:
parent a07ee86205
commit 3b74d18e54

6 changed files with 62 additions and 59 deletions
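The collision is easy to reproduce in miniature. Below is a sketch of why two subsystems cannot both export a "struct partial_page" and how the arch prefix resolves it; the field layouts are illustrative only, not the actual splice or ia64 definitions:

/*
 * Illustrative reduction of the clash; field layouts are made up.
 *
 * Before the rename, a translation unit that saw both headers would
 * hit a redefinition error:
 *
 *	struct partial_page { unsigned int offset; };	     // splice's type
 *	struct partial_page { struct partial_page *next; };  // error: redefinition
 *
 * Prefixing the arch-private type removes the clash without touching
 * the generic code:
 */
struct partial_page {			/* generic (splice) type keeps the short name */
	unsigned int offset;
};

struct ia64_partial_page {		/* arch-private type takes the ia64_ prefix */
	struct ia64_partial_page *next;
};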
@@ -249,11 +249,11 @@ ia32_init (void)
 #if PAGE_SHIFT > IA32_PAGE_SHIFT
 	{
-		extern struct kmem_cache *partial_page_cachep;
+		extern struct kmem_cache *ia64_partial_page_cachep;

-		partial_page_cachep = kmem_cache_create("partial_page_cache",
-					sizeof(struct partial_page),
-					0, SLAB_PANIC, NULL);
+		ia64_partial_page_cachep = kmem_cache_create("ia64_partial_page_cache",
+					sizeof(struct ia64_partial_page),
+					0, SLAB_PANIC, NULL);
 	}
 #endif
 	return 0;

@@ -25,8 +25,8 @@
  * partially mapped pages provide precise accounting of which 4k sub pages
  * are mapped and which ones are not, thereby improving IA-32 compatibility.
  */
-struct partial_page {
-	struct partial_page	*next;	/* linked list, sorted by address */
+struct ia64_partial_page {
+	struct ia64_partial_page *next;	/* linked list, sorted by address */
 	struct rb_node		pp_rb;
 	/* 64K is the largest "normal" page supported by ia64 ABI. So 4K*64
 	 * should suffice.*/

@@ -34,17 +34,17 @@ struct partial_page {
 	unsigned int		base;
 };

-struct partial_page_list {
-	struct partial_page	*pp_head; /* list head, points to the lowest
+struct ia64_partial_page_list {
+	struct ia64_partial_page *pp_head; /* list head, points to the lowest
 					   * addressed partial page */
 	struct rb_root		ppl_rb;
-	struct partial_page	*pp_hint; /* pp_hint->next is the last
+	struct ia64_partial_page *pp_hint; /* pp_hint->next is the last
 					   * accessed partial page */
 	atomic_t		pp_count; /* reference count */
 };

 #if PAGE_SHIFT > IA32_PAGE_SHIFT
-struct partial_page_list* ia32_init_pp_list (void);
+struct ia64_partial_page_list* ia32_init_pp_list (void);
 #else
 # define ia32_init_pp_list() 0
 #endif

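For context on what these structures track: each ia64_partial_page covers one kernel page and records which 4K IA-32 sub-pages inside it are mapped. A minimal sketch of that sub-page bitmap accounting follows, assuming a 64K kernel page (PAGE_SHIFT 16) and a per-page bitmap field that this hunk does not show:

#include <stdio.h>

/*
 * Sketch of the sub-page accounting idea, assuming a 64K kernel page
 * (PAGE_SHIFT 16) and 4K IA-32 pages (IA32_PAGE_SHIFT 12); the bitmap
 * field is implied by the surrounding code but not visible here.
 */
#define PAGE_SHIFT	16
#define IA32_PAGE_SHIFT	12

static unsigned long bitmap;	/* bit i set => 4K sub-page i is mapped */

static void set_range(unsigned int start, unsigned int end)
{
	unsigned int mask = (1u << (PAGE_SHIFT - IA32_PAGE_SHIFT)) - 1;
	unsigned int start_bit = (start >> IA32_PAGE_SHIFT) & mask;
	unsigned int end_bit = (end >> IA32_PAGE_SHIFT) & mask;
	unsigned int i;

	for (i = start_bit; i < end_bit; i++)
		bitmap |= 1ul << i;
}

int main(void)
{
	set_range(0x1000, 0x3000);		/* map 4K sub-pages 1 and 2 */
	printf("bitmap = %#lx\n", bitmap);	/* prints 0x6 */
	return 0;
}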
@@ -253,17 +253,17 @@ mmap_subpage (struct file *file, unsigned long start, unsigned long end, int pro
 	return ret;
 }

-/* SLAB cache for partial_page structures */
-struct kmem_cache *partial_page_cachep;
+/* SLAB cache for ia64_partial_page structures */
+struct kmem_cache *ia64_partial_page_cachep;

 /*
- * init partial_page_list.
+ * init ia64_partial_page_list.
  * return 0 means kmalloc fail.
  */
-struct partial_page_list*
+struct ia64_partial_page_list*
 ia32_init_pp_list(void)
 {
-	struct partial_page_list *p;
+	struct ia64_partial_page_list *p;

 	if ((p = kmalloc(sizeof(*p), GFP_KERNEL)) == NULL)
 		return p;

@@ -280,12 +280,12 @@ ia32_init_pp_list(void)
  * Else, return 0 and provide @pprev, @rb_link, @rb_parent to
  * be used by later __ia32_insert_pp().
  */
-static struct partial_page *
-__ia32_find_pp(struct partial_page_list *ppl, unsigned int start,
-	struct partial_page **pprev, struct rb_node ***rb_link,
+static struct ia64_partial_page *
+__ia32_find_pp(struct ia64_partial_page_list *ppl, unsigned int start,
+	struct ia64_partial_page **pprev, struct rb_node ***rb_link,
 	struct rb_node **rb_parent)
 {
-	struct partial_page *pp;
+	struct ia64_partial_page *pp;
 	struct rb_node **__rb_link, *__rb_parent, *rb_prev;

 	pp = ppl->pp_hint;

@@ -297,7 +297,7 @@ __ia32_find_pp(struct partial_page_list *ppl, unsigned int start,

 	while (*__rb_link) {
 		__rb_parent = *__rb_link;
-		pp = rb_entry(__rb_parent, struct partial_page, pp_rb);
+		pp = rb_entry(__rb_parent, struct ia64_partial_page, pp_rb);

 		if (pp->base == start) {
 			ppl->pp_hint = pp;

@@ -314,7 +314,7 @@ __ia32_find_pp(struct partial_page_list *ppl, unsigned int start,
 	*rb_parent = __rb_parent;
 	*pprev = NULL;
 	if (rb_prev)
-		*pprev = rb_entry(rb_prev, struct partial_page, pp_rb);
+		*pprev = rb_entry(rb_prev, struct ia64_partial_page, pp_rb);
 	return NULL;
 }

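The rb_entry() calls this rename touches are the kernel's container_of() pattern: given a pointer to an embedded rb_node, recover the enclosing structure. A self-contained userspace sketch of the same mechanics (the kernel provides these macros itself; the demo struct below is hypothetical):

#include <stddef.h>
#include <stdio.h>

/* Userspace re-implementation of container_of() for illustration. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rb_node { struct rb_node *left, *right; };

struct pp_demo {
	unsigned int base;
	struct rb_node pp_rb;	/* embedded tree linkage */
};

int main(void)
{
	struct pp_demo pp = { .base = 0x1000 };
	struct rb_node *node = &pp.pp_rb;

	/* rb_entry(node, struct pp_demo, pp_rb) expands to: */
	struct pp_demo *back = container_of(node, struct pp_demo, pp_rb);

	printf("base = %#x\n", back->base);	/* prints 0x1000 */
	return 0;
}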
@@ -322,9 +322,9 @@ __ia32_find_pp(struct partial_page_list *ppl, unsigned int start,
  * insert @pp into @ppl.
  */
 static void
-__ia32_insert_pp(struct partial_page_list *ppl, struct partial_page *pp,
-	struct partial_page *prev, struct rb_node **rb_link,
-	struct rb_node *rb_parent)
+__ia32_insert_pp(struct ia64_partial_page_list *ppl,
+	struct ia64_partial_page *pp, struct ia64_partial_page *prev,
+	struct rb_node **rb_link, struct rb_node *rb_parent)
 {
 	/* link list */
 	if (prev) {

@@ -334,7 +334,7 @@ __ia32_insert_pp(struct partial_page_list *ppl, struct partial_page *pp,
 		ppl->pp_head = pp;
 		if (rb_parent)
 			pp->next = rb_entry(rb_parent,
-				struct partial_page, pp_rb);
+				struct ia64_partial_page, pp_rb);
 		else
 			pp->next = NULL;
 	}

@@ -350,8 +350,8 @@ __ia32_insert_pp(struct partial_page_list *ppl, struct partial_page *pp,
  * delete @pp from partial page list @ppl.
  */
 static void
-__ia32_delete_pp(struct partial_page_list *ppl, struct partial_page *pp,
-	struct partial_page *prev)
+__ia32_delete_pp(struct ia64_partial_page_list *ppl,
+	struct ia64_partial_page *pp, struct ia64_partial_page *prev)
 {
 	if (prev) {
 		prev->next = pp->next;

@@ -363,15 +363,15 @@ __ia32_delete_pp(struct partial_page_list *ppl, struct partial_page *pp,
 		ppl->pp_hint = pp->next;
 	}
 	rb_erase(&pp->pp_rb, &ppl->ppl_rb);
-	kmem_cache_free(partial_page_cachep, pp);
+	kmem_cache_free(ia64_partial_page_cachep, pp);
 }

-static struct partial_page *
-__pp_prev(struct partial_page *pp)
+static struct ia64_partial_page *
+__pp_prev(struct ia64_partial_page *pp)
 {
 	struct rb_node *prev = rb_prev(&pp->pp_rb);
 	if (prev)
-		return rb_entry(prev, struct partial_page, pp_rb);
+		return rb_entry(prev, struct ia64_partial_page, pp_rb);
 	else
 		return NULL;
 }

@@ -383,7 +383,7 @@ __pp_prev(struct partial_page *pp)
 static void
 __ia32_delete_pp_range(unsigned int start, unsigned int end)
 {
-	struct partial_page *pp, *prev;
+	struct ia64_partial_page *pp, *prev;
 	struct rb_node **rb_link, *rb_parent;

 	if (start >= end)

@@ -401,7 +401,7 @@ __ia32_delete_pp_range(unsigned int start, unsigned int end)
 	}

 	while (pp && pp->base < end) {
-		struct partial_page *tmp = pp->next;
+		struct ia64_partial_page *tmp = pp->next;
 		__ia32_delete_pp(current->thread.ppl, pp, prev);
 		pp = tmp;
 	}

@@ -414,7 +414,7 @@ __ia32_delete_pp_range(unsigned int start, unsigned int end)
 static int
 __ia32_set_pp(unsigned int start, unsigned int end, int flags)
 {
-	struct partial_page *pp, *prev;
+	struct ia64_partial_page *pp, *prev;
 	struct rb_node ** rb_link, *rb_parent;
 	unsigned int pstart, start_bit, end_bit, i;

@@ -450,8 +450,8 @@ __ia32_set_pp(unsigned int start, unsigned int end, int flags)
 		return 0;
 	}

-	/* new a partial_page */
-	pp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL);
+	/* new a ia64_partial_page */
+	pp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL);
 	if (!pp)
 		return -ENOMEM;
 	pp->base = pstart;

@@ -504,7 +504,7 @@ ia32_set_pp(unsigned int start, unsigned int end, int flags)
 static int
 __ia32_unset_pp(unsigned int start, unsigned int end)
 {
-	struct partial_page *pp, *prev;
+	struct ia64_partial_page *pp, *prev;
 	struct rb_node ** rb_link, *rb_parent;
 	unsigned int pstart, start_bit, end_bit, i;
 	struct vm_area_struct *vma;

@@ -532,8 +532,8 @@ __ia32_unset_pp(unsigned int start, unsigned int end)
 			return -ENOMEM;
 	}

-	/* new a partial_page */
-	pp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL);
+	/* new a ia64_partial_page */
+	pp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL);
 	if (!pp)
 		return -ENOMEM;
 	pp->base = pstart;

@@ -605,7 +605,7 @@ ia32_unset_pp(unsigned int *startp, unsigned int *endp)
 static int
 __ia32_compare_pp(unsigned int start, unsigned int end)
 {
-	struct partial_page *pp, *prev;
+	struct ia64_partial_page *pp, *prev;
 	struct rb_node ** rb_link, *rb_parent;
 	unsigned int pstart, start_bit, end_bit, size;
 	unsigned int first_bit, next_zero_bit;	/* the first range in bitmap */

@@ -682,13 +682,13 @@ ia32_compare_pp(unsigned int *startp, unsigned int *endp)
 }

 static void
-__ia32_drop_pp_list(struct partial_page_list *ppl)
+__ia32_drop_pp_list(struct ia64_partial_page_list *ppl)
 {
-	struct partial_page *pp = ppl->pp_head;
+	struct ia64_partial_page *pp = ppl->pp_head;

 	while (pp) {
-		struct partial_page *next = pp->next;
-		kmem_cache_free(partial_page_cachep, pp);
+		struct ia64_partial_page *next = pp->next;
+		kmem_cache_free(ia64_partial_page_cachep, pp);
 		pp = next;
 	}

@@ -696,9 +696,9 @@ __ia32_drop_pp_list(struct partial_page_list *ppl)
 }

 void
-ia32_drop_partial_page_list(struct task_struct *task)
+ia32_drop_ia64_partial_page_list(struct task_struct *task)
 {
-	struct partial_page_list* ppl = task->thread.ppl;
+	struct ia64_partial_page_list* ppl = task->thread.ppl;

 	if (ppl && atomic_dec_and_test(&ppl->pp_count))
 		__ia32_drop_pp_list(ppl);

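Note that ia32_drop_ia64_partial_page_list() frees the shared list only on the last put: atomic_dec_and_test() returns true only when the counter reaches zero. A hedged userspace sketch of the same release pattern, with C11 atomics standing in for the kernel's atomic_t:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Sketch of the atomic_dec_and_test() release pattern: the list is
 * shared across clone(), each task drops one reference, and only the
 * final dropper frees the structure.
 */
struct shared_list {
	atomic_int pp_count;	/* reference count */
};

static void drop_list(struct shared_list *ppl)
{
	/* fetch_sub returns the old value; old == 1 means we were last */
	if (ppl && atomic_fetch_sub(&ppl->pp_count, 1) == 1) {
		printf("last reference: freeing list\n");
		free(ppl);
	}
}

int main(void)
{
	struct shared_list *ppl = malloc(sizeof(*ppl));
	atomic_init(&ppl->pp_count, 2);	/* parent + one clone */

	drop_list(ppl);	/* first drop: list survives */
	drop_list(ppl);	/* second drop: frees it */
	return 0;
}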
@@ -708,9 +708,9 @@ ia32_drop_partial_page_list(struct task_struct *task)
 * Copy current->thread.ppl to ppl (already initialized).
 */
 static int
-__ia32_copy_pp_list(struct partial_page_list *ppl)
+__ia32_copy_pp_list(struct ia64_partial_page_list *ppl)
 {
-	struct partial_page *pp, *tmp, *prev;
+	struct ia64_partial_page *pp, *tmp, *prev;
 	struct rb_node **rb_link, *rb_parent;

 	ppl->pp_head = NULL;

@@ -721,7 +721,7 @@ __ia32_copy_pp_list(struct partial_page_list *ppl)
 	prev = NULL;

 	for (pp = current->thread.ppl->pp_head; pp; pp = pp->next) {
-		tmp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL);
+		tmp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL);
 		if (!tmp)
 			return -ENOMEM;
 		*tmp = *pp;

@@ -734,7 +734,8 @@ __ia32_copy_pp_list(struct partial_page_list *ppl)
 }

 int
-ia32_copy_partial_page_list(struct task_struct *p, unsigned long clone_flags)
+ia32_copy_ia64_partial_page_list(struct task_struct *p,
+	unsigned long clone_flags)
 {
 	int retval = 0;

@@ -499,7 +499,8 @@ copy_thread (int nr, unsigned long clone_flags,

 		/* Copy partially mapped page list */
 		if (!retval)
-			retval = ia32_copy_partial_page_list(p, clone_flags);
+			retval = ia32_copy_ia64_partial_page_list(p,
+								clone_flags);
 	}
 #endif

@@ -728,7 +729,7 @@ flush_thread (void)
 	ia64_drop_fpu(current);
 #ifdef CONFIG_IA32_SUPPORT
 	if (IS_IA32_PROCESS(task_pt_regs(current))) {
-		ia32_drop_partial_page_list(current);
+		ia32_drop_ia64_partial_page_list(current);
 		current->thread.task_size = IA32_PAGE_OFFSET;
 		set_fs(USER_DS);
 	}

@@ -754,7 +755,7 @@ exit_thread (void)
 		pfm_release_debug_registers(current);
 #endif
 	if (IS_IA32_PROCESS(task_pt_regs(current)))
-		ia32_drop_partial_page_list(current);
+		ia32_drop_ia64_partial_page_list(current);
 }

 unsigned long

@@ -27,11 +27,12 @@ extern int ia32_clone_tls (struct task_struct *child, struct pt_regs *childregs)
 extern int ia32_setup_frame1 (int sig, struct k_sigaction *ka, siginfo_t *info,
			      sigset_t *set, struct pt_regs *regs);
 #if PAGE_SHIFT > IA32_PAGE_SHIFT
-extern int ia32_copy_partial_page_list (struct task_struct *, unsigned long);
-extern void ia32_drop_partial_page_list (struct task_struct *);
+extern int ia32_copy_ia64_partial_page_list(struct task_struct *,
+					unsigned long);
+extern void ia32_drop_ia64_partial_page_list(struct task_struct *);
 #else
-# define ia32_copy_partial_page_list(a1, a2)	0
-# define ia32_drop_partial_page_list(a1)	do { ; } while (0)
+# define ia32_copy_ia64_partial_page_list(a1, a2)	0
+# define ia32_drop_ia64_partial_page_list(a1)	do { ; } while (0)
 #endif

 #endif /* !__ASSEMBLY__ */

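A side note on the #else stubs above: wrapping a no-op macro in do { ; } while (0) makes it parse as a single statement, so call sites behave identically whether the real function or the stub is compiled in. A small sketch with a hypothetical macro name:

#include <stdio.h>

/*
 * The no-op stub expands to a single valid statement, so it can sit
 * in an if/else without introducing a dangling-else ambiguity.
 */
#define ia32_drop_stub(task)	do { ; } while (0)

int main(void)
{
	int is_ia32 = 0;

	if (is_ia32)
		ia32_drop_stub(current);	/* argument is discarded by the macro */
	else
		printf("not an IA-32 task\n");

	return 0;
}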
@@ -220,7 +220,7 @@ struct desc_struct {

 #define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)

-struct partial_page_list;
+struct ia64_partial_page_list;
 #endif

 struct thread_struct {

@@ -242,7 +242,7 @@ struct thread_struct {
 	__u64 fdr;			/* IA32 fp except. data reg */
 	__u64 old_k1;			/* old value of ar.k1 */
 	__u64 old_iob;			/* old IOBase value */
-	struct partial_page_list *ppl;	/* partial page list for 4K page size issue */
+	struct ia64_partial_page_list *ppl; /* partial page list for 4K page size issue */
 	/* cached TLS descriptors. */
 	struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];