x86: unify tss_struct

Although slightly different, the tss_struct is very similar on x86_64 and
i386. The part that really differs, which matches the hardware's view of
it, is now called x86_hw_tss, and each architecture provides its own.
It is then embedded as a field in the outer tss_struct.

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
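
For illustration only (this sketch is not part of the patch): after the
unification, code that touches the hardware-defined TSS fields goes through
the embedded x86_tss member, while software-only bookkeeping such as
io_bitmap, io_bitmap_max and io_bitmap_owner stays directly in struct
tss_struct. The helper name below is hypothetical.

	/*
	 * Hypothetical example, assuming the unified layout introduced by
	 * this patch: the ring-0 stack pointer is a hardware-defined field,
	 * so it now lives in tss->x86_tss; the io_bitmap bookkeeping keeps
	 * its old location at the tss_struct level.
	 */
	static inline void example_load_sp0(struct tss_struct *tss,
					    struct thread_struct *thread)
	{
		/* was: tss->sp0 = thread->sp0; */
		tss->x86_tss.sp0 = thread->sp0;
	}

The callers actually updated by this patch (asm offsets, __switch_to,
cpu_init, doublefault and lguest) follow the same pattern, as the hunks
below show.
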
Glauber de Oliveira Costa authored on 2008-01-30 13:31:31 +01:00; committed by Ingo Molnar
parent 0ccb8acc51
commit ca241c7503
9 changed files with 85 additions and 103 deletions


@@ -110,7 +110,7 @@ int main(void)
 	ENTRY(cr8);
 	BLANK();
 #undef ENTRY
-	DEFINE(TSS_ist, offsetof(struct tss_struct, ist));
+	DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
 	BLANK();
 	DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
 	BLANK();


@@ -33,7 +33,7 @@ static void doublefault_fn(void)
 		printk(KERN_EMERG "double fault, tss at %08lx\n", tss);
 		if (ptr_ok(tss)) {
-			struct i386_hw_tss *t = (struct i386_hw_tss *)tss;
+			struct x86_hw_tss *t = (struct x86_hw_tss *)tss;
 			printk(KERN_EMERG "eip = %08lx, esp = %08lx\n",
 				t->ip, t->sp);


@@ -639,7 +639,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	/*
 	 * Reload esp0, LDT and the page table pointer:
 	 */
-	tss->sp0 = next->sp0;
+	tss->x86_tss.sp0 = next->sp0;
 	/*
 	 * Switch DS and ES.


@@ -258,10 +258,10 @@ void __cpuinit cpu_init (void)
 				v, cpu);
 		}
 		estacks += PAGE_SIZE << order[v];
-		orig_ist->ist[v] = t->ist[v] = (unsigned long)estacks;
+		orig_ist->ist[v] = t->x86_tss.ist[v] = (unsigned long)estacks;
 	}
-	t->io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
+	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
 	/*
 	 * <= is required because the CPU will access up to
 	 * 8 bits beyond the end of the IO permission bitmap.


@@ -614,7 +614,7 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid)
 	start_rip = setup_trampoline();
 	init_rsp = c_idle.idle->thread.sp;
-	per_cpu(init_tss,cpu).sp0 = init_rsp;
+	per_cpu(init_tss, cpu).x86_tss.sp0 = init_rsp;
 	initial_code = start_secondary;
 	clear_tsk_thread_flag(c_idle.idle, TIF_FORK);


@@ -51,7 +51,7 @@ struct lguest_ro_state
 	/* Fields which are used when guest is running. */
 	struct desc_ptr guest_idt_desc;
 	struct desc_ptr guest_gdt_desc;
-	struct i386_hw_tss guest_tss;
+	struct x86_hw_tss guest_tss;
 	struct desc_struct guest_idt[IDT_ENTRIES];
 	struct desc_struct guest_gdt[GDT_ENTRIES];
 };


@@ -8,6 +8,7 @@ struct task_struct;
 struct mm_struct;
 #include <asm/page.h>
 #include <asm/percpu.h>
+#include <asm/system.h>
 /*
@@ -38,6 +39,82 @@ static inline void load_cr3(pgd_t *pgdir)
 	write_cr3(__pa(pgdir));
 }
+#ifdef CONFIG_X86_32
+/* This is the TSS defined by the hardware. */
+struct x86_hw_tss {
+	unsigned short back_link, __blh;
+	unsigned long sp0;
+	unsigned short ss0, __ss0h;
+	unsigned long sp1;
+	unsigned short ss1, __ss1h; /* ss1 caches MSR_IA32_SYSENTER_CS */
+	unsigned long sp2;
+	unsigned short ss2, __ss2h;
+	unsigned long __cr3;
+	unsigned long ip;
+	unsigned long flags;
+	unsigned long ax, cx, dx, bx;
+	unsigned long sp, bp, si, di;
+	unsigned short es, __esh;
+	unsigned short cs, __csh;
+	unsigned short ss, __ssh;
+	unsigned short ds, __dsh;
+	unsigned short fs, __fsh;
+	unsigned short gs, __gsh;
+	unsigned short ldt, __ldth;
+	unsigned short trace, io_bitmap_base;
+} __attribute__((packed));
+#else
+struct x86_hw_tss {
+	u32 reserved1;
+	u64 sp0;
+	u64 sp1;
+	u64 sp2;
+	u64 reserved2;
+	u64 ist[7];
+	u32 reserved3;
+	u32 reserved4;
+	u16 reserved5;
+	u16 io_bitmap_base;
+} __attribute__((packed)) ____cacheline_aligned;
+#endif
+
+/*
+ * Size of io_bitmap.
+ */
+#define IO_BITMAP_BITS 65536
+#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
+#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
+#define IO_BITMAP_OFFSET offsetof(struct tss_struct, io_bitmap)
+#define INVALID_IO_BITMAP_OFFSET 0x8000
+#define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000
+
+struct tss_struct {
+	struct x86_hw_tss x86_tss;
+
+	/*
+	 * The extra 1 is there because the CPU will access an
+	 * additional byte beyond the end of the IO permission
+	 * bitmap. The extra byte must be all 1 bits, and must
+	 * be within the limit.
+	 */
+	unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
+	/*
+	 * Cache the current maximum and the last task that used the bitmap:
+	 */
+	unsigned long io_bitmap_max;
+	struct thread_struct *io_bitmap_owner;
+	/*
+	 * pads the TSS to be cacheline-aligned (size is 0x100)
+	 */
+	unsigned long __cacheline_filler[35];
+	/*
+	 * .. and then another 0x100 bytes for emergency kernel stack
+	 */
+	unsigned long stack[64];
+} __attribute__((packed));
+
+DECLARE_PER_CPU(struct tss_struct, init_tss);
+
 #ifdef CONFIG_X86_32
 # include "processor_32.h"
 #else


@@ -81,7 +81,6 @@ struct cpuinfo_x86 {
 extern struct cpuinfo_x86 boot_cpu_data;
 extern struct cpuinfo_x86 new_cpu_data;
 extern struct tss_struct doublefault_tss;
-DECLARE_PER_CPU(struct tss_struct, init_tss);
 #ifdef CONFIG_SMP
 DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
@@ -123,16 +122,6 @@ extern unsigned int mca_pentium_flag;
 #define TASK_SIZE (PAGE_OFFSET)
-/*
- * Size of io_bitmap.
- */
-#define IO_BITMAP_BITS 65536
-#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
-#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
-#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
-#define INVALID_IO_BITMAP_OFFSET 0x8000
-#define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000
 struct i387_fsave_struct {
 	long cwd;
 	long swd;
@@ -185,57 +174,6 @@ typedef struct {
 	unsigned long seg;
 } mm_segment_t;
 struct thread_struct;
-/* This is the TSS defined by the hardware. */
-struct i386_hw_tss {
-	unsigned short back_link,__blh;
-	unsigned long sp0;
-	unsigned short ss0,__ss0h;
-	unsigned long sp1;
-	unsigned short ss1,__ss1h; /* ss1 is used to cache MSR_IA32_SYSENTER_CS */
-	unsigned long sp2;
-	unsigned short ss2,__ss2h;
-	unsigned long __cr3;
-	unsigned long ip;
-	unsigned long flags;
-	unsigned long ax, cx, dx, bx;
-	unsigned long sp, bp, si, di;
-	unsigned short es, __esh;
-	unsigned short cs, __csh;
-	unsigned short ss, __ssh;
-	unsigned short ds, __dsh;
-	unsigned short fs, __fsh;
-	unsigned short gs, __gsh;
-	unsigned short ldt, __ldth;
-	unsigned short trace, io_bitmap_base;
-} __attribute__((packed));
-
-struct tss_struct {
-	struct i386_hw_tss x86_tss;
-
-	/*
-	 * The extra 1 is there because the CPU will access an
-	 * additional byte beyond the end of the IO permission
-	 * bitmap. The extra byte must be all 1 bits, and must
-	 * be within the limit.
-	 */
-	unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
-	/*
-	 * Cache the current maximum and the last task that used the bitmap:
-	 */
-	unsigned long io_bitmap_max;
-	struct thread_struct *io_bitmap_owner;
-	/*
-	 * pads the TSS to be cacheline-aligned (size is 0x100)
-	 */
-	unsigned long __cacheline_filler[35];
-	/*
-	 * .. and then another 0x100 bytes for emergency kernel stack
-	 */
-	unsigned long stack[64];
-} __attribute__((packed));
-
 #define ARCH_MIN_TASKALIGN 16
 struct thread_struct {


@@ -91,14 +91,6 @@ extern void identify_cpu(struct cpuinfo_x86 *);
 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? IA32_PAGE_OFFSET : TASK_SIZE64)
 #define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_IA32)) ? IA32_PAGE_OFFSET : TASK_SIZE64)
-/*
- * Size of io_bitmap.
- */
-#define IO_BITMAP_BITS 65536
-#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
-#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
-#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
-#define INVALID_IO_BITMAP_OFFSET 0x8000
 struct i387_fxsave_struct {
 	u16 cwd;
@@ -118,32 +110,7 @@ union i387_union {
 	struct i387_fxsave_struct fxsave;
 };
-struct tss_struct {
-	u32 reserved1;
-	u64 sp0;
-	u64 sp1;
-	u64 sp2;
-	u64 reserved2;
-	u64 ist[7];
-	u32 reserved3;
-	u32 reserved4;
-	u16 reserved5;
-	u16 io_bitmap_base;
-	/*
-	 * The extra 1 is there because the CPU will access an
-	 * additional byte beyond the end of the IO permission
-	 * bitmap. The extra byte must be all 1 bits, and must
-	 * be within the limit. Thus we have:
-	 *
-	 * 128 bytes, the bitmap itself, for ports 0..0x3ff
-	 * 8 bytes, for an extra "long" of ~0UL
-	 */
-	unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
-} __attribute__((packed)) ____cacheline_aligned;
-
 extern struct cpuinfo_x86 boot_cpu_data;
-DECLARE_PER_CPU(struct tss_struct,init_tss);
 /* Save the original ist values for checking stack pointers during debugging */
 struct orig_ist {
 	unsigned long ist[7];
@@ -195,7 +162,7 @@ struct thread_struct {
 }
 #define INIT_TSS { \
-	.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
+	.x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
 }
 #define INIT_MMAP \