Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86: (613 commits)
  x86: standalone trampoline code
  x86: move suspend wakeup code to C
  x86: coding style fixes to arch/x86/kernel/acpi/sleep.c
  x86: setup_trampoline() - fix section mismatch warning
  x86: section mismatch fixes, #1
  x86: fix paranoia about using BIOS quickboot mechanism.
  x86: print out buggy mptable
  x86: use cpu_online()
  x86: use cpumask_of_cpu()
  x86: remove unnecessary tmp local variable
  x86: remove unnecessary memset()
  x86: use ioapic_read_entry() and ioapic_write_entry()
  x86: avoid redundant loop in io_apic_level_ack_pending()
  x86: remove superfluous initialisation in boot code.
  x86: merge mpparse_{32,64}.c
  x86: unify mp_register_gsi
  x86: unify mp_config_acpi_legacy_irqs
  x86: unify mp_register_ioapic
  x86: unify uniq_io_apic_id
  x86: unify smp_scan_config
  ...
commit 9e9abecfc0
359 changed files with 13661 additions and 12435 deletions
@@ -212,7 +212,7 @@ Who:	Stephen Hemminger <shemminger@linux-foundation.org>
 ---------------------------
 
 What:	i386/x86_64 bzImage symlinks
-When:	April 2008
+When:	April 2010
 
 Why:	The i386/x86_64 merge provides a symlink to the old bzImage
 	location so not yet updated user space tools, e.g. package
@@ -170,6 +170,8 @@ Offset	Proto	Name		Meaning
 0238/4	2.06+	cmdline_size	Maximum size of the kernel command line
 023C/4	2.07+	hardware_subarch	Hardware subarchitecture
 0240/8	2.07+	hardware_subarch_data	Subarchitecture-specific data
+0248/4	2.08+	payload_offset	Offset of kernel payload
+024C/4	2.08+	payload_length	Length of kernel payload
 
 (1) For backwards compatibility, if the setup_sects field contains 0, the
     real value is 4.
@@ -512,6 +514,32 @@ Protocol:	2.07+
 
   A pointer to data that is specific to hardware subarch
 
+Field name:	payload_offset
+Type:		read
+Offset/size:	0x248/4
+Protocol:	2.08+
+
+  If non-zero then this field contains the offset from the end of the
+  real-mode code to the payload.
+
+  The payload may be compressed. The format of both the compressed and
+  uncompressed data should be determined using the standard magic
+  numbers. Currently only gzip compressed ELF is used.
+
+Field name:	payload_length
+Type:		read
+Offset/size:	0x24c/4
+Protocol:	2.08+
+
+  The length of the payload.
+
+**** THE IMAGE CHECKSUM
+
+From boot protocol version 2.08 onwards the CRC-32 is calculated over
+the entire file using the characteristic polynomial 0x04C11DB7 and an
+initial remainder of 0xffffffff.  The checksum is appended to the
+file; therefore the CRC of the file up to the limit specified in the
+syssize field of the header is always 0.
+
 **** THE KERNEL COMMAND LINE
 
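As an aside (not part of this patch), the checksum rule above can be checked
from user space with a small sketch like the one below.  It runs the same
table-driven CRC-32 (the reflected form of the 0x04C11DB7 polynomial, initial
remainder 0xffffffff, no final inversion, which is what the build-tool change
later in this diff uses) over the whole image; because the raw CRC register
value is appended, the register must end at 0.  File handling and names here
are illustrative only; per the description above, the payload itself normally
starts at (setup_sects + 1) * 512 + payload_offset bytes into the image file.

	#include <stdio.h>
	#include <stdint.h>

	/* One reflected CRC-32 step, equivalent to a crctab32[] lookup. */
	static uint32_t crc32_byte(uint8_t c, uint32_t crc)
	{
		int i;

		crc ^= c;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320u : 0);
		return crc;
	}

	int main(int argc, char **argv)
	{
		uint32_t crc = 0xffffffffu;
		FILE *f;
		int c;

		if (argc < 2 || !(f = fopen(argv[1], "rb")))
			return 1;
		while ((c = getc(f)) != EOF)
			crc = crc32_byte(c, crc);
		fclose(f);

		printf("final CRC register: 0x%08x (%s)\n", crc,
		       crc == 0 ? "image checksum OK" : "mismatch");
		return crc != 0;
	}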
@@ -812,6 +812,19 @@ and is between 256 and 4096 characters. It is defined in the file
 
 	inttest=	[IA64]
 
+	iommu=		[x86]
+		off
+		force
+		noforce
+		biomerge
+		panic
+		nopanic
+		merge
+		nomerge
+		forcesac
+		soft
+
 	intel_iommu=	[DMAR] Intel IOMMU driver (DMAR) option
 		off
 			Disable intel iommu driver.
@@ -1134,6 +1147,11 @@ and is between 256 and 4096 characters. It is defined in the file
 			or
 			memmap=0x10000$0x18690000
 
+	memtest=	[KNL,X86_64] Enable memtest
+			Format: <integer>
+			range: 0,4 : pattern number
+			default : 0 <disable>
+
 	meye.*=		[HW] Set MotionEye Camera parameters
 			See Documentation/video4linux/meye.txt.
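For example (illustrative usage only, derived from the text above and the
MEMTEST_BOOTPARAM* Kconfig help further down in this diff): booting with
"memtest=4" enables the boot-time memory test and uses pattern number 4,
while "memtest=0" disables it, which is the default when
MEMTEST_BOOTPARAM_VALUE is left at 0.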
Documentation/x86/pat.txt (new file, 100 lines):

PAT (Page Attribute Table)

x86 Page Attribute Table (PAT) allows for setting the memory attribute at the
page level granularity. PAT is complementary to the MTRR settings which allows
for setting of memory types over physical address ranges. However, PAT is
more flexible than MTRR due to its capability to set attributes at page level
and also due to the fact that there are no hardware limitations on number of
such attribute settings allowed. Added flexibility comes with guidelines for
not having memory type aliasing for the same physical memory with multiple
virtual addresses.

PAT allows for different types of memory attributes. The most commonly used
ones that will be supported at this time are Write-back, Uncached,
Write-combined and Uncached Minus.

There are many different APIs in the kernel that allows setting of memory
attributes at the page level. In order to avoid aliasing, these interfaces
should be used thoughtfully. Below is a table of interfaces available,
their intended usage and their memory attribute relationships. Internally,
these APIs use a reserve_memtype()/free_memtype() interface on the physical
address range to avoid any aliasing.

-------------------------------------------------------------------
API                    |    RAM   |  ACPI,...  |  Reserved/Holes  |
-----------------------|----------|------------|------------------|
                       |          |            |                  |
ioremap                |    --    |    UC      |       UC         |
                       |          |            |                  |
ioremap_cache          |    --    |    WB      |       WB         |
                       |          |            |                  |
ioremap_nocache        |    --    |    UC      |       UC         |
                       |          |            |                  |
ioremap_wc             |    --    |    --      |       WC         |
                       |          |            |                  |
set_memory_uc          |    UC    |    --      |       --         |
 set_memory_wb         |          |            |                  |
                       |          |            |                  |
set_memory_wc          |    WC    |    --      |       --         |
 set_memory_wb         |          |            |                  |
                       |          |            |                  |
pci sysfs resource     |    --    |    --      |       UC         |
                       |          |            |                  |
pci sysfs resource_wc  |    --    |    --      |       WC         |
 is IORESOURCE_PREFETCH|          |            |                  |
                       |          |            |                  |
pci proc               |    --    |    --      |       UC         |
 !PCIIOC_WRITE_COMBINE |          |            |                  |
                       |          |            |                  |
pci proc               |    --    |    --      |       WC         |
 PCIIOC_WRITE_COMBINE  |          |            |                  |
                       |          |            |                  |
/dev/mem               |    --    |    UC      |       UC         |
 read-write            |          |            |                  |
                       |          |            |                  |
/dev/mem               |    --    |    UC      |       UC         |
 mmap SYNC flag        |          |            |                  |
                       |          |            |                  |
/dev/mem               |    --    | WB/WC/UC   |    WB/WC/UC      |
 mmap !SYNC flag       |          |(from exist-|  (from exist-    |
 and                   |          | ing alias) |   ing alias)     |
 any alias to this area|          |            |                  |
                       |          |            |                  |
/dev/mem               |    --    |    WB      |       WB         |
 mmap !SYNC flag       |          |            |                  |
 no alias to this area |          |            |                  |
 and                   |          |            |                  |
 MTRR says WB          |          |            |                  |
                       |          |            |                  |
/dev/mem               |    --    |    --      |    UC_MINUS      |
 mmap !SYNC flag       |          |            |                  |
 no alias to this area |          |            |                  |
 and                   |          |            |                  |
 MTRR says !WB         |          |            |                  |
                       |          |            |                  |
-------------------------------------------------------------------

Notes:

-- in the above table mean "Not suggested usage for the API". Some of the --'s
are strictly enforced by the kernel. Some others are not really enforced
today, but may be enforced in future.

For ioremap and pci access through /sys or /proc - The actual type returned
can be more restrictive, in case of any existing aliasing for that address.
For example: If there is an existing uncached mapping, a new ioremap_wc can
return uncached mapping in place of write-combine requested.

set_memory_[uc|wc] and set_memory_wb should be used in pairs, where driver will
first make a region uc or wc and switch it back to wb after use.

Over time writes to /proc/mtrr will be deprecated in favor of using PAT based
interfaces. Users writing to /proc/mtrr are suggested to use above interfaces.

Drivers should use ioremap_[uc|wc] to access PCI BARs with [uc|wc] access
types.

Drivers should use set_memory_[uc|wc] to set access type for RAM ranges.
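To make the pairing rule above concrete, here is a minimal hypothetical driver
sketch (not part of this patch; the device, buffer size and function names are
invented, and error handling is trimmed).  It uses the APIs named in the table:
set_memory_wc()/set_memory_wb() around the lifetime of a RAM buffer, and
ioremap_wc() for a prefetchable PCI BAR:

	#include <linux/io.h>
	#include <linux/gfp.h>
	#include <asm/cacheflush.h>

	#define BUF_PAGES	4			/* illustrative size */

	static void __iomem *fb;			/* hypothetical WC BAR mapping */
	static unsigned long buf;			/* hypothetical RAM buffer */

	static int example_setup(resource_size_t bar_start, resource_size_t bar_len)
	{
		buf = __get_free_pages(GFP_KERNEL, 2);	/* 4 pages */
		if (!buf)
			return -ENOMEM;

		/* RAM range: make it WC for the duration of its use ... */
		set_memory_wc(buf, BUF_PAGES);

		/* Reserved/hole range (PCI BAR): map it write-combined. */
		fb = ioremap_wc(bar_start, bar_len);
		if (!fb) {
			set_memory_wb(buf, BUF_PAGES);
			free_pages(buf, 2);
			return -ENOMEM;
		}
		return 0;
	}

	static void example_teardown(void)
	{
		iounmap(fb);
		/* ... and switch it back to WB before freeing it. */
		set_memory_wb(buf, BUF_PAGES);
		free_pages(buf, 2);
	}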
@@ -307,3 +307,8 @@ Debugging
 			stuck (default)
 
 Miscellaneous
+
+	nogbpages
+		Do not use GB pages for kernel direct mappings.
+	gbpages
+		Use GB pages for kernel direct mappings.
@@ -114,7 +114,7 @@ config ARCH_HAS_CPU_RELAX
 	def_bool y
 
 config HAVE_SETUP_PER_CPU_AREA
-	def_bool X86_64
+	def_bool X86_64 || (X86_SMP && !X86_VOYAGER)
 
 config ARCH_HIBERNATION_POSSIBLE
 	def_bool y

@@ -168,7 +168,7 @@ config X86_64_SMP
 config X86_HT
 	bool
 	depends on SMP
-	depends on (X86_32 && !(X86_VISWS || X86_VOYAGER)) || (X86_64 && !MK8)
+	depends on (X86_32 && !(X86_VISWS || X86_VOYAGER)) || X86_64
 	default y
 
 config X86_BIOS_REBOOT

@@ -178,7 +178,7 @@ config X86_BIOS_REBOOT
 
 config X86_TRAMPOLINE
 	bool
-	depends on X86_SMP || (X86_VOYAGER && SMP)
+	depends on X86_SMP || (X86_VOYAGER && SMP) || (64BIT && ACPI_SLEEP)
 	default y
 
 config KTIME_SCALAR

@@ -238,8 +238,7 @@ config X86_ELAN
 
 config X86_VOYAGER
 	bool "Voyager (NCR)"
-	depends on X86_32
-	select SMP if !BROKEN
+	depends on X86_32 && (SMP || BROKEN)
 	help
 	  Voyager is an MCA-based 32-way capable SMP architecture proprietary
 	  to NCR Corp.  Machine classes 345x/35xx/4100/51xx are Voyager-based.

@@ -251,9 +250,8 @@ config X86_VOYAGER
 
 config X86_NUMAQ
 	bool "NUMAQ (IBM/Sequent)"
-	select SMP
+	depends on SMP && X86_32
 	select NUMA
-	depends on X86_32
 	help
 	  This option is used for getting Linux to run on a (IBM/Sequent) NUMA
 	  multiquad box. This changes the way that processors are bootstrapped,

@@ -324,7 +322,8 @@ config X86_RDC321X
 
 config X86_VSMP
 	bool "Support for ScaleMP vSMP"
-	depends on X86_64 && PCI
+	select PARAVIRT
+	depends on X86_64
 	help
 	  Support for ScaleMP vSMP systems.  Say 'Y' here if this kernel is
 	  supposed to run on these EM64T-based machines.  Only choose this option

@@ -380,6 +379,35 @@ config PARAVIRT
 
 endif
 
+config MEMTEST_BOOTPARAM
+	bool "Memtest boot parameter"
+	depends on X86_64
+	default y
+	help
+	  This option adds a kernel parameter 'memtest', which allows memtest
+	  to be disabled at boot.  If this option is selected, memtest
+	  functionality can be disabled with memtest=0 on the kernel
+	  command line.  The purpose of this option is to allow a single
+	  kernel image to be distributed with memtest built in, but not
+	  necessarily enabled.
+
+	  If you are unsure how to answer this question, answer Y.
+
+config MEMTEST_BOOTPARAM_VALUE
+	int "Memtest boot parameter default value (0-4)"
+	depends on MEMTEST_BOOTPARAM
+	range 0 4
+	default 0
+	help
+	  This option sets the default value for the kernel parameter
+	  'memtest', which allows memtest to be disabled at boot.  If this
+	  option is set to 0 (zero), the memtest kernel parameter will
+	  default to 0, disabling memtest at bootup.  If this option is
+	  set to 4, the memtest kernel parameter will default to 4,
+	  enabling memtest at bootup, and use that as pattern number.
+
+	  If you are unsure how to answer this question, answer 0.
+
 config ACPI_SRAT
 	def_bool y
 	depends on X86_32 && ACPI && NUMA && (X86_SUMMIT || X86_GENERICARCH)

@@ -504,7 +532,7 @@ config NR_CPUS
 
 config SCHED_SMT
 	bool "SMT (Hyperthreading) scheduler support"
-	depends on (X86_64 && SMP) || (X86_32 && X86_HT)
+	depends on X86_HT
 	help
 	  SMT scheduler support improves the CPU scheduler's decision making
 	  when dealing with Intel Pentium 4 chips with HyperThreading at a

@@ -514,7 +542,7 @@ config SCHED_SMT
 config SCHED_MC
 	def_bool y
 	prompt "Multi-core scheduler support"
-	depends on (X86_64 && SMP) || (X86_32 && X86_HT)
+	depends on X86_HT
 	help
 	  Multi-core scheduler support improves the CPU scheduler's decision
 	  making when dealing with multi-core CPU chips at a cost of slightly

@@ -883,7 +911,7 @@ config NUMA_EMU
 	  number of nodes. This is only useful for debugging.
 
 config NODES_SHIFT
-	int
+	int "Max num nodes shift(1-15)"
 	range 1 15 if X86_64
 	default "6" if X86_64
 	default "4" if X86_NUMAQ

@@ -1007,6 +1035,21 @@ config MTRR
 
 	  See <file:Documentation/mtrr.txt> for more information.
 
+config X86_PAT
+	def_bool y
+	prompt "x86 PAT support"
+	depends on MTRR && NONPROMISC_DEVMEM
+	help
+	  Use PAT attributes to setup page level cache control.
+
+	  PATs are the modern equivalents of MTRRs and are much more
+	  flexible than MTRRs.
+
+	  Say N here if you see bootup problems (boot crash, boot hang,
+	  spontaneous reboots) or a non-working video driver.
+
+	  If unsure, say Y.
+
 config EFI
 	def_bool n
 	prompt "EFI runtime service support"

@@ -1075,6 +1118,7 @@ source kernel/Kconfig.hz
 
 config KEXEC
 	bool "kexec system call"
+	depends on X86_64 || X86_BIOS_REBOOT
 	help
 	  kexec is a system call that implements the ability to shutdown your
 	  current kernel, and to start another kernel.  It is like a reboot

@@ -1376,7 +1420,7 @@ endmenu
 menu "Bus options (PCI etc.)"
 
 config PCI
-	bool "PCI support" if !X86_VISWS
+	bool "PCI support" if !X86_VISWS && !X86_VSMP
 	depends on !X86_VOYAGER
 	default y
 	select ARCH_SUPPORTS_MSI if (X86_LOCAL_APIC && X86_IO_APIC)
@@ -388,7 +388,7 @@ config X86_OOSTORE
 #
 config X86_P6_NOP
 	def_bool y
-	depends on (X86_64 || !X86_GENERIC) && (M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MPENTIUM4)
+	depends on (X86_64 || !X86_GENERIC) && (M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MPENTIUM4 || MPSC)
 
 config X86_TSC
 	def_bool y
@@ -54,6 +54,18 @@ config DEBUG_PER_CPU_MAPS
 
 	  Say N if unsure.
 
+config X86_PTDUMP
+	bool "Export kernel pagetable layout to userspace via debugfs"
+	depends on DEBUG_KERNEL
+	select DEBUG_FS
+	help
+	  Say Y here if you want to show the kernel pagetable layout in a
+	  debugfs file. This information is only useful for kernel developers
+	  who are working in architecture specific areas of the kernel.
+	  It is probably not a good idea to enable this feature in a production
+	  kernel.
+	  If in doubt, say "N"
+
 config DEBUG_RODATA
 	bool "Write protect kernel read-only data structures"
 	default y

@@ -64,6 +76,18 @@ config DEBUG_RODATA
 	  data. This is recommended so that we can catch kernel bugs sooner.
 	  If in doubt, say "Y".
 
+config DIRECT_GBPAGES
+	bool "Enable gbpages-mapped kernel pagetables"
+	depends on DEBUG_KERNEL && EXPERIMENTAL && X86_64
+	help
+	  Enable gigabyte pages support (if the CPU supports it). This can
+	  improve the kernel's performance a tiny bit by reducing TLB
+	  pressure.
+
+	  This is experimental code.
+
+	  If in doubt, say "N".
+
 config DEBUG_RODATA_TEST
 	bool "Testcase for the DEBUG_RODATA feature"
 	depends on DEBUG_RODATA

@@ -82,8 +106,8 @@ config DEBUG_NX_TEST
 
 config 4KSTACKS
 	bool "Use 4Kb for kernel stacks instead of 8Kb"
-	depends on DEBUG_KERNEL
 	depends on X86_32
+	default y
 	help
 	  If you say Y here the kernel will use a 4Kb stacksize for the
 	  kernel stack attached to each process/thread.  This facilitates
@@ -151,7 +151,6 @@ mflags-y += -Iinclude/asm-x86/mach-default
 # 64 bit does not support subarch support - clear sub arch variables
 fcore-$(CONFIG_X86_64)  :=
 mcore-$(CONFIG_X86_64)  :=
-mflags-$(CONFIG_X86_64) :=
 
 KBUILD_CFLAGS += $(mflags-y)
 KBUILD_AFLAGS += $(mflags-y)

@@ -160,7 +159,7 @@ KBUILD_AFLAGS += $(mflags-y)
 # Kernel objects
 
 head-y := arch/x86/kernel/head_$(BITS).o
-head-$(CONFIG_X86_64) += arch/x86/kernel/head64.o
+head-y += arch/x86/kernel/head$(BITS).o
 head-y += arch/x86/kernel/init_task.o
 
 libs-y  += arch/x86/lib/
@@ -30,7 +30,7 @@ subdir-		:= compressed
 
 setup-y		+= a20.o cmdline.o copy.o cpu.o cpucheck.o edd.o
 setup-y		+= header.o main.o mca.o memory.o pm.o pmjump.o
-setup-y		+= printf.o string.o tty.o video.o version.o
+setup-y		+= printf.o string.o tty.o video.o video-mode.o version.o
 setup-$(CONFIG_X86_APM_BOOT) += apm.o
 setup-$(CONFIG_X86_VOYAGER) += voyager.o
 

@@ -94,6 +94,20 @@ $(obj)/vmlinux.bin: $(obj)/compressed/vmlinux FORCE
 
 SETUP_OBJS = $(addprefix $(obj)/,$(setup-y))
 
+sed-offsets := -e 's/^00*/0/' \
+        -e 's/^\([0-9a-fA-F]*\) . \(input_data\|input_data_end\)$$/\#define \2 0x\1/p'
+
+quiet_cmd_offsets = OFFSETS $@
+      cmd_offsets = $(NM) $< | sed -n $(sed-offsets) > $@
+
+$(obj)/offsets.h: $(obj)/compressed/vmlinux FORCE
+	$(call if_changed,offsets)
+
+targets += offsets.h
+
+AFLAGS_header.o += -I$(obj)
+$(obj)/header.o: $(obj)/offsets.h
+
 LDFLAGS_setup.elf	:= -T
 $(obj)/setup.elf: $(src)/setup.ld $(SETUP_OBJS) FORCE
 	$(call if_changed,ld)
@@ -286,6 +286,11 @@ int getchar_timeout(void);
 /* video.c */
 void set_video(void);
 
+/* video-mode.c */
+int set_mode(u16 mode);
+int mode_defined(u16 mode);
+void probe_cards(int unsafe);
+
 /* video-vesa.c */
 void vesa_store_edid(void);
 
@@ -22,7 +22,7 @@ $(obj)/vmlinux: $(src)/vmlinux_$(BITS).lds $(obj)/head_$(BITS).o $(obj)/misc.o $
 	$(call if_changed,ld)
 	@:
 
-OBJCOPYFLAGS_vmlinux.bin := -O binary -R .note -R .comment -S
+OBJCOPYFLAGS_vmlinux.bin := -R .comment -S
 $(obj)/vmlinux.bin: vmlinux FORCE
 	$(call if_changed,objcopy)
 
@@ -15,6 +15,10 @@
  * we just keep it from happening
  */
 #undef CONFIG_PARAVIRT
+#ifdef CONFIG_X86_32
+#define _ASM_DESC_H_ 1
+#endif
+
 #ifdef CONFIG_X86_64
 #define _LINUX_STRING_H_ 1
 #define __LINUX_BITMAP_H 1

@@ -22,6 +26,7 @@
 
 #include <linux/linkage.h>
 #include <linux/screen_info.h>
+#include <linux/elf.h>
 #include <asm/io.h>
 #include <asm/page.h>
 #include <asm/boot.h>

@@ -53,8 +58,8 @@
  * 1 bit (last block flag)
  * 2 bits (block type)
  *
- * 1 block occurs every 32K -1 bytes or when there 50% compression has been achieved.
- * The smallest block type encoding is always used.
+ * 1 block occurs every 32K -1 bytes or when there 50% compression
+ * has been achieved. The smallest block type encoding is always used.
  *
  * stored:
  *   32 bits length in bytes.

@@ -90,9 +95,9 @@
  *
  * All of which is enough to compute an amount of extra data that is required
  * to be safe.  To avoid problems at the block level allocating 5 extra bytes
- * per 32767 bytes of data is sufficient.  To avoind problems internal to a block
- * adding an extra 32767 bytes (the worst case uncompressed block size) is
- * sufficient, to ensure that in the worst case the decompressed data for
+ * per 32767 bytes of data is sufficient.  To avoind problems internal to a
+ * block adding an extra 32767 bytes (the worst case uncompressed block size)
+ * is sufficient, to ensure that in the worst case the decompressed data for
  * block will stop the byte before the compressed data for a block begins.
  * To avoid problems with the compressed data's meta information an extra 18
  * bytes are needed.  Leading to the formula:

@@ -122,26 +127,34 @@ typedef unsigned char uch;
 typedef unsigned short ush;
 typedef unsigned long  ulg;
 
-#define WSIZE 0x80000000	/* Window size must be at least 32k,
-				 * and a power of two
-				 * We don't actually have a window just
-				 * a huge output buffer so I report
-				 * a 2G windows size, as that should
-				 * always be larger than our output buffer.
-				 */
-
-static uch *inbuf;	/* input buffer */
-static uch *window;	/* Sliding window buffer, (and final output buffer) */
-
-static unsigned insize;  /* valid bytes in inbuf */
-static unsigned inptr;   /* index of next byte to be processed in inbuf */
-static unsigned outcnt;  /* bytes in output buffer */
+/*
+ * Window size must be at least 32k, and a power of two.
+ * We don't actually have a window just a huge output buffer,
+ * so we report a 2G window size, as that should always be
+ * larger than our output buffer:
+ */
+#define WSIZE		0x80000000
+
+/* Input buffer: */
+static unsigned char	*inbuf;
+
+/* Sliding window buffer (and final output buffer): */
+static unsigned char	*window;
+
+/* Valid bytes in inbuf: */
+static unsigned		insize;
+
+/* Index of next byte to be processed in inbuf: */
+static unsigned		inptr;
+
+/* Bytes in output buffer: */
+static unsigned		outcnt;
 
 /* gzip flag byte */
 #define ASCII_FLAG	0x01 /* bit 0 set: file probably ASCII text */
-#define CONTINUATION	0x02 /* bit 1 set: continuation of multi-part gzip file */
+#define CONTINUATION	0x02 /* bit 1 set: continuation of multi-part gz file */
 #define EXTRA_FIELD	0x04 /* bit 2 set: extra field present */
-#define ORIG_NAME	0x08 /* bit 3 set: original file name present */
+#define ORIG_NAM	0x08 /* bit 3 set: original file name present */
 #define COMMENT	0x10 /* bit 4 set: file comment present */
 #define ENCRYPTED	0x20 /* bit 5 set: file is encrypted */
 #define RESERVED	0xC0 /* bit 6, 7:  reserved */

@@ -150,12 +163,12 @@ static unsigned outcnt;  /* bytes in output buffer */
 
 /* Diagnostic functions */
 #ifdef DEBUG
-#  define Assert(cond,msg) {if(!(cond)) error(msg);}
-#  define Trace(x) fprintf x
-#  define Tracev(x) {if (verbose) fprintf x ;}
-#  define Tracevv(x) {if (verbose>1) fprintf x ;}
-#  define Tracec(c,x) {if (verbose && (c)) fprintf x ;}
-#  define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;}
+#  define Assert(cond, msg) do { if (!(cond)) error(msg); } while (0)
+#  define Trace(x)	do { fprintf x; } while (0)
+#  define Tracev(x)	do { if (verbose) fprintf x ; } while (0)
+#  define Tracevv(x)	do { if (verbose > 1) fprintf x ; } while (0)
+#  define Tracec(c, x)	do { if (verbose && (c)) fprintf x ; } while (0)
+#  define Tracecv(c, x)	do { if (verbose > 1 && (c)) fprintf x ; } while (0)
 #else
 #  define Assert(cond, msg)
 #  define Trace(x)

@@ -185,7 +198,7 @@ static unsigned char *real_mode; /* Pointer to real-mode data */
 extern unsigned char input_data[];
 extern int input_len;
 
-static long bytes_out = 0;
+static long bytes_out;
 
 static void *malloc(int size);
 static void free(void *where);

@@ -210,7 +223,7 @@ static memptr free_mem_end_ptr;
 #define HEAP_SIZE	0x4000
 #endif
 
-static char *vidmem = (char *)0xb8000;
+static char *vidmem;
 static int vidport;
 static int lines, cols;
 

@@ -224,8 +237,10 @@ static void *malloc(int size)
 {
 	void *p;
 
-	if (size <0) error("Malloc error");
-	if (free_mem_ptr <= 0) error("Memory error");
+	if (size < 0)
+		error("Malloc error");
+	if (free_mem_ptr <= 0)
+		error("Memory error");
 
 	free_mem_ptr = (free_mem_ptr + 3) & ~3;	/* Align */
 

@@ -341,9 +356,9 @@ static void flush_window(void)
 	/* With my window equal to my output buffer
 	 * I only need to compute the crc here.
 	 */
-	ulg c = crc;         /* temporary variable */
+	unsigned long c = crc;         /* temporary variable */
 	unsigned n;
-	uch *in, ch;
+	unsigned char *in, ch;
 
 	in = window;
 	for (n = 0; n < outcnt; n++) {

@@ -351,7 +366,7 @@ static void flush_window(void)
 		c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
 	}
 	crc = c;
-	bytes_out += (ulg)outcnt;
+	bytes_out += (unsigned long)outcnt;
 	outcnt = 0;
 }
 

@@ -365,9 +380,59 @@ static void error(char *x)
 	asm("hlt");
 }
 
+static void parse_elf(void *output)
+{
+#ifdef CONFIG_X86_64
+	Elf64_Ehdr ehdr;
+	Elf64_Phdr *phdrs, *phdr;
+#else
+	Elf32_Ehdr ehdr;
+	Elf32_Phdr *phdrs, *phdr;
+#endif
+	void *dest;
+	int i;
+
+	memcpy(&ehdr, output, sizeof(ehdr));
+	if (ehdr.e_ident[EI_MAG0] != ELFMAG0 ||
+	   ehdr.e_ident[EI_MAG1] != ELFMAG1 ||
+	   ehdr.e_ident[EI_MAG2] != ELFMAG2 ||
+	   ehdr.e_ident[EI_MAG3] != ELFMAG3) {
+		error("Kernel is not a valid ELF file");
+		return;
+	}
+
+	putstr("Parsing ELF... ");
+
+	phdrs = malloc(sizeof(*phdrs) * ehdr.e_phnum);
+	if (!phdrs)
+		error("Failed to allocate space for phdrs");
+
+	memcpy(phdrs, output + ehdr.e_phoff, sizeof(*phdrs) * ehdr.e_phnum);
+
+	for (i = 0; i < ehdr.e_phnum; i++) {
+		phdr = &phdrs[i];
+
+		switch (phdr->p_type) {
+		case PT_LOAD:
+#ifdef CONFIG_RELOCATABLE
+			dest = output;
+			dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
+#else
+			dest = (void *)(phdr->p_paddr);
+#endif
+			memcpy(dest,
+			       output + phdr->p_offset,
+			       phdr->p_filesz);
+			break;
+		default: /* Ignore other PT_* */ break;
+		}
+	}
+}
+
 asmlinkage void decompress_kernel(void *rmode, memptr heap,
-				  uch *input_data, unsigned long input_len,
-				  uch *output)
+				  unsigned char *input_data,
+				  unsigned long input_len,
+				  unsigned char *output)
 {
 	real_mode = rmode;
 

@@ -390,9 +455,9 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
 	inptr = 0;
 
 #ifdef CONFIG_X86_64
-	if ((ulg)output & (__KERNEL_ALIGN - 1))
+	if ((unsigned long)output & (__KERNEL_ALIGN - 1))
 		error("Destination address not 2M aligned");
-	if ((ulg)output >= 0xffffffffffUL)
+	if ((unsigned long)output >= 0xffffffffffUL)
 		error("Destination address too large");
 #else
 	if ((u32)output & (CONFIG_PHYSICAL_ALIGN - 1))

@@ -408,6 +473,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
 	makecrc();
 	putstr("\nDecompressing Linux... ");
 	gunzip();
+	parse_elf(output);
 	putstr("done.\nBooting the kernel.\n");
 	return;
 }
@@ -22,6 +22,7 @@
 #include <asm/page.h>
 #include <asm/setup.h>
 #include "boot.h"
+#include "offsets.h"
 
 SETUPSECTS	= 4			/* default nr of setup-sectors */
 BOOTSEG		= 0x07C0		/* original address of boot-sector */

@@ -119,7 +120,7 @@ _start:
 	# Part 2 of the header, from the old setup.S
 
 		.ascii	"HdrS"		# header signature
-		.word	0x0207		# header version number (>= 0x0105)
+		.word	0x0208		# header version number (>= 0x0105)
 					# or else old loadlin-1.5 will fail)
 		.globl realmode_swtch
 realmode_swtch:	.word	0, 0		# default_switch, SETUPSEG

@@ -223,6 +224,9 @@ hardware_subarch:	.long 0			# subarchitecture, added with 2.07
 
 hardware_subarch_data:	.quad 0
 
+payload_offset:		.long input_data
+payload_length:		.long input_data_end-input_data
+
 # End of setup header #####################################################
 
 	.section ".inittext", "ax"
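A user-space sketch of how the two new header fields can be read back out of a
built image (hypothetical code, not part of this patch): the 0x248/0x24c
offsets and the 2.08 version check come from the boot.txt additions above, and
0x1f1 (setup_sects) and 0x206 (protocol version) are the long-standing boot
protocol offsets; file handling and names are illustrative only.

	#include <stdio.h>
	#include <stdint.h>

	/* Read a little-endian 32-bit value at a given file offset. */
	static uint32_t rd32(FILE *f, long off)
	{
		uint8_t b[4] = { 0, 0, 0, 0 };

		fseek(f, off, SEEK_SET);
		fread(b, 1, 4, f);
		return b[0] | (b[1] << 8) |
		       ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
	}

	int main(int argc, char **argv)
	{
		FILE *f;
		uint16_t version;
		uint8_t setup_sects;

		if (argc < 2 || !(f = fopen(argv[1], "rb")))
			return 1;

		fseek(f, 0x1f1, SEEK_SET);		/* setup_sects */
		fread(&setup_sects, 1, 1, f);
		if (setup_sects == 0)			/* note (1) in boot.txt */
			setup_sects = 4;

		version = (uint16_t)rd32(f, 0x206);	/* boot protocol version */
		if (version >= 0x0208)
			printf("payload at file offset %u, length %u\n",
			       (unsigned)((setup_sects + 1) * 512 + rd32(f, 0x248)),
			       (unsigned)rd32(f, 0x24c));
		else
			printf("no payload fields (protocol 0x%04x)\n", version);

		fclose(f);
		return 0;
	}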
@@ -50,6 +50,75 @@ typedef unsigned long u32;
 u8 buf[SETUP_SECT_MAX*512];
 int is_big_kernel;
 
+/*----------------------------------------------------------------------*/
+
+static const u32 crctab32[] = {
+	0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419,
+	0x706af48f, 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4,
+	0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07,
+	0x90bf1d91, 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
+	0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, 0x136c9856,
+	0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
+	0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4,
+	0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
+	0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3,
+	0x45df5c75, 0xdcd60dcf, 0xabd13d59, 0x26d930ac, 0x51de003a,
+	0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599,
+	0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
+	0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190,
+	0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f,
+	0x9fbfe4a5, 0xe8b8d433, 0x7807c9a2, 0x0f00f934, 0x9609a88e,
+	0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
+	0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed,
+	0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
+	0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3,
+	0xfbd44c65, 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
+	0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a,
+	0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5,
+	0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa, 0xbe0b1010,
+	0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
+	0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17,
+	0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6,
+	0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615,
+	0x73dc1683, 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
+	0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, 0xf00f9344,
+	0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
+	0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a,
+	0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
+	0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1,
+	0xa6bc5767, 0x3fb506dd, 0x48b2364b, 0xd80d2bda, 0xaf0a1b4c,
+	0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef,
+	0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
+	0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe,
+	0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31,
+	0x2cd99e8b, 0x5bdeae1d, 0x9b64c2b0, 0xec63f226, 0x756aa39c,
+	0x026d930a, 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
+	0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b,
+	0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
+	0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1,
+	0x18b74777, 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
+	0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, 0xa00ae278,
+	0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7,
+	0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc, 0x40df0b66,
+	0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
+	0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605,
+	0xcdd70693, 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8,
+	0x5d681b02, 0x2a6f2b94, 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b,
+	0x2d02ef8d
+};
+
+static u32 partial_crc32_one(u8 c, u32 crc)
+{
+	return crctab32[(crc ^ c) & 0xff] ^ (crc >> 8);
+}
+
+static u32 partial_crc32(const u8 *s, int len, u32 crc)
+{
+	while (len--)
+		crc = partial_crc32_one(*s++, crc);
+	return crc;
+}
+
 static void die(const char * str, ...)
 {
 	va_list args;

@@ -74,6 +143,7 @@ int main(int argc, char ** argv)
 	FILE *file;
 	int fd;
 	void *kernel;
+	u32 crc = 0xffffffffUL;
 
 	if (argc > 2 && !strcmp(argv[1], "-b"))
 	  {

@@ -144,7 +214,8 @@ int main(int argc, char ** argv)
 	kernel = mmap(NULL, sz, PROT_READ, MAP_SHARED, fd, 0);
 	if (kernel == MAP_FAILED)
 		die("Unable to mmap '%s': %m", argv[2]);
-	sys_size = (sz + 15) / 16;
+	/* Number of 16-byte paragraphs, including space for a 4-byte CRC */
+	sys_size = (sz + 15 + 4) / 16;
 	if (!is_big_kernel && sys_size > DEF_SYSSIZE)
 		die("System is too big. Try using bzImage or modules.");
 

@@ -155,12 +226,27 @@ int main(int argc, char ** argv)
 	buf[0x1f6] = sys_size >> 16;
 	buf[0x1f7] = sys_size >> 24;
 
+	crc = partial_crc32(buf, i, crc);
 	if (fwrite(buf, 1, i, stdout) != i)
 		die("Writing setup failed");
 
 	/* Copy the kernel code */
+	crc = partial_crc32(kernel, sz, crc);
 	if (fwrite(kernel, 1, sz, stdout) != sz)
 		die("Writing kernel failed");
 
+	/* Add padding leaving 4 bytes for the checksum */
+	while (sz++ < (sys_size*16) - 4) {
+		crc = partial_crc32_one('\0', crc);
+		if (fwrite("\0", 1, 1, stdout) != 1)
+			die("Writing padding failed");
+	}
+
+	/* Write the CRC */
+	fprintf(stderr, "CRC %lx\n", crc);
+	if (fwrite(&crc, 1, 4, stdout) != 4)
+		die("Writing CRC failed");
+
 	close(fd);
 
 	/* Everything is OK */
@@ -50,6 +50,7 @@ static int set_bios_mode(u8 mode)
 	if (new_mode == mode)
 		return 0;	/* Mode change OK */
 
+#ifndef _WAKEUP
 	if (new_mode != boot_params.screen_info.orig_video_mode) {
 		/* Mode setting failed, but we didn't end up where we
 		   started.  That's bad.  Try to revert to the original

@@ -59,13 +60,18 @@ static int set_bios_mode(u8 mode)
 		    : "+a" (ax)
 		    : : "ebx", "ecx", "edx", "esi", "edi");
 	}
+#endif
 	return -1;
 }
 
 static int bios_probe(void)
 {
 	u8 mode;
+#ifdef _WAKEUP
+	u8 saved_mode = 0x03;
+#else
 	u8 saved_mode = boot_params.screen_info.orig_video_mode;
+#endif
 	u16 crtc;
 	struct mode_info *mi;
 	int nmodes = 0;
arch/x86/boot/video-mode.c (new file, 173 lines):

/* -*- linux-c -*- ------------------------------------------------------- *
 *
 *   Copyright (C) 1991, 1992 Linus Torvalds
 *   Copyright 2007-2008 rPath, Inc. - All Rights Reserved
 *
 *   This file is part of the Linux kernel, and is made available under
 *   the terms of the GNU General Public License version 2.
 *
 * ----------------------------------------------------------------------- */

/*
 * arch/i386/boot/video-mode.c
 *
 * Set the video mode.  This is separated out into a different
 * file in order to be shared with the ACPI wakeup code.
 */

#include "boot.h"
#include "video.h"
#include "vesa.h"

/*
 * Common variables
 */
int adapter;		/* 0=CGA/MDA/HGC, 1=EGA, 2=VGA+ */
u16 video_segment;
int force_x, force_y;	/* Don't query the BIOS for cols/rows */

int do_restore;		/* Screen contents changed during mode flip */
int graphic_mode;	/* Graphic mode with linear frame buffer */

/* Probe the video drivers and have them generate their mode lists. */
void probe_cards(int unsafe)
{
	struct card_info *card;
	static u8 probed[2];

	if (probed[unsafe])
		return;

	probed[unsafe] = 1;

	for (card = video_cards; card < video_cards_end; card++) {
		if (card->unsafe == unsafe) {
			if (card->probe)
				card->nmodes = card->probe();
			else
				card->nmodes = 0;
		}
	}
}

/* Test if a mode is defined */
int mode_defined(u16 mode)
{
	struct card_info *card;
	struct mode_info *mi;
	int i;

	for (card = video_cards; card < video_cards_end; card++) {
		mi = card->modes;
		for (i = 0; i < card->nmodes; i++, mi++) {
			if (mi->mode == mode)
				return 1;
		}
	}

	return 0;
}

/* Set mode (without recalc) */
static int raw_set_mode(u16 mode, u16 *real_mode)
{
	int nmode, i;
	struct card_info *card;
	struct mode_info *mi;

	/* Drop the recalc bit if set */
	mode &= ~VIDEO_RECALC;

	/* Scan for mode based on fixed ID, position, or resolution */
	nmode = 0;
	for (card = video_cards; card < video_cards_end; card++) {
		mi = card->modes;
		for (i = 0; i < card->nmodes; i++, mi++) {
			int visible = mi->x || mi->y;

			if ((mode == nmode && visible) ||
			    mode == mi->mode ||
			    mode == (mi->y << 8)+mi->x) {
				*real_mode = mi->mode;
				return card->set_mode(mi);
			}

			if (visible)
				nmode++;
		}
	}

	/* Nothing found?  Is it an "exceptional" (unprobed) mode? */
	for (card = video_cards; card < video_cards_end; card++) {
		if (mode >= card->xmode_first &&
		    mode < card->xmode_first+card->xmode_n) {
			struct mode_info mix;
			*real_mode = mix.mode = mode;
			mix.x = mix.y = 0;
			return card->set_mode(&mix);
		}
	}

	/* Otherwise, failure... */
	return -1;
}

/*
 * Recalculate the vertical video cutoff (hack!)
 */
static void vga_recalc_vertical(void)
{
	unsigned int font_size, rows;
	u16 crtc;
	u8 pt, ov;

	set_fs(0);
	font_size = rdfs8(0x485); /* BIOS: font size (pixels) */
	rows = force_y ? force_y : rdfs8(0x484)+1; /* Text rows */

	rows *= font_size;	/* Visible scan lines */
	rows--;			/* ... minus one */

	crtc = vga_crtc();

	pt = in_idx(crtc, 0x11);
	pt &= ~0x80;		/* Unlock CR0-7 */
	out_idx(pt, crtc, 0x11);

	out_idx((u8)rows, crtc, 0x12); /* Lower height register */

	ov = in_idx(crtc, 0x07); /* Overflow register */
	ov &= 0xbd;
	ov |= (rows >> (8-1)) & 0x02;
	ov |= (rows >> (9-6)) & 0x40;
	out_idx(ov, crtc, 0x07);
}

/* Set mode (with recalc if specified) */
int set_mode(u16 mode)
{
	int rv;
	u16 real_mode;

	/* Very special mode numbers... */
	if (mode == VIDEO_CURRENT_MODE)
		return 0;	/* Nothing to do... */
	else if (mode == NORMAL_VGA)
		mode = VIDEO_80x25;
	else if (mode == EXTENDED_VGA)
		mode = VIDEO_8POINT;

	rv = raw_set_mode(mode, &real_mode);
	if (rv)
		return rv;

	if (mode & VIDEO_RECALC)
		vga_recalc_vertical();

	/* Save the canonical mode number for the kernel, not
	   an alias, size specification or menu position */
#ifndef _WAKEUP
	boot_params.hdr.vid_mode = real_mode;
#endif
	return 0;
}
@@ -24,7 +24,11 @@ static struct vesa_mode_info vminfo;
 
 __videocard video_vesa;
 
+#ifndef _WAKEUP
 static void vesa_store_mode_params_graphics(void);
+#else /* _WAKEUP */
+static inline void vesa_store_mode_params_graphics(void) {}
+#endif /* _WAKEUP */
 
 static int vesa_probe(void)
 {

@@ -165,6 +169,8 @@ static int vesa_set_mode(struct mode_info *mode)
 }
 
 
+#ifndef _WAKEUP
+
 /* Switch DAC to 8-bit mode */
 static void vesa_dac_set_8bits(void)
 {

@@ -288,6 +294,8 @@ void vesa_store_edid(void)
 #endif /* CONFIG_FIRMWARE_EDID */
 }
 
+#endif /* not _WAKEUP */
+
 __videocard video_vesa =
 {
 	.card_name	= "VESA",
@@ -210,6 +210,8 @@ static int vga_set_mode(struct mode_info *mode)
  */
 static int vga_probe(void)
 {
+	u16 ega_bx;
+
 	static const char *card_name[] = {
 		"CGA/MDA/HGC", "EGA", "VGA"
 	};

@@ -226,12 +228,16 @@ static int vga_probe(void)
 	u8 vga_flag;
 
 	asm(INT10
-	    : "=b" (boot_params.screen_info.orig_video_ega_bx)
+	    : "=b" (ega_bx)
 	    : "a" (0x1200), "b" (0x10) /* Check EGA/VGA */
 	    : "ecx", "edx", "esi", "edi");
 
+#ifndef _WAKEUP
+	boot_params.screen_info.orig_video_ega_bx = ega_bx;
+#endif
+
 	/* If we have MDA/CGA/HGC then BL will be unchanged at 0x10 */
-	if ((u8)boot_params.screen_info.orig_video_ega_bx != 0x10) {
+	if ((u8)ega_bx != 0x10) {
 		/* EGA/VGA */
 		asm(INT10
 		    : "=a" (vga_flag)

@@ -240,7 +246,9 @@ static int vga_probe(void)
 
 	if (vga_flag == 0x1a) {
 		adapter = ADAPTER_VGA;
+#ifndef _WAKEUP
 		boot_params.screen_info.orig_video_isVGA = 1;
+#endif
 	} else {
 		adapter = ADAPTER_EGA;
 	}
@@ -18,21 +18,6 @@
 #include "video.h"
 #include "vesa.h"
 
-/*
- * Mode list variables
- */
-static struct card_info cards[];	/* List of cards to probe for */
-
-/*
- * Common variables
- */
-int adapter;			/* 0=CGA/MDA/HGC, 1=EGA, 2=VGA+ */
-u16 video_segment;
-int force_x, force_y;	/* Don't query the BIOS for cols/rows */
-
-int do_restore = 0;	/* Screen contents changed during mode flip */
-int graphic_mode;	/* Graphic mode with linear frame buffer */
-
 static void store_cursor_position(void)
 {
 	u16 curpos;

@@ -107,147 +92,6 @@ static void store_mode_params(void)
 	boot_params.screen_info.orig_video_lines = y;
 }
 
-/* Probe the video drivers and have them generate their mode lists. */
-static void probe_cards(int unsafe)
-{
-	struct card_info *card;
-	static u8 probed[2];
-
-	if (probed[unsafe])
-		return;
-
-	probed[unsafe] = 1;
-
-	for (card = video_cards; card < video_cards_end; card++) {
-		if (card->unsafe == unsafe) {
-			if (card->probe)
-				card->nmodes = card->probe();
-			else
-				card->nmodes = 0;
-		}
-	}
-}
-
-/* Test if a mode is defined */
-int mode_defined(u16 mode)
-{
-	struct card_info *card;
-	struct mode_info *mi;
-	int i;
-
-	for (card = video_cards; card < video_cards_end; card++) {
-		mi = card->modes;
-		for (i = 0; i < card->nmodes; i++, mi++) {
-			if (mi->mode == mode)
-				return 1;
-		}
-	}
-
-	return 0;
-}
-
-/* Set mode (without recalc) */
-static int raw_set_mode(u16 mode, u16 *real_mode)
-{
-	int nmode, i;
-	struct card_info *card;
-	struct mode_info *mi;
-
-	/* Drop the recalc bit if set */
-	mode &= ~VIDEO_RECALC;
-
-	/* Scan for mode based on fixed ID, position, or resolution */
-	nmode = 0;
-	for (card = video_cards; card < video_cards_end; card++) {
-		mi = card->modes;
-		for (i = 0; i < card->nmodes; i++, mi++) {
-			int visible = mi->x || mi->y;
-
-			if ((mode == nmode && visible) ||
-			    mode == mi->mode ||
-			    mode == (mi->y << 8)+mi->x) {
-				*real_mode = mi->mode;
-				return card->set_mode(mi);
-			}
-
-			if (visible)
-				nmode++;
-		}
-	}
-
-	/* Nothing found?  Is it an "exceptional" (unprobed) mode? */
-	for (card = video_cards; card < video_cards_end; card++) {
-		if (mode >= card->xmode_first &&
-		    mode < card->xmode_first+card->xmode_n) {
-			struct mode_info mix;
-			*real_mode = mix.mode = mode;
-			mix.x = mix.y = 0;
-			return card->set_mode(&mix);
-		}
-	}
-
-	/* Otherwise, failure... */
-	return -1;
-}
-
-/*
- * Recalculate the vertical video cutoff (hack!)
- */
-static void vga_recalc_vertical(void)
-{
-	unsigned int font_size, rows;
-	u16 crtc;
-	u8 pt, ov;
-
-	set_fs(0);
-	font_size = rdfs8(0x485); /* BIOS: font size (pixels) */
-	rows = force_y ? force_y : rdfs8(0x484)+1; /* Text rows */
-
-	rows *= font_size;	/* Visible scan lines */
-	rows--;			/* ... minus one */
-
-	crtc = vga_crtc();
-
-	pt = in_idx(crtc, 0x11);
-	pt &= ~0x80;		/* Unlock CR0-7 */
-	out_idx(pt, crtc, 0x11);
-
-	out_idx((u8)rows, crtc, 0x12); /* Lower height register */
-
-	ov = in_idx(crtc, 0x07); /* Overflow register */
-	ov &= 0xbd;
-	ov |= (rows >> (8-1)) & 0x02;
-	ov |= (rows >> (9-6)) & 0x40;
-	out_idx(ov, crtc, 0x07);
-}
-
-/* Set mode (with recalc if specified) */
-static int set_mode(u16 mode)
-{
-	int rv;
-	u16 real_mode;
-
-	/* Very special mode numbers... */
-	if (mode == VIDEO_CURRENT_MODE)
-		return 0;	/* Nothing to do... */
-	else if (mode == NORMAL_VGA)
-		mode = VIDEO_80x25;
-	else if (mode == EXTENDED_VGA)
-		mode = VIDEO_8POINT;
-
-	rv = raw_set_mode(mode, &real_mode);
-	if (rv)
-		return rv;
-
-	if (mode & VIDEO_RECALC)
-		vga_recalc_vertical();
-
-	/* Save the canonical mode number for the kernel, not
-	   an alias, size specification or menu position */
-	boot_params.hdr.vid_mode = real_mode;
-	return 0;
-}
-
 static unsigned int get_entry(void)
 {
 	char entry_buf[4];

@@ -486,6 +330,7 @@ void set_video(void)
 		printf("Undefined video mode number: %x\n", mode);
 		mode = ASK_VGA;
 	}
+	boot_params.hdr.vid_mode = mode;
 	vesa_store_edid();
 	store_mode_params();
 
@ -468,7 +468,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
 		restorer = ka->sa.sa_restorer;
 	} else {
 		/* Return stub is in 32bit vsyscall page */
-		if (current->binfmt->hasvdso)
+		if (current->mm->context.vdso)
 			restorer = VDSO32_SYMBOL(current->mm->context.vdso,
 						 sigreturn);
 		else
@ -162,12 +162,14 @@ sysenter_tracesys:
 	SAVE_REST
 	CLEAR_RREGS
 	movq	%r9,R9(%rsp)
-	movq	$-ENOSYS,RAX(%rsp)	/* really needed? */
+	movq	$-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
 	movq	%rsp,%rdi        /* &pt_regs -> arg1 */
 	call	syscall_trace_enter
 	LOAD_ARGS32 ARGOFFSET  /* reload args from stack in case ptrace changed it */
 	RESTORE_REST
 	xchgl	%ebp,%r9d
+	cmpl	$(IA32_NR_syscalls-1),%eax
+	ja	int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
 	jmp	sysenter_do_call
 	CFI_ENDPROC
 ENDPROC(ia32_sysenter_target)
@ -261,13 +263,15 @@ cstar_tracesys:
 	SAVE_REST
 	CLEAR_RREGS
 	movq	%r9,R9(%rsp)
-	movq	$-ENOSYS,RAX(%rsp)	/* really needed? */
+	movq	$-ENOSYS,RAX(%rsp)	/* ptrace can change this for a bad syscall */
 	movq	%rsp,%rdi        /* &pt_regs -> arg1 */
 	call	syscall_trace_enter
 	LOAD_ARGS32 ARGOFFSET  /* reload args from stack in case ptrace changed it */
 	RESTORE_REST
 	xchgl	%ebp,%r9d
 	movl	RSP-ARGOFFSET(%rsp), %r8d
+	cmpl	$(IA32_NR_syscalls-1),%eax
+	ja	int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
 	jmp	cstar_do_call
 END(ia32_cstar_target)
 
@ -325,7 +329,7 @@ ENTRY(ia32_syscall)
 	jnz ia32_tracesys
 ia32_do_syscall:
 	cmpl $(IA32_NR_syscalls-1),%eax
-	ja  ia32_badsys
+	ja  int_ret_from_sys_call	/* ia32_tracesys has set RAX(%rsp) */
 	IA32_ARG_FIXUP
 	call *ia32_sys_call_table(,%rax,8) # xxx: rip relative
 ia32_sysret:
@ -335,7 +339,7 @@ ia32_sysret:
 ia32_tracesys:
 	SAVE_REST
 	CLEAR_RREGS
-	movq $-ENOSYS,RAX(%rsp)	/* really needed? */
+	movq $-ENOSYS,RAX(%rsp)	/* ptrace can change this for a bad syscall */
 	movq %rsp,%rdi        /* &pt_regs -> arg1 */
 	call syscall_trace_enter
 	LOAD_ARGS32 ARGOFFSET  /* reload args from stack in case ptrace changed it */
@ -26,51 +26,27 @@
 #include <linux/file.h>
 #include <linux/signal.h>
 #include <linux/syscalls.h>
-#include <linux/resource.h>
 #include <linux/times.h>
 #include <linux/utsname.h>
-#include <linux/smp.h>
 #include <linux/smp_lock.h>
-#include <linux/sem.h>
-#include <linux/msg.h>
 #include <linux/mm.h>
-#include <linux/shm.h>
-#include <linux/slab.h>
 #include <linux/uio.h>
-#include <linux/nfs_fs.h>
-#include <linux/quota.h>
-#include <linux/module.h>
-#include <linux/sunrpc/svc.h>
-#include <linux/nfsd/nfsd.h>
-#include <linux/nfsd/cache.h>
-#include <linux/nfsd/xdr.h>
-#include <linux/nfsd/syscall.h>
 #include <linux/poll.h>
 #include <linux/personality.h>
 #include <linux/stat.h>
-#include <linux/ipc.h>
 #include <linux/rwsem.h>
-#include <linux/binfmts.h>
-#include <linux/init.h>
-#include <linux/aio_abi.h>
-#include <linux/aio.h>
 #include <linux/compat.h>
 #include <linux/vfs.h>
 #include <linux/ptrace.h>
 #include <linux/highuid.h>
-#include <linux/vmalloc.h>
-#include <linux/fsnotify.h>
 #include <linux/sysctl.h>
 #include <asm/mman.h>
 #include <asm/types.h>
 #include <asm/uaccess.h>
 #include <asm/semaphore.h>
 #include <asm/atomic.h>
-#include <asm/ldt.h>
-
-#include <net/scm.h>
-#include <net/sock.h>
 #include <asm/ia32.h>
+#include <asm/vgtod.h>
 
 #define AA(__x)		((unsigned long)(__x))
 
@ -804,11 +780,6 @@ asmlinkage long sys32_execve(char __user *name, compat_uptr_t __user *argv,
 	if (IS_ERR(filename))
 		return error;
 	error = compat_do_execve(filename, argv, envp, regs);
-	if (error == 0) {
-		task_lock(current);
-		current->ptrace &= ~PT_DTRACE;
-		task_unlock(current);
-	}
 	putname(filename);
 	return error;
 }
@ -2,8 +2,7 @@
 # Makefile for the linux kernel.
 #
 
-extra-y                := head_$(BITS).o init_task.o vmlinux.lds
-extra-$(CONFIG_X86_64) += head64.o
+extra-y                := head_$(BITS).o head$(BITS).o init_task.o vmlinux.lds
 
 CPPFLAGS_vmlinux.lds += -U$(UTS_MACHINE)
 
@ -19,7 +18,7 @@ CFLAGS_tsc_64.o		:= $(nostackp)
 obj-y			:= process_$(BITS).o signal_$(BITS).o entry_$(BITS).o
 obj-y			+= traps_$(BITS).o irq_$(BITS).o
 obj-y			+= time_$(BITS).o ioport.o ldt.o
-obj-y			+= setup_$(BITS).o i8259_$(BITS).o
+obj-y			+= setup_$(BITS).o i8259_$(BITS).o setup.o
 obj-$(CONFIG_X86_32)	+= sys_i386_32.o i386_ksyms_32.o
 obj-$(CONFIG_X86_64)	+= sys_x86_64.o x8664_ksyms_64.o
 obj-$(CONFIG_X86_64)	+= syscall_64.o vsyscall_64.o setup64.o
@ -29,6 +28,7 @@ obj-y			+= alternative.o i8253.o
 obj-$(CONFIG_X86_64)	+= pci-nommu_64.o bugs_64.o
 obj-y			+= tsc_$(BITS).o io_delay.o rtc.o
 
+obj-$(CONFIG_X86_TRAMPOLINE)	+= trampoline.o
 obj-y				+= i387.o
 obj-y				+= ptrace.o
 obj-y				+= ds.o
@ -47,11 +47,12 @@ obj-$(CONFIG_MICROCODE)		+= microcode.o
 obj-$(CONFIG_PCI)		+= early-quirks.o
 apm-y				:= apm_32.o
 obj-$(CONFIG_APM)		+= apm.o
-obj-$(CONFIG_X86_SMP)		+= smp_$(BITS).o smpboot_$(BITS).o tsc_sync.o
-obj-$(CONFIG_X86_32_SMP)	+= smpcommon_32.o
-obj-$(CONFIG_X86_64_SMP)	+= smp_64.o smpboot_64.o tsc_sync.o
+obj-$(CONFIG_X86_SMP)		+= smp.o
+obj-$(CONFIG_X86_SMP)		+= smpboot.o tsc_sync.o ipi.o tlb_$(BITS).o
+obj-$(CONFIG_X86_32_SMP)	+= smpcommon.o
+obj-$(CONFIG_X86_64_SMP)	+= tsc_sync.o smpcommon.o
 obj-$(CONFIG_X86_TRAMPOLINE)	+= trampoline_$(BITS).o
-obj-$(CONFIG_X86_MPPARSE)	+= mpparse_$(BITS).o
+obj-$(CONFIG_X86_MPPARSE)	+= mpparse.o
 obj-$(CONFIG_X86_LOCAL_APIC)	+= apic_$(BITS).o nmi_$(BITS).o
 obj-$(CONFIG_X86_IO_APIC)	+= io_apic_$(BITS).o
 obj-$(CONFIG_X86_REBOOTFIXUPS)	+= reboot_fixups_32.o
@ -60,7 +61,7 @@ obj-$(CONFIG_KEXEC)		+= relocate_kernel_$(BITS).o crash.o
 obj-$(CONFIG_CRASH_DUMP)	+= crash_dump_$(BITS).o
 obj-$(CONFIG_X86_NUMAQ)		+= numaq_32.o
 obj-$(CONFIG_X86_SUMMIT_NUMA)	+= summit_32.o
-obj-$(CONFIG_X86_VSMP)		+= vsmp_64.o
+obj-y				+= vsmp_64.o
 obj-$(CONFIG_KPROBES)		+= kprobes.o
 obj-$(CONFIG_MODULES)		+= module_$(BITS).o
 obj-$(CONFIG_ACPI_SRAT) 	+= srat_32.o
@ -89,7 +90,7 @@ scx200-y			+= scx200_32.o
 ###
 # 64 bit specific files
 ifeq ($(CONFIG_X86_64),y)
-        obj-y				+= genapic_64.o genapic_flat_64.o
+        obj-y				+= genapic_64.o genapic_flat_64.o genx2apic_uv_x.o
         obj-$(CONFIG_X86_PM_TIMER)	+= pmtimer_64.o
         obj-$(CONFIG_AUDIT)		+= audit_64.o
 
@ -1,7 +1,14 @@
+subdir-				:= realmode
+
 obj-$(CONFIG_ACPI)		+= boot.o
-obj-$(CONFIG_ACPI_SLEEP)	+= sleep.o wakeup_$(BITS).o
+obj-$(CONFIG_ACPI_SLEEP)	+= sleep.o wakeup_rm.o wakeup_$(BITS).o
 
 ifneq ($(CONFIG_ACPI_PROCESSOR),)
 obj-y				+= cstate.o processor.o
 endif
 
+$(obj)/wakeup_rm.o:    $(obj)/realmode/wakeup.bin
+
+$(obj)/realmode/wakeup.bin: FORCE
+	$(Q)$(MAKE) $(build)=$(obj)/realmode $@
@ -39,6 +39,11 @@
 #include <asm/apic.h>
 #include <asm/io.h>
 #include <asm/mpspec.h>
+#include <asm/smp.h>
+
+#ifdef CONFIG_X86_LOCAL_APIC
+# include <mach_apic.h>
+#endif
 
 static int __initdata acpi_force = 0;
 
@ -52,9 +57,7 @@ EXPORT_SYMBOL(acpi_disabled);
 #ifdef	CONFIG_X86_64
 
 #include <asm/proto.h>
-
-static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) { return 0; }
-
+#include <asm/genapic.h>
 
 #else				/* X86 */
 
@ -111,7 +114,7 @@ char *__init __acpi_map_table(unsigned long phys_addr, unsigned long size)
 	if (!phys_addr || !size)
 		return NULL;
 
-	if (phys_addr+size <= (end_pfn_map << PAGE_SHIFT) + PAGE_SIZE)
+	if (phys_addr+size <= (max_pfn_mapped << PAGE_SHIFT) + PAGE_SIZE)
 		return __va(phys_addr);
 
 	return NULL;
@ -237,6 +240,16 @@ static int __init acpi_parse_madt(struct acpi_table_header *table)
 	return 0;
 }
 
+static void __cpuinit acpi_register_lapic(int id, u8 enabled)
+{
+	if (!enabled) {
+		++disabled_cpus;
+		return;
+	}
+
+	generic_processor_info(id, 0);
+}
+
 static int __init
 acpi_parse_lapic(struct acpi_subtable_header * header, const unsigned long end)
 {
@ -256,8 +269,26 @@ acpi_parse_lapic(struct acpi_subtable_header * header, const unsigned long end)
 	 * to not preallocating memory for all NR_CPUS
 	 * when we use CPU hotplug.
 	 */
-	mp_register_lapic(processor->id,	/* APIC ID */
-			  processor->lapic_flags & ACPI_MADT_ENABLED);	/* Enabled? */
+	acpi_register_lapic(processor->id,	/* APIC ID */
+			    processor->lapic_flags & ACPI_MADT_ENABLED);
+
+	return 0;
+}
+
+static int __init
+acpi_parse_sapic(struct acpi_subtable_header *header, const unsigned long end)
+{
+	struct acpi_madt_local_sapic *processor = NULL;
+
+	processor = (struct acpi_madt_local_sapic *)header;
+
+	if (BAD_MADT_ENTRY(processor, end))
+		return -EINVAL;
+
+	acpi_table_print_madt_entry(header);
+
+	acpi_register_lapic((processor->id << 8) | processor->eid,/* APIC ID */
+			    processor->lapic_flags & ACPI_MADT_ENABLED);
 
 	return 0;
 }
@ -300,6 +331,8 @@ acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long e
 
 #ifdef CONFIG_X86_IO_APIC
 
+struct mp_ioapic_routing mp_ioapic_routing[MAX_IO_APICS];
+
 static int __init
 acpi_parse_ioapic(struct acpi_subtable_header * header, const unsigned long end)
 {
@ -532,7 +565,7 @@ static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu)
 	buffer.pointer = NULL;
 
 	tmp_map = cpu_present_map;
-	mp_register_lapic(physid, lapic->lapic_flags & ACPI_MADT_ENABLED);
+	acpi_register_lapic(physid, lapic->lapic_flags & ACPI_MADT_ENABLED);
 
 	/*
 	 * If mp_register_lapic successfully generates a new logical cpu
@ -732,6 +765,16 @@ static int __init acpi_parse_fadt(struct acpi_table_header *table)
  * Parse LAPIC entries in MADT
  * returns 0 on success, < 0 on error
  */
+
+static void __init acpi_register_lapic_address(unsigned long address)
+{
+	mp_lapic_addr = address;
+
+	set_fixmap_nocache(FIX_APIC_BASE, address);
+	if (boot_cpu_physical_apicid == -1U)
+		boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id());
+}
+
 static int __init acpi_parse_madt_lapic_entries(void)
 {
 	int count;
@ -753,10 +796,14 @@ static int __init acpi_parse_madt_lapic_entries(void)
 		return count;
 	}
 
-	mp_register_lapic_address(acpi_lapic_addr);
+	acpi_register_lapic_address(acpi_lapic_addr);
+
+	count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC,
+				      acpi_parse_sapic, MAX_APICS);
 
-	count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC, acpi_parse_lapic,
-				      MAX_APICS);
+	if (!count)
+		count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC,
+					      acpi_parse_lapic, MAX_APICS);
 	if (!count) {
 		printk(KERN_ERR PREFIX "No LAPIC entries present\n");
 		/* TBD: Cleanup to allow fallback to MPS */
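The new acpi_parse_sapic() above folds the SAPIC id/eid pair into one APIC ID with (id << 8) | eid. A quick stand-alone illustration of that composition (the sample values are made up):

#include <stdio.h>

/* Illustration only: compose an APIC ID the way acpi_parse_sapic() above
 * does from a SAPIC id/eid pair.  The example values are invented. */
static unsigned int sapic_apic_id(unsigned char id, unsigned char eid)
{
	return ((unsigned int)id << 8) | eid;
}

int main(void)
{
	printf("0x%04x\n", sapic_apic_id(0x02, 0x01));	/* prints 0x0201 */
	return 0;
}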
arch/x86/kernel/acpi/realmode/Makefile (new file, 57 lines)
@ -0,0 +1,57 @@
#
# arch/x86/kernel/acpi/realmode/Makefile
#
# This file is subject to the terms and conditions of the GNU General Public
# License.  See the file "COPYING" in the main directory of this archive
# for more details.
#

targets		:= wakeup.bin wakeup.elf

wakeup-y	+= wakeup.o wakemain.o video-mode.o copy.o

# The link order of the video-*.o modules can matter.  In particular,
# video-vga.o *must* be listed first, followed by video-vesa.o.
# Hardware-specific drivers should follow in the order they should be
# probed, and video-bios.o should typically be last.
wakeup-y	+= video-vga.o
wakeup-y	+= video-vesa.o
wakeup-y	+= video-bios.o

targets		+= $(wakeup-y)

bootsrc		:= $(src)/../../../boot

# ---------------------------------------------------------------------------

# How to compile the 16-bit code.  Note we always compile for -march=i386,
# that way we can complain to the user if the CPU is insufficient.
# Compile with _SETUP since this is similar to the boot-time setup code.
KBUILD_CFLAGS	:= $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
		   -I$(srctree)/$(bootsrc) \
		   $(cflags-y) \
		   -Wall -Wstrict-prototypes \
		   -march=i386 -mregparm=3 \
		   -include $(srctree)/$(bootsrc)/code16gcc.h \
		   -fno-strict-aliasing -fomit-frame-pointer \
		   $(call cc-option, -ffreestanding) \
		   $(call cc-option, -fno-toplevel-reorder,\
			$(call cc-option, -fno-unit-at-a-time)) \
		   $(call cc-option, -fno-stack-protector) \
		   $(call cc-option, -mpreferred-stack-boundary=2)
KBUILD_CFLAGS	+= $(call cc-option, -m32)
KBUILD_AFLAGS	:= $(KBUILD_CFLAGS) -D__ASSEMBLY__

WAKEUP_OBJS = $(addprefix $(obj)/,$(wakeup-y))

LDFLAGS_wakeup.elf	:= -T

CPPFLAGS_wakeup.lds += -P -C

$(obj)/wakeup.elf: $(src)/wakeup.lds $(WAKEUP_OBJS) FORCE
	$(call if_changed,ld)

OBJCOPYFLAGS_wakeup.bin	:= -O binary

$(obj)/wakeup.bin: $(obj)/wakeup.elf FORCE
	$(call if_changed,objcopy)
arch/x86/kernel/acpi/realmode/copy.S (new file, 1 line)
@ -0,0 +1 @@
#include "../../../boot/copy.S"

arch/x86/kernel/acpi/realmode/video-bios.c (new file, 1 line)
@ -0,0 +1 @@
#include "../../../boot/video-bios.c"

arch/x86/kernel/acpi/realmode/video-mode.c (new file, 1 line)
@ -0,0 +1 @@
#include "../../../boot/video-mode.c"

arch/x86/kernel/acpi/realmode/video-vesa.c (new file, 1 line)
@ -0,0 +1 @@
#include "../../../boot/video-vesa.c"

arch/x86/kernel/acpi/realmode/video-vga.c (new file, 1 line)
@ -0,0 +1 @@
#include "../../../boot/video-vga.c"
arch/x86/kernel/acpi/realmode/wakemain.c (new file, 81 lines)
@ -0,0 +1,81 @@
#include "wakeup.h"
#include "boot.h"

static void udelay(int loops)
{
	while (loops--)
		io_delay();	/* Approximately 1 us */
}

static void beep(unsigned int hz)
{
	u8 enable;

	if (!hz) {
		enable = 0x00;		/* Turn off speaker */
	} else {
		u16 div = 1193181/hz;

		outb(0xb6, 0x43);	/* Ctr 2, squarewave, load, binary */
		io_delay();
		outb(div, 0x42);	/* LSB of counter */
		io_delay();
		outb(div >> 8, 0x42);	/* MSB of counter */
		io_delay();

		enable = 0x03;		/* Turn on speaker */
	}
	inb(0x61);		/* Dummy read of System Control Port B */
	io_delay();
	outb(enable, 0x61);	/* Enable timer 2 output to speaker */
	io_delay();
}

#define DOT_HZ		880
#define DASH_HZ		587
#define US_PER_DOT	125000

/* Okay, this is totally silly, but it's kind of fun. */
static void send_morse(const char *pattern)
{
	char s;

	while ((s = *pattern++)) {
		switch (s) {
		case '.':
			beep(DOT_HZ);
			udelay(US_PER_DOT);
			beep(0);
			udelay(US_PER_DOT);
			break;
		case '-':
			beep(DASH_HZ);
			udelay(US_PER_DOT * 3);
			beep(0);
			udelay(US_PER_DOT);
			break;
		default:	/* Assume it's a space */
			udelay(US_PER_DOT * 3);
			break;
		}
	}
}

void main(void)
{
	/* Kill machine if structures are wrong */
	if (wakeup_header.real_magic != 0x12345678)
		while (1);

	if (wakeup_header.realmode_flags & 4)
		send_morse("...-");

	if (wakeup_header.realmode_flags & 1)
		asm volatile("lcallw   $0xc000,$3");

	if (wakeup_header.realmode_flags & 2) {
		/* Need to call BIOS */
		probe_cards(0);
		set_mode(wakeup_header.video_mode);
	}
}
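beep() above programs PIT channel 2 with a divisor of 1193181/hz. A small stand-alone arithmetic check of what that yields for the two Morse tones (DOT_HZ=880, DASH_HZ=587):

#include <stdio.h>

/* Stand-alone check of the PIT programming done by beep() above:
 * divisor = 1193181 / hz, so the tone actually produced is 1193181 / divisor. */
int main(void)
{
	const unsigned int pit_hz = 1193181;
	const unsigned int tones[] = { 880 /* DOT_HZ */, 587 /* DASH_HZ */ };
	unsigned int i;

	for (i = 0; i < 2; i++) {
		unsigned int div = pit_hz / tones[i];
		printf("%u Hz -> divisor %u -> actual %.1f Hz\n",
		       tones[i], div, (double)pit_hz / div);
	}
	return 0;
}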
arch/x86/kernel/acpi/realmode/wakeup.S (new file, 113 lines)
@ -0,0 +1,113 @@
/*
 * ACPI wakeup real mode startup stub
 */
#include <asm/segment.h>
#include <asm/msr-index.h>
#include <asm/page.h>
#include <asm/pgtable.h>

	.code16
	.section ".header", "a"

/* This should match the structure in wakeup.h */
		.globl	wakeup_header
wakeup_header:
video_mode:	.short	0	/* Video mode number */
pmode_return:	.byte	0x66, 0xea	/* ljmpl */
		.long	0	/* offset goes here */
		.short	__KERNEL_CS
pmode_cr0:	.long	0	/* Saved %cr0 */
pmode_cr3:	.long	0	/* Saved %cr3 */
pmode_cr4:	.long	0	/* Saved %cr4 */
pmode_efer:	.quad	0	/* Saved EFER */
pmode_gdt:	.quad	0
realmode_flags:	.long	0
real_magic:	.long	0
trampoline_segment:	.word 0
signature:	.long	0x51ee1111

	.text
	.globl	_start
	.code16
wakeup_code:
_start:
	cli
	cld

	/* Set up segments */
	movw	%cs, %ax
	movw	%ax, %ds
	movw	%ax, %es
	movw	%ax, %ss

	movl	$wakeup_stack_end, %esp

	/* Clear the EFLAGS */
	pushl $0
	popfl

	/* Check header signature... */
	movl	signature, %eax
	cmpl	$0x51ee1111, %eax
	jne	bogus_real_magic

	/* Check we really have everything... */
	movl	end_signature, %eax
	cmpl	$0x65a22c82, %eax
	jne	bogus_real_magic

	/* Call the C code */
	calll	main

	/* Do any other stuff... */

#ifndef CONFIG_64BIT
	/* This could also be done in C code... */
	movl	pmode_cr3, %eax
	movl	%eax, %cr3

	movl	pmode_cr4, %ecx
	jecxz	1f
	movl	%ecx, %cr4
1:
	movl	pmode_efer, %eax
	movl	pmode_efer + 4, %edx
	movl	%eax, %ecx
	orl	%edx, %ecx
	jz	1f
	movl	$0xc0000080, %ecx
	wrmsr
1:

	lgdtl	pmode_gdt

	/* This really couldn't... */
	movl	pmode_cr0, %eax
	movl	%eax, %cr0
	jmp	pmode_return
#else
	pushw	$0
	pushw	trampoline_segment
	pushw	$0
	lret
#endif

bogus_real_magic:
1:
	hlt
	jmp	1b

	.data
	.balign	4
	.globl	HEAP, heap_end
HEAP:
	.long	wakeup_heap
heap_end:
	.long	wakeup_stack

	.bss
wakeup_heap:
	.space	2048
wakeup_stack:
	.space	2048
wakeup_stack_end:
arch/x86/kernel/acpi/realmode/wakeup.h (new file, 36 lines)
@ -0,0 +1,36 @@
/*
 * Definitions for the wakeup data structure at the head of the
 * wakeup code.
 */

#ifndef ARCH_X86_KERNEL_ACPI_RM_WAKEUP_H
#define ARCH_X86_KERNEL_ACPI_RM_WAKEUP_H

#ifndef __ASSEMBLY__
#include <linux/types.h>

/* This must match data at wakeup.S */
struct wakeup_header {
	u16 video_mode;		/* Video mode number */
	u16 _jmp1;		/* ljmpl opcode, 32-bit only */
	u32 pmode_entry;	/* Protected mode resume point, 32-bit only */
	u16 _jmp2;		/* CS value, 32-bit only */
	u32 pmode_cr0;		/* Protected mode cr0 */
	u32 pmode_cr3;		/* Protected mode cr3 */
	u32 pmode_cr4;		/* Protected mode cr4 */
	u32 pmode_efer_low;	/* Protected mode EFER */
	u32 pmode_efer_high;
	u64 pmode_gdt;
	u32 realmode_flags;
	u32 real_magic;
	u16 trampoline_segment;	/* segment with trampoline code, 64-bit only */
	u32 signature;		/* To check we have correct structure */
} __attribute__((__packed__));

extern struct wakeup_header wakeup_header;
#endif

#define HEADER_OFFSET 0x3f00
#define WAKEUP_SIZE   0x4000

#endif /* ARCH_X86_KERNEL_ACPI_RM_WAKEUP_H */
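The header lives at HEADER_OFFSET (0x3f00) inside a WAKEUP_SIZE (0x4000) image, so the packed structure has to fit in the last 0x100 bytes. A stand-alone sketch that mirrors the layout with <stdint.h> types and checks that constraint on the host (the mirror struct is an assumption for illustration, not the kernel's own type):

#include <stdint.h>
#include <stdio.h>

/* Illustrative host-side mirror of struct wakeup_header; assumes the same
 * field order and packing as the header above, purely for a size check. */
struct wakeup_header_mirror {
	uint16_t video_mode;
	uint16_t _jmp1;
	uint32_t pmode_entry;
	uint16_t _jmp2;
	uint32_t pmode_cr0;
	uint32_t pmode_cr3;
	uint32_t pmode_cr4;
	uint32_t pmode_efer_low;
	uint32_t pmode_efer_high;
	uint64_t pmode_gdt;
	uint32_t realmode_flags;
	uint32_t real_magic;
	uint16_t trampoline_segment;
	uint32_t signature;
} __attribute__((__packed__));

#define HEADER_OFFSET 0x3f00
#define WAKEUP_SIZE   0x4000

int main(void)
{
	printf("packed header: %zu bytes, room after HEADER_OFFSET: %d bytes\n",
	       sizeof(struct wakeup_header_mirror), WAKEUP_SIZE - HEADER_OFFSET);
	return sizeof(struct wakeup_header_mirror) <=
	       (size_t)(WAKEUP_SIZE - HEADER_OFFSET) ? 0 : 1;
}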
arch/x86/kernel/acpi/realmode/wakeup.lds.S (new file, 61 lines)
@ -0,0 +1,61 @@
/*
 * wakeup.ld
 *
 * Linker script for the real-mode wakeup code
 */
#undef i386
#include "wakeup.h"

OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
OUTPUT_ARCH(i386)
ENTRY(_start)

SECTIONS
{
	. = HEADER_OFFSET;
	.header : {
		*(.header)
	}

	. = 0;
	.text : {
		*(.text*)
	}

	. = ALIGN(16);
	.rodata : {
		*(.rodata*)
	}

	.videocards : {
		video_cards = .;
		*(.videocards)
		video_cards_end = .;
	}

	. = ALIGN(16);
	.data : {
		*(.data*)
	}

	.signature : {
		end_signature = .;
		LONG(0x65a22c82)
	}

	. = ALIGN(16);
	.bss :	{
		__bss_start = .;
		*(.bss)
		__bss_end = .;
	}

	. = ALIGN(16);
	_end = .;

	/DISCARD/ : {
		*(.note*)
	}

	. = ASSERT(_end <= WAKEUP_SIZE, "Wakeup too big!");
}
@ -10,30 +10,72 @@
 #include <linux/dmi.h>
 #include <linux/cpumask.h>
 
-#include <asm/smp.h>
+#include "realmode/wakeup.h"
+#include "sleep.h"
+
+unsigned long acpi_wakeup_address;
+unsigned long acpi_realmode_flags;
 
 /* address in low memory of the wakeup routine. */
-unsigned long acpi_wakeup_address = 0;
-unsigned long acpi_realmode_flags;
-extern char wakeup_start, wakeup_end;
+static unsigned long acpi_realmode;
 
-extern unsigned long acpi_copy_wakeup_routine(unsigned long);
+#ifdef CONFIG_64BIT
+static char temp_stack[10240];
+#endif
 
 /**
  * acpi_save_state_mem - save kernel state
  *
  * Create an identity mapped page table and copy the wakeup routine to
  * low memory.
+ *
+ * Note that this is too late to change acpi_wakeup_address.
  */
 int acpi_save_state_mem(void)
 {
-	if (!acpi_wakeup_address) {
-		printk(KERN_ERR "Could not allocate memory during boot, S3 disabled\n");
+	struct wakeup_header *header;
+
+	if (!acpi_realmode) {
+		printk(KERN_ERR "Could not allocate memory during boot, "
+		       "S3 disabled\n");
 		return -ENOMEM;
 	}
-	memcpy((void *)acpi_wakeup_address, &wakeup_start,
-	       &wakeup_end - &wakeup_start);
-	acpi_copy_wakeup_routine(acpi_wakeup_address);
+	memcpy((void *)acpi_realmode, &wakeup_code_start, WAKEUP_SIZE);
+
+	header = (struct wakeup_header *)(acpi_realmode + HEADER_OFFSET);
+	if (header->signature != 0x51ee1111) {
+		printk(KERN_ERR "wakeup header does not match\n");
+		return -EINVAL;
+	}
+
+	header->video_mode = saved_video_mode;
+
+#ifndef CONFIG_64BIT
+	store_gdt((struct desc_ptr *)&header->pmode_gdt);
+
+	header->pmode_efer_low = nx_enabled;
+	if (header->pmode_efer_low & 1) {
+		/* This is strange, why not save efer, always? */
+		rdmsr(MSR_EFER, header->pmode_efer_low,
+			header->pmode_efer_high);
+	}
+#endif /* !CONFIG_64BIT */
+
+	header->pmode_cr0 = read_cr0();
+	header->pmode_cr4 = read_cr4();
+	header->realmode_flags = acpi_realmode_flags;
+	header->real_magic = 0x12345678;
+
+#ifndef CONFIG_64BIT
+	header->pmode_entry = (u32)&wakeup_pmode_return;
+	header->pmode_cr3 = (u32)(swsusp_pg_dir - __PAGE_OFFSET);
+	saved_magic = 0x12345678;
+#else /* CONFIG_64BIT */
+	header->trampoline_segment = setup_trampoline() >> 4;
+	init_rsp = (unsigned long)temp_stack + 4096;
+	initial_code = (unsigned long)wakeup_long64;
+	saved_magic = 0x123456789abcdef0;
+#endif /* CONFIG_64BIT */
 
 	return 0;
 }
@ -56,15 +98,20 @@ void acpi_restore_state_mem(void)
  */
 void __init acpi_reserve_bootmem(void)
 {
-	if ((&wakeup_end - &wakeup_start) > PAGE_SIZE*2) {
+	if ((&wakeup_code_end - &wakeup_code_start) > WAKEUP_SIZE) {
 		printk(KERN_ERR
 		       "ACPI: Wakeup code way too big, S3 disabled.\n");
 		return;
 	}
 
-	acpi_wakeup_address = (unsigned long)alloc_bootmem_low(PAGE_SIZE*2);
-	if (!acpi_wakeup_address)
+	acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE);
+
+	if (!acpi_realmode) {
 		printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
+		return;
+	}
+
+	acpi_wakeup_address = acpi_realmode;
 }
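acpi_save_state_mem() above locates the header at acpi_realmode + HEADER_OFFSET and refuses to proceed unless the signature reads 0x51ee1111. A hedged sketch of that lookup in isolation (HEADER_OFFSET and struct wakeup_header come from realmode/wakeup.h in this patch; the helper name is made up):

/* Sketch only: locate and sanity-check the wakeup header inside a copied
 * low-memory image, the same way acpi_save_state_mem() above does.
 * find_wakeup_header() is a hypothetical helper, not a kernel function. */
#include "realmode/wakeup.h"

static struct wakeup_header *find_wakeup_header(unsigned long image_base)
{
	struct wakeup_header *header =
		(struct wakeup_header *)(image_base + HEADER_OFFSET);

	if (header->signature != 0x51ee1111)
		return (struct wakeup_header *)0;	/* wrong or stale image */

	header->real_magic = 0x12345678;	/* tell the stub the data is valid */
	return header;
}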
arch/x86/kernel/acpi/sleep.h (new file, 16 lines)
@ -0,0 +1,16 @@
/*
 *	Variables and functions used by the code in sleep.c
 */

#include <asm/trampoline.h>

extern char wakeup_code_start, wakeup_code_end;

extern unsigned long saved_video_mode;
extern long saved_magic;

extern int wakeup_pmode_return;
extern char swsusp_pg_dir[PAGE_SIZE];

extern unsigned long acpi_copy_wakeup_routine(unsigned long);
extern void wakeup_long64(void);
@ -1,40 +0,0 @@
-/*
- * sleep.c - x86-specific ACPI sleep support.
- *
- *  Copyright (C) 2001-2003 Patrick Mochel
- *  Copyright (C) 2001-2003 Pavel Machek <pavel@suse.cz>
- */
-
-#include <linux/acpi.h>
-#include <linux/bootmem.h>
-#include <linux/dmi.h>
-#include <linux/cpumask.h>
-
-#include <asm/smp.h>
-
-/* Ouch, we want to delete this. We already have better version in userspace, in
-   s2ram from suspend.sf.net project */
-static __init int reset_videomode_after_s3(const struct dmi_system_id *d)
-{
-	acpi_realmode_flags |= 2;
-	return 0;
-}
-
-static __initdata struct dmi_system_id acpisleep_dmi_table[] = {
-	{	/* Reset video mode after returning from ACPI S3 sleep */
-		.callback = reset_videomode_after_s3,
-		.ident = "Toshiba Satellite 4030cdt",
-		.matches = {
-			DMI_MATCH(DMI_PRODUCT_NAME, "S4030CDT/4.3"),
-		},
-	},
-	{}
-};
-
-static int __init acpisleep_dmi_init(void)
-{
-	dmi_check_system(acpisleep_dmi_table);
-	return 0;
-}
-
-core_initcall(acpisleep_dmi_init);
@ -3,178 +3,12 @@
 #include <asm/segment.h>
 #include <asm/page.h>
 
-#
-# wakeup_code runs in real mode, and at unknown address (determined at run-time).
-# Therefore it must only use relative jumps/calls.
-#
-# Do we need to deal with A20? It is okay: ACPI specs says A20 must be enabled
-#
-# If physical address of wakeup_code is 0x12345, BIOS should call us with
-# cs = 0x1234, eip = 0x05
-#
-
-#define BEEP \
-	inb	$97, %al; 	\
-	outb	%al, $0x80; 	\
-	movb	$3, %al; 	\
-	outb	%al, $97; 	\
-	outb	%al, $0x80; 	\
-	movb	$-74, %al; 	\
-	outb	%al, $67; 	\
-	outb	%al, $0x80; 	\
-	movb	$-119, %al; 	\
-	outb	%al, $66; 	\
-	outb	%al, $0x80; 	\
-	movb	$15, %al; 	\
-	outb	%al, $66;
-
-ALIGN
-	.align	4096
-ENTRY(wakeup_start)
-wakeup_code:
-	wakeup_code_start = .
-	.code16
-
-	cli
-	cld
-
-	# setup data segment
-	movw	%cs, %ax
-	movw	%ax, %ds		# Make ds:0 point to wakeup_start
-	movw	%ax, %ss
-
-	testl	$4, realmode_flags - wakeup_code
-	jz	1f
-	BEEP
-1:
-	mov	$(wakeup_stack - wakeup_code), %sp	# Private stack is needed for ASUS board
-
-	pushl	$0			# Kill any dangerous flags
-	popfl
-
-	movl	real_magic - wakeup_code, %eax
-	cmpl	$0x12345678, %eax
-	jne	bogus_real_magic
-
-	testl	$1, realmode_flags - wakeup_code
-	jz	1f
-	lcall   $0xc000,$3
-	movw	%cs, %ax
-	movw	%ax, %ds		# Bios might have played with that
-	movw	%ax, %ss
-1:
-
-	testl	$2, realmode_flags - wakeup_code
-	jz	1f
-	mov	video_mode - wakeup_code, %ax
-	call	mode_set
-1:
-
-	# set up page table
-	movl	$swsusp_pg_dir-__PAGE_OFFSET, %eax
-	movl	%eax, %cr3
-
-	testl	$1, real_efer_save_restore - wakeup_code
-	jz	4f
-	# restore efer setting
-	movl	real_save_efer_edx - wakeup_code, %edx
-	movl	real_save_efer_eax - wakeup_code, %eax
-	mov	$0xc0000080, %ecx
-	wrmsr
-4:
-	# make sure %cr4 is set correctly (features, etc)
-	movl	real_save_cr4 - wakeup_code, %eax
-	movl	%eax, %cr4
-
-	# need a gdt -- use lgdtl to force 32-bit operands, in case
-	# the GDT is located past 16 megabytes.
-	lgdtl	real_save_gdt - wakeup_code
-
-	movl	real_save_cr0 - wakeup_code, %eax
-	movl	%eax, %cr0
-	jmp 1f
-1:
-	movl real_magic - wakeup_code, %eax
-	cmpl $0x12345678, %eax
-	jne bogus_real_magic
-
-	testl	$8, realmode_flags - wakeup_code
-	jz	1f
-	BEEP
-1:
-	ljmpl	$__KERNEL_CS, $wakeup_pmode_return
-
-real_save_gdt:	.word 0
-		.long 0
-real_save_cr0:	.long 0
-real_save_cr3:	.long 0
-real_save_cr4:	.long 0
-real_magic:	.long 0
-video_mode:	.long 0
-realmode_flags:	.long 0
-real_efer_save_restore:	.long 0
-real_save_efer_edx: 	.long 0
-real_save_efer_eax: 	.long 0
-
-bogus_real_magic:
-	jmp bogus_real_magic
-
-/* This code uses an extended set of video mode numbers. These include:
- * Aliases for standard modes
- *	NORMAL_VGA (-1)
- *	EXTENDED_VGA (-2)
- *	ASK_VGA (-3)
- * Video modes numbered by menu position -- NOT RECOMMENDED because of lack
- * of compatibility when extending the table. These are between 0x00 and 0xff.
- */
-#define VIDEO_FIRST_MENU 0x0000
-
-/* Standard BIOS video modes (BIOS number + 0x0100) */
-#define VIDEO_FIRST_BIOS 0x0100
-
-/* VESA BIOS video modes (VESA number + 0x0200) */
-#define VIDEO_FIRST_VESA 0x0200
-
-/* Video7 special modes (BIOS number + 0x0900) */
-#define VIDEO_FIRST_V7 0x0900
-
-# Setting of user mode (AX=mode ID) => CF=success
-
-# For now, we only handle VESA modes (0x0200..0x03ff).  To handle other
-# modes, we should probably compile in the video code from the boot
-# directory.
-mode_set:
-	movw	%ax, %bx
-	subb	$VIDEO_FIRST_VESA>>8, %bh
-	cmpb	$2, %bh
-	jb	check_vesa
-
-setbad:
-	clc
-	ret
-
-check_vesa:
-	orw	$0x4000, %bx			# Use linear frame buffer
-	movw	$0x4f02, %ax			# VESA BIOS mode set call
-	int	$0x10
-	cmpw	$0x004f, %ax			# AL=4f if implemented
-	jnz	setbad				# AH=0 if OK
-
-	stc
-	ret
-
+# Copyright 2003, 2008 Pavel Machek <pavel@suse.cz>, distribute under GPLv2
 .code32
 ALIGN
 
-	.org	0x800
-wakeup_stack_begin:	# Stack grows down
-
-	.org	0xff0		# Just below end of page
-wakeup_stack:
-ENTRY(wakeup_end)
-
-	.org	0x1000
-
+ENTRY(wakeup_pmode_return)
 wakeup_pmode_return:
 	movw	$__KERNEL_DS, %ax
 	movw	%ax, %ss
@ -208,63 +42,21 @@ bogus_magic:
 	jmp	bogus_magic
 
 
-##
-# acpi_copy_wakeup_routine
-#
-# Copy the above routine to low memory.
-#
-# Parameters:
-# %eax:	place to copy wakeup routine to
-#
-# Returned address is location of code in low memory (past data and stack)
-#
-ENTRY(acpi_copy_wakeup_routine)
-
-	pushl	%ebx
+save_registers:
 	sgdt	saved_gdt
 	sidt	saved_idt
 	sldt	saved_ldt
 	str	saved_tss
 
-	movl	nx_enabled, %edx
-	movl	%edx, real_efer_save_restore - wakeup_start (%eax)
-	testl	$1, real_efer_save_restore - wakeup_start (%eax)
-	jz	2f
-	# save efer setting
-	pushl	%eax
-	movl	%eax, %ebx
-	mov	$0xc0000080, %ecx
-	rdmsr
-	movl	%edx, real_save_efer_edx - wakeup_start (%ebx)
-	movl	%eax, real_save_efer_eax - wakeup_start (%ebx)
-	popl	%eax
-2:
-
-	movl	%cr3, %edx
-	movl	%edx, real_save_cr3 - wakeup_start (%eax)
-	movl	%cr4, %edx
-	movl	%edx, real_save_cr4 - wakeup_start (%eax)
-	movl	%cr0, %edx
-	movl	%edx, real_save_cr0 - wakeup_start (%eax)
-	sgdt	real_save_gdt - wakeup_start (%eax)
-
-	movl	saved_videomode, %edx
-	movl	%edx, video_mode - wakeup_start (%eax)
-	movl	acpi_realmode_flags, %edx
-	movl	%edx, realmode_flags - wakeup_start (%eax)
-	movl	$0x12345678, real_magic - wakeup_start (%eax)
-	movl	$0x12345678, saved_magic
-	popl	%ebx
-	ret
-
-save_registers:
 	leal	4(%esp), %eax
 	movl	%eax, saved_context_esp
 	movl	%ebx, saved_context_ebx
 	movl	%ebp, saved_context_ebp
 	movl	%esi, saved_context_esi
 	movl	%edi, saved_context_edi
-	pushfl ; popl saved_context_eflags
+	pushfl
+	popl saved_context_eflags
 
 	movl	$ret_point, saved_eip
 	ret
@ -275,7 +67,8 @@ restore_registers:
 	movl	saved_context_ebx, %ebx
 	movl	saved_context_esi, %esi
 	movl	saved_context_edi, %edi
-	pushl	saved_context_eflags ; popfl
+	pushl	saved_context_eflags
+	popfl
 	ret
 
 ENTRY(do_suspend_lowlevel)
@ -7,191 +7,18 @@
|
||||||
#include <asm/asm-offsets.h>
|
#include <asm/asm-offsets.h>
|
||||||
|
|
||||||
# Copyright 2003 Pavel Machek <pavel@suse.cz>, distribute under GPLv2
|
# Copyright 2003 Pavel Machek <pavel@suse.cz>, distribute under GPLv2
|
||||||
#
|
|
||||||
# wakeup_code runs in real mode, and at unknown address (determined at run-time).
|
|
||||||
# Therefore it must only use relative jumps/calls.
|
|
||||||
#
|
|
||||||
# Do we need to deal with A20? It is okay: ACPI specs says A20 must be enabled
|
|
||||||
#
|
|
||||||
# If physical address of wakeup_code is 0x12345, BIOS should call us with
|
|
||||||
# cs = 0x1234, eip = 0x05
|
|
||||||
#
|
|
||||||
|
|
||||||
#define BEEP \
|
|
||||||
inb $97, %al; \
|
|
||||||
outb %al, $0x80; \
|
|
||||||
movb $3, %al; \
|
|
||||||
outb %al, $97; \
|
|
||||||
outb %al, $0x80; \
|
|
||||||
movb $-74, %al; \
|
|
||||||
outb %al, $67; \
|
|
||||||
outb %al, $0x80; \
|
|
||||||
movb $-119, %al; \
|
|
||||||
outb %al, $66; \
|
|
||||||
outb %al, $0x80; \
|
|
||||||
movb $15, %al; \
|
|
||||||
outb %al, $66;
|
|
||||||
|
|
||||||
|
|
||||||
ALIGN
|
|
||||||
.align 16
|
|
||||||
ENTRY(wakeup_start)
|
|
||||||
wakeup_code:
|
|
||||||
wakeup_code_start = .
|
|
||||||
.code16
|
|
||||||
|
|
||||||
# Running in *copy* of this code, somewhere in low 1MB.
|
|
||||||
|
|
||||||
cli
|
|
||||||
cld
|
|
||||||
# setup data segment
|
|
||||||
movw %cs, %ax
|
|
||||||
movw %ax, %ds # Make ds:0 point to wakeup_start
|
|
||||||
movw %ax, %ss
|
|
||||||
|
|
||||||
# Data segment must be set up before we can see whether to beep.
|
|
||||||
testl $4, realmode_flags - wakeup_code
|
|
||||||
jz 1f
|
|
||||||
BEEP
|
|
||||||
1:
|
|
||||||
|
|
||||||
# Private stack is needed for ASUS board
|
|
||||||
mov $(wakeup_stack - wakeup_code), %sp
|
|
||||||
|
|
||||||
pushl $0 # Kill any dangerous flags
|
|
||||||
popfl
|
|
||||||
|
|
||||||
movl real_magic - wakeup_code, %eax
|
|
||||||
cmpl $0x12345678, %eax
|
|
||||||
jne bogus_real_magic
|
|
||||||
|
|
||||||
testl $1, realmode_flags - wakeup_code
|
|
||||||
jz 1f
|
|
||||||
lcall $0xc000,$3
|
|
||||||
movw %cs, %ax
|
|
||||||
movw %ax, %ds # Bios might have played with that
|
|
||||||
movw %ax, %ss
|
|
||||||
1:
|
|
||||||
|
|
||||||
testl $2, realmode_flags - wakeup_code
|
|
||||||
jz 1f
|
|
||||||
mov video_mode - wakeup_code, %ax
|
|
||||||
call mode_set
|
|
||||||
1:
|
|
||||||
|
|
||||||
mov %ds, %ax # Find 32bit wakeup_code addr
|
|
||||||
movzx %ax, %esi # (Convert %ds:gdt to a liner ptr)
|
|
||||||
shll $4, %esi
|
|
||||||
# Fix up the vectors
|
|
||||||
addl %esi, wakeup_32_vector - wakeup_code
|
|
||||||
addl %esi, wakeup_long64_vector - wakeup_code
|
|
||||||
addl %esi, gdt_48a + 2 - wakeup_code # Fixup the gdt pointer
|
|
||||||
|
|
||||||
lidtl %ds:idt_48a - wakeup_code
|
|
||||||
lgdtl %ds:gdt_48a - wakeup_code # load gdt with whatever is
|
|
||||||
# appropriate
|
|
||||||
|
|
||||||
movl $1, %eax # protected mode (PE) bit
|
|
||||||
lmsw %ax # This is it!
|
|
||||||
jmp 1f
|
|
||||||
1:
|
|
||||||
|
|
||||||
ljmpl *(wakeup_32_vector - wakeup_code)
|
|
||||||
|
|
||||||
.balign 4
|
|
||||||
wakeup_32_vector:
|
|
||||||
.long wakeup_32 - wakeup_code
|
|
||||||
.word __KERNEL32_CS, 0
|
|
||||||
|
|
||||||
.code32
|
|
||||||
wakeup_32:
|
|
||||||
# Running in this code, but at low address; paging is not yet turned on.
|
|
||||||
|
|
||||||
movl $__KERNEL_DS, %eax
|
|
||||||
movl %eax, %ds
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Prepare for entering 64bits mode
|
|
||||||
*/
|
|
||||||
|
|
||||||
/* Enable PAE */
|
|
||||||
xorl %eax, %eax
|
|
||||||
btsl $5, %eax
|
|
||||||
movl %eax, %cr4
|
|
||||||
|
|
||||||
/* Setup early boot stage 4 level pagetables */
|
|
||||||
leal (wakeup_level4_pgt - wakeup_code)(%esi), %eax
|
|
||||||
movl %eax, %cr3
|
|
||||||
|
|
||||||
/* Check if nx is implemented */
|
|
||||||
movl $0x80000001, %eax
|
|
||||||
cpuid
|
|
||||||
movl %edx,%edi
|
|
||||||
|
|
||||||
/* Enable Long Mode */
|
|
||||||
xorl %eax, %eax
|
|
||||||
btsl $_EFER_LME, %eax
|
|
||||||
|
|
||||||
/* No Execute supported? */
|
|
||||||
btl $20,%edi
|
|
||||||
jnc 1f
|
|
||||||
btsl $_EFER_NX, %eax
|
|
||||||
|
|
||||||
/* Make changes effective */
|
|
||||||
1: movl $MSR_EFER, %ecx
|
|
||||||
xorl %edx, %edx
|
|
||||||
wrmsr
|
|
||||||
|
|
||||||
xorl %eax, %eax
|
|
||||||
btsl $31, %eax /* Enable paging and in turn activate Long Mode */
|
|
||||||
btsl $0, %eax /* Enable protected mode */
|
|
||||||
|
|
||||||
/* Make changes effective */
|
|
||||||
movl %eax, %cr0
|
|
||||||
|
|
||||||
/* At this point:
|
|
||||||
CR4.PAE must be 1
|
|
||||||
CS.L must be 0
|
|
||||||
CR3 must point to PML4
|
|
||||||
Next instruction must be a branch
|
|
||||||
This must be on identity-mapped page
|
|
||||||
*/
|
|
||||||
/*
|
|
||||||
* At this point we're in long mode but in 32bit compatibility mode
|
|
||||||
* with EFER.LME = 1, CS.L = 0, CS.D = 1 (and in turn
|
|
||||||
* EFER.LMA = 1). Now we want to jump in 64bit mode, to do that we load
|
|
||||||
* the new gdt/idt that has __KERNEL_CS with CS.L = 1.
|
|
||||||
*/
|
|
||||||
|
|
||||||
/* Finally jump in 64bit mode */
|
|
||||||
ljmp *(wakeup_long64_vector - wakeup_code)(%esi)
|
|
||||||
|
|
||||||
.balign 4
|
|
||||||
wakeup_long64_vector:
|
|
||||||
.long wakeup_long64 - wakeup_code
|
|
||||||
.word __KERNEL_CS, 0
|
|
||||||
|
|
||||||
.code64
|
.code64
|
||||||
|
|
||||||
/* Hooray, we are in Long 64-bit mode (but still running in
|
|
||||||
* low memory)
|
|
||||||
*/
|
|
||||||
wakeup_long64:
|
|
||||||
/*
|
/*
|
||||||
* We must switch to a new descriptor in kernel space for the GDT
|
* Hooray, we are in Long 64-bit mode (but still running in low memory)
|
||||||
* because soon the kernel won't have access anymore to the userspace
|
|
||||||
* addresses where we're currently running on. We have to do that here
|
|
||||||
* because in 32bit we couldn't load a 64bit linear address.
|
|
||||||
*/
|
*/
|
-	lgdt	cpu_gdt_descr
+ENTRY(wakeup_long64)
+wakeup_long64:
 	movq	saved_magic, %rax
 	movq	$0x123456789abcdef0, %rdx
 	cmpq	%rdx, %rax
 	jne	bogus_64_magic

-	nop
-	nop
 	movw	$__KERNEL_DS, %ax
 	movw	%ax, %ss
 	movw	%ax, %ds
@@ -208,131 +35,9 @@ wakeup_long64:
 	movq	saved_rip, %rax
 	jmp	*%rax

-.code32
-
-	.align	64
-gdta:
-	/* Its good to keep gdt in sync with one in trampoline.S */
-	.word	0, 0, 0, 0			# dummy
-	/* ??? Why I need the accessed bit set in order for this to work? */
-	.quad	0x00cf9b000000ffff		# __KERNEL32_CS
-	.quad	0x00af9b000000ffff		# __KERNEL_CS
-	.quad	0x00cf93000000ffff		# __KERNEL_DS
-
-idt_48a:
-	.word	0				# idt limit = 0
-	.word	0, 0				# idt base = 0L
-
-gdt_48a:
-	.word	0x800				# gdt limit=2048,
-						#  256 GDT entries
-	.long	gdta - wakeup_code		# gdt base (relocated in later)
-
-real_magic:	.quad 0
-video_mode:	.quad 0
-realmode_flags:	.quad 0
-
-.code16
-bogus_real_magic:
-	jmp	bogus_real_magic
-
-.code64
 bogus_64_magic:
 	jmp	bogus_64_magic

-/* This code uses an extended set of video mode numbers. These include:
- * Aliases for standard modes
- *	NORMAL_VGA (-1)
- *	EXTENDED_VGA (-2)
- *	ASK_VGA (-3)
- * Video modes numbered by menu position -- NOT RECOMMENDED because of lack
- * of compatibility when extending the table. These are between 0x00 and 0xff.
- */
-#define VIDEO_FIRST_MENU 0x0000
-
-/* Standard BIOS video modes (BIOS number + 0x0100) */
-#define VIDEO_FIRST_BIOS 0x0100
-
-/* VESA BIOS video modes (VESA number + 0x0200) */
-#define VIDEO_FIRST_VESA 0x0200
-
-/* Video7 special modes (BIOS number + 0x0900) */
-#define VIDEO_FIRST_V7 0x0900
-
-# Setting of user mode (AX=mode ID) => CF=success
-
-# For now, we only handle VESA modes (0x0200..0x03ff). To handle other
-# modes, we should probably compile in the video code from the boot
-# directory.
-.code16
-mode_set:
-	movw	%ax, %bx
-	subb	$VIDEO_FIRST_VESA>>8, %bh
-	cmpb	$2, %bh
-	jb	check_vesa
-
-setbad:
-	clc
-	ret
-
-check_vesa:
-	orw	$0x4000, %bx			# Use linear frame buffer
-	movw	$0x4f02, %ax			# VESA BIOS mode set call
-	int	$0x10
-	cmpw	$0x004f, %ax			# AL=4f if implemented
-	jnz	setbad				# AH=0 if OK
-
-	stc
-	ret
-
-wakeup_stack_begin:				# Stack grows down
-
-	.org	0xff0
-wakeup_stack:					# Just below end of page
-
-	.org	0x1000
-ENTRY(wakeup_level4_pgt)
-	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
-	.fill	510,8,0
-	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
-	.quad	level3_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
-
-ENTRY(wakeup_end)
-
-##
-# acpi_copy_wakeup_routine
-#
-# Copy the above routine to low memory.
-#
-# Parameters:
-# %rdi:	place to copy wakeup routine to
-#
-# Returned address is location of code in low memory (past data and stack)
-#
-	.code64
-ENTRY(acpi_copy_wakeup_routine)
-	pushq	%rax
-	pushq	%rdx
-
-	movl	saved_video_mode, %edx
-	movl	%edx, video_mode - wakeup_start (,%rdi)
-	movl	acpi_realmode_flags, %edx
-	movl	%edx, realmode_flags - wakeup_start (,%rdi)
-	movq	$0x12345678, real_magic - wakeup_start (,%rdi)
-	movq	$0x123456789abcdef0, %rdx
-	movq	%rdx, saved_magic
-
-	movq	saved_magic, %rax
-	movq	$0x123456789abcdef0, %rdx
-	cmpq	%rdx, %rax
-	jne	bogus_64_magic
-
-	# restore the regs we used
-	popq	%rdx
-	popq	%rax
-ENTRY(do_suspend_lowlevel_s4bios)
-	ret

 	.align	2
 	.p2align 4,,15
 .globl do_suspend_lowlevel
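(Aside, not part of the commit: the deleted mode_set/check_vesa helpers rely on the extended video mode numbering documented in the removed comment, where VESA modes live at VESA number + 0x0200 and bit 0x4000 asks for the linear frame buffer. A rough C sketch of the same computation, using only names taken from the removed code; it is illustrative, not kernel API:)

	#define VIDEO_FIRST_VESA 0x0200		/* from the removed wakeup_64.S */

	/* Sketch: the mode word mode_set builds before int 0x10 with AX = 0x4f02. */
	static int vesa_mode_word(unsigned int extended_mode)
	{
		if (extended_mode < 0x0200 || extended_mode > 0x03ff)
			return -1;			/* not a VESA mode */
		extended_mode -= VIDEO_FIRST_VESA;	/* back to the raw VESA number */
		return extended_mode | 0x4000;		/* request the linear frame buffer */
	}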
arch/x86/kernel/acpi/wakeup_rm.S (new file, 10 lines)
@@ -0,0 +1,10 @@
+/*
+ * Wrapper script for the realmode binary as a transport object
+ * before copying to low memory.
+ */
+	.section ".rodata","a"
+	.globl	wakeup_code_start, wakeup_code_end
+wakeup_code_start:
+	.incbin	"arch/x86/kernel/acpi/realmode/wakeup.bin"
+wakeup_code_end:
+	.size	wakeup_code_start, .-wakeup_code_start
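(Aside, not part of the commit: wakeup_rm.S embeds the realmode wakeup image into .rodata so that the kernel proper can relocate it below 1MB when preparing for suspend. A minimal sketch of such a copy, assuming a hypothetical low-memory destination; only wakeup_code_start/wakeup_code_end come from the file above:)

	#include <linux/string.h>

	extern char wakeup_code_start[], wakeup_code_end[];

	/* Hypothetical helper: copy the embedded realmode image into a buffer
	 * already reserved below 1MB (lowmem_buf is an assumed name, not kernel API). */
	static void copy_wakeup_image(void *lowmem_buf)
	{
		memcpy(lowmem_buf, wakeup_code_start,
		       wakeup_code_end - wakeup_code_start);
	}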
@@ -11,6 +11,8 @@
 #include <asm/mce.h>
 #include <asm/nmi.h>
 #include <asm/vsyscall.h>
+#include <asm/cacheflush.h>
+#include <asm/io.h>

 #define MAX_PATCH_LEN (255-1)

@@ -177,7 +179,7 @@ static const unsigned char*const * find_nop_table(void)
 #endif /* CONFIG_X86_64 */

 /* Use this to add nops to a buffer, then text_poke the whole buffer. */
-static void add_nops(void *insns, unsigned int len)
+void add_nops(void *insns, unsigned int len)
 {
 	const unsigned char *const *noptable = find_nop_table();

@@ -190,6 +192,7 @@ static void add_nops(void *insns, unsigned int len)
 		len -= noplen;
 	}
 }
+EXPORT_SYMBOL_GPL(add_nops);

 extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
 extern u8 *__smp_locks[], *__smp_locks_end[];
@@ -205,7 +208,7 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
 	struct alt_instr *a;
 	char insnbuf[MAX_PATCH_LEN];

-	DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end);
+	DPRINTK("%s: alt table %p -> %p\n", __func__, start, end);
 	for (a = start; a < end; a++) {
 		u8 *instr = a->instr;
 		BUG_ON(a->replacementlen > a->instrlen);
@@ -217,13 +220,13 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
 		if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END) {
 			instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0));
 			DPRINTK("%s: vsyscall fixup: %p => %p\n",
-				__FUNCTION__, a->instr, instr);
+				__func__, a->instr, instr);
 		}
 #endif
 		memcpy(insnbuf, a->replacement, a->replacementlen);
 		add_nops(insnbuf + a->replacementlen,
 			 a->instrlen - a->replacementlen);
-		text_poke(instr, insnbuf, a->instrlen);
+		text_poke_early(instr, insnbuf, a->instrlen);
 	}
 }

@@ -284,7 +287,6 @@ void alternatives_smp_module_add(struct module *mod, char *name,
 			void *text, void *text_end)
 {
 	struct smp_alt_module *smp;
-	unsigned long flags;

 	if (noreplace_smp)
 		return;
@@ -307,42 +309,40 @@ void alternatives_smp_module_add(struct module *mod, char *name,
 	smp->text	= text;
 	smp->text_end	= text_end;
 	DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n",
-		__FUNCTION__, smp->locks, smp->locks_end,
+		__func__, smp->locks, smp->locks_end,
 		smp->text, smp->text_end, smp->name);

-	spin_lock_irqsave(&smp_alt, flags);
+	spin_lock(&smp_alt);
 	list_add_tail(&smp->next, &smp_alt_modules);
 	if (boot_cpu_has(X86_FEATURE_UP))
 		alternatives_smp_unlock(smp->locks, smp->locks_end,
 					smp->text, smp->text_end);
-	spin_unlock_irqrestore(&smp_alt, flags);
+	spin_unlock(&smp_alt);
 }

 void alternatives_smp_module_del(struct module *mod)
 {
 	struct smp_alt_module *item;
-	unsigned long flags;

 	if (smp_alt_once || noreplace_smp)
 		return;

-	spin_lock_irqsave(&smp_alt, flags);
+	spin_lock(&smp_alt);
 	list_for_each_entry(item, &smp_alt_modules, next) {
 		if (mod != item->mod)
 			continue;
 		list_del(&item->next);
-		spin_unlock_irqrestore(&smp_alt, flags);
-		DPRINTK("%s: %s\n", __FUNCTION__, item->name);
+		spin_unlock(&smp_alt);
+		DPRINTK("%s: %s\n", __func__, item->name);
 		kfree(item);
 		return;
 	}
-	spin_unlock_irqrestore(&smp_alt, flags);
+	spin_unlock(&smp_alt);
 }

 void alternatives_smp_switch(int smp)
 {
 	struct smp_alt_module *mod;
-	unsigned long flags;

 #ifdef CONFIG_LOCKDEP
 	/*
@@ -359,7 +359,7 @@ void alternatives_smp_switch(int smp)
 		return;
 	BUG_ON(!smp && (num_online_cpus() > 1));

-	spin_lock_irqsave(&smp_alt, flags);
+	spin_lock(&smp_alt);

 	/*
 	 * Avoid unnecessary switches because it forces JIT based VMs to
@@ -383,7 +383,7 @@ void alternatives_smp_switch(int smp)
 					mod->text, mod->text_end);
 	}
 	smp_mode = smp;
-	spin_unlock_irqrestore(&smp_alt, flags);
+	spin_unlock(&smp_alt);
 }

 #endif
@@ -411,7 +411,7 @@ void apply_paravirt(struct paravirt_patch_site *start,

 		/* Pad the rest with nops */
 		add_nops(insnbuf + used, p->len - used);
-		text_poke(p->instr, insnbuf, p->len);
+		text_poke_early(p->instr, insnbuf, p->len);
 	}
 }
 extern struct paravirt_patch_site __start_parainstructions[],
@@ -420,8 +420,6 @@ extern struct paravirt_patch_site __start_parainstructions[],

 void __init alternative_instructions(void)
 {
-	unsigned long flags;

 	/* The patching is not fully atomic, so try to avoid local interruptions
 	   that might execute the to be patched code.
 	   Other CPUs are not running. */
@@ -430,7 +428,6 @@ void __init alternative_instructions(void)
 	stop_mce();
 #endif

-	local_irq_save(flags);
 	apply_alternatives(__alt_instructions, __alt_instructions_end);

 	/* switch to patch-once-at-boottime-only mode and free the
@@ -462,7 +459,6 @@ void __init alternative_instructions(void)
 	}
 #endif
 	apply_paravirt(__parainstructions, __parainstructions_end);
-	local_irq_restore(flags);

 	if (smp_alt_once)
 		free_init_pages("SMP alternatives",
@@ -475,18 +471,71 @@ void __init alternative_instructions(void)
 #endif
 }

-/*
- * Warning:
+/**
+ * text_poke_early - Update instructions on a live kernel at boot time
+ * @addr: address to modify
+ * @opcode: source of the copy
+ * @len: length to copy
+ *
  * When you use this code to patch more than one byte of an instruction
  * you need to make sure that other CPUs cannot execute this code in parallel.
- * Also no thread must be currently preempted in the middle of these instructions.
- * And on the local CPU you need to be protected again NMI or MCE handlers
- * seeing an inconsistent instruction while you patch.
+ * Also no thread must be currently preempted in the middle of these
+ * instructions. And on the local CPU you need to be protected again NMI or MCE
+ * handlers seeing an inconsistent instruction while you patch.
  */
-void __kprobes text_poke(void *addr, unsigned char *opcode, int len)
+void *text_poke_early(void *addr, const void *opcode, size_t len)
 {
+	unsigned long flags;
+	local_irq_save(flags);
 	memcpy(addr, opcode, len);
+	local_irq_restore(flags);
 	sync_core();
 	/* Could also do a CLFLUSH here to speed up CPU recovery; but
 	   that causes hangs on some VIA CPUs. */
+	return addr;
+}
+
+/**
+ * text_poke - Update instructions on a live kernel
+ * @addr: address to modify
+ * @opcode: source of the copy
+ * @len: length to copy
+ *
+ * Only atomic text poke/set should be allowed when not doing early patching.
+ * It means the size must be writable atomically and the address must be aligned
+ * in a way that permits an atomic write. It also makes sure we fit on a single
+ * page.
+ */
+void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
+{
+	unsigned long flags;
+	char *vaddr;
+	int nr_pages = 2;
+
+	BUG_ON(len > sizeof(long));
+	BUG_ON((((long)addr + len - 1) & ~(sizeof(long) - 1))
+	       - ((long)addr & ~(sizeof(long) - 1)));
+	if (kernel_text_address((unsigned long)addr)) {
+		struct page *pages[2] = { virt_to_page(addr),
+			virt_to_page(addr + PAGE_SIZE) };
+		if (!pages[1])
+			nr_pages = 1;
+		vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
+		BUG_ON(!vaddr);
+		local_irq_save(flags);
+		memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
+		local_irq_restore(flags);
+		vunmap(vaddr);
+	} else {
+		/*
+		 * modules are in vmalloc'ed memory, always writable.
+		 */
+		local_irq_save(flags);
+		memcpy(addr, opcode, len);
+		local_irq_restore(flags);
+	}
+	sync_core();
+	/* Could also do a CLFLUSH here to speed up CPU recovery; but
+	   that causes hangs on some VIA CPUs. */
+	return addr;
 }
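(Aside, not part of the commit: the hunk above splits kernel text patching into text_poke_early(), a plain memcpy usable while only the boot CPU is running, and text_poke(), which must fit in one aligned word and writes through a temporary vmap() alias. A minimal usage sketch built only from the functions and macros shown above; the patch-site values and the helper name are made up:)

	/* Pad a replacement up to the full site length with nops, then apply it.
	 * 'early' selects boot-time patching; otherwise the site must fit within
	 * an aligned long so the write stays atomic, as text_poke() requires. */
	static void apply_one_patch(void *site, const void *repl, size_t repl_len,
				    size_t site_len, int early)
	{
		char buf[MAX_PATCH_LEN];

		memcpy(buf, repl, repl_len);
		add_nops(buf + repl_len, site_len - repl_len);

		if (early)
			text_poke_early(site, buf, site_len);
		else
			text_poke(site, buf, site_len);
	}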
@@ -27,11 +27,11 @@
 #include <asm/k8.h>

 int gart_iommu_aperture;
-int gart_iommu_aperture_disabled __initdata = 0;
-int gart_iommu_aperture_allowed __initdata = 0;
+int gart_iommu_aperture_disabled __initdata;
+int gart_iommu_aperture_allowed __initdata;

 int fallback_aper_order __initdata = 1; /* 64MB */
-int fallback_aper_force __initdata = 0;
+int fallback_aper_force __initdata;

 int fix_aperture __initdata = 1;

|
@ -50,6 +50,11 @@
|
||||||
# error SPURIOUS_APIC_VECTOR definition error
|
# error SPURIOUS_APIC_VECTOR definition error
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
unsigned long mp_lapic_addr;
|
||||||
|
|
||||||
|
DEFINE_PER_CPU(u16, x86_bios_cpu_apicid) = BAD_APICID;
|
||||||
|
EXPORT_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Knob to control our willingness to enable the local APIC.
|
* Knob to control our willingness to enable the local APIC.
|
||||||
*
|
*
|
||||||
|
@ -620,6 +625,35 @@ int setup_profiling_timer(unsigned int multiplier)
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Setup extended LVT, AMD specific (K8, family 10h)
|
||||||
|
*
|
||||||
|
* Vector mappings are hard coded. On K8 only offset 0 (APIC500) and
|
||||||
|
* MCE interrupts are supported. Thus MCE offset must be set to 0.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#define APIC_EILVT_LVTOFF_MCE 0
|
||||||
|
#define APIC_EILVT_LVTOFF_IBS 1
|
||||||
|
|
||||||
|
static void setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask)
|
||||||
|
{
|
||||||
|
unsigned long reg = (lvt_off << 4) + APIC_EILVT0;
|
||||||
|
unsigned int v = (mask << 16) | (msg_type << 8) | vector;
|
||||||
|
apic_write(reg, v);
|
||||||
|
}
|
||||||
|
|
||||||
|
u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask)
|
||||||
|
{
|
||||||
|
setup_APIC_eilvt(APIC_EILVT_LVTOFF_MCE, vector, msg_type, mask);
|
||||||
|
return APIC_EILVT_LVTOFF_MCE;
|
||||||
|
}
|
||||||
|
|
||||||
|
u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask)
|
||||||
|
{
|
||||||
|
setup_APIC_eilvt(APIC_EILVT_LVTOFF_IBS, vector, msg_type, mask);
|
||||||
|
return APIC_EILVT_LVTOFF_IBS;
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Local APIC start and shutdown
|
* Local APIC start and shutdown
|
||||||
*/
|
*/
|
||||||
|
@ -868,12 +902,50 @@ void __init init_bsp_APIC(void)
|
||||||
apic_write_around(APIC_LVT1, value);
|
apic_write_around(APIC_LVT1, value);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void __cpuinit lapic_setup_esr(void)
|
||||||
|
{
|
||||||
|
unsigned long oldvalue, value, maxlvt;
|
||||||
|
if (lapic_is_integrated() && !esr_disable) {
|
||||||
|
/* !82489DX */
|
||||||
|
maxlvt = lapic_get_maxlvt();
|
||||||
|
if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
|
||||||
|
apic_write(APIC_ESR, 0);
|
||||||
|
oldvalue = apic_read(APIC_ESR);
|
||||||
|
|
||||||
|
/* enables sending errors */
|
||||||
|
value = ERROR_APIC_VECTOR;
|
||||||
|
apic_write_around(APIC_LVTERR, value);
|
||||||
|
/*
|
||||||
|
* spec says clear errors after enabling vector.
|
||||||
|
*/
|
||||||
|
if (maxlvt > 3)
|
||||||
|
apic_write(APIC_ESR, 0);
|
||||||
|
value = apic_read(APIC_ESR);
|
||||||
|
if (value != oldvalue)
|
||||||
|
apic_printk(APIC_VERBOSE, "ESR value before enabling "
|
||||||
|
"vector: 0x%08lx after: 0x%08lx\n",
|
||||||
|
oldvalue, value);
|
||||||
|
} else {
|
||||||
|
if (esr_disable)
|
||||||
|
/*
|
||||||
|
* Something untraceable is creating bad interrupts on
|
||||||
|
* secondary quads ... for the moment, just leave the
|
||||||
|
* ESR disabled - we can't do anything useful with the
|
||||||
|
* errors anyway - mbligh
|
||||||
|
*/
|
||||||
|
printk(KERN_INFO "Leaving ESR disabled.\n");
|
||||||
|
else
|
||||||
|
printk(KERN_INFO "No ESR for 82489DX.\n");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* setup_local_APIC - setup the local APIC
|
* setup_local_APIC - setup the local APIC
|
||||||
*/
|
*/
|
||||||
void __cpuinit setup_local_APIC(void)
|
void __cpuinit setup_local_APIC(void)
|
||||||
{
|
{
|
||||||
unsigned long oldvalue, value, maxlvt, integrated;
|
unsigned long value, integrated;
|
||||||
int i, j;
|
int i, j;
|
||||||
|
|
||||||
/* Pound the ESR really hard over the head with a big hammer - mbligh */
|
/* Pound the ESR really hard over the head with a big hammer - mbligh */
|
||||||
|
@ -997,40 +1069,13 @@ void __cpuinit setup_local_APIC(void)
|
||||||
if (!integrated) /* 82489DX */
|
if (!integrated) /* 82489DX */
|
||||||
value |= APIC_LVT_LEVEL_TRIGGER;
|
value |= APIC_LVT_LEVEL_TRIGGER;
|
||||||
apic_write_around(APIC_LVT1, value);
|
apic_write_around(APIC_LVT1, value);
|
||||||
|
|
||||||
if (integrated && !esr_disable) {
|
|
||||||
/* !82489DX */
|
|
||||||
maxlvt = lapic_get_maxlvt();
|
|
||||||
if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
|
|
||||||
apic_write(APIC_ESR, 0);
|
|
||||||
oldvalue = apic_read(APIC_ESR);
|
|
||||||
|
|
||||||
/* enables sending errors */
|
|
||||||
value = ERROR_APIC_VECTOR;
|
|
||||||
apic_write_around(APIC_LVTERR, value);
|
|
||||||
/*
|
|
||||||
* spec says clear errors after enabling vector.
|
|
||||||
*/
|
|
||||||
if (maxlvt > 3)
|
|
||||||
apic_write(APIC_ESR, 0);
|
|
||||||
value = apic_read(APIC_ESR);
|
|
||||||
if (value != oldvalue)
|
|
||||||
apic_printk(APIC_VERBOSE, "ESR value before enabling "
|
|
||||||
"vector: 0x%08lx after: 0x%08lx\n",
|
|
||||||
oldvalue, value);
|
|
||||||
} else {
|
|
||||||
if (esr_disable)
|
|
||||||
/*
|
|
||||||
* Something untraceable is creating bad interrupts on
|
|
||||||
* secondary quads ... for the moment, just leave the
|
|
||||||
* ESR disabled - we can't do anything useful with the
|
|
||||||
* errors anyway - mbligh
|
|
||||||
*/
|
|
||||||
printk(KERN_INFO "Leaving ESR disabled.\n");
|
|
||||||
else
|
|
||||||
printk(KERN_INFO "No ESR for 82489DX.\n");
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void __cpuinit end_local_APIC_setup(void)
|
||||||
|
{
|
||||||
|
unsigned long value;
|
||||||
|
|
||||||
|
lapic_setup_esr();
|
||||||
/* Disable the local apic timer */
|
/* Disable the local apic timer */
|
||||||
value = apic_read(APIC_LVTT);
|
value = apic_read(APIC_LVTT);
|
||||||
value |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
|
value |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
|
||||||
|
@ -1147,7 +1192,7 @@ void __init init_apic_mappings(void)
|
||||||
* default configuration (or the MP table is broken).
|
* default configuration (or the MP table is broken).
|
||||||
*/
|
*/
|
||||||
if (boot_cpu_physical_apicid == -1U)
|
if (boot_cpu_physical_apicid == -1U)
|
||||||
boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
|
boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id());
|
||||||
|
|
||||||
#ifdef CONFIG_X86_IO_APIC
|
#ifdef CONFIG_X86_IO_APIC
|
||||||
{
|
{
|
||||||
|
@ -1185,6 +1230,9 @@ void __init init_apic_mappings(void)
|
||||||
* This initializes the IO-APIC and APIC hardware if this is
|
* This initializes the IO-APIC and APIC hardware if this is
|
||||||
* a UP kernel.
|
* a UP kernel.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
int apic_version[MAX_APICS];
|
||||||
|
|
||||||
int __init APIC_init_uniprocessor(void)
|
int __init APIC_init_uniprocessor(void)
|
||||||
{
|
{
|
||||||
if (enable_local_apic < 0)
|
if (enable_local_apic < 0)
|
||||||
|
@ -1214,12 +1262,13 @@ int __init APIC_init_uniprocessor(void)
|
||||||
* might be zero if read from MP tables. Get it from LAPIC.
|
* might be zero if read from MP tables. Get it from LAPIC.
|
||||||
*/
|
*/
|
||||||
#ifdef CONFIG_CRASH_DUMP
|
#ifdef CONFIG_CRASH_DUMP
|
||||||
boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
|
boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id());
|
||||||
#endif
|
#endif
|
||||||
phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid);
|
phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid);
|
||||||
|
|
||||||
setup_local_APIC();
|
setup_local_APIC();
|
||||||
|
|
||||||
|
end_local_APIC_setup();
|
||||||
#ifdef CONFIG_X86_IO_APIC
|
#ifdef CONFIG_X86_IO_APIC
|
||||||
if (smp_found_config)
|
if (smp_found_config)
|
||||||
if (!skip_ioapic_setup && nr_ioapics)
|
if (!skip_ioapic_setup && nr_ioapics)
|
||||||
|
@ -1288,6 +1337,29 @@ void smp_error_interrupt(struct pt_regs *regs)
|
||||||
irq_exit();
|
irq_exit();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#ifdef CONFIG_SMP
|
||||||
|
void __init smp_intr_init(void)
|
||||||
|
{
|
||||||
|
/*
|
||||||
|
* IRQ0 must be given a fixed assignment and initialized,
|
||||||
|
* because it's used before the IO-APIC is set up.
|
||||||
|
*/
|
||||||
|
set_intr_gate(FIRST_DEVICE_VECTOR, interrupt[0]);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* The reschedule interrupt is a CPU-to-CPU reschedule-helper
|
||||||
|
* IPI, driven by wakeup.
|
||||||
|
*/
|
||||||
|
set_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
|
||||||
|
|
||||||
|
/* IPI for invalidation */
|
||||||
|
set_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);
|
||||||
|
|
||||||
|
/* IPI for generic function call */
|
||||||
|
set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Initialize APIC interrupts
|
* Initialize APIC interrupts
|
||||||
*/
|
*/
|
||||||
|
@ -1394,6 +1466,88 @@ void disconnect_bsp_APIC(int virt_wire_setup)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
unsigned int __cpuinitdata maxcpus = NR_CPUS;
|
||||||
|
|
||||||
|
void __cpuinit generic_processor_info(int apicid, int version)
|
||||||
|
{
|
||||||
|
int cpu;
|
||||||
|
cpumask_t tmp_map;
|
||||||
|
physid_mask_t phys_cpu;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Validate version
|
||||||
|
*/
|
||||||
|
if (version == 0x0) {
|
||||||
|
printk(KERN_WARNING "BIOS bug, APIC version is 0 for CPU#%d! "
|
||||||
|
"fixing up to 0x10. (tell your hw vendor)\n",
|
||||||
|
version);
|
||||||
|
version = 0x10;
|
||||||
|
}
|
||||||
|
apic_version[apicid] = version;
|
||||||
|
|
||||||
|
phys_cpu = apicid_to_cpu_present(apicid);
|
||||||
|
physids_or(phys_cpu_present_map, phys_cpu_present_map, phys_cpu);
|
||||||
|
|
||||||
|
if (num_processors >= NR_CPUS) {
|
||||||
|
printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
|
||||||
|
" Processor ignored.\n", NR_CPUS);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (num_processors >= maxcpus) {
|
||||||
|
printk(KERN_WARNING "WARNING: maxcpus limit of %i reached."
|
||||||
|
" Processor ignored.\n", maxcpus);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
num_processors++;
|
||||||
|
cpus_complement(tmp_map, cpu_present_map);
|
||||||
|
cpu = first_cpu(tmp_map);
|
||||||
|
|
||||||
|
if (apicid == boot_cpu_physical_apicid)
|
||||||
|
/*
|
||||||
|
* x86_bios_cpu_apicid is required to have processors listed
|
||||||
|
* in same order as logical cpu numbers. Hence the first
|
||||||
|
* entry is BSP, and so on.
|
||||||
|
*/
|
||||||
|
cpu = 0;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y
|
||||||
|
* but we need to work other dependencies like SMP_SUSPEND etc
|
||||||
|
* before this can be done without some confusion.
|
||||||
|
* if (CPU_HOTPLUG_ENABLED || num_processors > 8)
|
||||||
|
* - Ashok Raj <ashok.raj@intel.com>
|
||||||
|
*/
|
||||||
|
if (num_processors > 8) {
|
||||||
|
switch (boot_cpu_data.x86_vendor) {
|
||||||
|
case X86_VENDOR_INTEL:
|
||||||
|
if (!APIC_XAPIC(version)) {
|
||||||
|
def_to_bigsmp = 0;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
/* If P4 and above fall through */
|
||||||
|
case X86_VENDOR_AMD:
|
||||||
|
def_to_bigsmp = 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
#ifdef CONFIG_SMP
|
||||||
|
/* are we being called early in kernel startup? */
|
||||||
|
if (x86_cpu_to_apicid_early_ptr) {
|
||||||
|
u16 *cpu_to_apicid = x86_cpu_to_apicid_early_ptr;
|
||||||
|
u16 *bios_cpu_apicid = x86_bios_cpu_apicid_early_ptr;
|
||||||
|
|
||||||
|
cpu_to_apicid[cpu] = apicid;
|
||||||
|
bios_cpu_apicid[cpu] = apicid;
|
||||||
|
} else {
|
||||||
|
per_cpu(x86_cpu_to_apicid, cpu) = apicid;
|
||||||
|
per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
cpu_set(cpu, cpu_possible_map);
|
||||||
|
cpu_set(cpu, cpu_present_map);
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Power management
|
* Power management
|
||||||
*/
|
*/
|
||||||
|
|
|
@ -34,13 +34,15 @@
|
||||||
#include <asm/mpspec.h>
|
#include <asm/mpspec.h>
|
||||||
#include <asm/hpet.h>
|
#include <asm/hpet.h>
|
||||||
#include <asm/pgalloc.h>
|
#include <asm/pgalloc.h>
|
||||||
#include <asm/mach_apic.h>
|
|
||||||
#include <asm/nmi.h>
|
#include <asm/nmi.h>
|
||||||
#include <asm/idle.h>
|
#include <asm/idle.h>
|
||||||
#include <asm/proto.h>
|
#include <asm/proto.h>
|
||||||
#include <asm/timex.h>
|
#include <asm/timex.h>
|
||||||
#include <asm/apic.h>
|
#include <asm/apic.h>
|
||||||
|
|
||||||
|
#include <mach_ipi.h>
|
||||||
|
#include <mach_apic.h>
|
||||||
|
|
||||||
int disable_apic_timer __cpuinitdata;
|
int disable_apic_timer __cpuinitdata;
|
||||||
static int apic_calibrate_pmtmr __initdata;
|
static int apic_calibrate_pmtmr __initdata;
|
||||||
int disable_apic;
|
int disable_apic;
|
||||||
|
@ -83,6 +85,12 @@ static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
|
||||||
|
|
||||||
static unsigned long apic_phys;
|
static unsigned long apic_phys;
|
||||||
|
|
||||||
|
unsigned long mp_lapic_addr;
|
||||||
|
|
||||||
|
DEFINE_PER_CPU(u16, x86_bios_cpu_apicid) = BAD_APICID;
|
||||||
|
EXPORT_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
|
||||||
|
|
||||||
|
unsigned int __cpuinitdata maxcpus = NR_CPUS;
|
||||||
/*
|
/*
|
||||||
* Get the LAPIC version
|
* Get the LAPIC version
|
||||||
*/
|
*/
|
||||||
|
@ -431,7 +439,8 @@ void __cpuinit check_boot_apic_timer_broadcast(void)
|
||||||
lapic_clockevent.features |= CLOCK_EVT_FEAT_DUMMY;
|
lapic_clockevent.features |= CLOCK_EVT_FEAT_DUMMY;
|
||||||
|
|
||||||
local_irq_enable();
|
local_irq_enable();
|
||||||
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE, &boot_cpu_id);
|
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
|
||||||
|
&boot_cpu_physical_apicid);
|
||||||
local_irq_disable();
|
local_irq_disable();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -640,10 +649,10 @@ int __init verify_local_APIC(void)
|
||||||
/*
|
/*
|
||||||
* The ID register is read/write in a real APIC.
|
* The ID register is read/write in a real APIC.
|
||||||
*/
|
*/
|
||||||
reg0 = apic_read(APIC_ID);
|
reg0 = read_apic_id();
|
||||||
apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0);
|
apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0);
|
||||||
apic_write(APIC_ID, reg0 ^ APIC_ID_MASK);
|
apic_write(APIC_ID, reg0 ^ APIC_ID_MASK);
|
||||||
reg1 = apic_read(APIC_ID);
|
reg1 = read_apic_id();
|
||||||
apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg1);
|
apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg1);
|
||||||
apic_write(APIC_ID, reg0);
|
apic_write(APIC_ID, reg0);
|
||||||
if (reg1 != (reg0 ^ APIC_ID_MASK))
|
if (reg1 != (reg0 ^ APIC_ID_MASK))
|
||||||
|
@ -728,6 +737,7 @@ void __cpuinit setup_local_APIC(void)
|
||||||
unsigned int value;
|
unsigned int value;
|
||||||
int i, j;
|
int i, j;
|
||||||
|
|
||||||
|
preempt_disable();
|
||||||
value = apic_read(APIC_LVR);
|
value = apic_read(APIC_LVR);
|
||||||
|
|
||||||
BUILD_BUG_ON((SPURIOUS_APIC_VECTOR & 0x0f) != 0x0f);
|
BUILD_BUG_ON((SPURIOUS_APIC_VECTOR & 0x0f) != 0x0f);
|
||||||
|
@ -821,6 +831,7 @@ void __cpuinit setup_local_APIC(void)
|
||||||
else
|
else
|
||||||
value = APIC_DM_NMI | APIC_LVT_MASKED;
|
value = APIC_DM_NMI | APIC_LVT_MASKED;
|
||||||
apic_write(APIC_LVT1, value);
|
apic_write(APIC_LVT1, value);
|
||||||
|
preempt_enable();
|
||||||
}
|
}
|
||||||
|
|
||||||
void __cpuinit lapic_setup_esr(void)
|
void __cpuinit lapic_setup_esr(void)
|
||||||
|
@ -857,10 +868,34 @@ static int __init detect_init_APIC(void)
|
||||||
}
|
}
|
||||||
|
|
||||||
mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
|
mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
|
||||||
boot_cpu_id = 0;
|
boot_cpu_physical_apicid = 0;
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void __init early_init_lapic_mapping(void)
|
||||||
|
{
|
||||||
|
unsigned long apic_phys;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* If no local APIC can be found then go out
|
||||||
|
* : it means there is no mpatable and MADT
|
||||||
|
*/
|
||||||
|
if (!smp_found_config)
|
||||||
|
return;
|
||||||
|
|
||||||
|
apic_phys = mp_lapic_addr;
|
||||||
|
|
||||||
|
set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
|
||||||
|
apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
|
||||||
|
APIC_BASE, apic_phys);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Fetch the APIC ID of the BSP in case we have a
|
||||||
|
* default configuration (or the MP table is broken).
|
||||||
|
*/
|
||||||
|
boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id());
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* init_apic_mappings - initialize APIC mappings
|
* init_apic_mappings - initialize APIC mappings
|
||||||
*/
|
*/
|
||||||
|
@ -881,16 +916,11 @@ void __init init_apic_mappings(void)
|
||||||
apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
|
apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
|
||||||
APIC_BASE, apic_phys);
|
APIC_BASE, apic_phys);
|
||||||
|
|
||||||
/* Put local APIC into the resource map. */
|
|
||||||
lapic_resource.start = apic_phys;
|
|
||||||
lapic_resource.end = lapic_resource.start + PAGE_SIZE - 1;
|
|
||||||
insert_resource(&iomem_resource, &lapic_resource);
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Fetch the APIC ID of the BSP in case we have a
|
* Fetch the APIC ID of the BSP in case we have a
|
||||||
* default configuration (or the MP table is broken).
|
* default configuration (or the MP table is broken).
|
||||||
*/
|
*/
|
||||||
boot_cpu_id = GET_APIC_ID(apic_read(APIC_ID));
|
boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id());
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -911,8 +941,8 @@ int __init APIC_init_uniprocessor(void)
|
||||||
|
|
||||||
verify_local_APIC();
|
verify_local_APIC();
|
||||||
|
|
||||||
phys_cpu_present_map = physid_mask_of_physid(boot_cpu_id);
|
phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid);
|
||||||
apic_write(APIC_ID, SET_APIC_ID(boot_cpu_id));
|
apic_write(APIC_ID, SET_APIC_ID(boot_cpu_physical_apicid));
|
||||||
|
|
||||||
setup_local_APIC();
|
setup_local_APIC();
|
||||||
|
|
||||||
|
@ -1029,6 +1059,52 @@ void disconnect_bsp_APIC(int virt_wire_setup)
|
||||||
apic_write(APIC_LVT1, value);
|
apic_write(APIC_LVT1, value);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void __cpuinit generic_processor_info(int apicid, int version)
|
||||||
|
{
|
||||||
|
int cpu;
|
||||||
|
cpumask_t tmp_map;
|
||||||
|
|
||||||
|
if (num_processors >= NR_CPUS) {
|
||||||
|
printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
|
||||||
|
" Processor ignored.\n", NR_CPUS);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (num_processors >= maxcpus) {
|
||||||
|
printk(KERN_WARNING "WARNING: maxcpus limit of %i reached."
|
||||||
|
" Processor ignored.\n", maxcpus);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
num_processors++;
|
||||||
|
cpus_complement(tmp_map, cpu_present_map);
|
||||||
|
cpu = first_cpu(tmp_map);
|
||||||
|
|
||||||
|
physid_set(apicid, phys_cpu_present_map);
|
||||||
|
if (apicid == boot_cpu_physical_apicid) {
|
||||||
|
/*
|
||||||
|
* x86_bios_cpu_apicid is required to have processors listed
|
||||||
|
* in same order as logical cpu numbers. Hence the first
|
||||||
|
* entry is BSP, and so on.
|
||||||
|
*/
|
||||||
|
cpu = 0;
|
||||||
|
}
|
||||||
|
/* are we being called early in kernel startup? */
|
||||||
|
if (x86_cpu_to_apicid_early_ptr) {
|
||||||
|
u16 *cpu_to_apicid = x86_cpu_to_apicid_early_ptr;
|
||||||
|
u16 *bios_cpu_apicid = x86_bios_cpu_apicid_early_ptr;
|
||||||
|
|
||||||
|
cpu_to_apicid[cpu] = apicid;
|
||||||
|
bios_cpu_apicid[cpu] = apicid;
|
||||||
|
} else {
|
||||||
|
per_cpu(x86_cpu_to_apicid, cpu) = apicid;
|
||||||
|
per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
|
||||||
|
}
|
||||||
|
|
||||||
|
cpu_set(cpu, cpu_possible_map);
|
||||||
|
cpu_set(cpu, cpu_present_map);
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Power management
|
* Power management
|
||||||
*/
|
*/
|
||||||
|
@ -1065,7 +1141,7 @@ static int lapic_suspend(struct sys_device *dev, pm_message_t state)
|
||||||
|
|
||||||
maxlvt = lapic_get_maxlvt();
|
maxlvt = lapic_get_maxlvt();
|
||||||
|
|
||||||
apic_pm_state.apic_id = apic_read(APIC_ID);
|
apic_pm_state.apic_id = read_apic_id();
|
||||||
apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
|
apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
|
||||||
apic_pm_state.apic_ldr = apic_read(APIC_LDR);
|
apic_pm_state.apic_ldr = apic_read(APIC_LDR);
|
||||||
apic_pm_state.apic_dfr = apic_read(APIC_DFR);
|
apic_pm_state.apic_dfr = apic_read(APIC_DFR);
|
||||||
|
@ -1180,9 +1256,19 @@ __cpuinit int apic_is_clustered_box(void)
|
||||||
{
|
{
|
||||||
int i, clusters, zeros;
|
int i, clusters, zeros;
|
||||||
unsigned id;
|
unsigned id;
|
||||||
u16 *bios_cpu_apicid = x86_bios_cpu_apicid_early_ptr;
|
u16 *bios_cpu_apicid;
|
||||||
DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
|
DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* there is not this kind of box with AMD CPU yet.
|
||||||
|
* Some AMD box with quadcore cpu and 8 sockets apicid
|
||||||
|
* will be [4, 0x23] or [8, 0x27] could be thought to
|
||||||
|
* vsmp box still need checking...
|
||||||
|
*/
|
||||||
|
if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && !is_vsmp_box())
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
bios_cpu_apicid = x86_bios_cpu_apicid_early_ptr;
|
||||||
bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
|
bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
|
||||||
|
|
||||||
for (i = 0; i < NR_CPUS; i++) {
|
for (i = 0; i < NR_CPUS; i++) {
|
||||||
|
@ -1219,6 +1305,12 @@ __cpuinit int apic_is_clustered_box(void)
|
||||||
++zeros;
|
++zeros;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* ScaleMP vSMPowered boxes have one cluster per board and TSCs are
|
||||||
|
* not guaranteed to be synced between boards
|
||||||
|
*/
|
||||||
|
if (is_vsmp_box() && clusters > 1)
|
||||||
|
return 1;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* If clusters > 2, then should be multi-chassis.
|
* If clusters > 2, then should be multi-chassis.
|
||||||
* May have to revisit this when multi-core + hyperthreaded CPUs come
|
* May have to revisit this when multi-core + hyperthreaded CPUs come
|
||||||
|
@ -1290,3 +1382,21 @@ static __init int setup_apicpmtimer(char *s)
|
||||||
}
|
}
|
||||||
__setup("apicpmtimer", setup_apicpmtimer);
|
__setup("apicpmtimer", setup_apicpmtimer);
|
||||||
|
|
||||||
|
static int __init lapic_insert_resource(void)
|
||||||
|
{
|
||||||
|
if (!apic_phys)
|
||||||
|
return -1;
|
||||||
|
|
||||||
|
/* Put local APIC into the resource map. */
|
||||||
|
lapic_resource.start = apic_phys;
|
||||||
|
lapic_resource.end = lapic_resource.start + PAGE_SIZE - 1;
|
||||||
|
insert_resource(&iomem_resource, &lapic_resource);
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* need call insert after e820_reserve_resources()
|
||||||
|
* that is using request_resource
|
||||||
|
*/
|
||||||
|
late_initcall(lapic_insert_resource);
|
||||||
|
|
|
@@ -2217,7 +2217,6 @@ static struct dmi_system_id __initdata apm_dmi_table[] = {
  */
 static int __init apm_init(void)
 {
-	struct proc_dir_entry *apm_proc;
 	struct desc_struct *gdt;
 	int err;

@@ -2322,9 +2321,7 @@ static int __init apm_init(void)
 	set_base(gdt[APM_DS >> 3],
 		 __va((unsigned long)apm_info.bios.dseg << 4));

-	apm_proc = create_proc_entry("apm", 0, NULL);
-	if (apm_proc)
-		apm_proc->proc_fops = &apm_file_ops;
+	proc_create("apm", 0, NULL, &apm_file_ops);

 	kapmd_task = kthread_create(apm, NULL, "kapmd");
 	if (IS_ERR(kapmd_task)) {
@@ -10,7 +10,7 @@
 #include <linux/personality.h>
 #include <linux/suspend.h>
 #include <asm/ucontext.h>
-#include "sigframe_32.h"
+#include "sigframe.h"
 #include <asm/pgtable.h>
 #include <asm/fixmap.h>
 #include <asm/processor.h>
@@ -9,13 +9,25 @@
 #include <asm/bugs.h>
 #include <asm/processor.h>
 #include <asm/mtrr.h>
+#include <asm/cacheflush.h>

 void __init check_bugs(void)
 {
-	identify_cpu(&boot_cpu_data);
+	identify_boot_cpu();
 #if !defined(CONFIG_SMP)
 	printk("CPU: ");
 	print_cpu_info(&boot_cpu_data);
 #endif
 	alternative_instructions();
+
+	/*
+	 * Make sure the first 2MB area is not mapped by huge pages
+	 * There are typically fixed size MTRRs in there and overlapping
+	 * MTRRs into large pages causes slow downs.
+	 *
+	 * Right now we don't do that with gbpages because there seems
+	 * very little benefit for that case.
+	 */
+	if (!direct_gbpages)
+		set_memory_4k((unsigned long)__va(0), 1);
 }
@@ -3,9 +3,9 @@
 #

 obj-y			:= intel_cacheinfo.o addon_cpuid_features.o
-obj-y			+= feature_names.o
+obj-y			+= proc.o feature_names.o

-obj-$(CONFIG_X86_32)	+= common.o proc.o bugs.o
+obj-$(CONFIG_X86_32)	+= common.o bugs.o
 obj-$(CONFIG_X86_32)	+= amd.o
 obj-$(CONFIG_X86_32)	+= cyrix.o
 obj-$(CONFIG_X86_32)	+= centaur.o
@@ -4,8 +4,8 @@
 #include <asm/io.h>
 #include <asm/processor.h>
 #include <asm/apic.h>
-#include <asm/mach_apic.h>

+#include <mach_apic.h>
 #include "cpu.h"

 /*
@@ -63,12 +63,12 @@ static __cpuinit int amd_apic_timer_broken(void)

 int force_mwait __cpuinitdata;

-void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
+static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
 {
 	if (cpuid_eax(0x80000000) >= 0x80000007) {
 		c->x86_power = cpuid_edx(0x80000007);
 		if (c->x86_power & (1<<8))
-			set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
+			set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 	}
 }

@@ -81,7 +81,8 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 #ifdef CONFIG_SMP
 	unsigned long long value;

-	/* Disable TLB flush filter by setting HWCR.FFDIS on K8
+	/*
+	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
 	 * bit 6 of msr C001_0015
 	 *
 	 * Errata 63 for SH-B3 steppings
@@ -102,14 +103,15 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 	 * no bus pipeline)
 	 */

-	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
-	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
-	clear_bit(0*32+31, c->x86_capability);
+	/*
+	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
+	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
+	 */
+	clear_cpu_cap(c, 0*32+31);

 	r = get_model_name(c);

-	switch(c->x86)
-	{
+	switch (c->x86) {
 	case 4:
 		/*
 		 * General Systems BIOSen alias the cpu frequency registers
@@ -126,12 +128,11 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 		}
 		break;
 	case 5:
-		if( c->x86_model < 6 )
-		{
+		if (c->x86_model < 6) {
 			/* Based on AMD doc 20734R - June 2000 */
 			if (c->x86_model == 0) {
-				clear_bit(X86_FEATURE_APIC, c->x86_capability);
-				set_bit(X86_FEATURE_PGE, c->x86_capability);
+				clear_cpu_cap(c, X86_FEATURE_APIC);
+				set_cpu_cap(c, X86_FEATURE_PGE);
 			}
 			break;
 		}
@@ -207,7 +208,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 		/* Set MTRR capability flag if appropriate */
 		if (c->x86_model == 13 || c->x86_model == 9 ||
 		   (c->x86_model == 8 && c->x86_mask >= 8))
-			set_bit(X86_FEATURE_K6_MTRR, c->x86_capability);
+			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
 		break;
 	}

@@ -219,7 +220,8 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 		break;
 	case 6: /* An Athlon/Duron */

-		/* Bit 15 of Athlon specific MSR 15, needs to be 0
+		/*
+		 * Bit 15 of Athlon specific MSR 15, needs to be 0
 		 * to enable SSE on Palomino/Morgan/Barton CPU's.
 		 * If the BIOS didn't enable it already, enable it here.
 		 */
@@ -229,11 +231,12 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 			rdmsr(MSR_K7_HWCR, l, h);
 			l &= ~0x00008000;
 			wrmsr(MSR_K7_HWCR, l, h);
-			set_bit(X86_FEATURE_XMM, c->x86_capability);
+			set_cpu_cap(c, X86_FEATURE_XMM);
 		}
 	}

-	/* It's been determined by AMD that Athlons since model 8 stepping 1
+	/*
+	 * It's been determined by AMD that Athlons since model 8 stepping 1
 	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
 	 * As per AMD technical note 27212 0.2
 	 */
@@ -253,20 +256,19 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 	/* Use K8 tuning for Fam10h and Fam11h */
 	case 0x10:
 	case 0x11:
-		set_bit(X86_FEATURE_K8, c->x86_capability);
+		set_cpu_cap(c, X86_FEATURE_K8);
 		break;
 	case 6:
-		set_bit(X86_FEATURE_K7, c->x86_capability);
+		set_cpu_cap(c, X86_FEATURE_K7);
 		break;
 	}
 	if (c->x86 >= 6)
-		set_bit(X86_FEATURE_FXSAVE_LEAK, c->x86_capability);
+		set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);

 	display_cacheinfo(c);

-	if (cpuid_eax(0x80000000) >= 0x80000008) {
+	if (cpuid_eax(0x80000000) >= 0x80000008)
 		c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
-	}

 #ifdef CONFIG_X86_HT
 	/*
@@ -302,10 +304,10 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)

 	/* K6s reports MCEs but don't actually have all the MSRs */
 	if (c->x86 < 6)
-		clear_bit(X86_FEATURE_MCE, c->x86_capability);
+		clear_cpu_cap(c, X86_FEATURE_MCE);

 	if (cpu_has_xmm2)
-		set_bit(X86_FEATURE_MFENCE_RDTSC, c->x86_capability);
+		set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
 }

 static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
@@ -336,6 +338,7 @@ static struct cpu_dev amd_cpu_dev __cpuinitdata = {
 			}
 		},
 	},
+	.c_early_init	= early_init_amd,
 	.c_init		= init_amd,
 	.c_size_cache	= amd_size_cache,
 };
@@ -345,3 +348,5 @@ int __init amd_init_cpu(void)
 	cpu_devs[X86_VENDOR_AMD] = &amd_cpu_dev;
 	return 0;
 }
+
+cpu_vendor_dev_register(X86_VENDOR_AMD, &amd_cpu_dev);
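(Aside, not part of the commit: the hunks above replace open-coded set_bit()/clear_bit() on c->x86_capability with the set_cpu_cap()/clear_cpu_cap() helpers. As an assumption about their shape rather than a quote of the header, they are thin wrappers over the same bit operations:)

	/* Sketch of what the helpers are assumed to expand to; the real macros
	 * in <asm/cpufeature.h> may differ in casts and extra bookkeeping. */
	#define set_cpu_cap(c, bit)	set_bit(bit, (unsigned long *)((c)->x86_capability))
	#define clear_cpu_cap(c, bit)	clear_bit(bit, (unsigned long *)((c)->x86_capability))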
@ -1,10 +1,12 @@
|
||||||
#include <linux/kernel.h>
|
#include <linux/kernel.h>
|
||||||
#include <linux/init.h>
|
#include <linux/init.h>
|
||||||
#include <linux/bitops.h>
|
#include <linux/bitops.h>
|
||||||
|
|
||||||
#include <asm/processor.h>
|
#include <asm/processor.h>
|
||||||
#include <asm/msr.h>
|
#include <asm/msr.h>
|
||||||
#include <asm/e820.h>
|
#include <asm/e820.h>
|
||||||
#include <asm/mtrr.h>
|
#include <asm/mtrr.h>
|
||||||
|
|
||||||
#include "cpu.h"
|
#include "cpu.h"
|
||||||
|
|
||||||
#ifdef CONFIG_X86_OOSTORE
|
#ifdef CONFIG_X86_OOSTORE
|
||||||
|
@ -12,8 +14,10 @@
|
||||||
static u32 __cpuinit power2(u32 x)
|
static u32 __cpuinit power2(u32 x)
|
||||||
{
|
{
|
||||||
u32 s = 1;
|
u32 s = 1;
|
||||||
|
|
||||||
while (s <= x)
|
while (s <= x)
|
||||||
s <<= 1;
|
s <<= 1;
|
||||||
|
|
||||||
return s >>= 1;
|
return s >>= 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -21,7 +25,6 @@ static u32 __cpuinit power2(u32 x)
|
||||||
/*
|
/*
|
||||||
* Set up an actual MCR
|
* Set up an actual MCR
|
||||||
*/
|
*/
|
||||||
|
|
||||||
static void __cpuinit centaur_mcr_insert(int reg, u32 base, u32 size, int key)
|
static void __cpuinit centaur_mcr_insert(int reg, u32 base, u32 size, int key)
|
||||||
{
|
{
|
||||||
u32 lo, hi;
|
u32 lo, hi;
|
||||||
|
@ -39,12 +42,11 @@ static void __cpuinit centaur_mcr_insert(int reg, u32 base, u32 size, int key)
|
||||||
*
|
*
|
||||||
* Shortcut: We know you can't put 4Gig of RAM on a winchip
|
* Shortcut: We know you can't put 4Gig of RAM on a winchip
|
||||||
*/
|
*/
|
||||||
|
static u32 __cpuinit ramtop(void)
|
||||||
static u32 __cpuinit ramtop(void) /* 16388 */
|
|
||||||
{
|
{
|
||||||
int i;
|
|
||||||
u32 top = 0;
|
|
||||||
u32 clip = 0xFFFFFFFFUL;
|
u32 clip = 0xFFFFFFFFUL;
|
||||||
|
u32 top = 0;
|
||||||
|
int i;
|
||||||
|
|
||||||
for (i = 0; i < e820.nr_map; i++) {
|
for (i = 0; i < e820.nr_map; i++) {
|
||||||
unsigned long start, end;
|
unsigned long start, end;
|
||||||
|
@ -55,10 +57,9 @@ static u32 __cpuinit ramtop(void) /* 16388 */
|
||||||
* Don't MCR over reserved space. Ignore the ISA hole
|
* Don't MCR over reserved space. Ignore the ISA hole
|
||||||
* we frob around that catastrophe already
|
* we frob around that catastrophe already
|
||||||
*/
|
*/
|
||||||
|
if (e820.map[i].type == E820_RESERVED) {
|
||||||
if (e820.map[i].type == E820_RESERVED)
|
if (e820.map[i].addr >= 0x100000UL &&
|
||||||
{
|
e820.map[i].addr < clip)
|
||||||
if(e820.map[i].addr >= 0x100000UL && e820.map[i].addr < clip)
|
|
||||||
clip = e820.map[i].addr;
|
clip = e820.map[i].addr;
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
@ -69,18 +70,18 @@ static u32 __cpuinit ramtop(void) /* 16388 */
|
||||||
if (end > top)
|
if (end > top)
|
||||||
top = end;
|
top = end;
|
||||||
}
|
}
|
||||||
/* Everything below 'top' should be RAM except for the ISA hole.
|
/*
|
||||||
Because of the limited MCR's we want to map NV/ACPI into our
|
* Everything below 'top' should be RAM except for the ISA hole.
|
||||||
MCR range for gunk in RAM
|
* Because of the limited MCR's we want to map NV/ACPI into our
|
||||||
|
* MCR range for gunk in RAM
|
||||||
Clip might cause us to MCR insufficient RAM but that is an
|
*
|
||||||
acceptable failure mode and should only bite obscure boxes with
|
* Clip might cause us to MCR insufficient RAM but that is an
|
||||||
a VESA hole at 15Mb
|
* acceptable failure mode and should only bite obscure boxes with
|
||||||
|
* a VESA hole at 15Mb
|
||||||
The second case Clip sometimes kicks in is when the EBDA is marked
|
*
|
||||||
as reserved. Again we fail safe with reasonable results
|
* The second case Clip sometimes kicks in is when the EBDA is marked
|
||||||
|
* as reserved. Again we fail safe with reasonable results
|
||||||
*/
|
*/
|
||||||
|
|
||||||
if (top > clip)
|
if (top > clip)
|
||||||
top = clip;
|
top = clip;
|
||||||
|
|
||||||
|
@ -90,7 +91,6 @@ static u32 __cpuinit ramtop(void) /* 16388 */
|
||||||
/*
|
/*
|
||||||
* Compute a set of MCR's to give maximum coverage
|
* Compute a set of MCR's to give maximum coverage
|
||||||
*/
|
*/
|
||||||
|
|
||||||
static int __cpuinit centaur_mcr_compute(int nr, int key)
|
static int __cpuinit centaur_mcr_compute(int nr, int key)
|
||||||
{
|
{
|
||||||
u32 mem = ramtop();
|
u32 mem = ramtop();
|
||||||
|
@ -100,27 +100,25 @@ static int __cpuinit centaur_mcr_compute(int nr, int key)
|
||||||
u32 floor = 0;
|
u32 floor = 0;
|
||||||
int ct = 0;
|
int ct = 0;
|
||||||
|
|
||||||
while(ct<nr)
|
while (ct < nr) {
|
||||||
{
|
|
||||||
u32 fspace = 0;
|
u32 fspace = 0;
|
||||||
|
u32 high;
|
||||||
|
u32 low;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Find the largest block we will fill going upwards
|
* Find the largest block we will fill going upwards
|
||||||
*/
|
*/
|
||||||
|
high = power2(mem-top);
|
||||||
u32 high = power2(mem-top);
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Find the largest block we will fill going downwards
|
* Find the largest block we will fill going downwards
|
||||||
*/
|
*/
|
||||||
|
low = base/2;
|
||||||
u32 low = base/2;
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Don't fill below 1Mb going downwards as there
|
* Don't fill below 1Mb going downwards as there
|
||||||
* is an ISA hole in the way.
|
* is an ISA hole in the way.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
if (base <= 1024*1024)
|
if (base <= 1024*1024)
|
||||||
low = 0;
|
low = 0;
@@ -139,36 +137,31 @@ static int __cpuinit centaur_mcr_compute(int nr, int key)
 		/*
 		 * Now install the largest coverage we get
 		 */
-		if(fspace > high && fspace > low)
-		{
+		if (fspace > high && fspace > low) {
 			centaur_mcr_insert(ct, floor, fspace, key);
 			floor += fspace;
-		}
-		else if(high > low)
-		{
+		} else if (high > low) {
 			centaur_mcr_insert(ct, top, high, key);
 			top += high;
-		}
-		else if(low > 0)
-		{
+		} else if (low > 0) {
 			base -= low;
 			centaur_mcr_insert(ct, base, low, key);
-		}
-		else break;
+		} else
+			break;
 		ct++;
 	}
 	/*
 	 * We loaded ct values. We now need to set the mask. The caller
 	 * must do this bit.
 	 */
-
 	return ct;
 }
 
 static void __cpuinit centaur_create_optimal_mcr(void)
 {
+	int used;
 	int i;
 
 	/*
 	 * Allocate up to 6 mcrs to mark as much of ram as possible
 	 * as write combining and weak write ordered.
@@ -179,12 +172,11 @@ static void __cpuinit centaur_create_optimal_mcr(void)
 	 * Load the registers with type 31 - full write combining, all
 	 * writes weakly ordered.
 	 */
-	int used = centaur_mcr_compute(6, 31);
+	used = centaur_mcr_compute(6, 31);
 
 	/*
 	 * Wipe unused MCRs
 	 */
-
 	for (i = used; i < 8; i++)
 		wrmsr(MSR_IDT_MCR0+i, 0, 0);
 }
@@ -192,6 +184,7 @@ static void __cpuinit centaur_create_optimal_mcr(void)
 static void __cpuinit winchip2_create_optimal_mcr(void)
 {
 	u32 lo, hi;
+	int used;
 	int i;
 
 	/*
@@ -203,13 +196,11 @@ static void __cpuinit winchip2_create_optimal_mcr(void)
 	 * 16 - weak read ordering
 	 * 1  - write combining
 	 */
-	int used = centaur_mcr_compute(6, 25);
-
+	used = centaur_mcr_compute(6, 25);
 
 	/*
 	 * Mark the registers we are using.
 	 */
-
 	rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
 	for (i = 0; i < used; i++)
 		lo |= 1<<(9+i);
@@ -226,7 +217,6 @@ static void __cpuinit winchip2_create_optimal_mcr(void)
 /*
  * Handle the MCR key on the Winchip 2.
  */
-
 static void __cpuinit winchip2_unprotect_mcr(void)
 {
 	u32 lo, hi;
@@ -292,19 +282,17 @@ static void __cpuinit init_c3(struct cpuinfo_x86 *c)
 		rdmsr(MSR_VIA_FCR, lo, hi);
 		lo |= (1<<1 | 1<<7);
 		wrmsr(MSR_VIA_FCR, lo, hi);
-		set_bit(X86_FEATURE_CX8, c->x86_capability);
+		set_cpu_cap(c, X86_FEATURE_CX8);
 	}
 
 	/* Before Nehemiah, the C3's had 3dNOW! */
 	if (c->x86_model >= 6 && c->x86_model < 9)
-		set_bit(X86_FEATURE_3DNOW, c->x86_capability);
+		set_cpu_cap(c, X86_FEATURE_3DNOW);
 
 	get_model_name(c);
 	display_cacheinfo(c);
 }
 
-static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
-{
 	enum {
 		ECX8 = 1<<1,
 		EIERRINT = 1<<2,
@@ -326,18 +314,22 @@ static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
 		EAMD3D = 1<<20,
 	};
 
+static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
+{
 	char *name;
 	u32  fcr_set = 0;
 	u32  fcr_clr = 0;
 	u32  lo, hi, newlo;
 	u32  aa, bb, cc, dd;
 
-	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
-	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
-	clear_bit(0*32+31, c->x86_capability);
+	/*
+	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
+	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
+	 */
+	clear_cpu_cap(c, 0*32+31);
 
 	switch (c->x86) {
 
 	case 5:
 		switch (c->x86_model) {
 		case 4:
@@ -345,18 +337,19 @@ static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
 			fcr_set = ECX8|DSMC|EDCTLB|EMMX|ERETSTK;
 			fcr_clr = DPDC;
 			printk(KERN_NOTICE "Disabling bugged TSC.\n");
-			clear_bit(X86_FEATURE_TSC, c->x86_capability);
+			clear_cpu_cap(c, X86_FEATURE_TSC);
 #ifdef CONFIG_X86_OOSTORE
 			centaur_create_optimal_mcr();
-			/* Enable
-			   write combining on non-stack, non-string
-			   write combining on string, all types
-			   weak write ordering
-
-			   The C6 original lacks weak read order
-
-			   Note 0x120 is write only on Winchip 1 */
+			/*
+			 * Enable:
+			 * write combining on non-stack, non-string
+			 * write combining on string, all types
+			 * weak write ordering
+			 *
+			 * The C6 original lacks weak read order
+			 *
+			 * Note 0x120 is write only on Winchip 1
+			 */
 			wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0);
 #endif
 			break;
@@ -372,16 +365,18 @@ static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
 				name = "2B";
 				break;
 			}
-			fcr_set=ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D;
+			fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|
+				  E2MMX|EAMD3D;
 			fcr_clr = DPDC;
 #ifdef CONFIG_X86_OOSTORE
 			winchip2_unprotect_mcr();
 			winchip2_create_optimal_mcr();
 			rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
-			/* Enable
-			   write combining on non-stack, non-string
-			   write combining on string, all types
-			   weak write ordering
-			*/
+			/*
+			 * Enable:
+			 * write combining on non-stack, non-string
+			 * write combining on string, all types
+			 * weak write ordering
+			 */
 			lo |= 31;
 			wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
@@ -390,16 +385,18 @@ static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
 			break;
 		case 9:
 			name = "3";
-			fcr_set=ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D;
+			fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|
+				  E2MMX|EAMD3D;
 			fcr_clr = DPDC;
 #ifdef CONFIG_X86_OOSTORE
 			winchip2_unprotect_mcr();
 			winchip2_create_optimal_mcr();
 			rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
-			/* Enable
-			   write combining on non-stack, non-string
-			   write combining on string, all types
-			   weak write ordering
-			*/
+			/*
+			 * Enable:
+			 * write combining on non-stack, non-string
+			 * write combining on string, all types
+			 * weak write ordering
+			 */
 			lo |= 31;
 			wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
@@ -414,18 +411,19 @@ static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
 		newlo = (lo|fcr_set) & (~fcr_clr);
 
 		if (newlo != lo) {
-			printk(KERN_INFO "Centaur FCR was 0x%X now 0x%X\n", lo, newlo );
+			printk(KERN_INFO "Centaur FCR was 0x%X now 0x%X\n",
+				lo, newlo);
 			wrmsr(MSR_IDT_FCR1, newlo, hi);
 		} else {
 			printk(KERN_INFO "Centaur FCR is 0x%X\n", lo);
 		}
 		/* Emulate MTRRs using Centaur's MCR. */
-		set_bit(X86_FEATURE_CENTAUR_MCR, c->x86_capability);
+		set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR);
 		/* Report CX8 */
-		set_bit(X86_FEATURE_CX8, c->x86_capability);
+		set_cpu_cap(c, X86_FEATURE_CX8);
 		/* Set 3DNow! on Winchip 2 and above. */
 		if (c->x86_model >= 8)
-			set_bit(X86_FEATURE_3DNOW, c->x86_capability);
+			set_cpu_cap(c, X86_FEATURE_3DNOW);
 		/* See if we can find out some more. */
 		if (cpuid_eax(0x80000000) >= 0x80000005) {
 			/* Yes, we can. */
@@ -442,16 +440,20 @@ static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
 	}
 }
 
-static unsigned int __cpuinit centaur_size_cache(struct cpuinfo_x86 * c, unsigned int size)
+static unsigned int __cpuinit
+centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 {
 	/* VIA C3 CPUs (670-68F) need further shifting. */
 	if ((c->x86 == 6) && ((c->x86_model == 7) || (c->x86_model == 8)))
 		size >>= 8;
 
-	/* VIA also screwed up Nehemiah stepping 1, and made
-	   it return '65KB' instead of '64KB'
-	   - Note, it seems this may only be in engineering samples. */
-	if ((c->x86==6) && (c->x86_model==9) && (c->x86_mask==1) && (size==65))
+	/*
+	 * There's also an erratum in Nehemiah stepping 1, which
+	 * returns '65KB' instead of '64KB'
+	 * - Note, it seems this may only be in engineering samples.
+	 */
+	if ((c->x86 == 6) && (c->x86_model == 9) &&
+				(c->x86_mask == 1) && (size == 65))
 		size -= 1;
 
 	return size;
@@ -464,8 +466,4 @@ static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
 	.c_size_cache	= centaur_size_cache,
 };
 
-int __init centaur_init_cpu(void)
-{
-	cpu_devs[X86_VENDOR_CENTAUR] = &centaur_cpu_dev;
-	return 0;
-}
+cpu_vendor_dev_register(X86_VENDOR_CENTAUR, &centaur_cpu_dev);
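The hunks above repeatedly swap open-coded `set_bit`/`clear_bit` calls on `c->x86_capability` for the `set_cpu_cap`/`clear_cpu_cap`/`test_cpu_cap` helpers. Below is a minimal user-space sketch of that accessor pattern; the helper names mirror the ones used in the diff, but the definitions, the `NCAPINTS` value, and the feature-bit number here are illustrative assumptions, not the kernel's own code.

```c
#include <stdio.h>

#define NCAPINTS 8			/* number of 32-bit capability words (assumed) */
#define X86_FEATURE_CX8 (0*32 + 8)	/* example feature bit (assumed value) */

struct cpuinfo_x86 {
	unsigned int x86_capability[NCAPINTS];
};

/* Wrapping the bit arithmetic keeps callers from caring how the bitmap is laid out. */
static void set_cpu_cap(struct cpuinfo_x86 *c, int bit)
{
	c->x86_capability[bit / 32] |= 1u << (bit % 32);
}

static void clear_cpu_cap(struct cpuinfo_x86 *c, int bit)
{
	c->x86_capability[bit / 32] &= ~(1u << (bit % 32));
}

static int test_cpu_cap(struct cpuinfo_x86 *c, int bit)
{
	return (c->x86_capability[bit / 32] >> (bit % 32)) & 1;
}

int main(void)
{
	struct cpuinfo_x86 c = { {0} };

	set_cpu_cap(&c, X86_FEATURE_CX8);
	printf("CX8: %d\n", test_cpu_cap(&c, X86_FEATURE_CX8));	/* prints 1 */
	clear_cpu_cap(&c, X86_FEATURE_CX8);
	printf("CX8: %d\n", test_cpu_cap(&c, X86_FEATURE_CX8));	/* prints 0 */
	return 0;
}
```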
@@ -156,9 +156,12 @@ void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 		       l2size, ecx & 0xFF);
 }
 
-/* Naming convention should be: <Name> [(<Codename>)] */
-/* This table only is used unless init_<vendor>() below doesn't set it; */
-/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */
+/*
+ * Naming convention should be: <Name> [(<Codename>)]
+ * This table only is used unless init_<vendor>() below doesn't set it;
+ * in particular, if CPUID levels 0x80000002..4 are supported, this isn't used
+ *
+ */
 
 /* Look up CPU names by table lookup. */
 static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
@@ -306,14 +309,30 @@ static void __cpuinit early_get_cap(struct cpuinfo_x86 *c)
 
 	}
 
+	clear_cpu_cap(c, X86_FEATURE_PAT);
+
+	switch (c->x86_vendor) {
+	case X86_VENDOR_AMD:
+		if (c->x86 >= 0xf && c->x86 <= 0x11)
+			set_cpu_cap(c, X86_FEATURE_PAT);
+		break;
+	case X86_VENDOR_INTEL:
+		if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15))
+			set_cpu_cap(c, X86_FEATURE_PAT);
+		break;
+	}
 }
 
-/* Do minimum CPU detection early.
-   Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
-   The others are not touched to avoid unwanted side effects.
-
-   WARNING: this function is only called on the BP.  Don't add code here
-   that is supposed to run on all CPUs. */
+/*
+ * Do minimum CPU detection early.
+ * Fields really needed: vendor, cpuid_level, family, model, mask,
+ * cache alignment.
+ * The others are not touched to avoid unwanted side effects.
+ *
+ * WARNING: this function is only called on the BP.  Don't add code here
+ * that is supposed to run on all CPUs.
+ */
 static void __init early_cpu_detect(void)
 {
 	struct cpuinfo_x86 *c = &boot_cpu_data;
@@ -328,14 +347,9 @@ static void __init early_cpu_detect(void)
 
 	get_cpu_vendor(c, 1);
 
-	switch (c->x86_vendor) {
-	case X86_VENDOR_AMD:
-		early_init_amd(c);
-		break;
-	case X86_VENDOR_INTEL:
-		early_init_intel(c);
-		break;
-	}
+	if (c->x86_vendor != X86_VENDOR_UNKNOWN &&
+	    cpu_devs[c->x86_vendor]->c_early_init)
+		cpu_devs[c->x86_vendor]->c_early_init(c);
 
 	early_get_cap(c);
 }
@@ -355,7 +369,6 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 * c)
 	get_cpu_vendor(c, 0);
 	/* Initialize the standard set of capabilities */
 	/* Note that the vendor-specific code below might override */
-
 	/* Intel-defined flags: level 0x00000001 */
 	if (c->cpuid_level >= 0x00000001) {
 		u32 capability, excap;
@@ -369,12 +382,14 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 * c)
 		if (c->x86 >= 0x6)
 			c->x86_model += ((tfms >> 16) & 0xF) << 4;
 		c->x86_mask = tfms & 15;
+		c->initial_apicid = (ebx >> 24) & 0xFF;
 #ifdef CONFIG_X86_HT
-		c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0);
+		c->apicid = phys_pkg_id(c->initial_apicid, 0);
+		c->phys_proc_id = c->initial_apicid;
 #else
-		c->apicid = (ebx >> 24) & 0xFF;
+		c->apicid = c->initial_apicid;
 #endif
-		if (c->x86_capability[0] & (1<<19))
+		if (test_cpu_cap(c, X86_FEATURE_CLFLSH))
 			c->x86_clflush_size = ((ebx >> 8) & 0xff) * 8;
 	} else {
 		/* Have CPUID level 0 only - unheard of */
@@ -395,9 +410,18 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 * c)
 		init_scattered_cpuid_features(c);
 	}
 
-#ifdef CONFIG_X86_HT
-	c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
-#endif
+	clear_cpu_cap(c, X86_FEATURE_PAT);
+
+	switch (c->x86_vendor) {
+	case X86_VENDOR_AMD:
+		if (c->x86 >= 0xf && c->x86 <= 0x11)
+			set_cpu_cap(c, X86_FEATURE_PAT);
+		break;
+	case X86_VENDOR_INTEL:
+		if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15))
+			set_cpu_cap(c, X86_FEATURE_PAT);
+		break;
+	}
 }
 
 static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
@@ -409,7 +433,7 @@ static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 		lo |= 0x200000;
 		wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
 		printk(KERN_NOTICE "CPU serial number disabled.\n");
-		clear_bit(X86_FEATURE_PN, c->x86_capability);
+		clear_cpu_cap(c, X86_FEATURE_PN);
 
 		/* Disabling the serial number may affect the cpuid level */
 		c->cpuid_level = cpuid_eax(0);
@@ -444,8 +468,10 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 	memset(&c->x86_capability, 0, sizeof c->x86_capability);
 
 	if (!have_cpuid_p()) {
-		/* First of all, decide if this is a 486 or higher */
-		/* It's a 486 if we can modify the AC flag */
+		/*
+		 * First of all, decide if this is a 486 or higher
+		 * It's a 486 if we can modify the AC flag
+		 */
 		if (flag_is_changeable_p(X86_EFLAGS_AC))
 			c->x86 = 4;
 		else
@@ -552,7 +578,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 	}
 
 	index_msb = get_count_order(smp_num_siblings);
-	c->phys_proc_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
+	c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb);
 
 	printk(KERN_INFO  "CPU: Physical Processor ID: %d\n",
 	       c->phys_proc_id);
@@ -563,7 +589,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 
 	core_bits = get_count_order(c->x86_max_cores);
 
-	c->cpu_core_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) &
+	c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) &
 					       ((1 << core_bits) - 1);
 
 	if (c->x86_max_cores > 1)
@@ -616,23 +642,15 @@ __setup("clearcpuid=", setup_disablecpuid);
 
 cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
 
-/* This is hacky. :)
- * We're emulating future behavior.
- * In the future, the cpu-specific init functions will be called implicitly
- * via the magic of initcalls.
- * They will insert themselves into the cpu_devs structure.
- * Then, when cpu_init() is called, we can just iterate over that array.
- */
 void __init early_cpu_init(void)
 {
-	intel_cpu_init();
-	cyrix_init_cpu();
-	nsc_init_cpu();
-	amd_init_cpu();
-	centaur_init_cpu();
-	transmeta_init_cpu();
-	nexgen_init_cpu();
-	umc_init_cpu();
+	struct cpu_vendor_dev *cvdev;
+
+	for (cvdev = __x86cpuvendor_start ;
+	     cvdev < __x86cpuvendor_end   ;
+	     cvdev++)
+		cpu_devs[cvdev->vendor] = cvdev->cpu_dev;
+
 	early_cpu_detect();
 }
@@ -14,6 +14,7 @@ struct cpu_dev {
 
 	struct cpu_model_info	c_models[4];
 
+	void		(*c_early_init)(struct cpuinfo_x86 *c);
 	void		(*c_init)(struct cpuinfo_x86 * c);
 	void		(*c_identify)(struct cpuinfo_x86 * c);
 	unsigned int	(*c_size_cache)(struct cpuinfo_x86 * c, unsigned int size);
@@ -21,18 +22,17 @@ struct cpu_dev {
 
 extern struct cpu_dev * cpu_devs [X86_VENDOR_NUM];
 
+struct cpu_vendor_dev {
+	int vendor;
+	struct cpu_dev *cpu_dev;
+};
+
+#define cpu_vendor_dev_register(cpu_vendor_id, cpu_dev) \
+	static struct cpu_vendor_dev __cpu_vendor_dev_##cpu_vendor_id __used \
+	__attribute__((__section__(".x86cpuvendor.init"))) = \
+	{ cpu_vendor_id, cpu_dev }
+
+extern struct cpu_vendor_dev __x86cpuvendor_start[], __x86cpuvendor_end[];
+
 extern int get_model_name(struct cpuinfo_x86 *c);
 extern void display_cacheinfo(struct cpuinfo_x86 *c);
 
-extern void early_init_intel(struct cpuinfo_x86 *c);
-extern void early_init_amd(struct cpuinfo_x86 *c);
-
-/* Specific CPU type init functions */
-int intel_cpu_init(void);
-int amd_init_cpu(void);
-int cyrix_init_cpu(void);
-int nsc_init_cpu(void);
-int centaur_init_cpu(void);
-int transmeta_init_cpu(void);
-int nexgen_init_cpu(void);
-int umc_init_cpu(void);
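These header hunks show the mechanism the per-vendor `*_init_cpu()` functions are replaced with: each vendor file drops a `struct cpu_vendor_dev` entry into a dedicated linker section via `cpu_vendor_dev_register()`, and `early_cpu_init()` walks the section bounded by `__x86cpuvendor_start`/`__x86cpuvendor_end`. The following is a small user-space sketch of the same pattern under stated assumptions: the struct layouts are simplified, the vendor IDs are made up, and it relies on the GNU toolchain synthesizing `__start_<section>`/`__stop_<section>` symbols for a section whose name is a valid C identifier (the kernel instead declares its bounds in the linker script).

```c
#include <stdio.h>

struct cpu_dev { const char *c_vendor; };
struct cpu_vendor_dev { int vendor; struct cpu_dev *cpu_dev; };

#define X86_VENDOR_INTEL 0	/* illustrative IDs, not the kernel's values */
#define X86_VENDOR_AMD   1
#define X86_VENDOR_NUM   2

/* Each translation unit registers itself by emitting one table entry;
 * nothing has to call it by name at boot. */
#define cpu_vendor_dev_register(id, dev)				\
	static struct cpu_vendor_dev __cpu_vendor_dev_##id		\
	__attribute__((used, section("x86cpuvendor"))) = { id, dev }

/* GNU ld provides these bounds automatically for C-identifier sections. */
extern struct cpu_vendor_dev __start_x86cpuvendor[], __stop_x86cpuvendor[];

static struct cpu_dev intel_cpu_dev = { .c_vendor = "Intel" };
static struct cpu_dev amd_cpu_dev   = { .c_vendor = "AMD" };
cpu_vendor_dev_register(X86_VENDOR_INTEL, &intel_cpu_dev);
cpu_vendor_dev_register(X86_VENDOR_AMD, &amd_cpu_dev);

static struct cpu_dev *cpu_devs[X86_VENDOR_NUM];

int main(void)
{
	struct cpu_vendor_dev *cvdev;
	int i;

	/* What early_cpu_init() now does: walk the table the linker built. */
	for (cvdev = __start_x86cpuvendor; cvdev < __stop_x86cpuvendor; cvdev++)
		cpu_devs[cvdev->vendor] = cvdev->cpu_dev;

	for (i = 0; i < X86_VENDOR_NUM; i++)
		printf("%d -> %s\n", i, cpu_devs[i] ? cpu_devs[i]->c_vendor : "none");
	return 0;
}
```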
@@ -37,8 +37,7 @@ static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
 			setCx86(CX86_CCR2, ccr2);
 			*dir0 = 0xfe;
 		}
-	}
-	else {
+	} else {
 		setCx86(CX86_CCR3, ccr3);       /* restore CCR3 */
 
 		/* read DIR0 and DIR1 CPU registers */
@@ -187,14 +186,16 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
 	char *buf = c->x86_model_id;
 	const char *p = NULL;
 
-	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
-	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
-	clear_bit(0*32+31, c->x86_capability);
+	/*
+	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
+	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
+	 */
+	clear_cpu_cap(c, 0*32+31);
 
 	/* Cyrix used bit 24 in extended (AMD) CPUID for Cyrix MMX extensions */
-	if ( test_bit(1*32+24, c->x86_capability) ) {
-		clear_bit(1*32+24, c->x86_capability);
-		set_bit(X86_FEATURE_CXMMX, c->x86_capability);
+	if (test_cpu_cap(c, 1*32+24)) {
+		clear_cpu_cap(c, 1*32+24);
+		set_cpu_cap(c, X86_FEATURE_CXMMX);
 	}
 
 	do_cyrix_devid(&dir0, &dir1);
@@ -241,7 +242,7 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
 		} else             /* 686 */
 			p = Cx86_cb+1;
 		/* Emulate MTRRs using Cyrix's ARRs. */
-		set_bit(X86_FEATURE_CYRIX_ARR, c->x86_capability);
+		set_cpu_cap(c, X86_FEATURE_CYRIX_ARR);
 		/* 6x86's contain this bug */
 		c->coma_bug = 1;
 		break;
@@ -250,16 +251,17 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
 #ifdef CONFIG_PCI
 		{
 			u32 vendor, device;
-			/* It isn't really a PCI quirk directly, but the cure is the
-			   same. The MediaGX has deep magic SMM stuff that handles the
-			   SB emulation. It throws away the fifo on disable_dma() which
-			   is wrong and ruins the audio.
-
-			   Bug2: VSA1 has a wrap bug so that using maximum sized DMA
-			   causes bad things. According to NatSemi VSA2 has another
-			   bug to do with 'hlt'. I've not seen any boards using VSA2
-			   and X doesn't seem to support it either so who cares 8).
-			   VSA1 we work around however.
-			*/
+			/*
+			 * It isn't really a PCI quirk directly, but the cure is the
+			 * same. The MediaGX has deep magic SMM stuff that handles the
+			 * SB emulation. It throws away the fifo on disable_dma() which
+			 * is wrong and ruins the audio.
+			 *
+			 * Bug2: VSA1 has a wrap bug so that using maximum sized DMA
+			 * causes bad things. According to NatSemi VSA2 has another
+			 * bug to do with 'hlt'. I've not seen any boards using VSA2
+			 * and X doesn't seem to support it either so who cares 8).
+			 * VSA1 we work around however.
+			 */
 
 			printk(KERN_INFO "Working around Cyrix MediaGX virtual DMA bugs.\n");
@@ -296,8 +298,7 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
 			geode_configure();
 			get_model_name(c);  /* get CPU marketing name */
 			return;
-		}
-		else { /* MediaGX */
+		} else { /* MediaGX */
 			Cx86_cb[2] = (dir0_lsn & 1) ? '3' : '4';
 			p = Cx86_cb+2;
 			c->x86_model = (dir1 & 0x20) ? 1 : 2;
@@ -305,14 +306,11 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
 		break;
 
 	case 5: /* 6x86MX/M II */
-		if (dir1 > 7)
-		{
+		if (dir1 > 7) {
 			dir0_msn++;  /* M II */
 			/* Enable MMX extensions (App note 108) */
 			setCx86(CX86_CCR7, getCx86(CX86_CCR7)|1);
-		}
-		else
-		{
+		} else {
 			c->coma_bug = 1;      /* 6x86MX, it has the bug. */
 		}
 		tmp = (!(dir0_lsn & 7) || dir0_lsn & 1) ? 2 : 0;
@@ -321,7 +319,7 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
 		if (((dir1 & 0x0f) > 4) || ((dir1 & 0xf0) == 0x20))
 			(c->x86_model)++;
 		/* Emulate MTRRs using Cyrix's ARRs. */
-		set_bit(X86_FEATURE_CYRIX_ARR, c->x86_capability);
+		set_cpu_cap(c, X86_FEATURE_CYRIX_ARR);
 		break;
 
 	case 0xf:  /* Cyrix 486 without DEVID registers */
@@ -343,7 +341,8 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
 		break;
 	}
 	strcpy(buf, Cx86_model[dir0_msn & 7]);
-	if (p) strcat(buf, p);
+	if (p)
+		strcat(buf, p);
 	return;
 }
 
@@ -352,7 +351,8 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
  */
 static void __cpuinit init_nsc(struct cpuinfo_x86 *c)
 {
-	/* There may be GX1 processors in the wild that are branded
+	/*
+	 * There may be GX1 processors in the wild that are branded
 	 * NSC and not Cyrix.
 	 *
 	 * This function only handles the GX processor, and kicks every
@@ -417,8 +417,7 @@ static void __cpuinit cyrix_identify(struct cpuinfo_x86 * c)
 
 		/* Check it is an affected model */
 
-		if (dir0 == 5 || dir0 == 3)
-		{
+		if (dir0 == 5 || dir0 == 3) {
 			unsigned char ccr3;
 			unsigned long flags;
 			printk(KERN_INFO "Enabling CPUID on Cyrix processor.\n");
@@ -439,11 +438,7 @@ static struct cpu_dev cyrix_cpu_dev __cpuinitdata = {
 	.c_identify	= cyrix_identify,
 };
 
-int __init cyrix_init_cpu(void)
-{
-	cpu_devs[X86_VENDOR_CYRIX] = &cyrix_cpu_dev;
-	return 0;
-}
+cpu_vendor_dev_register(X86_VENDOR_CYRIX, &cyrix_cpu_dev);
 
 static struct cpu_dev nsc_cpu_dev __cpuinitdata = {
 	.c_vendor	= "NSC",
@@ -451,9 +446,4 @@ static struct cpu_dev nsc_cpu_dev __cpuinitdata = {
 	.c_init		= init_nsc,
 };
 
-int __init nsc_init_cpu(void)
-{
-	cpu_devs[X86_VENDOR_NSC] = &nsc_cpu_dev;
-	return 0;
-}
-
+cpu_vendor_dev_register(X86_VENDOR_NSC, &nsc_cpu_dev);
@@ -4,7 +4,7 @@
  * This file must not contain any executable code.
  */
 
-#include "asm/cpufeature.h"
+#include <asm/cpufeature.h>
 
 /*
  * These flag bits must match the definitions in <asm/cpufeature.h>.
@@ -30,7 +30,7 @@
 struct movsl_mask movsl_mask __read_mostly;
 #endif
 
-void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
+static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
 {
 	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
 	if (c->x86 == 15 && c->x86_cache_alignment == 64)
@@ -127,7 +127,7 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 	 */
 	c->f00f_bug = 0;
 	if (!paravirt_enabled() && c->x86 == 5) {
-		static int f00f_workaround_enabled = 0;
+		static int f00f_workaround_enabled;
 
 		c->f00f_bug = 1;
 		if (!f00f_workaround_enabled) {
@@ -143,16 +143,18 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 		unsigned eax = cpuid_eax(10);
 		/* Check for version and the number of counters */
 		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
-			set_bit(X86_FEATURE_ARCH_PERFMON, c->x86_capability);
+			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
 	}
 
 	/* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until model 3 mask 3 */
 	if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
-		clear_bit(X86_FEATURE_SEP, c->x86_capability);
+		clear_cpu_cap(c, X86_FEATURE_SEP);
 
-	/* Names for the Pentium II/Celeron processors
-	   detectable only by also checking the cache size.
-	   Dixon is NOT a Celeron. */
+	/*
+	 * Names for the Pentium II/Celeron processors
+	 * detectable only by also checking the cache size.
+	 * Dixon is NOT a Celeron.
+	 */
 	if (c->x86 == 6) {
 		switch (c->x86_model) {
 		case 5:
@@ -207,19 +209,19 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 #endif
 
 	if (cpu_has_xmm2)
-		set_bit(X86_FEATURE_LFENCE_RDTSC, c->x86_capability);
+		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
 	if (c->x86 == 15) {
-		set_bit(X86_FEATURE_P4, c->x86_capability);
+		set_cpu_cap(c, X86_FEATURE_P4);
 	}
 	if (c->x86 == 6)
-		set_bit(X86_FEATURE_P3, c->x86_capability);
+		set_cpu_cap(c, X86_FEATURE_P3);
 	if (cpu_has_ds) {
 		unsigned int l1;
 		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
 		if (!(l1 & (1<<11)))
-			set_bit(X86_FEATURE_BTS, c->x86_capability);
+			set_cpu_cap(c, X86_FEATURE_BTS);
 		if (!(l1 & (1<<12)))
-			set_bit(X86_FEATURE_PEBS, c->x86_capability);
+			set_cpu_cap(c, X86_FEATURE_PEBS);
 	}
 
 	if (cpu_has_bts)
@@ -228,7 +230,8 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 
 static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 {
-	/* Intel PIII Tualatin. This comes in two flavours.
+	/*
+	 * Intel PIII Tualatin. This comes in two flavours.
 	 * One has 256kb of cache, the other 512. We have no way
 	 * to determine which, so we use a boottime override
 	 * for the 512kb model, and assume 256 otherwise.
@@ -290,15 +293,12 @@ static struct cpu_dev intel_cpu_dev __cpuinitdata = {
 		}
 		},
 	},
+	.c_early_init   = early_init_intel,
 	.c_init		= init_intel,
 	.c_size_cache	= intel_size_cache,
 };
 
-__init int intel_cpu_init(void)
-{
-	cpu_devs[X86_VENDOR_INTEL] = &intel_cpu_dev;
-	return 0;
-}
+cpu_vendor_dev_register(X86_VENDOR_INTEL, &intel_cpu_dev);
 
 #ifndef CONFIG_X86_CMPXCHG
 unsigned long cmpxchg_386_u8(volatile void *ptr, u8 old, u8 new)
@@ -364,5 +364,5 @@ unsigned long long cmpxchg_486_u64(volatile void *ptr, u64 old, u64 new)
 EXPORT_SYMBOL(cmpxchg_486_u64);
 #endif
 
-// arch_initcall(intel_cpu_init);
+/* arch_initcall(intel_cpu_init); */
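The SEP quirk above keeps its original trick of packing family, model, and stepping into a single integer so one comparison expresses "older than family 6, model 3, stepping 3". A tiny stand-alone illustration of that packing, with example CPUs assumed for the sake of the demo:

```c
#include <stdio.h>

/* Pack family/model/stepping the same way the SEP check does. */
static unsigned int packed_fms(unsigned int family, unsigned int model,
			       unsigned int stepping)
{
	return family << 8 | model << 4 | stepping;
}

int main(void)
{
	/* Pentium Pro (family 6, model 1) advertises SEP but cannot use it:
	 * 0x617 < 0x633, so the feature bit gets cleared. */
	printf("%#x -> %s\n", packed_fms(6, 1, 7),
	       packed_fms(6, 1, 7) < 0x633 ? "clear SEP" : "keep SEP");
	/* Family 6, model 3, stepping 3 is the first one that keeps it. */
	printf("%#x -> %s\n", packed_fms(6, 3, 3),
	       packed_fms(6, 3, 3) < 0x633 ? "clear SEP" : "keep SEP");
	return 0;
}
```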
@@ -16,7 +16,7 @@
 
 #include "mce.h"
 
-int mce_disabled = 0;
+int mce_disabled;
 int nr_mce_banks;
 
 EXPORT_SYMBOL_GPL(nr_mce_banks);	/* non-fatal.o */
@@ -41,7 +41,10 @@ static void mce_checkregs (void *info)
 					smp_processor_id());
 			printk(KERN_INFO "Bank %d: %08x%08x\n", i, high, low);
 
-			/* Scrub the error so we don't pick it up in MCE_RATE seconds time. */
+			/*
+			 * Scrub the error so we don't pick it up in MCE_RATE
+			 * seconds time.
+			 */
 			wrmsr(MSR_IA32_MC0_STATUS+i*4, 0UL, 0UL);
 
 			/* Serialize */
@@ -11,6 +11,7 @@
 #include <asm/cpufeature.h>
 #include <asm/processor-flags.h>
 #include <asm/tlbflush.h>
+#include <asm/pat.h>
 #include "mtrr.h"
 
 struct mtrr_state {
@@ -35,6 +36,8 @@ static struct fixed_range_block fixed_range_blocks[] = {
 
 static unsigned long smp_changes_mask;
 static struct mtrr_state mtrr_state = {};
+static int mtrr_state_set;
+static u64 tom2;
 
 #undef MODULE_PARAM_PREFIX
 #define MODULE_PARAM_PREFIX "mtrr."
@@ -42,6 +45,111 @@ static struct mtrr_state mtrr_state = {};
 static int mtrr_show;
 module_param_named(show, mtrr_show, bool, 0);
 
+/*
+ * Returns the effective MTRR type for the region
+ * Error returns:
+ * - 0xFE - when the range is "not entirely covered" by _any_ var range MTRR
+ * - 0xFF - when MTRR is not enabled
+ */
+u8 mtrr_type_lookup(u64 start, u64 end)
+{
+	int i;
+	u64 base, mask;
+	u8 prev_match, curr_match;
+
+	if (!mtrr_state_set)
+		return 0xFF;
+
+	if (!mtrr_state.enabled)
+		return 0xFF;
+
+	/* Make end inclusive end, instead of exclusive */
+	end--;
+
+	/* Look in fixed ranges. Just return the type as per start */
+	if (mtrr_state.have_fixed && (start < 0x100000)) {
+		int idx;
+
+		if (start < 0x80000) {
+			idx = 0;
+			idx += (start >> 16);
+			return mtrr_state.fixed_ranges[idx];
+		} else if (start < 0xC0000) {
+			idx = 1 * 8;
+			idx += ((start - 0x80000) >> 14);
+			return mtrr_state.fixed_ranges[idx];
+		} else if (start < 0x1000000) {
+			idx = 3 * 8;
+			idx += ((start - 0xC0000) >> 12);
+			return mtrr_state.fixed_ranges[idx];
+		}
+	}
+
+	/*
+	 * Look in variable ranges
+	 * Look of multiple ranges matching this address and pick type
+	 * as per MTRR precedence
+	 */
+	if (!mtrr_state.enabled & 2) {
+		return mtrr_state.def_type;
+	}
+
+	prev_match = 0xFF;
+	for (i = 0; i < num_var_ranges; ++i) {
+		unsigned short start_state, end_state;
+
+		if (!(mtrr_state.var_ranges[i].mask_lo & (1 << 11)))
+			continue;
+
+		base = (((u64)mtrr_state.var_ranges[i].base_hi) << 32) +
+		       (mtrr_state.var_ranges[i].base_lo & PAGE_MASK);
+		mask = (((u64)mtrr_state.var_ranges[i].mask_hi) << 32) +
+		       (mtrr_state.var_ranges[i].mask_lo & PAGE_MASK);
+
+		start_state = ((start & mask) == (base & mask));
+		end_state = ((end & mask) == (base & mask));
+		if (start_state != end_state)
+			return 0xFE;
+
+		if ((start & mask) != (base & mask)) {
+			continue;
+		}
+
+		curr_match = mtrr_state.var_ranges[i].base_lo & 0xff;
+		if (prev_match == 0xFF) {
+			prev_match = curr_match;
+			continue;
+		}
+
+		if (prev_match == MTRR_TYPE_UNCACHABLE ||
+		    curr_match == MTRR_TYPE_UNCACHABLE) {
+			return MTRR_TYPE_UNCACHABLE;
+		}
+
+		if ((prev_match == MTRR_TYPE_WRBACK &&
+		     curr_match == MTRR_TYPE_WRTHROUGH) ||
+		    (prev_match == MTRR_TYPE_WRTHROUGH &&
+		     curr_match == MTRR_TYPE_WRBACK)) {
+			prev_match = MTRR_TYPE_WRTHROUGH;
+			curr_match = MTRR_TYPE_WRTHROUGH;
+		}
+
+		if (prev_match != curr_match) {
+			return MTRR_TYPE_UNCACHABLE;
+		}
+	}
+
+	if (tom2) {
+		if (start >= (1ULL<<32) && (end < tom2))
+			return MTRR_TYPE_WRBACK;
+	}
+
+	if (prev_match != 0xFF)
+		return prev_match;
+
+	return mtrr_state.def_type;
+}
+
 /*  Get the MSR pair relating to a var range  */
 static void
 get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
@@ -79,12 +187,16 @@ static void print_fixed(unsigned base, unsigned step, const mtrr_type*types)
 		       base, base + step - 1, mtrr_attrib_to_str(*types));
 }
 
+static void prepare_set(void);
+static void post_set(void);
+
 /*  Grab all of the MTRR state for this CPU into *state  */
 void __init get_mtrr_state(void)
 {
 	unsigned int i;
 	struct mtrr_var_range *vrs;
 	unsigned lo, dummy;
+	unsigned long flags;
 
 	vrs = mtrr_state.var_ranges;
 
@@ -100,6 +212,15 @@ void __init get_mtrr_state(void)
 	mtrr_state.def_type = (lo & 0xff);
 	mtrr_state.enabled = (lo & 0xc00) >> 10;
 
+	if (amd_special_default_mtrr()) {
+		unsigned lo, hi;
+		/* TOP_MEM2 */
+		rdmsr(MSR_K8_TOP_MEM2, lo, hi);
+		tom2 = hi;
+		tom2 <<= 32;
+		tom2 |= lo;
+		tom2 &= 0xffffff8000000ULL;
+	}
 	if (mtrr_show) {
 		int high_width;
 
@@ -130,8 +251,23 @@ void __init get_mtrr_state(void)
 			else
 				printk(KERN_INFO "MTRR %u disabled\n", i);
 		}
+		if (tom2) {
+			printk(KERN_INFO "TOM2: %016llx aka %lldM\n",
+				tom2, tom2>>20);
+		}
 	}
+	mtrr_state_set = 1;
+
+	/* PAT setup for BP. We need to go through sync steps here */
+	local_irq_save(flags);
+	prepare_set();
+
+	pat_init();
+
+	post_set();
+	local_irq_restore(flags);
+
 }
 
 /*  Some BIOS's are fucked and don't set all MTRRs the same!  */
 void __init mtrr_state_warn(void)
@@ -397,6 +533,9 @@ static void generic_set_all(void)
 	/* Actually set the state */
 	mask = set_mtrr_state();
 
+	/* also set PAT */
+	pat_init();
+
 	post_set();
 	local_irq_restore(flags);
 
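The new `mtrr_type_lookup()` added above returns either an architectural MTRR memory type or one of two sentinel error codes (0xFE for a range that is not covered by a single variable MTRR, 0xFF when MTRR state is unavailable). A minimal caller-side sketch of how those return values could be interpreted follows; the `mtrr_type_name()` helper is hypothetical, and the numeric type encodings are the standard MTRR values from the Intel manual rather than constants taken from this diff.

```c
#include <stdio.h>

/* Architectural MTRR memory-type encodings. */
#define MTRR_TYPE_UNCACHABLE 0
#define MTRR_TYPE_WRCOMB     1
#define MTRR_TYPE_WRTHROUGH  4
#define MTRR_TYPE_WRPROT     5
#define MTRR_TYPE_WRBACK     6

/* Hypothetical helper mapping a lookup result to a description. */
static const char *mtrr_type_name(unsigned char type)
{
	switch (type) {
	case MTRR_TYPE_UNCACHABLE: return "uncachable";
	case MTRR_TYPE_WRCOMB:     return "write-combining";
	case MTRR_TYPE_WRTHROUGH:  return "write-through";
	case MTRR_TYPE_WRPROT:     return "write-protect";
	case MTRR_TYPE_WRBACK:     return "write-back";
	case 0xFE:                 return "range spans MTRRs, no single type";
	case 0xFF:                 return "MTRRs disabled or uninitialized";
	default:                   return "unknown";
	}
}

int main(void)
{
	printf("%s\n", mtrr_type_name(MTRR_TYPE_WRBACK));
	printf("%s\n", mtrr_type_name(0xFE));
	return 0;
}
```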
@@ -424,11 +424,10 @@ static int __init mtrr_if_init(void)
 		return -ENODEV;
 
 	proc_root_mtrr =
-	    create_proc_entry("mtrr", S_IWUSR | S_IRUGO, &proc_root);
-	if (proc_root_mtrr) {
+		proc_create("mtrr", S_IWUSR | S_IRUGO, &proc_root, &mtrr_fops);
+
+	if (proc_root_mtrr)
 		proc_root_mtrr->owner = THIS_MODULE;
-		proc_root_mtrr->proc_fops = &mtrr_fops;
-	}
 	return 0;
 }
 
@@ -627,7 +627,7 @@ early_param("disable_mtrr_trim", disable_mtrr_trim_setup);
 #define Tom2Enabled (1U << 21)
 #define Tom2ForceMemTypeWB (1U << 22)
 
-static __init int amd_special_default_mtrr(void)
+int __init amd_special_default_mtrr(void)
 {
 	u32 l, h;
 
@@ -24,8 +24,10 @@ void set_mtrr_prepare_save(struct set_mtrr_context *ctxt)
 			write_cr4(ctxt->cr4val & ~X86_CR4_PGE);
 	}
 
-	/* Disable and flush caches. Note that wbinvd flushes the TLBs as
-	   a side-effect */
+	/*
+	 * Disable and flush caches. Note that wbinvd flushes the TLBs
+	 * as a side-effect
+	 */
 	cr0 = read_cr0() | X86_CR0_CD;
 	wbinvd();
 	write_cr0(cr0);
@@ -35,10 +35,9 @@ static void __cpuinit init_nexgen(struct cpuinfo_x86 * c)
 static void __cpuinit nexgen_identify(struct cpuinfo_x86 *c)
 {
 	/* Detect NexGen with old hypercode */
-	if ( deep_magic_nexgen_probe() ) {
+	if (deep_magic_nexgen_probe())
 		strcpy(c->x86_vendor_id, "NexGenDriven");
-	}
 }
 
 static struct cpu_dev nexgen_cpu_dev __cpuinitdata = {
 	.c_vendor	= "Nexgen",
@@ -8,21 +8,91 @@
 /*
  *	Get CPU information for use by the procfs.
  */
+#ifdef CONFIG_X86_32
+static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c,
+			      unsigned int cpu)
+{
+#ifdef CONFIG_X86_HT
+	if (c->x86_max_cores * smp_num_siblings > 1) {
+		seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
+		seq_printf(m, "siblings\t: %d\n",
+			   cpus_weight(per_cpu(cpu_core_map, cpu)));
+		seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
+		seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
+		seq_printf(m, "apicid\t\t: %d\n", c->apicid);
+		seq_printf(m, "initial apicid\t: %d\n", c->initial_apicid);
+	}
+#endif
+}
+
+static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
+{
+	/*
+	 * We use exception 16 if we have hardware math and we've either seen
+	 * it or the CPU claims it is internal
+	 */
+	int fpu_exception = c->hard_math && (ignore_fpu_irq || cpu_has_fpu);
+	seq_printf(m,
+		   "fdiv_bug\t: %s\n"
+		   "hlt_bug\t\t: %s\n"
+		   "f00f_bug\t: %s\n"
+		   "coma_bug\t: %s\n"
+		   "fpu\t\t: %s\n"
+		   "fpu_exception\t: %s\n"
+		   "cpuid level\t: %d\n"
+		   "wp\t\t: %s\n",
+		   c->fdiv_bug ? "yes" : "no",
+		   c->hlt_works_ok ? "no" : "yes",
+		   c->f00f_bug ? "yes" : "no",
+		   c->coma_bug ? "yes" : "no",
+		   c->hard_math ? "yes" : "no",
+		   fpu_exception ? "yes" : "no",
+		   c->cpuid_level,
+		   c->wp_works_ok ? "yes" : "no");
+}
+#else
+static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c,
+			      unsigned int cpu)
+{
+#ifdef CONFIG_SMP
+	if (c->x86_max_cores * smp_num_siblings > 1) {
+		seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
+		seq_printf(m, "siblings\t: %d\n",
+			   cpus_weight(per_cpu(cpu_core_map, cpu)));
+		seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
+		seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
+		seq_printf(m, "apicid\t\t: %d\n", c->apicid);
+		seq_printf(m, "initial apicid\t: %d\n", c->initial_apicid);
+	}
+#endif
+}
+
+static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
+{
+	seq_printf(m,
+		   "fpu\t\t: yes\n"
+		   "fpu_exception\t: yes\n"
+		   "cpuid level\t: %d\n"
+		   "wp\t\t: yes\n",
+		   c->cpuid_level);
+}
+#endif
+
 static int show_cpuinfo(struct seq_file *m, void *v)
 {
 	struct cpuinfo_x86 *c = v;
-	int i, n = 0;
-	int fpu_exception;
+	unsigned int cpu = 0;
+	int i;
 
 #ifdef CONFIG_SMP
-	n = c->cpu_index;
+	cpu = c->cpu_index;
 #endif
-	seq_printf(m, "processor\t: %d\n"
+	seq_printf(m, "processor\t: %u\n"
 		   "vendor_id\t: %s\n"
 		   "cpu family\t: %d\n"
-		   "model\t\t: %d\n"
+		   "model\t\t: %u\n"
 		   "model name\t: %s\n",
-		   n,
+		   cpu,
 		   c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
 		   c->x86,
 		   c->x86_model,
@@ -34,7 +104,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		seq_printf(m, "stepping\t: unknown\n");
 
 	if (cpu_has(c, X86_FEATURE_TSC)) {
-		unsigned int freq = cpufreq_quick_get(n);
+		unsigned int freq = cpufreq_quick_get(cpu);
+
 		if (!freq)
 			freq = cpu_khz;
 		seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
@@ -44,42 +115,32 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 	/* Cache size */
 	if (c->x86_cache_size >= 0)
 		seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
-#ifdef CONFIG_X86_HT
-	if (c->x86_max_cores * smp_num_siblings > 1) {
-		seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
-		seq_printf(m, "siblings\t: %d\n",
-			   cpus_weight(per_cpu(cpu_core_map, n)));
-		seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
-		seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
-	}
-#endif
 
-	/* We use exception 16 if we have hardware math and we've either seen it or the CPU claims it is internal */
-	fpu_exception = c->hard_math && (ignore_fpu_irq || cpu_has_fpu);
-	seq_printf(m, "fdiv_bug\t: %s\n"
-		   "hlt_bug\t\t: %s\n"
-		   "f00f_bug\t: %s\n"
-		   "coma_bug\t: %s\n"
-		   "fpu\t\t: %s\n"
-		   "fpu_exception\t: %s\n"
-		   "cpuid level\t: %d\n"
-		   "wp\t\t: %s\n"
-		   "flags\t\t:",
-		   c->fdiv_bug ? "yes" : "no",
-		   c->hlt_works_ok ? "no" : "yes",
-		   c->f00f_bug ? "yes" : "no",
-		   c->coma_bug ? "yes" : "no",
-		   c->hard_math ? "yes" : "no",
-		   fpu_exception ? "yes" : "no",
-		   c->cpuid_level,
-		   c->wp_works_ok ? "yes" : "no");
+	show_cpuinfo_core(m, c, cpu);
+	show_cpuinfo_misc(m, c);
 
+	seq_printf(m, "flags\t\t:");
 	for (i = 0; i < 32*NCAPINTS; i++)
-		if ( test_bit(i, c->x86_capability) &&
-		     x86_cap_flags[i] != NULL )
+		if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
 			seq_printf(m, " %s", x86_cap_flags[i]);
 
-	for (i = 0; i < 32; i++)
+	seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
+		   c->loops_per_jiffy/(500000/HZ),
+		   (c->loops_per_jiffy/(5000/HZ)) % 100);
+
+#ifdef CONFIG_X86_64
+	if (c->x86_tlbsize > 0)
+		seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
+#endif
+	seq_printf(m, "clflush size\t: %u\n", c->x86_clflush_size);
+#ifdef CONFIG_X86_64
+	seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);
+	seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
+		   c->x86_phys_bits, c->x86_virt_bits);
+#endif
+
+	seq_printf(m, "power management:");
+	for (i = 0; i < 32; i++) {
 		if (c->x86_power & (1 << i)) {
 			if (i < ARRAY_SIZE(x86_power_flags) &&
 			    x86_power_flags[i])
@@ -89,11 +150,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 			else
 				seq_printf(m, " [%d]", i);
 		}
+	}
 
-	seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
-		   c->loops_per_jiffy/(500000/HZ),
-		   (c->loops_per_jiffy/(5000/HZ)) % 100);
-	seq_printf(m, "clflush size\t: %u\n\n", c->x86_clflush_size);
+	seq_printf(m, "\n\n");
 
 	return 0;
 }
@@ -106,14 +165,17 @@ static void *c_start(struct seq_file *m, loff_t *pos)
 		return &cpu_data(*pos);
 	return NULL;
 }
+
 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
 {
 	*pos = next_cpu(*pos, cpu_online_map);
 	return c_start(m, pos);
 }
+
 static void c_stop(struct seq_file *m, void *v)
 {
 }
+
 const struct seq_operations cpuinfo_op = {
 	.start	= c_start,
 	.next	= c_next,
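The "bogomips" line kept above is a small piece of integer arithmetic: with HZ timer ticks per second, loops-per-second is loops_per_jiffy times HZ, and the conventional BogoMIPS scale divides that by 500000. A short stand-alone check of the formatting expression, with an assumed HZ and an example loops_per_jiffy value:

```c
#include <stdio.h>

#define HZ 250				/* assumed kernel tick rate */

int main(void)
{
	unsigned long loops_per_jiffy = 4000000UL;	/* example calibration result */

	/* 4e6 loops per 1/250 s tick = 1e9 loops/s = 2000.00 BogoMIPS */
	printf("bogomips\t: %lu.%02lu\n",
	       loops_per_jiffy / (500000 / HZ),
	       (loops_per_jiffy / (5000 / HZ)) % 100);
	return 0;
}
```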
@@ -74,11 +74,13 @@ static void __cpuinit init_transmeta(struct cpuinfo_x86 *c)
 		wrmsr(0x80860004, cap_mask, uk);
 
 	/* All Transmeta CPUs have a constant TSC */
-	set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
+	set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 
 #ifdef CONFIG_SYSCTL
-	/* randomize_va_space slows us down enormously;
-	   it probably triggers retranslation of x86->native bytecode */
+	/*
+	 * randomize_va_space slows us down enormously;
+	 * it probably triggers retranslation of x86->native bytecode
+	 */
 	randomize_va_space = 0;
 #endif
 }
@@ -102,8 +104,4 @@ static struct cpu_dev transmeta_cpu_dev __cpuinitdata = {
 	.c_identify	= transmeta_identify,
 };
 
-int __init transmeta_init_cpu(void)
-{
-	cpu_devs[X86_VENDOR_TRANSMETA] = &transmeta_cpu_dev;
-	return 0;
-}
+cpu_vendor_dev_register(X86_VENDOR_TRANSMETA, &transmeta_cpu_dev);
@@ -3,7 +3,9 @@
 #include <asm/processor.h>
 #include "cpu.h"
 
-/* UMC chips appear to be only either 386 or 486, so no special init takes place.
+/*
+ * UMC chips appear to be only either 386 or 486,
+ * so no special init takes place.
  */
 
 static struct cpu_dev umc_cpu_dev __cpuinitdata = {
@@ -19,8 +21,5 @@ static struct cpu_dev umc_cpu_dev __cpuinitdata = {
 	},
 };
 
-int __init umc_init_cpu(void)
-{
-	cpu_devs[X86_VENDOR_UMC] = &umc_cpu_dev;
-	return 0;
-}
+cpu_vendor_dev_register(X86_VENDOR_UMC, &umc_cpu_dev);
 
@@ -26,11 +26,7 @@
 #include <linux/kdebug.h>
 #include <asm/smp.h>
 
-#ifdef CONFIG_X86_32
 #include <mach_ipi.h>
-#else
-#include <asm/mach_apic.h>
-#endif
 
 /* This keeps a track of which one is crashing cpu. */
 static int crashing_cpu;
@@ -220,11 +220,11 @@ int ds_allocate(void **dsp, size_t bts_size_in_bytes)
 
 int ds_free(void **dsp)
 {
-	if (*dsp)
+	if (*dsp) {
 		kfree((void *)get_bts_buffer_base(*dsp));
-	kfree(*dsp);
-	*dsp = NULL;
+		kfree(*dsp);
+		*dsp = NULL;
+	}
 	return 0;
 }
 
@@ -457,31 +457,18 @@ int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
 		return -1;
 
 	do {
-		unsigned long long start = biosmap->addr;
-		unsigned long long size = biosmap->size;
-		unsigned long long end = start + size;
-		unsigned long type = biosmap->type;
+		u64 start = biosmap->addr;
+		u64 size = biosmap->size;
+		u64 end = start + size;
+		u32 type = biosmap->type;
 
 		/* Overflow in 64 bits? Ignore the memory map. */
 		if (start > end)
 			return -1;
 
-		/*
-		 * Some BIOSes claim RAM in the 640k - 1M region.
-		 * Not right. Fix it up.
-		 */
-		if (type == E820_RAM) {
-			if (start < 0x100000ULL && end > 0xA0000ULL) {
-				if (start < 0xA0000ULL)
-					add_memory_region(start, 0xA0000ULL-start, type);
-				if (end <= 0x100000ULL)
-					continue;
-				start = 0x100000ULL;
-				size = end - start;
-			}
-		}
 		add_memory_region(start, size, type);
 	} while (biosmap++, --nr_map);
 
 	return 0;
 }
 
@@ -27,6 +27,7 @@
 #include <asm/setup.h>
 #include <asm/sections.h>
 #include <asm/kdebug.h>
+#include <asm/trampoline.h>
 
 struct e820map e820;
 
@@ -36,11 +37,11 @@ struct e820map e820;
 unsigned long end_pfn;
 
 /*
- * end_pfn only includes RAM, while end_pfn_map includes all e820 entries.
- * The direct mapping extends to end_pfn_map, so that we can directly access
+ * end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries.
+ * The direct mapping extends to max_pfn_mapped, so that we can directly access
  * apertures, ACPI and other tables without having to play with fixmaps.
  */
-unsigned long end_pfn_map;
+unsigned long max_pfn_mapped;
 
 /*
  * Last pfn which the user wants to use.
@@ -58,8 +59,8 @@ struct early_res {
 };
 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
 	{ 0, PAGE_SIZE, "BIOS data page" },	/* BIOS data page */
-#ifdef CONFIG_SMP
-	{ SMP_TRAMPOLINE_BASE, SMP_TRAMPOLINE_BASE + 2*PAGE_SIZE, "SMP_TRAMPOLINE" },
+#ifdef CONFIG_X86_TRAMPOLINE
+	{ TRAMPOLINE_BASE, TRAMPOLINE_BASE + 2 * PAGE_SIZE, "TRAMPOLINE" },
 #endif
 	{}
 };
@@ -95,7 +96,8 @@ void __init early_res_to_bootmem(void)
 }
 
 /* Check for already reserved areas */
-static inline int bad_addr(unsigned long *addrp, unsigned long size)
+static inline int
+bad_addr(unsigned long *addrp, unsigned long size, unsigned long align)
 {
 	int i;
 	unsigned long addr = *addrp, last;
@@ -105,7 +107,7 @@ static inline int bad_addr(unsigned long *addrp, unsigned long size)
 	for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
 		struct early_res *r = &early_res[i];
 		if (last >= r->start && addr < r->end) {
-			*addrp = addr = r->end;
+			*addrp = addr = round_up(r->end, align);
 			changed = 1;
 			goto again;
 		}
@@ -113,6 +115,40 @@ static inline int bad_addr(unsigned long *addrp, unsigned long size)
 	return changed;
 }
 
+/* Check for already reserved areas */
+static inline int
+bad_addr_size(unsigned long *addrp, unsigned long *sizep, unsigned long align)
+{
+	int i;
+	unsigned long addr = *addrp, last;
+	unsigned long size = *sizep;
+	int changed = 0;
+again:
+	last = addr + size;
+	for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
+		struct early_res *r = &early_res[i];
+		if (last > r->start && addr < r->start) {
+			size = r->start - addr;
+			changed = 1;
+			goto again;
+		}
+		if (last > r->end && addr < r->end) {
+			addr = round_up(r->end, align);
+			size = last - addr;
+			changed = 1;
+			goto again;
+		}
+		if (last <= r->end && addr >= r->start) {
+			(*sizep)++;
+			return 0;
+		}
+	}
+	if (changed) {
+		*addrp = addr;
+		*sizep = size;
+	}
+	return changed;
+}
 /*
  * This function checks if any part of the range <start,end> is mapped
  * with type.
@@ -174,26 +210,27 @@ int __init e820_all_mapped(unsigned long start, unsigned long end,
  * Find a free area with specified alignment in a specific range.
  */
 unsigned long __init find_e820_area(unsigned long start, unsigned long end,
-				    unsigned size, unsigned long align)
+				    unsigned long size, unsigned long align)
 {
 	int i;
-	unsigned long mask = ~(align - 1);
 
 	for (i = 0; i < e820.nr_map; i++) {
 		struct e820entry *ei = &e820.map[i];
-		unsigned long addr = ei->addr, last;
+		unsigned long addr, last;
+		unsigned long ei_last;
 
 		if (ei->type != E820_RAM)
 			continue;
+		addr = round_up(ei->addr, align);
+		ei_last = ei->addr + ei->size;
 		if (addr < start)
-			addr = start;
-		if (addr > ei->addr + ei->size)
+			addr = round_up(start, align);
+		if (addr >= ei_last)
 			continue;
-		while (bad_addr(&addr, size) && addr+size <= ei->addr+ei->size)
+		while (bad_addr(&addr, size, align) && addr+size <= ei_last)
 			;
-		addr = (addr + align - 1) & mask;
 		last = addr + size;
-		if (last > ei->addr + ei->size)
+		if (last > ei_last)
 			continue;
 		if (last > end)
 			continue;
@ -202,6 +239,40 @@ unsigned long __init find_e820_area(unsigned long start, unsigned long end,
|
||||||
return -1UL;
|
return -1UL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Find next free range after *start
|
||||||
|
*/
|
||||||
|
unsigned long __init find_e820_area_size(unsigned long start,
|
||||||
|
unsigned long *sizep,
|
||||||
|
unsigned long align)
|
||||||
|
{
|
||||||
|
int i;
|
||||||
|
|
||||||
|
for (i = 0; i < e820.nr_map; i++) {
|
||||||
|
struct e820entry *ei = &e820.map[i];
|
||||||
|
unsigned long addr, last;
|
||||||
|
unsigned long ei_last;
|
||||||
|
|
||||||
|
if (ei->type != E820_RAM)
|
||||||
|
continue;
|
||||||
|
addr = round_up(ei->addr, align);
|
||||||
|
ei_last = ei->addr + ei->size;
|
||||||
|
if (addr < start)
|
||||||
|
addr = round_up(start, align);
|
||||||
|
if (addr >= ei_last)
|
||||||
|
continue;
|
||||||
|
*sizep = ei_last - addr;
|
||||||
|
while (bad_addr_size(&addr, sizep, align) &&
|
||||||
|
addr + *sizep <= ei_last)
|
||||||
|
;
|
||||||
|
last = addr + *sizep;
|
||||||
|
if (last > ei_last)
|
||||||
|
continue;
|
||||||
|
return addr;
|
||||||
|
}
|
||||||
|
return -1UL;
|
||||||
|
|
||||||
|
}
|
||||||
/*
|
/*
|
||||||
* Find the highest page frame number we have available
|
* Find the highest page frame number we have available
|
||||||
*/
|
*/
|
||||||
|
@@ -211,29 +282,29 @@ unsigned long __init e820_end_of_ram(void)
 
 	end_pfn = find_max_pfn_with_active_regions();
 
-	if (end_pfn > end_pfn_map)
-		end_pfn_map = end_pfn;
-	if (end_pfn_map > MAXMEM>>PAGE_SHIFT)
-		end_pfn_map = MAXMEM>>PAGE_SHIFT;
+	if (end_pfn > max_pfn_mapped)
+		max_pfn_mapped = end_pfn;
+	if (max_pfn_mapped > MAXMEM>>PAGE_SHIFT)
+		max_pfn_mapped = MAXMEM>>PAGE_SHIFT;
 	if (end_pfn > end_user_pfn)
 		end_pfn = end_user_pfn;
-	if (end_pfn > end_pfn_map)
-		end_pfn = end_pfn_map;
+	if (end_pfn > max_pfn_mapped)
+		end_pfn = max_pfn_mapped;
 
-	printk(KERN_INFO "end_pfn_map = %lu\n", end_pfn_map);
+	printk(KERN_INFO "max_pfn_mapped = %lu\n", max_pfn_mapped);
 	return end_pfn;
 }
 
 /*
  * Mark e820 reserved areas as busy for the resource manager.
  */
-void __init e820_reserve_resources(struct resource *code_resource,
-		struct resource *data_resource, struct resource *bss_resource)
+void __init e820_reserve_resources(void)
 {
 	int i;
-	for (i = 0; i < e820.nr_map; i++) {
 	struct resource *res;
-		res = alloc_bootmem_low(sizeof(struct resource));
+
+	res = alloc_bootmem_low(sizeof(struct resource) * e820.nr_map);
+	for (i = 0; i < e820.nr_map; i++) {
 		switch (e820.map[i].type) {
 		case E820_RAM:	res->name = "System RAM"; break;
 		case E820_ACPI:	res->name = "ACPI Tables"; break;
@@ -243,21 +314,8 @@ void __init e820_reserve_resources(struct resource *code_resource,
 		res->start = e820.map[i].addr;
 		res->end = res->start + e820.map[i].size - 1;
 		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
-		request_resource(&iomem_resource, res);
-		if (e820.map[i].type == E820_RAM) {
-			/*
-			 * We don't know which RAM region contains kernel data,
-			 * so we try it repeatedly and let the resource manager
-			 * test it.
-			 */
-			request_resource(res, code_resource);
-			request_resource(res, data_resource);
-			request_resource(res, bss_resource);
-#ifdef CONFIG_KEXEC
-			if (crashk_res.start != crashk_res.end)
-				request_resource(res, &crashk_res);
-#endif
-		}
+		insert_resource(&iomem_resource, res);
+		res++;
 	}
 }
@@ -309,9 +367,9 @@ static int __init e820_find_active_region(const struct e820entry *ei,
 	if (*ei_startpfn >= *ei_endpfn)
 		return 0;
 
-	/* Check if end_pfn_map should be updated */
-	if (ei->type != E820_RAM && *ei_endpfn > end_pfn_map)
-		end_pfn_map = *ei_endpfn;
+	/* Check if max_pfn_mapped should be updated */
+	if (ei->type != E820_RAM && *ei_endpfn > max_pfn_mapped)
+		max_pfn_mapped = *ei_endpfn;
 
 	/* Skip if map is outside the node */
 	if (ei->type != E820_RAM || *ei_endpfn <= start_pfn ||
@@ -634,10 +692,10 @@ static int __init copy_e820_map(struct e820entry *biosmap, int nr_map)
 		return -1;
 
 	do {
-		unsigned long start = biosmap->addr;
-		unsigned long size = biosmap->size;
-		unsigned long end = start + size;
-		unsigned long type = biosmap->type;
+		u64 start = biosmap->addr;
+		u64 size = biosmap->size;
+		u64 end = start + size;
+		u32 type = biosmap->type;
 
 		/* Overflow in 64 bits? Ignore the memory map. */
 		if (start > end)
@@ -702,7 +760,7 @@ static int __init parse_memmap_opt(char *p)
 		saved_max_pfn = e820_end_of_ram();
 		remove_all_active_ranges();
 #endif
-		end_pfn_map = 0;
+		max_pfn_mapped = 0;
 		e820.nr_map = 0;
 		userdef = 1;
 		return 0;
@@ -13,7 +13,7 @@
 #define VGABASE		(__ISA_IO_base + 0xb8000)
 
 static int max_ypos = 25, max_xpos = 80;
-static int current_ypos = 25, current_xpos = 0;
+static int current_ypos = 25, current_xpos;
 
 static void early_vga_write(struct console *con, const char *str, unsigned n)
 {
@@ -194,7 +194,7 @@ static struct console simnow_console = {
 
 /* Direct interface for emergencies */
 static struct console *early_console = &early_vga_console;
-static int early_console_initialized = 0;
+static int early_console_initialized;
 
 void early_printk(const char *fmt, ...)
 {
@@ -51,6 +51,7 @@
 #include <asm/desc.h>
 #include <asm/percpu.h>
 #include <asm/dwarf2.h>
+#include <asm/processor-flags.h>
 #include "irq_vectors.h"
 
 /*
@@ -68,13 +69,6 @@
 
 #define nr_syscalls ((syscall_table_size)/4)
 
-CF_MASK		= 0x00000001
-TF_MASK		= 0x00000100
-IF_MASK		= 0x00000200
-DF_MASK		= 0x00000400
-NT_MASK		= 0x00004000
-VM_MASK		= 0x00020000
-
 #ifdef CONFIG_PREEMPT
 #define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
 #else
|
||||||
|
|
||||||
.macro TRACE_IRQS_IRET
|
.macro TRACE_IRQS_IRET
|
||||||
#ifdef CONFIG_TRACE_IRQFLAGS
|
#ifdef CONFIG_TRACE_IRQFLAGS
|
||||||
testl $IF_MASK,PT_EFLAGS(%esp) # interrupts off?
|
testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off?
|
||||||
jz 1f
|
jz 1f
|
||||||
TRACE_IRQS_ON
|
TRACE_IRQS_ON
|
||||||
1:
|
1:
|
||||||
|
@ -246,7 +240,7 @@ ret_from_intr:
|
||||||
check_userspace:
|
check_userspace:
|
||||||
movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS
|
movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS
|
||||||
movb PT_CS(%esp), %al
|
movb PT_CS(%esp), %al
|
||||||
andl $(VM_MASK | SEGMENT_RPL_MASK), %eax
|
andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
|
||||||
cmpl $USER_RPL, %eax
|
cmpl $USER_RPL, %eax
|
||||||
jb resume_kernel # not returning to v8086 or userspace
|
jb resume_kernel # not returning to v8086 or userspace
|
||||||
|
|
||||||
|
@ -271,7 +265,7 @@ need_resched:
|
||||||
movl TI_flags(%ebp), %ecx # need_resched set ?
|
movl TI_flags(%ebp), %ecx # need_resched set ?
|
||||||
testb $_TIF_NEED_RESCHED, %cl
|
testb $_TIF_NEED_RESCHED, %cl
|
||||||
jz restore_all
|
jz restore_all
|
||||||
testl $IF_MASK,PT_EFLAGS(%esp) # interrupts off (exception path) ?
|
testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off (exception path) ?
|
||||||
jz restore_all
|
jz restore_all
|
||||||
call preempt_schedule_irq
|
call preempt_schedule_irq
|
||||||
jmp need_resched
|
jmp need_resched
|
||||||
|
@ -291,10 +285,10 @@ ENTRY(ia32_sysenter_target)
|
||||||
movl TSS_sysenter_sp0(%esp),%esp
|
movl TSS_sysenter_sp0(%esp),%esp
|
||||||
sysenter_past_esp:
|
sysenter_past_esp:
|
||||||
/*
|
/*
|
||||||
* No need to follow this irqs on/off section: the syscall
|
* Interrupts are disabled here, but we can't trace it until
|
||||||
* disabled irqs and here we enable it straight after entry:
|
* enough kernel state to call TRACE_IRQS_OFF can be called - but
|
||||||
|
* we immediately enable interrupts at that point anyway.
|
||||||
*/
|
*/
|
||||||
ENABLE_INTERRUPTS(CLBR_NONE)
|
|
||||||
pushl $(__USER_DS)
|
pushl $(__USER_DS)
|
||||||
CFI_ADJUST_CFA_OFFSET 4
|
CFI_ADJUST_CFA_OFFSET 4
|
||||||
/*CFI_REL_OFFSET ss, 0*/
|
/*CFI_REL_OFFSET ss, 0*/
|
||||||
|
@ -302,6 +296,7 @@ sysenter_past_esp:
|
||||||
CFI_ADJUST_CFA_OFFSET 4
|
CFI_ADJUST_CFA_OFFSET 4
|
||||||
CFI_REL_OFFSET esp, 0
|
CFI_REL_OFFSET esp, 0
|
||||||
pushfl
|
pushfl
|
||||||
|
orl $X86_EFLAGS_IF, (%esp)
|
||||||
CFI_ADJUST_CFA_OFFSET 4
|
CFI_ADJUST_CFA_OFFSET 4
|
||||||
pushl $(__USER_CS)
|
pushl $(__USER_CS)
|
||||||
CFI_ADJUST_CFA_OFFSET 4
|
CFI_ADJUST_CFA_OFFSET 4
|
||||||
|
@ -315,6 +310,11 @@ sysenter_past_esp:
|
||||||
CFI_ADJUST_CFA_OFFSET 4
|
CFI_ADJUST_CFA_OFFSET 4
|
||||||
CFI_REL_OFFSET eip, 0
|
CFI_REL_OFFSET eip, 0
|
||||||
|
|
||||||
|
pushl %eax
|
||||||
|
CFI_ADJUST_CFA_OFFSET 4
|
||||||
|
SAVE_ALL
|
||||||
|
ENABLE_INTERRUPTS(CLBR_NONE)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Load the potential sixth argument from user stack.
|
* Load the potential sixth argument from user stack.
|
||||||
* Careful about security.
|
* Careful about security.
|
||||||
|
@ -322,14 +322,12 @@ sysenter_past_esp:
|
||||||
cmpl $__PAGE_OFFSET-3,%ebp
|
cmpl $__PAGE_OFFSET-3,%ebp
|
||||||
jae syscall_fault
|
jae syscall_fault
|
||||||
1: movl (%ebp),%ebp
|
1: movl (%ebp),%ebp
|
||||||
|
movl %ebp,PT_EBP(%esp)
|
||||||
.section __ex_table,"a"
|
.section __ex_table,"a"
|
||||||
.align 4
|
.align 4
|
||||||
.long 1b,syscall_fault
|
.long 1b,syscall_fault
|
||||||
.previous
|
.previous
|
||||||
|
|
||||||
pushl %eax
|
|
||||||
CFI_ADJUST_CFA_OFFSET 4
|
|
||||||
SAVE_ALL
|
|
||||||
GET_THREAD_INFO(%ebp)
|
GET_THREAD_INFO(%ebp)
|
||||||
|
|
||||||
/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
|
/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
|
||||||
|
@ -384,7 +382,7 @@ syscall_exit:
|
||||||
# setting need_resched or sigpending
|
# setting need_resched or sigpending
|
||||||
# between sampling and the iret
|
# between sampling and the iret
|
||||||
TRACE_IRQS_OFF
|
TRACE_IRQS_OFF
|
||||||
testl $TF_MASK,PT_EFLAGS(%esp) # If tracing set singlestep flag on exit
|
testl $X86_EFLAGS_TF,PT_EFLAGS(%esp) # If tracing set singlestep flag on exit
|
||||||
jz no_singlestep
|
jz no_singlestep
|
||||||
orl $_TIF_SINGLESTEP,TI_flags(%ebp)
|
orl $_TIF_SINGLESTEP,TI_flags(%ebp)
|
||||||
no_singlestep:
|
no_singlestep:
|
||||||
|
@ -399,7 +397,7 @@ restore_all:
|
||||||
# See comments in process.c:copy_thread() for details.
|
# See comments in process.c:copy_thread() for details.
|
||||||
movb PT_OLDSS(%esp), %ah
|
movb PT_OLDSS(%esp), %ah
|
||||||
movb PT_CS(%esp), %al
|
movb PT_CS(%esp), %al
|
||||||
andl $(VM_MASK | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
|
andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
|
||||||
cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
|
cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
|
||||||
CFI_REMEMBER_STATE
|
CFI_REMEMBER_STATE
|
||||||
je ldt_ss # returning to user-space with LDT SS
|
je ldt_ss # returning to user-space with LDT SS
|
||||||
|
@ -486,7 +484,7 @@ work_resched:
|
||||||
work_notifysig: # deal with pending signals and
|
work_notifysig: # deal with pending signals and
|
||||||
# notify-resume requests
|
# notify-resume requests
|
||||||
#ifdef CONFIG_VM86
|
#ifdef CONFIG_VM86
|
||||||
testl $VM_MASK, PT_EFLAGS(%esp)
|
testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
|
||||||
movl %esp, %eax
|
movl %esp, %eax
|
||||||
jne work_notifysig_v86 # returning to kernel-space or
|
jne work_notifysig_v86 # returning to kernel-space or
|
||||||
# vm86-space
|
# vm86-space
|
||||||
|
@ -543,9 +541,6 @@ END(syscall_exit_work)
|
||||||
|
|
||||||
RING0_INT_FRAME # can't unwind into user space anyway
|
RING0_INT_FRAME # can't unwind into user space anyway
|
||||||
syscall_fault:
|
syscall_fault:
|
||||||
pushl %eax # save orig_eax
|
|
||||||
CFI_ADJUST_CFA_OFFSET 4
|
|
||||||
SAVE_ALL
|
|
||||||
GET_THREAD_INFO(%ebp)
|
GET_THREAD_INFO(%ebp)
|
||||||
movl $-EFAULT,PT_EAX(%esp)
|
movl $-EFAULT,PT_EAX(%esp)
|
||||||
jmp resume_userspace
|
jmp resume_userspace
|
||||||
|
|
|
@@ -319,19 +319,17 @@ badsys:
 	/* Do syscall tracing */
 tracesys:
 	SAVE_REST
-	movq $-ENOSYS,RAX(%rsp)
+	movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
 	FIXUP_TOP_OF_STACK %rdi
 	movq %rsp,%rdi
 	call syscall_trace_enter
 	LOAD_ARGS ARGOFFSET	/* reload args from stack in case ptrace changed it */
 	RESTORE_REST
 	cmpq $__NR_syscall_max,%rax
-	movq $-ENOSYS,%rcx
-	cmova %rcx,%rax
-	ja  1f
+	ja   int_ret_from_sys_call	/* RAX(%rsp) set to -ENOSYS above */
 	movq %r10,%rcx	/* fixup for C */
 	call *sys_call_table(,%rax,8)
-1:	movq %rax,RAX-ARGOFFSET(%rsp)
+	movq %rax,RAX-ARGOFFSET(%rsp)
 	/* Use IRET because user could have changed frame */
 
 /*
@@ -15,6 +15,7 @@
 #include <linux/kernel.h>
 #include <linux/ctype.h>
 #include <linux/init.h>
+#include <linux/hardirq.h>
 
 #include <asm/smp.h>
 #include <asm/ipi.h>
@@ -24,20 +25,20 @@
 #include <acpi/acpi_bus.h>
 #endif
 
-/* which logical CPU number maps to which CPU (physical APIC ID) */
-u16 x86_cpu_to_apicid_init[NR_CPUS] __initdata
-					= { [0 ... NR_CPUS-1] = BAD_APICID };
-void *x86_cpu_to_apicid_early_ptr;
-DEFINE_PER_CPU(u16, x86_cpu_to_apicid) = BAD_APICID;
-EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid);
+DEFINE_PER_CPU(int, x2apic_extra_bits);
 
 struct genapic __read_mostly *genapic = &apic_flat;
 
+static enum uv_system_type uv_system_type;
+
 /*
  * Check the APIC IDs in bios_cpu_apicid and choose the APIC mode.
  */
 void __init setup_apic_routing(void)
 {
+	if (uv_system_type == UV_NON_UNIQUE_APIC)
+		genapic = &apic_x2apic_uv_x;
+	else
 #ifdef CONFIG_ACPI
 	/*
 	 * Quirk: some x86_64 machines can only use physical APIC mode
@ -64,3 +65,37 @@ void send_IPI_self(int vector)
|
||||||
{
|
{
|
||||||
__send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL);
|
__send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
|
||||||
|
{
|
||||||
|
if (!strcmp(oem_id, "SGI")) {
|
||||||
|
if (!strcmp(oem_table_id, "UVL"))
|
||||||
|
uv_system_type = UV_LEGACY_APIC;
|
||||||
|
else if (!strcmp(oem_table_id, "UVX"))
|
||||||
|
uv_system_type = UV_X2APIC;
|
||||||
|
else if (!strcmp(oem_table_id, "UVH"))
|
||||||
|
uv_system_type = UV_NON_UNIQUE_APIC;
|
||||||
|
}
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
unsigned int read_apic_id(void)
|
||||||
|
{
|
||||||
|
unsigned int id;
|
||||||
|
|
||||||
|
WARN_ON(preemptible());
|
||||||
|
id = apic_read(APIC_ID);
|
||||||
|
if (uv_system_type >= UV_X2APIC)
|
||||||
|
id |= __get_cpu_var(x2apic_extra_bits);
|
||||||
|
return id;
|
||||||
|
}
|
||||||
|
|
||||||
|
enum uv_system_type get_uv_system_type(void)
|
||||||
|
{
|
||||||
|
return uv_system_type;
|
||||||
|
}
|
||||||
|
|
||||||
|
int is_uv_system(void)
|
||||||
|
{
|
||||||
|
return uv_system_type != UV_NONE;
|
||||||
|
}
|
||||||
|
|
|
@ -97,7 +97,7 @@ static void flat_send_IPI_all(int vector)
|
||||||
|
|
||||||
static int flat_apic_id_registered(void)
|
static int flat_apic_id_registered(void)
|
||||||
{
|
{
|
||||||
return physid_isset(GET_APIC_ID(apic_read(APIC_ID)), phys_cpu_present_map);
|
return physid_isset(GET_APIC_ID(read_apic_id()), phys_cpu_present_map);
|
||||||
}
|
}
|
||||||
|
|
||||||
static unsigned int flat_cpu_mask_to_apicid(cpumask_t cpumask)
|
static unsigned int flat_cpu_mask_to_apicid(cpumask_t cpumask)
|
||||||
|
@ -138,12 +138,9 @@ static cpumask_t physflat_target_cpus(void)
|
||||||
|
|
||||||
static cpumask_t physflat_vector_allocation_domain(int cpu)
|
static cpumask_t physflat_vector_allocation_domain(int cpu)
|
||||||
{
|
{
|
||||||
cpumask_t domain = CPU_MASK_NONE;
|
return cpumask_of_cpu(cpu);
|
||||||
cpu_set(cpu, domain);
|
|
||||||
return domain;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
static void physflat_send_IPI_mask(cpumask_t cpumask, int vector)
|
static void physflat_send_IPI_mask(cpumask_t cpumask, int vector)
|
||||||
{
|
{
|
||||||
send_IPI_mask_sequence(cpumask, vector);
|
send_IPI_mask_sequence(cpumask, vector);
|
||||||
|
|
arch/x86/kernel/genx2apic_uv_x.c (new file, 245 lines)
@ -0,0 +1,245 @@
|
||||||
|
/*
|
||||||
|
* This file is subject to the terms and conditions of the GNU General Public
|
||||||
|
* License. See the file "COPYING" in the main directory of this archive
|
||||||
|
* for more details.
|
||||||
|
*
|
||||||
|
* SGI UV APIC functions (note: not an Intel compatible APIC)
|
||||||
|
*
|
||||||
|
* Copyright (C) 2007 Silicon Graphics, Inc. All rights reserved.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include <linux/threads.h>
|
||||||
|
#include <linux/cpumask.h>
|
||||||
|
#include <linux/string.h>
|
||||||
|
#include <linux/kernel.h>
|
||||||
|
#include <linux/ctype.h>
|
||||||
|
#include <linux/init.h>
|
||||||
|
#include <linux/sched.h>
|
||||||
|
#include <linux/bootmem.h>
|
||||||
|
#include <linux/module.h>
|
||||||
|
#include <asm/smp.h>
|
||||||
|
#include <asm/ipi.h>
|
||||||
|
#include <asm/genapic.h>
|
||||||
|
#include <asm/uv/uv_mmrs.h>
|
||||||
|
#include <asm/uv/uv_hub.h>
|
||||||
|
|
||||||
|
DEFINE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
|
||||||
|
EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info);
|
||||||
|
|
||||||
|
struct uv_blade_info *uv_blade_info;
|
||||||
|
EXPORT_SYMBOL_GPL(uv_blade_info);
|
||||||
|
|
||||||
|
short *uv_node_to_blade;
|
||||||
|
EXPORT_SYMBOL_GPL(uv_node_to_blade);
|
||||||
|
|
||||||
|
short *uv_cpu_to_blade;
|
||||||
|
EXPORT_SYMBOL_GPL(uv_cpu_to_blade);
|
||||||
|
|
||||||
|
short uv_possible_blades;
|
||||||
|
EXPORT_SYMBOL_GPL(uv_possible_blades);
|
||||||
|
|
||||||
|
/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
|
||||||
|
|
||||||
|
static cpumask_t uv_target_cpus(void)
|
||||||
|
{
|
||||||
|
return cpumask_of_cpu(0);
|
||||||
|
}
|
||||||
|
|
||||||
|
static cpumask_t uv_vector_allocation_domain(int cpu)
|
||||||
|
{
|
||||||
|
cpumask_t domain = CPU_MASK_NONE;
|
||||||
|
cpu_set(cpu, domain);
|
||||||
|
return domain;
|
||||||
|
}
|
||||||
|
|
||||||
|
int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip)
|
||||||
|
{
|
||||||
|
unsigned long val;
|
||||||
|
int nasid;
|
||||||
|
|
||||||
|
nasid = uv_apicid_to_nasid(phys_apicid);
|
||||||
|
val = (1UL << UVH_IPI_INT_SEND_SHFT) |
|
||||||
|
(phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
|
||||||
|
(((long)start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
|
||||||
|
(6 << UVH_IPI_INT_DELIVERY_MODE_SHFT);
|
||||||
|
uv_write_global_mmr64(nasid, UVH_IPI_INT, val);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void uv_send_IPI_one(int cpu, int vector)
|
||||||
|
{
|
||||||
|
unsigned long val, apicid;
|
||||||
|
int nasid;
|
||||||
|
|
||||||
|
apicid = per_cpu(x86_cpu_to_apicid, cpu); /* ZZZ - cache node-local ? */
|
||||||
|
nasid = uv_apicid_to_nasid(apicid);
|
||||||
|
val =
|
||||||
|
(1UL << UVH_IPI_INT_SEND_SHFT) | (apicid <<
|
||||||
|
UVH_IPI_INT_APIC_ID_SHFT) |
|
||||||
|
(vector << UVH_IPI_INT_VECTOR_SHFT);
|
||||||
|
uv_write_global_mmr64(nasid, UVH_IPI_INT, val);
|
||||||
|
printk(KERN_DEBUG
|
||||||
|
"UV: IPI to cpu %d, apicid 0x%lx, vec %d, nasid%d, val 0x%lx\n",
|
||||||
|
cpu, apicid, vector, nasid, val);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void uv_send_IPI_mask(cpumask_t mask, int vector)
|
||||||
|
{
|
||||||
|
unsigned int cpu;
|
||||||
|
|
||||||
|
for (cpu = 0; cpu < NR_CPUS; ++cpu)
|
||||||
|
if (cpu_isset(cpu, mask))
|
||||||
|
uv_send_IPI_one(cpu, vector);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void uv_send_IPI_allbutself(int vector)
|
||||||
|
{
|
||||||
|
cpumask_t mask = cpu_online_map;
|
||||||
|
|
||||||
|
cpu_clear(smp_processor_id(), mask);
|
||||||
|
|
||||||
|
if (!cpus_empty(mask))
|
||||||
|
uv_send_IPI_mask(mask, vector);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void uv_send_IPI_all(int vector)
|
||||||
|
{
|
||||||
|
uv_send_IPI_mask(cpu_online_map, vector);
|
||||||
|
}
|
||||||
|
|
||||||
|
static int uv_apic_id_registered(void)
|
||||||
|
{
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
static unsigned int uv_cpu_mask_to_apicid(cpumask_t cpumask)
|
||||||
|
{
|
||||||
|
int cpu;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* We're using fixed IRQ delivery, can only return one phys APIC ID.
|
||||||
|
* May as well be the first.
|
||||||
|
*/
|
||||||
|
cpu = first_cpu(cpumask);
|
||||||
|
if ((unsigned)cpu < NR_CPUS)
|
||||||
|
return per_cpu(x86_cpu_to_apicid, cpu);
|
||||||
|
else
|
||||||
|
return BAD_APICID;
|
||||||
|
}
|
||||||
|
|
||||||
|
static unsigned int phys_pkg_id(int index_msb)
|
||||||
|
{
|
||||||
|
return GET_APIC_ID(read_apic_id()) >> index_msb;
|
||||||
|
}
|
||||||
|
|
||||||
|
#ifdef ZZZ /* Needs x2apic patch */
|
||||||
|
static void uv_send_IPI_self(int vector)
|
||||||
|
{
|
||||||
|
apic_write(APIC_SELF_IPI, vector);
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
struct genapic apic_x2apic_uv_x = {
|
||||||
|
.name = "UV large system",
|
||||||
|
.int_delivery_mode = dest_Fixed,
|
||||||
|
.int_dest_mode = (APIC_DEST_PHYSICAL != 0),
|
||||||
|
.target_cpus = uv_target_cpus,
|
||||||
|
.vector_allocation_domain = uv_vector_allocation_domain,/* Fixme ZZZ */
|
||||||
|
.apic_id_registered = uv_apic_id_registered,
|
||||||
|
.send_IPI_all = uv_send_IPI_all,
|
||||||
|
.send_IPI_allbutself = uv_send_IPI_allbutself,
|
||||||
|
.send_IPI_mask = uv_send_IPI_mask,
|
||||||
|
/* ZZZ.send_IPI_self = uv_send_IPI_self, */
|
||||||
|
.cpu_mask_to_apicid = uv_cpu_mask_to_apicid,
|
||||||
|
.phys_pkg_id = phys_pkg_id, /* Fixme ZZZ */
|
||||||
|
};
|
||||||
|
|
||||||
|
static __cpuinit void set_x2apic_extra_bits(int nasid)
|
||||||
|
{
|
||||||
|
__get_cpu_var(x2apic_extra_bits) = ((nasid >> 1) << 6);
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Called on boot cpu.
|
||||||
|
*/
|
||||||
|
static __init void uv_system_init(void)
|
||||||
|
{
|
||||||
|
union uvh_si_addr_map_config_u m_n_config;
|
||||||
|
int bytes, nid, cpu, lcpu, nasid, last_nasid, blade;
|
||||||
|
unsigned long mmr_base;
|
||||||
|
|
||||||
|
m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG);
|
||||||
|
mmr_base =
|
||||||
|
uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) &
|
||||||
|
~UV_MMR_ENABLE;
|
||||||
|
printk(KERN_DEBUG "UV: global MMR base 0x%lx\n", mmr_base);
|
||||||
|
|
||||||
|
last_nasid = -1;
|
||||||
|
for_each_possible_cpu(cpu) {
|
||||||
|
nid = cpu_to_node(cpu);
|
||||||
|
nasid = uv_apicid_to_nasid(per_cpu(x86_cpu_to_apicid, cpu));
|
||||||
|
if (nasid != last_nasid)
|
||||||
|
uv_possible_blades++;
|
||||||
|
last_nasid = nasid;
|
||||||
|
}
|
||||||
|
printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades());
|
||||||
|
|
||||||
|
bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
|
||||||
|
uv_blade_info = alloc_bootmem_pages(bytes);
|
||||||
|
|
||||||
|
bytes = sizeof(uv_node_to_blade[0]) * num_possible_nodes();
|
||||||
|
uv_node_to_blade = alloc_bootmem_pages(bytes);
|
||||||
|
memset(uv_node_to_blade, 255, bytes);
|
||||||
|
|
||||||
|
bytes = sizeof(uv_cpu_to_blade[0]) * num_possible_cpus();
|
||||||
|
uv_cpu_to_blade = alloc_bootmem_pages(bytes);
|
||||||
|
memset(uv_cpu_to_blade, 255, bytes);
|
||||||
|
|
||||||
|
last_nasid = -1;
|
||||||
|
blade = -1;
|
||||||
|
lcpu = -1;
|
||||||
|
for_each_possible_cpu(cpu) {
|
||||||
|
nid = cpu_to_node(cpu);
|
||||||
|
nasid = uv_apicid_to_nasid(per_cpu(x86_cpu_to_apicid, cpu));
|
||||||
|
if (nasid != last_nasid) {
|
||||||
|
blade++;
|
||||||
|
lcpu = -1;
|
||||||
|
uv_blade_info[blade].nr_posible_cpus = 0;
|
||||||
|
uv_blade_info[blade].nr_online_cpus = 0;
|
||||||
|
}
|
||||||
|
last_nasid = nasid;
|
||||||
|
lcpu++;
|
||||||
|
|
||||||
|
uv_cpu_hub_info(cpu)->m_val = m_n_config.s.m_skt;
|
||||||
|
uv_cpu_hub_info(cpu)->n_val = m_n_config.s.n_skt;
|
||||||
|
uv_cpu_hub_info(cpu)->numa_blade_id = blade;
|
||||||
|
uv_cpu_hub_info(cpu)->blade_processor_id = lcpu;
|
||||||
|
uv_cpu_hub_info(cpu)->local_nasid = nasid;
|
||||||
|
uv_cpu_hub_info(cpu)->gnode_upper =
|
||||||
|
nasid & ~((1 << uv_hub_info->n_val) - 1);
|
||||||
|
uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
|
||||||
|
uv_cpu_hub_info(cpu)->coherency_domain_number = 0;/* ZZZ */
|
||||||
|
uv_blade_info[blade].nasid = nasid;
|
||||||
|
uv_blade_info[blade].nr_posible_cpus++;
|
||||||
|
uv_node_to_blade[nid] = blade;
|
||||||
|
uv_cpu_to_blade[cpu] = blade;
|
||||||
|
|
||||||
|
printk(KERN_DEBUG "UV cpu %d, apicid 0x%x, nasid %d, nid %d\n",
|
||||||
|
cpu, per_cpu(x86_cpu_to_apicid, cpu), nasid, nid);
|
||||||
|
printk(KERN_DEBUG "UV lcpu %d, blade %d\n", lcpu, blade);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Called on each cpu to initialize the per_cpu UV data area.
|
||||||
|
*/
|
||||||
|
void __cpuinit uv_cpu_init(void)
|
||||||
|
{
|
||||||
|
if (!uv_node_to_blade)
|
||||||
|
uv_system_init();
|
||||||
|
|
||||||
|
uv_blade_info[uv_numa_blade_id()].nr_online_cpus++;
|
||||||
|
|
||||||
|
if (get_uv_system_type() == UV_NON_UNIQUE_APIC)
|
||||||
|
set_x2apic_extra_bits(uv_hub_info->local_nasid);
|
||||||
|
}
|
arch/x86/kernel/head32.c (new file, 14 lines)

@@ -0,0 +1,14 @@
+/*
+ *  linux/arch/i386/kernel/head32.c -- prepare to run common code
+ *
+ *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
+ *  Copyright (C) 2007 Eric Biederman <ebiederm@xmission.com>
+ */
+
+#include <linux/init.h>
+#include <linux/start_kernel.h>
+
+void __init i386_start_kernel(void)
+{
+	start_kernel();
+}
@@ -49,39 +49,75 @@ static void __init copy_bootdata(char *real_mode_data)
 	}
 }
 
-#define EBDA_ADDR_POINTER 0x40E
+#define BIOS_EBDA_SEGMENT 0x40E
+#define BIOS_LOWMEM_KILOBYTES 0x413
 
-static __init void reserve_ebda(void)
-{
-	unsigned ebda_addr, ebda_size;
-
-	/*
-	 * there is a real-mode segmented pointer pointing to the
-	 * 4K EBDA area at 0x40E
-	 */
-	ebda_addr = *(unsigned short *)__va(EBDA_ADDR_POINTER);
-	ebda_addr <<= 4;
-
-	if (!ebda_addr)
-		return;
-
-	ebda_size = *(unsigned short *)__va(ebda_addr);
-
-	/* Round EBDA up to pages */
-	if (ebda_size == 0)
-		ebda_size = 1;
-	ebda_size <<= 10;
-	ebda_size = round_up(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE);
-	if (ebda_size > 64*1024)
-		ebda_size = 64*1024;
-
-	reserve_early(ebda_addr, ebda_addr + ebda_size, "EBDA");
+/*
+ * The BIOS places the EBDA/XBDA at the top of conventional
+ * memory, and usually decreases the reported amount of
+ * conventional memory (int 0x12) too. This also contains a
+ * workaround for Dell systems that neglect to reserve EBDA.
+ * The same workaround also avoids a problem with the AMD768MPX
+ * chipset: reserve a page before VGA to prevent PCI prefetch
+ * into it (errata #56). Usually the page is reserved anyways,
+ * unless you have no PS/2 mouse plugged in.
+ */
+static void __init reserve_ebda_region(void)
+{
+	unsigned int lowmem, ebda_addr;
+
+	/* To determine the position of the EBDA and the */
+	/* end of conventional memory, we need to look at */
+	/* the BIOS data area. In a paravirtual environment */
+	/* that area is absent. We'll just have to assume */
+	/* that the paravirt case can handle memory setup */
+	/* correctly, without our help. */
+	if (paravirt_enabled())
+		return;
+
+	/* end of low (conventional) memory */
+	lowmem = *(unsigned short *)__va(BIOS_LOWMEM_KILOBYTES);
+	lowmem <<= 10;
+
+	/* start of EBDA area */
+	ebda_addr = *(unsigned short *)__va(BIOS_EBDA_SEGMENT);
+	ebda_addr <<= 4;
+
+	/* Fixup: bios puts an EBDA in the top 64K segment */
+	/* of conventional memory, but does not adjust lowmem. */
+	if ((lowmem - ebda_addr) <= 0x10000)
+		lowmem = ebda_addr;
+
+	/* Fixup: bios does not report an EBDA at all. */
+	/* Some old Dells seem to need 4k anyhow (bugzilla 2990) */
+	if ((ebda_addr == 0) && (lowmem >= 0x9f000))
+		lowmem = 0x9f000;
+
+	/* Paranoia: should never happen, but... */
+	if ((lowmem == 0) || (lowmem >= 0x100000))
+		lowmem = 0x9f000;
+
+	/* reserve all memory between lowmem and the 1MB mark */
+	reserve_early(lowmem, 0x100000, "BIOS reserved");
 }
 
 void __init x86_64_start_kernel(char * real_mode_data)
 {
 	int i;
 
+	/*
+	 * Build-time sanity checks on the kernel image and module
+	 * area mappings. (these are purely build-time and produce no code)
+	 */
+	BUILD_BUG_ON(MODULES_VADDR < KERNEL_IMAGE_START);
+	BUILD_BUG_ON(MODULES_VADDR-KERNEL_IMAGE_START < KERNEL_IMAGE_SIZE);
+	BUILD_BUG_ON(MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE);
+	BUILD_BUG_ON((KERNEL_IMAGE_START & ~PMD_MASK) != 0);
+	BUILD_BUG_ON((MODULES_VADDR & ~PMD_MASK) != 0);
+	BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
+	BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
+			(__START_KERNEL & PGDIR_MASK)));
+
 	/* clear bss before set_intr_gate with early_idt_handler */
 	clear_bss();
 
|
||||||
/* Cleanup the over mapped high alias */
|
/* Cleanup the over mapped high alias */
|
||||||
cleanup_highmap();
|
cleanup_highmap();
|
||||||
|
|
||||||
for (i = 0; i < IDT_ENTRIES; i++) {
|
for (i = 0; i < NUM_EXCEPTION_VECTORS; i++) {
|
||||||
#ifdef CONFIG_EARLY_PRINTK
|
#ifdef CONFIG_EARLY_PRINTK
|
||||||
set_intr_gate(i, &early_idt_handlers[i]);
|
set_intr_gate(i, &early_idt_handlers[i]);
|
||||||
#else
|
#else
|
||||||
|
@@ -118,7 +154,7 @@ void __init x86_64_start_kernel(char * real_mode_data)
 		reserve_early(ramdisk_image, ramdisk_end, "RAMDISK");
 	}
 
-	reserve_ebda();
+	reserve_ebda_region();
 
 	/*
 	 * At this point everything still needed from the boot loader
@@ -450,7 +450,7 @@ is386:	movl $2,%ecx		# set MP
 	jmp initialize_secondary # all other CPUs call initialize_secondary
 1:
 #endif /* CONFIG_SMP */
-	jmp start_kernel
+	jmp i386_start_kernel
 
 /*
  * We depend on ET to be correct. This checks for 287/387.
@@ -132,10 +132,6 @@ ident_complete:
 	addq	%rbp, trampoline_level4_pgt + 0(%rip)
 	addq	%rbp, trampoline_level4_pgt + (511*8)(%rip)
 #endif
-#ifdef CONFIG_ACPI_SLEEP
-	addq	%rbp, wakeup_level4_pgt + 0(%rip)
-	addq	%rbp, wakeup_level4_pgt + (511*8)(%rip)
-#endif
 
 	/* Due to ENTRY(), sometimes the empty space gets filled with
 	 * zeros. Better take a jmp than relying on empty space being
@ -267,21 +263,16 @@ ENTRY(secondary_startup_64)
|
||||||
bad_address:
|
bad_address:
|
||||||
jmp bad_address
|
jmp bad_address
|
||||||
|
|
||||||
|
.section ".init.text","ax"
|
||||||
#ifdef CONFIG_EARLY_PRINTK
|
#ifdef CONFIG_EARLY_PRINTK
|
||||||
.macro early_idt_tramp first, last
|
|
||||||
.ifgt \last-\first
|
|
||||||
early_idt_tramp \first, \last-1
|
|
||||||
.endif
|
|
||||||
movl $\last,%esi
|
|
||||||
jmp early_idt_handler
|
|
||||||
.endm
|
|
||||||
|
|
||||||
.globl early_idt_handlers
|
.globl early_idt_handlers
|
||||||
early_idt_handlers:
|
early_idt_handlers:
|
||||||
early_idt_tramp 0, 63
|
i = 0
|
||||||
early_idt_tramp 64, 127
|
.rept NUM_EXCEPTION_VECTORS
|
||||||
early_idt_tramp 128, 191
|
movl $i, %esi
|
||||||
early_idt_tramp 192, 255
|
jmp early_idt_handler
|
||||||
|
i = i + 1
|
||||||
|
.endr
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
ENTRY(early_idt_handler)
|
ENTRY(early_idt_handler)
|
||||||
|
@ -327,6 +318,7 @@ early_idt_msg:
|
||||||
early_idt_ripmsg:
|
early_idt_ripmsg:
|
||||||
.asciz "RIP %s\n"
|
.asciz "RIP %s\n"
|
||||||
#endif /* CONFIG_EARLY_PRINTK */
|
#endif /* CONFIG_EARLY_PRINTK */
|
||||||
|
.previous
|
||||||
|
|
||||||
.balign PAGE_SIZE
|
.balign PAGE_SIZE
|
||||||
|
|
||||||
|
@@ -383,12 +375,12 @@ NEXT_PAGE(level2_ident_pgt)
 
 NEXT_PAGE(level2_kernel_pgt)
 	/*
-	 * 128 MB kernel mapping. We spend a full page on this pagetable
+	 * 512 MB kernel mapping. We spend a full page on this pagetable
 	 * anyway.
 	 *
 	 * The kernel code+data+bss must not be bigger than that.
 	 *
-	 * (NOTE: at +128MB starts the module area, see MODULES_VADDR.
+	 * (NOTE: at +512MB starts the module area, see MODULES_VADDR.
 	 *  If you want to increase this then increase MODULES_VADDR
 	 *  too.)
 	 */
@ -5,32 +5,27 @@
|
||||||
* General FPU state handling cleanups
|
* General FPU state handling cleanups
|
||||||
* Gareth Hughes <gareth@valinux.com>, May 2000
|
* Gareth Hughes <gareth@valinux.com>, May 2000
|
||||||
*/
|
*/
|
||||||
|
|
||||||
#include <linux/sched.h>
|
|
||||||
#include <linux/module.h>
|
#include <linux/module.h>
|
||||||
#include <linux/regset.h>
|
#include <linux/regset.h>
|
||||||
#include <asm/processor.h>
|
#include <linux/sched.h>
|
||||||
#include <asm/i387.h>
|
|
||||||
#include <asm/math_emu.h>
|
|
||||||
#include <asm/sigcontext.h>
|
#include <asm/sigcontext.h>
|
||||||
#include <asm/user.h>
|
#include <asm/processor.h>
|
||||||
#include <asm/ptrace.h>
|
#include <asm/math_emu.h>
|
||||||
#include <asm/uaccess.h>
|
#include <asm/uaccess.h>
|
||||||
|
#include <asm/ptrace.h>
|
||||||
|
#include <asm/i387.h>
|
||||||
|
#include <asm/user.h>
|
||||||
|
|
||||||
#ifdef CONFIG_X86_64
|
#ifdef CONFIG_X86_64
|
||||||
|
|
||||||
# include <asm/sigcontext32.h>
|
# include <asm/sigcontext32.h>
|
||||||
# include <asm/user32.h>
|
# include <asm/user32.h>
|
||||||
|
|
||||||
#else
|
#else
|
||||||
|
|
||||||
# define save_i387_ia32 save_i387
|
# define save_i387_ia32 save_i387
|
||||||
# define restore_i387_ia32 restore_i387
|
# define restore_i387_ia32 restore_i387
|
||||||
|
|
||||||
# define _fpstate_ia32 _fpstate
|
# define _fpstate_ia32 _fpstate
|
||||||
# define user_i387_ia32_struct user_i387_struct
|
# define user_i387_ia32_struct user_i387_struct
|
||||||
# define user32_fxsr_struct user_fxsr_struct
|
# define user32_fxsr_struct user_fxsr_struct
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#ifdef CONFIG_MATH_EMULATION
|
#ifdef CONFIG_MATH_EMULATION
|
||||||
|
@ -44,6 +39,7 @@ static unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
|
||||||
void mxcsr_feature_mask_init(void)
|
void mxcsr_feature_mask_init(void)
|
||||||
{
|
{
|
||||||
unsigned long mask = 0;
|
unsigned long mask = 0;
|
||||||
|
|
||||||
clts();
|
clts();
|
||||||
if (cpu_has_fxsr) {
|
if (cpu_has_fxsr) {
|
||||||
memset(¤t->thread.i387.fxsave, 0,
|
memset(¤t->thread.i387.fxsave, 0,
|
||||||
|
@ -69,10 +65,11 @@ void __cpuinit fpu_init(void)
|
||||||
|
|
||||||
if (offsetof(struct task_struct, thread.i387.fxsave) & 15)
|
if (offsetof(struct task_struct, thread.i387.fxsave) & 15)
|
||||||
__bad_fxsave_alignment();
|
__bad_fxsave_alignment();
|
||||||
|
|
||||||
set_in_cr4(X86_CR4_OSFXSR);
|
set_in_cr4(X86_CR4_OSFXSR);
|
||||||
set_in_cr4(X86_CR4_OSXMMEXCPT);
|
set_in_cr4(X86_CR4_OSXMMEXCPT);
|
||||||
|
|
||||||
write_cr0(oldcr0 & ~((1UL<<3)|(1UL<<2))); /* clear TS and EM */
|
write_cr0(oldcr0 & ~(X86_CR0_TS|X86_CR0_EM)); /* clear TS and EM */
|
||||||
|
|
||||||
mxcsr_feature_mask_init();
|
mxcsr_feature_mask_init();
|
||||||
/* clean state in init */
|
/* clean state in init */
|
||||||
|
@ -178,6 +175,7 @@ static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
|
||||||
tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
|
tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
|
||||||
tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
|
tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
|
||||||
tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */
|
tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */
|
||||||
|
|
||||||
return tmp;
|
return tmp;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -232,8 +230,8 @@ static inline u32 twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave)
|
||||||
* FXSR floating point environment conversions.
|
* FXSR floating point environment conversions.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
static void convert_from_fxsr(struct user_i387_ia32_struct *env,
|
static void
|
||||||
struct task_struct *tsk)
|
convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
|
||||||
{
|
{
|
||||||
struct i387_fxsave_struct *fxsave = &tsk->thread.i387.fxsave;
|
struct i387_fxsave_struct *fxsave = &tsk->thread.i387.fxsave;
|
||||||
struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
|
struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
|
||||||
|
@ -252,10 +250,11 @@ static void convert_from_fxsr(struct user_i387_ia32_struct *env,
|
||||||
* should be actually ds/cs at fpu exception time, but
|
* should be actually ds/cs at fpu exception time, but
|
||||||
* that information is not available in 64bit mode.
|
* that information is not available in 64bit mode.
|
||||||
*/
|
*/
|
||||||
asm("mov %%ds,%0" : "=r" (env->fos));
|
asm("mov %%ds, %[fos]" : [fos] "=r" (env->fos));
|
||||||
asm("mov %%cs,%0" : "=r" (env->fcs));
|
asm("mov %%cs, %[fcs]" : [fcs] "=r" (env->fcs));
|
||||||
} else {
|
} else {
|
||||||
struct pt_regs *regs = task_pt_regs(tsk);
|
struct pt_regs *regs = task_pt_regs(tsk);
|
||||||
|
|
||||||
env->fos = 0xffff0000 | tsk->thread.ds;
|
env->fos = 0xffff0000 | tsk->thread.ds;
|
||||||
env->fcs = regs->cs;
|
env->fcs = regs->cs;
|
||||||
}
|
}
|
||||||
|
@ -309,9 +308,10 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset,
|
||||||
|
|
||||||
init_fpu(target);
|
init_fpu(target);
|
||||||
|
|
||||||
if (!cpu_has_fxsr)
|
if (!cpu_has_fxsr) {
|
||||||
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
|
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
|
||||||
&target->thread.i387.fsave, 0, -1);
|
&target->thread.i387.fsave, 0, -1);
|
||||||
|
}
|
||||||
|
|
||||||
if (kbuf && pos == 0 && count == sizeof(env)) {
|
if (kbuf && pos == 0 && count == sizeof(env)) {
|
||||||
convert_from_fxsr(kbuf, target);
|
convert_from_fxsr(kbuf, target);
|
||||||
|
@ -319,6 +319,7 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset,
|
||||||
}
|
}
|
||||||
|
|
||||||
convert_from_fxsr(&env, target);
|
convert_from_fxsr(&env, target);
|
||||||
|
|
||||||
return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
|
return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -335,9 +336,10 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
|
||||||
init_fpu(target);
|
init_fpu(target);
|
||||||
set_stopped_child_used_math(target);
|
set_stopped_child_used_math(target);
|
||||||
|
|
||||||
if (!cpu_has_fxsr)
|
if (!cpu_has_fxsr) {
|
||||||
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
|
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
|
||||||
&target->thread.i387.fsave, 0, -1);
|
&target->thread.i387.fsave, 0, -1);
|
||||||
|
}
|
||||||
|
|
||||||
if (pos > 0 || count < sizeof(env))
|
if (pos > 0 || count < sizeof(env))
|
||||||
convert_from_fxsr(&env, target);
|
convert_from_fxsr(&env, target);
|
||||||
|
@ -392,28 +394,28 @@ int save_i387_ia32(struct _fpstate_ia32 __user *buf)
|
||||||
{
|
{
|
||||||
if (!used_math())
|
if (!used_math())
|
||||||
return 0;
|
return 0;
|
||||||
|
/*
|
||||||
/* This will cause a "finit" to be triggered by the next
|
* This will cause a "finit" to be triggered by the next
|
||||||
* attempted FPU operation by the 'current' process.
|
* attempted FPU operation by the 'current' process.
|
||||||
*/
|
*/
|
||||||
clear_used_math();
|
clear_used_math();
|
||||||
|
|
||||||
if (HAVE_HWFP) {
|
if (!HAVE_HWFP) {
|
||||||
if (cpu_has_fxsr) {
|
|
||||||
return save_i387_fxsave(buf);
|
|
||||||
} else {
|
|
||||||
return save_i387_fsave(buf);
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
return fpregs_soft_get(current, NULL,
|
return fpregs_soft_get(current, NULL,
|
||||||
0, sizeof(struct user_i387_ia32_struct),
|
0, sizeof(struct user_i387_ia32_struct),
|
||||||
NULL, buf) ? -1 : 1;
|
NULL, buf) ? -1 : 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (cpu_has_fxsr)
|
||||||
|
return save_i387_fxsave(buf);
|
||||||
|
else
|
||||||
|
return save_i387_fsave(buf);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf)
|
static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf)
|
||||||
{
|
{
|
||||||
struct task_struct *tsk = current;
|
struct task_struct *tsk = current;
|
||||||
|
|
||||||
clear_fpu(tsk);
|
clear_fpu(tsk);
|
||||||
return __copy_from_user(&tsk->thread.i387.fsave, buf,
|
return __copy_from_user(&tsk->thread.i387.fsave, buf,
|
||||||
sizeof(struct i387_fsave_struct));
|
sizeof(struct i387_fsave_struct));
|
||||||
|
@ -421,9 +423,10 @@ static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf)
|
||||||
|
|
||||||
static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf)
|
static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf)
|
||||||
{
|
{
|
||||||
int err;
|
|
||||||
struct task_struct *tsk = current;
|
struct task_struct *tsk = current;
|
||||||
struct user_i387_ia32_struct env;
|
struct user_i387_ia32_struct env;
|
||||||
|
int err;
|
||||||
|
|
||||||
clear_fpu(tsk);
|
clear_fpu(tsk);
|
||||||
err = __copy_from_user(&tsk->thread.i387.fxsave, &buf->_fxsr_env[0],
|
err = __copy_from_user(&tsk->thread.i387.fxsave, &buf->_fxsr_env[0],
|
||||||
sizeof(struct i387_fxsave_struct));
|
sizeof(struct i387_fxsave_struct));
|
||||||
|
@ -432,6 +435,7 @@ static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf)
|
||||||
if (err || __copy_from_user(&env, buf, sizeof(env)))
|
if (err || __copy_from_user(&env, buf, sizeof(env)))
|
||||||
return 1;
|
return 1;
|
||||||
convert_to_fxsr(tsk, &env);
|
convert_to_fxsr(tsk, &env);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -440,17 +444,17 @@ int restore_i387_ia32(struct _fpstate_ia32 __user *buf)
|
||||||
int err;
|
int err;
|
||||||
|
|
||||||
if (HAVE_HWFP) {
|
if (HAVE_HWFP) {
|
||||||
if (cpu_has_fxsr) {
|
if (cpu_has_fxsr)
|
||||||
err = restore_i387_fxsave(buf);
|
err = restore_i387_fxsave(buf);
|
||||||
} else {
|
else
|
||||||
err = restore_i387_fsave(buf);
|
err = restore_i387_fsave(buf);
|
||||||
}
|
|
||||||
} else {
|
} else {
|
||||||
err = fpregs_soft_set(current, NULL,
|
err = fpregs_soft_set(current, NULL,
|
||||||
0, sizeof(struct user_i387_ia32_struct),
|
0, sizeof(struct user_i387_ia32_struct),
|
||||||
NULL, buf) != 0;
|
NULL, buf) != 0;
|
||||||
}
|
}
|
||||||
set_used_math();
|
set_used_math();
|
||||||
|
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -463,8 +467,8 @@ int restore_i387_ia32(struct _fpstate_ia32 __user *buf)
|
||||||
*/
|
*/
|
||||||
int dump_fpu(struct pt_regs *regs, struct user_i387_struct *fpu)
|
int dump_fpu(struct pt_regs *regs, struct user_i387_struct *fpu)
|
||||||
{
|
{
|
||||||
int fpvalid;
|
|
||||||
struct task_struct *tsk = current;
|
struct task_struct *tsk = current;
|
||||||
|
int fpvalid;
|
||||||
|
|
||||||
fpvalid = !!used_math();
|
fpvalid = !!used_math();
|
||||||
if (fpvalid)
|
if (fpvalid)
|
||||||
|
|
|
@ -71,6 +71,16 @@ int sis_apic_bug = -1;
|
||||||
*/
|
*/
|
||||||
int nr_ioapic_registers[MAX_IO_APICS];
|
int nr_ioapic_registers[MAX_IO_APICS];
|
||||||
|
|
||||||
|
/* I/O APIC entries */
|
||||||
|
struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];
|
||||||
|
int nr_ioapics;
|
||||||
|
|
||||||
|
/* MP IRQ source entries */
|
||||||
|
struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
|
||||||
|
|
||||||
|
/* # of MP IRQ source entries */
|
||||||
|
int mp_irq_entries;
|
||||||
|
|
||||||
static int disable_timer_pin_1 __initdata;
|
static int disable_timer_pin_1 __initdata;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -810,10 +820,7 @@ static int __init find_isa_irq_pin(int irq, int type)
|
||||||
for (i = 0; i < mp_irq_entries; i++) {
|
for (i = 0; i < mp_irq_entries; i++) {
|
||||||
int lbus = mp_irqs[i].mpc_srcbus;
|
int lbus = mp_irqs[i].mpc_srcbus;
|
||||||
|
|
||||||
if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
|
if (test_bit(lbus, mp_bus_not_pci) &&
|
||||||
mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
|
|
||||||
mp_bus_id_to_type[lbus] == MP_BUS_MCA
|
|
||||||
) &&
|
|
||||||
(mp_irqs[i].mpc_irqtype == type) &&
|
(mp_irqs[i].mpc_irqtype == type) &&
|
||||||
(mp_irqs[i].mpc_srcbusirq == irq))
|
(mp_irqs[i].mpc_srcbusirq == irq))
|
||||||
|
|
||||||
|
@ -829,10 +836,7 @@ static int __init find_isa_irq_apic(int irq, int type)
|
||||||
for (i = 0; i < mp_irq_entries; i++) {
|
for (i = 0; i < mp_irq_entries; i++) {
|
||||||
int lbus = mp_irqs[i].mpc_srcbus;
|
int lbus = mp_irqs[i].mpc_srcbus;
|
||||||
|
|
||||||
if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
|
if (test_bit(lbus, mp_bus_not_pci) &&
|
||||||
mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
|
|
||||||
mp_bus_id_to_type[lbus] == MP_BUS_MCA
|
|
||||||
) &&
|
|
||||||
(mp_irqs[i].mpc_irqtype == type) &&
|
(mp_irqs[i].mpc_irqtype == type) &&
|
||||||
(mp_irqs[i].mpc_srcbusirq == irq))
|
(mp_irqs[i].mpc_srcbusirq == irq))
|
||||||
break;
|
break;
|
||||||
|
@ -872,7 +876,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
|
||||||
mp_irqs[i].mpc_dstapic == MP_APIC_ALL)
|
mp_irqs[i].mpc_dstapic == MP_APIC_ALL)
|
||||||
break;
|
break;
|
||||||
|
|
||||||
if ((mp_bus_id_to_type[lbus] == MP_BUS_PCI) &&
|
if (!test_bit(lbus, mp_bus_not_pci) &&
|
||||||
!mp_irqs[i].mpc_irqtype &&
|
!mp_irqs[i].mpc_irqtype &&
|
||||||
(bus == lbus) &&
|
(bus == lbus) &&
|
||||||
(slot == ((mp_irqs[i].mpc_srcbusirq >> 2) & 0x1f))) {
|
(slot == ((mp_irqs[i].mpc_srcbusirq >> 2) & 0x1f))) {
|
||||||
|
@ -921,6 +925,7 @@ void __init setup_ioapic_dest(void)
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
|
||||||
/*
|
/*
|
||||||
* EISA Edge/Level control register, ELCR
|
* EISA Edge/Level control register, ELCR
|
||||||
*/
|
*/
|
||||||
|
@ -934,6 +939,13 @@ static int EISA_ELCR(unsigned int irq)
|
||||||
"Broken MPtable reports ISA irq %d\n", irq);
|
"Broken MPtable reports ISA irq %d\n", irq);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/* ISA interrupts are always polarity zero edge triggered,
|
||||||
|
* when listed as conforming in the MP table. */
|
||||||
|
|
||||||
|
#define default_ISA_trigger(idx) (0)
|
||||||
|
#define default_ISA_polarity(idx) (0)
|
||||||
|
|
||||||
/* EISA interrupts are always polarity zero and can be edge or level
|
/* EISA interrupts are always polarity zero and can be edge or level
|
||||||
* trigger depending on the ELCR value. If an interrupt is listed as
|
* trigger depending on the ELCR value. If an interrupt is listed as
|
||||||
|
@ -941,13 +953,7 @@ static int EISA_ELCR(unsigned int irq)
|
||||||
* be read in from the ELCR */
|
* be read in from the ELCR */
|
||||||
|
|
||||||
#define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mpc_srcbusirq))
|
#define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mpc_srcbusirq))
|
||||||
#define default_EISA_polarity(idx) (0)
|
#define default_EISA_polarity(idx) default_ISA_polarity(idx)
|
||||||
|
|
||||||
/* ISA interrupts are always polarity zero edge triggered,
|
|
||||||
* when listed as conforming in the MP table. */
|
|
||||||
|
|
||||||
#define default_ISA_trigger(idx) (0)
|
|
||||||
#define default_ISA_polarity(idx) (0)
|
|
||||||
|
|
||||||
/* PCI interrupts are always polarity one level triggered,
|
/* PCI interrupts are always polarity one level triggered,
|
||||||
* when listed as conforming in the MP table. */
|
* when listed as conforming in the MP table. */
|
||||||
|
@ -959,7 +965,7 @@ static int EISA_ELCR(unsigned int irq)
|
||||||
* when listed as conforming in the MP table. */
|
* when listed as conforming in the MP table. */
|
||||||
|
|
||||||
#define default_MCA_trigger(idx) (1)
|
#define default_MCA_trigger(idx) (1)
|
||||||
#define default_MCA_polarity(idx) (0)
|
#define default_MCA_polarity(idx) default_ISA_polarity(idx)
|
||||||
|
|
||||||
static int MPBIOS_polarity(int idx)
|
static int MPBIOS_polarity(int idx)
|
||||||
{
|
{
|
||||||
|
@ -973,35 +979,9 @@ static int MPBIOS_polarity(int idx)
|
||||||
{
|
{
|
||||||
case 0: /* conforms, ie. bus-type dependent polarity */
|
case 0: /* conforms, ie. bus-type dependent polarity */
|
||||||
{
|
{
|
||||||
switch (mp_bus_id_to_type[bus])
|
polarity = test_bit(bus, mp_bus_not_pci)?
|
||||||
{
|
default_ISA_polarity(idx):
|
||||||
case MP_BUS_ISA: /* ISA pin */
|
default_PCI_polarity(idx);
|
||||||
{
|
|
||||||
polarity = default_ISA_polarity(idx);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
case MP_BUS_EISA: /* EISA pin */
|
|
||||||
{
|
|
||||||
polarity = default_EISA_polarity(idx);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
case MP_BUS_PCI: /* PCI pin */
|
|
||||||
{
|
|
||||||
polarity = default_PCI_polarity(idx);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
case MP_BUS_MCA: /* MCA pin */
|
|
||||||
{
|
|
||||||
polarity = default_MCA_polarity(idx);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
{
|
|
||||||
printk(KERN_WARNING "broken BIOS!!\n");
|
|
||||||
polarity = 1;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
case 1: /* high active */
|
case 1: /* high active */
|
||||||
|
@ -1042,11 +1022,15 @@ static int MPBIOS_trigger(int idx)
|
||||||
{
|
{
|
||||||
case 0: /* conforms, ie. bus-type dependent */
|
case 0: /* conforms, ie. bus-type dependent */
|
||||||
{
|
{
|
||||||
|
trigger = test_bit(bus, mp_bus_not_pci)?
|
||||||
|
default_ISA_trigger(idx):
|
||||||
|
default_PCI_trigger(idx);
|
||||||
|
#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
|
||||||
switch (mp_bus_id_to_type[bus])
|
switch (mp_bus_id_to_type[bus])
|
||||||
{
|
{
|
||||||
case MP_BUS_ISA: /* ISA pin */
|
case MP_BUS_ISA: /* ISA pin */
|
||||||
{
|
{
|
||||||
trigger = default_ISA_trigger(idx);
|
/* set before the switch */
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
case MP_BUS_EISA: /* EISA pin */
|
case MP_BUS_EISA: /* EISA pin */
|
||||||
|
@ -1056,7 +1040,7 @@ static int MPBIOS_trigger(int idx)
|
||||||
}
|
}
|
||||||
case MP_BUS_PCI: /* PCI pin */
|
case MP_BUS_PCI: /* PCI pin */
|
||||||
{
|
{
|
||||||
trigger = default_PCI_trigger(idx);
|
/* set before the switch */
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
case MP_BUS_MCA: /* MCA pin */
|
case MP_BUS_MCA: /* MCA pin */
|
||||||
|
@ -1071,6 +1055,7 @@ static int MPBIOS_trigger(int idx)
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
#endif
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
case 1: /* edge */
|
case 1: /* edge */
|
||||||
|
@ -1120,17 +1105,9 @@ static int pin_2_irq(int idx, int apic, int pin)
|
||||||
if (mp_irqs[idx].mpc_dstirq != pin)
|
if (mp_irqs[idx].mpc_dstirq != pin)
|
||||||
printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
|
printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
|
||||||
|
|
||||||
switch (mp_bus_id_to_type[bus])
|
if (test_bit(bus, mp_bus_not_pci))
|
||||||
{
|
|
||||||
case MP_BUS_ISA: /* ISA pin */
|
|
||||||
case MP_BUS_EISA:
|
|
||||||
case MP_BUS_MCA:
|
|
||||||
{
|
|
||||||
irq = mp_irqs[idx].mpc_srcbusirq;
|
irq = mp_irqs[idx].mpc_srcbusirq;
|
||||||
break;
|
else {
|
||||||
}
|
|
||||||
case MP_BUS_PCI: /* PCI pin */
|
|
||||||
{
|
|
||||||
/*
|
/*
|
||||||
* PCI IRQs are mapped in order
|
* PCI IRQs are mapped in order
|
||||||
*/
|
*/
|
||||||
|
@ -1144,15 +1121,6 @@ static int pin_2_irq(int idx, int apic, int pin)
|
||||||
*/
|
*/
|
||||||
if (ioapic_renumber_irq)
|
if (ioapic_renumber_irq)
|
||||||
irq = ioapic_renumber_irq(apic, irq);
|
irq = ioapic_renumber_irq(apic, irq);
|
||||||
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
{
|
|
||||||
printk(KERN_ERR "unknown bus type %d.\n",bus);
|
|
||||||
irq = 0;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -1260,7 +1228,6 @@ static void __init setup_IO_APIC_irqs(void)
|
||||||
{
|
{
|
||||||
struct IO_APIC_route_entry entry;
|
struct IO_APIC_route_entry entry;
|
||||||
int apic, pin, idx, irq, first_notcon = 1, vector;
|
int apic, pin, idx, irq, first_notcon = 1, vector;
|
||||||
unsigned long flags;
|
|
||||||
|
|
||||||
apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
|
apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
|
||||||
|
|
||||||
|
@ -1326,9 +1293,7 @@ static void __init setup_IO_APIC_irqs(void)
|
||||||
if (!apic && (irq < 16))
|
if (!apic && (irq < 16))
|
||||||
disable_8259A_irq(irq);
|
disable_8259A_irq(irq);
|
||||||
}
|
}
|
||||||
spin_lock_irqsave(&ioapic_lock, flags);
|
ioapic_write_entry(apic, pin, entry);
|
||||||
__ioapic_write_entry(apic, pin, entry);
|
|
||||||
spin_unlock_irqrestore(&ioapic_lock, flags);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1524,8 +1489,8 @@ void /*__init*/ print_local_APIC(void * dummy)
|
||||||
|
|
||||||
printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
|
printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
|
||||||
smp_processor_id(), hard_smp_processor_id());
|
smp_processor_id(), hard_smp_processor_id());
|
||||||
v = apic_read(APIC_ID);
|
printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v,
|
||||||
printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, GET_APIC_ID(v));
|
GET_APIC_ID(read_apic_id()));
|
||||||
v = apic_read(APIC_LVR);
|
v = apic_read(APIC_LVR);
|
||||||
printk(KERN_INFO "... APIC VERSION: %08x\n", v);
|
printk(KERN_INFO "... APIC VERSION: %08x\n", v);
|
||||||
ver = GET_APIC_VERSION(v);
|
ver = GET_APIC_VERSION(v);
|
||||||
|
@ -1734,7 +1699,7 @@ void disable_IO_APIC(void)
|
||||||
entry.delivery_mode = dest_ExtINT; /* ExtInt */
|
entry.delivery_mode = dest_ExtINT; /* ExtInt */
|
||||||
entry.vector = 0;
|
entry.vector = 0;
|
||||||
entry.dest.physical.physical_dest =
|
entry.dest.physical.physical_dest =
|
||||||
GET_APIC_ID(apic_read(APIC_ID));
|
GET_APIC_ID(read_apic_id());
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Add it to the IO-APIC irq-routing table:
|
* Add it to the IO-APIC irq-routing table:
|
||||||
|
@ -2031,8 +1996,7 @@ static inline void init_IO_APIC_traps(void)
|
||||||
* 0x80, because int 0x80 is hm, kind of importantish. ;)
|
* 0x80, because int 0x80 is hm, kind of importantish. ;)
|
||||||
*/
|
*/
|
||||||
for (irq = 0; irq < NR_IRQS ; irq++) {
|
for (irq = 0; irq < NR_IRQS ; irq++) {
|
||||||
int tmp = irq;
|
if (IO_APIC_IRQ(irq) && !irq_vector[irq]) {
|
||||||
if (IO_APIC_IRQ(tmp) && !irq_vector[tmp]) {
|
|
||||||
/*
|
/*
|
||||||
* Hmm.. We don't have an entry for this,
|
* Hmm.. We don't have an entry for this,
|
||||||
* so default to an old-fashioned 8259
|
* so default to an old-fashioned 8259
|
||||||
|
@ -2156,8 +2120,6 @@ static inline void unlock_ExtINT_logic(void)
|
||||||
ioapic_write_entry(apic, pin, entry0);
|
ioapic_write_entry(apic, pin, entry0);
|
||||||
}
|
}
|
||||||
|
|
||||||
int timer_uses_ioapic_pin_0;
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* This code may look a bit paranoid, but it's supposed to cooperate with
|
* This code may look a bit paranoid, but it's supposed to cooperate with
|
||||||
* a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
|
* a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
|
||||||
|
@ -2168,10 +2130,14 @@ static inline void __init check_timer(void)
|
||||||
{
|
{
|
||||||
int apic1, pin1, apic2, pin2;
|
int apic1, pin1, apic2, pin2;
|
||||||
int vector;
|
int vector;
|
||||||
|
unsigned int ver;
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
|
||||||
local_irq_save(flags);
|
local_irq_save(flags);
|
||||||
|
|
||||||
|
ver = apic_read(APIC_LVR);
|
||||||
|
ver = GET_APIC_VERSION(ver);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* get/set the timer IRQ vector:
|
* get/set the timer IRQ vector:
|
||||||
*/
|
*/
|
||||||
|
@ -2184,11 +2150,15 @@ static inline void __init check_timer(void)
|
||||||
* mode for the 8259A whenever interrupts are routed
|
* mode for the 8259A whenever interrupts are routed
|
||||||
* through I/O APICs. Also IRQ0 has to be enabled in
|
* through I/O APICs. Also IRQ0 has to be enabled in
|
||||||
* the 8259A which implies the virtual wire has to be
|
* the 8259A which implies the virtual wire has to be
|
||||||
* disabled in the local APIC.
|
* disabled in the local APIC. Finally timer interrupts
|
||||||
|
* need to be acknowledged manually in the 8259A for
|
||||||
|
* timer_interrupt() and for the i82489DX when using
|
||||||
|
* the NMI watchdog.
|
||||||
*/
|
*/
|
||||||
apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
|
apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
|
||||||
init_8259A(1);
|
init_8259A(1);
|
||||||
timer_ack = 1;
|
timer_ack = !cpu_has_tsc;
|
||||||
|
timer_ack |= (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver));
|
||||||
if (timer_over_8254 > 0)
|
if (timer_over_8254 > 0)
|
||||||
enable_8259A_irq(0);
|
enable_8259A_irq(0);
|
||||||
|
|
||||||
|
@ -2197,9 +2167,6 @@ static inline void __init check_timer(void)
|
||||||
pin2 = ioapic_i8259.pin;
|
pin2 = ioapic_i8259.pin;
|
||||||
apic2 = ioapic_i8259.apic;
|
apic2 = ioapic_i8259.apic;
|
||||||
|
|
||||||
if (pin1 == 0)
|
|
||||||
timer_uses_ioapic_pin_0 = 1;
|
|
||||||
|
|
||||||
printk(KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
|
printk(KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
|
||||||
vector, apic1, pin1, apic2, pin2);
|
vector, apic1, pin1, apic2, pin2);
|
||||||
|
|
||||||
|
@ -2789,7 +2756,6 @@ int __init io_apic_get_redir_entries (int ioapic)
|
||||||
int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low)
|
int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low)
|
||||||
{
|
{
|
||||||
struct IO_APIC_route_entry entry;
|
struct IO_APIC_route_entry entry;
|
||||||
unsigned long flags;
|
|
||||||
|
|
||||||
if (!IO_APIC_IRQ(irq)) {
|
if (!IO_APIC_IRQ(irq)) {
|
||||||
printk(KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
|
printk(KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
|
||||||
|
@ -2830,9 +2796,7 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a
|
||||||
if (!ioapic && (irq < 16))
|
if (!ioapic && (irq < 16))
|
||||||
disable_8259A_irq(irq);
|
disable_8259A_irq(irq);
|
||||||
|
|
||||||
spin_lock_irqsave(&ioapic_lock, flags);
|
ioapic_write_entry(ioapic, pin, entry);
|
||||||
__ioapic_write_entry(ioapic, pin, entry);
|
|
||||||
spin_unlock_irqrestore(&ioapic_lock, flags);
|
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
|
@ -43,13 +43,15 @@
|
||||||
#include <asm/smp.h>
|
#include <asm/smp.h>
|
||||||
#include <asm/desc.h>
|
#include <asm/desc.h>
|
||||||
#include <asm/proto.h>
|
#include <asm/proto.h>
|
||||||
#include <asm/mach_apic.h>
|
|
||||||
#include <asm/acpi.h>
|
#include <asm/acpi.h>
|
||||||
#include <asm/dma.h>
|
#include <asm/dma.h>
|
||||||
#include <asm/nmi.h>
|
#include <asm/nmi.h>
|
||||||
#include <asm/msidef.h>
|
#include <asm/msidef.h>
|
||||||
#include <asm/hypertransport.h>
|
#include <asm/hypertransport.h>
|
||||||
|
|
||||||
|
#include <mach_ipi.h>
|
||||||
|
#include <mach_apic.h>
|
||||||
|
|
||||||
struct irq_cfg {
|
struct irq_cfg {
|
||||||
cpumask_t domain;
|
cpumask_t domain;
|
||||||
cpumask_t old_domain;
|
cpumask_t old_domain;
|
||||||
|
@ -101,6 +103,16 @@ DEFINE_SPINLOCK(vector_lock);
|
||||||
*/
|
*/
|
||||||
int nr_ioapic_registers[MAX_IO_APICS];
|
int nr_ioapic_registers[MAX_IO_APICS];
|
||||||
|
|
||||||
|
/* I/O APIC entries */
|
||||||
|
struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];
|
||||||
|
int nr_ioapics;
|
||||||
|
|
||||||
|
/* MP IRQ source entries */
|
||||||
|
struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
|
||||||
|
|
||||||
|
/* # of MP IRQ source entries */
|
||||||
|
int mp_irq_entries;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Rough estimation of how many shared IRQs there are, can
|
* Rough estimation of how many shared IRQs there are, can
|
||||||
* be changed anytime.
|
* be changed anytime.
|
||||||
|
@ -155,11 +167,10 @@ static inline void io_apic_modify(unsigned int apic, unsigned int value)
|
||||||
writel(value, &io_apic->data);
|
writel(value, &io_apic->data);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int io_apic_level_ack_pending(unsigned int irq)
|
static bool io_apic_level_ack_pending(unsigned int irq)
|
||||||
{
|
{
|
||||||
struct irq_pin_list *entry;
|
struct irq_pin_list *entry;
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
int pending = 0;
|
|
||||||
|
|
||||||
spin_lock_irqsave(&ioapic_lock, flags);
|
spin_lock_irqsave(&ioapic_lock, flags);
|
||||||
entry = irq_2_pin + irq;
|
entry = irq_2_pin + irq;
|
||||||
|
@ -172,13 +183,17 @@ static int io_apic_level_ack_pending(unsigned int irq)
|
||||||
break;
|
break;
|
||||||
reg = io_apic_read(entry->apic, 0x10 + pin*2);
|
reg = io_apic_read(entry->apic, 0x10 + pin*2);
|
||||||
/* Is the remote IRR bit set? */
|
/* Is the remote IRR bit set? */
|
||||||
pending |= (reg >> 14) & 1;
|
if ((reg >> 14) & 1) {
|
||||||
|
spin_unlock_irqrestore(&ioapic_lock, flags);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
if (!entry->next)
|
if (!entry->next)
|
||||||
break;
|
break;
|
||||||
entry = irq_2_pin + entry->next;
|
entry = irq_2_pin + entry->next;
|
||||||
}
|
}
|
||||||
spin_unlock_irqrestore(&ioapic_lock, flags);
|
spin_unlock_irqrestore(&ioapic_lock, flags);
|
||||||
return pending;
|
|
||||||
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -902,7 +917,6 @@ static void __init setup_IO_APIC_irqs(void)
|
||||||
static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, int vector)
|
static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, int vector)
|
||||||
{
|
{
|
||||||
struct IO_APIC_route_entry entry;
|
struct IO_APIC_route_entry entry;
|
||||||
unsigned long flags;
|
|
||||||
|
|
||||||
memset(&entry, 0, sizeof(entry));
|
memset(&entry, 0, sizeof(entry));
|
||||||
|
|
||||||
|
@ -932,10 +946,7 @@ static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, in
|
||||||
/*
|
/*
|
||||||
* Add it to the IO-APIC irq-routing table:
|
* Add it to the IO-APIC irq-routing table:
|
||||||
*/
|
*/
|
||||||
spin_lock_irqsave(&ioapic_lock, flags);
|
ioapic_write_entry(apic, pin, entry);
|
||||||
io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
|
|
||||||
io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
|
|
||||||
spin_unlock_irqrestore(&ioapic_lock, flags);
|
|
||||||
|
|
||||||
enable_8259A_irq(0);
|
enable_8259A_irq(0);
|
||||||
}
|
}
|
||||||
|
@ -1066,8 +1077,7 @@ void __apicdebuginit print_local_APIC(void * dummy)
|
||||||
|
|
||||||
printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
|
printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
|
||||||
smp_processor_id(), hard_smp_processor_id());
|
smp_processor_id(), hard_smp_processor_id());
|
||||||
v = apic_read(APIC_ID);
|
printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, GET_APIC_ID(read_apic_id()));
|
||||||
printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, GET_APIC_ID(v));
|
|
||||||
v = apic_read(APIC_LVR);
|
v = apic_read(APIC_LVR);
|
||||||
printk(KERN_INFO "... APIC VERSION: %08x\n", v);
|
printk(KERN_INFO "... APIC VERSION: %08x\n", v);
|
||||||
ver = GET_APIC_VERSION(v);
|
ver = GET_APIC_VERSION(v);
|
||||||
|
@ -1261,7 +1271,7 @@ void disable_IO_APIC(void)
|
||||||
entry.dest_mode = 0; /* Physical */
|
entry.dest_mode = 0; /* Physical */
|
||||||
entry.delivery_mode = dest_ExtINT; /* ExtInt */
|
entry.delivery_mode = dest_ExtINT; /* ExtInt */
|
||||||
entry.vector = 0;
|
entry.vector = 0;
|
||||||
entry.dest = GET_APIC_ID(apic_read(APIC_ID));
|
entry.dest = GET_APIC_ID(read_apic_id());
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Add it to the IO-APIC irq-routing table:
|
* Add it to the IO-APIC irq-routing table:
|
||||||
|
@ -1352,9 +1362,7 @@ static int ioapic_retrigger_irq(unsigned int irq)
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
|
||||||
spin_lock_irqsave(&vector_lock, flags);
|
spin_lock_irqsave(&vector_lock, flags);
|
||||||
cpus_clear(mask);
|
mask = cpumask_of_cpu(first_cpu(cfg->domain));
|
||||||
cpu_set(first_cpu(cfg->domain), mask);
|
|
||||||
|
|
||||||
send_IPI_mask(mask, cfg->vector);
|
send_IPI_mask(mask, cfg->vector);
|
||||||
spin_unlock_irqrestore(&vector_lock, flags);
|
spin_unlock_irqrestore(&vector_lock, flags);
|
||||||
|
|
||||||
|
@ -1517,8 +1525,7 @@ static inline void init_IO_APIC_traps(void)
|
||||||
* 0x80, because int 0x80 is hm, kind of importantish. ;)
|
* 0x80, because int 0x80 is hm, kind of importantish. ;)
|
||||||
*/
|
*/
|
||||||
for (irq = 0; irq < NR_IRQS ; irq++) {
|
for (irq = 0; irq < NR_IRQS ; irq++) {
|
||||||
int tmp = irq;
|
if (IO_APIC_IRQ(irq) && !irq_cfg[irq].vector) {
|
||||||
if (IO_APIC_IRQ(tmp) && !irq_cfg[tmp].vector) {
|
|
||||||
/*
|
/*
|
||||||
* Hmm.. We don't have an entry for this,
|
* Hmm.. We don't have an entry for this,
|
||||||
* so default to an old-fashioned 8259
|
* so default to an old-fashioned 8259
|
||||||
|
@ -1597,17 +1604,14 @@ static inline void unlock_ExtINT_logic(void)
|
||||||
int apic, pin, i;
|
int apic, pin, i;
|
||||||
struct IO_APIC_route_entry entry0, entry1;
|
struct IO_APIC_route_entry entry0, entry1;
|
||||||
unsigned char save_control, save_freq_select;
|
unsigned char save_control, save_freq_select;
|
||||||
unsigned long flags;
|
|
||||||
|
|
||||||
pin = find_isa_irq_pin(8, mp_INT);
|
pin = find_isa_irq_pin(8, mp_INT);
|
||||||
apic = find_isa_irq_apic(8, mp_INT);
|
apic = find_isa_irq_apic(8, mp_INT);
|
||||||
if (pin == -1)
|
if (pin == -1)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
spin_lock_irqsave(&ioapic_lock, flags);
|
entry0 = ioapic_read_entry(apic, pin);
|
||||||
*(((int *)&entry0) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
|
|
||||||
*(((int *)&entry0) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
|
|
||||||
spin_unlock_irqrestore(&ioapic_lock, flags);
|
|
||||||
clear_IO_APIC_pin(apic, pin);
|
clear_IO_APIC_pin(apic, pin);
|
||||||
|
|
||||||
memset(&entry1, 0, sizeof(entry1));
|
memset(&entry1, 0, sizeof(entry1));
|
||||||
|
@ -1620,10 +1624,7 @@ static inline void unlock_ExtINT_logic(void)
|
||||||
entry1.trigger = 0;
|
entry1.trigger = 0;
|
||||||
entry1.vector = 0;
|
entry1.vector = 0;
|
||||||
|
|
||||||
spin_lock_irqsave(&ioapic_lock, flags);
|
ioapic_write_entry(apic, pin, entry1);
|
||||||
io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry1) + 1));
|
|
||||||
io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry1) + 0));
|
|
||||||
spin_unlock_irqrestore(&ioapic_lock, flags);
|
|
||||||
|
|
||||||
save_control = CMOS_READ(RTC_CONTROL);
|
save_control = CMOS_READ(RTC_CONTROL);
|
||||||
save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
|
save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
|
||||||
|
@ -1642,10 +1643,7 @@ static inline void unlock_ExtINT_logic(void)
|
||||||
CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
|
CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
|
||||||
clear_IO_APIC_pin(apic, pin);
|
clear_IO_APIC_pin(apic, pin);
|
||||||
|
|
||||||
spin_lock_irqsave(&ioapic_lock, flags);
|
ioapic_write_entry(apic, pin, entry0);
|
||||||
io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry0) + 1));
|
|
||||||
io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry0) + 0));
|
|
||||||
spin_unlock_irqrestore(&ioapic_lock, flags);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -2314,7 +2312,6 @@ static struct resource * __init ioapic_setup_resources(void)
|
||||||
res = (void *)mem;
|
res = (void *)mem;
|
||||||
|
|
||||||
if (mem != NULL) {
|
if (mem != NULL) {
|
||||||
memset(mem, 0, n);
|
|
||||||
mem += sizeof(struct resource) * nr_ioapics;
|
mem += sizeof(struct resource) * nr_ioapics;
|
||||||
|
|
||||||
for (i = 0; i < nr_ioapics; i++) {
|
for (i = 0; i < nr_ioapics; i++) {
|
||||||
|
|
178
arch/x86/kernel/ipi.c
Normal file
178
arch/x86/kernel/ipi.c
Normal file
|
@ -0,0 +1,178 @@
|
||||||
|
#include <linux/cpumask.h>
|
||||||
|
#include <linux/interrupt.h>
|
||||||
|
#include <linux/init.h>
|
||||||
|
|
||||||
|
#include <linux/mm.h>
|
||||||
|
#include <linux/delay.h>
|
||||||
|
#include <linux/spinlock.h>
|
||||||
|
#include <linux/kernel_stat.h>
|
||||||
|
#include <linux/mc146818rtc.h>
|
||||||
|
#include <linux/cache.h>
|
||||||
|
#include <linux/interrupt.h>
|
||||||
|
#include <linux/cpu.h>
|
||||||
|
#include <linux/module.h>
|
||||||
|
|
||||||
|
#include <asm/smp.h>
|
||||||
|
#include <asm/mtrr.h>
|
||||||
|
#include <asm/tlbflush.h>
|
||||||
|
#include <asm/mmu_context.h>
|
||||||
|
#include <asm/apic.h>
|
||||||
|
#include <asm/proto.h>
|
||||||
|
|
||||||
|
#ifdef CONFIG_X86_32
|
||||||
|
#include <mach_apic.h>
|
||||||
|
/*
|
||||||
|
* the following functions deal with sending IPIs between CPUs.
|
||||||
|
*
|
||||||
|
* We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
|
||||||
|
*/
|
||||||
|
|
||||||
|
static inline int __prepare_ICR(unsigned int shortcut, int vector)
|
||||||
|
{
|
||||||
|
unsigned int icr = shortcut | APIC_DEST_LOGICAL;
|
||||||
|
|
||||||
|
switch (vector) {
|
||||||
|
default:
|
||||||
|
icr |= APIC_DM_FIXED | vector;
|
||||||
|
break;
|
||||||
|
case NMI_VECTOR:
|
||||||
|
icr |= APIC_DM_NMI;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
return icr;
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline int __prepare_ICR2(unsigned int mask)
|
||||||
|
{
|
||||||
|
return SET_APIC_DEST_FIELD(mask);
|
||||||
|
}
|
||||||
|
|
||||||
|
void __send_IPI_shortcut(unsigned int shortcut, int vector)
|
||||||
|
{
|
||||||
|
/*
|
||||||
|
* Subtle. In the case of the 'never do double writes' workaround
|
||||||
|
* we have to lock out interrupts to be safe. As we don't care
|
||||||
|
* of the value read we use an atomic rmw access to avoid costly
|
||||||
|
* cli/sti. Otherwise we use an even cheaper single atomic write
|
||||||
|
* to the APIC.
|
||||||
|
*/
|
||||||
|
unsigned int cfg;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Wait for idle.
|
||||||
|
*/
|
||||||
|
apic_wait_icr_idle();
|
||||||
|
|
||||||
|
/*
|
||||||
|
* No need to touch the target chip field
|
||||||
|
*/
|
||||||
|
cfg = __prepare_ICR(shortcut, vector);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Send the IPI. The write to APIC_ICR fires this off.
|
||||||
|
*/
|
||||||
|
apic_write_around(APIC_ICR, cfg);
|
||||||
|
}
|
||||||
|
|
||||||
|
void send_IPI_self(int vector)
|
||||||
|
{
|
||||||
|
__send_IPI_shortcut(APIC_DEST_SELF, vector);
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* This is used to send an IPI with no shorthand notation (the destination is
|
||||||
|
* specified in bits 56 to 63 of the ICR).
|
||||||
|
*/
|
||||||
|
static inline void __send_IPI_dest_field(unsigned long mask, int vector)
|
||||||
|
{
|
||||||
|
unsigned long cfg;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Wait for idle.
|
||||||
|
*/
|
||||||
|
if (unlikely(vector == NMI_VECTOR))
|
||||||
|
safe_apic_wait_icr_idle();
|
||||||
|
else
|
||||||
|
apic_wait_icr_idle();
|
||||||
|
|
||||||
|
/*
|
||||||
|
* prepare target chip field
|
||||||
|
*/
|
||||||
|
cfg = __prepare_ICR2(mask);
|
||||||
|
apic_write_around(APIC_ICR2, cfg);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* program the ICR
|
||||||
|
*/
|
||||||
|
cfg = __prepare_ICR(0, vector);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Send the IPI. The write to APIC_ICR fires this off.
|
||||||
|
*/
|
||||||
|
apic_write_around(APIC_ICR, cfg);
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* This is only used on smaller machines.
|
||||||
|
*/
|
||||||
|
void send_IPI_mask_bitmask(cpumask_t cpumask, int vector)
|
||||||
|
{
|
||||||
|
unsigned long mask = cpus_addr(cpumask)[0];
|
||||||
|
unsigned long flags;
|
||||||
|
|
||||||
|
local_irq_save(flags);
|
||||||
|
WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]);
|
||||||
|
__send_IPI_dest_field(mask, vector);
|
||||||
|
local_irq_restore(flags);
|
||||||
|
}
|
||||||
|
|
||||||
|
void send_IPI_mask_sequence(cpumask_t mask, int vector)
|
||||||
|
{
|
||||||
|
unsigned long flags;
|
||||||
|
unsigned int query_cpu;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Hack. The clustered APIC addressing mode doesn't allow us to send
|
||||||
|
* to an arbitrary mask, so I do a unicasts to each CPU instead. This
|
||||||
|
* should be modified to do 1 message per cluster ID - mbligh
|
||||||
|
*/
|
||||||
|
|
||||||
|
local_irq_save(flags);
|
||||||
|
for_each_possible_cpu(query_cpu) {
|
||||||
|
if (cpu_isset(query_cpu, mask)) {
|
||||||
|
__send_IPI_dest_field(cpu_to_logical_apicid(query_cpu),
|
||||||
|
vector);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
local_irq_restore(flags);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* must come after the send_IPI functions above for inlining */
|
||||||
|
#include <mach_ipi.h>
|
||||||
|
static int convert_apicid_to_cpu(int apic_id)
|
||||||
|
{
|
||||||
|
int i;
|
||||||
|
|
||||||
|
for_each_possible_cpu(i) {
|
||||||
|
if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
|
||||||
|
return i;
|
||||||
|
}
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
int safe_smp_processor_id(void)
|
||||||
|
{
|
||||||
|
int apicid, cpuid;
|
||||||
|
|
||||||
|
if (!boot_cpu_has(X86_FEATURE_APIC))
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
apicid = hard_smp_processor_id();
|
||||||
|
if (apicid == BAD_APICID)
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
cpuid = convert_apicid_to_cpu(apicid);
|
||||||
|
|
||||||
|
return cpuid >= 0 ? cpuid : 0;
|
||||||
|
}
|
||||||
|
#endif
|
|
@ -79,7 +79,7 @@ unsigned int do_IRQ(struct pt_regs *regs)
|
||||||
|
|
||||||
if (unlikely((unsigned)irq >= NR_IRQS)) {
|
if (unlikely((unsigned)irq >= NR_IRQS)) {
|
||||||
printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
|
printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
|
||||||
__FUNCTION__, irq);
|
__func__, irq);
|
||||||
BUG();
|
BUG();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -410,13 +410,13 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
|
||||||
static void __kprobes clear_btf(void)
|
static void __kprobes clear_btf(void)
|
||||||
{
|
{
|
||||||
if (test_thread_flag(TIF_DEBUGCTLMSR))
|
if (test_thread_flag(TIF_DEBUGCTLMSR))
|
||||||
wrmsrl(MSR_IA32_DEBUGCTLMSR, 0);
|
update_debugctlmsr(0);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void __kprobes restore_btf(void)
|
static void __kprobes restore_btf(void)
|
||||||
{
|
{
|
||||||
if (test_thread_flag(TIF_DEBUGCTLMSR))
|
if (test_thread_flag(TIF_DEBUGCTLMSR))
|
||||||
wrmsrl(MSR_IA32_DEBUGCTLMSR, current->thread.debugctlmsr);
|
update_debugctlmsr(current->thread.debugctlmsr);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
|
static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
|
||||||
|
@ -489,7 +489,7 @@ static int __kprobes reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
|
||||||
break;
|
break;
|
||||||
case KPROBE_HIT_SS:
|
case KPROBE_HIT_SS:
|
||||||
if (p == kprobe_running()) {
|
if (p == kprobe_running()) {
|
||||||
regs->flags &= ~TF_MASK;
|
regs->flags &= ~X86_EFLAGS_TF;
|
||||||
regs->flags |= kcb->kprobe_saved_flags;
|
regs->flags |= kcb->kprobe_saved_flags;
|
||||||
return 0;
|
return 0;
|
||||||
} else {
|
} else {
|
||||||
|
@ -858,15 +858,15 @@ static int __kprobes post_kprobe_handler(struct pt_regs *regs)
|
||||||
if (!cur)
|
if (!cur)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
|
resume_execution(cur, regs, kcb);
|
||||||
|
regs->flags |= kcb->kprobe_saved_flags;
|
||||||
|
trace_hardirqs_fixup_flags(regs->flags);
|
||||||
|
|
||||||
if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
|
if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
|
||||||
kcb->kprobe_status = KPROBE_HIT_SSDONE;
|
kcb->kprobe_status = KPROBE_HIT_SSDONE;
|
||||||
cur->post_handler(cur, regs, 0);
|
cur->post_handler(cur, regs, 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
resume_execution(cur, regs, kcb);
|
|
||||||
regs->flags |= kcb->kprobe_saved_flags;
|
|
||||||
trace_hardirqs_fixup_flags(regs->flags);
|
|
||||||
|
|
||||||
/* Restore back the original saved kprobes variables and continue. */
|
/* Restore back the original saved kprobes variables and continue. */
|
||||||
if (kcb->kprobe_status == KPROBE_REENTER) {
|
if (kcb->kprobe_status == KPROBE_REENTER) {
|
||||||
restore_previous_kprobe(kcb);
|
restore_previous_kprobe(kcb);
|
||||||
|
|
|
@ -53,9 +53,9 @@
|
||||||
#include <linux/init.h>
|
#include <linux/init.h>
|
||||||
#include <asm/arch_hooks.h>
|
#include <asm/arch_hooks.h>
|
||||||
|
|
||||||
static unsigned char which_scsi = 0;
|
static unsigned char which_scsi;
|
||||||
|
|
||||||
int MCA_bus = 0;
|
int MCA_bus;
|
||||||
EXPORT_SYMBOL(MCA_bus);
|
EXPORT_SYMBOL(MCA_bus);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -68,7 +68,8 @@ static DEFINE_SPINLOCK(mca_lock);
|
||||||
|
|
||||||
/* Build the status info for the adapter */
|
/* Build the status info for the adapter */
|
||||||
|
|
||||||
static void mca_configure_adapter_status(struct mca_device *mca_dev) {
|
static void mca_configure_adapter_status(struct mca_device *mca_dev)
|
||||||
|
{
|
||||||
mca_dev->status = MCA_ADAPTER_NONE;
|
mca_dev->status = MCA_ADAPTER_NONE;
|
||||||
|
|
||||||
mca_dev->pos_id = mca_dev->pos[0]
|
mca_dev->pos_id = mca_dev->pos[0]
|
||||||
|
@ -76,7 +77,8 @@ static void mca_configure_adapter_status(struct mca_device *mca_dev) {
|
||||||
|
|
||||||
if (!mca_dev->pos_id && mca_dev->slot < MCA_MAX_SLOT_NR) {
|
if (!mca_dev->pos_id && mca_dev->slot < MCA_MAX_SLOT_NR) {
|
||||||
|
|
||||||
/* id = 0x0000 usually indicates hardware failure,
|
/*
|
||||||
|
* id = 0x0000 usually indicates hardware failure,
|
||||||
* however, ZP Gu (zpg@castle.net> reports that his 9556
|
* however, ZP Gu (zpg@castle.net> reports that his 9556
|
||||||
* has 0x0000 as id and everything still works. There
|
* has 0x0000 as id and everything still works. There
|
||||||
* also seem to be an adapter with id = 0x0000; the
|
* also seem to be an adapter with id = 0x0000; the
|
||||||
|
@ -89,7 +91,8 @@ static void mca_configure_adapter_status(struct mca_device *mca_dev) {
|
||||||
return;
|
return;
|
||||||
} else if (mca_dev->pos_id != 0xffff) {
|
} else if (mca_dev->pos_id != 0xffff) {
|
||||||
|
|
||||||
/* 0xffff usually indicates that there's no adapter,
|
/*
|
||||||
|
* 0xffff usually indicates that there's no adapter,
|
||||||
* however, some integrated adapters may have 0xffff as
|
* however, some integrated adapters may have 0xffff as
|
||||||
* their id and still be valid. Examples are on-board
|
* their id and still be valid. Examples are on-board
|
||||||
* VGA of the 55sx, the integrated SCSI of the 56 & 57,
|
* VGA of the 55sx, the integrated SCSI of the 56 & 57,
|
||||||
|
@ -133,7 +136,7 @@ static struct resource mca_standard_resources[] = {
|
||||||
|
|
||||||
#define MCA_STANDARD_RESOURCES ARRAY_SIZE(mca_standard_resources)
|
#define MCA_STANDARD_RESOURCES ARRAY_SIZE(mca_standard_resources)
|
||||||
|
|
||||||
/**
|
/*
|
||||||
* mca_read_and_store_pos - read the POS registers into a memory buffer
|
* mca_read_and_store_pos - read the POS registers into a memory buffer
|
||||||
* @pos: a char pointer to 8 bytes, contains the POS register value on
|
* @pos: a char pointer to 8 bytes, contains the POS register value on
|
||||||
* successful return
|
* successful return
|
||||||
|
@ -141,12 +144,14 @@ static struct resource mca_standard_resources[] = {
|
||||||
* Returns 1 if a card actually exists (i.e. the pos isn't
|
* Returns 1 if a card actually exists (i.e. the pos isn't
|
||||||
* all 0xff) or 0 otherwise
|
* all 0xff) or 0 otherwise
|
||||||
*/
|
*/
|
||||||
static int mca_read_and_store_pos(unsigned char *pos) {
|
static int mca_read_and_store_pos(unsigned char *pos)
|
||||||
|
{
|
||||||
int j;
|
int j;
|
||||||
int found = 0;
|
int found = 0;
|
||||||
|
|
||||||
for (j = 0; j < 8; j++) {
|
for (j = 0; j < 8; j++) {
|
||||||
if((pos[j] = inb_p(MCA_POS_REG(j))) != 0xff) {
|
pos[j] = inb_p(MCA_POS_REG(j));
|
||||||
|
if (pos[j] != 0xff) {
|
||||||
/* 0xff all across means no device. 0x00 means
|
/* 0xff all across means no device. 0x00 means
|
||||||
* something's broken, but a device is
|
* something's broken, but a device is
|
||||||
* probably there. However, if you get 0x00
|
* probably there. However, if you get 0x00
|
||||||
|
@ -251,7 +256,8 @@ static int __init mca_init(void)
|
||||||
short mca_builtin_scsi_ports[] = {0xf7, 0xfd, 0x00};
|
short mca_builtin_scsi_ports[] = {0xf7, 0xfd, 0x00};
|
||||||
struct mca_bus *bus;
|
struct mca_bus *bus;
|
||||||
|
|
||||||
/* WARNING: Be careful when making changes here. Putting an adapter
|
/*
|
||||||
|
* WARNING: Be careful when making changes here. Putting an adapter
|
||||||
* and the motherboard simultaneously into setup mode may result in
|
* and the motherboard simultaneously into setup mode may result in
|
||||||
* damage to chips (according to The Indispensible PC Hardware Book
|
* damage to chips (according to The Indispensible PC Hardware Book
|
||||||
* by Hans-Peter Messmer). Also, we disable system interrupts (so
|
* by Hans-Peter Messmer). Also, we disable system interrupts (so
|
||||||
|
@ -326,7 +332,8 @@ static int __init mca_init(void)
|
||||||
mca_dev->slot = MCA_INTEGVIDEO;
|
mca_dev->slot = MCA_INTEGVIDEO;
|
||||||
mca_register_device(MCA_PRIMARY_BUS, mca_dev);
|
mca_register_device(MCA_PRIMARY_BUS, mca_dev);
|
||||||
|
|
||||||
/* Put motherboard into scsi setup mode, read integrated scsi
|
/*
|
||||||
|
* Put motherboard into scsi setup mode, read integrated scsi
|
||||||
* POS registers, and turn motherboard setup off.
|
* POS registers, and turn motherboard setup off.
|
||||||
*
|
*
|
||||||
* It seems there are two possible SCSI registers. Martin says that
|
* It seems there are two possible SCSI registers. Martin says that
|
||||||
|
@ -364,7 +371,8 @@ static int __init mca_init(void)
|
||||||
|
|
||||||
outb_p(0xff, MCA_MOTHERBOARD_SETUP_REG);
|
outb_p(0xff, MCA_MOTHERBOARD_SETUP_REG);
|
||||||
|
|
||||||
/* Now loop over MCA slots: put each adapter into setup mode, and
|
/*
|
||||||
|
* Now loop over MCA slots: put each adapter into setup mode, and
|
||||||
* read its POS registers. Then put adapter setup off.
|
* read its POS registers. Then put adapter setup off.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
@ -448,7 +456,8 @@ static int __kprobes mca_handle_nmi_callback(struct device *dev, void *data)
|
||||||
pos5 = mca_device_read_pos(mca_dev, 5);
|
pos5 = mca_device_read_pos(mca_dev, 5);
|
||||||
|
|
||||||
if (!(pos5 & 0x80)) {
|
if (!(pos5 & 0x80)) {
|
||||||
/* Bit 7 of POS 5 is reset when this adapter has a hardware
|
/*
|
||||||
|
* Bit 7 of POS 5 is reset when this adapter has a hardware
|
||||||
* error. Bit 7 it reset if there's error information
|
* error. Bit 7 it reset if there's error information
|
||||||
* available in POS 6 and 7.
|
* available in POS 6 and 7.
|
||||||
*/
|
*/
|
||||||
|
@ -460,7 +469,8 @@ static int __kprobes mca_handle_nmi_callback(struct device *dev, void *data)
|
||||||
|
|
||||||
void __kprobes mca_handle_nmi(void)
|
void __kprobes mca_handle_nmi(void)
|
||||||
{
|
{
|
||||||
/* First try - scan the various adapters and see if a specific
|
/*
|
||||||
|
* First try - scan the various adapters and see if a specific
|
||||||
* adapter was responsible for the error.
|
* adapter was responsible for the error.
|
||||||
*/
|
*/
|
||||||
bus_for_each_dev(&mca_bus_type, NULL, NULL, mca_handle_nmi_callback);
|
bus_for_each_dev(&mca_bus_type, NULL, NULL, mca_handle_nmi_callback);
|
||||||
|
|
|
@ -336,11 +336,11 @@ static void apply_microcode(int cpu)
|
||||||
|
|
||||||
spin_unlock_irqrestore(µcode_update_lock, flags);
|
spin_unlock_irqrestore(µcode_update_lock, flags);
|
||||||
if (val[1] != uci->mc->hdr.rev) {
|
if (val[1] != uci->mc->hdr.rev) {
|
||||||
printk(KERN_ERR "microcode: CPU%d updated from revision "
|
printk(KERN_ERR "microcode: CPU%d update from revision "
|
||||||
"0x%x to 0x%x failed\n", cpu_num, uci->rev, val[1]);
|
"0x%x to 0x%x failed\n", cpu_num, uci->rev, val[1]);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
pr_debug("microcode: CPU%d updated from revision "
|
printk(KERN_INFO "microcode: CPU%d updated from revision "
|
||||||
"0x%x to 0x%x, date = %08x \n",
|
"0x%x to 0x%x, date = %08x \n",
|
||||||
cpu_num, uci->rev, val[1], uci->mc->hdr.date);
|
cpu_num, uci->rev, val[1], uci->mc->hdr.date);
|
||||||
uci->rev = val[1];
|
uci->rev = val[1];
|
||||||
|
@ -534,7 +534,7 @@ static int cpu_request_microcode(int cpu)
|
||||||
c->x86, c->x86_model, c->x86_mask);
|
c->x86, c->x86_model, c->x86_mask);
|
||||||
error = request_firmware(&firmware, name, µcode_pdev->dev);
|
error = request_firmware(&firmware, name, µcode_pdev->dev);
|
||||||
if (error) {
|
if (error) {
|
||||||
pr_debug("ucode data file %s load failed\n", name);
|
pr_debug("microcode: ucode data file %s load failed\n", name);
|
||||||
return error;
|
return error;
|
||||||
}
|
}
|
||||||
buf = firmware->data;
|
buf = firmware->data;
|
||||||
|
@ -709,7 +709,7 @@ static int __mc_sysdev_add(struct sys_device *sys_dev, int resume)
|
||||||
if (!cpu_online(cpu))
|
if (!cpu_online(cpu))
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
pr_debug("Microcode:CPU %d added\n", cpu);
|
pr_debug("microcode: CPU%d added\n", cpu);
|
||||||
memset(uci, 0, sizeof(*uci));
|
memset(uci, 0, sizeof(*uci));
|
||||||
|
|
||||||
err = sysfs_create_group(&sys_dev->kobj, &mc_attr_group);
|
err = sysfs_create_group(&sys_dev->kobj, &mc_attr_group);
|
||||||
|
@ -733,7 +733,7 @@ static int mc_sysdev_remove(struct sys_device *sys_dev)
|
||||||
if (!cpu_online(cpu))
|
if (!cpu_online(cpu))
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
pr_debug("Microcode:CPU %d removed\n", cpu);
|
pr_debug("microcode: CPU%d removed\n", cpu);
|
||||||
microcode_fini_cpu(cpu);
|
microcode_fini_cpu(cpu);
|
||||||
sysfs_remove_group(&sys_dev->kobj, &mc_attr_group);
|
sysfs_remove_group(&sys_dev->kobj, &mc_attr_group);
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -745,7 +745,7 @@ static int mc_sysdev_resume(struct sys_device *dev)
|
||||||
|
|
||||||
if (!cpu_online(cpu))
|
if (!cpu_online(cpu))
|
||||||
return 0;
|
return 0;
|
||||||
pr_debug("Microcode:CPU %d resumed\n", cpu);
|
pr_debug("microcode: CPU%d resumed\n", cpu);
|
||||||
/* only CPU 0 will apply ucode here */
|
/* only CPU 0 will apply ucode here */
|
||||||
apply_microcode(0);
|
apply_microcode(0);
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -783,7 +783,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
|
||||||
}
|
}
|
||||||
case CPU_DOWN_FAILED_FROZEN:
|
case CPU_DOWN_FAILED_FROZEN:
|
||||||
if (sysfs_create_group(&sys_dev->kobj, &mc_attr_group))
|
if (sysfs_create_group(&sys_dev->kobj, &mc_attr_group))
|
||||||
printk(KERN_ERR "Microcode: Failed to create the sysfs "
|
printk(KERN_ERR "microcode: Failed to create the sysfs "
|
||||||
"group for CPU%d\n", cpu);
|
"group for CPU%d\n", cpu);
|
||||||
break;
|
break;
|
||||||
case CPU_DOWN_PREPARE:
|
case CPU_DOWN_PREPARE:
|
||||||
|
|
File diff suppressed because it is too large
Load diff
|
@ -1,867 +0,0 @@
|
||||||
/*
|
|
||||||
* Intel Multiprocessor Specification 1.1 and 1.4
|
|
||||||
* compliant MP-table parsing routines.
|
|
||||||
*
|
|
||||||
* (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
|
|
||||||
* (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
|
|
||||||
*
|
|
||||||
* Fixes
|
|
||||||
* Erich Boleyn : MP v1.4 and additional changes.
|
|
||||||
* Alan Cox : Added EBDA scanning
|
|
||||||
* Ingo Molnar : various cleanups and rewrites
|
|
||||||
* Maciej W. Rozycki: Bits for default MP configurations
|
|
||||||
* Paul Diefenbaugh: Added full ACPI support
|
|
||||||
*/
|
|
||||||
|
|
||||||
#include <linux/mm.h>
|
|
||||||
#include <linux/init.h>
|
|
||||||
#include <linux/delay.h>
|
|
||||||
#include <linux/bootmem.h>
|
|
||||||
#include <linux/kernel_stat.h>
|
|
||||||
#include <linux/mc146818rtc.h>
|
|
||||||
#include <linux/acpi.h>
|
|
||||||
#include <linux/module.h>
|
|
||||||
|
|
||||||
#include <asm/smp.h>
|
|
||||||
#include <asm/mtrr.h>
|
|
||||||
#include <asm/mpspec.h>
|
|
||||||
#include <asm/pgalloc.h>
|
|
||||||
#include <asm/io_apic.h>
|
|
||||||
#include <asm/proto.h>
|
|
||||||
#include <asm/acpi.h>
|
|
||||||
|
|
||||||
/* Have we found an MP table */
|
|
||||||
int smp_found_config;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Various Linux-internal data structures created from the
|
|
||||||
* MP-table.
|
|
||||||
*/
|
|
||||||
DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
|
|
||||||
int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
|
|
||||||
|
|
||||||
static int mp_current_pci_id = 0;
|
|
||||||
/* I/O APIC entries */
|
|
||||||
struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];
|
|
||||||
|
|
||||||
/* # of MP IRQ source entries */
|
|
||||||
struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
|
|
||||||
|
|
||||||
/* MP IRQ source entries */
|
|
||||||
int mp_irq_entries;
|
|
||||||
|
|
||||||
int nr_ioapics;
|
|
||||||
unsigned long mp_lapic_addr = 0;
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
/* Processor that is doing the boot up */
|
|
||||||
unsigned int boot_cpu_id = -1U;
|
|
||||||
EXPORT_SYMBOL(boot_cpu_id);
|
|
||||||
|
|
||||||
/* Internal processor count */
|
|
||||||
unsigned int num_processors;
|
|
||||||
|
|
||||||
unsigned disabled_cpus __cpuinitdata;
|
|
||||||
|
|
||||||
/* Bitmask of physically existing CPUs */
|
|
||||||
physid_mask_t phys_cpu_present_map = PHYSID_MASK_NONE;
|
|
||||||
|
|
||||||
u16 x86_bios_cpu_apicid_init[NR_CPUS] __initdata
|
|
||||||
= { [0 ... NR_CPUS-1] = BAD_APICID };
|
|
||||||
void *x86_bios_cpu_apicid_early_ptr;
|
|
||||||
DEFINE_PER_CPU(u16, x86_bios_cpu_apicid) = BAD_APICID;
|
|
||||||
EXPORT_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
|
|
||||||
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Intel MP BIOS table parsing routines:
|
|
||||||
*/
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Checksum an MP configuration block.
|
|
||||||
*/
|
|
||||||
|
|
||||||
static int __init mpf_checksum(unsigned char *mp, int len)
|
|
||||||
{
|
|
||||||
int sum = 0;
|
|
||||||
|
|
||||||
while (len--)
|
|
||||||
sum += *mp++;
|
|
||||||
|
|
||||||
return sum & 0xFF;
|
|
||||||
}
|
|
||||||
|
|
||||||
static void __cpuinit MP_processor_info(struct mpc_config_processor *m)
|
|
||||||
{
|
|
||||||
int cpu;
|
|
||||||
cpumask_t tmp_map;
|
|
||||||
char *bootup_cpu = "";
|
|
||||||
|
|
||||||
if (!(m->mpc_cpuflag & CPU_ENABLED)) {
|
|
||||||
disabled_cpus++;
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
|
|
||||||
bootup_cpu = " (Bootup-CPU)";
|
|
||||||
boot_cpu_id = m->mpc_apicid;
|
|
||||||
}
|
|
||||||
|
|
||||||
printk(KERN_INFO "Processor #%d%s\n", m->mpc_apicid, bootup_cpu);
|
|
||||||
|
|
||||||
if (num_processors >= NR_CPUS) {
|
|
||||||
printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
|
|
||||||
" Processor ignored.\n", NR_CPUS);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
num_processors++;
|
|
||||||
cpus_complement(tmp_map, cpu_present_map);
|
|
||||||
cpu = first_cpu(tmp_map);
|
|
||||||
|
|
||||||
physid_set(m->mpc_apicid, phys_cpu_present_map);
|
|
||||||
if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
|
|
||||||
/*
|
|
||||||
* x86_bios_cpu_apicid is required to have processors listed
|
|
||||||
* in same order as logical cpu numbers. Hence the first
|
|
||||||
* entry is BSP, and so on.
|
|
||||||
*/
|
|
||||||
cpu = 0;
|
|
||||||
}
|
|
||||||
/* are we being called early in kernel startup? */
|
|
||||||
if (x86_cpu_to_apicid_early_ptr) {
|
|
||||||
u16 *cpu_to_apicid = x86_cpu_to_apicid_early_ptr;
|
|
||||||
u16 *bios_cpu_apicid = x86_bios_cpu_apicid_early_ptr;
|
|
||||||
|
|
||||||
cpu_to_apicid[cpu] = m->mpc_apicid;
|
|
||||||
bios_cpu_apicid[cpu] = m->mpc_apicid;
|
|
||||||
} else {
|
|
||||||
per_cpu(x86_cpu_to_apicid, cpu) = m->mpc_apicid;
|
|
||||||
per_cpu(x86_bios_cpu_apicid, cpu) = m->mpc_apicid;
|
|
||||||
}
|
|
||||||
|
|
||||||
cpu_set(cpu, cpu_possible_map);
|
|
||||||
cpu_set(cpu, cpu_present_map);
|
|
||||||
}
|
|
||||||
|
|
||||||
static void __init MP_bus_info (struct mpc_config_bus *m)
|
|
||||||
{
|
|
||||||
char str[7];
|
|
||||||
|
|
||||||
memcpy(str, m->mpc_bustype, 6);
|
|
||||||
str[6] = 0;
|
|
||||||
Dprintk("Bus #%d is %s\n", m->mpc_busid, str);
|
|
||||||
|
|
||||||
if (strncmp(str, "ISA", 3) == 0) {
|
|
||||||
set_bit(m->mpc_busid, mp_bus_not_pci);
|
|
||||||
} else if (strncmp(str, "PCI", 3) == 0) {
|
|
||||||
clear_bit(m->mpc_busid, mp_bus_not_pci);
|
|
||||||
mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id;
|
|
||||||
mp_current_pci_id++;
|
|
||||||
} else {
|
|
||||||
printk(KERN_ERR "Unknown bustype %s\n", str);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
static int bad_ioapic(unsigned long address)
|
|
||||||
{
|
|
||||||
if (nr_ioapics >= MAX_IO_APICS) {
|
|
||||||
printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
|
|
||||||
"(found %d)\n", MAX_IO_APICS, nr_ioapics);
|
|
||||||
panic("Recompile kernel with bigger MAX_IO_APICS!\n");
|
|
||||||
}
|
|
||||||
if (!address) {
|
|
||||||
printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
|
|
||||||
" found in table, skipping!\n");
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
static void __init MP_ioapic_info (struct mpc_config_ioapic *m)
|
|
||||||
{
|
|
||||||
if (!(m->mpc_flags & MPC_APIC_USABLE))
|
|
||||||
return;
|
|
||||||
|
|
||||||
printk("I/O APIC #%d at 0x%X.\n",
|
|
||||||
m->mpc_apicid, m->mpc_apicaddr);
|
|
||||||
|
|
||||||
if (bad_ioapic(m->mpc_apicaddr))
|
|
||||||
return;
|
|
||||||
|
|
||||||
mp_ioapics[nr_ioapics] = *m;
|
|
||||||
nr_ioapics++;
|
|
||||||
}
|
|
||||||
|
|
||||||
static void __init MP_intsrc_info (struct mpc_config_intsrc *m)
|
|
||||||
{
|
|
||||||
mp_irqs [mp_irq_entries] = *m;
|
|
||||||
Dprintk("Int: type %d, pol %d, trig %d, bus %d,"
|
|
||||||
" IRQ %02x, APIC ID %x, APIC INT %02x\n",
|
|
||||||
m->mpc_irqtype, m->mpc_irqflag & 3,
|
|
||||||
(m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
|
|
||||||
m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq);
|
|
||||||
if (++mp_irq_entries >= MAX_IRQ_SOURCES)
|
|
||||||
panic("Max # of irq sources exceeded!!\n");
|
|
||||||
}
|
|
||||||
|
|
||||||
static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m)
|
|
||||||
{
|
|
||||||
Dprintk("Lint: type %d, pol %d, trig %d, bus %d,"
|
|
||||||
" IRQ %02x, APIC ID %x, APIC LINT %02x\n",
|
|
||||||
m->mpc_irqtype, m->mpc_irqflag & 3,
|
|
||||||
(m->mpc_irqflag >> 2) &3, m->mpc_srcbusid,
|
|
||||||
m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint);
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Read/parse the MPC
|
|
||||||
*/
|
|
||||||
|
|
||||||
static int __init smp_read_mpc(struct mp_config_table *mpc)
|
|
||||||
{
|
|
||||||
char str[16];
|
|
||||||
int count=sizeof(*mpc);
|
|
||||||
unsigned char *mpt=((unsigned char *)mpc)+count;
|
|
||||||
|
|
||||||
if (memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) {
|
|
||||||
printk("MPTABLE: bad signature [%c%c%c%c]!\n",
|
|
||||||
mpc->mpc_signature[0],
|
|
||||||
mpc->mpc_signature[1],
|
|
||||||
mpc->mpc_signature[2],
|
|
||||||
mpc->mpc_signature[3]);
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) {
|
|
||||||
printk("MPTABLE: checksum error!\n");
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) {
|
|
||||||
printk(KERN_ERR "MPTABLE: bad table version (%d)!!\n",
|
|
||||||
mpc->mpc_spec);
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
if (!mpc->mpc_lapic) {
|
|
||||||
printk(KERN_ERR "MPTABLE: null local APIC address!\n");
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
memcpy(str,mpc->mpc_oem,8);
|
|
||||||
str[8] = 0;
|
|
||||||
printk(KERN_INFO "MPTABLE: OEM ID: %s ",str);
|
|
||||||
|
|
||||||
memcpy(str,mpc->mpc_productid,12);
|
|
||||||
str[12] = 0;
|
|
||||||
printk("MPTABLE: Product ID: %s ",str);
|
|
||||||
|
|
||||||
printk("MPTABLE: APIC at: 0x%X\n",mpc->mpc_lapic);
|
|
||||||
|
|
||||||
/* save the local APIC address, it might be non-default */
|
|
||||||
if (!acpi_lapic)
|
|
||||||
mp_lapic_addr = mpc->mpc_lapic;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Now process the configuration blocks.
|
|
||||||
*/
|
|
||||||
while (count < mpc->mpc_length) {
|
|
||||||
switch(*mpt) {
|
|
||||||
case MP_PROCESSOR:
|
|
||||||
{
|
|
||||||
struct mpc_config_processor *m=
|
|
||||||
(struct mpc_config_processor *)mpt;
|
|
||||||
if (!acpi_lapic)
|
|
||||||
MP_processor_info(m);
|
|
||||||
mpt += sizeof(*m);
|
|
||||||
count += sizeof(*m);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
case MP_BUS:
|
|
||||||
{
|
|
||||||
struct mpc_config_bus *m=
|
|
||||||
(struct mpc_config_bus *)mpt;
|
|
||||||
MP_bus_info(m);
|
|
||||||
mpt += sizeof(*m);
|
|
||||||
count += sizeof(*m);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
case MP_IOAPIC:
|
|
||||||
{
|
|
||||||
struct mpc_config_ioapic *m=
|
|
||||||
(struct mpc_config_ioapic *)mpt;
|
|
||||||
MP_ioapic_info(m);
|
|
||||||
mpt += sizeof(*m);
|
|
||||||
count += sizeof(*m);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
case MP_INTSRC:
|
|
||||||
{
|
|
||||||
struct mpc_config_intsrc *m=
|
|
||||||
(struct mpc_config_intsrc *)mpt;
|
|
||||||
|
|
||||||
MP_intsrc_info(m);
|
|
||||||
mpt += sizeof(*m);
|
|
||||||
count += sizeof(*m);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
case MP_LINTSRC:
|
|
||||||
{
|
|
||||||
struct mpc_config_lintsrc *m=
|
|
||||||
(struct mpc_config_lintsrc *)mpt;
|
|
||||||
MP_lintsrc_info(m);
|
|
||||||
mpt += sizeof(*m);
|
|
||||||
count += sizeof(*m);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
setup_apic_routing();
|
|
||||||
if (!num_processors)
|
|
||||||
printk(KERN_ERR "MPTABLE: no processors registered!\n");
|
|
||||||
return num_processors;
|
|
||||||
}
|
|
||||||
|
|
||||||
static int __init ELCR_trigger(unsigned int irq)
|
|
||||||
{
|
|
||||||
unsigned int port;
|
|
||||||
|
|
||||||
port = 0x4d0 + (irq >> 3);
|
|
||||||
return (inb(port) >> (irq & 7)) & 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
static void __init construct_default_ioirq_mptable(int mpc_default_type)
|
|
||||||
{
|
|
||||||
struct mpc_config_intsrc intsrc;
|
|
||||||
int i;
|
|
||||||
int ELCR_fallback = 0;
|
|
||||||
|
|
||||||
intsrc.mpc_type = MP_INTSRC;
|
|
||||||
intsrc.mpc_irqflag = 0; /* conforming */
|
|
||||||
intsrc.mpc_srcbus = 0;
|
|
||||||
intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid;
|
|
||||||
|
|
||||||
intsrc.mpc_irqtype = mp_INT;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* If true, we have an ISA/PCI system with no IRQ entries
|
|
||||||
* in the MP table. To prevent the PCI interrupts from being set up
|
|
||||||
* incorrectly, we try to use the ELCR. The sanity check to see if
|
|
||||||
* there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
|
|
||||||
* never be level sensitive, so we simply see if the ELCR agrees.
|
|
||||||
* If it does, we assume it's valid.
|
|
||||||
*/
|
|
||||||
if (mpc_default_type == 5) {
|
|
||||||
printk(KERN_INFO "ISA/PCI bus type with no IRQ information... falling back to ELCR\n");
|
|
||||||
|
|
||||||
if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13))
|
|
||||||
printk(KERN_ERR "ELCR contains invalid data... not using ELCR\n");
|
|
||||||
else {
|
|
||||||
printk(KERN_INFO "Using ELCR to identify PCI interrupts\n");
|
|
||||||
ELCR_fallback = 1;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for (i = 0; i < 16; i++) {
|
|
||||||
switch (mpc_default_type) {
|
|
||||||
case 2:
|
|
||||||
if (i == 0 || i == 13)
|
|
||||||
continue; /* IRQ0 & IRQ13 not connected */
|
|
||||||
/* fall through */
|
|
||||||
default:
|
|
||||||
if (i == 2)
|
|
||||||
continue; /* IRQ2 is never connected */
|
|
||||||
}
|
|
||||||
|
|
||||||
if (ELCR_fallback) {
|
|
||||||
/*
|
|
||||||
* If the ELCR indicates a level-sensitive interrupt, we
|
|
||||||
* copy that information over to the MP table in the
|
|
||||||
* irqflag field (level sensitive, active high polarity).
|
|
||||||
*/
|
|
||||||
if (ELCR_trigger(i))
|
|
||||||
intsrc.mpc_irqflag = 13;
|
|
||||||
else
|
|
||||||
intsrc.mpc_irqflag = 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
intsrc.mpc_srcbusirq = i;
|
|
||||||
intsrc.mpc_dstirq = i ? i : 2; /* IRQ0 to INTIN2 */
|
|
||||||
MP_intsrc_info(&intsrc);
|
|
||||||
}
|
|
||||||
|
|
||||||
intsrc.mpc_irqtype = mp_ExtINT;
|
|
||||||
intsrc.mpc_srcbusirq = 0;
|
|
||||||
intsrc.mpc_dstirq = 0; /* 8259A to INTIN0 */
|
|
||||||
MP_intsrc_info(&intsrc);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void __init construct_default_ISA_mptable(int mpc_default_type)
|
|
||||||
{
|
|
||||||
struct mpc_config_processor processor;
|
|
||||||
struct mpc_config_bus bus;
|
|
||||||
struct mpc_config_ioapic ioapic;
|
|
||||||
struct mpc_config_lintsrc lintsrc;
|
|
||||||
int linttypes[2] = { mp_ExtINT, mp_NMI };
|
|
||||||
int i;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* local APIC has default address
|
|
||||||
*/
|
|
||||||
mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* 2 CPUs, numbered 0 & 1.
|
|
||||||
*/
|
|
||||||
processor.mpc_type = MP_PROCESSOR;
|
|
||||||
processor.mpc_apicver = 0;
|
|
||||||
processor.mpc_cpuflag = CPU_ENABLED;
|
|
||||||
processor.mpc_cpufeature = 0;
|
|
||||||
processor.mpc_featureflag = 0;
|
|
||||||
processor.mpc_reserved[0] = 0;
|
|
||||||
processor.mpc_reserved[1] = 0;
|
|
||||||
for (i = 0; i < 2; i++) {
|
|
||||||
processor.mpc_apicid = i;
|
|
||||||
MP_processor_info(&processor);
|
|
||||||
}
|
|
||||||
|
|
||||||
bus.mpc_type = MP_BUS;
|
|
||||||
bus.mpc_busid = 0;
|
|
||||||
switch (mpc_default_type) {
|
|
||||||
default:
|
|
||||||
printk(KERN_ERR "???\nUnknown standard configuration %d\n",
|
|
||||||
mpc_default_type);
|
|
||||||
/* fall through */
|
|
||||||
case 1:
|
|
||||||
case 5:
|
|
||||||
memcpy(bus.mpc_bustype, "ISA ", 6);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
MP_bus_info(&bus);
|
|
||||||
if (mpc_default_type > 4) {
|
|
||||||
bus.mpc_busid = 1;
|
|
||||||
memcpy(bus.mpc_bustype, "PCI ", 6);
|
|
||||||
MP_bus_info(&bus);
|
|
||||||
}
|
|
||||||
|
|
||||||
ioapic.mpc_type = MP_IOAPIC;
|
|
||||||
ioapic.mpc_apicid = 2;
|
|
||||||
ioapic.mpc_apicver = 0;
|
|
||||||
ioapic.mpc_flags = MPC_APIC_USABLE;
|
|
||||||
ioapic.mpc_apicaddr = 0xFEC00000;
|
|
||||||
MP_ioapic_info(&ioapic);
|
|
||||||
|
|
||||||
/*
|
|
||||||
* We set up most of the low 16 IO-APIC pins according to MPS rules.
|
|
||||||
*/
|
|
||||||
construct_default_ioirq_mptable(mpc_default_type);
|
|
||||||
|
|
||||||
lintsrc.mpc_type = MP_LINTSRC;
|
|
||||||
lintsrc.mpc_irqflag = 0; /* conforming */
|
|
||||||
lintsrc.mpc_srcbusid = 0;
|
|
||||||
lintsrc.mpc_srcbusirq = 0;
|
|
||||||
lintsrc.mpc_destapic = MP_APIC_ALL;
|
|
||||||
for (i = 0; i < 2; i++) {
|
|
||||||
lintsrc.mpc_irqtype = linttypes[i];
|
|
||||||
lintsrc.mpc_destapiclint = i;
|
|
||||||
MP_lintsrc_info(&lintsrc);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
static struct intel_mp_floating *mpf_found;

/*
 * Scan the memory blocks for an SMP configuration block.
 */
void __init get_smp_config (void)
{
	struct intel_mp_floating *mpf = mpf_found;

	/*
	 * ACPI supports both logical (e.g. Hyper-Threading) and physical
	 * processors, where MPS only supports physical.
	 */
	if (acpi_lapic && acpi_ioapic) {
		printk(KERN_INFO "Using ACPI (MADT) for SMP configuration information\n");
		return;
	}
	else if (acpi_lapic)
		printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n");

	printk("Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification);

	/*
	 * Now see if we need to read further.
	 */
	if (mpf->mpf_feature1 != 0) {

		printk(KERN_INFO "Default MP configuration #%d\n", mpf->mpf_feature1);
		construct_default_ISA_mptable(mpf->mpf_feature1);

	} else if (mpf->mpf_physptr) {

		/*
		 * Read the physical hardware table.  Anything here will
		 * override the defaults.
		 */
		if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr))) {
			smp_found_config = 0;
			printk(KERN_ERR "BIOS bug, MP table errors detected!...\n");
			printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n");
			return;
		}
		/*
		 * If there are no explicit MP IRQ entries, then we are
		 * broken.  We set up most of the low 16 IO-APIC pins to
		 * ISA defaults and hope it will work.
		 */
		if (!mp_irq_entries) {
			struct mpc_config_bus bus;

			printk(KERN_ERR "BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");

			bus.mpc_type = MP_BUS;
			bus.mpc_busid = 0;
			memcpy(bus.mpc_bustype, "ISA   ", 6);
			MP_bus_info(&bus);

			construct_default_ioirq_mptable(0);
		}

	} else
		BUG();

	printk(KERN_INFO "Processors: %d\n", num_processors);
	/*
	 * Only use the first configuration found.
	 */
}

static int __init smp_scan_config (unsigned long base, unsigned long length)
{
	extern void __bad_mpf_size(void);
	unsigned int *bp = phys_to_virt(base);
	struct intel_mp_floating *mpf;

	Dprintk("Scan SMP from %p for %ld bytes.\n", bp, length);
	if (sizeof(*mpf) != 16)
		__bad_mpf_size();

	while (length > 0) {
		mpf = (struct intel_mp_floating *)bp;
		if ((*bp == SMP_MAGIC_IDENT) &&
		    (mpf->mpf_length == 1) &&
		    !mpf_checksum((unsigned char *)bp, 16) &&
		    ((mpf->mpf_specification == 1)
		     || (mpf->mpf_specification == 4))) {

			smp_found_config = 1;
			reserve_bootmem_generic(virt_to_phys(mpf), PAGE_SIZE);
			if (mpf->mpf_physptr)
				reserve_bootmem_generic(mpf->mpf_physptr, PAGE_SIZE);
			mpf_found = mpf;
			return 1;
		}
		bp += 4;
		length -= 16;
	}
	return 0;
}

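As a standalone sketch of what the scan above accepts: the MP floating pointer structure is 16 bytes whose first four bytes spell the "_MP_" signature and whose bytes sum to zero modulo 256, which is why !mpf_checksum(bp, 16) is used as the validity test. The buffer contents below are made up; only the signature and the sum-to-zero property matter for the demo, and which byte absorbs the correction is irrelevant here (the real structure keeps it in a dedicated checksum field).

#include <stdio.h>
#include <string.h>

/* Returns 0 when the 16 bytes sum to zero modulo 256. */
static int mpf_checksum(const unsigned char *p, int len)
{
	int sum = 0;

	while (len--)
		sum += *p++;
	return sum & 0xFF;
}

int main(void)
{
	unsigned char mpf[16] = { '_', 'M', 'P', '_' };	/* rest zeroed */
	unsigned int sum = 0, i;

	for (i = 0; i < 15; i++)
		sum += mpf[i];
	mpf[15] = (unsigned char)(0x100 - (sum & 0xFF));	/* make the sum wrap to 0 */

	printf("signature ok: %d\n", memcmp(mpf, "_MP_", 4) == 0);
	printf("checksum  ok: %d\n", mpf_checksum(mpf, 16) == 0);
	return 0;
}
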
void __init find_smp_config(void)
{
	unsigned int address;

	/*
	 * FIXME: Linux assumes you have 640K of base ram..
	 * this continues the error...
	 *
	 * 1) Scan the bottom 1K for a signature
	 * 2) Scan the top 1K of base RAM
	 * 3) Scan the 64K of bios
	 */
	if (smp_scan_config(0x0, 0x400) ||
	    smp_scan_config(639 * 0x400, 0x400) ||
	    smp_scan_config(0xF0000, 0x10000))
		return;
	/*
	 * If it is an SMP machine we should know now.
	 *
	 * there is a real-mode segmented pointer pointing to the
	 * 4K EBDA area at 0x40E, calculate and scan it here.
	 *
	 * NOTE! There are Linux loaders that will corrupt the EBDA
	 * area, and as such this kind of SMP config may be less
	 * trustworthy, simply because the SMP table may have been
	 * stomped on during early boot. These loaders are buggy and
	 * should be fixed.
	 */

	address = *(unsigned short *)phys_to_virt(0x40E);
	address <<= 4;
	if (smp_scan_config(address, 0x1000))
		return;

	/* If we have come this far, we did not find an MP table */
	printk(KERN_INFO "No mptable found.\n");
}

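The EBDA lookup above reads a real-mode segment from the BIOS data area word at physical 0x40E and shifts it left by four to get a physical address, which is then scanned. A minimal arithmetic demo of that conversion, with an example segment value chosen only for illustration:

#include <stdio.h>

int main(void)
{
	/* Word at physical 0x40E: the EBDA's real-mode segment. */
	unsigned short ebda_segment = 0x9FC0;		/* example value */
	unsigned long ebda_address = (unsigned long)ebda_segment << 4;

	printf("EBDA at 0x%05lx\n", ebda_address);	/* prints 0x9fc00 */
	return 0;
}
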
/* --------------------------------------------------------------------------
                            ACPI-based MP Configuration
   -------------------------------------------------------------------------- */

#ifdef CONFIG_ACPI

void __init mp_register_lapic_address(u64 address)
{
	mp_lapic_addr = (unsigned long) address;
	set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
	if (boot_cpu_id == -1U)
		boot_cpu_id = GET_APIC_ID(apic_read(APIC_ID));
}

void __cpuinit mp_register_lapic (u8 id, u8 enabled)
{
	struct mpc_config_processor processor;
	int boot_cpu = 0;

	if (id == boot_cpu_id)
		boot_cpu = 1;

	processor.mpc_type = MP_PROCESSOR;
	processor.mpc_apicid = id;
	processor.mpc_apicver = 0;
	processor.mpc_cpuflag = (enabled ? CPU_ENABLED : 0);
	processor.mpc_cpuflag |= (boot_cpu ? CPU_BOOTPROCESSOR : 0);
	processor.mpc_cpufeature = 0;
	processor.mpc_featureflag = 0;
	processor.mpc_reserved[0] = 0;
	processor.mpc_reserved[1] = 0;

	MP_processor_info(&processor);
}

#define MP_ISA_BUS		0
#define MP_MAX_IOAPIC_PIN	127

static struct mp_ioapic_routing {
	int apic_id;
	int gsi_start;
	int gsi_end;
	u32 pin_programmed[4];
} mp_ioapic_routing[MAX_IO_APICS];

static int mp_find_ioapic(int gsi)
{
	int i = 0;

	/* Find the IOAPIC that manages this GSI. */
	for (i = 0; i < nr_ioapics; i++) {
		if ((gsi >= mp_ioapic_routing[i].gsi_start)
		    && (gsi <= mp_ioapic_routing[i].gsi_end))
			return i;
	}

	printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
	return -1;
}

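A userspace sketch of the lookup that mp_find_ioapic() performs against the routing table built by mp_register_ioapic(): each IO-APIC owns a contiguous window of global system interrupts, and the pin number is the offset of the GSI inside that window. The two-IOAPIC table and the GSI value below are invented for the demo.

#include <stdio.h>

struct routing {
	int apic_id;
	int gsi_start;
	int gsi_end;
};

static const struct routing table[] = {
	{ 2,  0, 23 },	/* IOAPIC 2 handles GSI 0-23  */
	{ 3, 24, 47 },	/* IOAPIC 3 handles GSI 24-47 */
};

static int find_ioapic(int gsi)
{
	unsigned int i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (gsi >= table[i].gsi_start && gsi <= table[i].gsi_end)
			return i;
	return -1;
}

int main(void)
{
	int gsi = 30;
	int idx = find_ioapic(gsi);

	if (idx >= 0)
		printf("GSI %d -> IOAPIC %d, pin %d\n",
		       gsi, table[idx].apic_id, gsi - table[idx].gsi_start);
	return 0;
}
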
static u8 uniq_ioapic_id(u8 id)
{
	int i;
	DECLARE_BITMAP(used, 256);
	bitmap_zero(used, 256);
	for (i = 0; i < nr_ioapics; i++) {
		struct mpc_config_ioapic *ia = &mp_ioapics[i];
		__set_bit(ia->mpc_apicid, used);
	}
	if (!test_bit(id, used))
		return id;
	return find_first_zero_bit(used, 256);
}

void __init mp_register_ioapic(u8 id, u32 address, u32 gsi_base)
{
	int idx = 0;

	if (bad_ioapic(address))
		return;

	idx = nr_ioapics;

	mp_ioapics[idx].mpc_type = MP_IOAPIC;
	mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE;
	mp_ioapics[idx].mpc_apicaddr = address;

	set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
	mp_ioapics[idx].mpc_apicid = uniq_ioapic_id(id);
	mp_ioapics[idx].mpc_apicver = 0;

	/*
	 * Build basic IRQ lookup table to facilitate gsi->io_apic lookups
	 * and to prevent reprogramming of IOAPIC pins (PCI IRQs).
	 */
	mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid;
	mp_ioapic_routing[idx].gsi_start = gsi_base;
	mp_ioapic_routing[idx].gsi_end = gsi_base +
	    io_apic_get_redir_entries(idx);

	printk(KERN_INFO "IOAPIC[%d]: apic_id %d, address 0x%x, "
	       "GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid,
	       mp_ioapics[idx].mpc_apicaddr,
	       mp_ioapic_routing[idx].gsi_start,
	       mp_ioapic_routing[idx].gsi_end);

	nr_ioapics++;
}

void __init
mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
{
	struct mpc_config_intsrc intsrc;
	int ioapic = -1;
	int pin = -1;

	/*
	 * Convert 'gsi' to 'ioapic.pin'.
	 */
	ioapic = mp_find_ioapic(gsi);
	if (ioapic < 0)
		return;
	pin = gsi - mp_ioapic_routing[ioapic].gsi_start;

	/*
	 * TBD: This check is for faulty timer entries, where the override
	 *      erroneously sets the trigger to level, resulting in a HUGE
	 *      increase of timer interrupts!
	 */
	if ((bus_irq == 0) && (trigger == 3))
		trigger = 1;

	intsrc.mpc_type = MP_INTSRC;
	intsrc.mpc_irqtype = mp_INT;
	intsrc.mpc_irqflag = (trigger << 2) | polarity;
	intsrc.mpc_srcbus = MP_ISA_BUS;
	intsrc.mpc_srcbusirq = bus_irq;				/* IRQ */
	intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;	/* APIC ID */
	intsrc.mpc_dstirq = pin;				/* INTIN# */

	Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n",
		intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
		(intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
		intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq);

	mp_irqs[mp_irq_entries] = intsrc;
	if (++mp_irq_entries == MAX_IRQ_SOURCES)
		panic("Max # of irq sources exceeded!\n");
}

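A small sketch of the irqflag packing used above and in the Dprintk decode: bits 0-1 carry the polarity and bits 2-3 the trigger mode, where for both fields 0 means "conforms to the bus", 1 means active high / edge, and 3 means active low / level. That is why the ELCR fallback earlier stores 13 (binary 1101, level trigger with active-high polarity). The helper name below is invented for the demo.

#include <stdio.h>

static unsigned char pack_irqflag(unsigned int trigger, unsigned int polarity)
{
	return (unsigned char)((trigger << 2) | polarity);
}

int main(void)
{
	unsigned char flag = pack_irqflag(3, 1);	/* level, active high */

	printf("irqflag = %u (polarity %u, trigger %u)\n",
	       flag, flag & 3, (flag >> 2) & 3);	/* prints 13 (1, 3) */
	return 0;
}
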
void __init mp_config_acpi_legacy_irqs(void)
{
	struct mpc_config_intsrc intsrc;
	int i = 0;
	int ioapic = -1;

	/*
	 * Fabricate the legacy ISA bus (bus #31).
	 */
	set_bit(MP_ISA_BUS, mp_bus_not_pci);

	/*
	 * Locate the IOAPIC that manages the ISA IRQs (0-15).
	 */
	ioapic = mp_find_ioapic(0);
	if (ioapic < 0)
		return;

	intsrc.mpc_type = MP_INTSRC;
	intsrc.mpc_irqflag = 0;					/* Conforming */
	intsrc.mpc_srcbus = MP_ISA_BUS;
	intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;

	/*
	 * Use the default configuration for the IRQs 0-15.  Unless
	 * overridden by (MADT) interrupt source override entries.
	 */
	for (i = 0; i < 16; i++) {
		int idx;

		for (idx = 0; idx < mp_irq_entries; idx++) {
			struct mpc_config_intsrc *irq = mp_irqs + idx;

			/* Do we already have a mapping for this ISA IRQ? */
			if (irq->mpc_srcbus == MP_ISA_BUS && irq->mpc_srcbusirq == i)
				break;

			/* Do we already have a mapping for this IOAPIC pin */
			if ((irq->mpc_dstapic == intsrc.mpc_dstapic) &&
			    (irq->mpc_dstirq == i))
				break;
		}

		if (idx != mp_irq_entries) {
			printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i);
			continue;			/* IRQ already used */
		}

		intsrc.mpc_irqtype = mp_INT;
		intsrc.mpc_srcbusirq = i;		/* Identity mapped */
		intsrc.mpc_dstirq = i;

		Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, "
			"%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
			(intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
			intsrc.mpc_srcbusirq, intsrc.mpc_dstapic,
			intsrc.mpc_dstirq);

		mp_irqs[mp_irq_entries] = intsrc;
		if (++mp_irq_entries == MAX_IRQ_SOURCES)
			panic("Max # of irq sources exceeded!\n");
	}
}

int mp_register_gsi(u32 gsi, int triggering, int polarity)
{
	int ioapic = -1;
	int ioapic_pin = 0;
	int idx, bit = 0;

	if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC)
		return gsi;

	/* Don't set up the ACPI SCI because it's already set up */
	if (acpi_gbl_FADT.sci_interrupt == gsi)
		return gsi;

	ioapic = mp_find_ioapic(gsi);
	if (ioapic < 0) {
		printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi);
		return gsi;
	}

	ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_start;

	/*
	 * Avoid pin reprogramming.  PRTs typically include entries
	 * with redundant pin->gsi mappings (but unique PCI devices);
	 * we only program the IOAPIC on the first.
	 */
	bit = ioapic_pin % 32;
	idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32);
	if (idx > 3) {
		printk(KERN_ERR "Invalid reference to IOAPIC pin "
		       "%d-%d\n", mp_ioapic_routing[ioapic].apic_id,
		       ioapic_pin);
		return gsi;
	}
	if ((1 << bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
		Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
			mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
		return gsi;
	}

	mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1 << bit);

	io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
				triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
				polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
	return gsi;
}
#endif /*CONFIG_ACPI*/

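The pin_programmed array in mp_register_gsi() is a small bitmap: four 32-bit words cover up to 128 pins, and a pin is only programmed the first time its bit is found clear, which is how redundant PRT entries for the same pin are skipped. A self-contained sketch of that bookkeeping, with an invented sequence of pins standing in for the redundant PRT entries:

#include <stdio.h>

int main(void)
{
	unsigned int pin_programmed[4] = { 0, 0, 0, 0 };
	int pins[] = { 16, 16, 40, 16 };	/* redundant pin->gsi mappings */
	unsigned int i;

	for (i = 0; i < sizeof(pins) / sizeof(pins[0]); i++) {
		int pin = pins[i];
		int idx = pin / 32;
		int bit = pin % 32;

		if (pin_programmed[idx] & (1u << bit)) {
			printf("pin %d already programmed, skipping\n", pin);
			continue;
		}
		pin_programmed[idx] |= 1u << bit;
		printf("programming pin %d\n", pin);
	}
	return 0;
}
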
@@ -22,9 +22,11 @@
 #include <linux/cpumask.h>
 #include <linux/kernel_stat.h>
 #include <linux/kdebug.h>
+#include <linux/slab.h>
 
 #include <asm/smp.h>
 #include <asm/nmi.h>
+#include <asm/timer.h>
 
 #include "mach_traps.h"
 
@@ -67,7 +69,7 @@ static __init void nmi_cpu_busy(void *data)
 }
 #endif
 
-static int __init check_nmi_watchdog(void)
+int __init check_nmi_watchdog(void)
 {
 	unsigned int *prev_nmi_count;
 	int cpu;
@@ -80,7 +82,7 @@ static int __init check_nmi_watchdog(void)
 
 	prev_nmi_count = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL);
 	if (!prev_nmi_count)
-		return -1;
+		goto error;
 
 	printk(KERN_INFO "Testing NMI watchdog ... ");
 
@@ -117,7 +119,7 @@ static int __init check_nmi_watchdog(void)
 	if (!atomic_read(&nmi_active)) {
 		kfree(prev_nmi_count);
 		atomic_set(&nmi_active, -1);
-		return -1;
+		goto error;
 	}
 	printk("OK.\n");
 
@@ -128,9 +130,11 @@ static int __init check_nmi_watchdog(void)
 
 	kfree(prev_nmi_count);
 	return 0;
+error:
+	timer_ack = !cpu_has_tsc;
+
+	return -1;
 }
-/* This needs to happen later in boot so counters are working */
-late_initcall(check_nmi_watchdog);
 
 static int __init setup_nmi_watchdog(char *str)
 {
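The hunks above replace scattered "return -1" statements with jumps to a single error label so that the shared fallback (restoring timer_ack) runs on every failure path. A generic, self-contained sketch of that single-exit cleanup pattern; all names and the simulated failure are invented for the demo and are not the kernel's code:

#include <stdio.h>
#include <stdlib.h>

static int flag_enabled = 1;

static int setup_watchdog(int simulate_failure)
{
	int *counts;

	counts = malloc(16 * sizeof(*counts));
	if (!counts)
		goto error;

	if (simulate_failure)
		goto error_free;

	free(counts);
	return 0;

error_free:
	free(counts);
error:
	flag_enabled = 0;	/* shared fallback on every failure path */
	return -1;
}

int main(void)
{
	printf("result %d, flag %d\n", setup_watchdog(1), flag_enabled);
	return 0;
}
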
@@ -26,6 +26,8 @@
 #include <asm/proto.h>
 #include <asm/mce.h>
 
+#include <mach_traps.h>
+
 int unknown_nmi_panic;
 int nmi_watchdog_enabled;
 int panic_on_unrecovered_nmi;

@@ -206,13 +206,6 @@ static struct resource reserve_ioports = {
 	.flags = IORESOURCE_IO | IORESOURCE_BUSY,
 };
 
-static struct resource reserve_iomem = {
-	.start = 0,
-	.end = -1,
-	.name = "paravirt-iomem",
-	.flags = IORESOURCE_MEM | IORESOURCE_BUSY,
-};
-
 /*
  * Reserve the whole legacy IO space to prevent any legacy drivers
  * from wasting time probing for their hardware.  This is a fairly
@@ -222,16 +215,7 @@ static struct resource reserve_iomem = {
  */
 int paravirt_disable_iospace(void)
 {
-	int ret;
-
-	ret = request_resource(&ioport_resource, &reserve_ioports);
-	if (ret == 0) {
-		ret = request_resource(&iomem_resource, &reserve_iomem);
-		if (ret)
-			release_resource(&reserve_ioports);
-	}
-
-	return ret;
+	return request_resource(&ioport_resource, &reserve_ioports);
 }
 
 static DEFINE_PER_CPU(enum paravirt_lazy_mode, paravirt_lazy_mode) = PARAVIRT_LAZY_NONE;
@@ -113,20 +113,13 @@ void default_idle(void)
 
 		local_irq_disable();
 		if (!need_resched()) {
-			ktime_t t0, t1;
-			u64 t0n, t1n;
-
-			t0 = ktime_get();
-			t0n = ktime_to_ns(t0);
 			safe_halt();	/* enables interrupts racelessly */
 			local_irq_disable();
-			t1 = ktime_get();
-			t1n = ktime_to_ns(t1);
-			sched_clock_idle_wakeup_event(t1n - t0n);
 		}
 		local_irq_enable();
 		current_thread_info()->status |= TS_POLLING;
 	} else {
+		local_irq_enable();
 		/* loop is done by the caller */
 		cpu_relax();
 	}

@@ -142,6 +135,7 @@ EXPORT_SYMBOL(default_idle);
  */
 static void poll_idle(void)
 {
+	local_irq_enable();
 	cpu_relax();
 }
 

@@ -248,8 +242,11 @@ void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
 		smp_mb();
 		if (!need_resched())
-			__mwait(ax, cx);
-	}
+			__sti_mwait(ax, cx);
+		else
+			local_irq_enable();
+	} else
+		local_irq_enable();
 }
 
 /* Default MONITOR/MWAIT with no hints, used for default C1 state */

@@ -332,7 +329,7 @@ void __show_registers(struct pt_regs *regs, int all)
 		init_utsname()->version);
 
 	printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
-			0xffff & regs->cs, regs->ip, regs->flags,
+			(u16)regs->cs, regs->ip, regs->flags,
 			smp_processor_id());
 	print_symbol("EIP is at %s\n", regs->ip);
 

@@ -341,8 +338,7 @@ void __show_registers(struct pt_regs *regs, int all)
 	printk("ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
 		regs->si, regs->di, regs->bp, sp);
 	printk(" DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n",
-	       regs->ds & 0xffff, regs->es & 0xffff,
-	       regs->fs & 0xffff, gs, ss);
+	       (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss);
 
 	if (!all)
 		return;

@@ -513,6 +509,21 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
 	return err;
 }
 
+void
+start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
+{
+	__asm__("movl %0, %%gs" :: "r"(0));
+	regs->fs = 0;
+	set_fs(USER_DS);
+	regs->ds = __USER_DS;
+	regs->es = __USER_DS;
+	regs->ss = __USER_DS;
+	regs->cs = __USER_CS;
+	regs->ip = new_ip;
+	regs->sp = new_sp;
+}
+EXPORT_SYMBOL_GPL(start_thread);
+
 #ifdef CONFIG_SECCOMP
 static void hard_disable_TSC(void)
 {

@@ -550,12 +561,12 @@ __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 		/* we clear debugctl to make sure DS
 		 * is not in use when we change it */
 		debugctl = 0;
-		wrmsrl(MSR_IA32_DEBUGCTLMSR, 0);
+		update_debugctlmsr(0);
 		wrmsr(MSR_IA32_DS_AREA, next->ds_area_msr, 0);
 	}
 
 	if (next->debugctlmsr != debugctl)
-		wrmsr(MSR_IA32_DEBUGCTLMSR, next->debugctlmsr, 0);
+		update_debugctlmsr(next->debugctlmsr);
 
 	if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
 		set_debugreg(next->debugreg0, 0);
@@ -107,16 +107,8 @@ void default_idle(void)
 	smp_mb();
 	local_irq_disable();
 	if (!need_resched()) {
-		ktime_t t0, t1;
-		u64 t0n, t1n;
-
-		t0 = ktime_get();
-		t0n = ktime_to_ns(t0);
 		safe_halt();	/* enables interrupts racelessly */
 		local_irq_disable();
-		t1 = ktime_get();
-		t1n = ktime_to_ns(t1);
-		sched_clock_idle_wakeup_event(t1n - t0n);
 	}
 	local_irq_enable();
 	current_thread_info()->status |= TS_POLLING;

@@ -528,6 +520,21 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
 	return err;
 }
 
+void
+start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
+{
+	asm volatile("movl %0, %%fs; movl %0, %%es; movl %0, %%ds" :: "r"(0));
+	load_gs_index(0);
+	regs->ip = new_ip;
+	regs->sp = new_sp;
+	write_pda(oldrsp, new_sp);
+	regs->cs = __USER_CS;
+	regs->ss = __USER_DS;
+	regs->flags = 0x200;
+	set_fs(USER_DS);
+}
+EXPORT_SYMBOL_GPL(start_thread);
+
 /*
  * This special macro can be used to load a debugging register
  */

@@ -548,12 +555,12 @@ static inline void __switch_to_xtra(struct task_struct *prev_p,
 		/* we clear debugctl to make sure DS
 		 * is not in use when we change it */
 		debugctl = 0;
-		wrmsrl(MSR_IA32_DEBUGCTLMSR, 0);
+		update_debugctlmsr(0);
 		wrmsrl(MSR_IA32_DS_AREA, next->ds_area_msr);
 	}
 
 	if (next->debugctlmsr != debugctl)
-		wrmsrl(MSR_IA32_DEBUGCTLMSR, next->debugctlmsr);
+		update_debugctlmsr(next->debugctlmsr);
 
 	if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
 		loaddebug(next, 0);
@@ -1456,7 +1456,6 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code)
 /* notification of system call entry/exit
  * - triggered by current->work.syscall_trace
  */
-__attribute__((regparm(3)))
 int do_syscall_trace(struct pt_regs *regs, int entryexit)
 {
 	int is_sysemu = test_thread_flag(TIF_SYSCALL_EMU);

@@ -1,5 +1,4 @@
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/reboot.h>
 #include <linux/init.h>
 #include <linux/pm.h>

@@ -412,12 +411,12 @@ static void native_machine_shutdown(void)
 #ifdef CONFIG_X86_32
 	/* See if there has been given a command line override */
 	if ((reboot_cpu != -1) && (reboot_cpu < NR_CPUS) &&
-	    cpu_isset(reboot_cpu, cpu_online_map))
+	    cpu_online(reboot_cpu))
 		reboot_cpu_id = reboot_cpu;
 #endif
 
 	/* Make certain the cpu I'm about to reboot on is online */
-	if (!cpu_isset(reboot_cpu_id, cpu_online_map))
+	if (!cpu_online(reboot_cpu_id))
 		reboot_cpu_id = smp_processor_id();
 
 	/* Make certain I only run on the appropriate processor */
@@ -9,18 +9,19 @@
 #include <linux/linkage.h>
 #include <asm/page.h>
 #include <asm/kexec.h>
+#include <asm/processor-flags.h>
+#include <asm/pgtable.h>
 
 /*
  * Must be relocatable PIC code callable as a C function
  */
 
 #define PTR(x) (x << 2)
-#define PAGE_ALIGNED (1 << PAGE_SHIFT)
-#define PAGE_ATTR 0x63 /* _PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY */
-#define PAE_PGD_ATTR 0x01 /* _PAGE_PRESENT */
+#define PAGE_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define PAE_PGD_ATTR (_PAGE_PRESENT)
 
 	.text
-	.align PAGE_ALIGNED
+	.align PAGE_SIZE
 	.globl relocate_kernel
 relocate_kernel:
 	movl	8(%esp), %ebp		/* list of pages */

@@ -155,7 +156,7 @@ relocate_new_kernel:
 	movl	%eax, %cr3
 
 	/* setup a new stack at the end of the physical control page */
-	lea	4096(%edi), %esp
+	lea	PAGE_SIZE(%edi), %esp
 
 	/* jump to identity mapped page */
 	movl	%edi, %eax

@@ -168,16 +169,16 @@ identity_mapped:
 	pushl	%edx
 
 	/* Set cr0 to a known state:
-	 * 31 0 == Paging disabled
-	 * 18 0 == Alignment check disabled
-	 * 16 0 == Write protect disabled
-	 *  3 0 == No task switch
-	 *  2 0 == Don't do FP software emulation.
-	 *  0 1 == Proctected mode enabled
+	 *  - Paging disabled
+	 *  - Alignment check disabled
+	 *  - Write protect disabled
+	 *  - No task switch
+	 *  - Don't do FP software emulation.
+	 *  - Proctected mode enabled
 	 */
 	movl	%cr0, %eax
-	andl	$~((1<<31)|(1<<18)|(1<<16)|(1<<3)|(1<<2)), %eax
-	orl	$(1<<0), %eax
+	andl	$~(X86_CR0_PG | X86_CR0_AM | X86_CR0_WP | X86_CR0_TS | X86_CR0_EM), %eax
+	orl	$(X86_CR0_PE), %eax
 	movl	%eax, %cr0
 
 	/* clear cr4 if applicable */

@@ -186,8 +187,7 @@ identity_mapped:
 	/* Set cr4 to a known state:
 	 * Setting everything to zero seems safe.
 	 */
-	movl	%cr4, %eax
-	andl	$0, %eax
+	xorl	%eax, %eax
 	movl	%eax, %cr4
 
 	jmp 1f
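The hunks above and below replace open-coded (1<<n) literals with the named control-register bits from asm/processor-flags.h. A small sanity-check program; the constants are defined locally here with the architectural CR0/CR4 bit positions so the demo compiles on its own, and the prints show the symbolic masks equal the old literals:

#include <stdio.h>

#define X86_CR0_PE  (1u << 0)
#define X86_CR0_EM  (1u << 2)
#define X86_CR0_TS  (1u << 3)
#define X86_CR0_WP  (1u << 16)
#define X86_CR0_AM  (1u << 18)
#define X86_CR0_PG  (1u << 31)
#define X86_CR4_PAE (1u << 5)

int main(void)
{
	unsigned int old_mask = (1u << 31) | (1u << 18) | (1u << 16) | (1u << 3) | (1u << 2);
	unsigned int new_mask = X86_CR0_PG | X86_CR0_AM | X86_CR0_WP | X86_CR0_TS | X86_CR0_EM;

	printf("CR0 clear mask: old 0x%08x, named 0x%08x, equal %d\n",
	       old_mask, new_mask, old_mask == new_mask);
	printf("CR4 PAE: old 0x%08x, named 0x%08x\n", 1u << 5, X86_CR4_PAE);
	return 0;
}
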
@@ -9,17 +9,18 @@
 #include <linux/linkage.h>
 #include <asm/page.h>
 #include <asm/kexec.h>
+#include <asm/processor-flags.h>
+#include <asm/pgtable.h>
 
 /*
  * Must be relocatable PIC code callable as a C function
 */
 
 #define PTR(x) (x << 3)
-#define PAGE_ALIGNED (1 << PAGE_SHIFT)
-#define PAGE_ATTR 0x63 /* _PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY */
+#define PAGE_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
 
 	.text
-	.align PAGE_ALIGNED
+	.align PAGE_SIZE
 	.code64
 	.globl relocate_kernel
 relocate_kernel:

@@ -160,7 +161,7 @@ relocate_new_kernel:
 	movq	%r9, %cr3
 
 	/* setup a new stack at the end of the physical control page */
-	lea	4096(%r8), %rsp
+	lea	PAGE_SIZE(%r8), %rsp
 
 	/* jump to identity mapped page */
 	addq	$(identity_mapped - relocate_kernel), %r8

@@ -172,33 +173,22 @@ identity_mapped:
 	pushq	%rdx
 
 	/* Set cr0 to a known state:
-	 * 31 1 == Paging enabled
-	 * 18 0 == Alignment check disabled
-	 * 16 0 == Write protect disabled
-	 *  3 0 == No task switch
-	 *  2 0 == Don't do FP software emulation.
-	 *  0 1 == Proctected mode enabled
+	 *  - Paging enabled
+	 *  - Alignment check disabled
+	 *  - Write protect disabled
+	 *  - No task switch
+	 *  - Don't do FP software emulation.
+	 *  - Proctected mode enabled
 	 */
 	movq	%cr0, %rax
-	andq	$~((1<<18)|(1<<16)|(1<<3)|(1<<2)), %rax
-	orl	$((1<<31)|(1<<0)), %eax
+	andq	$~(X86_CR0_AM | X86_CR0_WP | X86_CR0_TS | X86_CR0_EM), %rax
+	orl	$(X86_CR0_PG | X86_CR0_PE), %eax
 	movq	%rax, %cr0
 
 	/* Set cr4 to a known state:
-	 * 10 0 == xmm exceptions disabled
-	 *  9 0 == xmm registers instructions disabled
-	 *  8 0 == performance monitoring counter disabled
-	 *  7 0 == page global disabled
-	 *  6 0 == machine check exceptions disabled
-	 *  5 1 == physical address extension enabled
-	 *  4 0 == page size extensions disabled
-	 *  3 0 == Debug extensions disabled
-	 *  2 0 == Time stamp disable (disabled)
-	 *  1 0 == Protected mode virtual interrupts disabled
-	 *  0 0 == VME disabled
+	 *  - physical address extension enabled
 	 */
-	movq	$((1<<5)), %rax
+	movq	$X86_CR4_PAE, %rax
 	movq	%rax, %cr4
 
 	jmp 1f